https://github.com/torvalds/linux
Revision 7ef55b8a05c02db7c07d81827c69fe8f124e8654 authored by Srinivasa Ds on 03 November 2006, 06:07:12 UTC, committed by Linus Torvalds on 03 November 2006, 20:27:57 UTC
While performing some operations over NFS, I got the following error on the
server side.

  =============================================
  [ INFO: possible recursive locking detected ]
  2.6.19-prep #1
  ---------------------------------------------
  nfsd4/3525 is trying to acquire lock:
   (&inode->i_mutex){--..}, at: [<c0611e5a>] mutex_lock+0x21/0x24

  but task is already holding lock:
   (&inode->i_mutex){--..}, at: [<c0611e5a>] mutex_lock+0x21/0x24

  other info that might help us debug this:
  2 locks held by nfsd4/3525:
   #0:  (client_mutex){--..}, at: [<c0611e5a>] mutex_lock+0x21/0x24
   #1:  (&inode->i_mutex){--..}, at: [<c0611e5a>] mutex_lock+0x21/0x24

  stack backtrace:
   [<c04051ed>] show_trace_log_lvl+0x58/0x16a
   [<c04057fa>] show_trace+0xd/0x10
   [<c0405913>] dump_stack+0x19/0x1b
   [<c043b6f1>] __lock_acquire+0x778/0x99c
   [<c043be86>] lock_acquire+0x4b/0x6d
   [<c0611ceb>] __mutex_lock_slowpath+0xbc/0x20a
   [<c0611e5a>] mutex_lock+0x21/0x24
   [<c047fd7e>] vfs_rmdir+0x76/0xf8
   [<f94b7ce9>] nfsd4_clear_clid_dir+0x2c/0x41 [nfsd]
   [<f94b7de9>] nfsd4_remove_clid_dir+0xb1/0xe8 [nfsd]
   [<f94b307b>] laundromat_main+0x9b/0x1c3 [nfsd]
   [<c04333d6>] run_workqueue+0x7a/0xbb
   [<c0433d0b>] worker_thread+0xd2/0x107
   [<c0436285>] kthread+0xc3/0xf2
   [<c0402005>] kernel_thread_helper+0x5/0xb
  ===================================================================

The cause of this problem was two successive mutex_lock() calls on two different inodes, as shown below:

	static int
	nfsd4_clear_clid_dir(struct dentry *dir, struct dentry *dentry)
	{
	        int status;

	        /* For now this directory should already be empty, but we empty it of
	         * any regular files anyway, just in case the directory was created by
	         * a kernel from the future.... */
	        nfsd4_list_rec_dir(dentry, nfsd4_remove_clid_file);
	        mutex_lock(&dir->d_inode->i_mutex);
	        status = vfs_rmdir(dir->d_inode, dentry);
	...

	int vfs_rmdir(struct inode *dir, struct dentry *dentry)
	{
	        int error = may_delete(dir, dentry, 1);

	        if (error)
	                return error;

	        if (!dir->i_op || !dir->i_op->rmdir)
	                return -EPERM;

	        DQUOT_INIT(dir);

	        mutex_lock(&dentry->d_inode->i_mutex);
	...

So I have developed a patch to fix this problem.
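
The patch itself is not reproduced here. As a minimal sketch only (not
necessarily the exact change applied), the standard remedy for this class of
lockdep report is to annotate the outer lock with a distinct subclass, using
the I_MUTEX_PARENT subclass that <linux/fs.h> defines for parent-directory
i_mutex nesting:

	/* Sketch: mark the parent directory's i_mutex as a different
	 * lockdep nesting level from the child inode's i_mutex that
	 * vfs_rmdir() takes internally, so the two acquisitions are no
	 * longer reported as recursion on one lock class. */
	mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
	status = vfs_rmdir(dir->d_inode, dentry);
	mutex_unlock(&dir->d_inode->i_mutex);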

Signed-off-by: Srinivasa DS <srinivasa@in.ibm.com>
Cc: Neil Brown <neilb@suse.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
[PATCH] NFS4: fix for recursive locking problem

rwsem-spinlock.c
/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
 * generic spinlock implementation
 *
 * Copyright (c) 2001   David Howells (dhowells@redhat.com).
 * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
 * - Derived also from comments by Linus
 */
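
/*
 * sem->activity encodes the lock state (always accessed with
 * sem->wait_lock held):
 *    0  the semaphore is unowned
 *   >0  the number of read locks currently held
 *   -1  a single writer holds the lock exclusively
 */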
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/module.h>

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	unsigned int flags;
#define RWSEM_WAITING_FOR_READ	0x00000001
#define RWSEM_WAITING_FOR_WRITE	0x00000002
};

/*
 * initialise the semaphore
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->activity = 0;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}

/*
 * handle the lock release when processes blocked on it that can now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	int woken;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

	if (!wakewrite) {
		if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
			goto out;
		goto dont_wake_writers;
	}

	/* if we are allowed to wake writers try to grant a single write lock
	 * if there's a writer at the front of the queue
	 * - we leave the 'waiting count' incremented to signify potential
	 *   contention
	 */
	if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
		sem->activity = -1;
		list_del(&waiter->list);
		tsk = waiter->task;
		/* Don't touch waiter after ->task has been NULLed */
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
		goto out;
	}

	/* grant an infinite number of read locks to the front of the queue */
 dont_wake_writers:
	woken = 0;
	while (waiter->flags & RWSEM_WAITING_FOR_READ) {
		struct list_head *next = waiter->list.next;

		list_del(&waiter->list);
		tsk = waiter->task;
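		/* as above: don't touch the waiter after ->task is NULLed,
		 * since the woken task may free its on-stack waiter */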
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
		woken++;
		if (list_empty(&sem->wait_list))
			break;
		waiter = list_entry(next, struct rwsem_waiter, list);
	}

	sem->activity += woken;

 out:
	return sem;
}

/*
 * wake a single writer
 */
static inline struct rw_semaphore *
__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;

	sem->activity = -1;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	list_del(&waiter->list);

	tsk = waiter->task;
	smp_mb();
	waiter->task = NULL;
	wake_up_process(tsk);
	put_task_struct(tsk);
	return sem;
}

/*
 * get a read lock on the semaphore
 */
void fastcall __sched __down_read(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;

	spin_lock_irq(&sem->wait_lock);

	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity++;
		spin_unlock_irq(&sem->wait_lock);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_READ;
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
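	/* the waker issues an smp_mb() and then clears waiter.task once the
	 * lock has been granted, so a NULL task means we now hold the lock */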
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;
 out:
	;
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int fastcall __down_read_trylock(struct rw_semaphore *sem)
{
	unsigned long flags;
	int ret = 0;
	spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity++;
		ret = 1;
	}

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	return ret;
}

/*
 * get a write lock on the semaphore
 * - we increment the waiting count anyway to indicate an exclusive lock
 */
void fastcall __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;

	spin_lock_irq(&sem->wait_lock);

	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity = -1;
		spin_unlock_irq(&sem->wait_lock);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_WRITE;
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
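	/* as in __down_read(): a NULL waiter.task signals that the waker
	 * has granted us the lock */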
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;
 out:
	;
}

void fastcall __sched __down_write(struct rw_semaphore *sem)
{
	__down_write_nested(sem, 0);
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int fastcall __down_write_trylock(struct rw_semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity = -1;
		ret = 1;
	}

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	return ret;
}

/*
 * release a read lock on the semaphore
 */
void fastcall __up_read(struct rw_semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->wait_lock, flags);

	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
		sem = __rwsem_wake_one_writer(sem);

	spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * release a write lock on the semaphore
 */
void fastcall __up_write(struct rw_semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->wait_lock, flags);

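	/* drop the exclusive hold; __rwsem_do_wake() then grants the lock to
	 * one queued writer or to every reader at the front of the queue */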
	sem->activity = 0;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 1);

	spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * downgrade a write lock into a read lock
 * - just wake up any readers at the front of the queue
 */
void fastcall __downgrade_write(struct rw_semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->wait_lock, flags);

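	/* we were the sole writer (activity == -1); turn our hold into a
	 * single read lock before waking the readers queued at the front */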
	sem->activity = 1;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 0);

	spin_unlock_irqrestore(&sem->wait_lock, flags);
}

EXPORT_SYMBOL(__init_rwsem);
EXPORT_SYMBOL(__down_read);
EXPORT_SYMBOL(__down_read_trylock);
EXPORT_SYMBOL(__down_write_nested);
EXPORT_SYMBOL(__down_write);
EXPORT_SYMBOL(__down_write_trylock);
EXPORT_SYMBOL(__up_read);
EXPORT_SYMBOL(__up_write);
EXPORT_SYMBOL(__downgrade_write);