Revision 7bf811a595a895b7a886dcf218d0d34f97df76dc authored by Josef Bacik on 08 October 2013, 02:11:09 UTC, committed by Chris Mason on 11 October 2013, 01:27:56 UTC
Liu fixed part of this problem, but unfortunately I steered him in a slightly
wrong direction and so didn't completely fix the problem.  The problem is that
we limit the size of the delalloc range we are looking for to max_bytes and
then try to lock that range.  If we fail to lock the pages in that range we
shrink max_bytes to a single page and re-loop.  However, if our first page is
inside of the delalloc range then we will end up limiting the end of the range
to a point before our first page.  This is illustrated below:

[0 -------- delalloc range --------- 256mb]
                                  [page]

So find_delalloc_range will return with delalloc_start as 0 and delalloc_end
as 128mb, and then we will notice that delalloc_start < *start and adjust
delalloc_start up, but not adjust delalloc_end up, so things go sideways.  To
fix this we need to apply the max_bytes limit not in find_delalloc_range but
in find_lock_delalloc_range; that way we don't end up with this confusion.
Thanks,

Signed-off-by: Josef Bacik <jbacik@fusionio.com>
Signed-off-by: Chris Mason <chris.mason@fusionio.com>
1 parent 4871c15
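
To make the failure concrete, here is a small standalone C sketch of the
arithmetic described above.  It is an illustration only, not the btrfs code:
the variable names delalloc_start/delalloc_end mirror the commit message, and
the constants (a 128mb cap, a page at 192mb) are hypothetical values chosen to
match the diagram.

#include <stdio.h>

#define MB (1024ULL * 1024ULL)

int main(void)
{
	unsigned long long start = 192 * MB;     /* offset of our first page */
	unsigned long long max_bytes = 128 * MB; /* cap applied by the search */

	/* With the cap applied inside the search (the buggy placement), the
	 * range found is [0, max_bytes) no matter where the page sits. */
	unsigned long long delalloc_start = 0;
	unsigned long long delalloc_end = delalloc_start + max_bytes - 1;

	/* The caller then bumps delalloc_start up to the first page... */
	if (delalloc_start < start)
		delalloc_start = start;

	/* ...but never bumps delalloc_end, leaving an inverted range. */
	if (delalloc_end < delalloc_start)
		printf("inverted range: start=%llu end=%llu\n",
		       delalloc_start, delalloc_end);
	return 0;
}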
lockref.h
#ifndef __LINUX_LOCKREF_H
#define __LINUX_LOCKREF_H

/*
 * Locked reference counts.
 *
 * These are different from just plain atomic refcounts in that they
 * are atomic with respect to the spinlock that goes with them.  In
 * particular, there can be implementations that don't actually get
 * the spinlock for the common decrement/increment operations, but they
 * still have to check that the operation is done semantically as if
 * the spinlock had been taken (using a cmpxchg operation that covers
 * both the lock and the count word, or using memory transactions, for
 * example).
 */

#include <linux/spinlock.h>

struct lockref {
	spinlock_t lock;
	unsigned int count;
};

/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
static inline void lockref_get(struct lockref *lockref)
{
	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}

/**
 * lockref_get_not_zero - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count is 0
 */
static inline int lockref_get_not_zero(struct lockref *lockref)
{
	int retval = 0;

	spin_lock(&lockref->lock);
	if (lockref->count) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}

/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
static inline int lockref_put_or_lock(struct lockref *lockref)
{
	spin_lock(&lockref->lock);
	if (lockref->count <= 1)
		return 0;
	lockref->count--;
	spin_unlock(&lockref->lock);
	return 1;
}

#endif /* __LINUX_LOCKREF_H */
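
For context, here is a minimal sketch of how an object might embed a lockref
and use the helpers above.  The struct and function names (my_obj,
my_obj_tryget, my_obj_put) are hypothetical, not part of this header:

#include <linux/lockref.h>
#include <linux/slab.h>

struct my_obj {
	struct lockref ref;	/* lock and refcount kept side by side */
	/* ... payload ... */
};

/* Take a reference during a lookup; fails if the object is already on
 * its way out (count == 0). */
static struct my_obj *my_obj_tryget(struct my_obj *obj)
{
	return lockref_get_not_zero(&obj->ref) ? obj : NULL;
}

/* Drop a reference; tear the object down when the last one goes away. */
static void my_obj_put(struct my_obj *obj)
{
	/* Fast path: count was > 1 and has been decremented. */
	if (lockref_put_or_lock(&obj->ref))
		return;

	/* Slow path: count was <= 1 and lockref_put_or_lock returned with
	 * the spinlock held, so no concurrent my_obj_tryget can race with
	 * the teardown.  A real user would also unhash the object from any
	 * lookup structure here, while still holding the lock. */
	obj->ref.count--;	/* drop our reference: count reaches 0 */
	spin_unlock(&obj->ref.lock);
	kfree(obj);
}

Note the asymmetry: lockref_put_or_lock leaves the lock held on failure
precisely so that the caller can do its teardown under the same lock that
lockref_get_not_zero takes.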