Revision ca64cae96037de16e4af92678814f5d4bf0c1c65 authored by NeilBrown on 21 November 2012, 05:33:40 UTC, committed by NeilBrown on 21 November 2012, 22:14:13 UTC
commit 9e44476851e91c86c98eb92b9bc27fb801f89072
    MD: raid5 avoid unnecessary zero page for trim

The above commit changed raid5 to clear R5_Discard when the completed request is
handled rather than when submitting the per-device discard request.
However, it did not clear R5_Discard for the parity device.

This means that if the stripe_head was reused before it expired from
the cache, the setting would be wrong and a hang would result.

Also, if the R5_Uptodate bit happens to be set, R5_Discard again
won't be cleared; and R5_Uptodate really should be clear at this point.

So make sure R5_Discard is cleared in all cases, and clear
R5_Uptodate when a 'discard' completes.

Signed-off-by: NeilBrown <neilb@suse.de>
1 parent ef5b7c6
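
A minimal illustrative sketch of the flag handling the commit message describes, not the actual raid5 patch: the structures and names below (demo_stripe_head, demo_r5dev, DEMO_*) are simplified stand-ins for the raid5 internals, used only to show the idea that on discard completion R5_Discard is cleared on every device in the stripe, parity included, and R5_UPTODATE is dropped so a reused stripe_head does not inherit stale state.

#include <linux/bitops.h>

/* Simplified stand-ins for the raid5 internals (illustration only). */
#define DEMO_NR_DEVS	4
enum { DEMO_R5_UPTODATE, DEMO_R5_Discard };

struct demo_r5dev {
	unsigned long flags;
};

struct demo_stripe_head {
	int pd_idx;				/* index of the parity device */
	struct demo_r5dev dev[DEMO_NR_DEVS];
};

/*
 * Sketch of the behaviour the fix describes: once the discard has
 * completed, clear R5_Discard for data and parity devices alike, and
 * clear R5_UPTODATE as well so the stripe_head can be safely reused.
 */
static void demo_discard_completed(struct demo_stripe_head *sh)
{
	int i;

	for (i = DEMO_NR_DEVS; i--; ) {
		struct demo_r5dev *dev = &sh->dev[i];

		if (test_and_clear_bit(DEMO_R5_Discard, &dev->flags))
			clear_bit(DEMO_R5_UPTODATE, &dev->flags);
	}
	/* The parity device (sh->dev[sh->pd_idx]) is covered by the loop above. */
}
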
Raw File
user-return-notifier.c

#include <linux/user-return-notifier.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/export.h>

static DEFINE_PER_CPU(struct hlist_head, return_notifier_list);

/*
 * Request a notification when the current cpu returns to userspace.  Must be
 * called in atomic context.  The notifier will also be called in atomic
 * context.
 */
void user_return_notifier_register(struct user_return_notifier *urn)
{
	set_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY);
	hlist_add_head(&urn->link, &__get_cpu_var(return_notifier_list));
}
EXPORT_SYMBOL_GPL(user_return_notifier_register);

/*
 * Removes a registered user return notifier.  Must be called from atomic
 * context, and from the same cpu registration occurred in.
 */
void user_return_notifier_unregister(struct user_return_notifier *urn)
{
	hlist_del(&urn->link);
	if (hlist_empty(&__get_cpu_var(return_notifier_list)))
		clear_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY);
}
EXPORT_SYMBOL_GPL(user_return_notifier_unregister);

/* Calls registered user return notifiers */
void fire_user_return_notifiers(void)
{
	struct user_return_notifier *urn;
	struct hlist_node *tmp1, *tmp2;
	struct hlist_head *head;

	head = &get_cpu_var(return_notifier_list);
	hlist_for_each_entry_safe(urn, tmp1, tmp2, head, link)
		urn->on_user_return(urn);
	put_cpu_var(return_notifier_list);
}
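
A hedged usage sketch of this API from a hypothetical caller (not part of this file): the example_* names are made up, but the pattern mirrors how a per-cpu consumer arms the notifier from atomic context and unregisters it from inside its own callback, which the _safe list walk in fire_user_return_notifiers permits.

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/user-return-notifier.h>

struct example_state {
	struct user_return_notifier urn;
	bool registered;
};

static DEFINE_PER_CPU(struct example_state, example_state);

/* Runs in atomic context on the cpu that registered the notifier. */
static void example_on_user_return(struct user_return_notifier *urn)
{
	struct example_state *st = container_of(urn, struct example_state, urn);

	/* Unregistering here is safe: the fire loop iterates with _safe. */
	user_return_notifier_unregister(urn);
	st->registered = false;
	/* ... restore whatever per-cpu state was deferred until now ... */
}

/* Arm the notifier for the current cpu; must not race with migration. */
static void example_arm_notifier(void)
{
	struct example_state *st;

	preempt_disable();		/* registration requires atomic context */
	st = this_cpu_ptr(&example_state);
	if (!st->registered) {
		st->urn.on_user_return = example_on_user_return;
		user_return_notifier_register(&st->urn);
		st->registered = true;
	}
	preempt_enable();
}

On architectures that support user return notifiers (x86 at the time), the exit-to-userspace path checks TIF_USER_RETURN_NOTIFY and calls fire_user_return_notifiers() before returning to userspace, which invokes the callback above on the registering cpu.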