Revision 5309809129ca3ab14f8bd5e5ef66c1b7686eb639 authored by Linus Torvalds on 26 September 2007, 16:16:21 UTC, committed by Linus Torvalds on 26 September 2007, 16:16:21 UTC
As Stephen Hemminger says, this is a "belt and suspenders" patch that
zeroes the envp array at allocation time, even though all the users
should NULL-terminate it anyway (and we've hopefully fixed everybody
that doesn't do that).

And we'll apparently clean the whole envp thing up for 2.6.24 anyway.

But let's just be robust, and do both this *and* make sure that all
users are doing the right thing.

Acked-by: Stephen Hemminger <shemminger@linux-foundation.org>
Acked-by: Greg Kroah-Hartman <gregkh@suse.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
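
As a rough illustration of the change described above (a hedged sketch, not the actual patch): allocating the envp pointer array with a zeroing allocator such as kzalloc() means every unused slot is already NULL, so a caller that forgets the explicit terminator still hands consumers a NULL-terminated array. NUM_ENVP and alloc_envp() below are hypothetical names chosen for the example, not taken from the kernel source.

#include <linux/slab.h>		/* kzalloc()/kfree() */

#define NUM_ENVP 32		/* hypothetical array size, for illustration only */

/* Hypothetical helper: build a zero-filled envp[] so unused slots are NULL. */
static char **alloc_envp(void)
{
	char **envp = kzalloc(NUM_ENVP * sizeof(char *), GFP_KERNEL);

	if (!envp)
		return NULL;

	envp[0] = "HOME=/";
	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	/*
	 * Belt and suspenders: even if we never write envp[2] = NULL,
	 * the zeroed allocation keeps the array NULL-terminated.
	 */
	return envp;
}
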
1 parent 544002e
Raw File
semaphore-helper.h
#ifndef __ASM_SH_SEMAPHORE_HELPER_H
#define __ASM_SH_SEMAPHORE_HELPER_H

/*
 * SMP- and interrupt-safe semaphore helper functions.
 *
 * (C) Copyright 1996 Linus Torvalds
 * (C) Copyright 1999 Andrea Arcangeli
 */

/*
 * These two _must_ execute atomically wrt each other.
 *
 * This is trivially done with load_locked/store_cond,
 * which we have.  Let the rest of the losers suck eggs.
 */
static __inline__ void wake_one_more(struct semaphore * sem)
{
	atomic_inc((atomic_t *)&sem->sleepers);
}

static __inline__ int waking_non_zero(struct semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->sleepers > 0) {
		sem->sleepers--;
		ret = 1;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}
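
/*
 * Editorial sketch, not part of this header: roughly how the classic
 * per-arch semaphore slow paths of this era pair the two helpers above.
 * __up() records one pending wakeup before waking a sleeper, and
 * __down() loops until waking_non_zero() hands it one of those wakeups.
 * Exact details vary between kernel versions; this is illustrative only.
 *
 *	void __up(struct semaphore *sem)
 *	{
 *		wake_one_more(sem);
 *		wake_up(&sem->wait);
 *	}
 *
 *	void __down(struct semaphore *sem)
 *	{
 *		DECLARE_WAITQUEUE(wait, current);
 *
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		add_wait_queue_exclusive(&sem->wait, &wait);
 *		while (!waking_non_zero(sem)) {
 *			schedule();
 *			set_current_state(TASK_UNINTERRUPTIBLE);
 *		}
 *		__set_current_state(TASK_RUNNING);
 *		remove_wait_queue(&sem->wait, &wait);
 *	}
 */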

/*
 * waking_non_zero_interruptible:
 *	1	got the lock
 *	0	go to sleep
 *	-EINTR	interrupted
 *
 * We must undo the sem->count decrement made by down_interruptible() while
 * we are protected by the spinlock, so that this atomic_inc() is atomic with
 * respect to the update in wake_one_more(); otherwise we can race. -arca
 */
static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
						struct task_struct *tsk)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->sleepers > 0) {
		sem->sleepers--;
		ret = 1;
	} else if (signal_pending(tsk)) {
		atomic_inc(&sem->count);
		ret = -EINTR;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}
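
/*
 * Editorial sketch, not part of this header: the interruptible slow path
 * typically spins on this helper, treating a return of 1 as "acquired",
 * 0 as "sleep again", and -EINTR as "bail out and return the error".
 * Illustrative only; the real caller lives in the arch semaphore code.
 *
 *	int __down_interruptible(struct semaphore *sem)
 *	{
 *		DECLARE_WAITQUEUE(wait, current);
 *		int ret = 0;
 *
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		add_wait_queue_exclusive(&sem->wait, &wait);
 *		for (;;) {
 *			int waking = waking_non_zero_interruptible(sem, current);
 *
 *			if (waking) {
 *				if (waking != 1)
 *					ret = waking;
 *				break;
 *			}
 *			schedule();
 *			set_current_state(TASK_INTERRUPTIBLE);
 *		}
 *		__set_current_state(TASK_RUNNING);
 *		remove_wait_queue(&sem->wait, &wait);
 *		return ret;
 *	}
 */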

/*
 * waking_non_zero_trylock:
 *	1	failed to lock
 *	0	got the lock
 *
 * We must undo the sem->count decrement made by down_trylock() while we are
 * protected by the spinlock, so that this atomic_inc() is atomic with
 * respect to the update in wake_one_more(); otherwise we can race. -arca
 */
static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
{
	unsigned long flags;
	int ret = 1;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->sleepers <= 0)
		atomic_inc(&sem->count);
	else {
		sem->sleepers--;
		ret = 0;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}
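
/*
 * Editorial sketch, not part of this header: since 0 already means
 * "acquired" and 1 means "failed", the trylock slow path can simply
 * forward this helper's return value, e.g.:
 *
 *	int __down_trylock(struct semaphore *sem)
 *	{
 *		return waking_non_zero_trylock(sem);
 *	}
 */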

#endif /* __ASM_SH_SEMAPHORE_HELPER_H */