Revision 24148d8648e37f8c15bedddfa50d14a31a0582c5 authored by Linus Torvalds on 28 August 2020, 23:23:16 UTC, committed by Linus Torvalds on 28 August 2020, 23:23:16 UTC
Pull io_uring fixes from Jens Axboe:
 "A few fixes in here, all based on reports and test cases from folks
  using it. Most of it is stable material as well:

   - Hashed work cancellation fix (Pavel)

   - poll wakeup signalfd fix

   - memlock accounting fix

   - nonblocking poll retry fix

   - ensure we never return -ERESTARTSYS for reads

   - ensure offset == -1 is consistent with preadv2() as documented

   - IOPOLL -EAGAIN handling fixes

   - remove useless task_work bounce for block based -EAGAIN retry"

* tag 'io_uring-5.9-2020-08-28' of git://git.kernel.dk/linux-block:
  io_uring: don't bounce block based -EAGAIN retry off task_work
  io_uring: fix IOPOLL -EAGAIN retries
  io_uring: clear req->result on IOPOLL re-issue
  io_uring: make offset == -1 consistent with preadv2/pwritev2
  io_uring: ensure read requests go through -ERESTART* transformation
  io_uring: don't use poll handler if file can't be nonblocking read/written
  io_uring: fix imbalanced sqo_mm accounting
  io_uring: revert consumed iov_iter bytes on error
  io-wq: fix hang after cancelling pending hashed work
  io_uring: don't recurse on tsk->sighand->siglock with signalfd
2 parents 005c534 + fdee946
once.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/once.h>
#include <linux/random.h>

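/*
 * Work item used to disable a static key from process context after
 * the DO_ONCE() body has executed.
 */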
struct once_work {
	struct work_struct work;
	struct static_key_true *key;
};

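/*
 * static_branch_disable() patches kernel text and may sleep, while
 * DO_ONCE() can be invoked from atomic context, so the disable is
 * deferred to the system workqueue.
 */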
static void once_deferred(struct work_struct *w)
{
	struct once_work *work;

	work = container_of(w, struct once_work, work);
	BUG_ON(!static_key_enabled(work->key));
	static_branch_disable(work->key);
	kfree(work);
}

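/*
 * Queue the deferred disable. GFP_ATOMIC because this can be reached
 * from atomic context; if the allocation fails, the key simply stays
 * enabled and callers keep taking the (correct, just slower) locked
 * slow path.
 */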
static void once_disable_jump(struct static_key_true *key)
{
	struct once_work *w;

	w = kmalloc(sizeof(*w), GFP_ATOMIC);
	if (!w)
		return;

	INIT_WORK(&w->work, once_deferred);
	w->key = key;
	schedule_work(&w->work);
}

static DEFINE_SPINLOCK(once_lock);

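/*
 * Slow-path entry for DO_ONCE(): returns true with once_lock held and
 * IRQs disabled if the caller should run the once-function, false if
 * another CPU already completed it.
 */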
bool __do_once_start(bool *done, unsigned long *flags)
	__acquires(once_lock)
{
	spin_lock_irqsave(&once_lock, *flags);
	if (*done) {
		spin_unlock_irqrestore(&once_lock, *flags);
		/* Keep sparse happy by restoring an even lock count on
		 * this lock. If we return false here, the caller returns
		 * early from the DO_ONCE() macro and never calls
		 * __do_once_done().
		 */
		__acquire(once_lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(__do_once_start);

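/*
 * Slow-path exit for DO_ONCE(): mark the region done, drop once_lock,
 * and schedule the static key to be disabled so future callers take
 * the patched-out fast path.
 */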
void __do_once_done(bool *done, struct static_key_true *once_key,
		    unsigned long *flags)
	__releases(once_lock)
{
	*done = true;
	spin_unlock_irqrestore(&once_lock, *flags);
	once_disable_jump(once_key);
}
EXPORT_SYMBOL(__do_once_done);
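
For context, a minimal sketch of the caller side: the DO_ONCE() macro in include/linux/once.h wraps the __do_once_start()/__do_once_done() pair exported above behind a static branch. The names seed and get_seed() below are hypothetical, chosen only for illustration.

#include <linux/once.h>
#include <linux/random.h>
#include <linux/types.h>

static u32 seed;

static u32 get_seed(void)
{
	/*
	 * get_random_bytes() runs at most once across all callers.
	 * Once the deferred work item disables the static key, this
	 * reduces to a patched-out fast path with no locking.
	 */
	DO_ONCE(get_random_bytes, &seed, sizeof(seed));
	return seed;
}

The get_random_once() helper in the same header is this pattern packaged up: it expands to DO_ONCE(get_random_bytes, buf, nbytes).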