Revision e2921f9f95f1c1355a39e54dc038ad95b6e032be authored by Linus Torvalds on 26 July 2019, 21:12:54 UTC, committed by Linus Torvalds on 26 July 2019, 21:12:54 UTC
Pull drm fixes from Daniel Vetter:
 "Dave seems to collect an entire streak of things happening, so again
  me typing pull summary.

  Nothing nefarious here; most of the fixes are for new stuff or things
  users won't see. The amd-display patches are a bit different, and
  very much look like they should have at least some cc: stable tags.
  It might be that amd is a bit too comfortable with their internal
  tree and not looking enough at upstream. Dave and I are looking into
  this, in case something needs to be rectified with the process here.

  Also no intel fixes pull, but intel CI has generally become rather
  good; still, I guess, expect a notch more for -rc3.

  Summary:

  amdgpu:
   - fixes for (new in 5.3) hw support (vega20, navi)
   - disable RAS
   - lots of display fixes all over (audio, DSC, dongle, clock mgr)

  ttm:
   - fix dma_free_attrs calls to appease dma debugging

  msm:
   - fixes for dma-api, locking debug and compiler splats

  core:
   - fix cmdline mode to not apply rotation if not specified (new in 5.3)
   - compiler warn fix"

* tag 'drm-fixes-2019-07-26' of git://anongit.freedesktop.org/drm/drm: (46 commits)
  drm/amd/display: Set enabled to false at start of audio disable
  drm/amdgpu/smu: move fan rpm query into the asic specific code
  drm/amd/powerplay: custom peak clock freq for navi10
  drm: silence variable 'conn' set but not used
  drm/msm: stop abusing dma_map/unmap for cache
  drm/msm/dpu: Correct dpu encoder spinlock initialization
  drm/msm: correct NULL pointer dereference in context_init
  drm/amd/display: handle active dongle port type is DP++ or DP case
  drm/amd/display: do not read link setting if edp not connected
  drm/amd/display: Increase size of audios array
  drm/amd/display: drop ASSERT() if eDP panel is not connected
  drm/amd/display: Only enable audio if speaker allocation exists
  drm/amd/display: Fix dc_create failure handling and 666 color depths
  drm/amd/display: allocate 4 ddc engines for RV2
  drm/amd/display: put back front end initialization sequence
  drm/amd/display: Wait for flip to complete
  drm/amd/display: Change min_h_sync_width from 8 to 4
  drm/amd/display: use encoder's engine id to find matched free audio device
  drm/amd/display: fix DMCU hang when going into Modern Standby
  drm/amd/display: Disable Audio on reinitialize hardware
  ...
2 parents 3ea54d9 + 4d5308e
user.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files, etc. the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/sched/user.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
struct user_namespace init_user_ns = {
	.uid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.gid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.projid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.count = ATOMIC_INIT(3),
	.owner = GLOBAL_ROOT_UID,
	.group = GLOBAL_ROOT_GID,
	.ns.inum = PROC_USER_INIT_INO,
#ifdef CONFIG_USER_NS
	.ns.ops = &userns_operations,
#endif
	.flags = USERNS_INIT_FLAGS,
#ifdef CONFIG_KEYS
	.keyring_name_list = LIST_HEAD_INIT(init_user_ns.keyring_name_list),
	.keyring_sem = __RWSEM_INITIALIZER(init_user_ns.keyring_sem),
#endif
};
EXPORT_SYMBOL_GPL(init_user_ns);
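/*
 * Note (illustrative, not from the original source): each extent maps
 * ids as  mapped = lower_first + (id - first)  for ids in
 * [first, first + count).  With first == lower_first == 0 and
 * count == 4294967295U above, init_user_ns is the identity mapping:
 * e.g. kuid 1000 maps to uid 1000.
 */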

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))
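/*
 * Worked example (added for illustration): with CONFIG_BASE_SMALL=0,
 * UIDHASH_BITS == 7 and UIDHASH_MASK == 127, so for uid 1000:
 * __uidhashfn(1000) == ((1000 >> 7) + 1000) & 127 == 1007 & 127 == 111,
 * i.e. that user_struct hangs off uidhash_table[111].
 */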

static struct kmem_cache *uid_cachep;
struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);
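/*
 * Illustration of the constraint above (hypothetical misuse, not in
 * the real code): a _bh locking variant would be broken here, because
 *
 *	local_irq_save(flags);
 *	spin_lock_bh(&uidhash_lock);
 *	...
 *	spin_unlock_bh(&uidhash_lock);
 *	local_irq_restore(flags);
 *
 * spin_unlock_bh() ends in local_bh_enable(), which may run softirq
 * callbacks while interrupts are still disabled - exactly the error
 * described above.  Hence the irqsave/irqrestore variants below.
 */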

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
	.__count	= REFCOUNT_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm     = 0,
	.uid		= GLOBAL_ROOT_UID,
	.ratelimit	= RATELIMIT_STATE_INIT(root_user.ratelimit, 0, 0),
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;

	hlist_for_each_entry(user, hashent, uidhash_node) {
		if (uid_eq(user->uid, uid)) {
			refcount_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
	__releases(&uidhash_lock)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}
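
/*
 * Usage sketch (hypothetical, not part of this file): look up a user
 * and drop the reference again.  Everything here except find_user()
 * and free_uid() is made up for illustration.
 */
static void example_peek_user(kuid_t uid)
{
	struct user_struct *up = find_user(uid);

	if (!up)
		return;		/* no user_struct hashed for this uid */

	pr_info("uid %u has %d processes\n", __kuid_val(uid),
		atomic_read(&up->processes));
	free_uid(up);		/* undo the reference find_user() took */
}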

/*
 * Drop a reference on @up.  refcount_dec_and_lock_irqsave() only takes
 * uidhash_lock (with interrupts disabled) when the count actually
 * drops to zero, so the common path stays off the lock entirely.
 */
void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	if (refcount_dec_and_lock_irqsave(&up->__count, &uidhash_lock, &flags))
		free_user(up, flags);
}

/*
 * Find the user_struct for @uid, allocating and hashing a new one if
 * none exists yet.  Returns NULL only if the allocation fails.
 */
struct user_struct *alloc_uid(kuid_t uid)
{
	struct hlist_head *hashent = uidhashentry(uid);
	struct user_struct *up, *new;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			return NULL;

		new->uid = uid;
		refcount_set(&new->__count, 1);
		ratelimit_state_init(&new->ratelimit, HZ, 100);
		ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);

		/*
		 * Before adding this, check whether we raced with
		 * someone else adding the same user already.
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;
}
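
/*
 * Caller sketch (hypothetical, not part of this file): pairing
 * alloc_uid() with free_uid().  The accounting bump and error code
 * are illustrative only.
 */
static int example_charge_uid(kuid_t uid)
{
	struct user_struct *up = alloc_uid(uid);

	if (!up)
		return -EAGAIN;		/* kmem_cache_zalloc() failed */

	atomic_inc(&up->processes);	/* illustrative accounting bump */
	/* ... do work accounted against this uid ... */
	atomic_dec(&up->processes);
	free_uid(up);			/* drop alloc_uid()'s reference */
	return 0;
}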

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}
subsys_initcall(uid_cache_init);