https://github.com/torvalds/linux
Revision 9bfacd01dc9b7519e1e6da12b01963550b9d09a2 authored by Roland Dreier on 22 September 2011, 07:06:05 UTC, committed by James Bottomley on 02 October 2011, 16:58:30 UTC

[SCSI] qla2xxx: Fix crash in qla2x00_abort_all_cmds() on unload
I hit a crash in qla2x00_abort_all_cmds() if the qla2xxx module is
unloaded right after it is loaded.  I debugged this down to the abort
handling improperly treating a command of type SRB_ADISC_CMD as if it
had a bsg_job to complete, when that command actually uses the iocb_cmd
part of the union.  (I suspect that to hit this, one has to unload the
module while the asynchronous FC initialization is still in progress.)

It seems we should only look for a bsg_job if type is SRB_ELS_CMD_RPT,
SRB_ELS_CMD_HST or SRB_CT_CMD, so switch the test to make that explicit.

Signed-off-by: Roland Dreier <roland@purestorage.com>
Acked-by: Chad Dupuis <chad.dupuis@qlogic.com>
Cc: stable@kernel.org
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
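
The fix keys the abort-time cleanup on the command type instead of assuming
every context carries a bsg_job.  Below is a minimal standalone sketch of that
pattern; the struct/union layout and names are illustrative stand-ins, not the
actual qla2xxx driver code:

#include <stdio.h>

/* Illustrative command types taken from the commit message. */
enum srb_type {
	SRB_ADISC_CMD,
	SRB_ELS_CMD_RPT,
	SRB_ELS_CMD_HST,
	SRB_CT_CMD,
};

/* Hypothetical per-command context: the two members overlay each other,
 * so reading u.bsg_job for an ADISC command interprets iocb_cmd bits as
 * a pointer. */
struct srb_ctx {
	enum srb_type type;
	union {
		struct { int done; } iocb_cmd;	/* async fabric/login commands */
		struct { int done; } *bsg_job;	/* ELS/CT pass-through only    */
	} u;
};

/* Abort-time cleanup: only look at u.bsg_job for the command types that
 * actually use it, which is what the patch makes explicit. */
static void abort_cmd(struct srb_ctx *ctx)
{
	if (ctx->type == SRB_ELS_CMD_RPT ||
	    ctx->type == SRB_ELS_CMD_HST ||
	    ctx->type == SRB_CT_CMD)
		printf("complete bsg_job %p\n", (void *)ctx->u.bsg_job);
	else
		printf("complete iocb_cmd, type %d\n", ctx->type);
}

int main(void)
{
	struct srb_ctx adisc = { .type = SRB_ADISC_CMD };

	abort_cmd(&adisc);	/* must not touch u.bsg_job */
	return 0;
}
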
namespace.c
/*
 * linux/ipc/namespace.c
 * Copyright (C) 2006 Pavel Emelyanov <xemul@openvz.org> OpenVZ, SWsoft Inc.
 */

#include <linux/ipc.h>
#include <linux/msg.h>
#include <linux/ipc_namespace.h>
#include <linux/rcupdate.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/user_namespace.h>
#include <linux/proc_fs.h>

#include "util.h"

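/*
 * Allocate and initialize a new ipc namespace: set up the POSIX message
 * queue state, the SysV sem/msg/shm id tables, register the namespace on
 * the ipcns notifier chain used to recompute msgmni, and take a reference
 * on the creating task's user namespace.  Returns an ERR_PTR() on failure.
 */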
static struct ipc_namespace *create_ipc_ns(struct task_struct *tsk,
					   struct ipc_namespace *old_ns)
{
	struct ipc_namespace *ns;
	int err;

	ns = kmalloc(sizeof(struct ipc_namespace), GFP_KERNEL);
	if (ns == NULL)
		return ERR_PTR(-ENOMEM);

	atomic_set(&ns->count, 1);
	err = mq_init_ns(ns);
	if (err) {
		kfree(ns);
		return ERR_PTR(err);
	}
	atomic_inc(&nr_ipc_ns);

	sem_init_ns(ns);
	msg_init_ns(ns);
	shm_init_ns(ns);

	/*
	 * msgmni has already been computed for the new ipc ns.
	 * Thus, do the ipcns creation notification before registering that
	 * new ipcns in the chain.
	 */
	ipcns_notify(IPCNS_CREATED);
	register_ipcns_notifier(ns);

	ns->user_ns = get_user_ns(task_cred_xxx(tsk, user)->user_ns);

	return ns;
}

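/*
 * Clone-time entry point: without CLONE_NEWIPC the child simply shares
 * (and takes a reference on) the parent's ipc namespace; with it, a
 * fresh namespace is created.
 */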
struct ipc_namespace *copy_ipcs(unsigned long flags,
				struct task_struct *tsk)
{
	struct ipc_namespace *ns = tsk->nsproxy->ipc_ns;

	if (!(flags & CLONE_NEWIPC))
		return get_ipc_ns(ns);
	return create_ipc_ns(tsk, ns);
}

/*
 * free_ipcs - free all ipcs of one type
 * @ns:   the namespace to remove the ipcs from
 * @ids:  the table of ipcs to free
 * @free: the function called to free each individual ipc
 *
 * Called for each kind of ipc when an ipc_namespace exits.
 */
void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
	       void (*free)(struct ipc_namespace *, struct kern_ipc_perm *))
{
	struct kern_ipc_perm *perm;
	int next_id;
	int total, in_use;

	down_write(&ids->rw_mutex);

	in_use = ids->in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		perm = idr_find(&ids->ipcs_idr, next_id);
		if (perm == NULL)
			continue;
		ipc_lock_by_ptr(perm);
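		/* the callback removes the id and drops the lock taken above */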
		free(ns, perm);
		total++;
	}
	up_write(&ids->rw_mutex);
}

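/*
 * Final teardown of an ipc namespace, called once the last reference
 * has been dropped in put_ipc_ns().
 */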
static void free_ipc_ns(struct ipc_namespace *ns)
{
	/*
	 * Unregistering the hotplug notifier at the beginning guarantees
	 * that the ipc namespace won't be freed while we are inside the
	 * callback routine. Since the blocking_notifier_chain_XXX routines
	 * hold a rw lock on the notifier list, unregister_ipcns_notifier()
	 * won't take the rw lock before blocking_notifier_call_chain() has
	 * released the rd lock.
	 */
	unregister_ipcns_notifier(ns);
	sem_exit_ns(ns);
	msg_exit_ns(ns);
	shm_exit_ns(ns);
	atomic_dec(&nr_ipc_ns);

	/*
	 * Do the ipcns removal notification after decrementing nr_ipc_ns in
	 * order to have a correct value when recomputing msgmni.
	 */
	ipcns_notify(IPCNS_REMOVED);
	put_user_ns(ns->user_ns);
	kfree(ns);
}

/*
 * put_ipc_ns - drop a reference to an ipc namespace.
 * @ns: the namespace to put
 *
 * If this is the last task in the namespace exiting, and
 * it is dropping the refcount to 0, then it can race with
 * a task in another ipc namespace but in a mounts namespace
 * which has this ipcns's mqueuefs mounted, doing some action
 * with one of the mqueuefs files.  That can raise the refcount.
 * So dropping the refcount, and raising the refcount when
 * accessing it through the VFS, are protected with mq_lock.
 *
 * (Clearly, a task raising the refcount on its own ipc_ns
 * needn't take mq_lock since it can't race with the last task
 * in the ipcns exiting).
 */
void put_ipc_ns(struct ipc_namespace *ns)
{
	if (atomic_dec_and_lock(&ns->count, &mq_lock)) {
		mq_clear_sbinfo(ns);
		spin_unlock(&mq_lock);
		mq_put_mnt(ns);
		free_ipc_ns(ns);
	}
}

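/*
 * The callbacks below back the /proc/<pid>/ns/ipc file and
 * setns(CLONE_NEWIPC).
 */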
static void *ipcns_get(struct task_struct *task)
{
	struct ipc_namespace *ns = NULL;
	struct nsproxy *nsproxy;

	rcu_read_lock();
	nsproxy = task_nsproxy(task);
	if (nsproxy)
		ns = get_ipc_ns(nsproxy->ipc_ns);
	rcu_read_unlock();

	return ns;
}

static void ipcns_put(void *ns)
{
	return put_ipc_ns(ns);
}

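/*
 * Install callback: flush the caller's semaphore undo state, drop the
 * old ipc namespace and switch nsproxy over to the new one.
 */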
static int ipcns_install(struct nsproxy *nsproxy, void *ns)
{
	/* Ditch state from the old ipc namespace */
	exit_sem(current);
	put_ipc_ns(nsproxy->ipc_ns);
	nsproxy->ipc_ns = get_ipc_ns(ns);
	return 0;
}

const struct proc_ns_operations ipcns_operations = {
	.name		= "ipc",
	.type		= CLONE_NEWIPC,
	.get		= ipcns_get,
	.put		= ipcns_put,
	.install	= ipcns_install,
};