Revision 043d1e729b0fbaf2b69386fe45290b8a9a18a6a9 authored by Eugeniy Paltsev on 06 September 2017, 18:21:08 UTC, committed by Vineet Gupta on 04 October 2017, 03:36:49 UTC
The DW ethernet controller on AXS10x sometimes hangs after SW reset, so
add a temporary quirk to reset the DW ethernet controller IP core.
This quirk can be removed once the axs10x reset driver
(see http://patchwork.ozlabs.org/patch/800273/)
or the simple reset driver
(see https://patchwork.kernel.org/patch/9903375/)
is available upstream.
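
The diff itself is not reproduced on this page. As a rough, purely illustrative sketch of what such a platform-level quirk can look like, the snippet below pokes a board software-reset register from early platform code. The register address (AXS_CREG_MB_SW_RESET), the GMAC reset bit, and the function name are all assumptions made for illustration and do not reflect the actual AXS10x change.

#include <linux/init.h>
#include <linux/io.h>
#include <linux/bitops.h>

/* Hypothetical board register and bit - the real AXS10x offsets differ */
#define AXS_CREG_MB_SW_RESET	0xe0011000
#define AXS_MB_RESET_GMAC	BIT(0)

static void __init axs10x_reset_dw_gmac(void)
{
	void __iomem *creg = ioremap(AXS_CREG_MB_SW_RESET, 4);

	if (!creg)
		return;

	/* Pulse the GMAC software-reset bit so the DW core comes up clean */
	writel(readl(creg) | AXS_MB_RESET_GMAC, creg);
	iounmap(creg);
}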

Signed-off-by: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
1 parent 9e66317
blk-mq-rdma.c
/*
 * Copyright (c) 2017 Sagi Grimberg.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/blk-mq.h>
#include <linux/blk-mq-rdma.h>
#include <rdma/ib_verbs.h>

/**
 * blk_mq_rdma_map_queues - provide a default queue mapping for rdma device
 * @set:	tagset to provide the mapping for
 * @dev:	rdma device associated with @set.
 * @first_vec:	first interrupt vector to use for queues (usually 0)
 *
 * This function assumes the rdma device @dev has at least as many available
 * interrupt vectors as @set has queues.  It will then query its affinity mask
 * and build a queue mapping that maps each queue to the CPUs that have irq
 * affinity for the corresponding vector.
 *
 * In case either the driver passed a @dev with fewer vectors than
 * @set->nr_hw_queues, or @dev does not provide an affinity mask for a
 * vector, we fall back to the naive mapping.
 */
int blk_mq_rdma_map_queues(struct blk_mq_tag_set *set,
		struct ib_device *dev, int first_vec)
{
	const struct cpumask *mask;
	unsigned int queue, cpu;

	for (queue = 0; queue < set->nr_hw_queues; queue++) {
		mask = ib_get_vector_affinity(dev, first_vec + queue);
		if (!mask)
			goto fallback;

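		/* Point every CPU in this vector's affinity mask at this hw queue */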
		for_each_cpu(cpu, mask)
			set->mq_map[cpu] = queue;
	}

	return 0;

fallback:
	return blk_mq_map_queues(set);
}
EXPORT_SYMBOL_GPL(blk_mq_rdma_map_queues);
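
For context, an RDMA block driver wires this helper into its blk_mq_ops via the map_queues callback. The snippet below is a minimal sketch, not part of this file: struct my_ctrl, my_rdma_map_queues and my_mq_ops are hypothetical names, and real users such as nvme-rdma follow the same pattern with their own controller structure.

#include <linux/blk-mq.h>
#include <linux/blk-mq-rdma.h>
#include <rdma/ib_verbs.h>

/* Hypothetical controller structure caching the RDMA device */
struct my_ctrl {
	struct ib_device *ib_dev;
};

static int my_rdma_map_queues(struct blk_mq_tag_set *set)
{
	struct my_ctrl *ctrl = set->driver_data;

	/* Start at vector 0; pass an offset if vector 0 is reserved elsewhere */
	return blk_mq_rdma_map_queues(set, ctrl->ib_dev, 0);
}

static const struct blk_mq_ops my_mq_ops = {
	.map_queues	= my_rdma_map_queues,
	/* .queue_rq, .init_request, ... omitted in this sketch */
};

If the device exposes fewer vectors than hw queues, or reports no affinity mask, the helper silently degrades to blk_mq_map_queues(), so callers do not need a separate error path for that case.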