Revision 5501e9229a80d95a1ea68609f44c447a75d23ed5 authored by Adrian Hunter on 07 January 2021, 17:41:59 UTC, committed by Arnaldo Carvalho de Melo on 15 January 2021, 20:28:27 UTC
In some cases, the number of cpus (nr_cpus_online) is confused with the
maximum cpu number (nr_cpus_avail), which results in the error in the
example below:

Example on system with 8 cpus:

 Before:
   # echo 0 > /sys/devices/system/cpu/cpu2/online
   # ./perf record --kcore -e intel_pt// taskset --cpu-list 7 uname
   Linux
   [ perf record: Woken up 1 times to write data ]
   [ perf record: Captured and wrote 0.147 MB perf.data ]
   # ./perf script --itrace=e
   Requested CPU 7 too large. Consider raising MAX_NR_CPUS
   0x25908 [0x8]: failed to process type: 68 [Invalid argument]

 After:
   # ./perf script --itrace=e
   #

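The fix amounts to validating an event's CPU number against the maximum
possible CPU number (nr_cpus_avail) rather than the count of CPUs currently
online (nr_cpus_online). The sketch below only illustrates that distinction;
it is not the actual patch hunk, and the helper cpu_index_is_valid() is
hypothetical, though the nr_cpus_online/nr_cpus_avail fields follow
perf_env in tools/perf.

   /* Illustrative sketch only; cpu_index_is_valid() is a hypothetical helper. */
   static bool cpu_index_is_valid(struct perf_env *env, int cpu)
   {
   	if (cpu == -1)
   		return true;	/* "any CPU" is always in range */
   	/*
   	 * Wrong: with cpu2 offline on an 8-CPU system, nr_cpus_online is 7,
   	 * so a sample taken on CPU 7 would be rejected:
   	 *
   	 *	return cpu < env->nr_cpus_online;
   	 *
   	 * Right: compare against the number of available (possible) CPUs.
   	 */
   	return cpu < env->nr_cpus_avail;
   }
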
Fixes: 8c7274691f0d ("perf machine: Replace MAX_NR_CPUS with perf_env::nr_cpus_online")
Fixes: 7df4e36a4785 ("perf session: Replace MAX_NR_CPUS with perf_env::nr_cpus_online")
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Tested-by: Kan Liang <kan.liang@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: stable@vger.kernel.org
Link: http://lore.kernel.org/lkml/20210107174159.24897-1-adrian.hunter@intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
blk-mq-rdma.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017 Sagi Grimberg.
 */
#include <linux/blk-mq.h>
#include <linux/blk-mq-rdma.h>
#include <rdma/ib_verbs.h>

/**
 * blk_mq_rdma_map_queues - provide a default queue mapping for rdma device
 * @map:	CPU to hardware queue map.
 * @dev:	rdma device to provide a mapping for.
 * @first_vec:	first interrupt vector to use for queues (usually 0)
 *
 * This function assumes the rdma device @dev has at least as many available
 * interrupt vectors as @map has queues.  It will then query its affinity mask
 * and build a queue mapping that maps each queue to the CPUs that have irq
 * affinity for the corresponding vector.
 *
 * In case either the driver passed a @dev with fewer vectors than
 * @map->nr_queues, or @dev does not provide an affinity mask for a
 * vector, we fall back to the naive mapping.
 */
int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
		struct ib_device *dev, int first_vec)
{
	const struct cpumask *mask;
	unsigned int queue, cpu;

	for (queue = 0; queue < map->nr_queues; queue++) {
		mask = ib_get_vector_affinity(dev, first_vec + queue);
		if (!mask)
			goto fallback;

		for_each_cpu(cpu, mask)
			map->mq_map[cpu] = map->queue_offset + queue;
	}

	return 0;

fallback:
	return blk_mq_map_queues(map);
}
EXPORT_SYMBOL_GPL(blk_mq_rdma_map_queues);
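
/*
 * Minimal usage sketch (not part of this file): how an RDMA block driver
 * of this era might wire blk_mq_rdma_map_queues() into its ->map_queues()
 * callback.  "struct my_rdma_ctrl", its fields and the ops name are
 * hypothetical; the real nvme-rdma driver follows the same pattern.
 */
struct my_rdma_ctrl {
	struct ib_device *ibdev;	/* RDMA device backing the hw queues */
};

static int my_rdma_map_queues(struct blk_mq_tag_set *set)
{
	struct my_rdma_ctrl *ctrl = set->driver_data;

	/*
	 * Map the default hctx type using the device's IRQ affinity,
	 * starting at vector 0; blk_mq_rdma_map_queues() falls back to
	 * blk_mq_map_queues() if no affinity mask is available.
	 */
	return blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT],
				      ctrl->ibdev, 0);
}

static const struct blk_mq_ops my_rdma_mq_ops = {
	/* .queue_rq, .init_hctx, ... omitted in this sketch */
	.map_queues	= my_rdma_map_queues,
};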