Revision d38b4d289486daee01c1fdf056b46b7cdfe72e9e authored by Jens Axboe on 19 March 2021, 12:40:47 UTC, committed by Jens Axboe on 19 March 2021, 12:40:47 UTC
Pull NVMe updates from Christoph:

"nvme fixes for 5.12

 - fix tag allocation for keep alive
 - fix a unit mismatch for the Write Zeroes limits
 - various TCP transport fixes (Sagi Grimberg, Elad Grupi)
 - fix iosqes and iocqes validation for discovery controllers (Sagi Grimberg)"

* tag 'nvme-5.12-20210319' of git://git.infradead.org/nvme:
  nvmet-tcp: fix kmap leak when data digest in use
  nvmet: don't check iosqes,iocqes for discovery controllers
  nvme-rdma: fix possible hang when failing to set io queues
  nvme-tcp: fix possible hang when failing to set io queues
  nvme-tcp: fix misuse of __smp_processor_id with preemption enabled
  nvme-tcp: fix a NULL deref when receiving a 0-length r2t PDU
  nvme: fix Write Zeroes limitations
  nvme: allocate the keep alive request using BLK_MQ_REQ_NOWAIT
  nvme: merge nvme_keep_alive into nvme_keep_alive_work
  nvme-fabrics: only reserve a single tag
2 parents 1e28eed + bac0445
blk-mq-debugfs.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_DEBUGFS_H
#define INT_BLK_MQ_DEBUGFS_H

/*
 * Forward declarations for the types used in the prototypes below, so
 * the header does not depend on its include order in callers.
 */
struct blk_mq_hw_ctx;
struct request;
struct request_queue;
struct rq_qos;
struct seq_file;

#ifdef CONFIG_BLK_DEBUG_FS

#include <linux/seq_file.h>

struct blk_mq_debugfs_attr {
	const char *name;
	umode_t mode;
	int (*show)(void *, struct seq_file *);
	ssize_t (*write)(void *, const char __user *, size_t, loff_t *);
	/* Set either .show or .seq_ops. */
	const struct seq_operations *seq_ops;
};
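
/*
 * Illustrative sketch, not part of the original header: how a consumer
 * typically defines an attribute table. A plain attribute sets .show;
 * an attribute backed by an iterator sets .seq_ops instead, as noted
 * above. The example_* names are hypothetical. Kept under #if 0 so the
 * header compiles unchanged.
 */
#if 0
static int example_state_show(void *data, struct seq_file *m)
{
	/* For queue-level attributes, @data is the request_queue. */
	struct request_queue *q = data;

	seq_printf(m, "dying=%d\n", blk_queue_dying(q));
	return 0;
}

static const struct blk_mq_debugfs_attr example_queue_attrs[] = {
	{ .name = "state", .mode = 0400, .show = example_state_show },
	{ },	/* sentinel */
};
#endif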

int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq);
int blk_mq_debugfs_rq_show(struct seq_file *m, void *v);
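
/*
 * Illustrative sketch, not part of the original header:
 * blk_mq_debugfs_rq_show() is meant to slot in as the .show callback of
 * a seq_operations table that walks a list of requests, so each element
 * handed to .show is a struct request. The example_* iterator callbacks
 * are hypothetical stand-ins for ones a caller would provide. Kept
 * under #if 0 so the header compiles unchanged.
 */
#if 0
static const struct seq_operations example_rq_list_seq_ops = {
	.start	= example_rq_list_start,	/* lock the list, return the first rq */
	.next	= example_rq_list_next,		/* advance to the next request */
	.stop	= example_rq_list_stop,		/* drop the lock */
	.show	= blk_mq_debugfs_rq_show,	/* v is a struct request * */
};
#endif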

void blk_mq_debugfs_register(struct request_queue *q);
void blk_mq_debugfs_unregister(struct request_queue *q);
void blk_mq_debugfs_register_hctx(struct request_queue *q,
				  struct blk_mq_hw_ctx *hctx);
void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
void blk_mq_debugfs_register_hctxs(struct request_queue *q);
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q);

void blk_mq_debugfs_register_sched(struct request_queue *q);
void blk_mq_debugfs_unregister_sched(struct request_queue *q);
void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
					struct blk_mq_hw_ctx *hctx);
void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);

void blk_mq_debugfs_register_rqos(struct rq_qos *rqos);
void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos);
void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q);
#else
static inline void blk_mq_debugfs_register(struct request_queue *q)
{
}

static inline void blk_mq_debugfs_unregister(struct request_queue *q)
{
}

static inline void blk_mq_debugfs_register_hctx(struct request_queue *q,
						struct blk_mq_hw_ctx *hctx)
{
}

static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
}

static inline void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
}

static inline void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
}

static inline void blk_mq_debugfs_register_sched(struct request_queue *q)
{
}

static inline void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
}

static inline void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
						      struct blk_mq_hw_ctx *hctx)
{
}

static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
}

static inline void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
}

static inline void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
}

static inline void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_DEBUG_FS */

#ifdef CONFIG_BLK_DEBUG_FS_ZONED
int queue_zone_wlock_show(void *data, struct seq_file *m);
#else
static inline int queue_zone_wlock_show(void *data, struct seq_file *m)
{
	return 0;
}
#endif /* CONFIG_BLK_DEBUG_FS_ZONED */

#endif /* INT_BLK_MQ_DEBUGFS_H */