xref: /linux/drivers/block/null_blk/null_blk.h (revision b4db9f840283caca0d904436f187ef56a9126eaa)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __BLK_NULL_BLK_H
3 #define __BLK_NULL_BLK_H
4 
5 #undef pr_fmt
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 
8 #include <linux/blkdev.h>
9 #include <linux/slab.h>
10 #include <linux/blk-mq.h>
11 #include <linux/hrtimer.h>
12 #include <linux/configfs.h>
13 #include <linux/badblocks.h>
14 #include <linux/fault-inject.h>
15 #include <linux/spinlock.h>
16 #include <linux/mutex.h>
17 
/*
 * Per-command private data for a null_blk request.
 */
struct nullb_cmd {
	blk_status_t error;	/* completion status reported for this command */
	bool fake_timeout;	/* set when a timeout was fault-injected for this
				 * command (see timeout_config) -- TODO confirm
				 * exact completion behavior in the .c side */
	struct nullb_queue *nq;	/* queue this command was submitted on */
	struct hrtimer timer;	/* timer for delayed completion (completion_nsec)
				 * -- presumably only used in timer IRQ mode */
};
24 
/*
 * Per-hardware-queue context.
 */
struct nullb_queue {
	struct nullb_device *dev;	/* owning device */
	unsigned int requeue_selection;	/* selection counter -- presumably used by
					 * requeue fault injection; confirm in .c */

	struct list_head poll_list;	/* commands awaiting IOPOLL completion */
	spinlock_t poll_lock;		/* serializes access to poll_list */
};
32 
/*
 * Emulated zone state for a zoned null_blk device.
 */
struct nullb_zone {
	/*
	 * Zone lock to prevent concurrent modification of a zone write
	 * pointer position and condition: with memory backing, a write
	 * command execution may sleep on memory allocation. For this case,
	 * use mutex as the zone lock. Otherwise, use the spinlock for
	 * locking the zone.
	 */
	union {
		spinlock_t spinlock;
		struct mutex mutex;
	};
	enum blk_zone_type type;	/* conventional or sequential zone type */
	enum blk_zone_cond cond;	/* current zone condition (empty/open/full/...) */
	sector_t start;			/* first sector of the zone */
	sector_t wp;			/* current write pointer position */
	unsigned int len;		/* zone length -- in sectors, TODO confirm unit */
	unsigned int capacity;		/* usable capacity -- same unit as len */
};
52 
/*
 * Per-device configuration and state. Instances are created and tuned
 * through configfs; most fields below are exposed as configfs attributes.
 */
struct nullb_device {
	struct nullb *nullb;		/* live disk instance -- presumably NULL
					 * while the device is powered off */
	struct config_group group;	/* configfs group representing this device */
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	struct fault_config timeout_config;		/* inject request timeouts */
	struct fault_config requeue_config;		/* inject request requeues */
	struct fault_config init_hctx_fault_config;	/* inject hctx init failures */
#endif
	struct radix_tree_root data; /* data stored in the disk */
	struct radix_tree_root cache; /* disk cache data */
	unsigned long flags; /* device flags */
	unsigned int curr_cache;	/* current cache usage -- unit not shown
					 * here (bytes?), TODO confirm */
	struct badblocks badblocks;	/* sectors configured to fail I/O */

	unsigned int nr_zones;		/* total number of zones */
	unsigned int nr_zones_imp_open;	/* zones in implicit-open condition */
	unsigned int nr_zones_exp_open;	/* zones in explicit-open condition */
	unsigned int nr_zones_closed;	/* zones in closed condition */
	unsigned int imp_close_zone_no;	/* candidate implicitly-open zone to
					 * close next -- confirm in zoned code */
	struct nullb_zone *zones;	/* array of nr_zones zone descriptors */
	sector_t zone_size_sects;	/* zone size in 512B sectors */
	bool need_zone_res_mgmt;	/* enforce open/active zone limits */
	spinlock_t zone_res_lock;	/* presumably protects the zone counters
					 * above -- confirm locking scope */

	unsigned long size; /* device size in MB */
	unsigned long completion_nsec; /* time in ns to complete a request */
	unsigned long cache_size; /* disk cache size in MB */
	unsigned long zone_size; /* zone size in MB if device is zoned */
	unsigned long zone_capacity; /* zone capacity in MB if device is zoned */
	unsigned int zone_nr_conv; /* number of conventional zones */
	unsigned int zone_max_open; /* max number of open zones */
	unsigned int zone_max_active; /* max number of active zones */
	unsigned int submit_queues; /* number of submission queues */
	unsigned int prev_submit_queues; /* number of submission queues before change */
	unsigned int poll_queues; /* number of IOPOLL submission queues */
	unsigned int prev_poll_queues; /* number of IOPOLL submission queues before change */
	unsigned int home_node; /* home node for the device */
	unsigned int queue_mode; /* block interface */
	unsigned int blocksize; /* block size */
	unsigned int max_sectors; /* Max sectors per command */
	unsigned int irqmode; /* IRQ completion handler */
	unsigned int hw_queue_depth; /* queue depth */
	unsigned int index; /* index of the disk, only valid with a disk */
	unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */
	bool blocking; /* blocking blk-mq device */
	bool use_per_node_hctx; /* use per-node allocation for hardware context */
	bool power; /* power on/off the device */
	bool memory_backed; /* if data is stored in memory */
	bool discard; /* if support discard */
	bool zoned; /* if device is zoned */
	bool virt_boundary; /* virtual boundary on/off for the device */
	bool no_sched; /* no IO scheduler for the device */
	bool shared_tags; /* share tag set between devices for blk-mq */
	bool shared_tag_bitmap; /* use hostwide shared tags */
};
108 
/*
 * A live null_blk disk instance built from a nullb_device configuration.
 */
struct nullb {
	struct nullb_device *dev;	/* configuration backing this instance */
	struct list_head list;		/* linkage -- presumably on a global
					 * device list; confirm in .c side */
	unsigned int index;		/* instance index */
	struct request_queue *q;	/* the disk's request queue */
	struct gendisk *disk;		/* the generic disk */
	struct blk_mq_tag_set *tag_set;	/* tag set in use; points at __tag_set
					 * or a shared set (see shared_tags) */
	struct blk_mq_tag_set __tag_set;	/* private tag set storage */
	atomic_long_t cur_bytes;	/* byte budget for bandwidth throttling
					 * (mbps) -- confirm replenish scheme */
	struct hrtimer bw_timer;	/* bandwidth-throttle timer */
	unsigned long cache_flush_pos;	/* position to resume cache flushing */
	spinlock_t lock;		/* device-wide lock -- TODO confirm the
					 * exact data it protects */

	struct nullb_queue *queues;	/* per-hw-queue contexts */
	char disk_name[DISK_NAME_LEN];	/* block device name */
};
125 
/* Command handling shared between the regular and zoned I/O paths. */
blk_status_t null_handle_discard(struct nullb_device *dev, sector_t sector,
				 sector_t nr_sectors);
blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
			      sector_t sector, unsigned int nr_sectors);
130 
131 #ifdef CONFIG_BLK_DEV_ZONED
/* Zoned block device emulation entry points. */
int null_init_zoned_dev(struct nullb_device *dev, struct queue_limits *lim);
int null_register_zoned_dev(struct nullb *nullb);
void null_free_zoned_dev(struct nullb_device *dev);
int null_report_zones(struct gendisk *disk, sector_t sector,
		      unsigned int nr_zones, report_zones_cb cb, void *data);
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
				    sector_t sector, sector_t nr_sectors);
size_t null_zone_valid_read_len(struct nullb *nullb,
				sector_t sector, unsigned int len);
ssize_t zone_cond_store(struct nullb_device *dev, const char *page,
			size_t count, enum blk_zone_cond cond);
143 #else
/*
 * Stubs used when zoned support is compiled out: configuring a zoned
 * device is rejected outright.
 */
static inline int null_init_zoned_dev(struct nullb_device *dev,
		struct queue_limits *lim)
{
	pr_err("CONFIG_BLK_DEV_ZONED not enabled\n");
	return -EINVAL;
}
/* Registration of a zoned disk cannot succeed without zoned support. */
static inline int null_register_zoned_dev(struct nullb *nullb)
{
	return -ENODEV;
}
/* No zone state is ever allocated, so there is nothing to free. */
static inline void null_free_zoned_dev(struct nullb_device *dev) {}
/* Zone commands are not supported when zoned support is compiled out. */
static inline blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd,
			enum req_op op, sector_t sector, sector_t nr_sectors)
{
	return BLK_STS_NOTSUPP;
}
/*
 * Without zones there is no write pointer to limit reads, so the whole
 * requested length is valid.
 */
static inline size_t null_zone_valid_read_len(struct nullb *nullb,
					      sector_t sector,
					      unsigned int len)
{
	return len;
}
/* Changing a zone condition via configfs requires zoned support. */
static inline ssize_t zone_cond_store(struct nullb_device *dev,
				      const char *page, size_t count,
				      enum blk_zone_cond cond)
{
	return -EOPNOTSUPP;
}
172 #define null_report_zones	NULL
173 #endif /* CONFIG_BLK_DEV_ZONED */
#endif /* __BLK_NULL_BLK_H */
175