/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <xen/xen.h>
#include "blk-mq.h"
#include "blk-mq-sched.h"

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

#ifdef CONFIG_DEBUG_FS
extern struct dentry *blk_debugfs_root;
#endif

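/*
 * Rough sketch of the flush machinery this structure backs (driven by
 * blk-flush.c, not shown here): requests waiting for a flush are
 * double-buffered between the two flush_queue[] lists, with
 * flush_pending_idx selecting the list that collects newly pending
 * requests and flush_running_idx the list whose flush is currently
 * being carried out by flush_rq.
 */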
struct blk_flush_queue {
	unsigned int		flush_queue_delayed:1;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	/*
	 * flush_rq shares tag with this rq, both can't be active
	 * at the same time
	 */
	struct request		*orig_rq;
	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
	return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
}

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
		int node, int cmd_size, gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
			struct bio *bio);
void blk_freeze_queue(struct request_queue *q);

static inline void blk_queue_enter_live(struct request_queue *q)
{
	/*
	 * Given that running in generic_make_request() context
	 * guarantees that a live reference against q_usage_counter has
	 * been established, further references under that same context
	 * need not check that the queue has been frozen (marked dead).
	 */
	percpu_ref_get(&q->q_usage_counter);
}

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}
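/*
 * Illustrative example (values are hypothetical, not from the code
 * above): with a 4 KiB segment boundary, i.e. queue_segment_boundary()
 * returning 0xfff, a 256-byte bvec at physical address 0x1f00 followed
 * by a 256-byte bvec at 0x2000 is physically contiguous
 * (0x1f00 + 0x100 == 0x2000) but still not mergeable, because
 * 0x1f00 | 0xfff == 0x1fff while (0x2000 + 0xff) | 0xfff == 0x2fff:
 * the merged segment would straddle the boundary.
 */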

static inline bool __bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & queue_virt_boundary(q)) ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return __bvec_gap_to_prev(q, bprv, offset);
}
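/*
 * Illustrative example (values are hypothetical): with a 4 KiB virtual
 * boundary, i.e. queue_virt_boundary() returning 0xfff, a previous bvec
 * ending exactly on a 4 KiB boundary (bv_offset + bv_len == 0x1000)
 * followed by a bvec starting at offset 0x200 still reports a gap,
 * since 0x200 & 0xfff != 0: every element after the first has to start
 * on the virtual boundary for the device to accept the SG list.
 */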

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
			     struct bio *bio);
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
			    struct bio *bio);
bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct bio *bio);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
			    struct request **same_queue_rq);

void blk_account_io_start(struct request *req, bool new_io);
void blk_account_io_completion(struct request *req, unsigned int bytes);
void blk_account_io_done(struct request *req, u64 now);

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

int elevator_init_mq(struct request_queue *q);
int elevator_switch_mq(struct request_queue *q,
			      struct elevator_type *new_e);
void __elevator_exit(struct request_queue *, struct elevator_queue *);
int elv_register_queue(struct request_queue *q);
void elv_unregister_queue(struct request_queue *q);

static inline void elevator_exit(struct request_queue *q,
		struct elevator_queue *e)
{
	blk_mq_sched_free_requests(q);
	__elevator_exit(q, e);
}

struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
struct request *attempt_back_merge(struct request_queue *q, struct request *rq);
struct request *attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_dev_init(void);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started, and
 *	c) it's a file system request
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk &&
	       (rq->rq_flags & RQF_IO_STAT) &&
		!blk_rq_is_passthrough(rq);
}

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
 * is defined as 'unsigned int'; in addition it has to be aligned to the
 * logical block size, which is the minimum unit accepted by the hardware.
 */
static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
{
	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
}
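/*
 * Worked example: with a 512-byte logical block size this evaluates to
 * round_down(0xffffffff, 512) >> 9 == 0xfffffe00 >> 9 == 0x7fffff
 * sectors; with a 4096-byte logical block size it is
 * 0xfffff000 >> 9 == 0x7fff80 sectors.
 */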

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/**
 * create_io_context - try to create task->io_context
 * @gfp_mask: allocation mask
 * @node: allocation node
 *
 * If %current->io_context is %NULL, allocate a new io_context and install
 * it.  Returns the current %current->io_context which may be %NULL if
 * allocation failed.
 *
 * Note that this function can't be called with IRQ disabled because
 * task_lock which protects %current->io_context is IRQ-unsafe.
 */
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
	WARN_ON_ONCE(irqs_disabled());
	if (unlikely(!current->io_context))
		create_task_io_context(current, gfp_mask, node);
	return current->io_context;
}

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_register_queue(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);

#ifdef CONFIG_BLK_DEV_ZONED
void blk_queue_free_zone_bitmaps(struct request_queue *q);
#else
static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
#endif

#endif /* BLK_INTERNAL_H */