#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct kmem_cache *blk_requestq_cachep;
extern struct kmem_cache *request_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask);
void blk_exit_rl(struct request_list *rl);
void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
			struct bio *bio);
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio);
void blk_queue_bypass_start(struct request_queue *q);
void blk_queue_bypass_end(struct request_queue *q);
void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
bool __blk_end_bidi_request(struct request *rq, int error,
			    unsigned int nr_bytes, unsigned int bidi_bytes);

void blk_rq_timed_out_timer(unsigned long data);
void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
			  unsigned int *next_set);
unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
void blk_delete_timer(struct request *);


bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
			     struct bio *bio);
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
			    struct bio *bio);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
			    unsigned int *request_count);

void blk_account_io_start(struct request *req, bool new_io);
void blk_account_io_completion(struct request *req, unsigned int bytes);
void blk_account_io_done(struct request *req);

/*
 * Internal atomic flags for request handling
 */
enum rq_atomic_flags {
	REQ_ATOM_COMPLETE = 0,
	REQ_ATOM_STARTED,
};

/*
 * The EH timer and IO completion will both attempt to 'grab' the request;
 * make sure that only one of them succeeds.
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}
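
/*
 * Illustrative sketch only, not part of the interface above: a timeout path
 * and a completion path are both expected to claim the request first, so
 * whichever side loses the test_and_set_bit() race simply backs off.  The
 * helper below is hypothetical and merely shows the intended calling pattern.
 */
static inline bool blk_example_claim_rq(struct request *rq)
{
	/* true if this path won the race and now owns completion handling */
	return !blk_mark_rq_complete(rq);
}
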
/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->cmd_flags & REQ_HASHED)

void blk_insert_flush(struct request *rq);

static inline struct request *__elv_next_request(struct request_queue *q)
{
	struct request *rq;

	while (1) {
		if (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			return rq;
		}

		/*
		 * A flush request is running and the flush isn't queueable in
		 * the drive, so we can hold the queue until the flush request
		 * finishes.  Even if we didn't do this, the driver couldn't
		 * dispatch further requests and would have to requeue them.
		 * Holding the queue can also improve throughput.  For
		 * example, with the sequence flush1, write1, flush2: flush1
		 * is dispatched, the queue is held, and write1 isn't inserted
		 * into the queue.  After flush1 finishes, flush2 is
		 * dispatched.  Since the disk cache is already clean, flush2
		 * finishes very quickly, so flush2 effectively gets folded
		 * into flush1.
		 * Because the queue is held, a flag is set to indicate that
		 * the queue should be restarted later.  See flush_end_io()
		 * for details.
		 */
		if (q->flush_pending_idx != q->flush_running_idx &&
				!queue_flush_queueable(q)) {
			q->flush_queue_delayed = 1;
			return NULL;
		}
		if (unlikely(blk_queue_bypass(q)) ||
		    !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
			return NULL;
	}
}

static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_activate_req_fn)
		e->type->ops.elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_deactivate_req_fn)
		e->type->ops.elevator_deactivate_req_fn(q, rq);
}

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
int blk_try_merge(struct request *rq, struct bio *bio);

void blk_queue_congestion_threshold(struct request_queue *q);

void __blk_run_queue_uncond(struct request_queue *q);

int blk_dev_init(void);


/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}

extern int blk_update_nr_requests(struct request_queue *, unsigned int);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started, and
 *	c) it's a file system request
 */
static inline int blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk &&
	       (rq->cmd_flags & REQ_IO_STAT) &&
	       (rq->cmd_type == REQ_TYPE_FS);
}

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/**
 * create_io_context - try to create task->io_context
 * @gfp_mask: allocation mask
 * @node: allocation node
 *
 * If %current->io_context is %NULL, allocate a new io_context and install
 * it.  Returns the current %current->io_context which may be %NULL if
 * allocation failed.
 *
 * Note that this function can't be called with IRQ disabled because
 * task_lock which protects %current->io_context is IRQ-unsafe.
 */
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
	WARN_ON_ONCE(irqs_disabled());
	if (unlikely(!current->io_context))
		create_task_io_context(current, gfp_mask, node);
	return current->io_context;
}
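
/*
 * Illustrative sketch only (hypothetical caller, not part of this interface):
 * request-allocation paths call create_io_context() before they may sleep,
 * so that %current has an io_context to charge the IO against.  Per the note
 * above, this must run with IRQs enabled.
 */
static inline struct io_context *blk_example_prepare_ioc(struct request_queue *q,
							 gfp_t gfp_mask)
{
	/* may return NULL if the allocation failed; callers must cope */
	return create_io_context(gfp_mask, q->node);
}
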
/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
	return false;
}
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */

#endif /* BLK_INTERNAL_H */