#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
			struct bio *bio);
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio);
void blk_queue_bypass_start(struct request_queue *q);
void blk_queue_bypass_end(struct request_queue *q);
void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
bool __blk_end_bidi_request(struct request *rq, int error,
			    unsigned int nr_bytes, unsigned int bidi_bytes);

void blk_rq_timed_out_timer(unsigned long data);
void blk_delete_timer(struct request *);
void blk_add_timer(struct request *);
void __generic_unplug_device(struct request_queue *);

/*
 * Internal atomic flags for request handling
 */
enum rq_atomic_flags {
	REQ_ATOM_COMPLETE = 0,
};

/*
 * The EH timer and IO completion will both attempt to 'grab' the request;
 * make sure that only one of them succeeds.
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}
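/*
 * Illustrative sketch only (compiled out): example_completion_path() and
 * example_timeout_path() are hypothetical functions, not part of the block
 * layer.  They show the intended pairing: whichever side loses the
 * test_and_set_bit() race in blk_mark_rq_complete() backs off and leaves
 * the request to the winner.
 */
#if 0
static void example_completion_path(struct request *rq)
{
	/* Normal IO completion: only proceed if we won the race. */
	if (!blk_mark_rq_complete(rq))
		__blk_complete_request(rq);
}

static void example_timeout_path(struct request *rq)
{
	/* Timeout handling: if completion already grabbed @rq, back off. */
	if (blk_mark_rq_complete(rq))
		return;
	/* ... this path now owns error handling for @rq ... */
}
#endif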

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))

void blk_insert_flush(struct request *rq);
void blk_abort_flushes(struct request_queue *q);

static inline struct request *__elv_next_request(struct request_queue *q)
{
	struct request *rq;

	while (1) {
		if (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			return rq;
		}

		/*
		 * A flush request is running and the drive doesn't support
		 * queueable flushes, so we can hold the queue until the
		 * flush request finishes.  Even if we didn't hold it, the
		 * driver couldn't dispatch the next requests and would
		 * requeue them.  Holding the queue can also improve
		 * throughput.  For example, with requests flush1, write1,
		 * flush2: flush1 is dispatched, the queue is held, and
		 * write1 is not inserted.  After flush1 finishes, flush2 is
		 * dispatched; since the disk cache is already clean, flush2
		 * finishes very quickly, so flush2 is effectively folded
		 * into flush1.
		 * Since the queue is held, a flag is set to indicate that
		 * the queue should be restarted later.  See flush_end_io()
		 * for details.
		 */
		if (q->flush_pending_idx != q->flush_running_idx &&
				!queue_flush_queueable(q)) {
			q->flush_queue_delayed = 1;
			return NULL;
		}
		if (unlikely(blk_queue_dead(q)) ||
		    !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
			return NULL;
	}
}

static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_activate_req_fn)
		e->type->ops.elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_deactivate_req_fn)
		e->type->ops.elevator_deactivate_req_fn(q, rq);
}

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
int blk_try_merge(struct request *rq, struct bio *bio);

void blk_queue_congestion_threshold(struct request_queue *q);

int blk_dev_init(void);


/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started, and
 *	c) it's a file system request or a discard request
 */
static inline int blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk &&
	       (rq->cmd_flags & REQ_IO_STAT) &&
	       (rq->cmd_type == REQ_TYPE_FS ||
	        (rq->cmd_flags & REQ_DISCARD));
}

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/**
 * create_io_context - try to create task->io_context
 * @gfp_mask: allocation mask
 * @node: allocation node
 *
 * If %current->io_context is %NULL, allocate a new io_context and install
 * it.  Returns %current->io_context, which may be %NULL if allocation
 * failed.
 *
 * Note that this function can't be called with IRQs disabled because
 * task_lock, which protects %current->io_context, is IRQ-unsafe.
 */
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
	WARN_ON_ONCE(irqs_disabled());
	if (unlikely(!current->io_context))
		create_task_io_context(current, gfp_mask, node);
	return current->io_context;
}
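/*
 * Illustrative sketch only (compiled out): example_prepare_ioc() is a
 * hypothetical caller, not block layer code.  It shows how a request
 * allocation path could make sure %current has an io_context before
 * sleeping: the queue_lock is dropped first so create_io_context() runs
 * with IRQs enabled, as the comment above requires.
 */
#if 0
static void example_prepare_ioc(struct request_queue *q, gfp_t gfp_mask)
{
	spin_unlock_irq(q->queue_lock);
	create_io_context(gfp_mask, q->node);
	spin_lock_irq(q->queue_lock);
}
#endif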

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
	return false;
}
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */

#endif /* BLK_INTERNAL_H */