/*
 * Header file for the BFQ I/O scheduler: data structures and
 * prototypes of interface functions among BFQ components.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License as
 *  published by the Free Software Foundation; either version 2 of the
 *  License, or (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 */
#ifndef _BFQ_H
#define _BFQ_H

#include <linux/blktrace_api.h>
#include <linux/hrtimer.h>
#include <linux/blk-cgroup.h>

#define BFQ_IOPRIO_CLASSES	3
#define BFQ_CL_IDLE_TIMEOUT	(HZ/5)

#define BFQ_MIN_WEIGHT			1
#define BFQ_MAX_WEIGHT			1000
#define BFQ_WEIGHT_CONVERSION_COEFF	10

#define BFQ_DEFAULT_QUEUE_IOPRIO	4

#define BFQ_WEIGHT_LEGACY_DFL	100
#define BFQ_DEFAULT_GRP_IOPRIO	0
#define BFQ_DEFAULT_GRP_CLASS	IOPRIO_CLASS_BE

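/*
 * Illustrative sketch, not part of this header: the ioprio-to-weight
 * mapping implied by the constants above, consistent with
 * bfq_ioprio_to_weight() declared later in this file. Lower ioprio
 * values denote higher priority, so the weight grows as the ioprio
 * decreases (with IOPRIO_BE_NR == 8: ioprio 0 -> 80, ioprio 7 -> 10).
 * The example_ name is made up to avoid clashing with real symbols.
 */
static inline unsigned short example_ioprio_to_weight(int ioprio)
{
	return (IOPRIO_BE_NR - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF;
}
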
/*
 * Soft real-time applications are far more latency-sensitive than
 * interactive ones. Over-raise the weight of the former to privilege
 * them against the latter.
 */
#define BFQ_SOFTRT_WEIGHT_FACTOR	100

struct bfq_entity;

/**
 * struct bfq_service_tree - per ioprio_class service tree.
 *
 * Each service tree represents a B-WF2Q+ scheduler on its own.  Each
 * ioprio_class has its own independent scheduler, and so its own
 * bfq_service_tree.  All the fields are protected by the queue lock
 * of the containing bfqd.
 */
struct bfq_service_tree {
	/* tree for active entities (i.e., those backlogged) */
	struct rb_root active;
	/* tree for idle entities (i.e., not backlogged, with V < F_i) */
	struct rb_root idle;

	/* idle entity with minimum F_i */
	struct bfq_entity *first_idle;
	/* idle entity with maximum F_i */
	struct bfq_entity *last_idle;

	/* scheduler virtual time */
	u64 vtime;
	/* scheduler weight sum; active and idle entities contribute to it */
	unsigned long wsum;
};
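
/*
 * Illustrative sketch, an assumption rather than code from BFQ: in
 * B-WF2Q+ the virtual time of a service tree advances in proportion
 * to the service delivered, divided by the total weight @wsum of the
 * entities enqueued on the tree (the real code also applies a
 * fixed-point shift for precision, omitted here for simplicity).
 */
static inline void example_advance_vtime(struct bfq_service_tree *st,
					 int served)
{
	if (st->wsum)
		st->vtime += div_u64((u64)served, st->wsum);
}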

/**
 * struct bfq_sched_data - multi-class scheduler.
 *
 * bfq_sched_data is the basic scheduler queue.  It supports three
 * ioprio_classes, and can be used either as a toplevel queue or as an
 * intermediate queue in a hierarchical setup.
 *
 * The supported ioprio_classes are the same as in CFQ, in descending
 * priority order: IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
 * Requests from higher-priority queues are served before all the
 * requests from lower-priority queues; among requests of the same
 * queue, requests are served according to B-WF2Q+.
 *
 * The schedule is implemented by the service trees, plus the field
 * @next_in_service, which points to the entity on the active trees
 * that will be served next, if 1) no changes in the schedule occur
 * before the current in-service entity is expired, 2) the in-service
 * queue becomes idle when it expires, and 3) if the entity pointed to
 * by in_service_entity is not a queue, then the in-service child
 * entity of the entity pointed to by in_service_entity becomes idle
 * on expiration. This peculiar definition allows for the following
 * optimization, not yet exploited: while a given entity is still in
 * service, we already know which is the best candidate for next
 * service among the other active entities in the same parent
 * entity. We can then quickly compare the timestamps of the
 * in-service entity with those of such a best candidate.
 *
 * All fields are protected by the lock of the containing bfqd.
 */
struct bfq_sched_data {
	/* entity in service */
	struct bfq_entity *in_service_entity;
	/* head-of-line entity (see comments above) */
	struct bfq_entity *next_in_service;
	/* array of service trees, one per ioprio_class */
	struct bfq_service_tree service_tree[BFQ_IOPRIO_CLASSES];
	/* last time CLASS_IDLE was served */
	unsigned long bfq_class_idle_last_service;
};
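
/*
 * Illustrative sketch, an assumption rather than code from BFQ: the
 * strict inter-class priority described above can be implemented by
 * scanning the per-class service trees in priority order (RT first,
 * then BE, then IDLE) and picking the first one with active entities.
 */
static inline struct bfq_service_tree *
example_first_nonempty_st(struct bfq_sched_data *sd)
{
	int i;

	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
		if (!RB_EMPTY_ROOT(&sd->service_tree[i].active))
			return &sd->service_tree[i];
	return NULL;
}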

/**
 * struct bfq_weight_counter - counter of the number of all active entities
 *                             with a given weight.
 */
struct bfq_weight_counter {
	unsigned int weight; /* weight of the entities this counter refers to */
	unsigned int num_active; /* nr of active entities with this weight */
	/*
	 * Weights tree member (see bfq_data's @queue_weights_tree and
	 * @group_weights_tree)
	 */
	struct rb_node weights_node;
};
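
/*
 * Illustrative sketch, an assumption about how the weights trees
 * declared in bfq_data are used: since each tree holds exactly one
 * bfq_weight_counter per distinct weight, checking whether all active
 * entities have the same weight reduces to checking whether the tree
 * contains at most one node.
 */
static inline bool example_all_same_weight(struct rb_root *root)
{
	return !root->rb_node ||
	       (!root->rb_node->rb_left && !root->rb_node->rb_right);
}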

/**
 * struct bfq_entity - schedulable entity.
 *
 * A bfq_entity is used to represent either a bfq_queue (leaf node in the
 * cgroup hierarchy) or a bfq_group in the upper-level scheduler.  Each
 * entity belongs to the sched_data of the parent group in the cgroup
 * hierarchy.  Non-leaf entities also have their own sched_data, stored
 * in @my_sched_data.
 *
 * Each entity stores independently its priority values; this would
 * allow different weights on different devices, but this
 * functionality is not exported to userspace yet.  Priorities and
 * weights are updated lazily, first storing the new values into the
 * new_* fields, then setting the @prio_changed flag.  As soon as
 * there is a transition in the entity state that allows the priority
 * update to take place, the effective and the requested priority
 * values are synchronized.
 *
 * Unless cgroups are used, the weight value is calculated from the
 * ioprio to export the same interface as CFQ.  When dealing with
 * "well-behaved" queues (i.e., queues that do not spend too much
 * time consuming their budget and have true sequential behavior, and
 * when there are no external factors breaking anticipation), the
 * relative weights at each level of the cgroups hierarchy should be
 * guaranteed.  All the fields are protected by the queue lock of the
 * containing bfqd.
 */
struct bfq_entity {
	/* service_tree member */
	struct rb_node rb_node;
	/* pointer to the weight counter associated with this entity */
	struct bfq_weight_counter *weight_counter;

	/*
	 * Flag, true if the entity is on a tree (either the active or
	 * the idle one of its service_tree) or is in service.
	 */
	bool on_st;

	/* B-WF2Q+ start and finish timestamps [sectors/weight] */
	u64 start, finish;

	/* tree the entity is enqueued into; %NULL if not on a tree */
	struct rb_root *tree;

	/*
	 * minimum start time of the (active) subtree rooted at this
	 * entity; used for O(log N) lookups into active trees
	 */
	u64 min_start;

	/* amount of service received during the last service slot */
	int service;

	/* budget, used also to calculate F_i: F_i = S_i + @budget / @weight */
	int budget;

	/* weight of the queue */
	int weight;
	/* next weight if a change is in progress */
	int new_weight;

	/* original weight, used to implement weight boosting */
	int orig_weight;

	/* parent entity, for hierarchical scheduling */
	struct bfq_entity *parent;

	/*
	 * For non-leaf nodes in the hierarchy, the associated
	 * scheduler queue, %NULL on leaf nodes.
	 */
	struct bfq_sched_data *my_sched_data;
	/* the scheduler queue this entity belongs to */
	struct bfq_sched_data *sched_data;

	/* flag, set to request a weight, ioprio or ioprio_class change */
	int prio_changed;
};
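
/*
 * Illustrative sketch, an assumption rather than code from BFQ: the
 * finish timestamp of an entity follows the formula in the comments
 * above, F_i = S_i + budget/weight, and a lazily requested weight
 * change (@new_weight plus @prio_changed) can be applied at the first
 * safe state transition (the real code also handles ioprio changes
 * and fixed-point scaling).
 */
static inline void example_update_entity(struct bfq_entity *entity)
{
	if (entity->prio_changed) {
		entity->weight = entity->new_weight;
		entity->prio_changed = 0;
	}
	entity->finish = entity->start +
		div_u64((u64)entity->budget, entity->weight);
}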

struct bfq_group;

/**
 * struct bfq_ttime - per process thinktime stats.
 */
struct bfq_ttime {
	/* completion time of the last request */
	u64 last_end_request;

	/* total process thinktime */
	u64 ttime_total;
	/* number of thinktime samples */
	unsigned long ttime_samples;
	/* average process thinktime */
	u64 ttime_mean;
};
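
/*
 * Illustrative sketch, an assumption rather than the exact BFQ update
 * rule: the think time of a process is the gap between the completion
 * of its last request and the arrival of its next one; feeding each
 * sample into the totals keeps @ttime_mean equal to the running
 * average (the real code uses a decayed average instead).
 */
static inline void example_add_ttime_sample(struct bfq_ttime *ttime,
					    u64 now_ns)
{
	ttime->ttime_total += now_ns - ttime->last_end_request;
	ttime->ttime_samples++;
	ttime->ttime_mean = div_u64(ttime->ttime_total,
				    ttime->ttime_samples);
}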

/**
 * struct bfq_queue - leaf schedulable entity.
 *
 * A bfq_queue is a leaf request queue; it can be associated with an
 * io_context or more than one, if it is async or shared between
 * cooperating processes. @cgroup holds a reference to the cgroup, to
 * be sure that it does not disappear while a bfqq still references it
 * (mostly to avoid races between request issuing and task migration
 * followed by cgroup destruction).
 * All the fields are protected by the queue lock of the containing bfqd.
 */
struct bfq_queue {
	/* reference counter */
	int ref;
	/* parent bfq_data */
	struct bfq_data *bfqd;

	/* current ioprio and ioprio class */
	unsigned short ioprio, ioprio_class;
	/* next ioprio and ioprio class if a change is in progress */
	unsigned short new_ioprio, new_ioprio_class;

	/*
	 * Shared bfq_queue if queue is cooperating with one or more
	 * other queues.
	 */
	struct bfq_queue *new_bfqq;
	/* request-position tree member (see bfq_group's @rq_pos_tree) */
	struct rb_node pos_node;
	/* request-position tree root (see bfq_group's @rq_pos_tree) */
	struct rb_root *pos_root;

	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* number of sync and async requests queued */
	int queued[2];
	/* number of requests currently allocated */
	int allocated;
	/* number of pending metadata requests */
	int meta_pending;
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	/* entity representing this queue in the scheduler */
	struct bfq_entity entity;

	/* maximum budget allowed from the feedback mechanism */
	int max_budget;
	/* budget expiration (in jiffies) */
	unsigned long budget_timeout;

	/* number of requests on the dispatch list or inside the driver */
	int dispatched;

	/* status flags */
	unsigned long flags;

	/* node for active/idle bfqq list inside parent bfqd */
	struct list_head bfqq_list;

	/* associated @bfq_ttime struct */
	struct bfq_ttime ttime;

	/* bit vector: a 1 for each seeky request in history */
	u32 seek_history;

	/* node for the device's burst list */
	struct hlist_node burst_list_node;

	/* position of the last request enqueued */
	sector_t last_request_pos;

	/*
	 * Number of consecutive pairs of request completion and
	 * arrival, such that the queue becomes idle after the
	 * completion, but the next request arrives within an idle
	 * time slice; used only if the queue's IO_bound flag has been
	 * cleared.
	 */
	unsigned int requests_within_timer;

	/* pid of the process owning the queue, used for logging purposes */
	pid_t pid;

	/*
	 * Pointer to the bfq_io_cq owning the bfq_queue, set to %NULL
	 * if the queue is shared.
	 */
	struct bfq_io_cq *bic;

	/* current maximum weight-raising time for this queue */
	unsigned long wr_cur_max_time;
	/*
	 * Minimum time instant such that, only if a new request is
	 * enqueued after this time instant in an idle @bfq_queue with
	 * no outstanding requests, is the task associated with the
	 * queue deemed soft real-time (see the comments on the
	 * function bfq_bfqq_softrt_next_start()).
	 */
	unsigned long soft_rt_next_start;
	/*
	 * Start time of the current weight-raising period if
	 * the @bfq_queue is being weight-raised, otherwise
	 * finish time of the last weight-raising period.
	 */
	unsigned long last_wr_start_finish;
	/* factor by which the weight of this queue is multiplied */
	unsigned int wr_coeff;
	/*
	 * Time of the last transition of the @bfq_queue from idle to
	 * backlogged.
	 */
	unsigned long last_idle_bklogged;
	/*
	 * Cumulative service received from the @bfq_queue since the
	 * last transition from idle to backlogged.
	 */
	unsigned long service_from_backlogged;

	/*
	 * Value of the wr start time when switching to soft rt.
	 */
	unsigned long wr_start_at_switch_to_srt;

	unsigned long split_time; /* time of last split */
};
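
/*
 * Illustrative sketch, an assumption rather than code from BFQ: the
 * @seek_history bit vector above can be kept up to date by shifting
 * in one bit per request, set iff the distance from the previous
 * request position exceeds some seekiness threshold
 * (EXAMPLE_SEEK_THR is a made-up name and value).
 */
#define EXAMPLE_SEEK_THR	((sector_t)(8 * 100))
static inline void example_update_seek_history(struct bfq_queue *bfqq,
					       sector_t rq_pos)
{
	sector_t dist = (rq_pos > bfqq->last_request_pos) ?
		rq_pos - bfqq->last_request_pos :
		bfqq->last_request_pos - rq_pos;

	bfqq->seek_history <<= 1;
	bfqq->seek_history |= (dist > EXAMPLE_SEEK_THR);
	bfqq->last_request_pos = rq_pos;
}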

/**
 * struct bfq_io_cq - per (request_queue, io_context) structure.
 */
struct bfq_io_cq {
	/* associated io_cq structure */
	struct io_cq icq; /* must be the first member */
	/* array of two process queues, the sync and the async */
	struct bfq_queue *bfqq[2];
	/* per (request_queue, blkcg) ioprio */
	int ioprio;
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	uint64_t blkcg_serial_nr; /* the current blkcg serial */
#endif
	/*
	 * Snapshot of the has_short_ttime flag before merging; taken
	 * to remember its value while the queue is merged, so as to
	 * be able to restore it in case of split.
	 */
	bool saved_has_short_ttime;
	/*
	 * Same purpose as the previous field for the I/O bound
	 * classification of a queue.
	 */
	bool saved_IO_bound;

	/*
	 * Same purpose as the previous fields for the value of the
	 * field keeping track of whether the queue belongs to a
	 * large burst.
	 */
	bool saved_in_large_burst;
	/*
	 * True if the queue belonged to a burst list before its merge
	 * with another cooperating queue.
	 */
	bool was_in_burst_list;

	/*
	 * Similar to previous fields: save weight-raising information.
	 */
	unsigned long saved_wr_coeff;
	unsigned long saved_last_wr_start_finish;
	unsigned long saved_wr_start_at_switch_to_srt;
	unsigned int saved_wr_cur_max_time;
	struct bfq_ttime saved_ttime;
};
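
/*
 * Illustrative sketch, an assumption about how the saved_* fields
 * above are meant to be used: before a queue merge, the
 * weight-raising state of the bfq_queue is snapshotted into its
 * bfq_io_cq, so that it can be restored if the queue is later split.
 */
static inline void example_save_wr_state(struct bfq_io_cq *bic,
					 struct bfq_queue *bfqq)
{
	bic->saved_wr_coeff = bfqq->wr_coeff;
	bic->saved_wr_start_at_switch_to_srt =
		bfqq->wr_start_at_switch_to_srt;
	bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
	bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
	bic->saved_ttime = bfqq->ttime;
}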

enum bfq_device_speed {
	BFQ_BFQD_FAST,
	BFQ_BFQD_SLOW,
};

/**
 * struct bfq_data - per-device data structure.
 *
 * All the fields are protected by @lock.
 */
struct bfq_data {
	/* device request queue */
	struct request_queue *queue;
	/* dispatch queue */
	struct list_head dispatch;

	/* root bfq_group for the device */
	struct bfq_group *root_group;

	/*
	 * rbtree of weight counters of @bfq_queues, sorted by
	 * weight. Used to keep track of whether all @bfq_queues have
	 * the same weight. The tree contains one counter for each
	 * distinct weight associated with some active and not
	 * weight-raised @bfq_queue (see the comments to the functions
	 * bfq_weights_tree_[add|remove] for further details).
	 */
	struct rb_root queue_weights_tree;
	/*
	 * rbtree of non-queue @bfq_entity weight counters, sorted by
	 * weight. Used to keep track of whether all @bfq_groups have
	 * the same weight. The tree contains one counter for each
	 * distinct weight associated with some active @bfq_group (see
	 * the comments to the functions bfq_weights_tree_[add|remove]
	 * for further details).
	 */
	struct rb_root group_weights_tree;

	/*
	 * Number of bfq_queues containing requests (including the
	 * queue in service, even if it is idling).
	 */
	int busy_queues;
	/* number of weight-raised busy @bfq_queues */
	int wr_busy_queues;
	/* number of queued requests */
	int queued;
	/* number of requests dispatched and waiting for completion */
	int rq_in_driver;

	/*
	 * Maximum number of requests in driver in the last
	 * @hw_tag_samples completed requests.
	 */
	int max_rq_in_driver;
	/* number of samples used to calculate hw_tag */
	int hw_tag_samples;
	/* flag set to one if the driver is exhibiting queueing behavior */
	int hw_tag;

	/* number of budgets assigned */
	int budgets_assigned;

	/*
	 * Timer set when idling (waiting) for the next request from
	 * the queue in service.
	 */
	struct hrtimer idle_slice_timer;

	/* bfq_queue in service */
	struct bfq_queue *in_service_queue;

	/* on-disk position of the last served request */
	sector_t last_position;

	/* time of last request completion (ns) */
	u64 last_completion;

	/* time of first rq dispatch in current observation interval (ns) */
	u64 first_dispatch;
	/* time of last rq dispatch in current observation interval (ns) */
	u64 last_dispatch;

	/* beginning of the last budget */
	ktime_t last_budget_start;
	/* beginning of the last idle slice */
	ktime_t last_idling_start;

	/* number of samples in current observation interval */
	int peak_rate_samples;
	/* num of samples of seq dispatches in current observation interval */
	u32 sequential_samples;
	/* total num of sectors transferred in current observation interval */
	u64 tot_sectors_dispatched;
	/* max rq size seen during current observation interval (sectors) */
	u32 last_rq_max_size;
	/* time elapsed from first dispatch in current observ. interval (us) */
	u64 delta_from_first;
	/*
	 * Current estimate of the device peak rate, measured in
	 * [BFQ_RATE_SHIFT * sectors/usec]. The left-shift by
	 * BFQ_RATE_SHIFT is performed to increase precision in
	 * fixed-point calculations.
	 */
	u32 peak_rate;

	/* maximum budget allotted to a bfq_queue before rescheduling */
	int bfq_max_budget;

	/* list of all the bfq_queues active on the device */
	struct list_head active_list;
	/* list of all the bfq_queues idle on the device */
	struct list_head idle_list;

	/*
	 * Timeout for async/sync requests; when it fires, requests
	 * are served in fifo order.
	 */
	u64 bfq_fifo_expire[2];
	/* weight of backward seeks wrt forward ones */
	unsigned int bfq_back_penalty;
	/* maximum allowed backward seek */
	unsigned int bfq_back_max;
	/* maximum idling time */
	u32 bfq_slice_idle;

	/* user-configured max budget value (0 for auto-tuning) */
	int bfq_user_max_budget;
	/*
	 * Timeout for bfq_queues to consume their budget; used to
	 * prevent seeky queues from imposing long latencies on
	 * sequential or quasi-sequential ones (this also implies that
	 * seeky queues cannot receive guarantees in the service
	 * domain; after a timeout they are charged for the time they
	 * have been in service, to preserve fairness among them, but
	 * without service-domain guarantees).
	 */
	unsigned int bfq_timeout;

	/*
	 * Number of consecutive requests that must be issued within
	 * the idle time slice to re-enable idling for a queue that
	 * was marked as non-I/O-bound (see the definition of the
	 * IO_bound flag for further details).
	 */
	unsigned int bfq_requests_within_timer;

	/*
	 * Force device idling whenever needed to provide accurate
	 * service guarantees, without caring about throughput
	 * issues. CAVEAT: this may even increase latencies, in case
	 * of useless idling for processes that have stopped doing I/O.
	 */
	bool strict_guarantees;

	/*
	 * Last time at which a queue entered the current burst of
	 * queues being activated shortly after each other; for more
	 * details about this and the following parameters related to
	 * a burst of activations, see the comments on the function
	 * bfq_handle_burst.
	 */
	unsigned long last_ins_in_burst;
	/*
	 * Reference time interval used to decide whether a queue has
	 * been activated shortly after @last_ins_in_burst.
	 */
	unsigned long bfq_burst_interval;
	/* number of queues in the current burst of queue activations */
	int burst_size;

	/* common parent entity for the queues in the burst */
	struct bfq_entity *burst_parent_entity;
	/*
	 * Maximum burst size above which the current queue-activation
	 * burst is deemed 'large'.
	 */
	unsigned long bfq_large_burst_thresh;
	/* true if a large queue-activation burst is in progress */
	bool large_burst;
	/*
	 * Head of the burst list (as for the above fields, more
	 * details in the comments on the function bfq_handle_burst).
	 */
	struct hlist_head burst_list;

	/* if set to true, low-latency heuristics are enabled */
	bool low_latency;
	/*
	 * Maximum factor by which the weight of a weight-raised queue
	 * is multiplied.
	 */
	unsigned int bfq_wr_coeff;
	/* maximum duration of a weight-raising period (jiffies) */
	unsigned int bfq_wr_max_time;

	/* Maximum weight-raising duration for soft real-time processes */
	unsigned int bfq_wr_rt_max_time;
	/*
	 * Minimum idle period after which weight-raising may be
	 * reactivated for a queue (in jiffies).
	 */
	unsigned int bfq_wr_min_idle_time;
	/*
	 * Minimum period between request arrivals after which
	 * weight-raising may be reactivated for an already busy async
	 * queue (in jiffies).
	 */
	unsigned long bfq_wr_min_inter_arr_async;

	/* Max service rate for a soft real-time queue, in sectors/sec */
	unsigned int bfq_wr_max_softrt_rate;
	/*
	 * Cached value of the product R*T, used for computing the
	 * maximum duration of weight raising automatically.
	 */
	u64 RT_prod;
	/* device-speed class for the low-latency heuristic */
	enum bfq_device_speed device_speed;

	/* fallback dummy bfqq for extreme OOM conditions */
	struct bfq_queue oom_bfqq;

	spinlock_t lock;

	/*
	 * bic associated with the task issuing the current bio for
	 * merging. This and the next field make it possible to
	 * perform the bic lookup, needed by bio-merge functions,
	 * before the scheduler lock is taken, and thus to avoid
	 * taking the request-queue lock while the scheduler lock is
	 * being held.
	 */
	struct bfq_io_cq *bio_bic;
	/* bfqq associated with the task issuing the current bio for merging */
	struct bfq_queue *bio_bfqq;
};
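
/*
 * Illustrative sketch, an assumption about the fixed-point encoding
 * of @peak_rate above: BFQ_RATE_SHIFT is defined in bfq-iosched.c,
 * not here, so a value of 16 is assumed purely for illustration.
 * Undoing the shift and scaling by USEC_PER_SEC yields sectors per
 * second.
 */
#define EXAMPLE_BFQ_RATE_SHIFT	16
static inline u64 example_peak_rate_sects_per_sec(u32 peak_rate)
{
	return ((u64)peak_rate * USEC_PER_SEC) >> EXAMPLE_BFQ_RATE_SHIFT;
}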

enum bfqq_state_flags {
	BFQQF_just_created = 0,	/* queue just allocated */
	BFQQF_busy,		/* has requests or is in service */
	BFQQF_wait_request,	/* waiting for a request */
	BFQQF_non_blocking_wait_rq, /*
				     * waiting for a request
				     * without idling the device
				     */
	BFQQF_fifo_expire,	/* FIFO checked in this slice */
	BFQQF_has_short_ttime,	/* queue has a short think time */
	BFQQF_sync,		/* synchronous queue */
	BFQQF_IO_bound,		/*
				 * bfqq has timed out at least once
				 * having consumed at most 2/10 of
				 * its budget
				 */
	BFQQF_in_large_burst,	/*
				 * bfqq activated in a large burst,
				 * see comments to bfq_handle_burst.
				 */
	BFQQF_softrt_update,	/*
				 * may need softrt-next-start
				 * update
				 */
	BFQQF_coop,		/* bfqq is shared */
	BFQQF_split_coop	/* shared bfqq will be split */
};

#define BFQ_BFQQ_FNS(name)						\
void bfq_mark_bfqq_##name(struct bfq_queue *bfqq);			\
void bfq_clear_bfqq_##name(struct bfq_queue *bfqq);			\
int bfq_bfqq_##name(const struct bfq_queue *bfqq);

BFQ_BFQQ_FNS(just_created);
BFQ_BFQQ_FNS(busy);
BFQ_BFQQ_FNS(wait_request);
BFQ_BFQQ_FNS(non_blocking_wait_rq);
BFQ_BFQQ_FNS(fifo_expire);
BFQ_BFQQ_FNS(has_short_ttime);
BFQ_BFQQ_FNS(sync);
BFQ_BFQQ_FNS(IO_bound);
BFQ_BFQQ_FNS(in_large_burst);
BFQ_BFQQ_FNS(coop);
BFQ_BFQQ_FNS(split_coop);
BFQ_BFQQ_FNS(softrt_update);
#undef BFQ_BFQQ_FNS
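
/*
 * Illustrative sketch, an assumption about the definition side of the
 * prototypes above (the real definitions live in bfq-iosched.c): each
 * generated function is a bit operation on bfq_queue->flags. The
 * example_* names below are made up to avoid clashing with the real
 * symbols.
 */
#define EXAMPLE_BFQ_BFQQ_FNS(name)					\
static inline void example_mark_bfqq_##name(struct bfq_queue *bfqq)	\
{									\
	__set_bit(BFQQF_##name, &bfqq->flags);				\
}									\
static inline void example_clear_bfqq_##name(struct bfq_queue *bfqq)	\
{									\
	__clear_bit(BFQQF_##name, &bfqq->flags);			\
}									\
static inline int example_bfqq_##name(const struct bfq_queue *bfqq)	\
{									\
	return test_bit(BFQQF_##name, &bfqq->flags);			\
}

EXAMPLE_BFQ_BFQQ_FNS(busy)
#undef EXAMPLE_BFQ_BFQQ_FNS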

/* Expiration reasons. */
enum bfqq_expiration {
	BFQQE_TOO_IDLE = 0,		/*
					 * queue has been idling for
					 * too long
					 */
	BFQQE_BUDGET_TIMEOUT,	/* budget took too long to be used */
	BFQQE_BUDGET_EXHAUSTED,	/* budget consumed */
	BFQQE_NO_MORE_REQUESTS,	/* the queue has no more requests */
	BFQQE_PREEMPTED		/* preemption in progress */
};

struct bfqg_stats {
#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
	/* number of ios merged */
	struct blkg_rwstat		merged;
	/* total time spent on device in ns, may not be accurate w/ queueing */
	struct blkg_rwstat		service_time;
	/* total time spent waiting in scheduler queue in ns */
	struct blkg_rwstat		wait_time;
	/* number of IOs queued up */
	struct blkg_rwstat		queued;
	/* total disk time and nr sectors dispatched by this group */
	struct blkg_stat		time;
	/* sum of number of ios queued across all samples */
	struct blkg_stat		avg_queue_size_sum;
	/* count of samples taken for average */
	struct blkg_stat		avg_queue_size_samples;
	/* how many times this group has been removed from service tree */
	struct blkg_stat		dequeue;
	/* total time spent waiting for it to be assigned a timeslice. */
	struct blkg_stat		group_wait_time;
	/* time spent idling for this blkcg_gq */
	struct blkg_stat		idle_time;
	/* total time with empty current active q with other requests queued */
	struct blkg_stat		empty_time;
	/* fields after this shouldn't be cleared on stat reset */
	uint64_t			start_group_wait_time;
	uint64_t			start_idle_time;
	uint64_t			start_empty_time;
	uint16_t			flags;
#endif	/* CONFIG_BFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
};

#ifdef CONFIG_BFQ_GROUP_IOSCHED

/*
 * struct bfq_group_data - per-blkcg storage for the blkio subsystem.
 *
 * @pd: policy data, inherited from blkcg_policy_data; must be the
 *      first member
 * @weight: weight of the bfq_group
 */
struct bfq_group_data {
	/* must be the first member */
	struct blkcg_policy_data pd;

	unsigned int weight;
};

/**
 * struct bfq_group - per (device, cgroup) data structure.
 * @entity: schedulable entity to insert into the parent group sched_data.
 * @sched_data: own sched_data, to contain child entities (they may be
 *              both bfq_queues and bfq_groups).
 * @bfqd: the bfq_data for the device this group acts upon.
 * @async_bfqq: array of async queues for all the tasks belonging to
 *              the group, one queue per ioprio value per ioprio_class,
 *              except for the idle class that has only one queue.
 * @async_idle_bfqq: async queue for the idle class (ioprio is ignored).
 * @my_entity: pointer to @entity, %NULL for the toplevel group; used
 *             to avoid too many special cases during group creation/
 *             migration.
 * @stats: stats for this bfqg.
 * @active_entities: number of active entities belonging to the group;
 *                   unused for the root group. Used to know whether there
 *                   are groups with more than one active @bfq_entity
 *                   (see the comments to the function
 *                   bfq_bfqq_may_idle()).
 * @rq_pos_tree: rbtree sorted by next_request position, used when
 *               determining if two or more queues have interleaving
 *               requests (see bfq_find_close_cooperator()).
 *
 * Each (device, cgroup) pair has its own bfq_group, i.e., for each cgroup
 * there is a set of bfq_groups, each one collecting the lower-level
 * entities belonging to the group that are acting on the same device.
 *
 * Locking works as follows:
 *    o @bfqd is protected by the queue lock, RCU is used to access it
 *      from the readers.
 *    o All the other fields are protected by the @bfqd queue lock.
 */
struct bfq_group {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* cached path for this blkg (see comments in bfq_bic_update_cgroup) */
	char blkg_path[128];

	/* reference counter (see comments in bfq_bic_update_cgroup) */
	int ref;

	struct bfq_entity entity;
	struct bfq_sched_data sched_data;

	void *bfqd;

	struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
	struct bfq_queue *async_idle_bfqq;

	struct bfq_entity *my_entity;

	int active_entities;

	struct rb_root rq_pos_tree;

	struct bfqg_stats stats;
};

#else
struct bfq_group {
	struct bfq_sched_data sched_data;

	struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
	struct bfq_queue *async_idle_bfqq;

	struct rb_root rq_pos_tree;
};
#endif

struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity);

/* --------------- main algorithm interface ----------------- */

#define BFQ_SERVICE_TREE_INIT	((struct bfq_service_tree)		\
				{ RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })

extern const int bfq_timeout;

struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync);
void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync);
struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic);
void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq);
void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
			  struct rb_root *root);
void bfq_weights_tree_remove(struct bfq_data *bfqd, struct bfq_entity *entity,
			     struct rb_root *root);
void bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		     bool compensate, enum bfqq_expiration reason);
void bfq_put_queue(struct bfq_queue *bfqq);
void bfq_end_wr_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
void bfq_schedule_dispatch(struct bfq_data *bfqd);
void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);

/* ------------ end of main algorithm interface -------------- */

/* ---------------- cgroups-support interface ---------------- */

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      unsigned int op);
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op);
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op);
void bfqg_stats_update_completion(struct bfq_group *bfqg, uint64_t start_time,
				  uint64_t io_start_time, unsigned int op);
void bfqg_stats_update_dequeue(struct bfq_group *bfqg);
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg);
void bfqg_stats_update_idle_time(struct bfq_group *bfqg);
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg);
void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg);
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg);

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg);
void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio);
void bfq_end_wr_async(struct bfq_data *bfqd);
struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
				     struct blkcg *blkcg);
struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
void bfqg_and_blkg_put(struct bfq_group *bfqg);

#ifdef CONFIG_BFQ_GROUP_IOSCHED
extern struct cftype bfq_blkcg_legacy_files[];
extern struct cftype bfq_blkg_files[];
extern struct blkcg_policy blkcg_policy_bfq;
#endif

/* ------------- end of cgroups-support interface ------------- */

/* - interface of the internal hierarchical B-WF2Q+ scheduler - */

#ifdef CONFIG_BFQ_GROUP_IOSCHED
/* both next loops stop at one of the child entities of the root group */
#define for_each_entity(entity)	\
	for (; entity ; entity = entity->parent)

/*
 * For each iteration, compute parent in advance, so as to be safe if
 * entity is deallocated during the iteration. Such a deallocation may
 * happen as a consequence of a bfq_put_queue that frees the bfq_queue
 * containing entity.
 */
#define for_each_entity_safe(entity, parent) \
	for (; entity && ({ parent = entity->parent; 1; }); entity = parent)

#else /* CONFIG_BFQ_GROUP_IOSCHED */
/*
 * Next two macros are fake loops when cgroups support is not
 * enabled. In fact, in such a case, there is only one level to go up
 * (to reach the root group).
 */
#define for_each_entity(entity)	\
	for (; entity ; entity = NULL)

#define for_each_entity_safe(entity, parent) \
	for (parent = NULL; entity ; entity = parent)
#endif /* CONFIG_BFQ_GROUP_IOSCHED */
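
/*
 * Usage sketch, an assumption rather than code from BFQ: walking up
 * the scheduling hierarchy with for_each_entity(), e.g. to propagate
 * the service received by a queue to all of its ancestors.
 */
static inline void example_charge_hierarchy(struct bfq_entity *entity,
					    int served)
{
	for_each_entity(entity)
		entity->service += served;
}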

struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq);
struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity);
struct bfq_service_tree *bfq_entity_service_tree(struct bfq_entity *entity);
struct bfq_entity *bfq_entity_of(struct rb_node *node);
unsigned short bfq_ioprio_to_weight(int ioprio);
void bfq_put_idle_entity(struct bfq_service_tree *st,
			 struct bfq_entity *entity);
struct bfq_service_tree *
__bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
				struct bfq_entity *entity,
				bool update_class_too);
void bfq_bfqq_served(struct bfq_queue *bfqq, int served);
void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq,
			  unsigned long time_ms);
bool __bfq_deactivate_entity(struct bfq_entity *entity,
			     bool ins_into_idle_tree);
bool next_queue_may_preempt(struct bfq_data *bfqd);
struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd);
void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd);
void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
			 bool ins_into_idle_tree, bool expiration);
void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		      bool expiration);
void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		       bool expiration);
void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq);

/* --------------- end of interface of B-WF2Q+ ---------------- */

/* Logging facilities. */
#ifdef CONFIG_BFQ_GROUP_IOSCHED
struct bfq_group *bfqq_group(struct bfq_queue *bfqq);

#define bfq_log_bfqq(bfqd, bfqq, fmt, args...)	do {			\
	blk_add_cgroup_trace_msg((bfqd)->queue,				\
			bfqg_to_blkg(bfqq_group(bfqq))->blkcg,		\
			"bfq%d%c " fmt, (bfqq)->pid,			\
			bfq_bfqq_sync((bfqq)) ? 'S' : 'A', ##args);	\
} while (0)

#define bfq_log_bfqg(bfqd, bfqg, fmt, args...)	do {			\
	blk_add_cgroup_trace_msg((bfqd)->queue,				\
		bfqg_to_blkg(bfqg)->blkcg, fmt, ##args);		\
} while (0)

#else /* CONFIG_BFQ_GROUP_IOSCHED */

#define bfq_log_bfqq(bfqd, bfqq, fmt, args...)	\
	blk_add_trace_msg((bfqd)->queue, "bfq%d%c " fmt, (bfqq)->pid,	\
			bfq_bfqq_sync((bfqq)) ? 'S' : 'A',		\
				##args)
#define bfq_log_bfqg(bfqd, bfqg, fmt, args...)		do {} while (0)

#endif /* CONFIG_BFQ_GROUP_IOSCHED */

#define bfq_log(bfqd, fmt, args...) \
	blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
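
/*
 * Usage sketch, an assumption rather than code from BFQ: the logging
 * macros above take printf-style arguments and emit blktrace
 * messages; the per-queue variant tags each message with the pid and
 * the sync/async nature of the queue, e.g.:
 *
 *	bfq_log(bfqd, "dispatched %d requests", count);
 *	bfq_log_bfqq(bfqd, bfqq, "budget left %d", budget);
 */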

#endif /* _BFQ_H */