Lines matching "high-throughput" in block/bfq-iosched.c

1 // SPDX-License-Identifier: GPL-2.0-or-later
16 * BFQ is a proportional-share I/O scheduler, with some extra
17 * low-latency capabilities. BFQ also supports full hierarchical
20 * limitations can be found in Documentation/block/bfq-iosched.rst.
22 * BFQ is a proportional-share storage-I/O scheduling algorithm based
23 * on the slice-by-slice service scheme of CFQ. But BFQ assigns
25 * time slices. The device is not granted to the in-service process
28 * to distribute the device throughput among processes as desired,
29 * without any distortion due to throughput fluctuations, or to device
31 * B-WF2Q+, to schedule processes according to their budgets. More
33 * process/queue is assigned a user-configurable weight, and B-WF2Q+
34 * guarantees that each queue receives a fraction of the throughput
36 * B-WF2Q+, BFQ can afford to assign high budgets to I/O-bound
37 * processes issuing sequential requests (to boost the throughput),
38 * and yet guarantee a low latency to interactive and soft real-time
41 * In particular, to provide these low-latency guarantees, BFQ
42 * explicitly privileges the I/O of two classes of time-sensitive
43 * applications: interactive and soft real-time. In more detail, BFQ
50 * real-time application. For brevity, in these cases, the queue is
51 * said to be interactive or soft real-time. In both cases, BFQ
52 * privileges the service of the queue, over that of non-interactive
53 * and non-soft-real-time queues. This privileging is performed,
55 * call just weight-raising periods the time periods during which a
56 * queue is privileged, because deemed interactive or soft real-time.
58 * The detection of soft real-time queues/applications is described in
70 * non-empty queue stops being deemed interactive. Since a queue is
71 * weight-raised while it is deemed interactive, this maximum time
73 * weight-raising for interactive queues.
76 * preserving both a low latency and a high throughput on NCQ-capable,
77 * rotational or flash-based devices, and to get the job done quickly
78 * for applications consisting of many I/O-bound processes.
81 * the maximum-possible throughput at all times, then do switch off
82 * all low-latency heuristics for that device, by setting low_latency
91 * ones that guarantee a low latency to interactive and soft real-time
92 * applications, and a hierarchical extension based on H-WF2Q+.
94 * B-WF2Q+ is based on WF2Q+, which is described in [2], together with
95 * H-WF2Q+, while the augmented tree used here to implement B-WF2Q+
101 * Technologies (MST-2015), May 2015.
102 * http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf
105 * Algorithms", IEEE/ACM Transactions on Networking, 5(5):675-689,
108 * http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
110 * [3] I. Stoica and H. Abdel-Wahab, "Earliest Eligible Virtual Deadline
114 * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
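/*
 * Illustrative sketch (not from bfq-iosched.c; all names hypothetical):
 * the proportional-share guarantee described above, in its simplest
 * form. Over a long enough interval, an always-backlogged queue i
 * receives a fraction of the device throughput close to
 * weight[i] / sum(weights).
 */
static double ex_throughput_share(const unsigned int *weights, int nr, int i)
{
	unsigned long sum = 0;
	int j;

	for (j = 0; j < nr; j++)
		sum += weights[j];

	/* e.g. weights {100, 300, 600} -> shares 0.10, 0.30, 0.60 */
	return (double)weights[i] / sum;
}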
125 #include <linux/backing-dev.h>
131 #include "blk-mq.h"
132 #include "blk-mq-sched.h"
133 #include "bfq-iosched.h"
134 #include "blk-wbt.h"
139 __set_bit(BFQQF_##name, &(bfqq)->flags); \
143 __clear_bit(BFQQF_##name, &(bfqq)->flags); \
147 return test_bit(BFQQF_##name, &(bfqq)->flags); \
190 * writes to steal I/O throughput from reads.
198 * - when the group does writes, w.r.t. when it does reads;
199 * - when other groups do reads, w.r.t. when they do writes.
234 (!blk_queue_nonrot(bfqd->queue) || \
237 #define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 19)
239 * Sync random I/O is likely to be confused with soft real-time I/O,
240 * because it is characterized by limited throughput and apparently
243 * as soft real-time.
245 #define BFQQ_TOTALLY_SEEKY(bfqq) (bfqq->seek_history == -1)
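/*
 * Illustrative sketch (not from bfq-iosched.c; hypothetical names,
 * assumes 32-bit unsigned int): one way a history word such as
 * bfqq->seek_history can be maintained and then classified the way
 * BFQQ_SEEKY() and BFQQ_TOTALLY_SEEKY() do above. The rule deciding
 * whether a single request counts as a seek is an assumption here;
 * only the thresholds on the bit pattern come from the macros above.
 */
#define EX_SEEKY_THR	19		/* mirrors the "> 19 bits set" test */

static inline unsigned int ex_seek_history_add(unsigned int history,
					       int request_was_seeky)
{
	/* shift in one bit per request: 1 = large jump, 0 = roughly sequential */
	return (history << 1) | (request_was_seeky ? 1 : 0);
}

static inline int ex_seeky(unsigned int history)
{
	return __builtin_popcount(history) > EX_SEEKY_THR; /* like BFQQ_SEEKY() */
}

static inline int ex_totally_seeky(unsigned int history)
{
	return history == ~0U;	/* like BFQQ_TOTALLY_SEEKY(): every sample was a seek */
}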
247 /* Min number of samples required to perform peak-rate update */
249 /* Min observation time interval required to perform a peak-rate update (ns) */
251 /* Target observation time interval for a peak-rate update (ns) */
255 * Shift used for peak-rate fixed precision calculations.
257 * - the current shift: 16 positions
258 * - the current type used to store rate: u32
259 * - the current unit of measure for rate: [sectors/usec], or, more precisely,
262 * [1 / 2^BFQ_RATE_SHIFT, 2^(32 - BFQ_RATE_SHIFT)] sectors/usec =
263 * [1 / 2^16, 2^16] sectors/usec = [15e-6, 65536] sectors/usec =
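/*
 * Worked example (assumption: BFQ_RATE_SHIFT == 16, as stated above;
 * function names are hypothetical): converting a measured rate to the
 * left-shifted sectors/usec fixed-point form and back.
 * 500 MB/s = 500e6/512 = 976562.5 sectors/s = 0.9765625 sectors/usec,
 * stored as 0.9765625 * 2^16 = 64000.
 */
#define EX_RATE_SHIFT	16

static inline unsigned int ex_rate_to_fixed(unsigned long long sectors,
					    unsigned long long elapsed_usec)
{
	/* (sectors << 16) / usec, i.e. sectors/usec in 16.16 fixed point */
	return (unsigned int)((sectors << EX_RATE_SHIFT) / elapsed_usec);
}

static inline unsigned long long ex_fixed_to_sectors_per_sec(unsigned int fixed_rate)
{
	return ((unsigned long long)fixed_rate * 1000000) >> EX_RATE_SHIFT;
}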
271 * When configured for computing the duration of the weight-raising
288 * depending on whether the device is rotational or non-rotational.
293 * non-rotational device. The reference rates are not the actual peak
296 * peak-rate estimator tends to yield slightly lower values than the
301 * The reference peak rates are measured in sectors/usec, left-shifted
313 * BFQ uses the above-detailed, time-based weight-raising mechanism to
315 * following false positives: I/O-bound applications that will go on
318 * weight-raised at the beginning of their I/O. On the opposite end,
319 * while being weight-raised, these applications
320 * a) unjustly steal throughput from applications that may actually need
323 * in loss of device throughput with most flash-based storage, and may
328 * finish explaining how the duration of weight-raising for
337 * reference interactive task is the start-up of LibreOffice Writer,
342 * duration of weight-raising for at least one class of I/O-bound
343 * applications: those doing sequential or quasi-sequential I/O. An
344 * example is file copy. In fact, once started, the main I/O-bound
347 * is starting, because these I/O-bound processes will greedily devote
349 * throughput-friendly I/O operations. This is even more true if BFQ
353 * have no right to be weight-raised any longer.
355 * Based on the last consideration, BFQ ends weight-raising for a
360 * This early ending of weight-raising reduces the amount of time
376 #define RQ_BIC(rq) ((struct bfq_io_cq *)((rq)->elv.priv[0]))
377 #define RQ_BFQQ(rq) ((rq)->elv.priv[1])
383 return bic->bfqq[1][actuator_idx]; in bic_to_bfqq()
385 return bic->bfqq[0][actuator_idx]; in bic_to_bfqq()
395 struct bfq_queue *old_bfqq = bic->bfqq[is_sync][actuator_idx]; in bic_set_bfqq()
398 * If bfqq != NULL, then a non-stable queue merge between in bic_set_bfqq()
399 * bic->bfqq and bfqq is happening here. This causes troubles in bic_set_bfqq()
400 * in the following case: bic->bfqq has also been scheduled in bic_set_bfqq()
401 * for a possible stable merge with bic->stable_merge_bfqq, in bic_set_bfqq()
402 * and bic->stable_merge_bfqq == bfqq happens to in bic_set_bfqq()
405 * bic->stable_merge_bfqq points exactly to bfqq, then bfqq in bic_set_bfqq()
408 * bic->stable_merge_bfqq == bfqq. in bic_set_bfqq()
410 struct bfq_iocq_bfqq_data *bfqq_data = &bic->bfqq_data[actuator_idx]; in bic_set_bfqq()
413 if (old_bfqq && old_bfqq->bic == bic) in bic_set_bfqq()
414 old_bfqq->bic = NULL; in bic_set_bfqq()
417 bic->bfqq[1][actuator_idx] = bfqq; in bic_set_bfqq()
419 bic->bfqq[0][actuator_idx] = bfqq; in bic_set_bfqq()
421 if (bfqq && bfqq_data->stable_merge_bfqq == bfqq) { in bic_set_bfqq()
430 bfq_put_stable_ref(bfqq_data->stable_merge_bfqq); in bic_set_bfqq()
432 bfqq_data->stable_merge_bfqq = NULL; in bic_set_bfqq()
438 return bic->icq.q->elevator->elevator_data; in bic_to_bfqd()
442 * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
447 /* bic->icq is the first member, %NULL will convert to %NULL */ in icq_to_bic()
452 * bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
460 if (!current->io_context) in bfq_bic_lookup()
463 spin_lock_irqsave(&q->queue_lock, flags); in bfq_bic_lookup()
465 spin_unlock_irqrestore(&q->queue_lock, flags); in bfq_bic_lookup()
476 lockdep_assert_held(&bfqd->lock); in bfq_schedule_dispatch()
478 if (bfqd->queued != 0) { in bfq_schedule_dispatch()
480 blk_mq_run_hw_queues(bfqd->queue, true); in bfq_schedule_dispatch()
484 #define bfq_class_idle(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
489 * Lifted from AS - choose which of rq1 and rq2 is best served now.
513 if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META)) in bfq_choose_req()
515 else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META)) in bfq_choose_req()
524 back_max = bfqd->bfq_back_max * 2; in bfq_choose_req()
532 d1 = s1 - last; in bfq_choose_req()
534 d1 = (last - s1) * bfqd->bfq_back_penalty; in bfq_choose_req()
539 d2 = s2 - last; in bfq_choose_req()
541 d2 = (last - s2) * bfqd->bfq_back_penalty; in bfq_choose_req()
549 * check two variables for all permutations: --> faster! in bfq_choose_req()
572 * (--> only *one* back seek required), in bfq_choose_req()
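/*
 * Illustrative sketch (simplified; it ignores the REQ_META and
 * wrap-around handling of bfq_choose_req() above, and all names are
 * hypothetical): the AS-style distance metric used to pick between two
 * candidate requests. A request behind the last served position is
 * penalized by back_penalty, and is not considered at all beyond
 * back_max (which the real code sets to bfq_back_max * 2).
 */
static unsigned long long ex_seek_distance(unsigned long long last,
					   unsigned long long pos,
					   unsigned int back_max,
					   unsigned int back_penalty)
{
	if (pos >= last)
		return pos - last;			/* forward seek */
	if (last - pos <= back_max)
		return (last - pos) * back_penalty;	/* short backward seek */
	return ~0ULL;					/* too far behind: avoid */
}

/* returns nonzero if the request at s1 should be preferred over s2 */
static int ex_prefer_first(unsigned long long last,
			   unsigned long long s1, unsigned long long s2,
			   unsigned int back_max, unsigned int back_penalty)
{
	return ex_seek_distance(last, s1, back_max, back_penalty) <=
	       ex_seek_distance(last, s2, back_max, back_penalty);
}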
587 struct bfq_data *bfqd = bfqq->bfqd; in bfqq_request_over_limit()
588 struct bfq_entity *entity = &bfqq->entity; in bfqq_request_over_limit()
592 int class_idx = bfqq->ioprio_class - 1; in bfqq_request_over_limit()
597 if (!entity->on_st_or_in_serv) in bfqq_request_over_limit()
601 spin_lock_irq(&bfqd->lock); in bfqq_request_over_limit()
603 depth = bfqg_to_blkg(bfqq_group(bfqq))->blkcg->css.cgroup->level + 1; in bfqq_request_over_limit()
605 spin_unlock_irq(&bfqd->lock); in bfqq_request_over_limit()
615 sched_data = entity->sched_data; in bfqq_request_over_limit()
624 if (!entity->on_st_or_in_serv) in bfqq_request_over_limit()
632 for (level--; level >= 0; level--) { in bfqq_request_over_limit()
635 wsum = bfq_entity_service_tree(entity)->wsum; in bfqq_request_over_limit()
642 * gets more requests than high prio queue from lower in bfqq_request_over_limit()
648 sched_data->service_tree[i].wsum; in bfqq_request_over_limit()
653 limit = DIV_ROUND_CLOSEST(limit * entity->weight, wsum); in bfqq_request_over_limit()
654 if (entity->allocated >= limit) { in bfqq_request_over_limit()
655 bfq_log_bfqq(bfqq->bfqd, bfqq, in bfqq_request_over_limit()
657 entity->allocated, limit, level); in bfqq_request_over_limit()
663 spin_unlock_irq(&bfqd->lock); in bfqq_request_over_limit()
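/*
 * Illustrative sketch (hypothetical names, flattened data layout): the
 * per-level check performed by bfqq_request_over_limit() above.
 * Starting from the queue's overall request allowance, the limit is
 * scaled by weight/wsum at every level of the hierarchy, from the root
 * down to the queue; exceeding the scaled limit at any level means the
 * queue is over its share of requests.
 */
static int ex_over_limit(unsigned int limit,
			 const unsigned int *weight,	/* entity weight, per level */
			 const unsigned int *wsum,	/* sum of active weights, per level */
			 const unsigned int *allocated,	/* requests allocated, per level */
			 int levels)
{
	int level;

	for (level = levels - 1; level >= 0; level--) {
		if (!wsum[level])
			continue;
		/* DIV_ROUND_CLOSEST(limit * weight, wsum) in the real code */
		limit = (limit * weight[level] + wsum[level] / 2) / wsum[level];
		if (allocated[level] >= limit)
			return 1;	/* over limit at this level */
	}
	return 0;
}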
692 struct bfq_data *bfqd = data->q->elevator->elevator_data; in bfq_limit_depth()
693 struct bfq_io_cq *bic = bfq_bic_lookup(data->q); in bfq_limit_depth()
695 unsigned limit = data->q->nr_requests; in bfq_limit_depth()
702 depth = bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(opf)]; in bfq_limit_depth()
703 limit = (limit * depth) >> bfqd->full_depth_shift; in bfq_limit_depth()
706 for (act_idx = 0; bic && act_idx < bfqd->num_actuators; act_idx++) { in bfq_limit_depth()
722 __func__, bfqd->wr_busy_queues, op_is_sync(opf), depth); in bfq_limit_depth()
724 data->shallow_depth = depth; in bfq_limit_depth()
736 p = &root->rb_node; in bfq_rq_pos_tree_lookup()
747 if (sector > blk_rq_pos(bfqq->next_rq)) in bfq_rq_pos_tree_lookup()
748 n = &(*p)->rb_right; in bfq_rq_pos_tree_lookup()
749 else if (sector < blk_rq_pos(bfqq->next_rq)) in bfq_rq_pos_tree_lookup()
750 n = &(*p)->rb_left; in bfq_rq_pos_tree_lookup()
763 bfqq ? bfqq->pid : 0); in bfq_rq_pos_tree_lookup()
770 return bfqq->service_from_backlogged > 0 && in bfq_too_late_for_merging()
771 time_is_before_jiffies(bfqq->first_IO_time + in bfq_too_late_for_merging()
789 if (bfqq->pos_root) { in bfq_pos_tree_add_move()
790 rb_erase(&bfqq->pos_node, bfqq->pos_root); in bfq_pos_tree_add_move()
791 bfqq->pos_root = NULL; in bfq_pos_tree_add_move()
795 if (bfqq == &bfqd->oom_bfqq) in bfq_pos_tree_add_move()
808 if (!bfqq->next_rq) in bfq_pos_tree_add_move()
811 bfqq->pos_root = &bfqq_group(bfqq)->rq_pos_tree; in bfq_pos_tree_add_move()
812 __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root, in bfq_pos_tree_add_move()
813 blk_rq_pos(bfqq->next_rq), &parent, &p); in bfq_pos_tree_add_move()
815 rb_link_node(&bfqq->pos_node, parent, p); in bfq_pos_tree_add_move()
816 rb_insert_color(&bfqq->pos_node, bfqq->pos_root); in bfq_pos_tree_add_move()
818 bfqq->pos_root = NULL; in bfq_pos_tree_add_move()
823 * must receive the same share of the throughput (symmetric scenario),
825 * throughput lower than or equal to the share that every other active
828 * throughput even if I/O dispatching is not plugged when bfqq remains
831 * of this function is used to check whether I/O-dispatch plugging can
836 * 2) all active queues belong to the same I/O-priority class,
843 * the last two symmetry sub-conditions above would be quite complex
845 * only the following stronger three sub-conditions, for which it is
848 * 2) all active queues belong to the same I/O-priority class,
858 bfqq->weight_counter && in bfq_asymmetric_scenario()
859 bfqq->weight_counter == in bfq_asymmetric_scenario()
861 rb_first_cached(&bfqd->queue_weights_tree), in bfq_asymmetric_scenario()
870 !RB_EMPTY_ROOT(&bfqd->queue_weights_tree.rb_root) && in bfq_asymmetric_scenario()
871 (bfqd->queue_weights_tree.rb_root.rb_node->rb_left || in bfq_asymmetric_scenario()
872 bfqd->queue_weights_tree.rb_root.rb_node->rb_right); in bfq_asymmetric_scenario()
875 (bfqd->busy_queues[0] && bfqd->busy_queues[1]) || in bfq_asymmetric_scenario()
876 (bfqd->busy_queues[0] && bfqd->busy_queues[2]) || in bfq_asymmetric_scenario()
877 (bfqd->busy_queues[1] && bfqd->busy_queues[2]); in bfq_asymmetric_scenario()
881 || bfqd->num_groups_with_pending_reqs > 1 in bfq_asymmetric_scenario()
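/*
 * Illustrative sketch (hypothetical helper): the three "stronger
 * sub-conditions" listed above, reduced to a single boolean. The
 * scenario is treated as asymmetric as soon as active queue weights
 * differ, more than one I/O-priority class has busy queues, or more
 * than one group has pending requests.
 */
static int ex_asymmetric_scenario(int varied_queue_weights,
				  int classes_with_busy_queues,
				  int groups_with_pending_reqs)
{
	return varied_queue_weights ||
	       classes_with_busy_queues > 1 ||
	       groups_with_pending_reqs > 1;
}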
887 * If the weight-counter tree passed as input contains no counter for
891 * Note that weight-counter trees contain few nodes in mostly symmetric
893 * weight-counter tree for the queues may contain at most one node.
894 * This holds even if low_latency is on, because weight-raised queues
901 struct rb_root_cached *root = &bfqq->bfqd->queue_weights_tree; in bfq_weights_tree_add()
902 struct bfq_entity *entity = &bfqq->entity; in bfq_weights_tree_add()
903 struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL; in bfq_weights_tree_add()
910 * non-weight-raised, and hence change its weight, and in bfq_weights_tree_add()
918 if (bfqq->weight_counter) in bfq_weights_tree_add()
927 if (entity->weight == __counter->weight) { in bfq_weights_tree_add()
928 bfqq->weight_counter = __counter; in bfq_weights_tree_add()
931 if (entity->weight < __counter->weight) in bfq_weights_tree_add()
932 new = &((*new)->rb_left); in bfq_weights_tree_add()
934 new = &((*new)->rb_right); in bfq_weights_tree_add()
939 bfqq->weight_counter = kzalloc(sizeof(struct bfq_weight_counter), in bfq_weights_tree_add()
952 * if !bfqq->weight_counter. in bfq_weights_tree_add()
954 if (unlikely(!bfqq->weight_counter)) in bfq_weights_tree_add()
957 bfqq->weight_counter->weight = entity->weight; in bfq_weights_tree_add()
958 rb_link_node(&bfqq->weight_counter->weights_node, parent, new); in bfq_weights_tree_add()
959 rb_insert_color_cached(&bfqq->weight_counter->weights_node, root, in bfq_weights_tree_add()
963 bfqq->weight_counter->num_active++; in bfq_weights_tree_add()
964 bfqq->ref++; in bfq_weights_tree_add()
977 if (!bfqq->weight_counter) in bfq_weights_tree_remove()
980 root = &bfqq->bfqd->queue_weights_tree; in bfq_weights_tree_remove()
981 bfqq->weight_counter->num_active--; in bfq_weights_tree_remove()
982 if (bfqq->weight_counter->num_active > 0) in bfq_weights_tree_remove()
985 rb_erase_cached(&bfqq->weight_counter->weights_node, root); in bfq_weights_tree_remove()
986 kfree(bfqq->weight_counter); in bfq_weights_tree_remove()
989 bfqq->weight_counter = NULL; in bfq_weights_tree_remove()
1006 rq = rq_entry_fifo(bfqq->fifo.next); in bfq_check_fifo()
1008 if (rq == last || blk_time_get_ns() < rq->fifo_time) in bfq_check_fifo()
1011 bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq); in bfq_check_fifo()
1019 struct rb_node *rbnext = rb_next(&last->rb_node); in bfq_find_next_rq()
1020 struct rb_node *rbprev = rb_prev(&last->rb_node); in bfq_find_next_rq()
1034 rbnext = rb_first(&bfqq->sort_list); in bfq_find_next_rq()
1035 if (rbnext && rbnext != &last->rb_node) in bfq_find_next_rq()
1046 if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1 || in bfq_serv_to_charge()
1047 bfq_asymmetric_scenario(bfqq->bfqd, bfqq)) in bfq_serv_to_charge()
1054 * bfq_updated_next_req - update the queue after a new next_rq selection.
1067 struct bfq_entity *entity = &bfqq->entity; in bfq_updated_next_req()
1068 struct request *next_rq = bfqq->next_rq; in bfq_updated_next_req()
1074 if (bfqq == bfqd->in_service_queue) in bfq_updated_next_req()
1082 max_t(unsigned long, bfqq->max_budget, in bfq_updated_next_req()
1084 entity->service); in bfq_updated_next_req()
1085 if (entity->budget != new_budget) { in bfq_updated_next_req()
1086 entity->budget = new_budget; in bfq_updated_next_req()
1097 dur = bfqd->rate_dur_prod; in bfq_wr_duration()
1098 do_div(dur, bfqd->peak_rate); in bfq_wr_duration()
1104 * - running in a slow PC in bfq_wr_duration()
1105 * - with a virtual disk stacked on a slow low-end 5400rpm HDD in bfq_wr_duration()
1106 * - serving a heavy I/O workload, such as the sequential reading in bfq_wr_duration()
1108 * mplayer took 23 seconds to start, if constantly weight-raised. in bfq_wr_duration()
1113 * responsiveness by allowing non-interactive applications to in bfq_wr_duration()
1118 * before weight-raising finishes. in bfq_wr_duration()
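/*
 * Illustrative sketch (hypothetical names; assumes peak_rate != 0): the
 * computation in bfq_wr_duration() above, i.e. rate_dur_prod divided by
 * the estimated peak rate, clamped to a bounded window so that a very
 * slow device cannot inflate the weight-raising period indefinitely.
 * The listing only shows that such a cap exists; the bounds here are
 * parameters, not the values used by BFQ.
 */
static unsigned long long ex_wr_duration(unsigned long long rate_dur_prod,
					 unsigned int peak_rate,
					 unsigned long long min_jiffies,
					 unsigned long long max_jiffies)
{
	unsigned long long dur = rate_dur_prod / peak_rate;	/* the do_div() above */

	if (dur < min_jiffies)
		dur = min_jiffies;
	if (dur > max_jiffies)
		dur = max_jiffies;
	return dur;
}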
1123 /* switch back from soft real-time to interactive weight raising */
1127 bfqq->wr_coeff = bfqd->bfq_wr_coeff; in switch_back_to_interactive_wr()
1128 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); in switch_back_to_interactive_wr()
1129 bfqq->last_wr_start_finish = bfqq->wr_start_at_switch_to_srt; in switch_back_to_interactive_wr()
1138 unsigned int a_idx = bfqq->actuator_idx; in bfq_bfqq_resume_state()
1139 struct bfq_iocq_bfqq_data *bfqq_data = &bic->bfqq_data[a_idx]; in bfq_bfqq_resume_state()
1141 if (bfqq_data->saved_has_short_ttime) in bfq_bfqq_resume_state()
1146 if (bfqq_data->saved_IO_bound) in bfq_bfqq_resume_state()
1151 bfqq->last_serv_time_ns = bfqq_data->saved_last_serv_time_ns; in bfq_bfqq_resume_state()
1152 bfqq->inject_limit = bfqq_data->saved_inject_limit; in bfq_bfqq_resume_state()
1153 bfqq->decrease_time_jif = bfqq_data->saved_decrease_time_jif; in bfq_bfqq_resume_state()
1155 bfqq->entity.new_weight = bfqq_data->saved_weight; in bfq_bfqq_resume_state()
1156 bfqq->ttime = bfqq_data->saved_ttime; in bfq_bfqq_resume_state()
1157 bfqq->io_start_time = bfqq_data->saved_io_start_time; in bfq_bfqq_resume_state()
1158 bfqq->tot_idle_time = bfqq_data->saved_tot_idle_time; in bfq_bfqq_resume_state()
1162 if (bfqd->low_latency) { in bfq_bfqq_resume_state()
1163 old_wr_coeff = bfqq->wr_coeff; in bfq_bfqq_resume_state()
1164 bfqq->wr_coeff = bfqq_data->saved_wr_coeff; in bfq_bfqq_resume_state()
1166 bfqq->service_from_wr = bfqq_data->saved_service_from_wr; in bfq_bfqq_resume_state()
1167 bfqq->wr_start_at_switch_to_srt = in bfq_bfqq_resume_state()
1168 bfqq_data->saved_wr_start_at_switch_to_srt; in bfq_bfqq_resume_state()
1169 bfqq->last_wr_start_finish = bfqq_data->saved_last_wr_start_finish; in bfq_bfqq_resume_state()
1170 bfqq->wr_cur_max_time = bfqq_data->saved_wr_cur_max_time; in bfq_bfqq_resume_state()
1172 if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) || in bfq_bfqq_resume_state()
1173 time_is_before_jiffies(bfqq->last_wr_start_finish + in bfq_bfqq_resume_state()
1174 bfqq->wr_cur_max_time))) { in bfq_bfqq_resume_state()
1175 if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && in bfq_bfqq_resume_state()
1177 time_is_after_eq_jiffies(bfqq->wr_start_at_switch_to_srt + in bfq_bfqq_resume_state()
1181 bfqq->wr_coeff = 1; in bfq_bfqq_resume_state()
1182 bfq_log_bfqq(bfqq->bfqd, bfqq, in bfq_bfqq_resume_state()
1188 bfqq->entity.prio_changed = 1; in bfq_bfqq_resume_state()
1193 if (old_wr_coeff == 1 && bfqq->wr_coeff > 1) in bfq_bfqq_resume_state()
1194 bfqd->wr_busy_queues++; in bfq_bfqq_resume_state()
1195 else if (old_wr_coeff > 1 && bfqq->wr_coeff == 1) in bfq_bfqq_resume_state()
1196 bfqd->wr_busy_queues--; in bfq_bfqq_resume_state()
1201 return bfqq->ref - bfqq->entity.allocated - in bfqq_process_refs()
1202 bfqq->entity.on_st_or_in_serv - in bfqq_process_refs()
1203 (bfqq->weight_counter != NULL) - bfqq->stable_ref; in bfqq_process_refs()
1212 hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node) in bfq_reset_burst_list()
1213 hlist_del_init(&item->burst_list_node); in bfq_reset_burst_list()
1221 hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list); in bfq_reset_burst_list()
1222 bfqd->burst_size = 1; in bfq_reset_burst_list()
1224 bfqd->burst_size = 0; in bfq_reset_burst_list()
1226 bfqd->burst_parent_entity = bfqq->entity.parent; in bfq_reset_burst_list()
1233 bfqd->burst_size++; in bfq_add_to_burst()
1235 if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) { in bfq_add_to_burst()
1243 bfqd->large_burst = true; in bfq_add_to_burst()
1249 hlist_for_each_entry(bfqq_item, &bfqd->burst_list, in bfq_add_to_burst()
1261 hlist_for_each_entry_safe(pos, n, &bfqd->burst_list, in bfq_add_to_burst()
1263 hlist_del_init(&pos->burst_list_node); in bfq_add_to_burst()
1270 hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list); in bfq_add_to_burst()
1280 * possible, it is usually better to not grant either weight-raising
1289 * The above services or applications benefit mostly from a high
1290 * throughput: the quicker the requests of the activated queues are
1292 * completed. As a consequence, weight-raising any of these queues,
1296 * weight-raising these new queues just lowers throughput in most
1301 * parallel I/O-bound threads. In fact, with a complex application,
1302 * several short processes may need to be executed to start up the
1308 * weight-raise all the queues created during the burst. This is the
1318 * weight-raise queues whose creation occurs in a large burst. In
1320 * idling depending on which choice boosts the throughput more. The
1351 * the large-burst threshold, then
1359 * previous sub-step), and now is not needed any more
1361 * . the device enters a large-burst mode
1364 * the device is in large-burst mode and shortly after the last time
1374 * . the large-burst mode is reset if set
1389 if (!hlist_unhashed(&bfqq->burst_list_node) || in bfq_handle_burst()
1391 time_is_after_eq_jiffies(bfqq->split_time + in bfq_handle_burst()
1412 if (time_is_before_jiffies(bfqd->last_ins_in_burst + in bfq_handle_burst()
1413 bfqd->bfq_burst_interval) || in bfq_handle_burst()
1414 bfqq->entity.parent != bfqd->burst_parent_entity) { in bfq_handle_burst()
1415 bfqd->large_burst = false; in bfq_handle_burst()
1425 if (bfqd->large_burst) { in bfq_handle_burst()
1431 * If we get here, then a large-burst state has not yet been in bfq_handle_burst()
1445 bfqd->last_ins_in_burst = jiffies; in bfq_handle_burst()
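/*
 * Illustrative sketch (hypothetical state structure; the per-queue
 * burst list and the parent-group check of bfq_handle_burst() are
 * omitted): the burst bookkeeping described above. A burst is a run of
 * queue activations whose inter-arrival time stays within
 * burst_interval; once its size reaches large_thresh, the burst is
 * flagged as large and later members are handled accordingly (e.g. not
 * weight-raised).
 */
struct ex_burst_state {
	unsigned long long	last_ins;	/* time of last activation in the burst */
	unsigned int		size;
	int			large;
};

static int ex_handle_burst(struct ex_burst_state *b, unsigned long long now,
			   unsigned long long burst_interval,
			   unsigned int large_thresh)
{
	if (now - b->last_ins > burst_interval) {
		b->size = 1;			/* start a new candidate burst */
		b->large = 0;
	} else if (++b->size >= large_thresh) {
		b->large = 1;			/* the burst has become large */
	}
	b->last_ins = now;
	return b->large;
}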
1450 struct bfq_entity *entity = &bfqq->entity; in bfq_bfqq_budget_left()
1452 return entity->budget - entity->service; in bfq_bfqq_budget_left()
1462 if (bfqd->budgets_assigned < bfq_stats_min_budgets) in bfq_max_budget()
1465 return bfqd->bfq_max_budget; in bfq_max_budget()
1474 if (bfqd->budgets_assigned < bfq_stats_min_budgets) in bfq_min_budget()
1477 return bfqd->bfq_max_budget / 32; in bfq_min_budget()
1483 * whether the in-service queue should be expired, by returning
1484 * true. The purpose of expiring the in-service queue is to give bfqq
1485 * the chance to possibly preempt the in-service queue, and the reason
1486 * for preempting the in-service queue is to achieve one of the two
1493 * - BFQQE_NO_MORE_REQUESTS bfqq did not enjoy any device idling
1497 * - BFQQE_TOO_IDLE bfqq did enjoy device idling, but did not issue
1498 * a new request before the expiration of the idling-time.
1503 * remained idle for other reasons: CPU high load, bfqq not enjoying
1527 * before last expiration. Thus timestamps need to be back-shifted
1531 * Secondly, to allow the process to recover the hole, the in-service
1534 * in-service queue to be completed, then it may become impossible to
1535 * let the process recover the hole, even if the back-shifted
1536 * timestamps of bfqq are lower than those of the in-service queue. If
1552 * above-described special way, and signals that the in-service queue
1553 * should be expired. Timestamp back-shifting is done later in
1559 * timestamp than the in-service queue. That is, the next budget of
1560 * bfqq may have to be completed before the one of the in-service
1561 * queue. If this is the case, then preempting the in-service queue
1567 * the in-service queue must be preempted. To have service trees
1568 * correctly updated, the in-service queue must be expired and
1571 * mechanism may be re-designed in such a way to make it possible to
1574 * I/O, which may in turn cause loss of throughput. Finally, there may
1575 * even be no in-service queue when the next function is invoked (so,
1580 * in-service queue (unconditionally) only for queues that need to
1588 struct bfq_entity *entity = &bfqq->entity; in bfq_bfqq_update_budg_for_activation()
1602 * that timestamps need to be back-shifted (and is in bfq_bfqq_update_budg_for_activation()
1608 * entity->service or entity->budget are not updated in bfq_bfqq_update_budg_for_activation()
1613 * entity->budget the remaining budget on such an in bfq_bfqq_update_budg_for_activation()
1616 entity->budget = min_t(unsigned long, in bfq_bfqq_update_budg_for_activation()
1618 bfqq->max_budget); in bfq_bfqq_update_budg_for_activation()
1621 * At this point, we have used entity->service to get in bfq_bfqq_update_budg_for_activation()
1623 * entity->budget). Thus we finally can, and have to, in bfq_bfqq_update_budg_for_activation()
1624 * reset entity->service. The latter must be reset in bfq_bfqq_update_budg_for_activation()
1629 entity->service = 0; in bfq_bfqq_update_budg_for_activation()
1637 entity->service = 0; in bfq_bfqq_update_budg_for_activation()
1638 entity->budget = max_t(unsigned long, bfqq->max_budget, in bfq_bfqq_update_budg_for_activation()
1639 bfq_serv_to_charge(bfqq->next_rq, bfqq)); in bfq_bfqq_update_budg_for_activation()
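/*
 * Illustrative sketch (hypothetical names): the two budget-refill cases
 * of bfq_bfqq_update_budg_for_activation() above. When timestamps are
 * going to be back-shifted for a weight-raised queue, the queue keeps
 * only the budget it had left; otherwise it receives a fresh budget
 * large enough for its next request. Service always restarts from zero.
 */
static void ex_refill_budget(unsigned long *budget, unsigned long *service,
			     int backshifted_wr, unsigned long budget_left,
			     unsigned long max_budget, unsigned long next_rq_charge)
{
	if (backshifted_wr)
		*budget = budget_left < max_budget ? budget_left : max_budget;
	else
		*budget = max_budget > next_rq_charge ? max_budget : next_rq_charge;
	*service = 0;
}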
1650 return jiffies - MAX_JIFFY_OFFSET; in bfq_smallest_from_now()
1662 /* start a weight-raising period */ in bfq_update_bfqq_wr_on_rq_arrival()
1664 bfqq->service_from_wr = 0; in bfq_update_bfqq_wr_on_rq_arrival()
1665 bfqq->wr_coeff = bfqd->bfq_wr_coeff; in bfq_update_bfqq_wr_on_rq_arrival()
1666 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); in bfq_update_bfqq_wr_on_rq_arrival()
1672 * that, at the end of the soft-real-time in bfq_update_bfqq_wr_on_rq_arrival()
1674 * now, no interactive weight-raising period in bfq_update_bfqq_wr_on_rq_arrival()
1679 bfqq->wr_start_at_switch_to_srt = in bfq_update_bfqq_wr_on_rq_arrival()
1681 bfqq->wr_coeff = bfqd->bfq_wr_coeff * in bfq_update_bfqq_wr_on_rq_arrival()
1683 bfqq->wr_cur_max_time = in bfq_update_bfqq_wr_on_rq_arrival()
1684 bfqd->bfq_wr_rt_max_time; in bfq_update_bfqq_wr_on_rq_arrival()
1690 * scheduling-error component due to a too large in bfq_update_bfqq_wr_on_rq_arrival()
1691 * budget. Do not care about throughput consequences, in bfq_update_bfqq_wr_on_rq_arrival()
1696 bfqq->entity.budget = min_t(unsigned long, in bfq_update_bfqq_wr_on_rq_arrival()
1697 bfqq->entity.budget, in bfq_update_bfqq_wr_on_rq_arrival()
1701 bfqq->wr_coeff = bfqd->bfq_wr_coeff; in bfq_update_bfqq_wr_on_rq_arrival()
1702 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); in bfq_update_bfqq_wr_on_rq_arrival()
1704 bfqq->wr_coeff = 1; in bfq_update_bfqq_wr_on_rq_arrival()
1710 * the weight-raising duration for the in bfq_update_bfqq_wr_on_rq_arrival()
1711 * application with the weight-raising in bfq_update_bfqq_wr_on_rq_arrival()
1715 * before the weight-raising period for the in bfq_update_bfqq_wr_on_rq_arrival()
1722 * at a certain time weight-raising is in bfq_update_bfqq_wr_on_rq_arrival()
1731 * 4) these pending requests experience a high in bfq_update_bfqq_wr_on_rq_arrival()
1733 * weight-raised while they are pending. in bfq_update_bfqq_wr_on_rq_arrival()
1735 if (bfqq->wr_cur_max_time != in bfq_update_bfqq_wr_on_rq_arrival()
1736 bfqd->bfq_wr_rt_max_time) { in bfq_update_bfqq_wr_on_rq_arrival()
1737 bfqq->wr_start_at_switch_to_srt = in bfq_update_bfqq_wr_on_rq_arrival()
1738 bfqq->last_wr_start_finish; in bfq_update_bfqq_wr_on_rq_arrival()
1740 bfqq->wr_cur_max_time = in bfq_update_bfqq_wr_on_rq_arrival()
1741 bfqd->bfq_wr_rt_max_time; in bfq_update_bfqq_wr_on_rq_arrival()
1742 bfqq->wr_coeff = bfqd->bfq_wr_coeff * in bfq_update_bfqq_wr_on_rq_arrival()
1745 bfqq->last_wr_start_finish = jiffies; in bfq_update_bfqq_wr_on_rq_arrival()
1753 return bfqq->dispatched == 0 && in bfq_bfqq_idle_for_long_time()
1755 bfqq->budget_timeout + in bfq_bfqq_idle_for_long_time()
1756 bfqd->bfq_wr_min_idle_time); in bfq_bfqq_idle_for_long_time()
1762 * weight than the in-service queue.
1769 if (bfqq->ioprio_class < in_serv_bfqq->ioprio_class) in bfq_bfqq_higher_class_or_weight()
1772 if (in_serv_bfqq->entity.parent == bfqq->entity.parent) { in bfq_bfqq_higher_class_or_weight()
1773 bfqq_weight = bfqq->entity.weight; in bfq_bfqq_higher_class_or_weight()
1774 in_serv_weight = in_serv_bfqq->entity.weight; in bfq_bfqq_higher_class_or_weight()
1776 if (bfqq->entity.parent) in bfq_bfqq_higher_class_or_weight()
1777 bfqq_weight = bfqq->entity.parent->weight; in bfq_bfqq_higher_class_or_weight()
1779 bfqq_weight = bfqq->entity.weight; in bfq_bfqq_higher_class_or_weight()
1780 if (in_serv_bfqq->entity.parent) in bfq_bfqq_higher_class_or_weight()
1781 in_serv_weight = in_serv_bfqq->entity.parent->weight; in bfq_bfqq_higher_class_or_weight()
1783 in_serv_weight = in_serv_bfqq->entity.weight; in bfq_bfqq_higher_class_or_weight()
1798 if (bfqd->num_actuators == 1) in bfq_actuator_index()
1802 end = bio_end_sector(bio) - 1; in bfq_actuator_index()
1804 for (i = 0; i < bfqd->num_actuators; i++) { in bfq_actuator_index()
1805 if (end >= bfqd->sector[i] && in bfq_actuator_index()
1806 end < bfqd->sector[i] + bfqd->nr_sectors[i]) in bfq_actuator_index()
1833 bfqq->ttime.last_end_request + in bfq_bfqq_handle_idle_busy_switch()
1834 bfqd->bfq_slice_idle * 3; in bfq_bfqq_handle_idle_busy_switch()
1835 unsigned int act_idx = bfq_actuator_index(bfqd, rq->bio); in bfq_bfqq_handle_idle_busy_switch()
1837 bfqq->bic || RQ_BIC(rq)->bfqq_data[act_idx].stably_merged; in bfq_bfqq_handle_idle_busy_switch()
1840 * bfqq deserves to be weight-raised if: in bfq_bfqq_handle_idle_busy_switch()
1841 * - it is sync, in bfq_bfqq_handle_idle_busy_switch()
1842 * - it does not belong to a large burst, in bfq_bfqq_handle_idle_busy_switch()
1843 * - it has been idle for enough time or is soft real-time, in bfq_bfqq_handle_idle_busy_switch()
1844 * - is linked to a bfq_io_cq (it is not shared in any sense), in bfq_bfqq_handle_idle_busy_switch()
1845 * - has a default weight (otherwise we assume the user wanted in bfq_bfqq_handle_idle_busy_switch()
1849 soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 && in bfq_bfqq_handle_idle_busy_switch()
1852 time_is_before_jiffies(bfqq->soft_rt_next_start) && in bfq_bfqq_handle_idle_busy_switch()
1853 bfqq->dispatched == 0 && in bfq_bfqq_handle_idle_busy_switch()
1854 bfqq->entity.new_weight == 40; in bfq_bfqq_handle_idle_busy_switch()
1856 bfqq->entity.new_weight == 40; in bfq_bfqq_handle_idle_busy_switch()
1858 * Merged bfq_queues are kept out of weight-raising in bfq_bfqq_handle_idle_busy_switch()
1859 * (low-latency) mechanisms. The reason is that these queues in bfq_bfqq_handle_idle_busy_switch()
1860 * are usually created for non-interactive and in bfq_bfqq_handle_idle_busy_switch()
1861 * non-soft-real-time tasks. Yet this is not the case for in bfq_bfqq_handle_idle_busy_switch()
1862 * stably-merged queues. These queues are merged just because in bfq_bfqq_handle_idle_busy_switch()
1864 * easily serve the I/O of an interactive or soft-real time in bfq_bfqq_handle_idle_busy_switch()
1866 * processes. So let also stably-merged queues enjoy weight in bfq_bfqq_handle_idle_busy_switch()
1869 wr_or_deserves_wr = bfqd->low_latency && in bfq_bfqq_handle_idle_busy_switch()
1870 (bfqq->wr_coeff > 1 || in bfq_bfqq_handle_idle_busy_switch()
1876 * may want to preempt the in-service queue. in bfq_bfqq_handle_idle_busy_switch()
1898 bfqq->budget_timeout + in bfq_bfqq_handle_idle_busy_switch()
1900 hlist_del_init(&bfqq->burst_list_node); in bfq_bfqq_handle_idle_busy_switch()
1906 if (bfqd->low_latency) { in bfq_bfqq_handle_idle_busy_switch()
1907 if (unlikely(time_is_after_jiffies(bfqq->split_time))) in bfq_bfqq_handle_idle_busy_switch()
1909 bfqq->split_time = in bfq_bfqq_handle_idle_busy_switch()
1910 jiffies - bfqd->bfq_wr_min_idle_time - 1; in bfq_bfqq_handle_idle_busy_switch()
1912 if (time_is_before_jiffies(bfqq->split_time + in bfq_bfqq_handle_idle_busy_switch()
1913 bfqd->bfq_wr_min_idle_time)) { in bfq_bfqq_handle_idle_busy_switch()
1921 if (old_wr_coeff != bfqq->wr_coeff) in bfq_bfqq_handle_idle_busy_switch()
1922 bfqq->entity.prio_changed = 1; in bfq_bfqq_handle_idle_busy_switch()
1926 bfqq->last_idle_bklogged = jiffies; in bfq_bfqq_handle_idle_busy_switch()
1927 bfqq->service_from_backlogged = 0; in bfq_bfqq_handle_idle_busy_switch()
1933 * Expire in-service queue if preemption may be needed for in bfq_bfqq_handle_idle_busy_switch()
1934 * guarantees or throughput. As for guarantees, we care in bfq_bfqq_handle_idle_busy_switch()
1939 * carry time-critical I/O, then bfqq's bandwidth is less in bfq_bfqq_handle_idle_busy_switch()
1940 * important than that of queues that carry time-critical I/O. in bfq_bfqq_handle_idle_busy_switch()
1942 * bfqq is at least as weight-raised, i.e., at least as time in bfq_bfqq_handle_idle_busy_switch()
1943 * critical, as the in-service queue. in bfq_bfqq_handle_idle_busy_switch()
1946 * or has a higher weight than the in-service queue. If this in bfq_bfqq_handle_idle_busy_switch()
1953 * the timestamps of both bfqq and of the in-service queue, in bfq_bfqq_handle_idle_busy_switch()
1960 * timestamps of the in-service queue would need to be in bfq_bfqq_handle_idle_busy_switch()
1964 * As for throughput, we ask bfq_better_to_idle() whether we in bfq_bfqq_handle_idle_busy_switch()
1967 * boost throughput or to preserve service guarantees. Then in bfq_bfqq_handle_idle_busy_switch()
1969 * would certainly lower throughput. We may end up in this in bfq_bfqq_handle_idle_busy_switch()
1972 * request to arrive for the currently in-service queue, but in bfq_bfqq_handle_idle_busy_switch()
1975 if (bfqd->in_service_queue && in bfq_bfqq_handle_idle_busy_switch()
1977 bfqq->wr_coeff >= bfqd->in_service_queue->wr_coeff) || in bfq_bfqq_handle_idle_busy_switch()
1978 bfq_bfqq_higher_class_or_weight(bfqq, bfqd->in_service_queue) || in bfq_bfqq_handle_idle_busy_switch()
1979 !bfq_better_to_idle(bfqd->in_service_queue)) && in bfq_bfqq_handle_idle_busy_switch()
1981 bfq_bfqq_expire(bfqd, bfqd->in_service_queue, in bfq_bfqq_handle_idle_busy_switch()
1989 bfqq->last_serv_time_ns = 0; in bfq_reset_inject_limit()
1995 bfqd->waited_rq = NULL; in bfq_reset_inject_limit()
2008 * get new I/O enqueued---and then completed---before being in bfq_reset_inject_limit()
2010 * limit-update algorithm the chance to measure the effect of in bfq_reset_inject_limit()
2021 * throughput, as explained in detail in the comments in in bfq_reset_inject_limit()
2040 * limit-update algorithm and possibly raise the limit to more in bfq_reset_inject_limit()
2044 bfqq->inject_limit = 0; in bfq_reset_inject_limit()
2046 bfqq->inject_limit = 1; in bfq_reset_inject_limit()
2048 bfqq->decrease_time_jif = jiffies; in bfq_reset_inject_limit()
2053 u64 tot_io_time = now_ns - bfqq->io_start_time; in bfq_update_io_intensity()
2055 if (RB_EMPTY_ROOT(&bfqq->sort_list) && bfqq->dispatched == 0) in bfq_update_io_intensity()
2056 bfqq->tot_idle_time += in bfq_update_io_intensity()
2057 now_ns - bfqq->ttime.last_end_request; in bfq_update_io_intensity()
2066 if (bfqq->tot_idle_time * 5 > tot_io_time) in bfq_update_io_intensity()
2076 bfqq->io_start_time = now_ns - (tot_io_time>>1); in bfq_update_io_intensity()
2077 bfqq->tot_idle_time >>= 1; in bfq_update_io_intensity()
2089 * A remarkable throughput boost can be reached by unconditionally
2092 * plugged for bfqq. In addition to boosting throughput, this
2103 * if it still has some in-flight I/O. In fact, in this case bfqq is actually
2105 * of some of the in-flight requests. In particular, on the first time, Q is
2111 * above three-times requirement and time limit for detection, make false
2116 * The sooner a waker queue is detected, the sooner throughput can be
2123 * first I/O-plugging time interval for bfqq. This triggers the first
2126 * during the next I/O-plugging interval for bfqq.
2137 if (!bfqd->last_completed_rq_bfqq || in bfq_check_waker()
2138 bfqd->last_completed_rq_bfqq == bfqq || in bfq_check_waker()
2140 now_ns - bfqd->last_completion >= 4 * NSEC_PER_MSEC || in bfq_check_waker()
2141 bfqd->last_completed_rq_bfqq == &bfqd->oom_bfqq || in bfq_check_waker()
2142 bfqq == &bfqd->oom_bfqq) in bfq_check_waker()
2148 * doesn't hurt throughput that much. The condition below makes sure in bfq_check_waker()
2151 if (bfqd->last_completed_rq_bfqq != in bfq_check_waker()
2152 bfqq->tentative_waker_bfqq || in bfq_check_waker()
2153 now_ns > bfqq->waker_detection_started + in bfq_check_waker()
2154 128 * (u64)bfqd->bfq_slice_idle) { in bfq_check_waker()
2160 bfqq->tentative_waker_bfqq = in bfq_check_waker()
2161 bfqd->last_completed_rq_bfqq; in bfq_check_waker()
2162 bfqq->num_waker_detections = 1; in bfq_check_waker()
2163 bfqq->waker_detection_started = now_ns; in bfq_check_waker()
2164 bfq_bfqq_name(bfqq->tentative_waker_bfqq, waker_name, in bfq_check_waker()
2168 bfqq->num_waker_detections++; in bfq_check_waker()
2170 if (bfqq->num_waker_detections == 3) { in bfq_check_waker()
2171 bfqq->waker_bfqq = bfqd->last_completed_rq_bfqq; in bfq_check_waker()
2172 bfqq->tentative_waker_bfqq = NULL; in bfq_check_waker()
2173 bfq_bfqq_name(bfqq->waker_bfqq, waker_name, in bfq_check_waker()
2179 * bfqq->waker_bfqq must be reset. To in bfq_check_waker()
2197 if (!hlist_unhashed(&bfqq->woken_list_node)) in bfq_check_waker()
2198 hlist_del_init(&bfqq->woken_list_node); in bfq_check_waker()
2199 hlist_add_head(&bfqq->woken_list_node, in bfq_check_waker()
2200 &bfqd->last_completed_rq_bfqq->woken_list); in bfq_check_waker()
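/*
 * Illustrative sketch (hypothetical state structure; queue pointers are
 * replaced by plain integer ids): the waker-detection counting visible
 * in bfq_check_waker() above. A candidate waker must be observed three
 * times, all within a bounded detection window (128 times
 * bfq_slice_idle in the real code), before being promoted from
 * tentative waker to actual waker.
 */
struct ex_waker_state {
	int			tentative;	/* candidate waker id, -1 if none */
	int			waker;		/* confirmed waker id, -1 if none */
	unsigned int		detections;
	unsigned long long	started_ns;	/* when the candidate was first seen */
};

static void ex_check_waker(struct ex_waker_state *w, int last_completer_id,
			   unsigned long long now_ns, unsigned long long window_ns)
{
	if (last_completer_id != w->tentative ||
	    now_ns > w->started_ns + window_ns) {
		w->tentative = last_completer_id;	/* (re)start detection */
		w->detections = 1;
		w->started_ns = now_ns;
		return;
	}
	if (++w->detections == 3) {			/* third hit: promote */
		w->waker = w->tentative;
		w->tentative = -1;
	}
}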
2207 struct bfq_data *bfqd = bfqq->bfqd; in bfq_add_request()
2209 unsigned int old_wr_coeff = bfqq->wr_coeff; in bfq_add_request()
2214 bfqq->queued[rq_is_sync(rq)]++; in bfq_add_request()
2216 * Updating of 'bfqd->queued' is protected by 'bfqd->lock', however, it in bfq_add_request()
2219 WRITE_ONCE(bfqd->queued, bfqd->queued + 1); in bfq_add_request()
2221 if (bfq_bfqq_sync(bfqq) && RQ_BIC(rq)->requests <= 1) { in bfq_add_request()
2230 if (time_is_before_eq_jiffies(bfqq->decrease_time_jif + in bfq_add_request()
2238 * - bfqq is in service, because the total service in bfq_add_request()
2241 * - this is the right occasion to compute or to in bfq_add_request()
2252 * hole, and there are still in-flight requests, in bfq_add_request()
2255 * - the minimum interval for sampling the total in bfq_add_request()
2259 if (bfqq == bfqd->in_service_queue && in bfq_add_request()
2260 (bfqd->tot_rq_in_driver == 0 || in bfq_add_request()
2261 (bfqq->last_serv_time_ns > 0 && in bfq_add_request()
2262 bfqd->rqs_injected && bfqd->tot_rq_in_driver > 0)) && in bfq_add_request()
2263 time_is_before_eq_jiffies(bfqq->decrease_time_jif + in bfq_add_request()
2265 bfqd->last_empty_occupied_ns = blk_time_get_ns(); in bfq_add_request()
2269 * wait_dispatch will cause bfqd->waited_rq to in bfq_add_request()
2272 bfqd->wait_dispatch = true; in bfq_add_request()
2286 if (bfqd->tot_rq_in_driver == 0) in bfq_add_request()
2287 bfqd->rqs_injected = false; in bfq_add_request()
2294 elv_rb_add(&bfqq->sort_list, rq); in bfq_add_request()
2297 * Check if this request is a better next-serve candidate. in bfq_add_request()
2299 prev = bfqq->next_rq; in bfq_add_request()
2300 next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position); in bfq_add_request()
2301 bfqq->next_rq = next_rq; in bfq_add_request()
2307 if (unlikely(!bfqd->nonrot_with_queueing && prev != bfqq->next_rq)) in bfq_add_request()
2314 if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) && in bfq_add_request()
2316 bfqq->last_wr_start_finish + in bfq_add_request()
2317 bfqd->bfq_wr_min_inter_arr_async)) { in bfq_add_request()
2318 bfqq->wr_coeff = bfqd->bfq_wr_coeff; in bfq_add_request()
2319 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); in bfq_add_request()
2321 bfqd->wr_busy_queues++; in bfq_add_request()
2322 bfqq->entity.prio_changed = 1; in bfq_add_request()
2324 if (prev != bfqq->next_rq) in bfq_add_request()
2332 * . if bfqq is not going to be weight-raised, because, for in bfq_add_request()
2333 * non weight-raised queues, last_wr_start_finish stores the in bfq_add_request()
2336 * weight-raise async queues in bfq_add_request()
2338 * . if bfqq is not weight-raised, because, if bfqq is now in bfq_add_request()
2339 * switching to weight-raised, then last_wr_start_finish in bfq_add_request()
2340 * stores the time when weight-raising starts in bfq_add_request()
2343 * bfqq is currently weight-raised, the weight-raising in bfq_add_request()
2346 * conditions, if bfqq is already weight-raised) in bfq_add_request()
2349 * real-time, because the weight-raising period is constantly in bfq_add_request()
2350 * restarted on idle-to-busy transitions for these queues, but in bfq_add_request()
2354 if (bfqd->low_latency && in bfq_add_request()
2355 (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive)) in bfq_add_request()
2356 bfqq->last_wr_start_finish = jiffies; in bfq_add_request()
2363 struct bfq_queue *bfqq = bfqd->bio_bfqq; in bfq_find_rq_fmerge()
2367 return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio)); in bfq_find_rq_fmerge()
2375 return abs(blk_rq_pos(rq) - last_pos); in get_sdist()
2384 struct bfq_data *bfqd = bfqq->bfqd; in bfq_remove_request()
2387 if (bfqq->next_rq == rq) { in bfq_remove_request()
2388 bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq); in bfq_remove_request()
2392 if (rq->queuelist.prev != &rq->queuelist) in bfq_remove_request()
2393 list_del_init(&rq->queuelist); in bfq_remove_request()
2394 bfqq->queued[sync]--; in bfq_remove_request()
2396 * Updating of 'bfqd->queued' is protected by 'bfqd->lock', however, it in bfq_remove_request()
2399 WRITE_ONCE(bfqd->queued, bfqd->queued - 1); in bfq_remove_request()
2400 elv_rb_del(&bfqq->sort_list, rq); in bfq_remove_request()
2403 if (q->last_merge == rq) in bfq_remove_request()
2404 q->last_merge = NULL; in bfq_remove_request()
2406 if (RB_EMPTY_ROOT(&bfqq->sort_list)) { in bfq_remove_request()
2407 bfqq->next_rq = NULL; in bfq_remove_request()
2409 if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) { in bfq_remove_request()
2413 * bfqq is empty, bfqq->entity.service and in bfq_remove_request()
2414 * bfqq->entity.budget must contain, in bfq_remove_request()
2420 * reset both bfqq->entity.service and in bfq_remove_request()
2421 * bfqq->entity.budget, if bfqq has still a in bfq_remove_request()
2424 bfqq->entity.budget = bfqq->entity.service = 0; in bfq_remove_request()
2428 * Remove queue from request-position tree as it is empty. in bfq_remove_request()
2430 if (bfqq->pos_root) { in bfq_remove_request()
2431 rb_erase(&bfqq->pos_node, bfqq->pos_root); in bfq_remove_request()
2432 bfqq->pos_root = NULL; in bfq_remove_request()
2436 if (unlikely(!bfqd->nonrot_with_queueing)) in bfq_remove_request()
2440 if (rq->cmd_flags & REQ_META) in bfq_remove_request()
2441 bfqq->meta_pending--; in bfq_remove_request()
2448 struct bfq_data *bfqd = q->elevator->elevator_data; in bfq_bio_merge()
2453 * queue_lock inside the bfqd->lock. We assume that the bic in bfq_bio_merge()
2455 * bfqd->lock is taken. in bfq_bio_merge()
2460 spin_lock_irq(&bfqd->lock); in bfq_bio_merge()
2469 bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf), in bfq_bio_merge()
2472 bfqd->bio_bfqq = NULL; in bfq_bio_merge()
2474 bfqd->bio_bic = bic; in bfq_bio_merge()
2478 spin_unlock_irq(&bfqd->lock); in bfq_bio_merge()
2488 struct bfq_data *bfqd = q->elevator->elevator_data; in bfq_request_merge()
2507 rb_prev(&req->rb_node) && in bfq_request_merged()
2509 blk_rq_pos(container_of(rb_prev(&req->rb_node), in bfq_request_merged()
2518 bfqd = bfqq->bfqd; in bfq_request_merged()
2521 elv_rb_del(&bfqq->sort_list, req); in bfq_request_merged()
2522 elv_rb_add(&bfqq->sort_list, req); in bfq_request_merged()
2525 prev = bfqq->next_rq; in bfq_request_merged()
2526 next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req, in bfq_request_merged()
2527 bfqd->last_position); in bfq_request_merged()
2528 bfqq->next_rq = next_rq; in bfq_request_merged()
2534 if (prev != bfqq->next_rq) { in bfq_request_merged()
2540 if (unlikely(!bfqd->nonrot_with_queueing)) in bfq_request_merged()
2554 * on that rq is picked from the hash table q->elevator->hash, which,
2579 !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && in bfq_requests_merged()
2580 next->fifo_time < rq->fifo_time) { in bfq_requests_merged()
2581 list_del_init(&rq->queuelist); in bfq_requests_merged()
2582 list_replace_init(&next->queuelist, &rq->queuelist); in bfq_requests_merged()
2583 rq->fifo_time = next->fifo_time; in bfq_requests_merged()
2586 if (bfqq->next_rq == next) in bfq_requests_merged()
2587 bfqq->next_rq = rq; in bfq_requests_merged()
2589 bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags); in bfq_requests_merged()
2592 if (!RB_EMPTY_NODE(&next->rb_node)) { in bfq_requests_merged()
2593 bfq_remove_request(next->q, next); in bfq_requests_merged()
2596 next->cmd_flags); in bfq_requests_merged()
2604 * If bfqq has been enjoying interactive weight-raising, then in bfq_bfqq_end_wr()
2607 * a soft real-time application. Such an application actually in bfq_bfqq_end_wr()
2608 * exhibits a soft real-time I/O pattern after it finishes in bfq_bfqq_end_wr()
2612 * high value. So, without this reset, bfqq would be in bfq_bfqq_end_wr()
2617 if (bfqq->wr_cur_max_time != in bfq_bfqq_end_wr()
2618 bfqq->bfqd->bfq_wr_rt_max_time) in bfq_bfqq_end_wr()
2619 bfqq->soft_rt_next_start = jiffies; in bfq_bfqq_end_wr()
2622 bfqq->bfqd->wr_busy_queues--; in bfq_bfqq_end_wr()
2623 bfqq->wr_coeff = 1; in bfq_bfqq_end_wr()
2624 bfqq->wr_cur_max_time = 0; in bfq_bfqq_end_wr()
2625 bfqq->last_wr_start_finish = jiffies; in bfq_bfqq_end_wr()
2630 bfqq->entity.prio_changed = 1; in bfq_bfqq_end_wr()
2638 for (k = 0; k < bfqd->num_actuators; k++) { in bfq_end_wr_async_queues()
2641 if (bfqg->async_bfqq[i][j][k]) in bfq_end_wr_async_queues()
2642 bfq_bfqq_end_wr(bfqg->async_bfqq[i][j][k]); in bfq_end_wr_async_queues()
2643 if (bfqg->async_idle_bfqq[k]) in bfq_end_wr_async_queues()
2644 bfq_bfqq_end_wr(bfqg->async_idle_bfqq[k]); in bfq_end_wr_async_queues()
2653 spin_lock_irq(&bfqd->lock); in bfq_end_wr()
2655 for (i = 0; i < bfqd->num_actuators; i++) { in bfq_end_wr()
2656 list_for_each_entry(bfqq, &bfqd->active_list[i], bfqq_list) in bfq_end_wr()
2659 list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) in bfq_end_wr()
2663 spin_unlock_irq(&bfqd->lock); in bfq_end_wr()
2671 return ((struct bio *)io_struct)->bi_iter.bi_sector; in bfq_io_struct_pos()
2677 return abs(bfq_io_struct_pos(io_struct, request) - sector) <= in bfq_rq_close_to_sector()
2685 struct rb_root *root = &bfqq_group(bfqq)->rq_pos_tree; in bfqq_find_close()
2706 if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector)) in bfqq_find_close()
2709 if (blk_rq_pos(__bfqq->next_rq) < sector) in bfqq_find_close()
2710 node = rb_next(&__bfqq->pos_node); in bfqq_find_close()
2712 node = rb_prev(&__bfqq->pos_node); in bfqq_find_close()
2717 if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector)) in bfqq_find_close()
2734 * the best possible order for throughput. in bfq_find_close_cooperator()
2751 * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain in bfq_setup_merge()
2759 while ((__bfqq = new_bfqq->new_bfqq)) { in bfq_setup_merge()
2779 if (new_bfqq->entity.parent != bfqq->entity.parent) in bfq_setup_merge()
2782 bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d", in bfq_setup_merge()
2783 new_bfqq->pid); in bfq_setup_merge()
2797 * not available any more (new_bfqq->bic == NULL). in bfq_setup_merge()
2799 * Anyway, even in case new_bfqq coincides with the in-service in bfq_setup_merge()
2800 * queue, redirecting requests the in-service queue is the in bfq_setup_merge()
2801 * best option, as we feed the in-service queue with new in bfq_setup_merge()
2803 * are likely to increase the throughput. in bfq_setup_merge()
2805 bfqq->new_bfqq = new_bfqq; in bfq_setup_merge()
2810 * associated with new_bfqq. Here we increase new_bfqq->ref in bfq_setup_merge()
2815 new_bfqq->ref += process_refs; in bfq_setup_merge()
2826 (bfqq->ioprio_class != new_bfqq->ioprio_class)) in bfq_may_be_close_cooperator()
2860 bfqq_data->stable_merge_bfqq = NULL; in bfq_setup_stable_merge()
2868 bfqq_data->stably_merged = true; in bfq_setup_stable_merge()
2869 if (new_bfqq->bic) { in bfq_setup_stable_merge()
2870 unsigned int new_a_idx = new_bfqq->actuator_idx; in bfq_setup_stable_merge()
2872 &new_bfqq->bic->bfqq_data[new_a_idx]; in bfq_setup_stable_merge()
2874 new_bfqq_data->stably_merged = true; in bfq_setup_stable_merge()
2886 * Attempt to schedule a merge of bfqq with the currently in-service
2898 * WARNING: queue merging may impair fairness among non-weight raised
2902 * requests than the ones produced by its originally-associated
2910 unsigned int a_idx = bfqq->actuator_idx; in bfq_setup_cooperator()
2911 struct bfq_iocq_bfqq_data *bfqq_data = &bic->bfqq_data[a_idx]; in bfq_setup_cooperator()
2914 new_bfqq = bfqq->new_bfqq; in bfq_setup_cooperator()
2916 while (new_bfqq->new_bfqq) in bfq_setup_cooperator()
2917 new_bfqq = new_bfqq->new_bfqq; in bfq_setup_cooperator()
2922 * Check delayed stable merge for rotational or non-queueing in bfq_setup_cooperator()
2924 * currently merged with some other queue (i.e., bfqq->bic in bfq_setup_cooperator()
2927 * merged with bic->stable_merge_bfqq. But this would be in bfq_setup_cooperator()
2930 if (unlikely(!bfqd->nonrot_with_queueing)) { in bfq_setup_cooperator()
2933 * bic->stable_merge_bfqq may point to some queue (for in bfq_setup_cooperator()
2937 if (bfq_bfqq_sync(bfqq) && bfqq_data->stable_merge_bfqq && in bfq_setup_cooperator()
2939 time_is_before_jiffies(bfqq->split_time + in bfq_setup_cooperator()
2941 time_is_before_jiffies(bfqq->creation_time + in bfq_setup_cooperator()
2944 bfqq_data->stable_merge_bfqq; in bfq_setup_cooperator()
2955 * device reaches a high speed through internal parallelism in bfq_setup_cooperator()
2956 * and pipelining. This means that, to reach a high in bfq_setup_cooperator()
2957 * throughput, it must have many requests enqueued at the same in bfq_setup_cooperator()
2963 * the throughput reached by the device is likely to be the in bfq_setup_cooperator()
2967 * terms of throughput. Merging tends to make many workloads in bfq_setup_cooperator()
2970 * non-merged queues. This may accentuate workload in bfq_setup_cooperator()
2973 * the shared queue may inherit such a high weight and, by in bfq_setup_cooperator()
2976 * for BFQ to let the device reach a high throughput. in bfq_setup_cooperator()
2989 if (likely(bfqd->nonrot_with_queueing)) in bfq_setup_cooperator()
2999 * probability that two non-cooperating processes, which just in bfq_setup_cooperator()
3006 if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq)) in bfq_setup_cooperator()
3013 in_service_bfqq = bfqd->in_service_queue; in bfq_setup_cooperator()
3016 likely(in_service_bfqq != &bfqd->oom_bfqq) && in bfq_setup_cooperator()
3018 bfqd->in_serv_last_pos) && in bfq_setup_cooperator()
3019 bfqq->entity.parent == in_service_bfqq->entity.parent && in bfq_setup_cooperator()
3033 if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq) && in bfq_setup_cooperator()
3042 struct bfq_io_cq *bic = bfqq->bic; in bfq_bfqq_save_state()
3043 unsigned int a_idx = bfqq->actuator_idx; in bfq_bfqq_save_state()
3044 struct bfq_iocq_bfqq_data *bfqq_data = &bic->bfqq_data[a_idx]; in bfq_bfqq_save_state()
3047 * If !bfqq->bic, the queue is already shared or its requests in bfq_bfqq_save_state()
3054 bfqq_data->saved_last_serv_time_ns = bfqq->last_serv_time_ns; in bfq_bfqq_save_state()
3055 bfqq_data->saved_inject_limit = bfqq->inject_limit; in bfq_bfqq_save_state()
3056 bfqq_data->saved_decrease_time_jif = bfqq->decrease_time_jif; in bfq_bfqq_save_state()
3058 bfqq_data->saved_weight = bfqq->entity.orig_weight; in bfq_bfqq_save_state()
3059 bfqq_data->saved_ttime = bfqq->ttime; in bfq_bfqq_save_state()
3060 bfqq_data->saved_has_short_ttime = in bfq_bfqq_save_state()
3062 bfqq_data->saved_IO_bound = bfq_bfqq_IO_bound(bfqq); in bfq_bfqq_save_state()
3063 bfqq_data->saved_io_start_time = bfqq->io_start_time; in bfq_bfqq_save_state()
3064 bfqq_data->saved_tot_idle_time = bfqq->tot_idle_time; in bfq_bfqq_save_state()
3065 bfqq_data->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq); in bfq_bfqq_save_state()
3066 bfqq_data->was_in_burst_list = in bfq_bfqq_save_state()
3067 !hlist_unhashed(&bfqq->burst_list_node); in bfq_bfqq_save_state()
3071 bfqq->bfqd->low_latency)) { in bfq_bfqq_save_state()
3075 * did not make it to be set in a weight-raised state, in bfq_bfqq_save_state()
3077 * weight-raising state that would have been assigned in bfq_bfqq_save_state()
3081 bfqq_data->saved_wr_coeff = bfqq->bfqd->bfq_wr_coeff; in bfq_bfqq_save_state()
3082 bfqq_data->saved_wr_start_at_switch_to_srt = in bfq_bfqq_save_state()
3084 bfqq_data->saved_wr_cur_max_time = in bfq_bfqq_save_state()
3085 bfq_wr_duration(bfqq->bfqd); in bfq_bfqq_save_state()
3086 bfqq_data->saved_last_wr_start_finish = jiffies; in bfq_bfqq_save_state()
3088 bfqq_data->saved_wr_coeff = bfqq->wr_coeff; in bfq_bfqq_save_state()
3089 bfqq_data->saved_wr_start_at_switch_to_srt = in bfq_bfqq_save_state()
3090 bfqq->wr_start_at_switch_to_srt; in bfq_bfqq_save_state()
3091 bfqq_data->saved_service_from_wr = in bfq_bfqq_save_state()
3092 bfqq->service_from_wr; in bfq_bfqq_save_state()
3093 bfqq_data->saved_last_wr_start_finish = in bfq_bfqq_save_state()
3094 bfqq->last_wr_start_finish; in bfq_bfqq_save_state()
3095 bfqq_data->saved_wr_cur_max_time = bfqq->wr_cur_max_time; in bfq_bfqq_save_state()
3103 if (cur_bfqq->entity.parent && in bfq_reassign_last_bfqq()
3104 cur_bfqq->entity.parent->last_bfqq_created == cur_bfqq) in bfq_reassign_last_bfqq()
3105 cur_bfqq->entity.parent->last_bfqq_created = new_bfqq; in bfq_reassign_last_bfqq()
3106 else if (cur_bfqq->bfqd && cur_bfqq->bfqd->last_bfqq_created == cur_bfqq) in bfq_reassign_last_bfqq()
3107 cur_bfqq->bfqd->last_bfqq_created = new_bfqq; in bfq_reassign_last_bfqq()
3123 if (bfq_bfqq_busy(bfqq) && RB_EMPTY_ROOT(&bfqq->sort_list) && in bfq_release_process_ref()
3124 bfqq != bfqd->in_service_queue) in bfq_release_process_ref()
3136 struct bfq_queue *new_bfqq = bfqq->new_bfqq; in bfq_merge_bfqqs()
3139 (unsigned long)new_bfqq->pid); in bfq_merge_bfqqs()
3154 if (bfqq->waker_bfqq && !new_bfqq->waker_bfqq && in bfq_merge_bfqqs()
3155 bfqq->waker_bfqq != new_bfqq) { in bfq_merge_bfqqs()
3156 new_bfqq->waker_bfqq = bfqq->waker_bfqq; in bfq_merge_bfqqs()
3157 new_bfqq->tentative_waker_bfqq = NULL; in bfq_merge_bfqqs()
3161 * new_bfqq->waker_bfqq must be reset. So insert in bfq_merge_bfqqs()
3165 hlist_add_head(&new_bfqq->woken_list_node, in bfq_merge_bfqqs()
3166 &new_bfqq->waker_bfqq->woken_list); in bfq_merge_bfqqs()
3171 * If bfqq is weight-raised, then let new_bfqq inherit in bfq_merge_bfqqs()
3172 * weight-raising. To reduce false positives, neglect the case in bfq_merge_bfqqs()
3174 * to be weight-raised (which may happen because EQM may merge in bfq_merge_bfqqs()
3179 if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) { in bfq_merge_bfqqs()
3180 new_bfqq->wr_coeff = bfqq->wr_coeff; in bfq_merge_bfqqs()
3181 new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time; in bfq_merge_bfqqs()
3182 new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish; in bfq_merge_bfqqs()
3183 new_bfqq->wr_start_at_switch_to_srt = in bfq_merge_bfqqs()
3184 bfqq->wr_start_at_switch_to_srt; in bfq_merge_bfqqs()
3186 bfqd->wr_busy_queues++; in bfq_merge_bfqqs()
3187 new_bfqq->entity.prio_changed = 1; in bfq_merge_bfqqs()
3190 if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */ in bfq_merge_bfqqs()
3191 bfqq->wr_coeff = 1; in bfq_merge_bfqqs()
3192 bfqq->entity.prio_changed = 1; in bfq_merge_bfqqs()
3194 bfqd->wr_busy_queues--; in bfq_merge_bfqqs()
3198 bfqd->wr_busy_queues); in bfq_merge_bfqqs()
3203 bic_set_bfqq(bic, new_bfqq, true, bfqq->actuator_idx); in bfq_merge_bfqqs()
3207 * set new_bfqq->bic to NULL. bfqq either: in bfq_merge_bfqqs()
3208 * - does not belong to any bic any more, and hence bfqq->bic must in bfq_merge_bfqqs()
3210 * - is a queue whose owning bics have already been redirected to a in bfq_merge_bfqqs()
3212 * any bic soon and bfqq->bic is already NULL (therefore the next in bfq_merge_bfqqs()
3215 new_bfqq->bic = NULL; in bfq_merge_bfqqs()
3222 * We mark such a queue with a pid -1, and then print SHARED instead of in bfq_merge_bfqqs()
3225 new_bfqq->pid = -1; in bfq_merge_bfqqs()
3226 bfqq->bic = NULL; in bfq_merge_bfqqs()
3238 struct bfq_data *bfqd = q->elevator->elevator_data; in bfq_allow_bio_merge()
3239 bool is_sync = op_is_sync(bio->bi_opf); in bfq_allow_bio_merge()
3240 struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq; in bfq_allow_bio_merge()
3259 new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false, bfqd->bio_bic); in bfq_allow_bio_merge()
3269 bfqq = bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq); in bfq_allow_bio_merge()
3272 * Change also bfqd->bio_bfqq, as in bfq_allow_bio_merge()
3273 * bfqd->bio_bic now points to new_bfqq, and in bfq_allow_bio_merge()
3275 * use again bfqd->bio_bfqq). in bfq_allow_bio_merge()
3277 bfqd->bio_bfqq = bfqq; in bfq_allow_bio_merge()
3284 * Set the maximum time for the in-service queue to consume its
3285 * budget. This prevents seeky processes from lowering the throughput.
3286 * In practice, a time-slice service scheme is used with seeky
3294 if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time) in bfq_set_budget_timeout()
3297 timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight; in bfq_set_budget_timeout()
3299 bfqd->last_budget_start = blk_time_get(); in bfq_set_budget_timeout()
3301 bfqq->budget_timeout = jiffies + in bfq_set_budget_timeout()
3302 bfqd->bfq_timeout * timeout_coeff; in bfq_set_budget_timeout()
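
/*
 * Illustrative, self-contained sketch (not part of the kernel source) of
 * the budget-timeout arithmetic above: a queue whose weight has been
 * raised to W times its original weight gets a timeout W times as long,
 * while soft real-time queues keep the plain timeout. Units (jiffies)
 * and the base timeout value are whatever the scheduler is configured
 * with; the helper name is made up for the example.
 */
static unsigned long sketch_budget_timeout(unsigned long now_jiffies,
					   unsigned long base_timeout_jiffies,
					   unsigned int weight,
					   unsigned int orig_weight,
					   int is_soft_rt)
{
	/* soft real-time queues keep the plain timeout (coefficient 1) */
	unsigned int timeout_coeff = is_soft_rt ? 1 : weight / orig_weight;

	return now_jiffies + base_timeout_jiffies * timeout_coeff;
}
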
3311 bfqd->budgets_assigned = (bfqd->budgets_assigned * 7 + 256) / 8; in __bfq_set_in_service_queue()
3313 if (time_is_before_jiffies(bfqq->last_wr_start_finish) && in __bfq_set_in_service_queue()
3314 bfqq->wr_coeff > 1 && in __bfq_set_in_service_queue()
3315 bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && in __bfq_set_in_service_queue()
3316 time_is_before_jiffies(bfqq->budget_timeout)) { in __bfq_set_in_service_queue()
3318 * For soft real-time queues, move the start in __bfq_set_in_service_queue()
3319 * of the weight-raising period forward by the in __bfq_set_in_service_queue()
3323 * weight-raising period of the queue to end, in __bfq_set_in_service_queue()
3325 * weight-raising period of a soft real-time in __bfq_set_in_service_queue()
3328 * because soft real-time queues are not in __bfq_set_in_service_queue()
3341 if (time_after(bfqq->budget_timeout, in __bfq_set_in_service_queue()
3342 bfqq->last_wr_start_finish)) in __bfq_set_in_service_queue()
3343 bfqq->last_wr_start_finish += in __bfq_set_in_service_queue()
3344 jiffies - bfqq->budget_timeout; in __bfq_set_in_service_queue()
3346 bfqq->last_wr_start_finish = jiffies; in __bfq_set_in_service_queue()
3351 "set_in_service_queue, cur-budget = %d", in __bfq_set_in_service_queue()
3352 bfqq->entity.budget); in __bfq_set_in_service_queue()
3355 bfqd->in_service_queue = bfqq; in __bfq_set_in_service_queue()
3356 bfqd->in_serv_last_pos = 0; in __bfq_set_in_service_queue()
3372 struct bfq_queue *bfqq = bfqd->in_service_queue; in bfq_arm_slice_timer()
3379 * fair distribution of slice time for a process doing back-to-back in bfq_arm_slice_timer()
3382 sl = bfqd->bfq_slice_idle; in bfq_arm_slice_timer()
3384 * Unless the queue is being weight-raised or the scenario is in bfq_arm_slice_timer()
3386 * is seeky. A long idling is preserved for a weight-raised in bfq_arm_slice_timer()
3389 * its reserved share of the throughput (in particular, it is in bfq_arm_slice_timer()
3393 if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 && in bfq_arm_slice_timer()
3396 else if (bfqq->wr_coeff > 1) in bfq_arm_slice_timer()
3399 bfqd->last_idling_start = blk_time_get(); in bfq_arm_slice_timer()
3400 bfqd->last_idling_start_jiffies = jiffies; in bfq_arm_slice_timer()
3402 hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl), in bfq_arm_slice_timer()
3411 * budget, even if the in-service queue is served at peak rate. And
3412 * this maximises throughput with sequential workloads.
3416 return (u64)bfqd->peak_rate * USEC_PER_MSEC * in bfq_calc_max_budget()
3417 jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT; in bfq_calc_max_budget()
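
/*
 * Illustrative sketch (not kernel code) of the formula above: the maximum
 * budget is the number of sectors the drive can transfer, at the measured
 * peak rate, in one budget timeout. The rate is assumed to be stored as
 * (sectors per usec) << RATE_SHIFT, as in the surrounding code; the shift
 * value used here is hypothetical.
 */
#include <stdint.h>

#define SKETCH_RATE_SHIFT 16

static uint64_t sketch_calc_max_budget(uint32_t peak_rate_shifted,
					uint64_t timeout_msec)
{
	/* usec in one timeout, times the rate, then drop the fixed point */
	return (peak_rate_shifted * 1000ULL * timeout_msec)
		>> SKETCH_RATE_SHIFT;
}
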
3421 * Update parameters related to throughput and responsiveness, as a
3427 if (bfqd->bfq_user_max_budget == 0) { in update_thr_responsiveness_params()
3428 bfqd->bfq_max_budget = in update_thr_responsiveness_params()
3430 bfq_log(bfqd, "new max_budget = %d", bfqd->bfq_max_budget); in update_thr_responsiveness_params()
3438 bfqd->last_dispatch = bfqd->first_dispatch = blk_time_get_ns(); in bfq_reset_rate_computation()
3439 bfqd->peak_rate_samples = 1; in bfq_reset_rate_computation()
3440 bfqd->sequential_samples = 0; in bfq_reset_rate_computation()
3441 bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size = in bfq_reset_rate_computation()
3444 bfqd->peak_rate_samples = 0; /* full re-init on next disp. */ in bfq_reset_rate_computation()
3448 bfqd->peak_rate_samples, bfqd->sequential_samples, in bfq_reset_rate_computation()
3449 bfqd->tot_sectors_dispatched); in bfq_reset_rate_computation()
3464 if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES || in bfq_update_rate_reset()
3465 bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL) in bfq_update_rate_reset()
3474 bfqd->delta_from_first = in bfq_update_rate_reset()
3475 max_t(u64, bfqd->delta_from_first, in bfq_update_rate_reset()
3476 bfqd->last_completion - bfqd->first_dispatch); in bfq_update_rate_reset()
3482 rate = div64_ul(bfqd->tot_sectors_dispatched<<BFQ_RATE_SHIFT, in bfq_update_rate_reset()
3483 div_u64(bfqd->delta_from_first, NSEC_PER_USEC)); in bfq_update_rate_reset()
3487 * - the percentage of sequential dispatches is below 3/4 of the in bfq_update_rate_reset()
3489 * - rate is unreasonably high (> 20M sectors/sec) in bfq_update_rate_reset()
3491 if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 && in bfq_update_rate_reset()
3492 rate <= bfqd->peak_rate) || in bfq_update_rate_reset()
3498 * we use a low-pass filter. We compute the smoothing constant in bfq_update_rate_reset()
3514 * cannot reach 9, because bfqd->sequential_samples cannot in bfq_update_rate_reset()
3515 * become equal to bfqd->peak_rate_samples, which, in its in bfq_update_rate_reset()
3516 * turn, holds true because bfqd->sequential_samples is not in bfq_update_rate_reset()
3519 weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples; in bfq_update_rate_reset()
3526 div_u64(weight * bfqd->delta_from_first, in bfq_update_rate_reset()
3533 divisor = 10 - weight; in bfq_update_rate_reset()
3538 * peak_rate = peak_rate * (divisor-1) / divisor + rate / divisor in bfq_update_rate_reset()
3540 bfqd->peak_rate *= divisor-1; in bfq_update_rate_reset()
3541 bfqd->peak_rate /= divisor; in bfq_update_rate_reset()
3544 bfqd->peak_rate += rate; in bfq_update_rate_reset()
3547 * For a very slow device, bfqd->peak_rate can reach 0 (see in bfq_update_rate_reset()
3550 * divisions by zero where bfqd->peak_rate is used as a in bfq_update_rate_reset()
3553 bfqd->peak_rate = max_t(u32, 1, bfqd->peak_rate); in bfq_update_rate_reset()
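
/*
 * Illustrative sketch (not kernel code) of the low-pass filter applied
 * above, ignoring the additional capping of the weight by the duration of
 * the observation interval: the more sequential the observed dispatches,
 * the larger the weight given to the new rate sample, i.e., the smaller
 * the divisor in peak_rate = peak_rate * (divisor-1)/divisor +
 * rate/divisor. The stored rate is also kept strictly positive.
 */
#include <stdint.h>

static uint32_t sketch_update_peak_rate(uint32_t old_rate, uint32_t new_rate,
					uint32_t sequential_samples,
					uint32_t total_samples)
{
	/* weight in [0, 8]: sequential_samples stays below total_samples */
	uint32_t weight = (9 * sequential_samples) / total_samples;
	uint32_t divisor = 10 - weight; /* in [2, 10] */
	uint32_t rate;

	rate = (uint32_t)((uint64_t)old_rate * (divisor - 1) / divisor) +
		new_rate / divisor;

	return rate ? rate : 1; /* never let the stored rate reach 0 */
}
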
3563 * auto-tuning, see update_thr_responsiveness_params()).
3578 * unknown, namely in-device request service rate.
3597 if (bfqd->peak_rate_samples == 0) { /* first dispatch */ in bfq_update_peak_rate()
3599 bfqd->peak_rate_samples); in bfq_update_peak_rate()
3607 * for computing a new peak rate (similarly to the late- in bfq_update_peak_rate()
3611 * - close the observation interval at the last (previous) in bfq_update_peak_rate()
3613 * - compute rate, if possible, for that observation interval in bfq_update_peak_rate()
3614 * - start a new observation interval with this dispatch in bfq_update_peak_rate()
3616 if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC && in bfq_update_peak_rate()
3617 bfqd->tot_rq_in_driver == 0) in bfq_update_peak_rate()
3621 bfqd->peak_rate_samples++; in bfq_update_peak_rate()
3623 if ((bfqd->tot_rq_in_driver > 0 || in bfq_update_peak_rate()
3624 now_ns - bfqd->last_completion < BFQ_MIN_TT) in bfq_update_peak_rate()
3625 && !BFQ_RQ_SEEKY(bfqd, bfqd->last_position, rq)) in bfq_update_peak_rate()
3626 bfqd->sequential_samples++; in bfq_update_peak_rate()
3628 bfqd->tot_sectors_dispatched += blk_rq_sectors(rq); in bfq_update_peak_rate()
3631 if (likely(bfqd->peak_rate_samples % 32)) in bfq_update_peak_rate()
3632 bfqd->last_rq_max_size = in bfq_update_peak_rate()
3633 max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size); in bfq_update_peak_rate()
3635 bfqd->last_rq_max_size = blk_rq_sectors(rq); in bfq_update_peak_rate()
3637 bfqd->delta_from_first = now_ns - bfqd->first_dispatch; in bfq_update_peak_rate()
3640 if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL) in bfq_update_peak_rate()
3646 bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); in bfq_update_peak_rate()
3647 if (RQ_BFQQ(rq) == bfqd->in_service_queue) in bfq_update_peak_rate()
3648 bfqd->in_serv_last_pos = bfqd->last_position; in bfq_update_peak_rate()
3649 bfqd->last_dispatch = now_ns; in bfq_update_peak_rate()
3665 * dispatch occur for a non in-service bfqq, this anticipated in bfq_dispatch_remove()
3666 * increment prevents two counters related to bfqq->dispatched in bfq_dispatch_remove()
3668 * incremented again when the (new) value of bfqq->dispatched in bfq_dispatch_remove()
3671 bfqq->dispatched++; in bfq_dispatch_remove()
3672 bfq_update_peak_rate(q->elevator->elevator_data, rq); in bfq_dispatch_remove()
3679 * throughput concerns, but to preserve the throughput share of
3690 * the service order of the internally-queued requests, does
3691 * determine also the actual throughput distribution among
3693 * concern about per-process throughput distribution, and
3694 * makes its decisions only on a per-request basis. Therefore,
3696 * scheduler is likely to coincide with the desired throughput
3699 * (i-a) each of these processes must get the same throughput as
3701 * (i-b) in case (i-a) does not hold, it holds that the process
3703 * throughput than any of the other processes;
3707 * (from I/O-bound to sporadic), and so on;
3712 * same throughput. This is exactly the desired throughput
3713 * distribution if (i-a) holds, or, if (i-b) holds instead, this is an
3718 * idling (I/O-dispatch plugging) is certainly needed to guarantee
3719 * that bfqq receives its assigned fraction of the device throughput
3722 * The problem is that idling may significantly reduce throughput with
3726 * throughput, it is important to check conditions (i-a), (i-b) and
3732 * very difficult to check conditions (i-a) and (i-b) too. In fact,
3733 * if there are active groups, then, for conditions (i-a) or (i-b) to
3735 * contains more active processes or sub-groups than some other active
3736 * group. More precisely, for conditions (i-a) or (i-b) to become
3742 * share of the throughput even after being dispatched. In this
3744 * inactive while still having in-flight requests, and if, when this
3747 * guaranteed its fair share of the throughput (basically because
3750 * bi-modal behavior, implemented in the function
3757 * conditions (i-a), (i-b) or (ii). So the device is idled for bfqq.
3763 * for completion, then only conditions (i-a) and (i-b) are actually
3764 * controlled, i.e., provided that conditions (i-a) or (i-b) holds,
3766 * holds. In other words, only if conditions (i-a) and (i-b) do not
3770 * control conditions (i-a) and (i-b) it is enough to check just
3775 * risk of getting less throughput than its fair share.
3779 * throughput. This mechanism and its benefits are explained
3783 * can still preempt the new in-service queue if the next
3787 * combined with the hole-recovery heuristic described in the
3794 * minimum of mid-term fairness.
3796 * More precisely, this preemption-based, idleless approach
3810 * 1024/8 times as high as the service received by the other
3816 * part) without minimally sacrificing throughput. And, if
3818 * this device is probably a high throughput.
3820 * We are now left only with explaining the two sub-conditions in the
3823 * sub-condition, we need to add that the function
3825 * non-weight-raised queues, for efficiency reasons (see comments on
3826 * bfq_weights_tree_add()). Then the fact that bfqq is weight-raised
3829 * weight-raised, the scenario is still symmetric if all queues with
3831 * weight-raised. Actually, we should be even more precise here, and
3832 * differentiate between interactive weight raising and soft real-time
3835 * The second sub-condition checked in the compound condition is
3836 * whether there is a fair amount of already in-flight I/O not
3838 * following reason. The drive may decide to serve in-flight
3839 * non-bfqq's I/O requests before bfqq's ones, thereby delaying the
3841 * I/O-dispatching is not plugged, then, while bfqq remains empty, a
3849 * in-flight I/O, and enables bfqq to recover the bandwidth it may
3853 * device-idling countermeasures may however fail in the following
3854 * unlucky scenario: if I/O-dispatch plugging is (correctly) disabled
3855 * in a time period during which all symmetry sub-conditions hold, and
3857 * some later point in time some sub-condition stops to hold, then it
3860 * served. The last sub-condition commented above somewhat mitigates
3861 * this problem for weight-raised queues.
3887 return (bfqq->wr_coeff > 1 && in idling_needed_for_service_guarantees()
3888 (bfqd->wr_busy_queues < tot_busy_queues || in idling_needed_for_service_guarantees()
3889 bfqd->tot_rq_in_driver >= bfqq->dispatched + 4)) || in idling_needed_for_service_guarantees()
3914 * not re-scheduled. To prevent this from happening, re-queue in __bfq_bfqq_expire()
3915 * bfqq if it needs I/O-dispatch plugging, even if it is in __bfq_bfqq_expire()
3919 if (RB_EMPTY_ROOT(&bfqq->sort_list) && in __bfq_bfqq_expire()
3922 if (bfqq->dispatched == 0) in __bfq_bfqq_expire()
3927 * the weight-raising mechanism. in __bfq_bfqq_expire()
3929 bfqq->budget_timeout = jiffies; in __bfq_bfqq_expire()
3938 if (unlikely(!bfqd->nonrot_with_queueing && in __bfq_bfqq_expire()
3939 !RB_EMPTY_ROOT(&bfqq->sort_list))) in __bfq_bfqq_expire()
3944 * All in-service entities must have been properly deactivated in __bfq_bfqq_expire()
3946 * resets all in-service entities as no more in service. This in __bfq_bfqq_expire()
3954 * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
3971 if (bfqq->wr_coeff == 1) in __bfq_bfqq_recalc_budget()
3972 budget = bfqq->max_budget; in __bfq_bfqq_recalc_budget()
3974 * Use a constant, low budget for weight-raised queues, in __bfq_bfqq_recalc_budget()
3982 bfqq->entity.budget, bfq_bfqq_budget_left(bfqq)); in __bfq_bfqq_recalc_budget()
3986 bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue)); in __bfq_bfqq_recalc_budget()
3988 if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) { in __bfq_bfqq_recalc_budget()
3992 * for throughput. in __bfq_bfqq_recalc_budget()
4016 * the throughput, as discussed in the in __bfq_bfqq_recalc_budget()
4019 if (bfqq->dispatched > 0) /* still outstanding reqs */ in __bfq_bfqq_recalc_budget()
4020 budget = min(budget * 2, bfqd->bfq_max_budget); in __bfq_bfqq_recalc_budget()
4023 budget -= 4 * min_budget; in __bfq_bfqq_recalc_budget()
4031 * the chance to boost the throughput if this in __bfq_bfqq_recalc_budget()
4035 budget = min(budget * 2, bfqd->bfq_max_budget); in __bfq_bfqq_recalc_budget()
4045 * candidate to boost the disk throughput. in __bfq_bfqq_recalc_budget()
4047 budget = min(budget * 4, bfqd->bfq_max_budget); in __bfq_bfqq_recalc_budget()
4061 * back-shifting. The larger the budget of the in __bfq_bfqq_recalc_budget()
4070 * many re-activations a lower finish time in __bfq_bfqq_recalc_budget()
4074 * quite precisely by bfqq->entity.service. in __bfq_bfqq_recalc_budget()
4076 * bfqq->entity.service is equal to the number in __bfq_bfqq_recalc_budget()
4082 budget = max_t(int, bfqq->entity.service, min_budget); in __bfq_bfqq_recalc_budget()
4094 budget = bfqd->bfq_max_budget; in __bfq_bfqq_recalc_budget()
4097 bfqq->max_budget = budget; in __bfq_bfqq_recalc_budget()
4099 if (bfqd->budgets_assigned >= bfq_stats_min_budgets && in __bfq_bfqq_recalc_budget()
4100 !bfqd->bfq_user_max_budget) in __bfq_bfqq_recalc_budget()
4101 bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget); in __bfq_bfqq_recalc_budget()
4113 next_rq = bfqq->next_rq; in __bfq_bfqq_recalc_budget()
4115 bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget, in __bfq_bfqq_recalc_budget()
4120 bfqq->entity.budget); in __bfq_bfqq_recalc_budget()
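
/*
 * Illustrative, self-contained sketch (not kernel code) of the budget
 * adaptation pattern above: shrink the budget only when the queue looked
 * too idle with nothing in flight, double it on budget timeouts or when
 * requests are still outstanding, quadruple it when the whole budget was
 * consumed, and fall back to the service actually received when the queue
 * simply ran out of requests. Enum and helper names are made up, and the
 * shrink step is simplified (halving) with respect to the real code.
 */
enum sketch_expire_reason {
	SKETCH_TOO_IDLE,
	SKETCH_BUDGET_TIMEOUT,
	SKETCH_BUDGET_EXHAUSTED,
	SKETCH_NO_MORE_REQUESTS,
};

static int sketch_min(int a, int b) { return a < b ? a : b; }
static int sketch_max(int a, int b) { return a > b ? a : b; }

static int sketch_recalc_budget(int budget, int min_budget, int max_budget,
				int service_received, int outstanding_reqs,
				enum sketch_expire_reason reason)
{
	switch (reason) {
	case SKETCH_TOO_IDLE:
		if (outstanding_reqs)	/* may just be waiting for I/O */
			return sketch_min(budget * 2, max_budget);
		return sketch_max(budget / 2, min_budget);
	case SKETCH_BUDGET_TIMEOUT:	/* seeky or slow: grow gently */
		return sketch_min(budget * 2, max_budget);
	case SKETCH_BUDGET_EXHAUSTED:	/* greedy and fast: grow faster */
		return sketch_min(budget * 4, max_budget);
	case SKETCH_NO_MORE_REQUESTS:	/* track what was actually used */
		return sketch_max(service_received, min_budget);
	}
	return budget;
}
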
4127 * their chances to lower the throughput. More details in the comments
4143 * service slots. On the opposite end, the requests of the in-service
4165 delta_ktime = bfqd->last_idling_start; in bfq_bfqq_is_slow()
4168 delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start); in bfq_bfqq_is_slow()
4173 if (blk_queue_nonrot(bfqd->queue)) in bfq_bfqq_is_slow()
4175 * give same worst-case guarantees as idling in bfq_bfqq_is_slow()
4202 slow = bfqq->entity.service < bfqd->bfq_max_budget / 2; in bfq_bfqq_is_slow()
4211 * To be deemed as soft real-time, an application must meet two
4214 * record a compressed high-definition video.
4216 * batch, to compute the next-start time instant, soft_rt_next_start, such
4230 * Unfortunately, even a greedy (i.e., I/O-bound) application may
4233 * the following circumstances. First, if the CPU load is high. The
4237 * device: the storage device is highly loaded or reaches a low-enough
4238 * throughput with the I/O of the application (e.g., because the I/O
4242 * that greedy applications are deemed as soft real-time in these
4249 * namely bfqd->bfq_slice_idle, and (2) a few extra jiffies. We
4251 * jiffies; we get back to it after next item (b). Lower-bounding
4253 * bfqd->bfq_slice_idle tends to filter out greedy applications,
4256 * real-time application spends some time processing data, after a
4259 * (b) Current value of bfqq->soft_rt_next_start. As pointed out
4262 * storage-device load. In more detail, in these scenarios, these
4265 * including the filtering in above item (a). These slow-speed
4267 * intervals during which these applications do I/O at a very high
4268 * speed. Fortunately, exactly because of the high speed of the
4269 * I/O in the high-speed intervals, the values returned by this
4270 * function happen to be so high, near the end of any such
4271 * high-speed interval, to be likely to fall *after* the end of
4272 * the low-speed time interval that follows. These high values are
4273 * stored in bfqq->soft_rt_next_start after each invocation of
4275 * bfqq->soft_rt_next_start is constantly used to lower-bound the
4277 * beginning of a low-speed interval, bfqq->soft_rt_next_start is
4278 * likely to be constantly kept so high that any I/O request
4279 * issued during the low-speed interval is considered as arriving
4281 * real-time. Then, in the high-speed interval that follows, the
4282 * application will not be deemed as soft real-time, just because
4283 * it will do I/O at a high speed. And so on.
4288 * bfqd->bfq_slice_idle:
4290 * higher than bfqd->bfq_slice_idle. This happens, e.g., on slow
4292 * that the approximation, in jiffies, of bfqd->bfq_slice_idle
4298 * reference time interval just bfqd->bfq_slice_idle, but
4299 * bfqd->bfq_slice_idle plus a few jiffies. In particular, we add the
4306 return max3(bfqq->soft_rt_next_start, in bfq_bfqq_softrt_next_start()
4307 bfqq->last_idle_bklogged + in bfq_bfqq_softrt_next_start()
4308 HZ * bfqq->service_from_backlogged / in bfq_bfqq_softrt_next_start()
4309 bfqd->bfq_wr_max_softrt_rate, in bfq_bfqq_softrt_next_start()
4310 jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4); in bfq_bfqq_softrt_next_start()
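
/*
 * Illustrative sketch (not kernel code) of the three-way lower bound
 * returned above: the next instant at which new I/O may still be deemed
 * soft real-time is never earlier than (a) the previous estimate, (b) the
 * time needed to consume the backlogged service at the maximum soft
 * real-time rate, and (c) roughly one idle slice, plus a few jiffies,
 * from now. Jiffies-based units are assumed for the example.
 */
static unsigned long sketch_softrt_next_start(unsigned long prev_next_start,
					      unsigned long last_idle_bklogged,
					      unsigned long service_backlogged,
					      unsigned long max_softrt_rate,
					      unsigned long now,
					      unsigned long slice_idle_jiffies,
					      unsigned long hz)
{
	unsigned long a = prev_next_start;
	unsigned long b = last_idle_bklogged +
		hz * service_backlogged / max_softrt_rate;
	unsigned long c = now + slice_idle_jiffies + 4;
	unsigned long m = a > b ? a : b;

	return m > c ? m : c;
}
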
4314 * bfq_bfqq_expire - expire a queue.
4329 * tends to lower the throughput). In addition, this time-charging
4346 struct bfq_entity *entity = &bfqq->entity; in bfq_bfqq_expire()
4355 * timed-out queues with the time and not the service in bfq_bfqq_expire()
4366 * or quasi-sequential processes. in bfq_bfqq_expire()
4368 if (bfqq->wr_coeff == 1 && in bfq_bfqq_expire()
4371 bfq_bfqq_budget_left(bfqq) >= entity->budget / 3))) in bfq_bfqq_expire()
4374 if (bfqd->low_latency && bfqq->wr_coeff == 1) in bfq_bfqq_expire()
4375 bfqq->last_wr_start_finish = jiffies; in bfq_bfqq_expire()
4377 if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 && in bfq_bfqq_expire()
4378 RB_EMPTY_ROOT(&bfqq->sort_list)) { in bfq_bfqq_expire()
4391 if (bfqq->dispatched == 0) in bfq_bfqq_expire()
4392 bfqq->soft_rt_next_start = in bfq_bfqq_expire()
4394 else if (bfqq->dispatched > 0) { in bfq_bfqq_expire()
4405 slow, bfqq->dispatched, bfq_bfqq_has_short_ttime(bfqq)); in bfq_bfqq_expire()
4412 bfqd->rqs_injected = bfqd->wait_dispatch = false; in bfq_bfqq_expire()
4413 bfqd->waited_rq = NULL; in bfq_bfqq_expire()
4435 entity->service = 0; in bfq_bfqq_expire()
4438 * Reset the received-service counter for every parent entity. in bfq_bfqq_expire()
4439 * Differently from what happens with bfqq->entity.service, in bfq_bfqq_expire()
4443 * consumed budget, bfqq->entity.service needs to be kept, in bfq_bfqq_expire()
4445 * the same budget, the last value of bfqq->entity.service is in bfq_bfqq_expire()
4446 * needed to properly decrement bfqq->entity.budget by the in bfq_bfqq_expire()
4448 * to keep entity->service for parent entities too, because in bfq_bfqq_expire()
4449 * the bubble up of the new value of bfqq->entity.budget will in bfq_bfqq_expire()
4454 entity = entity->parent; in bfq_bfqq_expire()
4456 entity->service = 0; in bfq_bfqq_expire()
4466 return time_is_before_eq_jiffies(bfqq->budget_timeout); in bfq_bfqq_budget_timeout()
4475 * only to be kicked off for preserving a high throughput.
4479 bfq_log_bfqq(bfqq->bfqd, bfqq, in bfq_may_expire_for_budg_timeout()
4482 bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3, in bfq_may_expire_for_budg_timeout()
4486 bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3) in bfq_may_expire_for_budg_timeout()
4495 !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag, in idling_boosts_thr_without_issues()
4508 * boosts the throughput. in idling_boosts_thr_without_issues()
4511 * idling is virtually always beneficial for the throughput if: in idling_boosts_thr_without_issues()
4512 * (a) the device is not NCQ-capable and rotational, or in idling_boosts_thr_without_issues()
4514 * the request pattern for bfqq is I/O-bound and sequential, or in idling_boosts_thr_without_issues()
4516 * not NCQ-capable and the request pattern for bfqq is in idling_boosts_thr_without_issues()
4517 * I/O-bound and sequential. in idling_boosts_thr_without_issues()
4520 * NCQ-capable flash-based device would not boost the in idling_boosts_thr_without_issues()
4521 * throughput even with sequential I/O; rather it would lower in idling_boosts_thr_without_issues()
4522 * the throughput in proportion to how fast the device in idling_boosts_thr_without_issues()
4525 * particular, happens to be false if bfqd is an NCQ-capable in idling_boosts_thr_without_issues()
4526 * flash-based device. in idling_boosts_thr_without_issues()
4529 ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) && in idling_boosts_thr_without_issues()
4536 * weight-raised queues. in idling_boosts_thr_without_issues()
4540 * non-weight-raised queues ask for requests at a lower rate, in idling_boosts_thr_without_issues()
4541 * then processes associated with weight-raised queues have a in idling_boosts_thr_without_issues()
4545 * of the device throughput proportional to their high in idling_boosts_thr_without_issues()
4546 * weight. This is especially true with NCQ-capable drives, in idling_boosts_thr_without_issues()
4548 * reorder internally-queued requests. in idling_boosts_thr_without_issues()
4551 * there are weight-raised busy queues. In this case, and if in idling_boosts_thr_without_issues()
4552 * bfqq is not weight-raised, this guarantees that the device in idling_boosts_thr_without_issues()
4553 * is not idled for bfqq (if, instead, bfqq is weight-raised, in idling_boosts_thr_without_issues()
4557 * sync non-weight-raised queue, to get a lower number of in idling_boosts_thr_without_issues()
4560 * weight-raised queues get served again. This often mitigates in idling_boosts_thr_without_issues()
4567 bfqd->wr_busy_queues == 0; in idling_boosts_thr_without_issues()
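
/*
 * Illustrative sketch (not kernel code) of the decision distilled in the
 * comments above: idling pays off for throughput when the drive is
 * rotational without command queueing, or when it lacks one of the two
 * properties and the queue's I/O is sequential and I/O-bound; and it is
 * "without issues" only while no weight-raised queue is waiting for
 * service. All parameters are plain booleans/counters for the example.
 */
#include <stdbool.h>

static bool sketch_idling_boosts_thr_without_issues(bool rotational,
						    bool ncq_capable,
						    bool seq_and_io_bound,
						    int wr_busy_queues)
{
	bool boosts_thr = (rotational && !ncq_capable) ||
			  ((rotational || !ncq_capable) && seq_and_io_bound);

	return boosts_thr && wr_busy_queues == 0;
}
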
4573 * device idling plays a critical role for both throughput boosting
4578 * beneficial for throughput or, even if detrimental for throughput,
4580 * latency, desired throughput distribution, ...). In particular, on
4581 * NCQ-capable devices, this function tries to return false, so as to
4583 * device boost the throughput without causing any service-guarantee
4593 struct bfq_data *bfqd = bfqq->bfqd; in bfq_better_to_idle()
4600 if (unlikely(bfqd->strict_guarantees)) in bfq_better_to_idle()
4609 * queues in this class can steal to higher-priority queues in bfq_better_to_idle()
4611 if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) || in bfq_better_to_idle()
4624 * either boosts the throughput (without issues), or is in bfq_better_to_idle()
4632 * If the in-service queue is empty but the function bfq_better_to_idle
4638 * why performing device idling is the best choice to boost the throughput
4644 return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_better_to_idle(bfqq); in bfq_bfqq_must_idle()
4657 struct bfq_queue *bfqq, *in_serv_bfqq = bfqd->in_service_queue; in bfq_choose_bfqq_for_injection()
4658 unsigned int limit = in_serv_bfqq->inject_limit; in bfq_choose_bfqq_for_injection()
4663 * - bfqq is not weight-raised and therefore does not carry in bfq_choose_bfqq_for_injection()
4664 * time-critical I/O, in bfq_choose_bfqq_for_injection()
4666 * - regardless of whether bfqq is weight-raised, bfqq has in bfq_choose_bfqq_for_injection()
4673 bool in_serv_always_inject = in_serv_bfqq->wr_coeff == 1 || in bfq_choose_bfqq_for_injection()
4678 * - the baseline total service time could not be sampled yet, in bfq_choose_bfqq_for_injection()
4680 * - a lot of time has elapsed since the plugging of I/O in bfq_choose_bfqq_for_injection()
4685 if (limit == 0 && in_serv_bfqq->last_serv_time_ns == 0 && in bfq_choose_bfqq_for_injection()
4687 time_is_before_eq_jiffies(bfqd->last_idling_start_jiffies + in bfq_choose_bfqq_for_injection()
4688 bfqd->bfq_slice_idle) in bfq_choose_bfqq_for_injection()
4692 if (bfqd->tot_rq_in_driver >= limit) in bfq_choose_bfqq_for_injection()
4697 * a high probability, very few steps are needed to find a in bfq_choose_bfqq_for_injection()
4700 * - BFQ dynamically updates the budget of every queue so as in bfq_choose_bfqq_for_injection()
4702 * - if a queue gets all its requests dispatched as injected in bfq_choose_bfqq_for_injection()
4704 * (and re-added only if it gets new requests, but then it in bfq_choose_bfqq_for_injection()
4707 for (i = 0; i < bfqd->num_actuators; i++) { in bfq_choose_bfqq_for_injection()
4708 list_for_each_entry(bfqq, &bfqd->active_list[i], bfqq_list) in bfq_choose_bfqq_for_injection()
4709 if (!RB_EMPTY_ROOT(&bfqq->sort_list) && in bfq_choose_bfqq_for_injection()
4710 (in_serv_always_inject || bfqq->wr_coeff > 1) && in bfq_choose_bfqq_for_injection()
4711 bfq_serv_to_charge(bfqq->next_rq, bfqq) <= in bfq_choose_bfqq_for_injection()
4714 * Allow for only one large in-flight request in bfq_choose_bfqq_for_injection()
4715 * on non-rotational devices, for the in bfq_choose_bfqq_for_injection()
4716 * following reason. On non-rotational drives, in bfq_choose_bfqq_for_injection()
4723 * request of the in-service queue wait for so in bfq_choose_bfqq_for_injection()
4726 * drive reach a very high throughput, even if in bfq_choose_bfqq_for_injection()
4727 * there is only one in-flight large request in bfq_choose_bfqq_for_injection()
4730 if (blk_queue_nonrot(bfqd->queue) && in bfq_choose_bfqq_for_injection()
4731 blk_rq_sectors(bfqq->next_rq) >= in bfq_choose_bfqq_for_injection()
4733 bfqd->tot_rq_in_driver >= 1) in bfq_choose_bfqq_for_injection()
4736 bfqd->rqs_injected = true; in bfq_choose_bfqq_for_injection()
4750 if (bfqd->in_service_queue && in bfq_find_active_bfqq_for_actuator()
4751 bfqd->in_service_queue->actuator_idx == idx) in bfq_find_active_bfqq_for_actuator()
4752 return bfqd->in_service_queue; in bfq_find_active_bfqq_for_actuator()
4754 list_for_each_entry(bfqq, &bfqd->active_list[idx], bfqq_list) { in bfq_find_active_bfqq_for_actuator()
4755 if (!RB_EMPTY_ROOT(&bfqq->sort_list) && in bfq_find_active_bfqq_for_actuator()
4756 bfq_serv_to_charge(bfqq->next_rq, bfqq) <= in bfq_find_active_bfqq_for_actuator()
4783 for (i = 0 ; i < bfqd->num_actuators; i++) { in bfq_find_bfqq_for_underused_actuator()
4784 if (bfqd->rq_in_driver[i] < bfqd->actuator_load_threshold && in bfq_find_bfqq_for_underused_actuator()
4785 (i == bfqd->num_actuators - 1 || in bfq_find_bfqq_for_underused_actuator()
4786 bfqd->rq_in_driver[i] < bfqd->rq_in_driver[i+1])) { in bfq_find_bfqq_for_underused_actuator()
4809 bfqq = bfqd->in_service_queue; in bfq_select_queue()
4813 bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue"); in bfq_select_queue()
4828 * If some actuator is underutilized, but the in-service in bfq_select_queue()
4838 * happens, it is much more convenient to re-execute this loop in bfq_select_queue()
4842 next_rq = bfqq->next_rq; in bfq_select_queue()
4875 * provide a reasonable throughput. in bfq_select_queue()
4879 hrtimer_try_to_cancel(&bfqd->idle_slice_timer); in bfq_select_queue()
4886 * No requests pending. However, if the in-service queue is idling in bfq_select_queue()
4891 * throughput and is possible. in bfq_select_queue()
4894 (bfqq->dispatched != 0 && bfq_better_to_idle(bfqq))) { in bfq_select_queue()
4895 unsigned int act_idx = bfqq->actuator_idx; in bfq_select_queue()
4898 !hlist_empty(&bfqq->woken_list) ? in bfq_select_queue()
4899 container_of(bfqq->woken_list.first, in bfq_select_queue()
4904 if (bfqq->bic && bfqq->bic->bfqq[0][act_idx] && in bfq_select_queue()
4905 bfq_bfqq_busy(bfqq->bic->bfqq[0][act_idx]) && in bfq_select_queue()
4906 bfqq->bic->bfqq[0][act_idx]->next_rq) in bfq_select_queue()
4907 async_bfqq = bfqq->bic->bfqq[0][act_idx]; in bfq_select_queue()
4909 * The next four mutually-exclusive ifs decide in bfq_select_queue()
4921 * non-empty waker queue for bfqq, i.e., a queue whose in bfq_select_queue()
4932 * throughput. The best action to take is therefore to in bfq_select_queue()
4952 * bfqq delivers more throughput when served without in bfq_select_queue()
4955 * count more than overall throughput, and may be in bfq_select_queue()
4972 * I/O-plugging timeout fires. So one may deem the in bfq_select_queue()
4976 * reasons. First, throughput may be low because the in bfq_select_queue()
4989 icq_to_bic(async_bfqq->next_rq->elv.icq) == bfqq->bic && in bfq_select_queue()
4990 bfq_serv_to_charge(async_bfqq->next_rq, async_bfqq) <= in bfq_select_queue()
4993 else if (bfqq->waker_bfqq && in bfq_select_queue()
4994 bfq_bfqq_busy(bfqq->waker_bfqq) && in bfq_select_queue()
4995 bfqq->waker_bfqq->next_rq && in bfq_select_queue()
4996 bfq_serv_to_charge(bfqq->waker_bfqq->next_rq, in bfq_select_queue()
4997 bfqq->waker_bfqq) <= in bfq_select_queue()
4998 bfq_bfqq_budget_left(bfqq->waker_bfqq) in bfq_select_queue()
5000 bfqq = bfqq->waker_bfqq; in bfq_select_queue()
5003 blocked_bfqq->next_rq && in bfq_select_queue()
5004 bfq_serv_to_charge(blocked_bfqq->next_rq, in bfq_select_queue()
5010 (bfqq->wr_coeff == 1 || bfqd->wr_busy_queues > 1 || in bfq_select_queue()
5039 struct bfq_entity *entity = &bfqq->entity; in bfq_update_wr_data()
5041 if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */ in bfq_update_wr_data()
5044 jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish), in bfq_update_wr_data()
5045 jiffies_to_msecs(bfqq->wr_cur_max_time), in bfq_update_wr_data()
5046 bfqq->wr_coeff, in bfq_update_wr_data()
5047 bfqq->entity.weight, bfqq->entity.orig_weight); in bfq_update_wr_data()
5049 if (entity->prio_changed) in bfq_update_wr_data()
5055 * weight-raising period, then end weight raising. in bfq_update_wr_data()
5059 else if (time_is_before_jiffies(bfqq->last_wr_start_finish + in bfq_update_wr_data()
5060 bfqq->wr_cur_max_time)) { in bfq_update_wr_data()
5061 if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time || in bfq_update_wr_data()
5062 time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt + in bfq_update_wr_data()
5068 * interactive-weight-raising period in bfq_update_wr_data()
5079 bfqq->entity.prio_changed = 1; in bfq_update_wr_data()
5082 if (bfqq->wr_coeff > 1 && in bfq_update_wr_data()
5083 bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time && in bfq_update_wr_data()
5084 bfqq->service_from_wr > max_service_from_wr) { in bfq_update_wr_data()
5097 if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1)) in bfq_update_wr_data()
5108 struct request *rq = bfqq->next_rq; in bfq_dispatch_rq_from_bfqq()
5115 if (bfqq == bfqd->in_service_queue && bfqd->wait_dispatch) { in bfq_dispatch_rq_from_bfqq()
5116 bfqd->wait_dispatch = false; in bfq_dispatch_rq_from_bfqq()
5117 bfqd->waited_rq = rq; in bfq_dispatch_rq_from_bfqq()
5120 bfq_dispatch_remove(bfqd->queue, rq); in bfq_dispatch_rq_from_bfqq()
5122 if (bfqq != bfqd->in_service_queue) in bfq_dispatch_rq_from_bfqq()
5130 * weight-raised during this service slot, even if it has in bfq_dispatch_rq_from_bfqq()
5132 * weight-raised queue. This inflates bfqq's timestamps, which in bfq_dispatch_rq_from_bfqq()
5134 * device immediately to possible other weight-raised queues. in bfq_dispatch_rq_from_bfqq()
5151 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; in bfq_has_work()
5154 * Avoiding lock: a race on bfqd->queued should cause at in bfq_has_work()
5157 return !list_empty_careful(&bfqd->dispatch) || in bfq_has_work()
5158 READ_ONCE(bfqd->queued); in bfq_has_work()
5163 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; in __bfq_dispatch_request()
5167 if (!list_empty(&bfqd->dispatch)) { in __bfq_dispatch_request()
5168 rq = list_first_entry(&bfqd->dispatch, struct request, in __bfq_dispatch_request()
5170 list_del_init(&rq->queuelist); in __bfq_dispatch_request()
5181 bfqq->dispatched++; in __bfq_dispatch_request()
5206 * being the frequency of non-elevator-private in __bfq_dispatch_request()
5228 * throughput. in __bfq_dispatch_request()
5230 if (bfqd->strict_guarantees && bfqd->tot_rq_in_driver > 0) in __bfq_dispatch_request()
5241 bfqd->rq_in_driver[bfqq->actuator_idx]++; in __bfq_dispatch_request()
5242 bfqd->tot_rq_in_driver++; in __bfq_dispatch_request()
5244 rq->rq_flags |= RQF_STARTED; in __bfq_dispatch_request()
5274 spin_lock_irq(&q->queue_lock); in bfq_update_dispatch_stats()
5291 bfqg_stats_update_io_remove(bfqg, rq->cmd_flags); in bfq_update_dispatch_stats()
5293 spin_unlock_irq(&q->queue_lock); in bfq_update_dispatch_stats()
5304 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; in bfq_dispatch_request()
5309 spin_lock_irq(&bfqd->lock); in bfq_dispatch_request()
5311 in_serv_queue = bfqd->in_service_queue; in bfq_dispatch_request()
5315 if (in_serv_queue == bfqd->in_service_queue) { in bfq_dispatch_request()
5320 spin_unlock_irq(&bfqd->lock); in bfq_dispatch_request()
5321 bfq_update_dispatch_stats(hctx->queue, rq, in bfq_dispatch_request()
5330 * in-flight on this queue also holds a reference, dropped when rq is freed.
5341 bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", bfqq, bfqq->ref); in bfq_put_queue()
5343 bfqq->ref--; in bfq_put_queue()
5344 if (bfqq->ref) in bfq_put_queue()
5347 if (!hlist_unhashed(&bfqq->burst_list_node)) { in bfq_put_queue()
5348 hlist_del_init(&bfqq->burst_list_node); in bfq_put_queue()
5354 * bursts, when some short-lived process (often due to in bfq_put_queue()
5369 * the current burst list--without incrementing in bfq_put_queue()
3370 * burst_size--because of a split, but the current in bfq_put_queue()
5375 if (bfqq->bic && bfqq->bfqd->burst_size > 0) in bfq_put_queue()
5376 bfqq->bfqd->burst_size--; in bfq_put_queue()
5395 if (!hlist_unhashed(&bfqq->woken_list_node)) in bfq_put_queue()
5396 hlist_del_init(&bfqq->woken_list_node); in bfq_put_queue()
5399 hlist_for_each_entry_safe(item, n, &bfqq->woken_list, in bfq_put_queue()
5401 item->waker_bfqq = NULL; in bfq_put_queue()
5402 hlist_del_init(&item->woken_list_node); in bfq_put_queue()
5405 if (bfqq->bfqd->last_completed_rq_bfqq == bfqq) in bfq_put_queue()
5406 bfqq->bfqd->last_completed_rq_bfqq = NULL; in bfq_put_queue()
5408 WARN_ON_ONCE(!list_empty(&bfqq->fifo)); in bfq_put_queue()
5409 WARN_ON_ONCE(!RB_EMPTY_ROOT(&bfqq->sort_list)); in bfq_put_queue()
5410 WARN_ON_ONCE(bfqq->dispatched); in bfq_put_queue()
5418 bfqq->stable_ref--; in bfq_put_stable_ref()
5431 __bfqq = bfqq->new_bfqq; in bfq_put_cooperator()
5433 next = __bfqq->new_bfqq; in bfq_put_cooperator()
5438 bfq_release_process_ref(bfqq->bfqd, bfqq); in bfq_put_cooperator()
5443 if (bfqq == bfqd->in_service_queue) { in bfq_exit_bfqq()
5448 bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref); in bfq_exit_bfqq()
5460 bfqd = bfqq->bfqd; /* NULL if scheduler already exited */ in bfq_exit_icq_bfqq()
5470 struct bfq_iocq_bfqq_data *bfqq_data = bic->bfqq_data; in _bfq_exit_icq()
5489 * If bfqd and thus bfqd->num_actuators is not available any in bfq_exit_icq()
5490 * longer, then cycle over all possible per-actuator bfqqs in in bfq_exit_icq()
5492 * therefore on its unused per-actuator fields being NULL. in bfq_exit_icq()
5498 spin_lock_irqsave(&bfqd->lock, flags); in bfq_exit_icq()
5499 _bfq_exit_icq(bic, bfqd->num_actuators); in bfq_exit_icq()
5500 spin_unlock_irqrestore(&bfqd->lock, flags); in bfq_exit_icq()
5515 struct bfq_data *bfqd = bfqq->bfqd; in bfq_set_next_ioprio_data()
5520 ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio); in bfq_set_next_ioprio_data()
5524 bdi_dev_name(bfqq->bfqd->queue->disk->bdi), in bfq_set_next_ioprio_data()
5531 bfqq->new_ioprio = task_nice_ioprio(tsk); in bfq_set_next_ioprio_data()
5532 bfqq->new_ioprio_class = task_nice_ioclass(tsk); in bfq_set_next_ioprio_data()
5535 bfqq->new_ioprio = IOPRIO_PRIO_LEVEL(bic->ioprio); in bfq_set_next_ioprio_data()
5536 bfqq->new_ioprio_class = IOPRIO_CLASS_RT; in bfq_set_next_ioprio_data()
5539 bfqq->new_ioprio = IOPRIO_PRIO_LEVEL(bic->ioprio); in bfq_set_next_ioprio_data()
5540 bfqq->new_ioprio_class = IOPRIO_CLASS_BE; in bfq_set_next_ioprio_data()
5543 bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE; in bfq_set_next_ioprio_data()
5544 bfqq->new_ioprio = IOPRIO_NR_LEVELS - 1; in bfq_set_next_ioprio_data()
5548 if (bfqq->new_ioprio >= IOPRIO_NR_LEVELS) { in bfq_set_next_ioprio_data()
5550 bfqq->new_ioprio); in bfq_set_next_ioprio_data()
5551 bfqq->new_ioprio = IOPRIO_NR_LEVELS - 1; in bfq_set_next_ioprio_data()
5554 bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio); in bfq_set_next_ioprio_data()
5556 bfqq->new_ioprio, bfqq->entity.new_weight); in bfq_set_next_ioprio_data()
5557 bfqq->entity.prio_changed = 1; in bfq_set_next_ioprio_data()
5569 int ioprio = bic->icq.ioc->ioprio; in bfq_check_ioprio_change()
5575 if (unlikely(!bfqd) || likely(bic->ioprio == ioprio)) in bfq_check_ioprio_change()
5578 bic->ioprio = ioprio; in bfq_check_ioprio_change()
5600 bfqq->actuator_idx = act_idx; in bfq_init_bfqq()
5601 RB_CLEAR_NODE(&bfqq->entity.rb_node); in bfq_init_bfqq()
5602 INIT_LIST_HEAD(&bfqq->fifo); in bfq_init_bfqq()
5603 INIT_HLIST_NODE(&bfqq->burst_list_node); in bfq_init_bfqq()
5604 INIT_HLIST_NODE(&bfqq->woken_list_node); in bfq_init_bfqq()
5605 INIT_HLIST_HEAD(&bfqq->woken_list); in bfq_init_bfqq()
5607 bfqq->ref = 0; in bfq_init_bfqq()
5608 bfqq->bfqd = bfqd; in bfq_init_bfqq()
5628 bfqq->ttime.last_end_request = now_ns + 1; in bfq_init_bfqq()
5630 bfqq->creation_time = jiffies; in bfq_init_bfqq()
5632 bfqq->io_start_time = now_ns; in bfq_init_bfqq()
5636 bfqq->pid = pid; in bfq_init_bfqq()
5639 bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3; in bfq_init_bfqq()
5640 bfqq->budget_timeout = bfq_smallest_from_now(); in bfq_init_bfqq()
5642 bfqq->wr_coeff = 1; in bfq_init_bfqq()
5643 bfqq->last_wr_start_finish = jiffies; in bfq_init_bfqq()
5644 bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now(); in bfq_init_bfqq()
5645 bfqq->split_time = bfq_smallest_from_now(); in bfq_init_bfqq()
5648 * To not forget the possibly high bandwidth consumed by a in bfq_init_bfqq()
5651 * to the current value of bfqq->soft_rt_next_start (see in bfq_init_bfqq()
5656 bfqq->soft_rt_next_start = jiffies; in bfq_init_bfqq()
5659 bfqq->seek_history = 1; in bfq_init_bfqq()
5661 bfqq->decrease_time_jif = jiffies; in bfq_init_bfqq()
5670 return &bfqg->async_bfqq[0][ioprio][act_idx]; in bfq_async_queue_prio()
5675 return &bfqg->async_bfqq[1][ioprio][act_idx]; in bfq_async_queue_prio()
5677 return &bfqg->async_idle_bfqq[act_idx]; in bfq_async_queue_prio()
5688 unsigned int a_idx = last_bfqq_created->actuator_idx; in bfq_do_early_stable_merge()
5695 if (new_bfqq->bic) in bfq_do_early_stable_merge()
5696 new_bfqq->bic->bfqq_data[a_idx].stably_merged = true; in bfq_do_early_stable_merge()
5697 bic->bfqq_data[a_idx].stably_merged = true; in bfq_do_early_stable_merge()
5701 * bfqq->bic must be set too, for in bfq_do_early_stable_merge()
5705 bfqq->bic = bic; in bfq_do_early_stable_merge()
5710 * Many throughput-sensitive workloads are made of several parallel
5717 * To avoid this plugging, BFQ has been using a burst-handling
5719 * throughput, and not detrimental for service guarantees. The
5726 * throughput of the flows and task-wide I/O latency. In particular,
5742 * - very little time has elapsed since when Q1 was created
5743 * - Q2 has the same ioprio as Q1
5744 * - Q2 belongs to the same group as Q1
5747 * with ten random readers on /dev/nullb shows a throughput boost of
5749 * the total per-request processing time, the above throughput boost
5753 * burst-handling heuristics. We keep those heuristics for the moment.
5759 struct bfq_queue **source_bfqq = bfqq->entity.parent ? in bfq_do_or_sched_stable_merge()
5760 &bfqq->entity.parent->last_bfqq_created : in bfq_do_or_sched_stable_merge()
5761 &bfqd->last_bfqq_created; in bfq_do_or_sched_stable_merge()
5776 * underutilized, and throughput may decrease. in bfq_do_or_sched_stable_merge()
5780 * throughput-beneficial if not merged. Currently this is in bfq_do_or_sched_stable_merge()
5782 * such a drive, not merging bfqq is better for throughput if in bfq_do_or_sched_stable_merge()
5789 time_before(last_bfqq_created->creation_time + in bfq_do_or_sched_stable_merge()
5791 bfqq->creation_time) || in bfq_do_or_sched_stable_merge()
5792 bfqq->entity.parent != last_bfqq_created->entity.parent || in bfq_do_or_sched_stable_merge()
5793 bfqq->ioprio != last_bfqq_created->ioprio || in bfq_do_or_sched_stable_merge()
5794 bfqq->ioprio_class != last_bfqq_created->ioprio_class || in bfq_do_or_sched_stable_merge()
5795 bfqq->actuator_idx != last_bfqq_created->actuator_idx) in bfq_do_or_sched_stable_merge()
5797 else if (time_after_eq(last_bfqq_created->creation_time + in bfq_do_or_sched_stable_merge()
5798 bfqd->bfq_burst_interval, in bfq_do_or_sched_stable_merge()
5799 bfqq->creation_time)) { in bfq_do_or_sched_stable_merge()
5800 if (likely(bfqd->nonrot_with_queueing)) in bfq_do_or_sched_stable_merge()
5804 * throughput benefits compared with in bfq_do_or_sched_stable_merge()
5816 last_bfqq_created->ref++; in bfq_do_or_sched_stable_merge()
5821 last_bfqq_created->stable_ref++; in bfq_do_or_sched_stable_merge()
5825 bic->bfqq_data[last_bfqq_created->actuator_idx].stable_merge_bfqq = in bfq_do_or_sched_stable_merge()
5839 const int ioprio = IOPRIO_PRIO_LEVEL(bic->ioprio); in bfq_get_queue()
5840 const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio); in bfq_get_queue()
5857 bfqd->queue->node); in bfq_get_queue()
5860 bfq_init_bfqq(bfqd, bfqq, bic, current->pid, in bfq_get_queue()
5862 bfq_init_entity(&bfqq->entity, bfqg); in bfq_get_queue()
5865 bfqq = &bfqd->oom_bfqq; in bfq_get_queue()
5875 bfqq->ref++; /* in bfq_get_queue()
5878 * only if bfqq->bfqg disappears, to in bfq_get_queue()
5883 bfqq, bfqq->ref); in bfq_get_queue()
5888 bfqq->ref++; /* get a process reference to this queue */ in bfq_get_queue()
5890 if (bfqq != &bfqd->oom_bfqq && is_sync && !respawn) in bfq_get_queue()
5898 struct bfq_ttime *ttime = &bfqq->ttime; in bfq_update_io_thinktime()
5906 if (bfqq->dispatched || bfq_bfqq_busy(bfqq)) in bfq_update_io_thinktime()
5908 elapsed = blk_time_get_ns() - bfqq->ttime.last_end_request; in bfq_update_io_thinktime()
5909 elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle); in bfq_update_io_thinktime()
5911 ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8; in bfq_update_io_thinktime()
5912 ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8); in bfq_update_io_thinktime()
5913 ttime->ttime_mean = div64_ul(ttime->ttime_total + 128, in bfq_update_io_thinktime()
5914 ttime->ttime_samples); in bfq_update_io_thinktime()
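
/*
 * Illustrative sketch (not kernel code) of the fixed-point exponential
 * average computed above: both the sample count and the accumulated think
 * time decay by 7/8 on each update, with 256 acting as the fixed-point
 * scale (the sample count converges toward 256), so the mean think time
 * is simply total/samples in integer arithmetic.
 */
#include <stdint.h>

struct sketch_ttime {
	uint64_t samples;	/* converges toward 256 */
	uint64_t total;		/* ns, scaled like samples */
	uint64_t mean;		/* ns */
};

static void sketch_update_ttime(struct sketch_ttime *t, uint64_t elapsed_ns)
{
	t->samples = (7 * t->samples + 256) / 8;
	t->total = (7 * t->total + 256 * elapsed_ns) / 8;
	t->mean = (t->total + 128) / t->samples;
}
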
5921 bfqq->seek_history <<= 1; in bfq_update_io_seektime()
5922 bfqq->seek_history |= BFQ_RQ_SEEKY(bfqd, bfqq->last_request_pos, rq); in bfq_update_io_seektime()
5924 if (bfqq->wr_coeff > 1 && in bfq_update_io_seektime()
5925 bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && in bfq_update_io_seektime()
5927 if (time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt + in bfq_update_io_seektime()
5931 * interactive-weight-raising period in bfq_update_io_seektime()
5943 bfqq->entity.prio_changed = 1; in bfq_update_io_seektime()
5960 bfqd->bfq_slice_idle == 0) in bfq_update_has_short_ttime()
5964 if (time_is_after_eq_jiffies(bfqq->split_time + in bfq_update_has_short_ttime()
5965 bfqd->bfq_wr_min_idle_time)) in bfq_update_has_short_ttime()
5971 * think time with half the I/O-plugging timeout. in bfq_update_has_short_ttime()
5973 if (atomic_read(&bic->icq.ioc->active_ref) == 0 || in bfq_update_has_short_ttime()
5974 (bfq_sample_valid(bfqq->ttime.ttime_samples) && in bfq_update_has_short_ttime()
5975 bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle>>1)) in bfq_update_has_short_ttime()
5988 * the think-time state (short|long). In particular, the limit in bfq_update_has_short_ttime()
5992 * instructions reset the inject limit if the think-time state in bfq_update_has_short_ttime()
6011 * I/O-dispatch-plugging, then bfqq remains empty, and no I/O in bfq_update_has_short_ttime()
6014 * and in a severe loss of total throughput. in bfq_update_has_short_ttime()
6016 * On the opposite end, a non-zero inject limit may allow the in bfq_update_has_short_ttime()
6021 * next think-time sample for bfqq may be very low. This in in bfq_update_has_short_ttime()
6029 * of such a steady oscillation between the two think-time in bfq_update_has_short_ttime()
6040 * performed at all times, and throughput gets boosted. in bfq_update_has_short_ttime()
6055 * more frequently than once per I/O-plugging timeout, makes in bfq_update_has_short_ttime()
6059 * to boost throughput more effectively, by injecting the I/O in bfq_update_has_short_ttime()
6069 if (state_changed && bfqq->last_serv_time_ns == 0 && in bfq_update_has_short_ttime()
6070 (time_is_before_eq_jiffies(bfqq->decrease_time_jif + in bfq_update_has_short_ttime()
6083 if (rq->cmd_flags & REQ_META) in bfq_rq_enqueued()
6084 bfqq->meta_pending++; in bfq_rq_enqueued()
6086 bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); in bfq_rq_enqueued()
6088 if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) { in bfq_rq_enqueued()
6089 bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 && in bfq_rq_enqueued()
6095 * - the request is small, and in bfq_rq_enqueued()
6096 * - we are idling to boost throughput, and in bfq_rq_enqueued()
6097 * - the queue is not to be expired, in bfq_rq_enqueued()
6101 * for a new request from the in-service queue, we in bfq_rq_enqueued()
6121 hrtimer_try_to_cancel(&bfqd->idle_slice_timer); in bfq_rq_enqueued()
6138 struct bfq_entity *entity = &bfqq->entity; in bfqq_request_allocated()
6141 entity->allocated++; in bfqq_request_allocated()
6146 struct bfq_entity *entity = &bfqq->entity; in bfqq_request_freed()
6149 entity->allocated--; in bfqq_request_freed()
6168 new_bfqq->ref++; in __bfq_insert_request()
6178 bfq_actuator_index(bfqd, rq->bio)) == bfqq) { in __bfq_insert_request()
6189 rq->elv.priv[1] = new_bfqq; in __bfq_insert_request()
6200 rq->fifo_time = blk_time_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)]; in __bfq_insert_request()
6201 list_add_tail(&rq->queuelist, &bfqq->fifo); in __bfq_insert_request()
6227 spin_lock_irq(&q->queue_lock); in bfq_update_insert_stats()
6231 spin_unlock_irq(&q->queue_lock); in bfq_update_insert_stats()
6245 struct request_queue *q = hctx->queue; in bfq_insert_request()
6246 struct bfq_data *bfqd = q->elevator->elevator_data; in bfq_insert_request()
6253 if (!cgroup_subsys_on_dfl(io_cgrp_subsys) && rq->bio) in bfq_insert_request()
6256 spin_lock_irq(&bfqd->lock); in bfq_insert_request()
6259 spin_unlock_irq(&bfqd->lock); in bfq_insert_request()
6267 list_add(&rq->queuelist, &bfqd->dispatch); in bfq_insert_request()
6269 list_add_tail(&rq->queuelist, &bfqd->dispatch); in bfq_insert_request()
6281 if (!q->last_merge) in bfq_insert_request()
6282 q->last_merge = rq; in bfq_insert_request()
6291 cmd_flags = rq->cmd_flags; in bfq_insert_request()
6292 spin_unlock_irq(&bfqd->lock); in bfq_insert_request()
6306 list_del_init(&rq->queuelist); in bfq_insert_requests()
6313 struct bfq_queue *bfqq = bfqd->in_service_queue; in bfq_update_hw_tag()
6315 bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver, in bfq_update_hw_tag()
6316 bfqd->tot_rq_in_driver); in bfq_update_hw_tag()
6318 if (bfqd->hw_tag == 1) in bfq_update_hw_tag()
6327 if (bfqd->tot_rq_in_driver + bfqd->queued <= BFQ_HW_QUEUE_THRESHOLD) in bfq_update_hw_tag()
6336 bfqq->dispatched + bfqq->queued[0] + bfqq->queued[1] < in bfq_update_hw_tag()
6338 bfqd->tot_rq_in_driver < BFQ_HW_QUEUE_THRESHOLD) in bfq_update_hw_tag()
6341 if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES) in bfq_update_hw_tag()
6344 bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD; in bfq_update_hw_tag()
6345 bfqd->max_rq_in_driver = 0; in bfq_update_hw_tag()
6346 bfqd->hw_tag_samples = 0; in bfq_update_hw_tag()
6348 bfqd->nonrot_with_queueing = in bfq_update_hw_tag()
6349 blk_queue_nonrot(bfqd->queue) && bfqd->hw_tag; in bfq_update_hw_tag()
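
/*
 * Illustrative sketch (not kernel code) of the sampling heuristic above:
 * track the maximum number of requests observed in the drive over a
 * window of completions and declare the device "queueing" (hw_tag) only
 * if that maximum ever exceeded a small threshold. The window and
 * threshold values below are hypothetical.
 */
struct sketch_hw_tag_state {
	int max_in_driver;
	int samples;
	int hw_tag;	/* -1 unknown, 0 no queueing, 1 queueing */
};

static void sketch_update_hw_tag(struct sketch_hw_tag_state *s,
				 int rqs_in_driver)
{
	const int threshold = 4, window = 32;

	if (rqs_in_driver > s->max_in_driver)
		s->max_in_driver = rqs_in_driver;

	if (s->hw_tag == 1 || ++s->samples < window)
		return;

	s->hw_tag = s->max_in_driver > threshold;
	s->max_in_driver = 0;
	s->samples = 0;
}
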
6359 bfqd->rq_in_driver[bfqq->actuator_idx]--; in bfq_completed_request()
6360 bfqd->tot_rq_in_driver--; in bfq_completed_request()
6361 bfqq->dispatched--; in bfq_completed_request()
6363 if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) { in bfq_completed_request()
6367 * no outstanding request; used by the weight-raising in bfq_completed_request()
6370 bfqq->budget_timeout = jiffies; in bfq_completed_request()
6378 bfqq->ttime.last_end_request = now_ns; in bfq_completed_request()
6384 delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC); in bfq_completed_request()
6395 * - close the observation interval at the last (previous) in bfq_completed_request()
6397 * - compute rate, if possible, for that observation interval in bfq_completed_request()
6398 * - reset to zero samples, which will trigger a proper in bfq_completed_request()
6399 * re-initialization of the observation interval on next in bfq_completed_request()
6403 (bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us < in bfq_completed_request()
6404 1UL<<(BFQ_RATE_SHIFT - 10)) in bfq_completed_request()
6406 bfqd->last_completion = now_ns; in bfq_completed_request()
6408 * Shared queues are likely to receive I/O at a high in bfq_completed_request()
6413 * control troubles than throughput benefits. Then reset in bfq_completed_request()
6417 bfqd->last_completed_rq_bfqq = bfqq; in bfq_completed_request()
6419 bfqd->last_completed_rq_bfqq = NULL; in bfq_completed_request()
6430 * expires, if it still has in-flight requests. in bfq_completed_request()
6432 if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 && in bfq_completed_request()
6433 RB_EMPTY_ROOT(&bfqq->sort_list) && in bfq_completed_request()
6434 bfqq->wr_coeff != bfqd->bfq_wr_coeff) in bfq_completed_request()
6435 bfqq->soft_rt_next_start = in bfq_completed_request()
6439 * If this is the in-service queue, check if it needs to be expired, in bfq_completed_request()
6442 if (bfqd->in_service_queue == bfqq) { in bfq_completed_request()
6444 if (bfqq->dispatched == 0) in bfq_completed_request()
6453 * Here bfqq->dispatched > 0 holds, but in bfq_completed_request()
6456 * for bfqq before bfqq->dispatched reaches 0, in bfq_completed_request()
6458 * completion event that causes bfqq->dispatch in bfq_completed_request()
6461 * (I/O-dispatch plugging). in bfq_completed_request()
6465 * when bfqq->dispatched finally reaches in bfq_completed_request()
6473 else if (RB_EMPTY_ROOT(&bfqq->sort_list) && in bfq_completed_request()
6474 (bfqq->dispatched == 0 || in bfq_completed_request()
6480 if (!bfqd->tot_rq_in_driver) in bfq_completed_request()
6492 * and the throughput is not affected. In contrast, if BFQ is not
6493 * allowed to switch to another queue---because bfqq is sync and
6494 * I/O-dispatch needs to be plugged while bfqq is temporarily
6495 * empty---then, during the service of bfqq, there will be frequent
6503 * To counter this loss of throughput, BFQ implements a "request
6507 * both boost throughput and not break bfqq's bandwidth and latency
6508 * guarantees. In this respect, the mechanism maintains a per-queue
6511 * of I/O requests in flight---i.e., already dispatched but not yet
6512 * completed---remains lower than this limit.
6517 * service, and causes bfqq to switch from empty to non-empty. The
6542 * The limit-update algorithm works as follows. On the arrival of a
6547 * (1) If there is no in-flight request. This gives a baseline for the
6550 * set to 1, to start boosting throughput, and to prepare the
6555 * (2) If the limit is higher than 0 and there are in-flight
6591 u64 tot_time_ns = blk_time_get_ns() - bfqd->last_empty_occupied_ns; in bfq_update_inject_limit()
6592 unsigned int old_limit = bfqq->inject_limit; in bfq_update_inject_limit()
6594 if (bfqq->last_serv_time_ns > 0 && bfqd->rqs_injected) { in bfq_update_inject_limit()
6595 u64 threshold = (bfqq->last_serv_time_ns * 3)>>1; in bfq_update_inject_limit()
6598 bfqq->inject_limit--; in bfq_update_inject_limit()
6599 bfqq->decrease_time_jif = jiffies; in bfq_update_inject_limit()
6601 old_limit <= bfqd->max_rq_in_driver) in bfq_update_inject_limit()
6602 bfqq->inject_limit++; in bfq_update_inject_limit()
6611 * NOTE: (bfqd->tot_rq_in_driver == 1) means that there is no I/O in bfq_update_inject_limit()
6615 * bfqd->tot_rq_in_driver is decremented in such a code path. in bfq_update_inject_limit()
6617 if ((bfqq->last_serv_time_ns == 0 && bfqd->tot_rq_in_driver == 1) || in bfq_update_inject_limit()
6618 tot_time_ns < bfqq->last_serv_time_ns) { in bfq_update_inject_limit()
6619 if (bfqq->last_serv_time_ns == 0) { in bfq_update_inject_limit()
6624 bfqq->inject_limit = max_t(unsigned int, 1, old_limit); in bfq_update_inject_limit()
6626 bfqq->last_serv_time_ns = tot_time_ns; in bfq_update_inject_limit()
6627 } else if (!bfqd->rqs_injected && bfqd->tot_rq_in_driver == 1) in bfq_update_inject_limit()
6637 bfqq->last_serv_time_ns = tot_time_ns; in bfq_update_inject_limit()
6641 bfqd->waited_rq = NULL; in bfq_update_inject_limit()
6642 bfqd->rqs_injected = false; in bfq_update_inject_limit()
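
/*
 * Illustrative, self-contained sketch (not kernel code) of the
 * limit-update rule implemented above: compare the just-measured total
 * service time against 3/2 of the baseline (the service time observed
 * with no injected I/O); shrink the limit when injection made the queue's
 * own I/O noticeably slower, grow it when there was still headroom, and
 * adopt a lower (cleaner) measurement as the new baseline. Struct and
 * field names are made up for the example.
 */
#include <stdint.h>

struct sketch_inject_state {
	unsigned int limit;	/* max requests injected while waiting */
	uint64_t baseline_ns;	/* service time seen with no injection */
};

static void sketch_update_inject_limit(struct sketch_inject_state *s,
				       uint64_t measured_ns,
				       unsigned int rqs_in_driver,
				       int injection_happened)
{
	if (s->baseline_ns && injection_happened) {
		uint64_t threshold = (s->baseline_ns * 3) >> 1;

		if (measured_ns > threshold && s->limit > 0)
			s->limit--;		/* injection hurt the queue */
		else if (measured_ns <= threshold &&
			 s->limit <= rqs_in_driver)
			s->limit++;		/* still headroom: inject more */
	}

	/* refresh the baseline on a first or lower measurement */
	if ((s->baseline_ns == 0 && rqs_in_driver == 1) ||
	    measured_ns < s->baseline_ns) {
		if (s->baseline_ns == 0)
			s->limit = s->limit ? s->limit : 1; /* start boosting */
		s->baseline_ns = measured_ns;
	}
}
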
6659 * requeued request that has not (yet) been re-inserted into in bfq_finish_requeue_request()
6662 if (!rq->elv.icq || !bfqq) in bfq_finish_requeue_request()
6665 bfqd = bfqq->bfqd; in bfq_finish_requeue_request()
6667 if (rq->rq_flags & RQF_STARTED) in bfq_finish_requeue_request()
6669 rq->start_time_ns, in bfq_finish_requeue_request()
6670 rq->io_start_time_ns, in bfq_finish_requeue_request()
6671 rq->cmd_flags); in bfq_finish_requeue_request()
6673 spin_lock_irqsave(&bfqd->lock, flags); in bfq_finish_requeue_request()
6674 if (likely(rq->rq_flags & RQF_STARTED)) { in bfq_finish_requeue_request()
6675 if (rq == bfqd->waited_rq) in bfq_finish_requeue_request()
6682 RQ_BIC(rq)->requests--; in bfq_finish_requeue_request()
6683 spin_unlock_irqrestore(&bfqd->lock, flags); in bfq_finish_requeue_request()
6690 * design would be to prevent blk-mq from invoking the requeue in bfq_finish_requeue_request()
6695 * request-insertion logic if rq is re-inserted into a bfq in bfq_finish_requeue_request()
6696 * internal queue, without a re-preparation. Here we assume in bfq_finish_requeue_request()
6697 * that re-insertions of requeued requests, without in bfq_finish_requeue_request()
6698 * re-preparation, can happen only for pass_through or at_head in bfq_finish_requeue_request()
6699 * requests (which are not re-inserted into bfq internal in bfq_finish_requeue_request()
6702 rq->elv.priv[0] = NULL; in bfq_finish_requeue_request()
6703 rq->elv.priv[1] = NULL; in bfq_finish_requeue_request()
6710 if (rq->elv.icq) { in bfq_finish_request()
6711 put_io_context(rq->elv.icq->ioc); in bfq_finish_request()
6712 rq->elv.icq = NULL; in bfq_finish_request()
6725 bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue"); in bfq_split_bfqq()
6727 if (bfqq_process_refs(bfqq) == 1 && !bfqq->new_bfqq) { in bfq_split_bfqq()
6728 bfqq->pid = current->pid; in bfq_split_bfqq()
6734 bic_set_bfqq(bic, NULL, true, bfqq->actuator_idx); in bfq_split_bfqq()
6747 struct bfq_iocq_bfqq_data *bfqq_data = &bic->bfqq_data[act_idx]; in __bfq_get_bfqq_handle_split()
6749 if (likely(bfqq && bfqq != &bfqd->oom_bfqq)) in __bfq_get_bfqq_handle_split()
6761 if ((bfqq_data->was_in_burst_list && bfqd->large_burst) || in __bfq_get_bfqq_handle_split()
6762 bfqq_data->saved_in_large_burst) in __bfq_get_bfqq_handle_split()
6766 if (bfqq_data->was_in_burst_list) in __bfq_get_bfqq_handle_split()
6795 hlist_add_head(&bfqq->burst_list_node, in __bfq_get_bfqq_handle_split()
6796 &bfqd->burst_list); in __bfq_get_bfqq_handle_split()
6798 bfqq->split_time = jiffies; in __bfq_get_bfqq_handle_split()
6812 rq->elv.icq = ioc_find_get_icq(rq->q); in bfq_prepare_request()
6819 rq->elv.priv[0] = rq->elv.priv[1] = NULL; in bfq_prepare_request()
6824 struct bfq_queue *new_bfqq = bfqq->new_bfqq; in bfq_waker_bfqq()
6825 struct bfq_queue *waker_bfqq = bfqq->waker_bfqq; in bfq_waker_bfqq()
6841 new_bfqq = new_bfqq->new_bfqq; in bfq_waker_bfqq()
6864 bic->bfqq_data[idx].stably_merged) in bfq_get_bfqq_handle_split()
6871 bic->bfqq_data[idx].saved_in_large_burst = true; in bfq_get_bfqq_handle_split()
6880 if (unlikely(bfqq == &bfqd->oom_bfqq)) in bfq_get_bfqq_handle_split()
6884 bfqq->waker_bfqq = waker_bfqq; in bfq_get_bfqq_handle_split()
6885 bfqq->tentative_waker_bfqq = NULL; in bfq_get_bfqq_handle_split()
6888 * If the waker queue disappears, then new_bfqq->waker_bfqq must be in bfq_get_bfqq_handle_split()
6894 hlist_add_head(&bfqq->woken_list_node, in bfq_get_bfqq_handle_split()
6895 &bfqq->waker_bfqq->woken_list); in bfq_get_bfqq_handle_split()
6925 struct request_queue *q = rq->q; in bfq_init_rq()
6926 struct bio *bio = rq->bio; in bfq_init_rq()
6927 struct bfq_data *bfqd = q->elevator->elevator_data; in bfq_init_rq()
6933 if (unlikely(!rq->elv.icq)) in bfq_init_rq()
6946 bic = icq_to_bic(rq->elv.icq); in bfq_init_rq()
6952 bfqq->ref++; in bfq_init_rq()
6953 bic->requests++; in bfq_init_rq()
6955 rq, bfqq, bfqq->ref); in bfq_init_rq()
6957 rq->elv.priv[0] = bic; in bfq_init_rq()
6958 rq->elv.priv[1] = bfqq; in bfq_init_rq()
6962 * by only this bic: we can then set bfqq->bic = bic. In in bfq_init_rq()
6966 if (likely(bfqq != &bfqd->oom_bfqq) && !bfqq->new_bfqq && in bfq_init_rq()
6968 bfqq->bic = bic; in bfq_init_rq()
6973 * 1) A burst is actually happening (bfqd->burst_size > 0) in bfq_init_rq()
6979 * therefore in not weight-raising bfqq. See comments on in bfq_init_rq()
6991 (bfqd->burst_size > 0 || in bfq_init_rq()
7004 spin_lock_irqsave(&bfqd->lock, flags); in bfq_idle_slice_timer_body()
7013 if (bfqq != bfqd->in_service_queue) { in bfq_idle_slice_timer_body()
7014 spin_unlock_irqrestore(&bfqd->lock, flags); in bfq_idle_slice_timer_body()
7027 else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0) in bfq_idle_slice_timer_body()
7031 * first request of the in-service queue arrives in bfq_idle_slice_timer_body()
7042 spin_unlock_irqrestore(&bfqd->lock, flags); in bfq_idle_slice_timer_body()
7046 * Handler of the expiration of the timer running if the in-service queue
7053 struct bfq_queue *bfqq = bfqd->in_service_queue; in bfq_idle_slice_timer()
7056 * Theoretical race here: the in-service queue can be NULL or in bfq_idle_slice_timer()
7059 * cycle that changes the in-service queue. This can hardly in bfq_idle_slice_timer()
7076 bfq_bfqq_move(bfqd, bfqq, bfqd->root_group); in __bfq_put_async_bfqq()
7079 bfqq, bfqq->ref); in __bfq_put_async_bfqq()
7095 for (k = 0; k < bfqd->num_actuators; k++) { in bfq_put_async_queues()
7098 __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j][k]); in bfq_put_async_queues()
7100 __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq[k]); in bfq_put_async_queues()
7110 unsigned int depth = 1U << bt->sb.shift; in bfq_update_depths()
7112 bfqd->full_depth_shift = bt->sb.shift; in bfq_update_depths()
7114 * In-word depths if no bfq_queue is being weight-raised: in bfq_update_depths()
7117 * In next formulas, right-shift the value in bfq_update_depths()
7118 * (1U<<bt->sb.shift), instead of computing directly in bfq_update_depths()
7119 * (1U<<(bt->sb.shift - something)), to be robust against in bfq_update_depths()
7120 * any possible value of bt->sb.shift, without having to in bfq_update_depths()
7124 bfqd->word_depths[0][0] = max(depth >> 1, 1U); in bfq_update_depths()
7130 bfqd->word_depths[0][1] = max((depth * 3) >> 2, 1U); in bfq_update_depths()
7133 * In-word depths in case some bfq_queue is being weight- in bfq_update_depths()
7136 * start-up times didn't suffer from any regression due to tag in bfq_update_depths()
7140 bfqd->word_depths[1][0] = max((depth * 3) >> 4, 1U); in bfq_update_depths()
7142 bfqd->word_depths[1][1] = max((depth * 6) >> 4, 1U); in bfq_update_depths()
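/*
 * Editor's worked example (not kernel code): with a sched_tags bitmap of
 * shift 6, i.e. depth = 64, the formulas above give
 *
 *   word_depths[0][0] = max(64 >> 1, 1)       = 32   (50% of the tags)
 *   word_depths[0][1] = max((64 * 3) >> 2, 1) = 48   (75%)
 *   word_depths[1][0] = max((64 * 3) >> 4, 1) = 12   (~19%)
 *   word_depths[1][1] = max((64 * 6) >> 4, 1) = 24   (37.5%)
 *
 * The first index distinguishes "no queue weight-raised" (0) from "some
 * queue weight-raised" (1); in the full source the second index appears to
 * separate async I/O from sync writes, so weight-raised queues are left a
 * much larger share of the tags.  The program below just re-derives the
 * numbers.
 */
#include <stdio.h>

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned int depth = 64;	/* example: bt->sb.shift == 6 */
	unsigned int d[2][2];

	d[0][0] = max_u(depth >> 1, 1U);
	d[0][1] = max_u((depth * 3) >> 2, 1U);
	d[1][0] = max_u((depth * 3) >> 4, 1U);
	d[1][1] = max_u((depth * 6) >> 4, 1U);

	printf("%u %u %u %u\n", d[0][0], d[0][1], d[1][0], d[1][1]);
	return 0;
}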
7147 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; in bfq_depth_updated()
7148 struct blk_mq_tags *tags = hctx->sched_tags; in bfq_depth_updated()
7150 bfq_update_depths(bfqd, &tags->bitmap_tags); in bfq_depth_updated()
7151 sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, 1); in bfq_depth_updated()
7162 struct bfq_data *bfqd = e->elevator_data; in bfq_exit_queue()
7166 hrtimer_cancel(&bfqd->idle_slice_timer); in bfq_exit_queue()
7168 spin_lock_irq(&bfqd->lock); in bfq_exit_queue()
7169 list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) in bfq_exit_queue()
7171 spin_unlock_irq(&bfqd->lock); in bfq_exit_queue()
7173 for (actuator = 0; actuator < bfqd->num_actuators; actuator++) in bfq_exit_queue()
7174 WARN_ON_ONCE(bfqd->rq_in_driver[actuator]); in bfq_exit_queue()
7175 WARN_ON_ONCE(bfqd->tot_rq_in_driver); in bfq_exit_queue()
7177 hrtimer_cancel(&bfqd->idle_slice_timer); in bfq_exit_queue()
7179 /* release oom-queue reference to root group */ in bfq_exit_queue()
7180 bfqg_and_blkg_put(bfqd->root_group); in bfq_exit_queue()
7183 blkcg_deactivate_policy(bfqd->queue->disk, &blkcg_policy_bfq); in bfq_exit_queue()
7185 spin_lock_irq(&bfqd->lock); in bfq_exit_queue()
7186 bfq_put_async_queues(bfqd, bfqd->root_group); in bfq_exit_queue()
7187 kfree(bfqd->root_group); in bfq_exit_queue()
7188 spin_unlock_irq(&bfqd->lock); in bfq_exit_queue()
7191 blk_stat_disable_accounting(bfqd->queue); in bfq_exit_queue()
7192 clear_bit(ELEVATOR_FLAG_DISABLE_WBT, &e->flags); in bfq_exit_queue()
7193 wbt_enable_default(bfqd->queue->disk); in bfq_exit_queue()
7204 root_group->entity.parent = NULL; in bfq_init_root_group()
7205 root_group->my_entity = NULL; in bfq_init_root_group()
7206 root_group->bfqd = bfqd; in bfq_init_root_group()
7208 root_group->rq_pos_tree = RB_ROOT; in bfq_init_root_group()
7210 root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT; in bfq_init_root_group()
7211 root_group->sched_data.bfq_class_idle_last_service = jiffies; in bfq_init_root_group()
7219 struct blk_independent_access_ranges *ia_ranges = q->disk->ia_ranges; in bfq_init_queue()
7223 return -ENOMEM; in bfq_init_queue()
7225 bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node); in bfq_init_queue()
7227 kobject_put(&eq->kobj); in bfq_init_queue()
7228 return -ENOMEM; in bfq_init_queue()
7230 eq->elevator_data = bfqd; in bfq_init_queue()
7232 spin_lock_irq(&q->queue_lock); in bfq_init_queue()
7233 q->elevator = eq; in bfq_init_queue()
7234 spin_unlock_irq(&q->queue_lock); in bfq_init_queue()
7243 bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0, 0); in bfq_init_queue()
7244 bfqd->oom_bfqq.ref++; in bfq_init_queue()
7245 bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO; in bfq_init_queue()
7246 bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE; in bfq_init_queue()
7247 bfqd->oom_bfqq.entity.new_weight = in bfq_init_queue()
7248 bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio); in bfq_init_queue()
7251 bfq_clear_bfqq_just_created(&bfqd->oom_bfqq); in bfq_init_queue()
7258 bfqd->oom_bfqq.entity.prio_changed = 1; in bfq_init_queue()
7260 bfqd->queue = q; in bfq_init_queue()
7262 bfqd->num_actuators = 1; in bfq_init_queue()
7267 spin_lock_irq(&q->queue_lock); in bfq_init_queue()
7273 if (ia_ranges->nr_ia_ranges > BFQ_MAX_ACTUATORS) { in bfq_init_queue()
7275 ia_ranges->nr_ia_ranges, BFQ_MAX_ACTUATORS); in bfq_init_queue()
7278 bfqd->num_actuators = ia_ranges->nr_ia_ranges; in bfq_init_queue()
7280 for (i = 0; i < bfqd->num_actuators; i++) { in bfq_init_queue()
7281 bfqd->sector[i] = ia_ranges->ia_range[i].sector; in bfq_init_queue()
7282 bfqd->nr_sectors[i] = in bfq_init_queue()
7283 ia_ranges->ia_range[i].nr_sectors; in bfq_init_queue()
7288 /* Otherwise use single-actuator dev info */ in bfq_init_queue()
7289 if (bfqd->num_actuators == 1) { in bfq_init_queue()
7290 bfqd->sector[0] = 0; in bfq_init_queue()
7291 bfqd->nr_sectors[0] = get_capacity(q->disk); in bfq_init_queue()
7293 spin_unlock_irq(&q->queue_lock); in bfq_init_queue()
7295 INIT_LIST_HEAD(&bfqd->dispatch); in bfq_init_queue()
7297 hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC, in bfq_init_queue()
7299 bfqd->idle_slice_timer.function = bfq_idle_slice_timer; in bfq_init_queue()
7301 bfqd->queue_weights_tree = RB_ROOT_CACHED; in bfq_init_queue()
7303 bfqd->num_groups_with_pending_reqs = 0; in bfq_init_queue()
7306 INIT_LIST_HEAD(&bfqd->active_list[0]); in bfq_init_queue()
7307 INIT_LIST_HEAD(&bfqd->active_list[1]); in bfq_init_queue()
7308 INIT_LIST_HEAD(&bfqd->idle_list); in bfq_init_queue()
7309 INIT_HLIST_HEAD(&bfqd->burst_list); in bfq_init_queue()
7311 bfqd->hw_tag = -1; in bfq_init_queue()
7312 bfqd->nonrot_with_queueing = blk_queue_nonrot(bfqd->queue); in bfq_init_queue()
7314 bfqd->bfq_max_budget = bfq_default_max_budget; in bfq_init_queue()
7316 bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0]; in bfq_init_queue()
7317 bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1]; in bfq_init_queue()
7318 bfqd->bfq_back_max = bfq_back_max; in bfq_init_queue()
7319 bfqd->bfq_back_penalty = bfq_back_penalty; in bfq_init_queue()
7320 bfqd->bfq_slice_idle = bfq_slice_idle; in bfq_init_queue()
7321 bfqd->bfq_timeout = bfq_timeout; in bfq_init_queue()
7323 bfqd->bfq_large_burst_thresh = 8; in bfq_init_queue()
7324 bfqd->bfq_burst_interval = msecs_to_jiffies(180); in bfq_init_queue()
7326 bfqd->low_latency = true; in bfq_init_queue()
7329 * Trade-off between responsiveness and fairness. in bfq_init_queue()
7331 bfqd->bfq_wr_coeff = 30; in bfq_init_queue()
7332 bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300); in bfq_init_queue()
7333 bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000); in bfq_init_queue()
7334 bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500); in bfq_init_queue()
7335 bfqd->bfq_wr_max_softrt_rate = 7000; /* in bfq_init_queue()
7338 * high-definition compressed in bfq_init_queue()
7341 bfqd->wr_busy_queues = 0; in bfq_init_queue()
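/*
 * Editor's worked example (not kernel code): bfq_wr_max_softrt_rate is
 * expressed in sectors per second, so assuming 512-byte sectors
 *
 *   7000 sectors/s * 512 B = 3,584,000 B/s  (~3.4 MiB/s),
 *
 * roughly the bandwidth of high-definition compressed video playback, which
 * is what the in-line comment above refers to.  Likewise, bfq_wr_coeff = 30
 * means a weight-raised queue temporarily competes as if its weight were
 * multiplied by 30.  The snippet below just re-derives the byte rate.
 */
#include <stdio.h>

int main(void)
{
	unsigned long sectors_per_sec = 7000;	/* bfq_wr_max_softrt_rate */
	unsigned long bytes_per_sec = sectors_per_sec * 512;

	printf("%lu B/s (~%.1f MiB/s)\n",
	       bytes_per_sec, bytes_per_sec / (1024.0 * 1024.0));
	return 0;
}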
7347 bfqd->rate_dur_prod = ref_rate[blk_queue_nonrot(bfqd->queue)] * in bfq_init_queue()
7348 ref_wr_duration[blk_queue_nonrot(bfqd->queue)]; in bfq_init_queue()
7349 bfqd->peak_rate = ref_rate[blk_queue_nonrot(bfqd->queue)] * 2 / 3; in bfq_init_queue()
7352 bfqd->actuator_load_threshold = 4; in bfq_init_queue()
7354 spin_lock_init(&bfqd->lock); in bfq_init_queue()
7359 * (bfq_create_group_hierarchy->blkcg_activate_policy-> in bfq_init_queue()
7367 * other inconsistencies, the blk-mq stack must then refrain in bfq_init_queue()
7371 bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node); in bfq_init_queue()
7372 if (!bfqd->root_group) in bfq_init_queue()
7374 bfq_init_root_group(bfqd->root_group, bfqd); in bfq_init_queue()
7375 bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group); in bfq_init_queue()
7380 set_bit(ELEVATOR_FLAG_DISABLE_WBT, &eq->flags); in bfq_init_queue()
7381 wbt_disable_default(q->disk); in bfq_init_queue()
7388 kobject_put(&eq->kobj); in bfq_init_queue()
7389 return -ENOMEM; in bfq_init_queue()
7401 return -ENOMEM; in bfq_slab_setup()
7424 struct bfq_data *bfqd = e->elevator_data; \
7432 SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2);
7433 SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2);
7434 SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
7435 SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
7436 SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2);
7437 SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
7438 SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1);
7439 SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0);
7440 SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
7446 struct bfq_data *bfqd = e->elevator_data; \
7451 USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle);
7458 struct bfq_data *bfqd = e->elevator_data; \
7477 STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
7479 STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
7481 STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
7482 STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
7484 STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2);
7490 struct bfq_data *bfqd = e->elevator_data; \
7504 USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0,
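/*
 * Editor's sketch (not kernel code): the third argument of the SHOW/STORE
 * macros above selects a unit conversion for the sysfs tunables.  In the
 * full macro bodies (not shown here) flag 0 appears to report the raw value,
 * flag 1 to convert jiffies to milliseconds, and flag 2 to convert
 * nanoseconds to milliseconds, while the *_us variants divide by 1000
 * instead.  Assuming the default bfq_slice_idle of 8 ms, the two sysfs views
 * of the same stored value would be rendered as below.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t slice_idle_ns = 8000000;	/* 8 ms, stored in nanoseconds */

	printf("slice_idle    = %llu ms\n",
	       (unsigned long long)(slice_idle_ns / 1000000));
	printf("slice_idle_us = %llu us\n",
	       (unsigned long long)(slice_idle_ns / 1000));
	return 0;
}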
7511 struct bfq_data *bfqd = e->elevator_data; in bfq_max_budget_store()
7520 bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd); in bfq_max_budget_store()
7524 bfqd->bfq_max_budget = __data; in bfq_max_budget_store()
7527 bfqd->bfq_user_max_budget = __data; in bfq_max_budget_store()
7539 struct bfq_data *bfqd = e->elevator_data; in bfq_timeout_sync_store()
7552 bfqd->bfq_timeout = msecs_to_jiffies(__data); in bfq_timeout_sync_store()
7553 if (bfqd->bfq_user_max_budget == 0) in bfq_timeout_sync_store()
7554 bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd); in bfq_timeout_sync_store()
7562 struct bfq_data *bfqd = e->elevator_data; in bfq_strict_guarantees_store()
7572 if (!bfqd->strict_guarantees && __data == 1 in bfq_strict_guarantees_store()
7573 && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC) in bfq_strict_guarantees_store()
7574 bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC; in bfq_strict_guarantees_store()
7576 bfqd->strict_guarantees = __data; in bfq_strict_guarantees_store()
7584 struct bfq_data *bfqd = e->elevator_data; in bfq_low_latency_store()
7594 if (__data == 0 && bfqd->low_latency != 0) in bfq_low_latency_store()
7596 bfqd->low_latency = __data; in bfq_low_latency_store()
7647 MODULE_ALIAS("bfq-iosched");
7659 ret = -ENOMEM; in bfq_init()
7673 * scheduler cannot rely on a peak-rate-evaluation workload to in bfq_init()