Lines Matching +full:pd +full:- +full:node

1 // SPDX-License-Identifier: GPL-2.0
14 #include "blk-cgroup-rwstat.h"
15 #include "blk-throttle.h"
29 #define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node) argument
49 return pd_to_blkg(&tg->pd); in tg_to_blkg()
53  * sq_to_tg - return the throtl_grp the specified service queue belongs to
56 * Return the throtl_grp @sq belongs to. If @sq is the top-level one
61 if (sq && sq->parent_sq) in sq_to_tg()
68  * sq_to_td - return the throtl_data the specified service queue belongs to
79 return tg->td; in sq_to_td()
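Only the matching lines of these two helpers survive the filter above. A minimal reconstruction, assuming the layout the fragments imply (struct throtl_grp embeds its service_queue, and the top-level service queue is embedded in struct throtl_data with no parent_sq):

static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
{
        /* the top-level service queue belongs to no throtl_grp */
        if (sq && sq->parent_sq)
                return container_of(sq, struct throtl_grp, service_queue);
        return NULL;
}

static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
{
        struct throtl_grp *tg = sq_to_tg(sq);

        /* NULL here means @sq is the one embedded in throtl_data */
        if (tg)
                return tg->td;
        return container_of(sq, struct throtl_data, service_queue);
}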
88 if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent) in tg_bps_limit()
91 return tg->bps[rw]; in tg_bps_limit()
98 if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent) in tg_iops_limit()
101 return tg->iops[rw]; in tg_iops_limit()
105 * throtl_log - log debug message via blktrace
118 if (likely(!blk_trace_note_message_enabled(__td->queue))) \
121 blk_add_cgroup_trace_msg(__td->queue, \
122 &tg_to_blkg(__tg)->blkcg->css, "throtl " fmt, ##args);\
124 blk_add_trace_msg(__td->queue, "throtl " fmt, ##args); \
133 return bio->bi_iter.bi_size; in throtl_bio_data_size()
138 INIT_LIST_HEAD(&qn->node); in throtl_qnode_init()
139 bio_list_init(&qn->bios_bps); in throtl_qnode_init()
140 bio_list_init(&qn->bios_iops); in throtl_qnode_init()
141 qn->tg = tg; in throtl_qnode_init()
145 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
150 * Add @bio to @qn and put @qn on @sq->queued if it's not already on.
151 * @qn->tg's reference count is bumped when @qn is activated. See the
165 bio_list_add(&qn->bios_iops, bio); in throtl_qnode_add_bio()
166 sq->nr_queued_iops[rw]++; in throtl_qnode_add_bio()
168 bio_list_add(&qn->bios_bps, bio); in throtl_qnode_add_bio()
169 sq->nr_queued_bps[rw]++; in throtl_qnode_add_bio()
172 if (list_empty(&qn->node)) { in throtl_qnode_add_bio()
173 list_add_tail(&qn->node, &sq->queued[rw]); in throtl_qnode_add_bio()
174 blkg_get(tg_to_blkg(qn->tg)); in throtl_qnode_add_bio()
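A sketch of the whole function around the fragments above. The predicate that picks between the two bio lists is not among the matching lines, so bio_already_bps_throttled() below is a hypothetical placeholder for it (a bio already charged against the bps budget skips straight to the iops list):

static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
                                 struct throtl_service_queue *sq)
{
        bool rw = bio_data_dir(bio);

        /* hypothetical placeholder predicate, see the note above */
        if (bio_already_bps_throttled(bio)) {
                bio_list_add(&qn->bios_iops, bio);
                sq->nr_queued_iops[rw]++;
        } else {
                bio_list_add(&qn->bios_bps, bio);
                sq->nr_queued_bps[rw]++;
        }

        /* first bio on this qnode: activate it and pin its tg */
        if (list_empty(&qn->node)) {
                list_add_tail(&qn->node, &sq->queued[rw]);
                blkg_get(tg_to_blkg(qn->tg));
        }
}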
179 * throtl_peek_queued - peek the first bio on a qnode list
194 qn = list_first_entry(queued, struct throtl_qnode, node); in throtl_peek_queued()
195 bio = bio_list_peek(&qn->bios_iops); in throtl_peek_queued()
197 bio = bio_list_peek(&qn->bios_bps); in throtl_peek_queued()
203  * throtl_pop_queued - pop the first bio from a qnode list
208  * Pop the first bio from the qnode list @sq->queued. Note that we first
210 * After popping, the first qnode is removed from @sq->queued if empty or moved
211 * to the end of @sq->queued so that the popping order is round-robin.
221 struct list_head *queued = &sq->queued[rw]; in throtl_pop_queued()
228 qn = list_first_entry(queued, struct throtl_qnode, node); in throtl_pop_queued()
229 bio = bio_list_pop(&qn->bios_iops); in throtl_pop_queued()
231 sq->nr_queued_iops[rw]--; in throtl_pop_queued()
233 bio = bio_list_pop(&qn->bios_bps); in throtl_pop_queued()
235 sq->nr_queued_bps[rw]--; in throtl_pop_queued()
239 if (bio_list_empty(&qn->bios_bps) && bio_list_empty(&qn->bios_iops)) { in throtl_pop_queued()
240 list_del_init(&qn->node); in throtl_pop_queued()
242 *tg_to_put = qn->tg; in throtl_pop_queued()
244 blkg_put(tg_to_blkg(qn->tg)); in throtl_pop_queued()
246 list_move_tail(&qn->node, queued); in throtl_pop_queued()
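Reading the fragments of throtl_pop_queued() together, the control flow between them can be inferred; a reconstruction of the round-robin pop (a sketch, not the verbatim source):

static struct bio *throtl_pop_queued(struct throtl_service_queue *sq,
                                     struct throtl_grp **tg_to_put, bool rw)
{
        struct list_head *queued = &sq->queued[rw];
        struct throtl_qnode *qn;
        struct bio *bio;

        if (list_empty(queued))
                return NULL;

        qn = list_first_entry(queued, struct throtl_qnode, node);

        /* drain the iops list first, fall back to the bps list */
        bio = bio_list_pop(&qn->bios_iops);
        if (bio) {
                sq->nr_queued_iops[rw]--;
        } else {
                bio = bio_list_pop(&qn->bios_bps);
                if (bio)
                        sq->nr_queued_bps[rw]--;
        }

        if (bio_list_empty(&qn->bios_bps) && bio_list_empty(&qn->bios_iops)) {
                /* empty qnode: deactivate it, hand back or drop the tg ref */
                list_del_init(&qn->node);
                if (tg_to_put)
                        *tg_to_put = qn->tg;
                else
                        blkg_put(tg_to_blkg(qn->tg));
        } else {
                /* rotate to the tail so the popping order stays round-robin */
                list_move_tail(&qn->node, queued);
        }

        return bio;
}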
255 INIT_LIST_HEAD(&sq->queued[READ]); in throtl_service_queue_init()
256 INIT_LIST_HEAD(&sq->queued[WRITE]); in throtl_service_queue_init()
257 sq->pending_tree = RB_ROOT_CACHED; in throtl_service_queue_init()
258 timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0); in throtl_service_queue_init()
267 tg = kzalloc_node(sizeof(*tg), gfp, disk->node_id); in throtl_pd_alloc()
271 if (blkg_rwstat_init(&tg->stat_bytes, gfp)) in throtl_pd_alloc()
274 if (blkg_rwstat_init(&tg->stat_ios, gfp)) in throtl_pd_alloc()
277 throtl_service_queue_init(&tg->service_queue); in throtl_pd_alloc()
280 throtl_qnode_init(&tg->qnode_on_self[rw], tg); in throtl_pd_alloc()
281 throtl_qnode_init(&tg->qnode_on_parent[rw], tg); in throtl_pd_alloc()
284 RB_CLEAR_NODE(&tg->rb_node); in throtl_pd_alloc()
285 tg->bps[READ] = U64_MAX; in throtl_pd_alloc()
286 tg->bps[WRITE] = U64_MAX; in throtl_pd_alloc()
287 tg->iops[READ] = UINT_MAX; in throtl_pd_alloc()
288 tg->iops[WRITE] = UINT_MAX; in throtl_pd_alloc()
290 return &tg->pd; in throtl_pd_alloc()
293 blkg_rwstat_exit(&tg->stat_bytes); in throtl_pd_alloc()
299 static void throtl_pd_init(struct blkg_policy_data *pd) in throtl_pd_init() argument
301 struct throtl_grp *tg = pd_to_tg(pd); in throtl_pd_init()
303 struct throtl_data *td = blkg->q->td; in throtl_pd_init()
304 struct throtl_service_queue *sq = &tg->service_queue; in throtl_pd_init()
320 sq->parent_sq = &td->service_queue; in throtl_pd_init()
321 if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent) in throtl_pd_init()
322 sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue; in throtl_pd_init()
323 tg->td = td; in throtl_pd_init()
333 struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq); in tg_update_has_rules()
337 tg->has_rules_iops[rw] = in tg_update_has_rules()
338 (parent_tg && parent_tg->has_rules_iops[rw]) || in tg_update_has_rules()
340 tg->has_rules_bps[rw] = in tg_update_has_rules()
341 (parent_tg && parent_tg->has_rules_bps[rw]) || in tg_update_has_rules()
346 static void throtl_pd_online(struct blkg_policy_data *pd) in throtl_pd_online() argument
348 struct throtl_grp *tg = pd_to_tg(pd); in throtl_pd_online()
356 static void throtl_pd_free(struct blkg_policy_data *pd) in throtl_pd_free() argument
358 struct throtl_grp *tg = pd_to_tg(pd); in throtl_pd_free()
360 timer_delete_sync(&tg->service_queue.pending_timer); in throtl_pd_free()
361 blkg_rwstat_exit(&tg->stat_bytes); in throtl_pd_free()
362 blkg_rwstat_exit(&tg->stat_ios); in throtl_pd_free()
371 n = rb_first_cached(&parent_sq->pending_tree); in throtl_rb_first()
381 rb_erase_cached(n, &parent_sq->pending_tree); in throtl_rb_erase()
393 parent_sq->first_pending_disptime = tg->disptime; in update_min_dispatch_time()
398 struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq; in tg_service_queue_add()
399 struct rb_node **node = &parent_sq->pending_tree.rb_root.rb_node; in tg_service_queue_add() local
402 unsigned long key = tg->disptime; in tg_service_queue_add()
405 while (*node != NULL) { in tg_service_queue_add()
406 parent = *node; in tg_service_queue_add()
409 if (time_before(key, __tg->disptime)) in tg_service_queue_add()
410 node = &parent->rb_left; in tg_service_queue_add()
412 node = &parent->rb_right; in tg_service_queue_add()
417 rb_link_node(&tg->rb_node, parent, node); in tg_service_queue_add()
418 rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree, in tg_service_queue_add()
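The walk above is the standard cached-rbtree insertion idiom keyed on disptime; the piece the match filter hides is the leftmost bookkeeping that rb_insert_color_cached() needs. Sketched under that assumption (node, parent, __tg and key are the locals visible above):

        bool leftmost = true;

        while (*node != NULL) {
                parent = *node;
                __tg = rb_entry_tg(parent);

                if (time_before(key, __tg->disptime)) {
                        node = &parent->rb_left;
                } else {
                        node = &parent->rb_right;
                        /* descended right at least once: not the new leftmost */
                        leftmost = false;
                }
        }

        rb_link_node(&tg->rb_node, parent, node);
        rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree,
                               leftmost);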
424 if (!(tg->flags & THROTL_TG_PENDING)) { in throtl_enqueue_tg()
426 tg->flags |= THROTL_TG_PENDING; in throtl_enqueue_tg()
427 tg->service_queue.parent_sq->nr_pending++; in throtl_enqueue_tg()
433 if (tg->flags & THROTL_TG_PENDING) { in throtl_dequeue_tg()
435 tg->service_queue.parent_sq; in throtl_dequeue_tg()
437 throtl_rb_erase(&tg->rb_node, parent_sq); in throtl_dequeue_tg()
438 --parent_sq->nr_pending; in throtl_dequeue_tg()
439 tg->flags &= ~THROTL_TG_PENDING; in throtl_dequeue_tg()
458 mod_timer(&sq->pending_timer, expires); in throtl_schedule_pending_timer()
460 expires - jiffies, jiffies); in throtl_schedule_pending_timer()
464 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
468 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
478 * delay before dispatch starts even if @sq->first_pending_disptime is not
485 if (!sq->nr_pending) in throtl_schedule_next_dispatch()
491 if (force || time_after(sq->first_pending_disptime, jiffies)) { in throtl_schedule_next_dispatch()
492 throtl_schedule_pending_timer(sq, sq->first_pending_disptime); in throtl_schedule_next_dispatch()
503 tg->bytes_disp[rw] = 0; in throtl_start_new_slice_with_credit()
504 tg->io_disp[rw] = 0; in throtl_start_new_slice_with_credit()
512 if (time_after(start, tg->slice_start[rw])) in throtl_start_new_slice_with_credit()
513 tg->slice_start[rw] = start; in throtl_start_new_slice_with_credit()
515 tg->slice_end[rw] = jiffies + DFL_THROTL_SLICE; in throtl_start_new_slice_with_credit()
516 throtl_log(&tg->service_queue, in throtl_start_new_slice_with_credit()
518 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_start_new_slice_with_credit()
519 tg->slice_end[rw], jiffies); in throtl_start_new_slice_with_credit()
526 tg->bytes_disp[rw] = 0; in throtl_start_new_slice()
527 tg->io_disp[rw] = 0; in throtl_start_new_slice()
529 tg->slice_start[rw] = jiffies; in throtl_start_new_slice()
530 tg->slice_end[rw] = jiffies + DFL_THROTL_SLICE; in throtl_start_new_slice()
532 throtl_log(&tg->service_queue, in throtl_start_new_slice()
534 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_start_new_slice()
535 tg->slice_end[rw], jiffies); in throtl_start_new_slice()
541 tg->slice_end[rw] = roundup(jiffy_end, DFL_THROTL_SLICE); in throtl_set_slice_end()
547 if (!time_before(tg->slice_end[rw], jiffy_end)) in throtl_extend_slice()
551 throtl_log(&tg->service_queue, in throtl_extend_slice()
553 rw == READ ? 'R' : 'W', tg->slice_start[rw], in throtl_extend_slice()
554 tg->slice_end[rw], jiffies); in throtl_extend_slice()
560 if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw])) in throtl_slice_used()
568 return sq->nr_queued_bps[type] + sq->nr_queued_iops[type]; in sq_queued()
601 if (ilog2(bps_limit) + ilog2(jiffy_elapsed) - ilog2(HZ) > 62) in calculate_bytes_allowed()
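The ilog2() test above is an overflow guard: the allowance bps_limit * jiffy_elapsed / HZ must fit in a u64, and since ilog2() rounds down, comparing against 62 rather than 64 leaves headroom for the truncation error of both operands. A sketch of the surrounding helper, assuming mul_u64_u64_div_u64() for the wide multiply-divide:

static u64 calculate_bytes_allowed(u64 bps_limit, unsigned long jiffy_elapsed)
{
        /* would the result overflow u64? ilog2() rounds down, hence 62 */
        if (ilog2(bps_limit) + ilog2(jiffy_elapsed) - ilog2(HZ) > 62)
                return U64_MAX;

        return mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed, (u64)HZ);
}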
617 if (bytes_trim <= 0 || tg->bytes_disp[rw] < bytes_trim) { in throtl_trim_bps()
618 bytes_trim = tg->bytes_disp[rw]; in throtl_trim_bps()
619 tg->bytes_disp[rw] = 0; in throtl_trim_bps()
621 tg->bytes_disp[rw] -= bytes_trim; in throtl_trim_bps()
638 if (io_trim <= 0 || tg->io_disp[rw] < io_trim) { in throtl_trim_iops()
639 io_trim = tg->io_disp[rw]; in throtl_trim_iops()
640 tg->io_disp[rw] = 0; in throtl_trim_iops()
642 tg->io_disp[rw] -= io_trim; in throtl_trim_iops()
655 BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw])); in throtl_trim_slice()
658  * If bps are unlimited (-1), then time slices don't get in throtl_trim_slice()
674 time_elapsed = rounddown(jiffies - tg->slice_start[rw], in throtl_trim_slice()
688 time_elapsed -= DFL_THROTL_SLICE; in throtl_trim_slice()
694 tg->slice_start[rw] += time_elapsed; in throtl_trim_slice()
696 throtl_log(&tg->service_queue, in throtl_trim_slice()
699 bytes_trim, io_trim, tg->slice_start[rw], tg->slice_end[rw], in throtl_trim_slice()
706 unsigned long jiffy_elapsed = jiffies - tg->slice_start[rw]; in __tg_update_carryover()
714 * tg->[bytes/io]_disp should be reset to 0 to avoid impacting the dispatch in __tg_update_carryover()
718 if (sq_queued(&tg->service_queue, rw) == 0) { in __tg_update_carryover()
719 tg->bytes_disp[rw] = 0; in __tg_update_carryover()
720 tg->io_disp[rw] = 0; in __tg_update_carryover()
734 *bytes = bytes_allowed - tg->bytes_disp[rw]; in __tg_update_carryover()
739 *ios = io_allowed - tg->io_disp[rw]; in __tg_update_carryover()
742 tg->bytes_disp[rw] = -*bytes; in __tg_update_carryover()
743 tg->io_disp[rw] = -*ios; in __tg_update_carryover()
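A worked example of the sign trick above (numbers hypothetical): if the expired slice allowed io_allowed = 1000 IOs but only io_disp = 400 were dispatched, then *ios = 600 and io_disp is set to -600, so the fresh slice starts 600 IOs in credit; after an overrun the carryover is negative, io_disp starts out positive, and the excess eats into the new slice's budget.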
755 throtl_log(&tg->service_queue, "%s: %lld %lld %d %d\n", __func__, in tg_update_carryover()
766 jiffy_elapsed = jiffies - tg->slice_start[rw]; in tg_within_iops_limit()
771 if (io_allowed > 0 && tg->io_disp[rw] + 1 <= io_allowed) in tg_within_iops_limit()
775 jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed; in tg_within_iops_limit()
791 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw]; in tg_within_bps_limit()
800 if ((bytes_allowed > 0 && tg->bytes_disp[rw] + bio_size <= bytes_allowed) in tg_within_bps_limit()
805 extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed; in tg_within_bps_limit()
815 jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed); in tg_within_bps_limit()
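The step the match filter hides between these fragments converts the overshoot into a wait: jiffy_wait = extra_bytes * HZ / bps_limit, rounded up to at least one jiffy. Concretely (hypothetical numbers), with bps_limit = 1 MiB/s and extra_bytes = 256 KiB the wait is HZ/4 jiffies, i.e. a quarter second; the (jiffy_elapsed_rnd - jiffy_elapsed) term above then pads the result out to the end of the rounded-up slice window.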
827 tg->bytes_disp[bio_data_dir(bio)] += bio_size; in throtl_charge_bps_bio()
834 tg->io_disp[bio_data_dir(bio)]++; in throtl_charge_iops_bio()
846 sq_queued(&tg->service_queue, rw) == 0) in tg_update_slice()
859 if (bps_limit == U64_MAX || tg->flags & THROTL_TG_CANCELING || in tg_dispatch_bps_time()
877 if (iops_limit == UINT_MAX || tg->flags & THROTL_TG_CANCELING) in tg_dispatch_iops_time()
888  * Returns approx number of jiffies to wait before this bio is within IO rate
902 BUG_ON(sq_queued(&tg->service_queue, rw) && in tg_dispatch_time()
903 bio != throtl_peek_queued(&tg->service_queue.queued[rw])); in tg_dispatch_time()
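A hedged sketch of the whole decision, assuming the two helpers named above and that the bps budget is charged as soon as its gate clears (in the real code the bio is also marked so later calls do not re-charge it):

static unsigned long tg_dispatch_time(struct throtl_grp *tg, struct bio *bio)
{
        bool rw = bio_data_dir(bio);
        unsigned long wait;

        /* queued bios are dispatched strictly FIFO within a direction */
        BUG_ON(sq_queued(&tg->service_queue, rw) &&
               bio != throtl_peek_queued(&tg->service_queue.queued[rw]));

        /* the bps gate is consulted first ... */
        wait = tg_dispatch_bps_time(tg, bio);
        if (wait)
                return wait;

        /* ... then, with bps charged, the iops gate */
        throtl_charge_bps_bio(tg, bio);
        return tg_dispatch_iops_time(tg, bio);
}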
919 * throtl_add_bio_tg - add a bio to the specified throtl_grp
925 * tg->qnode_on_self[] is used.
930 struct throtl_service_queue *sq = &tg->service_queue; in throtl_add_bio_tg()
934 qn = &tg->qnode_on_self[rw]; in throtl_add_bio_tg()
943 tg->flags |= THROTL_TG_WAS_EMPTY; in throtl_add_bio_tg()
950 * we also need to update the @tg->disptime. in throtl_add_bio_tg()
953 bio == throtl_peek_queued(&sq->queued[rw])) in throtl_add_bio_tg()
954 tg->flags |= THROTL_TG_IOPS_WAS_EMPTY; in throtl_add_bio_tg()
961 struct throtl_service_queue *sq = &tg->service_queue; in tg_update_disptime()
962 unsigned long read_wait = -1, write_wait = -1, min_wait, disptime; in tg_update_disptime()
965 bio = throtl_peek_queued(&sq->queued[READ]); in tg_update_disptime()
969 bio = throtl_peek_queued(&sq->queued[WRITE]); in tg_update_disptime()
977 throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq); in tg_update_disptime()
978 tg->disptime = disptime; in tg_update_disptime()
982 tg->flags &= ~THROTL_TG_WAS_EMPTY; in tg_update_disptime()
983 tg->flags &= ~THROTL_TG_IOPS_WAS_EMPTY; in tg_update_disptime()
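Reconstructed from the fragments, tg_update_disptime() re-keys the group by the smaller of the two head-of-queue waits (read_wait and write_wait start as -1, i.e. ULONG_MAX, so an empty direction never wins):

        bio = throtl_peek_queued(&sq->queued[READ]);
        if (bio)
                read_wait = tg_dispatch_time(tg, bio);

        bio = throtl_peek_queued(&sq->queued[WRITE]);
        if (bio)
                write_wait = tg_dispatch_time(tg, bio);

        min_wait = min(read_wait, write_wait);
        disptime = jiffies + min_wait;

        /* re-insert into the parent's pending tree under the new key */
        throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
        tg->disptime = disptime;
        tg_service_queue_add(tg);

        /* the *_WAS_EMPTY hints have served their purpose */
        tg->flags &= ~THROTL_TG_WAS_EMPTY;
        tg->flags &= ~THROTL_TG_IOPS_WAS_EMPTY;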
991 child_tg->slice_start[rw]); in start_parent_slice_with_credit()
998 struct throtl_service_queue *sq = &tg->service_queue; in tg_dispatch_one_bio()
999 struct throtl_service_queue *parent_sq = sq->parent_sq; in tg_dispatch_one_bio()
1017 * @td->service_queue, @bio is ready to be issued. Put it on its in tg_dispatch_one_bio()
1022 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg); in tg_dispatch_one_bio()
1026 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw], in tg_dispatch_one_bio()
1028 BUG_ON(tg->td->nr_queued[rw] <= 0); in tg_dispatch_one_bio()
1029 tg->td->nr_queued[rw]--; in tg_dispatch_one_bio()
1040 struct throtl_service_queue *sq = &tg->service_queue; in throtl_dispatch_tg()
1043 unsigned int max_nr_writes = THROTL_GRP_QUANTUM - max_nr_reads; in throtl_dispatch_tg()
1048 while ((bio = throtl_peek_queued(&sq->queued[READ])) && in throtl_dispatch_tg()
1058 while ((bio = throtl_peek_queued(&sq->queued[WRITE])) && in throtl_dispatch_tg()
1079 if (!parent_sq->nr_pending) in throtl_select_dispatch()
1086 if (time_before(jiffies, tg->disptime)) in throtl_select_dispatch()
1091 sq = &tg->service_queue; in throtl_select_dispatch()
1105 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
1116 * the top-level service_tree is reached, throtl_data->dispatch_work is
1132 q = tg->pd.blkg->q; in throtl_pending_timer_fn()
1134 q = td->queue; in throtl_pending_timer_fn()
1136 spin_lock_irq(&q->queue_lock); in throtl_pending_timer_fn()
1138 if (!q->root_blkg) in throtl_pending_timer_fn()
1142 parent_sq = sq->parent_sq; in throtl_pending_timer_fn()
1162 spin_unlock_irq(&q->queue_lock); in throtl_pending_timer_fn()
1164 spin_lock_irq(&q->queue_lock); in throtl_pending_timer_fn()
1172 if (tg->flags & THROTL_TG_WAS_EMPTY || in throtl_pending_timer_fn()
1173 tg->flags & THROTL_TG_IOPS_WAS_EMPTY) { in throtl_pending_timer_fn()
1183 /* reached the top-level, queue issuing */ in throtl_pending_timer_fn()
1184 queue_work(kthrotld_workqueue, &td->dispatch_work); in throtl_pending_timer_fn()
1187 spin_unlock_irq(&q->queue_lock); in throtl_pending_timer_fn()
1191 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
1195 * of throtl_data->service_queue. Those bios are ready and issued by this
1202 struct throtl_service_queue *td_sq = &td->service_queue; in blk_throtl_dispatch_work_fn()
1203 struct request_queue *q = td->queue; in blk_throtl_dispatch_work_fn()
1211 spin_lock_irq(&q->queue_lock); in blk_throtl_dispatch_work_fn()
1215 spin_unlock_irq(&q->queue_lock); in blk_throtl_dispatch_work_fn()
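Around the locking fragments above, the work function drains the top-level queue under queue_lock and issues the collected bios outside it; a sketch, assuming throtl_pop_queued() above and a stack-local bio list:

        struct bio_list bio_list_on_stack;
        struct blk_plug plug;
        struct bio *bio;
        int rw;

        bio_list_init(&bio_list_on_stack);

        /* collect everything that reached the top-level service queue */
        spin_lock_irq(&q->queue_lock);
        for (rw = READ; rw <= WRITE; rw++)
                while ((bio = throtl_pop_queued(td_sq, NULL, rw)))
                        bio_list_add(&bio_list_on_stack, bio);
        spin_unlock_irq(&q->queue_lock);

        /* issue outside the lock, plugged to batch the submissions */
        if (!bio_list_empty(&bio_list_on_stack)) {
                blk_start_plug(&plug);
                while ((bio = bio_list_pop(&bio_list_on_stack)))
                        submit_bio_noacct_nocheck(bio);
                blk_finish_plug(&plug);
        }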
1225 static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd, in tg_prfill_conf_u64() argument
1228 struct throtl_grp *tg = pd_to_tg(pd); in tg_prfill_conf_u64()
1233 return __blkg_prfill_u64(sf, pd, v); in tg_prfill_conf_u64()
1236 static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd, in tg_prfill_conf_uint() argument
1239 struct throtl_grp *tg = pd_to_tg(pd); in tg_prfill_conf_uint()
1244 return __blkg_prfill_u64(sf, pd, v); in tg_prfill_conf_uint()
1250 &blkcg_policy_throtl, seq_cft(sf)->private, false); in tg_print_conf_u64()
1257 &blkcg_policy_throtl, seq_cft(sf)->private, false); in tg_print_conf_uint()
1263 struct throtl_service_queue *sq = &tg->service_queue; in tg_conf_updated()
1267 throtl_log(&tg->service_queue, in tg_conf_updated()
1278 * blk-throttle. in tg_conf_updated()
1281 global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) { in tg_conf_updated()
1286 if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent || in tg_conf_updated()
1287 !blkg->parent->parent) in tg_conf_updated()
1303 if (tg->flags & THROTL_TG_PENDING) { in tg_conf_updated()
1305 throtl_schedule_next_dispatch(sq->parent_sq, true); in tg_conf_updated()
1311 struct request_queue *q = disk->queue; in blk_throtl_init()
1316 td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node); in blk_throtl_init()
1318 return -ENOMEM; in blk_throtl_init()
1320 INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn); in blk_throtl_init()
1321 throtl_service_queue_init(&td->service_queue); in blk_throtl_init()
1323 memflags = blk_mq_freeze_queue(disk->queue); in blk_throtl_init()
1324 blk_mq_quiesce_queue(disk->queue); in blk_throtl_init()
1326 q->td = td; in blk_throtl_init()
1327 td->queue = q; in blk_throtl_init()
1332 q->td = NULL; in blk_throtl_init()
1336 blk_mq_unquiesce_queue(disk->queue); in blk_throtl_init()
1337 blk_mq_unfreeze_queue(disk->queue, memflags); in blk_throtl_init()
1358 if (!blk_throtl_activated(ctx.bdev->bd_queue)) { in tg_set_conf()
1359 ret = blk_throtl_init(ctx.bdev->bd_disk); in tg_set_conf()
1368 ret = -EINVAL; in tg_set_conf()
1378 *(u64 *)((void *)tg + of_cft(of)->private) = v; in tg_set_conf()
1380 *(unsigned int *)((void *)tg + of_cft(of)->private) = v; in tg_set_conf()
1405 seq_cft(sf)->private, true); in tg_print_rwstat()
1410 struct blkg_policy_data *pd, int off) in tg_prfill_rwstat_recursive() argument
1414 blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_throtl, off, in tg_prfill_rwstat_recursive()
1416 return __blkg_prfill_rwstat(sf, pd, &sum); in tg_prfill_rwstat_recursive()
1423 seq_cft(sf)->private, true); in tg_print_rwstat_recursive()
1475 static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd, in tg_prfill_limit() argument
1478 struct throtl_grp *tg = pd_to_tg(pd); in tg_prfill_limit()
1479 const char *dname = blkg_dev_name(pd->blkg); in tg_prfill_limit()
1489 if (tg->bps[READ] == bps_dft && in tg_prfill_limit()
1490 tg->bps[WRITE] == bps_dft && in tg_prfill_limit()
1491 tg->iops[READ] == iops_dft && in tg_prfill_limit()
1492 tg->iops[WRITE] == iops_dft) in tg_prfill_limit()
1496 if (tg->bps[READ] == U64_MAX) in tg_prfill_limit()
1499 seq_printf(sf, " rbps=%llu", tg->bps[READ]); in tg_prfill_limit()
1501 if (tg->bps[WRITE] == U64_MAX) in tg_prfill_limit()
1504 seq_printf(sf, " wbps=%llu", tg->bps[WRITE]); in tg_prfill_limit()
1506 if (tg->iops[READ] == UINT_MAX) in tg_prfill_limit()
1509 seq_printf(sf, " riops=%u", tg->iops[READ]); in tg_prfill_limit()
1511 if (tg->iops[WRITE] == UINT_MAX) in tg_prfill_limit()
1514 seq_printf(sf, " wiops=%u", tg->iops[WRITE]); in tg_prfill_limit()
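For reference, a line this callback emits (as read from the cgroup v2 io.max file) looks like the following, with a hypothetical device; unlimited directions print as "max", and a group with all four limits at their defaults prints nothing at all:

        254:16 rbps=2097152 wbps=max riops=max wiops=120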
1523 &blkcg_policy_throtl, seq_cft(sf)->private, false); in tg_print_limit()
1542 if (!blk_throtl_activated(ctx.bdev->bd_queue)) { in tg_set_limit()
1543 ret = blk_throtl_init(ctx.bdev->bd_disk); in tg_set_limit()
1555 v[0] = tg->bps[READ]; in tg_set_limit()
1556 v[1] = tg->bps[WRITE]; in tg_set_limit()
1557 v[2] = tg->iops[READ]; in tg_set_limit()
1558 v[3] = tg->iops[WRITE]; in tg_set_limit()
1572 ret = -EINVAL; in tg_set_limit()
1578 ret = -ERANGE; in tg_set_limit()
1582 ret = -EINVAL; in tg_set_limit()
1595 tg->bps[READ] = v[0]; in tg_set_limit()
1596 tg->bps[WRITE] = v[1]; in tg_set_limit()
1597 tg->iops[READ] = v[2]; in tg_set_limit()
1598 tg->iops[WRITE] = v[3]; in tg_set_limit()
1619 struct throtl_data *td = q->td; in throtl_shutdown_wq()
1621 cancel_work_sync(&td->dispatch_work); in throtl_shutdown_wq()
1626 struct throtl_service_queue *sq = &tg->service_queue; in tg_flush_bios()
1628 if (tg->flags & THROTL_TG_CANCELING) in tg_flush_bios()
1634 tg->flags |= THROTL_TG_CANCELING; in tg_flush_bios()
1643 if (!(tg->flags & THROTL_TG_PENDING)) in tg_flush_bios()
1655 static void throtl_pd_offline(struct blkg_policy_data *pd) in throtl_pd_offline() argument
1657 tg_flush_bios(pd_to_tg(pd)); in throtl_pd_offline()
1673 struct request_queue *q = disk->queue; in blk_throtl_cancel_bios()
1680 spin_lock_irq(&q->queue_lock); in blk_throtl_cancel_bios()
1687 blkg_for_each_descendant_post(blkg, pos_css, q->root_blkg) { in blk_throtl_cancel_bios()
1699 spin_unlock_irq(&q->queue_lock); in blk_throtl_cancel_bios()
1704 struct throtl_service_queue *sq = &tg->service_queue; in tg_within_limit()
1711 return sq->nr_queued_iops[rw] == 0 && in tg_within_limit()
1715  * Throtl is FIFO - if bios are already queued, this one should queue too. in tg_within_limit()
1719 if (sq_queued(&tg->service_queue, rw)) { in tg_within_limit()
1720 if (sq->nr_queued_bps[rw] == 0 && in tg_within_limit()
1732 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in __blk_throtl_bio()
1733 struct blkcg_gq *blkg = bio->bi_blkg; in __blk_throtl_bio()
1739 struct throtl_data *td = tg->td; in __blk_throtl_bio()
1742 spin_lock_irq(&q->queue_lock); in __blk_throtl_bio()
1743 sq = &tg->service_queue; in __blk_throtl_bio()
1783 qn = &tg->qnode_on_parent[rw]; in __blk_throtl_bio()
1784 sq = sq->parent_sq; in __blk_throtl_bio()
1792 /* out-of-limit, queue to @tg */ in __blk_throtl_bio()
1795 tg->bytes_disp[rw], bio->bi_iter.bi_size, in __blk_throtl_bio()
1797 tg->io_disp[rw], tg_iops_limit(tg, rw), in __blk_throtl_bio()
1800 td->nr_queued[rw]++; in __blk_throtl_bio()
1811 if (tg->flags & THROTL_TG_WAS_EMPTY || in __blk_throtl_bio()
1812 tg->flags & THROTL_TG_IOPS_WAS_EMPTY) { in __blk_throtl_bio()
1814 throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true); in __blk_throtl_bio()
1818 spin_unlock_irq(&q->queue_lock); in __blk_throtl_bio()
1826 struct request_queue *q = disk->queue; in blk_throtl_exit()
1832 if (!q->td) in blk_throtl_exit()
1835 timer_delete_sync(&q->td->service_queue.pending_timer); in blk_throtl_exit()
1837 kfree(q->td); in blk_throtl_exit()