#ifndef BLK_THROTTLE_H
#define BLK_THROTTLE_H

#include "blk-cgroup-rwstat.h"

/*
 * To implement hierarchical throttling, throtl_grps form a tree and bios
 * are dispatched upwards level by level until they reach the top and get
 * issued.  When dispatching bios from the children and local group at each
 * level, if the bios are dispatched into a single bio_list, a local or
 * child group that queues many bios at once can fill up the list and
 * starve the others.
 *
 * To avoid such starvation, dispatched bios are queued separately
 * according to where they came from.  When they are again dispatched to
 * the parent, they're popped in round-robin order so that no single source
 * hogs the dispatch window.
 *
 * throtl_qnode is used to keep the queued bios separated by their sources.
 * Bios are queued to throtl_qnode which in turn is queued to
 * throtl_service_queue and then dispatched in round-robin order.
 *
 * It's also used to track the reference counts on blkg's.  A qnode always
 * belongs to a throtl_grp and gets queued on itself or the parent, so
 * incrementing the reference of the associated throtl_grp when a qnode is
 * queued and decrementing when dequeued is enough to keep the whole blkg
 * tree pinned while bios are in flight.
 */
struct throtl_qnode {
	struct list_head	node;		/* service_queue->queued[] */
	struct bio_list		bios;		/* queued bios */
	struct throtl_grp	*tg;		/* tg this qnode belongs to */
};

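/*
 * Illustrative sketch (not the actual blk-throttle.c code) of how bios are
 * popped from a queued[] list in round-robin order: the head qnode donates
 * one bio; if it still has bios left it is rotated to the tail, otherwise
 * it is removed and the throtl_grp reference taken when it was queued is
 * dropped:
 *
 *	static struct bio *example_pop_queued(struct list_head *queued)
 *	{
 *		struct throtl_qnode *qn;
 *		struct bio *bio;
 *
 *		if (list_empty(queued))
 *			return NULL;
 *
 *		qn = list_first_entry(queued, struct throtl_qnode, node);
 *		bio = bio_list_pop(&qn->bios);
 *
 *		if (bio_list_empty(&qn->bios)) {
 *			list_del_init(&qn->node);
 *			blkg_put(pd_to_blkg(&qn->tg->pd));
 *		} else {
 *			list_move_tail(&qn->node, queued);
 *		}
 *		return bio;
 *	}
 */
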
struct throtl_service_queue {
	struct throtl_service_queue *parent_sq;	/* the parent service_queue */

	/*
	 * Bios queued directly to this service_queue or dispatched from
	 * children throtl_grp's.
	 */
	struct list_head	queued[2];	/* throtl_qnode [READ/WRITE] */
	unsigned int		nr_queued[2];	/* number of queued bios */

	/*
	 * RB tree of active children throtl_grp's, which are sorted by
	 * their ->disptime.
	 */
	struct rb_root_cached	pending_tree;	/* RB tree of active tgs */
	unsigned int		nr_pending;	/* # queued in the tree */
	unsigned long		first_pending_disptime;	/* disptime of the first tg */
	struct timer_list	pending_timer;	/* fires on first_pending_disptime */
};

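/*
 * Illustrative sketch (an assumed helper, not the blk-throttle.c code) of
 * how pending_tree and pending_timer cooperate: the leftmost (earliest
 * ->disptime) group is cached in first_pending_disptime and the timer is
 * armed to fire at that time:
 *
 *	static void example_schedule_pending(struct throtl_service_queue *sq)
 *	{
 *		struct rb_node *n = rb_first_cached(&sq->pending_tree);
 *		struct throtl_grp *tg;
 *
 *		if (!n)
 *			return;
 *		tg = rb_entry(n, struct throtl_grp, rb_node);
 *		sq->first_pending_disptime = tg->disptime;
 *		mod_timer(&sq->pending_timer, tg->disptime);
 *	}
 */
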
enum tg_state_flags {
	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio_lists[] became non-empty */
	THROTL_TG_CANCELING	= 1 << 2,	/* starting to cancel bios */
};

struct throtl_grp {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* active throtl group service_queue member */
	struct rb_node rb_node;

	/* throtl_data this group belongs to */
	struct throtl_data *td;

	/* this group's service queue */
	struct throtl_service_queue service_queue;

	/*
	 * qnode_on_self is used when bios are directly queued to this
	 * throtl_grp so that local bios compete fairly with bios
	 * dispatched from children.  qnode_on_parent is used when bios are
	 * dispatched from this throtl_grp into its parent and will compete
	 * with the sibling qnode_on_parents and the parent's
	 * qnode_on_self.
	 */
	struct throtl_qnode qnode_on_self[2];
	struct throtl_qnode qnode_on_parent[2];

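	/*
	 * Illustrative sketch (mirrors the idea, not necessarily the exact
	 * blk-throttle.c code) of queueing a bio to one of these qnodes:
	 * the blkg reference is taken only when the qnode first goes on a
	 * list, and dropped when it is drained (see the pop sketch above):
	 *
	 *	static void example_qnode_add_bio(struct bio *bio,
	 *					  struct throtl_qnode *qn,
	 *					  struct list_head *queued)
	 *	{
	 *		bio_list_add(&qn->bios, bio);
	 *		if (list_empty(&qn->node)) {
	 *			list_add_tail(&qn->node, queued);
	 *			blkg_get(pd_to_blkg(&qn->tg->pd));
	 *		}
	 *	}
	 */
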
	/*
	 * Dispatch time in jiffies.  This is the estimated time when the
	 * group will unthrottle and be ready to dispatch more bios.  It is
	 * used as the key to sort active groups in the service tree.
	 */
	unsigned long disptime;

	unsigned int flags;

	/* are there any throtl rules between this group and td? */
	bool has_rules_bps[2];
	bool has_rules_iops[2];

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IOPS limits */
	unsigned int iops[2];

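	/*
	 * The limits above are what the cgroup interface configures, e.g.
	 * on cgroup v2 (device numbers are just an example):
	 *
	 *	echo "8:0 rbps=1048576 wiops=120" > io.max
	 *
	 * which caps reads at 1 MiB/s and writes at 120 IOs/s.
	 */
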
	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bios dispatched in current slice */
	unsigned int io_disp[2];

	unsigned long last_low_overflow_time[2];

	uint64_t last_bytes_disp[2];
	unsigned int last_io_disp[2];

	/*
	 * The following two fields are updated when a new configuration is
	 * submitted while some bios are still throttled.  They record how
	 * many bytes/ios have already been waited for under the previous
	 * configuration, and are used to calculate the wait time under the
	 * new configuration.
	 */
	long long carryover_bytes[2];
	int carryover_ios[2];

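	/*
	 * Illustrative example: with bps[READ] = 1 MiB/s, a group that has
	 * been throttled for half a second has accrued roughly 512 KiB of
	 * unused budget.  If the limit is changed at that point, the
	 * accrued budget is recorded in carryover_bytes[READ] so the
	 * queued bios don't start from a zero budget in the new slice.
	 */
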
	unsigned long last_check_time;

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	struct blkg_rwstat stat_bytes;
	struct blkg_rwstat stat_ios;
};

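/*
 * Illustrative sketch (simplified from the real slice logic, and ignoring
 * carryover) of how a bps limit translates into a wait time: budget
 * accrues linearly since slice_start, and any excess is converted back
 * into jiffies at the configured rate:
 *
 *	static unsigned long example_bps_wait(struct throtl_grp *tg,
 *					      struct bio *bio, int rw)
 *	{
 *		unsigned long elapsed = jiffies - tg->slice_start[rw];
 *		u64 allowed = div64_u64(tg->bps[rw] * elapsed, (u64)HZ);
 *		u64 extra;
 *
 *		if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= allowed)
 *			return 0;
 *
 *		extra = tg->bytes_disp[rw] + bio->bi_iter.bi_size - allowed;
 *		return div64_u64(extra * HZ, tg->bps[rw]);
 *	}
 */
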
extern struct blkcg_policy blkcg_policy_throtl;

static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}

static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}

/*
 * Internal throttling interface
 */
#ifndef CONFIG_BLK_DEV_THROTTLING
static inline void blk_throtl_exit(struct gendisk *disk) { }
static inline bool blk_throtl_bio(struct bio *bio) { return false; }
static inline void blk_throtl_cancel_bios(struct gendisk *disk) { }
#else /* CONFIG_BLK_DEV_THROTTLING */
void blk_throtl_exit(struct gendisk *disk);
bool __blk_throtl_bio(struct bio *bio);
void blk_throtl_cancel_bios(struct gendisk *disk);

static inline bool blk_throtl_activated(struct request_queue *q)
{
	return q->td != NULL;
}

static inline bool blk_should_throtl(struct bio *bio)
{
	struct throtl_grp *tg;
	int rw = bio_data_dir(bio);

	/*
	 * This is called under bio_queue_enter(), and it's synchronized with
	 * the activation of blk-throtl, which is protected by
	 * blk_mq_freeze_queue().
	 */
	if (!blk_throtl_activated(bio->bi_bdev->bd_queue))
		return false;

	tg = blkg_to_tg(bio->bi_blkg);
	if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) {
		if (!bio_flagged(bio, BIO_CGROUP_ACCT)) {
			bio_set_flag(bio, BIO_CGROUP_ACCT);
			blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf,
					bio->bi_iter.bi_size);
		}
		blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);
	}

	/* iops limit is always counted */
	if (tg->has_rules_iops[rw])
		return true;

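	/*
	 * A bio that was already charged against a bps limit (e.g. the
	 * remainder of a split that was throttled once) carries
	 * BIO_BPS_THROTTLED and must not be charged for bytes again.
	 */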
	if (tg->has_rules_bps[rw] && !bio_flagged(bio, BIO_BPS_THROTTLED))
		return true;

	return false;
}

static inline bool blk_throtl_bio(struct bio *bio)
{
	if (!blk_should_throtl(bio))
		return false;

	return __blk_throtl_bio(bio);
}
#endif /* CONFIG_BLK_DEV_THROTTLING */

#endif /* BLK_THROTTLE_H */