xref: /linux/block/bfq-cgroup.c (revision 62a31d6e38bd0faef7c956b358d651f7bdc4ae0c)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * cgroups support for the BFQ I/O scheduler.
4  */
5 #include <linux/module.h>
6 #include <linux/slab.h>
7 #include <linux/blkdev.h>
8 #include <linux/cgroup.h>
9 #include <linux/ktime.h>
10 #include <linux/rbtree.h>
11 #include <linux/ioprio.h>
12 #include <linux/sbitmap.h>
13 #include <linux/delay.h>
14 
15 #include "elevator.h"
16 #include "bfq-iosched.h"
17 
18 #ifdef CONFIG_BFQ_CGROUP_DEBUG
19 static int bfq_stat_init(struct bfq_stat *stat, gfp_t gfp)
20 {
21 	int ret;
22 
23 	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
24 	if (ret)
25 		return ret;
26 
27 	atomic64_set(&stat->aux_cnt, 0);
28 	return 0;
29 }
30 
31 static void bfq_stat_exit(struct bfq_stat *stat)
32 {
33 	percpu_counter_destroy(&stat->cpu_cnt);
34 }
35 
36 /**
37  * bfq_stat_add - add a value to a bfq_stat
38  * @stat: target bfq_stat
39  * @val: value to add
40  *
41  * Add @val to @stat.  The caller must ensure that IRQs on the same CPU
42  * don't re-enter this function for the same counter.
43  */
44 static inline void bfq_stat_add(struct bfq_stat *stat, uint64_t val)
45 {
46 	percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
47 }
48 
49 /**
50  * bfq_stat_read - read the current value of a bfq_stat
51  * @stat: bfq_stat to read
52  */
53 static inline uint64_t bfq_stat_read(struct bfq_stat *stat)
54 {
55 	return percpu_counter_sum_positive(&stat->cpu_cnt);
56 }
57 
58 /**
59  * bfq_stat_reset - reset a bfq_stat
60  * @stat: bfq_stat to reset
61  */
62 static inline void bfq_stat_reset(struct bfq_stat *stat)
63 {
64 	percpu_counter_set(&stat->cpu_cnt, 0);
65 	atomic64_set(&stat->aux_cnt, 0);
66 }
67 
68 /**
69  * bfq_stat_add_aux - add a bfq_stat into another's aux count
70  * @to: the destination bfq_stat
71  * @from: the source
72  *
73  * Add @from's count including the aux one to @to's aux count.
74  */
75 static inline void bfq_stat_add_aux(struct bfq_stat *to,
76 				     struct bfq_stat *from)
77 {
78 	atomic64_add(bfq_stat_read(from) + atomic64_read(&from->aux_cnt),
79 		     &to->aux_cnt);
80 }
81 
82 /**
83  * blkg_prfill_stat - prfill callback for bfq_stat
84  * @sf: seq_file to print to
85  * @pd: policy private data of interest
86  * @off: offset to the bfq_stat in @pd
87  *
88  * prfill callback for printing a bfq_stat.
89  */
90 static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
91 		int off)
92 {
93 	return __blkg_prfill_u64(sf, pd, bfq_stat_read((void *)pd + off));
94 }
95 
96 /* bfqg stats flags */
97 enum bfqg_stats_flags {
98 	BFQG_stats_waiting = 0,
99 	BFQG_stats_idling,
100 	BFQG_stats_empty,
101 };
102 
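/*
 * BFQG_FLAG_FNS(name) generates three helpers for each stats flag:
 * bfqg_stats_mark_##name() and bfqg_stats_clear_##name() set and clear
 * the corresponding bit in stats->flags, and bfqg_stats_##name() tests it.
 */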
103 #define BFQG_FLAG_FNS(name)						\
104 static void bfqg_stats_mark_##name(struct bfqg_stats *stats)	\
105 {									\
106 	stats->flags |= (1 << BFQG_stats_##name);			\
107 }									\
108 static void bfqg_stats_clear_##name(struct bfqg_stats *stats)	\
109 {									\
110 	stats->flags &= ~(1 << BFQG_stats_##name);			\
111 }									\
112 static int bfqg_stats_##name(struct bfqg_stats *stats)		\
113 {									\
114 	return (stats->flags & (1 << BFQG_stats_##name)) != 0;		\
115 }									\
116 
117 BFQG_FLAG_FNS(waiting)
118 BFQG_FLAG_FNS(idling)
119 BFQG_FLAG_FNS(empty)
120 #undef BFQG_FLAG_FNS
121 
122 /* This should be called with the scheduler lock held. */
123 static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
124 {
125 	u64 now;
126 
127 	if (!bfqg_stats_waiting(stats))
128 		return;
129 
130 	now = ktime_get_ns();
131 	if (now > stats->start_group_wait_time)
132 		bfq_stat_add(&stats->group_wait_time,
133 			      now - stats->start_group_wait_time);
134 	bfqg_stats_clear_waiting(stats);
135 }
136 
137 /* This should be called with the scheduler lock held. */
138 static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
139 						 struct bfq_group *curr_bfqg)
140 {
141 	struct bfqg_stats *stats = &bfqg->stats;
142 
143 	if (bfqg_stats_waiting(stats))
144 		return;
145 	if (bfqg == curr_bfqg)
146 		return;
147 	stats->start_group_wait_time = ktime_get_ns();
148 	bfqg_stats_mark_waiting(stats);
149 }
150 
151 /* This should be called with the scheduler lock held. */
152 static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
153 {
154 	u64 now;
155 
156 	if (!bfqg_stats_empty(stats))
157 		return;
158 
159 	now = ktime_get_ns();
160 	if (now > stats->start_empty_time)
161 		bfq_stat_add(&stats->empty_time,
162 			      now - stats->start_empty_time);
163 	bfqg_stats_clear_empty(stats);
164 }
165 
166 void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
167 {
168 	bfq_stat_add(&bfqg->stats.dequeue, 1);
169 }
170 
171 void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
172 {
173 	struct bfqg_stats *stats = &bfqg->stats;
174 
175 	if (blkg_rwstat_total(&stats->queued))
176 		return;
177 
178 	/*
179 	 * The group is already marked empty. This can happen if bfqq got a
180 	 * new request in the parent group and moved to this group while
181 	 * being added to the service tree. Just ignore the event and move on.
182 	 */
183 	if (bfqg_stats_empty(stats))
184 		return;
185 
186 	stats->start_empty_time = ktime_get_ns();
187 	bfqg_stats_mark_empty(stats);
188 }
189 
190 void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
191 {
192 	struct bfqg_stats *stats = &bfqg->stats;
193 
194 	if (bfqg_stats_idling(stats)) {
195 		u64 now = ktime_get_ns();
196 
197 		if (now > stats->start_idle_time)
198 			bfq_stat_add(&stats->idle_time,
199 				      now - stats->start_idle_time);
200 		bfqg_stats_clear_idling(stats);
201 	}
202 }
203 
204 void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
205 {
206 	struct bfqg_stats *stats = &bfqg->stats;
207 
208 	stats->start_idle_time = ktime_get_ns();
209 	bfqg_stats_mark_idling(stats);
210 }
211 
212 void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
213 {
214 	struct bfqg_stats *stats = &bfqg->stats;
215 
216 	bfq_stat_add(&stats->avg_queue_size_sum,
217 		      blkg_rwstat_total(&stats->queued));
218 	bfq_stat_add(&stats->avg_queue_size_samples, 1);
219 	bfqg_stats_update_group_wait_time(stats);
220 }
221 
222 void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
223 			      blk_opf_t opf)
224 {
225 	blkg_rwstat_add(&bfqg->stats.queued, opf, 1);
226 	bfqg_stats_end_empty_time(&bfqg->stats);
227 	if (bfqq != bfqg->bfqd->in_service_queue)
228 		bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
229 }
230 
231 void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf)
232 {
233 	blkg_rwstat_add(&bfqg->stats.queued, opf, -1);
234 }
235 
236 void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf)
237 {
238 	blkg_rwstat_add(&bfqg->stats.merged, opf, 1);
239 }
240 
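/*
 * On request completion, charge the device service time (now -
 * io_start_time_ns) and the pre-dispatch wait time (io_start_time_ns -
 * start_time_ns) to the group's service_time and wait_time rwstats.
 */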
241 void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
242 				  u64 io_start_time_ns, blk_opf_t opf)
243 {
244 	struct bfqg_stats *stats = &bfqg->stats;
245 	u64 now = ktime_get_ns();
246 
247 	if (now > io_start_time_ns)
248 		blkg_rwstat_add(&stats->service_time, opf,
249 				now - io_start_time_ns);
250 	if (io_start_time_ns > start_time_ns)
251 		blkg_rwstat_add(&stats->wait_time, opf,
252 				io_start_time_ns - start_time_ns);
253 }
254 
255 #else /* CONFIG_BFQ_CGROUP_DEBUG */
256 
257 void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf) { }
258 void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf) { }
259 void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
260 				  u64 io_start_time_ns, blk_opf_t opf) { }
261 void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
262 void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
263 
264 #endif /* CONFIG_BFQ_CGROUP_DEBUG */
265 
266 #ifdef CONFIG_BFQ_GROUP_IOSCHED
267 
268 /*
269  * blk-cgroup policy-related handlers
270  * The following functions help in converting between blk-cgroup
271  * internal structures and BFQ-specific structures.
272  */
273 
274 static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
275 {
276 	return pd ? container_of(pd, struct bfq_group, pd) : NULL;
277 }
278 
279 struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
280 {
281 	return pd_to_blkg(&bfqg->pd);
282 }
283 
284 static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
285 {
286 	return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
287 }
288 
289 /*
290  * bfq_group handlers
291  * The following functions help in navigating the bfq_group hierarchy
292  * by making it possible to find the parent of a bfq_group or the
293  * bfq_group associated with a bfq_queue.
294  */
295 
296 static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
297 {
298 	struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;
299 
300 	return pblkg ? blkg_to_bfqg(pblkg) : NULL;
301 }
302 
303 struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
304 {
305 	struct bfq_entity *group_entity = bfqq->entity.parent;
306 
307 	return group_entity ? container_of(group_entity, struct bfq_group,
308 					   entity) :
309 			      bfqq->bfqd->root_group;
310 }
311 
312 /*
313  * The following functions handle get and put of a bfq_group. The
314  * _and_blkg_ variants also get and put the associated blkg.
315  */
316 
317 static void bfqg_get(struct bfq_group *bfqg)
318 {
319 	refcount_inc(&bfqg->ref);
320 }
321 
322 static void bfqg_put(struct bfq_group *bfqg)
323 {
324 	if (refcount_dec_and_test(&bfqg->ref))
325 		kfree(bfqg);
326 }
327 
328 static void bfqg_and_blkg_get(struct bfq_group *bfqg)
329 {
330 	/* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
331 	bfqg_get(bfqg);
332 
333 	blkg_get(bfqg_to_blkg(bfqg));
334 }
335 
336 void bfqg_and_blkg_put(struct bfq_group *bfqg)
337 {
338 	blkg_put(bfqg_to_blkg(bfqg));
339 
340 	bfqg_put(bfqg);
341 }
342 
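/*
 * Account the bytes and the I/O count of @rq to the bfq_group its bio
 * belongs to; these counters back the bfq.io_service_bytes and
 * bfq.io_serviced cgroup files.
 */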
343 void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq)
344 {
345 	struct bfq_group *bfqg = blkg_to_bfqg(rq->bio->bi_blkg);
346 
347 	if (!bfqg)
348 		return;
349 
350 	blkg_rwstat_add(&bfqg->stats.bytes, rq->cmd_flags, blk_rq_bytes(rq));
351 	blkg_rwstat_add(&bfqg->stats.ios, rq->cmd_flags, 1);
352 }
353 
354 /* @stats = 0 */
355 static void bfqg_stats_reset(struct bfqg_stats *stats)
356 {
357 #ifdef CONFIG_BFQ_CGROUP_DEBUG
358 	/* queued stats shouldn't be cleared */
359 	blkg_rwstat_reset(&stats->merged);
360 	blkg_rwstat_reset(&stats->service_time);
361 	blkg_rwstat_reset(&stats->wait_time);
362 	bfq_stat_reset(&stats->time);
363 	bfq_stat_reset(&stats->avg_queue_size_sum);
364 	bfq_stat_reset(&stats->avg_queue_size_samples);
365 	bfq_stat_reset(&stats->dequeue);
366 	bfq_stat_reset(&stats->group_wait_time);
367 	bfq_stat_reset(&stats->idle_time);
368 	bfq_stat_reset(&stats->empty_time);
369 #endif
370 }
371 
372 /* @to += @from */
373 static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
374 {
375 	if (!to || !from)
376 		return;
377 
378 #ifdef CONFIG_BFQ_CGROUP_DEBUG
379 	/* queued stats aren't transferred: they track current occupancy */
380 	blkg_rwstat_add_aux(&to->merged, &from->merged);
381 	blkg_rwstat_add_aux(&to->service_time, &from->service_time);
382 	blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
383 	bfq_stat_add_aux(&to->time, &from->time);
384 	bfq_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
385 	bfq_stat_add_aux(&to->avg_queue_size_samples,
386 			  &from->avg_queue_size_samples);
387 	bfq_stat_add_aux(&to->dequeue, &from->dequeue);
388 	bfq_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
389 	bfq_stat_add_aux(&to->idle_time, &from->idle_time);
390 	bfq_stat_add_aux(&to->empty_time, &from->empty_time);
391 #endif
392 }
393 
394 /*
395  * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
396  * recursive stats can still account for the amount used by this bfqg after
397  * it's gone.
398  */
399 static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
400 {
401 	struct bfq_group *parent;
402 
403 	if (!bfqg) /* root_group */
404 		return;
405 
406 	parent = bfqg_parent(bfqg);
407 
408 	lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);
409 
410 	if (unlikely(!parent))
411 		return;
412 
413 	bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
414 	bfqg_stats_reset(&bfqg->stats);
415 }
416 
417 void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
418 {
419 	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
420 
421 	entity->weight = entity->new_weight;
422 	entity->orig_weight = entity->new_weight;
423 	if (bfqq) {
424 		bfqq->ioprio = bfqq->new_ioprio;
425 		bfqq->ioprio_class = bfqq->new_ioprio_class;
426 		/*
427 		 * Make sure that bfqg and its associated blkg do not
428 		 * disappear before entity.
429 		 */
430 		bfqg_and_blkg_get(bfqg);
431 	}
432 	entity->parent = bfqg->my_entity; /* NULL for root group */
433 	entity->sched_data = &bfqg->sched_data;
434 }
435 
436 static void bfqg_stats_exit(struct bfqg_stats *stats)
437 {
438 	blkg_rwstat_exit(&stats->bytes);
439 	blkg_rwstat_exit(&stats->ios);
440 #ifdef CONFIG_BFQ_CGROUP_DEBUG
441 	blkg_rwstat_exit(&stats->merged);
442 	blkg_rwstat_exit(&stats->service_time);
443 	blkg_rwstat_exit(&stats->wait_time);
444 	blkg_rwstat_exit(&stats->queued);
445 	bfq_stat_exit(&stats->time);
446 	bfq_stat_exit(&stats->avg_queue_size_sum);
447 	bfq_stat_exit(&stats->avg_queue_size_samples);
448 	bfq_stat_exit(&stats->dequeue);
449 	bfq_stat_exit(&stats->group_wait_time);
450 	bfq_stat_exit(&stats->idle_time);
451 	bfq_stat_exit(&stats->empty_time);
452 #endif
453 }
454 
455 static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
456 {
457 	if (blkg_rwstat_init(&stats->bytes, gfp) ||
458 	    blkg_rwstat_init(&stats->ios, gfp))
459 		goto error;
460 
461 #ifdef CONFIG_BFQ_CGROUP_DEBUG
462 	if (blkg_rwstat_init(&stats->merged, gfp) ||
463 	    blkg_rwstat_init(&stats->service_time, gfp) ||
464 	    blkg_rwstat_init(&stats->wait_time, gfp) ||
465 	    blkg_rwstat_init(&stats->queued, gfp) ||
466 	    bfq_stat_init(&stats->time, gfp) ||
467 	    bfq_stat_init(&stats->avg_queue_size_sum, gfp) ||
468 	    bfq_stat_init(&stats->avg_queue_size_samples, gfp) ||
469 	    bfq_stat_init(&stats->dequeue, gfp) ||
470 	    bfq_stat_init(&stats->group_wait_time, gfp) ||
471 	    bfq_stat_init(&stats->idle_time, gfp) ||
472 	    bfq_stat_init(&stats->empty_time, gfp))
473 		goto error;
474 #endif
475 
476 	return 0;
477 
478 error:
479 	bfqg_stats_exit(stats);
480 	return -ENOMEM;
481 }
482 
483 static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
484 {
485 	return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
486 }
487 
488 static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
489 {
490 	return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
491 }
492 
493 static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
494 {
495 	struct bfq_group_data *bgd;
496 
497 	bgd = kzalloc(sizeof(*bgd), gfp);
498 	if (!bgd)
499 		return NULL;
500 	return &bgd->pd;
501 }
502 
503 static void bfq_cpd_init(struct blkcg_policy_data *cpd)
504 {
505 	struct bfq_group_data *d = cpd_to_bfqgd(cpd);
506 
507 	d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
508 		CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL;
509 }
510 
511 static void bfq_cpd_free(struct blkcg_policy_data *cpd)
512 {
513 	kfree(cpd_to_bfqgd(cpd));
514 }
515 
516 static struct blkg_policy_data *bfq_pd_alloc(struct gendisk *disk,
517 		struct blkcg *blkcg, gfp_t gfp)
518 {
519 	struct bfq_group *bfqg;
520 
521 	bfqg = kzalloc_node(sizeof(*bfqg), gfp, disk->node_id);
522 	if (!bfqg)
523 		return NULL;
524 
525 	if (bfqg_stats_init(&bfqg->stats, gfp)) {
526 		kfree(bfqg);
527 		return NULL;
528 	}
529 
530 	/* see comments in bfq_bic_update_cgroup for why refcounting */
531 	refcount_set(&bfqg->ref, 1);
532 	return &bfqg->pd;
533 }
534 
535 static void bfq_pd_init(struct blkg_policy_data *pd)
536 {
537 	struct blkcg_gq *blkg = pd_to_blkg(pd);
538 	struct bfq_group *bfqg = blkg_to_bfqg(blkg);
539 	struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
540 	struct bfq_entity *entity = &bfqg->entity;
541 	struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);
542 
543 	entity->orig_weight = entity->weight = entity->new_weight = d->weight;
544 	entity->my_sched_data = &bfqg->sched_data;
545 	entity->last_bfqq_created = NULL;
546 
547 	bfqg->my_entity = entity; /*
548 				   * the root_group's will be set to NULL
549 				   * in bfq_init_queue()
550 				   */
551 	bfqg->bfqd = bfqd;
552 	bfqg->active_entities = 0;
553 	bfqg->num_queues_with_pending_reqs = 0;
554 	bfqg->rq_pos_tree = RB_ROOT;
555 }
556 
557 static void bfq_pd_free(struct blkg_policy_data *pd)
558 {
559 	struct bfq_group *bfqg = pd_to_bfqg(pd);
560 
561 	bfqg_stats_exit(&bfqg->stats);
562 	bfqg_put(bfqg);
563 }
564 
565 static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
566 {
567 	struct bfq_group *bfqg = pd_to_bfqg(pd);
568 
569 	bfqg_stats_reset(&bfqg->stats);
570 }
571 
572 static void bfq_group_set_parent(struct bfq_group *bfqg,
573 					struct bfq_group *parent)
574 {
575 	struct bfq_entity *entity;
576 
577 	entity = &bfqg->entity;
578 	entity->parent = parent->my_entity;
579 	entity->sched_data = &parent->sched_data;
580 }
581 
582 static void bfq_link_bfqg(struct bfq_data *bfqd, struct bfq_group *bfqg)
583 {
584 	struct bfq_group *parent;
585 	struct bfq_entity *entity;
586 
587 	/*
588 	 * Update chain of bfq_groups as we might be handling a leaf group
589 	 * which, along with some of its relatives, has not been hooked yet
590 	 * to the private hierarchy of BFQ.
591 	 */
592 	entity = &bfqg->entity;
593 	for_each_entity(entity) {
594 		struct bfq_group *curr_bfqg = container_of(entity,
595 						struct bfq_group, entity);
596 		if (curr_bfqg != bfqd->root_group) {
597 			parent = bfqg_parent(curr_bfqg);
598 			if (!parent)
599 				parent = bfqd->root_group;
600 			bfq_group_set_parent(curr_bfqg, parent);
601 		}
602 	}
603 }
604 
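/*
 * Return the bfq_group to be used for @bio: the closest online ancestor
 * of the blkg the bio is associated with, falling back to the root group.
 * The bio is (re)associated with the css of the chosen group.
 */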
605 struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
606 {
607 	struct blkcg_gq *blkg = bio->bi_blkg;
608 	struct bfq_group *bfqg;
609 
610 	while (blkg) {
611 		if (!blkg->online) {
612 			blkg = blkg->parent;
613 			continue;
614 		}
615 		bfqg = blkg_to_bfqg(blkg);
616 		if (bfqg->pd.online) {
617 			bio_associate_blkg_from_css(bio, &blkg->blkcg->css);
618 			return bfqg;
619 		}
620 		blkg = blkg->parent;
621 	}
622 	bio_associate_blkg_from_css(bio,
623 				&bfqg_to_blkg(bfqd->root_group)->blkcg->css);
624 	return bfqd->root_group;
625 }
626 
627 /**
628  * bfq_bfqq_move - migrate @bfqq to @bfqg.
629  * @bfqd: queue descriptor.
630  * @bfqq: the queue to move.
631  * @bfqg: the group to move to.
632  *
633  * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
634  * it on the new one.  Avoid putting the entity on the old group idle tree.
635  *
636  * Must be called under the scheduler lock, to make sure that the blkg
637  * owning @bfqg does not disappear (see comments in
638  * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
639  * objects).
640  */
641 void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
642 		   struct bfq_group *bfqg)
643 {
644 	struct bfq_entity *entity = &bfqq->entity;
645 	struct bfq_group *old_parent = bfqq_group(bfqq);
646 	bool has_pending_reqs = false;
647 
648 	/*
649 	 * There is no point in moving bfqq to the same group, which can
650 	 * happen when the root group is offlined.
651 	 */
652 	if (old_parent == bfqg)
653 		return;
654 
655 	/*
656 	 * oom_bfqq is not allowed to move: it holds a reference to the
657 	 * root_group until the elevator exits.
658 	 */
659 	if (bfqq == &bfqd->oom_bfqq)
660 		return;
661 	/*
662 	 * Get an extra reference to prevent bfqq from being freed by the
663 	 * next possible expiration or deactivation.
664 	 */
665 	bfqq->ref++;
666 
667 	if (entity->in_groups_with_pending_reqs) {
668 		has_pending_reqs = true;
669 		bfq_del_bfqq_in_groups_with_pending_reqs(bfqq);
670 	}
671 
672 	/* If bfqq is empty, then bfq_bfqq_expire also invokes
673 	 * bfq_del_bfqq_busy, thereby removing bfqq and its entity
674 	 * from the data structures of the current group. Otherwise we
675 	 * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
676 	 * we do below.
677 	 */
678 	if (bfqq == bfqd->in_service_queue)
679 		bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
680 				false, BFQQE_PREEMPTED);
681 
682 	if (bfq_bfqq_busy(bfqq))
683 		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
684 	else if (entity->on_st_or_in_serv)
685 		bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
686 	bfqg_and_blkg_put(old_parent);
687 
688 	if (entity->parent &&
689 	    entity->parent->last_bfqq_created == bfqq)
690 		entity->parent->last_bfqq_created = NULL;
691 	else if (bfqd->last_bfqq_created == bfqq)
692 		bfqd->last_bfqq_created = NULL;
693 
694 	entity->parent = bfqg->my_entity;
695 	entity->sched_data = &bfqg->sched_data;
696 	/* pin down bfqg and its associated blkg  */
697 	bfqg_and_blkg_get(bfqg);
698 
699 	if (has_pending_reqs)
700 		bfq_add_bfqq_in_groups_with_pending_reqs(bfqq);
701 
702 	if (bfq_bfqq_busy(bfqq)) {
703 		if (unlikely(!bfqd->nonrot_with_queueing))
704 			bfq_pos_tree_add_move(bfqd, bfqq);
705 		bfq_activate_bfqq(bfqd, bfqq);
706 	}
707 
708 	if (!bfqd->in_service_queue && !bfqd->tot_rq_in_driver)
709 		bfq_schedule_dispatch(bfqd);
710 	/* release extra ref taken above, bfqq may happen to be freed now */
711 	bfq_put_queue(bfqq);
712 }
713 
714 static void bfq_sync_bfqq_move(struct bfq_data *bfqd,
715 			       struct bfq_queue *sync_bfqq,
716 			       struct bfq_io_cq *bic,
717 			       struct bfq_group *bfqg,
718 			       unsigned int act_idx)
719 {
720 	struct bfq_queue *bfqq;
721 
722 	if (!sync_bfqq->new_bfqq && !bfq_bfqq_coop(sync_bfqq)) {
723 		/* We are the only user of this bfqq, just move it */
724 		if (sync_bfqq->entity.sched_data != &bfqg->sched_data)
725 			bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
726 		return;
727 	}
728 
729 	/*
730 	 * The queue was merged into a different queue. Check
731 	 * that the merge chain still belongs to the same
732 	 * cgroup.
733 	 */
734 	for (bfqq = sync_bfqq; bfqq; bfqq = bfqq->new_bfqq)
735 		if (bfqq->entity.sched_data != &bfqg->sched_data)
736 			break;
737 	if (bfqq) {
738 		/*
739 		 * Some queue in the chain changed cgroups, so the merge is no
740 		 * longer valid. We cannot easily just cancel the merge (by
741 		 * clearing new_bfqq) as there may be other processes
742 		 * using this queue and holding refs to all queues
743 		 * below sync_bfqq->new_bfqq. Similarly, if the merge
744 		 * already happened, we need to detach from bfqq now
745 		 * so that we cannot merge a bio into a request from the
746 		 * old cgroup.
747 		 */
748 		bfq_put_cooperator(sync_bfqq);
749 		bic_set_bfqq(bic, NULL, true, act_idx);
750 		bfq_release_process_ref(bfqd, sync_bfqq);
751 	}
752 }
753 
754 /**
755  * __bfq_bic_change_cgroup - move @bic to @bfqg.
756  * @bfqd: the queue descriptor.
757  * @bic: the bic to move.
758  * @bfqg: the group to move to.
759  *
760  * Move bic to blkcg, assuming that bfqd->lock is held; which makes
761  * sure that the reference to cgroup is valid across the call (see
762  * comments in bfq_bic_update_cgroup on this issue)
763  */
764 static void __bfq_bic_change_cgroup(struct bfq_data *bfqd,
765 				    struct bfq_io_cq *bic,
766 				    struct bfq_group *bfqg)
767 {
768 	unsigned int act_idx;
769 
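	/*
	 * For each actuator: drop this bic's async queue, and let
	 * bfq_sync_bfqq_move() decide how to handle the sync queue.
	 */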
770 	for (act_idx = 0; act_idx < bfqd->num_actuators; act_idx++) {
771 		struct bfq_queue *async_bfqq = bic_to_bfqq(bic, false, act_idx);
772 		struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, true, act_idx);
773 
774 		if (async_bfqq &&
775 		    async_bfqq->entity.sched_data != &bfqg->sched_data) {
776 			bic_set_bfqq(bic, NULL, false, act_idx);
777 			bfq_release_process_ref(bfqd, async_bfqq);
778 		}
779 
780 		if (sync_bfqq)
781 			bfq_sync_bfqq_move(bfqd, sync_bfqq, bic, bfqg, act_idx);
782 	}
783 }
784 
785 void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
786 {
787 	struct bfq_data *bfqd = bic_to_bfqd(bic);
788 	struct bfq_group *bfqg = bfq_bio_bfqg(bfqd, bio);
789 	uint64_t serial_nr;
790 
791 	serial_nr = bfqg_to_blkg(bfqg)->blkcg->css.serial_nr;
792 
793 	/*
794 	 * Check whether blkcg has changed.  The condition may trigger
795 	 * spuriously on a newly created cic but there's no harm.
796 	 */
797 	if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
798 		return;
799 
800 	/*
801 	 * New cgroup for this process. Make sure it is linked to bfq internal
802 	 * cgroup hierarchy.
803 	 */
804 	bfq_link_bfqg(bfqd, bfqg);
805 	__bfq_bic_change_cgroup(bfqd, bic, bfqg);
806 	/*
807 	 * Update blkg_path for bfq_log_* functions. We cache this
808 	 * path, and update it here, for the following
809 	 * reasons. Operations on blkg objects in blk-cgroup are
810 	 * protected with the request_queue lock, and not with the
811 	 * lock that protects the instances of this scheduler
812 	 * (bfqd->lock). This exposes BFQ to the following sort of
813 	 * race.
814 	 *
815 	 * The blkg_lookup performed in bfq_get_queue, protected
816 	 * through rcu, may happen to return the address of a copy of
817 	 * the original blkg. If this is the case, then the
818 	 * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
819 	 * the blkg, is useless: it does not prevent blk-cgroup code
820 	 * from destroying both the original blkg and all objects
821 	 * directly or indirectly referred by the copy of the
822 	 * blkg.
823 	 *
824 	 * On the bright side, destroy operations on a blkg invoke, as
825 	 * a first step, hooks of the scheduler associated with the
826 	 * blkg. And these hooks are executed with bfqd->lock held for
827 	 * BFQ. As a consequence, for any blkg associated with the
828 	 * request queue this instance of the scheduler is attached
829 	 * to, we are guaranteed that such a blkg is not destroyed, and
830 	 * that all the pointers it contains are consistent, while we
831 	 * are holding bfqd->lock. A blkg_lookup performed with
832 	 * bfqd->lock held then returns a fully consistent blkg, which
833 	 * remains consistent as long as this lock is held.
834 	 *
835 	 * Thanks to the last fact, and to the fact that: (1) bfqg has
836 	 * been obtained through a blkg_lookup in the above
837 	 * assignment, and (2) bfqd->lock is being held, here we can
838 	 * safely use the policy data for the involved blkg (i.e., the
839 	 * field bfqg->pd) to get to the blkg associated with bfqg,
840 	 * and then we can safely use any field of blkg. After we
841 	 * release bfqd->lock, even just getting blkg through this
842 	 * bfqg may cause dangling references to be traversed, as
843 	 * bfqg->pd may not exist any more.
844 	 *
845 	 * In view of the above facts, here we cache, in the bfqg, any
846 	 * blkg data we may need for this bic, and for its associated
847 	 * bfq_queue. As of now, we need to cache only the path of the
848 	 * blkg, which is used in the bfq_log_* functions.
849 	 *
850 	 * Finally, note that bfqg itself needs to be protected from
851 	 * destruction on the blkg_free of the original blkg (which
852 	 * invokes bfq_pd_free). We use an additional private
853 	 * refcounter for bfqg, to let it disappear only after no
854 	 * bfq_queue refers to it any longer.
855 	 */
856 	blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
857 	bic->blkcg_serial_nr = serial_nr;
858 }
859 
860 /**
861  * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
862  * @st: the service tree being flushed.
863  */
864 static void bfq_flush_idle_tree(struct bfq_service_tree *st)
865 {
866 	struct bfq_entity *entity = st->first_idle;
867 
868 	for (; entity ; entity = st->first_idle)
869 		__bfq_deactivate_entity(entity, false);
870 }
871 
872 /**
873  * bfq_reparent_leaf_entity - move leaf entity to the root_group.
874  * @bfqd: the device data structure with the root group.
875  * @entity: the entity to move, if entity is a leaf; or the parent entity
876  *	    of an active leaf entity to move, if entity is not a leaf.
877  * @ioprio_class: I/O priority class to reparent.
878  */
879 static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
880 				     struct bfq_entity *entity,
881 				     int ioprio_class)
882 {
883 	struct bfq_queue *bfqq;
884 	struct bfq_entity *child_entity = entity;
885 
886 	while (child_entity->my_sched_data) { /* leaf not reached yet */
887 		struct bfq_sched_data *child_sd = child_entity->my_sched_data;
888 		struct bfq_service_tree *child_st = child_sd->service_tree +
889 			ioprio_class;
890 		struct rb_root *child_active = &child_st->active;
891 
892 		child_entity = bfq_entity_of(rb_first(child_active));
893 
894 		if (!child_entity)
895 			child_entity = child_sd->in_service_entity;
896 	}
897 
898 	bfqq = bfq_entity_to_bfqq(child_entity);
899 	bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
900 }
901 
902 /**
903  * bfq_reparent_active_queues - move to the root group all active queues.
904  * @bfqd: the device data structure with the root group.
905  * @bfqg: the group to move from.
906  * @st: the service tree to start the search from.
907  * @ioprio_class: I/O priority class to reparent.
908  */
909 static void bfq_reparent_active_queues(struct bfq_data *bfqd,
910 				       struct bfq_group *bfqg,
911 				       struct bfq_service_tree *st,
912 				       int ioprio_class)
913 {
914 	struct rb_root *active = &st->active;
915 	struct bfq_entity *entity;
916 
917 	while ((entity = bfq_entity_of(rb_first(active))))
918 		bfq_reparent_leaf_entity(bfqd, entity, ioprio_class);
919 
920 	if (bfqg->sched_data.in_service_entity)
921 		bfq_reparent_leaf_entity(bfqd,
922 					 bfqg->sched_data.in_service_entity,
923 					 ioprio_class);
924 }
925 
926 /**
927  * bfq_pd_offline - deactivate the entity associated with @pd,
928  *		    and reparent its children entities.
929  * @pd: descriptor of the policy going offline.
930  *
931  * blkio already grabs the queue_lock for us, so no need to use
932  * RCU-based magic
933  */
934 static void bfq_pd_offline(struct blkg_policy_data *pd)
935 {
936 	struct bfq_service_tree *st;
937 	struct bfq_group *bfqg = pd_to_bfqg(pd);
938 	struct bfq_data *bfqd = bfqg->bfqd;
939 	struct bfq_entity *entity = bfqg->my_entity;
940 	unsigned long flags;
941 	int i;
942 
943 	spin_lock_irqsave(&bfqd->lock, flags);
944 
945 	if (!entity) /* root group */
946 		goto put_async_queues;
947 
948 	/*
949 	 * Empty all service_trees belonging to this group before
950 	 * deactivating the group itself.
951 	 */
952 	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
953 		st = bfqg->sched_data.service_tree + i;
954 
955 		/*
956 		 * It may happen that some queues are still active
957 		 * (busy) upon group destruction (if the corresponding
958 		 * processes have been forced to terminate). We move
959 		 * all the leaf entities corresponding to these queues
960 		 * to the root_group.
961 		 * Also, it may happen that the group has an entity
962 		 * in service, which is disconnected from the active
963 		 * tree: it must be moved, too.
964 		 * There is no need to put the sync queues, as the
965 		 * scheduler has taken no reference.
966 		 */
967 		bfq_reparent_active_queues(bfqd, bfqg, st, i);
968 
969 		/*
970 		 * The idle tree may still contain bfq_queues
971 		 * belonging to exited tasks because they never
972 		 * migrated to a different cgroup from the one being
973 		 * destroyed now. In addition, even
974 		 * bfq_reparent_active_queues() may happen to add some
975 		 * entities to the idle tree. It happens if, in some
976 		 * of the calls to bfq_bfqq_move() performed by
977 		 * bfq_reparent_active_queues(), the queue to move is
978 		 * empty and gets expired.
979 		 */
980 		bfq_flush_idle_tree(st);
981 	}
982 
983 	__bfq_deactivate_entity(entity, false);
984 
985 put_async_queues:
986 	bfq_put_async_queues(bfqd, bfqg);
987 
988 	spin_unlock_irqrestore(&bfqd->lock, flags);
989 	/*
990 	 * @blkg is going offline and will be ignored by
991 	 * blkg_[rw]stat_recursive_sum().  Transfer stats to the parent so
992 	 * that they don't get lost.  If IOs complete after this point, the
993 	 * stats for them will be lost.  Oh well...
994 	 */
995 	bfqg_stats_xfer_dead(bfqg);
996 }
997 
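/*
 * End weight raising for the async queues of every group attached to
 * @bfqd's queue, including the root group.
 */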
998 void bfq_end_wr_async(struct bfq_data *bfqd)
999 {
1000 	struct blkcg_gq *blkg;
1001 
1002 	list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
1003 		struct bfq_group *bfqg = blkg_to_bfqg(blkg);
1004 
1005 		bfq_end_wr_async_queues(bfqd, bfqg);
1006 	}
1007 	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
1008 }
1009 
1010 static int bfq_io_show_weight_legacy(struct seq_file *sf, void *v)
1011 {
1012 	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
1013 	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
1014 	unsigned int val = 0;
1015 
1016 	if (bfqgd)
1017 		val = bfqgd->weight;
1018 
1019 	seq_printf(sf, "%u\n", val);
1020 
1021 	return 0;
1022 }
1023 
1024 static u64 bfqg_prfill_weight_device(struct seq_file *sf,
1025 				     struct blkg_policy_data *pd, int off)
1026 {
1027 	struct bfq_group *bfqg = pd_to_bfqg(pd);
1028 
1029 	if (!bfqg->entity.dev_weight)
1030 		return 0;
1031 	return __blkg_prfill_u64(sf, pd, bfqg->entity.dev_weight);
1032 }
1033 
1034 static int bfq_io_show_weight(struct seq_file *sf, void *v)
1035 {
1036 	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
1037 	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
1038 
1039 	seq_printf(sf, "default %u\n", bfqgd->weight);
1040 	blkcg_print_blkgs(sf, blkcg, bfqg_prfill_weight_device,
1041 			  &blkcg_policy_bfq, 0, false);
1042 	return 0;
1043 }
1044 
1045 static void bfq_group_set_weight(struct bfq_group *bfqg, u64 weight, u64 dev_weight)
1046 {
1047 	weight = dev_weight ?: weight;
1048 
1049 	bfqg->entity.dev_weight = dev_weight;
1050 	/*
1051 	 * Setting the prio_changed flag of the entity
1052 	 * to 1 with new_weight == weight would re-set
1053 	 * the value of the weight to its ioprio mapping.
1054 	 * Set the flag only if necessary.
1055 	 */
1056 	if ((unsigned short)weight != bfqg->entity.new_weight) {
1057 		bfqg->entity.new_weight = (unsigned short)weight;
1058 		/*
1059 		 * Make sure that the above new value has been
1060 		 * stored in bfqg->entity.new_weight before
1061 		 * setting the prio_changed flag. In fact,
1062 		 * this flag may be read asynchronously (in
1063 		 * critical sections protected by a different
1064 		 * lock than that held here), and finding this
1065 		 * flag set may cause the execution of the code
1066 		 * for updating parameters whose value may
1067 		 * depend also on bfqg->entity.new_weight (in
1068 		 * __bfq_entity_update_weight_prio).
1069 		 * This barrier makes sure that the new value
1070 		 * of bfqg->entity.new_weight is correctly
1071 		 * seen in that code.
1072 		 */
1073 		smp_wmb();
1074 		bfqg->entity.prio_changed = 1;
1075 	}
1076 }
1077 
1078 static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
1079 				    struct cftype *cftype,
1080 				    u64 val)
1081 {
1082 	struct blkcg *blkcg = css_to_blkcg(css);
1083 	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
1084 	struct blkcg_gq *blkg;
1085 	int ret = -ERANGE;
1086 
1087 	if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
1088 		return ret;
1089 
1090 	ret = 0;
1091 	spin_lock_irq(&blkcg->lock);
1092 	bfqgd->weight = (unsigned short)val;
1093 	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
1094 		struct bfq_group *bfqg = blkg_to_bfqg(blkg);
1095 
1096 		if (bfqg)
1097 			bfq_group_set_weight(bfqg, val, 0);
1098 	}
1099 	spin_unlock_irq(&blkcg->lock);
1100 
1101 	return ret;
1102 }
1103 
1104 static ssize_t bfq_io_set_device_weight(struct kernfs_open_file *of,
1105 					char *buf, size_t nbytes,
1106 					loff_t off)
1107 {
1108 	int ret;
1109 	struct blkg_conf_ctx ctx;
1110 	struct blkcg *blkcg = css_to_blkcg(of_css(of));
1111 	struct bfq_group *bfqg;
1112 	u64 v;
1113 
1114 	ret = blkg_conf_prep(blkcg, &blkcg_policy_bfq, buf, &ctx);
1115 	if (ret)
1116 		return ret;
1117 
1118 	if (sscanf(ctx.body, "%llu", &v) == 1) {
1119 		/* require "default" on dfl */
1120 		ret = -ERANGE;
1121 		if (!v)
1122 			goto out;
1123 	} else if (!strcmp(strim(ctx.body), "default")) {
1124 		v = 0;
1125 	} else {
1126 		ret = -EINVAL;
1127 		goto out;
1128 	}
1129 
1130 	bfqg = blkg_to_bfqg(ctx.blkg);
1131 
1132 	ret = -ERANGE;
1133 	if (!v || (v >= BFQ_MIN_WEIGHT && v <= BFQ_MAX_WEIGHT)) {
1134 		bfq_group_set_weight(bfqg, bfqg->entity.weight, v);
1135 		ret = 0;
1136 	}
1137 out:
1138 	blkg_conf_finish(&ctx);
1139 	return ret ?: nbytes;
1140 }
1141 
1142 static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
1143 				 char *buf, size_t nbytes,
1144 				 loff_t off)
1145 {
1146 	char *endp;
1147 	int ret;
1148 	u64 v;
1149 
1150 	buf = strim(buf);
1151 
1152 	/* "WEIGHT" or "default WEIGHT" sets the default weight */
1153 	v = simple_strtoull(buf, &endp, 0);
1154 	if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
1155 		ret = bfq_io_set_weight_legacy(of_css(of), NULL, v);
1156 		return ret ?: nbytes;
1157 	}
1158 
1159 	return bfq_io_set_device_weight(of, buf, nbytes, off);
1160 }
1161 
1162 static int bfqg_print_rwstat(struct seq_file *sf, void *v)
1163 {
1164 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
1165 			  &blkcg_policy_bfq, seq_cft(sf)->private, true);
1166 	return 0;
1167 }
1168 
1169 static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
1170 					struct blkg_policy_data *pd, int off)
1171 {
1172 	struct blkg_rwstat_sample sum;
1173 
1174 	blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_bfq, off, &sum);
1175 	return __blkg_prfill_rwstat(sf, pd, &sum);
1176 }
1177 
1178 static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
1179 {
1180 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1181 			  bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
1182 			  seq_cft(sf)->private, true);
1183 	return 0;
1184 }
1185 
1186 #ifdef CONFIG_BFQ_CGROUP_DEBUG
1187 static int bfqg_print_stat(struct seq_file *sf, void *v)
1188 {
1189 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
1190 			  &blkcg_policy_bfq, seq_cft(sf)->private, false);
1191 	return 0;
1192 }
1193 
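/*
 * Sum a bfq_stat at offset @off over @pd's blkg and all of its online
 * descendants, including the aux counts inherited from dead children.
 */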
1194 static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
1195 				      struct blkg_policy_data *pd, int off)
1196 {
1197 	struct blkcg_gq *blkg = pd_to_blkg(pd);
1198 	struct blkcg_gq *pos_blkg;
1199 	struct cgroup_subsys_state *pos_css;
1200 	u64 sum = 0;
1201 
1202 	lockdep_assert_held(&blkg->q->queue_lock);
1203 
1204 	rcu_read_lock();
1205 	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
1206 		struct bfq_stat *stat;
1207 
1208 		if (!pos_blkg->online)
1209 			continue;
1210 
1211 		stat = (void *)blkg_to_pd(pos_blkg, &blkcg_policy_bfq) + off;
1212 		sum += bfq_stat_read(stat) + atomic64_read(&stat->aux_cnt);
1213 	}
1214 	rcu_read_unlock();
1215 
1216 	return __blkg_prfill_u64(sf, pd, sum);
1217 }
1218 
1219 static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
1220 {
1221 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1222 			  bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
1223 			  seq_cft(sf)->private, false);
1224 	return 0;
1225 }
1226 
1227 static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
1228 			       int off)
1229 {
1230 	struct bfq_group *bfqg = blkg_to_bfqg(pd->blkg);
1231 	u64 sum = blkg_rwstat_total(&bfqg->stats.bytes);
1232 
1233 	return __blkg_prfill_u64(sf, pd, sum >> 9);
1234 }
1235 
1236 static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
1237 {
1238 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1239 			  bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
1240 	return 0;
1241 }
1242 
1243 static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
1244 					 struct blkg_policy_data *pd, int off)
1245 {
1246 	struct blkg_rwstat_sample tmp;
1247 
1248 	blkg_rwstat_recursive_sum(pd->blkg, &blkcg_policy_bfq,
1249 			offsetof(struct bfq_group, stats.bytes), &tmp);
1250 
1251 	return __blkg_prfill_u64(sf, pd,
1252 		(tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]) >> 9);
1253 }
1254 
1255 static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
1256 {
1257 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1258 			  bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
1259 			  false);
1260 	return 0;
1261 }
1262 
1263 static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
1264 				      struct blkg_policy_data *pd, int off)
1265 {
1266 	struct bfq_group *bfqg = pd_to_bfqg(pd);
1267 	u64 samples = bfq_stat_read(&bfqg->stats.avg_queue_size_samples);
1268 	u64 v = 0;
1269 
1270 	if (samples) {
1271 		v = bfq_stat_read(&bfqg->stats.avg_queue_size_sum);
1272 		v = div64_u64(v, samples);
1273 	}
1274 	__blkg_prfill_u64(sf, pd, v);
1275 	return 0;
1276 }
1277 
1278 /* print avg_queue_size */
1279 static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
1280 {
1281 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1282 			  bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
1283 			  0, false);
1284 	return 0;
1285 }
1286 #endif /* CONFIG_BFQ_CGROUP_DEBUG */
1287 
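/*
 * Activate the bfq blkcg policy on @bfqd's request queue and return the
 * bfq_group attached to the root blkg, or NULL if activation fails.
 */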
1288 struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
1289 {
1290 	int ret;
1291 
1292 	ret = blkcg_activate_policy(bfqd->queue->disk, &blkcg_policy_bfq);
1293 	if (ret)
1294 		return NULL;
1295 
1296 	return blkg_to_bfqg(bfqd->queue->root_blkg);
1297 }
1298 
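/* blkcg policy descriptor hooking up the bfq cpd/pd callbacks defined above */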
1299 struct blkcg_policy blkcg_policy_bfq = {
1300 	.dfl_cftypes		= bfq_blkg_files,
1301 	.legacy_cftypes		= bfq_blkcg_legacy_files,
1302 
1303 	.cpd_alloc_fn		= bfq_cpd_alloc,
1304 	.cpd_init_fn		= bfq_cpd_init,
1305 	.cpd_bind_fn	        = bfq_cpd_init,
1306 	.cpd_free_fn		= bfq_cpd_free,
1307 
1308 	.pd_alloc_fn		= bfq_pd_alloc,
1309 	.pd_init_fn		= bfq_pd_init,
1310 	.pd_offline_fn		= bfq_pd_offline,
1311 	.pd_free_fn		= bfq_pd_free,
1312 	.pd_reset_stats_fn	= bfq_pd_reset_stats,
1313 };
1314 
1315 struct cftype bfq_blkcg_legacy_files[] = {
1316 	{
1317 		.name = "bfq.weight",
1318 		.flags = CFTYPE_NOT_ON_ROOT,
1319 		.seq_show = bfq_io_show_weight_legacy,
1320 		.write_u64 = bfq_io_set_weight_legacy,
1321 	},
1322 	{
1323 		.name = "bfq.weight_device",
1324 		.flags = CFTYPE_NOT_ON_ROOT,
1325 		.seq_show = bfq_io_show_weight,
1326 		.write = bfq_io_set_weight,
1327 	},
1328 
1329 	/* statistics, covers only the tasks in the bfqg */
1330 	{
1331 		.name = "bfq.io_service_bytes",
1332 		.private = offsetof(struct bfq_group, stats.bytes),
1333 		.seq_show = bfqg_print_rwstat,
1334 	},
1335 	{
1336 		.name = "bfq.io_serviced",
1337 		.private = offsetof(struct bfq_group, stats.ios),
1338 		.seq_show = bfqg_print_rwstat,
1339 	},
1340 #ifdef CONFIG_BFQ_CGROUP_DEBUG
1341 	{
1342 		.name = "bfq.time",
1343 		.private = offsetof(struct bfq_group, stats.time),
1344 		.seq_show = bfqg_print_stat,
1345 	},
1346 	{
1347 		.name = "bfq.sectors",
1348 		.seq_show = bfqg_print_stat_sectors,
1349 	},
1350 	{
1351 		.name = "bfq.io_service_time",
1352 		.private = offsetof(struct bfq_group, stats.service_time),
1353 		.seq_show = bfqg_print_rwstat,
1354 	},
1355 	{
1356 		.name = "bfq.io_wait_time",
1357 		.private = offsetof(struct bfq_group, stats.wait_time),
1358 		.seq_show = bfqg_print_rwstat,
1359 	},
1360 	{
1361 		.name = "bfq.io_merged",
1362 		.private = offsetof(struct bfq_group, stats.merged),
1363 		.seq_show = bfqg_print_rwstat,
1364 	},
1365 	{
1366 		.name = "bfq.io_queued",
1367 		.private = offsetof(struct bfq_group, stats.queued),
1368 		.seq_show = bfqg_print_rwstat,
1369 	},
1370 #endif /* CONFIG_BFQ_CGROUP_DEBUG */
1371 
1372 	/* the same statistics which cover the bfqg and its descendants */
1373 	{
1374 		.name = "bfq.io_service_bytes_recursive",
1375 		.private = offsetof(struct bfq_group, stats.bytes),
1376 		.seq_show = bfqg_print_rwstat_recursive,
1377 	},
1378 	{
1379 		.name = "bfq.io_serviced_recursive",
1380 		.private = offsetof(struct bfq_group, stats.ios),
1381 		.seq_show = bfqg_print_rwstat_recursive,
1382 	},
1383 #ifdef CONFIG_BFQ_CGROUP_DEBUG
1384 	{
1385 		.name = "bfq.time_recursive",
1386 		.private = offsetof(struct bfq_group, stats.time),
1387 		.seq_show = bfqg_print_stat_recursive,
1388 	},
1389 	{
1390 		.name = "bfq.sectors_recursive",
1391 		.seq_show = bfqg_print_stat_sectors_recursive,
1392 	},
1393 	{
1394 		.name = "bfq.io_service_time_recursive",
1395 		.private = offsetof(struct bfq_group, stats.service_time),
1396 		.seq_show = bfqg_print_rwstat_recursive,
1397 	},
1398 	{
1399 		.name = "bfq.io_wait_time_recursive",
1400 		.private = offsetof(struct bfq_group, stats.wait_time),
1401 		.seq_show = bfqg_print_rwstat_recursive,
1402 	},
1403 	{
1404 		.name = "bfq.io_merged_recursive",
1405 		.private = offsetof(struct bfq_group, stats.merged),
1406 		.seq_show = bfqg_print_rwstat_recursive,
1407 	},
1408 	{
1409 		.name = "bfq.io_queued_recursive",
1410 		.private = offsetof(struct bfq_group, stats.queued),
1411 		.seq_show = bfqg_print_rwstat_recursive,
1412 	},
1413 	{
1414 		.name = "bfq.avg_queue_size",
1415 		.seq_show = bfqg_print_avg_queue_size,
1416 	},
1417 	{
1418 		.name = "bfq.group_wait_time",
1419 		.private = offsetof(struct bfq_group, stats.group_wait_time),
1420 		.seq_show = bfqg_print_stat,
1421 	},
1422 	{
1423 		.name = "bfq.idle_time",
1424 		.private = offsetof(struct bfq_group, stats.idle_time),
1425 		.seq_show = bfqg_print_stat,
1426 	},
1427 	{
1428 		.name = "bfq.empty_time",
1429 		.private = offsetof(struct bfq_group, stats.empty_time),
1430 		.seq_show = bfqg_print_stat,
1431 	},
1432 	{
1433 		.name = "bfq.dequeue",
1434 		.private = offsetof(struct bfq_group, stats.dequeue),
1435 		.seq_show = bfqg_print_stat,
1436 	},
1437 #endif	/* CONFIG_BFQ_CGROUP_DEBUG */
1438 	{ }	/* terminate */
1439 };
1440 
1441 struct cftype bfq_blkg_files[] = {
1442 	{
1443 		.name = "bfq.weight",
1444 		.flags = CFTYPE_NOT_ON_ROOT,
1445 		.seq_show = bfq_io_show_weight,
1446 		.write = bfq_io_set_weight,
1447 	},
1448 	{} /* terminate */
1449 };
1450 
1451 #else	/* CONFIG_BFQ_GROUP_IOSCHED */
1452 
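/*
 * Stubs used when CONFIG_BFQ_GROUP_IOSCHED is not set: every bfq_queue
 * belongs to the single root group created in bfq_create_group_hierarchy().
 */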
1453 void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1454 		   struct bfq_group *bfqg) {}
1455 
1456 void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
1457 {
1458 	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
1459 
1460 	entity->weight = entity->new_weight;
1461 	entity->orig_weight = entity->new_weight;
1462 	if (bfqq) {
1463 		bfqq->ioprio = bfqq->new_ioprio;
1464 		bfqq->ioprio_class = bfqq->new_ioprio_class;
1465 	}
1466 	entity->sched_data = &bfqg->sched_data;
1467 }
1468 
1469 void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}
1470 
1471 void bfq_end_wr_async(struct bfq_data *bfqd)
1472 {
1473 	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
1474 }
1475 
1476 struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
1477 {
1478 	return bfqd->root_group;
1479 }
1480 
1481 struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
1482 {
1483 	return bfqq->bfqd->root_group;
1484 }
1485 
1486 void bfqg_and_blkg_put(struct bfq_group *bfqg) {}
1487 
1488 struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
1489 {
1490 	struct bfq_group *bfqg;
1491 	int i;
1492 
1493 	bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
1494 	if (!bfqg)
1495 		return NULL;
1496 
1497 	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
1498 		bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
1499 
1500 	return bfqg;
1501 }
1502 #endif	/* CONFIG_BFQ_GROUP_IOSCHED */
1503