/*
 * cgroups support for the BFQ I/O scheduler.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License as
 *  published by the Free Software Foundation; either version 2 of the
 *  License, or (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "bfq-iosched.h"

#ifdef CONFIG_BFQ_GROUP_IOSCHED

/* bfqg stats flags */
enum bfqg_stats_flags {
	BFQG_stats_waiting = 0,
	BFQG_stats_idling,
	BFQG_stats_empty,
};

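/*
 * BFQG_FLAG_FNS(name) generates three small helpers that operate on
 * stats->flags: for instance, BFQG_FLAG_FNS(waiting) below expands to
 * bfqg_stats_mark_waiting(), bfqg_stats_clear_waiting() and
 * bfqg_stats_waiting(), which respectively set, clear and test the
 * BFQG_stats_waiting bit.
 */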
#define BFQG_FLAG_FNS(name)						\
static void bfqg_stats_mark_##name(struct bfqg_stats *stats)	\
{									\
	stats->flags |= (1 << BFQG_stats_##name);			\
}									\
static void bfqg_stats_clear_##name(struct bfqg_stats *stats)	\
{									\
	stats->flags &= ~(1 << BFQG_stats_##name);			\
}									\
static int bfqg_stats_##name(struct bfqg_stats *stats)		\
{									\
	return (stats->flags & (1 << BFQG_stats_##name)) != 0;		\
}									\

BFQG_FLAG_FNS(waiting)
BFQG_FLAG_FNS(idling)
BFQG_FLAG_FNS(empty)
#undef BFQG_FLAG_FNS

/* This should be called with the scheduler lock held. */
static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
{
	unsigned long long now;

	if (!bfqg_stats_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		blkg_stat_add(&stats->group_wait_time,
			      now - stats->start_group_wait_time);
	bfqg_stats_clear_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
						 struct bfq_group *curr_bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_waiting(stats))
		return;
	if (bfqg == curr_bfqg)
		return;
	stats->start_group_wait_time = sched_clock();
	bfqg_stats_mark_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
{
	unsigned long long now;

	if (!bfqg_stats_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		blkg_stat_add(&stats->empty_time,
			      now - stats->start_empty_time);
	bfqg_stats_clear_empty(stats);
}

void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
{
	blkg_stat_add(&bfqg->stats.dequeue, 1);
}

void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (blkg_rwstat_total(&stats->queued))
		return;

	/*
	 * The group is already marked empty. This can happen if bfqq
	 * got a new request in its parent group and was moved to this
	 * group while being added to the service tree. Just ignore the
	 * event and move on.
	 */
	if (bfqg_stats_empty(stats))
		return;

	stats->start_empty_time = sched_clock();
	bfqg_stats_mark_empty(stats);
}

void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_idling(stats)) {
		unsigned long long now = sched_clock();

		if (time_after64(now, stats->start_idle_time))
			blkg_stat_add(&stats->idle_time,
				      now - stats->start_idle_time);
		bfqg_stats_clear_idling(stats);
	}
}

void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	stats->start_idle_time = sched_clock();
	bfqg_stats_mark_idling(stats);
}

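/*
 * Sample the number of queued requests for the average-queue-size
 * statistic; the average itself is computed lazily, at read time, in
 * bfqg_prfill_avg_queue_size() below, as sum / samples.
 */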
void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	blkg_stat_add(&stats->avg_queue_size_sum,
		      blkg_rwstat_total(&stats->queued));
	blkg_stat_add(&stats->avg_queue_size_samples, 1);
	bfqg_stats_update_group_wait_time(stats);
}

/*
 * blk-cgroup policy-related handlers
 * The following functions help in converting between blk-cgroup
 * internal structures and BFQ-specific structures.
 */

static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct bfq_group, pd) : NULL;
}

struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
{
	return pd_to_blkg(&bfqg->pd);
}

static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
{
	return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
}

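/*
 * These conversions are pure container_of()/pointer arithmetic, so for
 * any bfq_group attached to a blkg the round trip
 * blkg_to_bfqg(bfqg_to_blkg(bfqg)) yields bfqg again.
 */
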
/*
 * bfq_group handlers
 * The following functions help in navigating the bfq_group hierarchy
 * by making it possible to find the parent of a bfq_group or the
 * bfq_group associated with a bfq_queue.
 */

static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
{
	struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;

	return pblkg ? blkg_to_bfqg(pblkg) : NULL;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	struct bfq_entity *group_entity = bfqq->entity.parent;

	return group_entity ? container_of(group_entity, struct bfq_group,
					   entity) :
			      bfqq->bfqd->root_group;
}

/*
 * The following functions handle get and put of a bfq_group:
 * bfqg_get() and bfqg_put() act on the private refcounter of the
 * bfq_group, while bfqg_and_blkg_get() and bfqg_and_blkg_put() also
 * pin and release the associated blkg through the blk-cgroup hooks.
 */

static void bfqg_get(struct bfq_group *bfqg)
{
	bfqg->ref++;
}

void bfqg_put(struct bfq_group *bfqg)
{
	bfqg->ref--;

	if (bfqg->ref == 0)
		kfree(bfqg);
}

static void bfqg_and_blkg_get(struct bfq_group *bfqg)
{
	/* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
	bfqg_get(bfqg);

	blkg_get(bfqg_to_blkg(bfqg));
}

void bfqg_and_blkg_put(struct bfq_group *bfqg)
{
	bfqg_put(bfqg);

	blkg_put(bfqg_to_blkg(bfqg));
}

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.queued, op, 1);
	bfqg_stats_end_empty_time(&bfqg->stats);
	if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
		bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
}

void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.queued, op, -1);
}

void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.merged, op, 1);
}

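/*
 * On completion, charge the time the request spent on the device
 * (now - io_start_time) to service_time, and the time it previously
 * spent waiting in the scheduler (io_start_time - start_time) to
 * wait_time.
 */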
void bfqg_stats_update_completion(struct bfq_group *bfqg, uint64_t start_time,
				  uint64_t io_start_time, unsigned int op)
{
	struct bfqg_stats *stats = &bfqg->stats;
	unsigned long long now = sched_clock();

	if (time_after64(now, io_start_time))
		blkg_rwstat_add(&stats->service_time, op,
				now - io_start_time);
	if (time_after64(io_start_time, start_time))
		blkg_rwstat_add(&stats->wait_time, op,
				io_start_time - start_time);
}

/* @stats = 0 */
static void bfqg_stats_reset(struct bfqg_stats *stats)
{
	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	blkg_stat_reset(&stats->time);
	blkg_stat_reset(&stats->avg_queue_size_sum);
	blkg_stat_reset(&stats->avg_queue_size_samples);
	blkg_stat_reset(&stats->dequeue);
	blkg_stat_reset(&stats->group_wait_time);
	blkg_stat_reset(&stats->idle_time);
	blkg_stat_reset(&stats->empty_time);
}

/* @to += @from */
static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
{
	if (!to || !from)
		return;

	/* queued stats shouldn't be transferred */
	blkg_rwstat_add_aux(&to->merged, &from->merged);
	blkg_rwstat_add_aux(&to->service_time, &from->service_time);
	blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
	blkg_stat_add_aux(&to->time, &from->time);
	blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
	blkg_stat_add_aux(&to->avg_queue_size_samples,
			  &from->avg_queue_size_samples);
	blkg_stat_add_aux(&to->dequeue, &from->dequeue);
	blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
	blkg_stat_add_aux(&to->idle_time, &from->idle_time);
	blkg_stat_add_aux(&to->empty_time, &from->empty_time);
}

/*
 * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
 * recursive stats can still account for the amount used by this bfqg after
 * it's gone.
 */
static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
{
	struct bfq_group *parent;

	if (!bfqg) /* root_group */
		return;

	parent = bfqg_parent(bfqg);

	lockdep_assert_held(bfqg_to_blkg(bfqg)->q->queue_lock);

	if (unlikely(!parent))
		return;

	bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
	bfqg_stats_reset(&bfqg->stats);
}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
		/*
		 * Make sure that bfqg and its associated blkg do not
		 * disappear before entity.
		 */
		bfqg_and_blkg_get(bfqg);
	}
	entity->parent = bfqg->my_entity; /* NULL for root group */
	entity->sched_data = &bfqg->sched_data;
}

static void bfqg_stats_exit(struct bfqg_stats *stats)
{
	blkg_rwstat_exit(&stats->merged);
	blkg_rwstat_exit(&stats->service_time);
	blkg_rwstat_exit(&stats->wait_time);
	blkg_rwstat_exit(&stats->queued);
	blkg_stat_exit(&stats->time);
	blkg_stat_exit(&stats->avg_queue_size_sum);
	blkg_stat_exit(&stats->avg_queue_size_samples);
	blkg_stat_exit(&stats->dequeue);
	blkg_stat_exit(&stats->group_wait_time);
	blkg_stat_exit(&stats->idle_time);
	blkg_stat_exit(&stats->empty_time);
}

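/*
 * Allocate the percpu counters backing all the stats above; if any
 * allocation fails, tear down the counters already set up and report
 * -ENOMEM.
 */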
static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
{
	if (blkg_rwstat_init(&stats->merged, gfp) ||
	    blkg_rwstat_init(&stats->service_time, gfp) ||
	    blkg_rwstat_init(&stats->wait_time, gfp) ||
	    blkg_rwstat_init(&stats->queued, gfp) ||
	    blkg_stat_init(&stats->time, gfp) ||
	    blkg_stat_init(&stats->avg_queue_size_sum, gfp) ||
	    blkg_stat_init(&stats->avg_queue_size_samples, gfp) ||
	    blkg_stat_init(&stats->dequeue, gfp) ||
	    blkg_stat_init(&stats->group_wait_time, gfp) ||
	    blkg_stat_init(&stats->idle_time, gfp) ||
	    blkg_stat_init(&stats->empty_time, gfp)) {
		bfqg_stats_exit(stats);
		return -ENOMEM;
	}

	return 0;
}

static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
{
	return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
}

static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
{
	return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
}

struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
{
	struct bfq_group_data *bgd;

	bgd = kzalloc(sizeof(*bgd), gfp);
	if (!bgd)
		return NULL;
	return &bgd->pd;
}

void bfq_cpd_init(struct blkcg_policy_data *cpd)
{
	struct bfq_group_data *d = cpd_to_bfqgd(cpd);

	d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
		CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL;
}

void bfq_cpd_free(struct blkcg_policy_data *cpd)
{
	kfree(cpd_to_bfqgd(cpd));
}

struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
{
	struct bfq_group *bfqg;

	bfqg = kzalloc_node(sizeof(*bfqg), gfp, node);
	if (!bfqg)
		return NULL;

	if (bfqg_stats_init(&bfqg->stats, gfp)) {
		kfree(bfqg);
		return NULL;
	}

	/* see comments in bfq_bic_update_cgroup for why refcounting */
	bfqg_get(bfqg);
	return &bfqg->pd;
}

void bfq_pd_init(struct blkg_policy_data *pd)
{
	struct blkcg_gq *blkg = pd_to_blkg(pd);
	struct bfq_group *bfqg = blkg_to_bfqg(blkg);
	struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
	struct bfq_entity *entity = &bfqg->entity;
	struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);

	entity->orig_weight = entity->weight = entity->new_weight = d->weight;
	entity->my_sched_data = &bfqg->sched_data;
	bfqg->my_entity = entity; /*
				   * the root_group's will be set to NULL
				   * in bfq_init_queue()
				   */
	bfqg->bfqd = bfqd;
	bfqg->active_entities = 0;
	bfqg->rq_pos_tree = RB_ROOT;
}

void bfq_pd_free(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_exit(&bfqg->stats);
	bfqg_put(bfqg);
}

void bfq_pd_reset_stats(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_reset(&bfqg->stats);
}

static void bfq_group_set_parent(struct bfq_group *bfqg,
					struct bfq_group *parent)
{
	struct bfq_entity *entity;

	entity = &bfqg->entity;
	entity->parent = parent->my_entity;
	entity->sched_data = &parent->sched_data;
}

static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd,
					 struct blkcg *blkcg)
{
	struct blkcg_gq *blkg;

	blkg = blkg_lookup(blkcg, bfqd->queue);
	if (likely(blkg))
		return blkg_to_bfqg(blkg);
	return NULL;
}

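/*
 * Look up the bfq_group corresponding to @blkcg on @bfqd's queue and,
 * if found, make sure that the group and all its ancestors are hooked
 * to the private hierarchy of BFQ (a freshly created leaf group may
 * not be connected yet). Returns NULL if no group exists.
 */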
struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
				     struct blkcg *blkcg)
{
	struct bfq_group *bfqg, *parent;
	struct bfq_entity *entity;

	bfqg = bfq_lookup_bfqg(bfqd, blkcg);

	if (unlikely(!bfqg))
		return NULL;

	/*
	 * Update chain of bfq_groups as we might be handling a leaf group
	 * which, along with some of its relatives, has not been hooked yet
	 * to the private hierarchy of BFQ.
	 */
	entity = &bfqg->entity;
	for_each_entity(entity) {
		bfqg = container_of(entity, struct bfq_group, entity);
		if (bfqg != bfqd->root_group) {
			parent = bfqg_parent(bfqg);
			if (!parent)
				parent = bfqd->root_group;
			bfq_group_set_parent(bfqg, parent);
		}
	}

	return bfqg;
}

/**
 * bfq_bfqq_move - migrate @bfqq to @bfqg.
 * @bfqd: queue descriptor.
 * @bfqq: the queue to move.
 * @bfqg: the group to move to.
 *
 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
 * it on the new one.  Avoid putting the entity on the old group idle tree.
 *
 * Must be called under the scheduler lock, to make sure that the blkg
 * owning @bfqg does not disappear (see comments in
 * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
 * objects).
 */
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg)
{
	struct bfq_entity *entity = &bfqq->entity;

	/* If bfqq is empty, then bfq_bfqq_expire also invokes
	 * bfq_del_bfqq_busy, thereby removing bfqq and its entity
	 * from data structures related to current group. Otherwise we
	 * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
	 * we do below.
	 */
	if (bfqq == bfqd->in_service_queue)
		bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
				false, BFQQE_PREEMPTED);

	if (bfq_bfqq_busy(bfqq))
		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
	else if (entity->on_st)
		bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
	bfqg_and_blkg_put(bfqq_group(bfqq));

	entity->parent = bfqg->my_entity;
	entity->sched_data = &bfqg->sched_data;
	/* pin down bfqg and its associated blkg  */
	bfqg_and_blkg_get(bfqg);

	if (bfq_bfqq_busy(bfqq)) {
		bfq_pos_tree_add_move(bfqd, bfqq);
		bfq_activate_bfqq(bfqd, bfqq);
	}

	if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
		bfq_schedule_dispatch(bfqd);
}

/**
 * __bfq_bic_change_cgroup - move @bic to @blkcg.
 * @bfqd: the queue descriptor.
 * @bic: the bic to move.
 * @blkcg: the blk-cgroup to move to.
 *
 * Move bic to blkcg, assuming that bfqd->lock is held; the lock makes
 * sure that the reference to the cgroup remains valid across the call
 * (see comments in bfq_bic_update_cgroup on this issue).
 *
 * NOTE: an alternative approach might have been to store the current
 * cgroup in bfqq and to take a reference to it, reducing the lookup
 * time here, at the price of slightly more complex code.
 */
static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
						struct bfq_io_cq *bic,
						struct blkcg *blkcg)
{
	struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
	struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
	struct bfq_group *bfqg;
	struct bfq_entity *entity;

	bfqg = bfq_find_set_group(bfqd, blkcg);

	if (unlikely(!bfqg))
		bfqg = bfqd->root_group;

	if (async_bfqq) {
		entity = &async_bfqq->entity;

		if (entity->sched_data != &bfqg->sched_data) {
			/*
			 * Drop the bic's reference to the async queue:
			 * a new async queue will be created in the
			 * right group on the next I/O.
			 */
			bic_set_bfqq(bic, NULL, 0);
			bfq_log_bfqq(bfqd, async_bfqq,
				     "bic_change_group: %p %d",
				     async_bfqq, async_bfqq->ref);
			bfq_put_queue(async_bfqq);
		}
	}

	if (sync_bfqq) {
		entity = &sync_bfqq->entity;
		if (entity->sched_data != &bfqg->sched_data)
			bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
	}

	return bfqg;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
{
	struct bfq_data *bfqd = bic_to_bfqd(bic);
	struct bfq_group *bfqg = NULL;
	uint64_t serial_nr;

	rcu_read_lock();
	serial_nr = bio_blkcg(bio)->css.serial_nr;

	/*
	 * Check whether blkcg has changed.  The condition may trigger
	 * spuriously on a newly created bic but there's no harm.
	 */
	if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
		goto out;

	bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
	/*
	 * Update blkg_path for bfq_log_* functions. We cache this
	 * path, and update it here, for the following
	 * reasons. Operations on blkg objects in blk-cgroup are
	 * protected with the request_queue lock, and not with the
	 * lock that protects the instances of this scheduler
	 * (bfqd->lock). This exposes BFQ to the following sort of
	 * race.
	 *
	 * The blkg_lookup performed in bfq_get_queue, protected
	 * through rcu, may happen to return the address of a copy of
	 * the original blkg. If this is the case, then the
	 * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
	 * the blkg, is useless: it does not prevent blk-cgroup code
	 * from destroying both the original blkg and all objects
	 * directly or indirectly referred by the copy of the
	 * blkg.
	 *
	 * On the bright side, destroy operations on a blkg invoke, as
	 * a first step, hooks of the scheduler associated with the
	 * blkg. And these hooks are executed with bfqd->lock held for
	 * BFQ. As a consequence, for any blkg associated with the
	 * request queue this instance of the scheduler is attached
	 * to, we are guaranteed that such a blkg is not destroyed, and
	 * that all the pointers it contains are consistent, while we
	 * are holding bfqd->lock. A blkg_lookup performed with
	 * bfqd->lock held then returns a fully consistent blkg, which
	 * remains consistent as long as this lock is held.
	 *
	 * Thanks to the last fact, and to the fact that: (1) bfqg has
	 * been obtained through a blkg_lookup in the above
	 * assignment, and (2) bfqd->lock is being held, here we can
	 * safely use the policy data for the involved blkg (i.e., the
	 * field bfqg->pd) to get to the blkg associated with bfqg,
	 * and then we can safely use any field of blkg. After we
	 * release bfqd->lock, even just getting blkg through this
	 * bfqg may cause dangling references to be traversed, as
	 * bfqg->pd may not exist any more.
	 *
	 * In view of the above facts, here we cache, in the bfqg, any
	 * blkg data we may need for this bic, and for its associated
	 * bfq_queue. As of now, we need to cache only the path of the
	 * blkg, which is used in the bfq_log_* functions.
	 *
	 * Finally, note that bfqg itself needs to be protected from
	 * destruction on the blkg_free of the original blkg (which
	 * invokes bfq_pd_free). We use an additional private
	 * refcounter for bfqg, to let it disappear only after no
	 * bfq_queue refers to it any longer.
	 */
	blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
	bic->blkcg_serial_nr = serial_nr;
out:
	rcu_read_unlock();
}

/**
 * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
 * @st: the service tree being flushed.
 */
static void bfq_flush_idle_tree(struct bfq_service_tree *st)
{
	struct bfq_entity *entity = st->first_idle;

	for (; entity ; entity = st->first_idle)
		__bfq_deactivate_entity(entity, false);
}

/**
 * bfq_reparent_leaf_entity - move leaf entity to the root_group.
 * @bfqd: the device data structure with the root group.
 * @entity: the entity to move.
 */
static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
				     struct bfq_entity *entity)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
}

/**
 * bfq_reparent_active_entities - move to the root group all active
 *                                entities.
 * @bfqd: the device data structure with the root group.
 * @bfqg: the group to move from.
 * @st: the service tree with the entities.
 */
static void bfq_reparent_active_entities(struct bfq_data *bfqd,
					 struct bfq_group *bfqg,
					 struct bfq_service_tree *st)
{
	struct rb_root *active = &st->active;
	struct bfq_entity *entity = NULL;

	if (!RB_EMPTY_ROOT(&st->active))
		entity = bfq_entity_of(rb_first(active));

	for (; entity ; entity = bfq_entity_of(rb_first(active)))
		bfq_reparent_leaf_entity(bfqd, entity);

	if (bfqg->sched_data.in_service_entity)
		bfq_reparent_leaf_entity(bfqd,
			bfqg->sched_data.in_service_entity);
}

/**
 * bfq_pd_offline - deactivate the entity associated with @pd,
 *		    and reparent its children entities.
 * @pd: descriptor of the policy going offline.
 *
 * blkio already grabs the queue_lock for us, so no need to use
 * RCU-based magic
 */
void bfq_pd_offline(struct blkg_policy_data *pd)
{
	struct bfq_service_tree *st;
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	struct bfq_data *bfqd = bfqg->bfqd;
	struct bfq_entity *entity = bfqg->my_entity;
	unsigned long flags;
	int i;

	if (!entity) /* root group */
		return;

	spin_lock_irqsave(&bfqd->lock, flags);
	/*
	 * Empty all service_trees belonging to this group before
	 * deactivating the group itself.
	 */
	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
		st = bfqg->sched_data.service_tree + i;

		/*
		 * The idle tree may still contain bfq_queues
		 * belonging to exited tasks, because they never
		 * migrated to a different cgroup from the one being
		 * destroyed now.
		 */
		bfq_flush_idle_tree(st);

		/*
		 * It may happen that some queues are still active
		 * (busy) upon group destruction (if the corresponding
		 * processes have been forced to terminate). We move
		 * all the leaf entities corresponding to these queues
		 * to the root_group.
		 * Also, it may happen that the group has an entity
		 * in service, which is disconnected from the active
		 * tree: it must be moved, too.
		 * There is no need to put the sync queues, as the
		 * scheduler has taken no reference.
		 */
		bfq_reparent_active_entities(bfqd, bfqg, st);
	}

	__bfq_deactivate_entity(entity, false);
	bfq_put_async_queues(bfqd, bfqg);

	spin_unlock_irqrestore(&bfqd->lock, flags);
	/*
	 * @blkg is going offline and will be ignored by
	 * blkg_[rw]stat_recursive_sum().  Transfer stats to the parent so
	 * that they don't get lost.  If IOs complete after this point, the
	 * stats for them will be lost.  Oh well...
	 */
	bfqg_stats_xfer_dead(bfqg);
}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	struct blkcg_gq *blkg;

	list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		bfq_end_wr_async_queues(bfqd, bfqg);
	}
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

static int bfq_io_show_weight(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	unsigned int val = 0;

	if (bfqgd)
		val = bfqgd->weight;

	seq_printf(sf, "%u\n", val);

	return 0;
}

static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
				    struct cftype *cftype,
				    u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	struct blkcg_gq *blkg;
	int ret = -ERANGE;

	if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
		return ret;

	ret = 0;
	spin_lock_irq(&blkcg->lock);
	bfqgd->weight = (unsigned short)val;
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		if (!bfqg)
			continue;
		/*
		 * Setting the prio_changed flag of the entity
		 * to 1 with new_weight == weight would re-set
		 * the value of the weight to its ioprio mapping.
		 * Set the flag only if necessary.
		 */
		if ((unsigned short)val != bfqg->entity.new_weight) {
			bfqg->entity.new_weight = (unsigned short)val;
			/*
			 * Make sure that the above new value has been
			 * stored in bfqg->entity.new_weight before
			 * setting the prio_changed flag. In fact,
			 * this flag may be read asynchronously (in
			 * critical sections protected by a different
			 * lock than that held here), and finding this
			 * flag set may cause the execution of the code
			 * for updating parameters whose value may
			 * depend also on bfqg->entity.new_weight (in
			 * __bfq_entity_update_weight_prio).
			 * This barrier makes sure that the new value
			 * of bfqg->entity.new_weight is correctly
			 * seen in that code.
			 */
			smp_wmb();
			bfqg->entity.prio_changed = 1;
		}
	}
	spin_unlock_irq(&blkcg->lock);

	return ret;
}

static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
				 char *buf, size_t nbytes,
				 loff_t off)
{
	u64 weight;
	/* First unsigned long found in the file is used */
	int ret = kstrtoull(strim(buf), 0, &weight);

	if (ret)
		return ret;

	return bfq_io_set_weight_legacy(of_css(of), NULL, weight);
}
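
/*
 * Example usage (the mount points and the cgroup name "grp" below are
 * just typical defaults, not something this file mandates): on the
 * legacy hierarchy the weight is written through the blkio controller,
 * e.g.
 *
 *	echo 200 > /sys/fs/cgroup/blkio/grp/blkio.bfq.weight
 *
 * while on the default hierarchy the same attribute is exposed as
 *
 *	echo 200 > /sys/fs/cgroup/grp/io.bfq.weight
 *
 * Values outside [BFQ_MIN_WEIGHT, BFQ_MAX_WEIGHT] are rejected with
 * -ERANGE.
 */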

static int bfqg_print_stat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, false);
	return 0;
}

static int bfqg_print_rwstat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, true);
	return 0;
}

static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd),
					  &blkcg_policy_bfq, off);
	return __blkg_prfill_u64(sf, pd, sum);
}

static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
					struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd),
							   &blkcg_policy_bfq,
							   off);
	return __blkg_prfill_rwstat(sf, pd, &sum);
}

static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, false);
	return 0;
}

static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, true);
	return 0;
}

static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);

	return __blkg_prfill_u64(sf, pd, sum >> 9); /* bytes to sectors */
}

static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
	return 0;
}

static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
					 struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL,
					offsetof(struct blkcg_gq, stat_bytes));
	u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);

	return __blkg_prfill_u64(sf, pd, sum >> 9); /* bytes to sectors */
}

static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
			  false);
	return 0;
}

static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	u64 samples = blkg_stat_read(&bfqg->stats.avg_queue_size_samples);
	u64 v = 0;

	if (samples) {
		v = blkg_stat_read(&bfqg->stats.avg_queue_size_sum);
		v = div64_u64(v, samples);
	}
	__blkg_prfill_u64(sf, pd, v);
	return 0;
}

/* print avg_queue_size */
static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
			  0, false);
	return 0;
}

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	int ret;

	ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq);
	if (ret)
		return NULL;

	return blkg_to_bfqg(bfqd->queue->root_blkg);
}

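/*
 * Hooks invoked by the blk-cgroup core: the cpd_* callbacks manage the
 * per-blkcg policy data (holding the configured weight), while the
 * pd_* callbacks manage the per-(blkcg, request_queue) policy data
 * embedded in each bfq_group, following the usual
 * alloc -> init -> offline -> free lifecycle.
 */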
struct blkcg_policy blkcg_policy_bfq = {
	.dfl_cftypes		= bfq_blkg_files,
	.legacy_cftypes		= bfq_blkcg_legacy_files,

	.cpd_alloc_fn		= bfq_cpd_alloc,
	.cpd_init_fn		= bfq_cpd_init,
	.cpd_bind_fn		= bfq_cpd_init,
	.cpd_free_fn		= bfq_cpd_free,

	.pd_alloc_fn		= bfq_pd_alloc,
	.pd_init_fn		= bfq_pd_init,
	.pd_offline_fn		= bfq_pd_offline,
	.pd_free_fn		= bfq_pd_free,
	.pd_reset_stats_fn	= bfq_pd_reset_stats,
};

struct cftype bfq_blkcg_legacy_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write_u64 = bfq_io_set_weight_legacy,
	},

	/* statistics, covering only the tasks in the bfqg */
	{
		.name = "bfq.time",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.sectors",
		.seq_show = bfqg_print_stat_sectors,
	},
	{
		.name = "bfq.io_service_bytes",
		.private = (unsigned long)&blkcg_policy_bfq,
		.seq_show = blkg_print_stat_bytes,
	},
	{
		.name = "bfq.io_serviced",
		.private = (unsigned long)&blkcg_policy_bfq,
		.seq_show = blkg_print_stat_ios,
	},
	{
		.name = "bfq.io_service_time",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_wait_time",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_merged",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_queued",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat,
	},

	/* the same statistics, covering the bfqg and its descendants */
	{
		.name = "bfq.time_recursive",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat_recursive,
	},
	{
		.name = "bfq.sectors_recursive",
		.seq_show = bfqg_print_stat_sectors_recursive,
	},
	{
		.name = "bfq.io_service_bytes_recursive",
		.private = (unsigned long)&blkcg_policy_bfq,
		.seq_show = blkg_print_stat_bytes_recursive,
	},
	{
		.name = "bfq.io_serviced_recursive",
		.private = (unsigned long)&blkcg_policy_bfq,
		.seq_show = blkg_print_stat_ios_recursive,
	},
	{
		.name = "bfq.io_service_time_recursive",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_wait_time_recursive",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_merged_recursive",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_queued_recursive",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.avg_queue_size",
		.seq_show = bfqg_print_avg_queue_size,
	},
	{
		.name = "bfq.group_wait_time",
		.private = offsetof(struct bfq_group, stats.group_wait_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.idle_time",
		.private = offsetof(struct bfq_group, stats.idle_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.empty_time",
		.private = offsetof(struct bfq_group, stats.empty_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.dequeue",
		.private = offsetof(struct bfq_group, stats.dequeue),
		.seq_show = bfqg_print_stat,
	},
	{ }	/* terminate */
};

struct cftype bfq_blkg_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write = bfq_io_set_weight,
	},
	{} /* terminate */
};

#else	/* CONFIG_BFQ_GROUP_IOSCHED */

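/*
 * When group scheduling is not configured, the hooks above collapse
 * into the no-op stubs below, and all queues are attached to the
 * single root group allocated in bfq_create_group_hierarchy().
 */
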
void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      unsigned int op) { }
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_completion(struct bfq_group *bfqg, uint64_t start_time,
				  uint64_t io_start_time, unsigned int op) { }
void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }

void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg) {}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
	}
	entity->sched_data = &bfqg->sched_data;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg *blkcg)
{
	return bfqd->root_group;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	return bfqq->bfqd->root_group;
}

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	struct bfq_group *bfqg;
	int i;

	bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
	if (!bfqg)
		return NULL;

	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
		bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;

	return bfqg;
}
#endif	/* CONFIG_BFQ_GROUP_IOSCHED */