xref: /linux/block/bfq-cgroup.c (revision cfd47302ac64b595beb0a67a337b81942146448a)
1  // SPDX-License-Identifier: GPL-2.0-or-later
2  /*
3   * cgroups support for the BFQ I/O scheduler.
4   */
5  #include <linux/module.h>
6  #include <linux/slab.h>
7  #include <linux/blkdev.h>
8  #include <linux/cgroup.h>
9  #include <linux/ktime.h>
10  #include <linux/rbtree.h>
11  #include <linux/ioprio.h>
12  #include <linux/sbitmap.h>
13  #include <linux/delay.h>
14  
15  #include "elevator.h"
16  #include "bfq-iosched.h"
17  
18  #ifdef CONFIG_BFQ_CGROUP_DEBUG
19  static int bfq_stat_init(struct bfq_stat *stat, gfp_t gfp)
20  {
21  	int ret;
22  
23  	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
24  	if (ret)
25  		return ret;
26  
27  	atomic64_set(&stat->aux_cnt, 0);
28  	return 0;
29  }
30  
31  static void bfq_stat_exit(struct bfq_stat *stat)
32  {
33  	percpu_counter_destroy(&stat->cpu_cnt);
34  }
35  
36  /**
37   * bfq_stat_add - add a value to a bfq_stat
38   * @stat: target bfq_stat
39   * @val: value to add
40   *
41   * Add @val to @stat.  The caller must ensure that IRQs on the same CPU
42   * don't re-enter this function for the same counter.
43   */
44  static inline void bfq_stat_add(struct bfq_stat *stat, uint64_t val)
45  {
46  	percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
47  }
48  
49  /**
50   * bfq_stat_read - read the current value of a bfq_stat
51   * @stat: bfq_stat to read
52   */
53  static inline uint64_t bfq_stat_read(struct bfq_stat *stat)
54  {
55  	return percpu_counter_sum_positive(&stat->cpu_cnt);
56  }
57  
58  /**
59   * bfq_stat_reset - reset a bfq_stat
60   * @stat: bfq_stat to reset
61   */
62  static inline void bfq_stat_reset(struct bfq_stat *stat)
63  {
64  	percpu_counter_set(&stat->cpu_cnt, 0);
65  	atomic64_set(&stat->aux_cnt, 0);
66  }
67  
68  /**
69   * bfq_stat_add_aux - add a bfq_stat into another's aux count
70   * @to: the destination bfq_stat
71   * @from: the source
72   *
73   * Add @from's count including the aux one to @to's aux count.
74   */
75  static inline void bfq_stat_add_aux(struct bfq_stat *to,
76  				     struct bfq_stat *from)
77  {
78  	atomic64_add(bfq_stat_read(from) + atomic64_read(&from->aux_cnt),
79  		     &to->aux_cnt);
80  }
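/*
 * For example, the "recursive" readers below (see bfqg_prfill_stat_recursive())
 * compute each group's contribution as
 *	bfq_stat_read(stat) + atomic64_read(&stat->aux_cnt),
 * so values folded into aux_cnt here keep being reported even after the
 * source group's per-cpu counter is gone.
 */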
81  
82  /**
83   * blkg_prfill_stat - prfill callback for bfq_stat
84   * @sf: seq_file to print to
85   * @pd: policy private data of interest
86   * @off: offset to the bfq_stat in @pd
87   *
88   * prfill callback for printing a bfq_stat.
89   */
90  static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
91  		int off)
92  {
93  	return __blkg_prfill_u64(sf, pd, bfq_stat_read((void *)pd + off));
94  }
95  
96  /* bfqg stats flags */
97  enum bfqg_stats_flags {
98  	BFQG_stats_waiting = 0,
99  	BFQG_stats_idling,
100  	BFQG_stats_empty,
101  };
102  
103  #define BFQG_FLAG_FNS(name)						\
104  static void bfqg_stats_mark_##name(struct bfqg_stats *stats)	\
105  {									\
106  	stats->flags |= (1 << BFQG_stats_##name);			\
107  }									\
108  static void bfqg_stats_clear_##name(struct bfqg_stats *stats)	\
109  {									\
110  	stats->flags &= ~(1 << BFQG_stats_##name);			\
111  }									\
112  static int bfqg_stats_##name(struct bfqg_stats *stats)		\
113  {									\
114  	return (stats->flags & (1 << BFQG_stats_##name)) != 0;		\
115  }									\
116  
117  BFQG_FLAG_FNS(waiting)
118  BFQG_FLAG_FNS(idling)
119  BFQG_FLAG_FNS(empty)
120  #undef BFQG_FLAG_FNS
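/*
 * For instance, BFQG_FLAG_FNS(waiting) above expands (roughly) to:
 *
 *	static void bfqg_stats_mark_waiting(struct bfqg_stats *stats)
 *	{
 *		stats->flags |= (1 << BFQG_stats_waiting);
 *	}
 *	static void bfqg_stats_clear_waiting(struct bfqg_stats *stats)
 *	{
 *		stats->flags &= ~(1 << BFQG_stats_waiting);
 *	}
 *	static int bfqg_stats_waiting(struct bfqg_stats *stats)
 *	{
 *		return (stats->flags & (1 << BFQG_stats_waiting)) != 0;
 *	}
 */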
121  
122  /* This should be called with the scheduler lock held. */
123  static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
124  {
125  	u64 now;
126  
127  	if (!bfqg_stats_waiting(stats))
128  		return;
129  
130  	now = blk_time_get_ns();
131  	if (now > stats->start_group_wait_time)
132  		bfq_stat_add(&stats->group_wait_time,
133  			      now - stats->start_group_wait_time);
134  	bfqg_stats_clear_waiting(stats);
135  }
136  
137  /* This should be called with the scheduler lock held. */
138  static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
139  						 struct bfq_group *curr_bfqg)
140  {
141  	struct bfqg_stats *stats = &bfqg->stats;
142  
143  	if (bfqg_stats_waiting(stats))
144  		return;
145  	if (bfqg == curr_bfqg)
146  		return;
147  	stats->start_group_wait_time = blk_time_get_ns();
148  	bfqg_stats_mark_waiting(stats);
149  }
150  
151  /* This should be called with the scheduler lock held. */
152  static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
153  {
154  	u64 now;
155  
156  	if (!bfqg_stats_empty(stats))
157  		return;
158  
159  	now = blk_time_get_ns();
160  	if (now > stats->start_empty_time)
161  		bfq_stat_add(&stats->empty_time,
162  			      now - stats->start_empty_time);
163  	bfqg_stats_clear_empty(stats);
164  }
165  
166  void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
167  {
168  	bfq_stat_add(&bfqg->stats.dequeue, 1);
169  }
170  
171  void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
172  {
173  	struct bfqg_stats *stats = &bfqg->stats;
174  
175  	if (blkg_rwstat_total(&stats->queued))
176  		return;
177  
178  	/*
179  	 * The group is already marked empty. This can happen if bfqq got a new
180  	 * request in the parent group and moved to this group while being added
181  	 * to the service tree. Just ignore the event and move on.
182  	 */
183  	if (bfqg_stats_empty(stats))
184  		return;
185  
186  	stats->start_empty_time = blk_time_get_ns();
187  	bfqg_stats_mark_empty(stats);
188  }
189  
190  void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
191  {
192  	struct bfqg_stats *stats = &bfqg->stats;
193  
194  	if (bfqg_stats_idling(stats)) {
195  		u64 now = blk_time_get_ns();
196  
197  		if (now > stats->start_idle_time)
198  			bfq_stat_add(&stats->idle_time,
199  				      now - stats->start_idle_time);
200  		bfqg_stats_clear_idling(stats);
201  	}
202  }
203  
204  void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
205  {
206  	struct bfqg_stats *stats = &bfqg->stats;
207  
208  	stats->start_idle_time = blk_time_get_ns();
209  	bfqg_stats_mark_idling(stats);
210  }
211  
212  void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
213  {
214  	struct bfqg_stats *stats = &bfqg->stats;
215  
216  	bfq_stat_add(&stats->avg_queue_size_sum,
217  		      blkg_rwstat_total(&stats->queued));
218  	bfq_stat_add(&stats->avg_queue_size_samples, 1);
219  	bfqg_stats_update_group_wait_time(stats);
220  }
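/*
 * The two counters updated above form a running average: avg_queue_size_sum
 * accumulates the number of queued requests sampled at each call, and
 * avg_queue_size_samples counts the calls. bfqg_prfill_avg_queue_size()
 * below reports sum / samples; e.g. samples of 2, 3, 1 and 2 queued
 * requests give (2 + 3 + 1 + 2) / 4 = 2.
 */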
221  
222  void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
223  			      blk_opf_t opf)
224  {
225  	blkg_rwstat_add(&bfqg->stats.queued, opf, 1);
226  	bfqg_stats_end_empty_time(&bfqg->stats);
227  	if (!(bfqq == bfqg->bfqd->in_service_queue))
228  		bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
229  }
230  
231  void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf)
232  {
233  	blkg_rwstat_add(&bfqg->stats.queued, opf, -1);
234  }
235  
236  void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf)
237  {
238  	blkg_rwstat_add(&bfqg->stats.merged, opf, 1);
239  }
240  
241  void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
242  				  u64 io_start_time_ns, blk_opf_t opf)
243  {
244  	struct bfqg_stats *stats = &bfqg->stats;
245  	u64 now = blk_time_get_ns();
246  
247  	if (now > io_start_time_ns)
248  		blkg_rwstat_add(&stats->service_time, opf,
249  				now - io_start_time_ns);
250  	if (io_start_time_ns > start_time_ns)
251  		blkg_rwstat_add(&stats->wait_time, opf,
252  				io_start_time_ns - start_time_ns);
253  }
254  
255  #else /* CONFIG_BFQ_CGROUP_DEBUG */
256  
257  void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf) { }
258  void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf) { }
259  void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
260  				  u64 io_start_time_ns, blk_opf_t opf) { }
261  void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
262  void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
263  
264  #endif /* CONFIG_BFQ_CGROUP_DEBUG */
265  
266  #ifdef CONFIG_BFQ_GROUP_IOSCHED
267  
268  /*
269   * blk-cgroup policy-related handlers
270   * The following functions help in converting between blk-cgroup
271   * internal structures and BFQ-specific structures.
272   */
273  
274  static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
275  {
276  	return pd ? container_of(pd, struct bfq_group, pd) : NULL;
277  }
278  
279  struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
280  {
281  	return pd_to_blkg(&bfqg->pd);
282  }
283  
284  static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
285  {
286  	return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
287  }
288  
289  /*
290   * bfq_group handlers
291   * The following functions help in navigating the bfq_group hierarchy
292   * by allowing to find the parent of a bfq_group or the bfq_group
293   * associated to a bfq_queue.
294   */
295  
296  static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
297  {
298  	struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;
299  
300  	return pblkg ? blkg_to_bfqg(pblkg) : NULL;
301  }
302  
303  struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
304  {
305  	struct bfq_entity *group_entity = bfqq->entity.parent;
306  
307  	return group_entity ? container_of(group_entity, struct bfq_group,
308  					   entity) :
309  			      bfqq->bfqd->root_group;
310  }
311  
312  /*
313   * The following two functions handle get and put of a bfq_group by
314   * wrapping the related blk-cgroup hooks.
315   */
316  
317  static void bfqg_get(struct bfq_group *bfqg)
318  {
319  	refcount_inc(&bfqg->ref);
320  }
321  
322  static void bfqg_put(struct bfq_group *bfqg)
323  {
324  	if (refcount_dec_and_test(&bfqg->ref))
325  		kfree(bfqg);
326  }
327  
328  static void bfqg_and_blkg_get(struct bfq_group *bfqg)
329  {
330  	/* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
331  	bfqg_get(bfqg);
332  
333  	blkg_get(bfqg_to_blkg(bfqg));
334  }
335  
336  void bfqg_and_blkg_put(struct bfq_group *bfqg)
337  {
338  	blkg_put(bfqg_to_blkg(bfqg));
339  
340  	bfqg_put(bfqg);
341  }
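/*
 * The two helpers above pair with bfqg_and_blkg_get(): a caller that makes an
 * entity point to a new group (e.g. bfq_bfqq_move() below) takes a reference
 * on the new group with bfqg_and_blkg_get() and drops the old parent's with
 * bfqg_and_blkg_put(), so both the bfq_group and its blkg stay alive as long
 * as some entity still points to them.
 */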
342  
343  void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq)
344  {
345  	struct bfq_group *bfqg = blkg_to_bfqg(rq->bio->bi_blkg);
346  
347  	if (!bfqg)
348  		return;
349  
350  	blkg_rwstat_add(&bfqg->stats.bytes, rq->cmd_flags, blk_rq_bytes(rq));
351  	blkg_rwstat_add(&bfqg->stats.ios, rq->cmd_flags, 1);
352  }
353  
354  /* @stats = 0 */
355  static void bfqg_stats_reset(struct bfqg_stats *stats)
356  {
357  #ifdef CONFIG_BFQ_CGROUP_DEBUG
358  	/* queued stats shouldn't be cleared */
359  	blkg_rwstat_reset(&stats->merged);
360  	blkg_rwstat_reset(&stats->service_time);
361  	blkg_rwstat_reset(&stats->wait_time);
362  	bfq_stat_reset(&stats->time);
363  	bfq_stat_reset(&stats->avg_queue_size_sum);
364  	bfq_stat_reset(&stats->avg_queue_size_samples);
365  	bfq_stat_reset(&stats->dequeue);
366  	bfq_stat_reset(&stats->group_wait_time);
367  	bfq_stat_reset(&stats->idle_time);
368  	bfq_stat_reset(&stats->empty_time);
369  #endif
370  }
371  
372  /* @to += @from */
373  static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
374  {
375  	if (!to || !from)
376  		return;
377  
378  #ifdef CONFIG_BFQ_CGROUP_DEBUG
379  	/* queued stats shouldn't be cleared */
380  	blkg_rwstat_add_aux(&to->merged, &from->merged);
381  	blkg_rwstat_add_aux(&to->service_time, &from->service_time);
382  	blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
383  	bfq_stat_add_aux(&to->time, &from->time);
384  	bfq_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
385  	bfq_stat_add_aux(&to->avg_queue_size_samples,
386  			  &from->avg_queue_size_samples);
387  	bfq_stat_add_aux(&to->dequeue, &from->dequeue);
388  	bfq_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
389  	bfq_stat_add_aux(&to->idle_time, &from->idle_time);
390  	bfq_stat_add_aux(&to->empty_time, &from->empty_time);
391  #endif
392  }
393  
394  /*
395   * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
396   * recursive stats can still account for the amount used by this bfqg after
397   * it's gone.
398   */
399  static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
400  {
401  	struct bfq_group *parent;
402  
403  	if (!bfqg) /* root_group */
404  		return;
405  
406  	parent = bfqg_parent(bfqg);
407  
408  	lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);
409  
410  	if (unlikely(!parent))
411  		return;
412  
413  	bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
414  	bfqg_stats_reset(&bfqg->stats);
415  }
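/*
 * For example, when a child cgroup with accumulated service_time is removed,
 * bfqg_stats_add_aux() above folds that value into the parent's aux counters,
 * so the parent's "*_recursive" files (see the cftypes below) keep including
 * the work done on behalf of the departed child.
 */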
416  
417  void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
418  {
419  	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
420  
421  	entity->weight = entity->new_weight;
422  	entity->orig_weight = entity->new_weight;
423  	if (bfqq) {
424  		bfqq->ioprio = bfqq->new_ioprio;
425  		bfqq->ioprio_class = bfqq->new_ioprio_class;
426  		/*
427  		 * Make sure that bfqg and its associated blkg do not
428  		 * disappear before entity.
429  		 */
430  		bfqg_and_blkg_get(bfqg);
431  	}
432  	entity->parent = bfqg->my_entity; /* NULL for root group */
433  	entity->sched_data = &bfqg->sched_data;
434  }
435  
436  static void bfqg_stats_exit(struct bfqg_stats *stats)
437  {
438  	blkg_rwstat_exit(&stats->bytes);
439  	blkg_rwstat_exit(&stats->ios);
440  #ifdef CONFIG_BFQ_CGROUP_DEBUG
441  	blkg_rwstat_exit(&stats->merged);
442  	blkg_rwstat_exit(&stats->service_time);
443  	blkg_rwstat_exit(&stats->wait_time);
444  	blkg_rwstat_exit(&stats->queued);
445  	bfq_stat_exit(&stats->time);
446  	bfq_stat_exit(&stats->avg_queue_size_sum);
447  	bfq_stat_exit(&stats->avg_queue_size_samples);
448  	bfq_stat_exit(&stats->dequeue);
449  	bfq_stat_exit(&stats->group_wait_time);
450  	bfq_stat_exit(&stats->idle_time);
451  	bfq_stat_exit(&stats->empty_time);
452  #endif
453  }
454  
455  static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
456  {
457  	if (blkg_rwstat_init(&stats->bytes, gfp) ||
458  	    blkg_rwstat_init(&stats->ios, gfp))
459  		goto error;
460  
461  #ifdef CONFIG_BFQ_CGROUP_DEBUG
462  	if (blkg_rwstat_init(&stats->merged, gfp) ||
463  	    blkg_rwstat_init(&stats->service_time, gfp) ||
464  	    blkg_rwstat_init(&stats->wait_time, gfp) ||
465  	    blkg_rwstat_init(&stats->queued, gfp) ||
466  	    bfq_stat_init(&stats->time, gfp) ||
467  	    bfq_stat_init(&stats->avg_queue_size_sum, gfp) ||
468  	    bfq_stat_init(&stats->avg_queue_size_samples, gfp) ||
469  	    bfq_stat_init(&stats->dequeue, gfp) ||
470  	    bfq_stat_init(&stats->group_wait_time, gfp) ||
471  	    bfq_stat_init(&stats->idle_time, gfp) ||
472  	    bfq_stat_init(&stats->empty_time, gfp))
473  		goto error;
474  #endif
475  
476  	return 0;
477  
478  error:
479  	bfqg_stats_exit(stats);
480  	return -ENOMEM;
481  }
482  
483  static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
484  {
485  	return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
486  }
487  
488  static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
489  {
490  	return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
491  }
492  
493  static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
494  {
495  	struct bfq_group_data *bgd;
496  
497  	bgd = kzalloc(sizeof(*bgd), gfp);
498  	if (!bgd)
499  		return NULL;
500  
501  	bgd->weight = CGROUP_WEIGHT_DFL;
502  	return &bgd->pd;
503  }
504  
505  static void bfq_cpd_free(struct blkcg_policy_data *cpd)
506  {
507  	kfree(cpd_to_bfqgd(cpd));
508  }
509  
510  static struct blkg_policy_data *bfq_pd_alloc(struct gendisk *disk,
511  		struct blkcg *blkcg, gfp_t gfp)
512  {
513  	struct bfq_group *bfqg;
514  
515  	bfqg = kzalloc_node(sizeof(*bfqg), gfp, disk->node_id);
516  	if (!bfqg)
517  		return NULL;
518  
519  	if (bfqg_stats_init(&bfqg->stats, gfp)) {
520  		kfree(bfqg);
521  		return NULL;
522  	}
523  
524  	/* see comments in bfq_bic_update_cgroup for why refcounting */
525  	refcount_set(&bfqg->ref, 1);
526  	return &bfqg->pd;
527  }
528  
529  static void bfq_pd_init(struct blkg_policy_data *pd)
530  {
531  	struct blkcg_gq *blkg = pd_to_blkg(pd);
532  	struct bfq_group *bfqg = blkg_to_bfqg(blkg);
533  	struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
534  	struct bfq_entity *entity = &bfqg->entity;
535  	struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);
536  
537  	entity->orig_weight = entity->weight = entity->new_weight = d->weight;
538  	entity->my_sched_data = &bfqg->sched_data;
539  	entity->last_bfqq_created = NULL;
540  
541  	bfqg->my_entity = entity; /*
542  				   * the root_group's will be set to NULL
543  				   * in bfq_init_queue()
544  				   */
545  	bfqg->bfqd = bfqd;
546  	bfqg->active_entities = 0;
547  	bfqg->num_queues_with_pending_reqs = 0;
548  	bfqg->rq_pos_tree = RB_ROOT;
549  }
550  
551  static void bfq_pd_free(struct blkg_policy_data *pd)
552  {
553  	struct bfq_group *bfqg = pd_to_bfqg(pd);
554  
555  	bfqg_stats_exit(&bfqg->stats);
556  	bfqg_put(bfqg);
557  }
558  
559  static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
560  {
561  	struct bfq_group *bfqg = pd_to_bfqg(pd);
562  
563  	bfqg_stats_reset(&bfqg->stats);
564  }
565  
566  static void bfq_group_set_parent(struct bfq_group *bfqg,
567  					struct bfq_group *parent)
568  {
569  	struct bfq_entity *entity;
570  
571  	entity = &bfqg->entity;
572  	entity->parent = parent->my_entity;
573  	entity->sched_data = &parent->sched_data;
574  }
575  
576  static void bfq_link_bfqg(struct bfq_data *bfqd, struct bfq_group *bfqg)
577  {
578  	struct bfq_group *parent;
579  	struct bfq_entity *entity;
580  
581  	/*
582  	 * Update chain of bfq_groups as we might be handling a leaf group
583  	 * which, along with some of its relatives, has not been hooked yet
584  	 * to the private hierarchy of BFQ.
585  	 */
586  	entity = &bfqg->entity;
587  	for_each_entity(entity) {
588  		struct bfq_group *curr_bfqg = container_of(entity,
589  						struct bfq_group, entity);
590  		if (curr_bfqg != bfqd->root_group) {
591  			parent = bfqg_parent(curr_bfqg);
592  			if (!parent)
593  				parent = bfqd->root_group;
594  			bfq_group_set_parent(curr_bfqg, parent);
595  		}
596  	}
597  }
598  
599  struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
600  {
601  	struct blkcg_gq *blkg = bio->bi_blkg;
602  	struct bfq_group *bfqg;
603  
604  	while (blkg) {
605  		if (!blkg->online) {
606  			blkg = blkg->parent;
607  			continue;
608  		}
609  		bfqg = blkg_to_bfqg(blkg);
610  		if (bfqg->pd.online) {
611  			bio_associate_blkg_from_css(bio, &blkg->blkcg->css);
612  			return bfqg;
613  		}
614  		blkg = blkg->parent;
615  	}
616  	bio_associate_blkg_from_css(bio,
617  				&bfqg_to_blkg(bfqd->root_group)->blkcg->css);
618  	return bfqd->root_group;
619  }
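/*
 * E.g. if the bio's cgroup has just been removed (its blkg is no longer
 * online), the loop above walks up to the nearest online ancestor, falling
 * back to the root group, and re-associates the bio with that ancestor so
 * that later lookups stay consistent.
 */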
620  
621  /**
622   * bfq_bfqq_move - migrate @bfqq to @bfqg.
623   * @bfqd: queue descriptor.
624   * @bfqq: the queue to move.
625   * @bfqg: the group to move to.
626   *
627   * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
628   * it on the new one.  Avoid putting the entity on the old group idle tree.
629   *
630   * Must be called under the scheduler lock, to make sure that the blkg
631   * owning @bfqg does not disappear (see comments in
632   * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
633   * objects).
634   */
635  void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
636  		   struct bfq_group *bfqg)
637  {
638  	struct bfq_entity *entity = &bfqq->entity;
639  	struct bfq_group *old_parent = bfqq_group(bfqq);
640  	bool has_pending_reqs = false;
641  
642  	/*
643  	 * There is no point in moving bfqq to the same group, which can happen
644  	 * when the root group is offlined.
645  	 */
646  	if (old_parent == bfqg)
647  		return;
648  
649  	/*
650  	 * oom_bfqq is not allowed to move; it holds a reference to the
651  	 * root_group until elevator exit.
652  	 */
653  	if (bfqq == &bfqd->oom_bfqq)
654  		return;
655  	/*
656  	 * Get extra reference to prevent bfqq from being freed in
657  	 * next possible expire or deactivate.
658  	 */
659  	bfqq->ref++;
660  
661  	if (entity->in_groups_with_pending_reqs) {
662  		has_pending_reqs = true;
663  		bfq_del_bfqq_in_groups_with_pending_reqs(bfqq);
664  	}
665  
666  	/* If bfqq is empty, then bfq_bfqq_expire also invokes
667  	 * bfq_del_bfqq_busy, thereby removing bfqq and its entity
668  	 * from data structures related to the current group. Otherwise we
669  	 * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
670  	 * we do below.
671  	 */
672  	if (bfqq == bfqd->in_service_queue)
673  		bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
674  				false, BFQQE_PREEMPTED);
675  
676  	if (bfq_bfqq_busy(bfqq))
677  		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
678  	else if (entity->on_st_or_in_serv)
679  		bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
680  	bfqg_and_blkg_put(old_parent);
681  
682  	bfq_reassign_last_bfqq(bfqq, NULL);
683  	entity->parent = bfqg->my_entity;
684  	entity->sched_data = &bfqg->sched_data;
685  	/* pin down bfqg and its associated blkg  */
686  	bfqg_and_blkg_get(bfqg);
687  
688  	if (has_pending_reqs)
689  		bfq_add_bfqq_in_groups_with_pending_reqs(bfqq);
690  
691  	if (bfq_bfqq_busy(bfqq)) {
692  		if (unlikely(!bfqd->nonrot_with_queueing))
693  			bfq_pos_tree_add_move(bfqd, bfqq);
694  		bfq_activate_bfqq(bfqd, bfqq);
695  	}
696  
697  	if (!bfqd->in_service_queue && !bfqd->tot_rq_in_driver)
698  		bfq_schedule_dispatch(bfqd);
699  	/* release extra ref taken above, bfqq may happen to be freed now */
700  	bfq_put_queue(bfqq);
701  }
702  
703  static void bfq_sync_bfqq_move(struct bfq_data *bfqd,
704  			       struct bfq_queue *sync_bfqq,
705  			       struct bfq_io_cq *bic,
706  			       struct bfq_group *bfqg,
707  			       unsigned int act_idx)
708  {
709  	struct bfq_queue *bfqq;
710  
711  	if (!sync_bfqq->new_bfqq && !bfq_bfqq_coop(sync_bfqq)) {
712  		/* We are the only user of this bfqq, just move it */
713  		if (sync_bfqq->entity.sched_data != &bfqg->sched_data)
714  			bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
715  		return;
716  	}
717  
718  	/*
719  	 * The queue was merged to a different queue. Check
720  	 * that the merge chain still belongs to the same
721  	 * cgroup.
722  	 */
723  	for (bfqq = sync_bfqq; bfqq; bfqq = bfqq->new_bfqq)
724  		if (bfqq->entity.sched_data != &bfqg->sched_data)
725  			break;
726  	if (bfqq) {
727  		/*
728  		 * Some queue changed cgroup so the merge is not valid
729  		 * anymore. We cannot easily just cancel the merge (by
730  		 * clearing new_bfqq) as there may be other processes
731  		 * using this queue and holding refs to all queues
732  		 * below sync_bfqq->new_bfqq. Similarly if the merge
733  		 * already happened, we need to detach from bfqq now
734  		 * so that we cannot merge bio to a request from the
735  		 * old cgroup.
736  		 */
737  		bfq_put_cooperator(sync_bfqq);
738  		bic_set_bfqq(bic, NULL, true, act_idx);
739  		bfq_release_process_ref(bfqd, sync_bfqq);
740  	}
741  }
742  
743  /**
744   * __bfq_bic_change_cgroup - move @bic to @bfqg.
745   * @bfqd: the queue descriptor.
746   * @bic: the bic to move.
747   * @bfqg: the group to move to.
748   *
749   * Move bic to blkcg, assuming that bfqd->lock is held; this makes
750   * sure that the reference to the cgroup is valid across the call (see
751   * comments in bfq_bic_update_cgroup on this issue).
752   */
753  static void __bfq_bic_change_cgroup(struct bfq_data *bfqd,
754  				    struct bfq_io_cq *bic,
755  				    struct bfq_group *bfqg)
756  {
757  	unsigned int act_idx;
758  
759  	for (act_idx = 0; act_idx < bfqd->num_actuators; act_idx++) {
760  		struct bfq_queue *async_bfqq = bic_to_bfqq(bic, false, act_idx);
761  		struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, true, act_idx);
762  
763  		if (async_bfqq &&
764  		    async_bfqq->entity.sched_data != &bfqg->sched_data) {
765  			bic_set_bfqq(bic, NULL, false, act_idx);
766  			bfq_release_process_ref(bfqd, async_bfqq);
767  		}
768  
769  		if (sync_bfqq)
770  			bfq_sync_bfqq_move(bfqd, sync_bfqq, bic, bfqg, act_idx);
771  	}
772  }
773  
774  void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
775  {
776  	struct bfq_data *bfqd = bic_to_bfqd(bic);
777  	struct bfq_group *bfqg = bfq_bio_bfqg(bfqd, bio);
778  	uint64_t serial_nr;
779  
780  	serial_nr = bfqg_to_blkg(bfqg)->blkcg->css.serial_nr;
781  
782  	/*
783  	 * Check whether blkcg has changed.  The condition may trigger
784  	 * spuriously on a newly created cic but there's no harm.
785  	 */
786  	if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
787  		return;
788  
789  	/*
790  	 * New cgroup for this process. Make sure it is linked to bfq internal
791  	 * cgroup hierarchy.
792  	 */
793  	bfq_link_bfqg(bfqd, bfqg);
794  	__bfq_bic_change_cgroup(bfqd, bic, bfqg);
795  	bic->blkcg_serial_nr = serial_nr;
796  }
797  
798  /**
799   * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
800   * @st: the service tree being flushed.
801   */
802  static void bfq_flush_idle_tree(struct bfq_service_tree *st)
803  {
804  	struct bfq_entity *entity = st->first_idle;
805  
806  	for (; entity ; entity = st->first_idle)
807  		__bfq_deactivate_entity(entity, false);
808  }
809  
810  /**
811   * bfq_reparent_leaf_entity - move leaf entity to the root_group.
812   * @bfqd: the device data structure with the root group.
813   * @entity: the entity to move, if entity is a leaf; or the parent entity
814   *	    of an active leaf entity to move, if entity is not a leaf.
815   * @ioprio_class: I/O priority class to reparent.
816   */
817  static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
818  				     struct bfq_entity *entity,
819  				     int ioprio_class)
820  {
821  	struct bfq_queue *bfqq;
822  	struct bfq_entity *child_entity = entity;
823  
824  	while (child_entity->my_sched_data) { /* leaf not reached yet */
825  		struct bfq_sched_data *child_sd = child_entity->my_sched_data;
826  		struct bfq_service_tree *child_st = child_sd->service_tree +
827  			ioprio_class;
828  		struct rb_root *child_active = &child_st->active;
829  
830  		child_entity = bfq_entity_of(rb_first(child_active));
831  
832  		if (!child_entity)
833  			child_entity = child_sd->in_service_entity;
834  	}
835  
836  	bfqq = bfq_entity_to_bfqq(child_entity);
837  	bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
838  }
839  
840  /**
841   * bfq_reparent_active_queues - move to the root group all active queues.
842   * @bfqd: the device data structure with the root group.
843   * @bfqg: the group to move from.
844   * @st: the service tree to start the search from.
845   * @ioprio_class: I/O priority class to reparent.
846   */
847  static void bfq_reparent_active_queues(struct bfq_data *bfqd,
848  				       struct bfq_group *bfqg,
849  				       struct bfq_service_tree *st,
850  				       int ioprio_class)
851  {
852  	struct rb_root *active = &st->active;
853  	struct bfq_entity *entity;
854  
855  	while ((entity = bfq_entity_of(rb_first(active))))
856  		bfq_reparent_leaf_entity(bfqd, entity, ioprio_class);
857  
858  	if (bfqg->sched_data.in_service_entity)
859  		bfq_reparent_leaf_entity(bfqd,
860  					 bfqg->sched_data.in_service_entity,
861  					 ioprio_class);
862  }
863  
864  /**
865   * bfq_pd_offline - deactivate the entity associated with @pd,
866   *		    and reparent its children entities.
867   * @pd: descriptor of the policy going offline.
868   *
869   * blkio already grabs the queue_lock for us, so no need to use
870   * RCU-based magic
871   */
872  static void bfq_pd_offline(struct blkg_policy_data *pd)
873  {
874  	struct bfq_service_tree *st;
875  	struct bfq_group *bfqg = pd_to_bfqg(pd);
876  	struct bfq_data *bfqd = bfqg->bfqd;
877  	struct bfq_entity *entity = bfqg->my_entity;
878  	unsigned long flags;
879  	int i;
880  
881  	spin_lock_irqsave(&bfqd->lock, flags);
882  
883  	if (!entity) /* root group */
884  		goto put_async_queues;
885  
886  	/*
887  	 * Empty all service_trees belonging to this group before
888  	 * deactivating the group itself.
889  	 */
890  	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
891  		st = bfqg->sched_data.service_tree + i;
892  
893  		/*
894  		 * It may happen that some queues are still active
895  		 * (busy) upon group destruction (if the corresponding
896  		 * processes have been forced to terminate). We move
897  		 * all the leaf entities corresponding to these queues
898  		 * to the root_group.
899  		 * Also, it may happen that the group has an entity
900  		 * in service, which is disconnected from the active
901  		 * tree: it must be moved, too.
902  		 * There is no need to put the sync queues, as the
903  		 * scheduler has taken no reference.
904  		 */
905  		bfq_reparent_active_queues(bfqd, bfqg, st, i);
906  
907  		/*
908  		 * The idle tree may still contain bfq_queues
909  		 * belonging to exited task because they never
910  		 * belonging to exited tasks because they never
911  		 * destroyed now. In addition, even
912  		 * bfq_reparent_active_queues() may happen to add some
913  		 * entities to the idle tree. It happens if, in some
914  		 * of the calls to bfq_bfqq_move() performed by
915  		 * bfq_reparent_active_queues(), the queue to move is
916  		 * empty and gets expired.
917  		 */
918  		bfq_flush_idle_tree(st);
919  	}
920  
921  	__bfq_deactivate_entity(entity, false);
922  
923  put_async_queues:
924  	bfq_put_async_queues(bfqd, bfqg);
925  
926  	spin_unlock_irqrestore(&bfqd->lock, flags);
927  	/*
928  	 * @blkg is going offline and will be ignored by
929  	 * blkg_[rw]stat_recursive_sum().  Transfer stats to the parent so
930  	 * that they don't get lost.  If IOs complete after this point, the
931  	 * stats for them will be lost.  Oh well...
932  	 */
933  	bfqg_stats_xfer_dead(bfqg);
934  }
935  
936  void bfq_end_wr_async(struct bfq_data *bfqd)
937  {
938  	struct blkcg_gq *blkg;
939  
940  	list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
941  		struct bfq_group *bfqg = blkg_to_bfqg(blkg);
942  
943  		bfq_end_wr_async_queues(bfqd, bfqg);
944  	}
945  	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
946  }
947  
948  static int bfq_io_show_weight_legacy(struct seq_file *sf, void *v)
949  {
950  	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
951  	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
952  	unsigned int val = 0;
953  
954  	if (bfqgd)
955  		val = bfqgd->weight;
956  
957  	seq_printf(sf, "%u\n", val);
958  
959  	return 0;
960  }
961  
962  static u64 bfqg_prfill_weight_device(struct seq_file *sf,
963  				     struct blkg_policy_data *pd, int off)
964  {
965  	struct bfq_group *bfqg = pd_to_bfqg(pd);
966  
967  	if (!bfqg->entity.dev_weight)
968  		return 0;
969  	return __blkg_prfill_u64(sf, pd, bfqg->entity.dev_weight);
970  }
971  
972  static int bfq_io_show_weight(struct seq_file *sf, void *v)
973  {
974  	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
975  	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
976  
977  	seq_printf(sf, "default %u\n", bfqgd->weight);
978  	blkcg_print_blkgs(sf, blkcg, bfqg_prfill_weight_device,
979  			  &blkcg_policy_bfq, 0, false);
980  	return 0;
981  }
982  
983  static void bfq_group_set_weight(struct bfq_group *bfqg, u64 weight, u64 dev_weight)
984  {
985  	weight = dev_weight ?: weight;
986  
987  	bfqg->entity.dev_weight = dev_weight;
988  	/*
989  	 * Setting the prio_changed flag of the entity
990  	 * to 1 with new_weight == weight would re-set
991  	 * the value of the weight to its ioprio mapping.
992  	 * Set the flag only if necessary.
993  	 */
994  	if ((unsigned short)weight != bfqg->entity.new_weight) {
995  		bfqg->entity.new_weight = (unsigned short)weight;
996  		/*
997  		 * Make sure that the above new value has been
998  		 * stored in bfqg->entity.new_weight before
999  		 * setting the prio_changed flag. In fact,
1000  		 * this flag may be read asynchronously (in
1001  		 * critical sections protected by a different
1002  		 * lock than that held here), and finding this
1003  		 * flag set may cause the execution of the code
1004  		 * for updating parameters whose value may
1005  		 * depend also on bfqg->entity.new_weight (in
1006  		 * __bfq_entity_update_weight_prio).
1007  		 * This barrier makes sure that the new value
1008  		 * of bfqg->entity.new_weight is correctly
1009  		 * seen in that code.
1010  		 */
1011  		smp_wmb();
1012  		bfqg->entity.prio_changed = 1;
1013  	}
1014  }
1015  
1016  static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
1017  				    struct cftype *cftype,
1018  				    u64 val)
1019  {
1020  	struct blkcg *blkcg = css_to_blkcg(css);
1021  	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
1022  	struct blkcg_gq *blkg;
1023  	int ret = -ERANGE;
1024  
1025  	if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
1026  		return ret;
1027  
1028  	ret = 0;
1029  	spin_lock_irq(&blkcg->lock);
1030  	bfqgd->weight = (unsigned short)val;
1031  	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
1032  		struct bfq_group *bfqg = blkg_to_bfqg(blkg);
1033  
1034  		if (bfqg)
1035  			bfq_group_set_weight(bfqg, val, 0);
1036  	}
1037  	spin_unlock_irq(&blkcg->lock);
1038  
1039  	return ret;
1040  }
1041  
1042  static ssize_t bfq_io_set_device_weight(struct kernfs_open_file *of,
1043  					char *buf, size_t nbytes,
1044  					loff_t off)
1045  {
1046  	int ret;
1047  	struct blkg_conf_ctx ctx;
1048  	struct blkcg *blkcg = css_to_blkcg(of_css(of));
1049  	struct bfq_group *bfqg;
1050  	u64 v;
1051  
1052  	blkg_conf_init(&ctx, buf);
1053  
1054  	ret = blkg_conf_prep(blkcg, &blkcg_policy_bfq, &ctx);
1055  	if (ret)
1056  		goto out;
1057  
1058  	if (sscanf(ctx.body, "%llu", &v) == 1) {
1059  		/* require "default" on dfl */
1060  		ret = -ERANGE;
1061  		if (!v)
1062  			goto out;
1063  	} else if (!strcmp(strim(ctx.body), "default")) {
1064  		v = 0;
1065  	} else {
1066  		ret = -EINVAL;
1067  		goto out;
1068  	}
1069  
1070  	bfqg = blkg_to_bfqg(ctx.blkg);
1071  
1072  	ret = -ERANGE;
1073  	if (!v || (v >= BFQ_MIN_WEIGHT && v <= BFQ_MAX_WEIGHT)) {
1074  		bfq_group_set_weight(bfqg, bfqg->entity.weight, v);
1075  		ret = 0;
1076  	}
1077  out:
1078  	blkg_conf_exit(&ctx);
1079  	return ret ?: nbytes;
1080  }
1081  
1082  static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
1083  				 char *buf, size_t nbytes,
1084  				 loff_t off)
1085  {
1086  	char *endp;
1087  	int ret;
1088  	u64 v;
1089  
1090  	buf = strim(buf);
1091  
1092  	/* "WEIGHT" or "default WEIGHT" sets the default weight */
1093  	v = simple_strtoull(buf, &endp, 0);
1094  	if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
1095  		ret = bfq_io_set_weight_legacy(of_css(of), NULL, v);
1096  		return ret ?: nbytes;
1097  	}
1098  
1099  	return bfq_io_set_device_weight(of, buf, nbytes, off);
1100  }
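/*
 * Example usage from userspace (a sketch; the cftypes below are exposed as
 * "io.bfq.weight" on the unified hierarchy and as "blkio.bfq.weight" /
 * "blkio.bfq.weight_device" on the legacy one; unified-hierarchy paths shown):
 *
 *	# set the group's default weight
 *	echo 300 > io.bfq.weight
 *	# set a per-device weight for device MAJ:MIN
 *	echo "8:0 100" > io.bfq.weight
 *	# drop the per-device weight and fall back to the default
 *	echo "8:0 default" > io.bfq.weight
 */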
1101  
1102  static int bfqg_print_rwstat(struct seq_file *sf, void *v)
1103  {
1104  	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
1105  			  &blkcg_policy_bfq, seq_cft(sf)->private, true);
1106  	return 0;
1107  }
1108  
1109  static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
1110  					struct blkg_policy_data *pd, int off)
1111  {
1112  	struct blkg_rwstat_sample sum;
1113  
1114  	blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_bfq, off, &sum);
1115  	return __blkg_prfill_rwstat(sf, pd, &sum);
1116  }
1117  
1118  static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
1119  {
1120  	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1121  			  bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
1122  			  seq_cft(sf)->private, true);
1123  	return 0;
1124  }
1125  
1126  #ifdef CONFIG_BFQ_CGROUP_DEBUG
1127  static int bfqg_print_stat(struct seq_file *sf, void *v)
1128  {
1129  	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
1130  			  &blkcg_policy_bfq, seq_cft(sf)->private, false);
1131  	return 0;
1132  }
1133  
1134  static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
1135  				      struct blkg_policy_data *pd, int off)
1136  {
1137  	struct blkcg_gq *blkg = pd_to_blkg(pd);
1138  	struct blkcg_gq *pos_blkg;
1139  	struct cgroup_subsys_state *pos_css;
1140  	u64 sum = 0;
1141  
1142  	lockdep_assert_held(&blkg->q->queue_lock);
1143  
1144  	rcu_read_lock();
1145  	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
1146  		struct bfq_stat *stat;
1147  
1148  		if (!pos_blkg->online)
1149  			continue;
1150  
1151  		stat = (void *)blkg_to_pd(pos_blkg, &blkcg_policy_bfq) + off;
1152  		sum += bfq_stat_read(stat) + atomic64_read(&stat->aux_cnt);
1153  	}
1154  	rcu_read_unlock();
1155  
1156  	return __blkg_prfill_u64(sf, pd, sum);
1157  }
1158  
1159  static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
1160  {
1161  	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1162  			  bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
1163  			  seq_cft(sf)->private, false);
1164  	return 0;
1165  }
1166  
1167  static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
1168  			       int off)
1169  {
1170  	struct bfq_group *bfqg = blkg_to_bfqg(pd->blkg);
1171  	u64 sum = blkg_rwstat_total(&bfqg->stats.bytes);
1172  
1173  	return __blkg_prfill_u64(sf, pd, sum >> 9);
1174  }
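/*
 * The shift by 9 above converts the byte counter into 512-byte sectors;
 * e.g. 1 MiB of service (1048576 bytes) is reported as 2048 sectors.
 */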
1175  
1176  static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
1177  {
1178  	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1179  			  bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
1180  	return 0;
1181  }
1182  
1183  static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
1184  					 struct blkg_policy_data *pd, int off)
1185  {
1186  	struct blkg_rwstat_sample tmp;
1187  
1188  	blkg_rwstat_recursive_sum(pd->blkg, &blkcg_policy_bfq,
1189  			offsetof(struct bfq_group, stats.bytes), &tmp);
1190  
1191  	return __blkg_prfill_u64(sf, pd,
1192  		(tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]) >> 9);
1193  }
1194  
1195  static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
1196  {
1197  	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1198  			  bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
1199  			  false);
1200  	return 0;
1201  }
1202  
1203  static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
1204  				      struct blkg_policy_data *pd, int off)
1205  {
1206  	struct bfq_group *bfqg = pd_to_bfqg(pd);
1207  	u64 samples = bfq_stat_read(&bfqg->stats.avg_queue_size_samples);
1208  	u64 v = 0;
1209  
1210  	if (samples) {
1211  		v = bfq_stat_read(&bfqg->stats.avg_queue_size_sum);
1212  		v = div64_u64(v, samples);
1213  	}
1214  	__blkg_prfill_u64(sf, pd, v);
1215  	return 0;
1216  }
1217  
1218  /* print avg_queue_size */
1219  static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
1220  {
1221  	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1222  			  bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
1223  			  0, false);
1224  	return 0;
1225  }
1226  #endif /* CONFIG_BFQ_CGROUP_DEBUG */
1227  
1228  struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
1229  {
1230  	int ret;
1231  
1232  	ret = blkcg_activate_policy(bfqd->queue->disk, &blkcg_policy_bfq);
1233  	if (ret)
1234  		return NULL;
1235  
1236  	return blkg_to_bfqg(bfqd->queue->root_blkg);
1237  }
1238  
1239  struct blkcg_policy blkcg_policy_bfq = {
1240  	.dfl_cftypes		= bfq_blkg_files,
1241  	.legacy_cftypes		= bfq_blkcg_legacy_files,
1242  
1243  	.cpd_alloc_fn		= bfq_cpd_alloc,
1244  	.cpd_free_fn		= bfq_cpd_free,
1245  
1246  	.pd_alloc_fn		= bfq_pd_alloc,
1247  	.pd_init_fn		= bfq_pd_init,
1248  	.pd_offline_fn		= bfq_pd_offline,
1249  	.pd_free_fn		= bfq_pd_free,
1250  	.pd_reset_stats_fn	= bfq_pd_reset_stats,
1251  };
1252  
1253  struct cftype bfq_blkcg_legacy_files[] = {
1254  	{
1255  		.name = "bfq.weight",
1256  		.flags = CFTYPE_NOT_ON_ROOT,
1257  		.seq_show = bfq_io_show_weight_legacy,
1258  		.write_u64 = bfq_io_set_weight_legacy,
1259  	},
1260  	{
1261  		.name = "bfq.weight_device",
1262  		.flags = CFTYPE_NOT_ON_ROOT,
1263  		.seq_show = bfq_io_show_weight,
1264  		.write = bfq_io_set_weight,
1265  	},
1266  
1267  	/* statistics, covers only the tasks in the bfqg */
1268  	{
1269  		.name = "bfq.io_service_bytes",
1270  		.private = offsetof(struct bfq_group, stats.bytes),
1271  		.seq_show = bfqg_print_rwstat,
1272  	},
1273  	{
1274  		.name = "bfq.io_serviced",
1275  		.private = offsetof(struct bfq_group, stats.ios),
1276  		.seq_show = bfqg_print_rwstat,
1277  	},
1278  #ifdef CONFIG_BFQ_CGROUP_DEBUG
1279  	{
1280  		.name = "bfq.time",
1281  		.private = offsetof(struct bfq_group, stats.time),
1282  		.seq_show = bfqg_print_stat,
1283  	},
1284  	{
1285  		.name = "bfq.sectors",
1286  		.seq_show = bfqg_print_stat_sectors,
1287  	},
1288  	{
1289  		.name = "bfq.io_service_time",
1290  		.private = offsetof(struct bfq_group, stats.service_time),
1291  		.seq_show = bfqg_print_rwstat,
1292  	},
1293  	{
1294  		.name = "bfq.io_wait_time",
1295  		.private = offsetof(struct bfq_group, stats.wait_time),
1296  		.seq_show = bfqg_print_rwstat,
1297  	},
1298  	{
1299  		.name = "bfq.io_merged",
1300  		.private = offsetof(struct bfq_group, stats.merged),
1301  		.seq_show = bfqg_print_rwstat,
1302  	},
1303  	{
1304  		.name = "bfq.io_queued",
1305  		.private = offsetof(struct bfq_group, stats.queued),
1306  		.seq_show = bfqg_print_rwstat,
1307  	},
1308  #endif /* CONFIG_BFQ_CGROUP_DEBUG */
1309  
1310  	/* the same statistics which cover the bfqg and its descendants */
1311  	{
1312  		.name = "bfq.io_service_bytes_recursive",
1313  		.private = offsetof(struct bfq_group, stats.bytes),
1314  		.seq_show = bfqg_print_rwstat_recursive,
1315  	},
1316  	{
1317  		.name = "bfq.io_serviced_recursive",
1318  		.private = offsetof(struct bfq_group, stats.ios),
1319  		.seq_show = bfqg_print_rwstat_recursive,
1320  	},
1321  #ifdef CONFIG_BFQ_CGROUP_DEBUG
1322  	{
1323  		.name = "bfq.time_recursive",
1324  		.private = offsetof(struct bfq_group, stats.time),
1325  		.seq_show = bfqg_print_stat_recursive,
1326  	},
1327  	{
1328  		.name = "bfq.sectors_recursive",
1329  		.seq_show = bfqg_print_stat_sectors_recursive,
1330  	},
1331  	{
1332  		.name = "bfq.io_service_time_recursive",
1333  		.private = offsetof(struct bfq_group, stats.service_time),
1334  		.seq_show = bfqg_print_rwstat_recursive,
1335  	},
1336  	{
1337  		.name = "bfq.io_wait_time_recursive",
1338  		.private = offsetof(struct bfq_group, stats.wait_time),
1339  		.seq_show = bfqg_print_rwstat_recursive,
1340  	},
1341  	{
1342  		.name = "bfq.io_merged_recursive",
1343  		.private = offsetof(struct bfq_group, stats.merged),
1344  		.seq_show = bfqg_print_rwstat_recursive,
1345  	},
1346  	{
1347  		.name = "bfq.io_queued_recursive",
1348  		.private = offsetof(struct bfq_group, stats.queued),
1349  		.seq_show = bfqg_print_rwstat_recursive,
1350  	},
1351  	{
1352  		.name = "bfq.avg_queue_size",
1353  		.seq_show = bfqg_print_avg_queue_size,
1354  	},
1355  	{
1356  		.name = "bfq.group_wait_time",
1357  		.private = offsetof(struct bfq_group, stats.group_wait_time),
1358  		.seq_show = bfqg_print_stat,
1359  	},
1360  	{
1361  		.name = "bfq.idle_time",
1362  		.private = offsetof(struct bfq_group, stats.idle_time),
1363  		.seq_show = bfqg_print_stat,
1364  	},
1365  	{
1366  		.name = "bfq.empty_time",
1367  		.private = offsetof(struct bfq_group, stats.empty_time),
1368  		.seq_show = bfqg_print_stat,
1369  	},
1370  	{
1371  		.name = "bfq.dequeue",
1372  		.private = offsetof(struct bfq_group, stats.dequeue),
1373  		.seq_show = bfqg_print_stat,
1374  	},
1375  #endif	/* CONFIG_BFQ_CGROUP_DEBUG */
1376  	{ }	/* terminate */
1377  };
1378  
1379  struct cftype bfq_blkg_files[] = {
1380  	{
1381  		.name = "bfq.weight",
1382  		.flags = CFTYPE_NOT_ON_ROOT,
1383  		.seq_show = bfq_io_show_weight,
1384  		.write = bfq_io_set_weight,
1385  	},
1386  	{} /* terminate */
1387  };
1388  
1389  #else	/* CONFIG_BFQ_GROUP_IOSCHED */
1390  
1391  void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1392  		   struct bfq_group *bfqg) {}
1393  
1394  void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
1395  {
1396  	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
1397  
1398  	entity->weight = entity->new_weight;
1399  	entity->orig_weight = entity->new_weight;
1400  	if (bfqq) {
1401  		bfqq->ioprio = bfqq->new_ioprio;
1402  		bfqq->ioprio_class = bfqq->new_ioprio_class;
1403  	}
1404  	entity->sched_data = &bfqg->sched_data;
1405  }
1406  
1407  void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}
1408  
1409  void bfq_end_wr_async(struct bfq_data *bfqd)
1410  {
1411  	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
1412  }
1413  
1414  struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
1415  {
1416  	return bfqd->root_group;
1417  }
1418  
1419  struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
1420  {
1421  	return bfqq->bfqd->root_group;
1422  }
1423  
1424  void bfqg_and_blkg_put(struct bfq_group *bfqg) {}
1425  
1426  struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
1427  {
1428  	struct bfq_group *bfqg;
1429  	int i;
1430  
1431  	bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
1432  	if (!bfqg)
1433  		return NULL;
1434  
1435  	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
1436  		bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
1437  
1438  	return bfqg;
1439  }
1440  #endif	/* CONFIG_BFQ_GROUP_IOSCHED */
1441