// SPDX-License-Identifier: GPL-2.0
/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>

#include "blk-stat.h"
#include "blk-mq.h"
#include "blk.h"

struct blk_queue_stats {
	struct list_head callbacks;
	spinlock_t lock;
	int accounting;
};

void blk_rq_stat_init(struct blk_rq_stat *stat)
{
	/* -1ULL is U64_MAX, so the first sample always replaces it in min() */
	stat->min = -1ULL;
	stat->max = stat->nr_samples = stat->mean = 0;
	stat->batch = 0;
}

/* src is a per-cpu stat, mean isn't initialized */
void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	/*
	 * Nothing to merge if src is empty; written this way, the check also
	 * bails out if the combined sample count would overflow.
	 */
	if (dst->nr_samples + src->nr_samples <= dst->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	/*
	 * src->batch is the raw sum of src's samples, so this is the
	 * sample-weighted average of the two sides.
	 */
	dst->mean = div_u64(src->batch + dst->mean * dst->nr_samples,
			dst->nr_samples + src->nr_samples);

	dst->nr_samples += src->nr_samples;
}

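/*
 * Worked example of the blk_rq_stat_sum() merge above: if dst already holds
 * nr_samples = 3 with mean = 10 (a running sum of 30) and src batched two
 * samples totalling batch = 20, the combined mean is
 * (20 + 10 * 3) / (3 + 2) = 10.
 */
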
void blk_rq_stat_add(struct blk_rq_stat *stat, u64 value)
{
	stat->min = min(stat->min, value);
	stat->max = max(stat->max, value);
	/* Accumulate the raw sum; the mean is only computed when flushing */
	stat->batch += value;
	stat->nr_samples++;
}

void blk_stat_add(struct request *rq, u64 now)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket, cpu;
	u64 value;

	/* Device-side latency of the request; clamp to 0 on clock skew */
	value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;

	rcu_read_lock();
	cpu = get_cpu();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (!blk_stat_is_active(cb))
			continue;

		/* A negative bucket means the callback ignores this request */
		bucket = cb->bucket_fn(rq);
		if (bucket < 0)
			continue;

		stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];
		blk_rq_stat_add(stat, value);
	}
	put_cpu();
	rcu_read_unlock();
}

static void blk_stat_timer_fn(struct timer_list *t)
{
	struct blk_stat_callback *cb = timer_container_of(cb, t, timer);
	unsigned int bucket;
	int cpu;

	/* Start from a clean aggregate for this window */
	for (bucket = 0; bucket < cb->buckets; bucket++)
		blk_rq_stat_init(&cb->stat[bucket]);

	/* Fold each CPU's buckets into the aggregate, then reset them */
	for_each_online_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++) {
			blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
			blk_rq_stat_init(&cpu_stat[bucket]);
		}
	}

	cb->timer_fn(cb);
}

struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
			int (*bucket_fn)(const struct request *),
			unsigned int buckets, void *data)
{
	struct blk_stat_callback *cb;

	cb = kmalloc_obj(*cb);
	if (!cb)
		return NULL;

	cb->stat = kmalloc_objs(struct blk_rq_stat, buckets);
	if (!cb->stat) {
		kfree(cb);
		return NULL;
	}
	cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
				      __alignof__(struct blk_rq_stat));
	if (!cb->cpu_stat) {
		kfree(cb->stat);
		kfree(cb);
		return NULL;
	}

	cb->timer_fn = timer_fn;
	cb->bucket_fn = bucket_fn;
	cb->data = data;
	cb->buckets = buckets;
	timer_setup(&cb->timer, blk_stat_timer_fn, 0);

	return cb;
}

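/*
 * A minimal lifecycle sketch for the callback API, assuming hypothetical
 * my_bucket_fn()/my_timer_fn() helpers; the two-bucket READ/WRITE split
 * only mirrors the kind of bucketing users such as blk-wbt do:
 *
 *	static int my_bucket_fn(const struct request *rq)
 *	{
 *		return op_is_write(req_op(rq)) ? 1 : 0;
 *	}
 *
 *	static void my_timer_fn(struct blk_stat_callback *cb)
 *	{
 *		// cb->stat[0..1] hold the aggregated window here
 *	}
 *
 *	cb = blk_stat_alloc_callback(my_timer_fn, my_bucket_fn, 2, NULL);
 *	if (!cb)
 *		return -ENOMEM;
 *	blk_stat_add_callback(q, cb);
 *	blk_stat_activate_msecs(cb, 100);	// arm one aggregation window
 *	...
 *	blk_stat_remove_callback(q, cb);
 *	blk_stat_free_callback(cb);
 */
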
void blk_stat_add_callback(struct request_queue *q,
			   struct blk_stat_callback *cb)
{
	unsigned int bucket;
	unsigned long flags;
	int cpu;

	/* Zero every possible CPU's buckets before publishing the callback */
	for_each_possible_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++)
			blk_rq_stat_init(&cpu_stat[bucket]);
	}

	spin_lock_irqsave(&q->stats->lock, flags);
	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}

void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	list_del_rcu(&cb->list);
	if (list_empty(&q->stats->callbacks) && !q->stats->accounting)
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);

	/* Make sure a concurrently running timer_fn() has finished */
	timer_delete_sync(&cb->timer);
}

/* Runs after a grace period, once no blk_stat_add() walker can still see cb */
static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
	struct blk_stat_callback *cb;

	cb = container_of(head, struct blk_stat_callback, rcu);
	free_percpu(cb->cpu_stat);
	kfree(cb->stat);
	kfree(cb);
}

void blk_stat_free_callback(struct blk_stat_callback *cb)
{
	if (cb)
		call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}

void blk_stat_disable_accounting(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	/* Only the last disable clears the flag, and only with no callbacks */
	if (!--q->stats->accounting && list_empty(&q->stats->callbacks))
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}
EXPORT_SYMBOL_GPL(blk_stat_disable_accounting);

void blk_stat_enable_accounting(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	/* The first enable sets the flag unless a callback already did */
	if (!q->stats->accounting++ && list_empty(&q->stats->callbacks))
		blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}
EXPORT_SYMBOL_GPL(blk_stat_enable_accounting);

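/*
 * The accounting calls nest like a refcount, so independent users can pair
 * enable/disable without coordinating. A hedged illustration (hypothetical
 * call sequence):
 *
 *	blk_stat_enable_accounting(q);	// 0 -> 1, QUEUE_FLAG_STATS set
 *	blk_stat_enable_accounting(q);	// 1 -> 2, flag already set
 *	blk_stat_disable_accounting(q);	// 2 -> 1, flag stays set
 *	blk_stat_disable_accounting(q);	// 1 -> 0, flag cleared unless
 *					//   callbacks are still registered
 */
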
struct blk_queue_stats *blk_alloc_queue_stats(void)
{
	struct blk_queue_stats *stats;

	stats = kmalloc_obj(*stats);
	if (!stats)
		return NULL;

	INIT_LIST_HEAD(&stats->callbacks);
	spin_lock_init(&stats->lock);
	stats->accounting = 0;

	return stats;
}

void blk_free_queue_stats(struct blk_queue_stats *stats)
{
	if (!stats)
		return;

	WARN_ON(!list_empty(&stats->callbacks));

	kfree(stats);
}