// SPDX-License-Identifier: GPL-2.0
/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"
#include "blk-mq.h"
#include "blk.h"

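/*
 * Per-queue statistics state: the list of registered callbacks, a lock
 * protecting list mutation, and a count of users that keep request
 * accounting (QUEUE_FLAG_STATS) enabled without registering a callback.
 */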
struct blk_queue_stats {
	struct list_head callbacks;
	spinlock_t lock;
	int accounting;
};

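/*
 * Reset a stat bucket: min starts at the largest possible u64 so the first
 * sample always replaces it.
 */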
void blk_rq_stat_init(struct blk_rq_stat *stat)
{
	stat->min = -1ULL;
	stat->max = stat->nr_samples = stat->mean = 0;
	stat->batch = 0;
}

/*
 * Fold a per-cpu stat (src) into the aggregate (dst). The per-cpu side never
 * computes src->mean, only the running sum src->batch, so the merged mean is
 * reconstructed here as a weighted average of both sides.
 */
void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	if (!src->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	dst->mean = div_u64(src->batch + dst->mean * dst->nr_samples,
				dst->nr_samples + src->nr_samples);

	dst->nr_samples += src->nr_samples;
}

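/*
 * Record one sample: track min/max and accumulate into the batch sum. The
 * mean is intentionally not updated here; it is derived once per window in
 * blk_rq_stat_sum().
 */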
void blk_rq_stat_add(struct blk_rq_stat *stat, u64 value)
{
	stat->min = min(stat->min, value);
	stat->max = max(stat->max, value);
	stat->batch += value;
	stat->nr_samples++;
}

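/*
 * Completion-path hook: compute the request's device latency and feed it to
 * the matching bucket of every active callback on this queue. Runs under
 * rcu_read_lock() with preemption disabled (get_cpu()), so callbacks can be
 * removed concurrently without blocking completions.
 */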
void blk_stat_add(struct request *rq, u64 now)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket, cpu;
	u64 value;

	value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;

	if (req_op(rq) == REQ_OP_READ || req_op(rq) == REQ_OP_WRITE)
		blk_throtl_stat_add(rq, value);

	rcu_read_lock();
	cpu = get_cpu();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (!blk_stat_is_active(cb))
			continue;

		bucket = cb->bucket_fn(rq);
		if (bucket < 0)
			continue;

		stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];
		blk_rq_stat_add(stat, value);
	}
	put_cpu();
	rcu_read_unlock();
}

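/*
 * Aggregation timer: fold every online CPU's buckets into cb->stat[], reset
 * the per-cpu copies, and hand the finished window to the owner's timer_fn.
 * The callback stays inactive until the owner re-arms it.
 */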
static void blk_stat_timer_fn(struct timer_list *t)
{
	struct blk_stat_callback *cb = from_timer(cb, t, timer);
	unsigned int bucket;
	int cpu;

	for (bucket = 0; bucket < cb->buckets; bucket++)
		blk_rq_stat_init(&cb->stat[bucket]);

	for_each_online_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++) {
			blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
			blk_rq_stat_init(&cpu_stat[bucket]);
		}
	}

	cb->timer_fn(cb);
}

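/*
 * Allocate a callback with @buckets stat buckets, one full set per possible
 * CPU. @bucket_fn maps a request to a bucket index (negative means "skip
 * this request") and @timer_fn is invoked with the aggregated buckets when
 * an activation window expires. Returns NULL on allocation failure.
 */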
struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
			int (*bucket_fn)(const struct request *),
			unsigned int buckets, void *data)
{
	struct blk_stat_callback *cb;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
				 GFP_KERNEL);
	if (!cb->stat) {
		kfree(cb);
		return NULL;
	}
	cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
				      __alignof__(struct blk_rq_stat));
	if (!cb->cpu_stat) {
		kfree(cb->stat);
		kfree(cb);
		return NULL;
	}

	cb->timer_fn = timer_fn;
	cb->bucket_fn = bucket_fn;
	cb->data = data;
	cb->buckets = buckets;
	timer_setup(&cb->timer, blk_stat_timer_fn, 0);

	return cb;
}

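/*
 * Usage sketch (illustrative only; example_bucket_fn, example_timer_fn and
 * consume_window are hypothetical names, not part of this file): split
 * latencies into a read bucket and a write bucket, aggregated over 100ms
 * windows that the owner re-arms from its timer callback:
 *
 *	static int example_bucket_fn(const struct request *rq)
 *	{
 *		return op_is_write(req_op(rq)) ? 1 : 0;
 *	}
 *
 *	static void example_timer_fn(struct blk_stat_callback *cb)
 *	{
 *		consume_window(cb->stat, cb->buckets);
 *		blk_stat_activate_msecs(cb, 100);
 *	}
 *
 *	cb = blk_stat_alloc_callback(example_timer_fn, example_bucket_fn,
 *				     2, NULL);
 *	blk_stat_add_callback(q, cb);
 *	blk_stat_activate_msecs(cb, 100);
 */

/*
 * Register @cb on @q and mark the queue as gathering statistics. The per-cpu
 * buckets are reinitialized for every possible CPU first, so samples from a
 * previous registration can never leak into a new window.
 */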
void blk_stat_add_callback(struct request_queue *q,
			   struct blk_stat_callback *cb)
{
	unsigned int bucket;
	unsigned long flags;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++)
			blk_rq_stat_init(&cpu_stat[bucket]);
	}

	spin_lock_irqsave(&q->stats->lock, flags);
	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}

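/*
 * Unregister @cb from @q; statistics gathering stops if no other callbacks
 * or accounting users remain. del_timer_sync() guarantees the aggregation
 * timer is idle on return, but completions inside blk_stat_add() may still
 * hold an RCU reference, which is why the actual freeing is deferred (see
 * blk_stat_free_callback()).
 */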
void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	list_del_rcu(&cb->list);
	if (list_empty(&q->stats->callbacks) && !q->stats->accounting)
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);

	del_timer_sync(&cb->timer);
}

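/*
 * RCU callback: a grace period has elapsed since removal, no completion path
 * can still reference @cb, so its memory can finally be returned.
 */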
static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
	struct blk_stat_callback *cb;

	cb = container_of(head, struct blk_stat_callback, rcu);
	free_percpu(cb->cpu_stat);
	kfree(cb->stat);
	kfree(cb);
}

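/*
 * Free @cb once all concurrent RCU readers are done with it. Safe to call
 * with NULL.
 */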
void blk_stat_free_callback(struct blk_stat_callback *cb)
{
	if (cb)
		call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}

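/*
 * Drop one accounting reference; the last user to leave clears
 * QUEUE_FLAG_STATS, so completions stop being timed and fed to
 * blk_stat_add().
 */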
void blk_stat_disable_accounting(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	if (!--q->stats->accounting)
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}
EXPORT_SYMBOL_GPL(blk_stat_disable_accounting);

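/*
 * Take one accounting reference; the first user sets QUEUE_FLAG_STATS so
 * that requests are timestamped and completions feed blk_stat_add().
 */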
void blk_stat_enable_accounting(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	if (!q->stats->accounting++)
		blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}
EXPORT_SYMBOL_GPL(blk_stat_enable_accounting);

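/*
 * Allocate the per-queue stats state with an empty callback list; done once
 * when the request queue itself is created.
 */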
struct blk_queue_stats *blk_alloc_queue_stats(void)
{
	struct blk_queue_stats *stats;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return NULL;

	INIT_LIST_HEAD(&stats->callbacks);
	spin_lock_init(&stats->lock);
	stats->accounting = 0;

	return stats;
}

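/*
 * Release the per-queue stats state. Every callback must already have been
 * removed; a non-empty list here is a leak, hence the WARN_ON().
 */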
void blk_free_queue_stats(struct blk_queue_stats *stats)
{
	if (!stats)
		return;

	WARN_ON(!list_empty(&stats->callbacks));

	kfree(stats);
}

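/*
 * Lazily allocate the polling statistics buckets on first use and register
 * the queue's poll callback. Returns false only if the allocation fails.
 */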
bool blk_stats_alloc_enable(struct request_queue *q)
{
	struct blk_rq_stat *poll_stat;

	poll_stat = kcalloc(BLK_MQ_POLL_STATS_BKTS, sizeof(*poll_stat),
				GFP_ATOMIC);
	if (!poll_stat)
		return false;

	if (cmpxchg(&q->poll_stat, NULL, poll_stat) != NULL) {
		/* Lost the race: another context already installed it. */
		kfree(poll_stat);
		return true;
	}

	blk_stat_add_callback(q, q->poll_cb);
	return true;
}