// SPDX-License-Identifier: GPL-2.0-only
#include <linux/errno.h>
#include <linux/numa.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/threads.h>
#include <linux/preempt.h>
#include <linux/irqflags.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/device-mapper.h>

#include "dm-core.h"
#include "dm-stats.h"

#define DM_MSG_PREFIX "stats"

static int dm_stat_need_rcu_barrier;

/*
 * Using 64-bit values to avoid overflow (which is a
 * problem that block/genhd.c's IO accounting has).
 */
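/*
 * There is one dm_stat_percpu per CPU for every area of a region, so the
 * I/O path only ever updates CPU-local counters; reads and writes are
 * tracked separately via the [READ] and [WRITE] indices.  When histograms
 * are enabled, the histogram pointer refers to n_histogram_entries + 1
 * buckets carved out of a single contiguous allocation.  The companion
 * dm_stat_shared holds what must be globally consistent: the in-flight
 * counts and a tmp buffer into which the per-CPU values are summed
 * whenever the area is reported or cleared.
 */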
struct dm_stat_percpu {
	unsigned long long sectors[2];
	unsigned long long ios[2];
	unsigned long long merges[2];
	unsigned long long ticks[2];
	unsigned long long io_ticks[2];
	unsigned long long io_ticks_total;
	unsigned long long time_in_queue;
	unsigned long long *histogram;
};

struct dm_stat_shared {
	atomic_t in_flight[2];
	unsigned long long stamp;
	struct dm_stat_percpu tmp;
};

struct dm_stat {
	struct list_head list_entry;
	int id;
	unsigned int stat_flags;
	size_t n_entries;
	sector_t start;
	sector_t end;
	sector_t step;
	unsigned int n_histogram_entries;
	unsigned long long *histogram_boundaries;
	const char *program_id;
	const char *aux_data;
	struct rcu_head rcu_head;
	size_t shared_alloc_size;
	size_t percpu_alloc_size;
	size_t histogram_alloc_size;
	struct dm_stat_percpu *stat_percpu[NR_CPUS];
	struct dm_stat_shared stat_shared[];
};

#define STAT_PRECISE_TIMESTAMPS		1

struct dm_stats_last_position {
	sector_t last_sector;
	unsigned int last_rw;
};

/*
 * A typo on the command line could possibly make the kernel run out of memory
 * and crash. To prevent the crash we account all used memory. We fail if we
 * exhaust 1/4 of all memory or 1/2 of vmalloc space.
 */
#define DM_STATS_MEMORY_FACTOR		4
#define DM_STATS_VMALLOC_FACTOR		2

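/*
 * As a rough illustration: on a machine with 8 GiB of RAM and 4 KiB pages,
 * the check below refuses an allocation once the accounted total would
 * exceed 2 GiB (totalram / DM_STATS_MEMORY_FACTOR); on a 32-bit kernel
 * with a 128 MiB vmalloc arena, the vmalloc limit of 64 MiB kicks in
 * first.  The exact numbers depend on the system.
 */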
static DEFINE_SPINLOCK(shared_memory_lock);

static unsigned long shared_memory_amount;

static bool __check_shared_memory(size_t alloc_size)
{
	size_t a;

	a = shared_memory_amount + alloc_size;
	if (a < shared_memory_amount)
		return false;
	if (a >> PAGE_SHIFT > totalram_pages() / DM_STATS_MEMORY_FACTOR)
		return false;
#ifdef CONFIG_MMU
	if (a > (VMALLOC_END - VMALLOC_START) / DM_STATS_VMALLOC_FACTOR)
		return false;
#endif
	return true;
}

static bool check_shared_memory(size_t alloc_size)
{
	bool ret;

	spin_lock_irq(&shared_memory_lock);

	ret = __check_shared_memory(alloc_size);

	spin_unlock_irq(&shared_memory_lock);

	return ret;
}

static bool claim_shared_memory(size_t alloc_size)
{
	spin_lock_irq(&shared_memory_lock);

	if (!__check_shared_memory(alloc_size)) {
		spin_unlock_irq(&shared_memory_lock);
		return false;
	}

	shared_memory_amount += alloc_size;

	spin_unlock_irq(&shared_memory_lock);

	return true;
}

static void free_shared_memory(size_t alloc_size)
{
	unsigned long flags;

	spin_lock_irqsave(&shared_memory_lock, flags);

	if (WARN_ON_ONCE(shared_memory_amount < alloc_size)) {
		spin_unlock_irqrestore(&shared_memory_lock, flags);
		DMCRIT("Memory usage accounting bug.");
		return;
	}

	shared_memory_amount -= alloc_size;

	spin_unlock_irqrestore(&shared_memory_lock, flags);
}

static void *dm_kvzalloc(size_t alloc_size, int node)
{
	void *p;

	if (!claim_shared_memory(alloc_size))
		return NULL;

	p = kvzalloc_node(alloc_size, GFP_KERNEL | __GFP_NOMEMALLOC, node);
	if (p)
		return p;

	free_shared_memory(alloc_size);

	return NULL;
}

static void dm_kvfree(void *ptr, size_t alloc_size)
{
	if (!ptr)
		return;

	free_shared_memory(alloc_size);

	kvfree(ptr);
}

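/*
 * The histograms of all areas are carved out of one allocation per CPU
 * (plus one more for the shared tmp copies), so only the base pointers -
 * stat_percpu[cpu][0].histogram and stat_shared[0].tmp.histogram - need
 * to be freed here.
 */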
static void dm_stat_free(struct rcu_head *head)
{
	int cpu;
	struct dm_stat *s = container_of(head, struct dm_stat, rcu_head);

	kfree(s->histogram_boundaries);
	kfree(s->program_id);
	kfree(s->aux_data);
	for_each_possible_cpu(cpu) {
		dm_kvfree(s->stat_percpu[cpu][0].histogram, s->histogram_alloc_size);
		dm_kvfree(s->stat_percpu[cpu], s->percpu_alloc_size);
	}
	dm_kvfree(s->stat_shared[0].tmp.histogram, s->histogram_alloc_size);
	dm_kvfree(s, s->shared_alloc_size);
}

static int dm_stat_in_flight(struct dm_stat_shared *shared)
{
	return atomic_read(&shared->in_flight[READ]) +
	       atomic_read(&shared->in_flight[WRITE]);
}

void dm_stats_init(struct dm_stats *stats)
{
	int cpu;
	struct dm_stats_last_position *last;

	mutex_init(&stats->mutex);
	INIT_LIST_HEAD(&stats->list);
	stats->precise_timestamps = false;
	stats->last = alloc_percpu(struct dm_stats_last_position);
	for_each_possible_cpu(cpu) {
		last = per_cpu_ptr(stats->last, cpu);
		last->last_sector = (sector_t)ULLONG_MAX;
		last->last_rw = UINT_MAX;
	}
}

void dm_stats_cleanup(struct dm_stats *stats)
{
	size_t ni;
	struct dm_stat *s;
	struct dm_stat_shared *shared;

	while (!list_empty(&stats->list)) {
		s = container_of(stats->list.next, struct dm_stat, list_entry);
		list_del(&s->list_entry);
		for (ni = 0; ni < s->n_entries; ni++) {
			shared = &s->stat_shared[ni];
			if (WARN_ON(dm_stat_in_flight(shared))) {
				DMCRIT("leaked in-flight counter at index %lu "
				       "(start %llu, end %llu, step %llu): reads %d, writes %d",
				       (unsigned long)ni,
				       (unsigned long long)s->start,
				       (unsigned long long)s->end,
				       (unsigned long long)s->step,
				       atomic_read(&shared->in_flight[READ]),
				       atomic_read(&shared->in_flight[WRITE]));
			}
			cond_resched();
		}
		dm_stat_free(&s->rcu_head);
	}
	free_percpu(stats->last);
	mutex_destroy(&stats->mutex);
}

static void dm_stats_recalc_precise_timestamps(struct dm_stats *stats)
{
	struct list_head *l;
	struct dm_stat *tmp_s;
	bool precise_timestamps = false;

	list_for_each(l, &stats->list) {
		tmp_s = container_of(l, struct dm_stat, list_entry);
		if (tmp_s->stat_flags & STAT_PRECISE_TIMESTAMPS) {
			precise_timestamps = true;
			break;
		}
	}
	stats->precise_timestamps = precise_timestamps;
}

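/*
 * The number of areas is ceil((end - start) / step).  For example, a
 * region of 1024 sectors with a step of 100 sectors is split into 11
 * areas: ten of 100 sectors and a final one of 24 sectors.
 */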
static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
			   sector_t step, unsigned int stat_flags,
			   unsigned int n_histogram_entries,
			   unsigned long long *histogram_boundaries,
			   const char *program_id, const char *aux_data,
			   void (*suspend_callback)(struct mapped_device *),
			   void (*resume_callback)(struct mapped_device *),
			   struct mapped_device *md)
{
	struct list_head *l;
	struct dm_stat *s, *tmp_s;
	sector_t n_entries;
	size_t ni;
	size_t shared_alloc_size;
	size_t percpu_alloc_size;
	size_t histogram_alloc_size;
	struct dm_stat_percpu *p;
	int cpu;
	int ret_id;
	int r;

	if (end < start || !step)
		return -EINVAL;

	n_entries = end - start;
	if (dm_sector_div64(n_entries, step))
		n_entries++;

	if (n_entries != (size_t)n_entries || !(size_t)(n_entries + 1))
		return -EOVERFLOW;

	shared_alloc_size = struct_size(s, stat_shared, n_entries);
	if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries)
		return -EOVERFLOW;

	percpu_alloc_size = (size_t)n_entries * sizeof(struct dm_stat_percpu);
	if (percpu_alloc_size / sizeof(struct dm_stat_percpu) != n_entries)
		return -EOVERFLOW;

	histogram_alloc_size = (n_histogram_entries + 1) * (size_t)n_entries * sizeof(unsigned long long);
	if (histogram_alloc_size / (n_histogram_entries + 1) != (size_t)n_entries * sizeof(unsigned long long))
		return -EOVERFLOW;

	if (!check_shared_memory(shared_alloc_size + histogram_alloc_size +
				 num_possible_cpus() * (percpu_alloc_size + histogram_alloc_size)))
		return -ENOMEM;

	s = dm_kvzalloc(shared_alloc_size, NUMA_NO_NODE);
	if (!s)
		return -ENOMEM;

	s->stat_flags = stat_flags;
	s->n_entries = n_entries;
	s->start = start;
	s->end = end;
	s->step = step;
	s->shared_alloc_size = shared_alloc_size;
	s->percpu_alloc_size = percpu_alloc_size;
	s->histogram_alloc_size = histogram_alloc_size;

	s->n_histogram_entries = n_histogram_entries;
	s->histogram_boundaries = kmemdup(histogram_boundaries,
					  s->n_histogram_entries * sizeof(unsigned long long), GFP_KERNEL);
	if (!s->histogram_boundaries) {
		r = -ENOMEM;
		goto out;
	}

	s->program_id = kstrdup(program_id, GFP_KERNEL);
	if (!s->program_id) {
		r = -ENOMEM;
		goto out;
	}
	s->aux_data = kstrdup(aux_data, GFP_KERNEL);
	if (!s->aux_data) {
		r = -ENOMEM;
		goto out;
	}

	for (ni = 0; ni < n_entries; ni++) {
		atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
		atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
		cond_resched();
	}

	if (s->n_histogram_entries) {
		unsigned long long *hi;

		hi = dm_kvzalloc(s->histogram_alloc_size, NUMA_NO_NODE);
		if (!hi) {
			r = -ENOMEM;
			goto out;
		}
		for (ni = 0; ni < n_entries; ni++) {
			s->stat_shared[ni].tmp.histogram = hi;
			hi += s->n_histogram_entries + 1;
			cond_resched();
		}
	}

	for_each_possible_cpu(cpu) {
		p = dm_kvzalloc(percpu_alloc_size, cpu_to_node(cpu));
		if (!p) {
			r = -ENOMEM;
			goto out;
		}
		s->stat_percpu[cpu] = p;
		if (s->n_histogram_entries) {
			unsigned long long *hi;

			hi = dm_kvzalloc(s->histogram_alloc_size, cpu_to_node(cpu));
			if (!hi) {
				r = -ENOMEM;
				goto out;
			}
			for (ni = 0; ni < n_entries; ni++) {
				p[ni].histogram = hi;
				hi += s->n_histogram_entries + 1;
				cond_resched();
			}
		}
	}

	/*
	 * Suspend/resume to make sure there is no i/o in flight,
	 * so that newly created statistics will be exact.
	 *
	 * (note: we couldn't suspend earlier because we must not
	 * allocate memory while suspended)
	 */
	suspend_callback(md);

	mutex_lock(&stats->mutex);
	s->id = 0;
	list_for_each(l, &stats->list) {
		tmp_s = container_of(l, struct dm_stat, list_entry);
		if (WARN_ON(tmp_s->id < s->id)) {
			r = -EINVAL;
			goto out_unlock_resume;
		}
		if (tmp_s->id > s->id)
			break;
		if (unlikely(s->id == INT_MAX)) {
			r = -ENFILE;
			goto out_unlock_resume;
		}
		s->id++;
	}
	ret_id = s->id;
	list_add_tail_rcu(&s->list_entry, l);

	dm_stats_recalc_precise_timestamps(stats);

	if (!static_key_enabled(&stats_enabled.key))
		static_branch_enable(&stats_enabled);

	mutex_unlock(&stats->mutex);

	resume_callback(md);

	return ret_id;

out_unlock_resume:
	mutex_unlock(&stats->mutex);
	resume_callback(md);
out:
	dm_stat_free(&s->rcu_head);
	return r;
}

static struct dm_stat *__dm_stats_find(struct dm_stats *stats, int id)
{
	struct dm_stat *s;

	list_for_each_entry(s, &stats->list, list_entry) {
		if (s->id > id)
			break;
		if (s->id == id)
			return s;
	}

	return NULL;
}

static int dm_stats_delete(struct dm_stats *stats, int id)
{
	struct dm_stat *s;
	int cpu;

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	list_del_rcu(&s->list_entry);

	dm_stats_recalc_precise_timestamps(stats);

	mutex_unlock(&stats->mutex);

	/*
	 * vfree can't be called from an RCU callback.
	 */
	for_each_possible_cpu(cpu)
		if (is_vmalloc_addr(s->stat_percpu[cpu]) ||
		    is_vmalloc_addr(s->stat_percpu[cpu][0].histogram))
			goto do_sync_free;
	if (is_vmalloc_addr(s) ||
	    is_vmalloc_addr(s->stat_shared[0].tmp.histogram)) {
do_sync_free:
		synchronize_rcu_expedited();
		dm_stat_free(&s->rcu_head);
	} else {
		WRITE_ONCE(dm_stat_need_rcu_barrier, 1);
		call_rcu(&s->rcu_head, dm_stat_free);
	}
	return 0;
}

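/*
 * An illustrative line of @stats_list output, for region 0 covering
 * sectors 0..16383 with a 512-sector step, default program id and no
 * aux data:
 *
 *   0: 0+16384 512 - -
 */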
static int dm_stats_list(struct dm_stats *stats, const char *program,
			 char *result, unsigned int maxlen)
{
	struct dm_stat *s;
	sector_t len;
	unsigned int sz = 0;

	/*
	 * Output format:
	 *   <region_id>: <start_sector>+<length> <step> <program_id> <aux_data>
	 */

	mutex_lock(&stats->mutex);
	list_for_each_entry(s, &stats->list, list_entry) {
		if (!program || !strcmp(program, s->program_id)) {
			len = s->end - s->start;
			DMEMIT("%d: %llu+%llu %llu %s %s", s->id,
				(unsigned long long)s->start,
				(unsigned long long)len,
				(unsigned long long)s->step,
				s->program_id,
				s->aux_data);
			if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
				DMEMIT(" precise_timestamps");
			if (s->n_histogram_entries) {
				unsigned int i;

				DMEMIT(" histogram:");
				for (i = 0; i < s->n_histogram_entries; i++) {
					if (i)
						DMEMIT(",");
					DMEMIT("%llu", s->histogram_boundaries[i]);
				}
			}
			DMEMIT("\n");
		}
		cond_resched();
	}
	mutex_unlock(&stats->mutex);

	return 1;
}

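/*
 * Example: if two reads and one write were in flight for the 10 ticks
 * since the last round, this adds 10 to io_ticks[READ], 10 to
 * io_ticks[WRITE], 10 to io_ticks_total and 3 * 10 = 30 to time_in_queue.
 */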
static void dm_stat_round(struct dm_stat *s, struct dm_stat_shared *shared,
			  struct dm_stat_percpu *p)
{
	/*
	 * This is racy, but so is part_round_stats_single.
	 */
	unsigned long long now, difference;
	unsigned int in_flight_read, in_flight_write;

	if (likely(!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)))
		now = jiffies;
	else
		now = ktime_to_ns(ktime_get());

	difference = now - shared->stamp;
	if (!difference)
		return;

	in_flight_read = (unsigned int)atomic_read(&shared->in_flight[READ]);
	in_flight_write = (unsigned int)atomic_read(&shared->in_flight[WRITE]);
	if (in_flight_read)
		p->io_ticks[READ] += difference;
	if (in_flight_write)
		p->io_ticks[WRITE] += difference;
	if (in_flight_read + in_flight_write) {
		p->io_ticks_total += difference;
		p->time_in_queue += (in_flight_read + in_flight_write) * difference;
	}
	shared->stamp = now;
}

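/*
 * The histogram lookup below is a binary search for the bucket that
 * contains the request duration: with boundaries { 100, 1000 }, bucket 0
 * counts durations below 100, bucket 1 durations from 100 to 999 and
 * bucket 2 durations of 1000 and above.
 */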
static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
			      int idx, sector_t len,
			      struct dm_stats_aux *stats_aux, bool end,
			      unsigned long duration_jiffies)
{
	struct dm_stat_shared *shared = &s->stat_shared[entry];
	struct dm_stat_percpu *p;

	/*
	 * For strict correctness we should use local_irq_save/restore
	 * instead of preempt_disable/enable.
	 *
	 * preempt_disable/enable is racy if the driver finishes bios
	 * from non-interrupt context as well as from interrupt context
	 * or from several different interrupts.
	 *
	 * On 64-bit architectures the race only results in not counting some
	 * events, so it is acceptable.  On 32-bit architectures the race could
	 * cause a counter to go off by 2^32, so we need to do proper locking
	 * there.
	 *
	 * part_stat_lock()/part_stat_unlock() have this race too.
	 */
#if BITS_PER_LONG == 32
	unsigned long flags;

	local_irq_save(flags);
#else
	preempt_disable();
#endif
	p = &s->stat_percpu[smp_processor_id()][entry];

	if (!end) {
		dm_stat_round(s, shared, p);
		atomic_inc(&shared->in_flight[idx]);
	} else {
		unsigned long long duration;

		dm_stat_round(s, shared, p);
		atomic_dec(&shared->in_flight[idx]);
		p->sectors[idx] += len;
		p->ios[idx] += 1;
		p->merges[idx] += stats_aux->merged;
		if (!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)) {
			p->ticks[idx] += duration_jiffies;
			duration = jiffies_to_msecs(duration_jiffies);
		} else {
			p->ticks[idx] += stats_aux->duration_ns;
			duration = stats_aux->duration_ns;
		}
		if (s->n_histogram_entries) {
			unsigned int lo = 0, hi = s->n_histogram_entries + 1;

			while (lo + 1 < hi) {
				unsigned int mid = (lo + hi) / 2;

				if (s->histogram_boundaries[mid - 1] > duration)
					hi = mid;
				else
					lo = mid;
			}
			p->histogram[lo]++;
		}
	}

#if BITS_PER_LONG == 32
	local_irq_restore(flags);
#else
	preempt_enable();
#endif
}

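/*
 * A bio spanning several areas is accounted fragment by fragment.  For
 * example, with start 0 and step 100, a bio covering sectors 150..349
 * is charged as fragments of 50, 100 and 50 sectors to entries 1, 2
 * and 3 respectively.
 */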
static void __dm_stat_bio(struct dm_stat *s, int bi_rw,
			  sector_t bi_sector, sector_t end_sector,
			  bool end, unsigned long duration_jiffies,
			  struct dm_stats_aux *stats_aux)
{
	sector_t rel_sector, offset, todo, fragment_len;
	size_t entry;

	if (end_sector <= s->start || bi_sector >= s->end)
		return;
	if (unlikely(bi_sector < s->start)) {
		rel_sector = 0;
		todo = end_sector - s->start;
	} else {
		rel_sector = bi_sector - s->start;
		todo = end_sector - bi_sector;
	}
	if (unlikely(end_sector > s->end))
		todo -= (end_sector - s->end);

	offset = dm_sector_div64(rel_sector, s->step);
	entry = rel_sector;
	do {
		if (WARN_ON_ONCE(entry >= s->n_entries)) {
			DMCRIT("Invalid area access in region id %d", s->id);
			return;
		}
		fragment_len = todo;
		if (fragment_len > s->step - offset)
			fragment_len = s->step - offset;
		dm_stat_for_entry(s, entry, bi_rw, fragment_len,
				  stats_aux, end, duration_jiffies);
		todo -= fragment_len;
		entry++;
		offset = 0;
	} while (unlikely(todo != 0));
}

void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
			 sector_t bi_sector, unsigned int bi_sectors, bool end,
			 unsigned long start_time,
			 struct dm_stats_aux *stats_aux)
{
	struct dm_stat *s;
	sector_t end_sector;
	struct dm_stats_last_position *last;
	bool got_precise_time;
	unsigned long duration_jiffies = 0;

	if (unlikely(!bi_sectors))
		return;

	end_sector = bi_sector + bi_sectors;

	if (!end) {
		/*
		 * A race condition can at worst result in the merged flag being
		 * misrepresented, so we don't have to disable preemption here.
		 */
		last = raw_cpu_ptr(stats->last);
		stats_aux->merged =
			(bi_sector == READ_ONCE(last->last_sector)) &&
			((bi_rw == WRITE) ==
			 (READ_ONCE(last->last_rw) == WRITE));
		WRITE_ONCE(last->last_sector, end_sector);
		WRITE_ONCE(last->last_rw, bi_rw);
	} else
		duration_jiffies = jiffies - start_time;

	rcu_read_lock();

	got_precise_time = false;
	list_for_each_entry_rcu(s, &stats->list, list_entry) {
		if (s->stat_flags & STAT_PRECISE_TIMESTAMPS && !got_precise_time) {
			/* start (!end) duration_ns is set by DM core's alloc_io() */
			if (end)
				stats_aux->duration_ns = ktime_to_ns(ktime_get()) - stats_aux->duration_ns;
			got_precise_time = true;
		}
		__dm_stat_bio(s, bi_rw, bi_sector, end_sector, end, duration_jiffies, stats_aux);
	}

	rcu_read_unlock();
}

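/*
 * Sum the per-CPU counters of one area into shared->tmp.  The counters
 * are read with READ_ONCE without stopping writers, so the totals are
 * only approximately consistent - good enough for statistics.
 */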
static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared,
						   struct dm_stat *s, size_t x)
{
	int cpu;
	struct dm_stat_percpu *p;

	local_irq_disable();
	p = &s->stat_percpu[smp_processor_id()][x];
	dm_stat_round(s, shared, p);
	local_irq_enable();

	shared->tmp.sectors[READ] = 0;
	shared->tmp.sectors[WRITE] = 0;
	shared->tmp.ios[READ] = 0;
	shared->tmp.ios[WRITE] = 0;
	shared->tmp.merges[READ] = 0;
	shared->tmp.merges[WRITE] = 0;
	shared->tmp.ticks[READ] = 0;
	shared->tmp.ticks[WRITE] = 0;
	shared->tmp.io_ticks[READ] = 0;
	shared->tmp.io_ticks[WRITE] = 0;
	shared->tmp.io_ticks_total = 0;
	shared->tmp.time_in_queue = 0;

	if (s->n_histogram_entries)
		memset(shared->tmp.histogram, 0, (s->n_histogram_entries + 1) * sizeof(unsigned long long));

	for_each_possible_cpu(cpu) {
		p = &s->stat_percpu[cpu][x];
		shared->tmp.sectors[READ] += READ_ONCE(p->sectors[READ]);
		shared->tmp.sectors[WRITE] += READ_ONCE(p->sectors[WRITE]);
		shared->tmp.ios[READ] += READ_ONCE(p->ios[READ]);
		shared->tmp.ios[WRITE] += READ_ONCE(p->ios[WRITE]);
		shared->tmp.merges[READ] += READ_ONCE(p->merges[READ]);
		shared->tmp.merges[WRITE] += READ_ONCE(p->merges[WRITE]);
		shared->tmp.ticks[READ] += READ_ONCE(p->ticks[READ]);
		shared->tmp.ticks[WRITE] += READ_ONCE(p->ticks[WRITE]);
		shared->tmp.io_ticks[READ] += READ_ONCE(p->io_ticks[READ]);
		shared->tmp.io_ticks[WRITE] += READ_ONCE(p->io_ticks[WRITE]);
		shared->tmp.io_ticks_total += READ_ONCE(p->io_ticks_total);
		shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue);
		if (s->n_histogram_entries) {
			unsigned int i;

			for (i = 0; i < s->n_histogram_entries + 1; i++)
				shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]);
		}
	}
}

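/*
 * Clearing works by subtracting the previously computed totals from the
 * current CPU's counters.  An individual per-CPU counter may wrap below
 * zero as a result, but the sum over all CPUs - the only value that is
 * ever reported - becomes zero, and unsigned wrap-around is well defined.
 */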
static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
			    bool init_tmp_percpu_totals)
{
	size_t x;
	struct dm_stat_shared *shared;
	struct dm_stat_percpu *p;

	for (x = idx_start; x < idx_end; x++) {
		shared = &s->stat_shared[x];
		if (init_tmp_percpu_totals)
			__dm_stat_init_temporary_percpu_totals(shared, s, x);
		local_irq_disable();
		p = &s->stat_percpu[smp_processor_id()][x];
		p->sectors[READ] -= shared->tmp.sectors[READ];
		p->sectors[WRITE] -= shared->tmp.sectors[WRITE];
		p->ios[READ] -= shared->tmp.ios[READ];
		p->ios[WRITE] -= shared->tmp.ios[WRITE];
		p->merges[READ] -= shared->tmp.merges[READ];
		p->merges[WRITE] -= shared->tmp.merges[WRITE];
		p->ticks[READ] -= shared->tmp.ticks[READ];
		p->ticks[WRITE] -= shared->tmp.ticks[WRITE];
		p->io_ticks[READ] -= shared->tmp.io_ticks[READ];
		p->io_ticks[WRITE] -= shared->tmp.io_ticks[WRITE];
		p->io_ticks_total -= shared->tmp.io_ticks_total;
		p->time_in_queue -= shared->tmp.time_in_queue;
		local_irq_enable();
		if (s->n_histogram_entries) {
			unsigned int i;

			for (i = 0; i < s->n_histogram_entries + 1; i++) {
				local_irq_disable();
				p = &s->stat_percpu[smp_processor_id()][x];
				p->histogram[i] -= shared->tmp.histogram[i];
				local_irq_enable();
			}
		}
		cond_resched();
	}
}

static int dm_stats_clear(struct dm_stats *stats, int id)
{
	struct dm_stat *s;

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	__dm_stat_clear(s, 0, s->n_entries, true);

	mutex_unlock(&stats->mutex);

	return 1;
}

/*
 * This is like jiffies_to_msecs(), but works for 64-bit values.
 */
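/*
 * jiffies_to_msecs takes an unsigned long, so the 64-bit value is
 * converted in 22-bit chunks: j = lo + mid * 2^22 + hi * 2^44, with each
 * chunk converted separately and weighted back into place.
 */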
static unsigned long long dm_jiffies_to_msec64(struct dm_stat *s, unsigned long long j)
{
	unsigned long long result;

	if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
		return j;

	result = 0;
	if (j)
		result = jiffies_to_msecs(j & 0x3fffff);
	if (j >= 1 << 22)
		result += (unsigned long long)jiffies_to_msecs((j >> 22) & 0x3fffff) << 22;
	if (j >= 1ULL << 44)
		result += (unsigned long long)jiffies_to_msecs(j >> 44) << 44;

	return result;
}

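/*
 * The counters in each output line are, in order: reads completed, reads
 * merged, sectors read, time spent reading, writes completed, writes
 * merged, sectors written, time spent writing, I/Os currently in flight,
 * time the area was busy, weighted time spent in queue, and the busy time
 * split into read and write parts; times are in milliseconds (nanoseconds
 * when precise_timestamps is used), optionally followed by the histogram
 * buckets.
 */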
static int dm_stats_print(struct dm_stats *stats, int id,
			  size_t idx_start, size_t idx_len,
			  bool clear, char *result, unsigned int maxlen)
{
	unsigned int sz = 0;
	struct dm_stat *s;
	size_t x;
	sector_t start, end, step;
	size_t idx_end;
	struct dm_stat_shared *shared;

	/*
	 * Output format:
	 *   <start_sector>+<length> counters
	 */

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	idx_end = idx_start + idx_len;
	if (idx_end < idx_start ||
	    idx_end > s->n_entries)
		idx_end = s->n_entries;

	if (idx_start > idx_end)
		idx_start = idx_end;

	step = s->step;
	start = s->start + (step * idx_start);

	for (x = idx_start; x < idx_end; x++, start = end) {
		shared = &s->stat_shared[x];
		end = start + step;
		if (unlikely(end > s->end))
			end = s->end;

		__dm_stat_init_temporary_percpu_totals(shared, s, x);

		DMEMIT("%llu+%llu %llu %llu %llu %llu %llu %llu %llu %llu %d %llu %llu %llu %llu",
		       (unsigned long long)start,
		       (unsigned long long)step,
		       shared->tmp.ios[READ],
		       shared->tmp.merges[READ],
		       shared->tmp.sectors[READ],
		       dm_jiffies_to_msec64(s, shared->tmp.ticks[READ]),
		       shared->tmp.ios[WRITE],
		       shared->tmp.merges[WRITE],
		       shared->tmp.sectors[WRITE],
		       dm_jiffies_to_msec64(s, shared->tmp.ticks[WRITE]),
		       dm_stat_in_flight(shared),
		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks_total),
		       dm_jiffies_to_msec64(s, shared->tmp.time_in_queue),
		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks[READ]),
		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks[WRITE]));
		if (s->n_histogram_entries) {
			unsigned int i;

			for (i = 0; i < s->n_histogram_entries + 1; i++)
				DMEMIT("%s%llu", !i ? " " : ":", shared->tmp.histogram[i]);
		}
		DMEMIT("\n");

		if (unlikely(sz + 1 >= maxlen))
			goto buffer_overflow;

		cond_resched();
	}

	if (clear)
		__dm_stat_clear(s, idx_start, idx_end, false);

buffer_overflow:
	mutex_unlock(&stats->mutex);

	return 1;
}

static int dm_stats_set_aux(struct dm_stats *stats, int id, const char *aux_data)
{
	struct dm_stat *s;
	const char *new_aux_data;

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	new_aux_data = kstrdup(aux_data, GFP_KERNEL);
	if (!new_aux_data) {
		mutex_unlock(&stats->mutex);
		return -ENOMEM;
	}

	kfree(s->aux_data);
	s->aux_data = new_aux_data;

	mutex_unlock(&stats->mutex);

	return 0;
}

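/*
 * Parses a comma-separated list of strictly increasing boundaries; e.g.
 * the string "100,1000" (the part after the "histogram:" prefix) yields
 * the two boundaries { 100, 1000 }, which define three latency buckets.
 */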
static int parse_histogram(const char *h, unsigned int *n_histogram_entries,
			   unsigned long long **histogram_boundaries)
{
	const char *q;
	unsigned int n;
	unsigned long long last;

	*n_histogram_entries = 1;
	for (q = h; *q; q++)
		if (*q == ',')
			(*n_histogram_entries)++;

	*histogram_boundaries = kmalloc_array(*n_histogram_entries,
					      sizeof(unsigned long long),
					      GFP_KERNEL);
	if (!*histogram_boundaries)
		return -ENOMEM;

	n = 0;
	last = 0;
	while (1) {
		unsigned long long hi;
		int s;
		char ch;

		s = sscanf(h, "%llu%c", &hi, &ch);
		if (!s || (s == 2 && ch != ','))
			return -EINVAL;
		if (hi <= last)
			return -EINVAL;
		last = hi;
		(*histogram_boundaries)[n] = hi;
		if (s == 1)
			return 0;
		h = strchr(h, ',') + 1;
		n++;
	}
}

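/*
 * Illustrative @stats_create messages (sent with
 * "dmsetup message <device> 0 ..."; the device name is up to the caller):
 *
 *   @stats_create - /100
 *	whole device, split into 100 areas
 *   @stats_create 0+16384 512 myprog mydata
 *	sectors 0..16383, 512-sector areas, with a program id and aux data
 */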
static int message_stats_create(struct mapped_device *md,
				unsigned int argc, char **argv,
				char *result, unsigned int maxlen)
{
	int r;
	int id;
	char dummy;
	unsigned long long start, end, len, step;
	unsigned int divisor;
	const char *program_id, *aux_data;
	unsigned int stat_flags = 0;
	unsigned int n_histogram_entries = 0;
	unsigned long long *histogram_boundaries = NULL;
	struct dm_arg_set as, as_backup;
	const char *a;
	unsigned int feature_args;

	/*
	 * Input format:
	 *   <range> <step> [<extra_parameters> <parameters>] [<program_id> [<aux_data>]]
	 */

	if (argc < 3)
		goto ret_einval;

	as.argc = argc;
	as.argv = argv;
	dm_consume_args(&as, 1);

	a = dm_shift_arg(&as);
	if (!strcmp(a, "-")) {
		start = 0;
		len = dm_get_size(md);
		if (!len)
			len = 1;
	} else if (sscanf(a, "%llu+%llu%c", &start, &len, &dummy) != 2 ||
		   start != (sector_t)start || len != (sector_t)len)
		goto ret_einval;

	end = start + len;
	if (start >= end)
		goto ret_einval;

	a = dm_shift_arg(&as);
	if (sscanf(a, "/%u%c", &divisor, &dummy) == 1) {
		if (!divisor)
			goto ret_einval;
		step = end - start;
		if (do_div(step, divisor))
			step++;
		if (!step)
			step = 1;
	} else if (sscanf(a, "%llu%c", &step, &dummy) != 1 ||
		   step != (sector_t)step || !step)
		goto ret_einval;

	as_backup = as;
	a = dm_shift_arg(&as);
	if (a && sscanf(a, "%u%c", &feature_args, &dummy) == 1) {
		while (feature_args--) {
			a = dm_shift_arg(&as);
			if (!a)
				goto ret_einval;
			if (!strcasecmp(a, "precise_timestamps"))
				stat_flags |= STAT_PRECISE_TIMESTAMPS;
			else if (!strncasecmp(a, "histogram:", 10)) {
				if (n_histogram_entries)
					goto ret_einval;
				r = parse_histogram(a + 10, &n_histogram_entries, &histogram_boundaries);
				if (r)
					goto ret;
			} else
				goto ret_einval;
		}
	} else {
		as = as_backup;
	}

	program_id = "-";
	aux_data = "-";

	a = dm_shift_arg(&as);
	if (a)
		program_id = a;

	a = dm_shift_arg(&as);
	if (a)
		aux_data = a;

	if (as.argc)
		goto ret_einval;

	/*
	 * If a buffer overflow happens after we created the region,
	 * it's too late (the userspace would retry with a larger
	 * buffer, but the region id that caused the overflow is already
	 * leaked).  So we must detect buffer overflow in advance.
	 */
	snprintf(result, maxlen, "%d", INT_MAX);
	if (dm_message_test_buffer_overflow(result, maxlen)) {
		r = 1;
		goto ret;
	}

	id = dm_stats_create(dm_get_stats(md), start, end, step, stat_flags,
			     n_histogram_entries, histogram_boundaries, program_id, aux_data,
			     dm_internal_suspend_fast, dm_internal_resume_fast, md);
	if (id < 0) {
		r = id;
		goto ret;
	}

	snprintf(result, maxlen, "%d", id);

	r = 1;
	goto ret;

ret_einval:
	r = -EINVAL;
ret:
	kfree(histogram_boundaries);
	return r;
}

static int message_stats_delete(struct mapped_device *md,
				unsigned int argc, char **argv)
{
	int id;
	char dummy;

	if (argc != 2)
		return -EINVAL;

	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	return dm_stats_delete(dm_get_stats(md), id);
}

static int message_stats_clear(struct mapped_device *md,
			       unsigned int argc, char **argv)
{
	int id;
	char dummy;

	if (argc != 2)
		return -EINVAL;

	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	return dm_stats_clear(dm_get_stats(md), id);
}

static int message_stats_list(struct mapped_device *md,
			      unsigned int argc, char **argv,
			      char *result, unsigned int maxlen)
{
	int r;
	const char *program = NULL;

	if (argc < 1 || argc > 2)
		return -EINVAL;

	if (argc > 1) {
		program = kstrdup(argv[1], GFP_KERNEL);
		if (!program)
			return -ENOMEM;
	}

	r = dm_stats_list(dm_get_stats(md), program, result, maxlen);

	kfree(program);

	return r;
}

static int message_stats_print(struct mapped_device *md,
			       unsigned int argc, char **argv, bool clear,
			       char *result, unsigned int maxlen)
{
	int id;
	char dummy;
	unsigned long idx_start = 0, idx_len = ULONG_MAX;

	if (argc != 2 && argc != 4)
		return -EINVAL;

	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	if (argc > 3) {
		if (strcmp(argv[2], "-") &&
		    sscanf(argv[2], "%lu%c", &idx_start, &dummy) != 1)
			return -EINVAL;
		if (strcmp(argv[3], "-") &&
		    sscanf(argv[3], "%lu%c", &idx_len, &dummy) != 1)
			return -EINVAL;
	}

	return dm_stats_print(dm_get_stats(md), id, idx_start, idx_len, clear,
			      result, maxlen);
}

static int message_stats_set_aux(struct mapped_device *md,
				 unsigned int argc, char **argv)
{
	int id;
	char dummy;

	if (argc != 3)
		return -EINVAL;

	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	return dm_stats_set_aux(dm_get_stats(md), id, argv[2]);
}

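/*
 * A typical session, sent through the DM message interface
 * ("dmsetup message <device> 0 ..."):
 *
 *   @stats_create - /100	-> prints the id of the new region
 *   @stats_print 0		-> prints one line of counters per area
 *   @stats_delete 0
 */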
int dm_stats_message(struct mapped_device *md, unsigned int argc, char **argv,
		     char *result, unsigned int maxlen)
{
	int r;

	/* All messages here must start with '@' */
	if (!strcasecmp(argv[0], "@stats_create"))
		r = message_stats_create(md, argc, argv, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_delete"))
		r = message_stats_delete(md, argc, argv);
	else if (!strcasecmp(argv[0], "@stats_clear"))
		r = message_stats_clear(md, argc, argv);
	else if (!strcasecmp(argv[0], "@stats_list"))
		r = message_stats_list(md, argc, argv, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_print"))
		r = message_stats_print(md, argc, argv, false, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_print_clear"))
		r = message_stats_print(md, argc, argv, true, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_set_aux"))
		r = message_stats_set_aux(md, argc, argv);
	else
		return 2; /* this wasn't a stats message */

	if (r == -EINVAL)
		DMCRIT("Invalid parameters for message %s", argv[0]);

	return r;
}

int __init dm_statistics_init(void)
{
	shared_memory_amount = 0;
	dm_stat_need_rcu_barrier = 0;
	return 0;
}

void dm_statistics_exit(void)
{
	if (dm_stat_need_rcu_barrier)
		rcu_barrier();
	if (WARN_ON(shared_memory_amount))
		DMCRIT("shared_memory_amount leaked: %lu", shared_memory_amount);
}

module_param_named(stats_current_allocated_bytes, shared_memory_amount, ulong, 0444);
MODULE_PARM_DESC(stats_current_allocated_bytes, "Memory currently used by statistics");