/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */

/*
 * Collect locking event counts
 */
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/fs.h>

#include "lock_events.h"

#undef  LOCK_EVENT
#define LOCK_EVENT(name)	[LOCKEVENT_ ## name] = #name,

#define LOCK_EVENTS_DIR		"lock_event_counts"

/*
 * When CONFIG_LOCK_EVENT_COUNTS is enabled, event counts of different
 * types of locks will be reported under the <debugfs>/lock_event_counts/
 * directory. See lock_events_list.h for the list of available locking
 * events.
 *
 * Writing to the special ".reset_counts" file will reset all the above
 * locking event counts. This is a very slow operation and so should not
 * be done frequently.
 *
 * These event counts are implemented as per-cpu variables which are
 * summed up whenever the corresponding debugfs files are read. This
 * minimizes the added overhead, making the counts usable even in a
 * production environment.
 */
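
/*
 * For example, with debugfs mounted at /sys/kernel/debug (the mount
 * point is an assumption for illustration), each event gets its own
 * read-only file in the directory, alongside the write-only
 * ".reset_counts" file:
 *
 *	# ls /sys/kernel/debug/lock_event_counts
 */
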
static const char * const lockevent_names[lockevent_num + 1] = {

#include "lock_events_list.h"

	[LOCKEVENT_reset_cnts] = ".reset_counts",
};

/*
 * Per-cpu counts
 */
DEFINE_PER_CPU(unsigned long, lockevents[lockevent_num]);

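/*
 * Lock code bumps these counters through the helpers declared in
 * lock_events.h; lockevent_inc(foo) increments this CPU's
 * lockevents[LOCKEVENT_foo] slot. A hypothetical call site in a lock
 * slow path (event name and condition are illustrative only):
 *
 *	if (had_to_queue)
 *		lockevent_inc(lock_slowpath);
 */
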
/*
 * The lockevent_read() function is declared __weak so that a
 * lock-specific implementation can override it (e.g. to report
 * derived values instead of a plain per-cpu sum).
 */
ssize_t __weak lockevent_read(struct file *file, char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	char buf[64];
	int cpu, id, len;
	u64 sum = 0;

	/*
	 * Get the counter ID stored in file->f_inode->i_private
	 */
	id = (long)file_inode(file)->i_private;

	if (id >= lockevent_num)
		return -EBADF;

	for_each_possible_cpu(cpu)
		sum += per_cpu(lockevents[id], cpu);
	len = snprintf(buf, sizeof(buf) - 1, "%llu\n", sum);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
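
/*
 * A read of one of these files returns a single newline-terminated
 * decimal value, the sum over all possible CPUs. Illustrative output
 * (mount point, event name and value are assumptions for illustration):
 *
 *	# cat /sys/kernel/debug/lock_event_counts/lock_pending
 *	42
 */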

/*
 * Function to handle write request
 *
 * Writes are only acted upon for the ".reset_counts" file (counter ID
 * LOCKEVENT_reset_cnts); such a write resets all the event counts.
 */
static ssize_t lockevent_write(struct file *file, const char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	int cpu;

	/*
	 * Get the counter ID stored in file->f_inode->i_private
	 */
	if ((long)file_inode(file)->i_private != LOCKEVENT_reset_cnts)
		return count;

	for_each_possible_cpu(cpu) {
		int i;
		unsigned long *ptr = per_cpu_ptr(lockevents, cpu);

		for (i = 0 ; i < lockevent_num; i++)
			WRITE_ONCE(ptr[i], 0);
	}
	return count;
}
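
/*
 * Resetting the counts from the shell (mount point assumed as above);
 * the data written is ignored, only the fact that the ".reset_counts"
 * inode was written to matters:
 *
 *	# echo 1 > /sys/kernel/debug/lock_event_counts/.reset_counts
 */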

/*
 * Debugfs data structures
 */
static const struct file_operations fops_lockevent = {
	.read = lockevent_read,
	.write = lockevent_write,
	.llseek = default_llseek,
};

#ifdef CONFIG_PARAVIRT_SPINLOCKS
#include <asm/paravirt.h>

static bool __init skip_lockevent(const char *name)
{
	static int pv_on __initdata = -1;

	if (pv_on < 0)
		pv_on = !pv_is_native_spin_unlock();
	/*
	 * Skip PV qspinlock events on bare metal.
	 */
	if (!pv_on && !memcmp(name, "pv_", 3))
		return true;
	return false;
}
#else
static inline bool skip_lockevent(const char *name)
{
	return false;
}
#endif
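
/*
 * On bare metal this causes every "pv_"-prefixed event in
 * lock_events_list.h to be skipped below, so no debugfs file is
 * created for it.
 */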

/*
 * Initialize debugfs for the locking event counts.
 */
static int __init init_lockevent_counts(void)
{
	struct dentry *d_counts = debugfs_create_dir(LOCK_EVENTS_DIR, NULL);
	int i;

	if (!d_counts)
		goto out;

	/*
	 * Create the debugfs files
	 *
	 * As reading from and writing to the stat files can be slow, only
	 * root is allowed to do the read/write to limit the impact on
	 * system performance.
	 */
	for (i = 0; i < lockevent_num; i++) {
		if (skip_lockevent(lockevent_names[i]))
			continue;
		if (!debugfs_create_file(lockevent_names[i], 0400, d_counts,
					 (void *)(long)i, &fops_lockevent))
			goto fail_undo;
	}

	if (!debugfs_create_file(lockevent_names[LOCKEVENT_reset_cnts], 0200,
				 d_counts, (void *)(long)LOCKEVENT_reset_cnts,
				 &fops_lockevent))
		goto fail_undo;

	return 0;
fail_undo:
	debugfs_remove_recursive(d_counts);
out:
	pr_warn("Could not create '%s' debugfs entries\n", LOCK_EVENTS_DIR);
	return -ENOMEM;
}
fs_initcall(init_lockevent_counts);