/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *
 * Authors: Waiman Long <longman@redhat.com>
 */

#include "lock_events.h"

#ifdef CONFIG_LOCK_EVENT_COUNTS
#ifdef CONFIG_PARAVIRT_SPINLOCKS
/*
 * Collect pvqspinlock locking event counts
 */
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/fs.h>

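/*
 * EVENT_COUNT(ev) uses token pasting to index the lockevents[] array,
 * e.g. EVENT_COUNT(pv_kick_unlock) expands to
 * lockevents[LOCKEVENT_pv_kick_unlock].
 */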
#define EVENT_COUNT(ev)	lockevents[LOCKEVENT_ ## ev]

/*
 * PV specific per-cpu kick timestamp
 */
static DEFINE_PER_CPU(u64, pv_kick_time);

/*
 * Function to read and return the PV qspinlock counts.
 *
 * The following counters are handled specially:
 * 1. pv_latency_kick
 *    Average kick latency (ns) = pv_latency_kick/pv_kick_unlock
 * 2. pv_latency_wake
 *    Average wake latency (ns) = pv_latency_wake/pv_kick_wake
 * 3. pv_hash_hops
 *    Average hops/hash = pv_hash_hops/pv_kick_unlock
 */
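/*
 * Worked example for case 3, with illustrative numbers: if the
 * pv_hash_hops sum is 51 over 50 pv_kick_unlock events,
 * do_div(sum, kicks) leaves sum = 1 and returns the remainder 1, so
 * frac = DIV_ROUND_CLOSEST_ULL(100 * 1, 50) = 2 and the counter file
 * reads "1.02", i.e. an average of 1.02 hash hops per unlock kick.
 */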
ssize_t lockevent_read(struct file *file, char __user *user_buf,
		       size_t count, loff_t *ppos)
{
	char buf[64];
	int cpu, id, len;
	u64 sum = 0, kicks = 0;

	/*
	 * Get the counter ID stored in file->f_inode->i_private
	 */
	id = (long)file_inode(file)->i_private;

	if (id >= lockevent_num)
		return -EBADF;

	for_each_possible_cpu(cpu) {
		sum += per_cpu(lockevents[id], cpu);
		/*
		 * Need to sum additional counters for some of them
		 */
		switch (id) {

		case LOCKEVENT_pv_latency_kick:
		case LOCKEVENT_pv_hash_hops:
			kicks += per_cpu(EVENT_COUNT(pv_kick_unlock), cpu);
			break;

		case LOCKEVENT_pv_latency_wake:
			kicks += per_cpu(EVENT_COUNT(pv_kick_wake), cpu);
			break;
		}
	}

	if (id == LOCKEVENT_pv_hash_hops) {
		u64 frac = 0;

		if (kicks) {
			/*
			 * do_div() turns sum into the quotient and
			 * returns the remainder, which becomes the
			 * two-digit fractional part.
			 */
			frac = 100ULL * do_div(sum, kicks);
			frac = DIV_ROUND_CLOSEST_ULL(frac, kicks);
		}

		/*
		 * Return an X.XX decimal number
		 */
		len = snprintf(buf, sizeof(buf) - 1, "%llu.%02llu\n",
			       sum, frac);
	} else {
		/*
		 * Round to the nearest ns
		 */
		if ((id == LOCKEVENT_pv_latency_kick) ||
		    (id == LOCKEVENT_pv_latency_wake)) {
			if (kicks)
				sum = DIV_ROUND_CLOSEST_ULL(sum, kicks);
		}
		len = snprintf(buf, sizeof(buf) - 1, "%llu\n", sum);
	}

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
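
/*
 * Example usage from user space (illustrative; the counter files are
 * created by lock_events.c and the exact debugfs directory name is
 * defined there):
 *
 *   # cat /sys/kernel/debug/lockevent/pv_latency_kick
 *   1532
 *
 * would report an average PV kick latency of 1532 ns.
 */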

/*
 * PV hash hop count
 */
static inline void lockevent_pv_hop(int hopcnt)
{
	this_cpu_add(EVENT_COUNT(pv_hash_hops), hopcnt);
}
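
/*
 * The hop count is supplied by the PV qspinlock hashing code
 * (pv_hash() in qspinlock_paravirt.h), which passes the number of
 * hash buckets examined before a free one was found.
 */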

/*
 * Replacement function for pv_kick()
 */
static inline void __pv_kick(int cpu)
{
	u64 start = sched_clock();

	per_cpu(pv_kick_time, cpu) = start;
	pv_kick(cpu);
	this_cpu_add(EVENT_COUNT(pv_latency_kick), sched_clock() - start);
}
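
/*
 * pv_kick_time is written here for the kicked CPU and read back by
 * __pv_wait() on that CPU after wakeup, pairing each kick with its
 * wake latency sample.
 */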

/*
 * Replacement function for pv_wait()
 */
static inline void __pv_wait(u8 *ptr, u8 val)
{
	u64 *pkick_time = this_cpu_ptr(&pv_kick_time);

	*pkick_time = 0;
	pv_wait(ptr, val);
	/*
	 * A non-zero kick time means this CPU was woken by pv_kick();
	 * a spurious wakeup leaves it zero and records no sample.
	 */
	if (*pkick_time) {
		this_cpu_add(EVENT_COUNT(pv_latency_wake),
			     sched_clock() - *pkick_time);
		lockevent_inc(pv_kick_wake);
	}
}

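/*
 * Shadow pv_kick()/pv_wait() so that code compiled after this point
 * (the PV qspinlock slowpath) transparently calls the instrumented
 * versions above.
 */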
#define pv_kick(c)	__pv_kick(c)
#define pv_wait(p, v)	__pv_wait(p, v)

#endif /* CONFIG_PARAVIRT_SPINLOCKS */

#else /* CONFIG_LOCK_EVENT_COUNTS */

static inline void lockevent_pv_hop(int hopcnt)	{ }

#endif /* CONFIG_LOCK_EVENT_COUNTS */