// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2022, Linaro Limited, All rights reserved.
 * Author: Mike Leach <mike.leach@linaro.org>
 */
#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include "coresight-trace-id.h"

/* Default trace ID map. Used in sysfs mode and for system sources */
static DEFINE_PER_CPU(atomic_t, id_map_default_cpu_ids) = ATOMIC_INIT(0);
static struct coresight_trace_id_map id_map_default = {
	.cpu_map = &id_map_default_cpu_ids,
	.lock = __SPIN_LOCK_UNLOCKED(id_map_default.lock)
};

/* #define TRACE_ID_DEBUG 1 */
#if defined(TRACE_ID_DEBUG) || defined(CONFIG_COMPILE_TEST)

static void coresight_trace_id_dump_table(struct coresight_trace_id_map *id_map,
					  const char *func_name)
{
	pr_debug("%s id_map::\n", func_name);
	pr_debug("Used = %*pb\n", CORESIGHT_TRACE_IDS_MAX, id_map->used_ids);
}
#define DUMP_ID_MAP(map) coresight_trace_id_dump_table(map, __func__)
#define DUMP_ID_CPU(cpu, id) pr_debug("%s called; cpu=%d, id=%d\n", __func__, cpu, id)
#define DUMP_ID(id) pr_debug("%s called; id=%d\n", __func__, id)
#define PERF_SESSION(n) pr_debug("%s perf count %d\n", __func__, n)
#else
#define DUMP_ID_MAP(map)
#define DUMP_ID(id)
#define DUMP_ID_CPU(cpu, id)
#define PERF_SESSION(n)
#endif

/* unlocked read of current trace ID value for given CPU */
static int _coresight_trace_id_read_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
{
	return atomic_read(per_cpu_ptr(id_map->cpu_map, cpu));
}

/* look for next available odd ID, return 0 if none found */
static int coresight_trace_id_find_odd_id(struct coresight_trace_id_map *id_map)
{
	int found_id = 0, bit = 1, next_id;

	while ((bit < CORESIGHT_TRACE_ID_RES_TOP) && !found_id) {
		/*
		 * bitmap length of CORESIGHT_TRACE_ID_RES_TOP,
		 * search from offset `bit`.
		 */
		next_id = find_next_zero_bit(id_map->used_ids,
					     CORESIGHT_TRACE_ID_RES_TOP, bit);
		if ((next_id < CORESIGHT_TRACE_ID_RES_TOP) && (next_id & 0x1))
			found_id = next_id;
		else
			bit = next_id + 1;
	}
	return found_id;
}

/*
 * Allocate new ID and set in use
 *
 * if @preferred_id is a valid id then try to use that value if available.
 * if @preferred_id is not valid and @prefer_odd_id is true, try for odd id.
 *
 * Otherwise allocate next available ID.
 */
static int coresight_trace_id_alloc_new_id(struct coresight_trace_id_map *id_map,
					   int preferred_id, bool prefer_odd_id)
{
	int id = 0;

	/* for backwards compatibility, cpu IDs may use preferred value */
	if (IS_VALID_CS_TRACE_ID(preferred_id) &&
	    !test_bit(preferred_id, id_map->used_ids)) {
		id = preferred_id;
		goto trace_id_allocated;
	} else if (prefer_odd_id) {
		/* may use odd ids to avoid preferred legacy cpu IDs */
		id = coresight_trace_id_find_odd_id(id_map);
		if (id)
			goto trace_id_allocated;
	}

	/*
	 * skip reserved bit 0, look at bitmap length of
	 * CORESIGHT_TRACE_ID_RES_TOP from offset of bit 1.
	 */
	id = find_next_zero_bit(id_map->used_ids, CORESIGHT_TRACE_ID_RES_TOP, 1);
	if (id >= CORESIGHT_TRACE_ID_RES_TOP)
		return -EINVAL;

	/* mark as used */
trace_id_allocated:
	set_bit(id, id_map->used_ids);
	return id;
}

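/* free an ID back to the map; warn if the ID is invalid or not currently in use */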
static void coresight_trace_id_free(int id, struct coresight_trace_id_map *id_map)
{
	if (WARN(!IS_VALID_CS_TRACE_ID(id), "Invalid Trace ID %d\n", id))
		return;
	if (WARN(!test_bit(id, id_map->used_ids), "Freeing unused ID %d\n", id))
		return;
	clear_bit(id, id_map->used_ids);
}

/*
 * Release all IDs and clear CPU associations.
 */
static void coresight_trace_id_release_all(struct coresight_trace_id_map *id_map)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&id_map->lock, flags);
	bitmap_zero(id_map->used_ids, CORESIGHT_TRACE_IDS_MAX);
	for_each_possible_cpu(cpu)
		atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);
	spin_unlock_irqrestore(&id_map->lock, flags);
	DUMP_ID_MAP(id_map);
}

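/* get the trace ID for a CPU: reuse any existing allocation, else allocate under the map lock */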
static int _coresight_trace_id_get_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(&id_map->lock, flags);

	/* check for existing allocation for this CPU */
	id = _coresight_trace_id_read_cpu_id(cpu, id_map);
	if (id)
		goto get_cpu_id_out_unlock;

	/*
	 * Find a new ID.
	 *
	 * Use legacy values where possible in the dynamic trace ID allocator to
	 * allow older tools to continue working if they are not upgraded at the
	 * same time as the kernel drivers.
	 *
	 * If the generated legacy ID is invalid or not available, the next
	 * available dynamic ID will be used.
	 */
	id = coresight_trace_id_alloc_new_id(id_map,
					     CORESIGHT_LEGACY_CPU_TRACE_ID(cpu),
					     false);
	if (!IS_VALID_CS_TRACE_ID(id))
		goto get_cpu_id_out_unlock;

	/* allocate the new id to the cpu */
	atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), id);

get_cpu_id_out_unlock:
	spin_unlock_irqrestore(&id_map->lock, flags);

	DUMP_ID_CPU(cpu, id);
	DUMP_ID_MAP(id_map);
	return id;
}

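/* put a CPU's trace ID: free it and clear the CPU association, if one exists */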
static void _coresight_trace_id_put_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
{
	unsigned long flags;
	int id;

	/* check for existing allocation for this CPU */
	id = _coresight_trace_id_read_cpu_id(cpu, id_map);
	if (!id)
		return;

	spin_lock_irqsave(&id_map->lock, flags);

	coresight_trace_id_free(id, id_map);
	atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);

	spin_unlock_irqrestore(&id_map->lock, flags);
	DUMP_ID_CPU(cpu, id);
	DUMP_ID_MAP(id_map);
}

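/* allocate a trace ID for a system (non-CPU bound) component */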
static int coresight_trace_id_map_get_system_id(struct coresight_trace_id_map *id_map)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(&id_map->lock, flags);
	/* prefer odd IDs for system components to avoid legacy CPU IDs */
	id = coresight_trace_id_alloc_new_id(id_map, 0, true);
	spin_unlock_irqrestore(&id_map->lock, flags);

	DUMP_ID(id);
	DUMP_ID_MAP(id_map);
	return id;
}

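/* release a system component's trace ID back to the map */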
static void coresight_trace_id_map_put_system_id(struct coresight_trace_id_map *id_map, int id)
{
	unsigned long flags;

	spin_lock_irqsave(&id_map->lock, flags);
	coresight_trace_id_free(id, id_map);
	spin_unlock_irqrestore(&id_map->lock, flags);

	DUMP_ID(id);
	DUMP_ID_MAP(id_map);
}

/* API functions */

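/*
 * Minimal usage sketch for a hypothetical CPU-bound source driver (names
 * below are illustrative, not part of this API): claim an ID before
 * enabling trace, then release it once the session is done.
 *
 *	int trace_id = coresight_trace_id_get_cpu_id(cpu);
 *
 *	if (!IS_VALID_CS_TRACE_ID(trace_id))
 *		return -EINVAL;
 *	my_source_program_traceid(drvdata, trace_id);  (hypothetical helper)
 *	...
 *	coresight_trace_id_put_cpu_id(cpu);
 */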
int coresight_trace_id_get_cpu_id(int cpu)
{
	return _coresight_trace_id_get_cpu_id(cpu, &id_map_default);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_get_cpu_id);

int coresight_trace_id_get_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map)
{
	return _coresight_trace_id_get_cpu_id(cpu, id_map);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_get_cpu_id_map);

void coresight_trace_id_put_cpu_id(int cpu)
{
	_coresight_trace_id_put_cpu_id(cpu, &id_map_default);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_put_cpu_id);

void coresight_trace_id_put_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map)
{
	_coresight_trace_id_put_cpu_id(cpu, id_map);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_put_cpu_id_map);

int coresight_trace_id_read_cpu_id(int cpu)
{
	return _coresight_trace_id_read_cpu_id(cpu, &id_map_default);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_read_cpu_id);

int coresight_trace_id_read_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map)
{
	return _coresight_trace_id_read_cpu_id(cpu, id_map);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_read_cpu_id_map);

int coresight_trace_id_get_system_id(void)
{
	return coresight_trace_id_map_get_system_id(&id_map_default);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_get_system_id);

void coresight_trace_id_put_system_id(int id)
{
	coresight_trace_id_map_put_system_id(&id_map_default, id);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_put_system_id);

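/* count a perf cs_etm session starting on this ID map */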
void coresight_trace_id_perf_start(struct coresight_trace_id_map *id_map)
{
	atomic_inc(&id_map->perf_cs_etm_session_active);
	PERF_SESSION(atomic_read(&id_map->perf_cs_etm_session_active));
}
EXPORT_SYMBOL_GPL(coresight_trace_id_perf_start);

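/* when the last active perf session stops, release all IDs in the map */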
void coresight_trace_id_perf_stop(struct coresight_trace_id_map *id_map)
{
	if (!atomic_dec_return(&id_map->perf_cs_etm_session_active))
		coresight_trace_id_release_all(id_map);
	PERF_SESSION(atomic_read(&id_map->perf_cs_etm_session_active));
}
EXPORT_SYMBOL_GPL(coresight_trace_id_perf_stop);