// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2022, Linaro Limited, All rights reserved.
 * Author: Mike Leach <mike.leach@linaro.org>
 */
#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include "coresight-trace-id.h"

enum trace_id_flags {
	TRACE_ID_ANY = 0x0,
	TRACE_ID_PREFER_ODD = 0x1,
	TRACE_ID_REQ_STATIC = 0x2,
};

/* Default trace ID map. Used in sysfs mode and for system sources */
static DEFINE_PER_CPU(atomic_t, id_map_default_cpu_ids) = ATOMIC_INIT(0);
static struct coresight_trace_id_map id_map_default = {
	.cpu_map = &id_map_default_cpu_ids,
	.lock = __SPIN_LOCK_UNLOCKED(id_map_default.lock)
};

/* #define TRACE_ID_DEBUG 1 */
#if defined(TRACE_ID_DEBUG) || defined(CONFIG_COMPILE_TEST)

static void coresight_trace_id_dump_table(struct coresight_trace_id_map *id_map,
					  const char *func_name)
{
	pr_debug("%s id_map::\n", func_name);
	pr_debug("Used = %*pb\n", CORESIGHT_TRACE_IDS_MAX, id_map->used_ids);
}
#define DUMP_ID_MAP(map)	coresight_trace_id_dump_table(map, __func__)
#define DUMP_ID_CPU(cpu, id)	pr_debug("%s called; cpu=%d, id=%d\n", __func__, cpu, id)
#define DUMP_ID(id)		pr_debug("%s called; id=%d\n", __func__, id)
#define PERF_SESSION(n)		pr_debug("%s perf count %d\n", __func__, n)
#else
#define DUMP_ID_MAP(map)
#define DUMP_ID(id)
#define DUMP_ID_CPU(cpu, id)
#define PERF_SESSION(n)
#endif

/* unlocked read of current trace ID value for given CPU */
static int _coresight_trace_id_read_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
{
	return atomic_read(per_cpu_ptr(id_map->cpu_map, cpu));
}

/* look for next available odd ID, return 0 if none found */
static int coresight_trace_id_find_odd_id(struct coresight_trace_id_map *id_map)
{
	int found_id = 0, bit = 1, next_id;

	while ((bit < CORESIGHT_TRACE_ID_RES_TOP) && !found_id) {
		/*
		 * bitmap length of CORESIGHT_TRACE_ID_RES_TOP,
		 * search from offset `bit`.
		 */
		next_id = find_next_zero_bit(id_map->used_ids,
					     CORESIGHT_TRACE_ID_RES_TOP, bit);
		if ((next_id < CORESIGHT_TRACE_ID_RES_TOP) && (next_id & 0x1))
			found_id = next_id;
		else
			bit = next_id + 1;
	}
	return found_id;
}

/*
 * Allocate new ID and set in use.
 *
 * If @preferred_id is a valid ID, try to use that value if it is available.
 * If @preferred_id is not valid and TRACE_ID_PREFER_ODD is set in @flags,
 * try for an odd ID.
 *
 * Otherwise allocate the next available ID.
 */
static int coresight_trace_id_alloc_new_id(struct coresight_trace_id_map *id_map,
					   int preferred_id, unsigned int flags)
{
	int id = 0;

	/* for backwards compatibility, cpu IDs may use preferred value */
	if (IS_VALID_CS_TRACE_ID(preferred_id)) {
		if (!test_bit(preferred_id, id_map->used_ids)) {
			id = preferred_id;
			goto trace_id_allocated;
		} else if (flags & TRACE_ID_REQ_STATIC)
			return -EBUSY;
	} else if (flags & TRACE_ID_PREFER_ODD) {
		/* may use odd ids to avoid preferred legacy cpu IDs */
		id = coresight_trace_id_find_odd_id(id_map);
		if (id)
			goto trace_id_allocated;
	} else if (!IS_VALID_CS_TRACE_ID(preferred_id) &&
		   (flags & TRACE_ID_REQ_STATIC))
		return -EINVAL;

	/*
	 * skip reserved bit 0, look at bitmap length of
	 * CORESIGHT_TRACE_ID_RES_TOP from offset of bit 1.
	 */
	id = find_next_zero_bit(id_map->used_ids, CORESIGHT_TRACE_ID_RES_TOP, 1);
	if (id >= CORESIGHT_TRACE_ID_RES_TOP)
		return -EINVAL;

	/* mark as used */
trace_id_allocated:
	set_bit(id, id_map->used_ids);
	return id;
}

static void coresight_trace_id_free(int id, struct coresight_trace_id_map *id_map)
{
	if (WARN(!IS_VALID_CS_TRACE_ID(id), "Invalid Trace ID %d\n", id))
		return;
	if (WARN(!test_bit(id, id_map->used_ids), "Freeing unused ID %d\n", id))
		return;
	clear_bit(id, id_map->used_ids);
}

/*
 * Release all IDs and clear CPU associations.
 */
static void coresight_trace_id_release_all(struct coresight_trace_id_map *id_map)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&id_map->lock, flags);
	bitmap_zero(id_map->used_ids, CORESIGHT_TRACE_IDS_MAX);
	for_each_possible_cpu(cpu)
		atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);
	spin_unlock_irqrestore(&id_map->lock, flags);
	DUMP_ID_MAP(id_map);
}

static int _coresight_trace_id_get_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(&id_map->lock, flags);

	/* check for existing allocation for this CPU */
	id = _coresight_trace_id_read_cpu_id(cpu, id_map);
	if (id)
		goto get_cpu_id_out_unlock;

	/*
	 * Find a new ID.
	 *
	 * Use legacy values where possible in the dynamic trace ID allocator to
	 * allow older tools to continue working if they are not upgraded at the
	 * same time as the kernel drivers.
	 *
	 * If the generated legacy ID is invalid or not available, then the next
	 * available dynamic ID will be used.
	 */
	id = coresight_trace_id_alloc_new_id(id_map,
					     CORESIGHT_LEGACY_CPU_TRACE_ID(cpu),
					     TRACE_ID_ANY);
	if (!IS_VALID_CS_TRACE_ID(id))
		goto get_cpu_id_out_unlock;

	/* allocate the new id to the cpu */
	atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), id);

get_cpu_id_out_unlock:
	spin_unlock_irqrestore(&id_map->lock, flags);

	DUMP_ID_CPU(cpu, id);
	DUMP_ID_MAP(id_map);
	return id;
}

static void _coresight_trace_id_put_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
{
	unsigned long flags;
	int id;

	/* check for existing allocation for this CPU */
	id = _coresight_trace_id_read_cpu_id(cpu, id_map);
	if (!id)
		return;

	spin_lock_irqsave(&id_map->lock, flags);

	coresight_trace_id_free(id, id_map);
	atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);

	spin_unlock_irqrestore(&id_map->lock, flags);
	DUMP_ID_CPU(cpu, id);
	DUMP_ID_MAP(id_map);
}

static int coresight_trace_id_map_get_system_id(struct coresight_trace_id_map *id_map,
						int preferred_id, unsigned int traceid_flags)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(&id_map->lock, flags);
	id = coresight_trace_id_alloc_new_id(id_map, preferred_id, traceid_flags);
	spin_unlock_irqrestore(&id_map->lock, flags);

	DUMP_ID(id);
	DUMP_ID_MAP(id_map);
	return id;
}

static void coresight_trace_id_map_put_system_id(struct coresight_trace_id_map *id_map, int id)
{
	unsigned long flags;

	spin_lock_irqsave(&id_map->lock, flags);
	coresight_trace_id_free(id, id_map);
	spin_unlock_irqrestore(&id_map->lock, flags);

	DUMP_ID(id);
	DUMP_ID_MAP(id_map);
}

/* API functions */

int coresight_trace_id_get_cpu_id(int cpu)
{
	return _coresight_trace_id_get_cpu_id(cpu, &id_map_default);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_get_cpu_id);

int coresight_trace_id_get_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map)
{
	return _coresight_trace_id_get_cpu_id(cpu, id_map);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_get_cpu_id_map);

void coresight_trace_id_put_cpu_id(int cpu)
{
	_coresight_trace_id_put_cpu_id(cpu, &id_map_default);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_put_cpu_id);

void coresight_trace_id_put_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map)
{
	_coresight_trace_id_put_cpu_id(cpu, id_map);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_put_cpu_id_map);

int coresight_trace_id_read_cpu_id(int cpu)
{
	return _coresight_trace_id_read_cpu_id(cpu, &id_map_default);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_read_cpu_id);

int coresight_trace_id_read_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map)
{
	return _coresight_trace_id_read_cpu_id(cpu, id_map);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_read_cpu_id_map);

int coresight_trace_id_get_system_id(void)
{
	/* prefer odd IDs for system components to avoid legacy CPU IDs */
	return coresight_trace_id_map_get_system_id(&id_map_default, 0,
						    TRACE_ID_PREFER_ODD);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_get_system_id);

int coresight_trace_id_get_static_system_id(int trace_id)
{
	return coresight_trace_id_map_get_system_id(&id_map_default,
						    trace_id, TRACE_ID_REQ_STATIC);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_get_static_system_id);

void coresight_trace_id_put_system_id(int id)
{
	coresight_trace_id_map_put_system_id(&id_map_default, id);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_put_system_id);

void coresight_trace_id_perf_start(struct coresight_trace_id_map *id_map)
{
	atomic_inc(&id_map->perf_cs_etm_session_active);
	PERF_SESSION(atomic_read(&id_map->perf_cs_etm_session_active));
}
EXPORT_SYMBOL_GPL(coresight_trace_id_perf_start);

void coresight_trace_id_perf_stop(struct coresight_trace_id_map *id_map)
{
	if (!atomic_dec_return(&id_map->perf_cs_etm_session_active))
		coresight_trace_id_release_all(id_map);
	PERF_SESSION(atomic_read(&id_map->perf_cs_etm_session_active));
}
EXPORT_SYMBOL_GPL(coresight_trace_id_perf_stop);
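
/*
 * Usage sketch (illustrative only, not part of this allocator): a per-CPU
 * trace source driver would typically bracket a trace session with the
 * get/put helpers above, checking the returned value before programming the
 * hardware. The "cpu" variable and the hardware programming step are
 * placeholders here.
 *
 *	int trace_id = coresight_trace_id_get_cpu_id(cpu);
 *
 *	if (!IS_VALID_CS_TRACE_ID(trace_id))
 *		return -EINVAL;
 *	... program the source with trace_id and run the session ...
 *	coresight_trace_id_put_cpu_id(cpu);
 *
 * System sources follow the same pattern with
 * coresight_trace_id_get_system_id() / coresight_trace_id_put_system_id().
 */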