// SPDX-License-Identifier: GPL-2.0
/*
 * This driver enables the Trace Buffer Extension (TRBE) as a per-cpu
 * coresight sink device that can pair with an appropriate per-cpu
 * coresight source device (ETE), thus generating the required trace data.
 * Trace can be enabled via the perf framework.
 *
 * The AUX buffer handling is inspired by the Arm SPE PMU driver.
 *
 * Copyright (C) 2020 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
#define DRVNAME "arm_trbe"

#define pr_fmt(fmt) DRVNAME ": " fmt

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <linux/vmalloc.h>

#include "coresight-self-hosted-trace.h"
#include "coresight-trbe.h"

#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT))

/*
 * A padding packet that helps user space tools skip sections of the
 * captured trace data which could not be decoded. TRBE doesn't support
 * formatting the trace data, unlike the legacy CoreSight sinks, and thus
 * we use ETE trace packets to pad those sections of the buffer.
 */
#define ETE_IGNORE_PACKET		0x70

/*
 * Minimum amount of meaningful trace will contain:
 * A-Sync, Trace Info, Trace On, Address, Atom.
 * This is about 44 bytes of ETE trace. To be on
 * the safer side, we assume 64 bytes is the minimum
 * space required for a meaningful session, before
 * we hit a "WRAP" event.
 */
#define TRBE_TRACE_MIN_BUF_SIZE		64

enum trbe_fault_action {
	TRBE_FAULT_ACT_WRAP,
	TRBE_FAULT_ACT_SPURIOUS,
	TRBE_FAULT_ACT_FATAL,
};

struct trbe_buf {
	/*
	 * Even though trbe_base represents the vmap()
	 * mapped allocated buffer's start address, it's
	 * kept as an unsigned long for various arithmetic
	 * and comparison operations and also to be
	 * consistent with the trbe_write and trbe_limit
	 * sibling pointers.
	 */
	unsigned long trbe_base;
	/* The base programmed into the TRBE */
	unsigned long trbe_hw_base;
	unsigned long trbe_limit;
	unsigned long trbe_write;
	int nr_pages;
	void **pages;
	bool snapshot;
	struct trbe_cpudata *cpudata;
};
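/*
 * Pointer relationships, derived from the WARN_ON()s in trbe_enable_hw()
 * and the errata handling below: while the TRBE is enabled,
 *
 *	trbe_base <= trbe_hw_base <= trbe_write < trbe_limit
 *	trbe_limit <= trbe_base + nr_pages * PAGE_SIZE
 *
 * trbe_hw_base may be raised above trbe_base, and trbe_limit pulled below
 * the end of the ring buffer, by the erratum workarounds applied in
 * trbe_apply_work_around_before_enable().
 */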
/*
 * TRBE erratum list
 *
 * The errata are defined in the arm64 generic cpu_errata framework.
 * Since the erratum workarounds could be applied individually to the
 * affected CPUs inside the TRBE driver, we need to know if a given CPU
 * is affected by an erratum. Unlike the other erratum workarounds, the
 * TRBE driver needs to check this multiple times during a trace session.
 * Thus we need quick access to the per-CPU errata and cannot issue the
 * costly this_cpu_has_cap() every time. We keep a set of the affected
 * errata in trbe_cpudata, per TRBE.
 *
 * We rely on the corresponding cpucaps to be defined for a given TRBE
 * erratum. We map the given cpucap into a TRBE internal number to keep
 * the tracking of the errata lean.
 *
 * This helps in:
 *   - Not duplicating the detection logic
 *   - Streamlined detection of errata across the system
 */
#define TRBE_WORKAROUND_OVERWRITE_FILL_MODE	0
#define TRBE_WORKAROUND_WRITE_OUT_OF_RANGE	1
#define TRBE_NEEDS_DRAIN_AFTER_DISABLE		2
#define TRBE_NEEDS_CTXT_SYNC_AFTER_ENABLE	3
#define TRBE_IS_BROKEN				4

static int trbe_errata_cpucaps[] = {
	[TRBE_WORKAROUND_OVERWRITE_FILL_MODE] = ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE,
	[TRBE_WORKAROUND_WRITE_OUT_OF_RANGE] = ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE,
	[TRBE_NEEDS_DRAIN_AFTER_DISABLE] = ARM64_WORKAROUND_2064142,
	[TRBE_NEEDS_CTXT_SYNC_AFTER_ENABLE] = ARM64_WORKAROUND_2038923,
	[TRBE_IS_BROKEN] = ARM64_WORKAROUND_1902691,
	-1,		/* Sentinel, must be the last entry */
};

/* The total number of listed errata in trbe_errata_cpucaps */
#define TRBE_ERRATA_MAX			(ARRAY_SIZE(trbe_errata_cpucaps) - 1)

/*
 * Safe limit for the number of bytes that may be overwritten
 * when ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE is triggered.
 */
#define TRBE_WORKAROUND_OVERWRITE_FILL_MODE_SKIP_BYTES	256

/*
 * struct trbe_cpudata: TRBE instance specific data
 * @trbe_flag		- TRBE dirty/access flag support
 * @trbe_hw_align	- Actual TRBE alignment required for TRBPTR_EL1.
 * @trbe_align		- Software alignment used for the TRBPTR_EL1.
 * @cpu			- CPU this TRBE belongs to.
 * @mode		- Mode of current operation. (perf/disabled)
 * @buf			- The perf buffer currently in use by this TRBE.
 * @drvdata		- TRBE specific drvdata
 * @errata		- Bit map for the errata on this TRBE.
 */
struct trbe_cpudata {
	bool trbe_flag;
	u64 trbe_hw_align;
	u64 trbe_align;
	int cpu;
	enum cs_mode mode;
	struct trbe_buf *buf;
	struct trbe_drvdata *drvdata;
	DECLARE_BITMAP(errata, TRBE_ERRATA_MAX);
};

struct trbe_drvdata {
	struct trbe_cpudata __percpu *cpudata;
	struct perf_output_handle * __percpu *handle;
	struct hlist_node hotplug_node;
	int irq;
	cpumask_t supported_cpus;
	enum cpuhp_state trbe_online;
	struct platform_device *pdev;
};

static void trbe_check_errata(struct trbe_cpudata *cpudata)
{
	int i;

	for (i = 0; i < TRBE_ERRATA_MAX; i++) {
		int cap = trbe_errata_cpucaps[i];

		if (WARN_ON_ONCE(cap < 0))
			return;
		if (this_cpu_has_cap(cap))
			set_bit(i, cpudata->errata);
	}
}

static inline bool trbe_has_erratum(struct trbe_cpudata *cpudata, int i)
{
	return (i < TRBE_ERRATA_MAX) && test_bit(i, cpudata->errata);
}

static inline bool trbe_may_overwrite_in_fill_mode(struct trbe_cpudata *cpudata)
{
	return trbe_has_erratum(cpudata, TRBE_WORKAROUND_OVERWRITE_FILL_MODE);
}

static inline bool trbe_may_write_out_of_range(struct trbe_cpudata *cpudata)
{
	return trbe_has_erratum(cpudata, TRBE_WORKAROUND_WRITE_OUT_OF_RANGE);
}
static inline bool trbe_needs_drain_after_disable(struct trbe_cpudata *cpudata)
{
	/*
	 * An errata-affected TRBE implementation needs a TSB CSYNC and
	 * DSB in order to prevent subsequent writes into certain TRBE
	 * system registers from being ignored or not taking effect.
	 */
	return trbe_has_erratum(cpudata, TRBE_NEEDS_DRAIN_AFTER_DISABLE);
}

static inline bool trbe_needs_ctxt_sync_after_enable(struct trbe_cpudata *cpudata)
{
	/*
	 * An errata-affected TRBE implementation needs an additional
	 * context synchronization in order to prevent an inconsistent
	 * TRBE prohibited region view on the CPU which could possibly
	 * corrupt the TRBE buffer or the TRBE state.
	 */
	return trbe_has_erratum(cpudata, TRBE_NEEDS_CTXT_SYNC_AFTER_ENABLE);
}

static inline bool trbe_is_broken(struct trbe_cpudata *cpudata)
{
	return trbe_has_erratum(cpudata, TRBE_IS_BROKEN);
}

static int trbe_alloc_node(struct perf_event *event)
{
	if (event->cpu == -1)
		return NUMA_NO_NODE;
	return cpu_to_node(event->cpu);
}

static inline void trbe_drain_buffer(void)
{
	tsb_csync();
	dsb(nsh);
}

static inline void set_trbe_enabled(struct trbe_cpudata *cpudata, u64 trblimitr)
{
	/*
	 * Enable the TRBE without clearing LIMITPTR which
	 * might be required for fetching the buffer limits.
	 */
	trblimitr |= TRBLIMITR_EL1_E;
	write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);

	/* Synchronize the TRBE enable event */
	isb();

	if (trbe_needs_ctxt_sync_after_enable(cpudata))
		isb();
}

static inline void set_trbe_disabled(struct trbe_cpudata *cpudata)
{
	u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);

	/*
	 * Disable the TRBE without clearing LIMITPTR which
	 * might be required for fetching the buffer limits.
	 */
	trblimitr &= ~TRBLIMITR_EL1_E;
	write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);

	if (trbe_needs_drain_after_disable(cpudata))
		trbe_drain_buffer();
	isb();
}

static void trbe_drain_and_disable_local(struct trbe_cpudata *cpudata)
{
	trbe_drain_buffer();
	set_trbe_disabled(cpudata);
}

static void trbe_reset_local(struct trbe_cpudata *cpudata)
{
	trbe_drain_and_disable_local(cpudata);
	write_sysreg_s(0, SYS_TRBLIMITR_EL1);
	write_sysreg_s(0, SYS_TRBPTR_EL1);
	write_sysreg_s(0, SYS_TRBBASER_EL1);
	write_sysreg_s(0, SYS_TRBSR_EL1);
}

static void trbe_report_wrap_event(struct perf_output_handle *handle)
{
	/*
	 * Mark the buffer to indicate that there was a WRAP event by
	 * setting the COLLISION flag. This indicates to the user that
	 * the TRBE trace collection was stopped without stopping the
	 * ETE and thus there might be some amount of trace that was
	 * lost between the time the WRAP was detected and the IRQ
	 * was consumed by the CPU.
	 *
	 * Setting the TRUNCATED flag would move the event to the STOPPED
	 * state unnecessarily, even when there is space left in the
	 * ring buffer. Using the COLLISION flag doesn't have this side
	 * effect. We only set the TRUNCATED flag when there is no space
	 * left in the ring buffer.
	 */
	perf_aux_output_flag(handle, PERF_AUX_FLAG_COLLISION);
}
static void trbe_stop_and_truncate_event(struct perf_output_handle *handle)
{
	struct trbe_buf *buf = etm_perf_sink_config(handle);

	/*
	 * We cannot proceed with the buffer collection and we
	 * do not have any data for the current session. The
	 * etm_perf driver expects to close out the aux_buffer
	 * at event_stop(). So disable the TRBE here and leave
	 * the update_buffer() to return a 0 size.
	 */
	trbe_drain_and_disable_local(buf->cpudata);
	perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
	perf_aux_output_end(handle, 0);
	*this_cpu_ptr(buf->cpudata->drvdata->handle) = NULL;
}

/*
 * TRBE Buffer Management
 *
 * The TRBE buffer spans from the base pointer till the limit pointer. When enabled,
 * it starts writing trace data from the write pointer onward till the limit pointer.
 * When the write pointer reaches the address just before the limit pointer, it gets
 * wrapped around again to the base pointer. This is called a TRBE wrap event, which
 * generates a maintenance interrupt when operated in WRAP or FILL mode. This driver
 * uses FILL mode, where the TRBE stops the trace collection at the wrap event. The
 * IRQ handler updates the AUX buffer and re-enables the TRBE with updated WRITE and
 * LIMIT pointers.
 *
 *	Wrap around with an IRQ
 *	------ < ------ < ------- < ----- < -----
 *	|					|
 *	------ > ------ > ------- > ----- > -----
 *
 *	+---------------+-----------------------+
 *	|		|			|
 *	+---------------+-----------------------+
 *	Base Pointer	Write Pointer		Limit Pointer
 *
 * The base and limit pointers always need to be PAGE_SIZE aligned. But the write
 * pointer can be aligned to the implementation defined TRBE trace buffer alignment
 * as captured in trbe_cpudata->trbe_align.
 *
 *
 *		head		tail		wakeup
 *	+---------------------------------------+----- ~ ~ ------
 *	|$$$$$$$|################|$$$$$$$$$$$$$$|		|
 *	+---------------------------------------+----- ~ ~ ------
 *	Base Pointer	Write Pointer		Limit Pointer
 *
 * The perf_output_handle indices (head, tail, wakeup) are monotonically increasing
 * values which track all the driver writes and user reads from the perf auxiliary
 * buffer. Generally [head..tail] is the area where the driver can write into, unless
 * the wakeup is behind the tail. The enabled TRBE buffer span needs to be adjusted
 * and configured depending on the perf_output_handle indices, so that the driver
 * does not overwrite areas in the perf auxiliary buffer which are being, or are yet
 * to be, consumed by user space. The enabled TRBE buffer area is a moving subset of
 * the allocated perf auxiliary buffer.
 */

static void __trbe_pad_buf(struct trbe_buf *buf, u64 offset, int len)
{
	memset((void *)buf->trbe_base + offset, ETE_IGNORE_PACKET, len);
}

static void trbe_pad_buf(struct perf_output_handle *handle, int len)
{
	struct trbe_buf *buf = etm_perf_sink_config(handle);
	u64 head = PERF_IDX2OFF(handle->head, buf);

	__trbe_pad_buf(buf, head, len);
	if (!buf->snapshot)
		perf_aux_output_skip(handle, len);
}
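/*
 * Worked example with illustrative values: with 4K pages and a 16-page
 * AUX buffer the offsets wrap every 64K, so PERF_IDX2OFF(0x12340, buf)
 * is 0x2340 even though handle->head keeps growing monotonically.
 * trbe_pad_buf() therefore always writes its ETE_IGNORE_PACKET padding
 * at the in-buffer offset of the current head, however many times the
 * ring has wrapped.
 */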
static unsigned long trbe_snapshot_offset(struct perf_output_handle *handle)
{
	struct trbe_buf *buf = etm_perf_sink_config(handle);

	/*
	 * The ETE trace has alignment synchronization packets allowing
	 * the decoder to reset in case of an overflow or corruption.
	 * So we can use the entire buffer for the snapshot mode.
	 */
	return buf->nr_pages * PAGE_SIZE;
}

static u64 trbe_min_trace_buf_size(struct perf_output_handle *handle)
{
	u64 size = TRBE_TRACE_MIN_BUF_SIZE;
	struct trbe_buf *buf = etm_perf_sink_config(handle);
	struct trbe_cpudata *cpudata = buf->cpudata;

	/*
	 * When the TRBE is affected by an erratum that could make it
	 * write to the next "virtually addressed" page beyond the LIMIT,
	 * we need to make sure there is always a PAGE after the LIMIT,
	 * within the buffer. Thus we ensure there is at least one extra
	 * page more than normal. With this we could then adjust the LIMIT
	 * pointer down by a PAGE later.
	 */
	if (trbe_may_write_out_of_range(cpudata))
		size += PAGE_SIZE;
	return size;
}

/*
 * TRBE Limit Calculation
 *
 * The following markers are used to illustrate various TRBE buffer situations.
 *
 * $$$$ - Data area, unconsumed captured trace data, not to be overwritten
 * #### - Free area, enabled, trace will be written
 * %%%% - Free area, disabled, trace will not be written
 * ==== - Free area, padded with ETE_IGNORE_PACKET, trace will be skipped
 */
static unsigned long __trbe_normal_offset(struct perf_output_handle *handle)
{
	struct trbe_buf *buf = etm_perf_sink_config(handle);
	struct trbe_cpudata *cpudata = buf->cpudata;
	const u64 bufsize = buf->nr_pages * PAGE_SIZE;
	u64 limit = bufsize;
	u64 head, tail, wakeup;

	head = PERF_IDX2OFF(handle->head, buf);

	/*
	 *		head
	 *	------->|
	 *		|
	 *	head	TRBE align	tail
	 * +----|-------|---------------|-------+
	 * |$$$$|=======|###############|$$$$$$$|
	 * +----|-------|---------------|-------+
	 * trbe_base				trbe_base + nr_pages
	 *
	 * Perf aux buffer output head position can be misaligned depending on
	 * various factors including user space reads. In case misaligned, head
	 * needs to be aligned before TRBE can be configured. Pad the alignment
	 * gap with ETE_IGNORE_PACKET bytes that will be ignored by user tools
	 * and skip this section thus advancing the head.
	 */
	if (!IS_ALIGNED(head, cpudata->trbe_align)) {
		unsigned long delta = roundup(head, cpudata->trbe_align) - head;

		delta = min(delta, handle->size);
		trbe_pad_buf(handle, delta);
		head = PERF_IDX2OFF(handle->head, buf);
	}

	/*
	 *	head = tail (size = 0)
	 * +----|-------------------------------+
	 * |$$$$|$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$	|
	 * +----|-------------------------------+
	 * trbe_base				trbe_base + nr_pages
	 *
	 * Perf aux buffer does not have any space for the driver to write into.
	 */
	if (!handle->size)
		return 0;

	/* Compute the tail and wakeup indices now that we've aligned head */
	tail = PERF_IDX2OFF(handle->head + handle->size, buf);
	wakeup = PERF_IDX2OFF(handle->wakeup, buf);

	/*
	 * Let's calculate the buffer area which the TRBE could write into. There
	 * are three possible scenarios here. The limit needs to be aligned with
	 * PAGE_SIZE per the TRBE requirement. Always avoid clobbering the
	 * unconsumed data.
	 *
	 * 1) head < tail
	 *
	 *	head			tail
	 * +----|-----------------------|-------+
	 * |$$$$|#######################|$$$$$$$|
	 * +----|-----------------------|-------+
	 * trbe_base			limit	trbe_base + nr_pages
	 *
	 * TRBE could write into [head..tail] area. Unless the tail is right at
	 * the end of the buffer, neither a wrap around nor an IRQ is expected
	 * while being enabled.
	 *
	 * 2) head == tail
	 *
	 *	head = tail (size > 0)
	 * +----|-------------------------------+
	 * |%%%%|###############################|
	 * +----|-------------------------------+
	 * trbe_base				limit = trbe_base + nr_pages
	 *
	 * TRBE should just write into [head..base + nr_pages] area even though
	 * the entire buffer is empty. Reason being, when the trace reaches the
	 * end of the buffer, it will just wrap around with an IRQ giving an
	 * opportunity to reconfigure the buffer.
	 *
	 * 3) tail < head
	 *
	 *	tail			head
	 * +----|-----------------------|-------+
	 * |%%%%|$$$$$$$$$$$$$$$$$$$$$$$|#######|
	 * +----|-----------------------|-------+
	 * trbe_base				limit = trbe_base + nr_pages
	 *
	 * TRBE should just write into [head..base + nr_pages] area even though
	 * the [trbe_base..tail] is also empty. Reason being, when the trace
	 * reaches the end of the buffer, it will just wrap around with an IRQ
	 * giving an opportunity to reconfigure the buffer.
	 */
	if (head < tail)
		limit = round_down(tail, PAGE_SIZE);

	/*
	 * Wakeup may be arbitrarily far into the future. If it's not in the
	 * current generation, either we'll wrap before hitting it, or it's
	 * in the past and has been handled already.
	 *
	 * If there's a wakeup before we wrap, arrange to be woken up by the
	 * page boundary following it. Keep the tail boundary if that's lower.
	 *
	 *	head		wakeup	tail
	 * +----|---------------|-------|-------+
	 * |$$$$|###############|%%%%%%%|$$$$$$$|
	 * +----|---------------|-------|-------+
	 * trbe_base		limit		trbe_base + nr_pages
	 */
	if (handle->wakeup < (handle->head + handle->size) && head <= wakeup)
		limit = min(limit, round_up(wakeup, PAGE_SIZE));

	/*
	 * There are two situations when this can happen, i.e. the limit is
	 * before the head and hence the TRBE cannot be configured.
	 *
	 * 1) head < tail (aligned down with PAGE_SIZE) and also they are both
	 * within the same PAGE size range.
	 *
	 *			PAGE_SIZE
	 *		|----------------------|
	 *
	 *		limit	head	tail
	 * +------------|------|--------|-------+
	 * |$$$$$$$$$$$$$$$$$$$|========|$$$$$$$|
	 * +------------|------|--------|-------+
	 * trbe_base				trbe_base + nr_pages
	 *
	 * 2) head < wakeup (aligned up with PAGE_SIZE) < tail and also both
	 * head and wakeup are within the same PAGE size range.
	 *
	 *			PAGE_SIZE
	 *		|----------------------|
	 *
	 *	limit	head	wakeup	tail
	 * +----|------|-------|--------|-------+
	 * |$$$$$$$$$$$|=======|========|$$$$$$$|
	 * +----|------|-------|--------|-------+
	 * trbe_base				trbe_base + nr_pages
	 */
	if (limit > head)
		return limit;

	trbe_pad_buf(handle, handle->size);
	return 0;
}
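/*
 * A minimal sketch of the cases above, assuming 4K pages and a 16-page
 * (64K) AUX buffer:
 *
 *	head = 0x1100, tail = 0x5080	-> limit = round_down(0x5080, 4K) = 0x5000
 *	head = 0x1100, tail = 0x1100	-> limit = 0x10000 (buffer end), size > 0
 *	head = 0x1100, tail = 0x0900	-> limit = 0x10000 (buffer end)
 *
 * If, in the first case, a wakeup falls at offset 0x2080, the limit is
 * further clamped to round_up(0x2080, 4K) = 0x3000.
 */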
static unsigned long trbe_normal_offset(struct perf_output_handle *handle)
{
	struct trbe_buf *buf = etm_perf_sink_config(handle);
	u64 limit = __trbe_normal_offset(handle);
	u64 head = PERF_IDX2OFF(handle->head, buf);

	/*
	 * If the head is too close to the limit and we don't
	 * have space for a meaningful run, we'd rather pad it
	 * and start fresh.
	 *
	 * We might have to do this more than once to make sure
	 * we have enough space.
	 */
	while (limit && ((limit - head) < trbe_min_trace_buf_size(handle))) {
		trbe_pad_buf(handle, limit - head);
		limit = __trbe_normal_offset(handle);
		head = PERF_IDX2OFF(handle->head, buf);
	}
	return limit;
}

static unsigned long compute_trbe_buffer_limit(struct perf_output_handle *handle)
{
	struct trbe_buf *buf = etm_perf_sink_config(handle);
	unsigned long offset;

	if (buf->snapshot)
		offset = trbe_snapshot_offset(handle);
	else
		offset = trbe_normal_offset(handle);
	return buf->trbe_base + offset;
}

static void clr_trbe_status(void)
{
	u64 trbsr = read_sysreg_s(SYS_TRBSR_EL1);

	WARN_ON(is_trbe_enabled());
	trbsr &= ~TRBSR_EL1_IRQ;
	trbsr &= ~TRBSR_EL1_TRG;
	trbsr &= ~TRBSR_EL1_WRAP;
	trbsr &= ~TRBSR_EL1_EC_MASK;
	trbsr &= ~TRBSR_EL1_BSC_MASK;
	trbsr &= ~TRBSR_EL1_S;
	write_sysreg_s(trbsr, SYS_TRBSR_EL1);
}

static void set_trbe_limit_pointer_enabled(struct trbe_buf *buf)
{
	u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
	unsigned long addr = buf->trbe_limit;

	WARN_ON(!IS_ALIGNED(addr, (1UL << TRBLIMITR_EL1_LIMIT_SHIFT)));
	WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));

	trblimitr &= ~TRBLIMITR_EL1_nVM;
	trblimitr &= ~TRBLIMITR_EL1_FM_MASK;
	trblimitr &= ~TRBLIMITR_EL1_TM_MASK;
	trblimitr &= ~TRBLIMITR_EL1_LIMIT_MASK;

	/*
	 * Fill trace buffer mode is used here while configuring the
	 * TRBE for trace capture. In this particular mode, the trace
	 * collection is stopped and a maintenance interrupt is raised
	 * when the current write pointer wraps. This pause in trace
	 * collection gives the software an opportunity to capture the
	 * trace data in the interrupt handler, before reconfiguring
	 * the TRBE.
	 */
	trblimitr |= (TRBLIMITR_EL1_FM_FILL << TRBLIMITR_EL1_FM_SHIFT) &
		     TRBLIMITR_EL1_FM_MASK;

	/*
	 * Trigger mode is not used here while configuring the TRBE for
	 * the trace capture. Hence just keep this in the ignore mode.
	 */
	trblimitr |= (TRBLIMITR_EL1_TM_IGNR << TRBLIMITR_EL1_TM_SHIFT) &
		     TRBLIMITR_EL1_TM_MASK;
	trblimitr |= (addr & PAGE_MASK);
	set_trbe_enabled(buf->cpudata, trblimitr);
}
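/*
 * Summary of the TRBLIMITR_EL1 value built above (field names as used in
 * this driver; exact bit positions live in the sysreg definitions):
 *
 *	LIMIT	= trbe_limit & PAGE_MASK	(page-aligned limit address)
 *	nVM	= 0
 *	TM	= TRBLIMITR_EL1_TM_IGNR		(trigger mode ignored)
 *	FM	= TRBLIMITR_EL1_FM_FILL		(stop and raise an IRQ on wrap)
 *	E	= 1, set last by set_trbe_enabled()
 */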
static void trbe_enable_hw(struct trbe_buf *buf)
{
	WARN_ON(buf->trbe_hw_base < buf->trbe_base);
	WARN_ON(buf->trbe_write < buf->trbe_hw_base);
	WARN_ON(buf->trbe_write >= buf->trbe_limit);
	set_trbe_disabled(buf->cpudata);
	clr_trbe_status();
	set_trbe_base_pointer(buf->trbe_hw_base);
	set_trbe_write_pointer(buf->trbe_write);

	/*
	 * Synchronize all the register updates
	 * till now before enabling the TRBE.
	 */
	isb();
	set_trbe_limit_pointer_enabled(buf);
}

static enum trbe_fault_action trbe_get_fault_act(struct perf_output_handle *handle,
						 u64 trbsr)
{
	int ec = get_trbe_ec(trbsr);
	int bsc = get_trbe_bsc(trbsr);
	struct trbe_buf *buf = etm_perf_sink_config(handle);
	struct trbe_cpudata *cpudata = buf->cpudata;

	WARN_ON(is_trbe_running(trbsr));
	if (is_trbe_trg(trbsr) || is_trbe_abort(trbsr))
		return TRBE_FAULT_ACT_FATAL;

	if ((ec == TRBE_EC_STAGE1_ABORT) || (ec == TRBE_EC_STAGE2_ABORT))
		return TRBE_FAULT_ACT_FATAL;

	/*
	 * If the TRBE is affected by TRBE_WORKAROUND_OVERWRITE_FILL_MODE,
	 * it might write data after a WRAP event in the fill mode.
	 * Thus the check TRBPTR == TRBBASER will not be honored.
	 */
	if ((is_trbe_wrap(trbsr) && (ec == TRBE_EC_OTHERS) && (bsc == TRBE_BSC_FILLED)) &&
	    (trbe_may_overwrite_in_fill_mode(cpudata) ||
	     get_trbe_write_pointer() == get_trbe_base_pointer()))
		return TRBE_FAULT_ACT_WRAP;

	return TRBE_FAULT_ACT_SPURIOUS;
}

static unsigned long trbe_get_trace_size(struct perf_output_handle *handle,
					 struct trbe_buf *buf, bool wrap)
{
	u64 write;
	u64 start_off, end_off;
	u64 size;
	u64 overwrite_skip = TRBE_WORKAROUND_OVERWRITE_FILL_MODE_SKIP_BYTES;

	/*
	 * If the TRBE has wrapped around, the write pointer has
	 * wrapped as well and should be treated as the limit.
	 *
	 * When the TRBE is affected by TRBE_WORKAROUND_WRITE_OUT_OF_RANGE,
	 * it may write up to 64 bytes beyond the "LIMIT". The driver already
	 * keeps a valid page next to the LIMIT and we could potentially
	 * consume the trace data that may have been collected there. But we
	 * cannot be really sure it is available, and the TRBPTR may not
	 * indicate the same. Also, affected cores are also affected by another
	 * erratum which forces the PAGE_SIZE alignment on the TRBPTR, and thus
	 * could potentially pad an entire PAGE_SIZE - 64 bytes, to get those
	 * 64 bytes. Thus we ignore the potential triggering of the erratum
	 * on WRAP and limit the data to LIMIT.
	 */
	if (wrap)
		write = get_trbe_limit_pointer();
	else
		write = get_trbe_write_pointer();

	/*
	 * The TRBE may use a different base address than the base
	 * of the ring buffer. Thus use the beginning of the ring
	 * buffer to compute the offsets.
	 */
	end_off = write - buf->trbe_base;
	start_off = PERF_IDX2OFF(handle->head, buf);

	if (WARN_ON_ONCE(end_off < start_off))
		return 0;

	size = end_off - start_off;
	/*
	 * If the TRBE is affected by the following erratum, we must fill
	 * the space we skipped with IGNORE packets. And we are always
	 * guaranteed to have at least a PAGE_SIZE space in the buffer.
	 */
	if (trbe_has_erratum(buf->cpudata, TRBE_WORKAROUND_OVERWRITE_FILL_MODE) &&
	    !WARN_ON(size < overwrite_skip))
		__trbe_pad_buf(buf, start_off, overwrite_skip);

	return size;
}
static void *arm_trbe_alloc_buffer(struct coresight_device *csdev,
				   struct perf_event *event, void **pages,
				   int nr_pages, bool snapshot)
{
	struct trbe_buf *buf;
	struct page **pglist;
	int i;

	/*
	 * The TRBE LIMIT and TRBE WRITE pointers must be page aligned. But with
	 * just a single page, there would not be any room left while writing
	 * into a partially filled TRBE buffer after the page size alignment.
	 * Hence restrict the minimum buffer size to two pages.
	 */
	if (nr_pages < 2)
		return NULL;

	buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, trbe_alloc_node(event));
	if (!buf)
		return ERR_PTR(-ENOMEM);

	pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL);
	if (!pglist) {
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < nr_pages; i++)
		pglist[i] = virt_to_page(pages[i]);

	buf->trbe_base = (unsigned long)vmap(pglist, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!buf->trbe_base) {
		kfree(pglist);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}
	buf->trbe_limit = buf->trbe_base + nr_pages * PAGE_SIZE;
	buf->trbe_write = buf->trbe_base;
	buf->snapshot = snapshot;
	buf->nr_pages = nr_pages;
	buf->pages = pages;
	kfree(pglist);
	return buf;
}

static void arm_trbe_free_buffer(void *config)
{
	struct trbe_buf *buf = config;

	vunmap((void *)buf->trbe_base);
	kfree(buf);
}

static unsigned long arm_trbe_update_buffer(struct coresight_device *csdev,
					    struct perf_output_handle *handle,
					    void *config)
{
	struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
	struct trbe_buf *buf = config;
	enum trbe_fault_action act;
	unsigned long size, status;
	unsigned long flags;
	bool wrap = false;

	WARN_ON(buf->cpudata != cpudata);
	WARN_ON(cpudata->cpu != smp_processor_id());
	WARN_ON(cpudata->drvdata != drvdata);
	if (cpudata->mode != CS_MODE_PERF)
		return 0;

	/*
	 * We are about to disable the TRBE. And this could in turn
	 * fill up the buffer, triggering an IRQ. This could be consumed
	 * by the PE asynchronously, causing a race here against
	 * the IRQ handler in closing out the handle. So, let us
	 * make sure the IRQ can't trigger while we are collecting
	 * the buffer. We also make sure that a WRAP event is handled
	 * accordingly.
	 */
	local_irq_save(flags);

	/*
	 * If the TRBE was disabled due to lack of space in the AUX buffer or a
	 * spurious fault, the driver leaves it disabled, truncating the buffer.
	 * Since the etm_perf driver expects to close out the AUX buffer, the
	 * driver skips it. Thus, just pass in 0 size here to indicate that the
	 * buffer was truncated.
	 */
	if (!is_trbe_enabled()) {
		size = 0;
		goto done;
	}
	/*
	 * The perf handle structure needs to be shared with the TRBE IRQ handler
	 * for capturing trace data and restarting the handle. There is a
	 * probability of an undefined reference based crash when the etm event
	 * is being stopped while a TRBE IRQ is also getting processed. This
	 * happens due to the release of the perf handle via perf_aux_output_end()
	 * in etm_event_stop(). Stopping the TRBE here will ensure that no IRQ
	 * could be generated when the perf handle gets freed in etm_event_stop().
	 */
	trbe_drain_and_disable_local(cpudata);

	/* Check if there is a pending interrupt and handle it here */
	status = read_sysreg_s(SYS_TRBSR_EL1);
	if (is_trbe_irq(status)) {

		/*
		 * Now that we are handling the IRQ here, clear the IRQ
		 * from the status, to let the irq handler know that it
		 * is taken care of.
		 */
		clr_trbe_irq();
		isb();

		act = trbe_get_fault_act(handle, status);
		/*
		 * If this was not due to a WRAP event, we have some
		 * errors and as such the buffer is empty.
		 */
		if (act != TRBE_FAULT_ACT_WRAP) {
			size = 0;
			goto done;
		}

		trbe_report_wrap_event(handle);
		wrap = true;
	}

	size = trbe_get_trace_size(handle, buf, wrap);

done:
	local_irq_restore(flags);

	if (buf->snapshot)
		handle->head += size;
	return size;
}
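/*
 * Note on snapshot mode, derived from the code above and in
 * trbe_handle_overflow(): in snapshot mode the driver advances
 * handle->head by the captured size itself, since the perf AUX core does
 * not move the head for overwrite (snapshot) sessions; in normal mode
 * the returned size is handed to perf_aux_output_end(), which advances
 * the head on the driver's behalf.
 */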

static int trbe_apply_work_around_before_enable(struct trbe_buf *buf)
{
	/*
	 * TRBE_WORKAROUND_OVERWRITE_FILL_MODE causes the TRBE to overwrite a few
	 * cache lines worth of data from the "TRBBASER_EL1" in the event of a
	 * "FILL". Thus, we could lose some amount of the trace at the base.
	 *
	 * Before Fix:
	 *
	 * normal-BASE     head (normal-TRBPTR)         tail (normal-LIMIT)
	 * |                   \/                       /
	 * -------------------------------------------------------------
	 * |   Pg0      |   Pg1       |           |          |  PgN     |
	 * -------------------------------------------------------------
	 *
	 * In the normal course of action, we would set the TRBBASER to the
	 * beginning of the ring-buffer (normal-BASE). But with the erratum,
	 * the TRBE could overwrite the contents at the "normal-BASE", after
	 * hitting the "normal-LIMIT", since it doesn't stop as expected. And
	 * this is wrong. This could result in overwriting trace collected in
	 * one of the previous runs, being consumed by the user. So we must
	 * always make sure that the TRBBASER is within the region
	 * [head, head + size]. Note that TRBBASER must be PAGE aligned.
	 *
	 * After moving the BASE:
	 *
	 * normal-BASE     head (normal-TRBPTR)         tail (normal-LIMIT)
	 * |                   \/                       /
	 * -------------------------------------------------------------
	 * |                   |xyzdef.     |..  tuvw|                  |
	 * -------------------------------------------------------------
	 *                     /
	 *              New-BASER
	 *
	 * Also, we would set the TRBPTR to head (after adjusting for
	 * alignment) at normal-PTR. This would mean that the last few bytes
	 * of the trace (say, "xyz") might overwrite the first few bytes of
	 * trace written ("abc"). More importantly they will appear in what
	 * userspace sees as the beginning of the trace, which is wrong. We may
	 * not always have space to move the latest trace "xyz" to the correct
	 * order as it must appear beyond the LIMIT (i.e, [head..head + size]).
	 * Thus it is easier to ignore those bytes than to complicate the
	 * driver to move them, assuming that the erratum was triggered and
	 * doing additional checks to see if there is indeed allowed space at
	 * TRBLIMITR.LIMIT.
	 *
	 * Thus the full workaround will move the BASE and the PTR and would
	 * look like (after padding the skipped bytes at the end of the
	 * session):
	 *
	 * normal-BASE     head (normal-TRBPTR)         tail (normal-LIMIT)
	 * |                   \/                       /
	 * -------------------------------------------------------------
	 * |                   |///abc..    |..  rst|                   |
	 * -------------------------------------------------------------
	 *                     /    |
	 *              New-BASER   New-TRBPTR
	 *
	 * To summarize, with the workaround:
	 *
	 * - We always align the offset for the next session to PAGE_SIZE.
	 *   (This is to ensure we can program the TRBBASER to this offset
	 *   within the region [head...head + size]).
	 *
	 * - At TRBE enable:
	 *   - Set the TRBBASER to the page aligned offset of the current
	 *     proposed write offset (which is guaranteed to be aligned
	 *     as above).
	 *   - Move the TRBPTR to skip the first 256 bytes (that might be
	 *     overwritten with the erratum). This ensures that the trace
	 *     generated in the session is not re-written.
	 *
	 * - At trace collection:
	 *   - Pad the 256 bytes skipped above again with IGNORE packets.
	 */
	if (trbe_has_erratum(buf->cpudata, TRBE_WORKAROUND_OVERWRITE_FILL_MODE)) {
		if (WARN_ON(!IS_ALIGNED(buf->trbe_write, PAGE_SIZE)))
			return -EINVAL;
		buf->trbe_hw_base = buf->trbe_write;
		buf->trbe_write += TRBE_WORKAROUND_OVERWRITE_FILL_MODE_SKIP_BYTES;
	}

	/*
	 * TRBE_WORKAROUND_WRITE_OUT_OF_RANGE could cause the TRBE to write to
	 * the next page after the TRBLIMITR.LIMIT. For perf, the "next page"
	 * may be:
	 *	- The page beyond the ring buffer. This could mean the TRBE
	 *	  corrupts another entity (kernel / user).
	 *	- A portion of the "ring buffer" consumed by the userspace,
	 *	  i.e, a page outside [head, head + size].
	 *
	 * We work around this by:
	 *	- Making sure that we have at least an extra PAGE of space left
	 *	  in the ring buffer [head, head + size] compared to what we
	 *	  normally do without the erratum. See trbe_min_trace_buf_size().
	 *
	 *	- Adjusting the TRBLIMITR.LIMIT to leave the extra PAGE outside
	 *	  the TRBE's range (i.e [TRBBASER, TRBLIMITR.LIMIT]).
	 */
	if (trbe_has_erratum(buf->cpudata, TRBE_WORKAROUND_WRITE_OUT_OF_RANGE)) {
		s64 space = buf->trbe_limit - buf->trbe_write;
		/*
		 * We must have more than a PAGE_SIZE worth of space in the
		 * proposed range for the TRBE.
		 */
		if (WARN_ON(space <= PAGE_SIZE ||
			    !IS_ALIGNED(buf->trbe_limit, PAGE_SIZE)))
			return -EINVAL;
		buf->trbe_limit -= PAGE_SIZE;
	}

	return 0;
}

static int __arm_trbe_enable(struct trbe_buf *buf,
			     struct perf_output_handle *handle)
{
	int ret = 0;

	perf_aux_output_flag(handle, PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW);
	buf->trbe_limit = compute_trbe_buffer_limit(handle);
	buf->trbe_write = buf->trbe_base + PERF_IDX2OFF(handle->head, buf);
	if (buf->trbe_limit == buf->trbe_base) {
		ret = -ENOSPC;
		goto err;
	}
	/* Set the base of the TRBE to the buffer base */
	buf->trbe_hw_base = buf->trbe_base;

	ret = trbe_apply_work_around_before_enable(buf);
	if (ret)
		goto err;

	*this_cpu_ptr(buf->cpudata->drvdata->handle) = handle;
	trbe_enable_hw(buf);
	return 0;
err:
	trbe_stop_and_truncate_event(handle);
	return ret;
}

static int arm_trbe_enable(struct coresight_device *csdev, enum cs_mode mode,
			   void *data)
{
	struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
	struct perf_output_handle *handle = data;
	struct trbe_buf *buf = etm_perf_sink_config(handle);

	WARN_ON(cpudata->cpu != smp_processor_id());
	WARN_ON(cpudata->drvdata != drvdata);
	if (mode != CS_MODE_PERF)
		return -EINVAL;

	cpudata->buf = buf;
	cpudata->mode = mode;
	buf->cpudata = cpudata;

	return __arm_trbe_enable(buf, handle);
}

static int arm_trbe_disable(struct coresight_device *csdev)
{
	struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
	struct trbe_buf *buf = cpudata->buf;

	WARN_ON(buf->cpudata != cpudata);
	WARN_ON(cpudata->cpu != smp_processor_id());
	WARN_ON(cpudata->drvdata != drvdata);
	if (cpudata->mode != CS_MODE_PERF)
		return -EINVAL;

	trbe_drain_and_disable_local(cpudata);
	buf->cpudata = NULL;
	cpudata->buf = NULL;
	cpudata->mode = CS_MODE_DISABLED;
	return 0;
}

static void trbe_handle_spurious(struct perf_output_handle *handle)
{
	struct trbe_buf *buf = etm_perf_sink_config(handle);
	u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);

	/*
	 * If the IRQ was spurious, simply re-enable the TRBE
	 * without modifying the buffer parameters to retain
	 * the trace collected so far.
	 */
	set_trbe_enabled(buf->cpudata, trblimitr);
}

static int trbe_handle_overflow(struct perf_output_handle *handle)
{
	struct perf_event *event = handle->event;
	struct trbe_buf *buf = etm_perf_sink_config(handle);
	unsigned long size;
	struct etm_event_data *event_data;

	size = trbe_get_trace_size(handle, buf, true);
	if (buf->snapshot)
		handle->head += size;

	trbe_report_wrap_event(handle);
	perf_aux_output_end(handle, size);
	event_data = perf_aux_output_begin(handle, event);
	if (!event_data) {
		/*
		 * We are unable to restart the trace collection,
		 * thus leave the TRBE disabled. The etm-perf driver
		 * is able to detect this with a disconnected handle
		 * (handle->event = NULL).
		 */
		trbe_drain_and_disable_local(buf->cpudata);
		*this_cpu_ptr(buf->cpudata->drvdata->handle) = NULL;
		return -EINVAL;
	}

	return __arm_trbe_enable(buf, handle);
}

static bool is_perf_trbe(struct perf_output_handle *handle)
{
	struct trbe_buf *buf = etm_perf_sink_config(handle);
	struct trbe_cpudata *cpudata = buf->cpudata;
	struct trbe_drvdata *drvdata = cpudata->drvdata;
	int cpu = smp_processor_id();

	WARN_ON(buf->trbe_hw_base != get_trbe_base_pointer());
	WARN_ON(buf->trbe_limit != get_trbe_limit_pointer());

	if (cpudata->mode != CS_MODE_PERF)
		return false;

	if (cpudata->cpu != cpu)
		return false;

	if (!cpumask_test_cpu(cpu, &drvdata->supported_cpus))
		return false;

	return true;
}
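/*
 * The TRBE interrupt is a per-CPU PPI (see arm_trbe_probe_irq()); the dev
 * cookie passed to the handler below is the per-CPU perf_output_handle
 * pointer, so the handler always operates on the trace session owned by
 * the local CPU.
 */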
static irqreturn_t arm_trbe_irq_handler(int irq, void *dev)
{
	struct perf_output_handle **handle_ptr = dev;
	struct perf_output_handle *handle = *handle_ptr;
	struct trbe_buf *buf = etm_perf_sink_config(handle);
	enum trbe_fault_action act;
	u64 status;
	bool truncated = false;
	u64 trfcr;

	/* Reads from TRBSR_EL1 are fine when the TRBE is active */
	status = read_sysreg_s(SYS_TRBSR_EL1);
	/*
	 * If the pending IRQ was handled by the update_buffer callback
	 * we have nothing to do here.
	 */
	if (!is_trbe_irq(status))
		return IRQ_NONE;

	/* Prohibit the CPU from tracing before we disable the TRBE */
	trfcr = cpu_prohibit_trace();
	/*
	 * Ensure the trace is visible to the CPUs and
	 * any external aborts have been resolved.
	 */
	trbe_drain_and_disable_local(buf->cpudata);
	clr_trbe_irq();
	isb();

	if (WARN_ON_ONCE(!handle) || !perf_get_aux(handle))
		return IRQ_NONE;

	if (!is_perf_trbe(handle))
		return IRQ_NONE;

	act = trbe_get_fault_act(handle, status);
	switch (act) {
	case TRBE_FAULT_ACT_WRAP:
		truncated = !!trbe_handle_overflow(handle);
		break;
	case TRBE_FAULT_ACT_SPURIOUS:
		trbe_handle_spurious(handle);
		break;
	case TRBE_FAULT_ACT_FATAL:
		trbe_stop_and_truncate_event(handle);
		truncated = true;
		break;
	}

	/*
	 * If the buffer was truncated, ensure perf callbacks
	 * have completed, which will disable the event.
	 *
	 * Otherwise, restore the trace filter controls to
	 * allow tracing.
	 */
	if (truncated)
		irq_work_run();
	else
		write_trfcr(trfcr);

	return IRQ_HANDLED;
}

static const struct coresight_ops_sink arm_trbe_sink_ops = {
	.enable		= arm_trbe_enable,
	.disable	= arm_trbe_disable,
	.alloc_buffer	= arm_trbe_alloc_buffer,
	.free_buffer	= arm_trbe_free_buffer,
	.update_buffer	= arm_trbe_update_buffer,
};

static const struct coresight_ops arm_trbe_cs_ops = {
	.sink_ops	= &arm_trbe_sink_ops,
};

static ssize_t align_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct trbe_cpudata *cpudata = dev_get_drvdata(dev);

	return sprintf(buf, "%llx\n", cpudata->trbe_hw_align);
}
static DEVICE_ATTR_RO(align);

static ssize_t flag_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct trbe_cpudata *cpudata = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", cpudata->trbe_flag);
}
static DEVICE_ATTR_RO(flag);

static struct attribute *arm_trbe_attrs[] = {
	&dev_attr_align.attr,
	&dev_attr_flag.attr,
	NULL,
};

static const struct attribute_group arm_trbe_group = {
	.attrs = arm_trbe_attrs,
};

static const struct attribute_group *arm_trbe_groups[] = {
	&arm_trbe_group,
	NULL,
};

static void arm_trbe_enable_cpu(void *info)
{
	struct trbe_drvdata *drvdata = info;
	struct trbe_cpudata *cpudata = this_cpu_ptr(drvdata->cpudata);

	trbe_reset_local(cpudata);
	enable_percpu_irq(drvdata->irq, IRQ_TYPE_NONE);
}

static void arm_trbe_disable_cpu(void *info)
{
	struct trbe_drvdata *drvdata = info;
	struct trbe_cpudata *cpudata = this_cpu_ptr(drvdata->cpudata);

	disable_percpu_irq(drvdata->irq);
	trbe_reset_local(cpudata);
}

static void arm_trbe_register_coresight_cpu(struct trbe_drvdata *drvdata, int cpu)
{
	struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
	struct coresight_device *trbe_csdev = coresight_get_percpu_sink(cpu);
	struct coresight_desc desc = { 0 };
	struct device *dev;

	if (WARN_ON(trbe_csdev))
		return;

	/* If the TRBE was not probed on the CPU, we shouldn't be here */
	if (WARN_ON(!cpudata->drvdata))
		return;

	dev = &cpudata->drvdata->pdev->dev;
	desc.name = devm_kasprintf(dev, GFP_KERNEL, "trbe%d", cpu);
	if (!desc.name)
		goto cpu_clear;
	/*
	 * TRBE coresight devices do not need regular connections
	 * information, as the paths get built between all percpu
	 * source and their respective percpu sink devices. However,
	 * coresight_register() expects device connections via the
	 * platform_data, which TRBE devices do not have. As they
	 * are not real ACPI devices, coresight_get_platform_data()
	 * ends up failing. Instead let's allocate a dummy zeroed
	 * coresight_platform_data structure and assign that back
	 * into the device for that purpose.
	 */
	desc.pdata = devm_kzalloc(dev, sizeof(*desc.pdata), GFP_KERNEL);
	if (!desc.pdata)
		goto cpu_clear;

	desc.type = CORESIGHT_DEV_TYPE_SINK;
	desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM;
	desc.ops = &arm_trbe_cs_ops;
	desc.groups = arm_trbe_groups;
	desc.dev = dev;
	trbe_csdev = coresight_register(&desc);
	if (IS_ERR(trbe_csdev))
		goto cpu_clear;

	dev_set_drvdata(&trbe_csdev->dev, cpudata);
	coresight_set_percpu_sink(cpu, trbe_csdev);
	return;
cpu_clear:
	cpumask_clear_cpu(cpu, &drvdata->supported_cpus);
}

/*
 * Must be called with preemption disabled, for trbe_check_errata().
 */
static void arm_trbe_probe_cpu(void *info)
{
	struct trbe_drvdata *drvdata = info;
	int cpu = smp_processor_id();
	struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
	u64 trbidr;

	if (WARN_ON(!cpudata))
		goto cpu_clear;

	if (!is_trbe_available()) {
		pr_err("TRBE is not implemented on cpu %d\n", cpu);
		goto cpu_clear;
	}

	trbidr = read_sysreg_s(SYS_TRBIDR_EL1);
	if (!is_trbe_programmable(trbidr)) {
		pr_err("TRBE is owned in higher exception level on cpu %d\n", cpu);
		goto cpu_clear;
	}

	cpudata->trbe_hw_align = 1ULL << get_trbe_address_align(trbidr);
	if (cpudata->trbe_hw_align > SZ_2K) {
		pr_err("Unsupported alignment on cpu %d\n", cpu);
		goto cpu_clear;
	}

	/*
	 * Run the TRBE erratum checks, now that we know
	 * this instance is about to be registered.
	 */
	trbe_check_errata(cpudata);

	if (trbe_is_broken(cpudata)) {
		pr_err("Disabling TRBE on cpu%d due to erratum\n", cpu);
		goto cpu_clear;
	}

	/*
	 * If the TRBE is affected by erratum TRBE_WORKAROUND_OVERWRITE_FILL_MODE,
	 * we must always program the TRBPTR_EL1 256 bytes from a page
	 * boundary, with TRBBASER_EL1 set to the page, to prevent the
	 * TRBE over-writing 256 bytes at TRBBASER_EL1 on a FILL event.
	 *
	 * Thus make sure we always align our write pointer to a PAGE_SIZE,
	 * which also guarantees that we have at least a PAGE_SIZE space in
	 * the buffer (TRBLIMITR is PAGE aligned) and thus we can skip
	 * the required bytes at the base.
	 */
	if (trbe_may_overwrite_in_fill_mode(cpudata))
		cpudata->trbe_align = PAGE_SIZE;
	else
		cpudata->trbe_align = cpudata->trbe_hw_align;

	cpudata->trbe_flag = get_trbe_flag_update(trbidr);
	cpudata->cpu = cpu;
	cpudata->drvdata = drvdata;
	return;
cpu_clear:
	cpumask_clear_cpu(cpu, &drvdata->supported_cpus);
}

static void arm_trbe_remove_coresight_cpu(struct trbe_drvdata *drvdata, int cpu)
{
	struct coresight_device *trbe_csdev = coresight_get_percpu_sink(cpu);

	if (trbe_csdev) {
		coresight_unregister(trbe_csdev);
		coresight_set_percpu_sink(cpu, NULL);
	}
}

static int arm_trbe_probe_coresight(struct trbe_drvdata *drvdata)
{
	int cpu;

	drvdata->cpudata = alloc_percpu(typeof(*drvdata->cpudata));
	if (!drvdata->cpudata)
		return -ENOMEM;

	for_each_cpu(cpu, &drvdata->supported_cpus) {
		/* If we fail to probe the CPU, let us defer it to hotplug callbacks */
		if (smp_call_function_single(cpu, arm_trbe_probe_cpu, drvdata, 1))
			continue;
		if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
			arm_trbe_register_coresight_cpu(drvdata, cpu);
		if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
			smp_call_function_single(cpu, arm_trbe_enable_cpu, drvdata, 1);
	}
	return 0;
}

static int arm_trbe_remove_coresight(struct trbe_drvdata *drvdata)
{
	int cpu;

	for_each_cpu(cpu, &drvdata->supported_cpus) {
		smp_call_function_single(cpu, arm_trbe_disable_cpu, drvdata, 1);
		arm_trbe_remove_coresight_cpu(drvdata, cpu);
	}
	free_percpu(drvdata->cpudata);
	return 0;
}

static void arm_trbe_probe_hotplugged_cpu(struct trbe_drvdata *drvdata)
{
	preempt_disable();
	arm_trbe_probe_cpu(drvdata);
	preempt_enable();
}
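/*
 * Hotplug flow, as wired up in arm_trbe_probe_cpuhp() below: the startup
 * callback either re-enables a TRBE that was already probed and
 * registered, or probes and registers one that was offline at driver
 * probe time. The teardown callback only disables the local TRBE and
 * leaves the coresight device registered, so a subsequent online of the
 * same CPU takes the simple re-enable path.
 */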
static int arm_trbe_cpu_startup(unsigned int cpu, struct hlist_node *node)
{
	struct trbe_drvdata *drvdata = hlist_entry_safe(node, struct trbe_drvdata, hotplug_node);

	if (cpumask_test_cpu(cpu, &drvdata->supported_cpus)) {

		/*
		 * If this CPU was not probed for TRBE,
		 * initialize it now.
		 */
		if (!coresight_get_percpu_sink(cpu)) {
			arm_trbe_probe_hotplugged_cpu(drvdata);
			if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
				arm_trbe_register_coresight_cpu(drvdata, cpu);
			if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
				arm_trbe_enable_cpu(drvdata);
		} else {
			arm_trbe_enable_cpu(drvdata);
		}
	}
	return 0;
}

static int arm_trbe_cpu_teardown(unsigned int cpu, struct hlist_node *node)
{
	struct trbe_drvdata *drvdata = hlist_entry_safe(node, struct trbe_drvdata, hotplug_node);

	if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
		arm_trbe_disable_cpu(drvdata);
	return 0;
}

static int arm_trbe_probe_cpuhp(struct trbe_drvdata *drvdata)
{
	enum cpuhp_state trbe_online;
	int ret;

	trbe_online = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DRVNAME,
					      arm_trbe_cpu_startup, arm_trbe_cpu_teardown);
	if (trbe_online < 0)
		return trbe_online;

	ret = cpuhp_state_add_instance(trbe_online, &drvdata->hotplug_node);
	if (ret) {
		cpuhp_remove_multi_state(trbe_online);
		return ret;
	}
	drvdata->trbe_online = trbe_online;
	return 0;
}

static void arm_trbe_remove_cpuhp(struct trbe_drvdata *drvdata)
{
	cpuhp_state_remove_instance(drvdata->trbe_online, &drvdata->hotplug_node);
	cpuhp_remove_multi_state(drvdata->trbe_online);
}

static int arm_trbe_probe_irq(struct platform_device *pdev,
			      struct trbe_drvdata *drvdata)
{
	int ret;

	drvdata->irq = platform_get_irq(pdev, 0);
	if (drvdata->irq < 0) {
		pr_err("IRQ not found for the platform device\n");
		return drvdata->irq;
	}

	if (!irq_is_percpu(drvdata->irq)) {
		pr_err("IRQ is not a PPI\n");
		return -EINVAL;
	}

	if (irq_get_percpu_devid_partition(drvdata->irq, &drvdata->supported_cpus))
		return -EINVAL;

	drvdata->handle = alloc_percpu(struct perf_output_handle *);
	if (!drvdata->handle)
		return -ENOMEM;

	ret = request_percpu_irq(drvdata->irq, arm_trbe_irq_handler, DRVNAME, drvdata->handle);
	if (ret) {
		free_percpu(drvdata->handle);
		return ret;
	}
	return 0;
}

static void arm_trbe_remove_irq(struct trbe_drvdata *drvdata)
{
	free_percpu_irq(drvdata->irq, drvdata->handle);
	free_percpu(drvdata->handle);
}

static int arm_trbe_device_probe(struct platform_device *pdev)
{
	struct trbe_drvdata *drvdata;
	struct device *dev = &pdev->dev;
	int ret;

	/* Trace capture is not possible with kernel page table isolation */
	if (arm64_kernel_unmapped_at_el0()) {
		pr_err("TRBE wouldn't work if kernel gets unmapped at EL0\n");
		return -EOPNOTSUPP;
	}

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	dev_set_drvdata(dev, drvdata);
	drvdata->pdev = pdev;
	ret = arm_trbe_probe_irq(pdev, drvdata);
	if (ret)
		return ret;

	ret = arm_trbe_probe_coresight(drvdata);
	if (ret)
		goto probe_failed;

	ret = arm_trbe_probe_cpuhp(drvdata);
	if (ret)
		goto cpuhp_failed;

	return 0;
cpuhp_failed:
	arm_trbe_remove_coresight(drvdata);
probe_failed:
	arm_trbe_remove_irq(drvdata);
	return ret;
}

static void arm_trbe_device_remove(struct platform_device *pdev)
{
	struct trbe_drvdata *drvdata = platform_get_drvdata(pdev);

	arm_trbe_remove_cpuhp(drvdata);
	arm_trbe_remove_coresight(drvdata);
	arm_trbe_remove_irq(drvdata);
}

static const struct of_device_id arm_trbe_of_match[] = {
	{ .compatible = "arm,trace-buffer-extension"},
	{},
};
MODULE_DEVICE_TABLE(of, arm_trbe_of_match);

#ifdef CONFIG_ACPI
static const struct platform_device_id arm_trbe_acpi_match[] = {
	{ ARMV8_TRBE_PDEV_NAME, 0 },
	{ }
};
MODULE_DEVICE_TABLE(platform, arm_trbe_acpi_match);
#endif

static struct platform_driver arm_trbe_driver = {
	.id_table = ACPI_PTR(arm_trbe_acpi_match),
	.driver	= {
		.name = DRVNAME,
		.of_match_table = of_match_ptr(arm_trbe_of_match),
		.suppress_bind_attrs = true,
	},
	.probe	= arm_trbe_device_probe,
	.remove_new = arm_trbe_device_remove,
};

static int __init arm_trbe_init(void)
{
	int ret;

	ret = platform_driver_register(&arm_trbe_driver);
	if (!ret)
		return 0;

	pr_err("Error registering %s platform driver\n", DRVNAME);
	return ret;
}

static void __exit arm_trbe_exit(void)
{
	platform_driver_unregister(&arm_trbe_driver);
}
module_init(arm_trbe_init);
module_exit(arm_trbe_exit);

MODULE_AUTHOR("Anshuman Khandual <anshuman.khandual@arm.com>");
MODULE_DESCRIPTION("Arm Trace Buffer Extension (TRBE) driver");
MODULE_LICENSE("GPL v2");