1 /* 2 * Copyright(C) 2016 Linaro Limited. All rights reserved. 3 * Author: Mathieu Poirier <mathieu.poirier@linaro.org> 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 as published by 7 * the Free Software Foundation. 8 * 9 * This program is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * more details. 13 * 14 * You should have received a copy of the GNU General Public License along with 15 * this program. If not, see <http://www.gnu.org/licenses/>. 16 */ 17 18 #include <linux/circ_buf.h> 19 #include <linux/coresight.h> 20 #include <linux/perf_event.h> 21 #include <linux/slab.h> 22 #include "coresight-priv.h" 23 #include "coresight-tmc.h" 24 25 static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata) 26 { 27 CS_UNLOCK(drvdata->base); 28 29 /* Wait for TMCSReady bit to be set */ 30 tmc_wait_for_tmcready(drvdata); 31 32 writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE); 33 writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI | 34 TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT | 35 TMC_FFCR_TRIGON_TRIGIN, 36 drvdata->base + TMC_FFCR); 37 38 writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG); 39 tmc_enable_hw(drvdata); 40 41 CS_LOCK(drvdata->base); 42 } 43 44 static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata) 45 { 46 char *bufp; 47 u32 read_data; 48 int i; 49 50 bufp = drvdata->buf; 51 drvdata->len = 0; 52 while (1) { 53 for (i = 0; i < drvdata->memwidth; i++) { 54 read_data = readl_relaxed(drvdata->base + TMC_RRD); 55 if (read_data == 0xFFFFFFFF) 56 return; 57 memcpy(bufp, &read_data, 4); 58 bufp += 4; 59 drvdata->len += 4; 60 } 61 } 62 } 63 64 static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata) 65 { 66 CS_UNLOCK(drvdata->base); 67 68 tmc_flush_and_stop(drvdata); 69 /* 70 * When operating in sysFS mode the content of the buffer needs to be 71 * read before the TMC is disabled. 72 */ 73 if (drvdata->mode == CS_MODE_SYSFS) 74 tmc_etb_dump_hw(drvdata); 75 tmc_disable_hw(drvdata); 76 77 CS_LOCK(drvdata->base); 78 } 79 80 static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata) 81 { 82 CS_UNLOCK(drvdata->base); 83 84 /* Wait for TMCSReady bit to be set */ 85 tmc_wait_for_tmcready(drvdata); 86 87 writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE); 88 writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI, 89 drvdata->base + TMC_FFCR); 90 writel_relaxed(0x0, drvdata->base + TMC_BUFWM); 91 tmc_enable_hw(drvdata); 92 93 CS_LOCK(drvdata->base); 94 } 95 96 static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata) 97 { 98 CS_UNLOCK(drvdata->base); 99 100 tmc_flush_and_stop(drvdata); 101 tmc_disable_hw(drvdata); 102 103 CS_LOCK(drvdata->base); 104 } 105 106 static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev) 107 { 108 int ret = 0; 109 bool used = false; 110 char *buf = NULL; 111 unsigned long flags; 112 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 113 114 /* 115 * If we don't have a buffer release the lock and allocate memory. 116 * Otherwise keep the lock and move along. 117 */ 118 spin_lock_irqsave(&drvdata->spinlock, flags); 119 if (!drvdata->buf) { 120 spin_unlock_irqrestore(&drvdata->spinlock, flags); 121 122 /* Allocating the memory here while outside of the spinlock */ 123 buf = kzalloc(drvdata->size, GFP_KERNEL); 124 if (!buf) 125 return -ENOMEM; 126 127 /* Let's try again */ 128 spin_lock_irqsave(&drvdata->spinlock, flags); 129 } 130 131 if (drvdata->reading) { 132 ret = -EBUSY; 133 goto out; 134 } 135 136 /* 137 * In sysFS mode we can have multiple writers per sink. Since this 138 * sink is already enabled no memory is needed and the HW need not be 139 * touched. 140 */ 141 if (drvdata->mode == CS_MODE_SYSFS) 142 goto out; 143 144 /* 145 * If drvdata::buf isn't NULL, memory was allocated for a previous 146 * trace run but wasn't read. If so simply zero-out the memory. 147 * Otherwise use the memory allocated above. 148 * 149 * The memory is freed when users read the buffer using the 150 * /dev/xyz.{etf|etb} interface. See tmc_read_unprepare_etf() for 151 * details. 152 */ 153 if (drvdata->buf) { 154 memset(drvdata->buf, 0, drvdata->size); 155 } else { 156 used = true; 157 drvdata->buf = buf; 158 } 159 160 drvdata->mode = CS_MODE_SYSFS; 161 tmc_etb_enable_hw(drvdata); 162 out: 163 spin_unlock_irqrestore(&drvdata->spinlock, flags); 164 165 /* Free memory outside the spinlock if need be */ 166 if (!used) 167 kfree(buf); 168 169 if (!ret) 170 dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n"); 171 172 return ret; 173 } 174 175 static int tmc_enable_etf_sink_perf(struct coresight_device *csdev) 176 { 177 int ret = 0; 178 unsigned long flags; 179 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 180 181 spin_lock_irqsave(&drvdata->spinlock, flags); 182 if (drvdata->reading) { 183 ret = -EINVAL; 184 goto out; 185 } 186 187 /* 188 * In Perf mode there can be only one writer per sink. There 189 * is also no need to continue if the ETB/ETR is already operated 190 * from sysFS. 191 */ 192 if (drvdata->mode != CS_MODE_DISABLED) { 193 ret = -EINVAL; 194 goto out; 195 } 196 197 drvdata->mode = CS_MODE_PERF; 198 tmc_etb_enable_hw(drvdata); 199 out: 200 spin_unlock_irqrestore(&drvdata->spinlock, flags); 201 202 return ret; 203 } 204 205 static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode) 206 { 207 switch (mode) { 208 case CS_MODE_SYSFS: 209 return tmc_enable_etf_sink_sysfs(csdev); 210 case CS_MODE_PERF: 211 return tmc_enable_etf_sink_perf(csdev); 212 } 213 214 /* We shouldn't be here */ 215 return -EINVAL; 216 } 217 218 static void tmc_disable_etf_sink(struct coresight_device *csdev) 219 { 220 unsigned long flags; 221 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 222 223 spin_lock_irqsave(&drvdata->spinlock, flags); 224 if (drvdata->reading) { 225 spin_unlock_irqrestore(&drvdata->spinlock, flags); 226 return; 227 } 228 229 /* Disable the TMC only if it needs to */ 230 if (drvdata->mode != CS_MODE_DISABLED) { 231 tmc_etb_disable_hw(drvdata); 232 drvdata->mode = CS_MODE_DISABLED; 233 } 234 235 spin_unlock_irqrestore(&drvdata->spinlock, flags); 236 237 dev_info(drvdata->dev, "TMC-ETB/ETF disabled\n"); 238 } 239 240 static int tmc_enable_etf_link(struct coresight_device *csdev, 241 int inport, int outport) 242 { 243 unsigned long flags; 244 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 245 246 spin_lock_irqsave(&drvdata->spinlock, flags); 247 if (drvdata->reading) { 248 spin_unlock_irqrestore(&drvdata->spinlock, flags); 249 return -EBUSY; 250 } 251 252 tmc_etf_enable_hw(drvdata); 253 drvdata->mode = CS_MODE_SYSFS; 254 spin_unlock_irqrestore(&drvdata->spinlock, flags); 255 256 dev_info(drvdata->dev, "TMC-ETF enabled\n"); 257 return 0; 258 } 259 260 static void tmc_disable_etf_link(struct coresight_device *csdev, 261 int inport, int outport) 262 { 263 unsigned long flags; 264 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 265 266 spin_lock_irqsave(&drvdata->spinlock, flags); 267 if (drvdata->reading) { 268 spin_unlock_irqrestore(&drvdata->spinlock, flags); 269 return; 270 } 271 272 tmc_etf_disable_hw(drvdata); 273 drvdata->mode = CS_MODE_DISABLED; 274 spin_unlock_irqrestore(&drvdata->spinlock, flags); 275 276 dev_info(drvdata->dev, "TMC disabled\n"); 277 } 278 279 static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu, 280 void **pages, int nr_pages, bool overwrite) 281 { 282 int node; 283 struct cs_buffers *buf; 284 285 if (cpu == -1) 286 cpu = smp_processor_id(); 287 node = cpu_to_node(cpu); 288 289 /* Allocate memory structure for interaction with Perf */ 290 buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node); 291 if (!buf) 292 return NULL; 293 294 buf->snapshot = overwrite; 295 buf->nr_pages = nr_pages; 296 buf->data_pages = pages; 297 298 return buf; 299 } 300 301 static void tmc_free_etf_buffer(void *config) 302 { 303 struct cs_buffers *buf = config; 304 305 kfree(buf); 306 } 307 308 static int tmc_set_etf_buffer(struct coresight_device *csdev, 309 struct perf_output_handle *handle, 310 void *sink_config) 311 { 312 int ret = 0; 313 unsigned long head; 314 struct cs_buffers *buf = sink_config; 315 316 /* wrap head around to the amount of space we have */ 317 head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1); 318 319 /* find the page to write to */ 320 buf->cur = head / PAGE_SIZE; 321 322 /* and offset within that page */ 323 buf->offset = head % PAGE_SIZE; 324 325 local_set(&buf->data_size, 0); 326 327 return ret; 328 } 329 330 static unsigned long tmc_reset_etf_buffer(struct coresight_device *csdev, 331 struct perf_output_handle *handle, 332 void *sink_config, bool *lost) 333 { 334 long size = 0; 335 struct cs_buffers *buf = sink_config; 336 337 if (buf) { 338 /* 339 * In snapshot mode ->data_size holds the new address of the 340 * ring buffer's head. The size itself is the whole address 341 * range since we want the latest information. 342 */ 343 if (buf->snapshot) 344 handle->head = local_xchg(&buf->data_size, 345 buf->nr_pages << PAGE_SHIFT); 346 /* 347 * Tell the tracer PMU how much we got in this run and if 348 * something went wrong along the way. Nobody else can use 349 * this cs_buffers instance until we are done. As such 350 * resetting parameters here and squaring off with the ring 351 * buffer API in the tracer PMU is fine. 352 */ 353 *lost = !!local_xchg(&buf->lost, 0); 354 size = local_xchg(&buf->data_size, 0); 355 } 356 357 return size; 358 } 359 360 static void tmc_update_etf_buffer(struct coresight_device *csdev, 361 struct perf_output_handle *handle, 362 void *sink_config) 363 { 364 int i, cur; 365 u32 *buf_ptr; 366 u32 read_ptr, write_ptr; 367 u32 status, to_read; 368 unsigned long offset; 369 struct cs_buffers *buf = sink_config; 370 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 371 372 if (!buf) 373 return; 374 375 /* This shouldn't happen */ 376 if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF)) 377 return; 378 379 CS_UNLOCK(drvdata->base); 380 381 tmc_flush_and_stop(drvdata); 382 383 read_ptr = readl_relaxed(drvdata->base + TMC_RRP); 384 write_ptr = readl_relaxed(drvdata->base + TMC_RWP); 385 386 /* 387 * Get a hold of the status register and see if a wrap around 388 * has occurred. If so adjust things accordingly. 389 */ 390 status = readl_relaxed(drvdata->base + TMC_STS); 391 if (status & TMC_STS_FULL) { 392 local_inc(&buf->lost); 393 to_read = drvdata->size; 394 } else { 395 to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size); 396 } 397 398 /* 399 * The TMC RAM buffer may be bigger than the space available in the 400 * perf ring buffer (handle->size). If so advance the RRP so that we 401 * get the latest trace data. 402 */ 403 if (to_read > handle->size) { 404 u32 mask = 0; 405 406 /* 407 * The value written to RRP must be byte-address aligned to 408 * the width of the trace memory databus _and_ to a frame 409 * boundary (16 byte), whichever is the biggest. For example, 410 * for 32-bit, 64-bit and 128-bit wide trace memory, the four 411 * LSBs must be 0s. For 256-bit wide trace memory, the five 412 * LSBs must be 0s. 413 */ 414 switch (drvdata->memwidth) { 415 case TMC_MEM_INTF_WIDTH_32BITS: 416 case TMC_MEM_INTF_WIDTH_64BITS: 417 case TMC_MEM_INTF_WIDTH_128BITS: 418 mask = GENMASK(31, 5); 419 break; 420 case TMC_MEM_INTF_WIDTH_256BITS: 421 mask = GENMASK(31, 6); 422 break; 423 } 424 425 /* 426 * Make sure the new size is aligned in accordance with the 427 * requirement explained above. 428 */ 429 to_read = handle->size & mask; 430 /* Move the RAM read pointer up */ 431 read_ptr = (write_ptr + drvdata->size) - to_read; 432 /* Make sure we are still within our limits */ 433 if (read_ptr > (drvdata->size - 1)) 434 read_ptr -= drvdata->size; 435 /* Tell the HW */ 436 writel_relaxed(read_ptr, drvdata->base + TMC_RRP); 437 local_inc(&buf->lost); 438 } 439 440 cur = buf->cur; 441 offset = buf->offset; 442 443 /* for every byte to read */ 444 for (i = 0; i < to_read; i += 4) { 445 buf_ptr = buf->data_pages[cur] + offset; 446 *buf_ptr = readl_relaxed(drvdata->base + TMC_RRD); 447 448 offset += 4; 449 if (offset >= PAGE_SIZE) { 450 offset = 0; 451 cur++; 452 /* wrap around at the end of the buffer */ 453 cur &= buf->nr_pages - 1; 454 } 455 } 456 457 /* 458 * In snapshot mode all we have to do is communicate to 459 * perf_aux_output_end() the address of the current head. In full 460 * trace mode the same function expects a size to move rb->aux_head 461 * forward. 462 */ 463 if (buf->snapshot) 464 local_set(&buf->data_size, (cur * PAGE_SIZE) + offset); 465 else 466 local_add(to_read, &buf->data_size); 467 468 CS_LOCK(drvdata->base); 469 } 470 471 static const struct coresight_ops_sink tmc_etf_sink_ops = { 472 .enable = tmc_enable_etf_sink, 473 .disable = tmc_disable_etf_sink, 474 .alloc_buffer = tmc_alloc_etf_buffer, 475 .free_buffer = tmc_free_etf_buffer, 476 .set_buffer = tmc_set_etf_buffer, 477 .reset_buffer = tmc_reset_etf_buffer, 478 .update_buffer = tmc_update_etf_buffer, 479 }; 480 481 static const struct coresight_ops_link tmc_etf_link_ops = { 482 .enable = tmc_enable_etf_link, 483 .disable = tmc_disable_etf_link, 484 }; 485 486 const struct coresight_ops tmc_etb_cs_ops = { 487 .sink_ops = &tmc_etf_sink_ops, 488 }; 489 490 const struct coresight_ops tmc_etf_cs_ops = { 491 .sink_ops = &tmc_etf_sink_ops, 492 .link_ops = &tmc_etf_link_ops, 493 }; 494 495 int tmc_read_prepare_etb(struct tmc_drvdata *drvdata) 496 { 497 enum tmc_mode mode; 498 int ret = 0; 499 unsigned long flags; 500 501 /* config types are set a boot time and never change */ 502 if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB && 503 drvdata->config_type != TMC_CONFIG_TYPE_ETF)) 504 return -EINVAL; 505 506 spin_lock_irqsave(&drvdata->spinlock, flags); 507 508 if (drvdata->reading) { 509 ret = -EBUSY; 510 goto out; 511 } 512 513 /* There is no point in reading a TMC in HW FIFO mode */ 514 mode = readl_relaxed(drvdata->base + TMC_MODE); 515 if (mode != TMC_MODE_CIRCULAR_BUFFER) { 516 ret = -EINVAL; 517 goto out; 518 } 519 520 /* Don't interfere if operated from Perf */ 521 if (drvdata->mode == CS_MODE_PERF) { 522 ret = -EINVAL; 523 goto out; 524 } 525 526 /* If drvdata::buf is NULL the trace data has been read already */ 527 if (drvdata->buf == NULL) { 528 ret = -EINVAL; 529 goto out; 530 } 531 532 /* Disable the TMC if need be */ 533 if (drvdata->mode == CS_MODE_SYSFS) 534 tmc_etb_disable_hw(drvdata); 535 536 drvdata->reading = true; 537 out: 538 spin_unlock_irqrestore(&drvdata->spinlock, flags); 539 540 return ret; 541 } 542 543 int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata) 544 { 545 char *buf = NULL; 546 enum tmc_mode mode; 547 unsigned long flags; 548 549 /* config types are set a boot time and never change */ 550 if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB && 551 drvdata->config_type != TMC_CONFIG_TYPE_ETF)) 552 return -EINVAL; 553 554 spin_lock_irqsave(&drvdata->spinlock, flags); 555 556 /* There is no point in reading a TMC in HW FIFO mode */ 557 mode = readl_relaxed(drvdata->base + TMC_MODE); 558 if (mode != TMC_MODE_CIRCULAR_BUFFER) { 559 spin_unlock_irqrestore(&drvdata->spinlock, flags); 560 return -EINVAL; 561 } 562 563 /* Re-enable the TMC if need be */ 564 if (drvdata->mode == CS_MODE_SYSFS) { 565 /* 566 * The trace run will continue with the same allocated trace 567 * buffer. As such zero-out the buffer so that we don't end 568 * up with stale data. 569 * 570 * Since the tracer is still enabled drvdata::buf 571 * can't be NULL. 572 */ 573 memset(drvdata->buf, 0, drvdata->size); 574 tmc_etb_enable_hw(drvdata); 575 } else { 576 /* 577 * The ETB/ETF is not tracing and the buffer was just read. 578 * As such prepare to free the trace buffer. 579 */ 580 buf = drvdata->buf; 581 drvdata->buf = NULL; 582 } 583 584 drvdata->reading = false; 585 spin_unlock_irqrestore(&drvdata->spinlock, flags); 586 587 /* 588 * Free allocated memory outside of the spinlock. There is no need 589 * to assert the validity of 'buf' since calling kfree(NULL) is safe. 590 */ 591 kfree(buf); 592 593 return 0; 594 } 595