// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/coresight.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "coresight-catu.h"
#include "coresight-etm-perf.h"
#include "coresight-priv.h"
#include "coresight-tmc.h"

struct etr_flat_buf {
	struct device	*dev;
	dma_addr_t	daddr;
	void		*vaddr;
	size_t		size;
};

/*
 * etr_perf_buffer - Perf buffer used for ETR
 * @etr_buf	- Actual buffer used by the ETR
 * @snapshot	- Perf session mode
 * @head	- handle->head at the beginning of the session.
 * @nr_pages	- Number of pages in the ring buffer.
 * @pages	- Array of Pages in the ring buffer.
 */
struct etr_perf_buffer {
	struct etr_buf	*etr_buf;
	bool		snapshot;
	unsigned long	head;
	int		nr_pages;
	void		**pages;
};

/* Convert the perf index to an offset within the ETR buffer */
#define PERF_IDX2OFF(idx, buf)	((idx) % ((buf)->nr_pages << PAGE_SHIFT))

/* Lower limit for ETR hardware buffer */
#define TMC_ETR_PERF_MIN_BUF_SIZE	SZ_1M

/*
 * The TMC ETR SG has a page size of 4K. The SG table contains pointers
 * to 4KB buffers. However, the OS may use a PAGE_SIZE different from
 * 4K (i.e., 16KB or 64KB). This implies that a single OS page could
 * contain more than one SG buffer and tables.
 *
 * A table entry has the following format:
 *
 * ---Bit31------------Bit4-------Bit1-----Bit0--
 * |     Address[39:12]    | SBZ |  Entry Type  |
 * ----------------------------------------------
 *
 * Address: Bits [39:12] of a physical page address. Bits [11:0] are
 *	    always zero.
 *
 * Entry type:
 *	b00 - Reserved.
 *	b01 - Last entry in the tables, points to 4K page buffer.
 *	b10 - Normal entry, points to 4K page buffer.
 *	b11 - Link. The address points to the base of next table.
 */

typedef u32 sgte_t;

#define ETR_SG_PAGE_SHIFT		12
#define ETR_SG_PAGE_SIZE		(1UL << ETR_SG_PAGE_SHIFT)
#define ETR_SG_PAGES_PER_SYSPAGE	(PAGE_SIZE / ETR_SG_PAGE_SIZE)
#define ETR_SG_PTRS_PER_PAGE		(ETR_SG_PAGE_SIZE / sizeof(sgte_t))
#define ETR_SG_PTRS_PER_SYSPAGE		(PAGE_SIZE / sizeof(sgte_t))

#define ETR_SG_ET_MASK			0x3
#define ETR_SG_ET_LAST			0x1
#define ETR_SG_ET_NORMAL		0x2
#define ETR_SG_ET_LINK			0x3

#define ETR_SG_ADDR_SHIFT		4

#define ETR_SG_ENTRY(addr, type) \
	(sgte_t)((((addr) >> ETR_SG_PAGE_SHIFT) << ETR_SG_ADDR_SHIFT) | \
		 (type & ETR_SG_ET_MASK))

#define ETR_SG_ADDR(entry) \
	(((dma_addr_t)(entry) >> ETR_SG_ADDR_SHIFT) << ETR_SG_PAGE_SHIFT)
#define ETR_SG_ET(entry)	((entry) & ETR_SG_ET_MASK)

/*
 * struct etr_sg_table : ETR SG Table
 * @sg_table:	Generic SG Table holding the data/table pages.
 * @hwaddr:	hwaddress used by the TMC, which is the base
 *		address of the table.
 */
struct etr_sg_table {
	struct tmc_sg_table	*sg_table;
	dma_addr_t		hwaddr;
};
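
/*
 * Illustrative example (not used by the driver): with the definitions
 * above, a 4K-aligned physical address 0x1234567000 is encoded as a
 * normal entry like this:
 *
 *	ETR_SG_ENTRY(0x1234567000, ETR_SG_ET_NORMAL)
 *		= ((0x1234567000 >> 12) << 4) | 0x2
 *		= (0x1234567 << 4) | 0x2
 *		= 0x12345672
 *
 * ETR_SG_ADDR(0x12345672) recovers 0x1234567000 and ETR_SG_ET(0x12345672)
 * yields 0x2 (ETR_SG_ET_NORMAL).
 */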

/*
 * tmc_etr_sg_table_entries: Total number of table entries required to map
 * @nr_pages system pages.
 *
 * We need to map @nr_pages * ETR_SG_PAGES_PER_SYSPAGE data pages.
 * Each TMC page can map (ETR_SG_PTRS_PER_PAGE - 1) buffer pointers,
 * with the last entry pointing to another page of table entries.
 * If we spill over to a new page for mapping 1 entry, we could as
 * well replace the link entry of the previous page with the last entry.
 */
static inline unsigned long __attribute_const__
tmc_etr_sg_table_entries(int nr_pages)
{
	unsigned long nr_sgpages = nr_pages * ETR_SG_PAGES_PER_SYSPAGE;
	unsigned long nr_sglinks = nr_sgpages / (ETR_SG_PTRS_PER_PAGE - 1);
	/*
	 * If we spill over to a new page for 1 entry, we could as well
	 * make it the LAST entry in the previous page, skipping the Link
	 * address.
	 */
	if (nr_sglinks && (nr_sgpages % (ETR_SG_PTRS_PER_PAGE - 1) < 2))
		nr_sglinks--;
	return nr_sgpages + nr_sglinks;
}
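
/*
 * Worked example (illustrative, assuming a 4K system PAGE_SIZE so that
 * ETR_SG_PAGES_PER_SYSPAGE = 1 and ETR_SG_PTRS_PER_PAGE = 1024):
 *
 *	tmc_etr_sg_table_entries(2048)
 *		nr_sgpages = 2048
 *		nr_sglinks = 2048 / 1023 = 2
 *		2048 % 1023 = 2, which is not < 2, so no adjustment
 *		=> 2048 + 2 = 2050 entries
 *
 * i.e. mapping an 8MB buffer needs 2050 entries, which in turn fit in
 * DIV_ROUND_UP(2050, 1024) = 3 table pages.
 */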

/*
 * tmc_pages_get_offset: Go through all the pages in the tmc_pages
 * and map the device address @addr to an offset within the virtually
 * contiguous buffer.
 */
static long
tmc_pages_get_offset(struct tmc_pages *tmc_pages, dma_addr_t addr)
{
	int i;
	dma_addr_t page_start;

	for (i = 0; i < tmc_pages->nr_pages; i++) {
		page_start = tmc_pages->daddrs[i];
		if (addr >= page_start && addr < (page_start + PAGE_SIZE))
			return i * PAGE_SIZE + (addr - page_start);
	}

	return -EINVAL;
}

/*
 * tmc_pages_free : Unmap and free the pages used by tmc_pages.
 * Pages that were provided by the caller (rather than allocated in
 * tmc_pages_alloc()) simply have their extra refcount dropped.
 */
static void tmc_pages_free(struct tmc_pages *tmc_pages,
			   struct device *dev, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < tmc_pages->nr_pages; i++) {
		if (tmc_pages->daddrs && tmc_pages->daddrs[i])
			dma_unmap_page(dev, tmc_pages->daddrs[i],
				       PAGE_SIZE, dir);
		if (tmc_pages->pages && tmc_pages->pages[i])
			__free_page(tmc_pages->pages[i]);
	}

	kfree(tmc_pages->pages);
	kfree(tmc_pages->daddrs);
	tmc_pages->pages = NULL;
	tmc_pages->daddrs = NULL;
	tmc_pages->nr_pages = 0;
}

/*
 * tmc_pages_alloc : Allocate and map pages for a given @tmc_pages.
 * If @pages is not NULL, the list of page virtual addresses are
 * used as the data pages. The pages are then dma_map'ed for @dev
 * with dma_direction @dir.
 *
 * Returns 0 upon success, else the error number.
 */
static int tmc_pages_alloc(struct tmc_pages *tmc_pages,
			   struct device *dev, int node,
			   enum dma_data_direction dir, void **pages)
{
	int i, nr_pages;
	dma_addr_t paddr;
	struct page *page;

	nr_pages = tmc_pages->nr_pages;
	tmc_pages->daddrs = kcalloc(nr_pages, sizeof(*tmc_pages->daddrs),
				    GFP_KERNEL);
	if (!tmc_pages->daddrs)
		return -ENOMEM;
	tmc_pages->pages = kcalloc(nr_pages, sizeof(*tmc_pages->pages),
				   GFP_KERNEL);
	if (!tmc_pages->pages) {
		kfree(tmc_pages->daddrs);
		tmc_pages->daddrs = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nr_pages; i++) {
		if (pages && pages[i]) {
			page = virt_to_page(pages[i]);
			/* Hold a refcount on the page */
			get_page(page);
		} else {
			page = alloc_pages_node(node,
						GFP_KERNEL | __GFP_ZERO, 0);
			/* Bail out if the allocation failed */
			if (!page)
				goto err;
		}
		paddr = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
		if (dma_mapping_error(dev, paddr))
			goto err;
		tmc_pages->daddrs[i] = paddr;
		tmc_pages->pages[i] = page;
	}
	return 0;
err:
	tmc_pages_free(tmc_pages, dev, dir);
	return -ENOMEM;
}

static inline long
tmc_sg_get_data_page_offset(struct tmc_sg_table *sg_table, dma_addr_t addr)
{
	return tmc_pages_get_offset(&sg_table->data_pages, addr);
}

static inline void tmc_free_table_pages(struct tmc_sg_table *sg_table)
{
	if (sg_table->table_vaddr)
		vunmap(sg_table->table_vaddr);
	tmc_pages_free(&sg_table->table_pages, sg_table->dev, DMA_TO_DEVICE);
}

static void tmc_free_data_pages(struct tmc_sg_table *sg_table)
{
	if (sg_table->data_vaddr)
		vunmap(sg_table->data_vaddr);
	tmc_pages_free(&sg_table->data_pages, sg_table->dev, DMA_FROM_DEVICE);
}

void tmc_free_sg_table(struct tmc_sg_table *sg_table)
{
	tmc_free_table_pages(sg_table);
	tmc_free_data_pages(sg_table);
}

/*
 * Alloc pages for the table. Since this will be used by the device,
 * allocate the pages closer to the device (i.e, dev_to_node(dev)
 * rather than the CPU node).
 */
static int tmc_alloc_table_pages(struct tmc_sg_table *sg_table)
{
	int rc;
	struct tmc_pages *table_pages = &sg_table->table_pages;

	rc = tmc_pages_alloc(table_pages, sg_table->dev,
			     dev_to_node(sg_table->dev),
			     DMA_TO_DEVICE, NULL);
	if (rc)
		return rc;
	sg_table->table_vaddr = vmap(table_pages->pages,
				     table_pages->nr_pages,
				     VM_MAP,
				     PAGE_KERNEL);
	if (!sg_table->table_vaddr)
		rc = -ENOMEM;
	else
		sg_table->table_daddr = table_pages->daddrs[0];
	return rc;
}

static int tmc_alloc_data_pages(struct tmc_sg_table *sg_table, void **pages)
{
	int rc;

	/* Allocate data pages on the node requested by the caller */
	rc = tmc_pages_alloc(&sg_table->data_pages,
			     sg_table->dev, sg_table->node,
			     DMA_FROM_DEVICE, pages);
	if (!rc) {
		sg_table->data_vaddr = vmap(sg_table->data_pages.pages,
					    sg_table->data_pages.nr_pages,
					    VM_MAP,
					    PAGE_KERNEL);
		if (!sg_table->data_vaddr)
			rc = -ENOMEM;
	}
	return rc;
}

/*
 * tmc_alloc_sg_table: Allocate and setup dma pages for the TMC SG table
 * and data buffers. TMC writes to the data buffers and reads from the SG
 * Table pages.
 *
 * @dev		- Device to which page should be DMA mapped.
 * @node	- Numa node for mem allocations
 * @nr_tpages	- Number of pages for the table entries.
 * @nr_dpages	- Number of pages for Data buffer.
 * @pages	- Optional list of virtual address of pages.
 */
struct tmc_sg_table *tmc_alloc_sg_table(struct device *dev,
					int node,
					int nr_tpages,
					int nr_dpages,
					void **pages)
{
	long rc;
	struct tmc_sg_table *sg_table;

	sg_table = kzalloc(sizeof(*sg_table), GFP_KERNEL);
	if (!sg_table)
		return ERR_PTR(-ENOMEM);
	sg_table->data_pages.nr_pages = nr_dpages;
	sg_table->table_pages.nr_pages = nr_tpages;
	sg_table->node = node;
	sg_table->dev = dev;

	rc = tmc_alloc_data_pages(sg_table, pages);
	if (!rc)
		rc = tmc_alloc_table_pages(sg_table);
	if (rc) {
		tmc_free_sg_table(sg_table);
		kfree(sg_table);
		return ERR_PTR(rc);
	}

	return sg_table;
}
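
/*
 * Illustrative sketch (hypothetical caller, not part of this driver) of
 * how the helpers above compose: allocate a table with one table page
 * and four data pages near @dev, fill the table through the vmap'ed
 * view, push it out to memory for the device, and tear it down again:
 *
 *	sg_table = tmc_alloc_sg_table(dev, dev_to_node(dev), 1, 4, NULL);
 *	if (IS_ERR(sg_table))
 *		return PTR_ERR(sg_table);
 *	... write entries via sg_table->table_vaddr ...
 *	tmc_sg_table_sync_table(sg_table);	- make entries visible to the TMC
 *	... hand sg_table->table_daddr to the hardware ...
 *	tmc_free_sg_table(sg_table);
 *	kfree(sg_table);
 */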

/*
 * tmc_sg_table_sync_data_range: Sync the data buffer written
 * by the device from @offset up to @size bytes.
 */
void tmc_sg_table_sync_data_range(struct tmc_sg_table *table,
				  u64 offset, u64 size)
{
	int i, index, start;
	int npages = DIV_ROUND_UP(size, PAGE_SIZE);
	struct device *dev = table->dev;
	struct tmc_pages *data = &table->data_pages;

	start = offset >> PAGE_SHIFT;
	for (i = start; i < (start + npages); i++) {
		index = i % data->nr_pages;
		dma_sync_single_for_cpu(dev, data->daddrs[index],
					PAGE_SIZE, DMA_FROM_DEVICE);
	}
}

/* tmc_sg_table_sync_table: Sync the page table */
void tmc_sg_table_sync_table(struct tmc_sg_table *sg_table)
{
	int i;
	struct device *dev = sg_table->dev;
	struct tmc_pages *table_pages = &sg_table->table_pages;

	for (i = 0; i < table_pages->nr_pages; i++)
		dma_sync_single_for_device(dev, table_pages->daddrs[i],
					   PAGE_SIZE, DMA_TO_DEVICE);
}

/*
 * tmc_sg_table_get_data: Get the buffer pointer for data @offset
 * in the SG buffer. The @bufpp is updated to point to the buffer.
 * Returns :
 *	the length of linear data available at @offset.
 *	or
 *	<= 0 if no data is available.
 */
ssize_t tmc_sg_table_get_data(struct tmc_sg_table *sg_table,
			      u64 offset, size_t len, char **bufpp)
{
	size_t size;
	int pg_idx = offset >> PAGE_SHIFT;
	int pg_offset = offset & (PAGE_SIZE - 1);
	struct tmc_pages *data_pages = &sg_table->data_pages;

	size = tmc_sg_table_buf_size(sg_table);
	if (offset >= size)
		return -EINVAL;

	/* Make sure we don't go beyond the end */
	len = (len < (size - offset)) ? len : size - offset;
	/* Respect the page boundaries */
	len = (len < (PAGE_SIZE - pg_offset)) ? len : (PAGE_SIZE - pg_offset);
	if (len > 0)
		*bufpp = page_address(data_pages->pages[pg_idx]) + pg_offset;
	return len;
}
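
/*
 * Example (illustrative): with a 4K PAGE_SIZE, a request for 16 bytes at
 * @offset = 4094 crosses a page boundary, so tmc_sg_table_get_data()
 * returns only the 2 bytes left in that page (pg_idx = 0, pg_offset =
 * 4094). The caller is expected to issue a follow-up call at offset
 * 4096 for the remaining bytes.
 */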

#ifdef ETR_SG_DEBUG
/* Map a dma address to virtual address */
static unsigned long
tmc_sg_daddr_to_vaddr(struct tmc_sg_table *sg_table,
		      dma_addr_t addr, bool table)
{
	long offset;
	unsigned long base;
	struct tmc_pages *tmc_pages;

	if (table) {
		tmc_pages = &sg_table->table_pages;
		base = (unsigned long)sg_table->table_vaddr;
	} else {
		tmc_pages = &sg_table->data_pages;
		base = (unsigned long)sg_table->data_vaddr;
	}

	offset = tmc_pages_get_offset(tmc_pages, addr);
	if (offset < 0)
		return 0;
	return base + offset;
}

/* Dump the given sg_table */
static void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table)
{
	sgte_t *ptr;
	int i = 0;
	dma_addr_t addr;
	struct tmc_sg_table *sg_table = etr_table->sg_table;

	ptr = (sgte_t *)tmc_sg_daddr_to_vaddr(sg_table,
					      etr_table->hwaddr, true);
	while (ptr) {
		addr = ETR_SG_ADDR(*ptr);
		switch (ETR_SG_ET(*ptr)) {
		case ETR_SG_ET_NORMAL:
			dev_dbg(sg_table->dev,
				"%05d: %p\t:[N] 0x%llx\n", i, ptr, addr);
			ptr++;
			break;
		case ETR_SG_ET_LINK:
			dev_dbg(sg_table->dev,
				"%05d: *** %p\t:{L} 0x%llx ***\n",
				i, ptr, addr);
			ptr = (sgte_t *)tmc_sg_daddr_to_vaddr(sg_table,
							      addr, true);
			break;
		case ETR_SG_ET_LAST:
			dev_dbg(sg_table->dev,
				"%05d: ### %p\t:[L] 0x%llx ###\n",
				i, ptr, addr);
			return;
		default:
			dev_dbg(sg_table->dev,
				"%05d: xxx %p\t:[INVALID] 0x%llx xxx\n",
				i, ptr, addr);
			return;
		}
		i++;
	}
	dev_dbg(sg_table->dev, "******* End of Table *****\n");
}
#else
static inline void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table) {}
#endif

/*
 * Populate the SG Table page table entries from table/data
 * pages allocated. Each system page (data or table) holds
 * ETR_SG_PAGES_PER_SYSPAGE SG pages. So we keep track of the indices
 * of the tables in each system page and move the pointers accordingly.
 */
#define INC_IDX_ROUND(idx, size) ((idx) = ((idx) + 1) % (size))
static void tmc_etr_sg_table_populate(struct etr_sg_table *etr_table)
{
	dma_addr_t paddr;
	int i, type, nr_entries;
	int tpidx = 0; /* index to the current system table_page */
	int sgtidx = 0;	/* index to the sg_table within the current syspage */
	int sgtentry = 0; /* the entry within the sg_table */
	int dpidx = 0; /* index to the current system data_page */
	int spidx = 0; /* index to the SG page within the current data page */
	sgte_t *ptr; /* pointer to the table entry to fill */
	struct tmc_sg_table *sg_table = etr_table->sg_table;
	dma_addr_t *table_daddrs = sg_table->table_pages.daddrs;
	dma_addr_t *data_daddrs = sg_table->data_pages.daddrs;

	nr_entries = tmc_etr_sg_table_entries(sg_table->data_pages.nr_pages);
	/*
	 * Use the contiguous virtual address of the table to update entries.
	 */
	ptr = sg_table->table_vaddr;
	/*
	 * Fill all the entries, except the last entry to avoid special
	 * checks within the loop.
	 */
	for (i = 0; i < nr_entries - 1; i++) {
		if (sgtentry == ETR_SG_PTRS_PER_PAGE - 1) {
			/*
			 * Last entry in a sg_table page is a link address to
			 * the next table page. If this sg_table is the last
			 * one in the system page, it links to the first
			 * sg_table in the next system page. Otherwise, it
			 * links to the next sg_table page within the system
			 * page.
			 */
			if (sgtidx == ETR_SG_PAGES_PER_SYSPAGE - 1) {
				paddr = table_daddrs[tpidx + 1];
			} else {
				paddr = table_daddrs[tpidx] +
					(ETR_SG_PAGE_SIZE * (sgtidx + 1));
			}
			type = ETR_SG_ET_LINK;
		} else {
			/*
			 * Update the indices to the data_pages to point to the
			 * next sg_page in the data buffer.
			 */
			type = ETR_SG_ET_NORMAL;
			paddr = data_daddrs[dpidx] + spidx * ETR_SG_PAGE_SIZE;
			if (!INC_IDX_ROUND(spidx, ETR_SG_PAGES_PER_SYSPAGE))
				dpidx++;
		}
		*ptr++ = ETR_SG_ENTRY(paddr, type);
		/*
		 * Move to the next table pointer, moving the table page index
		 * if necessary
		 */
		if (!INC_IDX_ROUND(sgtentry, ETR_SG_PTRS_PER_PAGE)) {
			if (!INC_IDX_ROUND(sgtidx, ETR_SG_PAGES_PER_SYSPAGE))
				tpidx++;
		}
	}

	/* Set up the last entry, which is always a data pointer */
	paddr = data_daddrs[dpidx] + spidx * ETR_SG_PAGE_SIZE;
	*ptr++ = ETR_SG_ENTRY(paddr, ETR_SG_ET_LAST);
}

/*
 * tmc_init_etr_sg_table: Allocate a TMC ETR SG table, data buffer of @size and
 * populate the table.
 *
 * @dev		- Device pointer for the TMC
 * @node	- NUMA node where the memory should be allocated
 * @size	- Total size of the data buffer
 * @pages	- Optional list of page virtual address
 */
static struct etr_sg_table *
tmc_init_etr_sg_table(struct device *dev, int node,
		      unsigned long size, void **pages)
{
	int nr_entries, nr_tpages;
	int nr_dpages = size >> PAGE_SHIFT;
	struct tmc_sg_table *sg_table;
	struct etr_sg_table *etr_table;

	etr_table = kzalloc(sizeof(*etr_table), GFP_KERNEL);
	if (!etr_table)
		return ERR_PTR(-ENOMEM);
	nr_entries = tmc_etr_sg_table_entries(nr_dpages);
	nr_tpages = DIV_ROUND_UP(nr_entries, ETR_SG_PTRS_PER_SYSPAGE);

	sg_table = tmc_alloc_sg_table(dev, node, nr_tpages, nr_dpages, pages);
	if (IS_ERR(sg_table)) {
		kfree(etr_table);
		return ERR_CAST(sg_table);
	}

	etr_table->sg_table = sg_table;
	/* TMC should use table base address for DBA */
	etr_table->hwaddr = sg_table->table_daddr;
	tmc_etr_sg_table_populate(etr_table);
	/* Sync the table pages for the HW */
	tmc_sg_table_sync_table(sg_table);
	tmc_etr_sg_table_dump(etr_table);

	return etr_table;
}

/*
 * tmc_etr_alloc_flat_buf: Allocate a contiguous DMA buffer.
 */
static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata,
				  struct etr_buf *etr_buf, int node,
				  void **pages)
{
	struct etr_flat_buf *flat_buf;

	/* We cannot reuse existing pages for flat buf */
	if (pages)
		return -EINVAL;

	flat_buf = kzalloc(sizeof(*flat_buf), GFP_KERNEL);
	if (!flat_buf)
		return -ENOMEM;

	flat_buf->vaddr = dma_alloc_coherent(drvdata->dev, etr_buf->size,
					     &flat_buf->daddr, GFP_KERNEL);
	if (!flat_buf->vaddr) {
		kfree(flat_buf);
		return -ENOMEM;
	}

	flat_buf->size = etr_buf->size;
	flat_buf->dev = drvdata->dev;
	etr_buf->hwaddr = flat_buf->daddr;
	etr_buf->mode = ETR_MODE_FLAT;
	etr_buf->private = flat_buf;
	return 0;
}

static void tmc_etr_free_flat_buf(struct etr_buf *etr_buf)
{
	struct etr_flat_buf *flat_buf = etr_buf->private;

	if (flat_buf && flat_buf->daddr)
		dma_free_coherent(flat_buf->dev, flat_buf->size,
				  flat_buf->vaddr, flat_buf->daddr);
	kfree(flat_buf);
}

static void tmc_etr_sync_flat_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
{
	/*
	 * Adjust the buffer to point to the beginning of the trace data
	 * and update the available trace data.
	 */
	etr_buf->offset = rrp - etr_buf->hwaddr;
	if (etr_buf->full)
		etr_buf->len = etr_buf->size;
	else
		etr_buf->len = rwp - rrp;
}
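
/*
 * Example (illustrative values): for a 1MB flat buffer at hwaddr
 * 0x80000000, a read of RRP = 0x80000100 and RWP = 0x80000500 with the
 * FULL bit clear yields offset = 0x100 and len = 0x400, i.e. 1KB of
 * trace data starting 256 bytes into the buffer. If the FULL bit is
 * set, the whole 1MB is considered valid and offset marks the oldest
 * data.
 */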

static ssize_t tmc_etr_get_data_flat_buf(struct etr_buf *etr_buf,
					 u64 offset, size_t len, char **bufpp)
{
	struct etr_flat_buf *flat_buf = etr_buf->private;

	*bufpp = (char *)flat_buf->vaddr + offset;
	/*
	 * tmc_etr_buf_get_data already adjusts the length to handle
	 * buffer wrapping around.
	 */
	return len;
}

static const struct etr_buf_operations etr_flat_buf_ops = {
	.alloc = tmc_etr_alloc_flat_buf,
	.free = tmc_etr_free_flat_buf,
	.sync = tmc_etr_sync_flat_buf,
	.get_data = tmc_etr_get_data_flat_buf,
};

/*
 * tmc_etr_alloc_sg_buf: Allocate an SG buffer for @etr_buf and set up
 * the parameters appropriately.
 */
static int tmc_etr_alloc_sg_buf(struct tmc_drvdata *drvdata,
				struct etr_buf *etr_buf, int node,
				void **pages)
{
	struct etr_sg_table *etr_table;

	etr_table = tmc_init_etr_sg_table(drvdata->dev, node,
					  etr_buf->size, pages);
	if (IS_ERR(etr_table))
		return -ENOMEM;
	etr_buf->hwaddr = etr_table->hwaddr;
	etr_buf->mode = ETR_MODE_ETR_SG;
	etr_buf->private = etr_table;
	return 0;
}

static void tmc_etr_free_sg_buf(struct etr_buf *etr_buf)
{
	struct etr_sg_table *etr_table = etr_buf->private;

	if (etr_table) {
		tmc_free_sg_table(etr_table->sg_table);
		kfree(etr_table);
	}
}

static ssize_t tmc_etr_get_data_sg_buf(struct etr_buf *etr_buf, u64 offset,
				       size_t len, char **bufpp)
{
	struct etr_sg_table *etr_table = etr_buf->private;

	return tmc_sg_table_get_data(etr_table->sg_table, offset, len, bufpp);
}

static void tmc_etr_sync_sg_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
{
	long r_offset, w_offset;
	struct etr_sg_table *etr_table = etr_buf->private;
	struct tmc_sg_table *table = etr_table->sg_table;

	/* Convert hw address to offset in the buffer */
	r_offset = tmc_sg_get_data_page_offset(table, rrp);
	if (r_offset < 0) {
		dev_warn(table->dev,
			 "Unable to map RRP %llx to offset\n", rrp);
		etr_buf->len = 0;
		return;
	}

	w_offset = tmc_sg_get_data_page_offset(table, rwp);
	if (w_offset < 0) {
		dev_warn(table->dev,
			 "Unable to map RWP %llx to offset\n", rwp);
		etr_buf->len = 0;
		return;
	}

	etr_buf->offset = r_offset;
	if (etr_buf->full)
		etr_buf->len = etr_buf->size;
	else
		etr_buf->len = ((w_offset < r_offset) ? etr_buf->size : 0) +
				w_offset - r_offset;
	tmc_sg_table_sync_data_range(table, r_offset, etr_buf->len);
}
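
/*
 * Example (illustrative): in a 1MB SG buffer that has not filled up,
 * r_offset = 0xF0000 and w_offset = 0x10000 means the write pointer
 * has wrapped around, so len = 0x100000 + 0x10000 - 0xF0000 = 0x20000
 * (128KB), starting at offset 0xF0000 and continuing from the start of
 * the buffer.
 */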

static const struct etr_buf_operations etr_sg_buf_ops = {
	.alloc = tmc_etr_alloc_sg_buf,
	.free = tmc_etr_free_sg_buf,
	.sync = tmc_etr_sync_sg_buf,
	.get_data = tmc_etr_get_data_sg_buf,
};

/*
 * TMC ETR could be connected to a CATU device, which can provide address
 * translation service. This is represented by the Output port of the TMC
 * (ETR) connected to the input port of the CATU.
 *
 * Returns	: coresight_device ptr for the CATU device if a CATU is found.
 *		: NULL otherwise.
 */
struct coresight_device *
tmc_etr_get_catu_device(struct tmc_drvdata *drvdata)
{
	int i;
	struct coresight_device *tmp, *etr = drvdata->csdev;

	if (!IS_ENABLED(CONFIG_CORESIGHT_CATU))
		return NULL;

	for (i = 0; i < etr->nr_outport; i++) {
		tmp = etr->conns[i].child_dev;
		if (tmp && coresight_is_catu_device(tmp))
			return tmp;
	}

	return NULL;
}

static inline int tmc_etr_enable_catu(struct tmc_drvdata *drvdata,
				      struct etr_buf *etr_buf)
{
	struct coresight_device *catu = tmc_etr_get_catu_device(drvdata);

	if (catu && helper_ops(catu)->enable)
		return helper_ops(catu)->enable(catu, etr_buf);
	return 0;
}

static inline void tmc_etr_disable_catu(struct tmc_drvdata *drvdata)
{
	struct coresight_device *catu = tmc_etr_get_catu_device(drvdata);

	if (catu && helper_ops(catu)->disable)
		helper_ops(catu)->disable(catu, drvdata->etr_buf);
}

static const struct etr_buf_operations *etr_buf_ops[] = {
	[ETR_MODE_FLAT] = &etr_flat_buf_ops,
	[ETR_MODE_ETR_SG] = &etr_sg_buf_ops,
	[ETR_MODE_CATU] = &etr_catu_buf_ops,
};

static inline int tmc_etr_mode_alloc_buf(int mode,
					 struct tmc_drvdata *drvdata,
					 struct etr_buf *etr_buf, int node,
					 void **pages)
{
	int rc = -EINVAL;

	switch (mode) {
	case ETR_MODE_FLAT:
	case ETR_MODE_ETR_SG:
	case ETR_MODE_CATU:
		if (etr_buf_ops[mode]->alloc)
			rc = etr_buf_ops[mode]->alloc(drvdata, etr_buf,
						      node, pages);
		if (!rc)
			etr_buf->ops = etr_buf_ops[mode];
		return rc;
	default:
		return -EINVAL;
	}
}

/*
 * tmc_alloc_etr_buf: Allocate a buffer used by the ETR.
 * @drvdata	: ETR device details.
 * @size	: size of the requested buffer.
 * @flags	: Required properties for the buffer.
 * @node	: Node for memory allocations.
 * @pages	: An optional list of pages.
 */
static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata,
					 ssize_t size, int flags,
					 int node, void **pages)
{
	int rc = -ENOMEM;
	bool has_etr_sg, has_iommu;
	bool has_sg, has_catu;
	struct etr_buf *etr_buf;

	has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG);
	has_iommu = iommu_get_domain_for_dev(drvdata->dev);
	has_catu = !!tmc_etr_get_catu_device(drvdata);

	has_sg = has_catu || has_etr_sg;

	etr_buf = kzalloc(sizeof(*etr_buf), GFP_KERNEL);
	if (!etr_buf)
		return ERR_PTR(-ENOMEM);

	etr_buf->size = size;

	/*
	 * If we have to use an existing list of pages, we cannot reliably
	 * use a contiguous DMA memory (even if we have an IOMMU). Otherwise,
	 * we use the contiguous DMA memory if at least one of the following
	 * conditions is true:
	 * a) The ETR cannot use Scatter-Gather.
	 * b) We have a backing IOMMU.
	 * c) The requested memory size is smaller (< 1M).
	 *
	 * Fall back to the other available mechanisms otherwise.
	 */
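
	/*
	 * Selection order, summarised (derived from the checks below):
	 *	pages provided		-> ETR-SG, then CATU (never FLAT)
	 *	no SG and no CATU	-> FLAT only
	 *	IOMMU or size < 1M	-> FLAT first, then ETR-SG, then CATU
	 *	otherwise		-> ETR-SG, then CATU
	 */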

	if (!pages &&
	    (!has_sg || has_iommu || size < SZ_1M))
		rc = tmc_etr_mode_alloc_buf(ETR_MODE_FLAT, drvdata,
					    etr_buf, node, pages);
	if (rc && has_etr_sg)
		rc = tmc_etr_mode_alloc_buf(ETR_MODE_ETR_SG, drvdata,
					    etr_buf, node, pages);
	if (rc && has_catu)
		rc = tmc_etr_mode_alloc_buf(ETR_MODE_CATU, drvdata,
					    etr_buf, node, pages);
	if (rc) {
		kfree(etr_buf);
		return ERR_PTR(rc);
	}

	dev_dbg(drvdata->dev, "allocated buffer of size %ldKB in mode %d\n",
		(unsigned long)size >> 10, etr_buf->mode);
	return etr_buf;
}

static void tmc_free_etr_buf(struct etr_buf *etr_buf)
{
	WARN_ON(!etr_buf->ops || !etr_buf->ops->free);
	etr_buf->ops->free(etr_buf);
	kfree(etr_buf);
}

/*
 * tmc_etr_buf_get_data: Get a pointer to the trace data at @offset,
 * with a maximum of @len bytes.
 * Returns: The size of the linear data available at @offset, with *bufpp
 * updated to point to the buffer.
 */
static ssize_t tmc_etr_buf_get_data(struct etr_buf *etr_buf,
				    u64 offset, size_t len, char **bufpp)
{
	/* Adjust the length to limit this transaction to end of buffer */
	len = (len < (etr_buf->size - offset)) ? len : etr_buf->size - offset;

	return etr_buf->ops->get_data(etr_buf, (u64)offset, len, bufpp);
}

static inline s64
tmc_etr_buf_insert_barrier_packet(struct etr_buf *etr_buf, u64 offset)
{
	ssize_t len;
	char *bufp;

	len = tmc_etr_buf_get_data(etr_buf, offset,
				   CORESIGHT_BARRIER_PKT_SIZE, &bufp);
	if (WARN_ON(len < CORESIGHT_BARRIER_PKT_SIZE))
		return -EINVAL;
	coresight_insert_barrier_packet(bufp);
	return offset + CORESIGHT_BARRIER_PKT_SIZE;
}

/*
 * tmc_sync_etr_buf: Sync the trace buffer availability with drvdata.
 * Makes sure the trace data is synced to the memory for consumption.
 * @etr_buf->offset will hold the offset to the beginning of the trace data
 * within the buffer, with @etr_buf->len bytes to consume.
 */
static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata)
{
	struct etr_buf *etr_buf = drvdata->etr_buf;
	u64 rrp, rwp;
	u32 status;

	rrp = tmc_read_rrp(drvdata);
	rwp = tmc_read_rwp(drvdata);
	status = readl_relaxed(drvdata->base + TMC_STS);
	etr_buf->full = status & TMC_STS_FULL;

	WARN_ON(!etr_buf->ops || !etr_buf->ops->sync);

	etr_buf->ops->sync(etr_buf, rrp, rwp);

	/* Insert barrier packets at the beginning, if there was an overflow */
	if (etr_buf->full)
		tmc_etr_buf_insert_barrier_packet(etr_buf, etr_buf->offset);
}

static void __tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
	u32 axictl, sts;
	struct etr_buf *etr_buf = drvdata->etr_buf;

	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(etr_buf->size / 4, drvdata->base + TMC_RSZ);
	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);

	axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
	axictl &= ~TMC_AXICTL_CLEAR_MASK;
	axictl |= (TMC_AXICTL_PROT_CTL_B1 | TMC_AXICTL_WR_BURST_16);
	axictl |= TMC_AXICTL_AXCACHE_OS;

	if (tmc_etr_has_cap(drvdata, TMC_ETR_AXI_ARCACHE)) {
		axictl &= ~TMC_AXICTL_ARCACHE_MASK;
		axictl |= TMC_AXICTL_ARCACHE_OS;
	}

	if (etr_buf->mode == ETR_MODE_ETR_SG)
		axictl |= TMC_AXICTL_SCT_GAT_MODE;

	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
	tmc_write_dba(drvdata, etr_buf->hwaddr);
	/*
	 * If the TMC pointers must be programmed before the session,
	 * we have to set them properly (i.e, RRP/RWP to base address and
	 * STS to "not full").
	 */
	if (tmc_etr_has_cap(drvdata, TMC_ETR_SAVE_RESTORE)) {
		tmc_write_rrp(drvdata, etr_buf->hwaddr);
		tmc_write_rwp(drvdata, etr_buf->hwaddr);
		sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL;
		writel_relaxed(sts, drvdata->base + TMC_STS);
	}

	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static int tmc_etr_enable_hw(struct tmc_drvdata *drvdata,
			     struct etr_buf *etr_buf)
{
	int rc;

	/* Callers should provide an appropriate buffer for use */
	if (WARN_ON(!etr_buf))
		return -EINVAL;

	if ((etr_buf->mode == ETR_MODE_ETR_SG) &&
	    WARN_ON(!tmc_etr_has_cap(drvdata, TMC_ETR_SG)))
		return -EINVAL;

	if (WARN_ON(drvdata->etr_buf))
		return -EBUSY;

	/*
	 * If this ETR is connected to a CATU, enable it before we turn
	 * this on.
	 */
	rc = tmc_etr_enable_catu(drvdata, etr_buf);
	if (rc)
		return rc;
	rc = coresight_claim_device(drvdata->base);
	if (!rc) {
		drvdata->etr_buf = etr_buf;
		__tmc_etr_enable_hw(drvdata);
	}

	return rc;
}

/*
 * Return the available trace data in the buffer (starts at etr_buf->offset,
 * limited by etr_buf->len) from @pos, with a maximum limit of @len,
 * also updating the @bufpp on where to find it. Since the trace data
 * can start anywhere in the buffer, depending on the RRP, we adjust the
 * @len returned to handle buffer wrapping around.
 *
 * We are protected here by drvdata->reading != 0, which ensures the
 * sysfs_buf stays alive.
 */
ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
				loff_t pos, size_t len, char **bufpp)
{
	s64 offset;
	ssize_t actual = len;
	struct etr_buf *etr_buf = drvdata->sysfs_buf;

	if (pos + actual > etr_buf->len)
		actual = etr_buf->len - pos;
	if (actual <= 0)
		return actual;

	/* Compute the offset from which we read the data */
	offset = etr_buf->offset + pos;
	if (offset >= etr_buf->size)
		offset -= etr_buf->size;
	return tmc_etr_buf_get_data(etr_buf, offset, actual, bufpp);
}
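
/*
 * Example (illustrative): with etr_buf->size = 1MB, etr_buf->offset =
 * 0xE0000 and etr_buf->len = 1MB (buffer wrapped), a read at pos =
 * 0x30000 starts at offset 0xE0000 + 0x30000 - 0x100000 = 0x10000.
 * tmc_etr_buf_get_data() then caps the returned length at the end of
 * the buffer, so the caller keeps calling with a growing @pos until
 * etr_buf->len bytes have been consumed.
 */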

static struct etr_buf *
tmc_etr_setup_sysfs_buf(struct tmc_drvdata *drvdata)
{
	return tmc_alloc_etr_buf(drvdata, drvdata->size,
				 0, cpu_to_node(0), NULL);
}

static void
tmc_etr_free_sysfs_buf(struct etr_buf *buf)
{
	if (buf)
		tmc_free_etr_buf(buf);
}

static void tmc_etr_sync_sysfs_buf(struct tmc_drvdata *drvdata)
{
	struct etr_buf *etr_buf = drvdata->etr_buf;

	if (WARN_ON(drvdata->sysfs_buf != etr_buf)) {
		tmc_etr_free_sysfs_buf(drvdata->sysfs_buf);
		drvdata->sysfs_buf = NULL;
	} else {
		tmc_sync_etr_buf(drvdata);
	}
}

static void __tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etr_sync_sysfs_buf(drvdata);

	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
	__tmc_etr_disable_hw(drvdata);
	/* Disable CATU device if this ETR is connected to one */
	tmc_etr_disable_catu(drvdata);
	coresight_disclaim_device(drvdata->base);
	/* Reset the ETR buf used by hardware */
	drvdata->etr_buf = NULL;
}

static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct etr_buf *sysfs_buf = NULL, *new_buf = NULL, *free_buf = NULL;

	/*
	 * If we are enabling the ETR from disabled state, we need to make
	 * sure we have a buffer with the right size. The etr_buf is not reset
	 * immediately after we stop the tracing in SYSFS mode as we wait for
	 * the user to collect the data. We may be able to reuse the existing
	 * buffer, provided the size matches. Any allocation has to be done
	 * with the lock released.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
	if (!sysfs_buf || (sysfs_buf->size != drvdata->size)) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/* Allocate memory with the locks released */
		free_buf = new_buf = tmc_etr_setup_sysfs_buf(drvdata);
		if (IS_ERR(new_buf))
			return PTR_ERR(new_buf);

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading || drvdata->mode == CS_MODE_PERF) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink. Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched, even if the buffer size has changed.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		goto out;

	/*
	 * If we don't have a buffer or it doesn't match the requested size,
	 * use the buffer allocated above. Otherwise reuse the existing buffer.
	 */
	sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
	if (!sysfs_buf || (new_buf && sysfs_buf->size != new_buf->size)) {
		free_buf = sysfs_buf;
		drvdata->sysfs_buf = new_buf;
	}

	ret = tmc_etr_enable_hw(drvdata, drvdata->sysfs_buf);
	if (!ret)
		drvdata->mode = CS_MODE_SYSFS;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (free_buf)
		tmc_etr_free_sysfs_buf(free_buf);

	if (!ret)
		dev_dbg(drvdata->dev, "TMC-ETR enabled\n");

	return ret;
}

/*
 * tmc_etr_setup_perf_buf: Allocate ETR buffer for use by perf.
 * The size of the hardware buffer is dependent on the size configured
 * via sysfs and the perf ring buffer size. We prefer to allocate the
 * largest possible size, scaling down the size by half until it
 * reaches a minimum limit (1M), beyond which we give up.
 */
static struct etr_perf_buffer *
tmc_etr_setup_perf_buf(struct tmc_drvdata *drvdata, int node, int nr_pages,
		       void **pages, bool snapshot)
{
	struct etr_buf *etr_buf;
	struct etr_perf_buffer *etr_perf;
	unsigned long size;

	etr_perf = kzalloc_node(sizeof(*etr_perf), GFP_KERNEL, node);
	if (!etr_perf)
		return ERR_PTR(-ENOMEM);

	/*
	 * Try to match the perf ring buffer size if it is larger
	 * than the size requested via sysfs.
	 */
	if ((nr_pages << PAGE_SHIFT) > drvdata->size) {
		etr_buf = tmc_alloc_etr_buf(drvdata, (nr_pages << PAGE_SHIFT),
					    0, node, NULL);
		if (!IS_ERR(etr_buf))
			goto done;
	}

	/*
	 * Else switch to the configured size for this ETR
	 * and scale down until we hit the minimum limit.
	 */
	size = drvdata->size;
	do {
		etr_buf = tmc_alloc_etr_buf(drvdata, size, 0, node, NULL);
		if (!IS_ERR(etr_buf))
			goto done;
		size /= 2;
	} while (size >= TMC_ETR_PERF_MIN_BUF_SIZE);

	kfree(etr_perf);
	return ERR_PTR(-ENOMEM);

done:
	etr_perf->etr_buf = etr_buf;
	return etr_perf;
}
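
/*
 * Example (illustrative): with a sysfs-configured size of 4MB and a
 * 16MB perf ring buffer, the allocation is attempted at 16MB first;
 * if that fails, it falls back to 4MB, then 2MB, then 1MB
 * (TMC_ETR_PERF_MIN_BUF_SIZE), and finally gives up with -ENOMEM.
 */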

static void *tmc_alloc_etr_buffer(struct coresight_device *csdev,
				  int cpu, void **pages, int nr_pages,
				  bool snapshot)
{
	struct etr_perf_buffer *etr_perf;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (cpu == -1)
		cpu = smp_processor_id();

	etr_perf = tmc_etr_setup_perf_buf(drvdata, cpu_to_node(cpu),
					  nr_pages, pages, snapshot);
	if (IS_ERR(etr_perf)) {
		dev_dbg(drvdata->dev, "Unable to allocate ETR buffer\n");
		return NULL;
	}

	etr_perf->snapshot = snapshot;
	etr_perf->nr_pages = nr_pages;
	etr_perf->pages = pages;

	return etr_perf;
}

static void tmc_free_etr_buffer(void *config)
{
	struct etr_perf_buffer *etr_perf = config;

	if (etr_perf->etr_buf)
		tmc_free_etr_buf(etr_perf->etr_buf);
	kfree(etr_perf);
}

/*
 * tmc_etr_sync_perf_buffer: Copy the actual trace data from the hardware
 * buffer to the perf ring buffer.
 */
static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf)
{
	long bytes, to_copy;
	long pg_idx, pg_offset, src_offset;
	unsigned long head = etr_perf->head;
	char **dst_pages, *src_buf;
	struct etr_buf *etr_buf = etr_perf->etr_buf;

	pg_idx = head >> PAGE_SHIFT;
	pg_offset = head & (PAGE_SIZE - 1);
	dst_pages = (char **)etr_perf->pages;
	src_offset = etr_buf->offset;
	to_copy = etr_buf->len;

	while (to_copy > 0) {
		/*
		 * In one iteration, we can copy at most the minimum of:
		 * 1) what is available in the source buffer before it
		 *    wraps around, and
		 * 2) what is available in the destination page.
		 */
		bytes = tmc_etr_buf_get_data(etr_buf, src_offset, to_copy,
					     &src_buf);
		if (WARN_ON_ONCE(bytes <= 0))
			break;
		bytes = min(bytes, (long)(PAGE_SIZE - pg_offset));

		memcpy(dst_pages[pg_idx] + pg_offset, src_buf, bytes);

		to_copy -= bytes;

		/* Move destination pointers */
		pg_offset += bytes;
		if (pg_offset == PAGE_SIZE) {
			pg_offset = 0;
			if (++pg_idx == etr_perf->nr_pages)
				pg_idx = 0;
		}

		/* Move source pointers */
		src_offset += bytes;
		if (src_offset >= etr_buf->size)
			src_offset -= etr_buf->size;
	}
}
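
/*
 * Example (illustrative): with a 4K PAGE_SIZE and head = 0x1F00, the
 * first copy targets page 1 at offset 0xF00 and is therefore capped at
 * 0x100 bytes, even if more source data is available; the next
 * iteration continues at the start of page 2 (or wraps back to page 0
 * once pg_idx reaches nr_pages).
 */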

/*
 * tmc_update_etr_buffer : Update the perf ring buffer with the
 * available trace data. We use software double buffering at the moment.
 *
 * TODO: Add support for reusing the perf ring buffer.
 */
static unsigned long
tmc_update_etr_buffer(struct coresight_device *csdev,
		      struct perf_output_handle *handle,
		      void *config)
{
	bool lost = false;
	unsigned long flags, size = 0;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct etr_perf_buffer *etr_perf = config;
	struct etr_buf *etr_buf = etr_perf->etr_buf;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (WARN_ON(drvdata->perf_data != etr_perf)) {
		lost = true;
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		goto out;
	}

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_sync_etr_buf(drvdata);

	CS_LOCK(drvdata->base);
	/* Reset perf specific data */
	drvdata->perf_data = NULL;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	size = etr_buf->len;
	tmc_etr_sync_perf_buffer(etr_perf);

	/*
	 * Update handle->head in snapshot mode. Also update the size to the
	 * hardware buffer size if there was an overflow.
	 */
	if (etr_perf->snapshot) {
		handle->head += size;
		if (etr_buf->full)
			size = etr_buf->size;
	}

	lost |= etr_buf->full;
out:
	if (lost)
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
	return size;
}

static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
{
	int rc = 0;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct perf_output_handle *handle = data;
	struct etr_perf_buffer *etr_perf = etm_perf_sink_config(handle);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	/*
	 * There can be only one writer per sink in perf mode. If the sink
	 * is already open in SYSFS mode, we can't use it.
	 */
	if (drvdata->mode != CS_MODE_DISABLED || WARN_ON(drvdata->perf_data)) {
		rc = -EBUSY;
		goto unlock_out;
	}

	if (WARN_ON(!etr_perf || !etr_perf->etr_buf)) {
		rc = -EINVAL;
		goto unlock_out;
	}

	etr_perf->head = PERF_IDX2OFF(handle->head, etr_perf);
	drvdata->perf_data = etr_perf;
	rc = tmc_etr_enable_hw(drvdata, etr_perf->etr_buf);
	if (!rc)
		drvdata->mode = CS_MODE_PERF;

unlock_out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	return rc;
}

static int tmc_enable_etr_sink(struct coresight_device *csdev,
			       u32 mode, void *data)
{
	switch (mode) {
	case CS_MODE_SYSFS:
		return tmc_enable_etr_sink_sysfs(csdev);
	case CS_MODE_PERF:
		return tmc_enable_etr_sink_perf(csdev, data);
	}

	/* We shouldn't be here */
	return -EINVAL;
}

static void tmc_disable_etr_sink(struct coresight_device *csdev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	/* Disable the TMC only if it needs to */
	if (drvdata->mode != CS_MODE_DISABLED) {
		tmc_etr_disable_hw(drvdata);
		drvdata->mode = CS_MODE_DISABLED;
	}

	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_dbg(drvdata->dev, "TMC-ETR disabled\n");
}

static const struct coresight_ops_sink tmc_etr_sink_ops = {
	.enable		= tmc_enable_etr_sink,
	.disable	= tmc_disable_etr_sink,
	.alloc_buffer	= tmc_alloc_etr_buffer,
	.update_buffer	= tmc_update_etr_buffer,
	.free_buffer	= tmc_free_etr_buffer,
};

const struct coresight_ops tmc_etr_cs_ops = {
	.sink_ops	= &tmc_etr_sink_ops,
};

int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
{
	int ret = 0;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * We can safely allow reads even if the ETR is operating in PERF mode,
	 * since the sysfs session is captured in mode specific data.
	 * If drvdata::sysfs_buf is NULL the trace data has been read already.
	 */
	if (!drvdata->sysfs_buf) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if we are trying to read from a running session. */
	if (drvdata->mode == CS_MODE_SYSFS)
		__tmc_etr_disable_hw(drvdata);

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
{
	unsigned long flags;
	struct etr_buf *sysfs_buf = NULL;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* RE-enable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer. Since the tracer is still enabled
		 * drvdata::sysfs_buf can't be NULL.
		 */
		__tmc_etr_enable_hw(drvdata);
	} else {
		/*
		 * The ETR is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		sysfs_buf = drvdata->sysfs_buf;
		drvdata->sysfs_buf = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free allocated memory outside of the spinlock */
	if (sysfs_buf)
		tmc_etr_free_sysfs_buf(sysfs_buf);

	return 0;
}