// SPDX-License-Identifier: GPL-2.0
/*
 * Intel(R) Trace Hub Memory Storage Unit
 *
 * Copyright (C) 2014-2015 Intel Corporation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
#include <linux/pfn_t.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <linux/intel_th.h>
#include "intel_th.h"
#include "msu.h"

#define msc_dev(x) (&(x)->thdev->dev)

/*
 * Lockout state transitions:
 *   READY -> INUSE -+-> LOCKED -+-> READY -> etc.
 *                   \-----------/
 * WIN_READY:	window can be used by HW
 * WIN_INUSE:	window is in use
 * WIN_LOCKED:	window is filled up and is being processed by the buffer
 *		handling code
 *
 * All state transitions happen automatically, except for the LOCKED->READY,
 * which needs to be signalled by the buffer code by calling
 * intel_th_msc_window_unlock().
 *
 * When the interrupt handler has to switch to the next window, it checks
 * whether it's READY, and if it is, it performs the switch and tracing
 * continues. If it's LOCKED, it stops the trace.
 */
enum lockout_state {
	WIN_READY = 0,
	WIN_INUSE,
	WIN_LOCKED
};

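/*
 * Illustrative walkthrough (a summary of the logic below, not new
 * mechanism): with two windows A and B and a buffer sink attached, one
 * full cycle looks like:
 *
 *	A: READY -> INUSE	msc_configure() starts tracing into A
 *	A: INUSE -> LOCKED	IRQ: A filled up, handed to the sink
 *	B: READY -> INUSE	IRQ: hardware switches to B
 *	A: LOCKED -> READY	sink calls intel_th_msc_window_unlock(A)
 *
 * If B is still LOCKED when A fills up, the trace is either stopped or
 * the switch is deferred via msc::switch_on_unlock, depending on
 * msc::stop_on_full.
 */
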
/**
 * struct msc_window - multiblock mode window descriptor
 * @entry: window list linkage (msc::win_list)
 * @pgoff: page offset into the buffer that this window starts at
 * @lockout: lockout state, see comment above
 * @lo_lock: lockout state serialization
 * @nr_blocks: number of blocks (pages) in this window
 * @nr_segs: number of segments in this window (<= @nr_blocks)
 * @msc: pointer to the MSC device
 * @_sgt: inline SG table storage for the block descriptors
 * @sgt: SG table in use: points to @_sgt unless a buffer sink
 *       supplies its own
 */
struct msc_window {
	struct list_head	entry;
	unsigned long		pgoff;
	enum lockout_state	lockout;
	spinlock_t		lo_lock;
	unsigned int		nr_blocks;
	unsigned int		nr_segs;
	struct msc		*msc;
	struct sg_table		_sgt;
	struct sg_table		*sgt;
};

/**
 * struct msc_iter - iterator for msc buffer
 * @entry: msc::iter_list linkage
 * @msc: pointer to the MSC device
 * @start_win: oldest window
 * @win: current window
 * @offset: current logical offset into the buffer
 * @start_block: oldest block in the window
 * @block: current block in the window
 * @block_off: offset into current block
 * @wrap_count: passes left over the wrapped block (2, then 1, then 0)
 * @eof: end of buffer reached
 */
struct msc_iter {
	struct list_head	entry;
	struct msc		*msc;
	struct msc_window	*start_win;
	struct msc_window	*win;
	unsigned long		offset;
	struct scatterlist	*start_block;
	struct scatterlist	*block;
	unsigned int		block_off;
	unsigned int		wrap_count;
	unsigned int		eof;
};

/**
 * struct msc - MSC device representation
 * @reg_base: register window base address for this MSC
 * @msu_base: register window base address for the entire MSU
 * @thdev: intel_th_device pointer
 * @mbuf: MSU buffer, if assigned
 * @mbuf_priv: MSU buffer's private data, if @mbuf
 * @work: a work to stop the trace when the buffer is full
 * @win_list: list of windows in multiblock mode
 * @single_sgt: single mode buffer
 * @cur_win: current window
 * @switch_on_unlock: window to switch to when it becomes available
 * @nr_pages: total number of pages allocated for this buffer
 * @single_sz: amount of data in single mode
 * @single_wrap: single mode wrap occurred
 * @base: buffer's base pointer
 * @base_addr: buffer's base address
 * @orig_addr: MSC0 buffer's base address
 * @orig_sz: MSC0 buffer's size
 * @user_count: number of users of the buffer
 * @mmap_count: number of mappings
 * @buf_mutex: mutex to serialize access to buffer-related bits
 * @iter_list: list of open file descriptor iterators
 * @stop_on_full: stop the trace if the current window is full
 * @enabled: MSC is enabled
 * @wrap: wrapping is enabled
 * @do_irq: IRQ resource is available, handle interrupts
 * @multi_is_broken: multiblock mode is broken on this device
 *                   (as flagged by the PCI driver data)
 * @mode: MSC operating mode
 * @burst_len: write burst length
 * @index: number of this MSC in the MSU
 */
struct msc {
	void __iomem		*reg_base;
	void __iomem		*msu_base;
	struct intel_th_device	*thdev;

	const struct msu_buffer	*mbuf;
	void			*mbuf_priv;

	struct work_struct	work;
	struct list_head	win_list;
	struct sg_table		single_sgt;
	struct msc_window	*cur_win;
	struct msc_window	*switch_on_unlock;
	unsigned long		nr_pages;
	unsigned long		single_sz;
	unsigned int		single_wrap : 1;
	void			*base;
	dma_addr_t		base_addr;
	u32			orig_addr;
	u32			orig_sz;

	/* <0: no buffer, 0: no users, >0: active users */
	atomic_t		user_count;

	atomic_t		mmap_count;
	struct mutex		buf_mutex;

	struct list_head	iter_list;

	bool			stop_on_full;

	/* config */
	unsigned int		enabled : 1,
				wrap	: 1,
				do_irq	: 1,
				multi_is_broken : 1;
	unsigned int		mode;
	unsigned int		burst_len;
	unsigned int		index;
};

static LIST_HEAD(msu_buffer_list);
static DEFINE_MUTEX(msu_buffer_mutex);

/**
 * struct msu_buffer_entry - internal MSU buffer bookkeeping
 * @entry: link to msu_buffer_list
 * @mbuf: MSU buffer object
 * @owner: module that provides this MSU buffer
 */
struct msu_buffer_entry {
	struct list_head	entry;
	const struct msu_buffer	*mbuf;
	struct module		*owner;
};

static struct msu_buffer_entry *__msu_buffer_entry_find(const char *name)
{
	struct msu_buffer_entry *mbe;

	lockdep_assert_held(&msu_buffer_mutex);

	list_for_each_entry(mbe, &msu_buffer_list, entry) {
		if (!strcmp(mbe->mbuf->name, name))
			return mbe;
	}

	return NULL;
}

static const struct msu_buffer *
msu_buffer_get(const char *name)
{
	struct msu_buffer_entry *mbe;

	mutex_lock(&msu_buffer_mutex);
	mbe = __msu_buffer_entry_find(name);
	if (mbe && !try_module_get(mbe->owner))
		mbe = NULL;
	mutex_unlock(&msu_buffer_mutex);

	return mbe ? mbe->mbuf : NULL;
}

static void msu_buffer_put(const struct msu_buffer *mbuf)
{
	struct msu_buffer_entry *mbe;

	mutex_lock(&msu_buffer_mutex);
	mbe = __msu_buffer_entry_find(mbuf->name);
	if (mbe)
		module_put(mbe->owner);
	mutex_unlock(&msu_buffer_mutex);
}

int intel_th_msu_buffer_register(const struct msu_buffer *mbuf,
				 struct module *owner)
{
	struct msu_buffer_entry *mbe;
	int ret = 0;

	mbe = kzalloc(sizeof(*mbe), GFP_KERNEL);
	if (!mbe)
		return -ENOMEM;

	mutex_lock(&msu_buffer_mutex);
	if (__msu_buffer_entry_find(mbuf->name)) {
		ret = -EEXIST;
		kfree(mbe);
		goto unlock;
	}

	mbe->mbuf = mbuf;
	mbe->owner = owner;
	list_add_tail(&mbe->entry, &msu_buffer_list);
unlock:
	mutex_unlock(&msu_buffer_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(intel_th_msu_buffer_register);

void intel_th_msu_buffer_unregister(const struct msu_buffer *mbuf)
{
	struct msu_buffer_entry *mbe;

	mutex_lock(&msu_buffer_mutex);
	mbe = __msu_buffer_entry_find(mbuf->name);
	if (mbe) {
		list_del(&mbe->entry);
		kfree(mbe);
	}
	mutex_unlock(&msu_buffer_mutex);
}
EXPORT_SYMBOL_GPL(intel_th_msu_buffer_unregister);

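/*
 * Illustrative sketch (not part of this driver): a minimal external
 * buffer sink module would register itself roughly like this; all my_*
 * names are hypothetical. .assign/.unassign are required (they are
 * called unconditionally below); the other callbacks are optional and
 * checked for NULL before use.
 *
 *	static const struct msu_buffer my_mbuf = {
 *		.name		= "my_sink",
 *		.assign		= my_assign,
 *		.unassign	= my_unassign,
 *		.alloc_window	= my_alloc_window,
 *		.free_window	= my_free_window,
 *		.ready		= my_ready,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return intel_th_msu_buffer_register(&my_mbuf, THIS_MODULE);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		intel_th_msu_buffer_unregister(&my_mbuf);
 *	}
 *
 * Once registered, writing "my_sink" to the MSC's "mode" sysfs
 * attribute engages the sink (see mode_store() below).
 */
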
static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
{
	/* header hasn't been written */
	if (!bdesc->valid_dw)
		return true;

	/* valid_dw includes the header */
	if (!msc_data_sz(bdesc))
		return true;

	return false;
}

static inline struct scatterlist *msc_win_base_sg(struct msc_window *win)
{
	return win->sgt->sgl;
}

static inline struct msc_block_desc *msc_win_base(struct msc_window *win)
{
	return sg_virt(msc_win_base_sg(win));
}

static inline dma_addr_t msc_win_base_dma(struct msc_window *win)
{
	return sg_dma_address(msc_win_base_sg(win));
}

static inline unsigned long
msc_win_base_pfn(struct msc_window *win)
{
	return PFN_DOWN(msc_win_base_dma(win));
}

/**
 * msc_is_last_win() - check if a window is the last one for a given MSC
 * @win: window
 * Return: true if @win is the last window in MSC's multiblock buffer
 */
static inline bool msc_is_last_win(struct msc_window *win)
{
	return win->entry.next == &win->msc->win_list;
}

/**
 * msc_next_window() - return next window in the multiblock buffer
 * @win: current window
 *
 * Return: window following the current one
 */
static struct msc_window *msc_next_window(struct msc_window *win)
{
	if (msc_is_last_win(win))
		return list_first_entry(&win->msc->win_list, struct msc_window,
					entry);

	return list_next_entry(win, entry);
}

static size_t msc_win_total_sz(struct msc_window *win)
{
	struct scatterlist *sg;
	unsigned int blk;
	size_t size = 0;

	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
		struct msc_block_desc *bdesc = sg_virt(sg);

		if (msc_block_wrapped(bdesc))
			return (size_t)win->nr_blocks << PAGE_SHIFT;

		size += msc_total_sz(bdesc);
		if (msc_block_last_written(bdesc))
			break;
	}

	return size;
}

/**
 * msc_find_window() - find a window matching a given sg_table
 * @msc: MSC device
 * @sgt: SG table of the window
 * @nonempty: skip over empty windows
 *
 * Return: MSC window structure pointer or NULL if the window
 * could not be found.
 */
static struct msc_window *
msc_find_window(struct msc *msc, struct sg_table *sgt, bool nonempty)
{
	struct msc_window *win;
	unsigned int found = 0;

	if (list_empty(&msc->win_list))
		return NULL;

	/*
	 * we might need a radix tree for this, depending on how
	 * many windows a typical user would allocate; ideally it's
	 * something like 2, in which case we're good
	 */
	list_for_each_entry(win, &msc->win_list, entry) {
		if (win->sgt == sgt)
			found++;

		/* skip the empty ones */
		if (nonempty && msc_block_is_empty(msc_win_base(win)))
			continue;

		if (found)
			return win;
	}

	return NULL;
}

/**
 * msc_oldest_window() - locate the window with oldest data
 * @msc: MSC device
 *
 * This should only be used in multiblock mode. Caller should hold the
 * msc::user_count reference.
 *
 * Return: the oldest window with valid data
 */
static struct msc_window *msc_oldest_window(struct msc *msc)
{
	struct msc_window *win;

	if (list_empty(&msc->win_list))
		return NULL;

	win = msc_find_window(msc, msc_next_window(msc->cur_win)->sgt, true);
	if (win)
		return win;

	return list_first_entry(&msc->win_list, struct msc_window, entry);
}

/**
 * msc_win_oldest_sg() - locate the oldest block in a given window
 * @win: window to look at
 *
 * Return: scatterlist of the block with the oldest data
 */
static struct scatterlist *msc_win_oldest_sg(struct msc_window *win)
{
	unsigned int blk;
	struct scatterlist *sg;
	struct msc_block_desc *bdesc = msc_win_base(win);

	/* without wrapping, first block is the oldest */
	if (!msc_block_wrapped(bdesc))
		return msc_win_base_sg(win);

	/*
	 * with wrapping, last written block contains both the newest and the
	 * oldest data for this window.
	 */
	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
		struct msc_block_desc *bdesc = sg_virt(sg);

		if (msc_block_last_written(bdesc))
			return sg;
	}

	return msc_win_base_sg(win);
}

static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter)
{
	return sg_virt(iter->block);
}

static struct msc_iter *msc_iter_install(struct msc *msc)
{
	struct msc_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&msc->buf_mutex);

	/*
	 * Reading and tracing are mutually exclusive; if msc is
	 * enabled, open() will fail; otherwise existing readers
	 * will prevent enabling the msc and the rest of fops don't
	 * need to worry about it.
	 */
	if (msc->enabled) {
		kfree(iter);
		iter = ERR_PTR(-EBUSY);
		goto unlock;
	}

	iter->msc = msc;

	list_add_tail(&iter->entry, &msc->iter_list);
unlock:
	mutex_unlock(&msc->buf_mutex);

	return iter;
}

static void msc_iter_remove(struct msc_iter *iter, struct msc *msc)
{
	mutex_lock(&msc->buf_mutex);
	list_del(&iter->entry);
	mutex_unlock(&msc->buf_mutex);

	kfree(iter);
}

static void msc_iter_block_start(struct msc_iter *iter)
{
	if (iter->start_block)
		return;

	iter->start_block = msc_win_oldest_sg(iter->win);
	iter->block = iter->start_block;
	iter->wrap_count = 0;

	/*
	 * start with the block with oldest data; if data has wrapped
	 * in this window, it should be in this block
	 */
	if (msc_block_wrapped(msc_iter_bdesc(iter)))
		iter->wrap_count = 2;
}

static int msc_iter_win_start(struct msc_iter *iter, struct msc *msc)
{
	/* already started, nothing to do */
	if (iter->start_win)
		return 0;

	iter->start_win = msc_oldest_window(msc);
	if (!iter->start_win)
		return -EINVAL;

	iter->win = iter->start_win;
	iter->start_block = NULL;

	msc_iter_block_start(iter);

	return 0;
}

static int msc_iter_win_advance(struct msc_iter *iter)
{
	iter->win = msc_next_window(iter->win);
	iter->start_block = NULL;

	if (iter->win == iter->start_win) {
		iter->eof++;
		return 1;
	}

	msc_iter_block_start(iter);

	return 0;
}

static int msc_iter_block_advance(struct msc_iter *iter)
{
	iter->block_off = 0;

	/* wrapping */
	if (iter->wrap_count && iter->block == iter->start_block) {
		iter->wrap_count--;
		if (!iter->wrap_count)
			/* copied newest data from the wrapped block */
			return msc_iter_win_advance(iter);
	}

	/* no wrapping, check for last written block */
	if (!iter->wrap_count && msc_block_last_written(msc_iter_bdesc(iter)))
		/* copied newest data for the window */
		return msc_iter_win_advance(iter);

	/* block advance */
	if (sg_is_last(iter->block))
		iter->block = msc_win_base_sg(iter->win);
	else
		iter->block = sg_next(iter->block);

	/* no wrapping, sanity check in case there is no last written block */
	if (!iter->wrap_count && iter->block == iter->start_block)
		return msc_iter_win_advance(iter);

	return 0;
}

/**
 * msc_buffer_iterate() - go through multiblock buffer's data
 * @iter: iterator structure
 * @size: amount of data to scan
 * @data: callback's private data
 * @fn: iterator callback
 *
 * This will start at the window which will be written to next (containing
 * the oldest data) and work its way to the current window, calling @fn
 * for each chunk of data as it goes.
 *
 * Caller should have msc::user_count reference to make sure the buffer
 * doesn't disappear from under us.
 *
 * Return: amount of data actually scanned.
 */
static ssize_t
msc_buffer_iterate(struct msc_iter *iter, size_t size, void *data,
		   unsigned long (*fn)(void *, void *, size_t))
{
	struct msc *msc = iter->msc;
	size_t len = size;
	unsigned int advance;

	if (iter->eof)
		return 0;

	/* start with the oldest window */
	if (msc_iter_win_start(iter, msc))
		return 0;

	do {
		unsigned long data_bytes = msc_data_sz(msc_iter_bdesc(iter));
		void *src = (void *)msc_iter_bdesc(iter) + MSC_BDESC;
		size_t tocopy = data_bytes, copied = 0;
		size_t remaining = 0;

		advance = 1;

		/*
		 * If block wrapping happened, we need to visit the last block
		 * twice, because it contains both the oldest and the newest
		 * data in this window.
		 *
		 * First time (wrap_count==2), in the very beginning, to collect
		 * the oldest data, which is in the range
		 * (data_bytes..DATA_IN_PAGE).
		 *
		 * Second time (wrap_count==1), it's just like any other block,
		 * containing data in the range of [MSC_BDESC..data_bytes].
		 */
		if (iter->block == iter->start_block && iter->wrap_count == 2) {
			tocopy = DATA_IN_PAGE - data_bytes;
			src += data_bytes;
		}

		if (!tocopy)
			goto next_block;

		tocopy -= iter->block_off;
		src += iter->block_off;

		if (len < tocopy) {
			tocopy = len;
			advance = 0;
		}

		remaining = fn(data, src, tocopy);

		if (remaining)
			advance = 0;

		copied = tocopy - remaining;
		len -= copied;
		iter->block_off += copied;
		iter->offset += copied;

		if (!advance)
			break;

next_block:
		if (msc_iter_block_advance(iter))
			break;

	} while (len);

	return size - len;
}

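/*
 * Note on the @fn contract above, with an illustrative (hypothetical)
 * callback: @fn must return the number of bytes it could NOT consume,
 * exactly like copy_to_user(); returning 0 means "all taken". E.g. a
 * sketch that sinks the data into a preallocated kernel buffer:
 *
 *	struct my_sink { void *buf; size_t pos, size; };
 *
 *	static unsigned long my_fn(void *data, void *src, size_t len)
 *	{
 *		struct my_sink *s = data;
 *		size_t n = min(len, s->size - s->pos);
 *
 *		memcpy(s->buf + s->pos, src, n);
 *		s->pos += n;
 *		return len - n;		// bytes not consumed
 *	}
 *
 * msc_win_to_user() below is the real in-tree user of this contract.
 */
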
/**
 * msc_buffer_clear_hw_header() - clear hw header for multiblock
 * @msc: MSC device
 */
static void msc_buffer_clear_hw_header(struct msc *msc)
{
	struct msc_window *win;
	struct scatterlist *sg;

	list_for_each_entry(win, &msc->win_list, entry) {
		unsigned int blk;

		for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
			struct msc_block_desc *bdesc = sg_virt(sg);

			memset_startat(bdesc, 0, hw_tag);
		}
	}
}

static int intel_th_msu_init(struct msc *msc)
{
	u32 mintctl, msusts;

	if (!msc->do_irq)
		return 0;

	if (!msc->mbuf)
		return 0;

	mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
	mintctl |= msc->index ? M1BLIE : M0BLIE;
	iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
	if (mintctl != ioread32(msc->msu_base + REG_MSU_MINTCTL)) {
		dev_info(msc_dev(msc), "MINTCTL ignores writes: no usable interrupts\n");
		msc->do_irq = 0;
		return 0;
	}

	msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
	iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);

	return 0;
}

static void intel_th_msu_deinit(struct msc *msc)
{
	u32 mintctl;

	if (!msc->do_irq)
		return;

	mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
	mintctl &= msc->index ? ~M1BLIE : ~M0BLIE;
	iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
}

static int msc_win_set_lockout(struct msc_window *win,
			       enum lockout_state expect,
			       enum lockout_state new)
{
	enum lockout_state old;
	unsigned long flags;
	int ret = 0;

	if (!win->msc->mbuf)
		return 0;

	spin_lock_irqsave(&win->lo_lock, flags);
	old = win->lockout;

	if (old != expect) {
		ret = -EINVAL;
		goto unlock;
	}

	win->lockout = new;

	if (old == expect && new == WIN_LOCKED)
		atomic_inc(&win->msc->user_count);
	else if (old == expect && old == WIN_LOCKED)
		atomic_dec(&win->msc->user_count);

unlock:
	spin_unlock_irqrestore(&win->lo_lock, flags);

	if (ret) {
		if (expect == WIN_READY && old == WIN_LOCKED)
			return -EBUSY;

		/* from intel_th_msc_window_unlock(), don't warn if not locked */
		if (expect == WIN_LOCKED && old == new)
			return 0;

		dev_warn_ratelimited(msc_dev(win->msc),
				     "expected lockout state %d, got %d\n",
				     expect, old);
	}

	return ret;
}

/**
 * msc_configure() - set up MSC hardware
 * @msc: the MSC device to configure
 *
 * Program storage mode, wrapping, burst length and trace buffer address
 * into a given MSC. Then, enable tracing and set msc::enabled.
 * The latter is serialized on msc::buf_mutex, so make sure to hold it.
 *
 * Return: %0 for success or a negative error code otherwise.
 */
static int msc_configure(struct msc *msc)
{
	u32 reg;

	lockdep_assert_held(&msc->buf_mutex);

	if (msc->mode > MSC_MODE_MULTI)
		return -EINVAL;

	if (msc->mode == MSC_MODE_MULTI) {
		if (msc_win_set_lockout(msc->cur_win, WIN_READY, WIN_INUSE))
			return -EBUSY;

		msc_buffer_clear_hw_header(msc);
	}

	msc->orig_addr = ioread32(msc->reg_base + REG_MSU_MSC0BAR);
	msc->orig_sz = ioread32(msc->reg_base + REG_MSU_MSC0SIZE);

	reg = msc->base_addr >> PAGE_SHIFT;
	iowrite32(reg, msc->reg_base + REG_MSU_MSC0BAR);

	if (msc->mode == MSC_MODE_SINGLE) {
		reg = msc->nr_pages;
		iowrite32(reg, msc->reg_base + REG_MSU_MSC0SIZE);
	}

	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
	reg &= ~(MSC_MODE | MSC_WRAPEN | MSC_EN | MSC_RD_HDR_OVRD);

	reg |= MSC_EN;
	reg |= msc->mode << __ffs(MSC_MODE);
	reg |= msc->burst_len << __ffs(MSC_LEN);

	if (msc->wrap)
		reg |= MSC_WRAPEN;

	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);

	intel_th_msu_init(msc);

	msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI;
	intel_th_trace_enable(msc->thdev);
	msc->enabled = 1;

	if (msc->mbuf && msc->mbuf->activate)
		msc->mbuf->activate(msc->mbuf_priv);

	return 0;
}

/**
 * msc_disable() - disable MSC hardware
 * @msc: MSC device to disable
 *
 * If @msc is enabled, disable tracing on the switch and then disable MSC
 * storage. Caller must hold msc::buf_mutex.
 */
static void msc_disable(struct msc *msc)
{
	struct msc_window *win = msc->cur_win;
	u32 reg;

	lockdep_assert_held(&msc->buf_mutex);

	if (msc->mode == MSC_MODE_MULTI)
		msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);

	if (msc->mbuf && msc->mbuf->deactivate)
		msc->mbuf->deactivate(msc->mbuf_priv);
	intel_th_msu_deinit(msc);
	intel_th_trace_disable(msc->thdev);

	if (msc->mode == MSC_MODE_SINGLE) {
		reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
		msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT);

		reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP);
		msc->single_sz = reg & ((msc->nr_pages << PAGE_SHIFT) - 1);
		dev_dbg(msc_dev(msc), "MSCnMWP: %08x/%08lx, wrap: %d\n",
			reg, msc->single_sz, msc->single_wrap);
	}

	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
	reg &= ~MSC_EN;
	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);

	if (msc->mbuf && msc->mbuf->ready)
		msc->mbuf->ready(msc->mbuf_priv, win->sgt,
				 msc_win_total_sz(win));

	msc->enabled = 0;

	iowrite32(msc->orig_addr, msc->reg_base + REG_MSU_MSC0BAR);
	iowrite32(msc->orig_sz, msc->reg_base + REG_MSU_MSC0SIZE);

	dev_dbg(msc_dev(msc), "MSCnNWSA: %08x\n",
		ioread32(msc->reg_base + REG_MSU_MSC0NWSA));

	reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
	dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg);

	reg = ioread32(msc->reg_base + REG_MSU_MSUSTS);
	reg &= msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
	iowrite32(reg, msc->reg_base + REG_MSU_MSUSTS);
}

static int intel_th_msc_activate(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	int ret = -EBUSY;

	if (!atomic_inc_unless_negative(&msc->user_count))
		return -ENODEV;

	mutex_lock(&msc->buf_mutex);

	/* if there are readers, refuse */
	if (list_empty(&msc->iter_list))
		ret = msc_configure(msc);

	mutex_unlock(&msc->buf_mutex);

	if (ret)
		atomic_dec(&msc->user_count);

	return ret;
}

static void intel_th_msc_deactivate(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);

	mutex_lock(&msc->buf_mutex);
	if (msc->enabled) {
		msc_disable(msc);
		atomic_dec(&msc->user_count);
	}
	mutex_unlock(&msc->buf_mutex);
}

/**
 * msc_buffer_contig_alloc() - allocate a contiguous buffer for SINGLE mode
 * @msc: MSC device
 * @size: allocation size in bytes
 *
 * This modifies msc::base, which requires msc::buf_mutex to serialize, so the
 * caller is expected to hold it.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
{
	unsigned long nr_pages = size >> PAGE_SHIFT;
	unsigned int order = get_order(size);
	struct page *page;
	int ret;

	if (!size)
		return 0;

	ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL);
	if (ret)
		goto err_out;

	ret = -ENOMEM;
	page = alloc_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32, order);
	if (!page)
		goto err_free_sgt;

	split_page(page, order);
	sg_set_buf(msc->single_sgt.sgl, page_address(page), size);

	ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1,
			 DMA_FROM_DEVICE);
	if (ret < 0)
		goto err_free_pages;

	msc->nr_pages = nr_pages;
	msc->base = page_address(page);
	msc->base_addr = sg_dma_address(msc->single_sgt.sgl);

	return 0;

err_free_pages:
	__free_pages(page, order);

err_free_sgt:
	sg_free_table(&msc->single_sgt);

err_out:
	return ret;
}

/**
 * msc_buffer_contig_free() - free a contiguous buffer
 * @msc: MSC configured in SINGLE mode
 */
static void msc_buffer_contig_free(struct msc *msc)
{
	unsigned long off;

	dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl,
		     1, DMA_FROM_DEVICE);
	sg_free_table(&msc->single_sgt);

	for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
		struct page *page = virt_to_page(msc->base + off);

		__free_page(page);
	}

	msc->nr_pages = 0;
}

/**
 * msc_buffer_contig_get_page() - find a page at a given offset
 * @msc: MSC configured in SINGLE mode
 * @pgoff: page offset
 *
 * Return: page, if @pgoff is within the range, NULL otherwise.
 */
static struct page *msc_buffer_contig_get_page(struct msc *msc,
					       unsigned long pgoff)
{
	if (pgoff >= msc->nr_pages)
		return NULL;

	return virt_to_page(msc->base + (pgoff << PAGE_SHIFT));
}

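/*
 * Worked example (illustrative): a SINGLE mode buffer of 128 pages on a
 * 4KiB-page system means size = 128 << PAGE_SHIFT = 512KiB, so
 * get_order(size) == 7 and alloc_pages() returns one physically
 * contiguous 2^7-page allocation. split_page() then turns it into 128
 * individually refcounted pages, which is what the mmap() fault path
 * below needs.
 */
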
static int __msc_buffer_win_alloc(struct msc_window *win,
				  unsigned int nr_segs)
{
	struct scatterlist *sg_ptr;
	void *block;
	int i, ret;

	ret = sg_alloc_table(win->sgt, nr_segs, GFP_KERNEL);
	if (ret)
		return -ENOMEM;

	for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {
		block = dma_alloc_coherent(msc_dev(win->msc)->parent->parent,
					   PAGE_SIZE, &sg_dma_address(sg_ptr),
					   GFP_KERNEL);
		if (!block)
			goto err_nomem;

		sg_set_buf(sg_ptr, block, PAGE_SIZE);
	}

	return nr_segs;

err_nomem:
	for_each_sg(win->sgt->sgl, sg_ptr, i, ret)
		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
				  sg_virt(sg_ptr), sg_dma_address(sg_ptr));

	sg_free_table(win->sgt);

	return -ENOMEM;
}

#ifdef CONFIG_X86
static void msc_buffer_set_uc(struct msc *msc)
{
	struct scatterlist *sg_ptr;
	struct msc_window *win;
	int i;

	if (msc->mode == MSC_MODE_SINGLE) {
		set_memory_uc((unsigned long)msc->base, msc->nr_pages);
		return;
	}

	list_for_each_entry(win, &msc->win_list, entry) {
		for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) {
			/* Set the page as uncached */
			set_memory_uc((unsigned long)sg_virt(sg_ptr),
				      PFN_DOWN(sg_ptr->length));
		}
	}
}

static void msc_buffer_set_wb(struct msc *msc)
{
	struct scatterlist *sg_ptr;
	struct msc_window *win;
	int i;

	if (msc->mode == MSC_MODE_SINGLE) {
		set_memory_wb((unsigned long)msc->base, msc->nr_pages);
		return;
	}

	list_for_each_entry(win, &msc->win_list, entry) {
		for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) {
			/* Reset the page to write-back */
			set_memory_wb((unsigned long)sg_virt(sg_ptr),
				      PFN_DOWN(sg_ptr->length));
		}
	}
}
#else /* !X86 */
static inline void
msc_buffer_set_uc(struct msc *msc) {}
static inline void msc_buffer_set_wb(struct msc *msc) {}
#endif /* CONFIG_X86 */

static struct page *msc_sg_page(struct scatterlist *sg)
{
	void *addr = sg_virt(sg);

	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);

	return sg_page(sg);
}

/**
 * msc_buffer_win_alloc() - alloc a window for a multiblock mode
 * @msc: MSC device
 * @nr_blocks: number of pages in this window
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
{
	struct msc_window *win;
	int ret = -ENOMEM;

	if (!nr_blocks)
		return 0;

	win = kzalloc(sizeof(*win), GFP_KERNEL);
	if (!win)
		return -ENOMEM;

	win->msc = msc;
	win->sgt = &win->_sgt;
	win->lockout = WIN_READY;
	spin_lock_init(&win->lo_lock);

	if (!list_empty(&msc->win_list)) {
		struct msc_window *prev = list_last_entry(&msc->win_list,
							  struct msc_window,
							  entry);

		win->pgoff = prev->pgoff + prev->nr_blocks;
	}

	if (msc->mbuf && msc->mbuf->alloc_window)
		ret = msc->mbuf->alloc_window(msc->mbuf_priv, &win->sgt,
					      nr_blocks << PAGE_SHIFT);
	else
		ret = __msc_buffer_win_alloc(win, nr_blocks);

	if (ret <= 0)
		goto err_nomem;

	win->nr_segs = ret;
	win->nr_blocks = nr_blocks;

	if (list_empty(&msc->win_list)) {
		msc->base = msc_win_base(win);
		msc->base_addr = msc_win_base_dma(win);
		msc->cur_win = win;
	}

	list_add_tail(&win->entry, &msc->win_list);
	msc->nr_pages += nr_blocks;

	return 0;

err_nomem:
	kfree(win);

	return ret;
}

static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(win->sgt->sgl, sg, win->nr_segs, i) {
		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
				  sg_virt(sg), sg_dma_address(sg));
	}
	sg_free_table(win->sgt);
}

/**
 * msc_buffer_win_free() - free a window from MSC's window list
 * @msc: MSC device
 * @win: window to free
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 */
static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
	msc->nr_pages -= win->nr_blocks;

	list_del(&win->entry);
	if (list_empty(&msc->win_list)) {
		msc->base = NULL;
		msc->base_addr = 0;
	}

	if (msc->mbuf && msc->mbuf->free_window)
		msc->mbuf->free_window(msc->mbuf_priv, win->sgt);
	else
		__msc_buffer_win_free(msc, win);

	kfree(win);
}

/**
 * msc_buffer_relink() - set up block descriptors for multiblock mode
 * @msc: MSC device
 *
 * This traverses msc::win_list, which requires msc::buf_mutex to serialize,
 * so the caller is expected to hold it.
 */
static void msc_buffer_relink(struct msc *msc)
{
	struct msc_window *win, *next_win;

	/* call with msc::buf_mutex locked */
	list_for_each_entry(win, &msc->win_list, entry) {
		struct scatterlist *sg;
		unsigned int blk;
		u32 sw_tag = 0;

		/*
		 * Last window's next_win should point to the first window
		 * and MSC_SW_TAG_LASTWIN should be set.
		 */
		if (msc_is_last_win(win)) {
			sw_tag |= MSC_SW_TAG_LASTWIN;
			next_win = list_first_entry(&msc->win_list,
						    struct msc_window, entry);
		} else {
			next_win = list_next_entry(win, entry);
		}

		for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
			struct msc_block_desc *bdesc = sg_virt(sg);

			memset(bdesc, 0, sizeof(*bdesc));

			bdesc->next_win = msc_win_base_pfn(next_win);

			/*
			 * Similarly to last window, last block should point
			 * to the first one.
			 */
			if (blk == win->nr_segs - 1) {
				sw_tag |= MSC_SW_TAG_LASTBLK;
				bdesc->next_blk = msc_win_base_pfn(win);
			} else {
				dma_addr_t addr = sg_dma_address(sg_next(sg));

				bdesc->next_blk = PFN_DOWN(addr);
			}

			bdesc->sw_tag = sw_tag;
			bdesc->block_sz = sg->length / 64;
		}
	}

	/*
	 * Make the above writes globally visible before tracing is
	 * enabled to make sure hardware sees them coherently.
	 */
	wmb();
}

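/*
 * Resulting descriptor topology, by example (illustrative): with two
 * windows of two blocks each, msc_buffer_relink() chains the block
 * descriptors into a ring the hardware can follow on its own:
 *
 *	win0.blk0: next_blk -> win0.blk1,  next_win -> win1
 *	win0.blk1: next_blk -> win0.blk0,  next_win -> win1  (LASTBLK)
 *	win1.blk0: next_blk -> win1.blk1,  next_win -> win0  (LASTWIN)
 *	win1.blk1: next_blk -> win1.blk0,  next_win -> win0  (LASTWIN|LASTBLK)
 *
 * Every block in a window carries next_win pointing at the base of the
 * following window (the first window for the last one), so the write
 * pointer can hop windows from any block when a switch happens.
 */
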
static void msc_buffer_multi_free(struct msc *msc)
{
	struct msc_window *win, *iter;

	list_for_each_entry_safe(win, iter, &msc->win_list, entry)
		msc_buffer_win_free(msc, win);
}

static int msc_buffer_multi_alloc(struct msc *msc, unsigned long *nr_pages,
				  unsigned int nr_wins)
{
	int ret, i;

	for (i = 0; i < nr_wins; i++) {
		ret = msc_buffer_win_alloc(msc, nr_pages[i]);
		if (ret) {
			msc_buffer_multi_free(msc);
			return ret;
		}
	}

	msc_buffer_relink(msc);

	return 0;
}

/**
 * msc_buffer_free() - free buffers for MSC
 * @msc: MSC device
 *
 * Free MSC's storage buffers.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex to
 * serialize, so the caller is expected to hold it.
 */
static void msc_buffer_free(struct msc *msc)
{
	msc_buffer_set_wb(msc);

	if (msc->mode == MSC_MODE_SINGLE)
		msc_buffer_contig_free(msc);
	else if (msc->mode == MSC_MODE_MULTI)
		msc_buffer_multi_free(msc);
}

/**
 * msc_buffer_alloc() - allocate a buffer for MSC
 * @msc: MSC device
 * @nr_pages: number of pages for each window
 * @nr_wins: number of windows
 *
 * Allocate a storage buffer for MSC, depending on the msc::mode, it will be
 * either done via msc_buffer_contig_alloc() for SINGLE operation mode or
 * msc_buffer_win_alloc() for multiblock operation. The latter allocates one
 * window per invocation, so in multiblock mode this can be called multiple
 * times for the same MSC to allocate multiple windows.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages,
			    unsigned int nr_wins)
{
	int ret;

	/* -1: buffer not allocated */
	if (atomic_read(&msc->user_count) != -1)
		return -EBUSY;

	if (msc->mode == MSC_MODE_SINGLE) {
		if (nr_wins != 1)
			return -EINVAL;

		ret = msc_buffer_contig_alloc(msc, nr_pages[0] << PAGE_SHIFT);
	} else if (msc->mode == MSC_MODE_MULTI) {
		ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins);
	} else {
		ret = -EINVAL;
	}

	if (!ret) {
		msc_buffer_set_uc(msc);

		/* allocation should be visible before the counter goes to 0 */
		smp_mb__before_atomic();

		if (WARN_ON_ONCE(atomic_cmpxchg(&msc->user_count, -1, 0) != -1))
			return -EINVAL;
	}

	return ret;
}

/**
 * msc_buffer_unlocked_free_unless_used() - free a buffer unless it's in use
 * @msc: MSC device
 *
 * This will free MSC buffer unless it is in use or there is no allocated
 * buffer.
 * Caller needs to hold msc::buf_mutex.
 *
 * Return: 0 on successful deallocation or if there was no buffer to
 * deallocate, -EBUSY if there are active users.
 */
static int msc_buffer_unlocked_free_unless_used(struct msc *msc)
{
	int count, ret = 0;

	count = atomic_cmpxchg(&msc->user_count, 0, -1);

	/* > 0: buffer is allocated and has users */
	if (count > 0)
		ret = -EBUSY;
	/* 0: buffer is allocated, no users */
	else if (!count)
		msc_buffer_free(msc);
	/* < 0: no buffer, nothing to do */

	return ret;
}

/**
 * msc_buffer_free_unless_used() - free a buffer unless it's in use
 * @msc: MSC device
 *
 * This is a locked version of msc_buffer_unlocked_free_unless_used().
 *
 * Return: 0 on successful deallocation or if there was no buffer to
 * deallocate, -EBUSY if there are active users.
 */
static int msc_buffer_free_unless_used(struct msc *msc)
{
	int ret;

	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_unlocked_free_unless_used(msc);
	mutex_unlock(&msc->buf_mutex);

	return ret;
}

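/*
 * msc::user_count lifecycle, by example (a summary of the code above
 * and the fops below, not new mechanism):
 *
 *	-1	no buffer; msc_buffer_alloc() moves -1 -> 0 via cmpxchg
 *	 0	buffer allocated, idle; read()/mmap()/activate() take
 *		references with atomic_inc_unless_negative()
 *	>0	buffer in use; the free-unless-used helpers see
 *		cmpxchg(0, -1) fail and return -EBUSY
 *
 * Freeing takes 0 -> -1 atomically, so a new user cannot slip in
 * between the check and the free.
 */
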
/**
 * msc_buffer_get_page() - get MSC buffer page at a given offset
 * @msc: MSC device
 * @pgoff: page offset into the storage buffer
 *
 * This traverses msc::win_list, so holding msc::buf_mutex is expected from
 * the caller.
 *
 * Return: page if @pgoff corresponds to a valid buffer page or NULL.
 */
static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
{
	struct msc_window *win;
	struct scatterlist *sg;
	unsigned int blk;

	if (msc->mode == MSC_MODE_SINGLE)
		return msc_buffer_contig_get_page(msc, pgoff);

	list_for_each_entry(win, &msc->win_list, entry)
		if (pgoff >= win->pgoff && pgoff < win->pgoff + win->nr_blocks)
			goto found;

	return NULL;

found:
	pgoff -= win->pgoff;

	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
		struct page *page = msc_sg_page(sg);
		size_t pgsz = PFN_DOWN(sg->length);

		if (pgoff < pgsz)
			return page + pgoff;

		pgoff -= pgsz;
	}

	return NULL;
}

/**
 * struct msc_win_to_user_struct - data for copy_to_user() callback
 * @buf: userspace buffer to copy data to
 * @offset: running offset
 */
struct msc_win_to_user_struct {
	char __user	*buf;
	unsigned long	offset;
};

/**
 * msc_win_to_user() - iterator for msc_buffer_iterate() to copy data to user
 * @data: callback's private data
 * @src: source buffer
 * @len: amount of data to copy from the source buffer
 *
 * Return: number of bytes that could not be copied, like copy_to_user();
 * %0 means all @len bytes were copied.
 */
static unsigned long msc_win_to_user(void *data, void *src, size_t len)
{
	struct msc_win_to_user_struct *u = data;
	unsigned long ret;

	ret = copy_to_user(u->buf + u->offset, src, len);
	u->offset += len - ret;

	return ret;
}

/*
 * file operations' callbacks
 */

static int intel_th_msc_open(struct inode *inode, struct file *file)
{
	struct intel_th_device *thdev = file->private_data;
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	struct msc_iter *iter;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	iter = msc_iter_install(msc);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	file->private_data = iter;

	return nonseekable_open(inode, file);
}

static int intel_th_msc_release(struct inode *inode, struct file *file)
{
	struct msc_iter *iter = file->private_data;
	struct msc *msc = iter->msc;

	msc_iter_remove(iter, msc);

	return 0;
}

static ssize_t
msc_single_to_user(struct msc *msc, char __user *buf, loff_t off, size_t len)
{
	unsigned long size = msc->nr_pages << PAGE_SHIFT, rem = len;
	unsigned long start = off, tocopy = 0;

	if (msc->single_wrap) {
		start += msc->single_sz;
		if (start < size) {
			tocopy = min(rem, size - start);
			if (copy_to_user(buf, msc->base + start, tocopy))
				return -EFAULT;

			buf += tocopy;
			rem -= tocopy;
			start += tocopy;
		}

		start &= size - 1;
		if (rem) {
			tocopy = min(rem, msc->single_sz - start);
			if (copy_to_user(buf, msc->base + start, tocopy))
				return -EFAULT;

			rem -= tocopy;
		}

		return len - rem;
	}

	if (copy_to_user(buf, msc->base + start, rem))
		return -EFAULT;

	return len;
}

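/*
 * Illustrative layout for the wrapped SINGLE mode case above: after a
 * hardware wrap, the oldest data starts at the current write pointer
 * (single_sz) and runs to the end of the buffer, while the newest data
 * occupies [0..single_sz). A read at logical offset 0 therefore copies
 *
 *	physical [single_sz .. size) first, then physical [0 .. single_sz)
 *
 * which is what the two copy_to_user() legs implement. Note that the
 * "start &= size - 1" wrap-around only behaves as a modulo for
 * power-of-two buffer sizes.
 */
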
static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
				 size_t len, loff_t *ppos)
{
	struct msc_iter *iter = file->private_data;
	struct msc *msc = iter->msc;
	size_t size;
	loff_t off = *ppos;
	ssize_t ret = 0;

	if (!atomic_inc_unless_negative(&msc->user_count))
		return 0;

	if (msc->mode == MSC_MODE_SINGLE && !msc->single_wrap)
		size = msc->single_sz;
	else
		size = msc->nr_pages << PAGE_SHIFT;

	if (!size)
		goto put_count;

	if (off >= size)
		goto put_count;

	if (off + len >= size)
		len = size - off;

	if (msc->mode == MSC_MODE_SINGLE) {
		ret = msc_single_to_user(msc, buf, off, len);
		if (ret >= 0)
			*ppos += ret;
	} else if (msc->mode == MSC_MODE_MULTI) {
		struct msc_win_to_user_struct u = {
			.buf	= buf,
			.offset	= 0,
		};

		ret = msc_buffer_iterate(iter, len, &u, msc_win_to_user);
		if (ret >= 0)
			*ppos = iter->offset;
	} else {
		ret = -EINVAL;
	}

put_count:
	atomic_dec(&msc->user_count);

	return ret;
}

/*
 * vm operations callbacks (vm_ops)
 */

static void msc_mmap_open(struct vm_area_struct *vma)
{
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;

	atomic_inc(&msc->mmap_count);
}

static void msc_mmap_close(struct vm_area_struct *vma)
{
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;

	if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex))
		return;

	/* last mapping -- drop user_count */
	atomic_dec(&msc->user_count);
	mutex_unlock(&msc->buf_mutex);
}

static vm_fault_t msc_mmap_fault(struct vm_fault *vmf)
{
	struct msc_iter *iter = vmf->vma->vm_file->private_data;
	struct msc *msc = iter->msc;
	struct page *page;

	page = msc_buffer_get_page(msc, vmf->pgoff);
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);
	return vmf_insert_mixed(vmf->vma, vmf->address, page_to_pfn_t(page));
}

static const struct vm_operations_struct msc_mmap_ops = {
	.open	= msc_mmap_open,
	.close	= msc_mmap_close,
	.fault	= msc_mmap_fault,
};

static int intel_th_msc_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;
	int ret = -EINVAL;

	if (!size || offset_in_page(size))
		return -EINVAL;

	if (vma->vm_pgoff)
		return -EINVAL;

	/* grab user_count once per mmap; drop in msc_mmap_close() */
	if (!atomic_inc_unless_negative(&msc->user_count))
		return -EINVAL;

	if (msc->mode != MSC_MODE_SINGLE &&
	    msc->mode != MSC_MODE_MULTI)
		goto out;

	if (size >> PAGE_SHIFT != msc->nr_pages)
		goto out;

	atomic_set(&msc->mmap_count, 1);
	ret = 0;

out:
	if (ret)
		atomic_dec(&msc->user_count);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTCOPY | VM_MIXEDMAP);
	vma->vm_ops = &msc_mmap_ops;
	return ret;
}

static const struct file_operations intel_th_msc_fops = {
	.open		= intel_th_msc_open,
	.release	= intel_th_msc_release,
	.read		= intel_th_msc_read,
	.mmap		= intel_th_msc_mmap,
	.owner		= THIS_MODULE,
};

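/*
 * Userspace view (illustrative sketch; the exact character device path
 * depends on how the intel_th core named the device, e.g.
 * /dev/intel_th0/msc0):
 *
 *	int fd = open("/dev/intel_th0/msc0", O_RDONLY);
 *
 *	// either stream the buffer out...
 *	char buf[4096];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *
 *	// ...or map the whole thing; per the checks above, the length
 *	// must equal the full buffer (nr_pages worth) and offset 0
 *	void *p = mmap(NULL, buf_size, PROT_READ, MAP_SHARED, fd, 0);
 *
 * Opening requires CAP_SYS_RAWIO and fails with -EBUSY while the trace
 * is enabled.
 */
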
static void intel_th_msc_wait_empty(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	unsigned long count;
	u32 reg;

	for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH;
	     count && !(reg & MSCSTS_PLE); count--) {
		reg = __raw_readl(msc->reg_base + REG_MSU_MSC0STS);
		cpu_relax();
	}

	if (!count)
		dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n");
}

static int intel_th_msc_init(struct msc *msc)
{
	atomic_set(&msc->user_count, -1);

	msc->mode = msc->multi_is_broken ? MSC_MODE_SINGLE : MSC_MODE_MULTI;
	mutex_init(&msc->buf_mutex);
	INIT_LIST_HEAD(&msc->win_list);
	INIT_LIST_HEAD(&msc->iter_list);

	msc->burst_len =
		(ioread32(msc->reg_base + REG_MSU_MSC0CTL) & MSC_LEN) >>
		__ffs(MSC_LEN);

	return 0;
}

static int msc_win_switch(struct msc *msc)
{
	struct msc_window *first;

	if (list_empty(&msc->win_list))
		return -EINVAL;

	first = list_first_entry(&msc->win_list, struct msc_window, entry);

	if (msc_is_last_win(msc->cur_win))
		msc->cur_win = first;
	else
		msc->cur_win = list_next_entry(msc->cur_win, entry);

	msc->base = msc_win_base(msc->cur_win);
	msc->base_addr = msc_win_base_dma(msc->cur_win);

	intel_th_trace_switch(msc->thdev);

	return 0;
}

/**
 * intel_th_msc_window_unlock - put the window back in rotation
 * @dev: MSC device to which this relates
 * @sgt: buffer's sg_table for the window; if NULL, the call is a no-op
 */
void intel_th_msc_window_unlock(struct device *dev, struct sg_table *sgt)
{
	struct msc *msc = dev_get_drvdata(dev);
	struct msc_window *win;

	if (!sgt)
		return;

	win = msc_find_window(msc, sgt, false);
	if (!win)
		return;

	msc_win_set_lockout(win, WIN_LOCKED, WIN_READY);
	if (msc->switch_on_unlock == win) {
		msc->switch_on_unlock = NULL;
		msc_win_switch(msc);
	}
}
EXPORT_SYMBOL_GPL(intel_th_msc_window_unlock);

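/*
 * Illustrative flow for a buffer sink consuming a window (my_* names
 * hypothetical): the sink's ->ready() callback receives the window's
 * sg_table and the amount of valid data in it; once the sink has
 * drained the data, it returns the window to rotation:
 *
 *	static int my_ready(void *priv, struct sg_table *sgt, size_t bytes)
 *	{
 *		struct my_priv *p = priv;
 *
 *		my_queue_for_processing(p, sgt, bytes);
 *		return 0;
 *	}
 *
 *	// later, from the sink's processing context:
 *	intel_th_msc_window_unlock(p->dev, sgt);
 *
 * where p->dev is the MSC device that was handed to the sink's
 * ->assign() callback.
 */
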
static void msc_work(struct work_struct *work)
{
	struct msc *msc = container_of(work, struct msc, work);

	intel_th_msc_deactivate(msc->thdev);
}

static irqreturn_t intel_th_msc_interrupt(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	u32 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
	u32 mask = msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
	struct msc_window *win, *next_win;

	if (!msc->do_irq || !msc->mbuf)
		return IRQ_NONE;

	msusts &= mask;

	if (!msusts)
		return msc->enabled ? IRQ_HANDLED : IRQ_NONE;

	iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);

	if (!msc->enabled)
		return IRQ_NONE;

	/* grab the window before we do the switch */
	win = msc->cur_win;
	if (!win)
		return IRQ_HANDLED;
	next_win = msc_next_window(win);
	if (!next_win)
		return IRQ_HANDLED;

	/* next window: if READY, proceed, if LOCKED, stop the trace */
	if (msc_win_set_lockout(next_win, WIN_READY, WIN_INUSE)) {
		if (msc->stop_on_full)
			schedule_work(&msc->work);
		else
			msc->switch_on_unlock = next_win;

		return IRQ_HANDLED;
	}

	/* current window: INUSE -> LOCKED */
	msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);

	msc_win_switch(msc);

	if (msc->mbuf && msc->mbuf->ready)
		msc->mbuf->ready(msc->mbuf_priv, win->sgt,
				 msc_win_total_sz(win));

	return IRQ_HANDLED;
}

static const char * const msc_mode[] = {
	[MSC_MODE_SINGLE]	= "single",
	[MSC_MODE_MULTI]	= "multi",
	[MSC_MODE_EXI]		= "ExI",
	[MSC_MODE_DEBUG]	= "debug",
};

static ssize_t
wrap_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n", msc->wrap);
}

static ssize_t
wrap_store(struct device *dev, struct device_attribute *attr, const char *buf,
	   size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	msc->wrap = !!val;

	return size;
}

static DEVICE_ATTR_RW(wrap);

static void msc_buffer_unassign(struct msc *msc)
{
	lockdep_assert_held(&msc->buf_mutex);

	if (!msc->mbuf)
		return;

	msc->mbuf->unassign(msc->mbuf_priv);
	msu_buffer_put(msc->mbuf);
	msc->mbuf_priv = NULL;
	msc->mbuf = NULL;
}

static ssize_t
mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);
	const char *mode = msc_mode[msc->mode];
	ssize_t ret;

	mutex_lock(&msc->buf_mutex);
	if (msc->mbuf)
		mode = msc->mbuf->name;
	ret = scnprintf(buf, PAGE_SIZE, "%s\n", mode);
	mutex_unlock(&msc->buf_mutex);

	return ret;
}

static ssize_t
mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
	   size_t size)
{
	const struct msu_buffer *mbuf = NULL;
	struct msc *msc = dev_get_drvdata(dev);
	size_t len = size;
	char *cp, *mode;
	int i, ret;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	cp = memchr(buf, '\n', len);
	if (cp)
		len = cp - buf;

	mode = kstrndup(buf, len, GFP_KERNEL);
	if (!mode)
		return -ENOMEM;

	i = match_string(msc_mode, ARRAY_SIZE(msc_mode), mode);
	if (i >= 0) {
		kfree(mode);
		goto found;
	}

	/* Buffer sinks only work with a usable IRQ */
	if (!msc->do_irq) {
		kfree(mode);
		return -EINVAL;
	}

	mbuf = msu_buffer_get(mode);
	kfree(mode);
	if (mbuf)
		goto found;

	return -EINVAL;

found:
	if (i == MSC_MODE_MULTI && msc->multi_is_broken)
		return -EOPNOTSUPP;

	mutex_lock(&msc->buf_mutex);
	ret = 0;

	/* Same buffer: do nothing */
	if (mbuf && mbuf == msc->mbuf) {
		/* put the extra reference we just got */
		msu_buffer_put(mbuf);
		goto unlock;
	}

	ret = msc_buffer_unlocked_free_unless_used(msc);
	if (ret)
		goto unlock;

	if (mbuf) {
		void *mbuf_priv = mbuf->assign(dev, &i);

		if (!mbuf_priv) {
			ret = -ENOMEM;
			goto unlock;
		}

		msc_buffer_unassign(msc);
		msc->mbuf_priv = mbuf_priv;
		msc->mbuf = mbuf;
	} else {
		msc_buffer_unassign(msc);
	}

	msc->mode = i;

unlock:
	if (ret && mbuf)
		msu_buffer_put(mbuf);
	mutex_unlock(&msc->buf_mutex);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(mode);

static ssize_t
nr_pages_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);
	struct msc_window *win;
	size_t count = 0;

	mutex_lock(&msc->buf_mutex);

	if (msc->mode == MSC_MODE_SINGLE)
		count = scnprintf(buf, PAGE_SIZE, "%ld\n", msc->nr_pages);
	else if (msc->mode == MSC_MODE_MULTI) {
		list_for_each_entry(win, &msc->win_list, entry) {
			count += scnprintf(buf + count, PAGE_SIZE - count,
					   "%d%c", win->nr_blocks,
					   msc_is_last_win(win) ? '\n' : ',');
		}
	} else {
		count = scnprintf(buf, PAGE_SIZE, "unsupported\n");
	}

	mutex_unlock(&msc->buf_mutex);

	return count;
}

static ssize_t
nr_pages_store(struct device *dev, struct device_attribute *attr,
	       const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val, *win = NULL, *rewin;
	size_t len = size;
	const char *p = buf;
	char *end, *s;
	int ret, nr_wins = 0;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	ret = msc_buffer_free_unless_used(msc);
	if (ret)
		return ret;

	/* scan the comma-separated list of allocation sizes */
	end = memchr(buf, '\n', len);
	if (end)
		len = end - buf;

	do {
		end = memchr(p, ',', len);
		s = kstrndup(p, end ? end - p : len, GFP_KERNEL);
		if (!s) {
			ret = -ENOMEM;
			goto free_win;
		}

		ret = kstrtoul(s, 10, &val);
		kfree(s);

		if (ret || !val)
			goto free_win;

		if (nr_wins && msc->mode == MSC_MODE_SINGLE) {
			ret = -EINVAL;
			goto free_win;
		}

		nr_wins++;
		rewin = krealloc_array(win, nr_wins, sizeof(*win), GFP_KERNEL);
		if (!rewin) {
			kfree(win);
			return -ENOMEM;
		}

		win = rewin;
		win[nr_wins - 1] = val;

		if (!end)
			break;

		/* consume the number and the following comma, hence +1 */
		len -= end - p + 1;
		p = end + 1;
	} while (len);

	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_alloc(msc, win, nr_wins);
	mutex_unlock(&msc->buf_mutex);

free_win:
	kfree(win);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(nr_pages);

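/*
 * Typical sysfs configuration sequence (illustrative; the attribute
 * directory lives under the MSC's output device, e.g.
 * /sys/bus/intel_th/devices/0-msc0):
 *
 *	echo multi > mode	# or "single", or a registered sink name
 *	echo 64,64 > nr_pages	# two 64-page windows (one number for single)
 *	echo 1 > win_switch	# optional: force a window switch (multi only)
 *
 * Both "mode" and "nr_pages" require CAP_SYS_RAWIO and a buffer that is
 * not currently in use.
 */
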
static ssize_t
win_switch_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	if (val != 1)
		return -EINVAL;

	ret = -EINVAL;
	mutex_lock(&msc->buf_mutex);
	/*
	 * Window switch can only happen in the "multi" mode.
	 * If an external buffer sink is engaged, it has full
	 * control over window switching.
	 */
	if (msc->mode == MSC_MODE_MULTI && !msc->mbuf)
		ret = msc_win_switch(msc);
	mutex_unlock(&msc->buf_mutex);

	return ret ? ret : size;
}

static DEVICE_ATTR_WO(win_switch);

static ssize_t stop_on_full_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", msc->stop_on_full);
}

static ssize_t stop_on_full_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	int ret;

	ret = kstrtobool(buf, &msc->stop_on_full);
	if (ret)
		return ret;

	return size;
}

static DEVICE_ATTR_RW(stop_on_full);

static struct attribute *msc_output_attrs[] = {
	&dev_attr_wrap.attr,
	&dev_attr_mode.attr,
	&dev_attr_nr_pages.attr,
	&dev_attr_win_switch.attr,
	&dev_attr_stop_on_full.attr,
	NULL,
};

static const struct attribute_group msc_output_group = {
	.attrs	= msc_output_attrs,
};

static int intel_th_msc_probe(struct intel_th_device *thdev)
{
	struct device *dev = &thdev->dev;
	struct resource *res;
	struct msc *msc;
	void __iomem *base;
	int err;

	res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	base = devm_ioremap(dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	msc = devm_kzalloc(dev, sizeof(*msc), GFP_KERNEL);
	if (!msc)
		return -ENOMEM;

	res = intel_th_device_get_resource(thdev, IORESOURCE_IRQ, 1);
	if (!res)
		msc->do_irq = 1;

	if (INTEL_TH_CAP(to_intel_th(thdev), multi_is_broken))
		msc->multi_is_broken = 1;

	msc->index = thdev->id;

	msc->thdev = thdev;
	msc->reg_base = base + msc->index * 0x100;
	msc->msu_base = base;

	INIT_WORK(&msc->work, msc_work);
	err = intel_th_msc_init(msc);
	if (err)
		return err;

	dev_set_drvdata(dev, msc);

	return 0;
}

static void intel_th_msc_remove(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	int ret;

	intel_th_msc_deactivate(thdev);

	/*
	 * Buffers should not be used at this point except if the
	 * output character device is still open and the parent
	 * device gets detached from its bus, which is a FIXME.
	 */
	ret = msc_buffer_free_unless_used(msc);
	WARN_ON_ONCE(ret);
}

static struct intel_th_driver intel_th_msc_driver = {
	.probe		= intel_th_msc_probe,
	.remove		= intel_th_msc_remove,
	.irq		= intel_th_msc_interrupt,
	.wait_empty	= intel_th_msc_wait_empty,
	.activate	= intel_th_msc_activate,
	.deactivate	= intel_th_msc_deactivate,
	.fops		= &intel_th_msc_fops,
	.attr_group	= &msc_output_group,
	.driver	= {
		.name	= "msc",
		.owner	= THIS_MODULE,
	},
};

module_driver(intel_th_msc_driver,
	      intel_th_driver_register,
	      intel_th_driver_unregister);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel(R) Trace Hub Memory Storage Unit driver");
MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");