// SPDX-License-Identifier: GPL-2.0
/*
 * Intel(R) Trace Hub Memory Storage Unit
 *
 * Copyright (C) 2014-2015 Intel Corporation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <linux/intel_th.h>
#include "intel_th.h"
#include "msu.h"

#define msc_dev(x) (&(x)->thdev->dev)

/*
 * Lockout state transitions:
 *   READY -> INUSE -+-> LOCKED -+-> READY -> etc.
 *                   \-----------/
 * WIN_READY: window can be used by HW
 * WIN_INUSE: window is in use
 * WIN_LOCKED: window is filled up and is being processed by the buffer
 * handling code
 *
 * All state transitions happen automatically, except for the LOCKED->READY,
 * which needs to be signalled by the buffer code by calling
 * intel_th_msc_window_unlock().
 *
 * When the interrupt handler has to switch to the next window, it checks
 * whether it's READY, and if it is, it performs the switch and tracing
 * continues. If it's LOCKED, it stops the trace.
 */
enum lockout_state {
	WIN_READY = 0,
	WIN_INUSE,
	WIN_LOCKED
};

/**
 * struct msc_window - multiblock mode window descriptor
 * @entry: window list linkage (msc::win_list)
 * @pgoff: page offset into the buffer that this window starts at
 * @lockout: lockout state, see comment above
 * @lo_lock: lockout state serialization
 * @nr_blocks: number of blocks (pages) in this window
 * @nr_segs: number of segments in this window (<= @nr_blocks)
 * @msc: pointer to the MSC device
 * @_sgt: array of block descriptors
 * @sgt: array of block descriptors
 */
struct msc_window {
	struct list_head entry;
	unsigned long pgoff;
	enum lockout_state lockout;
	spinlock_t lo_lock;
	unsigned int nr_blocks;
	unsigned int nr_segs;
	struct msc *msc;
	struct sg_table _sgt;
	struct sg_table *sgt;
};

/**
 * struct msc_iter - iterator for msc buffer
 * @entry: msc::iter_list linkage
 * @msc: pointer to the MSC device
 * @start_win: oldest window
 * @win: current window
 * @offset: current logical offset into the buffer
 * @start_block: oldest block in the window
 * @block: current block in the window
 * @block_off: offset into current block
 * @wrap_count: block wrapping handling
 * @eof: end of buffer reached
 */
struct msc_iter {
	struct list_head entry;
	struct msc *msc;
	struct msc_window *start_win;
	struct msc_window *win;
	unsigned long offset;
	struct scatterlist *start_block;
	struct scatterlist *block;
	unsigned int block_off;
	unsigned int wrap_count;
	unsigned int eof;
};

/**
 * struct msc - MSC device representation
 * @reg_base: register window base address for the entire MSU
 * @msu_base: register window base address for this MSC
 * @thdev: intel_th_device pointer
 * @mbuf: MSU buffer, if assigned
 * @mbuf_priv: MSU buffer's private data, if @mbuf
 * @work: a work to stop the trace when the buffer is full
 * @win_list: list of windows in multiblock mode
 * @single_sgt: single mode buffer
 * @cur_win: current window
 * @switch_on_unlock: window to switch to when it becomes available
 * @nr_pages: total number of pages allocated for this buffer
 * @single_sz: amount of data in single mode
 * @single_wrap: single mode wrap occurred
 * @base: buffer's base pointer
 * @base_addr: buffer's base address
 * @orig_addr: MSC0 buffer's base address
 * @orig_sz: MSC0 buffer's size
 * @user_count: number of users of the buffer
 * @mmap_count: number of mappings
 * @buf_mutex: mutex to serialize access to buffer-related bits
 * @iter_list: list of open file descriptor iterators
 * @stop_on_full: stop the trace if the current window is full
 * @enabled: MSC is enabled
 * @wrap: wrapping is enabled
 * @do_irq: IRQ resource is available, handle interrupts
 * @multi_is_broken: multiblock mode is broken on this device (quirk set
 *                   via PCI drvdata), fall back to single mode
 * @mode: MSC operating mode
 * @burst_len: write burst length
 * @index: number of this MSC in the MSU
 */
struct msc {
	void __iomem *reg_base;
	void __iomem *msu_base;
	struct intel_th_device *thdev;

	const struct msu_buffer *mbuf;
	void *mbuf_priv;

	struct work_struct work;
	struct list_head win_list;
	struct sg_table single_sgt;
	struct msc_window *cur_win;
	struct msc_window *switch_on_unlock;
	unsigned long nr_pages;
	unsigned long single_sz;
	unsigned int single_wrap : 1;
	void *base;
	dma_addr_t base_addr;
	u32 orig_addr;
	u32 orig_sz;

	/* <0: no buffer, 0: no users, >0: active users */
	atomic_t user_count;

	atomic_t mmap_count;
	struct mutex buf_mutex;

	struct list_head iter_list;

	bool stop_on_full;

	/* config */
	unsigned int enabled : 1,
		     wrap : 1,
		     do_irq : 1,
		     multi_is_broken : 1;
	unsigned int mode;
	unsigned int burst_len;
	unsigned int index;
};

static LIST_HEAD(msu_buffer_list);
static DEFINE_MUTEX(msu_buffer_mutex);

/**
 * struct msu_buffer_entry - internal MSU buffer bookkeeping
 * @entry: link to msu_buffer_list
 * @mbuf: MSU buffer object
 * @owner: module that provides this MSU buffer
 */
struct msu_buffer_entry {
	struct list_head entry;
	const struct msu_buffer *mbuf;
	struct module *owner;
};

static struct msu_buffer_entry *__msu_buffer_entry_find(const char *name)
{
	struct msu_buffer_entry *mbe;

	lockdep_assert_held(&msu_buffer_mutex);

	list_for_each_entry(mbe, &msu_buffer_list, entry) {
		if (!strcmp(mbe->mbuf->name, name))
			return mbe;
	}

	return NULL;
}

static const struct msu_buffer *
msu_buffer_get(const char *name)
{
	struct msu_buffer_entry *mbe;

	mutex_lock(&msu_buffer_mutex);
	mbe = __msu_buffer_entry_find(name);
	if (mbe && !try_module_get(mbe->owner))
		mbe = NULL;
	mutex_unlock(&msu_buffer_mutex);

	return mbe ? mbe->mbuf : NULL;
}
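
/*
 * Drop the module reference taken by msu_buffer_get(); the entry is
 * looked up by name again, since the caller only holds the buffer
 * pointer.
 */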
static void msu_buffer_put(const struct msu_buffer *mbuf)
{
	struct msu_buffer_entry *mbe;

	mutex_lock(&msu_buffer_mutex);
	mbe = __msu_buffer_entry_find(mbuf->name);
	if (mbe)
		module_put(mbe->owner);
	mutex_unlock(&msu_buffer_mutex);
}

int intel_th_msu_buffer_register(const struct msu_buffer *mbuf,
				 struct module *owner)
{
	struct msu_buffer_entry *mbe;
	int ret = 0;

	mbe = kzalloc(sizeof(*mbe), GFP_KERNEL);
	if (!mbe)
		return -ENOMEM;

	mutex_lock(&msu_buffer_mutex);
	if (__msu_buffer_entry_find(mbuf->name)) {
		ret = -EEXIST;
		kfree(mbe);
		goto unlock;
	}

	mbe->mbuf = mbuf;
	mbe->owner = owner;
	list_add_tail(&mbe->entry, &msu_buffer_list);
unlock:
	mutex_unlock(&msu_buffer_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(intel_th_msu_buffer_register);

void intel_th_msu_buffer_unregister(const struct msu_buffer *mbuf)
{
	struct msu_buffer_entry *mbe;

	mutex_lock(&msu_buffer_mutex);
	mbe = __msu_buffer_entry_find(mbuf->name);
	if (mbe) {
		list_del(&mbe->entry);
		kfree(mbe);
	}
	mutex_unlock(&msu_buffer_mutex);
}
EXPORT_SYMBOL_GPL(intel_th_msu_buffer_unregister);

static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
{
	/* header hasn't been written */
	if (!bdesc->valid_dw)
		return true;

	/* valid_dw includes the header */
	if (!msc_data_sz(bdesc))
		return true;

	return false;
}

static inline struct scatterlist *msc_win_base_sg(struct msc_window *win)
{
	return win->sgt->sgl;
}

static inline struct msc_block_desc *msc_win_base(struct msc_window *win)
{
	return sg_virt(msc_win_base_sg(win));
}

static inline dma_addr_t msc_win_base_dma(struct msc_window *win)
{
	return sg_dma_address(msc_win_base_sg(win));
}

static inline unsigned long
msc_win_base_pfn(struct msc_window *win)
{
	return PFN_DOWN(msc_win_base_dma(win));
}

/**
 * msc_is_last_win() - check if a window is the last one for a given MSC
 * @win: window
 * Return: true if @win is the last window in MSC's multiblock buffer
 */
static inline bool msc_is_last_win(struct msc_window *win)
{
	return win->entry.next == &win->msc->win_list;
}

/**
 * msc_next_window() - return next window in the multiblock buffer
 * @win: current window
 *
 * Return: window following the current one
 */
static struct msc_window *msc_next_window(struct msc_window *win)
{
	if (msc_is_last_win(win))
		return list_first_entry(&win->msc->win_list, struct msc_window,
					entry);

	return list_next_entry(win, entry);
}
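
/*
 * Calculate the amount of data in a given window: the full window size
 * in bytes if it has wrapped, otherwise the sum of its blocks' sizes up
 * to and including the last written block.
 */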
static size_t msc_win_total_sz(struct msc_window *win)
{
	struct scatterlist *sg;
	unsigned int blk;
	size_t size = 0;

	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
		struct msc_block_desc *bdesc = sg_virt(sg);

		if (msc_block_wrapped(bdesc))
			return (size_t)win->nr_blocks << PAGE_SHIFT;

		size += msc_total_sz(bdesc);
		if (msc_block_last_written(bdesc))
			break;
	}

	return size;
}

/**
 * msc_find_window() - find a window matching a given sg_table
 * @msc: MSC device
 * @sgt: SG table of the window
 * @nonempty: skip over empty windows
 *
 * Return: MSC window structure pointer or NULL if the window
 * could not be found.
 */
static struct msc_window *
msc_find_window(struct msc *msc, struct sg_table *sgt, bool nonempty)
{
	struct msc_window *win;
	unsigned int found = 0;

	if (list_empty(&msc->win_list))
		return NULL;

	/*
	 * we might need a radix tree for this, depending on how
	 * many windows a typical user would allocate; ideally it's
	 * something like 2, in which case we're good
	 */
	list_for_each_entry(win, &msc->win_list, entry) {
		if (win->sgt == sgt)
			found++;

		/* skip the empty ones */
		if (nonempty && msc_block_is_empty(msc_win_base(win)))
			continue;

		if (found)
			return win;
	}

	return NULL;
}

/**
 * msc_oldest_window() - locate the window with oldest data
 * @msc: MSC device
 *
 * This should only be used in multiblock mode. Caller should hold the
 * msc::user_count reference.
 *
 * Return: the oldest window with valid data
 */
static struct msc_window *msc_oldest_window(struct msc *msc)
{
	struct msc_window *win;

	if (list_empty(&msc->win_list))
		return NULL;

	win = msc_find_window(msc, msc_next_window(msc->cur_win)->sgt, true);
	if (win)
		return win;

	return list_first_entry(&msc->win_list, struct msc_window, entry);
}

/**
 * msc_win_oldest_sg() - locate the oldest block in a given window
 * @win: window to look at
 *
 * Return: scatterlist entry of the block with the oldest data
 */
static struct scatterlist *msc_win_oldest_sg(struct msc_window *win)
{
	unsigned int blk;
	struct scatterlist *sg;
	struct msc_block_desc *bdesc = msc_win_base(win);

	/* without wrapping, first block is the oldest */
	if (!msc_block_wrapped(bdesc))
		return msc_win_base_sg(win);

	/*
	 * with wrapping, last written block contains both the newest and the
	 * oldest data for this window.
	 */
	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
		struct msc_block_desc *bdesc = sg_virt(sg);

		if (msc_block_last_written(bdesc))
			return sg;
	}

	return msc_win_base_sg(win);
}

static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter)
{
	return sg_virt(iter->block);
}

static struct msc_iter *msc_iter_install(struct msc *msc)
{
	struct msc_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&msc->buf_mutex);

	/*
	 * Reading and tracing are mutually exclusive; if msc is
	 * enabled, open() will fail; otherwise existing readers
	 * will prevent enabling the msc and the rest of fops don't
	 * need to worry about it.
	 */
	if (msc->enabled) {
		kfree(iter);
		iter = ERR_PTR(-EBUSY);
		goto unlock;
	}

	iter->msc = msc;

	list_add_tail(&iter->entry, &msc->iter_list);
unlock:
	mutex_unlock(&msc->buf_mutex);

	return iter;
}

static void msc_iter_remove(struct msc_iter *iter, struct msc *msc)
{
	mutex_lock(&msc->buf_mutex);
	list_del(&iter->entry);
	mutex_unlock(&msc->buf_mutex);

	kfree(iter);
}
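
/*
 * Position the iterator at the oldest block of its current window; a
 * wrapped window's last written block needs to be visited twice, hence
 * a wrap_count of 2 (see msc_buffer_iterate()).
 */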
static void msc_iter_block_start(struct msc_iter *iter)
{
	if (iter->start_block)
		return;

	iter->start_block = msc_win_oldest_sg(iter->win);
	iter->block = iter->start_block;
	iter->wrap_count = 0;

	/*
	 * start with the block with oldest data; if data has wrapped
	 * in this window, it should be in this block
	 */
	if (msc_block_wrapped(msc_iter_bdesc(iter)))
		iter->wrap_count = 2;
}

static int msc_iter_win_start(struct msc_iter *iter, struct msc *msc)
{
	/* already started, nothing to do */
	if (iter->start_win)
		return 0;

	iter->start_win = msc_oldest_window(msc);
	if (!iter->start_win)
		return -EINVAL;

	iter->win = iter->start_win;
	iter->start_block = NULL;

	msc_iter_block_start(iter);

	return 0;
}

static int msc_iter_win_advance(struct msc_iter *iter)
{
	iter->win = msc_next_window(iter->win);
	iter->start_block = NULL;

	if (iter->win == iter->start_win) {
		iter->eof++;
		return 1;
	}

	msc_iter_block_start(iter);

	return 0;
}
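
/*
 * Advance the iterator to the next block, moving on to the next window
 * once the newest data in the current one has been covered.
 *
 * Return: non-zero if the end of the buffer is reached, zero otherwise.
 */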
static int msc_iter_block_advance(struct msc_iter *iter)
{
	iter->block_off = 0;

	/* wrapping */
	if (iter->wrap_count && iter->block == iter->start_block) {
		iter->wrap_count--;
		if (!iter->wrap_count)
			/* copied newest data from the wrapped block */
			return msc_iter_win_advance(iter);
	}

	/* no wrapping, check for last written block */
	if (!iter->wrap_count && msc_block_last_written(msc_iter_bdesc(iter)))
		/* copied newest data for the window */
		return msc_iter_win_advance(iter);

	/* block advance */
	if (sg_is_last(iter->block))
		iter->block = msc_win_base_sg(iter->win);
	else
		iter->block = sg_next(iter->block);

	/* no wrapping, sanity check in case there is no last written block */
	if (!iter->wrap_count && iter->block == iter->start_block)
		return msc_iter_win_advance(iter);

	return 0;
}

/**
 * msc_buffer_iterate() - go through multiblock buffer's data
 * @iter: iterator structure
 * @size: amount of data to scan
 * @data: callback's private data
 * @fn: iterator callback
 *
 * This will start at the window which will be written to next (containing
 * the oldest data) and work its way to the current window, calling @fn
 * for each chunk of data as it goes.
 *
 * Caller should have msc::user_count reference to make sure the buffer
 * doesn't disappear from under us.
 *
 * Return: amount of data actually scanned.
 */
static ssize_t
msc_buffer_iterate(struct msc_iter *iter, size_t size, void *data,
		   unsigned long (*fn)(void *, void *, size_t))
{
	struct msc *msc = iter->msc;
	size_t len = size;
	unsigned int advance;

	if (iter->eof)
		return 0;

	/* start with the oldest window */
	if (msc_iter_win_start(iter, msc))
		return 0;

	do {
		unsigned long data_bytes = msc_data_sz(msc_iter_bdesc(iter));
		void *src = (void *)msc_iter_bdesc(iter) + MSC_BDESC;
		size_t tocopy = data_bytes, copied = 0;
		size_t remaining = 0;

		advance = 1;

		/*
		 * If block wrapping happened, we need to visit the last block
		 * twice, because it contains both the oldest and the newest
		 * data in this window.
		 *
		 * First time (wrap_count==2), in the very beginning, to collect
		 * the oldest data, which is in the range
		 * (data_bytes..DATA_IN_PAGE).
		 *
		 * Second time (wrap_count==1), it's just like any other block,
		 * containing data in the range of [MSC_BDESC..data_bytes].
		 */
		if (iter->block == iter->start_block && iter->wrap_count == 2) {
			tocopy = DATA_IN_PAGE - data_bytes;
			src += data_bytes;
		}

		if (!tocopy)
			goto next_block;

		tocopy -= iter->block_off;
		src += iter->block_off;

		if (len < tocopy) {
			tocopy = len;
			advance = 0;
		}

		remaining = fn(data, src, tocopy);

		if (remaining)
			advance = 0;

		copied = tocopy - remaining;
		len -= copied;
		iter->block_off += copied;
		iter->offset += copied;

		if (!advance)
			break;

next_block:
		if (msc_iter_block_advance(iter))
			break;

	} while (len);

	return size - len;
}

/**
 * msc_buffer_clear_hw_header() - clear hw header for multiblock
 * @msc: MSC device
 */
static void msc_buffer_clear_hw_header(struct msc *msc)
{
	struct msc_window *win;
	struct scatterlist *sg;

	list_for_each_entry(win, &msc->win_list, entry) {
		unsigned int blk;

		for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
			struct msc_block_desc *bdesc = sg_virt(sg);

			memset_startat(bdesc, 0, hw_tag);
		}
	}
}

static int intel_th_msu_init(struct msc *msc)
{
	u32 mintctl, msusts;

	if (!msc->do_irq)
		return 0;

	if (!msc->mbuf)
		return 0;

	mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
	mintctl |= msc->index ? M1BLIE : M0BLIE;
	iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
	if (mintctl != ioread32(msc->msu_base + REG_MSU_MINTCTL)) {
		dev_info(msc_dev(msc), "MINTCTL ignores writes: no usable interrupts\n");
		msc->do_irq = 0;
		return 0;
	}

	msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
	iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);

	return 0;
}

static void intel_th_msu_deinit(struct msc *msc)
{
	u32 mintctl;

	if (!msc->do_irq)
		return;

	mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
	mintctl &= msc->index ? ~M1BLIE : ~M0BLIE;
	iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
}
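
/*
 * Transition a window's lockout state from @expect to @new, holding a
 * msc::user_count reference for as long as the window stays LOCKED.
 *
 * Return: 0 on success, -EBUSY if a READY->INUSE transition finds the
 * window LOCKED, -EINVAL on any other state mismatch.
 */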
static int msc_win_set_lockout(struct msc_window *win,
			       enum lockout_state expect,
			       enum lockout_state new)
{
	enum lockout_state old;
	unsigned long flags;
	int ret = 0;

	if (!win->msc->mbuf)
		return 0;

	spin_lock_irqsave(&win->lo_lock, flags);
	old = win->lockout;

	if (old != expect) {
		ret = -EINVAL;
		goto unlock;
	}

	win->lockout = new;

	if (old == expect && new == WIN_LOCKED)
		atomic_inc(&win->msc->user_count);
	else if (old == expect && old == WIN_LOCKED)
		atomic_dec(&win->msc->user_count);

unlock:
	spin_unlock_irqrestore(&win->lo_lock, flags);

	if (ret) {
		if (expect == WIN_READY && old == WIN_LOCKED)
			return -EBUSY;

		/* from intel_th_msc_window_unlock(), don't warn if not locked */
		if (expect == WIN_LOCKED && old == new)
			return 0;

		dev_warn_ratelimited(msc_dev(win->msc),
				     "expected lockout state %d, got %d\n",
				     expect, old);
	}

	return ret;
}

/**
 * msc_configure() - set up MSC hardware
 * @msc: the MSC device to configure
 *
 * Program storage mode, wrapping, burst length and trace buffer address
 * into a given MSC. Then, enable tracing and set msc::enabled.
 * The latter is serialized on msc::buf_mutex, so make sure to hold it.
 *
 * Return: %0 for success or a negative error code otherwise.
 */
static int msc_configure(struct msc *msc)
{
	u32 reg;

	lockdep_assert_held(&msc->buf_mutex);

	if (msc->mode > MSC_MODE_MULTI)
		return -EINVAL;

	if (msc->mode == MSC_MODE_MULTI) {
		if (msc_win_set_lockout(msc->cur_win, WIN_READY, WIN_INUSE))
			return -EBUSY;

		msc_buffer_clear_hw_header(msc);
	}

	msc->orig_addr = ioread32(msc->reg_base + REG_MSU_MSC0BAR);
	msc->orig_sz = ioread32(msc->reg_base + REG_MSU_MSC0SIZE);

	reg = msc->base_addr >> PAGE_SHIFT;
	iowrite32(reg, msc->reg_base + REG_MSU_MSC0BAR);

	if (msc->mode == MSC_MODE_SINGLE) {
		reg = msc->nr_pages;
		iowrite32(reg, msc->reg_base + REG_MSU_MSC0SIZE);
	}

	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
	reg &= ~(MSC_MODE | MSC_WRAPEN | MSC_EN | MSC_RD_HDR_OVRD);

	reg |= MSC_EN;
	reg |= msc->mode << __ffs(MSC_MODE);
	reg |= msc->burst_len << __ffs(MSC_LEN);

	if (msc->wrap)
		reg |= MSC_WRAPEN;

	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);

	intel_th_msu_init(msc);

	msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI;
	intel_th_trace_enable(msc->thdev);
	msc->enabled = 1;

	if (msc->mbuf && msc->mbuf->activate)
		msc->mbuf->activate(msc->mbuf_priv);

	return 0;
}

/**
 * msc_disable() - disable MSC hardware
 * @msc: MSC device to disable
 *
 * If @msc is enabled, disable tracing on the switch and then disable MSC
 * storage. Caller must hold msc::buf_mutex.
 */
static void msc_disable(struct msc *msc)
{
	struct msc_window *win = msc->cur_win;
	u32 reg;

	lockdep_assert_held(&msc->buf_mutex);

	if (msc->mode == MSC_MODE_MULTI)
		msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);

	if (msc->mbuf && msc->mbuf->deactivate)
		msc->mbuf->deactivate(msc->mbuf_priv);
	intel_th_msu_deinit(msc);
	intel_th_trace_disable(msc->thdev);

	if (msc->mode == MSC_MODE_SINGLE) {
		reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
		msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT);

		reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP);
		msc->single_sz = reg & ((msc->nr_pages << PAGE_SHIFT) - 1);
		dev_dbg(msc_dev(msc), "MSCnMWP: %08x/%08lx, wrap: %d\n",
			reg, msc->single_sz, msc->single_wrap);
	}

	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
	reg &= ~MSC_EN;
	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);

	if (msc->mbuf && msc->mbuf->ready)
		msc->mbuf->ready(msc->mbuf_priv, win->sgt,
				 msc_win_total_sz(win));

	msc->enabled = 0;

	iowrite32(msc->orig_addr, msc->reg_base + REG_MSU_MSC0BAR);
	iowrite32(msc->orig_sz, msc->reg_base + REG_MSU_MSC0SIZE);

	dev_dbg(msc_dev(msc), "MSCnNWSA: %08x\n",
		ioread32(msc->reg_base + REG_MSU_MSC0NWSA));

	reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
	dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg);

	reg = ioread32(msc->reg_base + REG_MSU_MSUSTS);
	reg &= msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
	iowrite32(reg, msc->reg_base + REG_MSU_MSUSTS);
}

static int intel_th_msc_activate(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	int ret = -EBUSY;

	if (!atomic_inc_unless_negative(&msc->user_count))
		return -ENODEV;

	mutex_lock(&msc->buf_mutex);

	/* if there are readers, refuse */
	if (list_empty(&msc->iter_list))
		ret = msc_configure(msc);

	mutex_unlock(&msc->buf_mutex);

	if (ret)
		atomic_dec(&msc->user_count);

	return ret;
}

static void intel_th_msc_deactivate(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);

	mutex_lock(&msc->buf_mutex);
	if (msc->enabled) {
		msc_disable(msc);
		atomic_dec(&msc->user_count);
	}
	mutex_unlock(&msc->buf_mutex);
}

/**
 * msc_buffer_contig_alloc() - allocate a contiguous buffer for SINGLE mode
 * @msc: MSC device
 * @size: allocation size in bytes
 *
 * This modifies msc::base, which requires msc::buf_mutex to serialize, so the
 * caller is expected to hold it.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
{
	unsigned long nr_pages = size >> PAGE_SHIFT;
	unsigned int order = get_order(size);
	struct page *page;
	int ret;

	if (!size)
		return 0;

	ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL);
	if (ret)
		goto err_out;

	ret = -ENOMEM;
	page = alloc_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32, order);
	if (!page)
		goto err_free_sgt;

	split_page(page, order);
	sg_set_buf(msc->single_sgt.sgl, page_address(page), size);

	ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1,
			 DMA_FROM_DEVICE);
	if (ret < 0)
		goto err_free_pages;

	msc->nr_pages = nr_pages;
	msc->base = page_address(page);
	msc->base_addr = sg_dma_address(msc->single_sgt.sgl);

	return 0;

err_free_pages:
	__free_pages(page, order);

err_free_sgt:
	sg_free_table(&msc->single_sgt);

err_out:
	return ret;
}

/**
 * msc_buffer_contig_free() - free a contiguous buffer
 * @msc: MSC configured in SINGLE mode
 */
static void msc_buffer_contig_free(struct msc *msc)
{
	unsigned long off;

	dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl,
		     1, DMA_FROM_DEVICE);
	sg_free_table(&msc->single_sgt);

	for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
		struct page *page = virt_to_page(msc->base + off);

		page->mapping = NULL;
		__free_page(page);
	}

	msc->nr_pages = 0;
}

/**
 * msc_buffer_contig_get_page() - find a page at a given offset
 * @msc: MSC configured in SINGLE mode
 * @pgoff: page offset
 *
 * Return: page, if @pgoff is within the range, NULL otherwise.
 */
static struct page *msc_buffer_contig_get_page(struct msc *msc,
					       unsigned long pgoff)
{
	if (pgoff >= msc->nr_pages)
		return NULL;

	return virt_to_page(msc->base + (pgoff << PAGE_SHIFT));
}
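
/*
 * Allocate a window's worth of DMA-coherent blocks, one page each, and
 * populate @win's sg_table with them.
 *
 * Return: number of segments allocated or -ENOMEM.
 */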
static int __msc_buffer_win_alloc(struct msc_window *win,
				  unsigned int nr_segs)
{
	struct scatterlist *sg_ptr;
	void *block;
	int i, ret;

	ret = sg_alloc_table(win->sgt, nr_segs, GFP_KERNEL);
	if (ret)
		return -ENOMEM;

	for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {
		block = dma_alloc_coherent(msc_dev(win->msc)->parent->parent,
					   PAGE_SIZE, &sg_dma_address(sg_ptr),
					   GFP_KERNEL);
		if (!block)
			goto err_nomem;

		sg_set_buf(sg_ptr, block, PAGE_SIZE);
	}

	return nr_segs;

err_nomem:
	for_each_sg(win->sgt->sgl, sg_ptr, i, ret)
		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
				  sg_virt(sg_ptr), sg_dma_address(sg_ptr));

	sg_free_table(win->sgt);

	return -ENOMEM;
}

#ifdef CONFIG_X86
static void msc_buffer_set_uc(struct msc *msc)
{
	struct scatterlist *sg_ptr;
	struct msc_window *win;
	int i;

	if (msc->mode == MSC_MODE_SINGLE) {
		set_memory_uc((unsigned long)msc->base, msc->nr_pages);
		return;
	}

	list_for_each_entry(win, &msc->win_list, entry) {
		for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) {
			/* Set the page as uncached */
			set_memory_uc((unsigned long)sg_virt(sg_ptr),
				      PFN_DOWN(sg_ptr->length));
		}
	}
}

static void msc_buffer_set_wb(struct msc *msc)
{
	struct scatterlist *sg_ptr;
	struct msc_window *win;
	int i;

	if (msc->mode == MSC_MODE_SINGLE) {
		set_memory_wb((unsigned long)msc->base, msc->nr_pages);
		return;
	}

	list_for_each_entry(win, &msc->win_list, entry) {
		for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) {
			/* Reset the page to write-back */
			set_memory_wb((unsigned long)sg_virt(sg_ptr),
				      PFN_DOWN(sg_ptr->length));
		}
	}
}
#else /* !X86 */
static inline void
msc_buffer_set_uc(struct msc *msc) {}
static inline void msc_buffer_set_wb(struct msc *msc) {}
#endif /* CONFIG_X86 */
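
/*
 * An external buffer's blocks may live in vmalloc space; resolve the
 * backing page accordingly.
 */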
static struct page *msc_sg_page(struct scatterlist *sg)
{
	void *addr = sg_virt(sg);

	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);

	return sg_page(sg);
}

/**
 * msc_buffer_win_alloc() - alloc a window for a multiblock mode
 * @msc: MSC device
 * @nr_blocks: number of pages in this window
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
{
	struct msc_window *win;
	int ret = -ENOMEM;

	if (!nr_blocks)
		return 0;

	win = kzalloc(sizeof(*win), GFP_KERNEL);
	if (!win)
		return -ENOMEM;

	win->msc = msc;
	win->sgt = &win->_sgt;
	win->lockout = WIN_READY;
	spin_lock_init(&win->lo_lock);

	if (!list_empty(&msc->win_list)) {
		struct msc_window *prev = list_last_entry(&msc->win_list,
							  struct msc_window,
							  entry);

		win->pgoff = prev->pgoff + prev->nr_blocks;
	}

	if (msc->mbuf && msc->mbuf->alloc_window)
		ret = msc->mbuf->alloc_window(msc->mbuf_priv, &win->sgt,
					      nr_blocks << PAGE_SHIFT);
	else
		ret = __msc_buffer_win_alloc(win, nr_blocks);

	if (ret <= 0)
		goto err_nomem;

	win->nr_segs = ret;
	win->nr_blocks = nr_blocks;

	if (list_empty(&msc->win_list)) {
		msc->base = msc_win_base(win);
		msc->base_addr = msc_win_base_dma(win);
		msc->cur_win = win;
	}

	list_add_tail(&win->entry, &msc->win_list);
	msc->nr_pages += nr_blocks;

	return 0;

err_nomem:
	kfree(win);

	return ret;
}

static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(win->sgt->sgl, sg, win->nr_segs, i) {
		struct page *page = msc_sg_page(sg);

		page->mapping = NULL;
		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
				  sg_virt(sg), sg_dma_address(sg));
	}
	sg_free_table(win->sgt);
}

/**
 * msc_buffer_win_free() - free a window from MSC's window list
 * @msc: MSC device
 * @win: window to free
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 */
static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
	msc->nr_pages -= win->nr_blocks;

	list_del(&win->entry);
	if (list_empty(&msc->win_list)) {
		msc->base = NULL;
		msc->base_addr = 0;
	}

	if (msc->mbuf && msc->mbuf->free_window)
		msc->mbuf->free_window(msc->mbuf_priv, win->sgt);
	else
		__msc_buffer_win_free(msc, win);

	kfree(win);
}

/**
 * msc_buffer_relink() - set up block descriptors for multiblock mode
 * @msc: MSC device
 *
 * This traverses msc::win_list, which requires msc::buf_mutex to serialize,
 * so the caller is expected to hold it.
 */
static void msc_buffer_relink(struct msc *msc)
{
	struct msc_window *win, *next_win;

	/* call with msc::mutex locked */
	list_for_each_entry(win, &msc->win_list, entry) {
		struct scatterlist *sg;
		unsigned int blk;
		u32 sw_tag = 0;

		/*
		 * Last window's next_win should point to the first window
		 * and MSC_SW_TAG_LASTWIN should be set.
		 */
		if (msc_is_last_win(win)) {
			sw_tag |= MSC_SW_TAG_LASTWIN;
			next_win = list_first_entry(&msc->win_list,
						    struct msc_window, entry);
		} else {
			next_win = list_next_entry(win, entry);
		}

		for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
			struct msc_block_desc *bdesc = sg_virt(sg);

			memset(bdesc, 0, sizeof(*bdesc));

			bdesc->next_win = msc_win_base_pfn(next_win);

			/*
			 * Similarly to last window, last block should point
			 * to the first one.
			 */
			if (blk == win->nr_segs - 1) {
				sw_tag |= MSC_SW_TAG_LASTBLK;
				bdesc->next_blk = msc_win_base_pfn(win);
			} else {
				dma_addr_t addr = sg_dma_address(sg_next(sg));

				bdesc->next_blk = PFN_DOWN(addr);
			}

			bdesc->sw_tag = sw_tag;
			bdesc->block_sz = sg->length / 64;
		}
	}

	/*
	 * Make the above writes globally visible before tracing is
	 * enabled to make sure hardware sees them coherently.
	 */
	wmb();
}

static void msc_buffer_multi_free(struct msc *msc)
{
	struct msc_window *win, *iter;

	list_for_each_entry_safe(win, iter, &msc->win_list, entry)
		msc_buffer_win_free(msc, win);
}
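
/*
 * Allocate one window per entry of @nr_pages and link their block
 * descriptors together; unwind all windows if any allocation fails.
 */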
static int msc_buffer_multi_alloc(struct msc *msc, unsigned long *nr_pages,
				  unsigned int nr_wins)
{
	int ret, i;

	for (i = 0; i < nr_wins; i++) {
		ret = msc_buffer_win_alloc(msc, nr_pages[i]);
		if (ret) {
			msc_buffer_multi_free(msc);
			return ret;
		}
	}

	msc_buffer_relink(msc);

	return 0;
}

/**
 * msc_buffer_free() - free buffers for MSC
 * @msc: MSC device
 *
 * Free MSC's storage buffers.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex to
 * serialize, so the caller is expected to hold it.
 */
static void msc_buffer_free(struct msc *msc)
{
	msc_buffer_set_wb(msc);

	if (msc->mode == MSC_MODE_SINGLE)
		msc_buffer_contig_free(msc);
	else if (msc->mode == MSC_MODE_MULTI)
		msc_buffer_multi_free(msc);
}

/**
 * msc_buffer_alloc() - allocate a buffer for MSC
 * @msc: MSC device
 * @nr_pages: number of pages for each window
 * @nr_wins: number of windows
 *
 * Allocate a storage buffer for MSC, depending on the msc::mode, it will be
 * either done via msc_buffer_contig_alloc() for SINGLE operation mode or
 * msc_buffer_multi_alloc() for multiblock operation, which allocates and
 * links all @nr_wins requested windows in one pass.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages,
			    unsigned int nr_wins)
{
	int ret;

	/* -1: buffer not allocated */
	if (atomic_read(&msc->user_count) != -1)
		return -EBUSY;

	if (msc->mode == MSC_MODE_SINGLE) {
		if (nr_wins != 1)
			return -EINVAL;

		ret = msc_buffer_contig_alloc(msc, nr_pages[0] << PAGE_SHIFT);
	} else if (msc->mode == MSC_MODE_MULTI) {
		ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins);
	} else {
		ret = -EINVAL;
	}

	if (!ret) {
		msc_buffer_set_uc(msc);

		/* allocation should be visible before the counter goes to 0 */
		smp_mb__before_atomic();

		if (WARN_ON_ONCE(atomic_cmpxchg(&msc->user_count, -1, 0) != -1))
			return -EINVAL;
	}

	return ret;
}

/**
 * msc_buffer_unlocked_free_unless_used() - free a buffer unless it's in use
 * @msc: MSC device
 *
 * This will free MSC buffer unless it is in use or there is no allocated
 * buffer.
 * Caller needs to hold msc::buf_mutex.
 *
 * Return: 0 on successful deallocation or if there was no buffer to
 * deallocate, -EBUSY if there are active users.
 */
static int msc_buffer_unlocked_free_unless_used(struct msc *msc)
{
	int count, ret = 0;

	count = atomic_cmpxchg(&msc->user_count, 0, -1);

	/* > 0: buffer is allocated and has users */
	if (count > 0)
		ret = -EBUSY;
	/* 0: buffer is allocated, no users */
	else if (!count)
		msc_buffer_free(msc);
	/* < 0: no buffer, nothing to do */

	return ret;
}

/**
 * msc_buffer_free_unless_used() - free a buffer unless it's in use
 * @msc: MSC device
 *
 * This is a locked version of msc_buffer_unlocked_free_unless_used().
 *
 * Return: 0 on successful deallocation or if there was no buffer to
 * deallocate, -EBUSY if there are active users.
 */
static int msc_buffer_free_unless_used(struct msc *msc)
{
	int ret;

	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_unlocked_free_unless_used(msc);
	mutex_unlock(&msc->buf_mutex);

	return ret;
}

/**
 * msc_buffer_get_page() - get MSC buffer page at a given offset
 * @msc: MSC device
 * @pgoff: page offset into the storage buffer
 *
 * This traverses msc::win_list, so holding msc::buf_mutex is expected from
 * the caller.
 *
 * Return: page if @pgoff corresponds to a valid buffer page or NULL.
 */
static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
{
	struct msc_window *win;
	struct scatterlist *sg;
	unsigned int blk;

	if (msc->mode == MSC_MODE_SINGLE)
		return msc_buffer_contig_get_page(msc, pgoff);

	list_for_each_entry(win, &msc->win_list, entry)
		if (pgoff >= win->pgoff && pgoff < win->pgoff + win->nr_blocks)
			goto found;

	return NULL;

found:
	pgoff -= win->pgoff;

	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
		struct page *page = msc_sg_page(sg);
		size_t pgsz = PFN_DOWN(sg->length);

		if (pgoff < pgsz)
			return page + pgoff;

		pgoff -= pgsz;
	}

	return NULL;
}

/**
 * struct msc_win_to_user_struct - data for copy_to_user() callback
 * @buf: userspace buffer to copy data to
 * @offset: running offset
 */
struct msc_win_to_user_struct {
	char __user *buf;
	unsigned long offset;
};

/**
 * msc_win_to_user() - iterator for msc_buffer_iterate() to copy data to user
 * @data: callback's private data
 * @src: source buffer
 * @len: amount of data to copy from the source buffer
 *
 * Return: number of bytes that could not be copied, %0 on full success.
 */
static unsigned long msc_win_to_user(void *data, void *src, size_t len)
{
	struct msc_win_to_user_struct *u = data;
	unsigned long ret;

	ret = copy_to_user(u->buf + u->offset, src, len);
	u->offset += len - ret;

	return ret;
}

/*
 * file operations' callbacks
 */

static int intel_th_msc_open(struct inode *inode, struct file *file)
{
	struct intel_th_device *thdev = file->private_data;
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	struct msc_iter *iter;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	iter = msc_iter_install(msc);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	file->private_data = iter;

	return nonseekable_open(inode, file);
}

static int intel_th_msc_release(struct inode *inode, struct file *file)
{
	struct msc_iter *iter = file->private_data;
	struct msc *msc = iter->msc;

	msc_iter_remove(iter, msc);

	return 0;
}
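
/*
 * Copy up to @len bytes of single mode buffer data at offset @off to
 * userspace, linearizing the two halves if the buffer has wrapped.
 *
 * Return: number of bytes copied or -EFAULT.
 */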
static ssize_t
msc_single_to_user(struct msc *msc, char __user *buf, loff_t off, size_t len)
{
	unsigned long size = msc->nr_pages << PAGE_SHIFT, rem = len;
	unsigned long start = off, tocopy = 0;

	if (msc->single_wrap) {
		start += msc->single_sz;
		if (start < size) {
			tocopy = min(rem, size - start);
			if (copy_to_user(buf, msc->base + start, tocopy))
				return -EFAULT;

			buf += tocopy;
			rem -= tocopy;
			start += tocopy;
		}

		start &= size - 1;
		if (rem) {
			tocopy = min(rem, msc->single_sz - start);
			if (copy_to_user(buf, msc->base + start, tocopy))
				return -EFAULT;

			rem -= tocopy;
		}

		return len - rem;
	}

	if (copy_to_user(buf, msc->base + start, rem))
		return -EFAULT;

	return len;
}

static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
				 size_t len, loff_t *ppos)
{
	struct msc_iter *iter = file->private_data;
	struct msc *msc = iter->msc;
	size_t size;
	loff_t off = *ppos;
	ssize_t ret = 0;

	if (!atomic_inc_unless_negative(&msc->user_count))
		return 0;

	if (msc->mode == MSC_MODE_SINGLE && !msc->single_wrap)
		size = msc->single_sz;
	else
		size = msc->nr_pages << PAGE_SHIFT;

	if (!size)
		goto put_count;

	if (off >= size)
		goto put_count;

	if (off + len >= size)
		len = size - off;

	if (msc->mode == MSC_MODE_SINGLE) {
		ret = msc_single_to_user(msc, buf, off, len);
		if (ret >= 0)
			*ppos += ret;
	} else if (msc->mode == MSC_MODE_MULTI) {
		struct msc_win_to_user_struct u = {
			.buf = buf,
			.offset = 0,
		};

		ret = msc_buffer_iterate(iter, len, &u, msc_win_to_user);
		if (ret >= 0)
			*ppos = iter->offset;
	} else {
		ret = -EINVAL;
	}

put_count:
	atomic_dec(&msc->user_count);

	return ret;
}

/*
 * vm operations callbacks (vm_ops)
 */

static void msc_mmap_open(struct vm_area_struct *vma)
{
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;

	atomic_inc(&msc->mmap_count);
}

static void msc_mmap_close(struct vm_area_struct *vma)
{
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;
	unsigned long pg;

	if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex))
		return;

	/* drop page _refcounts */
	for (pg = 0; pg < msc->nr_pages; pg++) {
		struct page *page = msc_buffer_get_page(msc, pg);

		if (WARN_ON_ONCE(!page))
			continue;

		if (page->mapping)
			page->mapping = NULL;
	}

	/* last mapping -- drop user_count */
	atomic_dec(&msc->user_count);
	mutex_unlock(&msc->buf_mutex);
}

static vm_fault_t msc_mmap_fault(struct vm_fault *vmf)
{
	struct msc_iter *iter = vmf->vma->vm_file->private_data;
	struct msc *msc = iter->msc;

	vmf->page = msc_buffer_get_page(msc, vmf->pgoff);
	if (!vmf->page)
		return VM_FAULT_SIGBUS;

	get_page(vmf->page);
	vmf->page->mapping = vmf->vma->vm_file->f_mapping;
	vmf->page->index = vmf->pgoff;

	return 0;
}

static const struct vm_operations_struct msc_mmap_ops = {
	.open = msc_mmap_open,
	.close = msc_mmap_close,
	.fault = msc_mmap_fault,
};

static int intel_th_msc_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;
	int ret = -EINVAL;

	if (!size || offset_in_page(size))
		return -EINVAL;

	if (vma->vm_pgoff)
		return -EINVAL;

	/* grab user_count once per mmap; drop in msc_mmap_close() */
	if (!atomic_inc_unless_negative(&msc->user_count))
		return -EINVAL;

	if (msc->mode != MSC_MODE_SINGLE &&
	    msc->mode != MSC_MODE_MULTI)
		goto out;

	if (size >> PAGE_SHIFT != msc->nr_pages)
		goto out;

	atomic_set(&msc->mmap_count, 1);
	ret = 0;

out:
	if (ret)
		atomic_dec(&msc->user_count);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTCOPY);
	vma->vm_ops = &msc_mmap_ops;
	return ret;
}

static const struct file_operations intel_th_msc_fops = {
	.open = intel_th_msc_open,
	.release = intel_th_msc_release,
	.read = intel_th_msc_read,
	.mmap = intel_th_msc_mmap,
	.owner = THIS_MODULE,
};
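
/*
 * Poll MSCnSTS for the pipeline-empty bit so trace data is fully
 * committed to memory before the output is disabled; gives up after
 * MSC_PLE_WAITLOOP_DEPTH iterations.
 */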
static void intel_th_msc_wait_empty(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	unsigned long count;
	u32 reg;

	for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH;
	     count && !(reg & MSCSTS_PLE); count--) {
		reg = __raw_readl(msc->reg_base + REG_MSU_MSC0STS);
		cpu_relax();
	}

	if (!count)
		dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n");
}

static int intel_th_msc_init(struct msc *msc)
{
	atomic_set(&msc->user_count, -1);

	msc->mode = msc->multi_is_broken ? MSC_MODE_SINGLE : MSC_MODE_MULTI;
	mutex_init(&msc->buf_mutex);
	INIT_LIST_HEAD(&msc->win_list);
	INIT_LIST_HEAD(&msc->iter_list);

	msc->burst_len =
		(ioread32(msc->reg_base + REG_MSU_MSC0CTL) & MSC_LEN) >>
		__ffs(MSC_LEN);

	return 0;
}

static int msc_win_switch(struct msc *msc)
{
	struct msc_window *first;

	if (list_empty(&msc->win_list))
		return -EINVAL;

	first = list_first_entry(&msc->win_list, struct msc_window, entry);

	if (msc_is_last_win(msc->cur_win))
		msc->cur_win = first;
	else
		msc->cur_win = list_next_entry(msc->cur_win, entry);

	msc->base = msc_win_base(msc->cur_win);
	msc->base_addr = msc_win_base_dma(msc->cur_win);

	intel_th_trace_switch(msc->thdev);

	return 0;
}

/**
 * intel_th_msc_window_unlock - put the window back in rotation
 * @dev: MSC device to which this relates
 * @sgt: buffer's sg_table for the window, does nothing if NULL
 */
void intel_th_msc_window_unlock(struct device *dev, struct sg_table *sgt)
{
	struct msc *msc = dev_get_drvdata(dev);
	struct msc_window *win;

	if (!sgt)
		return;

	win = msc_find_window(msc, sgt, false);
	if (!win)
		return;

	msc_win_set_lockout(win, WIN_LOCKED, WIN_READY);
	if (msc->switch_on_unlock == win) {
		msc->switch_on_unlock = NULL;
		msc_win_switch(msc);
	}
}
EXPORT_SYMBOL_GPL(intel_th_msc_window_unlock);

static void msc_work(struct work_struct *work)
{
	struct msc *msc = container_of(work, struct msc, work);

	intel_th_msc_deactivate(msc->thdev);
}
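
/*
 * Handle a "window full" interrupt: lock the window that just filled up
 * and switch to the next one if it is READY; otherwise either stop the
 * trace (stop_on_full) or defer the switch until the window is unlocked.
 */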
static irqreturn_t intel_th_msc_interrupt(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	u32 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
	u32 mask = msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
	struct msc_window *win, *next_win;

	if (!msc->do_irq || !msc->mbuf)
		return IRQ_NONE;

	msusts &= mask;

	if (!msusts)
		return msc->enabled ? IRQ_HANDLED : IRQ_NONE;

	iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);

	if (!msc->enabled)
		return IRQ_NONE;

	/* grab the window before we do the switch */
	win = msc->cur_win;
	if (!win)
		return IRQ_HANDLED;
	next_win = msc_next_window(win);
	if (!next_win)
		return IRQ_HANDLED;

	/* next window: if READY, proceed, if LOCKED, stop the trace */
	if (msc_win_set_lockout(next_win, WIN_READY, WIN_INUSE)) {
		if (msc->stop_on_full)
			schedule_work(&msc->work);
		else
			msc->switch_on_unlock = next_win;

		return IRQ_HANDLED;
	}

	/* current window: INUSE -> LOCKED */
	msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);

	msc_win_switch(msc);

	if (msc->mbuf && msc->mbuf->ready)
		msc->mbuf->ready(msc->mbuf_priv, win->sgt,
				 msc_win_total_sz(win));

	return IRQ_HANDLED;
}

static const char * const msc_mode[] = {
	[MSC_MODE_SINGLE] = "single",
	[MSC_MODE_MULTI] = "multi",
	[MSC_MODE_EXI] = "ExI",
	[MSC_MODE_DEBUG] = "debug",
};

static ssize_t
wrap_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n", msc->wrap);
}

static ssize_t
wrap_store(struct device *dev, struct device_attribute *attr, const char *buf,
	   size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	msc->wrap = !!val;

	return size;
}

static DEVICE_ATTR_RW(wrap);

static void msc_buffer_unassign(struct msc *msc)
{
	lockdep_assert_held(&msc->buf_mutex);

	if (!msc->mbuf)
		return;

	msc->mbuf->unassign(msc->mbuf_priv);
	msu_buffer_put(msc->mbuf);
	msc->mbuf_priv = NULL;
	msc->mbuf = NULL;
}

static ssize_t
mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);
	const char *mode = msc_mode[msc->mode];
	ssize_t ret;

	mutex_lock(&msc->buf_mutex);
	if (msc->mbuf)
		mode = msc->mbuf->name;
	ret = scnprintf(buf, PAGE_SIZE, "%s\n", mode);
	mutex_unlock(&msc->buf_mutex);

	return ret;
}
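
/*
 * Accept either one of the built-in operating modes by name or, given a
 * usable IRQ, the name of a registered MSU buffer sink; switching drops
 * the current buffer unless it is busy.
 */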
static ssize_t
mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
	   size_t size)
{
	const struct msu_buffer *mbuf = NULL;
	struct msc *msc = dev_get_drvdata(dev);
	size_t len = size;
	char *cp, *mode;
	int i, ret;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	cp = memchr(buf, '\n', len);
	if (cp)
		len = cp - buf;

	mode = kstrndup(buf, len, GFP_KERNEL);
	if (!mode)
		return -ENOMEM;

	i = match_string(msc_mode, ARRAY_SIZE(msc_mode), mode);
	if (i >= 0) {
		kfree(mode);
		goto found;
	}

	/* Buffer sinks only work with a usable IRQ */
	if (!msc->do_irq) {
		kfree(mode);
		return -EINVAL;
	}

	mbuf = msu_buffer_get(mode);
	kfree(mode);
	if (mbuf)
		goto found;

	return -EINVAL;

found:
	if (i == MSC_MODE_MULTI && msc->multi_is_broken)
		return -EOPNOTSUPP;

	mutex_lock(&msc->buf_mutex);
	ret = 0;

	/* Same buffer: do nothing */
	if (mbuf && mbuf == msc->mbuf) {
		/* put the extra reference we just got */
		msu_buffer_put(mbuf);
		goto unlock;
	}

	ret = msc_buffer_unlocked_free_unless_used(msc);
	if (ret)
		goto unlock;

	if (mbuf) {
		void *mbuf_priv = mbuf->assign(dev, &i);

		if (!mbuf_priv) {
			ret = -ENOMEM;
			goto unlock;
		}

		msc_buffer_unassign(msc);
		msc->mbuf_priv = mbuf_priv;
		msc->mbuf = mbuf;
	} else {
		msc_buffer_unassign(msc);
	}

	msc->mode = i;

unlock:
	if (ret && mbuf)
		msu_buffer_put(mbuf);
	mutex_unlock(&msc->buf_mutex);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(mode);

static ssize_t
nr_pages_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);
	struct msc_window *win;
	size_t count = 0;

	mutex_lock(&msc->buf_mutex);

	if (msc->mode == MSC_MODE_SINGLE)
		count = scnprintf(buf, PAGE_SIZE, "%ld\n", msc->nr_pages);
	else if (msc->mode == MSC_MODE_MULTI) {
		list_for_each_entry(win, &msc->win_list, entry) {
			count += scnprintf(buf + count, PAGE_SIZE - count,
					   "%d%c", win->nr_blocks,
					   msc_is_last_win(win) ? '\n' : ',');
		}
	} else {
		count = scnprintf(buf, PAGE_SIZE, "unsupported\n");
	}

	mutex_unlock(&msc->buf_mutex);

	return count;
}

static ssize_t
nr_pages_store(struct device *dev, struct device_attribute *attr,
	       const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val, *win = NULL, *rewin;
	size_t len = size;
	const char *p = buf;
	char *end, *s;
	int ret, nr_wins = 0;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	ret = msc_buffer_free_unless_used(msc);
	if (ret)
		return ret;

	/* scan the comma-separated list of allocation sizes */
	end = memchr(buf, '\n', len);
	if (end)
		len = end - buf;

	do {
		end = memchr(p, ',', len);
		s = kstrndup(p, end ? end - p : len, GFP_KERNEL);
		if (!s) {
			ret = -ENOMEM;
			goto free_win;
		}

		ret = kstrtoul(s, 10, &val);
		kfree(s);

		if (ret || !val)
			goto free_win;

		if (nr_wins && msc->mode == MSC_MODE_SINGLE) {
			ret = -EINVAL;
			goto free_win;
		}

		nr_wins++;
		rewin = krealloc_array(win, nr_wins, sizeof(*win), GFP_KERNEL);
		if (!rewin) {
			kfree(win);
			return -ENOMEM;
		}

		win = rewin;
		win[nr_wins - 1] = val;

		if (!end)
			break;

		/* consume the number and the following comma, hence +1 */
		len -= end - p + 1;
		p = end + 1;
	} while (len);

	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_alloc(msc, win, nr_wins);
	mutex_unlock(&msc->buf_mutex);

free_win:
	kfree(win);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(nr_pages);
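
/*
 * Writing 1 to this sysfs attribute requests an immediate window switch.
 */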
static ssize_t
win_switch_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	if (val != 1)
		return -EINVAL;

	ret = -EINVAL;
	mutex_lock(&msc->buf_mutex);
	/*
	 * Window switch can only happen in the "multi" mode.
	 * If an external buffer is engaged, it has full control
	 * over window switching.
	 */
	if (msc->mode == MSC_MODE_MULTI && !msc->mbuf)
		ret = msc_win_switch(msc);
	mutex_unlock(&msc->buf_mutex);

	return ret ? ret : size;
}

static DEVICE_ATTR_WO(win_switch);

static ssize_t stop_on_full_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", msc->stop_on_full);
}

static ssize_t stop_on_full_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	int ret;

	ret = kstrtobool(buf, &msc->stop_on_full);
	if (ret)
		return ret;

	return size;
}

static DEVICE_ATTR_RW(stop_on_full);

static struct attribute *msc_output_attrs[] = {
	&dev_attr_wrap.attr,
	&dev_attr_mode.attr,
	&dev_attr_nr_pages.attr,
	&dev_attr_win_switch.attr,
	&dev_attr_stop_on_full.attr,
	NULL,
};

static const struct attribute_group msc_output_group = {
	.attrs = msc_output_attrs,
};

static int intel_th_msc_probe(struct intel_th_device *thdev)
{
	struct device *dev = &thdev->dev;
	struct resource *res;
	struct msc *msc;
	void __iomem *base;
	int err;

	res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	base = devm_ioremap(dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	msc = devm_kzalloc(dev, sizeof(*msc), GFP_KERNEL);
	if (!msc)
		return -ENOMEM;

	res = intel_th_device_get_resource(thdev, IORESOURCE_IRQ, 1);
	if (!res)
		msc->do_irq = 1;

	if (INTEL_TH_CAP(to_intel_th(thdev), multi_is_broken))
		msc->multi_is_broken = 1;

	msc->index = thdev->id;

	msc->thdev = thdev;
	msc->reg_base = base + msc->index * 0x100;
	msc->msu_base = base;

	INIT_WORK(&msc->work, msc_work);
	err = intel_th_msc_init(msc);
	if (err)
		return err;

	dev_set_drvdata(dev, msc);

	return 0;
}

static void intel_th_msc_remove(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	int ret;

	intel_th_msc_deactivate(thdev);

	/*
	 * Buffers should not be used at this point except if the
	 * output character device is still open and the parent
	 * device gets detached from its bus, which is a FIXME.
	 */
	ret = msc_buffer_free_unless_used(msc);
	WARN_ON_ONCE(ret);
}

static struct intel_th_driver intel_th_msc_driver = {
	.probe = intel_th_msc_probe,
	.remove = intel_th_msc_remove,
	.irq = intel_th_msc_interrupt,
	.wait_empty = intel_th_msc_wait_empty,
	.activate = intel_th_msc_activate,
	.deactivate = intel_th_msc_deactivate,
	.fops = &intel_th_msc_fops,
	.attr_group = &msc_output_group,
	.driver = {
		.name = "msc",
		.owner = THIS_MODULE,
	},
};

module_driver(intel_th_msc_driver,
	      intel_th_driver_register,
	      intel_th_driver_unregister);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel(R) Trace Hub Memory Storage Unit driver");
MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");