// SPDX-License-Identifier: GPL-2.0
/*
 * Intel(R) Trace Hub Memory Storage Unit
 *
 * Copyright (C) 2014-2015 Intel Corporation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <linux/intel_th.h>
#include "intel_th.h"
#include "msu.h"

#define msc_dev(x) (&(x)->thdev->dev)

/*
 * Lockout state transitions:
 *   READY -> INUSE -+-> LOCKED -+-> READY -> etc.
 *                   \-----------/
 * WIN_READY:	window can be used by HW
 * WIN_INUSE:	window is in use
 * WIN_LOCKED:	window is filled up and is being processed by the buffer
 * handling code
 *
 * All state transitions happen automatically, except for the LOCKED->READY,
 * which needs to be signalled by the buffer code by calling
 * intel_th_msc_window_unlock().
 *
 * When the interrupt handler has to switch to the next window, it checks
 * whether it's READY, and if it is, it performs the switch and tracing
 * continues. If it's LOCKED, it stops the trace.
 */
enum lockout_state {
	WIN_READY = 0,
	WIN_INUSE,
	WIN_LOCKED
};
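
/*
 * Illustrative walk-through (not driver logic): with a buffer sink
 * attached, a window cycles through these states via the
 * msc_win_set_lockout() calls made further down in this file:
 *
 *	msc_configure():		READY  -> INUSE
 *					(tracing starts into the window)
 *	intel_th_msc_interrupt():	INUSE  -> LOCKED
 *					(window full, handed to ->ready())
 *	intel_th_msc_window_unlock():	LOCKED -> READY
 *					(sink is done, back in rotation)
 */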

/**
 * struct msc_window - multiblock mode window descriptor
 * @entry:	window list linkage (msc::win_list)
 * @pgoff:	page offset into the buffer that this window starts at
 * @lockout:	lockout state, see comment above
 * @lo_lock:	lockout state serialization
 * @nr_blocks:	number of blocks (pages) in this window
 * @nr_segs:	number of segments in this window (<= @nr_blocks)
 * @msc:	pointer to the MSC device
 * @_sgt:	inline SG table of block descriptors, used unless a buffer
 *		sink provides its own
 * @sgt:	SG table of block descriptors in use (@_sgt or one allocated
 *		by the buffer sink)
 */
struct msc_window {
	struct list_head	entry;
	unsigned long		pgoff;
	enum lockout_state	lockout;
	spinlock_t		lo_lock;
	unsigned int		nr_blocks;
	unsigned int		nr_segs;
	struct msc		*msc;
	struct sg_table		_sgt;
	struct sg_table		*sgt;
};

/**
 * struct msc_iter - iterator for msc buffer
 * @entry:	msc::iter_list linkage
 * @msc:	pointer to the MSC device
 * @start_win:	oldest window
 * @win:	current window
 * @offset:	current logical offset into the buffer
 * @start_block: oldest block in the window
 * @block:	block number in the window
 * @block_off:	offset into current block
 * @wrap_count:	block wrapping handling
 * @eof:	end of buffer reached
 */
struct msc_iter {
	struct list_head	entry;
	struct msc		*msc;
	struct msc_window	*start_win;
	struct msc_window	*win;
	unsigned long		offset;
	struct scatterlist	*start_block;
	struct scatterlist	*block;
	unsigned int		block_off;
	unsigned int		wrap_count;
	unsigned int		eof;
};

/**
 * struct msc - MSC device representation
 * @reg_base:	register window base address for this MSC
 * @msu_base:	register window base address for the entire MSU
 * @thdev:	intel_th_device pointer
 * @mbuf:	MSU buffer, if assigned
 * @mbuf_priv:	MSU buffer's private data, if @mbuf
 * @work:	work item for stopping the trace when the buffer is full
 * @win_list:	list of windows in multiblock mode
 * @single_sgt:	single mode buffer
 * @cur_win:	current window
 * @switch_on_unlock: window to switch to when it becomes available
 * @nr_pages:	total number of pages allocated for this buffer
 * @single_sz:	amount of data in single mode
 * @single_wrap: single mode wrap occurred
 * @base:	buffer's base pointer
 * @base_addr:	buffer's base address
 * @orig_addr:	MSC0 buffer's base address
 * @orig_sz:	MSC0 buffer's size
 * @user_count:	number of users of the buffer
 * @mmap_count:	number of mappings
 * @buf_mutex:	mutex to serialize access to buffer-related bits
 * @iter_list:	list of open file descriptor iterators
 * @stop_on_full: stop the trace if the current window is full
 * @enabled:	MSC is enabled
 * @wrap:	wrapping is enabled
 * @do_irq:	IRQ resource is available, handle interrupts
 * @multi_is_broken: multiblock mode is broken on this device (set from
 *		the PCI driver data)
 * @mode:	MSC operating mode
 * @burst_len:	write burst length
 * @index:	number of this MSC in the MSU
 */
struct msc {
	void __iomem		*reg_base;
	void __iomem		*msu_base;
	struct intel_th_device	*thdev;

	const struct msu_buffer	*mbuf;
	void			*mbuf_priv;

	struct work_struct	work;
	struct list_head	win_list;
	struct sg_table		single_sgt;
	struct msc_window	*cur_win;
	struct msc_window	*switch_on_unlock;
	unsigned long		nr_pages;
	unsigned long		single_sz;
	unsigned int		single_wrap : 1;
	void			*base;
	dma_addr_t		base_addr;
	u32			orig_addr;
	u32			orig_sz;

	/* <0: no buffer, 0: no users, >0: active users */
	atomic_t		user_count;

	atomic_t		mmap_count;
	struct mutex		buf_mutex;

	struct list_head	iter_list;

	bool			stop_on_full;

	/* config */
	unsigned int		enabled : 1,
				wrap	: 1,
				do_irq	: 1,
				multi_is_broken : 1;
	unsigned int		mode;
	unsigned int		burst_len;
	unsigned int		index;
};

static LIST_HEAD(msu_buffer_list);
static DEFINE_MUTEX(msu_buffer_mutex);

/**
 * struct msu_buffer_entry - internal MSU buffer bookkeeping
 * @entry:	link to msu_buffer_list
 * @mbuf:	MSU buffer object
 * @owner:	module that provides this MSU buffer
 */
struct msu_buffer_entry {
	struct list_head	entry;
	const struct msu_buffer	*mbuf;
	struct module		*owner;
};

static struct msu_buffer_entry *__msu_buffer_entry_find(const char *name)
{
	struct msu_buffer_entry *mbe;

	lockdep_assert_held(&msu_buffer_mutex);

	list_for_each_entry(mbe, &msu_buffer_list, entry) {
		if (!strcmp(mbe->mbuf->name, name))
			return mbe;
	}

	return NULL;
}

static const struct msu_buffer *
msu_buffer_get(const char *name)
{
	struct msu_buffer_entry *mbe;

	mutex_lock(&msu_buffer_mutex);
	mbe = __msu_buffer_entry_find(name);
	if (mbe && !try_module_get(mbe->owner))
		mbe = NULL;
	mutex_unlock(&msu_buffer_mutex);

	return mbe ? mbe->mbuf : NULL;
}

static void msu_buffer_put(const struct msu_buffer *mbuf)
{
	struct msu_buffer_entry *mbe;

	mutex_lock(&msu_buffer_mutex);
	mbe = __msu_buffer_entry_find(mbuf->name);
	if (mbe)
		module_put(mbe->owner);
	mutex_unlock(&msu_buffer_mutex);
}

int intel_th_msu_buffer_register(const struct msu_buffer *mbuf,
				 struct module *owner)
{
	struct msu_buffer_entry *mbe;
	int ret = 0;

	mbe = kzalloc(sizeof(*mbe), GFP_KERNEL);
	if (!mbe)
		return -ENOMEM;

	mutex_lock(&msu_buffer_mutex);
	if (__msu_buffer_entry_find(mbuf->name)) {
		ret = -EEXIST;
		kfree(mbe);
		goto unlock;
	}

	mbe->mbuf = mbuf;
	mbe->owner = owner;
	list_add_tail(&mbe->entry, &msu_buffer_list);
unlock:
	mutex_unlock(&msu_buffer_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(intel_th_msu_buffer_register);

void intel_th_msu_buffer_unregister(const struct msu_buffer *mbuf)
{
	struct msu_buffer_entry *mbe;

	mutex_lock(&msu_buffer_mutex);
	mbe = __msu_buffer_entry_find(mbuf->name);
	if (mbe) {
		list_del(&mbe->entry);
		kfree(mbe);
	}
	mutex_unlock(&msu_buffer_mutex);
}
EXPORT_SYMBOL_GPL(intel_th_msu_buffer_unregister);
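
/*
 * Example (a minimal sketch, not compiled here): a buffer sink module
 * registers itself with the functions above. The "example" name,
 * example_priv and the callbacks are hypothetical; the callback
 * signatures follow the way msc::mbuf is invoked in this file.
 *
 *	static void *example_assign(struct device *dev, int *mode)
 *	{
 *		return kzalloc(sizeof(struct example_priv), GFP_KERNEL);
 *	}
 *
 *	static void example_unassign(void *priv)
 *	{
 *		kfree(priv);
 *	}
 *
 *	static const struct msu_buffer example_mbuf = {
 *		.name		= "example",
 *		.assign		= example_assign,
 *		.unassign	= example_unassign,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return intel_th_msu_buffer_register(&example_mbuf,
 *						    THIS_MODULE);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		intel_th_msu_buffer_unregister(&example_mbuf);
 *	}
 */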

static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
{
	/* header hasn't been written */
	if (!bdesc->valid_dw)
		return true;

	/* valid_dw includes the header */
	if (!msc_data_sz(bdesc))
		return true;

	return false;
}

static inline struct scatterlist *msc_win_base_sg(struct msc_window *win)
{
	return win->sgt->sgl;
}

static inline struct msc_block_desc *msc_win_base(struct msc_window *win)
{
	return sg_virt(msc_win_base_sg(win));
}

static inline dma_addr_t msc_win_base_dma(struct msc_window *win)
{
	return sg_dma_address(msc_win_base_sg(win));
}

static inline unsigned long
msc_win_base_pfn(struct msc_window *win)
{
	return PFN_DOWN(msc_win_base_dma(win));
}

/**
 * msc_is_last_win() - check if a window is the last one for a given MSC
 * @win:	window
 * Return:	true if @win is the last window in MSC's multiblock buffer
 */
static inline bool msc_is_last_win(struct msc_window *win)
{
	return win->entry.next == &win->msc->win_list;
}

/**
 * msc_next_window() - return next window in the multiblock buffer
 * @win:	current window
 *
 * Return:	window following the current one
 */
static struct msc_window *msc_next_window(struct msc_window *win)
{
	if (msc_is_last_win(win))
		return list_first_entry(&win->msc->win_list, struct msc_window,
					entry);

	return list_next_entry(win, entry);
}

static size_t msc_win_total_sz(struct msc_window *win)
{
	struct scatterlist *sg;
	unsigned int blk;
	size_t size = 0;

	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
		struct msc_block_desc *bdesc = sg_virt(sg);

		if (msc_block_wrapped(bdesc))
			return (size_t)win->nr_blocks << PAGE_SHIFT;

		size += msc_total_sz(bdesc);
		if (msc_block_last_written(bdesc))
			break;
	}

	return size;
}

/**
 * msc_find_window() - find a window matching a given sg_table
 * @msc:	MSC device
 * @sgt:	SG table of the window
 * @nonempty:	skip over empty windows
 *
 * Return:	MSC window structure pointer or NULL if the window
 *		could not be found.
 */
static struct msc_window *
msc_find_window(struct msc *msc, struct sg_table *sgt, bool nonempty)
{
	struct msc_window *win;
	unsigned int found = 0;

	if (list_empty(&msc->win_list))
		return NULL;

	/*
	 * we might need a radix tree for this, depending on how
	 * many windows a typical user would allocate; ideally it's
	 * something like 2, in which case we're good
	 */
	list_for_each_entry(win, &msc->win_list, entry) {
		if (win->sgt == sgt)
			found++;

		/* skip the empty ones */
		if (nonempty && msc_block_is_empty(msc_win_base(win)))
			continue;

		if (found)
			return win;
	}

	return NULL;
}

/**
 * msc_oldest_window() - locate the window with oldest data
 * @msc:	MSC device
 *
 * This should only be used in multiblock mode. Caller should hold the
 * msc::user_count reference.
 *
 * Return:	the oldest window with valid data
 */
static struct msc_window *msc_oldest_window(struct msc *msc)
{
	struct msc_window *win;

	if (list_empty(&msc->win_list))
		return NULL;

	win = msc_find_window(msc, msc_next_window(msc->cur_win)->sgt, true);
	if (win)
		return win;

	return list_first_entry(&msc->win_list, struct msc_window, entry);
}

/**
 * msc_win_oldest_sg() - locate the oldest block in a given window
 * @win:	window to look at
 *
 * Return:	scatterlist for the block with the oldest data
 */
static struct scatterlist *msc_win_oldest_sg(struct msc_window *win)
{
	unsigned int blk;
	struct scatterlist *sg;
	struct msc_block_desc *bdesc = msc_win_base(win);

	/* without wrapping, first block is the oldest */
	if (!msc_block_wrapped(bdesc))
		return msc_win_base_sg(win);

	/*
	 * with wrapping, last written block contains both the newest and the
	 * oldest data for this window.
	 */
	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
		struct msc_block_desc *bdesc = sg_virt(sg);

		if (msc_block_last_written(bdesc))
			return sg;
	}

	return msc_win_base_sg(win);
}

static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter)
{
	return sg_virt(iter->block);
}

static struct msc_iter *msc_iter_install(struct msc *msc)
{
	struct msc_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&msc->buf_mutex);

	/*
	 * Reading and tracing are mutually exclusive; if msc is
	 * enabled, open() will fail; otherwise existing readers
	 * will prevent enabling the msc and the rest of fops don't
	 * need to worry about it.
	 */
	if (msc->enabled) {
		kfree(iter);
		iter = ERR_PTR(-EBUSY);
		goto unlock;
	}

	iter->msc = msc;

	list_add_tail(&iter->entry, &msc->iter_list);
unlock:
	mutex_unlock(&msc->buf_mutex);

	return iter;
}

static void msc_iter_remove(struct msc_iter *iter, struct msc *msc)
{
	mutex_lock(&msc->buf_mutex);
	list_del(&iter->entry);
	mutex_unlock(&msc->buf_mutex);

	kfree(iter);
}

static void msc_iter_block_start(struct msc_iter *iter)
{
	if (iter->start_block)
		return;

	iter->start_block = msc_win_oldest_sg(iter->win);
	iter->block = iter->start_block;
	iter->wrap_count = 0;

	/*
	 * start with the block with oldest data; if data has wrapped
	 * in this window, it should be in this block
	 */
	if (msc_block_wrapped(msc_iter_bdesc(iter)))
		iter->wrap_count = 2;
}

static int msc_iter_win_start(struct msc_iter *iter, struct msc *msc)
{
	/* already started, nothing to do */
	if (iter->start_win)
		return 0;

	iter->start_win = msc_oldest_window(msc);
	if (!iter->start_win)
		return -EINVAL;

	iter->win = iter->start_win;
	iter->start_block = NULL;

	msc_iter_block_start(iter);

	return 0;
}

static int msc_iter_win_advance(struct msc_iter *iter)
{
	iter->win = msc_next_window(iter->win);
	iter->start_block = NULL;

	if (iter->win == iter->start_win) {
		iter->eof++;
		return 1;
	}

	msc_iter_block_start(iter);

	return 0;
}

static int msc_iter_block_advance(struct msc_iter *iter)
{
	iter->block_off = 0;

	/* wrapping */
	if (iter->wrap_count && iter->block == iter->start_block) {
		iter->wrap_count--;
		if (!iter->wrap_count)
			/* copied newest data from the wrapped block */
			return msc_iter_win_advance(iter);
	}

	/* no wrapping, check for last written block */
	if (!iter->wrap_count && msc_block_last_written(msc_iter_bdesc(iter)))
		/* copied newest data for the window */
		return msc_iter_win_advance(iter);

	/* block advance */
	if (sg_is_last(iter->block))
		iter->block = msc_win_base_sg(iter->win);
	else
		iter->block = sg_next(iter->block);

	/* no wrapping, sanity check in case there is no last written block */
	if (!iter->wrap_count && iter->block == iter->start_block)
		return msc_iter_win_advance(iter);

	return 0;
}
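
/*
 * A sketch of the visiting order implemented above, assuming a window
 * with four blocks whose write pointer wrapped in block 2: the iterator
 * starts at the last written block, which holds both the oldest and the
 * newest data, so that block is visited twice (wrap_count == 2, then 1):
 *
 *	blocks:		[0] [1] [2] [3]		last written: 2
 *	visit order:	 2   3   0   1   2
 *			 ^ oldest tail first	 ^ newest head last
 */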

/**
 * msc_buffer_iterate() - go through multiblock buffer's data
 * @iter:	iterator structure
 * @size:	amount of data to scan
 * @data:	callback's private data
 * @fn:		iterator callback
 *
 * This will start at the window which will be written to next (containing
 * the oldest data) and work its way to the current window, calling @fn
 * for each chunk of data as it goes.
 *
 * Caller should have msc::user_count reference to make sure the buffer
 * doesn't disappear from under us.
 *
 * Return:	amount of data actually scanned.
 */
static ssize_t
msc_buffer_iterate(struct msc_iter *iter, size_t size, void *data,
		   unsigned long (*fn)(void *, void *, size_t))
{
	struct msc *msc = iter->msc;
	size_t len = size;
	unsigned int advance;

	if (iter->eof)
		return 0;

	/* start with the oldest window */
	if (msc_iter_win_start(iter, msc))
		return 0;

	do {
		unsigned long data_bytes = msc_data_sz(msc_iter_bdesc(iter));
		void *src = (void *)msc_iter_bdesc(iter) + MSC_BDESC;
		size_t tocopy = data_bytes, copied = 0;
		size_t remaining = 0;

		advance = 1;

		/*
		 * If block wrapping happened, we need to visit the last block
		 * twice, because it contains both the oldest and the newest
		 * data in this window.
		 *
		 * First time (wrap_count==2), in the very beginning, to collect
		 * the oldest data, which is in the range
		 * (data_bytes..DATA_IN_PAGE).
		 *
		 * Second time (wrap_count==1), it's just like any other block,
		 * containing data in the range of [MSC_BDESC..data_bytes].
		 */
		if (iter->block == iter->start_block && iter->wrap_count == 2) {
			tocopy = DATA_IN_PAGE - data_bytes;
			src += data_bytes;
		}

		if (!tocopy)
			goto next_block;

		tocopy -= iter->block_off;
		src += iter->block_off;

		if (len < tocopy) {
			tocopy = len;
			advance = 0;
		}

		remaining = fn(data, src, tocopy);

		if (remaining)
			advance = 0;

		copied = tocopy - remaining;
		len -= copied;
		iter->block_off += copied;
		iter->offset += copied;

		if (!advance)
			break;

next_block:
		if (msc_iter_block_advance(iter))
			break;

	} while (len);

	return size - len;
}
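
/*
 * A hypothetical callback for msc_buffer_iterate(), illustrating the
 * contract used above: @fn consumes up to the given number of bytes
 * from @src and returns how many bytes it could NOT consume; returning
 * %0 lets the iteration advance to the next block. msc_win_to_user()
 * below is the real-life instance of this.
 *
 *	static unsigned long example_count(void *data, void *src, size_t len)
 *	{
 *		*(size_t *)data += len;
 *
 *		return 0;
 *	}
 */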

/**
 * msc_buffer_clear_hw_header() - clear hw header for multiblock
 * @msc:	MSC device
 */
static void msc_buffer_clear_hw_header(struct msc *msc)
{
	struct msc_window *win;
	struct scatterlist *sg;

	list_for_each_entry(win, &msc->win_list, entry) {
		unsigned int blk;

		for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
			struct msc_block_desc *bdesc = sg_virt(sg);

			memset_startat(bdesc, 0, hw_tag);
		}
	}
}

static int intel_th_msu_init(struct msc *msc)
{
	u32 mintctl, msusts;

	if (!msc->do_irq)
		return 0;

	if (!msc->mbuf)
		return 0;

	mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
	mintctl |= msc->index ? M1BLIE : M0BLIE;
	iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
	if (mintctl != ioread32(msc->msu_base + REG_MSU_MINTCTL)) {
		dev_info(msc_dev(msc), "MINTCTL ignores writes: no usable interrupts\n");
		msc->do_irq = 0;
		return 0;
	}

	msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
	iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);

	return 0;
}

static void intel_th_msu_deinit(struct msc *msc)
{
	u32 mintctl;

	if (!msc->do_irq)
		return;

	mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
	mintctl &= msc->index ? ~M1BLIE : ~M0BLIE;
	iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
}

static int msc_win_set_lockout(struct msc_window *win,
			       enum lockout_state expect,
			       enum lockout_state new)
{
	enum lockout_state old;
	unsigned long flags;
	int ret = 0;

	if (!win->msc->mbuf)
		return 0;

	spin_lock_irqsave(&win->lo_lock, flags);
	old = win->lockout;

	if (old != expect) {
		ret = -EINVAL;
		goto unlock;
	}

	win->lockout = new;

	if (old == expect && new == WIN_LOCKED)
		atomic_inc(&win->msc->user_count);
	else if (old == expect && old == WIN_LOCKED)
		atomic_dec(&win->msc->user_count);

unlock:
	spin_unlock_irqrestore(&win->lo_lock, flags);

	if (ret) {
		if (expect == WIN_READY && old == WIN_LOCKED)
			return -EBUSY;

		/* from intel_th_msc_window_unlock(), don't warn if not locked */
		if (expect == WIN_LOCKED && old == new)
			return 0;

		dev_warn_ratelimited(msc_dev(win->msc),
				     "expected lockout state %d, got %d\n",
				     expect, old);
	}

	return ret;
}

/**
 * msc_configure() - set up MSC hardware
 * @msc:	the MSC device to configure
 *
 * Program storage mode, wrapping, burst length and trace buffer address
 * into a given MSC. Then, enable tracing and set msc::enabled.
 * The latter is serialized on msc::buf_mutex, so make sure to hold it.
 *
 * Return:	%0 for success or a negative error code otherwise.
 */
static int msc_configure(struct msc *msc)
{
	u32 reg;

	lockdep_assert_held(&msc->buf_mutex);

	if (msc->mode > MSC_MODE_MULTI)
		return -EINVAL;

	if (msc->mode == MSC_MODE_MULTI) {
		if (msc_win_set_lockout(msc->cur_win, WIN_READY, WIN_INUSE))
			return -EBUSY;

		msc_buffer_clear_hw_header(msc);
	}

	msc->orig_addr = ioread32(msc->reg_base + REG_MSU_MSC0BAR);
	msc->orig_sz = ioread32(msc->reg_base + REG_MSU_MSC0SIZE);

	reg = msc->base_addr >> PAGE_SHIFT;
	iowrite32(reg, msc->reg_base + REG_MSU_MSC0BAR);

	if (msc->mode == MSC_MODE_SINGLE) {
		reg = msc->nr_pages;
		iowrite32(reg, msc->reg_base + REG_MSU_MSC0SIZE);
	}

	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
	reg &= ~(MSC_MODE | MSC_WRAPEN | MSC_EN | MSC_RD_HDR_OVRD);

	reg |= MSC_EN;
	reg |= msc->mode << __ffs(MSC_MODE);
	reg |= msc->burst_len << __ffs(MSC_LEN);

	if (msc->wrap)
		reg |= MSC_WRAPEN;

	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);

	intel_th_msu_init(msc);

	msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI;
	intel_th_trace_enable(msc->thdev);
	msc->enabled = 1;

	if (msc->mbuf && msc->mbuf->activate)
		msc->mbuf->activate(msc->mbuf_priv);

	return 0;
}

/**
 * msc_disable() - disable MSC hardware
 * @msc:	MSC device to disable
 *
 * If @msc is enabled, disable tracing on the switch and then disable MSC
 * storage. Caller must hold msc::buf_mutex.
 */
static void msc_disable(struct msc *msc)
{
	struct msc_window *win = msc->cur_win;
	u32 reg;

	lockdep_assert_held(&msc->buf_mutex);

	if (msc->mode == MSC_MODE_MULTI)
		msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);

	if (msc->mbuf && msc->mbuf->deactivate)
		msc->mbuf->deactivate(msc->mbuf_priv);
	intel_th_msu_deinit(msc);
	intel_th_trace_disable(msc->thdev);

	if (msc->mode == MSC_MODE_SINGLE) {
		reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
		msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT);

		reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP);
		msc->single_sz = reg & ((msc->nr_pages << PAGE_SHIFT) - 1);
		dev_dbg(msc_dev(msc), "MSCnMWP: %08x/%08lx, wrap: %d\n",
			reg, msc->single_sz, msc->single_wrap);
	}

	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
	reg &= ~MSC_EN;
	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);

	if (msc->mbuf && msc->mbuf->ready)
		msc->mbuf->ready(msc->mbuf_priv, win->sgt,
				 msc_win_total_sz(win));

	msc->enabled = 0;

	iowrite32(msc->orig_addr, msc->reg_base + REG_MSU_MSC0BAR);
	iowrite32(msc->orig_sz, msc->reg_base + REG_MSU_MSC0SIZE);

	dev_dbg(msc_dev(msc), "MSCnNWSA: %08x\n",
		ioread32(msc->reg_base + REG_MSU_MSC0NWSA));

	reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
	dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg);

	reg = ioread32(msc->reg_base + REG_MSU_MSUSTS);
	reg &= msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
	iowrite32(reg, msc->reg_base + REG_MSU_MSUSTS);
}

static int intel_th_msc_activate(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	int ret = -EBUSY;

	if (!atomic_inc_unless_negative(&msc->user_count))
		return -ENODEV;

	mutex_lock(&msc->buf_mutex);

	/* if there are readers, refuse */
	if (list_empty(&msc->iter_list))
		ret = msc_configure(msc);

	mutex_unlock(&msc->buf_mutex);

	if (ret)
		atomic_dec(&msc->user_count);

	return ret;
}

static void intel_th_msc_deactivate(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);

	mutex_lock(&msc->buf_mutex);
	if (msc->enabled) {
		msc_disable(msc);
		atomic_dec(&msc->user_count);
	}
	mutex_unlock(&msc->buf_mutex);
}

/**
 * msc_buffer_contig_alloc() - allocate a contiguous buffer for SINGLE mode
 * @msc:	MSC device
 * @size:	allocation size in bytes
 *
 * This modifies msc::base, which requires msc::buf_mutex to serialize, so the
 * caller is expected to hold it.
 *
 * Return:	0 on success, -errno otherwise.
 */
static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
{
	unsigned long nr_pages = size >> PAGE_SHIFT;
	unsigned int order = get_order(size);
	struct page *page;
	int ret;

	if (!size)
		return 0;

	ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL);
	if (ret)
		goto err_out;

	ret = -ENOMEM;
	page = alloc_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32, order);
	if (!page)
		goto err_free_sgt;

	split_page(page, order);
	sg_set_buf(msc->single_sgt.sgl, page_address(page), size);

	ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1,
			 DMA_FROM_DEVICE);
	if (ret <= 0) {
		/* dma_map_sg() signals failure by mapping zero segments */
		ret = -ENOMEM;
		goto err_free_pages;
	}

	msc->nr_pages = nr_pages;
	msc->base = page_address(page);
	msc->base_addr = sg_dma_address(msc->single_sgt.sgl);

	return 0;

err_free_pages:
	__free_pages(page, order);

err_free_sgt:
	sg_free_table(&msc->single_sgt);

err_out:
	return ret;
}

/**
 * msc_buffer_contig_free() - free a contiguous buffer
 * @msc:	MSC configured in SINGLE mode
 */
static void msc_buffer_contig_free(struct msc *msc)
{
	unsigned long off;

	dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl,
		     1, DMA_FROM_DEVICE);
	sg_free_table(&msc->single_sgt);

	for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
		struct page *page = virt_to_page(msc->base + off);

		__free_page(page);
	}

	msc->nr_pages = 0;
}

/**
 * msc_buffer_contig_get_page() - find a page at a given offset
 * @msc:	MSC configured in SINGLE mode
 * @pgoff:	page offset
 *
 * Return:	page, if @pgoff is within the range, NULL otherwise.
 */
static struct page *msc_buffer_contig_get_page(struct msc *msc,
					       unsigned long pgoff)
{
	if (pgoff >= msc->nr_pages)
		return NULL;

	return virt_to_page(msc->base + (pgoff << PAGE_SHIFT));
}

static int __msc_buffer_win_alloc(struct msc_window *win,
				  unsigned int nr_segs)
{
	struct scatterlist *sg_ptr;
	void *block;
	int i, ret;

	ret = sg_alloc_table(win->sgt, nr_segs, GFP_KERNEL);
	if (ret)
		return -ENOMEM;

	for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {
		block = dma_alloc_coherent(msc_dev(win->msc)->parent->parent,
					   PAGE_SIZE, &sg_dma_address(sg_ptr),
					   GFP_KERNEL);
		if (!block)
			goto err_nomem;

		sg_set_buf(sg_ptr, block, PAGE_SIZE);
	}

	return nr_segs;

err_nomem:
	for_each_sg(win->sgt->sgl, sg_ptr, i, ret)
		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
				  sg_virt(sg_ptr), sg_dma_address(sg_ptr));

	sg_free_table(win->sgt);

	return -ENOMEM;
}

#ifdef CONFIG_X86
static void msc_buffer_set_uc(struct msc *msc)
{
	struct scatterlist *sg_ptr;
	struct msc_window *win;
	int i;

	if (msc->mode == MSC_MODE_SINGLE) {
		set_memory_uc((unsigned long)msc->base, msc->nr_pages);
		return;
	}

	list_for_each_entry(win, &msc->win_list, entry) {
		for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) {
			/* Set the page as uncached */
			set_memory_uc((unsigned long)sg_virt(sg_ptr),
				      PFN_DOWN(sg_ptr->length));
		}
	}
}

static void msc_buffer_set_wb(struct msc *msc)
{
	struct scatterlist *sg_ptr;
	struct msc_window *win;
	int i;

	if (msc->mode == MSC_MODE_SINGLE) {
		set_memory_wb((unsigned long)msc->base, msc->nr_pages);
		return;
	}

	list_for_each_entry(win, &msc->win_list, entry) {
		for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) {
			/* Reset the page to write-back */
			set_memory_wb((unsigned long)sg_virt(sg_ptr),
				      PFN_DOWN(sg_ptr->length));
		}
	}
}
#else /* !X86 */
static inline void
msc_buffer_set_uc(struct msc *msc) {}
static inline void msc_buffer_set_wb(struct msc *msc) {}
#endif /* CONFIG_X86 */

static struct page *msc_sg_page(struct scatterlist *sg)
{
	void *addr = sg_virt(sg);

	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);

	return sg_page(sg);
}

/**
 * msc_buffer_win_alloc() - alloc a window for a multiblock mode
 * @msc:	MSC device
 * @nr_blocks:	number of pages in this window
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return:	0 on success, -errno otherwise.
 */
static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
{
	struct msc_window *win;
	int ret = -ENOMEM;

	if (!nr_blocks)
		return 0;

	win = kzalloc(sizeof(*win), GFP_KERNEL);
	if (!win)
		return -ENOMEM;

	win->msc = msc;
	win->sgt = &win->_sgt;
	win->lockout = WIN_READY;
	spin_lock_init(&win->lo_lock);

	if (!list_empty(&msc->win_list)) {
		struct msc_window *prev = list_last_entry(&msc->win_list,
							  struct msc_window,
							  entry);

		win->pgoff = prev->pgoff + prev->nr_blocks;
	}

	if (msc->mbuf && msc->mbuf->alloc_window)
		ret = msc->mbuf->alloc_window(msc->mbuf_priv, &win->sgt,
					      nr_blocks << PAGE_SHIFT);
	else
		ret = __msc_buffer_win_alloc(win, nr_blocks);

	if (ret <= 0)
		goto err_nomem;

	win->nr_segs = ret;
	win->nr_blocks = nr_blocks;

	if (list_empty(&msc->win_list)) {
		msc->base = msc_win_base(win);
		msc->base_addr = msc_win_base_dma(win);
		msc->cur_win = win;
	}

	list_add_tail(&win->entry, &msc->win_list);
	msc->nr_pages += nr_blocks;

	return 0;

err_nomem:
	kfree(win);

	return ret;
}
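
/*
 * Note on the ->alloc_window() branch above (a sketch of the contract as
 * this driver uses it): a buffer sink allocates its own sg_table covering
 * the requested size and, like __msc_buffer_win_alloc(), returns the
 * number of segments, with 0 or a negative errno treated as failure.
 * "example_alloc_window" is hypothetical:
 *
 *	static int example_alloc_window(void *priv, struct sg_table **sgt,
 *					size_t size)
 *	{
 *		...allocate and fill in *sgt...
 *
 *		return (*sgt)->nents;
 *	}
 */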

static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(win->sgt->sgl, sg, win->nr_segs, i) {
		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
				  sg_virt(sg), sg_dma_address(sg));
	}
	sg_free_table(win->sgt);
}

/**
 * msc_buffer_win_free() - free a window from MSC's window list
 * @msc:	MSC device
 * @win:	window to free
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 */
static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
	msc->nr_pages -= win->nr_blocks;

	list_del(&win->entry);
	if (list_empty(&msc->win_list)) {
		msc->base = NULL;
		msc->base_addr = 0;
	}

	if (msc->mbuf && msc->mbuf->free_window)
		msc->mbuf->free_window(msc->mbuf_priv, win->sgt);
	else
		__msc_buffer_win_free(msc, win);

	kfree(win);
}

/**
 * msc_buffer_relink() - set up block descriptors for multiblock mode
 * @msc:	MSC device
 *
 * This traverses msc::win_list, which requires msc::buf_mutex to serialize,
 * so the caller is expected to hold it.
 */
static void msc_buffer_relink(struct msc *msc)
{
	struct msc_window *win, *next_win;

	/* call with msc::buf_mutex locked */
	list_for_each_entry(win, &msc->win_list, entry) {
		struct scatterlist *sg;
		unsigned int blk;
		u32 sw_tag = 0;

		/*
		 * Last window's next_win should point to the first window
		 * and MSC_SW_TAG_LASTWIN should be set.
		 */
		if (msc_is_last_win(win)) {
			sw_tag |= MSC_SW_TAG_LASTWIN;
			next_win = list_first_entry(&msc->win_list,
						    struct msc_window, entry);
		} else {
			next_win = list_next_entry(win, entry);
		}

		for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
			struct msc_block_desc *bdesc = sg_virt(sg);

			memset(bdesc, 0, sizeof(*bdesc));

			bdesc->next_win = msc_win_base_pfn(next_win);

			/*
			 * Similarly to last window, last block should point
			 * to the first one.
			 */
			if (blk == win->nr_segs - 1) {
				sw_tag |= MSC_SW_TAG_LASTBLK;
				bdesc->next_blk = msc_win_base_pfn(win);
			} else {
				dma_addr_t addr = sg_dma_address(sg_next(sg));

				bdesc->next_blk = PFN_DOWN(addr);
			}

			bdesc->sw_tag = sw_tag;
			bdesc->block_sz = sg->length / 64;
		}
	}

	/*
	 * Make the above writes globally visible before tracing is
	 * enabled to make sure hardware sees them coherently.
	 */
	wmb();
}
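
/*
 * The linkage produced above, sketched for a hypothetical two-window
 * buffer (window W0 with blocks A, B; last window W1 with blocks C, D):
 *
 *	A.next_blk -> B		A.next_win -> C (W1's base)
 *	B.next_blk -> A		B.next_win -> C		B: LASTBLK
 *	C.next_blk -> D		C.next_win -> A (W0)	C: LASTWIN
 *	D.next_blk -> C		D.next_win -> A		D: LASTWIN | LASTBLK
 *
 * The last block of each window points back to the window's base and the
 * last window points back to the first, closing both rings.
 */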

static void msc_buffer_multi_free(struct msc *msc)
{
	struct msc_window *win, *iter;

	list_for_each_entry_safe(win, iter, &msc->win_list, entry)
		msc_buffer_win_free(msc, win);
}

static int msc_buffer_multi_alloc(struct msc *msc, unsigned long *nr_pages,
				  unsigned int nr_wins)
{
	int ret, i;

	for (i = 0; i < nr_wins; i++) {
		ret = msc_buffer_win_alloc(msc, nr_pages[i]);
		if (ret) {
			msc_buffer_multi_free(msc);
			return ret;
		}
	}

	msc_buffer_relink(msc);

	return 0;
}

/**
 * msc_buffer_free() - free buffers for MSC
 * @msc:	MSC device
 *
 * Free MSC's storage buffers.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex to
 * serialize, so the caller is expected to hold it.
 */
static void msc_buffer_free(struct msc *msc)
{
	msc_buffer_set_wb(msc);

	if (msc->mode == MSC_MODE_SINGLE)
		msc_buffer_contig_free(msc);
	else if (msc->mode == MSC_MODE_MULTI)
		msc_buffer_multi_free(msc);
}

/**
 * msc_buffer_alloc() - allocate a buffer for MSC
 * @msc:	MSC device
 * @nr_pages:	number of pages for each window
 * @nr_wins:	number of windows
 *
 * Allocate a storage buffer for MSC, depending on the msc::mode: via
 * msc_buffer_contig_alloc() for SINGLE operation mode, or via
 * msc_buffer_multi_alloc() for multiblock operation, which allocates one
 * window per entry in @nr_pages using msc_buffer_win_alloc().
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return:	0 on success, -errno otherwise.
 */
static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages,
			    unsigned int nr_wins)
{
	int ret;

	/* -1: buffer not allocated */
	if (atomic_read(&msc->user_count) != -1)
		return -EBUSY;

	if (msc->mode == MSC_MODE_SINGLE) {
		if (nr_wins != 1)
			return -EINVAL;

		ret = msc_buffer_contig_alloc(msc, nr_pages[0] << PAGE_SHIFT);
	} else if (msc->mode == MSC_MODE_MULTI) {
		ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins);
	} else {
		ret = -EINVAL;
	}

	if (!ret) {
		msc_buffer_set_uc(msc);

		/* allocation should be visible before the counter goes to 0 */
		smp_mb__before_atomic();

		if (WARN_ON_ONCE(atomic_cmpxchg(&msc->user_count, -1, 0) != -1))
			return -EINVAL;
	}

	return ret;
}

/**
 * msc_buffer_unlocked_free_unless_used() - free a buffer unless it's in use
 * @msc:	MSC device
 *
 * This will free MSC buffer unless it is in use or there is no allocated
 * buffer.
 * Caller needs to hold msc::buf_mutex.
 *
 * Return:	0 on successful deallocation or if there was no buffer to
 *		deallocate, -EBUSY if there are active users.
 */
static int msc_buffer_unlocked_free_unless_used(struct msc *msc)
{
	int count, ret = 0;

	count = atomic_cmpxchg(&msc->user_count, 0, -1);

	/* > 0: buffer is allocated and has users */
	if (count > 0)
		ret = -EBUSY;
	/* 0: buffer is allocated, no users */
	else if (!count)
		msc_buffer_free(msc);
	/* < 0: no buffer, nothing to do */

	return ret;
}

/**
 * msc_buffer_free_unless_used() - free a buffer unless it's in use
 * @msc:	MSC device
 *
 * This is a locked version of msc_buffer_unlocked_free_unless_used().
 *
 * Return:	0 on successful deallocation or if there was no buffer to
 *		deallocate, -EBUSY if there are active users.
 */
static int msc_buffer_free_unless_used(struct msc *msc)
{
	int ret;

	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_unlocked_free_unless_used(msc);
	mutex_unlock(&msc->buf_mutex);

	return ret;
}
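
/*
 * To summarize the msc::user_count protocol used above and below (see
 * also the comment at its declaration in struct msc):
 *
 *	-1	no buffer; msc_buffer_alloc() moves it -1 -> 0
 *	 0	buffer allocated, idle; freeing cmpxchg()es 0 -> -1
 *	>0	buffer in use (readers, mappings, active trace);
 *		freeing fails with -EBUSY
 *
 * Users take a reference with atomic_inc_unless_negative() and drop it
 * with atomic_dec().
 */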

/**
 * msc_buffer_get_page() - get MSC buffer page at a given offset
 * @msc:	MSC device
 * @pgoff:	page offset into the storage buffer
 *
 * This traverses msc::win_list, so holding msc::buf_mutex is expected from
 * the caller.
 *
 * Return:	page if @pgoff corresponds to a valid buffer page or NULL.
 */
static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
{
	struct msc_window *win;
	struct scatterlist *sg;
	unsigned int blk;

	if (msc->mode == MSC_MODE_SINGLE)
		return msc_buffer_contig_get_page(msc, pgoff);

	list_for_each_entry(win, &msc->win_list, entry)
		if (pgoff >= win->pgoff && pgoff < win->pgoff + win->nr_blocks)
			goto found;

	return NULL;

found:
	pgoff -= win->pgoff;

	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
		struct page *page = msc_sg_page(sg);
		size_t pgsz = PFN_DOWN(sg->length);

		if (pgoff < pgsz)
			return page + pgoff;

		pgoff -= pgsz;
	}

	return NULL;
}

/**
 * struct msc_win_to_user_struct - data for copy_to_user() callback
 * @buf:	userspace buffer to copy data to
 * @offset:	running offset
 */
struct msc_win_to_user_struct {
	char __user	*buf;
	unsigned long	offset;
};

/**
 * msc_win_to_user() - iterator for msc_buffer_iterate() to copy data to user
 * @data:	callback's private data
 * @src:	source buffer
 * @len:	amount of data to copy from the source buffer
 *
 * Return:	number of bytes that could not be copied, %0 on full success.
 */
static unsigned long msc_win_to_user(void *data, void *src, size_t len)
{
	struct msc_win_to_user_struct *u = data;
	unsigned long ret;

	ret = copy_to_user(u->buf + u->offset, src, len);
	u->offset += len - ret;

	return ret;
}

/*
 * file operations' callbacks
 */

static int intel_th_msc_open(struct inode *inode, struct file *file)
{
	struct intel_th_device *thdev = file->private_data;
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	struct msc_iter *iter;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	iter = msc_iter_install(msc);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	file->private_data = iter;

	return nonseekable_open(inode, file);
}

static int intel_th_msc_release(struct inode *inode, struct file *file)
{
	struct msc_iter *iter = file->private_data;
	struct msc *msc = iter->msc;

	msc_iter_remove(iter, msc);

	return 0;
}

static ssize_t
msc_single_to_user(struct msc *msc, char __user *buf, loff_t off, size_t len)
{
	unsigned long size = msc->nr_pages << PAGE_SHIFT, rem = len;
	unsigned long start = off, tocopy = 0;

	if (msc->single_wrap) {
		start += msc->single_sz;
		if (start < size) {
			tocopy = min(rem, size - start);
			if (copy_to_user(buf, msc->base + start, tocopy))
				return -EFAULT;

			buf += tocopy;
			rem -= tocopy;
			start += tocopy;
		}

		start &= size - 1;
		if (rem) {
			tocopy = min(rem, msc->single_sz - start);
			if (copy_to_user(buf, msc->base + start, tocopy))
				return -EFAULT;

			rem -= tocopy;
		}

		return len - rem;
	}

	if (copy_to_user(buf, msc->base + start, rem))
		return -EFAULT;

	return len;
}
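
/*
 * A worked example of the wrap handling above, with hypothetical numbers:
 * for a 64KiB single-mode buffer (size) that wrapped with single_sz =
 * 16KiB, the oldest data starts at offset 16KiB. A read at off = 0 thus
 * copies [16KiB..64KiB) first, then wraps around to [0..16KiB),
 * returning the data in age order.
 */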

static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
				 size_t len, loff_t *ppos)
{
	struct msc_iter *iter = file->private_data;
	struct msc *msc = iter->msc;
	size_t size;
	loff_t off = *ppos;
	ssize_t ret = 0;

	if (!atomic_inc_unless_negative(&msc->user_count))
		return 0;

	if (msc->mode == MSC_MODE_SINGLE && !msc->single_wrap)
		size = msc->single_sz;
	else
		size = msc->nr_pages << PAGE_SHIFT;

	if (!size)
		goto put_count;

	if (off >= size)
		goto put_count;

	if (off + len >= size)
		len = size - off;

	if (msc->mode == MSC_MODE_SINGLE) {
		ret = msc_single_to_user(msc, buf, off, len);
		if (ret >= 0)
			*ppos += ret;
	} else if (msc->mode == MSC_MODE_MULTI) {
		struct msc_win_to_user_struct u = {
			.buf	= buf,
			.offset	= 0,
		};

		ret = msc_buffer_iterate(iter, len, &u, msc_win_to_user);
		if (ret >= 0)
			*ppos = iter->offset;
	} else {
		ret = -EINVAL;
	}

put_count:
	atomic_dec(&msc->user_count);

	return ret;
}

/*
 * vm operations callbacks (vm_ops)
 */

static void msc_mmap_open(struct vm_area_struct *vma)
{
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;

	atomic_inc(&msc->mmap_count);
}

static void msc_mmap_close(struct vm_area_struct *vma)
{
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;

	if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex))
		return;

	/* last mapping -- drop user_count */
	atomic_dec(&msc->user_count);
	mutex_unlock(&msc->buf_mutex);
}

static vm_fault_t msc_mmap_fault(struct vm_fault *vmf)
{
	struct msc_iter *iter = vmf->vma->vm_file->private_data;
	struct msc *msc = iter->msc;
	struct page *page;

	page = msc_buffer_get_page(msc, vmf->pgoff);
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);
	return vmf_insert_mixed(vmf->vma, vmf->address, page_to_pfn(page));
}

static const struct vm_operations_struct msc_mmap_ops = {
	.open	= msc_mmap_open,
	.close	= msc_mmap_close,
	.fault	= msc_mmap_fault,
};

static int intel_th_msc_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;
	int ret = -EINVAL;

	if (!size || offset_in_page(size))
		return -EINVAL;

	if (vma->vm_pgoff)
		return -EINVAL;

	/* grab user_count once per mmap; drop in msc_mmap_close() */
	if (!atomic_inc_unless_negative(&msc->user_count))
		return -EINVAL;

	if (msc->mode != MSC_MODE_SINGLE &&
	    msc->mode != MSC_MODE_MULTI)
		goto out;

	if (size >> PAGE_SHIFT != msc->nr_pages)
		goto out;

	atomic_set(&msc->mmap_count, 1);
	ret = 0;

out:
	if (ret)
		atomic_dec(&msc->user_count);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTCOPY | VM_MIXEDMAP);
	vma->vm_ops = &msc_mmap_ops;
	return ret;
}

static const struct file_operations intel_th_msc_fops = {
	.open		= intel_th_msc_open,
	.release	= intel_th_msc_release,
	.read		= intel_th_msc_read,
	.mmap		= intel_th_msc_mmap,
	.owner		= THIS_MODULE,
};
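
/*
 * Userspace view of the fops above (an illustrative sketch; the device
 * node name depends on the system, e.g. /dev/intel_th0/msc0, and opening
 * it requires CAP_SYS_RAWIO):
 *
 *	int fd = open("/dev/intel_th0/msc0", O_RDONLY);
 *	char buf[4096];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *
 * or, mapping the whole buffer (partial mappings and non-zero offsets
 * are rejected by intel_th_msc_mmap() above):
 *
 *	void *p = mmap(NULL, nr_pages * 4096, PROT_READ, MAP_SHARED, fd, 0);
 */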

static void intel_th_msc_wait_empty(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	unsigned long count;
	u32 reg;

	for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH;
	     count && !(reg & MSCSTS_PLE); count--) {
		reg = __raw_readl(msc->reg_base + REG_MSU_MSC0STS);
		cpu_relax();
	}

	if (!count)
		dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n");
}

static int intel_th_msc_init(struct msc *msc)
{
	atomic_set(&msc->user_count, -1);

	msc->mode = msc->multi_is_broken ? MSC_MODE_SINGLE : MSC_MODE_MULTI;
	mutex_init(&msc->buf_mutex);
	INIT_LIST_HEAD(&msc->win_list);
	INIT_LIST_HEAD(&msc->iter_list);

	msc->burst_len =
		(ioread32(msc->reg_base + REG_MSU_MSC0CTL) & MSC_LEN) >>
		__ffs(MSC_LEN);

	return 0;
}

static int msc_win_switch(struct msc *msc)
{
	struct msc_window *first;

	if (list_empty(&msc->win_list))
		return -EINVAL;

	first = list_first_entry(&msc->win_list, struct msc_window, entry);

	if (msc_is_last_win(msc->cur_win))
		msc->cur_win = first;
	else
		msc->cur_win = list_next_entry(msc->cur_win, entry);

	msc->base = msc_win_base(msc->cur_win);
	msc->base_addr = msc_win_base_dma(msc->cur_win);

	intel_th_trace_switch(msc->thdev);

	return 0;
}

/**
 * intel_th_msc_window_unlock - put the window back in rotation
 * @dev:	MSC device to which this relates
 * @sgt:	buffer's sg_table for the window, does nothing if NULL
 */
void intel_th_msc_window_unlock(struct device *dev, struct sg_table *sgt)
{
	struct msc *msc = dev_get_drvdata(dev);
	struct msc_window *win;

	if (!sgt)
		return;

	win = msc_find_window(msc, sgt, false);
	if (!win)
		return;

	msc_win_set_lockout(win, WIN_LOCKED, WIN_READY);
	if (msc->switch_on_unlock == win) {
		msc->switch_on_unlock = NULL;
		msc_win_switch(msc);
	}
}
EXPORT_SYMBOL_GPL(intel_th_msc_window_unlock);
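
/*
 * Sketch of how a buffer sink typically uses the export above (names are
 * hypothetical): its ->ready() callback queues work to consume the
 * window's data out of interrupt context and, once done, puts the window
 * back in rotation:
 *
 *	static void example_work(struct work_struct *work)
 *	{
 *		struct example_priv *priv =
 *			container_of(work, struct example_priv, work);
 *
 *		...consume the data in priv->sgt...
 *
 *		intel_th_msc_window_unlock(priv->dev, priv->sgt);
 *	}
 */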

static void msc_work(struct work_struct *work)
{
	struct msc *msc = container_of(work, struct msc, work);

	intel_th_msc_deactivate(msc->thdev);
}

static irqreturn_t intel_th_msc_interrupt(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	u32 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
	u32 mask = msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
	struct msc_window *win, *next_win;

	if (!msc->do_irq || !msc->mbuf)
		return IRQ_NONE;

	msusts &= mask;

	if (!msusts)
		return msc->enabled ? IRQ_HANDLED : IRQ_NONE;

	iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);

	if (!msc->enabled)
		return IRQ_NONE;

	/* grab the window before we do the switch */
	win = msc->cur_win;
	if (!win)
		return IRQ_HANDLED;
	next_win = msc_next_window(win);
	if (!next_win)
		return IRQ_HANDLED;

	/* next window: if READY, proceed, if LOCKED, stop the trace */
	if (msc_win_set_lockout(next_win, WIN_READY, WIN_INUSE)) {
		if (msc->stop_on_full)
			schedule_work(&msc->work);
		else
			msc->switch_on_unlock = next_win;

		return IRQ_HANDLED;
	}

	/* current window: INUSE -> LOCKED */
	msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);

	msc_win_switch(msc);

	if (msc->mbuf && msc->mbuf->ready)
		msc->mbuf->ready(msc->mbuf_priv, win->sgt,
				 msc_win_total_sz(win));

	return IRQ_HANDLED;
}

static const char * const msc_mode[] = {
	[MSC_MODE_SINGLE]	= "single",
	[MSC_MODE_MULTI]	= "multi",
	[MSC_MODE_EXI]		= "ExI",
	[MSC_MODE_DEBUG]	= "debug",
};

static ssize_t
wrap_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n", msc->wrap);
}

static ssize_t
wrap_store(struct device *dev, struct device_attribute *attr, const char *buf,
	   size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	msc->wrap = !!val;

	return size;
}

static DEVICE_ATTR_RW(wrap);

static void msc_buffer_unassign(struct msc *msc)
{
	lockdep_assert_held(&msc->buf_mutex);

	if (!msc->mbuf)
		return;

	msc->mbuf->unassign(msc->mbuf_priv);
	msu_buffer_put(msc->mbuf);
	msc->mbuf_priv = NULL;
	msc->mbuf = NULL;
}

static ssize_t
mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);
	const char *mode = msc_mode[msc->mode];
	ssize_t ret;

	mutex_lock(&msc->buf_mutex);
	if (msc->mbuf)
		mode = msc->mbuf->name;
	ret = scnprintf(buf, PAGE_SIZE, "%s\n", mode);
	mutex_unlock(&msc->buf_mutex);

	return ret;
}

static ssize_t
mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
	   size_t size)
{
	const struct msu_buffer *mbuf = NULL;
	struct msc *msc = dev_get_drvdata(dev);
	size_t len = size;
	char *cp, *mode;
	int i, ret;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	cp = memchr(buf, '\n', len);
	if (cp)
		len = cp - buf;

	mode = kstrndup(buf, len, GFP_KERNEL);
	if (!mode)
		return -ENOMEM;

	i = match_string(msc_mode, ARRAY_SIZE(msc_mode), mode);
	if (i >= 0) {
		kfree(mode);
		goto found;
	}

	/* Buffer sinks only work with a usable IRQ */
	if (!msc->do_irq) {
		kfree(mode);
		return -EINVAL;
	}

	mbuf = msu_buffer_get(mode);
	kfree(mode);
	if (mbuf)
		goto found;

	return -EINVAL;

found:
	if (i == MSC_MODE_MULTI && msc->multi_is_broken)
		return -EOPNOTSUPP;

	mutex_lock(&msc->buf_mutex);
	ret = 0;

	/* Same buffer: do nothing */
	if (mbuf && mbuf == msc->mbuf) {
		/* put the extra reference we just got */
		msu_buffer_put(mbuf);
		goto unlock;
	}

	ret = msc_buffer_unlocked_free_unless_used(msc);
	if (ret)
		goto unlock;

	if (mbuf) {
		void *mbuf_priv = mbuf->assign(dev, &i);

		if (!mbuf_priv) {
			ret = -ENOMEM;
			goto unlock;
		}

		msc_buffer_unassign(msc);
		msc->mbuf_priv = mbuf_priv;
		msc->mbuf = mbuf;
	} else {
		msc_buffer_unassign(msc);
	}

	msc->mode = i;

unlock:
	if (ret && mbuf)
		msu_buffer_put(mbuf);
	mutex_unlock(&msc->buf_mutex);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(mode);

static ssize_t
nr_pages_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);
	struct msc_window *win;
	size_t count = 0;

	mutex_lock(&msc->buf_mutex);

	if (msc->mode == MSC_MODE_SINGLE)
		count = scnprintf(buf, PAGE_SIZE, "%ld\n", msc->nr_pages);
	else if (msc->mode == MSC_MODE_MULTI) {
		list_for_each_entry(win, &msc->win_list, entry) {
			count += scnprintf(buf + count, PAGE_SIZE - count,
					   "%d%c", win->nr_blocks,
					   msc_is_last_win(win) ? '\n' : ',');
		}
	} else {
		count = scnprintf(buf, PAGE_SIZE, "unsupported\n");
	}

	mutex_unlock(&msc->buf_mutex);

	return count;
}

static ssize_t
nr_pages_store(struct device *dev, struct device_attribute *attr,
	       const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val, *win = NULL, *rewin;
	size_t len = size;
	const char *p = buf;
	char *end, *s;
	int ret, nr_wins = 0;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	ret = msc_buffer_free_unless_used(msc);
	if (ret)
		return ret;

	/* scan the comma-separated list of allocation sizes */
	end = memchr(buf, '\n', len);
	if (end)
		len = end - buf;

	do {
		end = memchr(p, ',', len);
		s = kstrndup(p, end ? end - p : len, GFP_KERNEL);
		if (!s) {
			ret = -ENOMEM;
			goto free_win;
		}

		ret = kstrtoul(s, 10, &val);
		kfree(s);

		if (ret || !val)
			goto free_win;

		if (nr_wins && msc->mode == MSC_MODE_SINGLE) {
			ret = -EINVAL;
			goto free_win;
		}

		nr_wins++;
		rewin = krealloc_array(win, nr_wins, sizeof(*win), GFP_KERNEL);
		if (!rewin) {
			kfree(win);
			return -ENOMEM;
		}

		win = rewin;
		win[nr_wins - 1] = val;

		if (!end)
			break;

		/* consume the number and the following comma, hence +1 */
		len -= end - p + 1;
		p = end + 1;
	} while (len);

	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_alloc(msc, win, nr_wins);
	mutex_unlock(&msc->buf_mutex);

free_win:
	kfree(win);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(nr_pages);
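
/*
 * Usage sketch for the attributes defined above and below (the sysfs
 * path is the usual location for this output device, e.g.
 * /sys/bus/intel_th/devices/0-msc0):
 *
 *	echo multi > mode	# or "single", or a registered sink's name
 *	echo 64,64 > nr_pages	# two windows of 64 pages each
 *	echo 1 > win_switch	# force a window switch (multi mode only)
 */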

static ssize_t
win_switch_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	if (val != 1)
		return -EINVAL;

	ret = -EINVAL;
	mutex_lock(&msc->buf_mutex);
	/*
	 * Window switch can only happen in the "multi" mode.
	 * If an external buffer is engaged, it has full control
	 * over window switching.
	 */
	if (msc->mode == MSC_MODE_MULTI && !msc->mbuf)
		ret = msc_win_switch(msc);
	mutex_unlock(&msc->buf_mutex);

	return ret ? ret : size;
}

static DEVICE_ATTR_WO(win_switch);

static ssize_t stop_on_full_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", msc->stop_on_full);
}

static ssize_t stop_on_full_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	int ret;

	ret = kstrtobool(buf, &msc->stop_on_full);
	if (ret)
		return ret;

	return size;
}

static DEVICE_ATTR_RW(stop_on_full);

static struct attribute *msc_output_attrs[] = {
	&dev_attr_wrap.attr,
	&dev_attr_mode.attr,
	&dev_attr_nr_pages.attr,
	&dev_attr_win_switch.attr,
	&dev_attr_stop_on_full.attr,
	NULL,
};

static const struct attribute_group msc_output_group = {
	.attrs	= msc_output_attrs,
};

static int intel_th_msc_probe(struct intel_th_device *thdev)
{
	struct device *dev = &thdev->dev;
	struct resource *res;
	struct msc *msc;
	void __iomem *base;
	int err;

	res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	base = devm_ioremap(dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	msc = devm_kzalloc(dev, sizeof(*msc), GFP_KERNEL);
	if (!msc)
		return -ENOMEM;

	res = intel_th_device_get_resource(thdev, IORESOURCE_IRQ, 1);
	if (!res)
		msc->do_irq = 1;

	if (INTEL_TH_CAP(to_intel_th(thdev), multi_is_broken))
		msc->multi_is_broken = 1;

	msc->index = thdev->id;

	msc->thdev = thdev;
	msc->reg_base = base + msc->index * 0x100;
	msc->msu_base = base;

	INIT_WORK(&msc->work, msc_work);
	err = intel_th_msc_init(msc);
	if (err)
		return err;

	dev_set_drvdata(dev, msc);

	return 0;
}

static void intel_th_msc_remove(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	int ret;

	intel_th_msc_deactivate(thdev);

	/*
	 * Buffers should not be used at this point except if the
	 * output character device is still open and the parent
	 * device gets detached from its bus, which is a FIXME.
	 */
	ret = msc_buffer_free_unless_used(msc);
	WARN_ON_ONCE(ret);
}

static struct intel_th_driver intel_th_msc_driver = {
	.probe	= intel_th_msc_probe,
	.remove	= intel_th_msc_remove,
	.irq	= intel_th_msc_interrupt,
	.wait_empty	= intel_th_msc_wait_empty,
	.activate	= intel_th_msc_activate,
	.deactivate	= intel_th_msc_deactivate,
	.fops	= &intel_th_msc_fops,
	.attr_group	= &msc_output_group,
	.driver	= {
		.name	= "msc",
		.owner	= THIS_MODULE,
	},
};

module_driver(intel_th_msc_driver,
	      intel_th_driver_register,
	      intel_th_driver_unregister);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel(R) Trace Hub Memory Storage Unit driver");
MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");