xref: /linux/drivers/hwtracing/intel_th/msu.c (revision f52ef24be21a2647fc50b6f8f2a4815d47bbad79)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Intel(R) Trace Hub Memory Storage Unit
4  *
5  * Copyright (C) 2014-2015 Intel Corporation.
6  */
7 
8 #define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
9 
10 #include <linux/types.h>
11 #include <linux/module.h>
12 #include <linux/device.h>
13 #include <linux/uaccess.h>
14 #include <linux/sizes.h>
15 #include <linux/printk.h>
16 #include <linux/slab.h>
17 #include <linux/mm.h>
18 #include <linux/fs.h>
19 #include <linux/io.h>
20 #include <linux/workqueue.h>
21 #include <linux/dma-mapping.h>
22 
23 #ifdef CONFIG_X86
24 #include <asm/set_memory.h>
25 #endif
26 
27 #include <linux/intel_th.h>
28 #include "intel_th.h"
29 #include "msu.h"
30 
31 #define msc_dev(x) (&(x)->thdev->dev)
32 
/*
 * Lockout state transitions:
 *   READY -> INUSE -+-> LOCKED -+-> READY -> etc.
 *                   \-----------/
 * WIN_READY:	window can be used by HW
 * WIN_INUSE:	window is in use
 * WIN_LOCKED:	window is filled up and is being processed by the buffer
 * handling code
 *
 * All state transitions happen automatically, except for the LOCKED->READY,
 * which needs to be signalled by the buffer code by calling
 * intel_th_msc_window_unlock().
 *
 * When the interrupt handler has to switch to the next window, it checks
 * whether it's READY, and if it is, it performs the switch and tracing
 * continues. If it's LOCKED, it stops the trace.
 */
enum lockout_state {
	WIN_READY = 0,	/* window can be used by HW */
	WIN_INUSE,	/* window is in use */
	WIN_LOCKED	/* window is full, buffer code is processing it */
};
55 
/**
 * struct msc_window - multiblock mode window descriptor
 * @entry:	window list linkage (msc::win_list)
 * @pgoff:	page offset into the buffer that this window starts at
 * @lockout:	lockout state, see comment above
 * @lo_lock:	lockout state serialization
 * @nr_blocks:	number of blocks (pages) in this window
 * @nr_segs:	number of segments in this window (<= @nr_blocks)
 * @msc:	backlink to the MSC device that owns this window
 * @_sgt:	in-line sg_table storage, used unless a buffer driver
 *		supplies its own table via msu_buffer::alloc_window
 * @sgt:	array of block descriptors; points at @_sgt or at an
 *		externally provided table
 */
struct msc_window {
	struct list_head	entry;
	unsigned long		pgoff;
	enum lockout_state	lockout;
	spinlock_t		lo_lock;
	unsigned int		nr_blocks;
	unsigned int		nr_segs;
	struct msc		*msc;
	struct sg_table		_sgt;
	struct sg_table		*sgt;
};
78 
/**
 * struct msc_iter - iterator for msc buffer
 * @entry:		msc::iter_list linkage
 * @msc:		pointer to the MSC device
 * @start_win:		oldest window
 * @win:		current window
 * @offset:		current logical offset into the buffer
 * @start_block:	oldest block in the window
 * @block:		block number in the window
 * @block_off:		offset into current block
 * @wrap_count:		block wrapping handling: remaining visits to the
 *			wrapped block (2: oldest part, 1: newest part)
 * @eof:		end of buffer reached
 */
struct msc_iter {
	struct list_head	entry;
	struct msc		*msc;
	struct msc_window	*start_win;
	struct msc_window	*win;
	unsigned long		offset;
	struct scatterlist	*start_block;
	struct scatterlist	*block;
	unsigned int		block_off;
	unsigned int		wrap_count;
	unsigned int		eof;
};
104 
/**
 * struct msc - MSC device representation
 * @reg_base:		register window base address
 * @msu_base:		MSU global register window base address
 * @thdev:		intel_th_device pointer
 * @mbuf:		MSU buffer, if assigned
 * @mbuf_priv:		MSU buffer's private data, if @mbuf
 * @work:		deferred work (NOTE(review): not used within this
 *			chunk — confirm against the rest of the file)
 * @win_list:		list of windows in multiblock mode
 * @single_sgt:		single mode buffer
 * @cur_win:		current window
 * @nr_pages:		total number of pages allocated for this buffer
 * @single_sz:		amount of data in single mode
 * @single_wrap:	single mode wrap occurred
 * @base:		buffer's base pointer
 * @base_addr:		buffer's base address
 * @orig_addr:		MSC0BAR value saved in msc_configure(), restored
 *			in msc_disable()
 * @orig_sz:		MSC0SIZE value saved in msc_configure(), restored
 *			in msc_disable()
 * @user_count:		number of users of the buffer
 * @mmap_count:		number of mappings
 * @buf_mutex:		mutex to serialize access to buffer-related bits
 * @iter_list:		list of open readers (struct msc_iter)
 * @enabled:		MSC is enabled
 * @wrap:		wrapping is enabled
 * @do_irq:		use MSU interrupts (cleared by intel_th_msu_init()
 *			when MINTCTL ignores writes)
 * @multi_is_broken:	multiblock mode is unusable on this hardware
 *			(set elsewhere — confirm)
 * @mode:		MSC operating mode
 * @burst_len:		write burst length
 * @index:		number of this MSC in the MSU
 */
struct msc {
	void __iomem		*reg_base;
	void __iomem		*msu_base;
	struct intel_th_device	*thdev;

	const struct msu_buffer	*mbuf;
	void			*mbuf_priv;

	struct work_struct	work;
	struct list_head	win_list;
	struct sg_table		single_sgt;
	struct msc_window	*cur_win;
	unsigned long		nr_pages;
	unsigned long		single_sz;
	unsigned int		single_wrap : 1;
	void			*base;
	dma_addr_t		base_addr;
	u32			orig_addr;
	u32			orig_sz;

	/* <0: no buffer, 0: no users, >0: active users */
	atomic_t		user_count;

	atomic_t		mmap_count;
	struct mutex		buf_mutex;

	struct list_head	iter_list;

	/* config */
	unsigned int		enabled : 1,
				wrap	: 1,
				do_irq	: 1,
				multi_is_broken : 1;
	unsigned int		mode;
	unsigned int		burst_len;
	unsigned int		index;
};
166 
/* registry of externally provided MSU buffer drivers */
static LIST_HEAD(msu_buffer_list);
/* serializes access to msu_buffer_list */
static DEFINE_MUTEX(msu_buffer_mutex);

/**
 * struct msu_buffer_entry - internal MSU buffer bookkeeping
 * @entry:	link to msu_buffer_list
 * @mbuf:	MSU buffer object
 * @owner:	module that provides this MSU buffer
 */
struct msu_buffer_entry {
	struct list_head	entry;
	const struct msu_buffer	*mbuf;
	struct module		*owner;
};
181 
182 static struct msu_buffer_entry *__msu_buffer_entry_find(const char *name)
183 {
184 	struct msu_buffer_entry *mbe;
185 
186 	lockdep_assert_held(&msu_buffer_mutex);
187 
188 	list_for_each_entry(mbe, &msu_buffer_list, entry) {
189 		if (!strcmp(mbe->mbuf->name, name))
190 			return mbe;
191 	}
192 
193 	return NULL;
194 }
195 
196 static const struct msu_buffer *
197 msu_buffer_get(const char *name)
198 {
199 	struct msu_buffer_entry *mbe;
200 
201 	mutex_lock(&msu_buffer_mutex);
202 	mbe = __msu_buffer_entry_find(name);
203 	if (mbe && !try_module_get(mbe->owner))
204 		mbe = NULL;
205 	mutex_unlock(&msu_buffer_mutex);
206 
207 	return mbe ? mbe->mbuf : NULL;
208 }
209 
210 static void msu_buffer_put(const struct msu_buffer *mbuf)
211 {
212 	struct msu_buffer_entry *mbe;
213 
214 	mutex_lock(&msu_buffer_mutex);
215 	mbe = __msu_buffer_entry_find(mbuf->name);
216 	if (mbe)
217 		module_put(mbe->owner);
218 	mutex_unlock(&msu_buffer_mutex);
219 }
220 
221 int intel_th_msu_buffer_register(const struct msu_buffer *mbuf,
222 				 struct module *owner)
223 {
224 	struct msu_buffer_entry *mbe;
225 	int ret = 0;
226 
227 	mbe = kzalloc(sizeof(*mbe), GFP_KERNEL);
228 	if (!mbe)
229 		return -ENOMEM;
230 
231 	mutex_lock(&msu_buffer_mutex);
232 	if (__msu_buffer_entry_find(mbuf->name)) {
233 		ret = -EEXIST;
234 		kfree(mbe);
235 		goto unlock;
236 	}
237 
238 	mbe->mbuf = mbuf;
239 	mbe->owner = owner;
240 	list_add_tail(&mbe->entry, &msu_buffer_list);
241 unlock:
242 	mutex_unlock(&msu_buffer_mutex);
243 
244 	return ret;
245 }
246 EXPORT_SYMBOL_GPL(intel_th_msu_buffer_register);
247 
248 void intel_th_msu_buffer_unregister(const struct msu_buffer *mbuf)
249 {
250 	struct msu_buffer_entry *mbe;
251 
252 	mutex_lock(&msu_buffer_mutex);
253 	mbe = __msu_buffer_entry_find(mbuf->name);
254 	if (mbe) {
255 		list_del(&mbe->entry);
256 		kfree(mbe);
257 	}
258 	mutex_unlock(&msu_buffer_mutex);
259 }
260 EXPORT_SYMBOL_GPL(intel_th_msu_buffer_unregister);
261 
262 static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
263 {
264 	/* header hasn't been written */
265 	if (!bdesc->valid_dw)
266 		return true;
267 
268 	/* valid_dw includes the header */
269 	if (!msc_data_sz(bdesc))
270 		return true;
271 
272 	return false;
273 }
274 
275 static inline struct scatterlist *msc_win_base_sg(struct msc_window *win)
276 {
277 	return win->sgt->sgl;
278 }
279 
280 static inline struct msc_block_desc *msc_win_base(struct msc_window *win)
281 {
282 	return sg_virt(msc_win_base_sg(win));
283 }
284 
285 static inline dma_addr_t msc_win_base_dma(struct msc_window *win)
286 {
287 	return sg_dma_address(msc_win_base_sg(win));
288 }
289 
290 static inline unsigned long
291 msc_win_base_pfn(struct msc_window *win)
292 {
293 	return PFN_DOWN(msc_win_base_dma(win));
294 }
295 
296 /**
297  * msc_is_last_win() - check if a window is the last one for a given MSC
298  * @win:	window
299  * Return:	true if @win is the last window in MSC's multiblock buffer
300  */
301 static inline bool msc_is_last_win(struct msc_window *win)
302 {
303 	return win->entry.next == &win->msc->win_list;
304 }
305 
306 /**
307  * msc_next_window() - return next window in the multiblock buffer
308  * @win:	current window
309  *
310  * Return:	window following the current one
311  */
312 static struct msc_window *msc_next_window(struct msc_window *win)
313 {
314 	if (msc_is_last_win(win))
315 		return list_first_entry(&win->msc->win_list, struct msc_window,
316 					entry);
317 
318 	return list_next_entry(win, entry);
319 }
320 
321 static size_t msc_win_total_sz(struct msc_window *win)
322 {
323 	struct scatterlist *sg;
324 	unsigned int blk;
325 	size_t size = 0;
326 
327 	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
328 		struct msc_block_desc *bdesc = sg_virt(sg);
329 
330 		if (msc_block_wrapped(bdesc))
331 			return (size_t)win->nr_blocks << PAGE_SHIFT;
332 
333 		size += msc_total_sz(bdesc);
334 		if (msc_block_last_written(bdesc))
335 			break;
336 	}
337 
338 	return size;
339 }
340 
/**
 * msc_find_window() - find a window matching a given sg_table
 * @msc:	MSC device
 * @sgt:	SG table of the window
 * @nonempty:	skip over empty windows
 *
 * Return:	MSC window structure pointer or NULL if the window
 *		could not be found.
 */
static struct msc_window *
msc_find_window(struct msc *msc, struct sg_table *sgt, bool nonempty)
{
	struct msc_window *win;
	unsigned int found = 0;

	if (list_empty(&msc->win_list))
		return NULL;

	/*
	 * we might need a radix tree for this, depending on how
	 * many windows a typical user would allocate; ideally it's
	 * something like 2, in which case we're good
	 */
	list_for_each_entry(win, &msc->win_list, entry) {
		if (win->sgt == sgt)
			found++;

		/* skip the empty ones */
		if (nonempty && msc_block_is_empty(msc_win_base(win)))
			continue;

		/*
		 * Once the matching window has been seen, return the first
		 * window at or after it in list order that survived the
		 * emptiness check above.
		 */
		if (found)
			return win;
	}

	return NULL;
}
378 
379 /**
380  * msc_oldest_window() - locate the window with oldest data
381  * @msc:	MSC device
382  *
383  * This should only be used in multiblock mode. Caller should hold the
384  * msc::user_count reference.
385  *
386  * Return:	the oldest window with valid data
387  */
388 static struct msc_window *msc_oldest_window(struct msc *msc)
389 {
390 	struct msc_window *win;
391 
392 	if (list_empty(&msc->win_list))
393 		return NULL;
394 
395 	win = msc_find_window(msc, msc_next_window(msc->cur_win)->sgt, true);
396 	if (win)
397 		return win;
398 
399 	return list_first_entry(&msc->win_list, struct msc_window, entry);
400 }
401 
402 /**
403  * msc_win_oldest_sg() - locate the oldest block in a given window
404  * @win:	window to look at
405  *
406  * Return:	index of the block with the oldest data
407  */
408 static struct scatterlist *msc_win_oldest_sg(struct msc_window *win)
409 {
410 	unsigned int blk;
411 	struct scatterlist *sg;
412 	struct msc_block_desc *bdesc = msc_win_base(win);
413 
414 	/* without wrapping, first block is the oldest */
415 	if (!msc_block_wrapped(bdesc))
416 		return msc_win_base_sg(win);
417 
418 	/*
419 	 * with wrapping, last written block contains both the newest and the
420 	 * oldest data for this window.
421 	 */
422 	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
423 		struct msc_block_desc *bdesc = sg_virt(sg);
424 
425 		if (msc_block_last_written(bdesc))
426 			return sg;
427 	}
428 
429 	return msc_win_base_sg(win);
430 }
431 
432 static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter)
433 {
434 	return sg_virt(iter->block);
435 }
436 
437 static struct msc_iter *msc_iter_install(struct msc *msc)
438 {
439 	struct msc_iter *iter;
440 
441 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
442 	if (!iter)
443 		return ERR_PTR(-ENOMEM);
444 
445 	mutex_lock(&msc->buf_mutex);
446 
447 	/*
448 	 * Reading and tracing are mutually exclusive; if msc is
449 	 * enabled, open() will fail; otherwise existing readers
450 	 * will prevent enabling the msc and the rest of fops don't
451 	 * need to worry about it.
452 	 */
453 	if (msc->enabled) {
454 		kfree(iter);
455 		iter = ERR_PTR(-EBUSY);
456 		goto unlock;
457 	}
458 
459 	iter->msc = msc;
460 
461 	list_add_tail(&iter->entry, &msc->iter_list);
462 unlock:
463 	mutex_unlock(&msc->buf_mutex);
464 
465 	return iter;
466 }
467 
468 static void msc_iter_remove(struct msc_iter *iter, struct msc *msc)
469 {
470 	mutex_lock(&msc->buf_mutex);
471 	list_del(&iter->entry);
472 	mutex_unlock(&msc->buf_mutex);
473 
474 	kfree(iter);
475 }
476 
477 static void msc_iter_block_start(struct msc_iter *iter)
478 {
479 	if (iter->start_block)
480 		return;
481 
482 	iter->start_block = msc_win_oldest_sg(iter->win);
483 	iter->block = iter->start_block;
484 	iter->wrap_count = 0;
485 
486 	/*
487 	 * start with the block with oldest data; if data has wrapped
488 	 * in this window, it should be in this block
489 	 */
490 	if (msc_block_wrapped(msc_iter_bdesc(iter)))
491 		iter->wrap_count = 2;
492 
493 }
494 
495 static int msc_iter_win_start(struct msc_iter *iter, struct msc *msc)
496 {
497 	/* already started, nothing to do */
498 	if (iter->start_win)
499 		return 0;
500 
501 	iter->start_win = msc_oldest_window(msc);
502 	if (!iter->start_win)
503 		return -EINVAL;
504 
505 	iter->win = iter->start_win;
506 	iter->start_block = NULL;
507 
508 	msc_iter_block_start(iter);
509 
510 	return 0;
511 }
512 
513 static int msc_iter_win_advance(struct msc_iter *iter)
514 {
515 	iter->win = msc_next_window(iter->win);
516 	iter->start_block = NULL;
517 
518 	if (iter->win == iter->start_win) {
519 		iter->eof++;
520 		return 1;
521 	}
522 
523 	msc_iter_block_start(iter);
524 
525 	return 0;
526 }
527 
/*
 * Advance the iterator to the next block in the current window, moving
 * on to the next window once this one is exhausted.
 *
 * Return:	non-zero when the end of the buffer is reached (via
 *		msc_iter_win_advance()), 0 otherwise.
 */
static int msc_iter_block_advance(struct msc_iter *iter)
{
	iter->block_off = 0;

	/* wrapping */
	if (iter->wrap_count && iter->block == iter->start_block) {
		iter->wrap_count--;
		if (!iter->wrap_count)
			/* copied newest data from the wrapped block */
			return msc_iter_win_advance(iter);
	}

	/* no wrapping, check for last written block */
	if (!iter->wrap_count && msc_block_last_written(msc_iter_bdesc(iter)))
		/* copied newest data for the window */
		return msc_iter_win_advance(iter);

	/* block advance */
	if (sg_is_last(iter->block))
		iter->block = msc_win_base_sg(iter->win);
	else
		iter->block = sg_next(iter->block);

	/* no wrapping, sanity check in case there is no last written block */
	if (!iter->wrap_count && iter->block == iter->start_block)
		return msc_iter_win_advance(iter);

	return 0;
}
557 
/**
 * msc_buffer_iterate() - go through multiblock buffer's data
 * @iter:	iterator structure
 * @size:	amount of data to scan
 * @data:	callback's private data
 * @fn:		iterator callback
 *
 * This will start at the window which will be written to next (containing
 * the oldest data) and work its way to the current window, calling @fn
 * for each chunk of data as it goes.
 *
 * Caller should have msc::user_count reference to make sure the buffer
 * doesn't disappear from under us.
 *
 * Return:	amount of data actually scanned.
 */
static ssize_t
msc_buffer_iterate(struct msc_iter *iter, size_t size, void *data,
		   unsigned long (*fn)(void *, void *, size_t))
{
	struct msc *msc = iter->msc;
	size_t len = size;
	unsigned int advance;

	if (iter->eof)
		return 0;

	/* start with the oldest window */
	if (msc_iter_win_start(iter, msc))
		return 0;

	do {
		unsigned long data_bytes = msc_data_sz(msc_iter_bdesc(iter));
		void *src = (void *)msc_iter_bdesc(iter) + MSC_BDESC;
		size_t tocopy = data_bytes, copied = 0;
		size_t remaining = 0;

		/* move on to the next block unless the copy stalls below */
		advance = 1;

		/*
		 * If block wrapping happened, we need to visit the last block
		 * twice, because it contains both the oldest and the newest
		 * data in this window.
		 *
		 * First time (wrap_count==2), in the very beginning, to collect
		 * the oldest data, which is in the range
		 * (data_bytes..DATA_IN_PAGE).
		 *
		 * Second time (wrap_count==1), it's just like any other block,
		 * containing data in the range of [MSC_BDESC..data_bytes].
		 */
		if (iter->block == iter->start_block && iter->wrap_count == 2) {
			tocopy = DATA_IN_PAGE - data_bytes;
			src += data_bytes;
		}

		if (!tocopy)
			goto next_block;

		/* resume mid-block where a previous call left off */
		tocopy -= iter->block_off;
		src += iter->block_off;

		if (len < tocopy) {
			tocopy = len;
			advance = 0;
		}

		remaining = fn(data, src, tocopy);

		/* the callback couldn't take it all: stay on this block */
		if (remaining)
			advance = 0;

		copied = tocopy - remaining;
		len -= copied;
		iter->block_off += copied;
		iter->offset += copied;

		if (!advance)
			break;

next_block:
		if (msc_iter_block_advance(iter))
			break;

	} while (len);

	return size - len;
}
646 
647 /**
648  * msc_buffer_clear_hw_header() - clear hw header for multiblock
649  * @msc:	MSC device
650  */
651 static void msc_buffer_clear_hw_header(struct msc *msc)
652 {
653 	struct msc_window *win;
654 	struct scatterlist *sg;
655 
656 	list_for_each_entry(win, &msc->win_list, entry) {
657 		unsigned int blk;
658 		size_t hw_sz = sizeof(struct msc_block_desc) -
659 			offsetof(struct msc_block_desc, hw_tag);
660 
661 		for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
662 			struct msc_block_desc *bdesc = sg_virt(sg);
663 
664 			memset(&bdesc->hw_tag, 0, hw_sz);
665 		}
666 	}
667 }
668 
/*
 * Enable this MSC's block interrupt in MINTCTL, if interrupts are usable
 * and a buffer driver is attached.
 *
 * Return:	0, including on the paths where interrupts end up unused.
 */
static int intel_th_msu_init(struct msc *msc)
{
	u32 mintctl, msusts;

	if (!msc->do_irq)
		return 0;

	/* interrupts are only needed for the buffer-driver (mbuf) path */
	if (!msc->mbuf)
		return 0;

	mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
	mintctl |= msc->index ? M1BLIE : M0BLIE;
	iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
	/* read back to detect hardware that ignores MINTCTL writes */
	if (mintctl != ioread32(msc->msu_base + REG_MSU_MINTCTL)) {
		dev_info(msc_dev(msc), "MINTCTL ignores writes: no usable interrupts\n");
		msc->do_irq = 0;
		return 0;
	}

	/* ack stale status (NOTE(review): assumes MSUSTS is write-1-to-clear) */
	msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
	iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);

	return 0;
}
693 
694 static void intel_th_msu_deinit(struct msc *msc)
695 {
696 	u32 mintctl;
697 
698 	if (!msc->do_irq)
699 		return;
700 
701 	mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
702 	mintctl &= msc->index ? ~M1BLIE : ~M0BLIE;
703 	iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
704 }
705 
/*
 * msc_win_set_lockout() - transition a window's lockout state
 * @win:	window to transition
 * @expect:	state the window is expected to currently be in
 * @new:	state to transition it to
 *
 * A no-op (returns 0) unless a buffer driver (mbuf) is attached.
 * A msc::user_count reference is held on the window's behalf for as
 * long as it is LOCKED.
 *
 * Return:	0 on success; -EINVAL or -EBUSY when the window was not
 *		in the @expect state.
 */
static int msc_win_set_lockout(struct msc_window *win,
			       enum lockout_state expect,
			       enum lockout_state new)
{
	enum lockout_state old;
	unsigned long flags;
	int ret = 0;

	if (!win->msc->mbuf)
		return 0;

	spin_lock_irqsave(&win->lo_lock, flags);
	old = win->lockout;

	if (old != expect) {
		ret = -EINVAL;
		dev_warn_ratelimited(msc_dev(win->msc),
				     "expected lockout state %d, got %d\n",
				     expect, old);
		goto unlock;
	}

	win->lockout = new;

	/* take a user_count reference on entering LOCKED, drop on leaving */
	if (old == expect && new == WIN_LOCKED)
		atomic_inc(&win->msc->user_count);
	else if (old == expect && old == WIN_LOCKED)
		atomic_dec(&win->msc->user_count);

unlock:
	spin_unlock_irqrestore(&win->lo_lock, flags);

	if (ret) {
		/* a LOCKED window cannot be handed back to hardware yet */
		if (expect == WIN_READY && old == WIN_LOCKED)
			return -EBUSY;

		/* from intel_th_msc_window_unlock(), don't warn if not locked */
		if (expect == WIN_LOCKED && old == new)
			return 0;
	}

	return ret;
}
/**
 * msc_configure() - set up MSC hardware
 * @msc:	the MSC device to configure
 *
 * Program storage mode, wrapping, burst length and trace buffer address
 * into a given MSC. Then, enable tracing and set msc::enabled.
 * The latter is serialized on msc::buf_mutex, so make sure to hold it.
 *
 * Return:	0 on success, -ENOTSUPP for unsupported modes, -EBUSY if
 *		the current window cannot be armed.
 */
static int msc_configure(struct msc *msc)
{
	u32 reg;

	lockdep_assert_held(&msc->buf_mutex);

	if (msc->mode > MSC_MODE_MULTI)
		return -ENOTSUPP;

	if (msc->mode == MSC_MODE_MULTI) {
		/* the current window must be READY before hw can write to it */
		if (msc_win_set_lockout(msc->cur_win, WIN_READY, WIN_INUSE))
			return -EBUSY;

		msc_buffer_clear_hw_header(msc);
	}

	/* save the original BAR/SIZE; restored in msc_disable() */
	msc->orig_addr = ioread32(msc->reg_base + REG_MSU_MSC0BAR);
	msc->orig_sz   = ioread32(msc->reg_base + REG_MSU_MSC0SIZE);

	reg = msc->base_addr >> PAGE_SHIFT;
	iowrite32(reg, msc->reg_base + REG_MSU_MSC0BAR);

	/* multiblock mode encodes the size in block descriptors instead */
	if (msc->mode == MSC_MODE_SINGLE) {
		reg = msc->nr_pages;
		iowrite32(reg, msc->reg_base + REG_MSU_MSC0SIZE);
	}

	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
	reg &= ~(MSC_MODE | MSC_WRAPEN | MSC_EN | MSC_RD_HDR_OVRD);

	reg |= MSC_EN;
	reg |= msc->mode << __ffs(MSC_MODE);
	reg |= msc->burst_len << __ffs(MSC_LEN);

	if (msc->wrap)
		reg |= MSC_WRAPEN;

	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);

	intel_th_msu_init(msc);

	msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI;
	intel_th_trace_enable(msc->thdev);
	msc->enabled = 1;

	/* let the buffer driver know tracing is starting */
	if (msc->mbuf && msc->mbuf->activate)
		msc->mbuf->activate(msc->mbuf_priv);

	return 0;
}
807 
/**
 * msc_disable() - disable MSC hardware
 * @msc:	MSC device to disable
 *
 * If @msc is enabled, disable tracing on the switch and then disable MSC
 * storage. Caller must hold msc::buf_mutex.
 */
static void msc_disable(struct msc *msc)
{
	struct msc_window *win = msc->cur_win;
	u32 reg;

	lockdep_assert_held(&msc->buf_mutex);

	/* hand the in-use window over to the buffer handling code */
	if (msc->mode == MSC_MODE_MULTI)
		msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);

	if (msc->mbuf && msc->mbuf->deactivate)
		msc->mbuf->deactivate(msc->mbuf_priv);
	intel_th_msu_deinit(msc);
	intel_th_trace_disable(msc->thdev);

	if (msc->mode == MSC_MODE_SINGLE) {
		reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
		msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT);

		/* amount of data = write pointer offset within the buffer */
		reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP);
		msc->single_sz = reg & ((msc->nr_pages << PAGE_SHIFT) - 1);
		dev_dbg(msc_dev(msc), "MSCnMWP: %08x/%08lx, wrap: %d\n",
			reg, msc->single_sz, msc->single_wrap);
	}

	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
	reg &= ~MSC_EN;
	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);

	/* hand the last window's contents to the buffer driver */
	if (msc->mbuf && msc->mbuf->ready)
		msc->mbuf->ready(msc->mbuf_priv, win->sgt,
				 msc_win_total_sz(win));

	msc->enabled = 0;

	/* restore the BAR/SIZE values saved in msc_configure() */
	iowrite32(msc->orig_addr, msc->reg_base + REG_MSU_MSC0BAR);
	iowrite32(msc->orig_sz, msc->reg_base + REG_MSU_MSC0SIZE);

	dev_dbg(msc_dev(msc), "MSCnNWSA: %08x\n",
		ioread32(msc->reg_base + REG_MSU_MSC0NWSA));

	reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
	dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg);

	/* ack this MSC's BLAST bit (NOTE(review): assumes write-1-to-clear) */
	reg = ioread32(msc->reg_base + REG_MSU_MSUSTS);
	reg &= msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
	iowrite32(reg, msc->reg_base + REG_MSU_MSUSTS);
}
863 
864 static int intel_th_msc_activate(struct intel_th_device *thdev)
865 {
866 	struct msc *msc = dev_get_drvdata(&thdev->dev);
867 	int ret = -EBUSY;
868 
869 	if (!atomic_inc_unless_negative(&msc->user_count))
870 		return -ENODEV;
871 
872 	mutex_lock(&msc->buf_mutex);
873 
874 	/* if there are readers, refuse */
875 	if (list_empty(&msc->iter_list))
876 		ret = msc_configure(msc);
877 
878 	mutex_unlock(&msc->buf_mutex);
879 
880 	if (ret)
881 		atomic_dec(&msc->user_count);
882 
883 	return ret;
884 }
885 
886 static void intel_th_msc_deactivate(struct intel_th_device *thdev)
887 {
888 	struct msc *msc = dev_get_drvdata(&thdev->dev);
889 
890 	mutex_lock(&msc->buf_mutex);
891 	if (msc->enabled) {
892 		msc_disable(msc);
893 		atomic_dec(&msc->user_count);
894 	}
895 	mutex_unlock(&msc->buf_mutex);
896 }
897 
898 /**
899  * msc_buffer_contig_alloc() - allocate a contiguous buffer for SINGLE mode
900  * @msc:	MSC device
901  * @size:	allocation size in bytes
902  *
903  * This modifies msc::base, which requires msc::buf_mutex to serialize, so the
904  * caller is expected to hold it.
905  *
906  * Return:	0 on success, -errno otherwise.
907  */
908 static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
909 {
910 	unsigned long nr_pages = size >> PAGE_SHIFT;
911 	unsigned int order = get_order(size);
912 	struct page *page;
913 	int ret;
914 
915 	if (!size)
916 		return 0;
917 
918 	ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL);
919 	if (ret)
920 		goto err_out;
921 
922 	ret = -ENOMEM;
923 	page = alloc_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32, order);
924 	if (!page)
925 		goto err_free_sgt;
926 
927 	split_page(page, order);
928 	sg_set_buf(msc->single_sgt.sgl, page_address(page), size);
929 
930 	ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1,
931 			 DMA_FROM_DEVICE);
932 	if (ret < 0)
933 		goto err_free_pages;
934 
935 	msc->nr_pages = nr_pages;
936 	msc->base = page_address(page);
937 	msc->base_addr = sg_dma_address(msc->single_sgt.sgl);
938 
939 	return 0;
940 
941 err_free_pages:
942 	__free_pages(page, order);
943 
944 err_free_sgt:
945 	sg_free_table(&msc->single_sgt);
946 
947 err_out:
948 	return ret;
949 }
950 
951 /**
952  * msc_buffer_contig_free() - free a contiguous buffer
953  * @msc:	MSC configured in SINGLE mode
954  */
955 static void msc_buffer_contig_free(struct msc *msc)
956 {
957 	unsigned long off;
958 
959 	dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl,
960 		     1, DMA_FROM_DEVICE);
961 	sg_free_table(&msc->single_sgt);
962 
963 	for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
964 		struct page *page = virt_to_page(msc->base + off);
965 
966 		page->mapping = NULL;
967 		__free_page(page);
968 	}
969 
970 	msc->nr_pages = 0;
971 }
972 
973 /**
974  * msc_buffer_contig_get_page() - find a page at a given offset
975  * @msc:	MSC configured in SINGLE mode
976  * @pgoff:	page offset
977  *
978  * Return:	page, if @pgoff is within the range, NULL otherwise.
979  */
980 static struct page *msc_buffer_contig_get_page(struct msc *msc,
981 					       unsigned long pgoff)
982 {
983 	if (pgoff >= msc->nr_pages)
984 		return NULL;
985 
986 	return virt_to_page(msc->base + (pgoff << PAGE_SHIFT));
987 }
988 
/*
 * Allocate @nr_segs PAGE_SIZE DMA-coherent blocks for @win.
 *
 * Return:	@nr_segs on success, -ENOMEM otherwise.
 */
static int __msc_buffer_win_alloc(struct msc_window *win,
				  unsigned int nr_segs)
{
	struct scatterlist *sg_ptr;
	void *block;
	int i, ret;

	ret = sg_alloc_table(win->sgt, nr_segs, GFP_KERNEL);
	if (ret)
		return -ENOMEM;

	for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {
		block = dma_alloc_coherent(msc_dev(win->msc)->parent->parent,
					  PAGE_SIZE, &sg_dma_address(sg_ptr),
					  GFP_KERNEL);
		if (!block)
			goto err_nomem;

		sg_set_buf(sg_ptr, block, PAGE_SIZE);
	}

	return nr_segs;

err_nomem:
	/* unwind the @i blocks allocated so far; ret is reused as the index */
	for_each_sg(win->sgt->sgl, sg_ptr, i, ret)
		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
				  sg_virt(sg_ptr), sg_dma_address(sg_ptr));

	sg_free_table(win->sgt);

	return -ENOMEM;
}
1021 
#ifdef CONFIG_X86
/* Mark the first @nr_segs segments of @win's buffer uncached. */
static void msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs)
{
	struct scatterlist *sg_ptr;
	int i;

	for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {
		/* Set the page as uncached */
		set_memory_uc((unsigned long)sg_virt(sg_ptr),
			      PFN_DOWN(sg_ptr->length));
	}
}

/* Undo msc_buffer_set_uc() for all of @win's segments before freeing. */
static void msc_buffer_set_wb(struct msc_window *win)
{
	struct scatterlist *sg_ptr;
	int i;

	for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) {
		/* Reset the page to write-back */
		set_memory_wb((unsigned long)sg_virt(sg_ptr),
			      PFN_DOWN(sg_ptr->length));
	}
}
#else /* !X86 */
/* Memory-type manipulation is only done on x86; no-ops elsewhere. */
static inline void
msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs) {}
static inline void msc_buffer_set_wb(struct msc_window *win) {}
#endif /* CONFIG_X86 */
1051 
/**
 * msc_buffer_win_alloc() - alloc a window for a multiblock mode
 * @msc:	MSC device
 * @nr_blocks:	number of pages in this window
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return:	0 on success, -errno otherwise.
 */
static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
{
	struct msc_window *win;
	int ret = -ENOMEM;

	if (!nr_blocks)
		return 0;

	win = kzalloc(sizeof(*win), GFP_KERNEL);
	if (!win)
		return -ENOMEM;

	win->msc = msc;
	/* default to the in-line table; mbuf->alloc_window may repoint it */
	win->sgt = &win->_sgt;
	win->lockout = WIN_READY;
	spin_lock_init(&win->lo_lock);

	/* this window starts right after the previous one in page terms */
	if (!list_empty(&msc->win_list)) {
		struct msc_window *prev = list_last_entry(&msc->win_list,
							  struct msc_window,
							  entry);

		win->pgoff = prev->pgoff + prev->nr_blocks;
	}

	/* both allocation paths return the number of segments on success */
	if (msc->mbuf && msc->mbuf->alloc_window)
		ret = msc->mbuf->alloc_window(msc->mbuf_priv, &win->sgt,
					      nr_blocks << PAGE_SHIFT);
	else
		ret = __msc_buffer_win_alloc(win, nr_blocks);

	if (ret <= 0)
		goto err_nomem;

	msc_buffer_set_uc(win, ret);

	win->nr_segs = ret;
	win->nr_blocks = nr_blocks;

	/* the first window defines the buffer base and starts as current */
	if (list_empty(&msc->win_list)) {
		msc->base = msc_win_base(win);
		msc->base_addr = msc_win_base_dma(win);
		msc->cur_win = win;
	}

	list_add_tail(&win->entry, &msc->win_list);
	msc->nr_pages += nr_blocks;

	return 0;

err_nomem:
	kfree(win);

	return ret;
}
1117 
1118 static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win)
1119 {
1120 	struct scatterlist *sg;
1121 	int i;
1122 
1123 	for_each_sg(win->sgt->sgl, sg, win->nr_segs, i) {
1124 		struct page *page = sg_page(sg);
1125 
1126 		page->mapping = NULL;
1127 		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
1128 				  sg_virt(sg), sg_dma_address(sg));
1129 	}
1130 	sg_free_table(win->sgt);
1131 }
1132 
/**
 * msc_buffer_win_free() - free a window from MSC's window list
 * @msc:	MSC device
 * @win:	window to free
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 */
static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
	msc->nr_pages -= win->nr_blocks;

	list_del(&win->entry);
	/* last window gone: the buffer no longer has a base */
	if (list_empty(&msc->win_list)) {
		msc->base = NULL;
		msc->base_addr = 0;
	}

	/* restore write-back caching before the pages are returned */
	msc_buffer_set_wb(win);

	if (msc->mbuf && msc->mbuf->free_window)
		msc->mbuf->free_window(msc->mbuf_priv, win->sgt);
	else
		__msc_buffer_win_free(msc, win);

	kfree(win);
}
1160 
/**
 * msc_buffer_relink() - set up block descriptors for multiblock mode
 * @msc:	MSC device
 *
 * This traverses msc::win_list, which requires msc::buf_mutex to serialize,
 * so the caller is expected to hold it.
 */
static void msc_buffer_relink(struct msc *msc)
{
	struct msc_window *win, *next_win;

	/* call with msc::mutex locked */
	list_for_each_entry(win, &msc->win_list, entry) {
		struct scatterlist *sg;
		unsigned int blk;
		u32 sw_tag = 0;

		/*
		 * Last window's next_win should point to the first window
		 * and MSC_SW_TAG_LASTWIN should be set.
		 */
		if (msc_is_last_win(win)) {
			sw_tag |= MSC_SW_TAG_LASTWIN;
			next_win = list_first_entry(&msc->win_list,
						    struct msc_window, entry);
		} else {
			next_win = list_next_entry(win, entry);
		}

		for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
			struct msc_block_desc *bdesc = sg_virt(sg);

			memset(bdesc, 0, sizeof(*bdesc));

			/* descriptors link by page frame number, not address */
			bdesc->next_win = msc_win_base_pfn(next_win);

			/*
			 * Similarly to last window, last block should point
			 * to the first one.
			 */
			if (blk == win->nr_segs - 1) {
				sw_tag |= MSC_SW_TAG_LASTBLK;
				bdesc->next_blk = msc_win_base_pfn(win);
			} else {
				dma_addr_t addr = sg_dma_address(sg_next(sg));

				bdesc->next_blk = PFN_DOWN(addr);
			}

			bdesc->sw_tag = sw_tag;
			/* block size in 64-byte units (confirm against MSU spec) */
			bdesc->block_sz = sg->length / 64;
		}
	}

	/*
	 * Make the above writes globally visible before tracing is
	 * enabled to make sure hardware sees them coherently.
	 */
	wmb();
}
1221 
1222 static void msc_buffer_multi_free(struct msc *msc)
1223 {
1224 	struct msc_window *win, *iter;
1225 
1226 	list_for_each_entry_safe(win, iter, &msc->win_list, entry)
1227 		msc_buffer_win_free(msc, win);
1228 }
1229 
/*
 * Allocate @nr_wins windows, sized (in pages) by the @nr_pages array, then
 * link their block descriptors for multiblock operation.  On any failure
 * all windows allocated so far are torn down.
 * Caller is expected to hold msc::buf_mutex.
 */
static int msc_buffer_multi_alloc(struct msc *msc, unsigned long *nr_pages,
				  unsigned int nr_wins)
{
	unsigned int w;
	int err;

	for (w = 0; w < nr_wins; w++) {
		err = msc_buffer_win_alloc(msc, nr_pages[w]);
		if (err) {
			msc_buffer_multi_free(msc);
			return err;
		}
	}

	msc_buffer_relink(msc);

	return 0;
}
1247 
1248 /**
1249  * msc_buffer_free() - free buffers for MSC
1250  * @msc:	MSC device
1251  *
1252  * Free MSC's storage buffers.
1253  *
1254  * This modifies msc::win_list and msc::base, which requires msc::buf_mutex to
1255  * serialize, so the caller is expected to hold it.
1256  */
1257 static void msc_buffer_free(struct msc *msc)
1258 {
1259 	if (msc->mode == MSC_MODE_SINGLE)
1260 		msc_buffer_contig_free(msc);
1261 	else if (msc->mode == MSC_MODE_MULTI)
1262 		msc_buffer_multi_free(msc);
1263 }
1264 
1265 /**
1266  * msc_buffer_alloc() - allocate a buffer for MSC
1267  * @msc:	MSC device
1268  * @size:	allocation size in bytes
1269  *
1270  * Allocate a storage buffer for MSC, depending on the msc::mode, it will be
1271  * either done via msc_buffer_contig_alloc() for SINGLE operation mode or
1272  * msc_buffer_win_alloc() for multiblock operation. The latter allocates one
1273  * window per invocation, so in multiblock mode this can be called multiple
1274  * times for the same MSC to allocate multiple windows.
1275  *
1276  * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
1277  * to serialize, so the caller is expected to hold it.
1278  *
1279  * Return:	0 on success, -errno otherwise.
1280  */
1281 static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages,
1282 			    unsigned int nr_wins)
1283 {
1284 	int ret;
1285 
1286 	/* -1: buffer not allocated */
1287 	if (atomic_read(&msc->user_count) != -1)
1288 		return -EBUSY;
1289 
1290 	if (msc->mode == MSC_MODE_SINGLE) {
1291 		if (nr_wins != 1)
1292 			return -EINVAL;
1293 
1294 		ret = msc_buffer_contig_alloc(msc, nr_pages[0] << PAGE_SHIFT);
1295 	} else if (msc->mode == MSC_MODE_MULTI) {
1296 		ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins);
1297 	} else {
1298 		ret = -ENOTSUPP;
1299 	}
1300 
1301 	if (!ret) {
1302 		/* allocation should be visible before the counter goes to 0 */
1303 		smp_mb__before_atomic();
1304 
1305 		if (WARN_ON_ONCE(atomic_cmpxchg(&msc->user_count, -1, 0) != -1))
1306 			return -EINVAL;
1307 	}
1308 
1309 	return ret;
1310 }
1311 
1312 /**
1313  * msc_buffer_unlocked_free_unless_used() - free a buffer unless it's in use
1314  * @msc:	MSC device
1315  *
1316  * This will free MSC buffer unless it is in use or there is no allocated
1317  * buffer.
1318  * Caller needs to hold msc::buf_mutex.
1319  *
1320  * Return:	0 on successful deallocation or if there was no buffer to
1321  *		deallocate, -EBUSY if there are active users.
1322  */
1323 static int msc_buffer_unlocked_free_unless_used(struct msc *msc)
1324 {
1325 	int count, ret = 0;
1326 
1327 	count = atomic_cmpxchg(&msc->user_count, 0, -1);
1328 
1329 	/* > 0: buffer is allocated and has users */
1330 	if (count > 0)
1331 		ret = -EBUSY;
1332 	/* 0: buffer is allocated, no users */
1333 	else if (!count)
1334 		msc_buffer_free(msc);
1335 	/* < 0: no buffer, nothing to do */
1336 
1337 	return ret;
1338 }
1339 
1340 /**
1341  * msc_buffer_free_unless_used() - free a buffer unless it's in use
1342  * @msc:	MSC device
1343  *
1344  * This is a locked version of msc_buffer_unlocked_free_unless_used().
1345  */
1346 static int msc_buffer_free_unless_used(struct msc *msc)
1347 {
1348 	int ret;
1349 
1350 	mutex_lock(&msc->buf_mutex);
1351 	ret = msc_buffer_unlocked_free_unless_used(msc);
1352 	mutex_unlock(&msc->buf_mutex);
1353 
1354 	return ret;
1355 }
1356 
1357 /**
1358  * msc_buffer_get_page() - get MSC buffer page at a given offset
1359  * @msc:	MSC device
1360  * @pgoff:	page offset into the storage buffer
1361  *
1362  * This traverses msc::win_list, so holding msc::buf_mutex is expected from
1363  * the caller.
1364  *
1365  * Return:	page if @pgoff corresponds to a valid buffer page or NULL.
1366  */
1367 static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
1368 {
1369 	struct msc_window *win;
1370 	struct scatterlist *sg;
1371 	unsigned int blk;
1372 
1373 	if (msc->mode == MSC_MODE_SINGLE)
1374 		return msc_buffer_contig_get_page(msc, pgoff);
1375 
1376 	list_for_each_entry(win, &msc->win_list, entry)
1377 		if (pgoff >= win->pgoff && pgoff < win->pgoff + win->nr_blocks)
1378 			goto found;
1379 
1380 	return NULL;
1381 
1382 found:
1383 	pgoff -= win->pgoff;
1384 
1385 	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
1386 		struct page *page = sg_page(sg);
1387 		size_t pgsz = PFN_DOWN(sg->length);
1388 
1389 		if (pgoff < pgsz)
1390 			return page + pgoff;
1391 
1392 		pgoff -= pgsz;
1393 	}
1394 
1395 	return NULL;
1396 }
1397 
1398 /**
1399  * struct msc_win_to_user_struct - data for copy_to_user() callback
1400  * @buf:	userspace buffer to copy data to
1401  * @offset:	running offset
1402  */
1403 struct msc_win_to_user_struct {
1404 	char __user	*buf;
1405 	unsigned long	offset;
1406 };
1407 
1408 /**
1409  * msc_win_to_user() - iterator for msc_buffer_iterate() to copy data to user
1410  * @data:	callback's private data
1411  * @src:	source buffer
1412  * @len:	amount of data to copy from the source buffer
1413  */
1414 static unsigned long msc_win_to_user(void *data, void *src, size_t len)
1415 {
1416 	struct msc_win_to_user_struct *u = data;
1417 	unsigned long ret;
1418 
1419 	ret = copy_to_user(u->buf + u->offset, src, len);
1420 	u->offset += len - ret;
1421 
1422 	return ret;
1423 }
1424 
1425 
1426 /*
1427  * file operations' callbacks
1428  */
1429 
1430 static int intel_th_msc_open(struct inode *inode, struct file *file)
1431 {
1432 	struct intel_th_device *thdev = file->private_data;
1433 	struct msc *msc = dev_get_drvdata(&thdev->dev);
1434 	struct msc_iter *iter;
1435 
1436 	if (!capable(CAP_SYS_RAWIO))
1437 		return -EPERM;
1438 
1439 	iter = msc_iter_install(msc);
1440 	if (IS_ERR(iter))
1441 		return PTR_ERR(iter);
1442 
1443 	file->private_data = iter;
1444 
1445 	return nonseekable_open(inode, file);
1446 }
1447 
1448 static int intel_th_msc_release(struct inode *inode, struct file *file)
1449 {
1450 	struct msc_iter *iter = file->private_data;
1451 	struct msc *msc = iter->msc;
1452 
1453 	msc_iter_remove(iter, msc);
1454 
1455 	return 0;
1456 }
1457 
/*
 * Copy @len bytes starting at logical offset @off of a SINGLE-mode buffer
 * to userspace.  If the buffer has wrapped, the oldest data begins at
 * single_sz, so the copy is done in up to two chunks: [single_sz+off, size)
 * first, then the wrapped-around part from the start of the buffer.
 *
 * Return: number of bytes copied, or -EFAULT on a failed copy.
 */
static ssize_t
msc_single_to_user(struct msc *msc, char __user *buf, loff_t off, size_t len)
{
	unsigned long size = msc->nr_pages << PAGE_SHIFT, rem = len;
	unsigned long start = off, tocopy = 0;

	if (msc->single_wrap) {
		/* oldest data sits right after the write pointer */
		start += msc->single_sz;
		if (start < size) {
			tocopy = min(rem, size - start);
			if (copy_to_user(buf, msc->base + start, tocopy))
				return -EFAULT;

			buf += tocopy;
			rem -= tocopy;
			start += tocopy;
		}

		/*
		 * Wrap around to the beginning of the buffer.
		 * NOTE(review): the mask assumes @size is a power of two --
		 * confirm against the allocation-size constraints.
		 */
		start &= size - 1;
		if (rem) {
			tocopy = min(rem, msc->single_sz - start);
			if (copy_to_user(buf, msc->base + start, tocopy))
				return -EFAULT;

			rem -= tocopy;
		}

		return len - rem;
	}

	/* no wrap: one straight copy */
	if (copy_to_user(buf, msc->base + start, rem))
		return -EFAULT;

	return len;
}
1493 
/*
 * chardev read: copy captured trace data to userspace.
 *
 * Takes a temporary reference on msc::user_count (returns 0 -- EOF -- if
 * no buffer is allocated) and dispatches per mode: SINGLE copies linearly
 * (accounting for wrap), MULTI iterates the windows via the per-file
 * iterator; other modes return -ENOTSUPP.
 */
static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
				 size_t len, loff_t *ppos)
{
	struct msc_iter *iter = file->private_data;
	struct msc *msc = iter->msc;
	size_t size;
	loff_t off = *ppos;
	ssize_t ret = 0;

	/* user_count < 0 means no buffer; don't touch it in that case */
	if (!atomic_inc_unless_negative(&msc->user_count))
		return 0;

	/* un-wrapped SINGLE mode only has single_sz bytes of valid data */
	if (msc->mode == MSC_MODE_SINGLE && !msc->single_wrap)
		size = msc->single_sz;
	else
		size = msc->nr_pages << PAGE_SHIFT;

	if (!size)
		goto put_count;

	if (off >= size)
		goto put_count;

	/* clamp the request to the end of the captured data */
	if (off + len >= size)
		len = size - off;

	if (msc->mode == MSC_MODE_SINGLE) {
		ret = msc_single_to_user(msc, buf, off, len);
		if (ret >= 0)
			*ppos += ret;
	} else if (msc->mode == MSC_MODE_MULTI) {
		struct msc_win_to_user_struct u = {
			.buf	= buf,
			.offset	= 0,
		};

		ret = msc_buffer_iterate(iter, len, &u, msc_win_to_user);
		if (ret >= 0)
			*ppos = iter->offset;
	} else {
		ret = -ENOTSUPP;
	}

put_count:
	atomic_dec(&msc->user_count);

	return ret;
}
1542 
1543 /*
1544  * vm operations callbacks (vm_ops)
1545  */
1546 
1547 static void msc_mmap_open(struct vm_area_struct *vma)
1548 {
1549 	struct msc_iter *iter = vma->vm_file->private_data;
1550 	struct msc *msc = iter->msc;
1551 
1552 	atomic_inc(&msc->mmap_count);
1553 }
1554 
/*
 * vm_ops->close: when the last mapping goes away, detach all buffer pages
 * from the file mapping and drop the user_count reference that
 * intel_th_msc_mmap() took.
 */
static void msc_mmap_close(struct vm_area_struct *vma)
{
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;
	unsigned long pg;

	/* only proceed (with buf_mutex held) when mmap_count hits zero */
	if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex))
		return;

	/* drop page _refcounts */
	for (pg = 0; pg < msc->nr_pages; pg++) {
		struct page *page = msc_buffer_get_page(msc, pg);

		if (WARN_ON_ONCE(!page))
			continue;

		if (page->mapping)
			page->mapping = NULL;
	}

	/* last mapping -- drop user_count */
	atomic_dec(&msc->user_count);
	mutex_unlock(&msc->buf_mutex);
}
1579 
1580 static vm_fault_t msc_mmap_fault(struct vm_fault *vmf)
1581 {
1582 	struct msc_iter *iter = vmf->vma->vm_file->private_data;
1583 	struct msc *msc = iter->msc;
1584 
1585 	vmf->page = msc_buffer_get_page(msc, vmf->pgoff);
1586 	if (!vmf->page)
1587 		return VM_FAULT_SIGBUS;
1588 
1589 	get_page(vmf->page);
1590 	vmf->page->mapping = vmf->vma->vm_file->f_mapping;
1591 	vmf->page->index = vmf->pgoff;
1592 
1593 	return 0;
1594 }
1595 
/* VMA callbacks for mmap()ed trace buffers */
static const struct vm_operations_struct msc_mmap_ops = {
	.open	= msc_mmap_open,
	.close	= msc_mmap_close,
	.fault	= msc_mmap_fault,
};
1601 
/*
 * chardev mmap: map the whole trace buffer (and only the whole buffer,
 * from offset 0) into userspace, uncached.  Holds one user_count reference
 * for the lifetime of the mapping, released in msc_mmap_close().
 */
static int intel_th_msc_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;
	int ret = -EINVAL;

	if (!size || offset_in_page(size))
		return -EINVAL;

	if (vma->vm_pgoff)
		return -EINVAL;

	/* grab user_count once per mmap; drop in msc_mmap_close() */
	if (!atomic_inc_unless_negative(&msc->user_count))
		return -EINVAL;

	if (msc->mode != MSC_MODE_SINGLE &&
	    msc->mode != MSC_MODE_MULTI)
		goto out;

	/* the mapping must cover the buffer exactly */
	if (size >> PAGE_SHIFT != msc->nr_pages)
		goto out;

	atomic_set(&msc->mmap_count, 1);
	ret = 0;

out:
	if (ret)
		atomic_dec(&msc->user_count);

	/* uncached: the buffer is written by hardware behind our back */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTCOPY;
	vma->vm_ops = &msc_mmap_ops;
	return ret;
}
1638 
/* character device interface for reading/mapping the trace buffer */
static const struct file_operations intel_th_msc_fops = {
	.open		= intel_th_msc_open,
	.release	= intel_th_msc_release,
	.read		= intel_th_msc_read,
	.mmap		= intel_th_msc_mmap,
	.llseek		= no_llseek,
	.owner		= THIS_MODULE,
};
1647 
1648 static void intel_th_msc_wait_empty(struct intel_th_device *thdev)
1649 {
1650 	struct msc *msc = dev_get_drvdata(&thdev->dev);
1651 	unsigned long count;
1652 	u32 reg;
1653 
1654 	for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH;
1655 	     count && !(reg & MSCSTS_PLE); count--) {
1656 		reg = __raw_readl(msc->reg_base + REG_MSU_MSC0STS);
1657 		cpu_relax();
1658 	}
1659 
1660 	if (!count)
1661 		dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n");
1662 }
1663 
/* one-time software state setup at probe; does not touch trace hardware */
static int intel_th_msc_init(struct msc *msc)
{
	/* -1 == "no buffer allocated"; see msc_buffer_alloc() */
	atomic_set(&msc->user_count, -1);

	/* default to multiblock mode unless this hardware can't do it */
	msc->mode = msc->multi_is_broken ? MSC_MODE_SINGLE : MSC_MODE_MULTI;
	mutex_init(&msc->buf_mutex);
	INIT_LIST_HEAD(&msc->win_list);
	INIT_LIST_HEAD(&msc->iter_list);

	/* extract the burst length field from the control register */
	msc->burst_len =
		(ioread32(msc->reg_base + REG_MSU_MSC0CTL) & MSC_LEN) >>
		__ffs(MSC_LEN);

	return 0;
}
1679 
1680 static int msc_win_switch(struct msc *msc)
1681 {
1682 	struct msc_window *first;
1683 
1684 	if (list_empty(&msc->win_list))
1685 		return -EINVAL;
1686 
1687 	first = list_first_entry(&msc->win_list, struct msc_window, entry);
1688 
1689 	if (msc_is_last_win(msc->cur_win))
1690 		msc->cur_win = first;
1691 	else
1692 		msc->cur_win = list_next_entry(msc->cur_win, entry);
1693 
1694 	msc->base = msc_win_base(msc->cur_win);
1695 	msc->base_addr = msc_win_base_dma(msc->cur_win);
1696 
1697 	intel_th_trace_switch(msc->thdev);
1698 
1699 	return 0;
1700 }
1701 
1702 /**
1703  * intel_th_msc_window_unlock - put the window back in rotation
1704  * @dev:	MSC device to which this relates
1705  * @sgt:	buffer's sg_table for the window, does nothing if NULL
1706  */
1707 void intel_th_msc_window_unlock(struct device *dev, struct sg_table *sgt)
1708 {
1709 	struct msc *msc = dev_get_drvdata(dev);
1710 	struct msc_window *win;
1711 
1712 	if (!sgt)
1713 		return;
1714 
1715 	win = msc_find_window(msc, sgt, false);
1716 	if (!win)
1717 		return;
1718 
1719 	msc_win_set_lockout(win, WIN_LOCKED, WIN_READY);
1720 }
1721 EXPORT_SYMBOL_GPL(intel_th_msc_window_unlock);
1722 
/*
 * Deferred work scheduled from the interrupt handler when the next window
 * is LOCKED: stop the trace outside of interrupt context.
 */
static void msc_work(struct work_struct *work)
{
	struct msc *msc = container_of(work, struct msc, work);

	intel_th_msc_deactivate(msc->thdev);
}
1729 
/*
 * Interrupt handler: services the "last block" event for this MSC and,
 * when an external buffer sink is bound, performs the window rotation
 * described in the lockout comment at the top of the file.
 */
static irqreturn_t intel_th_msc_interrupt(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	u32 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
	u32 mask = msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
	struct msc_window *win, *next_win;

	/* only relevant with IRQ support and an external buffer sink */
	if (!msc->do_irq || !msc->mbuf)
		return IRQ_NONE;

	msusts &= mask;

	if (!msusts)
		return msc->enabled ? IRQ_HANDLED : IRQ_NONE;

	/* ack the event (write-1-to-clear) */
	iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);

	if (!msc->enabled)
		return IRQ_NONE;

	/* grab the window before we do the switch */
	win = msc->cur_win;
	if (!win)
		return IRQ_HANDLED;
	next_win = msc_next_window(win);
	if (!next_win)
		return IRQ_HANDLED;

	/* next window: if READY, proceed, if LOCKED, stop the trace */
	if (msc_win_set_lockout(next_win, WIN_READY, WIN_INUSE)) {
		schedule_work(&msc->work);
		return IRQ_HANDLED;
	}

	/* current window: INUSE -> LOCKED */
	msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);

	msc_win_switch(msc);

	/* hand the filled window to the buffer sink */
	if (msc->mbuf && msc->mbuf->ready)
		msc->mbuf->ready(msc->mbuf_priv, win->sgt,
				 msc_win_total_sz(win));

	return IRQ_HANDLED;
}
1775 
/* sysfs-visible mode names, indexed by the MSC_MODE_* constants */
static const char * const msc_mode[] = {
	[MSC_MODE_SINGLE]	= "single",
	[MSC_MODE_MULTI]	= "multi",
	[MSC_MODE_EXI]		= "ExI",
	[MSC_MODE_DEBUG]	= "debug",
};
1782 
1783 static ssize_t
1784 wrap_show(struct device *dev, struct device_attribute *attr, char *buf)
1785 {
1786 	struct msc *msc = dev_get_drvdata(dev);
1787 
1788 	return scnprintf(buf, PAGE_SIZE, "%d\n", msc->wrap);
1789 }
1790 
1791 static ssize_t
1792 wrap_store(struct device *dev, struct device_attribute *attr, const char *buf,
1793 	   size_t size)
1794 {
1795 	struct msc *msc = dev_get_drvdata(dev);
1796 	unsigned long val;
1797 	int ret;
1798 
1799 	ret = kstrtoul(buf, 10, &val);
1800 	if (ret)
1801 		return ret;
1802 
1803 	msc->wrap = !!val;
1804 
1805 	return size;
1806 }
1807 
1808 static DEVICE_ATTR_RW(wrap);
1809 
/*
 * Detach the currently bound external buffer sink, if any: notify it via
 * its unassign callback and drop our module reference on it.
 * Caller must hold msc::buf_mutex.
 */
static void msc_buffer_unassign(struct msc *msc)
{
	lockdep_assert_held(&msc->buf_mutex);

	if (!msc->mbuf)
		return;

	msc->mbuf->unassign(msc->mbuf_priv);
	msu_buffer_put(msc->mbuf);
	msc->mbuf_priv = NULL;
	msc->mbuf = NULL;
}
1822 
1823 static ssize_t
1824 mode_show(struct device *dev, struct device_attribute *attr, char *buf)
1825 {
1826 	struct msc *msc = dev_get_drvdata(dev);
1827 	const char *mode = msc_mode[msc->mode];
1828 	ssize_t ret;
1829 
1830 	mutex_lock(&msc->buf_mutex);
1831 	if (msc->mbuf)
1832 		mode = msc->mbuf->name;
1833 	ret = scnprintf(buf, PAGE_SIZE, "%s\n", mode);
1834 	mutex_unlock(&msc->buf_mutex);
1835 
1836 	return ret;
1837 }
1838 
/*
 * sysfs "mode": switch the operating mode.  The value is either one of the
 * built-in names from msc_mode[] or the name of a registered external
 * buffer sink (the latter requires a usable IRQ).  Switching frees the
 * current buffer (fails with -EBUSY if it's in use) and rebinds or
 * unbinds the external sink as needed.
 */
static ssize_t
mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
	   size_t size)
{
	const struct msu_buffer *mbuf = NULL;
	struct msc *msc = dev_get_drvdata(dev);
	size_t len = size;
	char *cp, *mode;
	int i, ret;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	/* strip a trailing newline from the sysfs write */
	cp = memchr(buf, '\n', len);
	if (cp)
		len = cp - buf;

	mode = kstrndup(buf, len, GFP_KERNEL);
	if (!mode)
		return -ENOMEM;

	i = match_string(msc_mode, ARRAY_SIZE(msc_mode), mode);
	if (i >= 0) {
		kfree(mode);
		goto found;
	}

	/* Buffer sinks only work with a usable IRQ */
	if (!msc->do_irq) {
		kfree(mode);
		return -EINVAL;
	}

	/* not a built-in mode: try it as a buffer sink name (takes a ref) */
	mbuf = msu_buffer_get(mode);
	kfree(mode);
	if (mbuf)
		goto found;

	return -EINVAL;

found:
	/* i >= 0 only on the built-in path, so mbuf is NULL here */
	if (i == MSC_MODE_MULTI && msc->multi_is_broken)
		return -EOPNOTSUPP;

	mutex_lock(&msc->buf_mutex);
	ret = 0;

	/* Same buffer: do nothing */
	if (mbuf && mbuf == msc->mbuf) {
		/* put the extra reference we just got */
		msu_buffer_put(mbuf);
		goto unlock;
	}

	ret = msc_buffer_unlocked_free_unless_used(msc);
	if (ret)
		goto unlock;

	if (mbuf) {
		/* the sink's assign callback also picks the mode via &i */
		void *mbuf_priv = mbuf->assign(dev, &i);

		if (!mbuf_priv) {
			ret = -ENOMEM;
			goto unlock;
		}

		msc_buffer_unassign(msc);
		msc->mbuf_priv = mbuf_priv;
		msc->mbuf = mbuf;
	} else {
		msc_buffer_unassign(msc);
	}

	msc->mode = i;

unlock:
	/* on failure, drop the reference msu_buffer_get() took */
	if (ret && mbuf)
		msu_buffer_put(mbuf);
	mutex_unlock(&msc->buf_mutex);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(mode);
1923 
1924 static ssize_t
1925 nr_pages_show(struct device *dev, struct device_attribute *attr, char *buf)
1926 {
1927 	struct msc *msc = dev_get_drvdata(dev);
1928 	struct msc_window *win;
1929 	size_t count = 0;
1930 
1931 	mutex_lock(&msc->buf_mutex);
1932 
1933 	if (msc->mode == MSC_MODE_SINGLE)
1934 		count = scnprintf(buf, PAGE_SIZE, "%ld\n", msc->nr_pages);
1935 	else if (msc->mode == MSC_MODE_MULTI) {
1936 		list_for_each_entry(win, &msc->win_list, entry) {
1937 			count += scnprintf(buf + count, PAGE_SIZE - count,
1938 					   "%d%c", win->nr_blocks,
1939 					   msc_is_last_win(win) ? '\n' : ',');
1940 		}
1941 	} else {
1942 		count = scnprintf(buf, PAGE_SIZE, "unsupported\n");
1943 	}
1944 
1945 	mutex_unlock(&msc->buf_mutex);
1946 
1947 	return count;
1948 }
1949 
/*
 * sysfs "nr_pages": (re)allocate the trace buffer.  Accepts a
 * comma-separated list of window sizes in pages, e.g. "64,64,128";
 * SINGLE mode accepts exactly one size.  Frees the previous buffer first
 * (fails with the free's error, e.g. -EBUSY, if it's in use).
 *
 * NOTE(review): a leading "0" (ret == 0, val == 0) takes the free_win
 * path with ret still 0, so the write reports success after only freeing
 * the old buffer -- presumably intentional "write 0 to free"; confirm.
 */
static ssize_t
nr_pages_store(struct device *dev, struct device_attribute *attr,
	       const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val, *win = NULL, *rewin;
	size_t len = size;
	const char *p = buf;
	char *end, *s;
	int ret, nr_wins = 0;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	ret = msc_buffer_free_unless_used(msc);
	if (ret)
		return ret;

	/* scan the comma-separated list of allocation sizes */
	end = memchr(buf, '\n', len);
	if (end)
		len = end - buf;

	do {
		end = memchr(p, ',', len);
		s = kstrndup(p, end ? end - p : len, GFP_KERNEL);
		if (!s) {
			ret = -ENOMEM;
			goto free_win;
		}

		ret = kstrtoul(s, 10, &val);
		kfree(s);

		if (ret || !val)
			goto free_win;

		/* SINGLE mode takes exactly one window */
		if (nr_wins && msc->mode == MSC_MODE_SINGLE) {
			ret = -EINVAL;
			goto free_win;
		}

		/* grow the size array by one and append this window */
		nr_wins++;
		rewin = krealloc(win, sizeof(*win) * nr_wins, GFP_KERNEL);
		if (!rewin) {
			kfree(win);
			return -ENOMEM;
		}

		win = rewin;
		win[nr_wins - 1] = val;

		if (!end)
			break;

		/* consume the number and the following comma, hence +1 */
		len -= end - p + 1;
		p = end + 1;
	} while (len);

	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_alloc(msc, win, nr_wins);
	mutex_unlock(&msc->buf_mutex);

free_win:
	kfree(win);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(nr_pages);
2021 
2022 static ssize_t
2023 win_switch_store(struct device *dev, struct device_attribute *attr,
2024 		 const char *buf, size_t size)
2025 {
2026 	struct msc *msc = dev_get_drvdata(dev);
2027 	unsigned long val;
2028 	int ret;
2029 
2030 	ret = kstrtoul(buf, 10, &val);
2031 	if (ret)
2032 		return ret;
2033 
2034 	if (val != 1)
2035 		return -EINVAL;
2036 
2037 	ret = -EINVAL;
2038 	mutex_lock(&msc->buf_mutex);
2039 	/*
2040 	 * Window switch can only happen in the "multi" mode.
2041 	 * If a external buffer is engaged, they have the full
2042 	 * control over window switching.
2043 	 */
2044 	if (msc->mode == MSC_MODE_MULTI && !msc->mbuf)
2045 		ret = msc_win_switch(msc);
2046 	mutex_unlock(&msc->buf_mutex);
2047 
2048 	return ret ? ret : size;
2049 }
2050 
2051 static DEVICE_ATTR_WO(win_switch);
2052 
/* sysfs attributes exposed on the MSC output device */
static struct attribute *msc_output_attrs[] = {
	&dev_attr_wrap.attr,
	&dev_attr_mode.attr,
	&dev_attr_nr_pages.attr,
	&dev_attr_win_switch.attr,
	NULL,
};

static struct attribute_group msc_output_group = {
	.attrs	= msc_output_attrs,
};
2064 
/*
 * Bus probe: map the MSU register space, detect capabilities (IRQ
 * availability, broken multiblock mode) and initialize software state.
 */
static int intel_th_msc_probe(struct intel_th_device *thdev)
{
	struct device *dev = &thdev->dev;
	struct resource *res;
	struct msc *msc;
	void __iomem *base;
	int err;

	res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	base = devm_ioremap(dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	msc = devm_kzalloc(dev, sizeof(*msc), GFP_KERNEL);
	if (!msc)
		return -ENOMEM;

	/*
	 * NOTE(review): do_irq is set when the IRQ resource is ABSENT --
	 * presumably resource 1 marks "IRQ unavailable"; confirm against
	 * the intel_th core's resource layout.
	 */
	res = intel_th_device_get_resource(thdev, IORESOURCE_IRQ, 1);
	if (!res)
		msc->do_irq = 1;

	if (INTEL_TH_CAP(to_intel_th(thdev), multi_is_broken))
		msc->multi_is_broken = 1;

	msc->index = thdev->id;

	msc->thdev = thdev;
	/* each MSC instance has a 0x100-byte register window */
	msc->reg_base = base + msc->index * 0x100;
	msc->msu_base = base;

	INIT_WORK(&msc->work, msc_work);
	err = intel_th_msc_init(msc);
	if (err)
		return err;

	dev_set_drvdata(dev, msc);

	return 0;
}
2107 
/* Bus remove: stop tracing and release the buffer (warn if still in use). */
static void intel_th_msc_remove(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	int ret;

	intel_th_msc_deactivate(thdev);

	/*
	 * Buffers should not be used at this point except if the
	 * output character device is still open and the parent
	 * device gets detached from its bus, which is a FIXME.
	 */
	ret = msc_buffer_free_unless_used(msc);
	WARN_ON_ONCE(ret);
}
2123 
/* intel_th bus driver glue for the MSC output subdevice */
static struct intel_th_driver intel_th_msc_driver = {
	.probe	= intel_th_msc_probe,
	.remove	= intel_th_msc_remove,
	.irq		= intel_th_msc_interrupt,
	.wait_empty	= intel_th_msc_wait_empty,
	.activate	= intel_th_msc_activate,
	.deactivate	= intel_th_msc_deactivate,
	.fops	= &intel_th_msc_fops,
	.attr_group	= &msc_output_group,
	.driver	= {
		.name	= "msc",
		.owner	= THIS_MODULE,
	},
};

module_driver(intel_th_msc_driver,
	      intel_th_driver_register,
	      intel_th_driver_unregister);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel(R) Trace Hub Memory Storage Unit driver");
MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");
2146