// SPDX-License-Identifier: GPL-2.0
/*
 * System Trace Module (STM) infrastructure
 * Copyright (c) 2014, Intel Corporation.
 *
 * STM class implements generic infrastructure for System Trace Module devices
 * as defined in MIPI STPv2 specification.
 */

#include <linux/pm_runtime.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/compat.h>
#include <linux/kdev_t.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/stm.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include "stm.h"

#include <uapi/linux/stm.h>

static unsigned int stm_core_up;

/*
 * The SRCU here makes sure that STM device doesn't disappear from under a
 * stm_source_write() caller, which may want to have as little overhead as
 * possible.
 */
static struct srcu_struct stm_source_srcu;

static ssize_t masters_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	struct stm_device *stm = to_stm_device(dev);
	int ret;

	ret = sprintf(buf, "%u %u\n", stm->data->sw_start, stm->data->sw_end);

	return ret;
}

static DEVICE_ATTR_RO(masters);

static ssize_t channels_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct stm_device *stm = to_stm_device(dev);
	int ret;

	ret = sprintf(buf, "%u\n", stm->data->sw_nchannels);

	return ret;
}

static DEVICE_ATTR_RO(channels);

static ssize_t hw_override_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct stm_device *stm = to_stm_device(dev);
	int ret;

	ret = sprintf(buf, "%u\n", stm->data->hw_override);

	return ret;
}

static DEVICE_ATTR_RO(hw_override);

static struct attribute *stm_attrs[] = {
	&dev_attr_masters.attr,
	&dev_attr_channels.attr,
	&dev_attr_hw_override.attr,
	NULL,
};

ATTRIBUTE_GROUPS(stm);

static struct class stm_class = {
	.name		= "stm",
	.dev_groups	= stm_groups,
};

/**
 * stm_find_device() - find stm device by name
 * @buf: character buffer containing the name
 *
 * This is called when either policy gets assigned to an stm device or an
 * stm_source device gets linked to an stm device.
 *
 * This grabs device's reference (get_device()) and module reference, both
 * of which the calling path needs to make sure to drop with stm_put_device().
 *
 * Return: stm device pointer or null if lookup failed.
 */
struct stm_device *stm_find_device(const char *buf)
{
	struct stm_device *stm;
	struct device *dev;

	if (!stm_core_up)
		return NULL;

	dev = class_find_device_by_name(&stm_class, buf);
	if (!dev)
		return NULL;

	stm = to_stm_device(dev);
	if (!try_module_get(stm->owner)) {
		/* matches class_find_device() above */
		put_device(dev);
		return NULL;
	}

	return stm;
}

/**
 * stm_put_device() - drop references on the stm device
 * @stm: stm device, previously acquired by stm_find_device()
 *
 * This drops the module reference and device reference taken by
 * stm_find_device() or stm_char_open().
 */
void stm_put_device(struct stm_device *stm)
{
	module_put(stm->owner);
	put_device(&stm->dev);
}

/*
 * Internally we only care about software-writable masters here, that is the
 * ones in the range [stm_data->sw_start..stm_data->sw_end], however we need
 * original master numbers to be visible externally, since they are the ones
 * that will appear in the STP stream. Thus, the internal bookkeeping uses
 * $master - stm_data->sw_start to reference master descriptors and such.
 */

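/*
 * For example (illustrative numbers only): with sw_start == 16 and
 * sw_end == 127, the descriptor for STP master 16 lives in masters[0]
 * and the one for master 127 in masters[111]; stm_master() returns NULL
 * for anything outside that range.
 */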
#define __stm_master(_s, _m)				\
	((_s)->masters[(_m) - (_s)->data->sw_start])

static inline struct stp_master *
stm_master(struct stm_device *stm, unsigned int idx)
{
	if (idx < stm->data->sw_start || idx > stm->data->sw_end)
		return NULL;

	return __stm_master(stm, idx);
}

static int stp_master_alloc(struct stm_device *stm, unsigned int idx)
{
	struct stp_master *master;

	master = kzalloc(struct_size(master, chan_map,
				     BITS_TO_LONGS(stm->data->sw_nchannels)),
			 GFP_ATOMIC);
	if (!master)
		return -ENOMEM;

	master->nr_free = stm->data->sw_nchannels;
	__stm_master(stm, idx) = master;

	return 0;
}

static void stp_master_free(struct stm_device *stm, unsigned int idx)
{
	struct stp_master *master = stm_master(stm, idx);

	if (!master)
		return;

	__stm_master(stm, idx) = NULL;
	kfree(master);
}

static void stm_output_claim(struct stm_device *stm, struct stm_output *output)
{
	struct stp_master *master = stm_master(stm, output->master);

	lockdep_assert_held(&stm->mc_lock);
	lockdep_assert_held(&output->lock);

	if (WARN_ON_ONCE(master->nr_free < output->nr_chans))
		return;

	bitmap_allocate_region(&master->chan_map[0], output->channel,
			       ilog2(output->nr_chans));

	master->nr_free -= output->nr_chans;
}

static void
stm_output_disclaim(struct stm_device *stm, struct stm_output *output)
{
	struct stp_master *master = stm_master(stm, output->master);

	lockdep_assert_held(&stm->mc_lock);
	lockdep_assert_held(&output->lock);

	bitmap_release_region(&master->chan_map[0], output->channel,
			      ilog2(output->nr_chans));

	master->nr_free += output->nr_chans;
	output->nr_chans = 0;
}

/*
 * This is like bitmap_find_free_region(), except it can ignore @start bits
 * at the beginning.
 */
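/*
 * On success the bit position of the free run is returned; -1 means that
 * no suitable run of @width free channels was found within [@start..@end].
 */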
static int find_free_channels(unsigned long *bitmap, unsigned int start,
			      unsigned int end, unsigned int width)
{
	unsigned int pos;
	int i;

	for (pos = start; pos < end + 1; pos = ALIGN(pos, width)) {
		pos = find_next_zero_bit(bitmap, end + 1, pos);
		if (pos + width > end + 1)
			break;

		if (pos & (width - 1))
			continue;

		for (i = 1; i < width && !test_bit(pos + i, bitmap); i++)
			;
		if (i == width)
			return pos;

		/* step over [pos..pos+i) to continue search */
		pos += i;
	}

	return -1;
}

static int
stm_find_master_chan(struct stm_device *stm, unsigned int width,
		     unsigned int *mstart, unsigned int mend,
		     unsigned int *cstart, unsigned int cend)
{
	struct stp_master *master;
	unsigned int midx;
	int pos, err;

	for (midx = *mstart; midx <= mend; midx++) {
		if (!stm_master(stm, midx)) {
			err = stp_master_alloc(stm, midx);
			if (err)
				return err;
		}

		master = stm_master(stm, midx);

		if (!master->nr_free)
			continue;

		pos = find_free_channels(master->chan_map, *cstart, cend,
					 width);
		if (pos < 0)
			continue;

		*mstart = midx;
		*cstart = pos;
		return 0;
	}

	return -ENOSPC;
}

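/*
 * Pick a master/channel block that satisfies both the policy node's
 * master/channel ranges and the requested @width, claim it for @output
 * and, if the protocol driver implements output_open(), let it set up
 * its per-output state.
 */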
static int stm_output_assign(struct stm_device *stm, unsigned int width,
			     struct stp_policy_node *policy_node,
			     struct stm_output *output)
{
	unsigned int midx, cidx, mend, cend;
	int ret = -EINVAL;

	if (width > stm->data->sw_nchannels)
		return -EINVAL;

	/* We no longer accept policy_node==NULL here */
	if (WARN_ON_ONCE(!policy_node))
		return -EINVAL;

	/*
	 * Also, the caller holds reference to policy_node, so it won't
	 * disappear on us.
	 */
	stp_policy_node_get_ranges(policy_node, &midx, &mend, &cidx, &cend);

	spin_lock(&stm->mc_lock);
	spin_lock(&output->lock);
	/* output is already assigned -- shouldn't happen */
	if (WARN_ON_ONCE(output->nr_chans))
		goto unlock;

	ret = stm_find_master_chan(stm, width, &midx, mend, &cidx, cend);
	if (ret < 0)
		goto unlock;

	output->master = midx;
	output->channel = cidx;
	output->nr_chans = width;
	if (stm->pdrv->output_open) {
		void *priv = stp_policy_node_priv(policy_node);

		if (WARN_ON_ONCE(!priv))
			goto unlock;

		/* configfs subsys mutex is held by the caller */
		ret = stm->pdrv->output_open(priv, output);
		if (ret)
			goto unlock;
	}

	stm_output_claim(stm, output);
	dev_dbg(&stm->dev, "assigned %u:%u (+%u)\n", midx, cidx, width);

	ret = 0;
unlock:
	if (ret)
		output->nr_chans = 0;

	spin_unlock(&output->lock);
	spin_unlock(&stm->mc_lock);

	return ret;
}

static void stm_output_free(struct stm_device *stm, struct stm_output *output)
{
	spin_lock(&stm->mc_lock);
	spin_lock(&output->lock);
	if (output->nr_chans)
		stm_output_disclaim(stm, output);
	if (stm->pdrv && stm->pdrv->output_close)
		stm->pdrv->output_close(output);
	spin_unlock(&output->lock);
	spin_unlock(&stm->mc_lock);
}

static void stm_output_init(struct stm_output *output)
{
	spin_lock_init(&output->lock);
}

static int major_match(struct device *dev, const void *data)
{
	unsigned int major = *(unsigned int *)data;

	return MAJOR(dev->devt) == major;
}

/*
 * Framing protocol management
 * Modules can implement STM protocol drivers and (un-)register them
 * with the STM class framework.
 */
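/*
 * Lookups by name fall back to "p_basic" (loaded from stm_core_init() when
 * CONFIG_STM_PROTO_BASIC is enabled), so a policy that does not name a
 * protocol explicitly ends up using the basic framing driver.
 */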
static struct list_head stm_pdrv_head;
static struct mutex stm_pdrv_mutex;

struct stm_pdrv_entry {
	struct list_head entry;
	const struct stm_protocol_driver *pdrv;
	const struct config_item_type *node_type;
};

static const struct stm_pdrv_entry *
__stm_lookup_protocol(const char *name)
{
	struct stm_pdrv_entry *pe;

	/*
	 * If no name is given (NULL or ""), fall back to "p_basic".
	 */
	if (!name || !*name)
		name = "p_basic";

	list_for_each_entry(pe, &stm_pdrv_head, entry) {
		if (!strcmp(name, pe->pdrv->name))
			return pe;
	}

	return NULL;
}

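/*
 * Register a protocol driver so that policies can select it by name;
 * -EEXIST is returned if a driver with the same name is already present.
 */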
int stm_register_protocol(const struct stm_protocol_driver *pdrv)
{
	struct stm_pdrv_entry *pe = NULL;
	int ret = -ENOMEM;

	mutex_lock(&stm_pdrv_mutex);

	if (__stm_lookup_protocol(pdrv->name)) {
		ret = -EEXIST;
		goto unlock;
	}

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		goto unlock;

	if (pdrv->policy_attr) {
		pe->node_type = get_policy_node_type(pdrv->policy_attr);
		if (!pe->node_type)
			goto unlock;
	}

	list_add_tail(&pe->entry, &stm_pdrv_head);
	pe->pdrv = pdrv;

	ret = 0;
unlock:
	mutex_unlock(&stm_pdrv_mutex);

	if (ret)
		kfree(pe);

	return ret;
}
EXPORT_SYMBOL_GPL(stm_register_protocol);

void stm_unregister_protocol(const struct stm_protocol_driver *pdrv)
{
	struct stm_pdrv_entry *pe, *iter;

	mutex_lock(&stm_pdrv_mutex);

	list_for_each_entry_safe(pe, iter, &stm_pdrv_head, entry) {
		if (pe->pdrv == pdrv) {
			list_del(&pe->entry);

			if (pe->node_type) {
				kfree(pe->node_type->ct_attrs);
				kfree(pe->node_type);
			}
			kfree(pe);
			break;
		}
	}

	mutex_unlock(&stm_pdrv_mutex);
}
EXPORT_SYMBOL_GPL(stm_unregister_protocol);

static bool stm_get_protocol(const struct stm_protocol_driver *pdrv)
{
	return try_module_get(pdrv->owner);
}

void stm_put_protocol(const struct stm_protocol_driver *pdrv)
{
	module_put(pdrv->owner);
}

int stm_lookup_protocol(const char *name,
			const struct stm_protocol_driver **pdrv,
			const struct config_item_type **node_type)
{
	const struct stm_pdrv_entry *pe;

	mutex_lock(&stm_pdrv_mutex);

	pe = __stm_lookup_protocol(name);
	if (pe && pe->pdrv && stm_get_protocol(pe->pdrv)) {
		*pdrv = pe->pdrv;
		*node_type = pe->node_type;
	}

	mutex_unlock(&stm_pdrv_mutex);

	return pe ? 0 : -ENOENT;
}

static int stm_char_open(struct inode *inode, struct file *file)
{
	struct stm_file *stmf;
	struct device *dev;
	unsigned int major = imajor(inode);
	int err = -ENOMEM;

	dev = class_find_device(&stm_class, NULL, &major, major_match);
	if (!dev)
		return -ENODEV;

	stmf = kzalloc(sizeof(*stmf), GFP_KERNEL);
	if (!stmf)
		goto err_put_device;

	err = -ENODEV;
	stm_output_init(&stmf->output);
	stmf->stm = to_stm_device(dev);

	if (!try_module_get(stmf->stm->owner))
		goto err_free;

	file->private_data = stmf;

	return nonseekable_open(inode, file);

err_free:
	kfree(stmf);
err_put_device:
	/* matches class_find_device() above */
	put_device(dev);

	return err;
}

static int stm_char_release(struct inode *inode, struct file *file)
{
	struct stm_file *stmf = file->private_data;
	struct stm_device *stm = stmf->stm;

	if (stm->data->unlink)
		stm->data->unlink(stm->data, stmf->output.master,
				  stmf->output.channel);

	stm_output_free(stm, &stmf->output);

	/*
	 * matches the stm_char_open()'s
	 * class_find_device() + try_module_get()
	 */
	stm_put_device(stm);
	kfree(stmf);

	return 0;
}

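/*
 * Walk the NULL-terminated @ids array of policy node names and assign the
 * output from the first node that exists in this device's policy.
 */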
static int
stm_assign_first_policy(struct stm_device *stm, struct stm_output *output,
			char **ids, unsigned int width)
{
	struct stp_policy_node *pn;
	int err, n;

	/*
	 * On success, stp_policy_node_lookup() will return holding the
	 * configfs subsystem mutex, which is then released in
	 * stp_policy_node_put(). This allows the pdrv->output_open() in
	 * stm_output_assign() to serialize against the attribute accessors.
	 */
	for (n = 0, pn = NULL; ids[n] && !pn; n++)
		pn = stp_policy_node_lookup(stm, ids[n]);

	if (!pn)
		return -EINVAL;

	err = stm_output_assign(stm, width, pn, output);

	stp_policy_node_put(pn);

	return err;
}

/**
 * stm_data_write() - send the given payload as data packets
 * @data: stm driver's data
 * @m: STP master
 * @c: STP channel
 * @ts_first: timestamp the first packet
 * @buf: data payload buffer
 * @count: data payload size
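 *
 * Return: number of bytes written, or a negative error code.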
 */
ssize_t notrace stm_data_write(struct stm_data *data, unsigned int m,
			       unsigned int c, bool ts_first, const void *buf,
			       size_t count)
{
	unsigned int flags = ts_first ? STP_PACKET_TIMESTAMPED : 0;
	ssize_t sz;
	size_t pos;

	for (pos = 0, sz = 0; pos < count; pos += sz) {
		sz = min_t(unsigned int, count - pos, 8);
		sz = data->packet(data, m, c, STP_PACKET_DATA, flags, sz,
				  &((u8 *)buf)[pos]);
		if (sz <= 0)
			break;

		if (ts_first) {
			flags = 0;
			ts_first = false;
		}
	}

	return sz < 0 ? sz : pos;
}
EXPORT_SYMBOL_GPL(stm_data_write);

static ssize_t notrace
stm_write(struct stm_device *stm, struct stm_output *output,
	  unsigned int chan, const char *buf, size_t count,
	  struct stm_source_data *source)
{
	int err;

	/* stm->pdrv is serialized against policy_mutex */
	if (!stm->pdrv)
		return -ENODEV;

	err = stm->pdrv->write(stm->data, output, chan, buf, count, source);
	if (err < 0)
		return err;

	return err;
}

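/*
 * Character device write path: on the first write with no output assigned
 * yet, pick a policy node by task name (falling back to "default"), then
 * pass the payload on to the protocol driver.
 */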
static ssize_t stm_char_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	struct stm_file *stmf = file->private_data;
	struct stm_device *stm = stmf->stm;
	char *kbuf;
	int err;

	if (count + 1 > PAGE_SIZE)
		count = PAGE_SIZE - 1;

	/*
	 * If no m/c have been assigned to this writer up to this
	 * point, try to use the task name and "default" policy entries.
	 */
	if (!stmf->output.nr_chans) {
		char comm[sizeof(current->comm)];
		char *ids[] = { comm, "default", NULL };

		get_task_comm(comm, current);

		err = stm_assign_first_policy(stmf->stm, &stmf->output, ids, 1);
		/*
		 * EBUSY means that somebody else just assigned this
		 * output, which is just fine for write()
		 */
		if (err)
			return err;
	}

	kbuf = kmalloc(count + 1, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	err = copy_from_user(kbuf, buf, count);
	if (err) {
		kfree(kbuf);
		return -EFAULT;
	}

	pm_runtime_get_sync(&stm->dev);

	count = stm_write(stm, &stmf->output, 0, kbuf, count, NULL);

	pm_runtime_mark_last_busy(&stm->dev);
	pm_runtime_put_autosuspend(&stm->dev);
	kfree(kbuf);

	return count;
}

static void stm_mmap_open(struct vm_area_struct *vma)
{
	struct stm_file *stmf = vma->vm_file->private_data;
	struct stm_device *stm = stmf->stm;

	pm_runtime_get(&stm->dev);
}

static void stm_mmap_close(struct vm_area_struct *vma)
{
	struct stm_file *stmf = vma->vm_file->private_data;
	struct stm_device *stm = stmf->stm;

	pm_runtime_mark_last_busy(&stm->dev);
	pm_runtime_put_autosuspend(&stm->dev);
}

static const struct vm_operations_struct stm_mmap_vmops = {
	.open	= stm_mmap_open,
	.close	= stm_mmap_close,
};

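/*
 * Map this writer's channel MMIO space (nr_chans * sw_mmiosz bytes at
 * offset 0) into userspace; only available if the driver implements
 * mmio_addr().
 */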
static int stm_char_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct stm_file *stmf = file->private_data;
	struct stm_device *stm = stmf->stm;
	unsigned long size, phys;

	if (!stm->data->mmio_addr)
		return -EOPNOTSUPP;

	if (vma->vm_pgoff)
		return -EINVAL;

	size = vma->vm_end - vma->vm_start;

	if (stmf->output.nr_chans * stm->data->sw_mmiosz != size)
		return -EINVAL;

	phys = stm->data->mmio_addr(stm->data, stmf->output.master,
				    stmf->output.channel,
				    stmf->output.nr_chans);

	if (!phys)
		return -EINVAL;

	pm_runtime_get_sync(&stm->dev);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_ops = &stm_mmap_vmops;
	vm_iomap_memory(vma, phys, size);

	return 0;
}

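/*
 * STP_POLICY_ID_SET handler: validate the stp_policy_id argument from
 * userspace, assign an output from the named policy node and notify the
 * hardware via the driver's link() callback.
 */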
static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
{
	struct stm_device *stm = stmf->stm;
	struct stp_policy_id *id;
	char *ids[] = { NULL, NULL };
	int ret = -EINVAL, wlimit = 1;
	u32 size;

	if (stmf->output.nr_chans)
		return -EBUSY;

	if (copy_from_user(&size, arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(*id) || size >= PATH_MAX + sizeof(*id))
		return -EINVAL;

	/*
	 * size + 1 to make sure the .id string at the bottom is terminated,
	 * which is also why memdup_user() is not useful here
	 */
	id = kzalloc(size + 1, GFP_KERNEL);
	if (!id)
		return -ENOMEM;

	if (copy_from_user(id, arg, size)) {
		ret = -EFAULT;
		goto err_free;
	}

	if (id->__reserved_0 || id->__reserved_1)
		goto err_free;

	if (stm->data->sw_mmiosz)
		wlimit = PAGE_SIZE / stm->data->sw_mmiosz;

	if (id->width < 1 || id->width > wlimit)
		goto err_free;

	ids[0] = id->id;
	ret = stm_assign_first_policy(stmf->stm, &stmf->output, ids,
				      id->width);
	if (ret)
		goto err_free;

	if (stm->data->link)
		ret = stm->data->link(stm->data, stmf->output.master,
				      stmf->output.channel);

	if (ret)
		stm_output_free(stmf->stm, &stmf->output);

err_free:
	kfree(id);

	return ret;
}

static int stm_char_policy_get_ioctl(struct stm_file *stmf, void __user *arg)
{
	struct stp_policy_id id = {
		.size = sizeof(id),
		.master = stmf->output.master,
		.channel = stmf->output.channel,
		.width = stmf->output.nr_chans,
		.__reserved_0 = 0,
		.__reserved_1 = 0,
	};

	return copy_to_user(arg, &id, id.size) ? -EFAULT : 0;
}

static long
stm_char_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct stm_file *stmf = file->private_data;
	struct stm_data *stm_data = stmf->stm->data;
	int err = -ENOTTY;
	u64 options;

	switch (cmd) {
	case STP_POLICY_ID_SET:
		err = stm_char_policy_set_ioctl(stmf, (void __user *)arg);
		if (err)
			return err;

		return stm_char_policy_get_ioctl(stmf, (void __user *)arg);

	case STP_POLICY_ID_GET:
		return stm_char_policy_get_ioctl(stmf, (void __user *)arg);

	case STP_SET_OPTIONS:
		if (copy_from_user(&options, (u64 __user *)arg, sizeof(u64)))
			return -EFAULT;

		if (stm_data->set_options)
			err = stm_data->set_options(stm_data,
						    stmf->output.master,
						    stmf->output.channel,
						    stmf->output.nr_chans,
						    options);

		break;
	default:
		break;
	}

	return err;
}

static const struct file_operations stm_fops = {
	.open		= stm_char_open,
	.release	= stm_char_release,
	.write		= stm_char_write,
	.mmap		= stm_char_mmap,
	.unlocked_ioctl	= stm_char_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};

static void stm_device_release(struct device *dev)
{
	struct stm_device *stm = to_stm_device(dev);

	vfree(stm);
}

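/**
 * stm_register_device() - register an stm device with the stm class
 * @parent: parent device
 * @stm_data: device description and driver callbacks
 * @owner: module implementing the driver
 *
 * Return: 0 on success, -EPROBE_DEFER if the stm core is not initialized
 * yet, or another negative error code on failure.
 */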
int stm_register_device(struct device *parent, struct stm_data *stm_data,
			struct module *owner)
{
	struct stm_device *stm;
	unsigned int nmasters;
	int err = -ENOMEM;

	if (!stm_core_up)
		return -EPROBE_DEFER;

	if (!stm_data->packet || !stm_data->sw_nchannels)
		return -EINVAL;

	nmasters = stm_data->sw_end - stm_data->sw_start + 1;
	stm = vzalloc(sizeof(*stm) + nmasters * sizeof(void *));
	if (!stm)
		return -ENOMEM;

	stm->major = register_chrdev(0, stm_data->name, &stm_fops);
	if (stm->major < 0) {
		err = stm->major;
		vfree(stm);
		return err;
	}

	device_initialize(&stm->dev);
	stm->dev.devt = MKDEV(stm->major, 0);
	stm->dev.class = &stm_class;
	stm->dev.parent = parent;
	stm->dev.release = stm_device_release;

	mutex_init(&stm->link_mutex);
	spin_lock_init(&stm->link_lock);
	INIT_LIST_HEAD(&stm->link_list);

	/* initialize the object before it is accessible via sysfs */
	spin_lock_init(&stm->mc_lock);
	mutex_init(&stm->policy_mutex);
	stm->sw_nmasters = nmasters;
	stm->owner = owner;
	stm->data = stm_data;
	stm_data->stm = stm;

	err = kobject_set_name(&stm->dev.kobj, "%s", stm_data->name);
	if (err)
		goto err_device;

	err = device_add(&stm->dev);
	if (err)
		goto err_device;

	/*
	 * Use delayed autosuspend to avoid bouncing back and forth
	 * on recurring character device writes, with the initial
	 * delay time of 2 seconds.
	 */
	pm_runtime_no_callbacks(&stm->dev);
	pm_runtime_use_autosuspend(&stm->dev);
	pm_runtime_set_autosuspend_delay(&stm->dev, 2000);
	pm_runtime_set_suspended(&stm->dev);
	pm_runtime_enable(&stm->dev);

	return 0;

err_device:
	unregister_chrdev(stm->major, stm_data->name);

	/* calls stm_device_release() */
	put_device(&stm->dev);

	return err;
}
EXPORT_SYMBOL_GPL(stm_register_device);

static int __stm_source_link_drop(struct stm_source_device *src,
				  struct stm_device *stm);

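/**
 * stm_unregister_device() - unregister an stm device
 * @stm_data: device description that was used to register the device
 *
 * Drops all existing stm_source links, unbinds the policy, frees the
 * per-master bookkeeping and removes the device.
 */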
void stm_unregister_device(struct stm_data *stm_data)
{
	struct stm_device *stm = stm_data->stm;
	struct stm_source_device *src, *iter;
	int i, ret;

	pm_runtime_dont_use_autosuspend(&stm->dev);
	pm_runtime_disable(&stm->dev);

	mutex_lock(&stm->link_mutex);
	list_for_each_entry_safe(src, iter, &stm->link_list, link_entry) {
		ret = __stm_source_link_drop(src, stm);
		/*
		 * src <-> stm link must not change under the same
		 * stm::link_mutex, so complain loudly if it has;
		 * also in this situation ret!=0 means this src is
		 * not connected to this stm and it should be otherwise
		 * safe to proceed with the tear-down of stm.
		 */
		WARN_ON_ONCE(ret);
	}
	mutex_unlock(&stm->link_mutex);

	synchronize_srcu(&stm_source_srcu);

	unregister_chrdev(stm->major, stm_data->name);

	mutex_lock(&stm->policy_mutex);
	if (stm->policy)
		stp_policy_unbind(stm->policy);
	mutex_unlock(&stm->policy_mutex);

	for (i = stm->data->sw_start; i <= stm->data->sw_end; i++)
		stp_master_free(stm, i);

	device_unregister(&stm->dev);
	stm_data->stm = NULL;
}
EXPORT_SYMBOL_GPL(stm_unregister_device);

/*
 * stm::link_list access serialization uses a spinlock and a mutex; holding
 * either of them guarantees that the list is stable; modification requires
 * holding both of them.
 *
 * Lock ordering is as follows:
 *   stm::link_mutex
 *     stm::link_lock
 *       src::link_lock
 */

/**
 * stm_source_link_add() - connect an stm_source device to an stm device
 * @src: stm_source device
 * @stm: stm device
 *
 * This function establishes a link from stm_source to an stm device so that
 * the former can send out trace data to the latter.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int stm_source_link_add(struct stm_source_device *src,
			       struct stm_device *stm)
{
	char *ids[] = { NULL, "default", NULL };
	int err = -ENOMEM;

	mutex_lock(&stm->link_mutex);
	spin_lock(&stm->link_lock);
	spin_lock(&src->link_lock);

	/* src->link is dereferenced under stm_source_srcu but not the list */
	rcu_assign_pointer(src->link, stm);
	list_add_tail(&src->link_entry, &stm->link_list);

	spin_unlock(&src->link_lock);
	spin_unlock(&stm->link_lock);
	mutex_unlock(&stm->link_mutex);

	ids[0] = kstrdup(src->data->name, GFP_KERNEL);
	if (!ids[0])
		goto fail_detach;

	err = stm_assign_first_policy(stm, &src->output, ids,
				      src->data->nr_chans);
	kfree(ids[0]);

	if (err)
		goto fail_detach;

	/* this is to notify the STM device that a new link has been made */
	if (stm->data->link)
		err = stm->data->link(stm->data, src->output.master,
				      src->output.channel);

	if (err)
		goto fail_free_output;

	/* this is to let the source carry out all necessary preparations */
	if (src->data->link)
		src->data->link(src->data);

	return 0;

fail_free_output:
	stm_output_free(stm, &src->output);

fail_detach:
	mutex_lock(&stm->link_mutex);
	spin_lock(&stm->link_lock);
	spin_lock(&src->link_lock);

	rcu_assign_pointer(src->link, NULL);
	list_del_init(&src->link_entry);

	spin_unlock(&src->link_lock);
	spin_unlock(&stm->link_lock);
	mutex_unlock(&stm->link_mutex);

	return err;
}

/**
 * __stm_source_link_drop() - detach stm_source from an stm device
 * @src: stm_source device
 * @stm: stm device
 *
 * If @stm is @src::link, disconnect them from one another and put the
 * reference on the @stm device.
 *
 * Caller must hold stm::link_mutex.
 */
static int __stm_source_link_drop(struct stm_source_device *src,
				  struct stm_device *stm)
{
	struct stm_device *link;
	int ret = 0;

	lockdep_assert_held(&stm->link_mutex);

	/* for stm::link_list modification, we hold both mutex and spinlock */
	spin_lock(&stm->link_lock);
	spin_lock(&src->link_lock);
	link = srcu_dereference_check(src->link, &stm_source_srcu, 1);

	/*
	 * The linked device may have changed since we last looked, because
	 * we weren't holding the src::link_lock back then; if this is the
	 * case, tell the caller to retry.
	 */
	if (link != stm) {
		ret = -EAGAIN;
		goto unlock;
	}

	stm_output_free(link, &src->output);
	list_del_init(&src->link_entry);
	pm_runtime_mark_last_busy(&link->dev);
	pm_runtime_put_autosuspend(&link->dev);
	/* matches stm_find_device() from stm_source_link_store() */
	stm_put_device(link);
	rcu_assign_pointer(src->link, NULL);

unlock:
	spin_unlock(&src->link_lock);
	spin_unlock(&stm->link_lock);

	/*
	 * Call the unlink callbacks for both source and stm, when we know
	 * that we have actually performed the unlinking.
	 */
	if (!ret) {
		if (src->data->unlink)
			src->data->unlink(src->data);

		if (stm->data->unlink)
			stm->data->unlink(stm->data, src->output.master,
					  src->output.channel);
	}

	return ret;
}

/**
 * stm_source_link_drop() - detach stm_source from its stm device
 * @src: stm_source device
 *
 * Unlinking means disconnecting from source's STM device; after this
 * writes will be unsuccessful until it is linked to a new STM device.
 *
 * This will happen on "stm_source_link" sysfs attribute write to undo
 * the existing link (if any), or on linked STM device's de-registration.
 */
static void stm_source_link_drop(struct stm_source_device *src)
{
	struct stm_device *stm;
	int idx, ret;

retry:
	idx = srcu_read_lock(&stm_source_srcu);
	/*
	 * The stm device will be valid for the duration of this
	 * read section, but the link may change before we grab
	 * the src::link_lock in __stm_source_link_drop().
	 */
	stm = srcu_dereference(src->link, &stm_source_srcu);

	ret = 0;
	if (stm) {
		mutex_lock(&stm->link_mutex);
		ret = __stm_source_link_drop(src, stm);
		mutex_unlock(&stm->link_mutex);
	}

	srcu_read_unlock(&stm_source_srcu, idx);

	/* if it did change, retry */
	if (ret == -EAGAIN)
		goto retry;
}

static ssize_t stm_source_link_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct stm_source_device *src = to_stm_source_device(dev);
	struct stm_device *stm;
	int idx, ret;

	idx = srcu_read_lock(&stm_source_srcu);
	stm = srcu_dereference(src->link, &stm_source_srcu);
	ret = sprintf(buf, "%s\n",
		      stm ? dev_name(&stm->dev) : "<none>");
	srcu_read_unlock(&stm_source_srcu, idx);

	return ret;
}

static ssize_t stm_source_link_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct stm_source_device *src = to_stm_source_device(dev);
	struct stm_device *link;
	int err;

	stm_source_link_drop(src);

	link = stm_find_device(buf);
	if (!link)
		return -EINVAL;

	pm_runtime_get(&link->dev);

	err = stm_source_link_add(src, link);
	if (err) {
		pm_runtime_put_autosuspend(&link->dev);
		/* matches the stm_find_device() above */
		stm_put_device(link);
	}

	return err ? : count;
}

static DEVICE_ATTR_RW(stm_source_link);

static struct attribute *stm_source_attrs[] = {
	&dev_attr_stm_source_link.attr,
	NULL,
};

ATTRIBUTE_GROUPS(stm_source);

static struct class stm_source_class = {
	.name		= "stm_source",
	.dev_groups	= stm_source_groups,
};

static void stm_source_device_release(struct device *dev)
{
	struct stm_source_device *src = to_stm_source_device(dev);

	kfree(src);
}

/**
 * stm_source_register_device() - register an stm_source device
 * @parent: parent device
 * @data: device description structure
 *
 * This will create a device of stm_source class that can write
 * data to an stm device once linked.
 *
 * Return: 0 on success, -errno otherwise.
 */
int stm_source_register_device(struct device *parent,
			       struct stm_source_data *data)
{
	struct stm_source_device *src;
	int err;

	if (!stm_core_up)
		return -EPROBE_DEFER;

	src = kzalloc(sizeof(*src), GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	device_initialize(&src->dev);
	src->dev.class = &stm_source_class;
	src->dev.parent = parent;
	src->dev.release = stm_source_device_release;

	err = kobject_set_name(&src->dev.kobj, "%s", data->name);
	if (err)
		goto err;

	pm_runtime_no_callbacks(&src->dev);
	pm_runtime_forbid(&src->dev);

	err = device_add(&src->dev);
	if (err)
		goto err;

	stm_output_init(&src->output);
	spin_lock_init(&src->link_lock);
	INIT_LIST_HEAD(&src->link_entry);
	src->data = data;
	data->src = src;

	return 0;

err:
	put_device(&src->dev);

	return err;
}
EXPORT_SYMBOL_GPL(stm_source_register_device);

/**
 * stm_source_unregister_device() - unregister an stm_source device
 * @data: device description that was used to register the device
 *
 * This will remove a previously created stm_source device from the system.
 */
void stm_source_unregister_device(struct stm_source_data *data)
{
	struct stm_source_device *src = data->src;

	stm_source_link_drop(src);

	device_unregister(&src->dev);
}
EXPORT_SYMBOL_GPL(stm_source_unregister_device);

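/**
 * stm_source_write() - write data from an stm_source to its linked stm device
 * @data: source device descriptor
 * @chan: channel offset within the source's assigned channel block
 * @buf: data payload buffer
 * @count: data payload size
 *
 * Return: number of bytes written, or a negative error code (-ENODEV if
 * the source is not linked, -EINVAL if @chan is out of range).
 */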
int notrace stm_source_write(struct stm_source_data *data,
			     unsigned int chan,
			     const char *buf, size_t count)
{
	struct stm_source_device *src = data->src;
	struct stm_device *stm;
	int idx;

	if (!src->output.nr_chans)
		return -ENODEV;

	if (chan >= src->output.nr_chans)
		return -EINVAL;

	idx = srcu_read_lock(&stm_source_srcu);

	stm = srcu_dereference(src->link, &stm_source_srcu);
	if (stm)
		count = stm_write(stm, &src->output, chan, buf, count, data);
	else
		count = -ENODEV;

	srcu_read_unlock(&stm_source_srcu, idx);

	return count;
}
EXPORT_SYMBOL_GPL(stm_source_write);

static int __init stm_core_init(void)
{
	int err;

	err = class_register(&stm_class);
	if (err)
		return err;

	err = class_register(&stm_source_class);
	if (err)
		goto err_stm;

	err = stp_configfs_init();
	if (err)
		goto err_src;

	init_srcu_struct(&stm_source_srcu);
	INIT_LIST_HEAD(&stm_pdrv_head);
	mutex_init(&stm_pdrv_mutex);

	/*
	 * So as to not confuse existing users with a requirement
	 * to load yet another module, do it here.
	 */
	if (IS_ENABLED(CONFIG_STM_PROTO_BASIC))
		(void)request_module_nowait("stm_p_basic");
	stm_core_up++;

	return 0;

err_src:
	class_unregister(&stm_source_class);
err_stm:
	class_unregister(&stm_class);

	return err;
}

module_init(stm_core_init);

static void __exit stm_core_exit(void)
{
	cleanup_srcu_struct(&stm_source_srcu);
	class_unregister(&stm_source_class);
	class_unregister(&stm_class);
	stp_configfs_exit();
}

module_exit(stm_core_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("System Trace Module device class");
MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");