1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * cdev.c - Character device component for Mostcore
4 *
5 * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
6 */
7
8 #include <linux/module.h>
9 #include <linux/sched.h>
10 #include <linux/fs.h>
11 #include <linux/slab.h>
12 #include <linux/device.h>
13 #include <linux/cdev.h>
14 #include <linux/poll.h>
15 #include <linux/kfifo.h>
16 #include <linux/uaccess.h>
17 #include <linux/idr.h>
18 #include <linux/most.h>
19
20 #define CHRDEV_REGION_SIZE 50
21
/* Device class under which all cdev channel device nodes are created */
static const struct class most_cdev_class = {
	.name = "most_cdev"
};
25
/*
 * Component-wide state: the reserved char device region, the minor
 * number allocator and the component handle registered with mostcore.
 */
static struct cdev_component {
	dev_t devno;			/* base dev_t of the reserved region */
	struct ida minor_id;		/* allocator for per-channel minors */
	unsigned int major;		/* major number shared by all channels */
	struct most_component cc;	/* handle passed to the mostcore API */
} comp;
32
/* Per-channel state backing one character device node */
struct comp_channel {
	wait_queue_head_t wq;		/* readers/writers sleep here */
	spinlock_t unlink; /* synchronization lock to unlink channels */
	struct cdev cdev;		/* embedded char device */
	struct device *dev;		/* NULL once the hardware is gone */
	struct mutex io_mutex;		/* serializes read/write/poll state */
	struct most_interface *iface;	/* owning MOST interface */
	struct most_channel_config *cfg;	/* direction, buffer size, type */
	unsigned int channel_id;	/* channel index on @iface */
	dev_t devno;			/* this channel's device number */
	size_t mbo_offs;		/* byte offset into the current MBO */
	DECLARE_KFIFO_PTR(fifo, typeof(struct mbo *));	/* pending MBOs */
	int access_ref;			/* 1 while the device is held open */
	struct list_head list;		/* link in the global channel_list */
};
48
/* Map an embedded struct cdev back to its enclosing comp_channel */
#define to_channel(d) container_of(d, struct comp_channel, cdev)
/* All instantiated channels; protected by ch_list_lock */
static LIST_HEAD(channel_list);
static DEFINE_SPINLOCK(ch_list_lock);
52
ch_has_mbo(struct comp_channel * c)53 static inline bool ch_has_mbo(struct comp_channel *c)
54 {
55 return channel_has_mbo(c->iface, c->channel_id, &comp.cc) > 0;
56 }
57
ch_get_mbo(struct comp_channel * c,struct mbo ** mbo)58 static inline struct mbo *ch_get_mbo(struct comp_channel *c, struct mbo **mbo)
59 {
60 if (!kfifo_peek(&c->fifo, mbo)) {
61 *mbo = most_get_mbo(c->iface, c->channel_id, &comp.cc);
62 if (*mbo)
63 kfifo_in(&c->fifo, mbo, 1);
64 }
65 return *mbo;
66 }
67
get_channel(struct most_interface * iface,int id)68 static struct comp_channel *get_channel(struct most_interface *iface, int id)
69 {
70 struct comp_channel *c, *tmp;
71 unsigned long flags;
72
73 spin_lock_irqsave(&ch_list_lock, flags);
74 list_for_each_entry_safe(c, tmp, &channel_list, list) {
75 if ((c->iface == iface) && (c->channel_id == id)) {
76 spin_unlock_irqrestore(&ch_list_lock, flags);
77 return c;
78 }
79 }
80 spin_unlock_irqrestore(&ch_list_lock, flags);
81 return NULL;
82 }
83
/*
 * stop_channel - drain the local fifo and stop the channel in the core
 * @c: channel to stop
 *
 * Hands every locally buffered MBO back to the core before requesting
 * the channel stop.
 *
 * NOTE(review): the cast to a plain struct kfifo * sidesteps the typed
 * fifo's compile-time element check; the element size recorded by
 * kfifo_alloc() still makes this pop whole mbo pointers at runtime —
 * confirm against the kfifo API before changing.
 */
static void stop_channel(struct comp_channel *c)
{
	struct mbo *mbo;

	while (kfifo_out((struct kfifo *)&c->fifo, &mbo, 1))
		most_put_mbo(mbo);
	most_stop_channel(c->iface, c->channel_id, &comp.cc);
}
92
destroy_cdev(struct comp_channel * c)93 static void destroy_cdev(struct comp_channel *c)
94 {
95 unsigned long flags;
96
97 device_destroy(&most_cdev_class, c->devno);
98 cdev_del(&c->cdev);
99 spin_lock_irqsave(&ch_list_lock, flags);
100 list_del(&c->list);
101 spin_unlock_irqrestore(&ch_list_lock, flags);
102 }
103
destroy_channel(struct comp_channel * c)104 static void destroy_channel(struct comp_channel *c)
105 {
106 ida_free(&comp.minor_id, MINOR(c->devno));
107 kfifo_free(&c->fifo);
108 kfree(c);
109 }
110
111 /**
112 * comp_open - implements the syscall to open the device
113 * @inode: inode pointer
114 * @filp: file pointer
115 *
116 * This stores the channel pointer in the private data field of
117 * the file structure and activates the channel within the core.
118 */
comp_open(struct inode * inode,struct file * filp)119 static int comp_open(struct inode *inode, struct file *filp)
120 {
121 struct comp_channel *c;
122 int ret;
123
124 c = to_channel(inode->i_cdev);
125 filp->private_data = c;
126
127 if (((c->cfg->direction == MOST_CH_RX) &&
128 ((filp->f_flags & O_ACCMODE) != O_RDONLY)) ||
129 ((c->cfg->direction == MOST_CH_TX) &&
130 ((filp->f_flags & O_ACCMODE) != O_WRONLY))) {
131 return -EACCES;
132 }
133
134 mutex_lock(&c->io_mutex);
135 if (!c->dev) {
136 mutex_unlock(&c->io_mutex);
137 return -ENODEV;
138 }
139
140 if (c->access_ref) {
141 mutex_unlock(&c->io_mutex);
142 return -EBUSY;
143 }
144
145 c->mbo_offs = 0;
146 ret = most_start_channel(c->iface, c->channel_id, &comp.cc);
147 if (!ret)
148 c->access_ref = 1;
149 mutex_unlock(&c->io_mutex);
150 return ret;
151 }
152
153 /**
154 * comp_close - implements the syscall to close the device
155 * @inode: inode pointer
156 * @filp: file pointer
157 *
158 * This stops the channel within the core.
159 */
/**
 * comp_close - implements the syscall to close the device
 * @inode: inode pointer
 * @filp: file pointer
 *
 * This stops the channel within the core. Clearing access_ref under the
 * unlink spinlock keeps comp_rx_completion() from queueing MBOs into a
 * fifo that is about to be drained. If the hardware already disappeared
 * (c->dev == NULL, set by comp_disconnect_channel()), this close is the
 * last user and must free the channel itself.
 */
static int comp_close(struct inode *inode, struct file *filp)
{
	struct comp_channel *c = to_channel(inode->i_cdev);

	mutex_lock(&c->io_mutex);
	spin_lock(&c->unlink);
	c->access_ref = 0;
	spin_unlock(&c->unlink);
	if (c->dev) {
		stop_channel(c);
		mutex_unlock(&c->io_mutex);
	} else {
		/* disconnect already ran; release the remaining memory */
		mutex_unlock(&c->io_mutex);
		destroy_channel(c);
	}
	return 0;
}
177
178 /**
179 * comp_write - implements the syscall to write to the device
180 * @filp: file pointer
181 * @buf: pointer to user buffer
182 * @count: number of bytes to write
183 * @offset: offset from where to start writing
184 */
/**
 * comp_write - implements the syscall to write to the device
 * @filp: file pointer
 * @buf: pointer to user buffer
 * @count: number of bytes to write
 * @offset: offset from where to start writing
 *
 * Copies user data into the current MBO, accumulating across calls via
 * c->mbo_offs, and submits the buffer once it is full (or immediately
 * for control/async channels). Blocks for a free MBO unless O_NONBLOCK
 * is set. Returns the number of bytes consumed or a negative errno.
 */
static ssize_t comp_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *offset)
{
	int ret;
	size_t to_copy, left;
	struct mbo *mbo = NULL;
	struct comp_channel *c = filp->private_data;

	/* io_mutex is dropped while sleeping so completions can make progress */
	mutex_lock(&c->io_mutex);
	while (c->dev && !ch_get_mbo(c, &mbo)) {
		mutex_unlock(&c->io_mutex);

		if ((filp->f_flags & O_NONBLOCK))
			return -EAGAIN;
		if (wait_event_interruptible(c->wq, ch_has_mbo(c) || !c->dev))
			return -ERESTARTSYS;
		mutex_lock(&c->io_mutex);
	}

	/* device may have vanished while we slept */
	if (unlikely(!c->dev)) {
		ret = -ENODEV;
		goto unlock;
	}

	to_copy = min(count, c->cfg->buffer_size - c->mbo_offs);
	left = copy_from_user(mbo->virt_address + c->mbo_offs, buf, to_copy);
	/* only fail if nothing at all could be copied */
	if (left == to_copy) {
		ret = -EFAULT;
		goto unlock;
	}

	c->mbo_offs += to_copy - left;
	/* submit when the buffer is full or the type requires one MBO per write */
	if (c->mbo_offs >= c->cfg->buffer_size ||
	    c->cfg->data_type == MOST_CH_CONTROL ||
	    c->cfg->data_type == MOST_CH_ASYNC) {
		kfifo_skip(&c->fifo);
		mbo->buffer_length = c->mbo_offs;
		c->mbo_offs = 0;
		most_submit_mbo(mbo);
	}

	ret = to_copy - left;
unlock:
	mutex_unlock(&c->io_mutex);
	return ret;
}
231
232 /**
233 * comp_read - implements the syscall to read from the device
234 * @filp: file pointer
235 * @buf: pointer to user buffer
236 * @count: number of bytes to read
237 * @offset: offset from where to start reading
238 */
/**
 * comp_read - implements the syscall to read from the device
 * @filp: file pointer
 * @buf: pointer to user buffer
 * @count: number of bytes to read
 * @offset: offset from where to start reading
 *
 * Copies data from the MBO at the head of the local fifo to user space,
 * tracking partial reads via c->mbo_offs; the MBO is recycled to the
 * core once fully consumed. Blocks for data unless O_NONBLOCK is set.
 * Returns the number of bytes copied or a negative errno.
 */
static ssize_t
comp_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
{
	size_t to_copy, not_copied, copied;
	struct mbo *mbo = NULL;
	struct comp_channel *c = filp->private_data;

	/* io_mutex is dropped while sleeping so the rx completion can fill the fifo */
	mutex_lock(&c->io_mutex);
	while (c->dev && !kfifo_peek(&c->fifo, &mbo)) {
		mutex_unlock(&c->io_mutex);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		if (wait_event_interruptible(c->wq,
					     (!kfifo_is_empty(&c->fifo) ||
					      (!c->dev))))
			return -ERESTARTSYS;
		mutex_lock(&c->io_mutex);
	}

	/* make sure we don't submit to gone devices */
	if (unlikely(!c->dev)) {
		mutex_unlock(&c->io_mutex);
		return -ENODEV;
	}

	to_copy = min_t(size_t,
			count,
			mbo->processed_length - c->mbo_offs);

	not_copied = copy_to_user(buf,
				  mbo->virt_address + c->mbo_offs,
				  to_copy);

	copied = to_copy - not_copied;

	c->mbo_offs += copied;
	/* buffer exhausted: drop it from the fifo and hand it back to the core */
	if (c->mbo_offs >= mbo->processed_length) {
		kfifo_skip(&c->fifo);
		most_put_mbo(mbo);
		c->mbo_offs = 0;
	}
	mutex_unlock(&c->io_mutex);
	return copied;
}
283
comp_poll(struct file * filp,poll_table * wait)284 static __poll_t comp_poll(struct file *filp, poll_table *wait)
285 {
286 struct comp_channel *c = filp->private_data;
287 __poll_t mask = 0;
288
289 poll_wait(filp, &c->wq, wait);
290
291 mutex_lock(&c->io_mutex);
292 if (c->cfg->direction == MOST_CH_RX) {
293 if (!c->dev || !kfifo_is_empty(&c->fifo))
294 mask |= EPOLLIN | EPOLLRDNORM;
295 } else {
296 if (!c->dev || !kfifo_is_empty(&c->fifo) || ch_has_mbo(c))
297 mask |= EPOLLOUT | EPOLLWRNORM;
298 }
299 mutex_unlock(&c->io_mutex);
300 return mask;
301 }
302
303 /*
304 * Initialization of struct file_operations
305 */
/*
 * Initialization of struct file_operations
 * (no llseek: the device is a stream, not a seekable file)
 */
static const struct file_operations channel_fops = {
	.owner = THIS_MODULE,
	.read = comp_read,
	.write = comp_write,
	.open = comp_open,
	.release = comp_close,
	.poll = comp_poll,
};
314
315 /**
316 * comp_disconnect_channel - disconnect a channel
317 * @iface: pointer to interface instance
318 * @channel_id: channel index
319 *
320 * This frees allocated memory and removes the cdev that represents this
321 * channel in user space.
322 */
/**
 * comp_disconnect_channel - disconnect a channel
 * @iface: pointer to interface instance
 * @channel_id: channel index
 *
 * This frees allocated memory and removes the cdev that represents this
 * channel in user space. c->dev is cleared under the unlink spinlock so
 * comp_rx_completion() stops queueing MBOs. If the device is still held
 * open (access_ref set), only the cdev is torn down here and the final
 * free is deferred to comp_close(); the wake-up lets blocked readers and
 * writers observe the disconnect.
 */
static int comp_disconnect_channel(struct most_interface *iface, int channel_id)
{
	struct comp_channel *c;

	c = get_channel(iface, channel_id);
	if (!c)
		return -EINVAL;

	mutex_lock(&c->io_mutex);
	spin_lock(&c->unlink);
	c->dev = NULL;
	spin_unlock(&c->unlink);
	destroy_cdev(c);
	if (c->access_ref) {
		stop_channel(c);
		wake_up_interruptible(&c->wq);
		mutex_unlock(&c->io_mutex);
	} else {
		mutex_unlock(&c->io_mutex);
		destroy_channel(c);
	}
	return 0;
}
346
347 /**
348 * comp_rx_completion - completion handler for rx channels
349 * @mbo: pointer to buffer object that has completed
350 *
351 * This searches for the channel linked to this MBO and stores it in the local
352 * fifo buffer.
353 */
/**
 * comp_rx_completion - completion handler for rx channels
 * @mbo: pointer to buffer object that has completed
 *
 * This searches for the channel linked to this MBO and stores it in the
 * local fifo buffer. The unlink spinlock guarantees the channel is still
 * open and connected while the MBO is queued, racing against
 * comp_close() and comp_disconnect_channel().
 */
static int comp_rx_completion(struct mbo *mbo)
{
	struct comp_channel *c;

	if (!mbo)
		return -EINVAL;

	c = get_channel(mbo->ifp, mbo->hdm_channel_id);
	if (!c)
		return -EINVAL;

	spin_lock(&c->unlink);
	if (!c->access_ref || !c->dev) {
		/* closed or disconnected: refuse the buffer */
		spin_unlock(&c->unlink);
		return -ENODEV;
	}
	kfifo_in(&c->fifo, &mbo, 1);
	spin_unlock(&c->unlink);
#ifdef DEBUG_MESG
	if (kfifo_is_full(&c->fifo))
		dev_warn(c->dev, "Fifo is full\n");
#endif
	/* data arrived: wake blocked readers/pollers */
	wake_up_interruptible(&c->wq);
	return 0;
}
379
380 /**
381 * comp_tx_completion - completion handler for tx channels
382 * @iface: pointer to interface instance
383 * @channel_id: channel index/ID
384 *
385 * This wakes sleeping processes in the wait-queue.
386 */
comp_tx_completion(struct most_interface * iface,int channel_id)387 static int comp_tx_completion(struct most_interface *iface, int channel_id)
388 {
389 struct comp_channel *c;
390
391 c = get_channel(iface, channel_id);
392 if (!c)
393 return -EINVAL;
394
395 if ((channel_id < 0) || (channel_id >= iface->num_channels)) {
396 dev_warn(c->dev, "Channel ID out of range\n");
397 return -EINVAL;
398 }
399
400 wake_up_interruptible(&c->wq);
401 return 0;
402 }
403
404 /**
405 * comp_probe - probe function of the driver module
406 * @iface: pointer to interface instance
407 * @channel_id: channel index/ID
408 * @cfg: pointer to actual channel configuration
409 * @name: name of the device to be created
410 * @args: pointer to array of component parameters (from configfs)
411 *
412 * This allocates a channel object and creates the device node in /dev
413 *
414 * Returns 0 on success or error code otherwise.
415 */
/**
 * comp_probe - probe function of the driver module
 * @iface: pointer to interface instance
 * @channel_id: channel index/ID
 * @cfg: pointer to actual channel configuration
 * @name: name of the device to be created
 * @args: pointer to array of component parameters (from configfs)
 *
 * This allocates a channel object and creates the device node in /dev
 *
 * Returns 0 on success or error code otherwise.
 */
static int comp_probe(struct most_interface *iface, int channel_id,
		      struct most_channel_config *cfg, char *name, char *args)
{
	struct comp_channel *c;
	unsigned long cl_flags;
	int retval;
	int current_minor;

	if (!cfg || !name)
		return -EINVAL;

	/* reject duplicate registration of the same channel */
	c = get_channel(iface, channel_id);
	if (c)
		return -EEXIST;

	current_minor = ida_alloc(&comp.minor_id, GFP_KERNEL);
	if (current_minor < 0)
		return current_minor;

	/*
	 * NOTE(review): presumably a zeroing allocation of sizeof(*c);
	 * confirm kzalloc_obj() exists in this tree and takes no explicit
	 * GFP flags (mainline historically used kzalloc(sizeof(*c),
	 * GFP_KERNEL) here).
	 */
	c = kzalloc_obj(*c);
	if (!c) {
		retval = -ENOMEM;
		goto err_remove_ida;
	}

	c->devno = MKDEV(comp.major, current_minor);
	cdev_init(&c->cdev, &channel_fops);
	c->cdev.owner = THIS_MODULE;
	retval = cdev_add(&c->cdev, c->devno, 1);
	if (retval < 0)
		goto err_free_c;
	c->iface = iface;
	c->cfg = cfg;
	c->channel_id = channel_id;
	c->access_ref = 0;
	spin_lock_init(&c->unlink);
	INIT_KFIFO(c->fifo);
	/* one fifo slot per configured buffer of the channel */
	retval = kfifo_alloc(&c->fifo, cfg->num_buffers, GFP_KERNEL);
	if (retval)
		goto err_del_cdev_and_free_channel;
	init_waitqueue_head(&c->wq);
	mutex_init(&c->io_mutex);
	spin_lock_irqsave(&ch_list_lock, cl_flags);
	list_add_tail(&c->list, &channel_list);
	spin_unlock_irqrestore(&ch_list_lock, cl_flags);
	c->dev = device_create(&most_cdev_class, NULL, c->devno, NULL, "%s", name);

	if (IS_ERR(c->dev)) {
		retval = PTR_ERR(c->dev);
		goto err_free_kfifo_and_del_list;
	}
	kobject_uevent(&c->dev->kobj, KOBJ_ADD);
	return 0;

	/* unwind in strict reverse order of acquisition */
err_free_kfifo_and_del_list:
	kfifo_free(&c->fifo);
	list_del(&c->list);
err_del_cdev_and_free_channel:
	cdev_del(&c->cdev);
err_free_c:
	kfree(c);
err_remove_ida:
	ida_free(&comp.minor_id, current_minor);
	return retval;
}
481
/* Component instance registered with mostcore; callbacks defined above */
static struct cdev_component comp = {
	.cc = {
		.mod = THIS_MODULE,
		.name = "cdev",
		.probe_channel = comp_probe,
		.disconnect_channel = comp_disconnect_channel,
		.rx_completion = comp_rx_completion,
		.tx_completion = comp_tx_completion,
	},
};
492
493
/*
 * most_cdev_init - module init: register the class, reserve a char
 * device region, and register the component (plus its configfs subsys)
 * with mostcore. Error paths unwind in strict reverse order.
 */
static int __init most_cdev_init(void)
{
	int err;

	err = class_register(&most_cdev_class);
	if (err)
		return err;

	ida_init(&comp.minor_id);

	err = alloc_chrdev_region(&comp.devno, 0, CHRDEV_REGION_SIZE, "cdev");
	if (err < 0)
		goto dest_ida;
	comp.major = MAJOR(comp.devno);
	err = most_register_component(&comp.cc);
	if (err)
		goto free_cdev;
	err = most_register_configfs_subsys(&comp.cc);
	if (err)
		goto deregister_comp;
	return 0;

deregister_comp:
	most_deregister_component(&comp.cc);
free_cdev:
	unregister_chrdev_region(comp.devno, CHRDEV_REGION_SIZE);
dest_ida:
	ida_destroy(&comp.minor_id);
	class_unregister(&most_cdev_class);
	return err;
}
525
/*
 * most_cdev_exit - module exit: deregister from mostcore first (so no
 * new callbacks arrive), then tear down every remaining channel and
 * release the char device region, ida and class.
 */
static void __exit most_cdev_exit(void)
{
	struct comp_channel *c, *tmp;

	most_deregister_configfs_subsys(&comp.cc);
	most_deregister_component(&comp.cc);

	/* _safe iteration: destroy_cdev() unlinks each entry as we go */
	list_for_each_entry_safe(c, tmp, &channel_list, list) {
		destroy_cdev(c);
		destroy_channel(c);
	}
	unregister_chrdev_region(comp.devno, CHRDEV_REGION_SIZE);
	ida_destroy(&comp.minor_id);
	class_unregister(&most_cdev_class);
}
541
542 module_init(most_cdev_init);
543 module_exit(most_cdev_exit);
544 MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
545 MODULE_LICENSE("GPL");
546 MODULE_DESCRIPTION("character device component for mostcore");
547