1 // SPDX-License-Identifier: GPL-2.0
2
3 #include <linux/anon_inodes.h>
4 #include <linux/atomic.h>
5 #include <linux/bitmap.h>
6 #include <linux/build_bug.h>
7 #include <linux/cdev.h>
8 #include <linux/cleanup.h>
9 #include <linux/compat.h>
10 #include <linux/compiler.h>
11 #include <linux/device.h>
12 #include <linux/err.h>
13 #include <linux/file.h>
14 #include <linux/gpio.h>
15 #include <linux/gpio/driver.h>
16 #include <linux/hte.h>
17 #include <linux/interrupt.h>
18 #include <linux/irqreturn.h>
19 #include <linux/kfifo.h>
20 #include <linux/module.h>
21 #include <linux/mutex.h>
22 #include <linux/overflow.h>
23 #include <linux/pinctrl/consumer.h>
24 #include <linux/poll.h>
25 #include <linux/seq_file.h>
26 #include <linux/spinlock.h>
27 #include <linux/string.h>
28 #include <linux/timekeeping.h>
29 #include <linux/uaccess.h>
30 #include <linux/workqueue.h>
31
32 #include <uapi/linux/gpio.h>
33
34 #include "gpiolib.h"
35 #include "gpiolib-cdev.h"
36
37 /*
38 * Array sizes must ensure 64-bit alignment and not create holes in the
39 * struct packing.
40 */
41 static_assert(IS_ALIGNED(GPIO_V2_LINES_MAX, 2));
42 static_assert(IS_ALIGNED(GPIO_MAX_NAME_SIZE, 8));
43
44 /*
45 * Check that uAPI structs are 64-bit aligned for 32/64-bit compatibility
46 */
47 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_attribute), 8));
48 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config_attribute), 8));
49 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config), 8));
50 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_request), 8));
51 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info), 8));
52 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info_changed), 8));
53 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_event), 8));
54 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_values), 8));
55
56 /* Character device interface to GPIO.
57 *
58 * The GPIO character device, /dev/gpiochipN, provides userspace an
59 * interface to gpiolib GPIOs via ioctl()s.
60 */
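/*
 * A minimal userspace sketch of the v2 flow implemented below, for
 * illustration only: the chip path, line offset and consumer name are
 * assumptions, and error handling plus the usual <fcntl.h>,
 * <sys/ioctl.h>, <string.h> and <linux/gpio.h> includes are omitted.
 *
 *	struct gpio_v2_line_request req = { 0 };
 *	struct gpio_v2_line_values lv = { 0 };
 *	int chip_fd = open("/dev/gpiochip0", O_RDONLY);
 *
 *	req.offsets[0] = 5;
 *	req.num_lines = 1;
 *	req.config.flags = GPIO_V2_LINE_FLAG_INPUT;
 *	strcpy(req.consumer, "example");
 *	ioctl(chip_fd, GPIO_V2_GET_LINE_IOCTL, &req);
 *
 *	lv.mask = 1;	// read line 0 of the request
 *	ioctl(req.fd, GPIO_V2_LINE_GET_VALUES_IOCTL, &lv);
 *	// bit 0 of lv.bits now holds the logical value of offset 5
 */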
61
62 /*
63 * GPIO line handle management
64 */
65
66 #ifdef CONFIG_GPIO_CDEV_V1
67 /**
68 * struct linehandle_state - contains the state of a userspace handle
69 * @gdev: the GPIO device the handle pertains to
70 * @label: consumer label used to tag descriptors
71 * @descs: the GPIO descriptors held by this handle
72 * @num_descs: the number of descriptors held in the descs array
73 */
74 struct linehandle_state {
75 struct gpio_device *gdev;
76 const char *label;
77 struct gpio_desc *descs[GPIOHANDLES_MAX];
78 u32 num_descs;
79 };
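/*
 * Sketch of the deprecated v1 handle flow served by this code, for
 * illustration only: chip_fd is assumed to be an open /dev/gpiochipN
 * descriptor, the offsets are arbitrary, and error handling is omitted.
 *
 *	struct gpiohandle_request hreq = { 0 };
 *	struct gpiohandle_data hdata = { 0 };
 *
 *	hreq.lineoffsets[0] = 3;
 *	hreq.lineoffsets[1] = 4;
 *	hreq.lines = 2;
 *	hreq.flags = GPIOHANDLE_REQUEST_OUTPUT;
 *	ioctl(chip_fd, GPIO_GET_LINEHANDLE_IOCTL, &hreq);
 *
 *	hdata.values[0] = 1;
 *	hdata.values[1] = 0;
 *	ioctl(hreq.fd, GPIOHANDLE_SET_LINE_VALUES_IOCTL, &hdata);
 */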
80
81 #define GPIOHANDLE_REQUEST_VALID_FLAGS \
82 (GPIOHANDLE_REQUEST_INPUT | \
83 GPIOHANDLE_REQUEST_OUTPUT | \
84 GPIOHANDLE_REQUEST_ACTIVE_LOW | \
85 GPIOHANDLE_REQUEST_BIAS_PULL_UP | \
86 GPIOHANDLE_REQUEST_BIAS_PULL_DOWN | \
87 GPIOHANDLE_REQUEST_BIAS_DISABLE | \
88 GPIOHANDLE_REQUEST_OPEN_DRAIN | \
89 GPIOHANDLE_REQUEST_OPEN_SOURCE)
90
91 #define GPIOHANDLE_REQUEST_DIRECTION_FLAGS \
92 (GPIOHANDLE_REQUEST_INPUT | \
93 GPIOHANDLE_REQUEST_OUTPUT)
94
95 static int linehandle_validate_flags(u32 flags)
96 {
97 /* Return an error if an unknown flag is set */
98 if (flags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
99 return -EINVAL;
100
101 /*
102 * Do not allow both INPUT & OUTPUT flags to be set as they are
103 * contradictory.
104 */
105 if ((flags & GPIOHANDLE_REQUEST_INPUT) &&
106 (flags & GPIOHANDLE_REQUEST_OUTPUT))
107 return -EINVAL;
108
109 /*
110 * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
111 * the hardware actually supports enabling both at the same time the
112 * electrical result would be disastrous.
113 */
114 if ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) &&
115 (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
116 return -EINVAL;
117
118 /* OPEN_DRAIN and OPEN_SOURCE flags only make sense for output mode. */
119 if (!(flags & GPIOHANDLE_REQUEST_OUTPUT) &&
120 ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
121 (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE)))
122 return -EINVAL;
123
124 /* Bias flags only allowed for input or output mode. */
125 if (!((flags & GPIOHANDLE_REQUEST_INPUT) ||
126 (flags & GPIOHANDLE_REQUEST_OUTPUT)) &&
127 ((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) ||
128 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP) ||
129 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN)))
130 return -EINVAL;
131
132 /* Only one bias flag can be set. */
133 if (((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
134 (flags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
135 GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
136 ((flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
137 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
138 return -EINVAL;
139
140 return 0;
141 }
142
143 static void linehandle_flags_to_desc_flags(u32 lflags, unsigned long *flagsp)
144 {
145 unsigned long flags = READ_ONCE(*flagsp);
146
147 assign_bit(FLAG_ACTIVE_LOW, &flags,
148 lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW);
149 assign_bit(FLAG_OPEN_DRAIN, &flags,
150 lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN);
151 assign_bit(FLAG_OPEN_SOURCE, &flags,
152 lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE);
153 assign_bit(FLAG_PULL_UP, &flags,
154 lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP);
155 assign_bit(FLAG_PULL_DOWN, &flags,
156 lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN);
157 assign_bit(FLAG_BIAS_DISABLE, &flags,
158 lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE);
159
160 WRITE_ONCE(*flagsp, flags);
161 }
162
163 static long linehandle_set_config(struct linehandle_state *lh,
164 void __user *ip)
165 {
166 struct gpiohandle_config gcnf;
167 struct gpio_desc *desc;
168 int i, ret;
169 u32 lflags;
170
171 if (copy_from_user(&gcnf, ip, sizeof(gcnf)))
172 return -EFAULT;
173
174 lflags = gcnf.flags;
175 ret = linehandle_validate_flags(lflags);
176 if (ret)
177 return ret;
178
179 /* Lines must be reconfigured explicitly as input or output. */
180 if (!(lflags & GPIOHANDLE_REQUEST_DIRECTION_FLAGS))
181 return -EINVAL;
182
183 for (i = 0; i < lh->num_descs; i++) {
184 desc = lh->descs[i];
185 linehandle_flags_to_desc_flags(lflags, &desc->flags);
186
187 if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
188 int val = !!gcnf.default_values[i];
189
190 ret = gpiod_direction_output_nonotify(desc, val);
191 if (ret)
192 return ret;
193 } else {
194 ret = gpiod_direction_input_nonotify(desc);
195 if (ret)
196 return ret;
197 }
198
199 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
200 }
201 return 0;
202 }
203
204 static long linehandle_ioctl(struct file *file, unsigned int cmd,
205 unsigned long arg)
206 {
207 struct linehandle_state *lh = file->private_data;
208 void __user *ip = (void __user *)arg;
209 struct gpiohandle_data ghd;
210 DECLARE_BITMAP(vals, GPIOHANDLES_MAX);
211 unsigned int i;
212 int ret;
213
214 guard(srcu)(&lh->gdev->srcu);
215
216 if (!rcu_access_pointer(lh->gdev->chip))
217 return -ENODEV;
218
219 switch (cmd) {
220 case GPIOHANDLE_GET_LINE_VALUES_IOCTL:
221 /* NOTE: It's okay to read values of output lines */
222 ret = gpiod_get_array_value_complex(false, true,
223 lh->num_descs, lh->descs,
224 NULL, vals);
225 if (ret)
226 return ret;
227
228 memset(&ghd, 0, sizeof(ghd));
229 for (i = 0; i < lh->num_descs; i++)
230 ghd.values[i] = test_bit(i, vals);
231
232 if (copy_to_user(ip, &ghd, sizeof(ghd)))
233 return -EFAULT;
234
235 return 0;
236 case GPIOHANDLE_SET_LINE_VALUES_IOCTL:
237 /*
238 * All line descriptors were created at once with the same
239 * flags so just check if the first one is really output.
240 */
241 if (!test_bit(FLAG_IS_OUT, &lh->descs[0]->flags))
242 return -EPERM;
243
244 if (copy_from_user(&ghd, ip, sizeof(ghd)))
245 return -EFAULT;
246
247 /* Clamp all values to [0,1] */
248 for (i = 0; i < lh->num_descs; i++)
249 __assign_bit(i, vals, ghd.values[i]);
250
251 /* Reuse the array setting function */
252 return gpiod_set_array_value_complex(false,
253 true,
254 lh->num_descs,
255 lh->descs,
256 NULL,
257 vals);
258 case GPIOHANDLE_SET_CONFIG_IOCTL:
259 return linehandle_set_config(lh, ip);
260 default:
261 return -EINVAL;
262 }
263 }
264
265 #ifdef CONFIG_COMPAT
266 static long linehandle_ioctl_compat(struct file *file, unsigned int cmd,
267 unsigned long arg)
268 {
269 return linehandle_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
270 }
271 #endif
272
273 static void linehandle_free(struct linehandle_state *lh)
274 {
275 int i;
276
277 for (i = 0; i < lh->num_descs; i++)
278 if (lh->descs[i])
279 gpiod_free(lh->descs[i]);
280 kfree(lh->label);
281 gpio_device_put(lh->gdev);
282 kfree(lh);
283 }
284
285 static int linehandle_release(struct inode *inode, struct file *file)
286 {
287 linehandle_free(file->private_data);
288 return 0;
289 }
290
291 static const struct file_operations linehandle_fileops = {
292 .release = linehandle_release,
293 .owner = THIS_MODULE,
294 .llseek = noop_llseek,
295 .unlocked_ioctl = linehandle_ioctl,
296 #ifdef CONFIG_COMPAT
297 .compat_ioctl = linehandle_ioctl_compat,
298 #endif
299 };
300
301 static int linehandle_create(struct gpio_device *gdev, void __user *ip)
302 {
303 struct gpiohandle_request handlereq;
304 struct linehandle_state *lh;
305 struct file *file;
306 int fd, i, ret;
307 u32 lflags;
308
309 if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
310 return -EFAULT;
311 if ((handlereq.lines == 0) || (handlereq.lines > GPIOHANDLES_MAX))
312 return -EINVAL;
313
314 lflags = handlereq.flags;
315
316 ret = linehandle_validate_flags(lflags);
317 if (ret)
318 return ret;
319
320 lh = kzalloc(sizeof(*lh), GFP_KERNEL);
321 if (!lh)
322 return -ENOMEM;
323 lh->gdev = gpio_device_get(gdev);
324
325 if (handlereq.consumer_label[0] != '\0') {
326 /* label is only initialized if consumer_label is set */
327 lh->label = kstrndup(handlereq.consumer_label,
328 sizeof(handlereq.consumer_label) - 1,
329 GFP_KERNEL);
330 if (!lh->label) {
331 ret = -ENOMEM;
332 goto out_free_lh;
333 }
334 }
335
336 lh->num_descs = handlereq.lines;
337
338 /* Request each GPIO */
339 for (i = 0; i < handlereq.lines; i++) {
340 u32 offset = handlereq.lineoffsets[i];
341 struct gpio_desc *desc = gpio_device_get_desc(gdev, offset);
342
343 if (IS_ERR(desc)) {
344 ret = PTR_ERR(desc);
345 goto out_free_lh;
346 }
347
348 ret = gpiod_request_user(desc, lh->label);
349 if (ret)
350 goto out_free_lh;
351 lh->descs[i] = desc;
352 linehandle_flags_to_desc_flags(handlereq.flags, &desc->flags);
353
354 ret = gpiod_set_transitory(desc, false);
355 if (ret < 0)
356 goto out_free_lh;
357
358 /*
359 * Lines have to be requested explicitly for input
360 * or output, else the line will be treated "as is".
361 */
362 if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
363 int val = !!handlereq.default_values[i];
364
365 ret = gpiod_direction_output_nonotify(desc, val);
366 if (ret)
367 goto out_free_lh;
368 } else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
369 ret = gpiod_direction_input_nonotify(desc);
370 if (ret)
371 goto out_free_lh;
372 }
373
374 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
375
376 dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
377 offset);
378 }
379
380 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
381 if (fd < 0) {
382 ret = fd;
383 goto out_free_lh;
384 }
385
386 file = anon_inode_getfile("gpio-linehandle",
387 &linehandle_fileops,
388 lh,
389 O_RDONLY | O_CLOEXEC);
390 if (IS_ERR(file)) {
391 ret = PTR_ERR(file);
392 goto out_put_unused_fd;
393 }
394
395 handlereq.fd = fd;
396 if (copy_to_user(ip, &handlereq, sizeof(handlereq))) {
397 /*
398 * fput() will trigger the release() callback, so do not go onto
399 * the regular error cleanup path here.
400 */
401 fput(file);
402 put_unused_fd(fd);
403 return -EFAULT;
404 }
405
406 fd_install(fd, file);
407
408 dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
409 lh->num_descs);
410
411 return 0;
412
413 out_put_unused_fd:
414 put_unused_fd(fd);
415 out_free_lh:
416 linehandle_free(lh);
417 return ret;
418 }
419 #endif /* CONFIG_GPIO_CDEV_V1 */
420
421 /**
422 * struct line - contains the state of a requested line
423 * @desc: the GPIO descriptor for this line.
424 * @req: the corresponding line request
425 * @irq: the interrupt triggered in response to events on this GPIO
426 * @edflags: the edge flags, GPIO_V2_LINE_FLAG_EDGE_RISING and/or
427 * GPIO_V2_LINE_FLAG_EDGE_FALLING, indicating the edge detection applied
428 * @timestamp_ns: cache for the timestamp storing it between hardirq and
429 * IRQ thread, used to bring the timestamp close to the actual event
430 * @req_seqno: the seqno for the current edge event in the sequence of
431 * events for the corresponding line request. This is drawn from the @req.
432 * @line_seqno: the seqno for the current edge event in the sequence of
433 * events for this line.
434 * @work: the worker that implements software debouncing
435 * @sw_debounced: flag indicating if the software debouncer is active
436 * @level: the current debounced physical level of the line
437 * @hdesc: the Hardware Timestamp Engine (HTE) descriptor
438 * @raw_level: the line level at the time of event
439 * @total_discard_seq: the running counter of the discarded events
440 * @last_seqno: the last sequence number before debounce period expires
441 */
442 struct line {
443 struct gpio_desc *desc;
444 /*
445 * -- edge detector specific fields --
446 */
447 struct linereq *req;
448 unsigned int irq;
449 /*
450 * The flags for the active edge detector configuration.
451 *
452 * edflags is set by linereq_create(), linereq_free(), and
453 * linereq_set_config(), which are themselves mutually
454 * exclusive, and is accessed by edge_irq_thread(),
455 * process_hw_ts_thread() and debounce_work_func(),
456 * which can all live with a slightly stale value.
457 */
458 u64 edflags;
459 /*
460 * timestamp_ns and req_seqno are accessed only by
461 * edge_irq_handler() and edge_irq_thread(), which are themselves
462 * mutually exclusive, so no additional protection is necessary.
463 */
464 u64 timestamp_ns;
465 u32 req_seqno;
466 /*
467 * line_seqno is accessed by either edge_irq_thread() or
468 * debounce_work_func(), which are themselves mutually exclusive,
469 * so no additional protection is necessary.
470 */
471 u32 line_seqno;
472 /*
473 * -- debouncer specific fields --
474 */
475 struct delayed_work work;
476 /*
477 * sw_debounce is accessed by linereq_set_config(), which is the
478 * only setter, and linereq_get_values(), which can live with a
479 * slightly stale value.
480 */
481 unsigned int sw_debounced;
482 /*
483 * level is accessed by debounce_work_func(), which is the only
484 * setter, and linereq_get_values() which can live with a slightly
485 * stale value.
486 */
487 unsigned int level;
488 #ifdef CONFIG_HTE
489 struct hte_ts_desc hdesc;
490 /*
491 * HTE provider sets line level at the time of event. The valid
492 * value is 0 or 1 and negative value for an error.
493 */
494 int raw_level;
495 /*
496 * when sw_debounce is set on an HTE enabled line, this is the running
497 * counter of the discarded events.
498 */
499 u32 total_discard_seq;
500 /*
501 * when sw_debounce is set on an HTE enabled line, this variable
502 * records the last sequence number before the debounce period expires.
503 */
504 u32 last_seqno;
505 #endif /* CONFIG_HTE */
506 };
507
508 /**
509 * struct linereq - contains the state of a userspace line request
510 * @gdev: the GPIO device the line request pertains to
511 * @label: consumer label used to tag GPIO descriptors
512 * @num_lines: the number of lines in the lines array
513 * @wait: wait queue that handles blocking reads of events
514 * @device_unregistered_nb: notifier block for receiving gdev unregister events
515 * @event_buffer_size: the number of elements allocated in @events
516 * @events: KFIFO for the GPIO events
517 * @seqno: the sequence number for edge events generated on all lines in
518 * this line request. Note that this is not used when @num_lines is 1, as
519 * the line_seqno is then the same and is cheaper to calculate.
520 * @config_mutex: mutex for serializing ioctl() calls to ensure consistency
521 * of configuration, particularly multi-step accesses to desc flags.
522 * @lines: the lines held by this line request, with @num_lines elements.
523 */
524 struct linereq {
525 struct gpio_device *gdev;
526 const char *label;
527 u32 num_lines;
528 wait_queue_head_t wait;
529 struct notifier_block device_unregistered_nb;
530 u32 event_buffer_size;
531 DECLARE_KFIFO_PTR(events, struct gpio_v2_line_event);
532 atomic_t seqno;
533 struct mutex config_mutex;
534 struct line lines[] __counted_by(num_lines);
535 };
536
537 #define GPIO_V2_LINE_BIAS_FLAGS \
538 (GPIO_V2_LINE_FLAG_BIAS_PULL_UP | \
539 GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN | \
540 GPIO_V2_LINE_FLAG_BIAS_DISABLED)
541
542 #define GPIO_V2_LINE_DIRECTION_FLAGS \
543 (GPIO_V2_LINE_FLAG_INPUT | \
544 GPIO_V2_LINE_FLAG_OUTPUT)
545
546 #define GPIO_V2_LINE_DRIVE_FLAGS \
547 (GPIO_V2_LINE_FLAG_OPEN_DRAIN | \
548 GPIO_V2_LINE_FLAG_OPEN_SOURCE)
549
550 #define GPIO_V2_LINE_EDGE_FLAGS \
551 (GPIO_V2_LINE_FLAG_EDGE_RISING | \
552 GPIO_V2_LINE_FLAG_EDGE_FALLING)
553
554 #define GPIO_V2_LINE_FLAG_EDGE_BOTH GPIO_V2_LINE_EDGE_FLAGS
555
556 #define GPIO_V2_LINE_VALID_FLAGS \
557 (GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
558 GPIO_V2_LINE_DIRECTION_FLAGS | \
559 GPIO_V2_LINE_DRIVE_FLAGS | \
560 GPIO_V2_LINE_EDGE_FLAGS | \
561 GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME | \
562 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
563 GPIO_V2_LINE_BIAS_FLAGS)
564
565 /* subset of flags relevant for edge detector configuration */
566 #define GPIO_V2_LINE_EDGE_DETECTOR_FLAGS \
567 (GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
568 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
569 GPIO_V2_LINE_EDGE_FLAGS)
570
571 static int linereq_unregistered_notify(struct notifier_block *nb,
572 unsigned long action, void *data)
573 {
574 struct linereq *lr = container_of(nb, struct linereq,
575 device_unregistered_nb);
576
577 wake_up_poll(&lr->wait, EPOLLIN | EPOLLERR);
578
579 return NOTIFY_OK;
580 }
581
582 static void linereq_put_event(struct linereq *lr,
583 struct gpio_v2_line_event *le)
584 {
585 bool overflow = false;
586
587 scoped_guard(spinlock, &lr->wait.lock) {
588 if (kfifo_is_full(&lr->events)) {
589 overflow = true;
590 kfifo_skip(&lr->events);
591 }
592 kfifo_in(&lr->events, le, 1);
593 }
594 if (!overflow)
595 wake_up_poll(&lr->wait, EPOLLIN);
596 else
597 pr_debug_ratelimited("event FIFO is full - event dropped\n");
598 }
599
600 static u64 line_event_timestamp(struct line *line)
601 {
602 if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
603 return ktime_get_real_ns();
604 else if (IS_ENABLED(CONFIG_HTE) &&
605 test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
606 return line->timestamp_ns;
607
608 return ktime_get_ns();
609 }
610
611 static u32 line_event_id(int level)
612 {
613 return level ? GPIO_V2_LINE_EVENT_RISING_EDGE :
614 GPIO_V2_LINE_EVENT_FALLING_EDGE;
615 }
616
617 static inline char *make_irq_label(const char *orig)
618 {
619 char *new;
620
621 if (!orig)
622 return NULL;
623
624 new = kstrdup_and_replace(orig, '/', ':', GFP_KERNEL);
625 if (!new)
626 return ERR_PTR(-ENOMEM);
627
628 return new;
629 }
630
631 static inline void free_irq_label(const char *label)
632 {
633 kfree(label);
634 }
635
636 #ifdef CONFIG_HTE
637
638 static enum hte_return process_hw_ts_thread(void *p)
639 {
640 struct line *line;
641 struct linereq *lr;
642 struct gpio_v2_line_event le;
643 u64 edflags;
644 int level;
645
646 if (!p)
647 return HTE_CB_HANDLED;
648
649 line = p;
650 lr = line->req;
651
652 memset(&le, 0, sizeof(le));
653
654 le.timestamp_ns = line->timestamp_ns;
655 edflags = READ_ONCE(line->edflags);
656
657 switch (edflags & GPIO_V2_LINE_EDGE_FLAGS) {
658 case GPIO_V2_LINE_FLAG_EDGE_BOTH:
659 level = (line->raw_level >= 0) ?
660 line->raw_level :
661 gpiod_get_raw_value_cansleep(line->desc);
662
663 if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
664 level = !level;
665
666 le.id = line_event_id(level);
667 break;
668 case GPIO_V2_LINE_FLAG_EDGE_RISING:
669 le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
670 break;
671 case GPIO_V2_LINE_FLAG_EDGE_FALLING:
672 le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
673 break;
674 default:
675 return HTE_CB_HANDLED;
676 }
677 le.line_seqno = line->line_seqno;
678 le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
679 le.offset = gpio_chip_hwgpio(line->desc);
680
681 linereq_put_event(lr, &le);
682
683 return HTE_CB_HANDLED;
684 }
685
686 static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
687 {
688 struct line *line;
689 struct linereq *lr;
690 int diff_seqno = 0;
691
692 if (!ts || !p)
693 return HTE_CB_HANDLED;
694
695 line = p;
696 line->timestamp_ns = ts->tsc;
697 line->raw_level = ts->raw_level;
698 lr = line->req;
699
700 if (READ_ONCE(line->sw_debounced)) {
701 line->total_discard_seq++;
702 line->last_seqno = ts->seq;
703 mod_delayed_work(system_wq, &line->work,
704 usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
705 } else {
706 if (unlikely(ts->seq < line->line_seqno))
707 return HTE_CB_HANDLED;
708
709 diff_seqno = ts->seq - line->line_seqno;
710 line->line_seqno = ts->seq;
711 if (lr->num_lines != 1)
712 line->req_seqno = atomic_add_return(diff_seqno,
713 &lr->seqno);
714
715 return HTE_RUN_SECOND_CB;
716 }
717
718 return HTE_CB_HANDLED;
719 }
720
721 static int hte_edge_setup(struct line *line, u64 eflags)
722 {
723 int ret;
724 unsigned long flags = 0;
725 struct hte_ts_desc *hdesc = &line->hdesc;
726
727 if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
728 flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
729 HTE_FALLING_EDGE_TS :
730 HTE_RISING_EDGE_TS;
731 if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
732 flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
733 HTE_RISING_EDGE_TS :
734 HTE_FALLING_EDGE_TS;
735
736 line->total_discard_seq = 0;
737
738 hte_init_line_attr(hdesc, desc_to_gpio(line->desc), flags, NULL,
739 line->desc);
740
741 ret = hte_ts_get(NULL, hdesc, 0);
742 if (ret)
743 return ret;
744
745 return hte_request_ts_ns(hdesc, process_hw_ts, process_hw_ts_thread,
746 line);
747 }
748
749 #else
750
751 static int hte_edge_setup(struct line *line, u64 eflags)
752 {
753 return 0;
754 }
755 #endif /* CONFIG_HTE */
756
757 static irqreturn_t edge_irq_thread(int irq, void *p)
758 {
759 struct line *line = p;
760 struct linereq *lr = line->req;
761 struct gpio_v2_line_event le;
762
763 /* Do not leak kernel stack to userspace */
764 memset(&le, 0, sizeof(le));
765
766 if (line->timestamp_ns) {
767 le.timestamp_ns = line->timestamp_ns;
768 } else {
769 /*
770 * We may be running from a nested threaded interrupt in
771 * which case we didn't get the timestamp from
772 * edge_irq_handler().
773 */
774 le.timestamp_ns = line_event_timestamp(line);
775 if (lr->num_lines != 1)
776 line->req_seqno = atomic_inc_return(&lr->seqno);
777 }
778 line->timestamp_ns = 0;
779
780 switch (READ_ONCE(line->edflags) & GPIO_V2_LINE_EDGE_FLAGS) {
781 case GPIO_V2_LINE_FLAG_EDGE_BOTH:
782 le.id = line_event_id(gpiod_get_value_cansleep(line->desc));
783 break;
784 case GPIO_V2_LINE_FLAG_EDGE_RISING:
785 le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
786 break;
787 case GPIO_V2_LINE_FLAG_EDGE_FALLING:
788 le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
789 break;
790 default:
791 return IRQ_NONE;
792 }
793 line->line_seqno++;
794 le.line_seqno = line->line_seqno;
795 le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
796 le.offset = gpio_chip_hwgpio(line->desc);
797
798 linereq_put_event(lr, &le);
799
800 return IRQ_HANDLED;
801 }
802
803 static irqreturn_t edge_irq_handler(int irq, void *p)
804 {
805 struct line *line = p;
806 struct linereq *lr = line->req;
807
808 /*
809 * Just store the timestamp in hardirq context so we get it as
810 * close in time as possible to the actual event.
811 */
812 line->timestamp_ns = line_event_timestamp(line);
813
814 if (lr->num_lines != 1)
815 line->req_seqno = atomic_inc_return(&lr->seqno);
816
817 return IRQ_WAKE_THREAD;
818 }
819
820 /*
821 * returns the current debounced logical value.
822 */
823 static bool debounced_value(struct line *line)
824 {
825 bool value;
826
827 /*
828 * minor race - debouncer may be stopped here, so edge_detector_stop()
829 * must leave the value unchanged so the following will read the level
830 * from when the debouncer was last running.
831 */
832 value = READ_ONCE(line->level);
833
834 if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
835 value = !value;
836
837 return value;
838 }
839
840 static irqreturn_t debounce_irq_handler(int irq, void *p)
841 {
842 struct line *line = p;
843
844 mod_delayed_work(system_wq, &line->work,
845 usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
846
847 return IRQ_HANDLED;
848 }
849
850 static void debounce_work_func(struct work_struct *work)
851 {
852 struct gpio_v2_line_event le;
853 struct line *line = container_of(work, struct line, work.work);
854 struct linereq *lr;
855 u64 eflags, edflags = READ_ONCE(line->edflags);
856 int level = -1;
857 #ifdef CONFIG_HTE
858 int diff_seqno;
859
860 if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
861 level = line->raw_level;
862 #endif
863 if (level < 0)
864 level = gpiod_get_raw_value_cansleep(line->desc);
865 if (level < 0) {
866 pr_debug_ratelimited("debouncer failed to read line value\n");
867 return;
868 }
869
870 if (READ_ONCE(line->level) == level)
871 return;
872
873 WRITE_ONCE(line->level, level);
874
875 /* -- edge detection -- */
876 eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
877 if (!eflags)
878 return;
879
880 /* switch from physical level to logical - if they differ */
881 if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
882 level = !level;
883
884 /* ignore edges that are not being monitored */
885 if (((eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) && !level) ||
886 ((eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) && level))
887 return;
888
889 /* Do not leak kernel stack to userspace */
890 memset(&le, 0, sizeof(le));
891
892 lr = line->req;
893 le.timestamp_ns = line_event_timestamp(line);
894 le.offset = gpio_chip_hwgpio(line->desc);
895 #ifdef CONFIG_HTE
896 if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) {
897 /* discard events except the last one */
898 line->total_discard_seq -= 1;
899 diff_seqno = line->last_seqno - line->total_discard_seq -
900 line->line_seqno;
901 line->line_seqno = line->last_seqno - line->total_discard_seq;
902 le.line_seqno = line->line_seqno;
903 le.seqno = (lr->num_lines == 1) ?
904 le.line_seqno : atomic_add_return(diff_seqno, &lr->seqno);
905 } else
906 #endif /* CONFIG_HTE */
907 {
908 line->line_seqno++;
909 le.line_seqno = line->line_seqno;
910 le.seqno = (lr->num_lines == 1) ?
911 le.line_seqno : atomic_inc_return(&lr->seqno);
912 }
913
914 le.id = line_event_id(level);
915
916 linereq_put_event(lr, &le);
917 }
918
919 static int debounce_setup(struct line *line, unsigned int debounce_period_us)
920 {
921 unsigned long irqflags;
922 int ret, level, irq;
923 char *label;
924
925 /*
926 * Try hardware. Skip gpiod_set_config() to avoid emitting two
927 * CHANGED_CONFIG line state events.
928 */
929 ret = gpio_do_set_config(line->desc,
930 pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE,
931 debounce_period_us));
932 if (ret != -ENOTSUPP)
933 return ret;
934
935 if (debounce_period_us) {
936 /* setup software debounce */
937 level = gpiod_get_raw_value_cansleep(line->desc);
938 if (level < 0)
939 return level;
940
941 if (!(IS_ENABLED(CONFIG_HTE) &&
942 test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))) {
943 irq = gpiod_to_irq(line->desc);
944 if (irq < 0)
945 return -ENXIO;
946
947 label = make_irq_label(line->req->label);
948 if (IS_ERR(label))
949 return -ENOMEM;
950
951 irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
952 ret = request_irq(irq, debounce_irq_handler, irqflags,
953 label, line);
954 if (ret) {
955 free_irq_label(label);
956 return ret;
957 }
958 line->irq = irq;
959 } else {
960 ret = hte_edge_setup(line, GPIO_V2_LINE_FLAG_EDGE_BOTH);
961 if (ret)
962 return ret;
963 }
964
965 WRITE_ONCE(line->level, level);
966 WRITE_ONCE(line->sw_debounced, 1);
967 }
968 return 0;
969 }
970
971 static bool gpio_v2_line_config_debounced(struct gpio_v2_line_config *lc,
972 unsigned int line_idx)
973 {
974 unsigned int i;
975 u64 mask = BIT_ULL(line_idx);
976
977 for (i = 0; i < lc->num_attrs; i++) {
978 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
979 (lc->attrs[i].mask & mask))
980 return true;
981 }
982 return false;
983 }
984
985 static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
986 unsigned int line_idx)
987 {
988 unsigned int i;
989 u64 mask = BIT_ULL(line_idx);
990
991 for (i = 0; i < lc->num_attrs; i++) {
992 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
993 (lc->attrs[i].mask & mask))
994 return lc->attrs[i].attr.debounce_period_us;
995 }
996 return 0;
997 }
998
999 static void edge_detector_stop(struct line *line)
1000 {
1001 if (line->irq) {
1002 free_irq_label(free_irq(line->irq, line));
1003 line->irq = 0;
1004 }
1005
1006 #ifdef CONFIG_HTE
1007 if (READ_ONCE(line->edflags) & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
1008 hte_ts_put(&line->hdesc);
1009 #endif
1010
1011 cancel_delayed_work_sync(&line->work);
1012 WRITE_ONCE(line->sw_debounced, 0);
1013 WRITE_ONCE(line->edflags, 0);
1014 if (line->desc)
1015 WRITE_ONCE(line->desc->debounce_period_us, 0);
1016 /* do not change line->level - see comment in debounced_value() */
1017 }
1018
1019 static int edge_detector_fifo_init(struct linereq *req)
1020 {
1021 if (kfifo_initialized(&req->events))
1022 return 0;
1023
1024 return kfifo_alloc(&req->events, req->event_buffer_size, GFP_KERNEL);
1025 }
1026
1027 static int edge_detector_setup(struct line *line,
1028 struct gpio_v2_line_config *lc,
1029 unsigned int line_idx, u64 edflags)
1030 {
1031 u32 debounce_period_us;
1032 unsigned long irqflags = 0;
1033 u64 eflags;
1034 int irq, ret;
1035 char *label;
1036
1037 eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
1038 if (eflags) {
1039 ret = edge_detector_fifo_init(line->req);
1040 if (ret)
1041 return ret;
1042 }
1043 if (gpio_v2_line_config_debounced(lc, line_idx)) {
1044 debounce_period_us = gpio_v2_line_config_debounce_period(lc, line_idx);
1045 ret = debounce_setup(line, debounce_period_us);
1046 if (ret)
1047 return ret;
1048 WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
1049 }
1050
1051 /* detection disabled or sw debouncer will provide edge detection */
1052 if (!eflags || READ_ONCE(line->sw_debounced))
1053 return 0;
1054
1055 if (IS_ENABLED(CONFIG_HTE) &&
1056 (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
1057 return hte_edge_setup(line, edflags);
1058
1059 irq = gpiod_to_irq(line->desc);
1060 if (irq < 0)
1061 return -ENXIO;
1062
1063 if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
1064 irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
1065 IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
1066 if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
1067 irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
1068 IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
1069 irqflags |= IRQF_ONESHOT;
1070
1071 label = make_irq_label(line->req->label);
1072 if (IS_ERR(label))
1073 return PTR_ERR(label);
1074
1075 /* Request a thread to read the events */
1076 ret = request_threaded_irq(irq, edge_irq_handler, edge_irq_thread,
1077 irqflags, label, line);
1078 if (ret) {
1079 free_irq_label(label);
1080 return ret;
1081 }
1082
1083 line->irq = irq;
1084 return 0;
1085 }
1086
1087 static int edge_detector_update(struct line *line,
1088 struct gpio_v2_line_config *lc,
1089 unsigned int line_idx, u64 edflags)
1090 {
1091 u64 active_edflags = READ_ONCE(line->edflags);
1092 unsigned int debounce_period_us =
1093 gpio_v2_line_config_debounce_period(lc, line_idx);
1094
1095 if ((active_edflags == edflags) &&
1096 (READ_ONCE(line->desc->debounce_period_us) == debounce_period_us))
1097 return 0;
1098
1099 /* sw debounced and still will be...*/
1100 if (debounce_period_us && READ_ONCE(line->sw_debounced)) {
1101 WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
1102 /*
1103 * ensure event fifo is initialised if edge detection
1104 * is now enabled.
1105 */
1106 if (edflags & GPIO_V2_LINE_EDGE_FLAGS)
1107 return edge_detector_fifo_init(line->req);
1108
1109 return 0;
1110 }
1111
1112 /* reconfiguring edge detection or sw debounce being disabled */
1113 if ((line->irq && !READ_ONCE(line->sw_debounced)) ||
1114 (active_edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) ||
1115 (!debounce_period_us && READ_ONCE(line->sw_debounced)))
1116 edge_detector_stop(line);
1117
1118 return edge_detector_setup(line, lc, line_idx, edflags);
1119 }
1120
1121 static u64 gpio_v2_line_config_flags(struct gpio_v2_line_config *lc,
1122 unsigned int line_idx)
1123 {
1124 unsigned int i;
1125 u64 mask = BIT_ULL(line_idx);
1126
1127 for (i = 0; i < lc->num_attrs; i++) {
1128 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_FLAGS) &&
1129 (lc->attrs[i].mask & mask))
1130 return lc->attrs[i].attr.flags;
1131 }
1132 return lc->flags;
1133 }
1134
1135 static int gpio_v2_line_config_output_value(struct gpio_v2_line_config *lc,
1136 unsigned int line_idx)
1137 {
1138 unsigned int i;
1139 u64 mask = BIT_ULL(line_idx);
1140
1141 for (i = 0; i < lc->num_attrs; i++) {
1142 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES) &&
1143 (lc->attrs[i].mask & mask))
1144 return !!(lc->attrs[i].attr.values & mask);
1145 }
1146 return 0;
1147 }
1148
1149 static int gpio_v2_line_flags_validate(u64 flags)
1150 {
1151 /* Return an error if an unknown flag is set */
1152 if (flags & ~GPIO_V2_LINE_VALID_FLAGS)
1153 return -EINVAL;
1154
1155 if (!IS_ENABLED(CONFIG_HTE) &&
1156 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
1157 return -EOPNOTSUPP;
1158
1159 /*
1160 * Do not allow both INPUT and OUTPUT flags to be set as they are
1161 * contradictory.
1162 */
1163 if ((flags & GPIO_V2_LINE_FLAG_INPUT) &&
1164 (flags & GPIO_V2_LINE_FLAG_OUTPUT))
1165 return -EINVAL;
1166
1167 /* Only allow one event clock source */
1168 if (IS_ENABLED(CONFIG_HTE) &&
1169 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME) &&
1170 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
1171 return -EINVAL;
1172
1173 /* Edge detection requires explicit input. */
1174 if ((flags & GPIO_V2_LINE_EDGE_FLAGS) &&
1175 !(flags & GPIO_V2_LINE_FLAG_INPUT))
1176 return -EINVAL;
1177
1178 /*
1179 * Do not allow OPEN_SOURCE and OPEN_DRAIN flags in a single
1180 * request. If the hardware actually supports enabling both at the
1181 * same time the electrical result would be disastrous.
1182 */
1183 if ((flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN) &&
1184 (flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE))
1185 return -EINVAL;
1186
1187 /* Drive requires explicit output direction. */
1188 if ((flags & GPIO_V2_LINE_DRIVE_FLAGS) &&
1189 !(flags & GPIO_V2_LINE_FLAG_OUTPUT))
1190 return -EINVAL;
1191
1192 /* Bias requires explicit direction. */
1193 if ((flags & GPIO_V2_LINE_BIAS_FLAGS) &&
1194 !(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
1195 return -EINVAL;
1196
1197 /* Only one bias flag can be set. */
1198 if (((flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED) &&
1199 (flags & (GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN |
1200 GPIO_V2_LINE_FLAG_BIAS_PULL_UP))) ||
1201 ((flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN) &&
1202 (flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)))
1203 return -EINVAL;
1204
1205 return 0;
1206 }
1207
1208 static int gpio_v2_line_config_validate(struct gpio_v2_line_config *lc,
1209 unsigned int num_lines)
1210 {
1211 unsigned int i;
1212 u64 flags;
1213 int ret;
1214
1215 if (lc->num_attrs > GPIO_V2_LINE_NUM_ATTRS_MAX)
1216 return -EINVAL;
1217
1218 if (!mem_is_zero(lc->padding, sizeof(lc->padding)))
1219 return -EINVAL;
1220
1221 for (i = 0; i < num_lines; i++) {
1222 flags = gpio_v2_line_config_flags(lc, i);
1223 ret = gpio_v2_line_flags_validate(flags);
1224 if (ret)
1225 return ret;
1226
1227 /* debounce requires explicit input */
1228 if (gpio_v2_line_config_debounced(lc, i) &&
1229 !(flags & GPIO_V2_LINE_FLAG_INPUT))
1230 return -EINVAL;
1231 }
1232 return 0;
1233 }
1234
1235 static void gpio_v2_line_config_flags_to_desc_flags(u64 lflags,
1236 unsigned long *flagsp)
1237 {
1238 unsigned long flags = READ_ONCE(*flagsp);
1239
1240 assign_bit(FLAG_ACTIVE_LOW, &flags,
1241 lflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW);
1242
1243 if (lflags & GPIO_V2_LINE_FLAG_OUTPUT)
1244 set_bit(FLAG_IS_OUT, &flags);
1245 else if (lflags & GPIO_V2_LINE_FLAG_INPUT)
1246 clear_bit(FLAG_IS_OUT, &flags);
1247
1248 assign_bit(FLAG_EDGE_RISING, &flags,
1249 lflags & GPIO_V2_LINE_FLAG_EDGE_RISING);
1250 assign_bit(FLAG_EDGE_FALLING, &flags,
1251 lflags & GPIO_V2_LINE_FLAG_EDGE_FALLING);
1252
1253 assign_bit(FLAG_OPEN_DRAIN, &flags,
1254 lflags & GPIO_V2_LINE_FLAG_OPEN_DRAIN);
1255 assign_bit(FLAG_OPEN_SOURCE, &flags,
1256 lflags & GPIO_V2_LINE_FLAG_OPEN_SOURCE);
1257
1258 assign_bit(FLAG_PULL_UP, &flags,
1259 lflags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP);
1260 assign_bit(FLAG_PULL_DOWN, &flags,
1261 lflags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN);
1262 assign_bit(FLAG_BIAS_DISABLE, &flags,
1263 lflags & GPIO_V2_LINE_FLAG_BIAS_DISABLED);
1264
1265 assign_bit(FLAG_EVENT_CLOCK_REALTIME, &flags,
1266 lflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME);
1267 assign_bit(FLAG_EVENT_CLOCK_HTE, &flags,
1268 lflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
1269
1270 WRITE_ONCE(*flagsp, flags);
1271 }
1272
1273 static long linereq_get_values(struct linereq *lr, void __user *ip)
1274 {
1275 struct gpio_v2_line_values lv;
1276 DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
1277 struct gpio_desc **descs;
1278 unsigned int i, didx, num_get;
1279 bool val;
1280 int ret;
1281
1282 /* NOTE: It's ok to read values of output lines. */
1283 if (copy_from_user(&lv, ip, sizeof(lv)))
1284 return -EFAULT;
1285
1286 /*
1287 * gpiod_get_array_value_complex() requires compacted desc and val
1288 * arrays, rather than the sparse ones in lv.
1289 * Calculation of num_get and construction of the desc array is
1290 * optimized to avoid allocation for the desc array for the common
1291 * num_get == 1 case.
1292 */
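/*
 * For example, with num_lines == 4 and lv.mask == 0b1010 only
 * lines[1] and lines[3] are read: num_get == 2, descs holds their
 * two descriptors back to back, and the two result bits come back
 * compacted in vals before being scattered into lv.bits below.
 */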
1293 /* scan requested lines to calculate the subset to get */
1294 for (num_get = 0, i = 0; i < lr->num_lines; i++) {
1295 if (lv.mask & BIT_ULL(i)) {
1296 num_get++;
1297 /* capture desc for the num_get == 1 case */
1298 descs = &lr->lines[i].desc;
1299 }
1300 }
1301
1302 if (num_get == 0)
1303 return -EINVAL;
1304
1305 if (num_get != 1) {
1306 /* build compacted desc array */
1307 descs = kmalloc_array(num_get, sizeof(*descs), GFP_KERNEL);
1308 if (!descs)
1309 return -ENOMEM;
1310 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1311 if (lv.mask & BIT_ULL(i)) {
1312 descs[didx] = lr->lines[i].desc;
1313 didx++;
1314 }
1315 }
1316 }
1317 ret = gpiod_get_array_value_complex(false, true, num_get,
1318 descs, NULL, vals);
1319
1320 if (num_get != 1)
1321 kfree(descs);
1322 if (ret)
1323 return ret;
1324
1325 lv.bits = 0;
1326 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1327 /* unpack compacted vals for the response */
1328 if (lv.mask & BIT_ULL(i)) {
1329 if (lr->lines[i].sw_debounced)
1330 val = debounced_value(&lr->lines[i]);
1331 else
1332 val = test_bit(didx, vals);
1333 if (val)
1334 lv.bits |= BIT_ULL(i);
1335 didx++;
1336 }
1337 }
1338
1339 if (copy_to_user(ip, &lv, sizeof(lv)))
1340 return -EFAULT;
1341
1342 return 0;
1343 }
1344
1345 static long linereq_set_values(struct linereq *lr, void __user *ip)
1346 {
1347 DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
1348 struct gpio_v2_line_values lv;
1349 struct gpio_desc **descs;
1350 unsigned int i, didx, num_set;
1351 int ret;
1352
1353 if (copy_from_user(&lv, ip, sizeof(lv)))
1354 return -EFAULT;
1355
1356 guard(mutex)(&lr->config_mutex);
1357
1358 /*
1359 * gpiod_set_array_value_complex() requires compacted desc and val
1360 * arrays, rather than the sparse ones in lv.
1361 * Calculation of num_set and construction of the descs and vals arrays
1362 * is optimized to minimize scanning the lv->mask, and to avoid
1363 * allocation for the desc array for the common num_set == 1 case.
1364 */
1365 bitmap_zero(vals, GPIO_V2_LINES_MAX);
1366 /* scan requested lines to determine the subset to be set */
1367 for (num_set = 0, i = 0; i < lr->num_lines; i++) {
1368 if (lv.mask & BIT_ULL(i)) {
1369 /* add to compacted values */
1370 if (lv.bits & BIT_ULL(i))
1371 __set_bit(num_set, vals);
1372 num_set++;
1373 /* capture desc for the num_set == 1 case */
1374 descs = &lr->lines[i].desc;
1375 }
1376 }
1377 if (num_set == 0)
1378 return -EINVAL;
1379
1380 if (num_set != 1) {
1381 /* build compacted desc array */
1382 descs = kmalloc_array(num_set, sizeof(*descs), GFP_KERNEL);
1383 if (!descs)
1384 return -ENOMEM;
1385 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1386 if (lv.mask & BIT_ULL(i)) {
1387 descs[didx] = lr->lines[i].desc;
1388 didx++;
1389 }
1390 }
1391 }
1392 ret = gpiod_set_array_value_complex(false, true, num_set,
1393 descs, NULL, vals);
1394
1395 if (num_set != 1)
1396 kfree(descs);
1397 return ret;
1398 }
1399
1400 static long linereq_set_config(struct linereq *lr, void __user *ip)
1401 {
1402 struct gpio_v2_line_config lc;
1403 struct gpio_desc *desc;
1404 struct line *line;
1405 unsigned int i;
1406 u64 flags, edflags;
1407 int ret;
1408
1409 if (copy_from_user(&lc, ip, sizeof(lc)))
1410 return -EFAULT;
1411
1412 ret = gpio_v2_line_config_validate(&lc, lr->num_lines);
1413 if (ret)
1414 return ret;
1415
1416 guard(mutex)(&lr->config_mutex);
1417
1418 for (i = 0; i < lr->num_lines; i++) {
1419 line = &lr->lines[i];
1420 desc = lr->lines[i].desc;
1421 flags = gpio_v2_line_config_flags(&lc, i);
1422 /*
1423 * Lines not explicitly reconfigured as input or output
1424 * are left unchanged.
1425 */
1426 if (!(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
1427 continue;
1428 gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
1429 edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
1430 if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
1431 int val = gpio_v2_line_config_output_value(&lc, i);
1432
1433 edge_detector_stop(line);
1434 ret = gpiod_direction_output_nonotify(desc, val);
1435 if (ret)
1436 return ret;
1437 } else {
1438 ret = gpiod_direction_input_nonotify(desc);
1439 if (ret)
1440 return ret;
1441
1442 ret = edge_detector_update(line, &lc, i, edflags);
1443 if (ret)
1444 return ret;
1445 }
1446
1447 WRITE_ONCE(line->edflags, edflags);
1448
1449 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
1450 }
1451 return 0;
1452 }
1453
1454 static long linereq_ioctl(struct file *file, unsigned int cmd,
1455 unsigned long arg)
1456 {
1457 struct linereq *lr = file->private_data;
1458 void __user *ip = (void __user *)arg;
1459
1460 guard(srcu)(&lr->gdev->srcu);
1461
1462 if (!rcu_access_pointer(lr->gdev->chip))
1463 return -ENODEV;
1464
1465 switch (cmd) {
1466 case GPIO_V2_LINE_GET_VALUES_IOCTL:
1467 return linereq_get_values(lr, ip);
1468 case GPIO_V2_LINE_SET_VALUES_IOCTL:
1469 return linereq_set_values(lr, ip);
1470 case GPIO_V2_LINE_SET_CONFIG_IOCTL:
1471 return linereq_set_config(lr, ip);
1472 default:
1473 return -EINVAL;
1474 }
1475 }
1476
1477 #ifdef CONFIG_COMPAT
1478 static long linereq_ioctl_compat(struct file *file, unsigned int cmd,
1479 unsigned long arg)
1480 {
1481 return linereq_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
1482 }
1483 #endif
1484
1485 static __poll_t linereq_poll(struct file *file,
1486 struct poll_table_struct *wait)
1487 {
1488 struct linereq *lr = file->private_data;
1489 __poll_t events = 0;
1490
1491 guard(srcu)(&lr->gdev->srcu);
1492
1493 if (!rcu_access_pointer(lr->gdev->chip))
1494 return EPOLLHUP | EPOLLERR;
1495
1496 poll_wait(file, &lr->wait, wait);
1497
1498 if (!kfifo_is_empty_spinlocked_noirqsave(&lr->events,
1499 &lr->wait.lock))
1500 events = EPOLLIN | EPOLLRDNORM;
1501
1502 return events;
1503 }
1504
1505 static ssize_t linereq_read(struct file *file, char __user *buf,
1506 size_t count, loff_t *f_ps)
1507 {
1508 struct linereq *lr = file->private_data;
1509 struct gpio_v2_line_event le;
1510 ssize_t bytes_read = 0;
1511 int ret;
1512
1513 guard(srcu)(&lr->gdev->srcu);
1514
1515 if (!rcu_access_pointer(lr->gdev->chip))
1516 return -ENODEV;
1517
1518 if (count < sizeof(le))
1519 return -EINVAL;
1520
1521 do {
1522 scoped_guard(spinlock, &lr->wait.lock) {
1523 if (kfifo_is_empty(&lr->events)) {
1524 if (bytes_read)
1525 return bytes_read;
1526
1527 if (file->f_flags & O_NONBLOCK)
1528 return -EAGAIN;
1529
1530 ret = wait_event_interruptible_locked(lr->wait,
1531 !kfifo_is_empty(&lr->events));
1532 if (ret)
1533 return ret;
1534 }
1535
1536 if (kfifo_out(&lr->events, &le, 1) != 1) {
1537 /*
1538 * This should never happen - we hold the
1539 * lock from the moment we learned the fifo
1540 * is no longer empty until now.
1541 */
1542 WARN(1, "failed to read from non-empty kfifo");
1543 return -EIO;
1544 }
1545 }
1546
1547 if (copy_to_user(buf + bytes_read, &le, sizeof(le)))
1548 return -EFAULT;
1549 bytes_read += sizeof(le);
1550 } while (count >= bytes_read + sizeof(le));
1551
1552 return bytes_read;
1553 }
1554
1555 static void linereq_free(struct linereq *lr)
1556 {
1557 unsigned int i;
1558
1559 if (lr->device_unregistered_nb.notifier_call)
1560 blocking_notifier_chain_unregister(&lr->gdev->device_notifier,
1561 &lr->device_unregistered_nb);
1562
1563 for (i = 0; i < lr->num_lines; i++) {
1564 if (lr->lines[i].desc) {
1565 edge_detector_stop(&lr->lines[i]);
1566 gpiod_free(lr->lines[i].desc);
1567 }
1568 }
1569 kfifo_free(&lr->events);
1570 kfree(lr->label);
1571 gpio_device_put(lr->gdev);
1572 kvfree(lr);
1573 }
1574
1575 static int linereq_release(struct inode *inode, struct file *file)
1576 {
1577 struct linereq *lr = file->private_data;
1578
1579 linereq_free(lr);
1580 return 0;
1581 }
1582
1583 #ifdef CONFIG_PROC_FS
1584 static void linereq_show_fdinfo(struct seq_file *out, struct file *file)
1585 {
1586 struct linereq *lr = file->private_data;
1587 struct device *dev = &lr->gdev->dev;
1588 u16 i;
1589
1590 seq_printf(out, "gpio-chip:\t%s\n", dev_name(dev));
1591
1592 for (i = 0; i < lr->num_lines; i++)
1593 seq_printf(out, "gpio-line:\t%d\n",
1594 gpio_chip_hwgpio(lr->lines[i].desc));
1595 }
1596 #endif
1597
1598 static const struct file_operations line_fileops = {
1599 .release = linereq_release,
1600 .read = linereq_read,
1601 .poll = linereq_poll,
1602 .owner = THIS_MODULE,
1603 .llseek = noop_llseek,
1604 .unlocked_ioctl = linereq_ioctl,
1605 #ifdef CONFIG_COMPAT
1606 .compat_ioctl = linereq_ioctl_compat,
1607 #endif
1608 #ifdef CONFIG_PROC_FS
1609 .show_fdinfo = linereq_show_fdinfo,
1610 #endif
1611 };
1612
1613 static int linereq_create(struct gpio_device *gdev, void __user *ip)
1614 {
1615 struct gpio_v2_line_request ulr;
1616 struct gpio_v2_line_config *lc;
1617 struct linereq *lr;
1618 struct file *file;
1619 u64 flags, edflags;
1620 unsigned int i;
1621 int fd, ret;
1622
1623 if (copy_from_user(&ulr, ip, sizeof(ulr)))
1624 return -EFAULT;
1625
1626 if ((ulr.num_lines == 0) || (ulr.num_lines > GPIO_V2_LINES_MAX))
1627 return -EINVAL;
1628
1629 if (!mem_is_zero(ulr.padding, sizeof(ulr.padding)))
1630 return -EINVAL;
1631
1632 lc = &ulr.config;
1633 ret = gpio_v2_line_config_validate(lc, ulr.num_lines);
1634 if (ret)
1635 return ret;
1636
1637 lr = kvzalloc(struct_size(lr, lines, ulr.num_lines), GFP_KERNEL);
1638 if (!lr)
1639 return -ENOMEM;
1640 lr->num_lines = ulr.num_lines;
1641
1642 lr->gdev = gpio_device_get(gdev);
1643
1644 for (i = 0; i < ulr.num_lines; i++) {
1645 lr->lines[i].req = lr;
1646 WRITE_ONCE(lr->lines[i].sw_debounced, 0);
1647 INIT_DELAYED_WORK(&lr->lines[i].work, debounce_work_func);
1648 }
1649
1650 if (ulr.consumer[0] != '\0') {
1651 /* label is only initialized if consumer is set */
1652 lr->label = kstrndup(ulr.consumer, sizeof(ulr.consumer) - 1,
1653 GFP_KERNEL);
1654 if (!lr->label) {
1655 ret = -ENOMEM;
1656 goto out_free_linereq;
1657 }
1658 }
1659
1660 mutex_init(&lr->config_mutex);
1661 init_waitqueue_head(&lr->wait);
1662 INIT_KFIFO(lr->events);
1663 lr->event_buffer_size = ulr.event_buffer_size;
1664 if (lr->event_buffer_size == 0)
1665 lr->event_buffer_size = ulr.num_lines * 16;
1666 else if (lr->event_buffer_size > GPIO_V2_LINES_MAX * 16)
1667 lr->event_buffer_size = GPIO_V2_LINES_MAX * 16;
1668
1669 atomic_set(&lr->seqno, 0);
1670
1671 /* Request each GPIO */
1672 for (i = 0; i < ulr.num_lines; i++) {
1673 u32 offset = ulr.offsets[i];
1674 struct gpio_desc *desc = gpio_device_get_desc(gdev, offset);
1675
1676 if (IS_ERR(desc)) {
1677 ret = PTR_ERR(desc);
1678 goto out_free_linereq;
1679 }
1680
1681 ret = gpiod_request_user(desc, lr->label);
1682 if (ret)
1683 goto out_free_linereq;
1684
1685 lr->lines[i].desc = desc;
1686 flags = gpio_v2_line_config_flags(lc, i);
1687 gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
1688
1689 ret = gpiod_set_transitory(desc, false);
1690 if (ret < 0)
1691 goto out_free_linereq;
1692
1693 edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
1694 /*
1695 * Lines have to be requested explicitly for input
1696 * or output, else the line will be treated "as is".
1697 */
1698 if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
1699 int val = gpio_v2_line_config_output_value(lc, i);
1700
1701 ret = gpiod_direction_output_nonotify(desc, val);
1702 if (ret)
1703 goto out_free_linereq;
1704 } else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
1705 ret = gpiod_direction_input_nonotify(desc);
1706 if (ret)
1707 goto out_free_linereq;
1708
1709 ret = edge_detector_setup(&lr->lines[i], lc, i,
1710 edflags);
1711 if (ret)
1712 goto out_free_linereq;
1713 }
1714
1715 lr->lines[i].edflags = edflags;
1716
1717 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
1718
1719 dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
1720 offset);
1721 }
1722
1723 lr->device_unregistered_nb.notifier_call = linereq_unregistered_notify;
1724 ret = blocking_notifier_chain_register(&gdev->device_notifier,
1725 &lr->device_unregistered_nb);
1726 if (ret)
1727 goto out_free_linereq;
1728
1729 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
1730 if (fd < 0) {
1731 ret = fd;
1732 goto out_free_linereq;
1733 }
1734
1735 file = anon_inode_getfile("gpio-line", &line_fileops, lr,
1736 O_RDONLY | O_CLOEXEC);
1737 if (IS_ERR(file)) {
1738 ret = PTR_ERR(file);
1739 goto out_put_unused_fd;
1740 }
1741
1742 ulr.fd = fd;
1743 if (copy_to_user(ip, &ulr, sizeof(ulr))) {
1744 /*
1745 * fput() will trigger the release() callback, so do not go onto
1746 * the regular error cleanup path here.
1747 */
1748 fput(file);
1749 put_unused_fd(fd);
1750 return -EFAULT;
1751 }
1752
1753 fd_install(fd, file);
1754
1755 dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
1756 lr->num_lines);
1757
1758 return 0;
1759
1760 out_put_unused_fd:
1761 put_unused_fd(fd);
1762 out_free_linereq:
1763 linereq_free(lr);
1764 return ret;
1765 }
1766
1767 #ifdef CONFIG_GPIO_CDEV_V1
1768
1769 /*
1770 * GPIO line event management
1771 */
1772
1773 /**
1774 * struct lineevent_state - contains the state of a userspace event
1775 * @gdev: the GPIO device the event pertains to
1776 * @label: consumer label used to tag descriptors
1777 * @desc: the GPIO descriptor held by this event
1778 * @eflags: the event flags this line was requested with
1779 * @irq: the interrupt triggered in response to events on this GPIO
1780 * @wait: wait queue that handles blocking reads of events
1781 * @device_unregistered_nb: notifier block for receiving gdev unregister events
1782 * @events: KFIFO for the GPIO events
1783 * @timestamp: cache for the timestamp storing it between hardirq
1784 * and IRQ thread, used to bring the timestamp close to the actual
1785 * event
1786 */
1787 struct lineevent_state {
1788 struct gpio_device *gdev;
1789 const char *label;
1790 struct gpio_desc *desc;
1791 u32 eflags;
1792 int irq;
1793 wait_queue_head_t wait;
1794 struct notifier_block device_unregistered_nb;
1795 DECLARE_KFIFO(events, struct gpioevent_data, 16);
1796 u64 timestamp;
1797 };
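/*
 * Sketch of the deprecated v1 event flow served by this code, for
 * illustration only: chip_fd is assumed to be an open /dev/gpiochipN
 * descriptor, the offset is arbitrary, and error handling is omitted.
 *
 *	struct gpioevent_request ereq = { 0 };
 *	struct gpioevent_data edata;
 *
 *	ereq.lineoffset = 7;
 *	ereq.handleflags = GPIOHANDLE_REQUEST_INPUT;
 *	ereq.eventflags = GPIOEVENT_REQUEST_FALLING_EDGE;
 *	ioctl(chip_fd, GPIO_GET_LINEEVENT_IOCTL, &ereq);
 *
 *	// blocks until an edge occurs, then returns one event
 *	read(ereq.fd, &edata, sizeof(edata));
 */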
1798
1799 #define GPIOEVENT_REQUEST_VALID_FLAGS \
1800 (GPIOEVENT_REQUEST_RISING_EDGE | \
1801 GPIOEVENT_REQUEST_FALLING_EDGE)
1802
1803 static __poll_t lineevent_poll(struct file *file,
1804 struct poll_table_struct *wait)
1805 {
1806 struct lineevent_state *le = file->private_data;
1807 __poll_t events = 0;
1808
1809 guard(srcu)(&le->gdev->srcu);
1810
1811 if (!rcu_access_pointer(le->gdev->chip))
1812 return EPOLLHUP | EPOLLERR;
1813
1814 poll_wait(file, &le->wait, wait);
1815
1816 if (!kfifo_is_empty_spinlocked_noirqsave(&le->events, &le->wait.lock))
1817 events = EPOLLIN | EPOLLRDNORM;
1818
1819 return events;
1820 }
1821
1822 static int lineevent_unregistered_notify(struct notifier_block *nb,
1823 unsigned long action, void *data)
1824 {
1825 struct lineevent_state *le = container_of(nb, struct lineevent_state,
1826 device_unregistered_nb);
1827
1828 wake_up_poll(&le->wait, EPOLLIN | EPOLLERR);
1829
1830 return NOTIFY_OK;
1831 }
1832
1833 struct compat_gpioeevent_data {
1834 compat_u64 timestamp;
1835 u32 id;
1836 };
1837
1838 static ssize_t lineevent_read(struct file *file, char __user *buf,
1839 size_t count, loff_t *f_ps)
1840 {
1841 struct lineevent_state *le = file->private_data;
1842 struct gpioevent_data ge;
1843 ssize_t bytes_read = 0;
1844 ssize_t ge_size;
1845 int ret;
1846
1847 guard(srcu)(&le->gdev->srcu);
1848
1849 if (!rcu_access_pointer(le->gdev->chip))
1850 return -ENODEV;
1851
1852 /*
1853 * When a compat system call is used, struct gpioevent_data has a
1854 * different size on at least ia32 due to alignment differences:
1855 * because the first member is 64 bits followed by a 32-bit one,
1856 * there is no gap between them; the only difference is the
1857 * padding at the end of the data structure. Hence, we calculate the
1858 * actual sizeof() and pass this as an argument to copy_to_user() to
1859 * drop unneeded bytes from the output.
1860 */
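/*
 * Concretely (assuming ia32 userspace on a 64-bit kernel): struct
 * gpioevent_data is 16 bytes natively (8-byte u64 + 4-byte u32 + 4
 * bytes of tail padding) but only 12 bytes for ia32, where the u64
 * is 4-byte aligned and no tail padding is added.
 */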
1861 if (compat_need_64bit_alignment_fixup())
1862 ge_size = sizeof(struct compat_gpioeevent_data);
1863 else
1864 ge_size = sizeof(struct gpioevent_data);
1865 if (count < ge_size)
1866 return -EINVAL;
1867
1868 do {
1869 scoped_guard(spinlock, &le->wait.lock) {
1870 if (kfifo_is_empty(&le->events)) {
1871 if (bytes_read)
1872 return bytes_read;
1873
1874 if (file->f_flags & O_NONBLOCK)
1875 return -EAGAIN;
1876
1877 ret = wait_event_interruptible_locked(le->wait,
1878 !kfifo_is_empty(&le->events));
1879 if (ret)
1880 return ret;
1881 }
1882
1883 if (kfifo_out(&le->events, &ge, 1) != 1) {
1884 /*
1885 * This should never happen - we hold the
1886 * lock from the moment we learned the fifo
1887 * is no longer empty until now.
1888 */
1889 WARN(1, "failed to read from non-empty kfifo");
1890 return -EIO;
1891 }
1892 }
1893
1894 if (copy_to_user(buf + bytes_read, &ge, ge_size))
1895 return -EFAULT;
1896 bytes_read += ge_size;
1897 } while (count >= bytes_read + ge_size);
1898
1899 return bytes_read;
1900 }
1901
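/*
 * Release all resources held by a line event request. Every member is
 * checked before being released, so this is also safe to call on a
 * partially initialized state from the error paths of lineevent_create().
 */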
1902 static void lineevent_free(struct lineevent_state *le)
1903 {
1904 if (le->device_unregistered_nb.notifier_call)
1905 blocking_notifier_chain_unregister(&le->gdev->device_notifier,
1906 &le->device_unregistered_nb);
1907 if (le->irq)
1908 free_irq_label(free_irq(le->irq, le));
1909 if (le->desc)
1910 gpiod_free(le->desc);
1911 kfree(le->label);
1912 gpio_device_put(le->gdev);
1913 kfree(le);
1914 }
1915
1916 static int lineevent_release(struct inode *inode, struct file *file)
1917 {
1918 lineevent_free(file->private_data);
1919 return 0;
1920 }
1921
1922 static long lineevent_ioctl(struct file *file, unsigned int cmd,
1923 unsigned long arg)
1924 {
1925 struct lineevent_state *le = file->private_data;
1926 void __user *ip = (void __user *)arg;
1927 struct gpiohandle_data ghd;
1928
1929 guard(srcu)(&le->gdev->srcu);
1930
1931 if (!rcu_access_pointer(le->gdev->chip))
1932 return -ENODEV;
1933
1934 /*
1935 * We can get the value for an event line but not set it,
1936 * because it is input by definition.
1937 */
1938 if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
1939 int val;
1940
1941 memset(&ghd, 0, sizeof(ghd));
1942
1943 val = gpiod_get_value_cansleep(le->desc);
1944 if (val < 0)
1945 return val;
1946 ghd.values[0] = val;
1947
1948 if (copy_to_user(ip, &ghd, sizeof(ghd)))
1949 return -EFAULT;
1950
1951 return 0;
1952 }
1953 return -EINVAL;
1954 }
1955
1956 #ifdef CONFIG_COMPAT
1957 static long lineevent_ioctl_compat(struct file *file, unsigned int cmd,
1958 unsigned long arg)
1959 {
1960 return lineevent_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
1961 }
1962 #endif
1963
1964 static const struct file_operations lineevent_fileops = {
1965 .release = lineevent_release,
1966 .read = lineevent_read,
1967 .poll = lineevent_poll,
1968 .owner = THIS_MODULE,
1969 .llseek = noop_llseek,
1970 .unlocked_ioctl = lineevent_ioctl,
1971 #ifdef CONFIG_COMPAT
1972 .compat_ioctl = lineevent_ioctl_compat,
1973 #endif
1974 };
1975
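/*
 * Threaded half of the event interrupt: work out which edge fired, fill in
 * a struct gpioevent_data and push it into the event KFIFO, waking up any
 * poll() or blocking read().
 */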
1976 static irqreturn_t lineevent_irq_thread(int irq, void *p)
1977 {
1978 struct lineevent_state *le = p;
1979 struct gpioevent_data ge;
1980 int ret;
1981
1982 /* Do not leak kernel stack to userspace */
1983 memset(&ge, 0, sizeof(ge));
1984
1985 /*
1986 * We may be running from a nested threaded interrupt in which case
1987 * we didn't get the timestamp from lineevent_irq_handler().
1988 */
1989 if (!le->timestamp)
1990 ge.timestamp = ktime_get_ns();
1991 else
1992 ge.timestamp = le->timestamp;
1993
1994 if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
1995 && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
1996 int level = gpiod_get_value_cansleep(le->desc);
1997
1998 if (level)
1999 /* Emit low-to-high event */
2000 ge.id = GPIOEVENT_EVENT_RISING_EDGE;
2001 else
2002 /* Emit high-to-low event */
2003 ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
2004 } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
2005 /* Emit low-to-high event */
2006 ge.id = GPIOEVENT_EVENT_RISING_EDGE;
2007 } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
2008 /* Emit high-to-low event */
2009 ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
2010 } else {
2011 return IRQ_NONE;
2012 }
2013
2014 ret = kfifo_in_spinlocked_noirqsave(&le->events, &ge,
2015 1, &le->wait.lock);
2016 if (ret)
2017 wake_up_poll(&le->wait, EPOLLIN);
2018 else
2019 pr_debug_ratelimited("event FIFO is full - event dropped\n");
2020
2021 return IRQ_HANDLED;
2022 }
2023
2024 static irqreturn_t lineevent_irq_handler(int irq, void *p)
2025 {
2026 struct lineevent_state *le = p;
2027
2028 /*
2029 * Just store the timestamp in hardirq context so we get it as
2030 * close in time as possible to the actual event.
2031 */
2032 le->timestamp = ktime_get_ns();
2033
2034 return IRQ_WAKE_THREAD;
2035 }
2036
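/*
 * Implementation of GPIO_GET_LINEEVENT_IOCTL: validate the requested flags,
 * claim the line as an input, wire up the interrupt handlers and return an
 * anonymous file descriptor that events can be read from.
 */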
2037 static int lineevent_create(struct gpio_device *gdev, void __user *ip)
2038 {
2039 struct gpioevent_request eventreq;
2040 struct lineevent_state *le;
2041 struct gpio_desc *desc;
2042 struct file *file;
2043 u32 offset;
2044 u32 lflags;
2045 u32 eflags;
2046 int fd;
2047 int ret;
2048 int irq, irqflags = 0;
2049 char *label;
2050
2051 if (copy_from_user(&eventreq, ip, sizeof(eventreq)))
2052 return -EFAULT;
2053
2054 offset = eventreq.lineoffset;
2055 lflags = eventreq.handleflags;
2056 eflags = eventreq.eventflags;
2057
2058 desc = gpio_device_get_desc(gdev, offset);
2059 if (IS_ERR(desc))
2060 return PTR_ERR(desc);
2061
2062 /* Return an error if an unknown flag is set */
2063 if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
2064 (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS))
2065 return -EINVAL;
2066
2067 /* This is just wrong: we don't look for events on output lines */
2068 if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
2069 (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
2070 (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
2071 return -EINVAL;
2072
2073 /* Only one bias flag can be set. */
2074 if (((lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
2075 (lflags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
2076 GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
2077 ((lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
2078 (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
2079 return -EINVAL;
2080
2081 le = kzalloc(sizeof(*le), GFP_KERNEL);
2082 if (!le)
2083 return -ENOMEM;
2084 le->gdev = gpio_device_get(gdev);
2085
2086 if (eventreq.consumer_label[0] != '\0') {
2087 /* label is only initialized if consumer_label is set */
2088 le->label = kstrndup(eventreq.consumer_label,
2089 sizeof(eventreq.consumer_label) - 1,
2090 GFP_KERNEL);
2091 if (!le->label) {
2092 ret = -ENOMEM;
2093 goto out_free_le;
2094 }
2095 }
2096
2097 ret = gpiod_request_user(desc, le->label);
2098 if (ret)
2099 goto out_free_le;
2100 le->desc = desc;
2101 le->eflags = eflags;
2102
2103 linehandle_flags_to_desc_flags(lflags, &desc->flags);
2104
2105 ret = gpiod_direction_input(desc);
2106 if (ret)
2107 goto out_free_le;
2108
2109 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
2110
2111 irq = gpiod_to_irq(desc);
2112 if (irq <= 0) {
2113 ret = -ENODEV;
2114 goto out_free_le;
2115 }
2116
2117 if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
2118 irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
2119 IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
2120 if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
2121 irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
2122 IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
2123 irqflags |= IRQF_ONESHOT;
2124
2125 INIT_KFIFO(le->events);
2126 init_waitqueue_head(&le->wait);
2127
2128 le->device_unregistered_nb.notifier_call = lineevent_unregistered_notify;
2129 ret = blocking_notifier_chain_register(&gdev->device_notifier,
2130 &le->device_unregistered_nb);
2131 if (ret)
2132 goto out_free_le;
2133
2134 label = make_irq_label(le->label);
2135 if (IS_ERR(label)) {
2136 ret = PTR_ERR(label);
2137 goto out_free_le;
2138 }
2139
2140 /* Request a thread to read the events */
2141 ret = request_threaded_irq(irq,
2142 lineevent_irq_handler,
2143 lineevent_irq_thread,
2144 irqflags,
2145 label,
2146 le);
2147 if (ret) {
2148 free_irq_label(label);
2149 goto out_free_le;
2150 }
2151
2152 le->irq = irq;
2153
2154 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
2155 if (fd < 0) {
2156 ret = fd;
2157 goto out_free_le;
2158 }
2159
2160 file = anon_inode_getfile("gpio-event",
2161 &lineevent_fileops,
2162 le,
2163 O_RDONLY | O_CLOEXEC);
2164 if (IS_ERR(file)) {
2165 ret = PTR_ERR(file);
2166 goto out_put_unused_fd;
2167 }
2168
2169 eventreq.fd = fd;
2170 if (copy_to_user(ip, &eventreq, sizeof(eventreq))) {
2171 /*
2172 * fput() will trigger the release() callback, so do not go onto
2173 * the regular error cleanup path here.
2174 */
2175 fput(file);
2176 put_unused_fd(fd);
2177 return -EFAULT;
2178 }
2179
2180 fd_install(fd, file);
2181
2182 return 0;
2183
2184 out_put_unused_fd:
2185 put_unused_fd(fd);
2186 out_free_le:
2187 lineevent_free(le);
2188 return ret;
2189 }
2190
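/*
 * Translate a v2 line info structure into the legacy v1 layout, mapping
 * each v2 flag onto its v1 equivalent.
 */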
2191 static void gpio_v2_line_info_to_v1(struct gpio_v2_line_info *info_v2,
2192 struct gpioline_info *info_v1)
2193 {
2194 u64 flagsv2 = info_v2->flags;
2195
2196 memcpy(info_v1->name, info_v2->name, sizeof(info_v1->name));
2197 memcpy(info_v1->consumer, info_v2->consumer, sizeof(info_v1->consumer));
2198 info_v1->line_offset = info_v2->offset;
2199 info_v1->flags = 0;
2200
2201 if (flagsv2 & GPIO_V2_LINE_FLAG_USED)
2202 info_v1->flags |= GPIOLINE_FLAG_KERNEL;
2203
2204 if (flagsv2 & GPIO_V2_LINE_FLAG_OUTPUT)
2205 info_v1->flags |= GPIOLINE_FLAG_IS_OUT;
2206
2207 if (flagsv2 & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
2208 info_v1->flags |= GPIOLINE_FLAG_ACTIVE_LOW;
2209
2210 if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_DRAIN)
2211 info_v1->flags |= GPIOLINE_FLAG_OPEN_DRAIN;
2212 if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_SOURCE)
2213 info_v1->flags |= GPIOLINE_FLAG_OPEN_SOURCE;
2214
2215 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)
2216 info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_UP;
2217 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN)
2218 info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_DOWN;
2219 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_DISABLED)
2220 info_v1->flags |= GPIOLINE_FLAG_BIAS_DISABLE;
2221 }
2222
2223 static void gpio_v2_line_info_changed_to_v1(
2224 struct gpio_v2_line_info_changed *lic_v2,
2225 struct gpioline_info_changed *lic_v1)
2226 {
2227 memset(lic_v1, 0, sizeof(*lic_v1));
2228 gpio_v2_line_info_to_v1(&lic_v2->info, &lic_v1->info);
2229 lic_v1->timestamp = lic_v2->timestamp_ns;
2230 lic_v1->event_type = lic_v2->event_type;
2231 }
2232
2233 #endif /* CONFIG_GPIO_CDEV_V1 */
2234
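/*
 * Populate a v2 line info structure from a descriptor. If @atomic is true,
 * checks that may sleep (the pinctrl query) are skipped.
 */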
2235 static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
2236 struct gpio_v2_line_info *info, bool atomic)
2237 {
2238 u32 debounce_period_us;
2239 unsigned long dflags;
2240 const char *label;
2241
2242 CLASS(gpio_chip_guard, guard)(desc);
2243 if (!guard.gc)
2244 return;
2245
2246 memset(info, 0, sizeof(*info));
2247 info->offset = gpio_chip_hwgpio(desc);
2248
2249 if (desc->name)
2250 strscpy(info->name, desc->name, sizeof(info->name));
2251
2252 dflags = READ_ONCE(desc->flags);
2253
2254 scoped_guard(srcu, &desc->gdev->desc_srcu) {
2255 label = gpiod_get_label(desc);
2256 if (label && test_bit(FLAG_REQUESTED, &dflags))
2257 strscpy(info->consumer, label,
2258 sizeof(info->consumer));
2259 }
2260
2261 /*
2262 * Userspace only needs to know that the kernel is using this GPIO so
2263 * it can't use it.
2264 * The calculation of the used flag is slightly racy, as it may read
2265 * desc, gc and pinctrl state without a lock covering all three at
2266 * once. Worst case, if the line is in transition and the calculation
2267 * is inconsistent, then it looks to the user like they performed the
2268 * read on the other side of the transition - but that can always
2269 * happen.
2270 * The definitive test that a line is available to userspace is to
2271 * request it.
2272 */
2273 if (test_bit(FLAG_REQUESTED, &dflags) ||
2274 test_bit(FLAG_IS_HOGGED, &dflags) ||
2275 test_bit(FLAG_EXPORT, &dflags) ||
2276 test_bit(FLAG_SYSFS, &dflags) ||
2277 !gpiochip_line_is_valid(guard.gc, info->offset)) {
2278 info->flags |= GPIO_V2_LINE_FLAG_USED;
2279 } else if (!atomic) {
2280 if (!pinctrl_gpio_can_use_line(guard.gc, info->offset))
2281 info->flags |= GPIO_V2_LINE_FLAG_USED;
2282 }
2283
2284 if (test_bit(FLAG_IS_OUT, &dflags))
2285 info->flags |= GPIO_V2_LINE_FLAG_OUTPUT;
2286 else
2287 info->flags |= GPIO_V2_LINE_FLAG_INPUT;
2288
2289 if (test_bit(FLAG_ACTIVE_LOW, &dflags))
2290 info->flags |= GPIO_V2_LINE_FLAG_ACTIVE_LOW;
2291
2292 if (test_bit(FLAG_OPEN_DRAIN, &dflags))
2293 info->flags |= GPIO_V2_LINE_FLAG_OPEN_DRAIN;
2294 if (test_bit(FLAG_OPEN_SOURCE, &dflags))
2295 info->flags |= GPIO_V2_LINE_FLAG_OPEN_SOURCE;
2296
2297 if (test_bit(FLAG_BIAS_DISABLE, &dflags))
2298 info->flags |= GPIO_V2_LINE_FLAG_BIAS_DISABLED;
2299 if (test_bit(FLAG_PULL_DOWN, &dflags))
2300 info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN;
2301 if (test_bit(FLAG_PULL_UP, &dflags))
2302 info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_UP;
2303
2304 if (test_bit(FLAG_EDGE_RISING, &dflags))
2305 info->flags |= GPIO_V2_LINE_FLAG_EDGE_RISING;
2306 if (test_bit(FLAG_EDGE_FALLING, &dflags))
2307 info->flags |= GPIO_V2_LINE_FLAG_EDGE_FALLING;
2308
2309 if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &dflags))
2310 info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
2311 else if (test_bit(FLAG_EVENT_CLOCK_HTE, &dflags))
2312 info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
2313
2314 debounce_period_us = READ_ONCE(desc->debounce_period_us);
2315 if (debounce_period_us) {
2316 info->attrs[info->num_attrs].id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
2317 info->attrs[info->num_attrs].debounce_period_us =
2318 debounce_period_us;
2319 info->num_attrs++;
2320 }
2321 }
2322
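/**
 * struct gpio_chardev_data - the state of an open GPIO character device
 * @gdev: the GPIO device the chardev belongs to
 * @wait: wait queue that handles blocking reads of line info change events
 * @events: KFIFO for the line info change events
 * @lineinfo_changed_nb: notifier block for receiving line state changes
 * @device_unregistered_nb: notifier block for receiving gdev unregister events
 * @watched_lines: bitmap of lines watched for info changes
 * @watch_abi_version: the ABI version selected by the first watch request,
 * or 0 if no watch has been requested yet
 * @fp: the file pointer backing this chardev, kept alive while line state
 * work is pending
 */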
2323 struct gpio_chardev_data {
2324 struct gpio_device *gdev;
2325 wait_queue_head_t wait;
2326 DECLARE_KFIFO(events, struct gpio_v2_line_info_changed, 32);
2327 struct notifier_block lineinfo_changed_nb;
2328 struct notifier_block device_unregistered_nb;
2329 unsigned long *watched_lines;
2330 #ifdef CONFIG_GPIO_CDEV_V1
2331 atomic_t watch_abi_version;
2332 #endif
2333 struct file *fp;
2334 };
2335
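/* Handle GPIO_GET_CHIPINFO_IOCTL: report the chip name, label and line count. */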
2336 static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip)
2337 {
2338 struct gpio_device *gdev = cdev->gdev;
2339 struct gpiochip_info chipinfo;
2340
2341 memset(&chipinfo, 0, sizeof(chipinfo));
2342
2343 strscpy(chipinfo.name, dev_name(&gdev->dev), sizeof(chipinfo.name));
2344 strscpy(chipinfo.label, gdev->label, sizeof(chipinfo.label));
2345 chipinfo.lines = gdev->ngpio;
2346 if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
2347 return -EFAULT;
2348 return 0;
2349 }
2350
2351 #ifdef CONFIG_GPIO_CDEV_V1
2352 /*
2353 * returns 0 if the versions match, else the previously selected ABI version
2354 */
2355 static int lineinfo_ensure_abi_version(struct gpio_chardev_data *cdata,
2356 unsigned int version)
2357 {
2358 int abiv = atomic_cmpxchg(&cdata->watch_abi_version, 0, version);
2359
2360 if (abiv == version)
2361 return 0;
2362
2363 return abiv;
2364 }
2365
2366 static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip,
2367 bool watch)
2368 {
2369 struct gpio_desc *desc;
2370 struct gpioline_info lineinfo;
2371 struct gpio_v2_line_info lineinfo_v2;
2372
2373 if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
2374 return -EFAULT;
2375
2376 /* this doubles as a range check on line_offset */
2377 desc = gpio_device_get_desc(cdev->gdev, lineinfo.line_offset);
2378 if (IS_ERR(desc))
2379 return PTR_ERR(desc);
2380
2381 if (watch) {
2382 if (lineinfo_ensure_abi_version(cdev, 1))
2383 return -EPERM;
2384
2385 if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
2386 return -EBUSY;
2387 }
2388
2389 gpio_desc_to_lineinfo(desc, &lineinfo_v2, false);
2390 gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
2391
2392 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
2393 if (watch)
2394 clear_bit(lineinfo.line_offset, cdev->watched_lines);
2395 return -EFAULT;
2396 }
2397
2398 return 0;
2399 }
2400 #endif
2401
2402 static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
2403 bool watch)
2404 {
2405 struct gpio_desc *desc;
2406 struct gpio_v2_line_info lineinfo;
2407
2408 if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
2409 return -EFAULT;
2410
2411 if (!mem_is_zero(lineinfo.padding, sizeof(lineinfo.padding)))
2412 return -EINVAL;
2413
2414 desc = gpio_device_get_desc(cdev->gdev, lineinfo.offset);
2415 if (IS_ERR(desc))
2416 return PTR_ERR(desc);
2417
2418 if (watch) {
2419 #ifdef CONFIG_GPIO_CDEV_V1
2420 if (lineinfo_ensure_abi_version(cdev, 2))
2421 return -EPERM;
2422 #endif
2423 if (test_and_set_bit(lineinfo.offset, cdev->watched_lines))
2424 return -EBUSY;
2425 }
2426 gpio_desc_to_lineinfo(desc, &lineinfo, false);
2427
2428 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
2429 if (watch)
2430 clear_bit(lineinfo.offset, cdev->watched_lines);
2431 return -EFAULT;
2432 }
2433
2434 return 0;
2435 }
2436
2437 static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
2438 {
2439 __u32 offset;
2440
2441 if (copy_from_user(&offset, ip, sizeof(offset)))
2442 return -EFAULT;
2443
2444 if (offset >= cdev->gdev->ngpio)
2445 return -EINVAL;
2446
2447 if (!test_and_clear_bit(offset, cdev->watched_lines))
2448 return -EBUSY;
2449
2450 return 0;
2451 }
2452
2453 /*
2454 * gpio_ioctl() - ioctl handler for the GPIO chardev
2455 */
2456 static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2457 {
2458 struct gpio_chardev_data *cdev = file->private_data;
2459 struct gpio_device *gdev = cdev->gdev;
2460 void __user *ip = (void __user *)arg;
2461
2462 guard(srcu)(&gdev->srcu);
2463
2464 /* We fail any subsequent ioctl()s when the chip is gone */
2465 if (!rcu_access_pointer(gdev->chip))
2466 return -ENODEV;
2467
2468 /* Fill in the struct and pass to userspace */
2469 switch (cmd) {
2470 case GPIO_GET_CHIPINFO_IOCTL:
2471 return chipinfo_get(cdev, ip);
2472 #ifdef CONFIG_GPIO_CDEV_V1
2473 case GPIO_GET_LINEHANDLE_IOCTL:
2474 return linehandle_create(gdev, ip);
2475 case GPIO_GET_LINEEVENT_IOCTL:
2476 return lineevent_create(gdev, ip);
2477 case GPIO_GET_LINEINFO_IOCTL:
2478 return lineinfo_get_v1(cdev, ip, false);
2479 case GPIO_GET_LINEINFO_WATCH_IOCTL:
2480 return lineinfo_get_v1(cdev, ip, true);
2481 #endif /* CONFIG_GPIO_CDEV_V1 */
2482 case GPIO_V2_GET_LINEINFO_IOCTL:
2483 return lineinfo_get(cdev, ip, false);
2484 case GPIO_V2_GET_LINEINFO_WATCH_IOCTL:
2485 return lineinfo_get(cdev, ip, true);
2486 case GPIO_V2_GET_LINE_IOCTL:
2487 return linereq_create(gdev, ip);
2488 case GPIO_GET_LINEINFO_UNWATCH_IOCTL:
2489 return lineinfo_unwatch(cdev, ip);
2490 default:
2491 return -EINVAL;
2492 }
2493 }
2494
2495 #ifdef CONFIG_COMPAT
2496 static long gpio_ioctl_compat(struct file *file, unsigned int cmd,
2497 unsigned long arg)
2498 {
2499 return gpio_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
2500 }
2501 #endif
2502
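/*
 * Context for a line info change event, queued on the line state workqueue
 * so that the sleeping pinctrl check and the KFIFO insertion run in process
 * context.
 */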
2503 struct lineinfo_changed_ctx {
2504 struct work_struct work;
2505 struct gpio_v2_line_info_changed chg;
2506 struct gpio_device *gdev;
2507 struct gpio_chardev_data *cdev;
2508 };
2509
2510 static void lineinfo_changed_func(struct work_struct *work)
2511 {
2512 struct lineinfo_changed_ctx *ctx =
2513 container_of(work, struct lineinfo_changed_ctx, work);
2514 struct gpio_chip *gc;
2515 int ret;
2516
2517 if (!(ctx->chg.info.flags & GPIO_V2_LINE_FLAG_USED)) {
2518 /*
2519 * If nobody set the USED flag earlier, check with pinctrl now.
2520 * We're doing this late because pinctrl_gpio_can_use_line() may sleep.
2521 * Pin functions are in general much more static and while it's
2522 * not 100% bullet-proof, it's good enough for most cases.
2523 */
2524 scoped_guard(srcu, &ctx->gdev->srcu) {
2525 gc = srcu_dereference(ctx->gdev->chip, &ctx->gdev->srcu);
2526 if (gc &&
2527 !pinctrl_gpio_can_use_line(gc, ctx->chg.info.offset))
2528 ctx->chg.info.flags |= GPIO_V2_LINE_FLAG_USED;
2529 }
2530 }
2531
2532 ret = kfifo_in_spinlocked(&ctx->cdev->events, &ctx->chg, 1,
2533 &ctx->cdev->wait.lock);
2534 if (ret)
2535 wake_up_poll(&ctx->cdev->wait, EPOLLIN);
2536 else
2537 pr_debug_ratelimited("lineinfo event FIFO is full - event dropped\n");
2538
2539 gpio_device_put(ctx->gdev);
2540 fput(ctx->cdev->fp);
2541 kfree(ctx);
2542 }
2543
2544 static int lineinfo_changed_notify(struct notifier_block *nb,
2545 unsigned long action, void *data)
2546 {
2547 struct gpio_chardev_data *cdev =
2548 container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb);
2549 struct lineinfo_changed_ctx *ctx;
2550 struct gpio_desc *desc = data;
2551
2552 if (!test_bit(gpio_chip_hwgpio(desc), cdev->watched_lines))
2553 return NOTIFY_DONE;
2554
2555 /*
2556 * If this is called from atomic context (for instance: with a spinlock
2557 * taken by the atomic notifier chain), any sleeping calls must be done
2558 * outside of this function in process context of the dedicated
2559 * workqueue.
2560 *
2561 * Let's gather as much info as possible from the descriptor and
2562 * postpone just the call to pinctrl_gpio_can_use_line() until the work
2563 * is executed.
2564 */
2565
2566 ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
2567 if (!ctx) {
2568 pr_err("Failed to allocate memory for line info notification\n");
2569 return NOTIFY_DONE;
2570 }
2571
2572 ctx->chg.event_type = action;
2573 ctx->chg.timestamp_ns = ktime_get_ns();
2574 gpio_desc_to_lineinfo(desc, &ctx->chg.info, true);
2575 /* Keep the GPIO device alive until we emit the event. */
2576 ctx->gdev = gpio_device_get(desc->gdev);
2577 ctx->cdev = cdev;
2578 /* Keep the file descriptor alive too. */
2579 get_file(ctx->cdev->fp);
2580
2581 INIT_WORK(&ctx->work, lineinfo_changed_func);
2582 queue_work(ctx->gdev->line_state_wq, &ctx->work);
2583
2584 return NOTIFY_OK;
2585 }
2586
2587 static int gpio_device_unregistered_notify(struct notifier_block *nb,
2588 unsigned long action, void *data)
2589 {
2590 struct gpio_chardev_data *cdev = container_of(nb,
2591 struct gpio_chardev_data,
2592 device_unregistered_nb);
2593
2594 wake_up_poll(&cdev->wait, EPOLLIN | EPOLLERR);
2595
2596 return NOTIFY_OK;
2597 }
2598
2599 static __poll_t lineinfo_watch_poll(struct file *file,
2600 struct poll_table_struct *pollt)
2601 {
2602 struct gpio_chardev_data *cdev = file->private_data;
2603 __poll_t events = 0;
2604
2605 guard(srcu)(&cdev->gdev->srcu);
2606
2607 if (!rcu_access_pointer(cdev->gdev->chip))
2608 return EPOLLHUP | EPOLLERR;
2609
2610 poll_wait(file, &cdev->wait, pollt);
2611
2612 if (!kfifo_is_empty_spinlocked_noirqsave(&cdev->events,
2613 &cdev->wait.lock))
2614 events = EPOLLIN | EPOLLRDNORM;
2615
2616 return events;
2617 }
2618
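/*
 * Read line info change events from the chardev KFIFO. With
 * CONFIG_GPIO_CDEV_V1 the record format (v1 or v2) follows the ABI version
 * selected when the watch was first requested.
 */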
2619 static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
2620 size_t count, loff_t *off)
2621 {
2622 struct gpio_chardev_data *cdev = file->private_data;
2623 struct gpio_v2_line_info_changed event;
2624 ssize_t bytes_read = 0;
2625 int ret;
2626 size_t event_size;
2627
2628 guard(srcu)(&cdev->gdev->srcu);
2629
2630 if (!rcu_access_pointer(cdev->gdev->chip))
2631 return -ENODEV;
2632
2633 #ifndef CONFIG_GPIO_CDEV_V1
2634 event_size = sizeof(struct gpio_v2_line_info_changed);
2635 if (count < event_size)
2636 return -EINVAL;
2637 #endif
2638
2639 do {
2640 scoped_guard(spinlock, &cdev->wait.lock) {
2641 if (kfifo_is_empty(&cdev->events)) {
2642 if (bytes_read)
2643 return bytes_read;
2644
2645 if (file->f_flags & O_NONBLOCK)
2646 return -EAGAIN;
2647
2648 ret = wait_event_interruptible_locked(cdev->wait,
2649 !kfifo_is_empty(&cdev->events));
2650 if (ret)
2651 return ret;
2652 }
2653 #ifdef CONFIG_GPIO_CDEV_V1
2654 /* must be after kfifo check so watch_abi_version is set */
2655 if (atomic_read(&cdev->watch_abi_version) == 2)
2656 event_size = sizeof(struct gpio_v2_line_info_changed);
2657 else
2658 event_size = sizeof(struct gpioline_info_changed);
2659 if (count < event_size)
2660 return -EINVAL;
2661 #endif
2662 if (kfifo_out(&cdev->events, &event, 1) != 1) {
2663 /*
2664 * This should never happen - we hold the
2665 * lock from the moment we learned the fifo
2666 * is no longer empty until now.
2667 */
2668 WARN(1, "failed to read from non-empty kfifo");
2669 return -EIO;
2670 }
2671 }
2672
2673 #ifdef CONFIG_GPIO_CDEV_V1
2674 if (event_size == sizeof(struct gpio_v2_line_info_changed)) {
2675 if (copy_to_user(buf + bytes_read, &event, event_size))
2676 return -EFAULT;
2677 } else {
2678 struct gpioline_info_changed event_v1;
2679
2680 gpio_v2_line_info_changed_to_v1(&event, &event_v1);
2681 if (copy_to_user(buf + bytes_read, &event_v1,
2682 event_size))
2683 return -EFAULT;
2684 }
2685 #else
2686 if (copy_to_user(buf + bytes_read, &event, event_size))
2687 return -EFAULT;
2688 #endif
2689 bytes_read += event_size;
2690 } while (count >= bytes_read + sizeof(event));
2691
2692 return bytes_read;
2693 }
2694
2695 /**
2696 * gpio_chrdev_open() - open the chardev for ioctl operations
2697 * @inode: inode for this chardev
2698 * @file: file struct for storing private data
2699 *
2700 * Returns:
2701 * 0 on success, or negative errno on failure.
2702 */
2703 static int gpio_chrdev_open(struct inode *inode, struct file *file)
2704 {
2705 struct gpio_device *gdev = container_of(inode->i_cdev,
2706 struct gpio_device, chrdev);
2707 struct gpio_chardev_data *cdev;
2708 int ret = -ENOMEM;
2709
2710 guard(srcu)(&gdev->srcu);
2711
2712 /* Fail on open if the backing gpiochip is gone */
2713 if (!rcu_access_pointer(gdev->chip))
2714 return -ENODEV;
2715
2716 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
2717 if (!cdev)
2718 return -ENOMEM;
2719
2720 cdev->watched_lines = bitmap_zalloc(gdev->ngpio, GFP_KERNEL);
2721 if (!cdev->watched_lines)
2722 goto out_free_cdev;
2723
2724 init_waitqueue_head(&cdev->wait);
2725 INIT_KFIFO(cdev->events);
2726 cdev->gdev = gpio_device_get(gdev);
2727
2728 cdev->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify;
2729 scoped_guard(write_lock_irqsave, &gdev->line_state_lock)
2730 ret = raw_notifier_chain_register(&gdev->line_state_notifier,
2731 &cdev->lineinfo_changed_nb);
2732 if (ret)
2733 goto out_free_bitmap;
2734
2735 cdev->device_unregistered_nb.notifier_call =
2736 gpio_device_unregistered_notify;
2737 ret = blocking_notifier_chain_register(&gdev->device_notifier,
2738 &cdev->device_unregistered_nb);
2739 if (ret)
2740 goto out_unregister_line_notifier;
2741
2742 file->private_data = cdev;
2743 cdev->fp = file;
2744
2745 ret = nonseekable_open(inode, file);
2746 if (ret)
2747 goto out_unregister_device_notifier;
2748
2749 return ret;
2750
2751 out_unregister_device_notifier:
2752 blocking_notifier_chain_unregister(&gdev->device_notifier,
2753 &cdev->device_unregistered_nb);
2754 out_unregister_line_notifier:
2755 scoped_guard(write_lock_irqsave, &gdev->line_state_lock)
2756 raw_notifier_chain_unregister(&gdev->line_state_notifier,
2757 &cdev->lineinfo_changed_nb);
2758 out_free_bitmap:
2759 gpio_device_put(gdev);
2760 bitmap_free(cdev->watched_lines);
2761 out_free_cdev:
2762 kfree(cdev);
2763 return ret;
2764 }
2765
2766 /**
2767 * gpio_chrdev_release() - close chardev after ioctl operations
2768 * @inode: inode for this chardev
2769 * @file: file struct for storing private data
2770 *
2771 * Returns:
2772 * 0 on success, or negative errno on failure.
2773 */
2774 static int gpio_chrdev_release(struct inode *inode, struct file *file)
2775 {
2776 struct gpio_chardev_data *cdev = file->private_data;
2777 struct gpio_device *gdev = cdev->gdev;
2778
2779 blocking_notifier_chain_unregister(&gdev->device_notifier,
2780 &cdev->device_unregistered_nb);
2781 scoped_guard(write_lock_irqsave, &gdev->line_state_lock)
2782 raw_notifier_chain_unregister(&gdev->line_state_notifier,
2783 &cdev->lineinfo_changed_nb);
2784 bitmap_free(cdev->watched_lines);
2785 gpio_device_put(gdev);
2786 kfree(cdev);
2787
2788 return 0;
2789 }
2790
2791 static const struct file_operations gpio_fileops = {
2792 .release = gpio_chrdev_release,
2793 .open = gpio_chrdev_open,
2794 .poll = lineinfo_watch_poll,
2795 .read = lineinfo_watch_read,
2796 .owner = THIS_MODULE,
2797 .unlocked_ioctl = gpio_ioctl,
2798 #ifdef CONFIG_COMPAT
2799 .compat_ioctl = gpio_ioctl_compat,
2800 #endif
2801 };
2802
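/**
 * gpiolib_cdev_register() - add a chardev for the given GPIO device
 * @gdev: the GPIO device to add the chardev for
 * @devt: the dev_t supplying the major number for the chardev
 *
 * Returns:
 * 0 on success, or negative errno on failure.
 */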
2803 int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt)
2804 {
2805 struct gpio_chip *gc;
2806 int ret;
2807
2808 cdev_init(&gdev->chrdev, &gpio_fileops);
2809 gdev->chrdev.owner = THIS_MODULE;
2810 gdev->dev.devt = MKDEV(MAJOR(devt), gdev->id);
2811
2812 gdev->line_state_wq = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2813 dev_name(&gdev->dev));
2814 if (!gdev->line_state_wq)
2815 return -ENOMEM;
2816
2817 ret = cdev_device_add(&gdev->chrdev, &gdev->dev);
2818 if (ret)
2819 return ret;
2820
2821 guard(srcu)(&gdev->srcu);
2822 gc = srcu_dereference(gdev->chip, &gdev->srcu);
2823 if (!gc)
2824 return -ENODEV;
2825
2826 chip_dbg(gc, "added GPIO chardev (%d:%d)\n", MAJOR(devt), gdev->id);
2827
2828 return 0;
2829 }
2830
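/**
 * gpiolib_cdev_unregister() - remove the chardev for the given GPIO device
 * @gdev: the GPIO device to remove the chardev for
 */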
2831 void gpiolib_cdev_unregister(struct gpio_device *gdev)
2832 {
2833 destroy_workqueue(gdev->line_state_wq);
2834 cdev_device_del(&gdev->chrdev, &gdev->dev);
2835 blocking_notifier_call_chain(&gdev->device_notifier, 0, NULL);
2836 }
2837