1 // SPDX-License-Identifier: GPL-2.0
2
3 #include <linux/anon_inodes.h>
4 #include <linux/atomic.h>
5 #include <linux/bitmap.h>
6 #include <linux/build_bug.h>
7 #include <linux/cdev.h>
8 #include <linux/cleanup.h>
9 #include <linux/compat.h>
10 #include <linux/compiler.h>
11 #include <linux/device.h>
12 #include <linux/err.h>
13 #include <linux/file.h>
14 #include <linux/gpio.h>
15 #include <linux/gpio/driver.h>
16 #include <linux/hte.h>
17 #include <linux/interrupt.h>
18 #include <linux/irqreturn.h>
19 #include <linux/kfifo.h>
20 #include <linux/module.h>
21 #include <linux/mutex.h>
22 #include <linux/overflow.h>
23 #include <linux/pinctrl/consumer.h>
24 #include <linux/poll.h>
25 #include <linux/seq_file.h>
26 #include <linux/spinlock.h>
27 #include <linux/string.h>
28 #include <linux/timekeeping.h>
29 #include <linux/uaccess.h>
30 #include <linux/workqueue.h>
31
32 #include <uapi/linux/gpio.h>
33
34 #include "gpiolib.h"
35 #include "gpiolib-cdev.h"
36
37 /*
38 * Array sizes must ensure 64-bit alignment and not create holes in the
39 * struct packing.
40 */
41 static_assert(IS_ALIGNED(GPIO_V2_LINES_MAX, 2));
42 static_assert(IS_ALIGNED(GPIO_MAX_NAME_SIZE, 8));
43
44 /*
45 * Check that uAPI structs are 64-bit aligned for 32/64-bit compatibility
46 */
47 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_attribute), 8));
48 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config_attribute), 8));
49 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config), 8));
50 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_request), 8));
51 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info), 8));
52 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info_changed), 8));
53 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_event), 8));
54 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_values), 8));
55
56 /* Character device interface to GPIO.
57 *
58 * The GPIO character device, /dev/gpiochipN, provides userspace an
59 * interface to gpiolib GPIOs via ioctl()s.
60 */
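/*
 * Illustrative sketch only (not part of the driver): a minimal userspace
 * sequence using the v2 uAPI from <linux/gpio.h>. The chip path and line
 * offset are hypothetical and error handling is omitted.
 *
 *	struct gpio_v2_line_request req = { 0 };
 *	int chip_fd = open("/dev/gpiochip0", O_RDONLY);
 *
 *	req.offsets[0] = 17;
 *	req.num_lines = 1;
 *	req.config.flags = GPIO_V2_LINE_FLAG_INPUT;
 *	strcpy(req.consumer, "example");
 *	ioctl(chip_fd, GPIO_V2_GET_LINE_IOCTL, &req);
 *	// req.fd now refers to an anonymous inode serviced by the
 *	// GPIO_V2_LINE_*_IOCTL handlers implemented below.
 */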
61
62 /*
63 * GPIO line handle management
64 */
65
66 #ifdef CONFIG_GPIO_CDEV_V1
67 /**
68 * struct linehandle_state - contains the state of a userspace handle
69 * @gdev: the GPIO device the handle pertains to
70 * @label: consumer label used to tag descriptors
71 * @descs: the GPIO descriptors held by this handle
72 * @num_descs: the number of descriptors held in the descs array
73 */
74 struct linehandle_state {
75 struct gpio_device *gdev;
76 const char *label;
77 struct gpio_desc *descs[GPIOHANDLES_MAX];
78 u32 num_descs;
79 };
80
81 #define GPIOHANDLE_REQUEST_VALID_FLAGS \
82 (GPIOHANDLE_REQUEST_INPUT | \
83 GPIOHANDLE_REQUEST_OUTPUT | \
84 GPIOHANDLE_REQUEST_ACTIVE_LOW | \
85 GPIOHANDLE_REQUEST_BIAS_PULL_UP | \
86 GPIOHANDLE_REQUEST_BIAS_PULL_DOWN | \
87 GPIOHANDLE_REQUEST_BIAS_DISABLE | \
88 GPIOHANDLE_REQUEST_OPEN_DRAIN | \
89 GPIOHANDLE_REQUEST_OPEN_SOURCE)
90
91 #define GPIOHANDLE_REQUEST_DIRECTION_FLAGS \
92 (GPIOHANDLE_REQUEST_INPUT | \
93 GPIOHANDLE_REQUEST_OUTPUT)
94
95 static int linehandle_validate_flags(u32 flags)
96 {
97 /* Return an error if an unknown flag is set */
98 if (flags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
99 return -EINVAL;
100
101 /*
102 * Do not allow both INPUT & OUTPUT flags to be set as they are
103 * contradictory.
104 */
105 if ((flags & GPIOHANDLE_REQUEST_INPUT) &&
106 (flags & GPIOHANDLE_REQUEST_OUTPUT))
107 return -EINVAL;
108
109 /*
110 * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
111 * the hardware actually supports enabling both at the same time the
112 * electrical result would be disastrous.
113 */
114 if ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) &&
115 (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
116 return -EINVAL;
117
118 /* OPEN_DRAIN and OPEN_SOURCE flags only make sense for output mode. */
119 if (!(flags & GPIOHANDLE_REQUEST_OUTPUT) &&
120 ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
121 (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE)))
122 return -EINVAL;
123
124 /* Bias flags only allowed for input or output mode. */
125 if (!((flags & GPIOHANDLE_REQUEST_INPUT) ||
126 (flags & GPIOHANDLE_REQUEST_OUTPUT)) &&
127 ((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) ||
128 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP) ||
129 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN)))
130 return -EINVAL;
131
132 /* Only one bias flag can be set. */
133 if (((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
134 (flags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
135 GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
136 ((flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
137 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
138 return -EINVAL;
139
140 return 0;
141 }
142
143 static void linehandle_flags_to_desc_flags(u32 lflags, unsigned long *flagsp)
144 {
145 unsigned long flags = READ_ONCE(*flagsp);
146
147 assign_bit(FLAG_ACTIVE_LOW, &flags,
148 lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW);
149 assign_bit(FLAG_OPEN_DRAIN, &flags,
150 lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN);
151 assign_bit(FLAG_OPEN_SOURCE, &flags,
152 lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE);
153 assign_bit(FLAG_PULL_UP, &flags,
154 lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP);
155 assign_bit(FLAG_PULL_DOWN, &flags,
156 lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN);
157 assign_bit(FLAG_BIAS_DISABLE, &flags,
158 lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE);
159
160 WRITE_ONCE(*flagsp, flags);
161 }
162
163 static long linehandle_set_config(struct linehandle_state *lh,
164 void __user *ip)
165 {
166 struct gpiohandle_config gcnf;
167 struct gpio_desc *desc;
168 int i, ret;
169 u32 lflags;
170
171 if (copy_from_user(&gcnf, ip, sizeof(gcnf)))
172 return -EFAULT;
173
174 lflags = gcnf.flags;
175 ret = linehandle_validate_flags(lflags);
176 if (ret)
177 return ret;
178
179 /* Lines must be reconfigured explicitly as input or output. */
180 if (!(lflags & GPIOHANDLE_REQUEST_DIRECTION_FLAGS))
181 return -EINVAL;
182
183 for (i = 0; i < lh->num_descs; i++) {
184 desc = lh->descs[i];
185 linehandle_flags_to_desc_flags(lflags, &desc->flags);
186
187 if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
188 int val = !!gcnf.default_values[i];
189
190 ret = gpiod_direction_output_nonotify(desc, val);
191 if (ret)
192 return ret;
193 } else {
194 ret = gpiod_direction_input_nonotify(desc);
195 if (ret)
196 return ret;
197 }
198
199 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
200 }
201 return 0;
202 }
203
204 static long linehandle_ioctl(struct file *file, unsigned int cmd,
205 unsigned long arg)
206 {
207 struct linehandle_state *lh = file->private_data;
208 void __user *ip = (void __user *)arg;
209 struct gpiohandle_data ghd;
210 DECLARE_BITMAP(vals, GPIOHANDLES_MAX);
211 unsigned int i;
212 int ret;
213
214 guard(srcu)(&lh->gdev->srcu);
215
216 if (!rcu_access_pointer(lh->gdev->chip))
217 return -ENODEV;
218
219 switch (cmd) {
220 case GPIOHANDLE_GET_LINE_VALUES_IOCTL:
221 /* NOTE: It's okay to read values of output lines */
222 ret = gpiod_get_array_value_complex(false, true,
223 lh->num_descs, lh->descs,
224 NULL, vals);
225 if (ret)
226 return ret;
227
228 memset(&ghd, 0, sizeof(ghd));
229 for (i = 0; i < lh->num_descs; i++)
230 ghd.values[i] = test_bit(i, vals);
231
232 if (copy_to_user(ip, &ghd, sizeof(ghd)))
233 return -EFAULT;
234
235 return 0;
236 case GPIOHANDLE_SET_LINE_VALUES_IOCTL:
237 /*
238 * All line descriptors were created at once with the same
239 * flags so just check if the first one is really output.
240 */
241 if (!test_bit(FLAG_IS_OUT, &lh->descs[0]->flags))
242 return -EPERM;
243
244 if (copy_from_user(&ghd, ip, sizeof(ghd)))
245 return -EFAULT;
246
247 /* Clamp all values to [0,1] */
248 for (i = 0; i < lh->num_descs; i++)
249 __assign_bit(i, vals, ghd.values[i]);
250
251 /* Reuse the array setting function */
252 return gpiod_set_array_value_complex(false,
253 true,
254 lh->num_descs,
255 lh->descs,
256 NULL,
257 vals);
258 case GPIOHANDLE_SET_CONFIG_IOCTL:
259 return linehandle_set_config(lh, ip);
260 default:
261 return -EINVAL;
262 }
263 }
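/*
 * Illustrative sketch only (not part of the driver): userspace reading
 * values through the deprecated v1 handle interface, assuming handle_fd
 * was obtained from a prior GPIO_GET_LINEHANDLE_IOCTL request.
 *
 *	struct gpiohandle_data data;
 *
 *	ioctl(handle_fd, GPIOHANDLE_GET_LINE_VALUES_IOCTL, &data);
 *	// data.values[i] holds the value of the i-th requested line
 */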
264
265 #ifdef CONFIG_COMPAT
266 static long linehandle_ioctl_compat(struct file *file, unsigned int cmd,
267 unsigned long arg)
268 {
269 return linehandle_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
270 }
271 #endif
272
273 static void linehandle_free(struct linehandle_state *lh)
274 {
275 int i;
276
277 for (i = 0; i < lh->num_descs; i++)
278 if (lh->descs[i])
279 gpiod_free(lh->descs[i]);
280 kfree(lh->label);
281 gpio_device_put(lh->gdev);
282 kfree(lh);
283 }
284
285 static int linehandle_release(struct inode *inode, struct file *file)
286 {
287 linehandle_free(file->private_data);
288 return 0;
289 }
290
291 static const struct file_operations linehandle_fileops = {
292 .release = linehandle_release,
293 .owner = THIS_MODULE,
294 .llseek = noop_llseek,
295 .unlocked_ioctl = linehandle_ioctl,
296 #ifdef CONFIG_COMPAT
297 .compat_ioctl = linehandle_ioctl_compat,
298 #endif
299 };
300
301 static int linehandle_create(struct gpio_device *gdev, void __user *ip)
302 {
303 struct gpiohandle_request handlereq;
304 struct linehandle_state *lh;
305 struct file *file;
306 int fd, i, ret;
307 u32 lflags;
308
309 if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
310 return -EFAULT;
311 if ((handlereq.lines == 0) || (handlereq.lines > GPIOHANDLES_MAX))
312 return -EINVAL;
313
314 lflags = handlereq.flags;
315
316 ret = linehandle_validate_flags(lflags);
317 if (ret)
318 return ret;
319
320 lh = kzalloc(sizeof(*lh), GFP_KERNEL);
321 if (!lh)
322 return -ENOMEM;
323 lh->gdev = gpio_device_get(gdev);
324
325 if (handlereq.consumer_label[0] != '\0') {
326 /* label is only initialized if consumer_label is set */
327 lh->label = kstrndup(handlereq.consumer_label,
328 sizeof(handlereq.consumer_label) - 1,
329 GFP_KERNEL);
330 if (!lh->label) {
331 ret = -ENOMEM;
332 goto out_free_lh;
333 }
334 }
335
336 lh->num_descs = handlereq.lines;
337
338 /* Request each GPIO */
339 for (i = 0; i < handlereq.lines; i++) {
340 u32 offset = handlereq.lineoffsets[i];
341 struct gpio_desc *desc = gpio_device_get_desc(gdev, offset);
342
343 if (IS_ERR(desc)) {
344 ret = PTR_ERR(desc);
345 goto out_free_lh;
346 }
347
348 ret = gpiod_request_user(desc, lh->label);
349 if (ret)
350 goto out_free_lh;
351 lh->descs[i] = desc;
352 linehandle_flags_to_desc_flags(handlereq.flags, &desc->flags);
353
354 ret = gpiod_set_transitory(desc, false);
355 if (ret < 0)
356 goto out_free_lh;
357
358 /*
359 * Lines have to be requested explicitly for input
360 * or output, else the line will be treated "as is".
361 */
362 if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
363 int val = !!handlereq.default_values[i];
364
365 ret = gpiod_direction_output_nonotify(desc, val);
366 if (ret)
367 goto out_free_lh;
368 } else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
369 ret = gpiod_direction_input_nonotify(desc);
370 if (ret)
371 goto out_free_lh;
372 }
373
374 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
375
376 dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
377 offset);
378 }
379
380 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
381 if (fd < 0) {
382 ret = fd;
383 goto out_free_lh;
384 }
385
386 file = anon_inode_getfile("gpio-linehandle",
387 &linehandle_fileops,
388 lh,
389 O_RDONLY | O_CLOEXEC);
390 if (IS_ERR(file)) {
391 ret = PTR_ERR(file);
392 goto out_put_unused_fd;
393 }
394
395 handlereq.fd = fd;
396 if (copy_to_user(ip, &handlereq, sizeof(handlereq))) {
397 /*
398 * fput() will trigger the release() callback, so do not go onto
399 * the regular error cleanup path here.
400 */
401 fput(file);
402 put_unused_fd(fd);
403 return -EFAULT;
404 }
405
406 fd_install(fd, file);
407
408 dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
409 lh->num_descs);
410
411 return 0;
412
413 out_put_unused_fd:
414 put_unused_fd(fd);
415 out_free_lh:
416 linehandle_free(lh);
417 return ret;
418 }
419 #endif /* CONFIG_GPIO_CDEV_V1 */
420
421 /**
422 * struct line - contains the state of a requested line
423 * @desc: the GPIO descriptor for this line.
424 * @req: the corresponding line request
425 * @irq: the interrupt triggered in response to events on this GPIO
426 * @edflags: the edge flags, GPIO_V2_LINE_FLAG_EDGE_RISING and/or
427 * GPIO_V2_LINE_FLAG_EDGE_FALLING, indicating the edge detection applied
428 * @timestamp_ns: cache for the timestamp storing it between hardirq and
429 * IRQ thread, used to bring the timestamp close to the actual event
430 * @req_seqno: the seqno for the current edge event in the sequence of
431 * events for the corresponding line request. This is drawn from the @req.
432 * @line_seqno: the seqno for the current edge event in the sequence of
433 * events for this line.
434 * @work: the worker that implements software debouncing
435 * @sw_debounced: flag indicating if the software debouncer is active
436 * @level: the current debounced physical level of the line
437 * @hdesc: the Hardware Timestamp Engine (HTE) descriptor
438 * @raw_level: the line level at the time of event
439 * @total_discard_seq: the running counter of the discarded events
440 * @last_seqno: the last sequence number before debounce period expires
441 */
442 struct line {
443 struct gpio_desc *desc;
444 /*
445 * -- edge detector specific fields --
446 */
447 struct linereq *req;
448 unsigned int irq;
449 /*
450 * The flags for the active edge detector configuration.
451 *
452 * edflags is set by linereq_create(), linereq_free(), and
453 * linereq_set_config(), which are themselves mutually
454 * exclusive, and is accessed by edge_irq_thread(),
455 * process_hw_ts_thread() and debounce_work_func(),
456 * which can all live with a slightly stale value.
457 */
458 u64 edflags;
459 /*
460 * timestamp_ns and req_seqno are accessed only by
461 * edge_irq_handler() and edge_irq_thread(), which are themselves
462 * mutually exclusive, so no additional protection is necessary.
463 */
464 u64 timestamp_ns;
465 u32 req_seqno;
466 /*
467 * line_seqno is accessed by either edge_irq_thread() or
468 * debounce_work_func(), which are themselves mutually exclusive,
469 * so no additional protection is necessary.
470 */
471 u32 line_seqno;
472 /*
473 * -- debouncer specific fields --
474 */
475 struct delayed_work work;
476 /*
477 * sw_debounce is accessed by linereq_set_config(), which is the
478 * only setter, and linereq_get_values(), which can live with a
479 * slightly stale value.
480 */
481 unsigned int sw_debounced;
482 /*
483 * level is accessed by debounce_work_func(), which is the only
484 * setter, and linereq_get_values() which can live with a slightly
485 * stale value.
486 */
487 unsigned int level;
488 #ifdef CONFIG_HTE
489 struct hte_ts_desc hdesc;
490 /*
491 * HTE provider sets line level at the time of event. The valid
492 * value is 0 or 1 and negative value for an error.
493 */
494 int raw_level;
495 /*
496 * when sw_debounce is set on HTE enabled line, this is running
497 * counter of the discarded events.
498 */
499 u32 total_discard_seq;
500 /*
501 * when sw_debounce is set on HTE enabled line, this variable records
502 * last sequence number before debounce period expires.
503 */
504 u32 last_seqno;
505 #endif /* CONFIG_HTE */
506 };
507
508 /**
509 * struct linereq - contains the state of a userspace line request
510 * @gdev: the GPIO device the line request pertains to
511 * @label: consumer label used to tag GPIO descriptors
512 * @num_lines: the number of lines in the lines array
513 * @wait: wait queue that handles blocking reads of events
514 * @device_unregistered_nb: notifier block for receiving gdev unregister events
515 * @event_buffer_size: the number of elements allocated in @events
516 * @events: KFIFO for the GPIO events
517 * @seqno: the sequence number for edge events generated on all lines in
518 * this line request. Note that this is not used when @num_lines is 1, as
519 * the line_seqno is then the same and is cheaper to calculate.
520 * @config_mutex: mutex for serializing ioctl() calls to ensure consistency
521 * of configuration, particularly multi-step accesses to desc flags.
522 * @lines: the lines held by this line request, with @num_lines elements.
523 */
524 struct linereq {
525 struct gpio_device *gdev;
526 const char *label;
527 u32 num_lines;
528 wait_queue_head_t wait;
529 struct notifier_block device_unregistered_nb;
530 u32 event_buffer_size;
531 DECLARE_KFIFO_PTR(events, struct gpio_v2_line_event);
532 atomic_t seqno;
533 struct mutex config_mutex;
534 struct line lines[] __counted_by(num_lines);
535 };
536
537 #define GPIO_V2_LINE_BIAS_FLAGS \
538 (GPIO_V2_LINE_FLAG_BIAS_PULL_UP | \
539 GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN | \
540 GPIO_V2_LINE_FLAG_BIAS_DISABLED)
541
542 #define GPIO_V2_LINE_DIRECTION_FLAGS \
543 (GPIO_V2_LINE_FLAG_INPUT | \
544 GPIO_V2_LINE_FLAG_OUTPUT)
545
546 #define GPIO_V2_LINE_DRIVE_FLAGS \
547 (GPIO_V2_LINE_FLAG_OPEN_DRAIN | \
548 GPIO_V2_LINE_FLAG_OPEN_SOURCE)
549
550 #define GPIO_V2_LINE_EDGE_FLAGS \
551 (GPIO_V2_LINE_FLAG_EDGE_RISING | \
552 GPIO_V2_LINE_FLAG_EDGE_FALLING)
553
554 #define GPIO_V2_LINE_FLAG_EDGE_BOTH GPIO_V2_LINE_EDGE_FLAGS
555
556 #define GPIO_V2_LINE_VALID_FLAGS \
557 (GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
558 GPIO_V2_LINE_DIRECTION_FLAGS | \
559 GPIO_V2_LINE_DRIVE_FLAGS | \
560 GPIO_V2_LINE_EDGE_FLAGS | \
561 GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME | \
562 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
563 GPIO_V2_LINE_BIAS_FLAGS)
564
565 /* subset of flags relevant for edge detector configuration */
566 #define GPIO_V2_LINE_EDGE_DETECTOR_FLAGS \
567 (GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
568 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
569 GPIO_V2_LINE_EDGE_FLAGS)
570
571 static int linereq_unregistered_notify(struct notifier_block *nb,
572 unsigned long action, void *data)
573 {
574 struct linereq *lr = container_of(nb, struct linereq,
575 device_unregistered_nb);
576
577 wake_up_poll(&lr->wait, EPOLLIN | EPOLLERR);
578
579 return NOTIFY_OK;
580 }
581
582 static void linereq_put_event(struct linereq *lr,
583 struct gpio_v2_line_event *le)
584 {
585 bool overflow = false;
586
587 scoped_guard(spinlock, &lr->wait.lock) {
588 if (kfifo_is_full(&lr->events)) {
589 overflow = true;
590 kfifo_skip(&lr->events);
591 }
592 kfifo_in(&lr->events, le, 1);
593 }
594 if (!overflow)
595 wake_up_poll(&lr->wait, EPOLLIN);
596 else
597 pr_debug_ratelimited("event FIFO is full - event dropped\n");
598 }
599
600 static u64 line_event_timestamp(struct line *line)
601 {
602 if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
603 return ktime_get_real_ns();
604 else if (IS_ENABLED(CONFIG_HTE) &&
605 test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
606 return line->timestamp_ns;
607
608 return ktime_get_ns();
609 }
610
611 static u32 line_event_id(int level)
612 {
613 return level ? GPIO_V2_LINE_EVENT_RISING_EDGE :
614 GPIO_V2_LINE_EVENT_FALLING_EDGE;
615 }
616
617 static inline char *make_irq_label(const char *orig)
618 {
619 char *new;
620
621 if (!orig)
622 return NULL;
623
624 new = kstrdup_and_replace(orig, '/', ':', GFP_KERNEL);
625 if (!new)
626 return ERR_PTR(-ENOMEM);
627
628 return new;
629 }
630
631 static inline void free_irq_label(const char *label)
632 {
633 kfree(label);
634 }
635
636 #ifdef CONFIG_HTE
637
638 static enum hte_return process_hw_ts_thread(void *p)
639 {
640 struct line *line;
641 struct linereq *lr;
642 struct gpio_v2_line_event le;
643 u64 edflags;
644 int level;
645
646 if (!p)
647 return HTE_CB_HANDLED;
648
649 line = p;
650 lr = line->req;
651
652 memset(&le, 0, sizeof(le));
653
654 le.timestamp_ns = line->timestamp_ns;
655 edflags = READ_ONCE(line->edflags);
656
657 switch (edflags & GPIO_V2_LINE_EDGE_FLAGS) {
658 case GPIO_V2_LINE_FLAG_EDGE_BOTH:
659 level = (line->raw_level >= 0) ?
660 line->raw_level :
661 gpiod_get_raw_value_cansleep(line->desc);
662
663 if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
664 level = !level;
665
666 le.id = line_event_id(level);
667 break;
668 case GPIO_V2_LINE_FLAG_EDGE_RISING:
669 le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
670 break;
671 case GPIO_V2_LINE_FLAG_EDGE_FALLING:
672 le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
673 break;
674 default:
675 return HTE_CB_HANDLED;
676 }
677 le.line_seqno = line->line_seqno;
678 le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
679 le.offset = gpio_chip_hwgpio(line->desc);
680
681 linereq_put_event(lr, &le);
682
683 return HTE_CB_HANDLED;
684 }
685
686 static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
687 {
688 struct line *line;
689 struct linereq *lr;
690 int diff_seqno = 0;
691
692 if (!ts || !p)
693 return HTE_CB_HANDLED;
694
695 line = p;
696 line->timestamp_ns = ts->tsc;
697 line->raw_level = ts->raw_level;
698 lr = line->req;
699
700 if (READ_ONCE(line->sw_debounced)) {
701 line->total_discard_seq++;
702 line->last_seqno = ts->seq;
703 mod_delayed_work(system_wq, &line->work,
704 usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
705 } else {
706 if (unlikely(ts->seq < line->line_seqno))
707 return HTE_CB_HANDLED;
708
709 diff_seqno = ts->seq - line->line_seqno;
710 line->line_seqno = ts->seq;
711 if (lr->num_lines != 1)
712 line->req_seqno = atomic_add_return(diff_seqno,
713 &lr->seqno);
714
715 return HTE_RUN_SECOND_CB;
716 }
717
718 return HTE_CB_HANDLED;
719 }
720
721 static int hte_edge_setup(struct line *line, u64 eflags)
722 {
723 int ret;
724 unsigned long flags = 0;
725 struct hte_ts_desc *hdesc = &line->hdesc;
726
727 if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
728 flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
729 HTE_FALLING_EDGE_TS :
730 HTE_RISING_EDGE_TS;
731 if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
732 flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
733 HTE_RISING_EDGE_TS :
734 HTE_FALLING_EDGE_TS;
735
736 line->total_discard_seq = 0;
737
738 hte_init_line_attr(hdesc, desc_to_gpio(line->desc), flags, NULL,
739 line->desc);
740
741 ret = hte_ts_get(NULL, hdesc, 0);
742 if (ret)
743 return ret;
744
745 return hte_request_ts_ns(hdesc, process_hw_ts, process_hw_ts_thread,
746 line);
747 }
748
749 #else
750
751 static int hte_edge_setup(struct line *line, u64 eflags)
752 {
753 return 0;
754 }
755 #endif /* CONFIG_HTE */
756
757 static irqreturn_t edge_irq_thread(int irq, void *p)
758 {
759 struct line *line = p;
760 struct linereq *lr = line->req;
761 struct gpio_v2_line_event le;
762
763 /* Do not leak kernel stack to userspace */
764 memset(&le, 0, sizeof(le));
765
766 if (line->timestamp_ns) {
767 le.timestamp_ns = line->timestamp_ns;
768 } else {
769 /*
770 * We may be running from a nested threaded interrupt in
771 * which case we didn't get the timestamp from
772 * edge_irq_handler().
773 */
774 le.timestamp_ns = line_event_timestamp(line);
775 if (lr->num_lines != 1)
776 line->req_seqno = atomic_inc_return(&lr->seqno);
777 }
778 line->timestamp_ns = 0;
779
780 switch (READ_ONCE(line->edflags) & GPIO_V2_LINE_EDGE_FLAGS) {
781 case GPIO_V2_LINE_FLAG_EDGE_BOTH:
782 le.id = line_event_id(gpiod_get_value_cansleep(line->desc));
783 break;
784 case GPIO_V2_LINE_FLAG_EDGE_RISING:
785 le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
786 break;
787 case GPIO_V2_LINE_FLAG_EDGE_FALLING:
788 le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
789 break;
790 default:
791 return IRQ_NONE;
792 }
793 line->line_seqno++;
794 le.line_seqno = line->line_seqno;
795 le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
796 le.offset = gpio_chip_hwgpio(line->desc);
797
798 linereq_put_event(lr, &le);
799
800 return IRQ_HANDLED;
801 }
802
803 static irqreturn_t edge_irq_handler(int irq, void *p)
804 {
805 struct line *line = p;
806 struct linereq *lr = line->req;
807
808 /*
809 * Just store the timestamp in hardirq context so we get it as
810 * close in time as possible to the actual event.
811 */
812 line->timestamp_ns = line_event_timestamp(line);
813
814 if (lr->num_lines != 1)
815 line->req_seqno = atomic_inc_return(&lr->seqno);
816
817 return IRQ_WAKE_THREAD;
818 }
819
820 /*
821 * returns the current debounced logical value.
822 */
823 static bool debounced_value(struct line *line)
824 {
825 bool value;
826
827 /*
828 * minor race - debouncer may be stopped here, so edge_detector_stop()
829 * must leave the value unchanged so the following will read the level
830 * from when the debouncer was last running.
831 */
832 value = READ_ONCE(line->level);
833
834 if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
835 value = !value;
836
837 return value;
838 }
839
840 static irqreturn_t debounce_irq_handler(int irq, void *p)
841 {
842 struct line *line = p;
843
844 mod_delayed_work(system_wq, &line->work,
845 usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
846
847 return IRQ_HANDLED;
848 }
849
850 static void debounce_work_func(struct work_struct *work)
851 {
852 struct gpio_v2_line_event le;
853 struct line *line = container_of(work, struct line, work.work);
854 struct linereq *lr;
855 u64 eflags, edflags = READ_ONCE(line->edflags);
856 int level = -1;
857 #ifdef CONFIG_HTE
858 int diff_seqno;
859
860 if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
861 level = line->raw_level;
862 #endif
863 if (level < 0)
864 level = gpiod_get_raw_value_cansleep(line->desc);
865 if (level < 0) {
866 pr_debug_ratelimited("debouncer failed to read line value\n");
867 return;
868 }
869
870 if (READ_ONCE(line->level) == level)
871 return;
872
873 WRITE_ONCE(line->level, level);
874
875 /* -- edge detection -- */
876 eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
877 if (!eflags)
878 return;
879
880 /* switch from physical level to logical - if they differ */
881 if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
882 level = !level;
883
884 /* ignore edges that are not being monitored */
885 if (((eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) && !level) ||
886 ((eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) && level))
887 return;
888
889 /* Do not leak kernel stack to userspace */
890 memset(&le, 0, sizeof(le));
891
892 lr = line->req;
893 le.timestamp_ns = line_event_timestamp(line);
894 le.offset = gpio_chip_hwgpio(line->desc);
895 #ifdef CONFIG_HTE
896 if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) {
897 /* discard events except the last one */
898 line->total_discard_seq -= 1;
899 diff_seqno = line->last_seqno - line->total_discard_seq -
900 line->line_seqno;
901 line->line_seqno = line->last_seqno - line->total_discard_seq;
902 le.line_seqno = line->line_seqno;
903 le.seqno = (lr->num_lines == 1) ?
904 le.line_seqno : atomic_add_return(diff_seqno, &lr->seqno);
905 } else
906 #endif /* CONFIG_HTE */
907 {
908 line->line_seqno++;
909 le.line_seqno = line->line_seqno;
910 le.seqno = (lr->num_lines == 1) ?
911 le.line_seqno : atomic_inc_return(&lr->seqno);
912 }
913
914 le.id = line_event_id(level);
915
916 linereq_put_event(lr, &le);
917 }
918
919 static int debounce_setup(struct line *line, unsigned int debounce_period_us)
920 {
921 unsigned long irqflags;
922 int ret, level, irq;
923 char *label;
924
925 /*
926 * Try hardware. Skip gpiod_set_config() to avoid emitting two
927 * CHANGED_CONFIG line state events.
928 */
929 ret = gpio_do_set_config(line->desc,
930 pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE,
931 debounce_period_us));
932 if (ret != -ENOTSUPP)
933 return ret;
934
935 if (debounce_period_us) {
936 /* setup software debounce */
937 level = gpiod_get_raw_value_cansleep(line->desc);
938 if (level < 0)
939 return level;
940
941 if (!(IS_ENABLED(CONFIG_HTE) &&
942 test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))) {
943 irq = gpiod_to_irq(line->desc);
944 if (irq < 0)
945 return -ENXIO;
946
947 label = make_irq_label(line->req->label);
948 if (IS_ERR(label))
949 return -ENOMEM;
950
951 irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
952 ret = request_irq(irq, debounce_irq_handler, irqflags,
953 label, line);
954 if (ret) {
955 free_irq_label(label);
956 return ret;
957 }
958 line->irq = irq;
959 } else {
960 ret = hte_edge_setup(line, GPIO_V2_LINE_FLAG_EDGE_BOTH);
961 if (ret)
962 return ret;
963 }
964
965 WRITE_ONCE(line->level, level);
966 WRITE_ONCE(line->sw_debounced, 1);
967 }
968 return 0;
969 }
970
971 static bool gpio_v2_line_config_debounced(struct gpio_v2_line_config *lc,
972 unsigned int line_idx)
973 {
974 unsigned int i;
975 u64 mask = BIT_ULL(line_idx);
976
977 for (i = 0; i < lc->num_attrs; i++) {
978 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
979 (lc->attrs[i].mask & mask))
980 return true;
981 }
982 return false;
983 }
984
985 static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
986 unsigned int line_idx)
987 {
988 unsigned int i;
989 u64 mask = BIT_ULL(line_idx);
990
991 for (i = 0; i < lc->num_attrs; i++) {
992 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
993 (lc->attrs[i].mask & mask))
994 return lc->attrs[i].attr.debounce_period_us;
995 }
996 return 0;
997 }
998
999 static void edge_detector_stop(struct line *line)
1000 {
1001 if (line->irq) {
1002 free_irq_label(free_irq(line->irq, line));
1003 line->irq = 0;
1004 }
1005
1006 #ifdef CONFIG_HTE
1007 if (READ_ONCE(line->edflags) & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
1008 hte_ts_put(&line->hdesc);
1009 #endif
1010
1011 cancel_delayed_work_sync(&line->work);
1012 WRITE_ONCE(line->sw_debounced, 0);
1013 WRITE_ONCE(line->edflags, 0);
1014 if (line->desc)
1015 WRITE_ONCE(line->desc->debounce_period_us, 0);
1016 /* do not change line->level - see comment in debounced_value() */
1017 }
1018
1019 static int edge_detector_fifo_init(struct linereq *req)
1020 {
1021 if (kfifo_initialized(&req->events))
1022 return 0;
1023
1024 return kfifo_alloc(&req->events, req->event_buffer_size, GFP_KERNEL);
1025 }
1026
1027 static int edge_detector_setup(struct line *line,
1028 struct gpio_v2_line_config *lc,
1029 unsigned int line_idx, u64 edflags)
1030 {
1031 u32 debounce_period_us;
1032 unsigned long irqflags = 0;
1033 u64 eflags;
1034 int irq, ret;
1035 char *label;
1036
1037 eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
1038 if (eflags) {
1039 ret = edge_detector_fifo_init(line->req);
1040 if (ret)
1041 return ret;
1042 }
1043 if (gpio_v2_line_config_debounced(lc, line_idx)) {
1044 debounce_period_us = gpio_v2_line_config_debounce_period(lc, line_idx);
1045 ret = debounce_setup(line, debounce_period_us);
1046 if (ret)
1047 return ret;
1048 WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
1049 }
1050
1051 /* detection disabled or sw debouncer will provide edge detection */
1052 if (!eflags || READ_ONCE(line->sw_debounced))
1053 return 0;
1054
1055 if (IS_ENABLED(CONFIG_HTE) &&
1056 (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
1057 return hte_edge_setup(line, edflags);
1058
1059 irq = gpiod_to_irq(line->desc);
1060 if (irq < 0)
1061 return -ENXIO;
1062
1063 if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
1064 irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
1065 IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
1066 if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
1067 irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
1068 IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
1069 irqflags |= IRQF_ONESHOT;
1070
1071 label = make_irq_label(line->req->label);
1072 if (IS_ERR(label))
1073 return PTR_ERR(label);
1074
1075 /* Request a thread to read the events */
1076 ret = request_threaded_irq(irq, edge_irq_handler, edge_irq_thread,
1077 irqflags, label, line);
1078 if (ret) {
1079 free_irq_label(label);
1080 return ret;
1081 }
1082
1083 line->irq = irq;
1084 return 0;
1085 }
1086
1087 static int edge_detector_update(struct line *line,
1088 struct gpio_v2_line_config *lc,
1089 unsigned int line_idx, u64 edflags)
1090 {
1091 u64 active_edflags = READ_ONCE(line->edflags);
1092 unsigned int debounce_period_us =
1093 gpio_v2_line_config_debounce_period(lc, line_idx);
1094
1095 if ((active_edflags == edflags) &&
1096 (READ_ONCE(line->desc->debounce_period_us) == debounce_period_us))
1097 return 0;
1098
1099 /* sw debounced and still will be...*/
1100 if (debounce_period_us && READ_ONCE(line->sw_debounced)) {
1101 WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
1102 /*
1103 * ensure event fifo is initialised if edge detection
1104 * is now enabled.
1105 */
1106 if (edflags & GPIO_V2_LINE_EDGE_FLAGS)
1107 return edge_detector_fifo_init(line->req);
1108
1109 return 0;
1110 }
1111
1112 /* reconfiguring edge detection or sw debounce being disabled */
1113 if ((line->irq && !READ_ONCE(line->sw_debounced)) ||
1114 (active_edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) ||
1115 (!debounce_period_us && READ_ONCE(line->sw_debounced)))
1116 edge_detector_stop(line);
1117
1118 return edge_detector_setup(line, lc, line_idx, edflags);
1119 }
1120
1121 static u64 gpio_v2_line_config_flags(struct gpio_v2_line_config *lc,
1122 unsigned int line_idx)
1123 {
1124 unsigned int i;
1125 u64 mask = BIT_ULL(line_idx);
1126
1127 for (i = 0; i < lc->num_attrs; i++) {
1128 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_FLAGS) &&
1129 (lc->attrs[i].mask & mask))
1130 return lc->attrs[i].attr.flags;
1131 }
1132 return lc->flags;
1133 }
1134
1135 static int gpio_v2_line_config_output_value(struct gpio_v2_line_config *lc,
1136 unsigned int line_idx)
1137 {
1138 unsigned int i;
1139 u64 mask = BIT_ULL(line_idx);
1140
1141 for (i = 0; i < lc->num_attrs; i++) {
1142 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES) &&
1143 (lc->attrs[i].mask & mask))
1144 return !!(lc->attrs[i].attr.values & mask);
1145 }
1146 return 0;
1147 }
1148
1149 static int gpio_v2_line_flags_validate(u64 flags)
1150 {
1151 /* Return an error if an unknown flag is set */
1152 if (flags & ~GPIO_V2_LINE_VALID_FLAGS)
1153 return -EINVAL;
1154
1155 if (!IS_ENABLED(CONFIG_HTE) &&
1156 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
1157 return -EOPNOTSUPP;
1158
1159 /*
1160 * Do not allow both INPUT and OUTPUT flags to be set as they are
1161 * contradictory.
1162 */
1163 if ((flags & GPIO_V2_LINE_FLAG_INPUT) &&
1164 (flags & GPIO_V2_LINE_FLAG_OUTPUT))
1165 return -EINVAL;
1166
1167 /* Only allow one event clock source */
1168 if (IS_ENABLED(CONFIG_HTE) &&
1169 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME) &&
1170 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
1171 return -EINVAL;
1172
1173 /* Edge detection requires explicit input. */
1174 if ((flags & GPIO_V2_LINE_EDGE_FLAGS) &&
1175 !(flags & GPIO_V2_LINE_FLAG_INPUT))
1176 return -EINVAL;
1177
1178 /*
1179 * Do not allow OPEN_SOURCE and OPEN_DRAIN flags in a single
1180 * request. If the hardware actually supports enabling both at the
1181 * same time the electrical result would be disastrous.
1182 */
1183 if ((flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN) &&
1184 (flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE))
1185 return -EINVAL;
1186
1187 /* Drive requires explicit output direction. */
1188 if ((flags & GPIO_V2_LINE_DRIVE_FLAGS) &&
1189 !(flags & GPIO_V2_LINE_FLAG_OUTPUT))
1190 return -EINVAL;
1191
1192 /* Bias requires explicit direction. */
1193 if ((flags & GPIO_V2_LINE_BIAS_FLAGS) &&
1194 !(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
1195 return -EINVAL;
1196
1197 /* Only one bias flag can be set. */
1198 if (((flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED) &&
1199 (flags & (GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN |
1200 GPIO_V2_LINE_FLAG_BIAS_PULL_UP))) ||
1201 ((flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN) &&
1202 (flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)))
1203 return -EINVAL;
1204
1205 return 0;
1206 }
1207
1208 static int gpio_v2_line_config_validate(struct gpio_v2_line_config *lc,
1209 unsigned int num_lines)
1210 {
1211 unsigned int i;
1212 u64 flags;
1213 int ret;
1214
1215 if (lc->num_attrs > GPIO_V2_LINE_NUM_ATTRS_MAX)
1216 return -EINVAL;
1217
1218 if (!mem_is_zero(lc->padding, sizeof(lc->padding)))
1219 return -EINVAL;
1220
1221 for (i = 0; i < num_lines; i++) {
1222 flags = gpio_v2_line_config_flags(lc, i);
1223 ret = gpio_v2_line_flags_validate(flags);
1224 if (ret)
1225 return ret;
1226
1227 /* debounce requires explicit input */
1228 if (gpio_v2_line_config_debounced(lc, i) &&
1229 !(flags & GPIO_V2_LINE_FLAG_INPUT))
1230 return -EINVAL;
1231 }
1232 return 0;
1233 }
1234
1235 static void gpio_v2_line_config_flags_to_desc_flags(u64 lflags,
1236 unsigned long *flagsp)
1237 {
1238 unsigned long flags = READ_ONCE(*flagsp);
1239
1240 assign_bit(FLAG_ACTIVE_LOW, &flags,
1241 lflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW);
1242
1243 if (lflags & GPIO_V2_LINE_FLAG_OUTPUT)
1244 set_bit(FLAG_IS_OUT, &flags);
1245 else if (lflags & GPIO_V2_LINE_FLAG_INPUT)
1246 clear_bit(FLAG_IS_OUT, &flags);
1247
1248 assign_bit(FLAG_EDGE_RISING, &flags,
1249 lflags & GPIO_V2_LINE_FLAG_EDGE_RISING);
1250 assign_bit(FLAG_EDGE_FALLING, &flags,
1251 lflags & GPIO_V2_LINE_FLAG_EDGE_FALLING);
1252
1253 assign_bit(FLAG_OPEN_DRAIN, &flags,
1254 lflags & GPIO_V2_LINE_FLAG_OPEN_DRAIN);
1255 assign_bit(FLAG_OPEN_SOURCE, &flags,
1256 lflags & GPIO_V2_LINE_FLAG_OPEN_SOURCE);
1257
1258 assign_bit(FLAG_PULL_UP, &flags,
1259 lflags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP);
1260 assign_bit(FLAG_PULL_DOWN, &flags,
1261 lflags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN);
1262 assign_bit(FLAG_BIAS_DISABLE, &flags,
1263 lflags & GPIO_V2_LINE_FLAG_BIAS_DISABLED);
1264
1265 assign_bit(FLAG_EVENT_CLOCK_REALTIME, &flags,
1266 lflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME);
1267 assign_bit(FLAG_EVENT_CLOCK_HTE, &flags,
1268 lflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
1269
1270 WRITE_ONCE(*flagsp, flags);
1271 }
1272
1273 static long linereq_get_values(struct linereq *lr, void __user *ip)
1274 {
1275 struct gpio_v2_line_values lv;
1276 DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
1277 struct gpio_desc **descs;
1278 unsigned int i, didx, num_get;
1279 bool val;
1280 int ret;
1281
1282 /* NOTE: It's ok to read values of output lines. */
1283 if (copy_from_user(&lv, ip, sizeof(lv)))
1284 return -EFAULT;
1285
1286 /*
1287 * gpiod_get_array_value_complex() requires compacted desc and val
1288 * arrays, rather than the sparse ones in lv.
1289 * Calculation of num_get and construction of the desc array is
1290 * optimized to avoid allocation for the desc array for the common
1291 * num_get == 1 case.
1292 */
1293 /* scan requested lines to calculate the subset to get */
1294 for (num_get = 0, i = 0; i < lr->num_lines; i++) {
1295 if (lv.mask & BIT_ULL(i)) {
1296 num_get++;
1297 /* capture desc for the num_get == 1 case */
1298 descs = &lr->lines[i].desc;
1299 }
1300 }
1301
1302 if (num_get == 0)
1303 return -EINVAL;
1304
1305 if (num_get != 1) {
1306 /* build compacted desc array */
1307 descs = kmalloc_array(num_get, sizeof(*descs), GFP_KERNEL);
1308 if (!descs)
1309 return -ENOMEM;
1310 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1311 if (lv.mask & BIT_ULL(i)) {
1312 descs[didx] = lr->lines[i].desc;
1313 didx++;
1314 }
1315 }
1316 }
1317 ret = gpiod_get_array_value_complex(false, true, num_get,
1318 descs, NULL, vals);
1319
1320 if (num_get != 1)
1321 kfree(descs);
1322 if (ret)
1323 return ret;
1324
1325 lv.bits = 0;
1326 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1327 /* unpack compacted vals for the response */
1328 if (lv.mask & BIT_ULL(i)) {
1329 if (lr->lines[i].sw_debounced)
1330 val = debounced_value(&lr->lines[i]);
1331 else
1332 val = test_bit(didx, vals);
1333 if (val)
1334 lv.bits |= BIT_ULL(i);
1335 didx++;
1336 }
1337 }
1338
1339 if (copy_to_user(ip, &lv, sizeof(lv)))
1340 return -EFAULT;
1341
1342 return 0;
1343 }
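/*
 * Illustrative sketch only (not part of the driver): how userspace might
 * use the sparse mask/bits encoding unpacked above, assuming req_fd was
 * obtained from a prior GPIO_V2_GET_LINE_IOCTL request.
 *
 *	struct gpio_v2_line_values lv = { 0 };
 *
 *	lv.mask = (1ULL << 0) | (1ULL << 2);	// lines 0 and 2 of the request
 *	ioctl(req_fd, GPIO_V2_LINE_GET_VALUES_IOCTL, &lv);
 *	// for each bit i set in lv.mask, bit i of lv.bits now holds the
 *	// logical value of requested line i
 */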
1344
1345 static long linereq_set_values(struct linereq *lr, void __user *ip)
1346 {
1347 DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
1348 struct gpio_v2_line_values lv;
1349 struct gpio_desc **descs;
1350 unsigned int i, didx, num_set;
1351 int ret;
1352
1353 if (copy_from_user(&lv, ip, sizeof(lv)))
1354 return -EFAULT;
1355
1356 guard(mutex)(&lr->config_mutex);
1357
1358 /*
1359 * gpiod_set_array_value_complex() requires compacted desc and val
1360 * arrays, rather than the sparse ones in lv.
1361 * Calculation of num_set and construction of the descs and vals arrays
1362 * is optimized to minimize scanning the lv->mask, and to avoid
1363 * allocation for the desc array for the common num_set == 1 case.
1364 */
1365 bitmap_zero(vals, GPIO_V2_LINES_MAX);
1366 /* scan requested lines to determine the subset to be set */
1367 for (num_set = 0, i = 0; i < lr->num_lines; i++) {
1368 if (lv.mask & BIT_ULL(i)) {
1369 /* setting inputs is not allowed */
1370 if (!test_bit(FLAG_IS_OUT, &lr->lines[i].desc->flags))
1371 return -EPERM;
1372 /* add to compacted values */
1373 if (lv.bits & BIT_ULL(i))
1374 __set_bit(num_set, vals);
1375 num_set++;
1376 /* capture desc for the num_set == 1 case */
1377 descs = &lr->lines[i].desc;
1378 }
1379 }
1380 if (num_set == 0)
1381 return -EINVAL;
1382
1383 if (num_set != 1) {
1384 /* build compacted desc array */
1385 descs = kmalloc_array(num_set, sizeof(*descs), GFP_KERNEL);
1386 if (!descs)
1387 return -ENOMEM;
1388 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1389 if (lv.mask & BIT_ULL(i)) {
1390 descs[didx] = lr->lines[i].desc;
1391 didx++;
1392 }
1393 }
1394 }
1395 ret = gpiod_set_array_value_complex(false, true, num_set,
1396 descs, NULL, vals);
1397
1398 if (num_set != 1)
1399 kfree(descs);
1400 return ret;
1401 }
1402
1403 static long linereq_set_config(struct linereq *lr, void __user *ip)
1404 {
1405 struct gpio_v2_line_config lc;
1406 struct gpio_desc *desc;
1407 struct line *line;
1408 unsigned int i;
1409 u64 flags, edflags;
1410 int ret;
1411
1412 if (copy_from_user(&lc, ip, sizeof(lc)))
1413 return -EFAULT;
1414
1415 ret = gpio_v2_line_config_validate(&lc, lr->num_lines);
1416 if (ret)
1417 return ret;
1418
1419 guard(mutex)(&lr->config_mutex);
1420
1421 for (i = 0; i < lr->num_lines; i++) {
1422 line = &lr->lines[i];
1423 desc = lr->lines[i].desc;
1424 flags = gpio_v2_line_config_flags(&lc, i);
1425 /*
1426 * Lines not explicitly reconfigured as input or output
1427 * are left unchanged.
1428 */
1429 if (!(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
1430 continue;
1431 gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
1432 edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
1433 if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
1434 int val = gpio_v2_line_config_output_value(&lc, i);
1435
1436 edge_detector_stop(line);
1437 ret = gpiod_direction_output_nonotify(desc, val);
1438 if (ret)
1439 return ret;
1440 } else {
1441 ret = gpiod_direction_input_nonotify(desc);
1442 if (ret)
1443 return ret;
1444
1445 ret = edge_detector_update(line, &lc, i, edflags);
1446 if (ret)
1447 return ret;
1448 }
1449
1450 WRITE_ONCE(line->edflags, edflags);
1451
1452 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
1453 }
1454 return 0;
1455 }
1456
1457 static long linereq_ioctl(struct file *file, unsigned int cmd,
1458 unsigned long arg)
1459 {
1460 struct linereq *lr = file->private_data;
1461 void __user *ip = (void __user *)arg;
1462
1463 guard(srcu)(&lr->gdev->srcu);
1464
1465 if (!rcu_access_pointer(lr->gdev->chip))
1466 return -ENODEV;
1467
1468 switch (cmd) {
1469 case GPIO_V2_LINE_GET_VALUES_IOCTL:
1470 return linereq_get_values(lr, ip);
1471 case GPIO_V2_LINE_SET_VALUES_IOCTL:
1472 return linereq_set_values(lr, ip);
1473 case GPIO_V2_LINE_SET_CONFIG_IOCTL:
1474 return linereq_set_config(lr, ip);
1475 default:
1476 return -EINVAL;
1477 }
1478 }
1479
1480 #ifdef CONFIG_COMPAT
1481 static long linereq_ioctl_compat(struct file *file, unsigned int cmd,
1482 unsigned long arg)
1483 {
1484 return linereq_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
1485 }
1486 #endif
1487
1488 static __poll_t linereq_poll(struct file *file,
1489 struct poll_table_struct *wait)
1490 {
1491 struct linereq *lr = file->private_data;
1492 __poll_t events = 0;
1493
1494 guard(srcu)(&lr->gdev->srcu);
1495
1496 if (!rcu_access_pointer(lr->gdev->chip))
1497 return EPOLLHUP | EPOLLERR;
1498
1499 poll_wait(file, &lr->wait, wait);
1500
1501 if (!kfifo_is_empty_spinlocked_noirqsave(&lr->events,
1502 &lr->wait.lock))
1503 events = EPOLLIN | EPOLLRDNORM;
1504
1505 return events;
1506 }
1507
1508 static ssize_t linereq_read(struct file *file, char __user *buf,
1509 size_t count, loff_t *f_ps)
1510 {
1511 struct linereq *lr = file->private_data;
1512 struct gpio_v2_line_event le;
1513 ssize_t bytes_read = 0;
1514 int ret;
1515
1516 guard(srcu)(&lr->gdev->srcu);
1517
1518 if (!rcu_access_pointer(lr->gdev->chip))
1519 return -ENODEV;
1520
1521 if (count < sizeof(le))
1522 return -EINVAL;
1523
1524 do {
1525 scoped_guard(spinlock, &lr->wait.lock) {
1526 if (kfifo_is_empty(&lr->events)) {
1527 if (bytes_read)
1528 return bytes_read;
1529
1530 if (file->f_flags & O_NONBLOCK)
1531 return -EAGAIN;
1532
1533 ret = wait_event_interruptible_locked(lr->wait,
1534 !kfifo_is_empty(&lr->events));
1535 if (ret)
1536 return ret;
1537 }
1538
1539 if (kfifo_out(&lr->events, &le, 1) != 1) {
1540 /*
1541 * This should never happen - we hold the
1542 * lock from the moment we learned the fifo
1543 * is no longer empty until now.
1544 */
1545 WARN(1, "failed to read from non-empty kfifo");
1546 return -EIO;
1547 }
1548 }
1549
1550 if (copy_to_user(buf + bytes_read, &le, sizeof(le)))
1551 return -EFAULT;
1552 bytes_read += sizeof(le);
1553 } while (count >= bytes_read + sizeof(le));
1554
1555 return bytes_read;
1556 }
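/*
 * Illustrative sketch only (not part of the driver): blocking consumption
 * of edge events from the request fd, assuming at least one line was
 * requested with GPIO_V2_LINE_FLAG_EDGE_RISING and/or _FALLING set.
 *
 *	struct gpio_v2_line_event ev;
 *
 *	while (read(req_fd, &ev, sizeof(ev)) == sizeof(ev))
 *		printf("offset %u id %u @ %llu ns\n", ev.offset, ev.id,
 *		       (unsigned long long)ev.timestamp_ns);
 */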
1557
1558 static void linereq_free(struct linereq *lr)
1559 {
1560 unsigned int i;
1561
1562 if (lr->device_unregistered_nb.notifier_call)
1563 blocking_notifier_chain_unregister(&lr->gdev->device_notifier,
1564 &lr->device_unregistered_nb);
1565
1566 for (i = 0; i < lr->num_lines; i++) {
1567 if (lr->lines[i].desc) {
1568 edge_detector_stop(&lr->lines[i]);
1569 gpiod_free(lr->lines[i].desc);
1570 }
1571 }
1572 kfifo_free(&lr->events);
1573 kfree(lr->label);
1574 gpio_device_put(lr->gdev);
1575 kvfree(lr);
1576 }
1577
1578 static int linereq_release(struct inode *inode, struct file *file)
1579 {
1580 struct linereq *lr = file->private_data;
1581
1582 linereq_free(lr);
1583 return 0;
1584 }
1585
1586 #ifdef CONFIG_PROC_FS
1587 static void linereq_show_fdinfo(struct seq_file *out, struct file *file)
1588 {
1589 struct linereq *lr = file->private_data;
1590 struct device *dev = &lr->gdev->dev;
1591 u16 i;
1592
1593 seq_printf(out, "gpio-chip:\t%s\n", dev_name(dev));
1594
1595 for (i = 0; i < lr->num_lines; i++)
1596 seq_printf(out, "gpio-line:\t%d\n",
1597 gpio_chip_hwgpio(lr->lines[i].desc));
1598 }
1599 #endif
1600
1601 static const struct file_operations line_fileops = {
1602 .release = linereq_release,
1603 .read = linereq_read,
1604 .poll = linereq_poll,
1605 .owner = THIS_MODULE,
1606 .llseek = noop_llseek,
1607 .unlocked_ioctl = linereq_ioctl,
1608 #ifdef CONFIG_COMPAT
1609 .compat_ioctl = linereq_ioctl_compat,
1610 #endif
1611 #ifdef CONFIG_PROC_FS
1612 .show_fdinfo = linereq_show_fdinfo,
1613 #endif
1614 };
1615
1616 static int linereq_create(struct gpio_device *gdev, void __user *ip)
1617 {
1618 struct gpio_v2_line_request ulr;
1619 struct gpio_v2_line_config *lc;
1620 struct linereq *lr;
1621 struct file *file;
1622 u64 flags, edflags;
1623 unsigned int i;
1624 int fd, ret;
1625
1626 if (copy_from_user(&ulr, ip, sizeof(ulr)))
1627 return -EFAULT;
1628
1629 if ((ulr.num_lines == 0) || (ulr.num_lines > GPIO_V2_LINES_MAX))
1630 return -EINVAL;
1631
1632 if (!mem_is_zero(ulr.padding, sizeof(ulr.padding)))
1633 return -EINVAL;
1634
1635 lc = &ulr.config;
1636 ret = gpio_v2_line_config_validate(lc, ulr.num_lines);
1637 if (ret)
1638 return ret;
1639
1640 lr = kvzalloc(struct_size(lr, lines, ulr.num_lines), GFP_KERNEL);
1641 if (!lr)
1642 return -ENOMEM;
1643 lr->num_lines = ulr.num_lines;
1644
1645 lr->gdev = gpio_device_get(gdev);
1646
1647 for (i = 0; i < ulr.num_lines; i++) {
1648 lr->lines[i].req = lr;
1649 WRITE_ONCE(lr->lines[i].sw_debounced, 0);
1650 INIT_DELAYED_WORK(&lr->lines[i].work, debounce_work_func);
1651 }
1652
1653 if (ulr.consumer[0] != '\0') {
1654 /* label is only initialized if consumer is set */
1655 lr->label = kstrndup(ulr.consumer, sizeof(ulr.consumer) - 1,
1656 GFP_KERNEL);
1657 if (!lr->label) {
1658 ret = -ENOMEM;
1659 goto out_free_linereq;
1660 }
1661 }
1662
1663 mutex_init(&lr->config_mutex);
1664 init_waitqueue_head(&lr->wait);
1665 INIT_KFIFO(lr->events);
1666 lr->event_buffer_size = ulr.event_buffer_size;
1667 if (lr->event_buffer_size == 0)
1668 lr->event_buffer_size = ulr.num_lines * 16;
1669 else if (lr->event_buffer_size > GPIO_V2_LINES_MAX * 16)
1670 lr->event_buffer_size = GPIO_V2_LINES_MAX * 16;
1671
1672 atomic_set(&lr->seqno, 0);
1673
1674 /* Request each GPIO */
1675 for (i = 0; i < ulr.num_lines; i++) {
1676 u32 offset = ulr.offsets[i];
1677 struct gpio_desc *desc = gpio_device_get_desc(gdev, offset);
1678
1679 if (IS_ERR(desc)) {
1680 ret = PTR_ERR(desc);
1681 goto out_free_linereq;
1682 }
1683
1684 ret = gpiod_request_user(desc, lr->label);
1685 if (ret)
1686 goto out_free_linereq;
1687
1688 lr->lines[i].desc = desc;
1689 flags = gpio_v2_line_config_flags(lc, i);
1690 gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
1691
1692 ret = gpiod_set_transitory(desc, false);
1693 if (ret < 0)
1694 goto out_free_linereq;
1695
1696 edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
1697 /*
1698 * Lines have to be requested explicitly for input
1699 * or output, else the line will be treated "as is".
1700 */
1701 if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
1702 int val = gpio_v2_line_config_output_value(lc, i);
1703
1704 ret = gpiod_direction_output_nonotify(desc, val);
1705 if (ret)
1706 goto out_free_linereq;
1707 } else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
1708 ret = gpiod_direction_input_nonotify(desc);
1709 if (ret)
1710 goto out_free_linereq;
1711
1712 ret = edge_detector_setup(&lr->lines[i], lc, i,
1713 edflags);
1714 if (ret)
1715 goto out_free_linereq;
1716 }
1717
1718 lr->lines[i].edflags = edflags;
1719
1720 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
1721
1722 dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
1723 offset);
1724 }
1725
1726 lr->device_unregistered_nb.notifier_call = linereq_unregistered_notify;
1727 ret = blocking_notifier_chain_register(&gdev->device_notifier,
1728 &lr->device_unregistered_nb);
1729 if (ret)
1730 goto out_free_linereq;
1731
1732 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
1733 if (fd < 0) {
1734 ret = fd;
1735 goto out_free_linereq;
1736 }
1737
1738 file = anon_inode_getfile("gpio-line", &line_fileops, lr,
1739 O_RDONLY | O_CLOEXEC);
1740 if (IS_ERR(file)) {
1741 ret = PTR_ERR(file);
1742 goto out_put_unused_fd;
1743 }
1744
1745 ulr.fd = fd;
1746 if (copy_to_user(ip, &ulr, sizeof(ulr))) {
1747 /*
1748 * fput() will trigger the release() callback, so do not go onto
1749 * the regular error cleanup path here.
1750 */
1751 fput(file);
1752 put_unused_fd(fd);
1753 return -EFAULT;
1754 }
1755
1756 fd_install(fd, file);
1757
1758 dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
1759 lr->num_lines);
1760
1761 return 0;
1762
1763 out_put_unused_fd:
1764 put_unused_fd(fd);
1765 out_free_linereq:
1766 linereq_free(lr);
1767 return ret;
1768 }
1769
1770 #ifdef CONFIG_GPIO_CDEV_V1
1771
1772 /*
1773 * GPIO line event management
1774 */
1775
1776 /**
1777 * struct lineevent_state - contains the state of a userspace event
1778 * @gdev: the GPIO device the event pertains to
1779 * @label: consumer label used to tag descriptors
1780 * @desc: the GPIO descriptor held by this event
1781 * @eflags: the event flags this line was requested with
1782 * @irq: the interrupt triggered in response to events on this GPIO
1783 * @wait: wait queue that handles blocking reads of events
1784 * @device_unregistered_nb: notifier block for receiving gdev unregister events
1785 * @events: KFIFO for the GPIO events
1786 * @timestamp: cache for the timestamp storing it between hardirq
1787 * and IRQ thread, used to bring the timestamp close to the actual
1788 * event
1789 */
1790 struct lineevent_state {
1791 struct gpio_device *gdev;
1792 const char *label;
1793 struct gpio_desc *desc;
1794 u32 eflags;
1795 int irq;
1796 wait_queue_head_t wait;
1797 struct notifier_block device_unregistered_nb;
1798 DECLARE_KFIFO(events, struct gpioevent_data, 16);
1799 u64 timestamp;
1800 };
1801
1802 #define GPIOEVENT_REQUEST_VALID_FLAGS \
1803 (GPIOEVENT_REQUEST_RISING_EDGE | \
1804 GPIOEVENT_REQUEST_FALLING_EDGE)
1805
1806 static __poll_t lineevent_poll(struct file *file,
1807 struct poll_table_struct *wait)
1808 {
1809 struct lineevent_state *le = file->private_data;
1810 __poll_t events = 0;
1811
1812 guard(srcu)(&le->gdev->srcu);
1813
1814 if (!rcu_access_pointer(le->gdev->chip))
1815 return EPOLLHUP | EPOLLERR;
1816
1817 poll_wait(file, &le->wait, wait);
1818
1819 if (!kfifo_is_empty_spinlocked_noirqsave(&le->events, &le->wait.lock))
1820 events = EPOLLIN | EPOLLRDNORM;
1821
1822 return events;
1823 }
1824
1825 static int lineevent_unregistered_notify(struct notifier_block *nb,
1826 unsigned long action, void *data)
1827 {
1828 struct lineevent_state *le = container_of(nb, struct lineevent_state,
1829 device_unregistered_nb);
1830
1831 wake_up_poll(&le->wait, EPOLLIN | EPOLLERR);
1832
1833 return NOTIFY_OK;
1834 }
1835
1836 struct compat_gpioeevent_data {
1837 compat_u64 timestamp;
1838 u32 id;
1839 };
1840
1841 static ssize_t lineevent_read(struct file *file, char __user *buf,
1842 size_t count, loff_t *f_ps)
1843 {
1844 struct lineevent_state *le = file->private_data;
1845 struct gpioevent_data ge;
1846 ssize_t bytes_read = 0;
1847 ssize_t ge_size;
1848 int ret;
1849
1850 guard(srcu)(&le->gdev->srcu);
1851
1852 if (!rcu_access_pointer(le->gdev->chip))
1853 return -ENODEV;
1854
1855 /*
1856 * When compatible system call is being used the struct gpioevent_data,
1857 * in case of at least ia32, has different size due to the alignment
1858 * differences. Because we have first member 64 bits followed by one of
1859 * 32 bits there is no gap between them. The only difference is the
1860 * padding at the end of the data structure. Hence, we calculate the
1861 * actual sizeof() and pass this as an argument to copy_to_user() to
1862 * drop unneeded bytes from the output.
1863 */
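	/*
	 * For example, sizeof(struct gpioevent_data) is 16 on x86-64, while
	 * an ia32 process expects 12 bytes, as compat_u64 is only 4-byte
	 * aligned.
	 */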
1864 if (compat_need_64bit_alignment_fixup())
1865 ge_size = sizeof(struct compat_gpioeevent_data);
1866 else
1867 ge_size = sizeof(struct gpioevent_data);
1868 if (count < ge_size)
1869 return -EINVAL;
1870
1871 do {
1872 scoped_guard(spinlock, &le->wait.lock) {
1873 if (kfifo_is_empty(&le->events)) {
1874 if (bytes_read)
1875 return bytes_read;
1876
1877 if (file->f_flags & O_NONBLOCK)
1878 return -EAGAIN;
1879
1880 ret = wait_event_interruptible_locked(le->wait,
1881 !kfifo_is_empty(&le->events));
1882 if (ret)
1883 return ret;
1884 }
1885
1886 if (kfifo_out(&le->events, &ge, 1) != 1) {
1887 /*
1888 * This should never happen - we hold the
1889 * lock from the moment we learned the fifo
1890 * is no longer empty until now.
1891 */
1892 WARN(1, "failed to read from non-empty kfifo");
1893 return -EIO;
1894 }
1895 }
1896
1897 if (copy_to_user(buf + bytes_read, &ge, ge_size))
1898 return -EFAULT;
1899 bytes_read += ge_size;
1900 } while (count >= bytes_read + ge_size);
1901
1902 return bytes_read;
1903 }
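/*
 * Illustrative sketch (userspace, not part of this driver; error handling
 * omitted): draining events from an event fd, assuming "efd" was returned
 * by GPIO_GET_LINEEVENT_IOCTL:
 *
 *	struct gpioevent_data ev;
 *	struct pollfd pfd = { .fd = efd, .events = POLLIN };
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		if (read(efd, &ev, sizeof(ev)) < (ssize_t)sizeof(ev))
 *			break;
 *		printf("edge %u at %llu ns\n", ev.id,
 *		       (unsigned long long)ev.timestamp);
 *	}
 */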
1904
1905 static void lineevent_free(struct lineevent_state *le)
1906 {
1907 if (le->device_unregistered_nb.notifier_call)
1908 blocking_notifier_chain_unregister(&le->gdev->device_notifier,
1909 &le->device_unregistered_nb);
1910 if (le->irq)
1911 free_irq_label(free_irq(le->irq, le));
1912 if (le->desc)
1913 gpiod_free(le->desc);
1914 kfree(le->label);
1915 gpio_device_put(le->gdev);
1916 kfree(le);
1917 }
1918
1919 static int lineevent_release(struct inode *inode, struct file *file)
1920 {
1921 lineevent_free(file->private_data);
1922 return 0;
1923 }
1924
1925 static long lineevent_ioctl(struct file *file, unsigned int cmd,
1926 unsigned long arg)
1927 {
1928 struct lineevent_state *le = file->private_data;
1929 void __user *ip = (void __user *)arg;
1930 struct gpiohandle_data ghd;
1931
1932 guard(srcu)(&le->gdev->srcu);
1933
1934 if (!rcu_access_pointer(le->gdev->chip))
1935 return -ENODEV;
1936
1937 /*
1938 * We can get the value for an event line but not set it,
1939 * because it is input by definition.
1940 */
1941 if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
1942 int val;
1943
1944 memset(&ghd, 0, sizeof(ghd));
1945
1946 val = gpiod_get_value_cansleep(le->desc);
1947 if (val < 0)
1948 return val;
1949 ghd.values[0] = val;
1950
1951 if (copy_to_user(ip, &ghd, sizeof(ghd)))
1952 return -EFAULT;
1953
1954 return 0;
1955 }
1956 return -EINVAL;
1957 }
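/*
 * Illustrative sketch (userspace, not part of this driver): sampling the
 * current level through the same event fd, assuming "efd" came from
 * GPIO_GET_LINEEVENT_IOCTL:
 *
 *	struct gpiohandle_data vals;
 *
 *	if (ioctl(efd, GPIOHANDLE_GET_LINE_VALUES_IOCTL, &vals) == 0)
 *		printf("line is %s\n", vals.values[0] ? "active" : "inactive");
 */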
1958
1959 #ifdef CONFIG_COMPAT
1960 static long lineevent_ioctl_compat(struct file *file, unsigned int cmd,
1961 unsigned long arg)
1962 {
1963 return lineevent_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
1964 }
1965 #endif
1966
1967 static const struct file_operations lineevent_fileops = {
1968 .release = lineevent_release,
1969 .read = lineevent_read,
1970 .poll = lineevent_poll,
1971 .owner = THIS_MODULE,
1972 .llseek = noop_llseek,
1973 .unlocked_ioctl = lineevent_ioctl,
1974 #ifdef CONFIG_COMPAT
1975 .compat_ioctl = lineevent_ioctl_compat,
1976 #endif
1977 };
1978
1979 static irqreturn_t lineevent_irq_thread(int irq, void *p)
1980 {
1981 struct lineevent_state *le = p;
1982 struct gpioevent_data ge;
1983 int ret;
1984
1985 /* Do not leak kernel stack to userspace */
1986 memset(&ge, 0, sizeof(ge));
1987
1988 /*
1989 * We may be running from a nested threaded interrupt in which case
1990 * we didn't get the timestamp from lineevent_irq_handler().
1991 */
1992 if (!le->timestamp)
1993 ge.timestamp = ktime_get_ns();
1994 else
1995 ge.timestamp = le->timestamp;
1996
1997 if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
1998 && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
1999 int level = gpiod_get_value_cansleep(le->desc);
2000
2001 if (level)
2002 /* Emit low-to-high event */
2003 ge.id = GPIOEVENT_EVENT_RISING_EDGE;
2004 else
2005 /* Emit high-to-low event */
2006 ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
2007 } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
2008 /* Emit low-to-high event */
2009 ge.id = GPIOEVENT_EVENT_RISING_EDGE;
2010 } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
2011 /* Emit high-to-low event */
2012 ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
2013 } else {
2014 return IRQ_NONE;
2015 }
2016
2017 ret = kfifo_in_spinlocked_noirqsave(&le->events, &ge,
2018 1, &le->wait.lock);
2019 if (ret)
2020 wake_up_poll(&le->wait, EPOLLIN);
2021 else
2022 pr_debug_ratelimited("event FIFO is full - event dropped\n");
2023
2024 return IRQ_HANDLED;
2025 }
2026
2027 static irqreturn_t lineevent_irq_handler(int irq, void *p)
2028 {
2029 struct lineevent_state *le = p;
2030
2031 /*
2032 * Just store the timestamp in hardirq context so we get it as
2033 * close in time as possible to the actual event.
2034 */
2035 le->timestamp = ktime_get_ns();
2036
2037 return IRQ_WAKE_THREAD;
2038 }
2039
2040 static int lineevent_create(struct gpio_device *gdev, void __user *ip)
2041 {
2042 struct gpioevent_request eventreq;
2043 struct lineevent_state *le;
2044 struct gpio_desc *desc;
2045 struct file *file;
2046 u32 offset;
2047 u32 lflags;
2048 u32 eflags;
2049 int fd;
2050 int ret;
2051 int irq, irqflags = 0;
2052 char *label;
2053
2054 if (copy_from_user(&eventreq, ip, sizeof(eventreq)))
2055 return -EFAULT;
2056
2057 offset = eventreq.lineoffset;
2058 lflags = eventreq.handleflags;
2059 eflags = eventreq.eventflags;
2060
2061 desc = gpio_device_get_desc(gdev, offset);
2062 if (IS_ERR(desc))
2063 return PTR_ERR(desc);
2064
2065 /* Return an error if an unknown flag is set */
2066 if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
2067 (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS))
2068 return -EINVAL;
2069
2070 /* This is just wrong: we don't look for events on output lines */
2071 if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
2072 (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
2073 (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
2074 return -EINVAL;
2075
2076 /* Only one bias flag can be set. */
2077 if (((lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
2078 (lflags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
2079 GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
2080 ((lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
2081 (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
2082 return -EINVAL;
2083
2084 le = kzalloc(sizeof(*le), GFP_KERNEL);
2085 if (!le)
2086 return -ENOMEM;
2087 le->gdev = gpio_device_get(gdev);
2088
2089 if (eventreq.consumer_label[0] != '\0') {
2090 /* label is only initialized if consumer_label is set */
2091 le->label = kstrndup(eventreq.consumer_label,
2092 sizeof(eventreq.consumer_label) - 1,
2093 GFP_KERNEL);
2094 if (!le->label) {
2095 ret = -ENOMEM;
2096 goto out_free_le;
2097 }
2098 }
2099
2100 ret = gpiod_request_user(desc, le->label);
2101 if (ret)
2102 goto out_free_le;
2103 le->desc = desc;
2104 le->eflags = eflags;
2105
2106 linehandle_flags_to_desc_flags(lflags, &desc->flags);
2107
2108 ret = gpiod_direction_input(desc);
2109 if (ret)
2110 goto out_free_le;
2111
2112 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
2113
2114 irq = gpiod_to_irq(desc);
2115 if (irq <= 0) {
2116 ret = -ENODEV;
2117 goto out_free_le;
2118 }
2119
2120 if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
2121 irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
2122 IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
2123 if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
2124 irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
2125 IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
2126 irqflags |= IRQF_ONESHOT;
2127
2128 INIT_KFIFO(le->events);
2129 init_waitqueue_head(&le->wait);
2130
2131 le->device_unregistered_nb.notifier_call = lineevent_unregistered_notify;
2132 ret = blocking_notifier_chain_register(&gdev->device_notifier,
2133 &le->device_unregistered_nb);
2134 if (ret)
2135 goto out_free_le;
2136
2137 label = make_irq_label(le->label);
2138 if (IS_ERR(label)) {
2139 ret = PTR_ERR(label);
2140 goto out_free_le;
2141 }
2142
2143 /* Request a thread to read the events */
2144 ret = request_threaded_irq(irq,
2145 lineevent_irq_handler,
2146 lineevent_irq_thread,
2147 irqflags,
2148 label,
2149 le);
2150 if (ret) {
2151 free_irq_label(label);
2152 goto out_free_le;
2153 }
2154
2155 le->irq = irq;
2156
2157 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
2158 if (fd < 0) {
2159 ret = fd;
2160 goto out_free_le;
2161 }
2162
2163 file = anon_inode_getfile("gpio-event",
2164 &lineevent_fileops,
2165 le,
2166 O_RDONLY | O_CLOEXEC);
2167 if (IS_ERR(file)) {
2168 ret = PTR_ERR(file);
2169 goto out_put_unused_fd;
2170 }
2171
2172 eventreq.fd = fd;
2173 if (copy_to_user(ip, &eventreq, sizeof(eventreq))) {
2174 /*
2175 * fput() will trigger the release() callback, so do not go onto
2176 * the regular error cleanup path here.
2177 */
2178 fput(file);
2179 put_unused_fd(fd);
2180 return -EFAULT;
2181 }
2182
2183 fd_install(fd, file);
2184
2185 return 0;
2186
2187 out_put_unused_fd:
2188 put_unused_fd(fd);
2189 out_free_le:
2190 lineevent_free(le);
2191 return ret;
2192 }
2193
2194 static void gpio_v2_line_info_to_v1(struct gpio_v2_line_info *info_v2,
2195 struct gpioline_info *info_v1)
2196 {
2197 u64 flagsv2 = info_v2->flags;
2198
2199 memcpy(info_v1->name, info_v2->name, sizeof(info_v1->name));
2200 memcpy(info_v1->consumer, info_v2->consumer, sizeof(info_v1->consumer));
2201 info_v1->line_offset = info_v2->offset;
2202 info_v1->flags = 0;
2203
2204 if (flagsv2 & GPIO_V2_LINE_FLAG_USED)
2205 info_v1->flags |= GPIOLINE_FLAG_KERNEL;
2206
2207 if (flagsv2 & GPIO_V2_LINE_FLAG_OUTPUT)
2208 info_v1->flags |= GPIOLINE_FLAG_IS_OUT;
2209
2210 if (flagsv2 & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
2211 info_v1->flags |= GPIOLINE_FLAG_ACTIVE_LOW;
2212
2213 if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_DRAIN)
2214 info_v1->flags |= GPIOLINE_FLAG_OPEN_DRAIN;
2215 if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_SOURCE)
2216 info_v1->flags |= GPIOLINE_FLAG_OPEN_SOURCE;
2217
2218 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)
2219 info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_UP;
2220 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN)
2221 info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_DOWN;
2222 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_DISABLED)
2223 info_v1->flags |= GPIOLINE_FLAG_BIAS_DISABLE;
2224 }
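/*
 * Note that v1 has no equivalents for the v2 edge detection, event clock
 * or debounce flags, so that information is necessarily dropped by the
 * conversion above.
 */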
2225
2226 static void gpio_v2_line_info_changed_to_v1(
2227 struct gpio_v2_line_info_changed *lic_v2,
2228 struct gpioline_info_changed *lic_v1)
2229 {
2230 memset(lic_v1, 0, sizeof(*lic_v1));
2231 gpio_v2_line_info_to_v1(&lic_v2->info, &lic_v1->info);
2232 lic_v1->timestamp = lic_v2->timestamp_ns;
2233 lic_v1->event_type = lic_v2->event_type;
2234 }
2235
2236 #endif /* CONFIG_GPIO_CDEV_V1 */
2237
2238 static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
2239 struct gpio_v2_line_info *info, bool atomic)
2240 {
2241 u32 debounce_period_us;
2242 unsigned long dflags;
2243 const char *label;
2244
2245 CLASS(gpio_chip_guard, guard)(desc);
2246 if (!guard.gc)
2247 return;
2248
2249 memset(info, 0, sizeof(*info));
2250 info->offset = gpio_chip_hwgpio(desc);
2251
2252 if (desc->name)
2253 strscpy(info->name, desc->name, sizeof(info->name));
2254
2255 dflags = READ_ONCE(desc->flags);
2256
2257 scoped_guard(srcu, &desc->gdev->desc_srcu) {
2258 label = gpiod_get_label(desc);
2259 if (label && test_bit(FLAG_REQUESTED, &dflags))
2260 strscpy(info->consumer, label,
2261 sizeof(info->consumer));
2262 }
2263
2264 /*
2265 * Userspace only needs to know that the kernel is using this GPIO so
2266 * that userspace can't use it.
2267 * The calculation of the used flag is slightly racy, as it may read
2268 * desc, gc and pinctrl state without a lock covering all three at
2269 * once. Worst case if the line is in transition and the calculation
2270 * is inconsistent then it looks to the user like they performed the
2271 * read on the other side of the transition - but that can always
2272 * happen.
2273 * The definitive test that a line is available to userspace is to
2274 * request it.
2275 */
2276 if (test_bit(FLAG_REQUESTED, &dflags) ||
2277 test_bit(FLAG_IS_HOGGED, &dflags) ||
2278 test_bit(FLAG_EXPORT, &dflags) ||
2279 test_bit(FLAG_SYSFS, &dflags) ||
2280 !gpiochip_line_is_valid(guard.gc, info->offset)) {
2281 info->flags |= GPIO_V2_LINE_FLAG_USED;
2282 } else if (!atomic) {
2283 if (!pinctrl_gpio_can_use_line(guard.gc, info->offset))
2284 info->flags |= GPIO_V2_LINE_FLAG_USED;
2285 }
2286
2287 if (test_bit(FLAG_IS_OUT, &dflags))
2288 info->flags |= GPIO_V2_LINE_FLAG_OUTPUT;
2289 else
2290 info->flags |= GPIO_V2_LINE_FLAG_INPUT;
2291
2292 if (test_bit(FLAG_ACTIVE_LOW, &dflags))
2293 info->flags |= GPIO_V2_LINE_FLAG_ACTIVE_LOW;
2294
2295 if (test_bit(FLAG_OPEN_DRAIN, &dflags))
2296 info->flags |= GPIO_V2_LINE_FLAG_OPEN_DRAIN;
2297 if (test_bit(FLAG_OPEN_SOURCE, &dflags))
2298 info->flags |= GPIO_V2_LINE_FLAG_OPEN_SOURCE;
2299
2300 if (test_bit(FLAG_BIAS_DISABLE, &dflags))
2301 info->flags |= GPIO_V2_LINE_FLAG_BIAS_DISABLED;
2302 if (test_bit(FLAG_PULL_DOWN, &dflags))
2303 info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN;
2304 if (test_bit(FLAG_PULL_UP, &dflags))
2305 info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_UP;
2306
2307 if (test_bit(FLAG_EDGE_RISING, &dflags))
2308 info->flags |= GPIO_V2_LINE_FLAG_EDGE_RISING;
2309 if (test_bit(FLAG_EDGE_FALLING, &dflags))
2310 info->flags |= GPIO_V2_LINE_FLAG_EDGE_FALLING;
2311
2312 if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &dflags))
2313 info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
2314 else if (test_bit(FLAG_EVENT_CLOCK_HTE, &dflags))
2315 info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
2316
2317 debounce_period_us = READ_ONCE(desc->debounce_period_us);
2318 if (debounce_period_us) {
2319 info->attrs[info->num_attrs].id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
2320 info->attrs[info->num_attrs].debounce_period_us =
2321 debounce_period_us;
2322 info->num_attrs++;
2323 }
2324 }
2325
2326 struct gpio_chardev_data {
2327 struct gpio_device *gdev;
2328 wait_queue_head_t wait;
2329 DECLARE_KFIFO(events, struct gpio_v2_line_info_changed, 32);
2330 struct notifier_block lineinfo_changed_nb;
2331 struct notifier_block device_unregistered_nb;
2332 unsigned long *watched_lines;
2333 #ifdef CONFIG_GPIO_CDEV_V1
2334 atomic_t watch_abi_version;
2335 #endif
2336 struct file *fp;
2337 };
2338
2339 static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip)
2340 {
2341 struct gpio_device *gdev = cdev->gdev;
2342 struct gpiochip_info chipinfo;
2343
2344 memset(&chipinfo, 0, sizeof(chipinfo));
2345
2346 strscpy(chipinfo.name, dev_name(&gdev->dev), sizeof(chipinfo.name));
2347 strscpy(chipinfo.label, gdev->label, sizeof(chipinfo.label));
2348 chipinfo.lines = gdev->ngpio;
2349 if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
2350 return -EFAULT;
2351 return 0;
2352 }
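/*
 * Illustrative sketch (userspace, not part of this driver; error handling
 * omitted): querying the chip information exposed above:
 *
 *	struct gpiochip_info info;
 *	int cfd = open("/dev/gpiochip0", O_RDONLY);
 *
 *	if (ioctl(cfd, GPIO_GET_CHIPINFO_IOCTL, &info) == 0)
 *		printf("%s [%s], %u lines\n", info.name, info.label, info.lines);
 */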
2353
2354 #ifdef CONFIG_GPIO_CDEV_V1
2355 /*
2356 * returns 0 if the versions match, else the previously selected ABI version
2357 */
2358 static int lineinfo_ensure_abi_version(struct gpio_chardev_data *cdata,
2359 unsigned int version)
2360 {
2361 int abiv = atomic_cmpxchg(&cdata->watch_abi_version, 0, version);
2362
2363 if (abiv == version)
2364 return 0;
2365
2366 return abiv;
2367 }
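/*
 * The first watch request thus pins the ABI version for this file: later
 * reads in lineinfo_watch_read() are formatted for that version, and watch
 * requests made through the other ABI are rejected with EPERM.
 */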
2368
2369 static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip,
2370 bool watch)
2371 {
2372 struct gpio_desc *desc;
2373 struct gpioline_info lineinfo;
2374 struct gpio_v2_line_info lineinfo_v2;
2375
2376 if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
2377 return -EFAULT;
2378
2379 /* this doubles as a range check on line_offset */
2380 desc = gpio_device_get_desc(cdev->gdev, lineinfo.line_offset);
2381 if (IS_ERR(desc))
2382 return PTR_ERR(desc);
2383
2384 if (watch) {
2385 if (lineinfo_ensure_abi_version(cdev, 1))
2386 return -EPERM;
2387
2388 if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
2389 return -EBUSY;
2390 }
2391
2392 gpio_desc_to_lineinfo(desc, &lineinfo_v2, false);
2393 gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
2394
2395 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
2396 if (watch)
2397 clear_bit(lineinfo.line_offset, cdev->watched_lines);
2398 return -EFAULT;
2399 }
2400
2401 return 0;
2402 }
2403 #endif
2404
2405 static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
2406 bool watch)
2407 {
2408 struct gpio_desc *desc;
2409 struct gpio_v2_line_info lineinfo;
2410
2411 if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
2412 return -EFAULT;
2413
2414 if (!mem_is_zero(lineinfo.padding, sizeof(lineinfo.padding)))
2415 return -EINVAL;
2416
2417 desc = gpio_device_get_desc(cdev->gdev, lineinfo.offset);
2418 if (IS_ERR(desc))
2419 return PTR_ERR(desc);
2420
2421 if (watch) {
2422 #ifdef CONFIG_GPIO_CDEV_V1
2423 if (lineinfo_ensure_abi_version(cdev, 2))
2424 return -EPERM;
2425 #endif
2426 if (test_and_set_bit(lineinfo.offset, cdev->watched_lines))
2427 return -EBUSY;
2428 }
2429 gpio_desc_to_lineinfo(desc, &lineinfo, false);
2430
2431 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
2432 if (watch)
2433 clear_bit(lineinfo.offset, cdev->watched_lines);
2434 return -EFAULT;
2435 }
2436
2437 return 0;
2438 }
2439
2440 static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
2441 {
2442 __u32 offset;
2443
2444 if (copy_from_user(&offset, ip, sizeof(offset)))
2445 return -EFAULT;
2446
2447 if (offset >= cdev->gdev->ngpio)
2448 return -EINVAL;
2449
2450 if (!test_and_clear_bit(offset, cdev->watched_lines))
2451 return -EBUSY;
2452
2453 return 0;
2454 }
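/*
 * Illustrative sketch (userspace, not part of this driver; error handling
 * omitted): watching a line with the v2 ABI and dropping the watch again,
 * assuming "cfd" is an open gpiochip fd and the line sits at offset 4:
 *
 *	struct gpio_v2_line_info linfo = { .offset = 4 };
 *	struct gpio_v2_line_info_changed chg;
 *	__u32 offset = 4;
 *
 *	ioctl(cfd, GPIO_V2_GET_LINEINFO_WATCH_IOCTL, &linfo);
 *	read(cfd, &chg, sizeof(chg));	// blocks until the line changes state
 *	ioctl(cfd, GPIO_GET_LINEINFO_UNWATCH_IOCTL, &offset);
 */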
2455
2456 /*
2457 * gpio_ioctl() - ioctl handler for the GPIO chardev
2458 */
2459 static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2460 {
2461 struct gpio_chardev_data *cdev = file->private_data;
2462 struct gpio_device *gdev = cdev->gdev;
2463 void __user *ip = (void __user *)arg;
2464
2465 guard(srcu)(&gdev->srcu);
2466
2467 /* We fail any subsequent ioctl():s when the chip is gone */
2468 if (!rcu_access_pointer(gdev->chip))
2469 return -ENODEV;
2470
2471 /* Fill in the struct and pass to userspace */
2472 switch (cmd) {
2473 case GPIO_GET_CHIPINFO_IOCTL:
2474 return chipinfo_get(cdev, ip);
2475 #ifdef CONFIG_GPIO_CDEV_V1
2476 case GPIO_GET_LINEHANDLE_IOCTL:
2477 return linehandle_create(gdev, ip);
2478 case GPIO_GET_LINEEVENT_IOCTL:
2479 return lineevent_create(gdev, ip);
2480 case GPIO_GET_LINEINFO_IOCTL:
2481 return lineinfo_get_v1(cdev, ip, false);
2482 case GPIO_GET_LINEINFO_WATCH_IOCTL:
2483 return lineinfo_get_v1(cdev, ip, true);
2484 #endif /* CONFIG_GPIO_CDEV_V1 */
2485 case GPIO_V2_GET_LINEINFO_IOCTL:
2486 return lineinfo_get(cdev, ip, false);
2487 case GPIO_V2_GET_LINEINFO_WATCH_IOCTL:
2488 return lineinfo_get(cdev, ip, true);
2489 case GPIO_V2_GET_LINE_IOCTL:
2490 return linereq_create(gdev, ip);
2491 case GPIO_GET_LINEINFO_UNWATCH_IOCTL:
2492 return lineinfo_unwatch(cdev, ip);
2493 default:
2494 return -EINVAL;
2495 }
2496 }
2497
2498 #ifdef CONFIG_COMPAT
2499 static long gpio_ioctl_compat(struct file *file, unsigned int cmd,
2500 unsigned long arg)
2501 {
2502 return gpio_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
2503 }
2504 #endif
2505
2506 struct lineinfo_changed_ctx {
2507 struct work_struct work;
2508 struct gpio_v2_line_info_changed chg;
2509 struct gpio_device *gdev;
2510 struct gpio_chardev_data *cdev;
2511 };
2512
2513 static void lineinfo_changed_func(struct work_struct *work)
2514 {
2515 struct lineinfo_changed_ctx *ctx =
2516 container_of(work, struct lineinfo_changed_ctx, work);
2517 struct gpio_chip *gc;
2518 int ret;
2519
2520 if (!(ctx->chg.info.flags & GPIO_V2_LINE_FLAG_USED)) {
2521 /*
2522 * If nobody set the USED flag earlier, check with pinctrl now. We
2523 * do this late because pinctrl_gpio_can_use_line() may sleep. Pin
2524 * functions are in general much more static, so while this is not
2525 * 100% bullet-proof, it's good enough for most cases.
2526 */
2527 scoped_guard(srcu, &ctx->gdev->srcu) {
2528 gc = srcu_dereference(ctx->gdev->chip, &ctx->gdev->srcu);
2529 if (gc &&
2530 !pinctrl_gpio_can_use_line(gc, ctx->chg.info.offset))
2531 ctx->chg.info.flags |= GPIO_V2_LINE_FLAG_USED;
2532 }
2533 }
2534
2535 ret = kfifo_in_spinlocked(&ctx->cdev->events, &ctx->chg, 1,
2536 &ctx->cdev->wait.lock);
2537 if (ret)
2538 wake_up_poll(&ctx->cdev->wait, EPOLLIN);
2539 else
2540 pr_debug_ratelimited("lineinfo event FIFO is full - event dropped\n");
2541
2542 gpio_device_put(ctx->gdev);
2543 fput(ctx->cdev->fp);
2544 kfree(ctx);
2545 }
2546
2547 static int lineinfo_changed_notify(struct notifier_block *nb,
2548 unsigned long action, void *data)
2549 {
2550 struct gpio_chardev_data *cdev =
2551 container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb);
2552 struct lineinfo_changed_ctx *ctx;
2553 struct gpio_desc *desc = data;
2554
2555 if (!test_bit(gpio_chip_hwgpio(desc), cdev->watched_lines))
2556 return NOTIFY_DONE;
2557
2558 /*
2559 * If this is called from atomic context (for instance: with a spinlock
2560 * taken by the atomic notifier chain), any sleeping calls must be done
2561 * outside of this function in process context of the dedicated
2562 * workqueue.
2563 *
2564 * Let's gather as much info as possible from the descriptor and
2565 * postpone just the call to pinctrl_gpio_can_use_line() until the work
2566 * is executed.
2567 */
2568
2569 ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
2570 if (!ctx) {
2571 pr_err("Failed to allocate memory for line info notification\n");
2572 return NOTIFY_DONE;
2573 }
2574
2575 ctx->chg.event_type = action;
2576 ctx->chg.timestamp_ns = ktime_get_ns();
2577 gpio_desc_to_lineinfo(desc, &ctx->chg.info, true);
2578 /* Keep the GPIO device alive until we emit the event. */
2579 ctx->gdev = gpio_device_get(desc->gdev);
2580 ctx->cdev = cdev;
2581 /* Keep the file descriptor alive too. */
2582 get_file(ctx->cdev->fp);
2583
2584 INIT_WORK(&ctx->work, lineinfo_changed_func);
2585 queue_work(ctx->gdev->line_state_wq, &ctx->work);
2586
2587 return NOTIFY_OK;
2588 }
2589
2590 static int gpio_device_unregistered_notify(struct notifier_block *nb,
2591 unsigned long action, void *data)
2592 {
2593 struct gpio_chardev_data *cdev = container_of(nb,
2594 struct gpio_chardev_data,
2595 device_unregistered_nb);
2596
2597 wake_up_poll(&cdev->wait, EPOLLIN | EPOLLERR);
2598
2599 return NOTIFY_OK;
2600 }
2601
2602 static __poll_t lineinfo_watch_poll(struct file *file,
2603 struct poll_table_struct *pollt)
2604 {
2605 struct gpio_chardev_data *cdev = file->private_data;
2606 __poll_t events = 0;
2607
2608 guard(srcu)(&cdev->gdev->srcu);
2609
2610 if (!rcu_access_pointer(cdev->gdev->chip))
2611 return EPOLLHUP | EPOLLERR;
2612
2613 poll_wait(file, &cdev->wait, pollt);
2614
2615 if (!kfifo_is_empty_spinlocked_noirqsave(&cdev->events,
2616 &cdev->wait.lock))
2617 events = EPOLLIN | EPOLLRDNORM;
2618
2619 return events;
2620 }
2621
2622 static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
2623 size_t count, loff_t *off)
2624 {
2625 struct gpio_chardev_data *cdev = file->private_data;
2626 struct gpio_v2_line_info_changed event;
2627 ssize_t bytes_read = 0;
2628 int ret;
2629 size_t event_size;
2630
2631 guard(srcu)(&cdev->gdev->srcu);
2632
2633 if (!rcu_access_pointer(cdev->gdev->chip))
2634 return -ENODEV;
2635
2636 #ifndef CONFIG_GPIO_CDEV_V1
2637 event_size = sizeof(struct gpio_v2_line_info_changed);
2638 if (count < event_size)
2639 return -EINVAL;
2640 #endif
2641
2642 do {
2643 scoped_guard(spinlock, &cdev->wait.lock) {
2644 if (kfifo_is_empty(&cdev->events)) {
2645 if (bytes_read)
2646 return bytes_read;
2647
2648 if (file->f_flags & O_NONBLOCK)
2649 return -EAGAIN;
2650
2651 ret = wait_event_interruptible_locked(cdev->wait,
2652 !kfifo_is_empty(&cdev->events));
2653 if (ret)
2654 return ret;
2655 }
2656 #ifdef CONFIG_GPIO_CDEV_V1
2657 /* must be after kfifo check so watch_abi_version is set */
2658 if (atomic_read(&cdev->watch_abi_version) == 2)
2659 event_size = sizeof(struct gpio_v2_line_info_changed);
2660 else
2661 event_size = sizeof(struct gpioline_info_changed);
2662 if (count < event_size)
2663 return -EINVAL;
2664 #endif
2665 if (kfifo_out(&cdev->events, &event, 1) != 1) {
2666 /*
2667 * This should never happen - we hold the
2668 * lock from the moment we learned the fifo
2669 * is no longer empty until now.
2670 */
2671 WARN(1, "failed to read from non-empty kfifo");
2672 return -EIO;
2673 }
2674 }
2675
2676 #ifdef CONFIG_GPIO_CDEV_V1
2677 if (event_size == sizeof(struct gpio_v2_line_info_changed)) {
2678 if (copy_to_user(buf + bytes_read, &event, event_size))
2679 return -EFAULT;
2680 } else {
2681 struct gpioline_info_changed event_v1;
2682
2683 gpio_v2_line_info_changed_to_v1(&event, &event_v1);
2684 if (copy_to_user(buf + bytes_read, &event_v1,
2685 event_size))
2686 return -EFAULT;
2687 }
2688 #else
2689 if (copy_to_user(buf + bytes_read, &event, event_size))
2690 return -EFAULT;
2691 #endif
2692 bytes_read += event_size;
2693 } while (count >= bytes_read + sizeof(event));
2694
2695 return bytes_read;
2696 }
2697
2698 /**
2699 * gpio_chrdev_open() - open the chardev for ioctl operations
2700 * @inode: inode for this chardev
2701 * @file: file struct for storing private data
2702 *
2703 * Returns:
2704 * 0 on success, or negative errno on failure.
2705 */
2706 static int gpio_chrdev_open(struct inode *inode, struct file *file)
2707 {
2708 struct gpio_device *gdev = container_of(inode->i_cdev,
2709 struct gpio_device, chrdev);
2710 struct gpio_chardev_data *cdev;
2711 int ret = -ENOMEM;
2712
2713 guard(srcu)(&gdev->srcu);
2714
2715 /* Fail on open if the backing gpiochip is gone */
2716 if (!rcu_access_pointer(gdev->chip))
2717 return -ENODEV;
2718
2719 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
2720 if (!cdev)
2721 return -ENOMEM;
2722
2723 cdev->watched_lines = bitmap_zalloc(gdev->ngpio, GFP_KERNEL);
2724 if (!cdev->watched_lines)
2725 goto out_free_cdev;
2726
2727 init_waitqueue_head(&cdev->wait);
2728 INIT_KFIFO(cdev->events);
2729 cdev->gdev = gpio_device_get(gdev);
2730
2731 cdev->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify;
2732 ret = atomic_notifier_chain_register(&gdev->line_state_notifier,
2733 &cdev->lineinfo_changed_nb);
2734 if (ret)
2735 goto out_free_bitmap;
2736
2737 cdev->device_unregistered_nb.notifier_call =
2738 gpio_device_unregistered_notify;
2739 ret = blocking_notifier_chain_register(&gdev->device_notifier,
2740 &cdev->device_unregistered_nb);
2741 if (ret)
2742 goto out_unregister_line_notifier;
2743
2744 file->private_data = cdev;
2745 cdev->fp = file;
2746
2747 ret = nonseekable_open(inode, file);
2748 if (ret)
2749 goto out_unregister_device_notifier;
2750
2751 return ret;
2752
2753 out_unregister_device_notifier:
2754 blocking_notifier_chain_unregister(&gdev->device_notifier,
2755 &cdev->device_unregistered_nb);
2756 out_unregister_line_notifier:
2757 atomic_notifier_chain_unregister(&gdev->line_state_notifier,
2758 &cdev->lineinfo_changed_nb);
2759 out_free_bitmap:
2760 gpio_device_put(gdev);
2761 bitmap_free(cdev->watched_lines);
2762 out_free_cdev:
2763 kfree(cdev);
2764 return ret;
2765 }
2766
2767 /**
2768 * gpio_chrdev_release() - close chardev after ioctl operations
2769 * @inode: inode for this chardev
2770 * @file: file struct for storing private data
2771 *
2772 * Returns:
2773 * 0 on success, or negative errno on failure.
2774 */
2775 static int gpio_chrdev_release(struct inode *inode, struct file *file)
2776 {
2777 struct gpio_chardev_data *cdev = file->private_data;
2778 struct gpio_device *gdev = cdev->gdev;
2779
2780 blocking_notifier_chain_unregister(&gdev->device_notifier,
2781 &cdev->device_unregistered_nb);
2782 atomic_notifier_chain_unregister(&gdev->line_state_notifier,
2783 &cdev->lineinfo_changed_nb);
2784 bitmap_free(cdev->watched_lines);
2785 gpio_device_put(gdev);
2786 kfree(cdev);
2787
2788 return 0;
2789 }
2790
2791 static const struct file_operations gpio_fileops = {
2792 .release = gpio_chrdev_release,
2793 .open = gpio_chrdev_open,
2794 .poll = lineinfo_watch_poll,
2795 .read = lineinfo_watch_read,
2796 .owner = THIS_MODULE,
2797 .unlocked_ioctl = gpio_ioctl,
2798 #ifdef CONFIG_COMPAT
2799 .compat_ioctl = gpio_ioctl_compat,
2800 #endif
2801 };
2802
2803 int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt)
2804 {
2805 struct gpio_chip *gc;
2806 int ret;
2807
2808 cdev_init(&gdev->chrdev, &gpio_fileops);
2809 gdev->chrdev.owner = THIS_MODULE;
2810 gdev->dev.devt = MKDEV(MAJOR(devt), gdev->id);
2811
2812 gdev->line_state_wq = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2813 dev_name(&gdev->dev));
2814 if (!gdev->line_state_wq)
2815 return -ENOMEM;
2816
2817 ret = cdev_device_add(&gdev->chrdev, &gdev->dev);
2818 if (ret)
2819 return ret;
2820
2821 guard(srcu)(&gdev->srcu);
2822 gc = srcu_dereference(gdev->chip, &gdev->srcu);
2823 if (!gc)
2824 return -ENODEV;
2825
2826 chip_dbg(gc, "added GPIO chardev (%d:%d)\n", MAJOR(devt), gdev->id);
2827
2828 return 0;
2829 }
2830
2831 void gpiolib_cdev_unregister(struct gpio_device *gdev)
2832 {
2833 destroy_workqueue(gdev->line_state_wq);
2834 cdev_device_del(&gdev->chrdev, &gdev->dev);
2835 blocking_notifier_call_chain(&gdev->device_notifier, 0, NULL);
2836 }
2837