1 // SPDX-License-Identifier: GPL-2.0
2
3 #include <linux/anon_inodes.h>
4 #include <linux/atomic.h>
5 #include <linux/bitmap.h>
6 #include <linux/build_bug.h>
7 #include <linux/cdev.h>
8 #include <linux/cleanup.h>
9 #include <linux/compat.h>
10 #include <linux/compiler.h>
11 #include <linux/device.h>
12 #include <linux/err.h>
13 #include <linux/file.h>
14 #include <linux/gpio.h>
15 #include <linux/gpio/driver.h>
16 #include <linux/hte.h>
17 #include <linux/interrupt.h>
18 #include <linux/irqreturn.h>
19 #include <linux/kfifo.h>
20 #include <linux/module.h>
21 #include <linux/mutex.h>
22 #include <linux/overflow.h>
23 #include <linux/pinctrl/consumer.h>
24 #include <linux/poll.h>
25 #include <linux/seq_file.h>
26 #include <linux/spinlock.h>
27 #include <linux/string.h>
28 #include <linux/timekeeping.h>
29 #include <linux/uaccess.h>
30 #include <linux/workqueue.h>
31
32 #include <uapi/linux/gpio.h>
33
34 #include "gpiolib.h"
35 #include "gpiolib-cdev.h"
36
37 /*
38 * Array sizes must ensure 64-bit alignment and not create holes in the
39 * struct packing.
40 */
41 static_assert(IS_ALIGNED(GPIO_V2_LINES_MAX, 2));
42 static_assert(IS_ALIGNED(GPIO_MAX_NAME_SIZE, 8));
43
44 /*
45 * Check that uAPI structs are 64-bit aligned for 32/64-bit compatibility
46 */
47 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_attribute), 8));
48 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config_attribute), 8));
49 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config), 8));
50 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_request), 8));
51 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info), 8));
52 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info_changed), 8));
53 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_event), 8));
54 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_values), 8));
55
56 /* Character device interface to GPIO.
57 *
58 * The GPIO character device, /dev/gpiochipN, provides userspace an
59 * interface to gpiolib GPIOs via ioctl()s.
60 */
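/*
 * For illustration only (not compiled here): a userspace consumer would
 * typically open the character device and then issue the request ioctls
 * handled further down in this file; names such as chip_fd are arbitrary:
 *
 *	int chip_fd = open("/dev/gpiochip0", O_RDONLY | O_CLOEXEC);
 *
 * The chip fd is then passed to GPIO_V2_GET_LINE_IOCTL (or the v1
 * GPIO_GET_LINEHANDLE_IOCTL / GPIO_GET_LINEEVENT_IOCTL) to obtain a line
 * request fd, which owns the requested lines for its lifetime.
 */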
61
62 /*
63 * GPIO line handle management
64 */
65
66 #ifdef CONFIG_GPIO_CDEV_V1
67 /**
68 * struct linehandle_state - contains the state of a userspace handle
69 * @gdev: the GPIO device the handle pertains to
70 * @label: consumer label used to tag descriptors
71 * @descs: the GPIO descriptors held by this handle
72 * @num_descs: the number of descriptors held in the descs array
73 */
74 struct linehandle_state {
75 struct gpio_device *gdev;
76 const char *label;
77 struct gpio_desc *descs[GPIOHANDLES_MAX];
78 u32 num_descs;
79 };
80
81 #define GPIOHANDLE_REQUEST_VALID_FLAGS \
82 (GPIOHANDLE_REQUEST_INPUT | \
83 GPIOHANDLE_REQUEST_OUTPUT | \
84 GPIOHANDLE_REQUEST_ACTIVE_LOW | \
85 GPIOHANDLE_REQUEST_BIAS_PULL_UP | \
86 GPIOHANDLE_REQUEST_BIAS_PULL_DOWN | \
87 GPIOHANDLE_REQUEST_BIAS_DISABLE | \
88 GPIOHANDLE_REQUEST_OPEN_DRAIN | \
89 GPIOHANDLE_REQUEST_OPEN_SOURCE)
90
91 #define GPIOHANDLE_REQUEST_DIRECTION_FLAGS \
92 (GPIOHANDLE_REQUEST_INPUT | \
93 GPIOHANDLE_REQUEST_OUTPUT)
94
95 static int linehandle_validate_flags(u32 flags)
96 {
97 /* Return an error if an unknown flag is set */
98 if (flags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
99 return -EINVAL;
100
101 /*
102 * Do not allow both INPUT & OUTPUT flags to be set as they are
103 * contradictory.
104 */
105 if ((flags & GPIOHANDLE_REQUEST_INPUT) &&
106 (flags & GPIOHANDLE_REQUEST_OUTPUT))
107 return -EINVAL;
108
109 /*
110 * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
111 * the hardware actually supports enabling both at the same time the
112 * electrical result would be disastrous.
113 */
114 if ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) &&
115 (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
116 return -EINVAL;
117
118 /* OPEN_DRAIN and OPEN_SOURCE flags only make sense for output mode. */
119 if (!(flags & GPIOHANDLE_REQUEST_OUTPUT) &&
120 ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
121 (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE)))
122 return -EINVAL;
123
124 /* Bias flags only allowed for input or output mode. */
125 if (!((flags & GPIOHANDLE_REQUEST_INPUT) ||
126 (flags & GPIOHANDLE_REQUEST_OUTPUT)) &&
127 ((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) ||
128 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP) ||
129 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN)))
130 return -EINVAL;
131
132 /* Only one bias flag can be set. */
133 if (((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
134 (flags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
135 GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
136 ((flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
137 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
138 return -EINVAL;
139
140 return 0;
141 }
142
143 static void linehandle_flags_to_desc_flags(u32 lflags, unsigned long *flagsp)
144 {
145 unsigned long flags = READ_ONCE(*flagsp);
146
147 assign_bit(GPIOD_FLAG_ACTIVE_LOW, &flags,
148 lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW);
149 assign_bit(GPIOD_FLAG_OPEN_DRAIN, &flags,
150 lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN);
151 assign_bit(GPIOD_FLAG_OPEN_SOURCE, &flags,
152 lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE);
153 assign_bit(GPIOD_FLAG_PULL_UP, &flags,
154 lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP);
155 assign_bit(GPIOD_FLAG_PULL_DOWN, &flags,
156 lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN);
157 assign_bit(GPIOD_FLAG_BIAS_DISABLE, &flags,
158 lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE);
159
160 WRITE_ONCE(*flagsp, flags);
161 }
162
163 static long linehandle_set_config(struct linehandle_state *lh,
164 void __user *ip)
165 {
166 struct gpiohandle_config gcnf;
167 struct gpio_desc *desc;
168 int i, ret;
169 u32 lflags;
170
171 if (copy_from_user(&gcnf, ip, sizeof(gcnf)))
172 return -EFAULT;
173
174 lflags = gcnf.flags;
175 ret = linehandle_validate_flags(lflags);
176 if (ret)
177 return ret;
178
179 /* Lines must be reconfigured explicitly as input or output. */
180 if (!(lflags & GPIOHANDLE_REQUEST_DIRECTION_FLAGS))
181 return -EINVAL;
182
183 for (i = 0; i < lh->num_descs; i++) {
184 desc = lh->descs[i];
185 linehandle_flags_to_desc_flags(lflags, &desc->flags);
186
187 if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
188 int val = !!gcnf.default_values[i];
189
190 ret = gpiod_direction_output_nonotify(desc, val);
191 if (ret)
192 return ret;
193 } else {
194 ret = gpiod_direction_input_nonotify(desc);
195 if (ret)
196 return ret;
197 }
198
199 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
200 }
201 return 0;
202 }
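/*
 * Sketch of the matching userspace call (handle_fd is assumed to be an fd
 * previously returned by GPIO_GET_LINEHANDLE_IOCTL; error handling omitted):
 *
 *	struct gpiohandle_config cfg = { 0 };
 *
 *	cfg.flags = GPIOHANDLE_REQUEST_OUTPUT;
 *	cfg.default_values[0] = 1;
 *	ioctl(handle_fd, GPIOHANDLE_SET_CONFIG_IOCTL, &cfg);
 */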
203
204 static long linehandle_ioctl(struct file *file, unsigned int cmd,
205 unsigned long arg)
206 {
207 struct linehandle_state *lh = file->private_data;
208 void __user *ip = (void __user *)arg;
209 struct gpiohandle_data ghd;
210 DECLARE_BITMAP(vals, GPIOHANDLES_MAX);
211 unsigned int i;
212 int ret;
213
214 guard(srcu)(&lh->gdev->srcu);
215
216 if (!rcu_access_pointer(lh->gdev->chip))
217 return -ENODEV;
218
219 switch (cmd) {
220 case GPIOHANDLE_GET_LINE_VALUES_IOCTL:
221 /* NOTE: It's okay to read values of output lines */
222 ret = gpiod_get_array_value_complex(false, true,
223 lh->num_descs, lh->descs,
224 NULL, vals);
225 if (ret)
226 return ret;
227
228 memset(&ghd, 0, sizeof(ghd));
229 for (i = 0; i < lh->num_descs; i++)
230 ghd.values[i] = test_bit(i, vals);
231
232 if (copy_to_user(ip, &ghd, sizeof(ghd)))
233 return -EFAULT;
234
235 return 0;
236 case GPIOHANDLE_SET_LINE_VALUES_IOCTL:
237 /*
238 * All line descriptors were created at once with the same
239 * flags so just check if the first one is really output.
240 */
241 if (!test_bit(GPIOD_FLAG_IS_OUT, &lh->descs[0]->flags))
242 return -EPERM;
243
244 if (copy_from_user(&ghd, ip, sizeof(ghd)))
245 return -EFAULT;
246
247 /* Clamp all values to [0,1] */
248 for (i = 0; i < lh->num_descs; i++)
249 __assign_bit(i, vals, ghd.values[i]);
250
251 /* Reuse the array setting function */
252 return gpiod_set_array_value_complex(false,
253 true,
254 lh->num_descs,
255 lh->descs,
256 NULL,
257 vals);
258 case GPIOHANDLE_SET_CONFIG_IOCTL:
259 return linehandle_set_config(lh, ip);
260 default:
261 return -EINVAL;
262 }
263 }
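/*
 * Userspace side of the two value ioctls above, as a sketch (handle_fd is
 * an fd returned by GPIO_GET_LINEHANDLE_IOCTL; the set only succeeds on a
 * handle requested with GPIOHANDLE_REQUEST_OUTPUT; error handling omitted):
 *
 *	struct gpiohandle_data data = { 0 };
 *
 *	ioctl(handle_fd, GPIOHANDLE_GET_LINE_VALUES_IOCTL, &data);
 *	// data.values[i] now holds the logical level of line i
 *
 *	data.values[0] = 1;
 *	ioctl(handle_fd, GPIOHANDLE_SET_LINE_VALUES_IOCTL, &data);
 */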
264
265 #ifdef CONFIG_COMPAT
266 static long linehandle_ioctl_compat(struct file *file, unsigned int cmd,
267 unsigned long arg)
268 {
269 return linehandle_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
270 }
271 #endif
272
273 static void linehandle_free(struct linehandle_state *lh)
274 {
275 int i;
276
277 for (i = 0; i < lh->num_descs; i++)
278 if (lh->descs[i])
279 gpiod_free(lh->descs[i]);
280 kfree(lh->label);
281 gpio_device_put(lh->gdev);
282 kfree(lh);
283 }
284
285 static int linehandle_release(struct inode *inode, struct file *file)
286 {
287 linehandle_free(file->private_data);
288 return 0;
289 }
290
291 static const struct file_operations linehandle_fileops = {
292 .release = linehandle_release,
293 .owner = THIS_MODULE,
294 .llseek = noop_llseek,
295 .unlocked_ioctl = linehandle_ioctl,
296 #ifdef CONFIG_COMPAT
297 .compat_ioctl = linehandle_ioctl_compat,
298 #endif
299 };
300
301 DEFINE_FREE(linehandle_free, struct linehandle_state *, if (!IS_ERR_OR_NULL(_T)) linehandle_free(_T))
302
303 static int linehandle_create(struct gpio_device *gdev, void __user *ip)
304 {
305 struct gpiohandle_request handlereq;
306 struct linehandle_state *lh __free(linehandle_free) = NULL;
307 int i, ret;
308 u32 lflags;
309
310 if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
311 return -EFAULT;
312 if ((handlereq.lines == 0) || (handlereq.lines > GPIOHANDLES_MAX))
313 return -EINVAL;
314
315 lflags = handlereq.flags;
316
317 ret = linehandle_validate_flags(lflags);
318 if (ret)
319 return ret;
320
321 lh = kzalloc(sizeof(*lh), GFP_KERNEL);
322 if (!lh)
323 return -ENOMEM;
324 lh->gdev = gpio_device_get(gdev);
325
326 if (handlereq.consumer_label[0] != '\0') {
327 /* label is only initialized if consumer_label is set */
328 lh->label = kstrndup(handlereq.consumer_label,
329 sizeof(handlereq.consumer_label) - 1,
330 GFP_KERNEL);
331 if (!lh->label)
332 return -ENOMEM;
333 }
334
335 lh->num_descs = handlereq.lines;
336
337 /* Request each GPIO */
338 for (i = 0; i < handlereq.lines; i++) {
339 u32 offset = handlereq.lineoffsets[i];
340 struct gpio_desc *desc = gpio_device_get_desc(gdev, offset);
341
342 if (IS_ERR(desc))
343 return PTR_ERR(desc);
344
345 ret = gpiod_request_user(desc, lh->label);
346 if (ret)
347 return ret;
348 lh->descs[i] = desc;
349 linehandle_flags_to_desc_flags(handlereq.flags, &desc->flags);
350
351 ret = gpiod_set_transitory(desc, false);
352 if (ret < 0)
353 return ret;
354
355 /*
356 * Lines have to be requested explicitly for input
357 * or output, else the line will be treated "as is".
358 */
359 if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
360 int val = !!handlereq.default_values[i];
361
362 ret = gpiod_direction_output_nonotify(desc, val);
363 if (ret)
364 return ret;
365 } else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
366 ret = gpiod_direction_input_nonotify(desc);
367 if (ret)
368 return ret;
369 }
370
371 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
372
373 dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
374 offset);
375 }
376
377 FD_PREPARE(fdf, O_RDONLY | O_CLOEXEC,
378 anon_inode_getfile("gpio-linehandle", &linehandle_fileops,
379 lh, O_RDONLY | O_CLOEXEC));
380 if (fdf.err)
381 return fdf.err;
382 retain_and_null_ptr(lh);
383
384 handlereq.fd = fd_prepare_fd(fdf);
385 if (copy_to_user(ip, &handlereq, sizeof(handlereq)))
386 return -EFAULT;
387
388 fd_publish(fdf);
389
390 dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
391 lh->num_descs);
392
393 return 0;
394 }
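/*
 * Minimal userspace sketch of the request served by linehandle_create()
 * (the line offset and consumer name are arbitrary examples; error
 * handling omitted):
 *
 *	struct gpiohandle_request req = { 0 };
 *
 *	req.lineoffsets[0] = 4;
 *	req.lines = 1;
 *	req.flags = GPIOHANDLE_REQUEST_OUTPUT;
 *	req.default_values[0] = 1;
 *	strncpy(req.consumer_label, "example", sizeof(req.consumer_label) - 1);
 *	ioctl(chip_fd, GPIO_GET_LINEHANDLE_IOCTL, &req);
 *	// on success req.fd refers to the anon inode created above
 */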
395 #endif /* CONFIG_GPIO_CDEV_V1 */
396
397 /**
398 * struct line - contains the state of a requested line
399 * @desc: the GPIO descriptor for this line.
400 * @req: the corresponding line request
401 * @irq: the interrupt triggered in response to events on this GPIO
402 * @edflags: the edge flags, GPIO_V2_LINE_FLAG_EDGE_RISING and/or
403 * GPIO_V2_LINE_FLAG_EDGE_FALLING, indicating the edge detection applied
404 * @timestamp_ns: cache for the timestamp storing it between hardirq and
405 * IRQ thread, used to bring the timestamp close to the actual event
406 * @req_seqno: the seqno for the current edge event in the sequence of
407 * events for the corresponding line request. This is drawn from the @req.
408 * @line_seqno: the seqno for the current edge event in the sequence of
409 * events for this line.
410 * @work: the worker that implements software debouncing
411 * @sw_debounced: flag indicating if the software debouncer is active
412 * @level: the current debounced physical level of the line
413 * @hdesc: the Hardware Timestamp Engine (HTE) descriptor
414 * @raw_level: the line level at the time of event
415 * @total_discard_seq: the running counter of the discarded events
416 * @last_seqno: the last sequence number before debounce period expires
417 */
418 struct line {
419 struct gpio_desc *desc;
420 /*
421 * -- edge detector specific fields --
422 */
423 struct linereq *req;
424 unsigned int irq;
425 /*
426 * The flags for the active edge detector configuration.
427 *
428 * edflags is set by linereq_create(), linereq_free(), and
429 * linereq_set_config(), which are themselves mutually
430 * exclusive, and is accessed by edge_irq_thread(),
431 * process_hw_ts_thread() and debounce_work_func(),
432 * which can all live with a slightly stale value.
433 */
434 u64 edflags;
435 /*
436 * timestamp_ns and req_seqno are accessed only by
437 * edge_irq_handler() and edge_irq_thread(), which are themselves
438 * mutually exclusive, so no additional protection is necessary.
439 */
440 u64 timestamp_ns;
441 u32 req_seqno;
442 /*
443 * line_seqno is accessed by either edge_irq_thread() or
444 * debounce_work_func(), which are themselves mutually exclusive,
445 * so no additional protection is necessary.
446 */
447 u32 line_seqno;
448 /*
449 * -- debouncer specific fields --
450 */
451 struct delayed_work work;
452 /*
453 * sw_debounce is accessed by linereq_set_config(), which is the
454 * only setter, and linereq_get_values(), which can live with a
455 * slightly stale value.
456 */
457 unsigned int sw_debounced;
458 /*
459 * level is accessed by debounce_work_func(), which is the only
460 * setter, and linereq_get_values() which can live with a slightly
461 * stale value.
462 */
463 unsigned int level;
464 #ifdef CONFIG_HTE
465 struct hte_ts_desc hdesc;
466 /*
467 * HTE provider sets line level at the time of event. The valid
468 	 * value is 0 or 1; a negative value indicates an error.
469 */
470 int raw_level;
471 /*
472 	 * when sw_debounce is set on an HTE-enabled line, this is the running
473 * counter of the discarded events.
474 */
475 u32 total_discard_seq;
476 /*
477 	 * when sw_debounce is set on an HTE-enabled line, this variable records
478 * last sequence number before debounce period expires.
479 */
480 u32 last_seqno;
481 #endif /* CONFIG_HTE */
482 };
483
484 /**
485 * struct linereq - contains the state of a userspace line request
486 * @gdev: the GPIO device the line request pertains to
487 * @label: consumer label used to tag GPIO descriptors
488 * @num_lines: the number of lines in the lines array
489 * @wait: wait queue that handles blocking reads of events
490 * @device_unregistered_nb: notifier block for receiving gdev unregister events
491 * @event_buffer_size: the number of elements allocated in @events
492 * @events: KFIFO for the GPIO events
493 * @seqno: the sequence number for edge events generated on all lines in
494 * this line request. Note that this is not used when @num_lines is 1, as
495 * the line_seqno is then the same and is cheaper to calculate.
496 * @config_mutex: mutex for serializing ioctl() calls to ensure consistency
497 * of configuration, particularly multi-step accesses to desc flags.
498 * @lines: the lines held by this line request, with @num_lines elements.
499 */
500 struct linereq {
501 struct gpio_device *gdev;
502 const char *label;
503 u32 num_lines;
504 wait_queue_head_t wait;
505 struct notifier_block device_unregistered_nb;
506 u32 event_buffer_size;
507 DECLARE_KFIFO_PTR(events, struct gpio_v2_line_event);
508 atomic_t seqno;
509 struct mutex config_mutex;
510 struct line lines[] __counted_by(num_lines);
511 };
512
513 #define GPIO_V2_LINE_BIAS_FLAGS \
514 (GPIO_V2_LINE_FLAG_BIAS_PULL_UP | \
515 GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN | \
516 GPIO_V2_LINE_FLAG_BIAS_DISABLED)
517
518 #define GPIO_V2_LINE_DIRECTION_FLAGS \
519 (GPIO_V2_LINE_FLAG_INPUT | \
520 GPIO_V2_LINE_FLAG_OUTPUT)
521
522 #define GPIO_V2_LINE_DRIVE_FLAGS \
523 (GPIO_V2_LINE_FLAG_OPEN_DRAIN | \
524 GPIO_V2_LINE_FLAG_OPEN_SOURCE)
525
526 #define GPIO_V2_LINE_EDGE_FLAGS \
527 (GPIO_V2_LINE_FLAG_EDGE_RISING | \
528 GPIO_V2_LINE_FLAG_EDGE_FALLING)
529
530 #define GPIO_V2_LINE_FLAG_EDGE_BOTH GPIO_V2_LINE_EDGE_FLAGS
531
532 #define GPIO_V2_LINE_VALID_FLAGS \
533 (GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
534 GPIO_V2_LINE_DIRECTION_FLAGS | \
535 GPIO_V2_LINE_DRIVE_FLAGS | \
536 GPIO_V2_LINE_EDGE_FLAGS | \
537 GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME | \
538 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
539 GPIO_V2_LINE_BIAS_FLAGS)
540
541 /* subset of flags relevant for edge detector configuration */
542 #define GPIO_V2_LINE_EDGE_DETECTOR_FLAGS \
543 (GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
544 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
545 GPIO_V2_LINE_EDGE_FLAGS)
546
547 static int linereq_unregistered_notify(struct notifier_block *nb,
548 unsigned long action, void *data)
549 {
550 struct linereq *lr = container_of(nb, struct linereq,
551 device_unregistered_nb);
552
553 wake_up_poll(&lr->wait, EPOLLIN | EPOLLERR);
554
555 return NOTIFY_OK;
556 }
557
558 static void linereq_put_event(struct linereq *lr,
559 struct gpio_v2_line_event *le)
560 {
561 bool overflow = false;
562
563 scoped_guard(spinlock, &lr->wait.lock) {
564 if (kfifo_is_full(&lr->events)) {
565 overflow = true;
566 kfifo_skip(&lr->events);
567 }
568 kfifo_in(&lr->events, le, 1);
569 }
570 if (!overflow)
571 wake_up_poll(&lr->wait, EPOLLIN);
572 else
573 pr_debug_ratelimited("event FIFO is full - event dropped\n");
574 }
575
576 static u64 line_event_timestamp(struct line *line)
577 {
578 if (test_bit(GPIOD_FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
579 return ktime_get_real_ns();
580 else if (IS_ENABLED(CONFIG_HTE) &&
581 test_bit(GPIOD_FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
582 return line->timestamp_ns;
583
584 return ktime_get_ns();
585 }
586
587 static u32 line_event_id(int level)
588 {
589 return level ? GPIO_V2_LINE_EVENT_RISING_EDGE :
590 GPIO_V2_LINE_EVENT_FALLING_EDGE;
591 }
592
593 static inline char *make_irq_label(const char *orig)
594 {
595 char *new;
596
597 if (!orig)
598 return NULL;
599
600 new = kstrdup_and_replace(orig, '/', ':', GFP_KERNEL);
601 if (!new)
602 return ERR_PTR(-ENOMEM);
603
604 return new;
605 }
606
607 static inline void free_irq_label(const char *label)
608 {
609 kfree(label);
610 }
611
612 #ifdef CONFIG_HTE
613
614 static enum hte_return process_hw_ts_thread(void *p)
615 {
616 struct line *line;
617 struct linereq *lr;
618 struct gpio_v2_line_event le;
619 u64 edflags;
620 int level;
621
622 if (!p)
623 return HTE_CB_HANDLED;
624
625 line = p;
626 lr = line->req;
627
628 memset(&le, 0, sizeof(le));
629
630 le.timestamp_ns = line->timestamp_ns;
631 edflags = READ_ONCE(line->edflags);
632
633 switch (edflags & GPIO_V2_LINE_EDGE_FLAGS) {
634 case GPIO_V2_LINE_FLAG_EDGE_BOTH:
635 level = (line->raw_level >= 0) ?
636 line->raw_level :
637 gpiod_get_raw_value_cansleep(line->desc);
638
639 if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
640 level = !level;
641
642 le.id = line_event_id(level);
643 break;
644 case GPIO_V2_LINE_FLAG_EDGE_RISING:
645 le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
646 break;
647 case GPIO_V2_LINE_FLAG_EDGE_FALLING:
648 le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
649 break;
650 default:
651 return HTE_CB_HANDLED;
652 }
653 le.line_seqno = line->line_seqno;
654 le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
655 le.offset = gpiod_hwgpio(line->desc);
656
657 linereq_put_event(lr, &le);
658
659 return HTE_CB_HANDLED;
660 }
661
662 static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
663 {
664 struct line *line;
665 struct linereq *lr;
666 int diff_seqno = 0;
667
668 if (!ts || !p)
669 return HTE_CB_HANDLED;
670
671 line = p;
672 line->timestamp_ns = ts->tsc;
673 line->raw_level = ts->raw_level;
674 lr = line->req;
675
676 if (READ_ONCE(line->sw_debounced)) {
677 line->total_discard_seq++;
678 line->last_seqno = ts->seq;
679 mod_delayed_work(system_percpu_wq, &line->work,
680 usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
681 } else {
682 if (unlikely(ts->seq < line->line_seqno))
683 return HTE_CB_HANDLED;
684
685 diff_seqno = ts->seq - line->line_seqno;
686 line->line_seqno = ts->seq;
687 if (lr->num_lines != 1)
688 line->req_seqno = atomic_add_return(diff_seqno,
689 &lr->seqno);
690
691 return HTE_RUN_SECOND_CB;
692 }
693
694 return HTE_CB_HANDLED;
695 }
696
697 static int hte_edge_setup(struct line *line, u64 eflags)
698 {
699 int ret;
700 unsigned long flags = 0;
701 struct hte_ts_desc *hdesc = &line->hdesc;
702
703 if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
704 flags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &line->desc->flags) ?
705 HTE_FALLING_EDGE_TS :
706 HTE_RISING_EDGE_TS;
707 if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
708 flags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &line->desc->flags) ?
709 HTE_RISING_EDGE_TS :
710 HTE_FALLING_EDGE_TS;
711
712 line->total_discard_seq = 0;
713
714 hte_init_line_attr(hdesc, desc_to_gpio(line->desc), flags, NULL,
715 line->desc);
716
717 ret = hte_ts_get(NULL, hdesc, 0);
718 if (ret)
719 return ret;
720
721 return hte_request_ts_ns(hdesc, process_hw_ts, process_hw_ts_thread,
722 line);
723 }
724
725 #else
726
727 static int hte_edge_setup(struct line *line, u64 eflags)
728 {
729 return 0;
730 }
731 #endif /* CONFIG_HTE */
732
733 static irqreturn_t edge_irq_thread(int irq, void *p)
734 {
735 struct line *line = p;
736 struct linereq *lr = line->req;
737 struct gpio_v2_line_event le;
738
739 /* Do not leak kernel stack to userspace */
740 memset(&le, 0, sizeof(le));
741
742 if (line->timestamp_ns) {
743 le.timestamp_ns = line->timestamp_ns;
744 } else {
745 /*
746 * We may be running from a nested threaded interrupt in
747 * which case we didn't get the timestamp from
748 * edge_irq_handler().
749 */
750 le.timestamp_ns = line_event_timestamp(line);
751 if (lr->num_lines != 1)
752 line->req_seqno = atomic_inc_return(&lr->seqno);
753 }
754 line->timestamp_ns = 0;
755
756 switch (READ_ONCE(line->edflags) & GPIO_V2_LINE_EDGE_FLAGS) {
757 case GPIO_V2_LINE_FLAG_EDGE_BOTH:
758 le.id = line_event_id(gpiod_get_value_cansleep(line->desc));
759 break;
760 case GPIO_V2_LINE_FLAG_EDGE_RISING:
761 le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
762 break;
763 case GPIO_V2_LINE_FLAG_EDGE_FALLING:
764 le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
765 break;
766 default:
767 return IRQ_NONE;
768 }
769 line->line_seqno++;
770 le.line_seqno = line->line_seqno;
771 le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
772 le.offset = gpiod_hwgpio(line->desc);
773
774 linereq_put_event(lr, &le);
775
776 return IRQ_HANDLED;
777 }
778
779 static irqreturn_t edge_irq_handler(int irq, void *p)
780 {
781 struct line *line = p;
782 struct linereq *lr = line->req;
783
784 /*
785 * Just store the timestamp in hardirq context so we get it as
786 * close in time as possible to the actual event.
787 */
788 line->timestamp_ns = line_event_timestamp(line);
789
790 if (lr->num_lines != 1)
791 line->req_seqno = atomic_inc_return(&lr->seqno);
792
793 return IRQ_WAKE_THREAD;
794 }
795
796 /*
797 * returns the current debounced logical value.
798 */
799 static bool debounced_value(struct line *line)
800 {
801 bool value;
802
803 /*
804 * minor race - debouncer may be stopped here, so edge_detector_stop()
805 * must leave the value unchanged so the following will read the level
806 * from when the debouncer was last running.
807 */
808 value = READ_ONCE(line->level);
809
810 if (test_bit(GPIOD_FLAG_ACTIVE_LOW, &line->desc->flags))
811 value = !value;
812
813 return value;
814 }
815
816 static irqreturn_t debounce_irq_handler(int irq, void *p)
817 {
818 struct line *line = p;
819
820 mod_delayed_work(system_percpu_wq, &line->work,
821 usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
822
823 return IRQ_HANDLED;
824 }
825
826 static void debounce_work_func(struct work_struct *work)
827 {
828 struct gpio_v2_line_event le;
829 struct line *line = container_of(work, struct line, work.work);
830 struct linereq *lr;
831 u64 eflags, edflags = READ_ONCE(line->edflags);
832 int level = -1;
833 #ifdef CONFIG_HTE
834 int diff_seqno;
835
836 if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
837 level = line->raw_level;
838 #endif
839 if (level < 0)
840 level = gpiod_get_raw_value_cansleep(line->desc);
841 if (level < 0) {
842 pr_debug_ratelimited("debouncer failed to read line value\n");
843 return;
844 }
845
846 if (READ_ONCE(line->level) == level)
847 return;
848
849 WRITE_ONCE(line->level, level);
850
851 /* -- edge detection -- */
852 eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
853 if (!eflags)
854 return;
855
856 /* switch from physical level to logical - if they differ */
857 if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
858 level = !level;
859
860 /* ignore edges that are not being monitored */
861 if (((eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) && !level) ||
862 ((eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) && level))
863 return;
864
865 /* Do not leak kernel stack to userspace */
866 memset(&le, 0, sizeof(le));
867
868 lr = line->req;
869 le.timestamp_ns = line_event_timestamp(line);
870 le.offset = gpiod_hwgpio(line->desc);
871 #ifdef CONFIG_HTE
872 if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) {
873 /* discard events except the last one */
874 line->total_discard_seq -= 1;
875 diff_seqno = line->last_seqno - line->total_discard_seq -
876 line->line_seqno;
877 line->line_seqno = line->last_seqno - line->total_discard_seq;
878 le.line_seqno = line->line_seqno;
879 le.seqno = (lr->num_lines == 1) ?
880 le.line_seqno : atomic_add_return(diff_seqno, &lr->seqno);
881 } else
882 #endif /* CONFIG_HTE */
883 {
884 line->line_seqno++;
885 le.line_seqno = line->line_seqno;
886 le.seqno = (lr->num_lines == 1) ?
887 le.line_seqno : atomic_inc_return(&lr->seqno);
888 }
889
890 le.id = line_event_id(level);
891
892 linereq_put_event(lr, &le);
893 }
894
895 static int debounce_setup(struct line *line, unsigned int debounce_period_us)
896 {
897 unsigned long irqflags;
898 int ret, level, irq;
899 char *label;
900
901 /*
902 * Try hardware. Skip gpiod_set_config() to avoid emitting two
903 * CHANGED_CONFIG line state events.
904 */
905 ret = gpio_do_set_config(line->desc,
906 pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE,
907 debounce_period_us));
908 if (ret != -ENOTSUPP)
909 return ret;
910
911 if (debounce_period_us) {
912 /* setup software debounce */
913 level = gpiod_get_raw_value_cansleep(line->desc);
914 if (level < 0)
915 return level;
916
917 if (!(IS_ENABLED(CONFIG_HTE) &&
918 test_bit(GPIOD_FLAG_EVENT_CLOCK_HTE, &line->desc->flags))) {
919 irq = gpiod_to_irq(line->desc);
920 if (irq < 0)
921 return -ENXIO;
922
923 label = make_irq_label(line->req->label);
924 if (IS_ERR(label))
925 return -ENOMEM;
926
927 irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
928 ret = request_irq(irq, debounce_irq_handler, irqflags,
929 label, line);
930 if (ret) {
931 free_irq_label(label);
932 return ret;
933 }
934 line->irq = irq;
935 } else {
936 ret = hte_edge_setup(line, GPIO_V2_LINE_FLAG_EDGE_BOTH);
937 if (ret)
938 return ret;
939 }
940
941 WRITE_ONCE(line->level, level);
942 WRITE_ONCE(line->sw_debounced, 1);
943 }
944 return 0;
945 }
946
947 static bool gpio_v2_line_config_debounced(struct gpio_v2_line_config *lc,
948 unsigned int line_idx)
949 {
950 unsigned int i;
951 u64 mask = BIT_ULL(line_idx);
952
953 for (i = 0; i < lc->num_attrs; i++) {
954 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
955 (lc->attrs[i].mask & mask))
956 return true;
957 }
958 return false;
959 }
960
961 static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
962 unsigned int line_idx)
963 {
964 unsigned int i;
965 u64 mask = BIT_ULL(line_idx);
966
967 for (i = 0; i < lc->num_attrs; i++) {
968 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
969 (lc->attrs[i].mask & mask))
970 return lc->attrs[i].attr.debounce_period_us;
971 }
972 return 0;
973 }
974
975 static void edge_detector_stop(struct line *line)
976 {
977 if (line->irq) {
978 free_irq_label(free_irq(line->irq, line));
979 line->irq = 0;
980 }
981
982 #ifdef CONFIG_HTE
983 if (READ_ONCE(line->edflags) & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
984 hte_ts_put(&line->hdesc);
985 #endif
986
987 cancel_delayed_work_sync(&line->work);
988 WRITE_ONCE(line->sw_debounced, 0);
989 WRITE_ONCE(line->edflags, 0);
990 if (line->desc)
991 WRITE_ONCE(line->desc->debounce_period_us, 0);
992 /* do not change line->level - see comment in debounced_value() */
993 }
994
995 static int edge_detector_fifo_init(struct linereq *req)
996 {
997 if (kfifo_initialized(&req->events))
998 return 0;
999
1000 return kfifo_alloc(&req->events, req->event_buffer_size, GFP_KERNEL);
1001 }
1002
1003 static int edge_detector_setup(struct line *line,
1004 struct gpio_v2_line_config *lc,
1005 unsigned int line_idx, u64 edflags)
1006 {
1007 u32 debounce_period_us;
1008 unsigned long irqflags = 0;
1009 u64 eflags;
1010 int irq, ret;
1011 char *label;
1012
1013 eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
1014 if (eflags) {
1015 ret = edge_detector_fifo_init(line->req);
1016 if (ret)
1017 return ret;
1018 }
1019 if (gpio_v2_line_config_debounced(lc, line_idx)) {
1020 debounce_period_us = gpio_v2_line_config_debounce_period(lc, line_idx);
1021 ret = debounce_setup(line, debounce_period_us);
1022 if (ret)
1023 return ret;
1024 WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
1025 }
1026
1027 /* detection disabled or sw debouncer will provide edge detection */
1028 if (!eflags || READ_ONCE(line->sw_debounced))
1029 return 0;
1030
1031 if (IS_ENABLED(CONFIG_HTE) &&
1032 (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
1033 return hte_edge_setup(line, edflags);
1034
1035 irq = gpiod_to_irq(line->desc);
1036 if (irq < 0)
1037 return -ENXIO;
1038
1039 if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
1040 irqflags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &line->desc->flags) ?
1041 IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
1042 if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
1043 irqflags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &line->desc->flags) ?
1044 IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
1045 irqflags |= IRQF_ONESHOT;
1046
1047 label = make_irq_label(line->req->label);
1048 if (IS_ERR(label))
1049 return PTR_ERR(label);
1050
1051 /* Request a thread to read the events */
1052 ret = request_threaded_irq(irq, edge_irq_handler, edge_irq_thread,
1053 irqflags, label, line);
1054 if (ret) {
1055 free_irq_label(label);
1056 return ret;
1057 }
1058
1059 line->irq = irq;
1060 return 0;
1061 }
1062
1063 static int edge_detector_update(struct line *line,
1064 struct gpio_v2_line_config *lc,
1065 unsigned int line_idx, u64 edflags)
1066 {
1067 u64 active_edflags = READ_ONCE(line->edflags);
1068 unsigned int debounce_period_us =
1069 gpio_v2_line_config_debounce_period(lc, line_idx);
1070
1071 if ((active_edflags == edflags) &&
1072 (READ_ONCE(line->desc->debounce_period_us) == debounce_period_us))
1073 return 0;
1074
1075 /* sw debounced and still will be...*/
1076 if (debounce_period_us && READ_ONCE(line->sw_debounced)) {
1077 WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
1078 /*
1079 * ensure event fifo is initialised if edge detection
1080 * is now enabled.
1081 */
1082 if (edflags & GPIO_V2_LINE_EDGE_FLAGS)
1083 return edge_detector_fifo_init(line->req);
1084
1085 return 0;
1086 }
1087
1088 /* reconfiguring edge detection or sw debounce being disabled */
1089 if ((line->irq && !READ_ONCE(line->sw_debounced)) ||
1090 (active_edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) ||
1091 (!debounce_period_us && READ_ONCE(line->sw_debounced)))
1092 edge_detector_stop(line);
1093
1094 return edge_detector_setup(line, lc, line_idx, edflags);
1095 }
1096
1097 static u64 gpio_v2_line_config_flags(struct gpio_v2_line_config *lc,
1098 unsigned int line_idx)
1099 {
1100 unsigned int i;
1101 u64 mask = BIT_ULL(line_idx);
1102
1103 for (i = 0; i < lc->num_attrs; i++) {
1104 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_FLAGS) &&
1105 (lc->attrs[i].mask & mask))
1106 return lc->attrs[i].attr.flags;
1107 }
1108 return lc->flags;
1109 }
1110
1111 static int gpio_v2_line_config_output_value(struct gpio_v2_line_config *lc,
1112 unsigned int line_idx)
1113 {
1114 unsigned int i;
1115 u64 mask = BIT_ULL(line_idx);
1116
1117 for (i = 0; i < lc->num_attrs; i++) {
1118 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES) &&
1119 (lc->attrs[i].mask & mask))
1120 return !!(lc->attrs[i].attr.values & mask);
1121 }
1122 return 0;
1123 }
1124
1125 static int gpio_v2_line_flags_validate(u64 flags)
1126 {
1127 /* Return an error if an unknown flag is set */
1128 if (flags & ~GPIO_V2_LINE_VALID_FLAGS)
1129 return -EINVAL;
1130
1131 if (!IS_ENABLED(CONFIG_HTE) &&
1132 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
1133 return -EOPNOTSUPP;
1134
1135 /*
1136 * Do not allow both INPUT and OUTPUT flags to be set as they are
1137 * contradictory.
1138 */
1139 if ((flags & GPIO_V2_LINE_FLAG_INPUT) &&
1140 (flags & GPIO_V2_LINE_FLAG_OUTPUT))
1141 return -EINVAL;
1142
1143 /* Only allow one event clock source */
1144 if (IS_ENABLED(CONFIG_HTE) &&
1145 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME) &&
1146 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
1147 return -EINVAL;
1148
1149 /* Edge detection requires explicit input. */
1150 if ((flags & GPIO_V2_LINE_EDGE_FLAGS) &&
1151 !(flags & GPIO_V2_LINE_FLAG_INPUT))
1152 return -EINVAL;
1153
1154 /*
1155 * Do not allow OPEN_SOURCE and OPEN_DRAIN flags in a single
1156 * request. If the hardware actually supports enabling both at the
1157 * same time the electrical result would be disastrous.
1158 */
1159 if ((flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN) &&
1160 (flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE))
1161 return -EINVAL;
1162
1163 /* Drive requires explicit output direction. */
1164 if ((flags & GPIO_V2_LINE_DRIVE_FLAGS) &&
1165 !(flags & GPIO_V2_LINE_FLAG_OUTPUT))
1166 return -EINVAL;
1167
1168 /* Bias requires explicit direction. */
1169 if ((flags & GPIO_V2_LINE_BIAS_FLAGS) &&
1170 !(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
1171 return -EINVAL;
1172
1173 /* Only one bias flag can be set. */
1174 if (((flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED) &&
1175 (flags & (GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN |
1176 GPIO_V2_LINE_FLAG_BIAS_PULL_UP))) ||
1177 ((flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN) &&
1178 (flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)))
1179 return -EINVAL;
1180
1181 return 0;
1182 }
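/*
 * Example of the constraints enforced above (illustrative values only):
 *
 *	// accepted: input with both edges and a pull-up
 *	u64 ok = GPIO_V2_LINE_FLAG_INPUT |
 *		 GPIO_V2_LINE_FLAG_EDGE_RISING |
 *		 GPIO_V2_LINE_FLAG_EDGE_FALLING |
 *		 GPIO_V2_LINE_FLAG_BIAS_PULL_UP;
 *
 *	// rejected with -EINVAL: edge detection without explicit input
 *	u64 bad = GPIO_V2_LINE_FLAG_OUTPUT | GPIO_V2_LINE_FLAG_EDGE_RISING;
 */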
1183
1184 static int gpio_v2_line_config_validate(struct gpio_v2_line_config *lc,
1185 unsigned int num_lines)
1186 {
1187 unsigned int i;
1188 u64 flags;
1189 int ret;
1190
1191 if (lc->num_attrs > GPIO_V2_LINE_NUM_ATTRS_MAX)
1192 return -EINVAL;
1193
1194 if (!mem_is_zero(lc->padding, sizeof(lc->padding)))
1195 return -EINVAL;
1196
1197 for (i = 0; i < num_lines; i++) {
1198 flags = gpio_v2_line_config_flags(lc, i);
1199 ret = gpio_v2_line_flags_validate(flags);
1200 if (ret)
1201 return ret;
1202
1203 /* debounce requires explicit input */
1204 if (gpio_v2_line_config_debounced(lc, i) &&
1205 !(flags & GPIO_V2_LINE_FLAG_INPUT))
1206 return -EINVAL;
1207 }
1208 return 0;
1209 }
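/*
 * Sketch of a userspace config that passes the validation above: input
 * lines with edge detection, with the first line in the request debounced
 * for 5 ms via a per-line attribute (values are arbitrary examples):
 *
 *	struct gpio_v2_line_config lc = { 0 };
 *
 *	lc.flags = GPIO_V2_LINE_FLAG_INPUT |
 *		   GPIO_V2_LINE_FLAG_EDGE_RISING |
 *		   GPIO_V2_LINE_FLAG_EDGE_FALLING;
 *	lc.num_attrs = 1;
 *	lc.attrs[0].attr.id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
 *	lc.attrs[0].attr.debounce_period_us = 5000;
 *	lc.attrs[0].mask = 1;	// applies to offsets[0] only
 */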
1210
1211 static void gpio_v2_line_config_flags_to_desc_flags(u64 lflags,
1212 unsigned long *flagsp)
1213 {
1214 unsigned long flags = READ_ONCE(*flagsp);
1215
1216 assign_bit(GPIOD_FLAG_ACTIVE_LOW, &flags,
1217 lflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW);
1218
1219 if (lflags & GPIO_V2_LINE_FLAG_OUTPUT)
1220 set_bit(GPIOD_FLAG_IS_OUT, &flags);
1221 else if (lflags & GPIO_V2_LINE_FLAG_INPUT)
1222 clear_bit(GPIOD_FLAG_IS_OUT, &flags);
1223
1224 assign_bit(GPIOD_FLAG_EDGE_RISING, &flags,
1225 lflags & GPIO_V2_LINE_FLAG_EDGE_RISING);
1226 assign_bit(GPIOD_FLAG_EDGE_FALLING, &flags,
1227 lflags & GPIO_V2_LINE_FLAG_EDGE_FALLING);
1228
1229 assign_bit(GPIOD_FLAG_OPEN_DRAIN, &flags,
1230 lflags & GPIO_V2_LINE_FLAG_OPEN_DRAIN);
1231 assign_bit(GPIOD_FLAG_OPEN_SOURCE, &flags,
1232 lflags & GPIO_V2_LINE_FLAG_OPEN_SOURCE);
1233
1234 assign_bit(GPIOD_FLAG_PULL_UP, &flags,
1235 lflags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP);
1236 assign_bit(GPIOD_FLAG_PULL_DOWN, &flags,
1237 lflags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN);
1238 assign_bit(GPIOD_FLAG_BIAS_DISABLE, &flags,
1239 lflags & GPIO_V2_LINE_FLAG_BIAS_DISABLED);
1240
1241 assign_bit(GPIOD_FLAG_EVENT_CLOCK_REALTIME, &flags,
1242 lflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME);
1243 assign_bit(GPIOD_FLAG_EVENT_CLOCK_HTE, &flags,
1244 lflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
1245
1246 WRITE_ONCE(*flagsp, flags);
1247 }
1248
1249 static long linereq_get_values(struct linereq *lr, void __user *ip)
1250 {
1251 struct gpio_v2_line_values lv;
1252 DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
1253 struct gpio_desc **descs;
1254 unsigned int i, didx, num_get;
1255 bool val;
1256 int ret;
1257
1258 /* NOTE: It's ok to read values of output lines. */
1259 if (copy_from_user(&lv, ip, sizeof(lv)))
1260 return -EFAULT;
1261
1262 /*
1263 * gpiod_get_array_value_complex() requires compacted desc and val
1264 * arrays, rather than the sparse ones in lv.
1265 * Calculation of num_get and construction of the desc array is
1266 * optimized to avoid allocation for the desc array for the common
1267 * num_get == 1 case.
1268 */
1269 /* scan requested lines to calculate the subset to get */
1270 for (num_get = 0, i = 0; i < lr->num_lines; i++) {
1271 if (lv.mask & BIT_ULL(i)) {
1272 num_get++;
1273 /* capture desc for the num_get == 1 case */
1274 descs = &lr->lines[i].desc;
1275 }
1276 }
1277
1278 if (num_get == 0)
1279 return -EINVAL;
1280
1281 if (num_get != 1) {
1282 /* build compacted desc array */
1283 descs = kmalloc_array(num_get, sizeof(*descs), GFP_KERNEL);
1284 if (!descs)
1285 return -ENOMEM;
1286 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1287 if (lv.mask & BIT_ULL(i)) {
1288 descs[didx] = lr->lines[i].desc;
1289 didx++;
1290 }
1291 }
1292 }
1293 ret = gpiod_get_array_value_complex(false, true, num_get,
1294 descs, NULL, vals);
1295
1296 if (num_get != 1)
1297 kfree(descs);
1298 if (ret)
1299 return ret;
1300
1301 lv.bits = 0;
1302 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1303 /* unpack compacted vals for the response */
1304 if (lv.mask & BIT_ULL(i)) {
1305 if (lr->lines[i].sw_debounced)
1306 val = debounced_value(&lr->lines[i]);
1307 else
1308 val = test_bit(didx, vals);
1309 if (val)
1310 lv.bits |= BIT_ULL(i);
1311 didx++;
1312 }
1313 }
1314
1315 if (copy_to_user(ip, &lv, sizeof(lv)))
1316 return -EFAULT;
1317
1318 return 0;
1319 }
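/*
 * Corresponding userspace call, as a sketch (req_fd is assumed to be the
 * fd returned by GPIO_V2_GET_LINE_IOCTL; error handling omitted):
 *
 *	struct gpio_v2_line_values lv = { 0 };
 *
 *	lv.mask = 0x5;	// read requested lines 0 and 2
 *	ioctl(req_fd, GPIO_V2_LINE_GET_VALUES_IOCTL, &lv);
 *	// bit i of lv.bits now holds the value of requested line i
 */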
1320
1321 static long linereq_set_values(struct linereq *lr, void __user *ip)
1322 {
1323 DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
1324 struct gpio_v2_line_values lv;
1325 struct gpio_desc **descs;
1326 unsigned int i, didx, num_set;
1327 int ret;
1328
1329 if (copy_from_user(&lv, ip, sizeof(lv)))
1330 return -EFAULT;
1331
1332 guard(mutex)(&lr->config_mutex);
1333
1334 /*
1335 * gpiod_set_array_value_complex() requires compacted desc and val
1336 * arrays, rather than the sparse ones in lv.
1337 * Calculation of num_set and construction of the descs and vals arrays
1338 * is optimized to minimize scanning the lv->mask, and to avoid
1339 * allocation for the desc array for the common num_set == 1 case.
1340 */
1341 bitmap_zero(vals, GPIO_V2_LINES_MAX);
1342 /* scan requested lines to determine the subset to be set */
1343 for (num_set = 0, i = 0; i < lr->num_lines; i++) {
1344 if (lv.mask & BIT_ULL(i)) {
1345 /* add to compacted values */
1346 if (lv.bits & BIT_ULL(i))
1347 __set_bit(num_set, vals);
1348 num_set++;
1349 /* capture desc for the num_set == 1 case */
1350 descs = &lr->lines[i].desc;
1351 }
1352 }
1353 if (num_set == 0)
1354 return -EINVAL;
1355
1356 if (num_set != 1) {
1357 /* build compacted desc array */
1358 descs = kmalloc_array(num_set, sizeof(*descs), GFP_KERNEL);
1359 if (!descs)
1360 return -ENOMEM;
1361 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1362 if (lv.mask & BIT_ULL(i)) {
1363 descs[didx] = lr->lines[i].desc;
1364 didx++;
1365 }
1366 }
1367 }
1368 ret = gpiod_set_array_value_complex(false, true, num_set,
1369 descs, NULL, vals);
1370
1371 if (num_set != 1)
1372 kfree(descs);
1373 return ret;
1374 }
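/*
 * Userspace counterpart, as a sketch: drive requested line 1 high and
 * line 3 low in one call, leaving the other lines in the request
 * untouched (req_fd from GPIO_V2_GET_LINE_IOCTL):
 *
 *	struct gpio_v2_line_values lv = { 0 };
 *
 *	lv.mask = (1ULL << 1) | (1ULL << 3);
 *	lv.bits = 1ULL << 1;
 *	ioctl(req_fd, GPIO_V2_LINE_SET_VALUES_IOCTL, &lv);
 */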
1375
1376 static long linereq_set_config(struct linereq *lr, void __user *ip)
1377 {
1378 struct gpio_v2_line_config lc;
1379 struct gpio_desc *desc;
1380 struct line *line;
1381 unsigned int i;
1382 u64 flags, edflags;
1383 int ret;
1384
1385 if (copy_from_user(&lc, ip, sizeof(lc)))
1386 return -EFAULT;
1387
1388 ret = gpio_v2_line_config_validate(&lc, lr->num_lines);
1389 if (ret)
1390 return ret;
1391
1392 guard(mutex)(&lr->config_mutex);
1393
1394 for (i = 0; i < lr->num_lines; i++) {
1395 line = &lr->lines[i];
1396 desc = lr->lines[i].desc;
1397 flags = gpio_v2_line_config_flags(&lc, i);
1398 /*
1399 * Lines not explicitly reconfigured as input or output
1400 * are left unchanged.
1401 */
1402 if (!(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
1403 continue;
1404 gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
1405 edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
1406 if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
1407 int val = gpio_v2_line_config_output_value(&lc, i);
1408
1409 edge_detector_stop(line);
1410 ret = gpiod_direction_output_nonotify(desc, val);
1411 if (ret)
1412 return ret;
1413 } else {
1414 ret = gpiod_direction_input_nonotify(desc);
1415 if (ret)
1416 return ret;
1417
1418 ret = edge_detector_update(line, &lc, i, edflags);
1419 if (ret)
1420 return ret;
1421 }
1422
1423 WRITE_ONCE(line->edflags, edflags);
1424
1425 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
1426 }
1427 return 0;
1428 }
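/*
 * Sketch of reconfiguring a request from userspace, e.g. switching all
 * lines in the request to rising-edge inputs (req_fd is assumed to come
 * from GPIO_V2_GET_LINE_IOCTL; error handling omitted):
 *
 *	struct gpio_v2_line_config lc = { 0 };
 *
 *	lc.flags = GPIO_V2_LINE_FLAG_INPUT | GPIO_V2_LINE_FLAG_EDGE_RISING;
 *	ioctl(req_fd, GPIO_V2_LINE_SET_CONFIG_IOCTL, &lc);
 */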
1429
1430 static long linereq_ioctl(struct file *file, unsigned int cmd,
1431 unsigned long arg)
1432 {
1433 struct linereq *lr = file->private_data;
1434 void __user *ip = (void __user *)arg;
1435
1436 guard(srcu)(&lr->gdev->srcu);
1437
1438 if (!rcu_access_pointer(lr->gdev->chip))
1439 return -ENODEV;
1440
1441 switch (cmd) {
1442 case GPIO_V2_LINE_GET_VALUES_IOCTL:
1443 return linereq_get_values(lr, ip);
1444 case GPIO_V2_LINE_SET_VALUES_IOCTL:
1445 return linereq_set_values(lr, ip);
1446 case GPIO_V2_LINE_SET_CONFIG_IOCTL:
1447 return linereq_set_config(lr, ip);
1448 default:
1449 return -EINVAL;
1450 }
1451 }
1452
1453 #ifdef CONFIG_COMPAT
1454 static long linereq_ioctl_compat(struct file *file, unsigned int cmd,
1455 unsigned long arg)
1456 {
1457 return linereq_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
1458 }
1459 #endif
1460
1461 static __poll_t linereq_poll(struct file *file,
1462 struct poll_table_struct *wait)
1463 {
1464 struct linereq *lr = file->private_data;
1465 __poll_t events = 0;
1466
1467 guard(srcu)(&lr->gdev->srcu);
1468
1469 if (!rcu_access_pointer(lr->gdev->chip))
1470 return EPOLLHUP | EPOLLERR;
1471
1472 poll_wait(file, &lr->wait, wait);
1473
1474 if (!kfifo_is_empty_spinlocked_noirqsave(&lr->events,
1475 &lr->wait.lock))
1476 events = EPOLLIN | EPOLLRDNORM;
1477
1478 return events;
1479 }
1480
1481 static ssize_t linereq_read(struct file *file, char __user *buf,
1482 size_t count, loff_t *f_ps)
1483 {
1484 struct linereq *lr = file->private_data;
1485 struct gpio_v2_line_event le;
1486 ssize_t bytes_read = 0;
1487 int ret;
1488
1489 guard(srcu)(&lr->gdev->srcu);
1490
1491 if (!rcu_access_pointer(lr->gdev->chip))
1492 return -ENODEV;
1493
1494 if (count < sizeof(le))
1495 return -EINVAL;
1496
1497 do {
1498 scoped_guard(spinlock, &lr->wait.lock) {
1499 if (kfifo_is_empty(&lr->events)) {
1500 if (bytes_read)
1501 return bytes_read;
1502
1503 if (file->f_flags & O_NONBLOCK)
1504 return -EAGAIN;
1505
1506 ret = wait_event_interruptible_locked(lr->wait,
1507 !kfifo_is_empty(&lr->events));
1508 if (ret)
1509 return ret;
1510 }
1511
1512 if (kfifo_out(&lr->events, &le, 1) != 1) {
1513 /*
1514 * This should never happen - we hold the
1515 * lock from the moment we learned the fifo
1516 * is no longer empty until now.
1517 */
1518 WARN(1, "failed to read from non-empty kfifo");
1519 return -EIO;
1520 }
1521 }
1522
1523 if (copy_to_user(buf + bytes_read, &le, sizeof(le)))
1524 return -EFAULT;
1525 bytes_read += sizeof(le);
1526 } while (count >= bytes_read + sizeof(le));
1527
1528 return bytes_read;
1529 }
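/*
 * Reading edge events from userspace, as a sketch (the request fd is also
 * pollable, see linereq_poll(); req_fd is an assumed name):
 *
 *	struct gpio_v2_line_event event;
 *	ssize_t n;
 *
 *	n = read(req_fd, &event, sizeof(event));
 *	// on success n == sizeof(event) and event.id, event.offset,
 *	// event.timestamp_ns and event.seqno describe one edge, in the
 *	// order the edges occurred
 */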
1530
1531 static void linereq_free(struct linereq *lr)
1532 {
1533 unsigned int i;
1534
1535 if (lr->device_unregistered_nb.notifier_call)
1536 blocking_notifier_chain_unregister(&lr->gdev->device_notifier,
1537 &lr->device_unregistered_nb);
1538
1539 for (i = 0; i < lr->num_lines; i++) {
1540 if (lr->lines[i].desc) {
1541 edge_detector_stop(&lr->lines[i]);
1542 gpiod_free(lr->lines[i].desc);
1543 }
1544 }
1545 kfifo_free(&lr->events);
1546 kfree(lr->label);
1547 gpio_device_put(lr->gdev);
1548 kvfree(lr);
1549 }
1550
1551 static int linereq_release(struct inode *inode, struct file *file)
1552 {
1553 struct linereq *lr = file->private_data;
1554
1555 linereq_free(lr);
1556 return 0;
1557 }
1558
1559 #ifdef CONFIG_PROC_FS
1560 static void linereq_show_fdinfo(struct seq_file *out, struct file *file)
1561 {
1562 struct linereq *lr = file->private_data;
1563 struct device *dev = &lr->gdev->dev;
1564 u16 i;
1565
1566 seq_printf(out, "gpio-chip:\t%s\n", dev_name(dev));
1567
1568 for (i = 0; i < lr->num_lines; i++)
1569 seq_printf(out, "gpio-line:\t%d\n",
1570 gpiod_hwgpio(lr->lines[i].desc));
1571 }
1572 #endif
1573
1574 static const struct file_operations line_fileops = {
1575 .release = linereq_release,
1576 .read = linereq_read,
1577 .poll = linereq_poll,
1578 .owner = THIS_MODULE,
1579 .llseek = noop_llseek,
1580 .unlocked_ioctl = linereq_ioctl,
1581 #ifdef CONFIG_COMPAT
1582 .compat_ioctl = linereq_ioctl_compat,
1583 #endif
1584 #ifdef CONFIG_PROC_FS
1585 .show_fdinfo = linereq_show_fdinfo,
1586 #endif
1587 };
1588
1589 static int linereq_create(struct gpio_device *gdev, void __user *ip)
1590 {
1591 struct gpio_v2_line_request ulr;
1592 struct gpio_v2_line_config *lc;
1593 struct linereq *lr;
1594 struct file *file;
1595 u64 flags, edflags;
1596 unsigned int i;
1597 int fd, ret;
1598
1599 if (copy_from_user(&ulr, ip, sizeof(ulr)))
1600 return -EFAULT;
1601
1602 if ((ulr.num_lines == 0) || (ulr.num_lines > GPIO_V2_LINES_MAX))
1603 return -EINVAL;
1604
1605 if (!mem_is_zero(ulr.padding, sizeof(ulr.padding)))
1606 return -EINVAL;
1607
1608 lc = &ulr.config;
1609 ret = gpio_v2_line_config_validate(lc, ulr.num_lines);
1610 if (ret)
1611 return ret;
1612
1613 lr = kvzalloc(struct_size(lr, lines, ulr.num_lines), GFP_KERNEL);
1614 if (!lr)
1615 return -ENOMEM;
1616 lr->num_lines = ulr.num_lines;
1617
1618 lr->gdev = gpio_device_get(gdev);
1619
1620 for (i = 0; i < ulr.num_lines; i++) {
1621 lr->lines[i].req = lr;
1622 WRITE_ONCE(lr->lines[i].sw_debounced, 0);
1623 INIT_DELAYED_WORK(&lr->lines[i].work, debounce_work_func);
1624 }
1625
1626 if (ulr.consumer[0] != '\0') {
1627 /* label is only initialized if consumer is set */
1628 lr->label = kstrndup(ulr.consumer, sizeof(ulr.consumer) - 1,
1629 GFP_KERNEL);
1630 if (!lr->label) {
1631 ret = -ENOMEM;
1632 goto out_free_linereq;
1633 }
1634 }
1635
1636 mutex_init(&lr->config_mutex);
1637 init_waitqueue_head(&lr->wait);
1638 INIT_KFIFO(lr->events);
1639 lr->event_buffer_size = ulr.event_buffer_size;
1640 if (lr->event_buffer_size == 0)
1641 lr->event_buffer_size = ulr.num_lines * 16;
1642 else if (lr->event_buffer_size > GPIO_V2_LINES_MAX * 16)
1643 lr->event_buffer_size = GPIO_V2_LINES_MAX * 16;
1644
1645 atomic_set(&lr->seqno, 0);
1646
1647 /* Request each GPIO */
1648 for (i = 0; i < ulr.num_lines; i++) {
1649 u32 offset = ulr.offsets[i];
1650 struct gpio_desc *desc = gpio_device_get_desc(gdev, offset);
1651
1652 if (IS_ERR(desc)) {
1653 ret = PTR_ERR(desc);
1654 goto out_free_linereq;
1655 }
1656
1657 ret = gpiod_request_user(desc, lr->label);
1658 if (ret)
1659 goto out_free_linereq;
1660
1661 lr->lines[i].desc = desc;
1662 flags = gpio_v2_line_config_flags(lc, i);
1663 gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
1664
1665 ret = gpiod_set_transitory(desc, false);
1666 if (ret < 0)
1667 goto out_free_linereq;
1668
1669 edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
1670 /*
1671 * Lines have to be requested explicitly for input
1672 * or output, else the line will be treated "as is".
1673 */
1674 if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
1675 int val = gpio_v2_line_config_output_value(lc, i);
1676
1677 ret = gpiod_direction_output_nonotify(desc, val);
1678 if (ret)
1679 goto out_free_linereq;
1680 } else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
1681 ret = gpiod_direction_input_nonotify(desc);
1682 if (ret)
1683 goto out_free_linereq;
1684
1685 ret = edge_detector_setup(&lr->lines[i], lc, i,
1686 edflags);
1687 if (ret)
1688 goto out_free_linereq;
1689 }
1690
1691 lr->lines[i].edflags = edflags;
1692
1693 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
1694
1695 dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
1696 offset);
1697 }
1698
1699 lr->device_unregistered_nb.notifier_call = linereq_unregistered_notify;
1700 ret = blocking_notifier_chain_register(&gdev->device_notifier,
1701 &lr->device_unregistered_nb);
1702 if (ret)
1703 goto out_free_linereq;
1704
1705 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
1706 if (fd < 0) {
1707 ret = fd;
1708 goto out_free_linereq;
1709 }
1710
1711 file = anon_inode_getfile("gpio-line", &line_fileops, lr,
1712 O_RDONLY | O_CLOEXEC);
1713 if (IS_ERR(file)) {
1714 ret = PTR_ERR(file);
1715 goto out_put_unused_fd;
1716 }
1717
1718 ulr.fd = fd;
1719 if (copy_to_user(ip, &ulr, sizeof(ulr))) {
1720 /*
1721 * fput() will trigger the release() callback, so do not go onto
1722 * the regular error cleanup path here.
1723 */
1724 fput(file);
1725 put_unused_fd(fd);
1726 return -EFAULT;
1727 }
1728
1729 fd_install(fd, file);
1730
1731 dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
1732 lr->num_lines);
1733
1734 return 0;
1735
1736 out_put_unused_fd:
1737 put_unused_fd(fd);
1738 out_free_linereq:
1739 linereq_free(lr);
1740 return ret;
1741 }
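/*
 * Minimal userspace sketch of the request served by linereq_create()
 * (offsets and consumer name are arbitrary examples; error handling
 * omitted):
 *
 *	struct gpio_v2_line_request req = { 0 };
 *
 *	req.offsets[0] = 4;
 *	req.offsets[1] = 7;
 *	req.num_lines = 2;
 *	req.config.flags = GPIO_V2_LINE_FLAG_INPUT |
 *			   GPIO_V2_LINE_FLAG_EDGE_RISING |
 *			   GPIO_V2_LINE_FLAG_EDGE_FALLING;
 *	strncpy(req.consumer, "example", sizeof(req.consumer) - 1);
 *	ioctl(chip_fd, GPIO_V2_GET_LINE_IOCTL, &req);
 *	// on success req.fd is the line request fd used with the
 *	// GPIO_V2_LINE_*_IOCTLs and the read()/poll() handlers above
 */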
1742
1743 #ifdef CONFIG_GPIO_CDEV_V1
1744
1745 /*
1746 * GPIO line event management
1747 */
1748
1749 /**
1750 * struct lineevent_state - contains the state of a userspace event
1751 * @gdev: the GPIO device the event pertains to
1752 * @label: consumer label used to tag descriptors
1753 * @desc: the GPIO descriptor held by this event
1754 * @eflags: the event flags this line was requested with
1755  * @irq: the interrupt triggered in response to events on this GPIO
1756 * @wait: wait queue that handles blocking reads of events
1757 * @device_unregistered_nb: notifier block for receiving gdev unregister events
1758 * @events: KFIFO for the GPIO events
1759 * @timestamp: cache for the timestamp storing it between hardirq
1760 * and IRQ thread, used to bring the timestamp close to the actual
1761 * event
1762 */
1763 struct lineevent_state {
1764 struct gpio_device *gdev;
1765 const char *label;
1766 struct gpio_desc *desc;
1767 u32 eflags;
1768 int irq;
1769 wait_queue_head_t wait;
1770 struct notifier_block device_unregistered_nb;
1771 DECLARE_KFIFO(events, struct gpioevent_data, 16);
1772 u64 timestamp;
1773 };
1774
1775 #define GPIOEVENT_REQUEST_VALID_FLAGS \
1776 (GPIOEVENT_REQUEST_RISING_EDGE | \
1777 GPIOEVENT_REQUEST_FALLING_EDGE)
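/*
 * Userspace sketch of the v1 event request consumed by the lineevent code
 * in this file (line offset and consumer name are arbitrary examples;
 * error handling omitted):
 *
 *	struct gpioevent_request req = { 0 };
 *
 *	req.lineoffset = 4;
 *	req.handleflags = GPIOHANDLE_REQUEST_INPUT;
 *	req.eventflags = GPIOEVENT_REQUEST_BOTH_EDGES;
 *	strncpy(req.consumer_label, "example", sizeof(req.consumer_label) - 1);
 *	ioctl(chip_fd, GPIO_GET_LINEEVENT_IOCTL, &req);
 *	// req.fd can then be read()/poll()ed for struct gpioevent_data
 */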
1778
1779 static __poll_t lineevent_poll(struct file *file,
1780 struct poll_table_struct *wait)
1781 {
1782 struct lineevent_state *le = file->private_data;
1783 __poll_t events = 0;
1784
1785 guard(srcu)(&le->gdev->srcu);
1786
1787 if (!rcu_access_pointer(le->gdev->chip))
1788 return EPOLLHUP | EPOLLERR;
1789
1790 poll_wait(file, &le->wait, wait);
1791
1792 if (!kfifo_is_empty_spinlocked_noirqsave(&le->events, &le->wait.lock))
1793 events = EPOLLIN | EPOLLRDNORM;
1794
1795 return events;
1796 }
1797
1798 static int lineevent_unregistered_notify(struct notifier_block *nb,
1799 unsigned long action, void *data)
1800 {
1801 struct lineevent_state *le = container_of(nb, struct lineevent_state,
1802 device_unregistered_nb);
1803
1804 wake_up_poll(&le->wait, EPOLLIN | EPOLLERR);
1805
1806 return NOTIFY_OK;
1807 }
1808
1809 struct compat_gpioeevent_data {
1810 compat_u64 timestamp;
1811 u32 id;
1812 };
1813
1814 static ssize_t lineevent_read(struct file *file, char __user *buf,
1815 size_t count, loff_t *f_ps)
1816 {
1817 struct lineevent_state *le = file->private_data;
1818 struct gpioevent_data ge;
1819 ssize_t bytes_read = 0;
1820 ssize_t ge_size;
1821 int ret;
1822
1823 guard(srcu)(&le->gdev->srcu);
1824
1825 if (!rcu_access_pointer(le->gdev->chip))
1826 return -ENODEV;
1827
1828 /*
1829 * When compatible system call is being used the struct gpioevent_data,
1830  * in the case of at least ia32, has a different size due to the alignment
1831 * differences. Because we have first member 64 bits followed by one of
1832 * 32 bits there is no gap between them. The only difference is the
1833 * padding at the end of the data structure. Hence, we calculate the
1834 * actual sizeof() and pass this as an argument to copy_to_user() to
1835 * drop unneeded bytes from the output.
1836 */
1837 if (compat_need_64bit_alignment_fixup())
1838 ge_size = sizeof(struct compat_gpioeevent_data);
1839 else
1840 ge_size = sizeof(struct gpioevent_data);
1841 if (count < ge_size)
1842 return -EINVAL;
1843
1844 do {
1845 scoped_guard(spinlock, &le->wait.lock) {
1846 if (kfifo_is_empty(&le->events)) {
1847 if (bytes_read)
1848 return bytes_read;
1849
1850 if (file->f_flags & O_NONBLOCK)
1851 return -EAGAIN;
1852
1853 ret = wait_event_interruptible_locked(le->wait,
1854 !kfifo_is_empty(&le->events));
1855 if (ret)
1856 return ret;
1857 }
1858
1859 if (kfifo_out(&le->events, &ge, 1) != 1) {
1860 /*
1861 * This should never happen - we hold the
1862 * lock from the moment we learned the fifo
1863 * is no longer empty until now.
1864 */
1865 WARN(1, "failed to read from non-empty kfifo");
1866 return -EIO;
1867 }
1868 }
1869
1870 if (copy_to_user(buf + bytes_read, &ge, ge_size))
1871 return -EFAULT;
1872 bytes_read += ge_size;
1873 } while (count >= bytes_read + ge_size);
1874
1875 return bytes_read;
1876 }
1877
1878 static void lineevent_free(struct lineevent_state *le)
1879 {
1880 if (le->device_unregistered_nb.notifier_call)
1881 blocking_notifier_chain_unregister(&le->gdev->device_notifier,
1882 &le->device_unregistered_nb);
1883 if (le->irq)
1884 free_irq_label(free_irq(le->irq, le));
1885 if (le->desc)
1886 gpiod_free(le->desc);
1887 kfree(le->label);
1888 gpio_device_put(le->gdev);
1889 kfree(le);
1890 }
1891
1892 static int lineevent_release(struct inode *inode, struct file *file)
1893 {
1894 lineevent_free(file->private_data);
1895 return 0;
1896 }
1897
1898 static long lineevent_ioctl(struct file *file, unsigned int cmd,
1899 unsigned long arg)
1900 {
1901 struct lineevent_state *le = file->private_data;
1902 void __user *ip = (void __user *)arg;
1903 struct gpiohandle_data ghd;
1904
1905 guard(srcu)(&le->gdev->srcu);
1906
1907 if (!rcu_access_pointer(le->gdev->chip))
1908 return -ENODEV;
1909
1910 /*
1911 * We can get the value for an event line but not set it,
1912 * because it is input by definition.
1913 */
1914 if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
1915 int val;
1916
1917 memset(&ghd, 0, sizeof(ghd));
1918
1919 val = gpiod_get_value_cansleep(le->desc);
1920 if (val < 0)
1921 return val;
1922 ghd.values[0] = val;
1923
1924 if (copy_to_user(ip, &ghd, sizeof(ghd)))
1925 return -EFAULT;
1926
1927 return 0;
1928 }
1929 return -EINVAL;
1930 }
1931
1932 #ifdef CONFIG_COMPAT
1933 static long lineevent_ioctl_compat(struct file *file, unsigned int cmd,
1934 unsigned long arg)
1935 {
1936 return lineevent_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
1937 }
1938 #endif
1939
1940 static const struct file_operations lineevent_fileops = {
1941 .release = lineevent_release,
1942 .read = lineevent_read,
1943 .poll = lineevent_poll,
1944 .owner = THIS_MODULE,
1945 .llseek = noop_llseek,
1946 .unlocked_ioctl = lineevent_ioctl,
1947 #ifdef CONFIG_COMPAT
1948 .compat_ioctl = lineevent_ioctl_compat,
1949 #endif
1950 };
1951
1952 static irqreturn_t lineevent_irq_thread(int irq, void *p)
1953 {
1954 struct lineevent_state *le = p;
1955 struct gpioevent_data ge;
1956 int ret;
1957
1958 /* Do not leak kernel stack to userspace */
1959 memset(&ge, 0, sizeof(ge));
1960
1961 /*
1962 * We may be running from a nested threaded interrupt in which case
1963 * we didn't get the timestamp from lineevent_irq_handler().
1964 */
1965 if (!le->timestamp)
1966 ge.timestamp = ktime_get_ns();
1967 else
1968 ge.timestamp = le->timestamp;
1969
1970 if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
1971 && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
1972 int level = gpiod_get_value_cansleep(le->desc);
1973
1974 if (level)
1975 /* Emit low-to-high event */
1976 ge.id = GPIOEVENT_EVENT_RISING_EDGE;
1977 else
1978 /* Emit high-to-low event */
1979 ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
1980 } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
1981 /* Emit low-to-high event */
1982 ge.id = GPIOEVENT_EVENT_RISING_EDGE;
1983 } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
1984 /* Emit high-to-low event */
1985 ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
1986 } else {
1987 return IRQ_NONE;
1988 }
1989
1990 ret = kfifo_in_spinlocked_noirqsave(&le->events, &ge,
1991 1, &le->wait.lock);
1992 if (ret)
1993 wake_up_poll(&le->wait, EPOLLIN);
1994 else
1995 pr_debug_ratelimited("event FIFO is full - event dropped\n");
1996
1997 return IRQ_HANDLED;
1998 }
1999
2000 static irqreturn_t lineevent_irq_handler(int irq, void *p)
2001 {
2002 struct lineevent_state *le = p;
2003
2004 /*
2005 * Just store the timestamp in hardirq context so we get it as
2006 * close in time as possible to the actual event.
2007 */
2008 le->timestamp = ktime_get_ns();
2009
2010 return IRQ_WAKE_THREAD;
2011 }
2012
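/*
 * Handle the GPIO_GET_LINEEVENT_IOCTL: request a single input line for edge
 * event monitoring and return an anonymous file descriptor for reading the
 * events back to userspace.
 */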
2013 static int lineevent_create(struct gpio_device *gdev, void __user *ip)
2014 {
2015 struct gpioevent_request eventreq;
2016 struct lineevent_state *le;
2017 struct gpio_desc *desc;
2018 struct file *file;
2019 u32 offset;
2020 u32 lflags;
2021 u32 eflags;
2022 int fd;
2023 int ret;
2024 int irq, irqflags = 0;
2025 char *label;
2026
2027 if (copy_from_user(&eventreq, ip, sizeof(eventreq)))
2028 return -EFAULT;
2029
2030 offset = eventreq.lineoffset;
2031 lflags = eventreq.handleflags;
2032 eflags = eventreq.eventflags;
2033
2034 desc = gpio_device_get_desc(gdev, offset);
2035 if (IS_ERR(desc))
2036 return PTR_ERR(desc);
2037
2038 /* Return an error if an unknown flag is set */
2039 if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
2040 (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS))
2041 return -EINVAL;
2042
2043 /* This is just wrong: we don't look for events on output lines */
2044 if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
2045 (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
2046 (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
2047 return -EINVAL;
2048
2049 /* Only one bias flag can be set. */
2050 if (((lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
2051 (lflags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
2052 GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
2053 ((lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
2054 (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
2055 return -EINVAL;
2056
2057 le = kzalloc(sizeof(*le), GFP_KERNEL);
2058 if (!le)
2059 return -ENOMEM;
2060 le->gdev = gpio_device_get(gdev);
2061
2062 if (eventreq.consumer_label[0] != '\0') {
2063 /* label is only initialized if consumer_label is set */
2064 le->label = kstrndup(eventreq.consumer_label,
2065 sizeof(eventreq.consumer_label) - 1,
2066 GFP_KERNEL);
2067 if (!le->label) {
2068 ret = -ENOMEM;
2069 goto out_free_le;
2070 }
2071 }
2072
2073 ret = gpiod_request_user(desc, le->label);
2074 if (ret)
2075 goto out_free_le;
2076 le->desc = desc;
2077 le->eflags = eflags;
2078
2079 linehandle_flags_to_desc_flags(lflags, &desc->flags);
2080
2081 ret = gpiod_direction_input(desc);
2082 if (ret)
2083 goto out_free_le;
2084
2085 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
2086
2087 irq = gpiod_to_irq(desc);
2088 if (irq <= 0) {
2089 ret = -ENODEV;
2090 goto out_free_le;
2091 }
2092
2093 if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
2094 irqflags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags) ?
2095 IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
2096 if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
2097 irqflags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags) ?
2098 IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
2099 irqflags |= IRQF_ONESHOT;
2100
2101 INIT_KFIFO(le->events);
2102 init_waitqueue_head(&le->wait);
2103
2104 le->device_unregistered_nb.notifier_call = lineevent_unregistered_notify;
2105 ret = blocking_notifier_chain_register(&gdev->device_notifier,
2106 &le->device_unregistered_nb);
2107 if (ret)
2108 goto out_free_le;
2109
2110 label = make_irq_label(le->label);
2111 if (IS_ERR(label)) {
2112 ret = PTR_ERR(label);
2113 goto out_free_le;
2114 }
2115
2116 /* Request a thread to read the events */
2117 ret = request_threaded_irq(irq,
2118 lineevent_irq_handler,
2119 lineevent_irq_thread,
2120 irqflags,
2121 label,
2122 le);
2123 if (ret) {
2124 free_irq_label(label);
2125 goto out_free_le;
2126 }
2127
2128 le->irq = irq;
2129
2130 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
2131 if (fd < 0) {
2132 ret = fd;
2133 goto out_free_le;
2134 }
2135
2136 file = anon_inode_getfile("gpio-event",
2137 &lineevent_fileops,
2138 le,
2139 O_RDONLY | O_CLOEXEC);
2140 if (IS_ERR(file)) {
2141 ret = PTR_ERR(file);
2142 goto out_put_unused_fd;
2143 }
2144
2145 eventreq.fd = fd;
2146 if (copy_to_user(ip, &eventreq, sizeof(eventreq))) {
2147 /*
2148 * fput() will trigger the release() callback, so do not go onto
2149 * the regular error cleanup path here.
2150 */
2151 fput(file);
2152 put_unused_fd(fd);
2153 return -EFAULT;
2154 }
2155
2156 fd_install(fd, file);
2157
2158 return 0;
2159
2160 out_put_unused_fd:
2161 put_unused_fd(fd);
2162 out_free_le:
2163 lineevent_free(le);
2164 return ret;
2165 }
2166
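/* Translate a v2 line info structure into its v1 (legacy uAPI) equivalent. */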
2167 static void gpio_v2_line_info_to_v1(struct gpio_v2_line_info *info_v2,
2168 struct gpioline_info *info_v1)
2169 {
2170 u64 flagsv2 = info_v2->flags;
2171
2172 memcpy(info_v1->name, info_v2->name, sizeof(info_v1->name));
2173 memcpy(info_v1->consumer, info_v2->consumer, sizeof(info_v1->consumer));
2174 info_v1->line_offset = info_v2->offset;
2175 info_v1->flags = 0;
2176
2177 if (flagsv2 & GPIO_V2_LINE_FLAG_USED)
2178 info_v1->flags |= GPIOLINE_FLAG_KERNEL;
2179
2180 if (flagsv2 & GPIO_V2_LINE_FLAG_OUTPUT)
2181 info_v1->flags |= GPIOLINE_FLAG_IS_OUT;
2182
2183 if (flagsv2 & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
2184 info_v1->flags |= GPIOLINE_FLAG_ACTIVE_LOW;
2185
2186 if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_DRAIN)
2187 info_v1->flags |= GPIOLINE_FLAG_OPEN_DRAIN;
2188 if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_SOURCE)
2189 info_v1->flags |= GPIOLINE_FLAG_OPEN_SOURCE;
2190
2191 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)
2192 info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_UP;
2193 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN)
2194 info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_DOWN;
2195 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_DISABLED)
2196 info_v1->flags |= GPIOLINE_FLAG_BIAS_DISABLE;
2197 }
2198
2199 static void gpio_v2_line_info_changed_to_v1(
2200 struct gpio_v2_line_info_changed *lic_v2,
2201 struct gpioline_info_changed *lic_v1)
2202 {
2203 memset(lic_v1, 0, sizeof(*lic_v1));
2204 gpio_v2_line_info_to_v1(&lic_v2->info, &lic_v1->info);
2205 lic_v1->timestamp = lic_v2->timestamp_ns;
2206 lic_v1->event_type = lic_v2->event_type;
2207 }
2208
2209 #endif /* CONFIG_GPIO_CDEV_V1 */
2210
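/*
 * Populate a v2 line info structure from a GPIO descriptor. With @atomic set
 * the sleeping pinctrl_gpio_can_use_line() check is skipped so the function
 * can be called from atomic context.
 */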
2211 static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
2212 struct gpio_v2_line_info *info, bool atomic)
2213 {
2214 u32 debounce_period_us;
2215 unsigned long dflags;
2216 const char *label;
2217
2218 CLASS(gpio_chip_guard, guard)(desc);
2219 if (!guard.gc)
2220 return;
2221
2222 memset(info, 0, sizeof(*info));
2223 info->offset = gpiod_hwgpio(desc);
2224
2225 if (desc->name)
2226 strscpy(info->name, desc->name, sizeof(info->name));
2227
2228 dflags = READ_ONCE(desc->flags);
2229
2230 scoped_guard(srcu, &desc->gdev->desc_srcu) {
2231 label = gpiod_get_label(desc);
2232 if (label && test_bit(GPIOD_FLAG_REQUESTED, &dflags))
2233 strscpy(info->consumer, label,
2234 sizeof(info->consumer));
2235 }
2236
2237 /*
2238 * Userspace only needs to know that the kernel is using this GPIO so
2239 * that userspace can't use it.
2240 * The calculation of the used flag is slightly racy, as it may read
2241 * desc, gc and pinctrl state without a lock covering all three at
2242 * once. Worst case, if the line is in transition and the calculation
2243 * is inconsistent, then it looks to the user like they performed the
2244 * read on the other side of the transition - but that can always
2245 * happen.
2246 * The definitive test that a line is available to userspace is to
2247 * request it.
2248 */
2249 if (test_bit(GPIOD_FLAG_REQUESTED, &dflags) ||
2250 test_bit(GPIOD_FLAG_IS_HOGGED, &dflags) ||
2251 test_bit(GPIOD_FLAG_EXPORT, &dflags) ||
2252 test_bit(GPIOD_FLAG_SYSFS, &dflags) ||
2253 !gpiochip_line_is_valid(guard.gc, info->offset)) {
2254 info->flags |= GPIO_V2_LINE_FLAG_USED;
2255 } else if (!atomic) {
2256 if (!pinctrl_gpio_can_use_line(guard.gc, info->offset))
2257 info->flags |= GPIO_V2_LINE_FLAG_USED;
2258 }
2259
2260 if (test_bit(GPIOD_FLAG_IS_OUT, &dflags))
2261 info->flags |= GPIO_V2_LINE_FLAG_OUTPUT;
2262 else
2263 info->flags |= GPIO_V2_LINE_FLAG_INPUT;
2264
2265 if (test_bit(GPIOD_FLAG_ACTIVE_LOW, &dflags))
2266 info->flags |= GPIO_V2_LINE_FLAG_ACTIVE_LOW;
2267
2268 if (test_bit(GPIOD_FLAG_OPEN_DRAIN, &dflags))
2269 info->flags |= GPIO_V2_LINE_FLAG_OPEN_DRAIN;
2270 if (test_bit(GPIOD_FLAG_OPEN_SOURCE, &dflags))
2271 info->flags |= GPIO_V2_LINE_FLAG_OPEN_SOURCE;
2272
2273 if (test_bit(GPIOD_FLAG_BIAS_DISABLE, &dflags))
2274 info->flags |= GPIO_V2_LINE_FLAG_BIAS_DISABLED;
2275 if (test_bit(GPIOD_FLAG_PULL_DOWN, &dflags))
2276 info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN;
2277 if (test_bit(GPIOD_FLAG_PULL_UP, &dflags))
2278 info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_UP;
2279
2280 if (test_bit(GPIOD_FLAG_EDGE_RISING, &dflags))
2281 info->flags |= GPIO_V2_LINE_FLAG_EDGE_RISING;
2282 if (test_bit(GPIOD_FLAG_EDGE_FALLING, &dflags))
2283 info->flags |= GPIO_V2_LINE_FLAG_EDGE_FALLING;
2284
2285 if (test_bit(GPIOD_FLAG_EVENT_CLOCK_REALTIME, &dflags))
2286 info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
2287 else if (test_bit(GPIOD_FLAG_EVENT_CLOCK_HTE, &dflags))
2288 info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
2289
2290 debounce_period_us = READ_ONCE(desc->debounce_period_us);
2291 if (debounce_period_us) {
2292 info->attrs[info->num_attrs].id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
2293 info->attrs[info->num_attrs].debounce_period_us =
2294 debounce_period_us;
2295 info->num_attrs++;
2296 }
2297 }
2298
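/**
 * struct gpio_chardev_data - the state of an open GPIO chardev file
 * @gdev: the GPIO device the chardev was opened for
 * @wait: wait queue that handles blocking reads of line info changed events
 * @events: KFIFO for the line info changed events
 * @lineinfo_changed_nb: notifier block for receiving line state change events
 * @device_unregistered_nb: notifier block for receiving gdev unregister events
 * @watched_lines: bitmap of line offsets watched for info changes
 * @watch_abi_version: the uAPI version (1 or 2) selected by the first watch
 * request and enforced for all subsequent watches
 * @fp: the chardev file pointer, used to keep the file alive while line state
 * notifications are being processed
 */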
2299 struct gpio_chardev_data {
2300 struct gpio_device *gdev;
2301 wait_queue_head_t wait;
2302 DECLARE_KFIFO(events, struct gpio_v2_line_info_changed, 32);
2303 struct notifier_block lineinfo_changed_nb;
2304 struct notifier_block device_unregistered_nb;
2305 unsigned long *watched_lines;
2306 #ifdef CONFIG_GPIO_CDEV_V1
2307 atomic_t watch_abi_version;
2308 #endif
2309 struct file *fp;
2310 };
2311
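/*
 * Handle the GPIO_GET_CHIPINFO_IOCTL: report the chip name, label and the
 * number of GPIO lines it exposes.
 */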
2312 static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip)
2313 {
2314 struct gpio_device *gdev = cdev->gdev;
2315 struct gpiochip_info chipinfo;
2316
2317 memset(&chipinfo, 0, sizeof(chipinfo));
2318
2319 strscpy(chipinfo.name, dev_name(&gdev->dev), sizeof(chipinfo.name));
2320 strscpy(chipinfo.label, gdev->label, sizeof(chipinfo.label));
2321 chipinfo.lines = gdev->ngpio;
2322 if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
2323 return -EFAULT;
2324 return 0;
2325 }
2326
2327 #ifdef CONFIG_GPIO_CDEV_V1
2328 /*
2329 * returns 0 if the versions match, else the previously selected ABI version
2330 */
2331 static int lineinfo_ensure_abi_version(struct gpio_chardev_data *cdata,
2332 unsigned int version)
2333 {
2334 int abiv = atomic_cmpxchg(&cdata->watch_abi_version, 0, version);
2335
2336 if (abiv == version)
2337 return 0;
2338
2339 return abiv;
2340 }
2341
2342 static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip,
2343 bool watch)
2344 {
2345 struct gpio_desc *desc;
2346 struct gpioline_info lineinfo;
2347 struct gpio_v2_line_info lineinfo_v2;
2348
2349 if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
2350 return -EFAULT;
2351
2352 /* this doubles as a range check on line_offset */
2353 desc = gpio_device_get_desc(cdev->gdev, lineinfo.line_offset);
2354 if (IS_ERR(desc))
2355 return PTR_ERR(desc);
2356
2357 if (watch) {
2358 if (lineinfo_ensure_abi_version(cdev, 1))
2359 return -EPERM;
2360
2361 if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
2362 return -EBUSY;
2363 }
2364
2365 gpio_desc_to_lineinfo(desc, &lineinfo_v2, false);
2366 gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
2367
2368 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
2369 if (watch)
2370 clear_bit(lineinfo.line_offset, cdev->watched_lines);
2371 return -EFAULT;
2372 }
2373
2374 return 0;
2375 }
2376 #endif
2377
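/*
 * Handle the GPIO_V2_GET_LINEINFO_IOCTL and, with @watch set, the
 * GPIO_V2_GET_LINEINFO_WATCH_IOCTL.
 */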
2378 static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
2379 bool watch)
2380 {
2381 struct gpio_desc *desc;
2382 struct gpio_v2_line_info lineinfo;
2383
2384 if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
2385 return -EFAULT;
2386
2387 if (!mem_is_zero(lineinfo.padding, sizeof(lineinfo.padding)))
2388 return -EINVAL;
2389
2390 desc = gpio_device_get_desc(cdev->gdev, lineinfo.offset);
2391 if (IS_ERR(desc))
2392 return PTR_ERR(desc);
2393
2394 if (watch) {
2395 #ifdef CONFIG_GPIO_CDEV_V1
2396 if (lineinfo_ensure_abi_version(cdev, 2))
2397 return -EPERM;
2398 #endif
2399 if (test_and_set_bit(lineinfo.offset, cdev->watched_lines))
2400 return -EBUSY;
2401 }
2402 gpio_desc_to_lineinfo(desc, &lineinfo, false);
2403
2404 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
2405 if (watch)
2406 clear_bit(lineinfo.offset, cdev->watched_lines);
2407 return -EFAULT;
2408 }
2409
2410 return 0;
2411 }
2412
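/*
 * Handle the GPIO_GET_LINEINFO_UNWATCH_IOCTL: stop watching a line for info
 * changes.
 */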
2413 static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
2414 {
2415 __u32 offset;
2416
2417 if (copy_from_user(&offset, ip, sizeof(offset)))
2418 return -EFAULT;
2419
2420 if (offset >= cdev->gdev->ngpio)
2421 return -EINVAL;
2422
2423 if (!test_and_clear_bit(offset, cdev->watched_lines))
2424 return -EBUSY;
2425
2426 return 0;
2427 }
2428
2429 /*
2430 * gpio_ioctl() - ioctl handler for the GPIO chardev
2431 */
2432 static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2433 {
2434 struct gpio_chardev_data *cdev = file->private_data;
2435 struct gpio_device *gdev = cdev->gdev;
2436 void __user *ip = (void __user *)arg;
2437
2438 guard(srcu)(&gdev->srcu);
2439
2440 /* We fail any subsequent ioctl()s when the chip is gone */
2441 if (!rcu_access_pointer(gdev->chip))
2442 return -ENODEV;
2443
2444 /* Dispatch the requested command to its handler */
2445 switch (cmd) {
2446 case GPIO_GET_CHIPINFO_IOCTL:
2447 return chipinfo_get(cdev, ip);
2448 #ifdef CONFIG_GPIO_CDEV_V1
2449 case GPIO_GET_LINEHANDLE_IOCTL:
2450 return linehandle_create(gdev, ip);
2451 case GPIO_GET_LINEEVENT_IOCTL:
2452 return lineevent_create(gdev, ip);
2453 case GPIO_GET_LINEINFO_IOCTL:
2454 return lineinfo_get_v1(cdev, ip, false);
2455 case GPIO_GET_LINEINFO_WATCH_IOCTL:
2456 return lineinfo_get_v1(cdev, ip, true);
2457 #endif /* CONFIG_GPIO_CDEV_V1 */
2458 case GPIO_V2_GET_LINEINFO_IOCTL:
2459 return lineinfo_get(cdev, ip, false);
2460 case GPIO_V2_GET_LINEINFO_WATCH_IOCTL:
2461 return lineinfo_get(cdev, ip, true);
2462 case GPIO_V2_GET_LINE_IOCTL:
2463 return linereq_create(gdev, ip);
2464 case GPIO_GET_LINEINFO_UNWATCH_IOCTL:
2465 return lineinfo_unwatch(cdev, ip);
2466 default:
2467 return -EINVAL;
2468 }
2469 }
2470
2471 #ifdef CONFIG_COMPAT
2472 static long gpio_ioctl_compat(struct file *file, unsigned int cmd,
2473 unsigned long arg)
2474 {
2475 return gpio_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
2476 }
2477 #endif
2478
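/*
 * Context carrying a single line info changed event from the (possibly
 * atomic) line state notifier to process context, where the sleeping pinctrl
 * check is performed and the event is pushed onto the chardev event FIFO.
 */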
2479 struct lineinfo_changed_ctx {
2480 struct work_struct work;
2481 struct gpio_v2_line_info_changed chg;
2482 struct gpio_device *gdev;
2483 struct gpio_chardev_data *cdev;
2484 };
2485
2486 static void lineinfo_changed_func(struct work_struct *work)
2487 {
2488 struct lineinfo_changed_ctx *ctx =
2489 container_of(work, struct lineinfo_changed_ctx, work);
2490 struct gpio_chip *gc;
2491 int ret;
2492
2493 if (!(ctx->chg.info.flags & GPIO_V2_LINE_FLAG_USED)) {
2494 /*
2495 * If nobody set the USED flag earlier, let's check with pinctrl
2496 * now. We do this late because it's a sleeping function.
2497 * Pin functions are in general much more static and while it's
2498 * not 100% bullet-proof, it's good enough for most cases.
2499 */
2500 scoped_guard(srcu, &ctx->gdev->srcu) {
2501 gc = srcu_dereference(ctx->gdev->chip, &ctx->gdev->srcu);
2502 if (gc &&
2503 !pinctrl_gpio_can_use_line(gc, ctx->chg.info.offset))
2504 ctx->chg.info.flags |= GPIO_V2_LINE_FLAG_USED;
2505 }
2506 }
2507
2508 ret = kfifo_in_spinlocked(&ctx->cdev->events, &ctx->chg, 1,
2509 &ctx->cdev->wait.lock);
2510 if (ret)
2511 wake_up_poll(&ctx->cdev->wait, EPOLLIN);
2512 else
2513 pr_debug_ratelimited("lineinfo event FIFO is full - event dropped\n");
2514
2515 gpio_device_put(ctx->gdev);
2516 fput(ctx->cdev->fp);
2517 kfree(ctx);
2518 }
2519
2520 static int lineinfo_changed_notify(struct notifier_block *nb,
2521 unsigned long action, void *data)
2522 {
2523 struct gpio_chardev_data *cdev =
2524 container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb);
2525 struct lineinfo_changed_ctx *ctx;
2526 struct gpio_desc *desc = data;
2527 struct file *fp;
2528
2529 if (!test_bit(gpiod_hwgpio(desc), cdev->watched_lines))
2530 return NOTIFY_DONE;
2531
2532 /* Keep the file descriptor alive for the duration of the notification. */
2533 fp = get_file_active(&cdev->fp);
2534 if (!fp)
2535 /* Chardev file descriptor was or is being released. */
2536 return NOTIFY_DONE;
2537
2538 /*
2539 * If this is called from atomic context (for instance: with a spinlock
2540 * taken by the atomic notifier chain), any sleeping calls must be done
2541 * outside of this function in process context of the dedicated
2542 * workqueue.
2543 *
2544 * Let's gather as much info as possible from the descriptor and
2545 * postpone just the call to pinctrl_gpio_can_use_line() until the work
2546 * is executed.
2547 */
2548
2549 ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
2550 if (!ctx) {
2551 pr_err("Failed to allocate memory for line info notification\n");
2552 fput(fp);
2553 return NOTIFY_DONE;
2554 }
2555
2556 ctx->chg.event_type = action;
2557 ctx->chg.timestamp_ns = ktime_get_ns();
2558 gpio_desc_to_lineinfo(desc, &ctx->chg.info, true);
2559 /* Keep the GPIO device alive until we emit the event. */
2560 ctx->gdev = gpio_device_get(desc->gdev);
2561 ctx->cdev = cdev;
2562
2563 INIT_WORK(&ctx->work, lineinfo_changed_func);
2564 queue_work(ctx->gdev->line_state_wq, &ctx->work);
2565
2566 return NOTIFY_OK;
2567 }
2568
2569 static int gpio_device_unregistered_notify(struct notifier_block *nb,
2570 unsigned long action, void *data)
2571 {
2572 struct gpio_chardev_data *cdev = container_of(nb,
2573 struct gpio_chardev_data,
2574 device_unregistered_nb);
2575
2576 wake_up_poll(&cdev->wait, EPOLLIN | EPOLLERR);
2577
2578 return NOTIFY_OK;
2579 }
2580
2581 static __poll_t lineinfo_watch_poll(struct file *file,
2582 struct poll_table_struct *pollt)
2583 {
2584 struct gpio_chardev_data *cdev = file->private_data;
2585 __poll_t events = 0;
2586
2587 guard(srcu)(&cdev->gdev->srcu);
2588
2589 if (!rcu_access_pointer(cdev->gdev->chip))
2590 return EPOLLHUP | EPOLLERR;
2591
2592 poll_wait(file, &cdev->wait, pollt);
2593
2594 if (!kfifo_is_empty_spinlocked_noirqsave(&cdev->events,
2595 &cdev->wait.lock))
2596 events = EPOLLIN | EPOLLRDNORM;
2597
2598 return events;
2599 }
2600
2601 static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
2602 size_t count, loff_t *off)
2603 {
2604 struct gpio_chardev_data *cdev = file->private_data;
2605 struct gpio_v2_line_info_changed event;
2606 ssize_t bytes_read = 0;
2607 int ret;
2608 size_t event_size;
2609
2610 guard(srcu)(&cdev->gdev->srcu);
2611
2612 if (!rcu_access_pointer(cdev->gdev->chip))
2613 return -ENODEV;
2614
2615 #ifndef CONFIG_GPIO_CDEV_V1
2616 event_size = sizeof(struct gpio_v2_line_info_changed);
2617 if (count < event_size)
2618 return -EINVAL;
2619 #endif
2620
2621 do {
2622 scoped_guard(spinlock, &cdev->wait.lock) {
2623 if (kfifo_is_empty(&cdev->events)) {
2624 if (bytes_read)
2625 return bytes_read;
2626
2627 if (file->f_flags & O_NONBLOCK)
2628 return -EAGAIN;
2629
2630 ret = wait_event_interruptible_locked(cdev->wait,
2631 !kfifo_is_empty(&cdev->events));
2632 if (ret)
2633 return ret;
2634 }
2635 #ifdef CONFIG_GPIO_CDEV_V1
2636 /* must be after kfifo check so watch_abi_version is set */
2637 if (atomic_read(&cdev->watch_abi_version) == 2)
2638 event_size = sizeof(struct gpio_v2_line_info_changed);
2639 else
2640 event_size = sizeof(struct gpioline_info_changed);
2641 if (count < event_size)
2642 return -EINVAL;
2643 #endif
2644 if (kfifo_out(&cdev->events, &event, 1) != 1) {
2645 /*
2646 * This should never happen - we hold the
2647 * lock from the moment we learned the fifo
2648 * is no longer empty until now.
2649 */
2650 WARN(1, "failed to read from non-empty kfifo");
2651 return -EIO;
2652 }
2653 }
2654
2655 #ifdef CONFIG_GPIO_CDEV_V1
2656 if (event_size == sizeof(struct gpio_v2_line_info_changed)) {
2657 if (copy_to_user(buf + bytes_read, &event, event_size))
2658 return -EFAULT;
2659 } else {
2660 struct gpioline_info_changed event_v1;
2661
2662 gpio_v2_line_info_changed_to_v1(&event, &event_v1);
2663 if (copy_to_user(buf + bytes_read, &event_v1,
2664 event_size))
2665 return -EFAULT;
2666 }
2667 #else
2668 if (copy_to_user(buf + bytes_read, &event, event_size))
2669 return -EFAULT;
2670 #endif
2671 bytes_read += event_size;
2672 } while (count >= bytes_read + sizeof(event));
2673
2674 return bytes_read;
2675 }
2676
2677 /**
2678 * gpio_chrdev_open() - open the chardev for ioctl operations
2679 * @inode: inode for this chardev
2680 * @file: file struct for storing private data
2681 *
2682 * Returns:
2683 * 0 on success, or negative errno on failure.
2684 */
2685 static int gpio_chrdev_open(struct inode *inode, struct file *file)
2686 {
2687 struct gpio_device *gdev = container_of(inode->i_cdev,
2688 struct gpio_device, chrdev);
2689 struct gpio_chardev_data *cdev;
2690 int ret = -ENOMEM;
2691
2692 guard(srcu)(&gdev->srcu);
2693
2694 /* Fail on open if the backing gpiochip is gone */
2695 if (!rcu_access_pointer(gdev->chip))
2696 return -ENODEV;
2697
2698 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
2699 if (!cdev)
2700 return -ENOMEM;
2701
2702 cdev->watched_lines = bitmap_zalloc(gdev->ngpio, GFP_KERNEL);
2703 if (!cdev->watched_lines)
2704 goto out_free_cdev;
2705
2706 init_waitqueue_head(&cdev->wait);
2707 INIT_KFIFO(cdev->events);
2708 cdev->gdev = gpio_device_get(gdev);
2709
2710 cdev->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify;
2711 scoped_guard(write_lock_irqsave, &gdev->line_state_lock)
2712 ret = raw_notifier_chain_register(&gdev->line_state_notifier,
2713 &cdev->lineinfo_changed_nb);
2714 if (ret)
2715 goto out_free_bitmap;
2716
2717 cdev->device_unregistered_nb.notifier_call =
2718 gpio_device_unregistered_notify;
2719 ret = blocking_notifier_chain_register(&gdev->device_notifier,
2720 &cdev->device_unregistered_nb);
2721 if (ret)
2722 goto out_unregister_line_notifier;
2723
2724 file->private_data = cdev;
2725 cdev->fp = file;
2726
2727 ret = nonseekable_open(inode, file);
2728 if (ret)
2729 goto out_unregister_device_notifier;
2730
2731 return ret;
2732
2733 out_unregister_device_notifier:
2734 blocking_notifier_chain_unregister(&gdev->device_notifier,
2735 &cdev->device_unregistered_nb);
2736 out_unregister_line_notifier:
2737 scoped_guard(write_lock_irqsave, &gdev->line_state_lock)
2738 raw_notifier_chain_unregister(&gdev->line_state_notifier,
2739 &cdev->lineinfo_changed_nb);
2740 out_free_bitmap:
2741 gpio_device_put(gdev);
2742 bitmap_free(cdev->watched_lines);
2743 out_free_cdev:
2744 kfree(cdev);
2745 return ret;
2746 }
2747
2748 /**
2749 * gpio_chrdev_release() - close chardev after ioctl operations
2750 * @inode: inode for this chardev
2751 * @file: file struct for storing private data
2752 *
2753 * Returns:
2754 * 0 on success, or negative errno on failure.
2755 */
2756 static int gpio_chrdev_release(struct inode *inode, struct file *file)
2757 {
2758 struct gpio_chardev_data *cdev = file->private_data;
2759 struct gpio_device *gdev = cdev->gdev;
2760
2761 blocking_notifier_chain_unregister(&gdev->device_notifier,
2762 &cdev->device_unregistered_nb);
2763 scoped_guard(write_lock_irqsave, &gdev->line_state_lock)
2764 raw_notifier_chain_unregister(&gdev->line_state_notifier,
2765 &cdev->lineinfo_changed_nb);
2766 bitmap_free(cdev->watched_lines);
2767 gpio_device_put(gdev);
2768 kfree(cdev);
2769
2770 return 0;
2771 }
2772
2773 static const struct file_operations gpio_fileops = {
2774 .release = gpio_chrdev_release,
2775 .open = gpio_chrdev_open,
2776 .poll = lineinfo_watch_poll,
2777 .read = lineinfo_watch_read,
2778 .owner = THIS_MODULE,
2779 .unlocked_ioctl = gpio_ioctl,
2780 #ifdef CONFIG_COMPAT
2781 .compat_ioctl = gpio_ioctl_compat,
2782 #endif
2783 };
2784
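/**
 * gpiolib_cdev_register() - add the character device for a GPIO device
 * @gdev: the GPIO device to add the chardev for
 * @devt: the dev_t providing the major number for the chardev
 *
 * Creates the ordered workqueue used to deliver line state change events and
 * adds the /dev/gpiochipN character device for @gdev.
 *
 * Returns:
 * 0 on success, or negative errno on failure.
 */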
2785 int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt)
2786 {
2787 struct gpio_chip *gc;
2788 int ret;
2789
2790 cdev_init(&gdev->chrdev, &gpio_fileops);
2791 gdev->chrdev.owner = THIS_MODULE;
2792 gdev->dev.devt = MKDEV(MAJOR(devt), gdev->id);
2793
2794 gdev->line_state_wq = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2795 dev_name(&gdev->dev));
2796 if (!gdev->line_state_wq)
2797 return -ENOMEM;
2798
2799 ret = cdev_device_add(&gdev->chrdev, &gdev->dev);
2800 if (ret) {
2801 destroy_workqueue(gdev->line_state_wq);
2802 return ret;
2803 }
2804
2805 guard(srcu)(&gdev->srcu);
2806 gc = srcu_dereference(gdev->chip, &gdev->srcu);
2807 if (!gc) {
2808 cdev_device_del(&gdev->chrdev, &gdev->dev);
2809 destroy_workqueue(gdev->line_state_wq);
2810 return -ENODEV;
2811 }
2812
2813 gpiochip_dbg(gc, "added GPIO chardev (%d:%d)\n", MAJOR(devt), gdev->id);
2814
2815 return 0;
2816 }
2817
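/*
 * Remove the character device for a GPIO device and wake up any processes
 * still waiting on it.
 */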
2818 void gpiolib_cdev_unregister(struct gpio_device *gdev)
2819 {
2820 destroy_workqueue(gdev->line_state_wq);
2821 cdev_device_del(&gdev->chrdev, &gdev->dev);
2822 blocking_notifier_call_chain(&gdev->device_notifier, 0, NULL);
2823 }
2824