// SPDX-License-Identifier: GPL-2.0

#include <linux/anon_inodes.h>
#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/build_bug.h>
#include <linux/cdev.h>
#include <linux/cleanup.h>
#include <linux/compat.h>
#include <linux/compiler.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/hte.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/overflow.h>
#include <linux/pinctrl/consumer.h>
#include <linux/poll.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/timekeeping.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>

#include <uapi/linux/gpio.h>

#include "gpiolib.h"
#include "gpiolib-cdev.h"

/*
 * Array sizes must ensure 64-bit alignment and not create holes in the
 * struct packing.
 */
static_assert(IS_ALIGNED(GPIO_V2_LINES_MAX, 2));
static_assert(IS_ALIGNED(GPIO_MAX_NAME_SIZE, 8));

/*
 * Check that uAPI structs are 64-bit aligned for 32/64-bit compatibility
 */
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_attribute), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config_attribute), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_request), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info_changed), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_event), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_values), 8));

/* Character device interface to GPIO.
 *
 * The GPIO character device, /dev/gpiochipN, provides userspace an
 * interface to gpiolib GPIOs via ioctl()s.
 */

typedef __poll_t (*poll_fn)(struct file *, struct poll_table_struct *);
typedef long (*ioctl_fn)(struct file *, unsigned int, unsigned long);
typedef ssize_t (*read_fn)(struct file *, char __user *,
			   size_t count, loff_t *);

/*
 * GPIO line handle management
 */

#ifdef CONFIG_GPIO_CDEV_V1
/**
 * struct linehandle_state - contains the state of a userspace handle
 * @gdev: the GPIO device the handle pertains to
 * @label: consumer label used to tag descriptors
 * @descs: the GPIO descriptors held by this handle
 * @num_descs: the number of descriptors held in the descs array
 */
struct linehandle_state {
	struct gpio_device *gdev;
	const char *label;
	struct gpio_desc *descs[GPIOHANDLES_MAX];
	u32 num_descs;
};

#define GPIOHANDLE_REQUEST_VALID_FLAGS \
	(GPIOHANDLE_REQUEST_INPUT | \
	GPIOHANDLE_REQUEST_OUTPUT | \
	GPIOHANDLE_REQUEST_ACTIVE_LOW | \
	GPIOHANDLE_REQUEST_BIAS_PULL_UP | \
	GPIOHANDLE_REQUEST_BIAS_PULL_DOWN | \
	GPIOHANDLE_REQUEST_BIAS_DISABLE | \
	GPIOHANDLE_REQUEST_OPEN_DRAIN | \
	GPIOHANDLE_REQUEST_OPEN_SOURCE)

static int linehandle_validate_flags(u32 flags)
{
	/* Return an error if an unknown flag is set */
	if (flags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
		return -EINVAL;

	/*
	 * Do not allow both INPUT & OUTPUT flags to be set as they are
	 * contradictory.
	 */
	if ((flags & GPIOHANDLE_REQUEST_INPUT) &&
	    (flags & GPIOHANDLE_REQUEST_OUTPUT))
		return -EINVAL;

	/*
	 * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
	 * the hardware actually supports enabling both at the same time the
	 * electrical result would be disastrous.
	 */
	if ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) &&
	    (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
		return -EINVAL;

	/* OPEN_DRAIN and OPEN_SOURCE flags only make sense for output mode. */
	if (!(flags & GPIOHANDLE_REQUEST_OUTPUT) &&
	    ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
	     (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE)))
		return -EINVAL;

	/* Bias flags only allowed for input or output mode. */
	if (!((flags & GPIOHANDLE_REQUEST_INPUT) ||
	      (flags & GPIOHANDLE_REQUEST_OUTPUT)) &&
	    ((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) ||
	     (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP) ||
	     (flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN)))
		return -EINVAL;

	/* Only one bias flag can be set. */
	if (((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
	     (flags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
		       GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
	    ((flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
	     (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
		return -EINVAL;

	return 0;
}

static void linehandle_flags_to_desc_flags(u32 lflags, unsigned long *flagsp)
{
	assign_bit(FLAG_ACTIVE_LOW, flagsp,
		   lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW);
	assign_bit(FLAG_OPEN_DRAIN, flagsp,
		   lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN);
	assign_bit(FLAG_OPEN_SOURCE, flagsp,
		   lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE);
	assign_bit(FLAG_PULL_UP, flagsp,
		   lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP);
	assign_bit(FLAG_PULL_DOWN, flagsp,
		   lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN);
	assign_bit(FLAG_BIAS_DISABLE, flagsp,
		   lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE);
}

static long linehandle_set_config(struct linehandle_state *lh,
				  void __user *ip)
{
	struct gpiohandle_config gcnf;
	struct gpio_desc *desc;
	int i, ret;
	u32 lflags;

	if (copy_from_user(&gcnf, ip, sizeof(gcnf)))
		return -EFAULT;

	lflags = gcnf.flags;
	ret = linehandle_validate_flags(lflags);
	if (ret)
		return ret;

	for (i = 0; i < lh->num_descs; i++) {
		desc = lh->descs[i];
		linehandle_flags_to_desc_flags(gcnf.flags, &desc->flags);

		/*
		 * Lines have to be requested explicitly for input
		 * or output, else the line will be treated "as is".
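		 * (i.e. the direction of the line is left unchanged and
		 * only the flags applied above take effect).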
		 */
		if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
			int val = !!gcnf.default_values[i];

			ret = gpiod_direction_output(desc, val);
			if (ret)
				return ret;
		} else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
			ret = gpiod_direction_input(desc);
			if (ret)
				return ret;
		}

		gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
	}
	return 0;
}

static long linehandle_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct linehandle_state *lh = file->private_data;
	void __user *ip = (void __user *)arg;
	struct gpiohandle_data ghd;
	DECLARE_BITMAP(vals, GPIOHANDLES_MAX);
	unsigned int i;
	int ret;

	guard(rwsem_read)(&lh->gdev->sem);

	if (!lh->gdev->chip)
		return -ENODEV;

	switch (cmd) {
	case GPIOHANDLE_GET_LINE_VALUES_IOCTL:
		/* NOTE: It's okay to read values of output lines */
		ret = gpiod_get_array_value_complex(false, true,
						    lh->num_descs, lh->descs,
						    NULL, vals);
		if (ret)
			return ret;

		memset(&ghd, 0, sizeof(ghd));
		for (i = 0; i < lh->num_descs; i++)
			ghd.values[i] = test_bit(i, vals);

		if (copy_to_user(ip, &ghd, sizeof(ghd)))
			return -EFAULT;

		return 0;
	case GPIOHANDLE_SET_LINE_VALUES_IOCTL:
		/*
		 * All line descriptors were created at once with the same
		 * flags so just check if the first one is really output.
		 */
		if (!test_bit(FLAG_IS_OUT, &lh->descs[0]->flags))
			return -EPERM;

		if (copy_from_user(&ghd, ip, sizeof(ghd)))
			return -EFAULT;

		/* Clamp all values to [0,1] */
		for (i = 0; i < lh->num_descs; i++)
			__assign_bit(i, vals, ghd.values[i]);

		/* Reuse the array setting function */
		return gpiod_set_array_value_complex(false,
						     true,
						     lh->num_descs,
						     lh->descs,
						     NULL,
						     vals);
	case GPIOHANDLE_SET_CONFIG_IOCTL:
		return linehandle_set_config(lh, ip);
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static long linehandle_ioctl_compat(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	return linehandle_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static void linehandle_free(struct linehandle_state *lh)
{
	int i;

	for (i = 0; i < lh->num_descs; i++)
		if (lh->descs[i])
			gpiod_free(lh->descs[i]);
	kfree(lh->label);
	gpio_device_put(lh->gdev);
	kfree(lh);
}

static int linehandle_release(struct inode *inode, struct file *file)
{
	linehandle_free(file->private_data);
	return 0;
}

static const struct file_operations linehandle_fileops = {
	.release = linehandle_release,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = linehandle_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = linehandle_ioctl_compat,
#endif
};

static int linehandle_create(struct gpio_device *gdev, void __user *ip)
{
	struct gpiohandle_request handlereq;
	struct linehandle_state *lh;
	struct file *file;
	int fd, i, ret;
	u32 lflags;

	if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
		return -EFAULT;
	if ((handlereq.lines == 0) || (handlereq.lines > GPIOHANDLES_MAX))
		return -EINVAL;

	lflags = handlereq.flags;

	ret = linehandle_validate_flags(lflags);
	if (ret)
		return ret;

	lh = kzalloc(sizeof(*lh), GFP_KERNEL);
	if (!lh)
		return -ENOMEM;
	lh->gdev = gpio_device_get(gdev);

	if (handlereq.consumer_label[0] != '\0') {
		/* label is only initialized if consumer_label is set */
		lh->label = kstrndup(handlereq.consumer_label,
				     sizeof(handlereq.consumer_label) - 1,
				     GFP_KERNEL);
		if (!lh->label) {
			ret = -ENOMEM;
			goto out_free_lh;
		}
	}

	lh->num_descs = handlereq.lines;

	/* Request each GPIO */
	for (i = 0; i < handlereq.lines; i++) {
		u32 offset = handlereq.lineoffsets[i];
		struct gpio_desc *desc = gpiochip_get_desc(gdev->chip, offset);

		if (IS_ERR(desc)) {
			ret = PTR_ERR(desc);
			goto out_free_lh;
		}

		ret = gpiod_request_user(desc, lh->label);
		if (ret)
			goto out_free_lh;
		lh->descs[i] = desc;
		linehandle_flags_to_desc_flags(handlereq.flags, &desc->flags);

		ret = gpiod_set_transitory(desc, false);
		if (ret < 0)
			goto out_free_lh;

		/*
		 * Lines have to be requested explicitly for input
		 * or output, else the line will be treated "as is".
		 */
		if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
			int val = !!handlereq.default_values[i];

			ret = gpiod_direction_output(desc, val);
			if (ret)
				goto out_free_lh;
		} else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
			ret = gpiod_direction_input(desc);
			if (ret)
				goto out_free_lh;
		}

		gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);

		dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
			offset);
	}

	fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto out_free_lh;
	}

	file = anon_inode_getfile("gpio-linehandle",
				  &linehandle_fileops,
				  lh,
				  O_RDONLY | O_CLOEXEC);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto out_put_unused_fd;
	}

	handlereq.fd = fd;
	if (copy_to_user(ip, &handlereq, sizeof(handlereq))) {
		/*
		 * fput() will trigger the release() callback, so do not go onto
		 * the regular error cleanup path here.
		 */
		fput(file);
		put_unused_fd(fd);
		return -EFAULT;
	}

	fd_install(fd, file);

	dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
		lh->num_descs);

	return 0;

out_put_unused_fd:
	put_unused_fd(fd);
out_free_lh:
	linehandle_free(lh);
	return ret;
}
#endif /* CONFIG_GPIO_CDEV_V1 */

/**
 * struct line - contains the state of a requested line
 * @node: to store the object in supinfo_tree if supplemental
 * @desc: the GPIO descriptor for this line.
 * @req: the corresponding line request
 * @irq: the interrupt triggered in response to events on this GPIO
 * @edflags: the edge flags, GPIO_V2_LINE_FLAG_EDGE_RISING and/or
 * GPIO_V2_LINE_FLAG_EDGE_FALLING, indicating the edge detection applied
 * @timestamp_ns: cache for the timestamp storing it between hardirq and
 * IRQ thread, used to bring the timestamp close to the actual event
 * @req_seqno: the seqno for the current edge event in the sequence of
 * events for the corresponding line request. This is drawn from the @req.
 * @line_seqno: the seqno for the current edge event in the sequence of
 * events for this line.
 * @work: the worker that implements software debouncing
 * @debounce_period_us: the debounce period in microseconds
 * @sw_debounced: flag indicating if the software debouncer is active
 * @level: the current debounced physical level of the line
 * @hdesc: the Hardware Timestamp Engine (HTE) descriptor
 * @raw_level: the line level at the time of event
 * @total_discard_seq: the running counter of the discarded events
 * @last_seqno: the last sequence number before debounce period expires
 */
struct line {
	struct rb_node node;
	struct gpio_desc *desc;
	/*
	 * -- edge detector specific fields --
	 */
	struct linereq *req;
	unsigned int irq;
	/*
	 * The flags for the active edge detector configuration.
	 *
	 * edflags is set by linereq_create(), linereq_free(), and
	 * linereq_set_config_unlocked(), which are themselves mutually
	 * exclusive, and is accessed by edge_irq_thread(),
	 * process_hw_ts_thread() and debounce_work_func(),
	 * which can all live with a slightly stale value.
	 */
	u64 edflags;
	/*
	 * timestamp_ns and req_seqno are accessed only by
	 * edge_irq_handler() and edge_irq_thread(), which are themselves
	 * mutually exclusive, so no additional protection is necessary.
	 */
	u64 timestamp_ns;
	u32 req_seqno;
	/*
	 * line_seqno is accessed by either edge_irq_thread() or
	 * debounce_work_func(), which are themselves mutually exclusive,
	 * so no additional protection is necessary.
	 */
	u32 line_seqno;
	/*
	 * -- debouncer specific fields --
	 */
	struct delayed_work work;
	/*
	 * debounce_period_us is accessed by debounce_irq_handler() and
	 * process_hw_ts() which are disabled when modified by
	 * debounce_setup(), edge_detector_setup() or edge_detector_stop()
	 * or can live with a stale version when updated by
	 * edge_detector_update().
	 * The modifying functions are themselves mutually exclusive.
	 */
	unsigned int debounce_period_us;
	/*
	 * sw_debounced is accessed by linereq_set_config(), which is the
	 * only setter, and linereq_get_values(), which can live with a
	 * slightly stale value.
	 */
	unsigned int sw_debounced;
	/*
	 * level is accessed by debounce_work_func(), which is the only
	 * setter, and linereq_get_values() which can live with a slightly
	 * stale value.
	 */
	unsigned int level;
#ifdef CONFIG_HTE
	struct hte_ts_desc hdesc;
	/*
	 * HTE provider sets line level at the time of event. The valid
	 * value is 0 or 1 and negative value for an error.
	 */
	int raw_level;
	/*
	 * when sw_debounced is set on an HTE enabled line, this is the
	 * running counter of the discarded events.
	 */
	u32 total_discard_seq;
	/*
	 * when sw_debounced is set on an HTE enabled line, this variable
	 * records the last sequence number before the debounce period
	 * expires.
	 */
	u32 last_seqno;
#endif /* CONFIG_HTE */
};

/*
 * an rbtree of the struct lines containing supplemental info.
 * Used to populate gpio_v2_line_info with cdev specific fields not contained
 * in the struct gpio_desc.
 * A line is determined to contain supplemental information by
 * line_has_supinfo().
 */
static struct rb_root supinfo_tree = RB_ROOT;
/* covers supinfo_tree */
static DEFINE_SPINLOCK(supinfo_lock);

/**
 * struct linereq - contains the state of a userspace line request
 * @gdev: the GPIO device the line request pertains to
 * @label: consumer label used to tag GPIO descriptors
 * @num_lines: the number of lines in the lines array
 * @wait: wait queue that handles blocking reads of events
 * @device_unregistered_nb: notifier block for receiving gdev unregister events
 * @event_buffer_size: the number of elements allocated in @events
 * @events: KFIFO for the GPIO events
 * @seqno: the sequence number for edge events generated on all lines in
 * this line request. Note that this is not used when @num_lines is 1, as
 * the line_seqno is then the same and is cheaper to calculate.
 * @config_mutex: mutex for serializing ioctl() calls to ensure consistency
 * of configuration, particularly multi-step accesses to desc flags and
 * changes to supinfo status.
 * @lines: the lines held by this line request, with @num_lines elements.
 */
struct linereq {
	struct gpio_device *gdev;
	const char *label;
	u32 num_lines;
	wait_queue_head_t wait;
	struct notifier_block device_unregistered_nb;
	u32 event_buffer_size;
	DECLARE_KFIFO_PTR(events, struct gpio_v2_line_event);
	atomic_t seqno;
	struct mutex config_mutex;
	struct line lines[] __counted_by(num_lines);
};

static void supinfo_insert(struct line *line)
{
	struct rb_node **new = &(supinfo_tree.rb_node), *parent = NULL;
	struct line *entry;

	guard(spinlock)(&supinfo_lock);

	while (*new) {
		entry = container_of(*new, struct line, node);

		parent = *new;
		if (line->desc < entry->desc) {
			new = &((*new)->rb_left);
		} else if (line->desc > entry->desc) {
			new = &((*new)->rb_right);
		} else {
			/* this should never happen */
			WARN(1, "duplicate line inserted");
			return;
		}
	}

	rb_link_node(&line->node, parent, new);
	rb_insert_color(&line->node, &supinfo_tree);
}

static void supinfo_erase(struct line *line)
{
	guard(spinlock)(&supinfo_lock);

	rb_erase(&line->node, &supinfo_tree);
}

static struct line *supinfo_find(struct gpio_desc *desc)
{
	struct rb_node *node = supinfo_tree.rb_node;
	struct line *line;

	while (node) {
		line = container_of(node, struct line, node);
		if (desc < line->desc)
			node = node->rb_left;
		else if (desc > line->desc)
			node = node->rb_right;
		else
			return line;
	}
	return NULL;
}

static void supinfo_to_lineinfo(struct gpio_desc *desc,
				struct gpio_v2_line_info *info)
{
	struct gpio_v2_line_attribute *attr;
	struct line *line;

	guard(spinlock)(&supinfo_lock);

	line = supinfo_find(desc);
	if (!line)
		return;

	attr = &info->attrs[info->num_attrs];
	attr->id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
	attr->debounce_period_us = READ_ONCE(line->debounce_period_us);
	info->num_attrs++;
}

static inline bool line_has_supinfo(struct line *line)
{
	return READ_ONCE(line->debounce_period_us);
}

/*
 * Checks line_has_supinfo() before and after the change to avoid unnecessary
 * supinfo_tree access.
 * Called indirectly by linereq_create() or linereq_set_config() so line
 * is already protected from concurrent changes.
 */
static void line_set_debounce_period(struct line *line,
				     unsigned int debounce_period_us)
{
	bool was_suppl = line_has_supinfo(line);

	WRITE_ONCE(line->debounce_period_us, debounce_period_us);

	/* if supinfo status is unchanged then we're done */
	if (line_has_supinfo(line) == was_suppl)
		return;

	/* supinfo status has changed, so update the tree */
	if (was_suppl)
		supinfo_erase(line);
	else
		supinfo_insert(line);
}

#define GPIO_V2_LINE_BIAS_FLAGS \
	(GPIO_V2_LINE_FLAG_BIAS_PULL_UP | \
	 GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN | \
	 GPIO_V2_LINE_FLAG_BIAS_DISABLED)

#define GPIO_V2_LINE_DIRECTION_FLAGS \
	(GPIO_V2_LINE_FLAG_INPUT | \
	 GPIO_V2_LINE_FLAG_OUTPUT)

#define GPIO_V2_LINE_DRIVE_FLAGS \
	(GPIO_V2_LINE_FLAG_OPEN_DRAIN | \
	 GPIO_V2_LINE_FLAG_OPEN_SOURCE)

#define GPIO_V2_LINE_EDGE_FLAGS \
	(GPIO_V2_LINE_FLAG_EDGE_RISING | \
	 GPIO_V2_LINE_FLAG_EDGE_FALLING)

#define GPIO_V2_LINE_FLAG_EDGE_BOTH GPIO_V2_LINE_EDGE_FLAGS

#define GPIO_V2_LINE_VALID_FLAGS \
	(GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
	 GPIO_V2_LINE_DIRECTION_FLAGS | \
	 GPIO_V2_LINE_DRIVE_FLAGS | \
	 GPIO_V2_LINE_EDGE_FLAGS | \
	 GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME | \
	 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
	 GPIO_V2_LINE_BIAS_FLAGS)

/* subset of flags relevant for edge detector configuration */
#define GPIO_V2_LINE_EDGE_DETECTOR_FLAGS \
	(GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
	 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
	 GPIO_V2_LINE_EDGE_FLAGS)

static int linereq_unregistered_notify(struct notifier_block *nb,
				       unsigned long action, void *data)
{
	struct linereq *lr = container_of(nb, struct linereq,
					  device_unregistered_nb);

	wake_up_poll(&lr->wait, EPOLLIN | EPOLLERR);

	return NOTIFY_OK;
}

static void linereq_put_event(struct linereq *lr,
			      struct gpio_v2_line_event *le)
{
	bool overflow = false;

	scoped_guard(spinlock, &lr->wait.lock) {
		if (kfifo_is_full(&lr->events)) {
			overflow = true;
			kfifo_skip(&lr->events);
		}
		kfifo_in(&lr->events, le, 1);
	}
	if (!overflow)
		wake_up_poll(&lr->wait, EPOLLIN);
	else
		pr_debug_ratelimited("event FIFO is full - event dropped\n");
}

static u64 line_event_timestamp(struct line *line)
{
	if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
		return ktime_get_real_ns();
	else if (IS_ENABLED(CONFIG_HTE) &&
		 test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
		return line->timestamp_ns;

	return ktime_get_ns();
}

static u32 line_event_id(int level)
{
	return level ? GPIO_V2_LINE_EVENT_RISING_EDGE :
		       GPIO_V2_LINE_EVENT_FALLING_EDGE;
}

#ifdef CONFIG_HTE

static enum hte_return process_hw_ts_thread(void *p)
{
	struct line *line;
	struct linereq *lr;
	struct gpio_v2_line_event le;
	u64 edflags;
	int level;

	if (!p)
		return HTE_CB_HANDLED;

	line = p;
	lr = line->req;

	memset(&le, 0, sizeof(le));

	le.timestamp_ns = line->timestamp_ns;
	edflags = READ_ONCE(line->edflags);

	switch (edflags & GPIO_V2_LINE_EDGE_FLAGS) {
	case GPIO_V2_LINE_FLAG_EDGE_BOTH:
		level = (line->raw_level >= 0) ?
			line->raw_level :
			gpiod_get_raw_value_cansleep(line->desc);

		if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
			level = !level;

		le.id = line_event_id(level);
		break;
	case GPIO_V2_LINE_FLAG_EDGE_RISING:
		le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
		break;
	case GPIO_V2_LINE_FLAG_EDGE_FALLING:
		le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
		break;
	default:
		return HTE_CB_HANDLED;
	}
	le.line_seqno = line->line_seqno;
	le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
	le.offset = gpio_chip_hwgpio(line->desc);

	linereq_put_event(lr, &le);

	return HTE_CB_HANDLED;
}

static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
{
	struct line *line;
	struct linereq *lr;
	int diff_seqno = 0;

	if (!ts || !p)
		return HTE_CB_HANDLED;

	line = p;
	line->timestamp_ns = ts->tsc;
	line->raw_level = ts->raw_level;
	lr = line->req;

	if (READ_ONCE(line->sw_debounced)) {
		line->total_discard_seq++;
		line->last_seqno = ts->seq;
		mod_delayed_work(system_wq, &line->work,
			usecs_to_jiffies(READ_ONCE(line->debounce_period_us)));
	} else {
		if (unlikely(ts->seq < line->line_seqno))
			return HTE_CB_HANDLED;

		diff_seqno = ts->seq - line->line_seqno;
		line->line_seqno = ts->seq;
		if (lr->num_lines != 1)
			line->req_seqno = atomic_add_return(diff_seqno,
							    &lr->seqno);

		return HTE_RUN_SECOND_CB;
	}

	return HTE_CB_HANDLED;
}

static int hte_edge_setup(struct line *line, u64 eflags)
{
	int ret;
	unsigned long flags = 0;
	struct hte_ts_desc *hdesc = &line->hdesc;

	if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
		flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
				  HTE_FALLING_EDGE_TS :
				  HTE_RISING_EDGE_TS;
	if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
		flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
				  HTE_RISING_EDGE_TS :
				  HTE_FALLING_EDGE_TS;

	line->total_discard_seq = 0;

	hte_init_line_attr(hdesc, desc_to_gpio(line->desc), flags, NULL,
			   line->desc);

	ret = hte_ts_get(NULL, hdesc, 0);
	if (ret)
		return ret;

	return hte_request_ts_ns(hdesc, process_hw_ts, process_hw_ts_thread,
				 line);
}

#else

static int hte_edge_setup(struct line *line, u64 eflags)
{
	return 0;
}
#endif /* CONFIG_HTE */

static irqreturn_t edge_irq_thread(int irq, void *p)
{
	struct line *line = p;
	struct linereq *lr = line->req;
	struct gpio_v2_line_event le;

	/* Do not leak kernel stack to userspace */
	memset(&le, 0, sizeof(le));

	if (line->timestamp_ns) {
		le.timestamp_ns = line->timestamp_ns;
	} else {
		/*
		 * We may be running from a nested threaded interrupt in
		 * which case we didn't get the timestamp from
		 * edge_irq_handler().
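		 * Take the timestamp here instead, and bump the request
		 * seqno that the handler would otherwise have bumped.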
		 */
		le.timestamp_ns = line_event_timestamp(line);
		if (lr->num_lines != 1)
			line->req_seqno = atomic_inc_return(&lr->seqno);
	}
	line->timestamp_ns = 0;

	switch (READ_ONCE(line->edflags) & GPIO_V2_LINE_EDGE_FLAGS) {
	case GPIO_V2_LINE_FLAG_EDGE_BOTH:
		le.id = line_event_id(gpiod_get_value_cansleep(line->desc));
		break;
	case GPIO_V2_LINE_FLAG_EDGE_RISING:
		le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
		break;
	case GPIO_V2_LINE_FLAG_EDGE_FALLING:
		le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
		break;
	default:
		return IRQ_NONE;
	}
	line->line_seqno++;
	le.line_seqno = line->line_seqno;
	le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
	le.offset = gpio_chip_hwgpio(line->desc);

	linereq_put_event(lr, &le);

	return IRQ_HANDLED;
}

static irqreturn_t edge_irq_handler(int irq, void *p)
{
	struct line *line = p;
	struct linereq *lr = line->req;

	/*
	 * Just store the timestamp in hardirq context so we get it as
	 * close in time as possible to the actual event.
	 */
	line->timestamp_ns = line_event_timestamp(line);

	if (lr->num_lines != 1)
		line->req_seqno = atomic_inc_return(&lr->seqno);

	return IRQ_WAKE_THREAD;
}

/*
 * returns the current debounced logical value.
 */
static bool debounced_value(struct line *line)
{
	bool value;

	/*
	 * minor race - debouncer may be stopped here, so edge_detector_stop()
	 * must leave the value unchanged so the following will read the level
	 * from when the debouncer was last running.
	 */
	value = READ_ONCE(line->level);

	if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
		value = !value;

	return value;
}

static irqreturn_t debounce_irq_handler(int irq, void *p)
{
	struct line *line = p;

	mod_delayed_work(system_wq, &line->work,
		usecs_to_jiffies(READ_ONCE(line->debounce_period_us)));

	return IRQ_HANDLED;
}

static void debounce_work_func(struct work_struct *work)
{
	struct gpio_v2_line_event le;
	struct line *line = container_of(work, struct line, work.work);
	struct linereq *lr;
	u64 eflags, edflags = READ_ONCE(line->edflags);
	int level = -1;
#ifdef CONFIG_HTE
	int diff_seqno;

	if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
		level = line->raw_level;
#endif
	if (level < 0)
		level = gpiod_get_raw_value_cansleep(line->desc);
	if (level < 0) {
		pr_debug_ratelimited("debouncer failed to read line value\n");
		return;
	}

	if (READ_ONCE(line->level) == level)
		return;

	WRITE_ONCE(line->level, level);

	/* -- edge detection -- */
	eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
	if (!eflags)
		return;

	/* switch from physical level to logical - if they differ */
	if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
		level = !level;

	/* ignore edges that are not being monitored */
	if (((eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) && !level) ||
	    ((eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) && level))
		return;

	/* Do not leak kernel stack to userspace */
	memset(&le, 0, sizeof(le));

	lr = line->req;
	le.timestamp_ns = line_event_timestamp(line);
	le.offset = gpio_chip_hwgpio(line->desc);
#ifdef CONFIG_HTE
	if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) {
		/* discard events except the last one */
		line->total_discard_seq -= 1;
		diff_seqno = line->last_seqno -
			     line->total_discard_seq -
			     line->line_seqno;
		line->line_seqno = line->last_seqno - line->total_discard_seq;
		le.line_seqno = line->line_seqno;
		le.seqno = (lr->num_lines == 1) ?
			le.line_seqno : atomic_add_return(diff_seqno, &lr->seqno);
	} else
#endif /* CONFIG_HTE */
	{
		line->line_seqno++;
		le.line_seqno = line->line_seqno;
		le.seqno = (lr->num_lines == 1) ?
			le.line_seqno : atomic_inc_return(&lr->seqno);
	}

	le.id = line_event_id(level);

	linereq_put_event(lr, &le);
}

static int debounce_setup(struct line *line, unsigned int debounce_period_us)
{
	unsigned long irqflags;
	int ret, level, irq;

	/* try hardware */
	ret = gpiod_set_debounce(line->desc, debounce_period_us);
	if (!ret) {
		line_set_debounce_period(line, debounce_period_us);
		return ret;
	}
	if (ret != -ENOTSUPP)
		return ret;

	if (debounce_period_us) {
		/* setup software debounce */
		level = gpiod_get_raw_value_cansleep(line->desc);
		if (level < 0)
			return level;

		if (!(IS_ENABLED(CONFIG_HTE) &&
		      test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))) {
			irq = gpiod_to_irq(line->desc);
			if (irq < 0)
				return -ENXIO;

			irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
			ret = request_irq(irq, debounce_irq_handler, irqflags,
					  line->req->label, line);
			if (ret)
				return ret;
			line->irq = irq;
		} else {
			ret = hte_edge_setup(line, GPIO_V2_LINE_FLAG_EDGE_BOTH);
			if (ret)
				return ret;
		}

		WRITE_ONCE(line->level, level);
		WRITE_ONCE(line->sw_debounced, 1);
	}
	return 0;
}

static bool gpio_v2_line_config_debounced(struct gpio_v2_line_config *lc,
					  unsigned int line_idx)
{
	unsigned int i;
	u64 mask = BIT_ULL(line_idx);

	for (i = 0; i < lc->num_attrs; i++) {
		if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
		    (lc->attrs[i].mask & mask))
			return true;
	}
	return false;
}

static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
					       unsigned int line_idx)
{
	unsigned int i;
	u64 mask = BIT_ULL(line_idx);

	for (i = 0; i < lc->num_attrs; i++) {
		if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
		    (lc->attrs[i].mask & mask))
			return lc->attrs[i].attr.debounce_period_us;
	}
	return 0;
}

static void edge_detector_stop(struct line *line)
{
	if (line->irq) {
		free_irq(line->irq, line);
		line->irq = 0;
	}

#ifdef CONFIG_HTE
	if (READ_ONCE(line->edflags) & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
		hte_ts_put(&line->hdesc);
#endif

	cancel_delayed_work_sync(&line->work);
	WRITE_ONCE(line->sw_debounced, 0);
	WRITE_ONCE(line->edflags, 0);
	line_set_debounce_period(line, 0);
	/* do not change line->level - see comment in debounced_value() */
}

static int edge_detector_setup(struct line *line,
			       struct gpio_v2_line_config *lc,
			       unsigned int line_idx, u64 edflags)
{
	u32 debounce_period_us;
	unsigned long irqflags = 0;
	u64 eflags;
	int irq, ret;

	eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
	if (eflags && !kfifo_initialized(&line->req->events)) {
		ret = kfifo_alloc(&line->req->events,
				  line->req->event_buffer_size, GFP_KERNEL);
		if (ret)
			return ret;
	}
	if (gpio_v2_line_config_debounced(lc, line_idx)) {
		debounce_period_us = gpio_v2_line_config_debounce_period(lc, line_idx);
		ret = debounce_setup(line, debounce_period_us);
		if (ret)
			return ret;
		line_set_debounce_period(line, debounce_period_us);
	}

	/* detection disabled or sw debouncer will provide edge detection */
	if (!eflags || READ_ONCE(line->sw_debounced))
		return 0;

	if (IS_ENABLED(CONFIG_HTE) &&
	    (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
		return hte_edge_setup(line, edflags);

	irq = gpiod_to_irq(line->desc);
	if (irq < 0)
		return -ENXIO;

	if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
		irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
			IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
	if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
		irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
			IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
	irqflags |= IRQF_ONESHOT;

	/* Request a thread to read the events */
	ret = request_threaded_irq(irq, edge_irq_handler, edge_irq_thread,
				   irqflags, line->req->label, line);
	if (ret)
		return ret;

	line->irq = irq;
	return 0;
}

static int edge_detector_update(struct line *line,
				struct gpio_v2_line_config *lc,
				unsigned int line_idx, u64 edflags)
{
	u64 active_edflags = READ_ONCE(line->edflags);
	unsigned int debounce_period_us =
		gpio_v2_line_config_debounce_period(lc, line_idx);

	if ((active_edflags == edflags) &&
	    (READ_ONCE(line->debounce_period_us) == debounce_period_us))
		return 0;

	/* sw debounced and still will be...*/
	if (debounce_period_us && READ_ONCE(line->sw_debounced)) {
		line_set_debounce_period(line, debounce_period_us);
		return 0;
	}

	/* reconfiguring edge detection or sw debounce being disabled */
	if ((line->irq && !READ_ONCE(line->sw_debounced)) ||
	    (active_edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) ||
	    (!debounce_period_us && READ_ONCE(line->sw_debounced)))
		edge_detector_stop(line);

	return edge_detector_setup(line, lc, line_idx, edflags);
}

static u64 gpio_v2_line_config_flags(struct gpio_v2_line_config *lc,
				     unsigned int line_idx)
{
	unsigned int i;
	u64 mask = BIT_ULL(line_idx);

	for (i = 0; i < lc->num_attrs; i++) {
		if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_FLAGS) &&
		    (lc->attrs[i].mask & mask))
			return lc->attrs[i].attr.flags;
	}
	return lc->flags;
}

static int gpio_v2_line_config_output_value(struct gpio_v2_line_config *lc,
					    unsigned int line_idx)
{
	unsigned int i;
	u64 mask = BIT_ULL(line_idx);

	for (i = 0; i < lc->num_attrs; i++) {
		if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES) &&
		    (lc->attrs[i].mask & mask))
			return !!(lc->attrs[i].attr.values & mask);
	}
	return 0;
}

static int gpio_v2_line_flags_validate(u64 flags)
{
	/* Return an error if an unknown flag is set */
	if (flags & ~GPIO_V2_LINE_VALID_FLAGS)
		return -EINVAL;

	if (!IS_ENABLED(CONFIG_HTE) &&
	    (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
		return -EOPNOTSUPP;

	/*
	 * Do not allow both INPUT and OUTPUT flags to be set as they are
	 * contradictory.
	 */
	if ((flags & GPIO_V2_LINE_FLAG_INPUT) &&
	    (flags & GPIO_V2_LINE_FLAG_OUTPUT))
		return -EINVAL;

	/* Only allow one event clock source */
	if (IS_ENABLED(CONFIG_HTE) &&
	    (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME) &&
	    (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
		return -EINVAL;

	/* Edge detection requires explicit input. */
	if ((flags & GPIO_V2_LINE_EDGE_FLAGS) &&
	    !(flags & GPIO_V2_LINE_FLAG_INPUT))
		return -EINVAL;

	/*
	 * Do not allow OPEN_SOURCE and OPEN_DRAIN flags in a single
	 * request. If the hardware actually supports enabling both at the
	 * same time the electrical result would be disastrous.
	 */
	if ((flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN) &&
	    (flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE))
		return -EINVAL;

	/* Drive requires explicit output direction. */
	if ((flags & GPIO_V2_LINE_DRIVE_FLAGS) &&
	    !(flags & GPIO_V2_LINE_FLAG_OUTPUT))
		return -EINVAL;

	/* Bias requires explicit direction. */
	if ((flags & GPIO_V2_LINE_BIAS_FLAGS) &&
	    !(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
		return -EINVAL;

	/* Only one bias flag can be set. */
	if (((flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED) &&
	     (flags & (GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN |
		       GPIO_V2_LINE_FLAG_BIAS_PULL_UP))) ||
	    ((flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN) &&
	     (flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)))
		return -EINVAL;

	return 0;
}

static int gpio_v2_line_config_validate(struct gpio_v2_line_config *lc,
					unsigned int num_lines)
{
	unsigned int i;
	u64 flags;
	int ret;

	if (lc->num_attrs > GPIO_V2_LINE_NUM_ATTRS_MAX)
		return -EINVAL;

	if (memchr_inv(lc->padding, 0, sizeof(lc->padding)))
		return -EINVAL;

	for (i = 0; i < num_lines; i++) {
		flags = gpio_v2_line_config_flags(lc, i);
		ret = gpio_v2_line_flags_validate(flags);
		if (ret)
			return ret;

		/* debounce requires explicit input */
		if (gpio_v2_line_config_debounced(lc, i) &&
		    !(flags & GPIO_V2_LINE_FLAG_INPUT))
			return -EINVAL;
	}
	return 0;
}

static void gpio_v2_line_config_flags_to_desc_flags(u64 flags,
						    unsigned long *flagsp)
{
	assign_bit(FLAG_ACTIVE_LOW, flagsp,
		   flags & GPIO_V2_LINE_FLAG_ACTIVE_LOW);

	if (flags & GPIO_V2_LINE_FLAG_OUTPUT)
		set_bit(FLAG_IS_OUT, flagsp);
	else if (flags & GPIO_V2_LINE_FLAG_INPUT)
		clear_bit(FLAG_IS_OUT, flagsp);

	assign_bit(FLAG_EDGE_RISING, flagsp,
		   flags & GPIO_V2_LINE_FLAG_EDGE_RISING);
	assign_bit(FLAG_EDGE_FALLING, flagsp,
		   flags & GPIO_V2_LINE_FLAG_EDGE_FALLING);

	assign_bit(FLAG_OPEN_DRAIN, flagsp,
		   flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN);
	assign_bit(FLAG_OPEN_SOURCE, flagsp,
		   flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE);

	assign_bit(FLAG_PULL_UP, flagsp,
		   flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP);
	assign_bit(FLAG_PULL_DOWN, flagsp,
		   flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN);
	assign_bit(FLAG_BIAS_DISABLE, flagsp,
		   flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED);

	assign_bit(FLAG_EVENT_CLOCK_REALTIME, flagsp,
		   flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME);
	assign_bit(FLAG_EVENT_CLOCK_HTE, flagsp,
		   flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
}

static long linereq_get_values(struct linereq *lr, void __user *ip)
{
	struct gpio_v2_line_values lv;
	DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
	struct gpio_desc **descs;
	unsigned int i, didx, num_get;
	bool val;
	int ret;

	/* NOTE: It's ok to read values of output lines. */
	if (copy_from_user(&lv, ip, sizeof(lv)))
		return -EFAULT;

	/*
	 * gpiod_get_array_value_complex() requires compacted desc and val
	 * arrays, rather than the sparse ones in lv.
	 * Calculation of num_get and construction of the desc array is
	 * optimized to avoid allocation for the desc array for the common
	 * num_get == 1 case.
	 */
	/* scan requested lines to calculate the subset to get */
	for (num_get = 0, i = 0; i < lr->num_lines; i++) {
		if (lv.mask & BIT_ULL(i)) {
			num_get++;
			/* capture desc for the num_get == 1 case */
			descs = &lr->lines[i].desc;
		}
	}

	if (num_get == 0)
		return -EINVAL;

	if (num_get != 1) {
		/* build compacted desc array */
		descs = kmalloc_array(num_get, sizeof(*descs), GFP_KERNEL);
		if (!descs)
			return -ENOMEM;
		for (didx = 0, i = 0; i < lr->num_lines; i++) {
			if (lv.mask & BIT_ULL(i)) {
				descs[didx] = lr->lines[i].desc;
				didx++;
			}
		}
	}
	ret = gpiod_get_array_value_complex(false, true, num_get,
					    descs, NULL, vals);

	if (num_get != 1)
		kfree(descs);
	if (ret)
		return ret;

	lv.bits = 0;
	for (didx = 0, i = 0; i < lr->num_lines; i++) {
		/* unpack compacted vals for the response */
		if (lv.mask & BIT_ULL(i)) {
			if (lr->lines[i].sw_debounced)
				val = debounced_value(&lr->lines[i]);
			else
				val = test_bit(didx, vals);
			if (val)
				lv.bits |= BIT_ULL(i);
			didx++;
		}
	}

	if (copy_to_user(ip, &lv, sizeof(lv)))
		return -EFAULT;

	return 0;
}

static long linereq_set_values(struct linereq *lr, void __user *ip)
{
	DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
	struct gpio_v2_line_values lv;
	struct gpio_desc **descs;
	unsigned int i, didx, num_set;
	int ret;

	if (copy_from_user(&lv, ip, sizeof(lv)))
		return -EFAULT;

	guard(mutex)(&lr->config_mutex);

	/*
	 * gpiod_set_array_value_complex() requires compacted desc and val
	 * arrays, rather than the sparse ones in lv.
	 * Calculation of num_set and construction of the descs and vals arrays
	 * is optimized to minimize scanning the lv->mask, and to avoid
	 * allocation for the desc array for the common num_set == 1 case.
	 */
	bitmap_zero(vals, GPIO_V2_LINES_MAX);
	/* scan requested lines to determine the subset to be set */
	for (num_set = 0, i = 0; i < lr->num_lines; i++) {
		if (lv.mask & BIT_ULL(i)) {
			/* setting inputs is not allowed */
			if (!test_bit(FLAG_IS_OUT, &lr->lines[i].desc->flags))
				return -EPERM;
			/* add to compacted values */
			if (lv.bits & BIT_ULL(i))
				__set_bit(num_set, vals);
			num_set++;
			/* capture desc for the num_set == 1 case */
			descs = &lr->lines[i].desc;
		}
	}
	if (num_set == 0)
		return -EINVAL;

	if (num_set != 1) {
		/* build compacted desc array */
		descs = kmalloc_array(num_set, sizeof(*descs), GFP_KERNEL);
		if (!descs)
			return -ENOMEM;
		for (didx = 0, i = 0; i < lr->num_lines; i++) {
			if (lv.mask & BIT_ULL(i)) {
				descs[didx] = lr->lines[i].desc;
				didx++;
			}
		}
	}
	ret = gpiod_set_array_value_complex(false, true, num_set,
					    descs, NULL, vals);

	if (num_set != 1)
		kfree(descs);
	return ret;
}

static long linereq_set_config(struct linereq *lr, void __user *ip)
{
	struct gpio_v2_line_config lc;
	struct gpio_desc *desc;
	struct line *line;
	unsigned int i;
	u64 flags, edflags;
	int ret;

	if (copy_from_user(&lc, ip, sizeof(lc)))
		return -EFAULT;

	ret = gpio_v2_line_config_validate(&lc, lr->num_lines);
	if (ret)
		return ret;

	guard(mutex)(&lr->config_mutex);

	for (i = 0; i < lr->num_lines; i++) {
		line = &lr->lines[i];
		desc = lr->lines[i].desc;
		flags = gpio_v2_line_config_flags(&lc, i);
		gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
		edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
		/*
		 * Lines have to be requested explicitly for input
		 * or output, else the line will be treated "as is".
		 */
		if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
			int val = gpio_v2_line_config_output_value(&lc, i);

			edge_detector_stop(line);
			ret = gpiod_direction_output(desc, val);
			if (ret)
				return ret;
		} else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
			ret = gpiod_direction_input(desc);
			if (ret)
				return ret;

			ret = edge_detector_update(line, &lc, i, edflags);
			if (ret)
				return ret;
		}

		WRITE_ONCE(line->edflags, edflags);

		gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
	}
	return 0;
}

static long linereq_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct linereq *lr = file->private_data;
	void __user *ip = (void __user *)arg;

	guard(rwsem_read)(&lr->gdev->sem);

	if (!lr->gdev->chip)
		return -ENODEV;

	switch (cmd) {
	case GPIO_V2_LINE_GET_VALUES_IOCTL:
		return linereq_get_values(lr, ip);
	case GPIO_V2_LINE_SET_VALUES_IOCTL:
		return linereq_set_values(lr, ip);
	case GPIO_V2_LINE_SET_CONFIG_IOCTL:
		return linereq_set_config(lr, ip);
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static long linereq_ioctl_compat(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	return linereq_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static __poll_t linereq_poll(struct file *file,
			     struct poll_table_struct *wait)
{
	struct linereq *lr = file->private_data;
	__poll_t events = 0;

	guard(rwsem_read)(&lr->gdev->sem);

	if (!lr->gdev->chip)
		return EPOLLHUP | EPOLLERR;

	poll_wait(file, &lr->wait, wait);

	if (!kfifo_is_empty_spinlocked_noirqsave(&lr->events,
						 &lr->wait.lock))
		events = EPOLLIN | EPOLLRDNORM;

	return events;
}

static ssize_t linereq_read(struct file *file, char __user *buf,
			    size_t count, loff_t *f_ps)
{
	struct linereq *lr = file->private_data;
	struct gpio_v2_line_event le;
	ssize_t bytes_read = 0;
	int ret;

	guard(rwsem_read)(&lr->gdev->sem);

	if (!lr->gdev->chip)
		return -ENODEV;

	if (count < sizeof(le))
		return -EINVAL;

	do {
		scoped_guard(spinlock, &lr->wait.lock) {
			if (kfifo_is_empty(&lr->events)) {
				if (bytes_read)
					return bytes_read;

				if (file->f_flags & O_NONBLOCK)
					return -EAGAIN;

				ret = wait_event_interruptible_locked(lr->wait,
						!kfifo_is_empty(&lr->events));
				if (ret)
					return ret;
			}

			ret = kfifo_out(&lr->events, &le, 1);
		}
		if (ret != 1) {
			/*
			 * This should never happen - we were holding the
			 * lock from the moment we learned the fifo is no
			 * longer empty until now.
			 */
			ret = -EIO;
			break;
		}

		if (copy_to_user(buf + bytes_read, &le, sizeof(le)))
			return -EFAULT;
		bytes_read += sizeof(le);
	} while (count >= bytes_read + sizeof(le));

	return bytes_read;
}

static void linereq_free(struct linereq *lr)
{
	struct line *line;
	unsigned int i;

	if (lr->device_unregistered_nb.notifier_call)
		blocking_notifier_chain_unregister(&lr->gdev->device_notifier,
						   &lr->device_unregistered_nb);

	for (i = 0; i < lr->num_lines; i++) {
		line = &lr->lines[i];
		if (!line->desc)
			continue;

		edge_detector_stop(line);
		if (line_has_supinfo(line))
			supinfo_erase(line);
		gpiod_free(line->desc);
	}
	kfifo_free(&lr->events);
	kfree(lr->label);
	gpio_device_put(lr->gdev);
	kvfree(lr);
}

static int linereq_release(struct inode *inode, struct file *file)
{
	struct linereq *lr = file->private_data;

	linereq_free(lr);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void linereq_show_fdinfo(struct seq_file *out, struct file *file)
{
	struct linereq *lr = file->private_data;
	struct device *dev = &lr->gdev->dev;
	u16 i;

	seq_printf(out, "gpio-chip:\t%s\n", dev_name(dev));

	for (i = 0; i < lr->num_lines; i++)
		seq_printf(out, "gpio-line:\t%d\n",
			   gpio_chip_hwgpio(lr->lines[i].desc));
}
#endif

static const struct file_operations line_fileops = {
	.release = linereq_release,
	.read = linereq_read,
	.poll = linereq_poll,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = linereq_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = linereq_ioctl_compat,
#endif
#ifdef CONFIG_PROC_FS
	.show_fdinfo = linereq_show_fdinfo,
#endif
};

static int linereq_create(struct gpio_device *gdev, void __user *ip)
{
	struct gpio_v2_line_request ulr;
	struct gpio_v2_line_config *lc;
	struct linereq *lr;
	struct file *file;
	u64 flags, edflags;
	unsigned int i;
	int fd, ret;

	if (copy_from_user(&ulr, ip, sizeof(ulr)))
		return -EFAULT;

	if ((ulr.num_lines == 0) || (ulr.num_lines > GPIO_V2_LINES_MAX))
		return -EINVAL;

	if (memchr_inv(ulr.padding, 0, sizeof(ulr.padding)))
		return -EINVAL;

	lc = &ulr.config;
	ret = gpio_v2_line_config_validate(lc, ulr.num_lines);
	if (ret)
		return ret;

	lr = kvzalloc(struct_size(lr, lines, ulr.num_lines), GFP_KERNEL);
	if (!lr)
		return -ENOMEM;
	lr->num_lines = ulr.num_lines;

	lr->gdev = gpio_device_get(gdev);

	for (i = 0; i < ulr.num_lines; i++) {
		lr->lines[i].req = lr;
		WRITE_ONCE(lr->lines[i].sw_debounced, 0);
		INIT_DELAYED_WORK(&lr->lines[i].work, debounce_work_func);
	}

	if (ulr.consumer[0] != '\0') {
		/* label is only initialized if consumer is set */
		lr->label = kstrndup(ulr.consumer, sizeof(ulr.consumer) - 1,
				     GFP_KERNEL);
		if (!lr->label) {
			ret = -ENOMEM;
			goto out_free_linereq;
		}
	}

	mutex_init(&lr->config_mutex);
	init_waitqueue_head(&lr->wait);
	lr->event_buffer_size = ulr.event_buffer_size;
	if (lr->event_buffer_size == 0)
		lr->event_buffer_size = ulr.num_lines * 16;
	else if (lr->event_buffer_size > GPIO_V2_LINES_MAX * 16)
		lr->event_buffer_size = GPIO_V2_LINES_MAX * 16;

	atomic_set(&lr->seqno, 0);

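	/*
	 * Note: any failure below unwinds via out_free_linereq and
	 * linereq_free(), which releases only the descriptors that have
	 * been successfully requested so far.
	 */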
	/* Request each GPIO */
	for (i = 0; i < ulr.num_lines; i++) {
		u32 offset = ulr.offsets[i];
		struct gpio_desc *desc = gpiochip_get_desc(gdev->chip, offset);

		if (IS_ERR(desc)) {
			ret = PTR_ERR(desc);
			goto out_free_linereq;
		}

		ret = gpiod_request_user(desc, lr->label);
		if (ret)
			goto out_free_linereq;

		lr->lines[i].desc = desc;
		flags = gpio_v2_line_config_flags(lc, i);
		gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);

		ret = gpiod_set_transitory(desc, false);
		if (ret < 0)
			goto out_free_linereq;

		edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
		/*
		 * Lines have to be requested explicitly for input
		 * or output, else the line will be treated "as is".
		 */
		if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
			int val = gpio_v2_line_config_output_value(lc, i);

			ret = gpiod_direction_output(desc, val);
			if (ret)
				goto out_free_linereq;
		} else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
			ret = gpiod_direction_input(desc);
			if (ret)
				goto out_free_linereq;

			ret = edge_detector_setup(&lr->lines[i], lc, i,
						  edflags);
			if (ret)
				goto out_free_linereq;
		}

		lr->lines[i].edflags = edflags;

		gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);

		dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
			offset);
	}

	lr->device_unregistered_nb.notifier_call = linereq_unregistered_notify;
	ret = blocking_notifier_chain_register(&gdev->device_notifier,
					       &lr->device_unregistered_nb);
	if (ret)
		goto out_free_linereq;

	fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto out_free_linereq;
	}

	file = anon_inode_getfile("gpio-line", &line_fileops, lr,
				  O_RDONLY | O_CLOEXEC);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto out_put_unused_fd;
	}

	ulr.fd = fd;
	if (copy_to_user(ip, &ulr, sizeof(ulr))) {
		/*
		 * fput() will trigger the release() callback, so do not go onto
		 * the regular error cleanup path here.
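		 * Going through out_free_linereq as well would free the
		 * request a second time.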
		 */
		fput(file);
		put_unused_fd(fd);
		return -EFAULT;
	}

	fd_install(fd, file);

	dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
		lr->num_lines);

	return 0;

out_put_unused_fd:
	put_unused_fd(fd);
out_free_linereq:
	linereq_free(lr);
	return ret;
}

#ifdef CONFIG_GPIO_CDEV_V1

/*
 * GPIO line event management
 */

/**
 * struct lineevent_state - contains the state of a userspace event
 * @gdev: the GPIO device the event pertains to
 * @label: consumer label used to tag descriptors
 * @desc: the GPIO descriptor held by this event
 * @eflags: the event flags this line was requested with
 * @irq: the interrupt that triggers in response to events on this GPIO
 * @wait: wait queue that handles blocking reads of events
 * @device_unregistered_nb: notifier block for receiving gdev unregister events
 * @events: KFIFO for the GPIO events
 * @timestamp: cache for the timestamp storing it between hardirq
 * and IRQ thread, used to bring the timestamp close to the actual
 * event
 */
struct lineevent_state {
	struct gpio_device *gdev;
	const char *label;
	struct gpio_desc *desc;
	u32 eflags;
	int irq;
	wait_queue_head_t wait;
	struct notifier_block device_unregistered_nb;
	DECLARE_KFIFO(events, struct gpioevent_data, 16);
	u64 timestamp;
};

#define GPIOEVENT_REQUEST_VALID_FLAGS \
	(GPIOEVENT_REQUEST_RISING_EDGE | \
	GPIOEVENT_REQUEST_FALLING_EDGE)

static __poll_t lineevent_poll(struct file *file,
			       struct poll_table_struct *wait)
{
	struct lineevent_state *le = file->private_data;
	__poll_t events = 0;

	guard(rwsem_read)(&le->gdev->sem);

	if (!le->gdev->chip)
		return EPOLLHUP | EPOLLERR;

	poll_wait(file, &le->wait, wait);

	if (!kfifo_is_empty_spinlocked_noirqsave(&le->events, &le->wait.lock))
		events = EPOLLIN | EPOLLRDNORM;

	return events;
}

static int lineevent_unregistered_notify(struct notifier_block *nb,
					 unsigned long action, void *data)
{
	struct lineevent_state *le = container_of(nb, struct lineevent_state,
						  device_unregistered_nb);

	wake_up_poll(&le->wait, EPOLLIN | EPOLLERR);

	return NOTIFY_OK;
}

struct compat_gpioeevent_data {
	compat_u64 timestamp;
	u32 id;
};

static ssize_t lineevent_read(struct file *file, char __user *buf,
			      size_t count, loff_t *f_ps)
{
	struct lineevent_state *le = file->private_data;
	struct gpioevent_data ge;
	ssize_t bytes_read = 0;
	ssize_t ge_size;
	int ret;

	guard(rwsem_read)(&le->gdev->sem);

	if (!le->gdev->chip)
		return -ENODEV;

	/*
	 * When a compat system call is in use, struct gpioevent_data has a
	 * different size on at least ia32 due to alignment differences.
	 * Because the first member is 64 bits followed by one of 32 bits,
	 * there is no gap between them; the only difference is the padding
	 * at the end of the structure. Hence, we calculate the actual
	 * sizeof() and pass this as an argument to copy_to_user() to drop
	 * the unneeded bytes from the output.
	 */
	if (compat_need_64bit_alignment_fixup())
		ge_size = sizeof(struct compat_gpioeevent_data);
	else
		ge_size = sizeof(struct gpioevent_data);
	if (count < ge_size)
		return -EINVAL;

	do {
		scoped_guard(spinlock, &le->wait.lock) {
			if (kfifo_is_empty(&le->events)) {
				if (bytes_read)
					return bytes_read;

				if (file->f_flags & O_NONBLOCK)
					return -EAGAIN;

				ret = wait_event_interruptible_locked(le->wait,
						!kfifo_is_empty(&le->events));
				if (ret)
					return ret;
			}

			ret = kfifo_out(&le->events, &ge, 1);
		}
		if (ret != 1) {
			/*
			 * This should never happen - we were holding the lock
			 * from the moment we learned the fifo is no longer
			 * empty until now.
			 */
			ret = -EIO;
			break;
		}

		if (copy_to_user(buf + bytes_read, &ge, ge_size))
			return -EFAULT;
		bytes_read += ge_size;
	} while (count >= bytes_read + ge_size);

	return bytes_read;
}

static void lineevent_free(struct lineevent_state *le)
{
	if (le->device_unregistered_nb.notifier_call)
		blocking_notifier_chain_unregister(&le->gdev->device_notifier,
						   &le->device_unregistered_nb);
	if (le->irq)
		free_irq(le->irq, le);
	if (le->desc)
		gpiod_free(le->desc);
	kfree(le->label);
	gpio_device_put(le->gdev);
	kfree(le);
}

static int lineevent_release(struct inode *inode, struct file *file)
{
	lineevent_free(file->private_data);
	return 0;
}

static long lineevent_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	struct lineevent_state *le = file->private_data;
	void __user *ip = (void __user *)arg;
	struct gpiohandle_data ghd;

	guard(rwsem_read)(&le->gdev->sem);

	if (!le->gdev->chip)
		return -ENODEV;

	/*
	 * We can get the value for an event line but not set it,
	 * because it is input by definition.
	 */
	if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
		int val;

		memset(&ghd, 0, sizeof(ghd));

		val = gpiod_get_value_cansleep(le->desc);
		if (val < 0)
			return val;
		ghd.values[0] = val;

		if (copy_to_user(ip, &ghd, sizeof(ghd)))
			return -EFAULT;

		return 0;
	}
	return -EINVAL;
}

#ifdef CONFIG_COMPAT
static long lineevent_ioctl_compat(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	return lineevent_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations lineevent_fileops = {
	.release = lineevent_release,
	.read = lineevent_read,
	.poll = lineevent_poll,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = lineevent_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = lineevent_ioctl_compat,
#endif
};

static irqreturn_t lineevent_irq_thread(int irq, void *p)
{
	struct lineevent_state *le = p;
	struct gpioevent_data ge;
	int ret;

	/* Do not leak kernel stack to userspace */
	memset(&ge, 0, sizeof(ge));

	/*
	 * We may be running from a nested threaded interrupt in which case
	 * we didn't get the timestamp from lineevent_irq_handler().
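	 * Fall back to taking the timestamp here instead.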
static irqreturn_t lineevent_irq_thread(int irq, void *p)
{
	struct lineevent_state *le = p;
	struct gpioevent_data ge;
	int ret;

	/* Do not leak kernel stack to userspace */
	memset(&ge, 0, sizeof(ge));

	/*
	 * We may be running from a nested threaded interrupt in which case
	 * we didn't get the timestamp from lineevent_irq_handler().
	 */
	if (!le->timestamp)
		ge.timestamp = ktime_get_ns();
	else
		ge.timestamp = le->timestamp;

	if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
	    && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
		int level = gpiod_get_value_cansleep(le->desc);

		if (level)
			/* Emit low-to-high event */
			ge.id = GPIOEVENT_EVENT_RISING_EDGE;
		else
			/* Emit high-to-low event */
			ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
	} else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
		/* Emit low-to-high event */
		ge.id = GPIOEVENT_EVENT_RISING_EDGE;
	} else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
		/* Emit high-to-low event */
		ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
	} else {
		return IRQ_NONE;
	}

	ret = kfifo_in_spinlocked_noirqsave(&le->events, &ge,
					    1, &le->wait.lock);
	if (ret)
		wake_up_poll(&le->wait, EPOLLIN);
	else
		pr_debug_ratelimited("event FIFO is full - event dropped\n");

	return IRQ_HANDLED;
}

static irqreturn_t lineevent_irq_handler(int irq, void *p)
{
	struct lineevent_state *le = p;

	/*
	 * Just store the timestamp in hardirq context so we get it as
	 * close in time as possible to the actual event.
	 */
	le->timestamp = ktime_get_ns();

	return IRQ_WAKE_THREAD;
}

static int lineevent_create(struct gpio_device *gdev, void __user *ip)
{
	struct gpioevent_request eventreq;
	struct lineevent_state *le;
	struct gpio_desc *desc;
	struct file *file;
	u32 offset;
	u32 lflags;
	u32 eflags;
	int fd;
	int ret;
	int irq, irqflags = 0;

	if (copy_from_user(&eventreq, ip, sizeof(eventreq)))
		return -EFAULT;

	offset = eventreq.lineoffset;
	lflags = eventreq.handleflags;
	eflags = eventreq.eventflags;

	desc = gpiochip_get_desc(gdev->chip, offset);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	/* Return an error if an unknown flag is set */
	if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
	    (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS))
		return -EINVAL;

	/* This is just wrong: we don't look for events on output lines */
	if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
	    (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
	    (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
		return -EINVAL;

	/* Only one bias flag can be set. */
	if (((lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
	     (lflags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
			GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
	    ((lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
	     (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
		return -EINVAL;

	le = kzalloc(sizeof(*le), GFP_KERNEL);
	if (!le)
		return -ENOMEM;
	le->gdev = gpio_device_get(gdev);

	if (eventreq.consumer_label[0] != '\0') {
		/* label is only initialized if consumer_label is set */
		le->label = kstrndup(eventreq.consumer_label,
				     sizeof(eventreq.consumer_label) - 1,
				     GFP_KERNEL);
		if (!le->label) {
			ret = -ENOMEM;
			goto out_free_le;
		}
	}

	ret = gpiod_request_user(desc, le->label);
	if (ret)
		goto out_free_le;
	le->desc = desc;
	le->eflags = eflags;

	linehandle_flags_to_desc_flags(lflags, &desc->flags);

	ret = gpiod_direction_input(desc);
	if (ret)
		goto out_free_le;

	gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);

	irq = gpiod_to_irq(desc);
	if (irq <= 0) {
		ret = -ENODEV;
		goto out_free_le;
	}

	if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
		irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
			IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
	if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
		irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
			IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
	irqflags |= IRQF_ONESHOT;

	INIT_KFIFO(le->events);
	init_waitqueue_head(&le->wait);

	le->device_unregistered_nb.notifier_call = lineevent_unregistered_notify;
	ret = blocking_notifier_chain_register(&gdev->device_notifier,
					       &le->device_unregistered_nb);
	if (ret)
		goto out_free_le;

	/* Request a thread to read the events */
	ret = request_threaded_irq(irq,
				   lineevent_irq_handler,
				   lineevent_irq_thread,
				   irqflags,
				   le->label,
				   le);
	if (ret)
		goto out_free_le;

	le->irq = irq;

	fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto out_free_le;
	}

	file = anon_inode_getfile("gpio-event",
				  &lineevent_fileops,
				  le,
				  O_RDONLY | O_CLOEXEC);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto out_put_unused_fd;
	}

	eventreq.fd = fd;
	if (copy_to_user(ip, &eventreq, sizeof(eventreq))) {
		/*
		 * fput() will trigger the release() callback, so do not go onto
		 * the regular error cleanup path here.
		 */
		fput(file);
		put_unused_fd(fd);
		return -EFAULT;
	}

	fd_install(fd, file);

	return 0;

out_put_unused_fd:
	put_unused_fd(fd);
out_free_le:
	lineevent_free(le);
	return ret;
}

static void gpio_v2_line_info_to_v1(struct gpio_v2_line_info *info_v2,
				    struct gpioline_info *info_v1)
{
	u64 flagsv2 = info_v2->flags;

	memcpy(info_v1->name, info_v2->name, sizeof(info_v1->name));
	memcpy(info_v1->consumer, info_v2->consumer, sizeof(info_v1->consumer));
	info_v1->line_offset = info_v2->offset;
	info_v1->flags = 0;

	if (flagsv2 & GPIO_V2_LINE_FLAG_USED)
		info_v1->flags |= GPIOLINE_FLAG_KERNEL;

	if (flagsv2 & GPIO_V2_LINE_FLAG_OUTPUT)
		info_v1->flags |= GPIOLINE_FLAG_IS_OUT;

	if (flagsv2 & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
		info_v1->flags |= GPIOLINE_FLAG_ACTIVE_LOW;

	if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_DRAIN)
		info_v1->flags |= GPIOLINE_FLAG_OPEN_DRAIN;
	if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_SOURCE)
		info_v1->flags |= GPIOLINE_FLAG_OPEN_SOURCE;

	if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)
		info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_UP;
	if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN)
		info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_DOWN;
	if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_DISABLED)
		info_v1->flags |= GPIOLINE_FLAG_BIAS_DISABLE;
}

static void gpio_v2_line_info_changed_to_v1(
		struct gpio_v2_line_info_changed *lic_v2,
		struct gpioline_info_changed *lic_v1)
{
	memset(lic_v1, 0, sizeof(*lic_v1));
	gpio_v2_line_info_to_v1(&lic_v2->info, &lic_v1->info);
	lic_v1->timestamp = lic_v2->timestamp_ns;
	lic_v1->event_type = lic_v2->event_type;
}

#endif /* CONFIG_GPIO_CDEV_V1 */
static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
				  struct gpio_v2_line_info *info)
{
	struct gpio_chip *gc = desc->gdev->chip;
	unsigned long dflags;

	memset(info, 0, sizeof(*info));
	info->offset = gpio_chip_hwgpio(desc);

	scoped_guard(spinlock_irqsave, &gpio_lock) {
		if (desc->name)
			strscpy(info->name, desc->name, sizeof(info->name));

		if (desc->label)
			strscpy(info->consumer, desc->label,
				sizeof(info->consumer));

		dflags = READ_ONCE(desc->flags);
	}

	/*
	 * Userspace only needs to know that the kernel is using this GPIO so
	 * it can't use it.
	 * The calculation of the used flag is slightly racy, as it may read
	 * desc, gc and pinctrl state without a lock covering all three at
	 * once. Worst case if the line is in transition and the calculation
	 * is inconsistent then it looks to the user like they performed the
	 * read on the other side of the transition - but that can always
	 * happen.
	 * The definitive test that a line is available to userspace is to
	 * request it.
	 */
	if (test_bit(FLAG_REQUESTED, &dflags) ||
	    test_bit(FLAG_IS_HOGGED, &dflags) ||
	    test_bit(FLAG_USED_AS_IRQ, &dflags) ||
	    test_bit(FLAG_EXPORT, &dflags) ||
	    test_bit(FLAG_SYSFS, &dflags) ||
	    !gpiochip_line_is_valid(gc, info->offset) ||
	    !pinctrl_gpio_can_use_line(gc, info->offset))
		info->flags |= GPIO_V2_LINE_FLAG_USED;

	if (test_bit(FLAG_IS_OUT, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_OUTPUT;
	else
		info->flags |= GPIO_V2_LINE_FLAG_INPUT;

	if (test_bit(FLAG_ACTIVE_LOW, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_ACTIVE_LOW;

	if (test_bit(FLAG_OPEN_DRAIN, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_OPEN_DRAIN;
	if (test_bit(FLAG_OPEN_SOURCE, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_OPEN_SOURCE;

	if (test_bit(FLAG_BIAS_DISABLE, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_BIAS_DISABLED;
	if (test_bit(FLAG_PULL_DOWN, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN;
	if (test_bit(FLAG_PULL_UP, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_UP;

	if (test_bit(FLAG_EDGE_RISING, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_EDGE_RISING;
	if (test_bit(FLAG_EDGE_FALLING, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_EDGE_FALLING;

	if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
	else if (test_bit(FLAG_EVENT_CLOCK_HTE, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
}

struct gpio_chardev_data {
	struct gpio_device *gdev;
	wait_queue_head_t wait;
	DECLARE_KFIFO(events, struct gpio_v2_line_info_changed, 32);
	struct notifier_block lineinfo_changed_nb;
	struct notifier_block device_unregistered_nb;
	unsigned long *watched_lines;
#ifdef CONFIG_GPIO_CDEV_V1
	atomic_t watch_abi_version;
#endif
};

static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip)
{
	struct gpio_device *gdev = cdev->gdev;
	struct gpiochip_info chipinfo;

	memset(&chipinfo, 0, sizeof(chipinfo));

	strscpy(chipinfo.name, dev_name(&gdev->dev), sizeof(chipinfo.name));
	strscpy(chipinfo.label, gdev->label, sizeof(chipinfo.label));
	chipinfo.lines = gdev->ngpio;
	if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
		return -EFAULT;
	return 0;
}
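
/*
 * Illustrative userspace sketch (not part of this driver): querying the chip
 * information served by chipinfo_get() above. The chip path is an arbitrary
 * example.
 *
 *	struct gpiochip_info info;
 *	int chip_fd = open("/dev/gpiochip0", O_RDONLY);
 *
 *	if (ioctl(chip_fd, GPIO_GET_CHIPINFO_IOCTL, &info) == 0)
 *		printf("%s [%s] has %u lines\n",
 *		       info.name, info.label, info.lines);
 */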
#ifdef CONFIG_GPIO_CDEV_V1
/*
 * returns 0 if the versions match, else the previously selected ABI version
 */
static int lineinfo_ensure_abi_version(struct gpio_chardev_data *cdata,
				       unsigned int version)
{
	int abiv = atomic_cmpxchg(&cdata->watch_abi_version, 0, version);

	if (abiv == version)
		return 0;

	return abiv;
}

static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip,
			   bool watch)
{
	struct gpio_desc *desc;
	struct gpioline_info lineinfo;
	struct gpio_v2_line_info lineinfo_v2;

	if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
		return -EFAULT;

	/* this doubles as a range check on line_offset */
	desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.line_offset);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	if (watch) {
		if (lineinfo_ensure_abi_version(cdev, 1))
			return -EPERM;

		if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
			return -EBUSY;
	}

	gpio_desc_to_lineinfo(desc, &lineinfo_v2);
	gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);

	if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
		if (watch)
			clear_bit(lineinfo.line_offset, cdev->watched_lines);
		return -EFAULT;
	}

	return 0;
}
#endif

static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
			bool watch)
{
	struct gpio_desc *desc;
	struct gpio_v2_line_info lineinfo;

	if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
		return -EFAULT;

	if (memchr_inv(lineinfo.padding, 0, sizeof(lineinfo.padding)))
		return -EINVAL;

	desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.offset);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	if (watch) {
#ifdef CONFIG_GPIO_CDEV_V1
		if (lineinfo_ensure_abi_version(cdev, 2))
			return -EPERM;
#endif
		if (test_and_set_bit(lineinfo.offset, cdev->watched_lines))
			return -EBUSY;
	}
	gpio_desc_to_lineinfo(desc, &lineinfo);
	supinfo_to_lineinfo(desc, &lineinfo);

	if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
		if (watch)
			clear_bit(lineinfo.offset, cdev->watched_lines);
		return -EFAULT;
	}

	return 0;
}

static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
{
	__u32 offset;

	if (copy_from_user(&offset, ip, sizeof(offset)))
		return -EFAULT;

	if (offset >= cdev->gdev->ngpio)
		return -EINVAL;

	if (!test_and_clear_bit(offset, cdev->watched_lines))
		return -EBUSY;

	return 0;
}

/*
 * gpio_ioctl() - ioctl handler for the GPIO chardev
 */
static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct gpio_chardev_data *cdev = file->private_data;
	struct gpio_device *gdev = cdev->gdev;
	void __user *ip = (void __user *)arg;

	guard(rwsem_read)(&gdev->sem);

	/* We fail any subsequent ioctl()s when the chip is gone */
	if (!gdev->chip)
		return -ENODEV;

	/* Fill in the struct and pass to userspace */
	switch (cmd) {
	case GPIO_GET_CHIPINFO_IOCTL:
		return chipinfo_get(cdev, ip);
#ifdef CONFIG_GPIO_CDEV_V1
	case GPIO_GET_LINEHANDLE_IOCTL:
		return linehandle_create(gdev, ip);
	case GPIO_GET_LINEEVENT_IOCTL:
		return lineevent_create(gdev, ip);
	case GPIO_GET_LINEINFO_IOCTL:
		return lineinfo_get_v1(cdev, ip, false);
	case GPIO_GET_LINEINFO_WATCH_IOCTL:
		return lineinfo_get_v1(cdev, ip, true);
#endif /* CONFIG_GPIO_CDEV_V1 */
	case GPIO_V2_GET_LINEINFO_IOCTL:
		return lineinfo_get(cdev, ip, false);
	case GPIO_V2_GET_LINEINFO_WATCH_IOCTL:
		return lineinfo_get(cdev, ip, true);
	case GPIO_V2_GET_LINE_IOCTL:
		return linereq_create(gdev, ip);
	case GPIO_GET_LINEINFO_UNWATCH_IOCTL:
		return lineinfo_unwatch(cdev, ip);
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static long gpio_ioctl_compat(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	return gpio_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif
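
/*
 * Illustrative userspace sketch (not part of this driver): reading v2 line
 * info via the GPIO_V2_GET_LINEINFO_IOCTL dispatched above. The line offset
 * is an arbitrary example; the padding must be zeroed, as enforced by
 * lineinfo_get(), which the designated initializer takes care of.
 *
 *	struct gpio_v2_line_info info = { .offset = 5 };
 *
 *	if (ioctl(chip_fd, GPIO_V2_GET_LINEINFO_IOCTL, &info) == 0 &&
 *	    (info.flags & GPIO_V2_LINE_FLAG_USED))
 *		printf("line %u is in use by '%s'\n",
 *		       info.offset, info.consumer);
 */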
static int lineinfo_changed_notify(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct gpio_chardev_data *cdev =
		container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb);
	struct gpio_v2_line_info_changed chg;
	struct gpio_desc *desc = data;
	int ret;

	if (!test_bit(gpio_chip_hwgpio(desc), cdev->watched_lines))
		return NOTIFY_DONE;

	memset(&chg, 0, sizeof(chg));
	chg.event_type = action;
	chg.timestamp_ns = ktime_get_ns();
	gpio_desc_to_lineinfo(desc, &chg.info);
	supinfo_to_lineinfo(desc, &chg.info);

	ret = kfifo_in_spinlocked(&cdev->events, &chg, 1, &cdev->wait.lock);
	if (ret)
		wake_up_poll(&cdev->wait, EPOLLIN);
	else
		pr_debug_ratelimited("lineinfo event FIFO is full - event dropped\n");

	return NOTIFY_OK;
}

static int gpio_device_unregistered_notify(struct notifier_block *nb,
					   unsigned long action, void *data)
{
	struct gpio_chardev_data *cdev = container_of(nb,
						      struct gpio_chardev_data,
						      device_unregistered_nb);

	wake_up_poll(&cdev->wait, EPOLLIN | EPOLLERR);

	return NOTIFY_OK;
}

static __poll_t lineinfo_watch_poll(struct file *file,
				    struct poll_table_struct *pollt)
{
	struct gpio_chardev_data *cdev = file->private_data;
	__poll_t events = 0;

	guard(rwsem_read)(&cdev->gdev->sem);

	if (!cdev->gdev->chip)
		return EPOLLHUP | EPOLLERR;

	poll_wait(file, &cdev->wait, pollt);

	if (!kfifo_is_empty_spinlocked_noirqsave(&cdev->events,
						 &cdev->wait.lock))
		events = EPOLLIN | EPOLLRDNORM;

	return events;
}
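
/*
 * Illustrative userspace sketch (not part of this driver): watching a line
 * for info changes and draining the records produced by lineinfo_watch_read()
 * below from the chip fd. The line offset is an arbitrary example.
 *
 *	struct gpio_v2_line_info info = { .offset = 5 };
 *	struct gpio_v2_line_info_changed chg;
 *
 *	ioctl(chip_fd, GPIO_V2_GET_LINEINFO_WATCH_IOCTL, &info);
 *	while (read(chip_fd, &chg, sizeof(chg)) == sizeof(chg))
 *		printf("event %u on line %u\n",
 *		       chg.event_type, chg.info.offset);
 */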
static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
				   size_t count, loff_t *off)
{
	struct gpio_chardev_data *cdev = file->private_data;
	struct gpio_v2_line_info_changed event;
	ssize_t bytes_read = 0;
	int ret;
	size_t event_size;

	guard(rwsem_read)(&cdev->gdev->sem);

	if (!cdev->gdev->chip)
		return -ENODEV;

#ifndef CONFIG_GPIO_CDEV_V1
	event_size = sizeof(struct gpio_v2_line_info_changed);
	if (count < event_size)
		return -EINVAL;
#endif

	do {
		scoped_guard(spinlock, &cdev->wait.lock) {
			if (kfifo_is_empty(&cdev->events)) {
				if (bytes_read)
					return bytes_read;

				if (file->f_flags & O_NONBLOCK)
					return -EAGAIN;

				ret = wait_event_interruptible_locked(cdev->wait,
						!kfifo_is_empty(&cdev->events));
				if (ret)
					return ret;
			}
#ifdef CONFIG_GPIO_CDEV_V1
			/* must be after kfifo check so watch_abi_version is set */
			if (atomic_read(&cdev->watch_abi_version) == 2)
				event_size = sizeof(struct gpio_v2_line_info_changed);
			else
				event_size = sizeof(struct gpioline_info_changed);
			if (count < event_size)
				return -EINVAL;
#endif
			ret = kfifo_out(&cdev->events, &event, 1);
		}
		if (ret != 1) {
			/* We should never get here. See lineevent_read(). */
			ret = -EIO;
			break;
		}

#ifdef CONFIG_GPIO_CDEV_V1
		if (event_size == sizeof(struct gpio_v2_line_info_changed)) {
			if (copy_to_user(buf + bytes_read, &event, event_size))
				return -EFAULT;
		} else {
			struct gpioline_info_changed event_v1;

			gpio_v2_line_info_changed_to_v1(&event, &event_v1);
			if (copy_to_user(buf + bytes_read, &event_v1,
					 event_size))
				return -EFAULT;
		}
#else
		if (copy_to_user(buf + bytes_read, &event, event_size))
			return -EFAULT;
#endif
		bytes_read += event_size;
	} while (count >= bytes_read + sizeof(event));

	return bytes_read;
}

/**
 * gpio_chrdev_open() - open the chardev for ioctl operations
 * @inode: inode for this chardev
 * @file: file struct for storing private data
 *
 * Returns 0 on success, or a negative errno on failure.
 */
static int gpio_chrdev_open(struct inode *inode, struct file *file)
{
	struct gpio_device *gdev = container_of(inode->i_cdev,
						struct gpio_device, chrdev);
	struct gpio_chardev_data *cdev;
	int ret = -ENOMEM;

	guard(rwsem_read)(&gdev->sem);

	/* Fail on open if the backing gpiochip is gone */
	if (!gdev->chip)
		return -ENODEV;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return -ENOMEM;

	cdev->watched_lines = bitmap_zalloc(gdev->chip->ngpio, GFP_KERNEL);
	if (!cdev->watched_lines)
		goto out_free_cdev;

	init_waitqueue_head(&cdev->wait);
	INIT_KFIFO(cdev->events);
	cdev->gdev = gpio_device_get(gdev);

	cdev->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify;
	ret = blocking_notifier_chain_register(&gdev->line_state_notifier,
					       &cdev->lineinfo_changed_nb);
	if (ret)
		goto out_free_bitmap;

	cdev->device_unregistered_nb.notifier_call =
					gpio_device_unregistered_notify;
	ret = blocking_notifier_chain_register(&gdev->device_notifier,
					       &cdev->device_unregistered_nb);
	if (ret)
		goto out_unregister_line_notifier;

	file->private_data = cdev;

	ret = nonseekable_open(inode, file);
	if (ret)
		goto out_unregister_device_notifier;

	return ret;

out_unregister_device_notifier:
	blocking_notifier_chain_unregister(&gdev->device_notifier,
					   &cdev->device_unregistered_nb);
out_unregister_line_notifier:
	blocking_notifier_chain_unregister(&gdev->line_state_notifier,
					   &cdev->lineinfo_changed_nb);
out_free_bitmap:
	gpio_device_put(gdev);
	bitmap_free(cdev->watched_lines);
out_free_cdev:
	kfree(cdev);
	return ret;
}

/**
 * gpio_chrdev_release() - close chardev after ioctl operations
 * @inode: inode for this chardev
 * @file: file struct for storing private data
 *
 * Returns 0 on success.
 */
static int gpio_chrdev_release(struct inode *inode, struct file *file)
{
	struct gpio_chardev_data *cdev = file->private_data;
	struct gpio_device *gdev = cdev->gdev;

	bitmap_free(cdev->watched_lines);
	blocking_notifier_chain_unregister(&gdev->device_notifier,
					   &cdev->device_unregistered_nb);
	blocking_notifier_chain_unregister(&gdev->line_state_notifier,
					   &cdev->lineinfo_changed_nb);
	gpio_device_put(gdev);
	kfree(cdev);

	return 0;
}
static const struct file_operations gpio_fileops = {
	.release = gpio_chrdev_release,
	.open = gpio_chrdev_open,
	.poll = lineinfo_watch_poll,
	.read = lineinfo_watch_read,
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.unlocked_ioctl = gpio_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = gpio_ioctl_compat,
#endif
};

int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt)
{
	int ret;

	cdev_init(&gdev->chrdev, &gpio_fileops);
	gdev->chrdev.owner = THIS_MODULE;
	gdev->dev.devt = MKDEV(MAJOR(devt), gdev->id);

	ret = cdev_device_add(&gdev->chrdev, &gdev->dev);
	if (ret)
		return ret;

	chip_dbg(gdev->chip, "added GPIO chardev (%d:%d)\n",
		 MAJOR(devt), gdev->id);

	return 0;
}

void gpiolib_cdev_unregister(struct gpio_device *gdev)
{
	cdev_device_del(&gdev->chrdev, &gdev->dev);
	blocking_notifier_call_chain(&gdev->device_notifier, 0, NULL);
}