1 // SPDX-License-Identifier: GPL-2.0 2 3 #include <linux/anon_inodes.h> 4 #include <linux/atomic.h> 5 #include <linux/bitmap.h> 6 #include <linux/build_bug.h> 7 #include <linux/cdev.h> 8 #include <linux/cleanup.h> 9 #include <linux/compat.h> 10 #include <linux/compiler.h> 11 #include <linux/device.h> 12 #include <linux/err.h> 13 #include <linux/file.h> 14 #include <linux/gpio.h> 15 #include <linux/gpio/driver.h> 16 #include <linux/hte.h> 17 #include <linux/interrupt.h> 18 #include <linux/irqreturn.h> 19 #include <linux/kernel.h> 20 #include <linux/kfifo.h> 21 #include <linux/module.h> 22 #include <linux/mutex.h> 23 #include <linux/overflow.h> 24 #include <linux/pinctrl/consumer.h> 25 #include <linux/poll.h> 26 #include <linux/rbtree.h> 27 #include <linux/seq_file.h> 28 #include <linux/spinlock.h> 29 #include <linux/timekeeping.h> 30 #include <linux/uaccess.h> 31 #include <linux/workqueue.h> 32 33 #include <uapi/linux/gpio.h> 34 35 #include "gpiolib.h" 36 #include "gpiolib-cdev.h" 37 38 /* 39 * Array sizes must ensure 64-bit alignment and not create holes in the 40 * struct packing. 41 */ 42 static_assert(IS_ALIGNED(GPIO_V2_LINES_MAX, 2)); 43 static_assert(IS_ALIGNED(GPIO_MAX_NAME_SIZE, 8)); 44 45 /* 46 * Check that uAPI structs are 64-bit aligned for 32/64-bit compatibility 47 */ 48 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_attribute), 8)); 49 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config_attribute), 8)); 50 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config), 8)); 51 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_request), 8)); 52 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info), 8)); 53 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info_changed), 8)); 54 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_event), 8)); 55 static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_values), 8)); 56 57 /* Character device interface to GPIO. 58 * 59 * The GPIO character device, /dev/gpiochipN, provides userspace an 60 * interface to gpiolib GPIOs via ioctl()s. 61 */ 62 63 /* 64 * GPIO line handle management 65 */ 66 67 #ifdef CONFIG_GPIO_CDEV_V1 68 /** 69 * struct linehandle_state - contains the state of a userspace handle 70 * @gdev: the GPIO device the handle pertains to 71 * @label: consumer label used to tag descriptors 72 * @descs: the GPIO descriptors held by this handle 73 * @num_descs: the number of descriptors held in the descs array 74 */ 75 struct linehandle_state { 76 struct gpio_device *gdev; 77 const char *label; 78 struct gpio_desc *descs[GPIOHANDLES_MAX]; 79 u32 num_descs; 80 }; 81 82 #define GPIOHANDLE_REQUEST_VALID_FLAGS \ 83 (GPIOHANDLE_REQUEST_INPUT | \ 84 GPIOHANDLE_REQUEST_OUTPUT | \ 85 GPIOHANDLE_REQUEST_ACTIVE_LOW | \ 86 GPIOHANDLE_REQUEST_BIAS_PULL_UP | \ 87 GPIOHANDLE_REQUEST_BIAS_PULL_DOWN | \ 88 GPIOHANDLE_REQUEST_BIAS_DISABLE | \ 89 GPIOHANDLE_REQUEST_OPEN_DRAIN | \ 90 GPIOHANDLE_REQUEST_OPEN_SOURCE) 91 92 static int linehandle_validate_flags(u32 flags) 93 { 94 /* Return an error if an unknown flag is set */ 95 if (flags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) 96 return -EINVAL; 97 98 /* 99 * Do not allow both INPUT & OUTPUT flags to be set as they are 100 * contradictory. 101 */ 102 if ((flags & GPIOHANDLE_REQUEST_INPUT) && 103 (flags & GPIOHANDLE_REQUEST_OUTPUT)) 104 return -EINVAL; 105 106 /* 107 * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If 108 * the hardware actually supports enabling both at the same time the 109 * electrical result would be disastrous. 
110 */ 111 if ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) && 112 (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE)) 113 return -EINVAL; 114 115 /* OPEN_DRAIN and OPEN_SOURCE flags only make sense for output mode. */ 116 if (!(flags & GPIOHANDLE_REQUEST_OUTPUT) && 117 ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) || 118 (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE))) 119 return -EINVAL; 120 121 /* Bias flags only allowed for input or output mode. */ 122 if (!((flags & GPIOHANDLE_REQUEST_INPUT) || 123 (flags & GPIOHANDLE_REQUEST_OUTPUT)) && 124 ((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) || 125 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP) || 126 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN))) 127 return -EINVAL; 128 129 /* Only one bias flag can be set. */ 130 if (((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) && 131 (flags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN | 132 GPIOHANDLE_REQUEST_BIAS_PULL_UP))) || 133 ((flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) && 134 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP))) 135 return -EINVAL; 136 137 return 0; 138 } 139 140 static void linehandle_flags_to_desc_flags(u32 lflags, unsigned long *flagsp) 141 { 142 assign_bit(FLAG_ACTIVE_LOW, flagsp, 143 lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW); 144 assign_bit(FLAG_OPEN_DRAIN, flagsp, 145 lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN); 146 assign_bit(FLAG_OPEN_SOURCE, flagsp, 147 lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE); 148 assign_bit(FLAG_PULL_UP, flagsp, 149 lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP); 150 assign_bit(FLAG_PULL_DOWN, flagsp, 151 lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN); 152 assign_bit(FLAG_BIAS_DISABLE, flagsp, 153 lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE); 154 } 155 156 static long linehandle_set_config(struct linehandle_state *lh, 157 void __user *ip) 158 { 159 struct gpiohandle_config gcnf; 160 struct gpio_desc *desc; 161 int i, ret; 162 u32 lflags; 163 164 if (copy_from_user(&gcnf, ip, sizeof(gcnf))) 165 return -EFAULT; 166 167 lflags = gcnf.flags; 168 ret = linehandle_validate_flags(lflags); 169 if (ret) 170 return ret; 171 172 for (i = 0; i < lh->num_descs; i++) { 173 desc = lh->descs[i]; 174 linehandle_flags_to_desc_flags(gcnf.flags, &desc->flags); 175 176 /* 177 * Lines have to be requested explicitly for input 178 * or output, else the line will be treated "as is". 
179 */ 180 if (lflags & GPIOHANDLE_REQUEST_OUTPUT) { 181 int val = !!gcnf.default_values[i]; 182 183 ret = gpiod_direction_output(desc, val); 184 if (ret) 185 return ret; 186 } else if (lflags & GPIOHANDLE_REQUEST_INPUT) { 187 ret = gpiod_direction_input(desc); 188 if (ret) 189 return ret; 190 } 191 192 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG); 193 } 194 return 0; 195 } 196 197 static long linehandle_ioctl(struct file *file, unsigned int cmd, 198 unsigned long arg) 199 { 200 struct linehandle_state *lh = file->private_data; 201 void __user *ip = (void __user *)arg; 202 struct gpiohandle_data ghd; 203 DECLARE_BITMAP(vals, GPIOHANDLES_MAX); 204 unsigned int i; 205 int ret; 206 207 guard(srcu)(&lh->gdev->srcu); 208 209 if (!rcu_access_pointer(lh->gdev->chip)) 210 return -ENODEV; 211 212 switch (cmd) { 213 case GPIOHANDLE_GET_LINE_VALUES_IOCTL: 214 /* NOTE: It's okay to read values of output lines */ 215 ret = gpiod_get_array_value_complex(false, true, 216 lh->num_descs, lh->descs, 217 NULL, vals); 218 if (ret) 219 return ret; 220 221 memset(&ghd, 0, sizeof(ghd)); 222 for (i = 0; i < lh->num_descs; i++) 223 ghd.values[i] = test_bit(i, vals); 224 225 if (copy_to_user(ip, &ghd, sizeof(ghd))) 226 return -EFAULT; 227 228 return 0; 229 case GPIOHANDLE_SET_LINE_VALUES_IOCTL: 230 /* 231 * All line descriptors were created at once with the same 232 * flags so just check if the first one is really output. 233 */ 234 if (!test_bit(FLAG_IS_OUT, &lh->descs[0]->flags)) 235 return -EPERM; 236 237 if (copy_from_user(&ghd, ip, sizeof(ghd))) 238 return -EFAULT; 239 240 /* Clamp all values to [0,1] */ 241 for (i = 0; i < lh->num_descs; i++) 242 __assign_bit(i, vals, ghd.values[i]); 243 244 /* Reuse the array setting function */ 245 return gpiod_set_array_value_complex(false, 246 true, 247 lh->num_descs, 248 lh->descs, 249 NULL, 250 vals); 251 case GPIOHANDLE_SET_CONFIG_IOCTL: 252 return linehandle_set_config(lh, ip); 253 default: 254 return -EINVAL; 255 } 256 } 257 258 #ifdef CONFIG_COMPAT 259 static long linehandle_ioctl_compat(struct file *file, unsigned int cmd, 260 unsigned long arg) 261 { 262 return linehandle_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); 263 } 264 #endif 265 266 static void linehandle_free(struct linehandle_state *lh) 267 { 268 int i; 269 270 for (i = 0; i < lh->num_descs; i++) 271 if (lh->descs[i]) 272 gpiod_free(lh->descs[i]); 273 kfree(lh->label); 274 gpio_device_put(lh->gdev); 275 kfree(lh); 276 } 277 278 static int linehandle_release(struct inode *inode, struct file *file) 279 { 280 linehandle_free(file->private_data); 281 return 0; 282 } 283 284 static const struct file_operations linehandle_fileops = { 285 .release = linehandle_release, 286 .owner = THIS_MODULE, 287 .llseek = noop_llseek, 288 .unlocked_ioctl = linehandle_ioctl, 289 #ifdef CONFIG_COMPAT 290 .compat_ioctl = linehandle_ioctl_compat, 291 #endif 292 }; 293 294 static int linehandle_create(struct gpio_device *gdev, void __user *ip) 295 { 296 struct gpiohandle_request handlereq; 297 struct linehandle_state *lh; 298 struct file *file; 299 int fd, i, ret; 300 u32 lflags; 301 302 if (copy_from_user(&handlereq, ip, sizeof(handlereq))) 303 return -EFAULT; 304 if ((handlereq.lines == 0) || (handlereq.lines > GPIOHANDLES_MAX)) 305 return -EINVAL; 306 307 lflags = handlereq.flags; 308 309 ret = linehandle_validate_flags(lflags); 310 if (ret) 311 return ret; 312 313 lh = kzalloc(sizeof(*lh), GFP_KERNEL); 314 if (!lh) 315 return -ENOMEM; 316 lh->gdev = gpio_device_get(gdev); 317 318 if 
(handlereq.consumer_label[0] != '\0') { 319 /* label is only initialized if consumer_label is set */ 320 lh->label = kstrndup(handlereq.consumer_label, 321 sizeof(handlereq.consumer_label) - 1, 322 GFP_KERNEL); 323 if (!lh->label) { 324 ret = -ENOMEM; 325 goto out_free_lh; 326 } 327 } 328 329 lh->num_descs = handlereq.lines; 330 331 /* Request each GPIO */ 332 for (i = 0; i < handlereq.lines; i++) { 333 u32 offset = handlereq.lineoffsets[i]; 334 struct gpio_desc *desc = gpio_device_get_desc(gdev, offset); 335 336 if (IS_ERR(desc)) { 337 ret = PTR_ERR(desc); 338 goto out_free_lh; 339 } 340 341 ret = gpiod_request_user(desc, lh->label); 342 if (ret) 343 goto out_free_lh; 344 lh->descs[i] = desc; 345 linehandle_flags_to_desc_flags(handlereq.flags, &desc->flags); 346 347 ret = gpiod_set_transitory(desc, false); 348 if (ret < 0) 349 goto out_free_lh; 350 351 /* 352 * Lines have to be requested explicitly for input 353 * or output, else the line will be treated "as is". 354 */ 355 if (lflags & GPIOHANDLE_REQUEST_OUTPUT) { 356 int val = !!handlereq.default_values[i]; 357 358 ret = gpiod_direction_output(desc, val); 359 if (ret) 360 goto out_free_lh; 361 } else if (lflags & GPIOHANDLE_REQUEST_INPUT) { 362 ret = gpiod_direction_input(desc); 363 if (ret) 364 goto out_free_lh; 365 } 366 367 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED); 368 369 dev_dbg(&gdev->dev, "registered chardev handle for line %d\n", 370 offset); 371 } 372 373 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC); 374 if (fd < 0) { 375 ret = fd; 376 goto out_free_lh; 377 } 378 379 file = anon_inode_getfile("gpio-linehandle", 380 &linehandle_fileops, 381 lh, 382 O_RDONLY | O_CLOEXEC); 383 if (IS_ERR(file)) { 384 ret = PTR_ERR(file); 385 goto out_put_unused_fd; 386 } 387 388 handlereq.fd = fd; 389 if (copy_to_user(ip, &handlereq, sizeof(handlereq))) { 390 /* 391 * fput() will trigger the release() callback, so do not go onto 392 * the regular error cleanup path here. 393 */ 394 fput(file); 395 put_unused_fd(fd); 396 return -EFAULT; 397 } 398 399 fd_install(fd, file); 400 401 dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n", 402 lh->num_descs); 403 404 return 0; 405 406 out_put_unused_fd: 407 put_unused_fd(fd); 408 out_free_lh: 409 linehandle_free(lh); 410 return ret; 411 } 412 #endif /* CONFIG_GPIO_CDEV_V1 */ 413 414 /** 415 * struct line - contains the state of a requested line 416 * @node: to store the object in supinfo_tree if supplemental 417 * @desc: the GPIO descriptor for this line. 418 * @req: the corresponding line request 419 * @irq: the interrupt triggered in response to events on this GPIO 420 * @edflags: the edge flags, GPIO_V2_LINE_FLAG_EDGE_RISING and/or 421 * GPIO_V2_LINE_FLAG_EDGE_FALLING, indicating the edge detection applied 422 * @timestamp_ns: cache for the timestamp storing it between hardirq and 423 * IRQ thread, used to bring the timestamp close to the actual event 424 * @req_seqno: the seqno for the current edge event in the sequence of 425 * events for the corresponding line request. This is drawn from the @req. 426 * @line_seqno: the seqno for the current edge event in the sequence of 427 * events for this line. 
428 * @work: the worker that implements software debouncing 429 * @debounce_period_us: the debounce period in microseconds 430 * @sw_debounced: flag indicating if the software debouncer is active 431 * @level: the current debounced physical level of the line 432 * @hdesc: the Hardware Timestamp Engine (HTE) descriptor 433 * @raw_level: the line level at the time of event 434 * @total_discard_seq: the running counter of the discarded events 435 * @last_seqno: the last sequence number before debounce period expires 436 */ 437 struct line { 438 struct rb_node node; 439 struct gpio_desc *desc; 440 /* 441 * -- edge detector specific fields -- 442 */ 443 struct linereq *req; 444 unsigned int irq; 445 /* 446 * The flags for the active edge detector configuration. 447 * 448 * edflags is set by linereq_create(), linereq_free(), and 449 * linereq_set_config_unlocked(), which are themselves mutually 450 * exclusive, and is accessed by edge_irq_thread(), 451 * process_hw_ts_thread() and debounce_work_func(), 452 * which can all live with a slightly stale value. 453 */ 454 u64 edflags; 455 /* 456 * timestamp_ns and req_seqno are accessed only by 457 * edge_irq_handler() and edge_irq_thread(), which are themselves 458 * mutually exclusive, so no additional protection is necessary. 459 */ 460 u64 timestamp_ns; 461 u32 req_seqno; 462 /* 463 * line_seqno is accessed by either edge_irq_thread() or 464 * debounce_work_func(), which are themselves mutually exclusive, 465 * so no additional protection is necessary. 466 */ 467 u32 line_seqno; 468 /* 469 * -- debouncer specific fields -- 470 */ 471 struct delayed_work work; 472 /* 473 * debounce_period_us is accessed by debounce_irq_handler() and 474 * process_hw_ts() which are disabled when modified by 475 * debounce_setup(), edge_detector_setup() or edge_detector_stop() 476 * or can live with a stale version when updated by 477 * edge_detector_update(). 478 * The modifying functions are themselves mutually exclusive. 479 */ 480 unsigned int debounce_period_us; 481 /* 482 * sw_debounce is accessed by linereq_set_config(), which is the 483 * only setter, and linereq_get_values(), which can live with a 484 * slightly stale value. 485 */ 486 unsigned int sw_debounced; 487 /* 488 * level is accessed by debounce_work_func(), which is the only 489 * setter, and linereq_get_values() which can live with a slightly 490 * stale value. 491 */ 492 unsigned int level; 493 #ifdef CONFIG_HTE 494 struct hte_ts_desc hdesc; 495 /* 496 * HTE provider sets line level at the time of event. The valid 497 * value is 0 or 1 and negative value for an error. 498 */ 499 int raw_level; 500 /* 501 * when sw_debounce is set on HTE enabled line, this is running 502 * counter of the discarded events. 503 */ 504 u32 total_discard_seq; 505 /* 506 * when sw_debounce is set on HTE enabled line, this variable records 507 * last sequence number before debounce period expires. 508 */ 509 u32 last_seqno; 510 #endif /* CONFIG_HTE */ 511 }; 512 513 /* 514 * a rbtree of the struct lines containing supplemental info. 515 * Used to populate gpio_v2_line_info with cdev specific fields not contained 516 * in the struct gpio_desc. 517 * A line is determined to contain supplemental information by 518 * line_has_supinfo(). 
519 */ 520 static struct rb_root supinfo_tree = RB_ROOT; 521 /* covers supinfo_tree */ 522 static DEFINE_SPINLOCK(supinfo_lock); 523 524 /** 525 * struct linereq - contains the state of a userspace line request 526 * @gdev: the GPIO device the line request pertains to 527 * @label: consumer label used to tag GPIO descriptors 528 * @num_lines: the number of lines in the lines array 529 * @wait: wait queue that handles blocking reads of events 530 * @device_unregistered_nb: notifier block for receiving gdev unregister events 531 * @event_buffer_size: the number of elements allocated in @events 532 * @events: KFIFO for the GPIO events 533 * @seqno: the sequence number for edge events generated on all lines in 534 * this line request. Note that this is not used when @num_lines is 1, as 535 * the line_seqno is then the same and is cheaper to calculate. 536 * @config_mutex: mutex for serializing ioctl() calls to ensure consistency 537 * of configuration, particularly multi-step accesses to desc flags and 538 * changes to supinfo status. 539 * @lines: the lines held by this line request, with @num_lines elements. 540 */ 541 struct linereq { 542 struct gpio_device *gdev; 543 const char *label; 544 u32 num_lines; 545 wait_queue_head_t wait; 546 struct notifier_block device_unregistered_nb; 547 u32 event_buffer_size; 548 DECLARE_KFIFO_PTR(events, struct gpio_v2_line_event); 549 atomic_t seqno; 550 struct mutex config_mutex; 551 struct line lines[] __counted_by(num_lines); 552 }; 553 554 static void supinfo_insert(struct line *line) 555 { 556 struct rb_node **new = &(supinfo_tree.rb_node), *parent = NULL; 557 struct line *entry; 558 559 guard(spinlock)(&supinfo_lock); 560 561 while (*new) { 562 entry = container_of(*new, struct line, node); 563 564 parent = *new; 565 if (line->desc < entry->desc) { 566 new = &((*new)->rb_left); 567 } else if (line->desc > entry->desc) { 568 new = &((*new)->rb_right); 569 } else { 570 /* this should never happen */ 571 WARN(1, "duplicate line inserted"); 572 return; 573 } 574 } 575 576 rb_link_node(&line->node, parent, new); 577 rb_insert_color(&line->node, &supinfo_tree); 578 } 579 580 static void supinfo_erase(struct line *line) 581 { 582 guard(spinlock)(&supinfo_lock); 583 584 rb_erase(&line->node, &supinfo_tree); 585 } 586 587 static struct line *supinfo_find(struct gpio_desc *desc) 588 { 589 struct rb_node *node = supinfo_tree.rb_node; 590 struct line *line; 591 592 while (node) { 593 line = container_of(node, struct line, node); 594 if (desc < line->desc) 595 node = node->rb_left; 596 else if (desc > line->desc) 597 node = node->rb_right; 598 else 599 return line; 600 } 601 return NULL; 602 } 603 604 static void supinfo_to_lineinfo(struct gpio_desc *desc, 605 struct gpio_v2_line_info *info) 606 { 607 struct gpio_v2_line_attribute *attr; 608 struct line *line; 609 610 guard(spinlock)(&supinfo_lock); 611 612 line = supinfo_find(desc); 613 if (!line) 614 return; 615 616 attr = &info->attrs[info->num_attrs]; 617 attr->id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE; 618 attr->debounce_period_us = READ_ONCE(line->debounce_period_us); 619 info->num_attrs++; 620 } 621 622 static inline bool line_has_supinfo(struct line *line) 623 { 624 return READ_ONCE(line->debounce_period_us); 625 } 626 627 /* 628 * Checks line_has_supinfo() before and after the change to avoid unnecessary 629 * supinfo_tree access. 630 * Called indirectly by linereq_create() or linereq_set_config() so line 631 * is already protected from concurrent changes. 
632 */ 633 static void line_set_debounce_period(struct line *line, 634 unsigned int debounce_period_us) 635 { 636 bool was_suppl = line_has_supinfo(line); 637 638 WRITE_ONCE(line->debounce_period_us, debounce_period_us); 639 640 /* if supinfo status is unchanged then we're done */ 641 if (line_has_supinfo(line) == was_suppl) 642 return; 643 644 /* supinfo status has changed, so update the tree */ 645 if (was_suppl) 646 supinfo_erase(line); 647 else 648 supinfo_insert(line); 649 } 650 651 #define GPIO_V2_LINE_BIAS_FLAGS \ 652 (GPIO_V2_LINE_FLAG_BIAS_PULL_UP | \ 653 GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN | \ 654 GPIO_V2_LINE_FLAG_BIAS_DISABLED) 655 656 #define GPIO_V2_LINE_DIRECTION_FLAGS \ 657 (GPIO_V2_LINE_FLAG_INPUT | \ 658 GPIO_V2_LINE_FLAG_OUTPUT) 659 660 #define GPIO_V2_LINE_DRIVE_FLAGS \ 661 (GPIO_V2_LINE_FLAG_OPEN_DRAIN | \ 662 GPIO_V2_LINE_FLAG_OPEN_SOURCE) 663 664 #define GPIO_V2_LINE_EDGE_FLAGS \ 665 (GPIO_V2_LINE_FLAG_EDGE_RISING | \ 666 GPIO_V2_LINE_FLAG_EDGE_FALLING) 667 668 #define GPIO_V2_LINE_FLAG_EDGE_BOTH GPIO_V2_LINE_EDGE_FLAGS 669 670 #define GPIO_V2_LINE_VALID_FLAGS \ 671 (GPIO_V2_LINE_FLAG_ACTIVE_LOW | \ 672 GPIO_V2_LINE_DIRECTION_FLAGS | \ 673 GPIO_V2_LINE_DRIVE_FLAGS | \ 674 GPIO_V2_LINE_EDGE_FLAGS | \ 675 GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME | \ 676 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \ 677 GPIO_V2_LINE_BIAS_FLAGS) 678 679 /* subset of flags relevant for edge detector configuration */ 680 #define GPIO_V2_LINE_EDGE_DETECTOR_FLAGS \ 681 (GPIO_V2_LINE_FLAG_ACTIVE_LOW | \ 682 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \ 683 GPIO_V2_LINE_EDGE_FLAGS) 684 685 static int linereq_unregistered_notify(struct notifier_block *nb, 686 unsigned long action, void *data) 687 { 688 struct linereq *lr = container_of(nb, struct linereq, 689 device_unregistered_nb); 690 691 wake_up_poll(&lr->wait, EPOLLIN | EPOLLERR); 692 693 return NOTIFY_OK; 694 } 695 696 static void linereq_put_event(struct linereq *lr, 697 struct gpio_v2_line_event *le) 698 { 699 bool overflow = false; 700 701 scoped_guard(spinlock, &lr->wait.lock) { 702 if (kfifo_is_full(&lr->events)) { 703 overflow = true; 704 kfifo_skip(&lr->events); 705 } 706 kfifo_in(&lr->events, le, 1); 707 } 708 if (!overflow) 709 wake_up_poll(&lr->wait, EPOLLIN); 710 else 711 pr_debug_ratelimited("event FIFO is full - event dropped\n"); 712 } 713 714 static u64 line_event_timestamp(struct line *line) 715 { 716 if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags)) 717 return ktime_get_real_ns(); 718 else if (IS_ENABLED(CONFIG_HTE) && 719 test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags)) 720 return line->timestamp_ns; 721 722 return ktime_get_ns(); 723 } 724 725 static u32 line_event_id(int level) 726 { 727 return level ? GPIO_V2_LINE_EVENT_RISING_EDGE : 728 GPIO_V2_LINE_EVENT_FALLING_EDGE; 729 } 730 731 #ifdef CONFIG_HTE 732 733 static enum hte_return process_hw_ts_thread(void *p) 734 { 735 struct line *line; 736 struct linereq *lr; 737 struct gpio_v2_line_event le; 738 u64 edflags; 739 int level; 740 741 if (!p) 742 return HTE_CB_HANDLED; 743 744 line = p; 745 lr = line->req; 746 747 memset(&le, 0, sizeof(le)); 748 749 le.timestamp_ns = line->timestamp_ns; 750 edflags = READ_ONCE(line->edflags); 751 752 switch (edflags & GPIO_V2_LINE_EDGE_FLAGS) { 753 case GPIO_V2_LINE_FLAG_EDGE_BOTH: 754 level = (line->raw_level >= 0) ? 
755 line->raw_level : 756 gpiod_get_raw_value_cansleep(line->desc); 757 758 if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW) 759 level = !level; 760 761 le.id = line_event_id(level); 762 break; 763 case GPIO_V2_LINE_FLAG_EDGE_RISING: 764 le.id = GPIO_V2_LINE_EVENT_RISING_EDGE; 765 break; 766 case GPIO_V2_LINE_FLAG_EDGE_FALLING: 767 le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE; 768 break; 769 default: 770 return HTE_CB_HANDLED; 771 } 772 le.line_seqno = line->line_seqno; 773 le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno; 774 le.offset = gpio_chip_hwgpio(line->desc); 775 776 linereq_put_event(lr, &le); 777 778 return HTE_CB_HANDLED; 779 } 780 781 static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p) 782 { 783 struct line *line; 784 struct linereq *lr; 785 int diff_seqno = 0; 786 787 if (!ts || !p) 788 return HTE_CB_HANDLED; 789 790 line = p; 791 line->timestamp_ns = ts->tsc; 792 line->raw_level = ts->raw_level; 793 lr = line->req; 794 795 if (READ_ONCE(line->sw_debounced)) { 796 line->total_discard_seq++; 797 line->last_seqno = ts->seq; 798 mod_delayed_work(system_wq, &line->work, 799 usecs_to_jiffies(READ_ONCE(line->debounce_period_us))); 800 } else { 801 if (unlikely(ts->seq < line->line_seqno)) 802 return HTE_CB_HANDLED; 803 804 diff_seqno = ts->seq - line->line_seqno; 805 line->line_seqno = ts->seq; 806 if (lr->num_lines != 1) 807 line->req_seqno = atomic_add_return(diff_seqno, 808 &lr->seqno); 809 810 return HTE_RUN_SECOND_CB; 811 } 812 813 return HTE_CB_HANDLED; 814 } 815 816 static int hte_edge_setup(struct line *line, u64 eflags) 817 { 818 int ret; 819 unsigned long flags = 0; 820 struct hte_ts_desc *hdesc = &line->hdesc; 821 822 if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING) 823 flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ? 824 HTE_FALLING_EDGE_TS : 825 HTE_RISING_EDGE_TS; 826 if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING) 827 flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ? 828 HTE_RISING_EDGE_TS : 829 HTE_FALLING_EDGE_TS; 830 831 line->total_discard_seq = 0; 832 833 hte_init_line_attr(hdesc, desc_to_gpio(line->desc), flags, NULL, 834 line->desc); 835 836 ret = hte_ts_get(NULL, hdesc, 0); 837 if (ret) 838 return ret; 839 840 return hte_request_ts_ns(hdesc, process_hw_ts, process_hw_ts_thread, 841 line); 842 } 843 844 #else 845 846 static int hte_edge_setup(struct line *line, u64 eflags) 847 { 848 return 0; 849 } 850 #endif /* CONFIG_HTE */ 851 852 static irqreturn_t edge_irq_thread(int irq, void *p) 853 { 854 struct line *line = p; 855 struct linereq *lr = line->req; 856 struct gpio_v2_line_event le; 857 858 /* Do not leak kernel stack to userspace */ 859 memset(&le, 0, sizeof(le)); 860 861 if (line->timestamp_ns) { 862 le.timestamp_ns = line->timestamp_ns; 863 } else { 864 /* 865 * We may be running from a nested threaded interrupt in 866 * which case we didn't get the timestamp from 867 * edge_irq_handler(). 
868 */ 869 le.timestamp_ns = line_event_timestamp(line); 870 if (lr->num_lines != 1) 871 line->req_seqno = atomic_inc_return(&lr->seqno); 872 } 873 line->timestamp_ns = 0; 874 875 switch (READ_ONCE(line->edflags) & GPIO_V2_LINE_EDGE_FLAGS) { 876 case GPIO_V2_LINE_FLAG_EDGE_BOTH: 877 le.id = line_event_id(gpiod_get_value_cansleep(line->desc)); 878 break; 879 case GPIO_V2_LINE_FLAG_EDGE_RISING: 880 le.id = GPIO_V2_LINE_EVENT_RISING_EDGE; 881 break; 882 case GPIO_V2_LINE_FLAG_EDGE_FALLING: 883 le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE; 884 break; 885 default: 886 return IRQ_NONE; 887 } 888 line->line_seqno++; 889 le.line_seqno = line->line_seqno; 890 le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno; 891 le.offset = gpio_chip_hwgpio(line->desc); 892 893 linereq_put_event(lr, &le); 894 895 return IRQ_HANDLED; 896 } 897 898 static irqreturn_t edge_irq_handler(int irq, void *p) 899 { 900 struct line *line = p; 901 struct linereq *lr = line->req; 902 903 /* 904 * Just store the timestamp in hardirq context so we get it as 905 * close in time as possible to the actual event. 906 */ 907 line->timestamp_ns = line_event_timestamp(line); 908 909 if (lr->num_lines != 1) 910 line->req_seqno = atomic_inc_return(&lr->seqno); 911 912 return IRQ_WAKE_THREAD; 913 } 914 915 /* 916 * returns the current debounced logical value. 917 */ 918 static bool debounced_value(struct line *line) 919 { 920 bool value; 921 922 /* 923 * minor race - debouncer may be stopped here, so edge_detector_stop() 924 * must leave the value unchanged so the following will read the level 925 * from when the debouncer was last running. 926 */ 927 value = READ_ONCE(line->level); 928 929 if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags)) 930 value = !value; 931 932 return value; 933 } 934 935 static irqreturn_t debounce_irq_handler(int irq, void *p) 936 { 937 struct line *line = p; 938 939 mod_delayed_work(system_wq, &line->work, 940 usecs_to_jiffies(READ_ONCE(line->debounce_period_us))); 941 942 return IRQ_HANDLED; 943 } 944 945 static void debounce_work_func(struct work_struct *work) 946 { 947 struct gpio_v2_line_event le; 948 struct line *line = container_of(work, struct line, work.work); 949 struct linereq *lr; 950 u64 eflags, edflags = READ_ONCE(line->edflags); 951 int level = -1; 952 #ifdef CONFIG_HTE 953 int diff_seqno; 954 955 if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) 956 level = line->raw_level; 957 #endif 958 if (level < 0) 959 level = gpiod_get_raw_value_cansleep(line->desc); 960 if (level < 0) { 961 pr_debug_ratelimited("debouncer failed to read line value\n"); 962 return; 963 } 964 965 if (READ_ONCE(line->level) == level) 966 return; 967 968 WRITE_ONCE(line->level, level); 969 970 /* -- edge detection -- */ 971 eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS; 972 if (!eflags) 973 return; 974 975 /* switch from physical level to logical - if they differ */ 976 if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW) 977 level = !level; 978 979 /* ignore edges that are not being monitored */ 980 if (((eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) && !level) || 981 ((eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) && level)) 982 return; 983 984 /* Do not leak kernel stack to userspace */ 985 memset(&le, 0, sizeof(le)); 986 987 lr = line->req; 988 le.timestamp_ns = line_event_timestamp(line); 989 le.offset = gpio_chip_hwgpio(line->desc); 990 #ifdef CONFIG_HTE 991 if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) { 992 /* discard events except the last one */ 993 line->total_discard_seq -= 1; 994 diff_seqno = line->last_seqno - 
line->total_discard_seq - 995 line->line_seqno; 996 line->line_seqno = line->last_seqno - line->total_discard_seq; 997 le.line_seqno = line->line_seqno; 998 le.seqno = (lr->num_lines == 1) ? 999 le.line_seqno : atomic_add_return(diff_seqno, &lr->seqno); 1000 } else 1001 #endif /* CONFIG_HTE */ 1002 { 1003 line->line_seqno++; 1004 le.line_seqno = line->line_seqno; 1005 le.seqno = (lr->num_lines == 1) ? 1006 le.line_seqno : atomic_inc_return(&lr->seqno); 1007 } 1008 1009 le.id = line_event_id(level); 1010 1011 linereq_put_event(lr, &le); 1012 } 1013 1014 static int debounce_setup(struct line *line, unsigned int debounce_period_us) 1015 { 1016 unsigned long irqflags; 1017 int ret, level, irq; 1018 1019 /* try hardware */ 1020 ret = gpiod_set_debounce(line->desc, debounce_period_us); 1021 if (!ret) { 1022 line_set_debounce_period(line, debounce_period_us); 1023 return ret; 1024 } 1025 if (ret != -ENOTSUPP) 1026 return ret; 1027 1028 if (debounce_period_us) { 1029 /* setup software debounce */ 1030 level = gpiod_get_raw_value_cansleep(line->desc); 1031 if (level < 0) 1032 return level; 1033 1034 if (!(IS_ENABLED(CONFIG_HTE) && 1035 test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))) { 1036 irq = gpiod_to_irq(line->desc); 1037 if (irq < 0) 1038 return -ENXIO; 1039 1040 irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING; 1041 ret = request_irq(irq, debounce_irq_handler, irqflags, 1042 line->req->label, line); 1043 if (ret) 1044 return ret; 1045 line->irq = irq; 1046 } else { 1047 ret = hte_edge_setup(line, GPIO_V2_LINE_FLAG_EDGE_BOTH); 1048 if (ret) 1049 return ret; 1050 } 1051 1052 WRITE_ONCE(line->level, level); 1053 WRITE_ONCE(line->sw_debounced, 1); 1054 } 1055 return 0; 1056 } 1057 1058 static bool gpio_v2_line_config_debounced(struct gpio_v2_line_config *lc, 1059 unsigned int line_idx) 1060 { 1061 unsigned int i; 1062 u64 mask = BIT_ULL(line_idx); 1063 1064 for (i = 0; i < lc->num_attrs; i++) { 1065 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) && 1066 (lc->attrs[i].mask & mask)) 1067 return true; 1068 } 1069 return false; 1070 } 1071 1072 static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc, 1073 unsigned int line_idx) 1074 { 1075 unsigned int i; 1076 u64 mask = BIT_ULL(line_idx); 1077 1078 for (i = 0; i < lc->num_attrs; i++) { 1079 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) && 1080 (lc->attrs[i].mask & mask)) 1081 return lc->attrs[i].attr.debounce_period_us; 1082 } 1083 return 0; 1084 } 1085 1086 static void edge_detector_stop(struct line *line) 1087 { 1088 if (line->irq) { 1089 free_irq(line->irq, line); 1090 line->irq = 0; 1091 } 1092 1093 #ifdef CONFIG_HTE 1094 if (READ_ONCE(line->edflags) & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) 1095 hte_ts_put(&line->hdesc); 1096 #endif 1097 1098 cancel_delayed_work_sync(&line->work); 1099 WRITE_ONCE(line->sw_debounced, 0); 1100 WRITE_ONCE(line->edflags, 0); 1101 line_set_debounce_period(line, 0); 1102 /* do not change line->level - see comment in debounced_value() */ 1103 } 1104 1105 static int edge_detector_setup(struct line *line, 1106 struct gpio_v2_line_config *lc, 1107 unsigned int line_idx, u64 edflags) 1108 { 1109 u32 debounce_period_us; 1110 unsigned long irqflags = 0; 1111 u64 eflags; 1112 int irq, ret; 1113 1114 eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS; 1115 if (eflags && !kfifo_initialized(&line->req->events)) { 1116 ret = kfifo_alloc(&line->req->events, 1117 line->req->event_buffer_size, GFP_KERNEL); 1118 if (ret) 1119 return ret; 1120 } 1121 if (gpio_v2_line_config_debounced(lc, 
line_idx)) { 1122 debounce_period_us = gpio_v2_line_config_debounce_period(lc, line_idx); 1123 ret = debounce_setup(line, debounce_period_us); 1124 if (ret) 1125 return ret; 1126 line_set_debounce_period(line, debounce_period_us); 1127 } 1128 1129 /* detection disabled or sw debouncer will provide edge detection */ 1130 if (!eflags || READ_ONCE(line->sw_debounced)) 1131 return 0; 1132 1133 if (IS_ENABLED(CONFIG_HTE) && 1134 (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)) 1135 return hte_edge_setup(line, edflags); 1136 1137 irq = gpiod_to_irq(line->desc); 1138 if (irq < 0) 1139 return -ENXIO; 1140 1141 if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING) 1142 irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ? 1143 IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING; 1144 if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING) 1145 irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ? 1146 IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING; 1147 irqflags |= IRQF_ONESHOT; 1148 1149 /* Request a thread to read the events */ 1150 ret = request_threaded_irq(irq, edge_irq_handler, edge_irq_thread, 1151 irqflags, line->req->label, line); 1152 if (ret) 1153 return ret; 1154 1155 line->irq = irq; 1156 return 0; 1157 } 1158 1159 static int edge_detector_update(struct line *line, 1160 struct gpio_v2_line_config *lc, 1161 unsigned int line_idx, u64 edflags) 1162 { 1163 u64 active_edflags = READ_ONCE(line->edflags); 1164 unsigned int debounce_period_us = 1165 gpio_v2_line_config_debounce_period(lc, line_idx); 1166 1167 if ((active_edflags == edflags) && 1168 (READ_ONCE(line->debounce_period_us) == debounce_period_us)) 1169 return 0; 1170 1171 /* sw debounced and still will be...*/ 1172 if (debounce_period_us && READ_ONCE(line->sw_debounced)) { 1173 line_set_debounce_period(line, debounce_period_us); 1174 return 0; 1175 } 1176 1177 /* reconfiguring edge detection or sw debounce being disabled */ 1178 if ((line->irq && !READ_ONCE(line->sw_debounced)) || 1179 (active_edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) || 1180 (!debounce_period_us && READ_ONCE(line->sw_debounced))) 1181 edge_detector_stop(line); 1182 1183 return edge_detector_setup(line, lc, line_idx, edflags); 1184 } 1185 1186 static u64 gpio_v2_line_config_flags(struct gpio_v2_line_config *lc, 1187 unsigned int line_idx) 1188 { 1189 unsigned int i; 1190 u64 mask = BIT_ULL(line_idx); 1191 1192 for (i = 0; i < lc->num_attrs; i++) { 1193 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_FLAGS) && 1194 (lc->attrs[i].mask & mask)) 1195 return lc->attrs[i].attr.flags; 1196 } 1197 return lc->flags; 1198 } 1199 1200 static int gpio_v2_line_config_output_value(struct gpio_v2_line_config *lc, 1201 unsigned int line_idx) 1202 { 1203 unsigned int i; 1204 u64 mask = BIT_ULL(line_idx); 1205 1206 for (i = 0; i < lc->num_attrs; i++) { 1207 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES) && 1208 (lc->attrs[i].mask & mask)) 1209 return !!(lc->attrs[i].attr.values & mask); 1210 } 1211 return 0; 1212 } 1213 1214 static int gpio_v2_line_flags_validate(u64 flags) 1215 { 1216 /* Return an error if an unknown flag is set */ 1217 if (flags & ~GPIO_V2_LINE_VALID_FLAGS) 1218 return -EINVAL; 1219 1220 if (!IS_ENABLED(CONFIG_HTE) && 1221 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)) 1222 return -EOPNOTSUPP; 1223 1224 /* 1225 * Do not allow both INPUT and OUTPUT flags to be set as they are 1226 * contradictory. 
1227 */ 1228 if ((flags & GPIO_V2_LINE_FLAG_INPUT) && 1229 (flags & GPIO_V2_LINE_FLAG_OUTPUT)) 1230 return -EINVAL; 1231 1232 /* Only allow one event clock source */ 1233 if (IS_ENABLED(CONFIG_HTE) && 1234 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME) && 1235 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)) 1236 return -EINVAL; 1237 1238 /* Edge detection requires explicit input. */ 1239 if ((flags & GPIO_V2_LINE_EDGE_FLAGS) && 1240 !(flags & GPIO_V2_LINE_FLAG_INPUT)) 1241 return -EINVAL; 1242 1243 /* 1244 * Do not allow OPEN_SOURCE and OPEN_DRAIN flags in a single 1245 * request. If the hardware actually supports enabling both at the 1246 * same time the electrical result would be disastrous. 1247 */ 1248 if ((flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN) && 1249 (flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE)) 1250 return -EINVAL; 1251 1252 /* Drive requires explicit output direction. */ 1253 if ((flags & GPIO_V2_LINE_DRIVE_FLAGS) && 1254 !(flags & GPIO_V2_LINE_FLAG_OUTPUT)) 1255 return -EINVAL; 1256 1257 /* Bias requires explicit direction. */ 1258 if ((flags & GPIO_V2_LINE_BIAS_FLAGS) && 1259 !(flags & GPIO_V2_LINE_DIRECTION_FLAGS)) 1260 return -EINVAL; 1261 1262 /* Only one bias flag can be set. */ 1263 if (((flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED) && 1264 (flags & (GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN | 1265 GPIO_V2_LINE_FLAG_BIAS_PULL_UP))) || 1266 ((flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN) && 1267 (flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP))) 1268 return -EINVAL; 1269 1270 return 0; 1271 } 1272 1273 static int gpio_v2_line_config_validate(struct gpio_v2_line_config *lc, 1274 unsigned int num_lines) 1275 { 1276 unsigned int i; 1277 u64 flags; 1278 int ret; 1279 1280 if (lc->num_attrs > GPIO_V2_LINE_NUM_ATTRS_MAX) 1281 return -EINVAL; 1282 1283 if (memchr_inv(lc->padding, 0, sizeof(lc->padding))) 1284 return -EINVAL; 1285 1286 for (i = 0; i < num_lines; i++) { 1287 flags = gpio_v2_line_config_flags(lc, i); 1288 ret = gpio_v2_line_flags_validate(flags); 1289 if (ret) 1290 return ret; 1291 1292 /* debounce requires explicit input */ 1293 if (gpio_v2_line_config_debounced(lc, i) && 1294 !(flags & GPIO_V2_LINE_FLAG_INPUT)) 1295 return -EINVAL; 1296 } 1297 return 0; 1298 } 1299 1300 static void gpio_v2_line_config_flags_to_desc_flags(u64 flags, 1301 unsigned long *flagsp) 1302 { 1303 assign_bit(FLAG_ACTIVE_LOW, flagsp, 1304 flags & GPIO_V2_LINE_FLAG_ACTIVE_LOW); 1305 1306 if (flags & GPIO_V2_LINE_FLAG_OUTPUT) 1307 set_bit(FLAG_IS_OUT, flagsp); 1308 else if (flags & GPIO_V2_LINE_FLAG_INPUT) 1309 clear_bit(FLAG_IS_OUT, flagsp); 1310 1311 assign_bit(FLAG_EDGE_RISING, flagsp, 1312 flags & GPIO_V2_LINE_FLAG_EDGE_RISING); 1313 assign_bit(FLAG_EDGE_FALLING, flagsp, 1314 flags & GPIO_V2_LINE_FLAG_EDGE_FALLING); 1315 1316 assign_bit(FLAG_OPEN_DRAIN, flagsp, 1317 flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN); 1318 assign_bit(FLAG_OPEN_SOURCE, flagsp, 1319 flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE); 1320 1321 assign_bit(FLAG_PULL_UP, flagsp, 1322 flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP); 1323 assign_bit(FLAG_PULL_DOWN, flagsp, 1324 flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN); 1325 assign_bit(FLAG_BIAS_DISABLE, flagsp, 1326 flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED); 1327 1328 assign_bit(FLAG_EVENT_CLOCK_REALTIME, flagsp, 1329 flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME); 1330 assign_bit(FLAG_EVENT_CLOCK_HTE, flagsp, 1331 flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE); 1332 } 1333 1334 static long linereq_get_values(struct linereq *lr, void __user *ip) 1335 { 1336 struct gpio_v2_line_values lv; 1337 DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX); 
1338 struct gpio_desc **descs; 1339 unsigned int i, didx, num_get; 1340 bool val; 1341 int ret; 1342 1343 /* NOTE: It's ok to read values of output lines. */ 1344 if (copy_from_user(&lv, ip, sizeof(lv))) 1345 return -EFAULT; 1346 1347 /* 1348 * gpiod_get_array_value_complex() requires compacted desc and val 1349 * arrays, rather than the sparse ones in lv. 1350 * Calculation of num_get and construction of the desc array is 1351 * optimized to avoid allocation for the desc array for the common 1352 * num_get == 1 case. 1353 */ 1354 /* scan requested lines to calculate the subset to get */ 1355 for (num_get = 0, i = 0; i < lr->num_lines; i++) { 1356 if (lv.mask & BIT_ULL(i)) { 1357 num_get++; 1358 /* capture desc for the num_get == 1 case */ 1359 descs = &lr->lines[i].desc; 1360 } 1361 } 1362 1363 if (num_get == 0) 1364 return -EINVAL; 1365 1366 if (num_get != 1) { 1367 /* build compacted desc array */ 1368 descs = kmalloc_array(num_get, sizeof(*descs), GFP_KERNEL); 1369 if (!descs) 1370 return -ENOMEM; 1371 for (didx = 0, i = 0; i < lr->num_lines; i++) { 1372 if (lv.mask & BIT_ULL(i)) { 1373 descs[didx] = lr->lines[i].desc; 1374 didx++; 1375 } 1376 } 1377 } 1378 ret = gpiod_get_array_value_complex(false, true, num_get, 1379 descs, NULL, vals); 1380 1381 if (num_get != 1) 1382 kfree(descs); 1383 if (ret) 1384 return ret; 1385 1386 lv.bits = 0; 1387 for (didx = 0, i = 0; i < lr->num_lines; i++) { 1388 /* unpack compacted vals for the response */ 1389 if (lv.mask & BIT_ULL(i)) { 1390 if (lr->lines[i].sw_debounced) 1391 val = debounced_value(&lr->lines[i]); 1392 else 1393 val = test_bit(didx, vals); 1394 if (val) 1395 lv.bits |= BIT_ULL(i); 1396 didx++; 1397 } 1398 } 1399 1400 if (copy_to_user(ip, &lv, sizeof(lv))) 1401 return -EFAULT; 1402 1403 return 0; 1404 } 1405 1406 static long linereq_set_values(struct linereq *lr, void __user *ip) 1407 { 1408 DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX); 1409 struct gpio_v2_line_values lv; 1410 struct gpio_desc **descs; 1411 unsigned int i, didx, num_set; 1412 int ret; 1413 1414 if (copy_from_user(&lv, ip, sizeof(lv))) 1415 return -EFAULT; 1416 1417 guard(mutex)(&lr->config_mutex); 1418 1419 /* 1420 * gpiod_set_array_value_complex() requires compacted desc and val 1421 * arrays, rather than the sparse ones in lv. 1422 * Calculation of num_set and construction of the descs and vals arrays 1423 * is optimized to minimize scanning the lv->mask, and to avoid 1424 * allocation for the desc array for the common num_set == 1 case. 
1425 */ 1426 bitmap_zero(vals, GPIO_V2_LINES_MAX); 1427 /* scan requested lines to determine the subset to be set */ 1428 for (num_set = 0, i = 0; i < lr->num_lines; i++) { 1429 if (lv.mask & BIT_ULL(i)) { 1430 /* setting inputs is not allowed */ 1431 if (!test_bit(FLAG_IS_OUT, &lr->lines[i].desc->flags)) 1432 return -EPERM; 1433 /* add to compacted values */ 1434 if (lv.bits & BIT_ULL(i)) 1435 __set_bit(num_set, vals); 1436 num_set++; 1437 /* capture desc for the num_set == 1 case */ 1438 descs = &lr->lines[i].desc; 1439 } 1440 } 1441 if (num_set == 0) 1442 return -EINVAL; 1443 1444 if (num_set != 1) { 1445 /* build compacted desc array */ 1446 descs = kmalloc_array(num_set, sizeof(*descs), GFP_KERNEL); 1447 if (!descs) 1448 return -ENOMEM; 1449 for (didx = 0, i = 0; i < lr->num_lines; i++) { 1450 if (lv.mask & BIT_ULL(i)) { 1451 descs[didx] = lr->lines[i].desc; 1452 didx++; 1453 } 1454 } 1455 } 1456 ret = gpiod_set_array_value_complex(false, true, num_set, 1457 descs, NULL, vals); 1458 1459 if (num_set != 1) 1460 kfree(descs); 1461 return ret; 1462 } 1463 1464 static long linereq_set_config(struct linereq *lr, void __user *ip) 1465 { 1466 struct gpio_v2_line_config lc; 1467 struct gpio_desc *desc; 1468 struct line *line; 1469 unsigned int i; 1470 u64 flags, edflags; 1471 int ret; 1472 1473 if (copy_from_user(&lc, ip, sizeof(lc))) 1474 return -EFAULT; 1475 1476 ret = gpio_v2_line_config_validate(&lc, lr->num_lines); 1477 if (ret) 1478 return ret; 1479 1480 guard(mutex)(&lr->config_mutex); 1481 1482 for (i = 0; i < lr->num_lines; i++) { 1483 line = &lr->lines[i]; 1484 desc = lr->lines[i].desc; 1485 flags = gpio_v2_line_config_flags(&lc, i); 1486 gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags); 1487 edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS; 1488 /* 1489 * Lines have to be requested explicitly for input 1490 * or output, else the line will be treated "as is". 
1491 */ 1492 if (flags & GPIO_V2_LINE_FLAG_OUTPUT) { 1493 int val = gpio_v2_line_config_output_value(&lc, i); 1494 1495 edge_detector_stop(line); 1496 ret = gpiod_direction_output(desc, val); 1497 if (ret) 1498 return ret; 1499 } else if (flags & GPIO_V2_LINE_FLAG_INPUT) { 1500 ret = gpiod_direction_input(desc); 1501 if (ret) 1502 return ret; 1503 1504 ret = edge_detector_update(line, &lc, i, edflags); 1505 if (ret) 1506 return ret; 1507 } 1508 1509 WRITE_ONCE(line->edflags, edflags); 1510 1511 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG); 1512 } 1513 return 0; 1514 } 1515 1516 static long linereq_ioctl(struct file *file, unsigned int cmd, 1517 unsigned long arg) 1518 { 1519 struct linereq *lr = file->private_data; 1520 void __user *ip = (void __user *)arg; 1521 1522 guard(srcu)(&lr->gdev->srcu); 1523 1524 if (!rcu_access_pointer(lr->gdev->chip)) 1525 return -ENODEV; 1526 1527 switch (cmd) { 1528 case GPIO_V2_LINE_GET_VALUES_IOCTL: 1529 return linereq_get_values(lr, ip); 1530 case GPIO_V2_LINE_SET_VALUES_IOCTL: 1531 return linereq_set_values(lr, ip); 1532 case GPIO_V2_LINE_SET_CONFIG_IOCTL: 1533 return linereq_set_config(lr, ip); 1534 default: 1535 return -EINVAL; 1536 } 1537 } 1538 1539 #ifdef CONFIG_COMPAT 1540 static long linereq_ioctl_compat(struct file *file, unsigned int cmd, 1541 unsigned long arg) 1542 { 1543 return linereq_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); 1544 } 1545 #endif 1546 1547 static __poll_t linereq_poll(struct file *file, 1548 struct poll_table_struct *wait) 1549 { 1550 struct linereq *lr = file->private_data; 1551 __poll_t events = 0; 1552 1553 guard(srcu)(&lr->gdev->srcu); 1554 1555 if (!rcu_access_pointer(lr->gdev->chip)) 1556 return EPOLLHUP | EPOLLERR; 1557 1558 poll_wait(file, &lr->wait, wait); 1559 1560 if (!kfifo_is_empty_spinlocked_noirqsave(&lr->events, 1561 &lr->wait.lock)) 1562 events = EPOLLIN | EPOLLRDNORM; 1563 1564 return events; 1565 } 1566 1567 static ssize_t linereq_read(struct file *file, char __user *buf, 1568 size_t count, loff_t *f_ps) 1569 { 1570 struct linereq *lr = file->private_data; 1571 struct gpio_v2_line_event le; 1572 ssize_t bytes_read = 0; 1573 int ret; 1574 1575 guard(srcu)(&lr->gdev->srcu); 1576 1577 if (!rcu_access_pointer(lr->gdev->chip)) 1578 return -ENODEV; 1579 1580 if (count < sizeof(le)) 1581 return -EINVAL; 1582 1583 do { 1584 scoped_guard(spinlock, &lr->wait.lock) { 1585 if (kfifo_is_empty(&lr->events)) { 1586 if (bytes_read) 1587 return bytes_read; 1588 1589 if (file->f_flags & O_NONBLOCK) 1590 return -EAGAIN; 1591 1592 ret = wait_event_interruptible_locked(lr->wait, 1593 !kfifo_is_empty(&lr->events)); 1594 if (ret) 1595 return ret; 1596 } 1597 1598 ret = kfifo_out(&lr->events, &le, 1); 1599 } 1600 if (ret != 1) { 1601 /* 1602 * This should never happen - we were holding the 1603 * lock from the moment we learned the fifo is no 1604 * longer empty until now. 
1605 */ 1606 ret = -EIO; 1607 break; 1608 } 1609 1610 if (copy_to_user(buf + bytes_read, &le, sizeof(le))) 1611 return -EFAULT; 1612 bytes_read += sizeof(le); 1613 } while (count >= bytes_read + sizeof(le)); 1614 1615 return bytes_read; 1616 } 1617 1618 static void linereq_free(struct linereq *lr) 1619 { 1620 struct line *line; 1621 unsigned int i; 1622 1623 if (lr->device_unregistered_nb.notifier_call) 1624 blocking_notifier_chain_unregister(&lr->gdev->device_notifier, 1625 &lr->device_unregistered_nb); 1626 1627 for (i = 0; i < lr->num_lines; i++) { 1628 line = &lr->lines[i]; 1629 if (!line->desc) 1630 continue; 1631 1632 edge_detector_stop(line); 1633 if (line_has_supinfo(line)) 1634 supinfo_erase(line); 1635 gpiod_free(line->desc); 1636 } 1637 kfifo_free(&lr->events); 1638 kfree(lr->label); 1639 gpio_device_put(lr->gdev); 1640 kvfree(lr); 1641 } 1642 1643 static int linereq_release(struct inode *inode, struct file *file) 1644 { 1645 struct linereq *lr = file->private_data; 1646 1647 linereq_free(lr); 1648 return 0; 1649 } 1650 1651 #ifdef CONFIG_PROC_FS 1652 static void linereq_show_fdinfo(struct seq_file *out, struct file *file) 1653 { 1654 struct linereq *lr = file->private_data; 1655 struct device *dev = &lr->gdev->dev; 1656 u16 i; 1657 1658 seq_printf(out, "gpio-chip:\t%s\n", dev_name(dev)); 1659 1660 for (i = 0; i < lr->num_lines; i++) 1661 seq_printf(out, "gpio-line:\t%d\n", 1662 gpio_chip_hwgpio(lr->lines[i].desc)); 1663 } 1664 #endif 1665 1666 static const struct file_operations line_fileops = { 1667 .release = linereq_release, 1668 .read = linereq_read, 1669 .poll = linereq_poll, 1670 .owner = THIS_MODULE, 1671 .llseek = noop_llseek, 1672 .unlocked_ioctl = linereq_ioctl, 1673 #ifdef CONFIG_COMPAT 1674 .compat_ioctl = linereq_ioctl_compat, 1675 #endif 1676 #ifdef CONFIG_PROC_FS 1677 .show_fdinfo = linereq_show_fdinfo, 1678 #endif 1679 }; 1680 1681 static int linereq_create(struct gpio_device *gdev, void __user *ip) 1682 { 1683 struct gpio_v2_line_request ulr; 1684 struct gpio_v2_line_config *lc; 1685 struct linereq *lr; 1686 struct file *file; 1687 u64 flags, edflags; 1688 unsigned int i; 1689 int fd, ret; 1690 1691 if (copy_from_user(&ulr, ip, sizeof(ulr))) 1692 return -EFAULT; 1693 1694 if ((ulr.num_lines == 0) || (ulr.num_lines > GPIO_V2_LINES_MAX)) 1695 return -EINVAL; 1696 1697 if (memchr_inv(ulr.padding, 0, sizeof(ulr.padding))) 1698 return -EINVAL; 1699 1700 lc = &ulr.config; 1701 ret = gpio_v2_line_config_validate(lc, ulr.num_lines); 1702 if (ret) 1703 return ret; 1704 1705 lr = kvzalloc(struct_size(lr, lines, ulr.num_lines), GFP_KERNEL); 1706 if (!lr) 1707 return -ENOMEM; 1708 lr->num_lines = ulr.num_lines; 1709 1710 lr->gdev = gpio_device_get(gdev); 1711 1712 for (i = 0; i < ulr.num_lines; i++) { 1713 lr->lines[i].req = lr; 1714 WRITE_ONCE(lr->lines[i].sw_debounced, 0); 1715 INIT_DELAYED_WORK(&lr->lines[i].work, debounce_work_func); 1716 } 1717 1718 if (ulr.consumer[0] != '\0') { 1719 /* label is only initialized if consumer is set */ 1720 lr->label = kstrndup(ulr.consumer, sizeof(ulr.consumer) - 1, 1721 GFP_KERNEL); 1722 if (!lr->label) { 1723 ret = -ENOMEM; 1724 goto out_free_linereq; 1725 } 1726 } 1727 1728 mutex_init(&lr->config_mutex); 1729 init_waitqueue_head(&lr->wait); 1730 lr->event_buffer_size = ulr.event_buffer_size; 1731 if (lr->event_buffer_size == 0) 1732 lr->event_buffer_size = ulr.num_lines * 16; 1733 else if (lr->event_buffer_size > GPIO_V2_LINES_MAX * 16) 1734 lr->event_buffer_size = GPIO_V2_LINES_MAX * 16; 1735 1736 atomic_set(&lr->seqno, 0); 1737 
1738 /* Request each GPIO */ 1739 for (i = 0; i < ulr.num_lines; i++) { 1740 u32 offset = ulr.offsets[i]; 1741 struct gpio_desc *desc = gpio_device_get_desc(gdev, offset); 1742 1743 if (IS_ERR(desc)) { 1744 ret = PTR_ERR(desc); 1745 goto out_free_linereq; 1746 } 1747 1748 ret = gpiod_request_user(desc, lr->label); 1749 if (ret) 1750 goto out_free_linereq; 1751 1752 lr->lines[i].desc = desc; 1753 flags = gpio_v2_line_config_flags(lc, i); 1754 gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags); 1755 1756 ret = gpiod_set_transitory(desc, false); 1757 if (ret < 0) 1758 goto out_free_linereq; 1759 1760 edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS; 1761 /* 1762 * Lines have to be requested explicitly for input 1763 * or output, else the line will be treated "as is". 1764 */ 1765 if (flags & GPIO_V2_LINE_FLAG_OUTPUT) { 1766 int val = gpio_v2_line_config_output_value(lc, i); 1767 1768 ret = gpiod_direction_output(desc, val); 1769 if (ret) 1770 goto out_free_linereq; 1771 } else if (flags & GPIO_V2_LINE_FLAG_INPUT) { 1772 ret = gpiod_direction_input(desc); 1773 if (ret) 1774 goto out_free_linereq; 1775 1776 ret = edge_detector_setup(&lr->lines[i], lc, i, 1777 edflags); 1778 if (ret) 1779 goto out_free_linereq; 1780 } 1781 1782 lr->lines[i].edflags = edflags; 1783 1784 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED); 1785 1786 dev_dbg(&gdev->dev, "registered chardev handle for line %d\n", 1787 offset); 1788 } 1789 1790 lr->device_unregistered_nb.notifier_call = linereq_unregistered_notify; 1791 ret = blocking_notifier_chain_register(&gdev->device_notifier, 1792 &lr->device_unregistered_nb); 1793 if (ret) 1794 goto out_free_linereq; 1795 1796 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC); 1797 if (fd < 0) { 1798 ret = fd; 1799 goto out_free_linereq; 1800 } 1801 1802 file = anon_inode_getfile("gpio-line", &line_fileops, lr, 1803 O_RDONLY | O_CLOEXEC); 1804 if (IS_ERR(file)) { 1805 ret = PTR_ERR(file); 1806 goto out_put_unused_fd; 1807 } 1808 1809 ulr.fd = fd; 1810 if (copy_to_user(ip, &ulr, sizeof(ulr))) { 1811 /* 1812 * fput() will trigger the release() callback, so do not go onto 1813 * the regular error cleanup path here. 
1814 */ 1815 fput(file); 1816 put_unused_fd(fd); 1817 return -EFAULT; 1818 } 1819 1820 fd_install(fd, file); 1821 1822 dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n", 1823 lr->num_lines); 1824 1825 return 0; 1826 1827 out_put_unused_fd: 1828 put_unused_fd(fd); 1829 out_free_linereq: 1830 linereq_free(lr); 1831 return ret; 1832 } 1833 1834 #ifdef CONFIG_GPIO_CDEV_V1 1835 1836 /* 1837 * GPIO line event management 1838 */ 1839 1840 /** 1841 * struct lineevent_state - contains the state of a userspace event 1842 * @gdev: the GPIO device the event pertains to 1843 * @label: consumer label used to tag descriptors 1844 * @desc: the GPIO descriptor held by this event 1845 * @eflags: the event flags this line was requested with 1846 * @irq: the interrupt that trigger in response to events on this GPIO 1847 * @wait: wait queue that handles blocking reads of events 1848 * @device_unregistered_nb: notifier block for receiving gdev unregister events 1849 * @events: KFIFO for the GPIO events 1850 * @timestamp: cache for the timestamp storing it between hardirq 1851 * and IRQ thread, used to bring the timestamp close to the actual 1852 * event 1853 */ 1854 struct lineevent_state { 1855 struct gpio_device *gdev; 1856 const char *label; 1857 struct gpio_desc *desc; 1858 u32 eflags; 1859 int irq; 1860 wait_queue_head_t wait; 1861 struct notifier_block device_unregistered_nb; 1862 DECLARE_KFIFO(events, struct gpioevent_data, 16); 1863 u64 timestamp; 1864 }; 1865 1866 #define GPIOEVENT_REQUEST_VALID_FLAGS \ 1867 (GPIOEVENT_REQUEST_RISING_EDGE | \ 1868 GPIOEVENT_REQUEST_FALLING_EDGE) 1869 1870 static __poll_t lineevent_poll(struct file *file, 1871 struct poll_table_struct *wait) 1872 { 1873 struct lineevent_state *le = file->private_data; 1874 __poll_t events = 0; 1875 1876 guard(srcu)(&le->gdev->srcu); 1877 1878 if (!rcu_access_pointer(le->gdev->chip)) 1879 return EPOLLHUP | EPOLLERR; 1880 1881 poll_wait(file, &le->wait, wait); 1882 1883 if (!kfifo_is_empty_spinlocked_noirqsave(&le->events, &le->wait.lock)) 1884 events = EPOLLIN | EPOLLRDNORM; 1885 1886 return events; 1887 } 1888 1889 static int lineevent_unregistered_notify(struct notifier_block *nb, 1890 unsigned long action, void *data) 1891 { 1892 struct lineevent_state *le = container_of(nb, struct lineevent_state, 1893 device_unregistered_nb); 1894 1895 wake_up_poll(&le->wait, EPOLLIN | EPOLLERR); 1896 1897 return NOTIFY_OK; 1898 } 1899 1900 struct compat_gpioeevent_data { 1901 compat_u64 timestamp; 1902 u32 id; 1903 }; 1904 1905 static ssize_t lineevent_read(struct file *file, char __user *buf, 1906 size_t count, loff_t *f_ps) 1907 { 1908 struct lineevent_state *le = file->private_data; 1909 struct gpioevent_data ge; 1910 ssize_t bytes_read = 0; 1911 ssize_t ge_size; 1912 int ret; 1913 1914 guard(srcu)(&le->gdev->srcu); 1915 1916 if (!rcu_access_pointer(le->gdev->chip)) 1917 return -ENODEV; 1918 1919 /* 1920 * When compatible system call is being used the struct gpioevent_data, 1921 * in case of at least ia32, has different size due to the alignment 1922 * differences. Because we have first member 64 bits followed by one of 1923 * 32 bits there is no gap between them. The only difference is the 1924 * padding at the end of the data structure. Hence, we calculate the 1925 * actual sizeof() and pass this as an argument to copy_to_user() to 1926 * drop unneeded bytes from the output. 
1927 */ 1928 if (compat_need_64bit_alignment_fixup()) 1929 ge_size = sizeof(struct compat_gpioeevent_data); 1930 else 1931 ge_size = sizeof(struct gpioevent_data); 1932 if (count < ge_size) 1933 return -EINVAL; 1934 1935 do { 1936 scoped_guard(spinlock, &le->wait.lock) { 1937 if (kfifo_is_empty(&le->events)) { 1938 if (bytes_read) 1939 return bytes_read; 1940 1941 if (file->f_flags & O_NONBLOCK) 1942 return -EAGAIN; 1943 1944 ret = wait_event_interruptible_locked(le->wait, 1945 !kfifo_is_empty(&le->events)); 1946 if (ret) 1947 return ret; 1948 } 1949 1950 ret = kfifo_out(&le->events, &ge, 1); 1951 } 1952 if (ret != 1) { 1953 /* 1954 * This should never happen - we were holding the lock 1955 * from the moment we learned the fifo is no longer 1956 * empty until now. 1957 */ 1958 ret = -EIO; 1959 break; 1960 } 1961 1962 if (copy_to_user(buf + bytes_read, &ge, ge_size)) 1963 return -EFAULT; 1964 bytes_read += ge_size; 1965 } while (count >= bytes_read + ge_size); 1966 1967 return bytes_read; 1968 } 1969 1970 static void lineevent_free(struct lineevent_state *le) 1971 { 1972 if (le->device_unregistered_nb.notifier_call) 1973 blocking_notifier_chain_unregister(&le->gdev->device_notifier, 1974 &le->device_unregistered_nb); 1975 if (le->irq) 1976 free_irq(le->irq, le); 1977 if (le->desc) 1978 gpiod_free(le->desc); 1979 kfree(le->label); 1980 gpio_device_put(le->gdev); 1981 kfree(le); 1982 } 1983 1984 static int lineevent_release(struct inode *inode, struct file *file) 1985 { 1986 lineevent_free(file->private_data); 1987 return 0; 1988 } 1989 1990 static long lineevent_ioctl(struct file *file, unsigned int cmd, 1991 unsigned long arg) 1992 { 1993 struct lineevent_state *le = file->private_data; 1994 void __user *ip = (void __user *)arg; 1995 struct gpiohandle_data ghd; 1996 1997 guard(srcu)(&le->gdev->srcu); 1998 1999 if (!rcu_access_pointer(le->gdev->chip)) 2000 return -ENODEV; 2001 2002 /* 2003 * We can get the value for an event line but not set it, 2004 * because it is input by definition. 2005 */ 2006 if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) { 2007 int val; 2008 2009 memset(&ghd, 0, sizeof(ghd)); 2010 2011 val = gpiod_get_value_cansleep(le->desc); 2012 if (val < 0) 2013 return val; 2014 ghd.values[0] = val; 2015 2016 if (copy_to_user(ip, &ghd, sizeof(ghd))) 2017 return -EFAULT; 2018 2019 return 0; 2020 } 2021 return -EINVAL; 2022 } 2023 2024 #ifdef CONFIG_COMPAT 2025 static long lineevent_ioctl_compat(struct file *file, unsigned int cmd, 2026 unsigned long arg) 2027 { 2028 return lineevent_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); 2029 } 2030 #endif 2031 2032 static const struct file_operations lineevent_fileops = { 2033 .release = lineevent_release, 2034 .read = lineevent_read, 2035 .poll = lineevent_poll, 2036 .owner = THIS_MODULE, 2037 .llseek = noop_llseek, 2038 .unlocked_ioctl = lineevent_ioctl, 2039 #ifdef CONFIG_COMPAT 2040 .compat_ioctl = lineevent_ioctl_compat, 2041 #endif 2042 }; 2043 2044 static irqreturn_t lineevent_irq_thread(int irq, void *p) 2045 { 2046 struct lineevent_state *le = p; 2047 struct gpioevent_data ge; 2048 int ret; 2049 2050 /* Do not leak kernel stack to userspace */ 2051 memset(&ge, 0, sizeof(ge)); 2052 2053 /* 2054 * We may be running from a nested threaded interrupt in which case 2055 * we didn't get the timestamp from lineevent_irq_handler(). 
static irqreturn_t lineevent_irq_thread(int irq, void *p)
{
	struct lineevent_state *le = p;
	struct gpioevent_data ge;
	int ret;

	/* Do not leak kernel stack to userspace */
	memset(&ge, 0, sizeof(ge));

	/*
	 * We may be running from a nested threaded interrupt in which case
	 * we didn't get the timestamp from lineevent_irq_handler().
	 */
	if (!le->timestamp)
		ge.timestamp = ktime_get_ns();
	else
		ge.timestamp = le->timestamp;

	if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
	    && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
		int level = gpiod_get_value_cansleep(le->desc);

		if (level)
			/* Emit low-to-high event */
			ge.id = GPIOEVENT_EVENT_RISING_EDGE;
		else
			/* Emit high-to-low event */
			ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
	} else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
		/* Emit low-to-high event */
		ge.id = GPIOEVENT_EVENT_RISING_EDGE;
	} else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
		/* Emit high-to-low event */
		ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
	} else {
		return IRQ_NONE;
	}

	ret = kfifo_in_spinlocked_noirqsave(&le->events, &ge,
					    1, &le->wait.lock);
	if (ret)
		wake_up_poll(&le->wait, EPOLLIN);
	else
		pr_debug_ratelimited("event FIFO is full - event dropped\n");

	return IRQ_HANDLED;
}

static irqreturn_t lineevent_irq_handler(int irq, void *p)
{
	struct lineevent_state *le = p;

	/*
	 * Just store the timestamp in hardirq context so we get it as
	 * close in time as possible to the actual event.
	 */
	le->timestamp = ktime_get_ns();

	return IRQ_WAKE_THREAD;
}
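/*
 * Implementation of GPIO_GET_LINEEVENT_IOCTL: validates the requested handle
 * and event flags, claims the line as an input, wires up a threaded interrupt
 * for the requested edges and hands userspace an anonymous file descriptor
 * from which struct gpioevent_data records can be read.
 */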
static int lineevent_create(struct gpio_device *gdev, void __user *ip)
{
	struct gpioevent_request eventreq;
	struct lineevent_state *le;
	struct gpio_desc *desc;
	struct file *file;
	u32 offset;
	u32 lflags;
	u32 eflags;
	int fd;
	int ret;
	int irq, irqflags = 0;

	if (copy_from_user(&eventreq, ip, sizeof(eventreq)))
		return -EFAULT;

	offset = eventreq.lineoffset;
	lflags = eventreq.handleflags;
	eflags = eventreq.eventflags;

	desc = gpio_device_get_desc(gdev, offset);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	/* Return an error if an unknown flag is set */
	if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
	    (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS))
		return -EINVAL;

	/* This is just wrong: we don't look for events on output lines */
	if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
	    (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
	    (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
		return -EINVAL;

	/* Only one bias flag can be set. */
	if (((lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
	     (lflags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
			GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
	    ((lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
	     (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
		return -EINVAL;

	le = kzalloc(sizeof(*le), GFP_KERNEL);
	if (!le)
		return -ENOMEM;
	le->gdev = gpio_device_get(gdev);

	if (eventreq.consumer_label[0] != '\0') {
		/* label is only initialized if consumer_label is set */
		le->label = kstrndup(eventreq.consumer_label,
				     sizeof(eventreq.consumer_label) - 1,
				     GFP_KERNEL);
		if (!le->label) {
			ret = -ENOMEM;
			goto out_free_le;
		}
	}

	ret = gpiod_request_user(desc, le->label);
	if (ret)
		goto out_free_le;
	le->desc = desc;
	le->eflags = eflags;

	linehandle_flags_to_desc_flags(lflags, &desc->flags);

	ret = gpiod_direction_input(desc);
	if (ret)
		goto out_free_le;

	gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);

	irq = gpiod_to_irq(desc);
	if (irq <= 0) {
		ret = -ENODEV;
		goto out_free_le;
	}

	if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
		irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
			IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
	if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
		irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
			IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
	irqflags |= IRQF_ONESHOT;

	INIT_KFIFO(le->events);
	init_waitqueue_head(&le->wait);

	le->device_unregistered_nb.notifier_call = lineevent_unregistered_notify;
	ret = blocking_notifier_chain_register(&gdev->device_notifier,
					       &le->device_unregistered_nb);
	if (ret)
		goto out_free_le;

	/* Request a thread to read the events */
	ret = request_threaded_irq(irq,
				   lineevent_irq_handler,
				   lineevent_irq_thread,
				   irqflags,
				   le->label,
				   le);
	if (ret)
		goto out_free_le;

	le->irq = irq;

	fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto out_free_le;
	}

	file = anon_inode_getfile("gpio-event",
				  &lineevent_fileops,
				  le,
				  O_RDONLY | O_CLOEXEC);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto out_put_unused_fd;
	}

	eventreq.fd = fd;
	if (copy_to_user(ip, &eventreq, sizeof(eventreq))) {
		/*
		 * fput() will trigger the release() callback, so do not go onto
		 * the regular error cleanup path here.
		 */
		fput(file);
		put_unused_fd(fd);
		return -EFAULT;
	}

	fd_install(fd, file);

	return 0;

out_put_unused_fd:
	put_unused_fd(fd);
out_free_le:
	lineevent_free(le);
	return ret;
}
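/*
 * Translate v2 line info into the legacy v1 layout. Flags that only exist in
 * v2 (edge detection and event clock selection) have no v1 equivalent and are
 * simply dropped.
 */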
static void gpio_v2_line_info_to_v1(struct gpio_v2_line_info *info_v2,
				    struct gpioline_info *info_v1)
{
	u64 flagsv2 = info_v2->flags;

	memcpy(info_v1->name, info_v2->name, sizeof(info_v1->name));
	memcpy(info_v1->consumer, info_v2->consumer, sizeof(info_v1->consumer));
	info_v1->line_offset = info_v2->offset;
	info_v1->flags = 0;

	if (flagsv2 & GPIO_V2_LINE_FLAG_USED)
		info_v1->flags |= GPIOLINE_FLAG_KERNEL;

	if (flagsv2 & GPIO_V2_LINE_FLAG_OUTPUT)
		info_v1->flags |= GPIOLINE_FLAG_IS_OUT;

	if (flagsv2 & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
		info_v1->flags |= GPIOLINE_FLAG_ACTIVE_LOW;

	if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_DRAIN)
		info_v1->flags |= GPIOLINE_FLAG_OPEN_DRAIN;
	if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_SOURCE)
		info_v1->flags |= GPIOLINE_FLAG_OPEN_SOURCE;

	if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)
		info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_UP;
	if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN)
		info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_DOWN;
	if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_DISABLED)
		info_v1->flags |= GPIOLINE_FLAG_BIAS_DISABLE;
}

static void gpio_v2_line_info_changed_to_v1(
		struct gpio_v2_line_info_changed *lic_v2,
		struct gpioline_info_changed *lic_v1)
{
	memset(lic_v1, 0, sizeof(*lic_v1));
	gpio_v2_line_info_to_v1(&lic_v2->info, &lic_v1->info);
	lic_v1->timestamp = lic_v2->timestamp_ns;
	lic_v1->event_type = lic_v2->event_type;
}

#endif /* CONFIG_GPIO_CDEV_V1 */
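/**
 * gpio_desc_to_lineinfo() - populate uAPI v2 line info from a descriptor
 * @desc: the GPIO descriptor to report on
 * @info: the &struct gpio_v2_line_info to fill in
 *
 * Takes a snapshot of the descriptor state. If the backing chip is gone the
 * info is left untouched.
 */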
static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
				  struct gpio_v2_line_info *info)
{
	unsigned long dflags;
	const char *label;

	CLASS(gpio_chip_guard, guard)(desc);
	if (!guard.gc)
		return;

	memset(info, 0, sizeof(*info));
	info->offset = gpio_chip_hwgpio(desc);

	if (desc->name)
		strscpy(info->name, desc->name, sizeof(info->name));

	dflags = READ_ONCE(desc->flags);

	scoped_guard(srcu, &desc->srcu) {
		label = gpiod_get_label(desc);
		if (label && test_bit(FLAG_REQUESTED, &dflags))
			strscpy(info->consumer, label,
				sizeof(info->consumer));
	}

	/*
	 * Userspace only needs to know that the kernel is using this GPIO so
	 * it can't use it.
	 * The calculation of the used flag is slightly racy, as it may read
	 * desc, gc and pinctrl state without a lock covering all three at
	 * once. Worst case if the line is in transition and the calculation
	 * is inconsistent then it looks to the user like they performed the
	 * read on the other side of the transition - but that can always
	 * happen.
	 * The definitive test that a line is available to userspace is to
	 * request it.
	 */
	if (test_bit(FLAG_REQUESTED, &dflags) ||
	    test_bit(FLAG_IS_HOGGED, &dflags) ||
	    test_bit(FLAG_USED_AS_IRQ, &dflags) ||
	    test_bit(FLAG_EXPORT, &dflags) ||
	    test_bit(FLAG_SYSFS, &dflags) ||
	    !gpiochip_line_is_valid(guard.gc, info->offset) ||
	    !pinctrl_gpio_can_use_line(guard.gc, info->offset))
		info->flags |= GPIO_V2_LINE_FLAG_USED;

	if (test_bit(FLAG_IS_OUT, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_OUTPUT;
	else
		info->flags |= GPIO_V2_LINE_FLAG_INPUT;

	if (test_bit(FLAG_ACTIVE_LOW, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_ACTIVE_LOW;

	if (test_bit(FLAG_OPEN_DRAIN, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_OPEN_DRAIN;
	if (test_bit(FLAG_OPEN_SOURCE, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_OPEN_SOURCE;

	if (test_bit(FLAG_BIAS_DISABLE, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_BIAS_DISABLED;
	if (test_bit(FLAG_PULL_DOWN, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN;
	if (test_bit(FLAG_PULL_UP, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_UP;

	if (test_bit(FLAG_EDGE_RISING, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_EDGE_RISING;
	if (test_bit(FLAG_EDGE_FALLING, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_EDGE_FALLING;

	if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
	else if (test_bit(FLAG_EVENT_CLOCK_HTE, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
}

struct gpio_chardev_data {
	struct gpio_device *gdev;
	wait_queue_head_t wait;
	DECLARE_KFIFO(events, struct gpio_v2_line_info_changed, 32);
	struct notifier_block lineinfo_changed_nb;
	struct notifier_block device_unregistered_nb;
	unsigned long *watched_lines;
#ifdef CONFIG_GPIO_CDEV_V1
	atomic_t watch_abi_version;
#endif
};

static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip)
{
	struct gpio_device *gdev = cdev->gdev;
	struct gpiochip_info chipinfo;

	memset(&chipinfo, 0, sizeof(chipinfo));

	strscpy(chipinfo.name, dev_name(&gdev->dev), sizeof(chipinfo.name));
	strscpy(chipinfo.label, gdev->label, sizeof(chipinfo.label));
	chipinfo.lines = gdev->ngpio;
	if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
		return -EFAULT;
	return 0;
}
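/*
 * Line info watches are versioned per chardev file: the first watch request
 * (v1 or v2) pins the ABI used for the change events read back from the fd,
 * and any later attempt to mix versions fails with EPERM.
 */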
#ifdef CONFIG_GPIO_CDEV_V1
/*
 * returns 0 if the versions match, else the previously selected ABI version
 */
static int lineinfo_ensure_abi_version(struct gpio_chardev_data *cdata,
				       unsigned int version)
{
	int abiv = atomic_cmpxchg(&cdata->watch_abi_version, 0, version);

	if (abiv == version)
		return 0;

	return abiv;
}

static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip,
			   bool watch)
{
	struct gpio_desc *desc;
	struct gpioline_info lineinfo;
	struct gpio_v2_line_info lineinfo_v2;

	if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
		return -EFAULT;

	/* this doubles as a range check on line_offset */
	desc = gpio_device_get_desc(cdev->gdev, lineinfo.line_offset);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	if (watch) {
		if (lineinfo_ensure_abi_version(cdev, 1))
			return -EPERM;

		if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
			return -EBUSY;
	}

	gpio_desc_to_lineinfo(desc, &lineinfo_v2);
	gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);

	if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
		if (watch)
			clear_bit(lineinfo.line_offset, cdev->watched_lines);
		return -EFAULT;
	}

	return 0;
}
#endif

static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
			bool watch)
{
	struct gpio_desc *desc;
	struct gpio_v2_line_info lineinfo;

	if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
		return -EFAULT;

	if (memchr_inv(lineinfo.padding, 0, sizeof(lineinfo.padding)))
		return -EINVAL;

	desc = gpio_device_get_desc(cdev->gdev, lineinfo.offset);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	if (watch) {
#ifdef CONFIG_GPIO_CDEV_V1
		if (lineinfo_ensure_abi_version(cdev, 2))
			return -EPERM;
#endif
		if (test_and_set_bit(lineinfo.offset, cdev->watched_lines))
			return -EBUSY;
	}
	gpio_desc_to_lineinfo(desc, &lineinfo);
	supinfo_to_lineinfo(desc, &lineinfo);

	if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
		if (watch)
			clear_bit(lineinfo.offset, cdev->watched_lines);
		return -EFAULT;
	}

	return 0;
}

static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
{
	__u32 offset;

	if (copy_from_user(&offset, ip, sizeof(offset)))
		return -EFAULT;

	if (offset >= cdev->gdev->ngpio)
		return -EINVAL;

	if (!test_and_clear_bit(offset, cdev->watched_lines))
		return -EBUSY;

	return 0;
}

/*
 * gpio_ioctl() - ioctl handler for the GPIO chardev
 */
static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct gpio_chardev_data *cdev = file->private_data;
	struct gpio_device *gdev = cdev->gdev;
	void __user *ip = (void __user *)arg;

	guard(srcu)(&gdev->srcu);

	/* We fail any subsequent ioctl():s when the chip is gone */
	if (!rcu_access_pointer(gdev->chip))
		return -ENODEV;

	/* Fill in the struct and pass to userspace */
	switch (cmd) {
	case GPIO_GET_CHIPINFO_IOCTL:
		return chipinfo_get(cdev, ip);
#ifdef CONFIG_GPIO_CDEV_V1
	case GPIO_GET_LINEHANDLE_IOCTL:
		return linehandle_create(gdev, ip);
	case GPIO_GET_LINEEVENT_IOCTL:
		return lineevent_create(gdev, ip);
	case GPIO_GET_LINEINFO_IOCTL:
		return lineinfo_get_v1(cdev, ip, false);
	case GPIO_GET_LINEINFO_WATCH_IOCTL:
		return lineinfo_get_v1(cdev, ip, true);
#endif /* CONFIG_GPIO_CDEV_V1 */
	case GPIO_V2_GET_LINEINFO_IOCTL:
		return lineinfo_get(cdev, ip, false);
	case GPIO_V2_GET_LINEINFO_WATCH_IOCTL:
		return lineinfo_get(cdev, ip, true);
	case GPIO_V2_GET_LINE_IOCTL:
		return linereq_create(gdev, ip);
	case GPIO_GET_LINEINFO_UNWATCH_IOCTL:
		return lineinfo_unwatch(cdev, ip);
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static long gpio_ioctl_compat(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	return gpio_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif
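/*
 * Called from the gpio_device line state notifier chain whenever a line
 * changes state: queues a v2 info-changed event for watched lines and wakes
 * any poller.
 */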
static int lineinfo_changed_notify(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct gpio_chardev_data *cdev =
		container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb);
	struct gpio_v2_line_info_changed chg;
	struct gpio_desc *desc = data;
	int ret;

	if (!test_bit(gpio_chip_hwgpio(desc), cdev->watched_lines))
		return NOTIFY_DONE;

	memset(&chg, 0, sizeof(chg));
	chg.event_type = action;
	chg.timestamp_ns = ktime_get_ns();
	gpio_desc_to_lineinfo(desc, &chg.info);
	supinfo_to_lineinfo(desc, &chg.info);

	ret = kfifo_in_spinlocked(&cdev->events, &chg, 1, &cdev->wait.lock);
	if (ret)
		wake_up_poll(&cdev->wait, EPOLLIN);
	else
		pr_debug_ratelimited("lineinfo event FIFO is full - event dropped\n");

	return NOTIFY_OK;
}

static int gpio_device_unregistered_notify(struct notifier_block *nb,
					   unsigned long action, void *data)
{
	struct gpio_chardev_data *cdev = container_of(nb,
						      struct gpio_chardev_data,
						      device_unregistered_nb);

	wake_up_poll(&cdev->wait, EPOLLIN | EPOLLERR);

	return NOTIFY_OK;
}

static __poll_t lineinfo_watch_poll(struct file *file,
				    struct poll_table_struct *pollt)
{
	struct gpio_chardev_data *cdev = file->private_data;
	__poll_t events = 0;

	guard(srcu)(&cdev->gdev->srcu);

	if (!rcu_access_pointer(cdev->gdev->chip))
		return EPOLLHUP | EPOLLERR;

	poll_wait(file, &cdev->wait, pollt);

	if (!kfifo_is_empty_spinlocked_noirqsave(&cdev->events,
						 &cdev->wait.lock))
		events = EPOLLIN | EPOLLRDNORM;

	return events;
}
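/*
 * Delivers queued line info change events. The record format (v1 or v2)
 * follows the ABI version pinned by the first watch request; with
 * CONFIG_GPIO_CDEV_V1 disabled only the v2 format is produced.
 */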
static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
				   size_t count, loff_t *off)
{
	struct gpio_chardev_data *cdev = file->private_data;
	struct gpio_v2_line_info_changed event;
	ssize_t bytes_read = 0;
	int ret;
	size_t event_size;

	guard(srcu)(&cdev->gdev->srcu);

	if (!rcu_access_pointer(cdev->gdev->chip))
		return -ENODEV;

#ifndef CONFIG_GPIO_CDEV_V1
	event_size = sizeof(struct gpio_v2_line_info_changed);
	if (count < event_size)
		return -EINVAL;
#endif

	do {
		scoped_guard(spinlock, &cdev->wait.lock) {
			if (kfifo_is_empty(&cdev->events)) {
				if (bytes_read)
					return bytes_read;

				if (file->f_flags & O_NONBLOCK)
					return -EAGAIN;

				ret = wait_event_interruptible_locked(cdev->wait,
						!kfifo_is_empty(&cdev->events));
				if (ret)
					return ret;
			}
#ifdef CONFIG_GPIO_CDEV_V1
			/* must be after kfifo check so watch_abi_version is set */
			if (atomic_read(&cdev->watch_abi_version) == 2)
				event_size = sizeof(struct gpio_v2_line_info_changed);
			else
				event_size = sizeof(struct gpioline_info_changed);
			if (count < event_size)
				return -EINVAL;
#endif
			ret = kfifo_out(&cdev->events, &event, 1);
		}
		if (ret != 1) {
			/* We should never get here. See lineevent_read(). */
			ret = -EIO;
			break;
		}

#ifdef CONFIG_GPIO_CDEV_V1
		if (event_size == sizeof(struct gpio_v2_line_info_changed)) {
			if (copy_to_user(buf + bytes_read, &event, event_size))
				return -EFAULT;
		} else {
			struct gpioline_info_changed event_v1;

			gpio_v2_line_info_changed_to_v1(&event, &event_v1);
			if (copy_to_user(buf + bytes_read, &event_v1,
					 event_size))
				return -EFAULT;
		}
#else
		if (copy_to_user(buf + bytes_read, &event, event_size))
			return -EFAULT;
#endif
		bytes_read += event_size;
	} while (count >= bytes_read + sizeof(event));

	return bytes_read;
}

/**
 * gpio_chrdev_open() - open the chardev for ioctl operations
 * @inode: inode for this chardev
 * @file: file struct for storing private data
 * Returns 0 on success
 */
static int gpio_chrdev_open(struct inode *inode, struct file *file)
{
	struct gpio_device *gdev = container_of(inode->i_cdev,
						struct gpio_device, chrdev);
	struct gpio_chardev_data *cdev;
	int ret = -ENOMEM;

	guard(srcu)(&gdev->srcu);

	/* Fail on open if the backing gpiochip is gone */
	if (!rcu_access_pointer(gdev->chip))
		return -ENODEV;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return -ENOMEM;

	cdev->watched_lines = bitmap_zalloc(gdev->ngpio, GFP_KERNEL);
	if (!cdev->watched_lines)
		goto out_free_cdev;

	init_waitqueue_head(&cdev->wait);
	INIT_KFIFO(cdev->events);
	cdev->gdev = gpio_device_get(gdev);

	cdev->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify;
	ret = blocking_notifier_chain_register(&gdev->line_state_notifier,
					       &cdev->lineinfo_changed_nb);
	if (ret)
		goto out_free_bitmap;

	cdev->device_unregistered_nb.notifier_call =
					gpio_device_unregistered_notify;
	ret = blocking_notifier_chain_register(&gdev->device_notifier,
					       &cdev->device_unregistered_nb);
	if (ret)
		goto out_unregister_line_notifier;

	file->private_data = cdev;

	ret = nonseekable_open(inode, file);
	if (ret)
		goto out_unregister_device_notifier;

	return ret;

out_unregister_device_notifier:
	blocking_notifier_chain_unregister(&gdev->device_notifier,
					   &cdev->device_unregistered_nb);
out_unregister_line_notifier:
	blocking_notifier_chain_unregister(&gdev->line_state_notifier,
					   &cdev->lineinfo_changed_nb);
out_free_bitmap:
	gpio_device_put(gdev);
	bitmap_free(cdev->watched_lines);
out_free_cdev:
	kfree(cdev);
	return ret;
}

/**
 * gpio_chrdev_release() - close chardev after ioctl operations
 * @inode: inode for this chardev
 * @file: file struct for storing private data
 * Returns 0 on success
 */
static int gpio_chrdev_release(struct inode *inode, struct file *file)
{
	struct gpio_chardev_data *cdev = file->private_data;
	struct gpio_device *gdev = cdev->gdev;

	bitmap_free(cdev->watched_lines);
	blocking_notifier_chain_unregister(&gdev->device_notifier,
					   &cdev->device_unregistered_nb);
	blocking_notifier_chain_unregister(&gdev->line_state_notifier,
					   &cdev->lineinfo_changed_nb);
	gpio_device_put(gdev);
	kfree(cdev);

	return 0;
}
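/*
 * File operations for /dev/gpiochipN itself: ioctls are dispatched through
 * gpio_ioctl() and line info watch events are delivered via read()/poll().
 */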
static const struct file_operations gpio_fileops = {
	.release = gpio_chrdev_release,
	.open = gpio_chrdev_open,
	.poll = lineinfo_watch_poll,
	.read = lineinfo_watch_read,
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.unlocked_ioctl = gpio_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = gpio_ioctl_compat,
#endif
};

int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt)
{
	struct gpio_chip *gc;
	int ret;

	cdev_init(&gdev->chrdev, &gpio_fileops);
	gdev->chrdev.owner = THIS_MODULE;
	gdev->dev.devt = MKDEV(MAJOR(devt), gdev->id);

	ret = cdev_device_add(&gdev->chrdev, &gdev->dev);
	if (ret)
		return ret;

	guard(srcu)(&gdev->srcu);
	gc = srcu_dereference(gdev->chip, &gdev->srcu);
	if (!gc)
		return -ENODEV;

	chip_dbg(gc, "added GPIO chardev (%d:%d)\n", MAJOR(devt), gdev->id);

	return 0;
}

void gpiolib_cdev_unregister(struct gpio_device *gdev)
{
	cdev_device_del(&gdev->chrdev, &gdev->dev);
	blocking_notifier_call_chain(&gdev->device_notifier, 0, NULL);
}
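/*
 * Illustrative userspace usage of this chardev (a sketch only, not built as
 * part of the kernel; the chip path and line offset are examples):
 *
 *	int fd = open("/dev/gpiochip0", O_RDWR | O_CLOEXEC);
 *	struct gpiochip_info info = {};
 *
 *	ioctl(fd, GPIO_GET_CHIPINFO_IOCTL, &info);
 *
 *	struct gpio_v2_line_request req = {
 *		.offsets = { 3 },
 *		.num_lines = 1,
 *		.config.flags = GPIO_V2_LINE_FLAG_INPUT,
 *	};
 *	strcpy(req.consumer, "example");
 *	ioctl(fd, GPIO_V2_GET_LINE_IOCTL, &req);
 *
 *	struct gpio_v2_line_values vals = { .mask = 1 };
 *	ioctl(req.fd, GPIO_V2_LINE_GET_VALUES_IOCTL, &vals);
 */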