// SPDX-License-Identifier: GPL-2.0

#include <linux/anon_inodes.h>
#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/build_bug.h>
#include <linux/cdev.h>
#include <linux/cleanup.h>
#include <linux/compat.h>
#include <linux/compiler.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/hte.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/overflow.h>
#include <linux/pinctrl/consumer.h>
#include <linux/poll.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/timekeeping.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>

#include <uapi/linux/gpio.h>

#include "gpiolib.h"
#include "gpiolib-cdev.h"

/*
 * Array sizes must ensure 64-bit alignment and not create holes in the
 * struct packing.
 */
static_assert(IS_ALIGNED(GPIO_V2_LINES_MAX, 2));
static_assert(IS_ALIGNED(GPIO_MAX_NAME_SIZE, 8));

/*
 * Check that uAPI structs are 64-bit aligned for 32/64-bit compatibility
 */
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_attribute), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config_attribute), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_request), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info_changed), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_event), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_values), 8));

/* Character device interface to GPIO.
 *
 * The GPIO character device, /dev/gpiochipN, provides userspace an
 * interface to gpiolib GPIOs via ioctl()s.
 */
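
/*
 * Illustrative userspace sketch (not part of this driver) of the v2 uAPI
 * flow implemented below, assuming a line at offset 3 on gpiochip0:
 *
 *	struct gpio_v2_line_request req = { 0 };
 *	struct gpio_v2_line_values lv = { 0 };
 *	int chip_fd;
 *
 *	chip_fd = open("/dev/gpiochip0", O_RDONLY);
 *	req.offsets[0] = 3;
 *	req.num_lines = 1;
 *	strcpy(req.consumer, "example");
 *	req.config.flags = GPIO_V2_LINE_FLAG_INPUT;
 *	ioctl(chip_fd, GPIO_V2_GET_LINE_IOCTL, &req);
 *	// req.fd now refers to the line request serviced by linereq_ioctl()
 *	lv.mask = 1;
 *	ioctl(req.fd, GPIO_V2_LINE_GET_VALUES_IOCTL, &lv);
 *	// bit 0 of lv.bits holds the logical line value
 */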

/*
 * GPIO line handle management
 */

#ifdef CONFIG_GPIO_CDEV_V1
/**
 * struct linehandle_state - contains the state of a userspace handle
 * @gdev: the GPIO device the handle pertains to
 * @label: consumer label used to tag descriptors
 * @descs: the GPIO descriptors held by this handle
 * @num_descs: the number of descriptors held in the descs array
 */
struct linehandle_state {
	struct gpio_device *gdev;
	const char *label;
	struct gpio_desc *descs[GPIOHANDLES_MAX];
	u32 num_descs;
};

#define GPIOHANDLE_REQUEST_VALID_FLAGS \
	(GPIOHANDLE_REQUEST_INPUT | \
	GPIOHANDLE_REQUEST_OUTPUT | \
	GPIOHANDLE_REQUEST_ACTIVE_LOW | \
	GPIOHANDLE_REQUEST_BIAS_PULL_UP | \
	GPIOHANDLE_REQUEST_BIAS_PULL_DOWN | \
	GPIOHANDLE_REQUEST_BIAS_DISABLE | \
	GPIOHANDLE_REQUEST_OPEN_DRAIN | \
	GPIOHANDLE_REQUEST_OPEN_SOURCE)

static int linehandle_validate_flags(u32 flags)
{
	/* Return an error if an unknown flag is set */
	if (flags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
		return -EINVAL;

	/*
	 * Do not allow both INPUT & OUTPUT flags to be set as they are
	 * contradictory.
	 */
	if ((flags & GPIOHANDLE_REQUEST_INPUT) &&
	    (flags & GPIOHANDLE_REQUEST_OUTPUT))
		return -EINVAL;

	/*
	 * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
	 * the hardware actually supports enabling both at the same time the
	 * electrical result would be disastrous.
	 */
	if ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) &&
	    (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
		return -EINVAL;

	/* OPEN_DRAIN and OPEN_SOURCE flags only make sense for output mode. */
	if (!(flags & GPIOHANDLE_REQUEST_OUTPUT) &&
	    ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
	     (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE)))
		return -EINVAL;

	/* Bias flags only allowed for input or output mode. */
	if (!((flags & GPIOHANDLE_REQUEST_INPUT) ||
	      (flags & GPIOHANDLE_REQUEST_OUTPUT)) &&
	    ((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) ||
	     (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP) ||
	     (flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN)))
		return -EINVAL;

	/* Only one bias flag can be set. */
	if (((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
	     (flags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
		       GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
	    ((flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
	     (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
		return -EINVAL;

	return 0;
}

static void linehandle_flags_to_desc_flags(u32 lflags, unsigned long *flagsp)
{
	assign_bit(FLAG_ACTIVE_LOW, flagsp,
		   lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW);
	assign_bit(FLAG_OPEN_DRAIN, flagsp,
		   lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN);
	assign_bit(FLAG_OPEN_SOURCE, flagsp,
		   lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE);
	assign_bit(FLAG_PULL_UP, flagsp,
		   lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP);
	assign_bit(FLAG_PULL_DOWN, flagsp,
		   lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN);
	assign_bit(FLAG_BIAS_DISABLE, flagsp,
		   lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE);
}

static long linehandle_set_config(struct linehandle_state *lh,
				  void __user *ip)
{
	struct gpiohandle_config gcnf;
	struct gpio_desc *desc;
	int i, ret;
	u32 lflags;

	if (copy_from_user(&gcnf, ip, sizeof(gcnf)))
		return -EFAULT;

	lflags = gcnf.flags;
	ret = linehandle_validate_flags(lflags);
	if (ret)
		return ret;

	for (i = 0; i < lh->num_descs; i++) {
		desc = lh->descs[i];
		linehandle_flags_to_desc_flags(gcnf.flags, &desc->flags);

		/*
		 * Lines have to be requested explicitly for input
		 * or output, else the line will be treated "as is".
		 */
		if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
			int val = !!gcnf.default_values[i];

			ret = gpiod_direction_output(desc, val);
			if (ret)
				return ret;
		} else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
			ret = gpiod_direction_input(desc);
			if (ret)
				return ret;
		}

		gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
	}
	return 0;
}

static long linehandle_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct linehandle_state *lh = file->private_data;
	void __user *ip = (void __user *)arg;
	struct gpiohandle_data ghd;
	DECLARE_BITMAP(vals, GPIOHANDLES_MAX);
	unsigned int i;
	int ret;

	guard(srcu)(&lh->gdev->srcu);

	if (!rcu_access_pointer(lh->gdev->chip))
		return -ENODEV;

	switch (cmd) {
	case GPIOHANDLE_GET_LINE_VALUES_IOCTL:
		/* NOTE: It's okay to read values of output lines */
		ret = gpiod_get_array_value_complex(false, true,
						    lh->num_descs, lh->descs,
						    NULL, vals);
		if (ret)
			return ret;

		memset(&ghd, 0, sizeof(ghd));
		for (i = 0; i < lh->num_descs; i++)
			ghd.values[i] = test_bit(i, vals);

		if (copy_to_user(ip, &ghd, sizeof(ghd)))
			return -EFAULT;

		return 0;
	case GPIOHANDLE_SET_LINE_VALUES_IOCTL:
		/*
		 * All line descriptors were created at once with the same
		 * flags so just check if the first one is really output.
		 */
		if (!test_bit(FLAG_IS_OUT, &lh->descs[0]->flags))
			return -EPERM;

		if (copy_from_user(&ghd, ip, sizeof(ghd)))
			return -EFAULT;

		/* Clamp all values to [0,1] */
		for (i = 0; i < lh->num_descs; i++)
			__assign_bit(i, vals, ghd.values[i]);

		/* Reuse the array setting function */
		return gpiod_set_array_value_complex(false,
						     true,
						     lh->num_descs,
						     lh->descs,
						     NULL,
						     vals);
	case GPIOHANDLE_SET_CONFIG_IOCTL:
		return linehandle_set_config(lh, ip);
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static long linehandle_ioctl_compat(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	return linehandle_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static void linehandle_free(struct linehandle_state *lh)
{
	int i;

	for (i = 0; i < lh->num_descs; i++)
		if (lh->descs[i])
			gpiod_free(lh->descs[i]);
	kfree(lh->label);
	gpio_device_put(lh->gdev);
	kfree(lh);
}

static int linehandle_release(struct inode *inode, struct file *file)
{
	linehandle_free(file->private_data);
	return 0;
}

static const struct file_operations linehandle_fileops = {
	.release = linehandle_release,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = linehandle_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = linehandle_ioctl_compat,
#endif
};

static int linehandle_create(struct gpio_device *gdev, void __user *ip)
{
	struct gpiohandle_request handlereq;
	struct linehandle_state *lh;
	struct file *file;
	int fd, i, ret;
	u32 lflags;

	if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
		return -EFAULT;
	if ((handlereq.lines == 0) || (handlereq.lines > GPIOHANDLES_MAX))
		return -EINVAL;

	lflags = handlereq.flags;

	ret = linehandle_validate_flags(lflags);
	if (ret)
		return ret;

	lh = kzalloc(sizeof(*lh), GFP_KERNEL);
	if (!lh)
		return -ENOMEM;
	lh->gdev = gpio_device_get(gdev);

	if (handlereq.consumer_label[0] != '\0') {
		/* label is only initialized if consumer_label is set */
		lh->label = kstrndup(handlereq.consumer_label,
				     sizeof(handlereq.consumer_label) - 1,
				     GFP_KERNEL);
		if (!lh->label) {
			ret = -ENOMEM;
			goto out_free_lh;
		}
	}

	lh->num_descs = handlereq.lines;

	/* Request each GPIO */
	for (i = 0; i < handlereq.lines; i++) {
		u32 offset = handlereq.lineoffsets[i];
		struct gpio_desc *desc = gpio_device_get_desc(gdev, offset);

		if (IS_ERR(desc)) {
			ret = PTR_ERR(desc);
			goto out_free_lh;
		}

		ret = gpiod_request_user(desc, lh->label);
		if (ret)
			goto out_free_lh;
		lh->descs[i] = desc;
		linehandle_flags_to_desc_flags(handlereq.flags, &desc->flags);

		ret = gpiod_set_transitory(desc, false);
		if (ret < 0)
			goto out_free_lh;

		/*
		 * Lines have to be requested explicitly for input
		 * or output, else the line will be treated "as is".
		 */
		if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
			int val = !!handlereq.default_values[i];

			ret = gpiod_direction_output(desc, val);
			if (ret)
				goto out_free_lh;
		} else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
			ret = gpiod_direction_input(desc);
			if (ret)
				goto out_free_lh;
		}

		gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);

		dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
			offset);
	}

	fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto out_free_lh;
	}

	file = anon_inode_getfile("gpio-linehandle",
				  &linehandle_fileops,
				  lh,
				  O_RDONLY | O_CLOEXEC);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto out_put_unused_fd;
	}

	handlereq.fd = fd;
	if (copy_to_user(ip, &handlereq, sizeof(handlereq))) {
		/*
		 * fput() will trigger the release() callback, so do not go onto
		 * the regular error cleanup path here.
		 */
		fput(file);
		put_unused_fd(fd);
		return -EFAULT;
	}

	fd_install(fd, file);

	dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
		lh->num_descs);

	return 0;

out_put_unused_fd:
	put_unused_fd(fd);
out_free_lh:
	linehandle_free(lh);
	return ret;
}
#endif /* CONFIG_GPIO_CDEV_V1 */

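/*
 * Illustrative userspace sketch (not part of this driver) of the deprecated
 * v1 handle request serviced by linehandle_create() above, assuming an
 * output line at offset 3:
 *
 *	struct gpiohandle_request hreq = { 0 };
 *
 *	hreq.lineoffsets[0] = 3;
 *	hreq.lines = 1;
 *	hreq.flags = GPIOHANDLE_REQUEST_OUTPUT;
 *	hreq.default_values[0] = 1;
 *	ioctl(chip_fd, GPIO_GET_LINEHANDLE_IOCTL, &hreq);
 *	// hreq.fd is then used with the GPIOHANDLE_*_LINE_VALUES_IOCTLs
 *	// handled by linehandle_ioctl().
 */
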
/**
 * struct line - contains the state of a requested line
 * @node: to store the object in supinfo_tree if supplemental
 * @desc: the GPIO descriptor for this line.
 * @req: the corresponding line request
 * @irq: the interrupt triggered in response to events on this GPIO
 * @edflags: the edge flags, GPIO_V2_LINE_FLAG_EDGE_RISING and/or
 * GPIO_V2_LINE_FLAG_EDGE_FALLING, indicating the edge detection applied
 * @timestamp_ns: cache for the timestamp storing it between hardirq and
 * IRQ thread, used to bring the timestamp close to the actual event
 * @req_seqno: the seqno for the current edge event in the sequence of
 * events for the corresponding line request. This is drawn from the @req.
 * @line_seqno: the seqno for the current edge event in the sequence of
 * events for this line.
 * @work: the worker that implements software debouncing
 * @debounce_period_us: the debounce period in microseconds
 * @sw_debounced: flag indicating if the software debouncer is active
 * @level: the current debounced physical level of the line
 * @hdesc: the Hardware Timestamp Engine (HTE) descriptor
 * @raw_level: the line level at the time of event
 * @total_discard_seq: the running counter of the discarded events
 * @last_seqno: the last sequence number before debounce period expires
 */
struct line {
	struct rb_node node;
	struct gpio_desc *desc;
	/*
	 * -- edge detector specific fields --
	 */
	struct linereq *req;
	unsigned int irq;
	/*
	 * The flags for the active edge detector configuration.
	 *
	 * edflags is set by linereq_create(), linereq_free(), and
	 * linereq_set_config_unlocked(), which are themselves mutually
	 * exclusive, and is accessed by edge_irq_thread(),
	 * process_hw_ts_thread() and debounce_work_func(),
	 * which can all live with a slightly stale value.
	 */
	u64 edflags;
	/*
	 * timestamp_ns and req_seqno are accessed only by
	 * edge_irq_handler() and edge_irq_thread(), which are themselves
	 * mutually exclusive, so no additional protection is necessary.
	 */
	u64 timestamp_ns;
	u32 req_seqno;
	/*
	 * line_seqno is accessed by either edge_irq_thread() or
	 * debounce_work_func(), which are themselves mutually exclusive,
	 * so no additional protection is necessary.
	 */
	u32 line_seqno;
	/*
	 * -- debouncer specific fields --
	 */
	struct delayed_work work;
	/*
	 * debounce_period_us is accessed by debounce_irq_handler() and
	 * process_hw_ts() which are disabled when modified by
	 * debounce_setup(), edge_detector_setup() or edge_detector_stop()
	 * or can live with a stale version when updated by
	 * edge_detector_update().
	 * The modifying functions are themselves mutually exclusive.
	 */
	unsigned int debounce_period_us;
	/*
	 * sw_debounced is accessed by linereq_set_config(), which is the
	 * only setter, and linereq_get_values(), which can live with a
	 * slightly stale value.
	 */
	unsigned int sw_debounced;
	/*
	 * level is accessed by debounce_work_func(), which is the only
	 * setter, and linereq_get_values() which can live with a slightly
	 * stale value.
	 */
	unsigned int level;
#ifdef CONFIG_HTE
	struct hte_ts_desc hdesc;
	/*
	 * HTE provider sets line level at the time of event. The valid
	 * value is 0 or 1 and negative value for an error.
	 */
	int raw_level;
	/*
	 * when sw_debounce is set on HTE enabled line, this is running
	 * counter of the discarded events.
	 */
	u32 total_discard_seq;
	/*
	 * when sw_debounce is set on HTE enabled line, this variable records
	 * last sequence number before debounce period expires.
	 */
	u32 last_seqno;
#endif /* CONFIG_HTE */
};

/*
 * an rbtree of the struct lines containing supplemental info.
 * Used to populate gpio_v2_line_info with cdev specific fields not contained
 * in the struct gpio_desc.
 * A line is determined to contain supplemental information by
 * line_has_supinfo().
 */
static struct rb_root supinfo_tree = RB_ROOT;
/* covers supinfo_tree */
static DEFINE_SPINLOCK(supinfo_lock);

/**
 * struct linereq - contains the state of a userspace line request
 * @gdev: the GPIO device the line request pertains to
 * @label: consumer label used to tag GPIO descriptors
 * @num_lines: the number of lines in the lines array
 * @wait: wait queue that handles blocking reads of events
 * @device_unregistered_nb: notifier block for receiving gdev unregister events
 * @event_buffer_size: the number of elements allocated in @events
 * @events: KFIFO for the GPIO events
 * @seqno: the sequence number for edge events generated on all lines in
 * this line request. Note that this is not used when @num_lines is 1, as
 * the line_seqno is then the same and is cheaper to calculate.
 * @config_mutex: mutex for serializing ioctl() calls to ensure consistency
 * of configuration, particularly multi-step accesses to desc flags and
 * changes to supinfo status.
 * @lines: the lines held by this line request, with @num_lines elements.
 */
struct linereq {
	struct gpio_device *gdev;
	const char *label;
	u32 num_lines;
	wait_queue_head_t wait;
	struct notifier_block device_unregistered_nb;
	u32 event_buffer_size;
	DECLARE_KFIFO_PTR(events, struct gpio_v2_line_event);
	atomic_t seqno;
	struct mutex config_mutex;
	struct line lines[] __counted_by(num_lines);
};

static void supinfo_insert(struct line *line)
{
	struct rb_node **new = &(supinfo_tree.rb_node), *parent = NULL;
	struct line *entry;

	guard(spinlock)(&supinfo_lock);

	while (*new) {
		entry = container_of(*new, struct line, node);

		parent = *new;
		if (line->desc < entry->desc) {
			new = &((*new)->rb_left);
		} else if (line->desc > entry->desc) {
			new = &((*new)->rb_right);
		} else {
			/* this should never happen */
			WARN(1, "duplicate line inserted");
			return;
		}
	}

	rb_link_node(&line->node, parent, new);
	rb_insert_color(&line->node, &supinfo_tree);
}

static void supinfo_erase(struct line *line)
{
	guard(spinlock)(&supinfo_lock);

	rb_erase(&line->node, &supinfo_tree);
}

static struct line *supinfo_find(struct gpio_desc *desc)
{
	struct rb_node *node = supinfo_tree.rb_node;
	struct line *line;

	while (node) {
		line = container_of(node, struct line, node);
		if (desc < line->desc)
			node = node->rb_left;
		else if (desc > line->desc)
			node = node->rb_right;
		else
			return line;
	}
	return NULL;
}

static void supinfo_to_lineinfo(struct gpio_desc *desc,
				struct gpio_v2_line_info *info)
{
	struct gpio_v2_line_attribute *attr;
	struct line *line;

	guard(spinlock)(&supinfo_lock);

	line = supinfo_find(desc);
	if (!line)
		return;

	attr = &info->attrs[info->num_attrs];
	attr->id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
	attr->debounce_period_us = READ_ONCE(line->debounce_period_us);
	info->num_attrs++;
}

static inline bool line_has_supinfo(struct line *line)
{
	return READ_ONCE(line->debounce_period_us);
}

/*
 * Checks line_has_supinfo() before and after the change to avoid unnecessary
 * supinfo_tree access.
 * Called indirectly by linereq_create() or linereq_set_config() so line
 * is already protected from concurrent changes.
 */
static void line_set_debounce_period(struct line *line,
				     unsigned int debounce_period_us)
{
	bool was_suppl = line_has_supinfo(line);

	WRITE_ONCE(line->debounce_period_us, debounce_period_us);

	/* if supinfo status is unchanged then we're done */
	if (line_has_supinfo(line) == was_suppl)
		return;

	/* supinfo status has changed, so update the tree */
	if (was_suppl)
		supinfo_erase(line);
	else
		supinfo_insert(line);
}

#define GPIO_V2_LINE_BIAS_FLAGS \
	(GPIO_V2_LINE_FLAG_BIAS_PULL_UP | \
	 GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN | \
	 GPIO_V2_LINE_FLAG_BIAS_DISABLED)

#define GPIO_V2_LINE_DIRECTION_FLAGS \
	(GPIO_V2_LINE_FLAG_INPUT | \
	 GPIO_V2_LINE_FLAG_OUTPUT)

#define GPIO_V2_LINE_DRIVE_FLAGS \
	(GPIO_V2_LINE_FLAG_OPEN_DRAIN | \
	 GPIO_V2_LINE_FLAG_OPEN_SOURCE)

#define GPIO_V2_LINE_EDGE_FLAGS \
	(GPIO_V2_LINE_FLAG_EDGE_RISING | \
	 GPIO_V2_LINE_FLAG_EDGE_FALLING)

#define GPIO_V2_LINE_FLAG_EDGE_BOTH GPIO_V2_LINE_EDGE_FLAGS

#define GPIO_V2_LINE_VALID_FLAGS \
	(GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
	 GPIO_V2_LINE_DIRECTION_FLAGS | \
	 GPIO_V2_LINE_DRIVE_FLAGS | \
	 GPIO_V2_LINE_EDGE_FLAGS | \
	 GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME | \
	 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
	 GPIO_V2_LINE_BIAS_FLAGS)

/* subset of flags relevant for edge detector configuration */
#define GPIO_V2_LINE_EDGE_DETECTOR_FLAGS \
	(GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
	 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
	 GPIO_V2_LINE_EDGE_FLAGS)

static int linereq_unregistered_notify(struct notifier_block *nb,
				       unsigned long action, void *data)
{
	struct linereq *lr = container_of(nb, struct linereq,
					  device_unregistered_nb);

	wake_up_poll(&lr->wait, EPOLLIN | EPOLLERR);

	return NOTIFY_OK;
}

static void linereq_put_event(struct linereq *lr,
			      struct gpio_v2_line_event *le)
{
	bool overflow = false;

	scoped_guard(spinlock, &lr->wait.lock) {
		if (kfifo_is_full(&lr->events)) {
			overflow = true;
			kfifo_skip(&lr->events);
		}
		kfifo_in(&lr->events, le, 1);
	}
	if (!overflow)
		wake_up_poll(&lr->wait, EPOLLIN);
	else
		pr_debug_ratelimited("event FIFO is full - event dropped\n");
}

static u64 line_event_timestamp(struct line *line)
{
	if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
		return ktime_get_real_ns();
	else if (IS_ENABLED(CONFIG_HTE) &&
		 test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
		return line->timestamp_ns;

	return ktime_get_ns();
}

static u32 line_event_id(int level)
{
	return level ? GPIO_V2_LINE_EVENT_RISING_EDGE :
		       GPIO_V2_LINE_EVENT_FALLING_EDGE;
}

static inline char *make_irq_label(const char *orig)
{
	char *new;

	if (!orig)
		return NULL;

	new = kstrdup_and_replace(orig, '/', ':', GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	return new;
}

static inline void free_irq_label(const char *label)
{
	kfree(label);
}

#ifdef CONFIG_HTE

static enum hte_return process_hw_ts_thread(void *p)
{
	struct line *line;
	struct linereq *lr;
	struct gpio_v2_line_event le;
	u64 edflags;
	int level;

	if (!p)
		return HTE_CB_HANDLED;

	line = p;
	lr = line->req;

	memset(&le, 0, sizeof(le));

	le.timestamp_ns = line->timestamp_ns;
	edflags = READ_ONCE(line->edflags);

	switch (edflags & GPIO_V2_LINE_EDGE_FLAGS) {
	case GPIO_V2_LINE_FLAG_EDGE_BOTH:
		level = (line->raw_level >= 0) ?
				line->raw_level :
				gpiod_get_raw_value_cansleep(line->desc);

		if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
			level = !level;

		le.id = line_event_id(level);
		break;
	case GPIO_V2_LINE_FLAG_EDGE_RISING:
		le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
		break;
	case GPIO_V2_LINE_FLAG_EDGE_FALLING:
		le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
		break;
	default:
		return HTE_CB_HANDLED;
	}
	le.line_seqno = line->line_seqno;
	le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
	le.offset = gpio_chip_hwgpio(line->desc);

	linereq_put_event(lr, &le);

	return HTE_CB_HANDLED;
}

static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
{
	struct line *line;
	struct linereq *lr;
	int diff_seqno = 0;

	if (!ts || !p)
		return HTE_CB_HANDLED;

	line = p;
	line->timestamp_ns = ts->tsc;
	line->raw_level = ts->raw_level;
	lr = line->req;

	if (READ_ONCE(line->sw_debounced)) {
		line->total_discard_seq++;
		line->last_seqno = ts->seq;
		mod_delayed_work(system_wq, &line->work,
		  usecs_to_jiffies(READ_ONCE(line->debounce_period_us)));
	} else {
		if (unlikely(ts->seq < line->line_seqno))
			return HTE_CB_HANDLED;

		diff_seqno = ts->seq - line->line_seqno;
		line->line_seqno = ts->seq;
		if (lr->num_lines != 1)
			line->req_seqno = atomic_add_return(diff_seqno,
							    &lr->seqno);

		return HTE_RUN_SECOND_CB;
	}

	return HTE_CB_HANDLED;
}

static int hte_edge_setup(struct line *line, u64 eflags)
{
	int ret;
	unsigned long flags = 0;
	struct hte_ts_desc *hdesc = &line->hdesc;

	if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
		flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
			 HTE_FALLING_EDGE_TS :
			 HTE_RISING_EDGE_TS;
	if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
		flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
			 HTE_RISING_EDGE_TS :
			 HTE_FALLING_EDGE_TS;

	line->total_discard_seq = 0;

	hte_init_line_attr(hdesc, desc_to_gpio(line->desc), flags, NULL,
			   line->desc);

	ret = hte_ts_get(NULL, hdesc, 0);
	if (ret)
		return ret;

	return hte_request_ts_ns(hdesc, process_hw_ts, process_hw_ts_thread,
				 line);
}

#else

static int hte_edge_setup(struct line *line, u64 eflags)
{
	return 0;
}
#endif /* CONFIG_HTE */

static irqreturn_t edge_irq_thread(int irq, void *p)
{
	struct line *line = p;
	struct linereq *lr = line->req;
	struct gpio_v2_line_event le;

	/* Do not leak kernel stack to userspace */
	memset(&le, 0, sizeof(le));

	if (line->timestamp_ns) {
		le.timestamp_ns = line->timestamp_ns;
	} else {
		/*
		 * We may be running from a nested threaded interrupt in
		 * which case we didn't get the timestamp from
		 * edge_irq_handler().
		 */
		le.timestamp_ns = line_event_timestamp(line);
		if (lr->num_lines != 1)
			line->req_seqno = atomic_inc_return(&lr->seqno);
	}
	line->timestamp_ns = 0;

	switch (READ_ONCE(line->edflags) & GPIO_V2_LINE_EDGE_FLAGS) {
	case GPIO_V2_LINE_FLAG_EDGE_BOTH:
		le.id = line_event_id(gpiod_get_value_cansleep(line->desc));
		break;
	case GPIO_V2_LINE_FLAG_EDGE_RISING:
		le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
		break;
	case GPIO_V2_LINE_FLAG_EDGE_FALLING:
		le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
		break;
	default:
		return IRQ_NONE;
	}
	line->line_seqno++;
	le.line_seqno = line->line_seqno;
	le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
	le.offset = gpio_chip_hwgpio(line->desc);

	linereq_put_event(lr, &le);

	return IRQ_HANDLED;
}

static irqreturn_t edge_irq_handler(int irq, void *p)
{
	struct line *line = p;
	struct linereq *lr = line->req;

	/*
	 * Just store the timestamp in hardirq context so we get it as
	 * close in time as possible to the actual event.
	 */
	line->timestamp_ns = line_event_timestamp(line);

	if (lr->num_lines != 1)
		line->req_seqno = atomic_inc_return(&lr->seqno);

	return IRQ_WAKE_THREAD;
}

/*
 * returns the current debounced logical value.
 */
static bool debounced_value(struct line *line)
{
	bool value;

	/*
	 * minor race - debouncer may be stopped here, so edge_detector_stop()
	 * must leave the value unchanged so the following will read the level
	 * from when the debouncer was last running.
	 */
	value = READ_ONCE(line->level);

	if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
		value = !value;

	return value;
}

static irqreturn_t debounce_irq_handler(int irq, void *p)
{
	struct line *line = p;

	mod_delayed_work(system_wq, &line->work,
		usecs_to_jiffies(READ_ONCE(line->debounce_period_us)));

	return IRQ_HANDLED;
}

static void debounce_work_func(struct work_struct *work)
{
	struct gpio_v2_line_event le;
	struct line *line = container_of(work, struct line, work.work);
	struct linereq *lr;
	u64 eflags, edflags = READ_ONCE(line->edflags);
	int level = -1;
#ifdef CONFIG_HTE
	int diff_seqno;

	if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
		level = line->raw_level;
#endif
	if (level < 0)
		level = gpiod_get_raw_value_cansleep(line->desc);
	if (level < 0) {
		pr_debug_ratelimited("debouncer failed to read line value\n");
		return;
	}

	if (READ_ONCE(line->level) == level)
		return;

	WRITE_ONCE(line->level, level);

	/* -- edge detection -- */
	eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
	if (!eflags)
		return;

	/* switch from physical level to logical - if they differ */
	if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
		level = !level;

	/* ignore edges that are not being monitored */
	if (((eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) && !level) ||
	    ((eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) && level))
		return;

	/* Do not leak kernel stack to userspace */
	memset(&le, 0, sizeof(le));

	lr = line->req;
	le.timestamp_ns = line_event_timestamp(line);
	le.offset = gpio_chip_hwgpio(line->desc);
#ifdef CONFIG_HTE
	if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) {
		/* discard events except the last one */
		line->total_discard_seq -= 1;
		diff_seqno = line->last_seqno - line->total_discard_seq -
				line->line_seqno;
		line->line_seqno = line->last_seqno - line->total_discard_seq;
		le.line_seqno = line->line_seqno;
		le.seqno = (lr->num_lines == 1) ?
			le.line_seqno : atomic_add_return(diff_seqno, &lr->seqno);
	} else
#endif /* CONFIG_HTE */
	{
		line->line_seqno++;
		le.line_seqno = line->line_seqno;
		le.seqno = (lr->num_lines == 1) ?
			le.line_seqno : atomic_inc_return(&lr->seqno);
	}

	le.id = line_event_id(level);

	linereq_put_event(lr, &le);
}

static int debounce_setup(struct line *line, unsigned int debounce_period_us)
{
	unsigned long irqflags;
	int ret, level, irq;
	char *label;

	/* try hardware */
	ret = gpiod_set_debounce(line->desc, debounce_period_us);
	if (!ret) {
		line_set_debounce_period(line, debounce_period_us);
		return ret;
	}
	if (ret != -ENOTSUPP)
		return ret;

	if (debounce_period_us) {
		/* setup software debounce */
		level = gpiod_get_raw_value_cansleep(line->desc);
		if (level < 0)
			return level;

		if (!(IS_ENABLED(CONFIG_HTE) &&
		      test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))) {
			irq = gpiod_to_irq(line->desc);
			if (irq < 0)
				return -ENXIO;

			label = make_irq_label(line->req->label);
			if (IS_ERR(label))
				return -ENOMEM;

			irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
			ret = request_irq(irq, debounce_irq_handler, irqflags,
					  label, line);
			if (ret) {
				free_irq_label(label);
				return ret;
			}
			line->irq = irq;
		} else {
			ret = hte_edge_setup(line, GPIO_V2_LINE_FLAG_EDGE_BOTH);
			if (ret)
				return ret;
		}

		WRITE_ONCE(line->level, level);
		WRITE_ONCE(line->sw_debounced, 1);
	}
	return 0;
}

static bool gpio_v2_line_config_debounced(struct gpio_v2_line_config *lc,
					  unsigned int line_idx)
{
	unsigned int i;
	u64 mask = BIT_ULL(line_idx);

	for (i = 0; i < lc->num_attrs; i++) {
		if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
		    (lc->attrs[i].mask & mask))
			return true;
	}
	return false;
}

static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
					       unsigned int line_idx)
{
	unsigned int i;
	u64 mask = BIT_ULL(line_idx);

	for (i = 0; i < lc->num_attrs; i++) {
		if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
		    (lc->attrs[i].mask & mask))
			return lc->attrs[i].attr.debounce_period_us;
	}
	return 0;
}
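
/*
 * For reference, the debounce attribute read by the two helpers above is
 * encoded by userspace in struct gpio_v2_line_config roughly as follows
 * (illustrative sketch; a 10ms debounce applied to the first two requested
 * lines):
 *
 *	config.num_attrs = 1;
 *	config.attrs[0].attr.id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
 *	config.attrs[0].attr.debounce_period_us = 10000;
 *	config.attrs[0].mask = BIT(0) | BIT(1);
 */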

static void edge_detector_stop(struct line *line)
{
	if (line->irq) {
		free_irq_label(free_irq(line->irq, line));
		line->irq = 0;
	}

#ifdef CONFIG_HTE
	if (READ_ONCE(line->edflags) & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
		hte_ts_put(&line->hdesc);
#endif

	cancel_delayed_work_sync(&line->work);
	WRITE_ONCE(line->sw_debounced, 0);
	WRITE_ONCE(line->edflags, 0);
	line_set_debounce_period(line, 0);
	/* do not change line->level - see comment in debounced_value() */
}

static int edge_detector_setup(struct line *line,
			       struct gpio_v2_line_config *lc,
			       unsigned int line_idx, u64 edflags)
{
	u32 debounce_period_us;
	unsigned long irqflags = 0;
	u64 eflags;
	int irq, ret;
	char *label;

	eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
	if (eflags && !kfifo_initialized(&line->req->events)) {
		ret = kfifo_alloc(&line->req->events,
				  line->req->event_buffer_size, GFP_KERNEL);
		if (ret)
			return ret;
	}
	if (gpio_v2_line_config_debounced(lc, line_idx)) {
		debounce_period_us = gpio_v2_line_config_debounce_period(lc, line_idx);
		ret = debounce_setup(line, debounce_period_us);
		if (ret)
			return ret;
		line_set_debounce_period(line, debounce_period_us);
	}

	/* detection disabled or sw debouncer will provide edge detection */
	if (!eflags || READ_ONCE(line->sw_debounced))
		return 0;

	if (IS_ENABLED(CONFIG_HTE) &&
	    (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
		return hte_edge_setup(line, edflags);

	irq = gpiod_to_irq(line->desc);
	if (irq < 0)
		return -ENXIO;

	if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
		irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
			IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
	if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
		irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
			IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
	irqflags |= IRQF_ONESHOT;

	label = make_irq_label(line->req->label);
	if (IS_ERR(label))
		return PTR_ERR(label);

	/* Request a thread to read the events */
	ret = request_threaded_irq(irq, edge_irq_handler, edge_irq_thread,
				   irqflags, label, line);
	if (ret) {
		free_irq_label(label);
		return ret;
	}

	line->irq = irq;
	return 0;
}

static int edge_detector_update(struct line *line,
				struct gpio_v2_line_config *lc,
				unsigned int line_idx, u64 edflags)
{
	u64 eflags;
	int ret;
	u64 active_edflags = READ_ONCE(line->edflags);
	unsigned int debounce_period_us =
		gpio_v2_line_config_debounce_period(lc, line_idx);

	if ((active_edflags == edflags) &&
	    (READ_ONCE(line->debounce_period_us) == debounce_period_us))
		return 0;

	/* sw debounced and still will be...*/
	if (debounce_period_us && READ_ONCE(line->sw_debounced)) {
		line_set_debounce_period(line, debounce_period_us);
		/*
		 * ensure event fifo is initialised if edge detection
		 * is now enabled.
		 */
		eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
		if (eflags && !kfifo_initialized(&line->req->events)) {
			ret = kfifo_alloc(&line->req->events,
					  line->req->event_buffer_size,
					  GFP_KERNEL);
			if (ret)
				return ret;
		}
		return 0;
	}

	/* reconfiguring edge detection or sw debounce being disabled */
	if ((line->irq && !READ_ONCE(line->sw_debounced)) ||
	    (active_edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) ||
	    (!debounce_period_us && READ_ONCE(line->sw_debounced)))
		edge_detector_stop(line);

	return edge_detector_setup(line, lc, line_idx, edflags);
}

static u64 gpio_v2_line_config_flags(struct gpio_v2_line_config *lc,
				     unsigned int line_idx)
{
	unsigned int i;
	u64 mask = BIT_ULL(line_idx);

	for (i = 0; i < lc->num_attrs; i++) {
		if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_FLAGS) &&
		    (lc->attrs[i].mask & mask))
			return lc->attrs[i].attr.flags;
	}
	return lc->flags;
}

static int gpio_v2_line_config_output_value(struct gpio_v2_line_config *lc,
					    unsigned int line_idx)
{
	unsigned int i;
	u64 mask = BIT_ULL(line_idx);

	for (i = 0; i < lc->num_attrs; i++) {
		if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES) &&
		    (lc->attrs[i].mask & mask))
			return !!(lc->attrs[i].attr.values & mask);
	}
	return 0;
}
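
/*
 * As with the debounce attribute, the FLAGS and OUTPUT_VALUES attributes
 * handled above override lc->flags for the lines selected by the attribute
 * mask. Illustrative sketch: requesting line 0 as input and line 1 as an
 * output driven high could be configured as
 *
 *	config.flags = GPIO_V2_LINE_FLAG_INPUT;
 *	config.num_attrs = 2;
 *	config.attrs[0].attr.id = GPIO_V2_LINE_ATTR_ID_FLAGS;
 *	config.attrs[0].attr.flags = GPIO_V2_LINE_FLAG_OUTPUT;
 *	config.attrs[0].mask = BIT(1);
 *	config.attrs[1].attr.id = GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES;
 *	config.attrs[1].attr.values = BIT(1);
 *	config.attrs[1].mask = BIT(1);
 */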

static int gpio_v2_line_flags_validate(u64 flags)
{
	/* Return an error if an unknown flag is set */
	if (flags & ~GPIO_V2_LINE_VALID_FLAGS)
		return -EINVAL;

	if (!IS_ENABLED(CONFIG_HTE) &&
	    (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
		return -EOPNOTSUPP;

	/*
	 * Do not allow both INPUT and OUTPUT flags to be set as they are
	 * contradictory.
	 */
	if ((flags & GPIO_V2_LINE_FLAG_INPUT) &&
	    (flags & GPIO_V2_LINE_FLAG_OUTPUT))
		return -EINVAL;

	/* Only allow one event clock source */
	if (IS_ENABLED(CONFIG_HTE) &&
	    (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME) &&
	    (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
		return -EINVAL;

	/* Edge detection requires explicit input. */
	if ((flags & GPIO_V2_LINE_EDGE_FLAGS) &&
	    !(flags & GPIO_V2_LINE_FLAG_INPUT))
		return -EINVAL;

	/*
	 * Do not allow OPEN_SOURCE and OPEN_DRAIN flags in a single
	 * request. If the hardware actually supports enabling both at the
	 * same time the electrical result would be disastrous.
	 */
	if ((flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN) &&
	    (flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE))
		return -EINVAL;

	/* Drive requires explicit output direction. */
	if ((flags & GPIO_V2_LINE_DRIVE_FLAGS) &&
	    !(flags & GPIO_V2_LINE_FLAG_OUTPUT))
		return -EINVAL;

	/* Bias requires explicit direction. */
	if ((flags & GPIO_V2_LINE_BIAS_FLAGS) &&
	    !(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
		return -EINVAL;

	/* Only one bias flag can be set. */
	if (((flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED) &&
	     (flags & (GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN |
		       GPIO_V2_LINE_FLAG_BIAS_PULL_UP))) ||
	    ((flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN) &&
	     (flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)))
		return -EINVAL;

	return 0;
}

static int gpio_v2_line_config_validate(struct gpio_v2_line_config *lc,
					unsigned int num_lines)
{
	unsigned int i;
	u64 flags;
	int ret;

	if (lc->num_attrs > GPIO_V2_LINE_NUM_ATTRS_MAX)
		return -EINVAL;

	if (memchr_inv(lc->padding, 0, sizeof(lc->padding)))
		return -EINVAL;

	for (i = 0; i < num_lines; i++) {
		flags = gpio_v2_line_config_flags(lc, i);
		ret = gpio_v2_line_flags_validate(flags);
		if (ret)
			return ret;

		/* debounce requires explicit input */
		if (gpio_v2_line_config_debounced(lc, i) &&
		    !(flags & GPIO_V2_LINE_FLAG_INPUT))
			return -EINVAL;
	}
	return 0;
}

static void gpio_v2_line_config_flags_to_desc_flags(u64 flags,
						    unsigned long *flagsp)
{
	assign_bit(FLAG_ACTIVE_LOW, flagsp,
		   flags & GPIO_V2_LINE_FLAG_ACTIVE_LOW);

	if (flags & GPIO_V2_LINE_FLAG_OUTPUT)
		set_bit(FLAG_IS_OUT, flagsp);
	else if (flags & GPIO_V2_LINE_FLAG_INPUT)
		clear_bit(FLAG_IS_OUT, flagsp);

	assign_bit(FLAG_EDGE_RISING, flagsp,
		   flags & GPIO_V2_LINE_FLAG_EDGE_RISING);
	assign_bit(FLAG_EDGE_FALLING, flagsp,
		   flags & GPIO_V2_LINE_FLAG_EDGE_FALLING);

	assign_bit(FLAG_OPEN_DRAIN, flagsp,
		   flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN);
	assign_bit(FLAG_OPEN_SOURCE, flagsp,
		   flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE);

	assign_bit(FLAG_PULL_UP, flagsp,
		   flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP);
	assign_bit(FLAG_PULL_DOWN, flagsp,
		   flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN);
	assign_bit(FLAG_BIAS_DISABLE, flagsp,
		   flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED);

	assign_bit(FLAG_EVENT_CLOCK_REALTIME, flagsp,
		   flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME);
	assign_bit(FLAG_EVENT_CLOCK_HTE, flagsp,
		   flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
}

static long linereq_get_values(struct linereq *lr, void __user *ip)
{
	struct gpio_v2_line_values lv;
	DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
	struct gpio_desc **descs;
	unsigned int i, didx, num_get;
	bool val;
	int ret;

	/* NOTE: It's ok to read values of output lines. */
	if (copy_from_user(&lv, ip, sizeof(lv)))
		return -EFAULT;

	/*
	 * gpiod_get_array_value_complex() requires compacted desc and val
	 * arrays, rather than the sparse ones in lv.
	 * Calculation of num_get and construction of the desc array is
	 * optimized to avoid allocation for the desc array for the common
	 * num_get == 1 case.
	 */
	/* scan requested lines to calculate the subset to get */
	for (num_get = 0, i = 0; i < lr->num_lines; i++) {
		if (lv.mask & BIT_ULL(i)) {
			num_get++;
			/* capture desc for the num_get == 1 case */
			descs = &lr->lines[i].desc;
		}
	}

	if (num_get == 0)
		return -EINVAL;

	if (num_get != 1) {
		/* build compacted desc array */
		descs = kmalloc_array(num_get, sizeof(*descs), GFP_KERNEL);
		if (!descs)
			return -ENOMEM;
		for (didx = 0, i = 0; i < lr->num_lines; i++) {
			if (lv.mask & BIT_ULL(i)) {
				descs[didx] = lr->lines[i].desc;
				didx++;
			}
		}
	}
	ret = gpiod_get_array_value_complex(false, true, num_get,
					    descs, NULL, vals);

	if (num_get != 1)
		kfree(descs);
	if (ret)
		return ret;

	lv.bits = 0;
	for (didx = 0, i = 0; i < lr->num_lines; i++) {
		/* unpack compacted vals for the response */
		if (lv.mask & BIT_ULL(i)) {
			if (lr->lines[i].sw_debounced)
				val = debounced_value(&lr->lines[i]);
			else
				val = test_bit(didx, vals);
			if (val)
				lv.bits |= BIT_ULL(i);
			didx++;
		}
	}

	if (copy_to_user(ip, &lv, sizeof(lv)))
		return -EFAULT;

	return 0;
}

static long linereq_set_values(struct linereq *lr, void __user *ip)
{
	DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
	struct gpio_v2_line_values lv;
	struct gpio_desc **descs;
	unsigned int i, didx, num_set;
	int ret;

	if (copy_from_user(&lv, ip, sizeof(lv)))
		return -EFAULT;

	guard(mutex)(&lr->config_mutex);

	/*
	 * gpiod_set_array_value_complex() requires compacted desc and val
	 * arrays, rather than the sparse ones in lv.
	 * Calculation of num_set and construction of the descs and vals arrays
	 * is optimized to minimize scanning the lv->mask, and to avoid
	 * allocation for the desc array for the common num_set == 1 case.
	 */
	bitmap_zero(vals, GPIO_V2_LINES_MAX);
	/* scan requested lines to determine the subset to be set */
	for (num_set = 0, i = 0; i < lr->num_lines; i++) {
		if (lv.mask & BIT_ULL(i)) {
			/* setting inputs is not allowed */
			if (!test_bit(FLAG_IS_OUT, &lr->lines[i].desc->flags))
				return -EPERM;
			/* add to compacted values */
			if (lv.bits & BIT_ULL(i))
				__set_bit(num_set, vals);
			num_set++;
			/* capture desc for the num_set == 1 case */
			descs = &lr->lines[i].desc;
		}
	}
	if (num_set == 0)
		return -EINVAL;

	if (num_set != 1) {
		/* build compacted desc array */
		descs = kmalloc_array(num_set, sizeof(*descs), GFP_KERNEL);
		if (!descs)
			return -ENOMEM;
		for (didx = 0, i = 0; i < lr->num_lines; i++) {
			if (lv.mask & BIT_ULL(i)) {
				descs[didx] = lr->lines[i].desc;
				didx++;
			}
		}
	}
	ret = gpiod_set_array_value_complex(false, true, num_set,
					    descs, NULL, vals);

	if (num_set != 1)
		kfree(descs);
	return ret;
}
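
/*
 * Note on lv.mask/lv.bits as handled by linereq_get_values() and
 * linereq_set_values() above: bit n refers to the n-th line of the request
 * (the n-th entry of the offsets array), not to the hardware offset.
 * Illustrative sketch: for a request of three lines, driving the first and
 * third high while leaving the second untouched would use
 *
 *	lv.mask = BIT(0) | BIT(2);
 *	lv.bits = BIT(0) | BIT(2);
 *	ioctl(req_fd, GPIO_V2_LINE_SET_VALUES_IOCTL, &lv);
 */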

static long linereq_set_config(struct linereq *lr, void __user *ip)
{
	struct gpio_v2_line_config lc;
	struct gpio_desc *desc;
	struct line *line;
	unsigned int i;
	u64 flags, edflags;
	int ret;

	if (copy_from_user(&lc, ip, sizeof(lc)))
		return -EFAULT;

	ret = gpio_v2_line_config_validate(&lc, lr->num_lines);
	if (ret)
		return ret;

	guard(mutex)(&lr->config_mutex);

	for (i = 0; i < lr->num_lines; i++) {
		line = &lr->lines[i];
		desc = lr->lines[i].desc;
		flags = gpio_v2_line_config_flags(&lc, i);
		gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
		edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
		/*
		 * Lines have to be requested explicitly for input
		 * or output, else the line will be treated "as is".
		 */
		if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
			int val = gpio_v2_line_config_output_value(&lc, i);

			edge_detector_stop(line);
			ret = gpiod_direction_output(desc, val);
			if (ret)
				return ret;
		} else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
			ret = gpiod_direction_input(desc);
			if (ret)
				return ret;

			ret = edge_detector_update(line, &lc, i, edflags);
			if (ret)
				return ret;
		}

		WRITE_ONCE(line->edflags, edflags);

		gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
	}
	return 0;
}

static long linereq_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct linereq *lr = file->private_data;
	void __user *ip = (void __user *)arg;

	guard(srcu)(&lr->gdev->srcu);

	if (!rcu_access_pointer(lr->gdev->chip))
		return -ENODEV;

	switch (cmd) {
	case GPIO_V2_LINE_GET_VALUES_IOCTL:
		return linereq_get_values(lr, ip);
	case GPIO_V2_LINE_SET_VALUES_IOCTL:
		return linereq_set_values(lr, ip);
	case GPIO_V2_LINE_SET_CONFIG_IOCTL:
		return linereq_set_config(lr, ip);
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static long linereq_ioctl_compat(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	return linereq_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static __poll_t linereq_poll(struct file *file,
			     struct poll_table_struct *wait)
{
	struct linereq *lr = file->private_data;
	__poll_t events = 0;

	guard(srcu)(&lr->gdev->srcu);

	if (!rcu_access_pointer(lr->gdev->chip))
		return EPOLLHUP | EPOLLERR;

	poll_wait(file, &lr->wait, wait);

	if (!kfifo_is_empty_spinlocked_noirqsave(&lr->events,
						 &lr->wait.lock))
		events = EPOLLIN | EPOLLRDNORM;

	return events;
}

static ssize_t linereq_read(struct file *file, char __user *buf,
			    size_t count, loff_t *f_ps)
{
	struct linereq *lr = file->private_data;
	struct gpio_v2_line_event le;
	ssize_t bytes_read = 0;
	int ret;

	guard(srcu)(&lr->gdev->srcu);

	if (!rcu_access_pointer(lr->gdev->chip))
		return -ENODEV;

	if (count < sizeof(le))
		return -EINVAL;

	do {
		scoped_guard(spinlock, &lr->wait.lock) {
			if (kfifo_is_empty(&lr->events)) {
				if (bytes_read)
					return bytes_read;

				if (file->f_flags & O_NONBLOCK)
					return -EAGAIN;

				ret = wait_event_interruptible_locked(lr->wait,
						!kfifo_is_empty(&lr->events));
				if (ret)
					return ret;
			}

			ret = kfifo_out(&lr->events, &le, 1);
		}
		if (ret != 1) {
			/*
			 * This should never happen - we were holding the
			 * lock from the moment we learned the fifo is no
			 * longer empty until now.
			 */
			ret = -EIO;
			break;
		}

		if (copy_to_user(buf + bytes_read, &le, sizeof(le)))
			return -EFAULT;
		bytes_read += sizeof(le);
	} while (count >= bytes_read + sizeof(le));

	return bytes_read;
}

static void linereq_free(struct linereq *lr)
{
	struct line *line;
	unsigned int i;

	if (lr->device_unregistered_nb.notifier_call)
		blocking_notifier_chain_unregister(&lr->gdev->device_notifier,
						   &lr->device_unregistered_nb);

	for (i = 0; i < lr->num_lines; i++) {
		line = &lr->lines[i];
		if (!line->desc)
			continue;

		edge_detector_stop(line);
		if (line_has_supinfo(line))
			supinfo_erase(line);
		gpiod_free(line->desc);
	}
	kfifo_free(&lr->events);
	kfree(lr->label);
	gpio_device_put(lr->gdev);
	kvfree(lr);
}

static int linereq_release(struct inode *inode, struct file *file)
{
	struct linereq *lr = file->private_data;

	linereq_free(lr);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void linereq_show_fdinfo(struct seq_file *out, struct file *file)
{
	struct linereq *lr = file->private_data;
	struct device *dev = &lr->gdev->dev;
	u16 i;

	seq_printf(out, "gpio-chip:\t%s\n", dev_name(dev));

	for (i = 0; i < lr->num_lines; i++)
		seq_printf(out, "gpio-line:\t%d\n",
			   gpio_chip_hwgpio(lr->lines[i].desc));
}
#endif

static const struct file_operations line_fileops = {
	.release = linereq_release,
	.read = linereq_read,
	.poll = linereq_poll,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = linereq_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = linereq_ioctl_compat,
#endif
#ifdef CONFIG_PROC_FS
	.show_fdinfo = linereq_show_fdinfo,
#endif
};

static int linereq_create(struct gpio_device *gdev, void __user *ip)
{
	struct gpio_v2_line_request ulr;
	struct gpio_v2_line_config *lc;
	struct linereq *lr;
	struct file *file;
	u64 flags, edflags;
	unsigned int i;
	int fd, ret;

	if (copy_from_user(&ulr, ip, sizeof(ulr)))
		return -EFAULT;

	if ((ulr.num_lines == 0) || (ulr.num_lines > GPIO_V2_LINES_MAX))
		return -EINVAL;

	if (memchr_inv(ulr.padding, 0, sizeof(ulr.padding)))
		return -EINVAL;

	lc = &ulr.config;
	ret = gpio_v2_line_config_validate(lc, ulr.num_lines);
	if (ret)
		return ret;

	lr = kvzalloc(struct_size(lr, lines, ulr.num_lines), GFP_KERNEL);
	if (!lr)
		return -ENOMEM;
	lr->num_lines = ulr.num_lines;

	lr->gdev = gpio_device_get(gdev);

	for (i = 0; i < ulr.num_lines; i++) {
		lr->lines[i].req = lr;
		WRITE_ONCE(lr->lines[i].sw_debounced, 0);
		INIT_DELAYED_WORK(&lr->lines[i].work, debounce_work_func);
	}

	if (ulr.consumer[0] != '\0') {
		/* label is only initialized if consumer is set */
		lr->label = kstrndup(ulr.consumer, sizeof(ulr.consumer) - 1,
				     GFP_KERNEL);
		if (!lr->label) {
			ret = -ENOMEM;
			goto out_free_linereq;
		}
	}

	mutex_init(&lr->config_mutex);
	init_waitqueue_head(&lr->wait);
	lr->event_buffer_size = ulr.event_buffer_size;
	if (lr->event_buffer_size == 0)
		lr->event_buffer_size = ulr.num_lines * 16;
	else if (lr->event_buffer_size > GPIO_V2_LINES_MAX * 16)
		lr->event_buffer_size = GPIO_V2_LINES_MAX * 16;

	atomic_set(&lr->seqno, 0);

	/* Request each GPIO */
	for (i = 0; i < ulr.num_lines; i++) {
		u32 offset = ulr.offsets[i];
		struct gpio_desc *desc = gpio_device_get_desc(gdev, offset);

		if (IS_ERR(desc)) {
			ret = PTR_ERR(desc);
			goto out_free_linereq;
		}

		ret = gpiod_request_user(desc, lr->label);
		if (ret)
			goto out_free_linereq;

		lr->lines[i].desc = desc;
		flags = gpio_v2_line_config_flags(lc, i);
		gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);

		ret = gpiod_set_transitory(desc, false);
		if (ret < 0)
			goto out_free_linereq;

		edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
		/*
		 * Lines have to be requested explicitly for input
		 * or output, else the line will be treated "as is".
		 */
		if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
			int val = gpio_v2_line_config_output_value(lc, i);

			ret = gpiod_direction_output(desc, val);
			if (ret)
				goto out_free_linereq;
		} else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
			ret = gpiod_direction_input(desc);
			if (ret)
				goto out_free_linereq;

			ret = edge_detector_setup(&lr->lines[i], lc, i,
						  edflags);
			if (ret)
				goto out_free_linereq;
		}

		lr->lines[i].edflags = edflags;

		gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);

		dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
			offset);
	}

	lr->device_unregistered_nb.notifier_call = linereq_unregistered_notify;
	ret = blocking_notifier_chain_register(&gdev->device_notifier,
					       &lr->device_unregistered_nb);
	if (ret)
		goto out_free_linereq;

	fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto out_free_linereq;
	}

	file = anon_inode_getfile("gpio-line", &line_fileops, lr,
				  O_RDONLY | O_CLOEXEC);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto out_put_unused_fd;
	}

	ulr.fd = fd;
	if (copy_to_user(ip, &ulr, sizeof(ulr))) {
		/*
		 * fput() will trigger the release() callback, so do not go onto
		 * the regular error cleanup path here.
		 */
		fput(file);
		put_unused_fd(fd);
		return -EFAULT;
	}

	fd_install(fd, file);

	dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
		lr->num_lines);

	return 0;

out_put_unused_fd:
	put_unused_fd(fd);
out_free_linereq:
	linereq_free(lr);
	return ret;
}

#ifdef CONFIG_GPIO_CDEV_V1

/*
 * GPIO line event management
 */

/**
 * struct lineevent_state - contains the state of a userspace event
 * @gdev: the GPIO device the event pertains to
 * @label: consumer label used to tag descriptors
 * @desc: the GPIO descriptor held by this event
 * @eflags: the event flags this line was requested with
 * @irq: the interrupt that triggers in response to events on this GPIO
 * @wait: wait queue that handles blocking reads of events
 * @device_unregistered_nb: notifier block for receiving gdev unregister events
 * @events: KFIFO for the GPIO events
 * @timestamp: cache for the timestamp storing it between hardirq
 * and IRQ thread, used to bring the timestamp close to the actual
 * event
 */
struct lineevent_state {
	struct gpio_device *gdev;
	const char *label;
	struct gpio_desc *desc;
	u32 eflags;
	int irq;
	wait_queue_head_t wait;
	struct notifier_block device_unregistered_nb;
	DECLARE_KFIFO(events, struct gpioevent_data, 16);
	u64 timestamp;
};

#define GPIOEVENT_REQUEST_VALID_FLAGS \
	(GPIOEVENT_REQUEST_RISING_EDGE | \
	GPIOEVENT_REQUEST_FALLING_EDGE)

static __poll_t lineevent_poll(struct file *file,
			       struct poll_table_struct *wait)
{
	struct lineevent_state *le = file->private_data;
	__poll_t events = 0;

	guard(srcu)(&le->gdev->srcu);

	if (!rcu_access_pointer(le->gdev->chip))
		return EPOLLHUP | EPOLLERR;

	poll_wait(file, &le->wait, wait);

	if (!kfifo_is_empty_spinlocked_noirqsave(&le->events, &le->wait.lock))
		events = EPOLLIN | EPOLLRDNORM;

	return events;
}

static int lineevent_unregistered_notify(struct notifier_block *nb,
					 unsigned long action, void *data)
{
	struct lineevent_state *le = container_of(nb, struct lineevent_state,
						  device_unregistered_nb);

	wake_up_poll(&le->wait, EPOLLIN | EPOLLERR);

	return NOTIFY_OK;
}

struct compat_gpioeevent_data {
	compat_u64 timestamp;
	u32 id;
};

static ssize_t lineevent_read(struct file *file, char __user *buf,
			      size_t count, loff_t *f_ps)
{
	struct lineevent_state *le = file->private_data;
	struct gpioevent_data ge;
	ssize_t bytes_read = 0;
	ssize_t ge_size;
	int ret;

	guard(srcu)(&le->gdev->srcu);

	if (!rcu_access_pointer(le->gdev->chip))
		return -ENODEV;

	/*
	 * When the compat system call is being used, struct gpioevent_data,
	 * at least on ia32, has a different size due to alignment
	 * differences. Because the first member is 64 bits followed by one of
	 * 32 bits, there is no gap between them. The only difference is the
	 * padding at the end of the data structure. Hence, we calculate the
	 * actual sizeof() and pass this as an argument to copy_to_user() to
	 * drop unneeded bytes from the output.
static void lineevent_free(struct lineevent_state *le)
{
	if (le->device_unregistered_nb.notifier_call)
		blocking_notifier_chain_unregister(&le->gdev->device_notifier,
						   &le->device_unregistered_nb);
	if (le->irq)
		free_irq_label(free_irq(le->irq, le));
	if (le->desc)
		gpiod_free(le->desc);
	kfree(le->label);
	gpio_device_put(le->gdev);
	kfree(le);
}

static int lineevent_release(struct inode *inode, struct file *file)
{
	lineevent_free(file->private_data);
	return 0;
}

static long lineevent_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	struct lineevent_state *le = file->private_data;
	void __user *ip = (void __user *)arg;
	struct gpiohandle_data ghd;

	guard(srcu)(&le->gdev->srcu);

	if (!rcu_access_pointer(le->gdev->chip))
		return -ENODEV;

	/*
	 * We can get the value for an event line but not set it,
	 * because it is input by definition.
	 */
	if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
		int val;

		memset(&ghd, 0, sizeof(ghd));

		val = gpiod_get_value_cansleep(le->desc);
		if (val < 0)
			return val;
		ghd.values[0] = val;

		if (copy_to_user(ip, &ghd, sizeof(ghd)))
			return -EFAULT;

		return 0;
	}
	return -EINVAL;
}

#ifdef CONFIG_COMPAT
static long lineevent_ioctl_compat(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	return lineevent_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations lineevent_fileops = {
	.release = lineevent_release,
	.read = lineevent_read,
	.poll = lineevent_poll,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = lineevent_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = lineevent_ioctl_compat,
#endif
};
static irqreturn_t lineevent_irq_thread(int irq, void *p)
{
	struct lineevent_state *le = p;
	struct gpioevent_data ge;
	int ret;

	/* Do not leak kernel stack to userspace */
	memset(&ge, 0, sizeof(ge));

	/*
	 * We may be running from a nested threaded interrupt in which case
	 * we didn't get the timestamp from lineevent_irq_handler().
	 */
	if (!le->timestamp)
		ge.timestamp = ktime_get_ns();
	else
		ge.timestamp = le->timestamp;

	if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
	    && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
		int level = gpiod_get_value_cansleep(le->desc);

		if (level)
			/* Emit low-to-high event */
			ge.id = GPIOEVENT_EVENT_RISING_EDGE;
		else
			/* Emit high-to-low event */
			ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
	} else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
		/* Emit low-to-high event */
		ge.id = GPIOEVENT_EVENT_RISING_EDGE;
	} else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
		/* Emit high-to-low event */
		ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
	} else {
		return IRQ_NONE;
	}

	ret = kfifo_in_spinlocked_noirqsave(&le->events, &ge,
					    1, &le->wait.lock);
	if (ret)
		wake_up_poll(&le->wait, EPOLLIN);
	else
		pr_debug_ratelimited("event FIFO is full - event dropped\n");

	return IRQ_HANDLED;
}

static irqreturn_t lineevent_irq_handler(int irq, void *p)
{
	struct lineevent_state *le = p;

	/*
	 * Just store the timestamp in hardirq context so we get it as
	 * close in time as possible to the actual event.
	 */
	le->timestamp = ktime_get_ns();

	return IRQ_WAKE_THREAD;
}
static int lineevent_create(struct gpio_device *gdev, void __user *ip)
{
	struct gpioevent_request eventreq;
	struct lineevent_state *le;
	struct gpio_desc *desc;
	struct file *file;
	u32 offset;
	u32 lflags;
	u32 eflags;
	int fd;
	int ret;
	int irq, irqflags = 0;
	char *label;

	if (copy_from_user(&eventreq, ip, sizeof(eventreq)))
		return -EFAULT;

	offset = eventreq.lineoffset;
	lflags = eventreq.handleflags;
	eflags = eventreq.eventflags;

	desc = gpio_device_get_desc(gdev, offset);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	/* Return an error if an unknown flag is set */
	if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
	    (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS))
		return -EINVAL;

	/* This is just wrong: we don't look for events on output lines */
	if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
	    (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
	    (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
		return -EINVAL;

	/* Only one bias flag can be set. */
	if (((lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
	     (lflags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
			GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
	    ((lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
	     (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
		return -EINVAL;

	le = kzalloc(sizeof(*le), GFP_KERNEL);
	if (!le)
		return -ENOMEM;
	le->gdev = gpio_device_get(gdev);

	if (eventreq.consumer_label[0] != '\0') {
		/* label is only initialized if consumer_label is set */
		le->label = kstrndup(eventreq.consumer_label,
				     sizeof(eventreq.consumer_label) - 1,
				     GFP_KERNEL);
		if (!le->label) {
			ret = -ENOMEM;
			goto out_free_le;
		}
	}

	ret = gpiod_request_user(desc, le->label);
	if (ret)
		goto out_free_le;
	le->desc = desc;
	le->eflags = eflags;

	linehandle_flags_to_desc_flags(lflags, &desc->flags);

	ret = gpiod_direction_input(desc);
	if (ret)
		goto out_free_le;

	gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);

	irq = gpiod_to_irq(desc);
	if (irq <= 0) {
		ret = -ENODEV;
		goto out_free_le;
	}

	if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
		irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
			IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
	if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
		irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
			IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
	irqflags |= IRQF_ONESHOT;

	INIT_KFIFO(le->events);
	init_waitqueue_head(&le->wait);

	le->device_unregistered_nb.notifier_call = lineevent_unregistered_notify;
	ret = blocking_notifier_chain_register(&gdev->device_notifier,
					       &le->device_unregistered_nb);
	if (ret)
		goto out_free_le;

	label = make_irq_label(le->label);
	if (IS_ERR(label)) {
		ret = PTR_ERR(label);
		goto out_free_le;
	}

	/* Request a thread to read the events */
	ret = request_threaded_irq(irq,
				   lineevent_irq_handler,
				   lineevent_irq_thread,
				   irqflags,
				   label,
				   le);
	if (ret) {
		free_irq_label(label);
		goto out_free_le;
	}

	le->irq = irq;

	fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto out_free_le;
	}

	file = anon_inode_getfile("gpio-event",
				  &lineevent_fileops,
				  le,
				  O_RDONLY | O_CLOEXEC);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto out_put_unused_fd;
	}

	eventreq.fd = fd;
	if (copy_to_user(ip, &eventreq, sizeof(eventreq))) {
		/*
		 * fput() will trigger the release() callback, so do not go onto
		 * the regular error cleanup path here.
		 */
		fput(file);
		put_unused_fd(fd);
		return -EFAULT;
	}

	fd_install(fd, file);

	return 0;

out_put_unused_fd:
	put_unused_fd(fd);
out_free_le:
	lineevent_free(le);
	return ret;
}
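/*
 * For reference, a minimal userspace sketch of the deprecated v1 event
 * request handled above (not part of this driver; error handling elided,
 * chip path and offset made up):
 *
 *	struct gpioevent_request req = { 0 };
 *	struct gpioevent_data ev;
 *	int chip_fd = open("/dev/gpiochip0", O_RDWR);
 *
 *	req.lineoffset = 4;
 *	req.handleflags = GPIOHANDLE_REQUEST_INPUT;
 *	req.eventflags = GPIOEVENT_REQUEST_BOTH_EDGES;
 *	ioctl(chip_fd, GPIO_GET_LINEEVENT_IOCTL, &req);
 *
 *	// req.fd now refers to lineevent_fileops; block until an edge fires
 *	read(req.fd, &ev, sizeof(ev));
 *
 * New code should use GPIO_V2_GET_LINE_IOCTL with the v2 edge detection
 * flags instead.
 */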
static void gpio_v2_line_info_to_v1(struct gpio_v2_line_info *info_v2,
				    struct gpioline_info *info_v1)
{
	u64 flagsv2 = info_v2->flags;

	memcpy(info_v1->name, info_v2->name, sizeof(info_v1->name));
	memcpy(info_v1->consumer, info_v2->consumer, sizeof(info_v1->consumer));
	info_v1->line_offset = info_v2->offset;
	info_v1->flags = 0;

	if (flagsv2 & GPIO_V2_LINE_FLAG_USED)
		info_v1->flags |= GPIOLINE_FLAG_KERNEL;

	if (flagsv2 & GPIO_V2_LINE_FLAG_OUTPUT)
		info_v1->flags |= GPIOLINE_FLAG_IS_OUT;

	if (flagsv2 & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
		info_v1->flags |= GPIOLINE_FLAG_ACTIVE_LOW;

	if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_DRAIN)
		info_v1->flags |= GPIOLINE_FLAG_OPEN_DRAIN;
	if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_SOURCE)
		info_v1->flags |= GPIOLINE_FLAG_OPEN_SOURCE;

	if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)
		info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_UP;
	if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN)
		info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_DOWN;
	if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_DISABLED)
		info_v1->flags |= GPIOLINE_FLAG_BIAS_DISABLE;
}

static void gpio_v2_line_info_changed_to_v1(
		struct gpio_v2_line_info_changed *lic_v2,
		struct gpioline_info_changed *lic_v1)
{
	memset(lic_v1, 0, sizeof(*lic_v1));
	gpio_v2_line_info_to_v1(&lic_v2->info, &lic_v1->info);
	lic_v1->timestamp = lic_v2->timestamp_ns;
	lic_v1->event_type = lic_v2->event_type;
}

#endif /* CONFIG_GPIO_CDEV_V1 */
static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
				  struct gpio_v2_line_info *info)
{
	unsigned long dflags;
	const char *label;

	CLASS(gpio_chip_guard, guard)(desc);
	if (!guard.gc)
		return;

	memset(info, 0, sizeof(*info));
	info->offset = gpio_chip_hwgpio(desc);

	if (desc->name)
		strscpy(info->name, desc->name, sizeof(info->name));

	dflags = READ_ONCE(desc->flags);

	scoped_guard(srcu, &desc->gdev->desc_srcu) {
		label = gpiod_get_label(desc);
		if (label && test_bit(FLAG_REQUESTED, &dflags))
			strscpy(info->consumer, label,
				sizeof(info->consumer));
	}

	/*
	 * Userspace only needs to know that the kernel is using this GPIO so
	 * it can't use it.
	 * The calculation of the used flag is slightly racy, as it may read
	 * desc, gc and pinctrl state without a lock covering all three at
	 * once. Worst case if the line is in transition and the calculation
	 * is inconsistent then it looks to the user like they performed the
	 * read on the other side of the transition - but that can always
	 * happen.
	 * The definitive test that a line is available to userspace is to
	 * request it.
	 */
	if (test_bit(FLAG_REQUESTED, &dflags) ||
	    test_bit(FLAG_IS_HOGGED, &dflags) ||
	    test_bit(FLAG_USED_AS_IRQ, &dflags) ||
	    test_bit(FLAG_EXPORT, &dflags) ||
	    test_bit(FLAG_SYSFS, &dflags) ||
	    !gpiochip_line_is_valid(guard.gc, info->offset) ||
	    !pinctrl_gpio_can_use_line(guard.gc, info->offset))
		info->flags |= GPIO_V2_LINE_FLAG_USED;

	if (test_bit(FLAG_IS_OUT, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_OUTPUT;
	else
		info->flags |= GPIO_V2_LINE_FLAG_INPUT;

	if (test_bit(FLAG_ACTIVE_LOW, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_ACTIVE_LOW;

	if (test_bit(FLAG_OPEN_DRAIN, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_OPEN_DRAIN;
	if (test_bit(FLAG_OPEN_SOURCE, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_OPEN_SOURCE;

	if (test_bit(FLAG_BIAS_DISABLE, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_BIAS_DISABLED;
	if (test_bit(FLAG_PULL_DOWN, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN;
	if (test_bit(FLAG_PULL_UP, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_UP;

	if (test_bit(FLAG_EDGE_RISING, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_EDGE_RISING;
	if (test_bit(FLAG_EDGE_FALLING, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_EDGE_FALLING;

	if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
	else if (test_bit(FLAG_EVENT_CLOCK_HTE, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
}

struct gpio_chardev_data {
	struct gpio_device *gdev;
	wait_queue_head_t wait;
	DECLARE_KFIFO(events, struct gpio_v2_line_info_changed, 32);
	struct notifier_block lineinfo_changed_nb;
	struct notifier_block device_unregistered_nb;
	unsigned long *watched_lines;
#ifdef CONFIG_GPIO_CDEV_V1
	atomic_t watch_abi_version;
#endif
};

static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip)
{
	struct gpio_device *gdev = cdev->gdev;
	struct gpiochip_info chipinfo;

	memset(&chipinfo, 0, sizeof(chipinfo));

	strscpy(chipinfo.name, dev_name(&gdev->dev), sizeof(chipinfo.name));
	strscpy(chipinfo.label, gdev->label, sizeof(chipinfo.label));
	chipinfo.lines = gdev->ngpio;
	if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
		return -EFAULT;
	return 0;
}
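/*
 * For reference, a minimal userspace sketch of the chip info query served
 * above (not part of this driver; error handling elided, chip path made up):
 *
 *	struct gpiochip_info info;
 *	int chip_fd = open("/dev/gpiochip0", O_RDONLY);
 *
 *	ioctl(chip_fd, GPIO_GET_CHIPINFO_IOCTL, &info);
 *	printf("%s [%s] (%u lines)\n", info.name, info.label, info.lines);
 */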
#ifdef CONFIG_GPIO_CDEV_V1
/*
 * Returns 0 if the ABI version was not yet set or matches the requested one,
 * else the previously selected ABI version.
 */
static int lineinfo_ensure_abi_version(struct gpio_chardev_data *cdata,
				       unsigned int version)
{
	int abiv = atomic_cmpxchg(&cdata->watch_abi_version, 0, version);

	if (abiv == version)
		return 0;

	return abiv;
}

static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip,
			   bool watch)
{
	struct gpio_desc *desc;
	struct gpioline_info lineinfo;
	struct gpio_v2_line_info lineinfo_v2;

	if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
		return -EFAULT;

	/* this doubles as a range check on line_offset */
	desc = gpio_device_get_desc(cdev->gdev, lineinfo.line_offset);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	if (watch) {
		if (lineinfo_ensure_abi_version(cdev, 1))
			return -EPERM;

		if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
			return -EBUSY;
	}

	gpio_desc_to_lineinfo(desc, &lineinfo_v2);
	gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);

	if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
		if (watch)
			clear_bit(lineinfo.line_offset, cdev->watched_lines);
		return -EFAULT;
	}

	return 0;
}
#endif

static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
			bool watch)
{
	struct gpio_desc *desc;
	struct gpio_v2_line_info lineinfo;

	if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
		return -EFAULT;

	if (memchr_inv(lineinfo.padding, 0, sizeof(lineinfo.padding)))
		return -EINVAL;

	desc = gpio_device_get_desc(cdev->gdev, lineinfo.offset);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	if (watch) {
#ifdef CONFIG_GPIO_CDEV_V1
		if (lineinfo_ensure_abi_version(cdev, 2))
			return -EPERM;
#endif
		if (test_and_set_bit(lineinfo.offset, cdev->watched_lines))
			return -EBUSY;
	}
	gpio_desc_to_lineinfo(desc, &lineinfo);
	supinfo_to_lineinfo(desc, &lineinfo);

	if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
		if (watch)
			clear_bit(lineinfo.offset, cdev->watched_lines);
		return -EFAULT;
	}

	return 0;
}

static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
{
	__u32 offset;

	if (copy_from_user(&offset, ip, sizeof(offset)))
		return -EFAULT;

	if (offset >= cdev->gdev->ngpio)
		return -EINVAL;

	if (!test_and_clear_bit(offset, cdev->watched_lines))
		return -EBUSY;

	return 0;
}

/*
 * gpio_ioctl() - ioctl handler for the GPIO chardev
 */
static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct gpio_chardev_data *cdev = file->private_data;
	struct gpio_device *gdev = cdev->gdev;
	void __user *ip = (void __user *)arg;

	guard(srcu)(&gdev->srcu);

	/* We fail any subsequent ioctl()s when the chip is gone */
	if (!rcu_access_pointer(gdev->chip))
		return -ENODEV;

	/* Fill in the struct and pass to userspace */
	switch (cmd) {
	case GPIO_GET_CHIPINFO_IOCTL:
		return chipinfo_get(cdev, ip);
#ifdef CONFIG_GPIO_CDEV_V1
	case GPIO_GET_LINEHANDLE_IOCTL:
		return linehandle_create(gdev, ip);
	case GPIO_GET_LINEEVENT_IOCTL:
		return lineevent_create(gdev, ip);
	case GPIO_GET_LINEINFO_IOCTL:
		return lineinfo_get_v1(cdev, ip, false);
	case GPIO_GET_LINEINFO_WATCH_IOCTL:
		return lineinfo_get_v1(cdev, ip, true);
#endif /* CONFIG_GPIO_CDEV_V1 */
	case GPIO_V2_GET_LINEINFO_IOCTL:
		return lineinfo_get(cdev, ip, false);
	case GPIO_V2_GET_LINEINFO_WATCH_IOCTL:
		return lineinfo_get(cdev, ip, true);
	case GPIO_V2_GET_LINE_IOCTL:
		return linereq_create(gdev, ip);
	case GPIO_GET_LINEINFO_UNWATCH_IOCTL:
		return lineinfo_unwatch(cdev, ip);
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static long gpio_ioctl_compat(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	return gpio_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif
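/*
 * For reference, a minimal userspace sketch of the v2 line-info watch path
 * dispatched above (not part of this driver; error handling elided, chip
 * path and offset made up):
 *
 *	struct gpio_v2_line_info info = { 0 };
 *	struct gpio_v2_line_info_changed change;
 *	int chip_fd = open("/dev/gpiochip0", O_RDONLY);
 *
 *	info.offset = 7;
 *	ioctl(chip_fd, GPIO_V2_GET_LINEINFO_WATCH_IOCTL, &info);
 *
 *	// the chip fd itself is now pollable for line state changes
 *	read(chip_fd, &change, sizeof(change));
 *
 *	ioctl(chip_fd, GPIO_GET_LINEINFO_UNWATCH_IOCTL, &info.offset);
 */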
static int lineinfo_changed_notify(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct gpio_chardev_data *cdev =
		container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb);
	struct gpio_v2_line_info_changed chg;
	struct gpio_desc *desc = data;
	int ret;

	if (!test_bit(gpio_chip_hwgpio(desc), cdev->watched_lines))
		return NOTIFY_DONE;

	memset(&chg, 0, sizeof(chg));
	chg.event_type = action;
	chg.timestamp_ns = ktime_get_ns();
	gpio_desc_to_lineinfo(desc, &chg.info);
	supinfo_to_lineinfo(desc, &chg.info);

	ret = kfifo_in_spinlocked(&cdev->events, &chg, 1, &cdev->wait.lock);
	if (ret)
		wake_up_poll(&cdev->wait, EPOLLIN);
	else
		pr_debug_ratelimited("lineinfo event FIFO is full - event dropped\n");

	return NOTIFY_OK;
}

static int gpio_device_unregistered_notify(struct notifier_block *nb,
					   unsigned long action, void *data)
{
	struct gpio_chardev_data *cdev = container_of(nb,
						      struct gpio_chardev_data,
						      device_unregistered_nb);

	wake_up_poll(&cdev->wait, EPOLLIN | EPOLLERR);

	return NOTIFY_OK;
}

static __poll_t lineinfo_watch_poll(struct file *file,
				    struct poll_table_struct *pollt)
{
	struct gpio_chardev_data *cdev = file->private_data;
	__poll_t events = 0;

	guard(srcu)(&cdev->gdev->srcu);

	if (!rcu_access_pointer(cdev->gdev->chip))
		return EPOLLHUP | EPOLLERR;

	poll_wait(file, &cdev->wait, pollt);

	if (!kfifo_is_empty_spinlocked_noirqsave(&cdev->events,
						 &cdev->wait.lock))
		events = EPOLLIN | EPOLLRDNORM;

	return events;
}

static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
				   size_t count, loff_t *off)
{
	struct gpio_chardev_data *cdev = file->private_data;
	struct gpio_v2_line_info_changed event;
	ssize_t bytes_read = 0;
	int ret;
	size_t event_size;

	guard(srcu)(&cdev->gdev->srcu);

	if (!rcu_access_pointer(cdev->gdev->chip))
		return -ENODEV;

#ifndef CONFIG_GPIO_CDEV_V1
	event_size = sizeof(struct gpio_v2_line_info_changed);
	if (count < event_size)
		return -EINVAL;
#endif

	do {
		scoped_guard(spinlock, &cdev->wait.lock) {
			if (kfifo_is_empty(&cdev->events)) {
				if (bytes_read)
					return bytes_read;

				if (file->f_flags & O_NONBLOCK)
					return -EAGAIN;

				ret = wait_event_interruptible_locked(cdev->wait,
						!kfifo_is_empty(&cdev->events));
				if (ret)
					return ret;
			}
#ifdef CONFIG_GPIO_CDEV_V1
			/* must be after kfifo check so watch_abi_version is set */
			if (atomic_read(&cdev->watch_abi_version) == 2)
				event_size = sizeof(struct gpio_v2_line_info_changed);
			else
				event_size = sizeof(struct gpioline_info_changed);
			if (count < event_size)
				return -EINVAL;
#endif
			ret = kfifo_out(&cdev->events, &event, 1);
		}
		if (ret != 1) {
			/* We should never get here. See lineevent_read(). */
			ret = -EIO;
			break;
		}

#ifdef CONFIG_GPIO_CDEV_V1
		if (event_size == sizeof(struct gpio_v2_line_info_changed)) {
			if (copy_to_user(buf + bytes_read, &event, event_size))
				return -EFAULT;
		} else {
			struct gpioline_info_changed event_v1;

			gpio_v2_line_info_changed_to_v1(&event, &event_v1);
			if (copy_to_user(buf + bytes_read, &event_v1,
					 event_size))
				return -EFAULT;
		}
#else
		if (copy_to_user(buf + bytes_read, &event, event_size))
			return -EFAULT;
#endif
		bytes_read += event_size;
	} while (count >= bytes_read + sizeof(event));

	return bytes_read;
}
/**
 * gpio_chrdev_open() - open the chardev for ioctl operations
 * @inode: inode for this chardev
 * @file: file struct for storing private data
 * Returns 0 on success
 */
static int gpio_chrdev_open(struct inode *inode, struct file *file)
{
	struct gpio_device *gdev = container_of(inode->i_cdev,
						struct gpio_device, chrdev);
	struct gpio_chardev_data *cdev;
	int ret = -ENOMEM;

	guard(srcu)(&gdev->srcu);

	/* Fail on open if the backing gpiochip is gone */
	if (!rcu_access_pointer(gdev->chip))
		return -ENODEV;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return -ENOMEM;

	cdev->watched_lines = bitmap_zalloc(gdev->ngpio, GFP_KERNEL);
	if (!cdev->watched_lines)
		goto out_free_cdev;

	init_waitqueue_head(&cdev->wait);
	INIT_KFIFO(cdev->events);
	cdev->gdev = gpio_device_get(gdev);

	cdev->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify;
	ret = blocking_notifier_chain_register(&gdev->line_state_notifier,
					       &cdev->lineinfo_changed_nb);
	if (ret)
		goto out_free_bitmap;

	cdev->device_unregistered_nb.notifier_call =
					gpio_device_unregistered_notify;
	ret = blocking_notifier_chain_register(&gdev->device_notifier,
					       &cdev->device_unregistered_nb);
	if (ret)
		goto out_unregister_line_notifier;

	file->private_data = cdev;

	ret = nonseekable_open(inode, file);
	if (ret)
		goto out_unregister_device_notifier;

	return ret;

out_unregister_device_notifier:
	blocking_notifier_chain_unregister(&gdev->device_notifier,
					   &cdev->device_unregistered_nb);
out_unregister_line_notifier:
	blocking_notifier_chain_unregister(&gdev->line_state_notifier,
					   &cdev->lineinfo_changed_nb);
out_free_bitmap:
	gpio_device_put(gdev);
	bitmap_free(cdev->watched_lines);
out_free_cdev:
	kfree(cdev);
	return ret;
}

/**
 * gpio_chrdev_release() - close chardev after ioctl operations
 * @inode: inode for this chardev
 * @file: file struct for storing private data
 * Returns 0 on success
 */
static int gpio_chrdev_release(struct inode *inode, struct file *file)
{
	struct gpio_chardev_data *cdev = file->private_data;
	struct gpio_device *gdev = cdev->gdev;

	blocking_notifier_chain_unregister(&gdev->device_notifier,
					   &cdev->device_unregistered_nb);
	blocking_notifier_chain_unregister(&gdev->line_state_notifier,
					   &cdev->lineinfo_changed_nb);
	bitmap_free(cdev->watched_lines);
	gpio_device_put(gdev);
	kfree(cdev);

	return 0;
}
static const struct file_operations gpio_fileops = {
	.release = gpio_chrdev_release,
	.open = gpio_chrdev_open,
	.poll = lineinfo_watch_poll,
	.read = lineinfo_watch_read,
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.unlocked_ioctl = gpio_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = gpio_ioctl_compat,
#endif
};

int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt)
{
	struct gpio_chip *gc;
	int ret;

	cdev_init(&gdev->chrdev, &gpio_fileops);
	gdev->chrdev.owner = THIS_MODULE;
	gdev->dev.devt = MKDEV(MAJOR(devt), gdev->id);

	ret = cdev_device_add(&gdev->chrdev, &gdev->dev);
	if (ret)
		return ret;

	guard(srcu)(&gdev->srcu);
	gc = srcu_dereference(gdev->chip, &gdev->srcu);
	if (!gc)
		return -ENODEV;

	chip_dbg(gc, "added GPIO chardev (%d:%d)\n", MAJOR(devt), gdev->id);

	return 0;
}

void gpiolib_cdev_unregister(struct gpio_device *gdev)
{
	cdev_device_del(&gdev->chrdev, &gdev->dev);
	blocking_notifier_call_chain(&gdev->device_notifier, 0, NULL);
}
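/*
 * Note for userspace consumers (a hedged sketch, not part of the driver):
 * around gpiolib_cdev_unregister() the *_unregistered_notify() callbacks
 * above wake any pollers, and once the backing chip is gone the poll
 * handlers report EPOLLHUP | EPOLLERR while further ioctl()s fail with
 * -ENODEV. A robust consumer therefore treats POLLERR/POLLHUP on the chip
 * or line fds as "gpiochip removed", e.g.:
 *
 *	struct pollfd pfd = { .fd = chip_fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);
 *	if (pfd.revents & (POLLERR | POLLHUP))
 *		; // chip is gone - close the fd and re-enumerate
 */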