// SPDX-License-Identifier: GPL-2.0

#include <linux/anon_inodes.h>
#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/build_bug.h>
#include <linux/cdev.h>
#include <linux/cleanup.h>
#include <linux/compat.h>
#include <linux/compiler.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/hte.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/overflow.h>
#include <linux/pinctrl/consumer.h>
#include <linux/poll.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/timekeeping.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>

#include <uapi/linux/gpio.h>

#include "gpiolib.h"
#include "gpiolib-cdev.h"

/*
 * Array sizes must ensure 64-bit alignment and not create holes in the
 * struct packing.
 */
static_assert(IS_ALIGNED(GPIO_V2_LINES_MAX, 2));
static_assert(IS_ALIGNED(GPIO_MAX_NAME_SIZE, 8));

/*
 * Check that uAPI structs are 64-bit aligned for 32/64-bit compatibility
 */
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_attribute), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config_attribute), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_request), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info_changed), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_event), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_values), 8));

/* Character device interface to GPIO.
 *
 * The GPIO character device, /dev/gpiochipN, provides userspace an
 * interface to gpiolib GPIOs via ioctl()s.
 */
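/*
 * For orientation, a minimal userspace sketch of the ioctl() interface
 * described above - this assumes the uAPI definitions from <linux/gpio.h>
 * (GPIO_GET_CHIPINFO_IOCTL and struct gpiochip_info) and omits error
 * handling; it is not part of the driver:
 *
 *	int fd = open("/dev/gpiochip0", O_RDWR | O_CLOEXEC);
 *	struct gpiochip_info info = { 0 };
 *
 *	if (ioctl(fd, GPIO_GET_CHIPINFO_IOCTL, &info) == 0)
 *		printf("%s (%s): %u lines\n", info.name, info.label, info.lines);
 */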
/*
 * GPIO line handle management
 */

#ifdef CONFIG_GPIO_CDEV_V1
/**
 * struct linehandle_state - contains the state of a userspace handle
 * @gdev: the GPIO device the handle pertains to
 * @label: consumer label used to tag descriptors
 * @descs: the GPIO descriptors held by this handle
 * @num_descs: the number of descriptors held in the descs array
 */
struct linehandle_state {
	struct gpio_device *gdev;
	const char *label;
	struct gpio_desc *descs[GPIOHANDLES_MAX];
	u32 num_descs;
};

#define GPIOHANDLE_REQUEST_VALID_FLAGS \
	(GPIOHANDLE_REQUEST_INPUT | \
	 GPIOHANDLE_REQUEST_OUTPUT | \
	 GPIOHANDLE_REQUEST_ACTIVE_LOW | \
	 GPIOHANDLE_REQUEST_BIAS_PULL_UP | \
	 GPIOHANDLE_REQUEST_BIAS_PULL_DOWN | \
	 GPIOHANDLE_REQUEST_BIAS_DISABLE | \
	 GPIOHANDLE_REQUEST_OPEN_DRAIN | \
	 GPIOHANDLE_REQUEST_OPEN_SOURCE)

#define GPIOHANDLE_REQUEST_DIRECTION_FLAGS \
	(GPIOHANDLE_REQUEST_INPUT | \
	 GPIOHANDLE_REQUEST_OUTPUT)

static int linehandle_validate_flags(u32 flags)
{
	/* Return an error if an unknown flag is set */
	if (flags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
		return -EINVAL;

	/*
	 * Do not allow both INPUT & OUTPUT flags to be set as they are
	 * contradictory.
	 */
	if ((flags & GPIOHANDLE_REQUEST_INPUT) &&
	    (flags & GPIOHANDLE_REQUEST_OUTPUT))
		return -EINVAL;

	/*
	 * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
	 * the hardware actually supports enabling both at the same time the
	 * electrical result would be disastrous.
	 */
	if ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) &&
	    (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
		return -EINVAL;

	/* OPEN_DRAIN and OPEN_SOURCE flags only make sense for output mode. */
	if (!(flags & GPIOHANDLE_REQUEST_OUTPUT) &&
	    ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
	     (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE)))
		return -EINVAL;

	/* Bias flags only allowed for input or output mode. */
	if (!((flags & GPIOHANDLE_REQUEST_INPUT) ||
	      (flags & GPIOHANDLE_REQUEST_OUTPUT)) &&
	    ((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) ||
	     (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP) ||
	     (flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN)))
		return -EINVAL;

	/* Only one bias flag can be set. */
	if (((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
	     (flags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
		       GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
	    ((flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
	     (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
		return -EINVAL;

	return 0;
}

static void linehandle_flags_to_desc_flags(u32 lflags, unsigned long *flagsp)
{
	assign_bit(FLAG_ACTIVE_LOW, flagsp,
		   lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW);
	assign_bit(FLAG_OPEN_DRAIN, flagsp,
		   lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN);
	assign_bit(FLAG_OPEN_SOURCE, flagsp,
		   lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE);
	assign_bit(FLAG_PULL_UP, flagsp,
		   lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP);
	assign_bit(FLAG_PULL_DOWN, flagsp,
		   lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN);
	assign_bit(FLAG_BIAS_DISABLE, flagsp,
		   lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE);
}

static long linehandle_set_config(struct linehandle_state *lh,
				  void __user *ip)
{
	struct gpiohandle_config gcnf;
	struct gpio_desc *desc;
	int i, ret;
	u32 lflags;

	if (copy_from_user(&gcnf, ip, sizeof(gcnf)))
		return -EFAULT;

	lflags = gcnf.flags;
	ret = linehandle_validate_flags(lflags);
	if (ret)
		return ret;

	/* Lines must be reconfigured explicitly as input or output. */
	if (!(lflags & GPIOHANDLE_REQUEST_DIRECTION_FLAGS))
		return -EINVAL;

	for (i = 0; i < lh->num_descs; i++) {
		desc = lh->descs[i];
		linehandle_flags_to_desc_flags(lflags, &desc->flags);

		if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
			int val = !!gcnf.default_values[i];

			ret = gpiod_direction_output(desc, val);
			if (ret)
				return ret;
		} else {
			ret = gpiod_direction_input(desc);
			if (ret)
				return ret;
		}

		gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
	}
	return 0;
}

static long linehandle_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct linehandle_state *lh = file->private_data;
	void __user *ip = (void __user *)arg;
	struct gpiohandle_data ghd;
	DECLARE_BITMAP(vals, GPIOHANDLES_MAX);
	unsigned int i;
	int ret;

	guard(srcu)(&lh->gdev->srcu);

	if (!rcu_access_pointer(lh->gdev->chip))
		return -ENODEV;

	switch (cmd) {
	case GPIOHANDLE_GET_LINE_VALUES_IOCTL:
		/* NOTE: It's okay to read values of output lines */
		ret = gpiod_get_array_value_complex(false, true,
						    lh->num_descs, lh->descs,
						    NULL, vals);
		if (ret)
			return ret;

		memset(&ghd, 0, sizeof(ghd));
		for (i = 0; i < lh->num_descs; i++)
			ghd.values[i] = test_bit(i, vals);

		if (copy_to_user(ip, &ghd, sizeof(ghd)))
			return -EFAULT;

		return 0;
	case GPIOHANDLE_SET_LINE_VALUES_IOCTL:
		/*
		 * All line descriptors were created at once with the same
		 * flags so just check if the first one is really output.
		 */
		if (!test_bit(FLAG_IS_OUT, &lh->descs[0]->flags))
			return -EPERM;

		if (copy_from_user(&ghd, ip, sizeof(ghd)))
			return -EFAULT;

		/* Clamp all values to [0,1] */
		for (i = 0; i < lh->num_descs; i++)
			__assign_bit(i, vals, ghd.values[i]);

		/* Reuse the array setting function */
		return gpiod_set_array_value_complex(false,
						     true,
						     lh->num_descs,
						     lh->descs,
						     NULL,
						     vals);
	case GPIOHANDLE_SET_CONFIG_IOCTL:
		return linehandle_set_config(lh, ip);
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static long linehandle_ioctl_compat(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	return linehandle_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static void linehandle_free(struct linehandle_state *lh)
{
	int i;

	for (i = 0; i < lh->num_descs; i++)
		if (lh->descs[i])
			gpiod_free(lh->descs[i]);
	kfree(lh->label);
	gpio_device_put(lh->gdev);
	kfree(lh);
}

static int linehandle_release(struct inode *inode, struct file *file)
{
	linehandle_free(file->private_data);
	return 0;
}

static const struct file_operations linehandle_fileops = {
	.release = linehandle_release,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = linehandle_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = linehandle_ioctl_compat,
#endif
};
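/*
 * For reference, a minimal userspace sketch of the (deprecated) v1 handle
 * request that linehandle_create() below services - assuming the uAPI
 * definitions from <linux/gpio.h>, error handling omitted:
 *
 *	struct gpiohandle_request req = {
 *		.lineoffsets = { 3 },
 *		.lines = 1,
 *		.flags = GPIOHANDLE_REQUEST_OUTPUT,
 *		.default_values = { 1 },
 *		.consumer_label = "example",
 *	};
 *	struct gpiohandle_data data = { .values = { 0 } };
 *
 *	ioctl(chip_fd, GPIO_GET_LINEHANDLE_IOCTL, &req);
 *	ioctl(req.fd, GPIOHANDLE_SET_LINE_VALUES_IOCTL, &data);
 *	close(req.fd);
 */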
static int linehandle_create(struct gpio_device *gdev, void __user *ip)
{
	struct gpiohandle_request handlereq;
	struct linehandle_state *lh;
	struct file *file;
	int fd, i, ret;
	u32 lflags;

	if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
		return -EFAULT;
	if ((handlereq.lines == 0) || (handlereq.lines > GPIOHANDLES_MAX))
		return -EINVAL;

	lflags = handlereq.flags;

	ret = linehandle_validate_flags(lflags);
	if (ret)
		return ret;

	lh = kzalloc(sizeof(*lh), GFP_KERNEL);
	if (!lh)
		return -ENOMEM;
	lh->gdev = gpio_device_get(gdev);

	if (handlereq.consumer_label[0] != '\0') {
		/* label is only initialized if consumer_label is set */
		lh->label = kstrndup(handlereq.consumer_label,
				     sizeof(handlereq.consumer_label) - 1,
				     GFP_KERNEL);
		if (!lh->label) {
			ret = -ENOMEM;
			goto out_free_lh;
		}
	}

	lh->num_descs = handlereq.lines;

	/* Request each GPIO */
	for (i = 0; i < handlereq.lines; i++) {
		u32 offset = handlereq.lineoffsets[i];
		struct gpio_desc *desc = gpio_device_get_desc(gdev, offset);

		if (IS_ERR(desc)) {
			ret = PTR_ERR(desc);
			goto out_free_lh;
		}

		ret = gpiod_request_user(desc, lh->label);
		if (ret)
			goto out_free_lh;
		lh->descs[i] = desc;
		linehandle_flags_to_desc_flags(handlereq.flags, &desc->flags);

		ret = gpiod_set_transitory(desc, false);
		if (ret < 0)
			goto out_free_lh;

		/*
		 * Lines have to be requested explicitly for input
		 * or output, else the line will be treated "as is".
		 */
		if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
			int val = !!handlereq.default_values[i];

			ret = gpiod_direction_output(desc, val);
			if (ret)
				goto out_free_lh;
		} else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
			ret = gpiod_direction_input(desc);
			if (ret)
				goto out_free_lh;
		}

		gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);

		dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
			offset);
	}

	fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto out_free_lh;
	}

	file = anon_inode_getfile("gpio-linehandle",
				  &linehandle_fileops,
				  lh,
				  O_RDONLY | O_CLOEXEC);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto out_put_unused_fd;
	}

	handlereq.fd = fd;
	if (copy_to_user(ip, &handlereq, sizeof(handlereq))) {
		/*
		 * fput() will trigger the release() callback, so do not go onto
		 * the regular error cleanup path here.
		 */
		fput(file);
		put_unused_fd(fd);
		return -EFAULT;
	}

	fd_install(fd, file);

	dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
		lh->num_descs);

	return 0;

out_put_unused_fd:
	put_unused_fd(fd);
out_free_lh:
	linehandle_free(lh);
	return ret;
}
#endif /* CONFIG_GPIO_CDEV_V1 */

/**
 * struct line - contains the state of a requested line
 * @node: to store the object in supinfo_tree if supplemental
 * @desc: the GPIO descriptor for this line.
 * @req: the corresponding line request
 * @irq: the interrupt triggered in response to events on this GPIO
 * @edflags: the edge flags, GPIO_V2_LINE_FLAG_EDGE_RISING and/or
 * GPIO_V2_LINE_FLAG_EDGE_FALLING, indicating the edge detection applied
 * @timestamp_ns: cache for the timestamp storing it between hardirq and
 * IRQ thread, used to bring the timestamp close to the actual event
 * @req_seqno: the seqno for the current edge event in the sequence of
 * events for the corresponding line request. This is drawn from the @req.
 * @line_seqno: the seqno for the current edge event in the sequence of
 * events for this line.
 * @work: the worker that implements software debouncing
 * @debounce_period_us: the debounce period in microseconds
 * @sw_debounced: flag indicating if the software debouncer is active
 * @level: the current debounced physical level of the line
 * @hdesc: the Hardware Timestamp Engine (HTE) descriptor
 * @raw_level: the line level at the time of event
 * @total_discard_seq: the running counter of the discarded events
 * @last_seqno: the last sequence number before debounce period expires
 */
struct line {
	struct rb_node node;
	struct gpio_desc *desc;
	/*
	 * -- edge detector specific fields --
	 */
	struct linereq *req;
	unsigned int irq;
	/*
	 * The flags for the active edge detector configuration.
	 *
	 * edflags is set by linereq_create(), linereq_free(), and
	 * linereq_set_config_unlocked(), which are themselves mutually
	 * exclusive, and is accessed by edge_irq_thread(),
	 * process_hw_ts_thread() and debounce_work_func(),
	 * which can all live with a slightly stale value.
	 */
	u64 edflags;
	/*
	 * timestamp_ns and req_seqno are accessed only by
	 * edge_irq_handler() and edge_irq_thread(), which are themselves
	 * mutually exclusive, so no additional protection is necessary.
	 */
	u64 timestamp_ns;
	u32 req_seqno;
	/*
	 * line_seqno is accessed by either edge_irq_thread() or
	 * debounce_work_func(), which are themselves mutually exclusive,
	 * so no additional protection is necessary.
	 */
	u32 line_seqno;
	/*
	 * -- debouncer specific fields --
	 */
	struct delayed_work work;
	/*
	 * debounce_period_us is accessed by debounce_irq_handler() and
	 * process_hw_ts() which are disabled when modified by
	 * debounce_setup(), edge_detector_setup() or edge_detector_stop()
	 * or can live with a stale version when updated by
	 * edge_detector_update().
	 * The modifying functions are themselves mutually exclusive.
	 */
	unsigned int debounce_period_us;
	/*
	 * sw_debounce is accessed by linereq_set_config(), which is the
	 * only setter, and linereq_get_values(), which can live with a
	 * slightly stale value.
	 */
	unsigned int sw_debounced;
	/*
	 * level is accessed by debounce_work_func(), which is the only
	 * setter, and linereq_get_values() which can live with a slightly
	 * stale value.
	 */
	unsigned int level;
#ifdef CONFIG_HTE
	struct hte_ts_desc hdesc;
	/*
	 * HTE provider sets line level at the time of event. The valid
	 * value is 0 or 1 and negative value for an error.
	 */
	int raw_level;
	/*
	 * when sw_debounce is set on an HTE enabled line, this is the
	 * running counter of the discarded events.
	 */
	u32 total_discard_seq;
	/*
	 * when sw_debounce is set on an HTE enabled line, this variable
	 * records the last sequence number before the debounce period expires.
	 */
	u32 last_seqno;
#endif /* CONFIG_HTE */
};

/*
 * an rbtree of the struct lines containing supplemental info.
 * Used to populate gpio_v2_line_info with cdev specific fields not contained
 * in the struct gpio_desc.
 * A line is determined to contain supplemental information by
 * line_has_supinfo().
 */
static struct rb_root supinfo_tree = RB_ROOT;
/* covers supinfo_tree */
static DEFINE_SPINLOCK(supinfo_lock);

/**
 * struct linereq - contains the state of a userspace line request
 * @gdev: the GPIO device the line request pertains to
 * @label: consumer label used to tag GPIO descriptors
 * @num_lines: the number of lines in the lines array
 * @wait: wait queue that handles blocking reads of events
 * @device_unregistered_nb: notifier block for receiving gdev unregister events
 * @event_buffer_size: the number of elements allocated in @events
 * @events: KFIFO for the GPIO events
 * @seqno: the sequence number for edge events generated on all lines in
 * this line request. Note that this is not used when @num_lines is 1, as
 * the line_seqno is then the same and is cheaper to calculate.
 * @config_mutex: mutex for serializing ioctl() calls to ensure consistency
 * of configuration, particularly multi-step accesses to desc flags and
 * changes to supinfo status.
 * @lines: the lines held by this line request, with @num_lines elements.
 */
struct linereq {
	struct gpio_device *gdev;
	const char *label;
	u32 num_lines;
	wait_queue_head_t wait;
	struct notifier_block device_unregistered_nb;
	u32 event_buffer_size;
	DECLARE_KFIFO_PTR(events, struct gpio_v2_line_event);
	atomic_t seqno;
	struct mutex config_mutex;
	struct line lines[] __counted_by(num_lines);
};

static void supinfo_insert(struct line *line)
{
	struct rb_node **new = &(supinfo_tree.rb_node), *parent = NULL;
	struct line *entry;

	guard(spinlock)(&supinfo_lock);

	while (*new) {
		entry = container_of(*new, struct line, node);

		parent = *new;
		if (line->desc < entry->desc) {
			new = &((*new)->rb_left);
		} else if (line->desc > entry->desc) {
			new = &((*new)->rb_right);
		} else {
			/* this should never happen */
			WARN(1, "duplicate line inserted");
			return;
		}
	}

	rb_link_node(&line->node, parent, new);
	rb_insert_color(&line->node, &supinfo_tree);
}

static void supinfo_erase(struct line *line)
{
	guard(spinlock)(&supinfo_lock);

	rb_erase(&line->node, &supinfo_tree);
}

static struct line *supinfo_find(struct gpio_desc *desc)
{
	struct rb_node *node = supinfo_tree.rb_node;
	struct line *line;

	while (node) {
		line = container_of(node, struct line, node);
		if (desc < line->desc)
			node = node->rb_left;
		else if (desc > line->desc)
			node = node->rb_right;
		else
			return line;
	}
	return NULL;
}

static void supinfo_to_lineinfo(struct gpio_desc *desc,
				struct gpio_v2_line_info *info)
{
	struct gpio_v2_line_attribute *attr;
	struct line *line;

	guard(spinlock)(&supinfo_lock);

	line = supinfo_find(desc);
	if (!line)
		return;

	attr = &info->attrs[info->num_attrs];
	attr->id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
	attr->debounce_period_us = READ_ONCE(line->debounce_period_us);
	info->num_attrs++;
}

static inline bool line_has_supinfo(struct line *line)
{
	return READ_ONCE(line->debounce_period_us);
}

/*
 * Checks line_has_supinfo() before and after the change to avoid unnecessary
 * supinfo_tree access.
 * Called indirectly by linereq_create() or linereq_set_config() so line
 * is already protected from concurrent changes.
 */
static void line_set_debounce_period(struct line *line,
				     unsigned int debounce_period_us)
{
	bool was_suppl = line_has_supinfo(line);

	WRITE_ONCE(line->debounce_period_us, debounce_period_us);

	/* if supinfo status is unchanged then we're done */
	if (line_has_supinfo(line) == was_suppl)
		return;

	/* supinfo status has changed, so update the tree */
	if (was_suppl)
		supinfo_erase(line);
	else
		supinfo_insert(line);
}

#define GPIO_V2_LINE_BIAS_FLAGS \
	(GPIO_V2_LINE_FLAG_BIAS_PULL_UP | \
	 GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN | \
	 GPIO_V2_LINE_FLAG_BIAS_DISABLED)

#define GPIO_V2_LINE_DIRECTION_FLAGS \
	(GPIO_V2_LINE_FLAG_INPUT | \
	 GPIO_V2_LINE_FLAG_OUTPUT)

#define GPIO_V2_LINE_DRIVE_FLAGS \
	(GPIO_V2_LINE_FLAG_OPEN_DRAIN | \
	 GPIO_V2_LINE_FLAG_OPEN_SOURCE)

#define GPIO_V2_LINE_EDGE_FLAGS \
	(GPIO_V2_LINE_FLAG_EDGE_RISING | \
	 GPIO_V2_LINE_FLAG_EDGE_FALLING)

#define GPIO_V2_LINE_FLAG_EDGE_BOTH GPIO_V2_LINE_EDGE_FLAGS

#define GPIO_V2_LINE_VALID_FLAGS \
	(GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
	 GPIO_V2_LINE_DIRECTION_FLAGS | \
	 GPIO_V2_LINE_DRIVE_FLAGS | \
	 GPIO_V2_LINE_EDGE_FLAGS | \
	 GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME | \
	 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
	 GPIO_V2_LINE_BIAS_FLAGS)

/* subset of flags relevant for edge detector configuration */
#define GPIO_V2_LINE_EDGE_DETECTOR_FLAGS \
	(GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
	 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
	 GPIO_V2_LINE_EDGE_FLAGS)

static int linereq_unregistered_notify(struct notifier_block *nb,
				       unsigned long action, void *data)
{
	struct linereq *lr = container_of(nb, struct linereq,
					  device_unregistered_nb);

	wake_up_poll(&lr->wait, EPOLLIN | EPOLLERR);

	return NOTIFY_OK;
}

static void linereq_put_event(struct linereq *lr,
			      struct gpio_v2_line_event *le)
{
	bool overflow = false;

	scoped_guard(spinlock, &lr->wait.lock) {
		if (kfifo_is_full(&lr->events)) {
			overflow = true;
			kfifo_skip(&lr->events);
		}
		kfifo_in(&lr->events, le, 1);
	}
	if (!overflow)
		wake_up_poll(&lr->wait, EPOLLIN);
	else
		pr_debug_ratelimited("event FIFO is full - event dropped\n");
}

static u64 line_event_timestamp(struct line *line)
{
	if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
		return ktime_get_real_ns();
	else if (IS_ENABLED(CONFIG_HTE) &&
		 test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
		return line->timestamp_ns;

	return ktime_get_ns();
}

static u32 line_event_id(int level)
{
	return level ? GPIO_V2_LINE_EVENT_RISING_EDGE :
		       GPIO_V2_LINE_EVENT_FALLING_EDGE;
}

static inline char *make_irq_label(const char *orig)
{
	char *new;

	if (!orig)
		return NULL;

	new = kstrdup_and_replace(orig, '/', ':', GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	return new;
}

static inline void free_irq_label(const char *label)
{
	kfree(label);
}

#ifdef CONFIG_HTE

static enum hte_return process_hw_ts_thread(void *p)
{
	struct line *line;
	struct linereq *lr;
	struct gpio_v2_line_event le;
	u64 edflags;
	int level;

	if (!p)
		return HTE_CB_HANDLED;

	line = p;
	lr = line->req;

	memset(&le, 0, sizeof(le));

	le.timestamp_ns = line->timestamp_ns;
	edflags = READ_ONCE(line->edflags);

	switch (edflags & GPIO_V2_LINE_EDGE_FLAGS) {
	case GPIO_V2_LINE_FLAG_EDGE_BOTH:
		level = (line->raw_level >= 0) ?
				line->raw_level :
				gpiod_get_raw_value_cansleep(line->desc);

		if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
			level = !level;

		le.id = line_event_id(level);
		break;
	case GPIO_V2_LINE_FLAG_EDGE_RISING:
		le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
		break;
	case GPIO_V2_LINE_FLAG_EDGE_FALLING:
		le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
		break;
	default:
		return HTE_CB_HANDLED;
	}
	le.line_seqno = line->line_seqno;
	le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
	le.offset = gpio_chip_hwgpio(line->desc);

	linereq_put_event(lr, &le);

	return HTE_CB_HANDLED;
}

static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
{
	struct line *line;
	struct linereq *lr;
	int diff_seqno = 0;

	if (!ts || !p)
		return HTE_CB_HANDLED;

	line = p;
	line->timestamp_ns = ts->tsc;
	line->raw_level = ts->raw_level;
	lr = line->req;

	if (READ_ONCE(line->sw_debounced)) {
		line->total_discard_seq++;
		line->last_seqno = ts->seq;
		mod_delayed_work(system_wq, &line->work,
		  usecs_to_jiffies(READ_ONCE(line->debounce_period_us)));
	} else {
		if (unlikely(ts->seq < line->line_seqno))
			return HTE_CB_HANDLED;

		diff_seqno = ts->seq - line->line_seqno;
		line->line_seqno = ts->seq;
		if (lr->num_lines != 1)
			line->req_seqno = atomic_add_return(diff_seqno,
							    &lr->seqno);

		return HTE_RUN_SECOND_CB;
	}

	return HTE_CB_HANDLED;
}

static int hte_edge_setup(struct line *line, u64 eflags)
{
	int ret;
	unsigned long flags = 0;
	struct hte_ts_desc *hdesc = &line->hdesc;

	if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
		flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
				  HTE_FALLING_EDGE_TS :
				  HTE_RISING_EDGE_TS;
	if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
		flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
				  HTE_RISING_EDGE_TS :
				  HTE_FALLING_EDGE_TS;

	line->total_discard_seq = 0;

	hte_init_line_attr(hdesc, desc_to_gpio(line->desc), flags, NULL,
			   line->desc);

	ret = hte_ts_get(NULL, hdesc, 0);
	if (ret)
		return ret;

	return hte_request_ts_ns(hdesc, process_hw_ts, process_hw_ts_thread,
				 line);
}

#else

static int hte_edge_setup(struct line *line, u64 eflags)
{
	return 0;
}
#endif /* CONFIG_HTE */

static irqreturn_t edge_irq_thread(int irq, void *p)
{
	struct line *line = p;
	struct linereq *lr = line->req;
	struct gpio_v2_line_event le;

	/* Do not leak kernel stack to userspace */
	memset(&le, 0, sizeof(le));

	if (line->timestamp_ns) {
		le.timestamp_ns = line->timestamp_ns;
	} else {
		/*
		 * We may be running from a nested threaded interrupt in
		 * which case we didn't get the timestamp from
		 * edge_irq_handler().
		 */
		le.timestamp_ns = line_event_timestamp(line);
		if (lr->num_lines != 1)
			line->req_seqno = atomic_inc_return(&lr->seqno);
	}
	line->timestamp_ns = 0;

	switch (READ_ONCE(line->edflags) & GPIO_V2_LINE_EDGE_FLAGS) {
	case GPIO_V2_LINE_FLAG_EDGE_BOTH:
		le.id = line_event_id(gpiod_get_value_cansleep(line->desc));
		break;
	case GPIO_V2_LINE_FLAG_EDGE_RISING:
		le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
		break;
	case GPIO_V2_LINE_FLAG_EDGE_FALLING:
		le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
		break;
	default:
		return IRQ_NONE;
	}
	line->line_seqno++;
	le.line_seqno = line->line_seqno;
	le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
	le.offset = gpio_chip_hwgpio(line->desc);

	linereq_put_event(lr, &le);

	return IRQ_HANDLED;
}

static irqreturn_t edge_irq_handler(int irq, void *p)
{
	struct line *line = p;
	struct linereq *lr = line->req;

	/*
	 * Just store the timestamp in hardirq context so we get it as
	 * close in time as possible to the actual event.
	 */
	line->timestamp_ns = line_event_timestamp(line);

	if (lr->num_lines != 1)
		line->req_seqno = atomic_inc_return(&lr->seqno);

	return IRQ_WAKE_THREAD;
}

/*
 * returns the current debounced logical value.
 */
static bool debounced_value(struct line *line)
{
	bool value;

	/*
	 * minor race - debouncer may be stopped here, so edge_detector_stop()
	 * must leave the value unchanged so the following will read the level
	 * from when the debouncer was last running.
	 */
	value = READ_ONCE(line->level);

	if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
		value = !value;

	return value;
}

static irqreturn_t debounce_irq_handler(int irq, void *p)
{
	struct line *line = p;

	mod_delayed_work(system_wq, &line->work,
		usecs_to_jiffies(READ_ONCE(line->debounce_period_us)));

	return IRQ_HANDLED;
}

static void debounce_work_func(struct work_struct *work)
{
	struct gpio_v2_line_event le;
	struct line *line = container_of(work, struct line, work.work);
	struct linereq *lr;
	u64 eflags, edflags = READ_ONCE(line->edflags);
	int level = -1;
#ifdef CONFIG_HTE
	int diff_seqno;

	if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
		level = line->raw_level;
#endif
	if (level < 0)
		level = gpiod_get_raw_value_cansleep(line->desc);
	if (level < 0) {
		pr_debug_ratelimited("debouncer failed to read line value\n");
		return;
	}

	if (READ_ONCE(line->level) == level)
		return;

	WRITE_ONCE(line->level, level);

	/* -- edge detection -- */
	eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
	if (!eflags)
		return;

	/* switch from physical level to logical - if they differ */
	if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
		level = !level;

	/* ignore edges that are not being monitored */
	if (((eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) && !level) ||
	    ((eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) && level))
		return;

	/* Do not leak kernel stack to userspace */
	memset(&le, 0, sizeof(le));

	lr = line->req;
	le.timestamp_ns = line_event_timestamp(line);
	le.offset = gpio_chip_hwgpio(line->desc);
#ifdef CONFIG_HTE
	if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) {
		/* discard events except the last one */
		line->total_discard_seq -= 1;
		diff_seqno = line->last_seqno - line->total_discard_seq -
				line->line_seqno;
		line->line_seqno = line->last_seqno - line->total_discard_seq;
		le.line_seqno = line->line_seqno;
		le.seqno = (lr->num_lines == 1) ?
			le.line_seqno : atomic_add_return(diff_seqno, &lr->seqno);
	} else
#endif /* CONFIG_HTE */
	{
		line->line_seqno++;
		le.line_seqno = line->line_seqno;
		le.seqno = (lr->num_lines == 1) ?
			le.line_seqno : atomic_inc_return(&lr->seqno);
	}

	le.id = line_event_id(level);

	linereq_put_event(lr, &le);
}

static int debounce_setup(struct line *line, unsigned int debounce_period_us)
{
	unsigned long irqflags;
	int ret, level, irq;
	char *label;

	/* try hardware */
	ret = gpiod_set_debounce(line->desc, debounce_period_us);
	if (!ret) {
		line_set_debounce_period(line, debounce_period_us);
		return ret;
	}
	if (ret != -ENOTSUPP)
		return ret;

	if (debounce_period_us) {
		/* setup software debounce */
		level = gpiod_get_raw_value_cansleep(line->desc);
		if (level < 0)
			return level;

		if (!(IS_ENABLED(CONFIG_HTE) &&
		      test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))) {
			irq = gpiod_to_irq(line->desc);
			if (irq < 0)
				return -ENXIO;

			label = make_irq_label(line->req->label);
			if (IS_ERR(label))
				return -ENOMEM;

			irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
			ret = request_irq(irq, debounce_irq_handler, irqflags,
					  label, line);
			if (ret) {
				free_irq_label(label);
				return ret;
			}
			line->irq = irq;
		} else {
			ret = hte_edge_setup(line, GPIO_V2_LINE_FLAG_EDGE_BOTH);
			if (ret)
				return ret;
		}

		WRITE_ONCE(line->level, level);
		WRITE_ONCE(line->sw_debounced, 1);
	}
	return 0;
}

static bool gpio_v2_line_config_debounced(struct gpio_v2_line_config *lc,
					  unsigned int line_idx)
{
	unsigned int i;
	u64 mask = BIT_ULL(line_idx);

	for (i = 0; i < lc->num_attrs; i++) {
		if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
		    (lc->attrs[i].mask & mask))
			return true;
	}
	return false;
}

static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
					       unsigned int line_idx)
{
	unsigned int i;
	u64 mask = BIT_ULL(line_idx);

	for (i = 0; i < lc->num_attrs; i++) {
		if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
		    (lc->attrs[i].mask & mask))
			return lc->attrs[i].attr.debounce_period_us;
	}
	return 0;
}
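/*
 * For illustration, the kind of userspace config the two helpers above
 * parse - a sketch assuming the uAPI from <linux/gpio.h>, requesting a 5ms
 * debounce on the first two lines of a request (mask bits 0 and 1):
 *
 *	struct gpio_v2_line_config lc = {
 *		.flags = GPIO_V2_LINE_FLAG_INPUT |
 *			 GPIO_V2_LINE_FLAG_EDGE_RISING |
 *			 GPIO_V2_LINE_FLAG_EDGE_FALLING,
 *		.num_attrs = 1,
 *		.attrs[0] = {
 *			.attr = {
 *				.id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE,
 *				.debounce_period_us = 5000,
 *			},
 *			.mask = 0x3,
 *		},
 *	};
 */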
static void edge_detector_stop(struct line *line)
{
	if (line->irq) {
		free_irq_label(free_irq(line->irq, line));
		line->irq = 0;
	}

#ifdef CONFIG_HTE
	if (READ_ONCE(line->edflags) & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
		hte_ts_put(&line->hdesc);
#endif

	cancel_delayed_work_sync(&line->work);
	WRITE_ONCE(line->sw_debounced, 0);
	WRITE_ONCE(line->edflags, 0);
	line_set_debounce_period(line, 0);
	/* do not change line->level - see comment in debounced_value() */
}

static int edge_detector_fifo_init(struct linereq *req)
{
	if (kfifo_initialized(&req->events))
		return 0;

	return kfifo_alloc(&req->events, req->event_buffer_size, GFP_KERNEL);
}

static int edge_detector_setup(struct line *line,
			       struct gpio_v2_line_config *lc,
			       unsigned int line_idx, u64 edflags)
{
	u32 debounce_period_us;
	unsigned long irqflags = 0;
	u64 eflags;
	int irq, ret;
	char *label;

	eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
	if (eflags) {
		ret = edge_detector_fifo_init(line->req);
		if (ret)
			return ret;
	}
	if (gpio_v2_line_config_debounced(lc, line_idx)) {
		debounce_period_us = gpio_v2_line_config_debounce_period(lc, line_idx);
		ret = debounce_setup(line, debounce_period_us);
		if (ret)
			return ret;
		line_set_debounce_period(line, debounce_period_us);
	}

	/* detection disabled or sw debouncer will provide edge detection */
	if (!eflags || READ_ONCE(line->sw_debounced))
		return 0;

	if (IS_ENABLED(CONFIG_HTE) &&
	    (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
		return hte_edge_setup(line, edflags);

	irq = gpiod_to_irq(line->desc);
	if (irq < 0)
		return -ENXIO;

	if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
		irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
			IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
	if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
		irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
			IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
	irqflags |= IRQF_ONESHOT;

	label = make_irq_label(line->req->label);
	if (IS_ERR(label))
		return PTR_ERR(label);

	/* Request a thread to read the events */
	ret = request_threaded_irq(irq, edge_irq_handler, edge_irq_thread,
				   irqflags, label, line);
	if (ret) {
		free_irq_label(label);
		return ret;
	}

	line->irq = irq;
	return 0;
}

static int edge_detector_update(struct line *line,
				struct gpio_v2_line_config *lc,
				unsigned int line_idx, u64 edflags)
{
	u64 active_edflags = READ_ONCE(line->edflags);
	unsigned int debounce_period_us =
		gpio_v2_line_config_debounce_period(lc, line_idx);

	if ((active_edflags == edflags) &&
	    (READ_ONCE(line->debounce_period_us) == debounce_period_us))
		return 0;

	/* sw debounced and still will be...*/
	if (debounce_period_us && READ_ONCE(line->sw_debounced)) {
		line_set_debounce_period(line, debounce_period_us);
		/*
		 * ensure event fifo is initialised if edge detection
		 * is now enabled.
		 */
		if (edflags & GPIO_V2_LINE_EDGE_FLAGS)
			return edge_detector_fifo_init(line->req);

		return 0;
	}

	/* reconfiguring edge detection or sw debounce being disabled */
	if ((line->irq && !READ_ONCE(line->sw_debounced)) ||
	    (active_edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) ||
	    (!debounce_period_us && READ_ONCE(line->sw_debounced)))
		edge_detector_stop(line);

	return edge_detector_setup(line, lc, line_idx, edflags);
}

static u64 gpio_v2_line_config_flags(struct gpio_v2_line_config *lc,
				     unsigned int line_idx)
{
	unsigned int i;
	u64 mask = BIT_ULL(line_idx);

	for (i = 0; i < lc->num_attrs; i++) {
		if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_FLAGS) &&
		    (lc->attrs[i].mask & mask))
			return lc->attrs[i].attr.flags;
	}
	return lc->flags;
}

static int gpio_v2_line_config_output_value(struct gpio_v2_line_config *lc,
					    unsigned int line_idx)
{
	unsigned int i;
	u64 mask = BIT_ULL(line_idx);

	for (i = 0; i < lc->num_attrs; i++) {
		if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES) &&
		    (lc->attrs[i].mask & mask))
			return !!(lc->attrs[i].attr.values & mask);
	}
	return 0;
}

static int gpio_v2_line_flags_validate(u64 flags)
{
	/* Return an error if an unknown flag is set */
	if (flags & ~GPIO_V2_LINE_VALID_FLAGS)
		return -EINVAL;

	if (!IS_ENABLED(CONFIG_HTE) &&
	    (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
		return -EOPNOTSUPP;

	/*
	 * Do not allow both INPUT and OUTPUT flags to be set as they are
	 * contradictory.
	 */
	if ((flags & GPIO_V2_LINE_FLAG_INPUT) &&
	    (flags & GPIO_V2_LINE_FLAG_OUTPUT))
		return -EINVAL;

	/* Only allow one event clock source */
	if (IS_ENABLED(CONFIG_HTE) &&
	    (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME) &&
	    (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
		return -EINVAL;

	/* Edge detection requires explicit input. */
	if ((flags & GPIO_V2_LINE_EDGE_FLAGS) &&
	    !(flags & GPIO_V2_LINE_FLAG_INPUT))
		return -EINVAL;

	/*
	 * Do not allow OPEN_SOURCE and OPEN_DRAIN flags in a single
	 * request. If the hardware actually supports enabling both at the
	 * same time the electrical result would be disastrous.
	 */
	if ((flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN) &&
	    (flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE))
		return -EINVAL;

	/* Drive requires explicit output direction. */
	if ((flags & GPIO_V2_LINE_DRIVE_FLAGS) &&
	    !(flags & GPIO_V2_LINE_FLAG_OUTPUT))
		return -EINVAL;

	/* Bias requires explicit direction. */
	if ((flags & GPIO_V2_LINE_BIAS_FLAGS) &&
	    !(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
		return -EINVAL;

	/* Only one bias flag can be set. */
	if (((flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED) &&
	     (flags & (GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN |
		       GPIO_V2_LINE_FLAG_BIAS_PULL_UP))) ||
	    ((flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN) &&
	     (flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)))
		return -EINVAL;

	return 0;
}

static int gpio_v2_line_config_validate(struct gpio_v2_line_config *lc,
					unsigned int num_lines)
{
	unsigned int i;
	u64 flags;
	int ret;

	if (lc->num_attrs > GPIO_V2_LINE_NUM_ATTRS_MAX)
		return -EINVAL;

	if (memchr_inv(lc->padding, 0, sizeof(lc->padding)))
		return -EINVAL;

	for (i = 0; i < num_lines; i++) {
		flags = gpio_v2_line_config_flags(lc, i);
		ret = gpio_v2_line_flags_validate(flags);
		if (ret)
			return ret;

		/* debounce requires explicit input */
		if (gpio_v2_line_config_debounced(lc, i) &&
		    !(flags & GPIO_V2_LINE_FLAG_INPUT))
			return -EINVAL;
	}
	return 0;
}

static void gpio_v2_line_config_flags_to_desc_flags(u64 flags,
						    unsigned long *flagsp)
{
	assign_bit(FLAG_ACTIVE_LOW, flagsp,
		   flags & GPIO_V2_LINE_FLAG_ACTIVE_LOW);

	if (flags & GPIO_V2_LINE_FLAG_OUTPUT)
		set_bit(FLAG_IS_OUT, flagsp);
	else if (flags & GPIO_V2_LINE_FLAG_INPUT)
		clear_bit(FLAG_IS_OUT, flagsp);

	assign_bit(FLAG_EDGE_RISING, flagsp,
		   flags & GPIO_V2_LINE_FLAG_EDGE_RISING);
	assign_bit(FLAG_EDGE_FALLING, flagsp,
		   flags & GPIO_V2_LINE_FLAG_EDGE_FALLING);

	assign_bit(FLAG_OPEN_DRAIN, flagsp,
		   flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN);
	assign_bit(FLAG_OPEN_SOURCE, flagsp,
		   flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE);

	assign_bit(FLAG_PULL_UP, flagsp,
		   flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP);
	assign_bit(FLAG_PULL_DOWN, flagsp,
		   flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN);
	assign_bit(FLAG_BIAS_DISABLE, flagsp,
		   flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED);

	assign_bit(FLAG_EVENT_CLOCK_REALTIME, flagsp,
		   flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME);
	assign_bit(FLAG_EVENT_CLOCK_HTE, flagsp,
		   flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
}
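/*
 * For illustration, the userspace view of the values ioctl serviced by
 * linereq_get_values() below - a sketch assuming the uAPI from
 * <linux/gpio.h>. The mask selects lines by their index within the request
 * (e.g. 0x5 selects request lines 0 and 2) and the result is returned in
 * the corresponding bits:
 *
 *	struct gpio_v2_line_values lv = { .mask = 0x5 };
 *
 *	if (ioctl(req_fd, GPIO_V2_LINE_GET_VALUES_IOCTL, &lv) == 0)
 *		printf("line 0 = %d, line 2 = %d\n",
 *		       !!(lv.bits & 0x1), !!(lv.bits & 0x4));
 */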
static long linereq_get_values(struct linereq *lr, void __user *ip)
{
	struct gpio_v2_line_values lv;
	DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
	struct gpio_desc **descs;
	unsigned int i, didx, num_get;
	bool val;
	int ret;

	/* NOTE: It's ok to read values of output lines. */
	if (copy_from_user(&lv, ip, sizeof(lv)))
		return -EFAULT;

	/*
	 * gpiod_get_array_value_complex() requires compacted desc and val
	 * arrays, rather than the sparse ones in lv.
	 * Calculation of num_get and construction of the desc array is
	 * optimized to avoid allocation for the desc array for the common
	 * num_get == 1 case.
	 */
	/* scan requested lines to calculate the subset to get */
	for (num_get = 0, i = 0; i < lr->num_lines; i++) {
		if (lv.mask & BIT_ULL(i)) {
			num_get++;
			/* capture desc for the num_get == 1 case */
			descs = &lr->lines[i].desc;
		}
	}

	if (num_get == 0)
		return -EINVAL;

	if (num_get != 1) {
		/* build compacted desc array */
		descs = kmalloc_array(num_get, sizeof(*descs), GFP_KERNEL);
		if (!descs)
			return -ENOMEM;
		for (didx = 0, i = 0; i < lr->num_lines; i++) {
			if (lv.mask & BIT_ULL(i)) {
				descs[didx] = lr->lines[i].desc;
				didx++;
			}
		}
	}
	ret = gpiod_get_array_value_complex(false, true, num_get,
					    descs, NULL, vals);

	if (num_get != 1)
		kfree(descs);
	if (ret)
		return ret;

	lv.bits = 0;
	for (didx = 0, i = 0; i < lr->num_lines; i++) {
		/* unpack compacted vals for the response */
		if (lv.mask & BIT_ULL(i)) {
			if (lr->lines[i].sw_debounced)
				val = debounced_value(&lr->lines[i]);
			else
				val = test_bit(didx, vals);
			if (val)
				lv.bits |= BIT_ULL(i);
			didx++;
		}
	}

	if (copy_to_user(ip, &lv, sizeof(lv)))
		return -EFAULT;

	return 0;
}

static long linereq_set_values(struct linereq *lr, void __user *ip)
{
	DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
	struct gpio_v2_line_values lv;
	struct gpio_desc **descs;
	unsigned int i, didx, num_set;
	int ret;

	if (copy_from_user(&lv, ip, sizeof(lv)))
		return -EFAULT;

	guard(mutex)(&lr->config_mutex);

	/*
	 * gpiod_set_array_value_complex() requires compacted desc and val
	 * arrays, rather than the sparse ones in lv.
	 * Calculation of num_set and construction of the descs and vals arrays
	 * is optimized to minimize scanning the lv->mask, and to avoid
	 * allocation for the desc array for the common num_set == 1 case.
	 */
	bitmap_zero(vals, GPIO_V2_LINES_MAX);
	/* scan requested lines to determine the subset to be set */
	for (num_set = 0, i = 0; i < lr->num_lines; i++) {
		if (lv.mask & BIT_ULL(i)) {
			/* setting inputs is not allowed */
			if (!test_bit(FLAG_IS_OUT, &lr->lines[i].desc->flags))
				return -EPERM;
			/* add to compacted values */
			if (lv.bits & BIT_ULL(i))
				__set_bit(num_set, vals);
			num_set++;
			/* capture desc for the num_set == 1 case */
			descs = &lr->lines[i].desc;
		}
	}
	if (num_set == 0)
		return -EINVAL;

	if (num_set != 1) {
		/* build compacted desc array */
		descs = kmalloc_array(num_set, sizeof(*descs), GFP_KERNEL);
		if (!descs)
			return -ENOMEM;
		for (didx = 0, i = 0; i < lr->num_lines; i++) {
			if (lv.mask & BIT_ULL(i)) {
				descs[didx] = lr->lines[i].desc;
				didx++;
			}
		}
	}
	ret = gpiod_set_array_value_complex(false, true, num_set,
					    descs, NULL, vals);

	if (num_set != 1)
		kfree(descs);
	return ret;
}

static long linereq_set_config(struct linereq *lr, void __user *ip)
{
	struct gpio_v2_line_config lc;
	struct gpio_desc *desc;
	struct line *line;
	unsigned int i;
	u64 flags, edflags;
	int ret;

	if (copy_from_user(&lc, ip, sizeof(lc)))
		return -EFAULT;

	ret = gpio_v2_line_config_validate(&lc, lr->num_lines);
	if (ret)
		return ret;

	guard(mutex)(&lr->config_mutex);

	for (i = 0; i < lr->num_lines; i++) {
		line = &lr->lines[i];
		desc = lr->lines[i].desc;
		flags = gpio_v2_line_config_flags(&lc, i);
		/*
		 * Lines not explicitly reconfigured as input or output
		 * are left unchanged.
		 */
		if (!(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
			continue;
		gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
		edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
		if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
			int val = gpio_v2_line_config_output_value(&lc, i);

			edge_detector_stop(line);
			ret = gpiod_direction_output(desc, val);
			if (ret)
				return ret;
		} else {
			ret = gpiod_direction_input(desc);
			if (ret)
				return ret;

			ret = edge_detector_update(line, &lc, i, edflags);
			if (ret)
				return ret;
		}

		WRITE_ONCE(line->edflags, edflags);

		gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
	}
	return 0;
}

static long linereq_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct linereq *lr = file->private_data;
	void __user *ip = (void __user *)arg;

	guard(srcu)(&lr->gdev->srcu);

	if (!rcu_access_pointer(lr->gdev->chip))
		return -ENODEV;

	switch (cmd) {
	case GPIO_V2_LINE_GET_VALUES_IOCTL:
		return linereq_get_values(lr, ip);
	case GPIO_V2_LINE_SET_VALUES_IOCTL:
		return linereq_set_values(lr, ip);
	case GPIO_V2_LINE_SET_CONFIG_IOCTL:
		return linereq_set_config(lr, ip);
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static long linereq_ioctl_compat(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	return linereq_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif
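/*
 * For illustration, how userspace typically consumes the edge events queued
 * above - a sketch of poll() + read() on the request fd, matching
 * linereq_poll() and linereq_read() below; uAPI from <linux/gpio.h>, error
 * handling omitted:
 *
 *	struct gpio_v2_line_event ev;
 *	struct pollfd pfd = { .fd = req_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 &&
 *	    read(req_fd, &ev, sizeof(ev)) == sizeof(ev))
 *		printf("offset %u: %s edge @ %llu ns (seqno %u)\n",
 *		       ev.offset,
 *		       ev.id == GPIO_V2_LINE_EVENT_RISING_EDGE ?
 *				"rising" : "falling",
 *		       (unsigned long long)ev.timestamp_ns, ev.seqno);
 */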
static __poll_t linereq_poll(struct file *file,
			     struct poll_table_struct *wait)
{
	struct linereq *lr = file->private_data;
	__poll_t events = 0;

	guard(srcu)(&lr->gdev->srcu);

	if (!rcu_access_pointer(lr->gdev->chip))
		return EPOLLHUP | EPOLLERR;

	poll_wait(file, &lr->wait, wait);

	if (!kfifo_is_empty_spinlocked_noirqsave(&lr->events,
						 &lr->wait.lock))
		events = EPOLLIN | EPOLLRDNORM;

	return events;
}

static ssize_t linereq_read(struct file *file, char __user *buf,
			    size_t count, loff_t *f_ps)
{
	struct linereq *lr = file->private_data;
	struct gpio_v2_line_event le;
	ssize_t bytes_read = 0;
	int ret;

	guard(srcu)(&lr->gdev->srcu);

	if (!rcu_access_pointer(lr->gdev->chip))
		return -ENODEV;

	if (count < sizeof(le))
		return -EINVAL;

	do {
		scoped_guard(spinlock, &lr->wait.lock) {
			if (kfifo_is_empty(&lr->events)) {
				if (bytes_read)
					return bytes_read;

				if (file->f_flags & O_NONBLOCK)
					return -EAGAIN;

				ret = wait_event_interruptible_locked(lr->wait,
						!kfifo_is_empty(&lr->events));
				if (ret)
					return ret;
			}

			if (kfifo_out(&lr->events, &le, 1) != 1) {
				/*
				 * This should never happen - we hold the
				 * lock from the moment we learned the fifo
				 * is no longer empty until now.
				 */
				WARN(1, "failed to read from non-empty kfifo");
				return -EIO;
			}
		}

		if (copy_to_user(buf + bytes_read, &le, sizeof(le)))
			return -EFAULT;
		bytes_read += sizeof(le);
	} while (count >= bytes_read + sizeof(le));

	return bytes_read;
}

static void linereq_free(struct linereq *lr)
{
	struct line *line;
	unsigned int i;

	if (lr->device_unregistered_nb.notifier_call)
		blocking_notifier_chain_unregister(&lr->gdev->device_notifier,
						   &lr->device_unregistered_nb);

	for (i = 0; i < lr->num_lines; i++) {
		line = &lr->lines[i];
		if (!line->desc)
			continue;

		edge_detector_stop(line);
		if (line_has_supinfo(line))
			supinfo_erase(line);
		gpiod_free(line->desc);
	}
	kfifo_free(&lr->events);
	kfree(lr->label);
	gpio_device_put(lr->gdev);
	kvfree(lr);
}

static int linereq_release(struct inode *inode, struct file *file)
{
	struct linereq *lr = file->private_data;

	linereq_free(lr);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void linereq_show_fdinfo(struct seq_file *out, struct file *file)
{
	struct linereq *lr = file->private_data;
	struct device *dev = &lr->gdev->dev;
	u16 i;

	seq_printf(out, "gpio-chip:\t%s\n", dev_name(dev));

	for (i = 0; i < lr->num_lines; i++)
		seq_printf(out, "gpio-line:\t%d\n",
			   gpio_chip_hwgpio(lr->lines[i].desc));
}
#endif

static const struct file_operations line_fileops = {
	.release = linereq_release,
	.read = linereq_read,
	.poll = linereq_poll,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = linereq_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = linereq_ioctl_compat,
#endif
#ifdef CONFIG_PROC_FS
	.show_fdinfo = linereq_show_fdinfo,
#endif
};
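/*
 * For reference, a minimal userspace sketch of the v2 line request that
 * linereq_create() below services, assuming the uAPI from <linux/gpio.h>;
 * error handling omitted:
 *
 *	struct gpio_v2_line_request req = {
 *		.offsets = { 3, 4 },
 *		.num_lines = 2,
 *		.consumer = "example",
 *		.config.flags = GPIO_V2_LINE_FLAG_INPUT |
 *				GPIO_V2_LINE_FLAG_EDGE_RISING,
 *	};
 *
 *	ioctl(chip_fd, GPIO_V2_GET_LINE_IOCTL, &req);
 *
 * On success req.fd refers to the new line request; the earlier sketches
 * show reading values and events from it.
 */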
static int linereq_create(struct gpio_device *gdev, void __user *ip)
{
	struct gpio_v2_line_request ulr;
	struct gpio_v2_line_config *lc;
	struct linereq *lr;
	struct file *file;
	u64 flags, edflags;
	unsigned int i;
	int fd, ret;

	if (copy_from_user(&ulr, ip, sizeof(ulr)))
		return -EFAULT;

	if ((ulr.num_lines == 0) || (ulr.num_lines > GPIO_V2_LINES_MAX))
		return -EINVAL;

	if (memchr_inv(ulr.padding, 0, sizeof(ulr.padding)))
		return -EINVAL;

	lc = &ulr.config;
	ret = gpio_v2_line_config_validate(lc, ulr.num_lines);
	if (ret)
		return ret;

	lr = kvzalloc(struct_size(lr, lines, ulr.num_lines), GFP_KERNEL);
	if (!lr)
		return -ENOMEM;
	lr->num_lines = ulr.num_lines;

	lr->gdev = gpio_device_get(gdev);

	for (i = 0; i < ulr.num_lines; i++) {
		lr->lines[i].req = lr;
		WRITE_ONCE(lr->lines[i].sw_debounced, 0);
		INIT_DELAYED_WORK(&lr->lines[i].work, debounce_work_func);
	}

	if (ulr.consumer[0] != '\0') {
		/* label is only initialized if consumer is set */
		lr->label = kstrndup(ulr.consumer, sizeof(ulr.consumer) - 1,
				     GFP_KERNEL);
		if (!lr->label) {
			ret = -ENOMEM;
			goto out_free_linereq;
		}
	}

	mutex_init(&lr->config_mutex);
	init_waitqueue_head(&lr->wait);
	INIT_KFIFO(lr->events);
	lr->event_buffer_size = ulr.event_buffer_size;
	if (lr->event_buffer_size == 0)
		lr->event_buffer_size = ulr.num_lines * 16;
	else if (lr->event_buffer_size > GPIO_V2_LINES_MAX * 16)
		lr->event_buffer_size = GPIO_V2_LINES_MAX * 16;

	atomic_set(&lr->seqno, 0);

	/* Request each GPIO */
	for (i = 0; i < ulr.num_lines; i++) {
		u32 offset = ulr.offsets[i];
		struct gpio_desc *desc = gpio_device_get_desc(gdev, offset);

		if (IS_ERR(desc)) {
			ret = PTR_ERR(desc);
			goto out_free_linereq;
		}

		ret = gpiod_request_user(desc, lr->label);
		if (ret)
			goto out_free_linereq;

		lr->lines[i].desc = desc;
		flags = gpio_v2_line_config_flags(lc, i);
		gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);

		ret = gpiod_set_transitory(desc, false);
		if (ret < 0)
			goto out_free_linereq;

		edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
		/*
		 * Lines have to be requested explicitly for input
		 * or output, else the line will be treated "as is".
		 */
		if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
			int val = gpio_v2_line_config_output_value(lc, i);

			ret = gpiod_direction_output(desc, val);
			if (ret)
				goto out_free_linereq;
		} else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
			ret = gpiod_direction_input(desc);
			if (ret)
				goto out_free_linereq;

			ret = edge_detector_setup(&lr->lines[i], lc, i,
						  edflags);
			if (ret)
				goto out_free_linereq;
		}

		lr->lines[i].edflags = edflags;

		gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);

		dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
			offset);
	}

	lr->device_unregistered_nb.notifier_call = linereq_unregistered_notify;
	ret = blocking_notifier_chain_register(&gdev->device_notifier,
					       &lr->device_unregistered_nb);
	if (ret)
		goto out_free_linereq;

	fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto out_free_linereq;
	}

	file = anon_inode_getfile("gpio-line", &line_fileops, lr,
				  O_RDONLY | O_CLOEXEC);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto out_put_unused_fd;
	}

	ulr.fd = fd;
	if (copy_to_user(ip, &ulr, sizeof(ulr))) {
		/*
		 * fput() will trigger the release() callback, so do not go onto
		 * the regular error cleanup path here.
		 */
		fput(file);
		put_unused_fd(fd);
		return -EFAULT;
	}

	fd_install(fd, file);

	dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
		lr->num_lines);

	return 0;

out_put_unused_fd:
	put_unused_fd(fd);
out_free_linereq:
	linereq_free(lr);
	return ret;
}

#ifdef CONFIG_GPIO_CDEV_V1

/*
 * GPIO line event management
 */

/**
 * struct lineevent_state - contains the state of a userspace event
 * @gdev: the GPIO device the event pertains to
 * @label: consumer label used to tag descriptors
 * @desc: the GPIO descriptor held by this event
 * @eflags: the event flags this line was requested with
 * @irq: the interrupt that triggers in response to events on this GPIO
 * @wait: wait queue that handles blocking reads of events
 * @device_unregistered_nb: notifier block for receiving gdev unregister events
 * @events: KFIFO for the GPIO events
 * @timestamp: cache for the timestamp storing it between hardirq
 * and IRQ thread, used to bring the timestamp close to the actual
 * event
 */
struct lineevent_state {
	struct gpio_device *gdev;
	const char *label;
	struct gpio_desc *desc;
	u32 eflags;
	int irq;
	wait_queue_head_t wait;
	struct notifier_block device_unregistered_nb;
	DECLARE_KFIFO(events, struct gpioevent_data, 16);
	u64 timestamp;
};

#define GPIOEVENT_REQUEST_VALID_FLAGS \
	(GPIOEVENT_REQUEST_RISING_EDGE | \
	 GPIOEVENT_REQUEST_FALLING_EDGE)
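/*
 * For reference, a minimal userspace sketch of the (deprecated) v1 event
 * request handled further below by lineevent_create(), assuming the uAPI
 * from <linux/gpio.h>; error handling omitted:
 *
 *	struct gpioevent_request req = {
 *		.lineoffset = 3,
 *		.handleflags = GPIOHANDLE_REQUEST_INPUT,
 *		.eventflags = GPIOEVENT_REQUEST_BOTH_EDGES,
 *		.consumer_label = "example",
 *	};
 *	struct gpioevent_data ev;
 *
 *	ioctl(chip_fd, GPIO_GET_LINEEVENT_IOCTL, &req);
 *	read(req.fd, &ev, sizeof(ev));
 */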

static void lineevent_free(struct lineevent_state *le)
{
	if (le->device_unregistered_nb.notifier_call)
		blocking_notifier_chain_unregister(&le->gdev->device_notifier,
						   &le->device_unregistered_nb);
	if (le->irq)
		free_irq_label(free_irq(le->irq, le));
	if (le->desc)
		gpiod_free(le->desc);
	kfree(le->label);
	gpio_device_put(le->gdev);
	kfree(le);
}

static int lineevent_release(struct inode *inode, struct file *file)
{
	lineevent_free(file->private_data);
	return 0;
}

static long lineevent_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	struct lineevent_state *le = file->private_data;
	void __user *ip = (void __user *)arg;
	struct gpiohandle_data ghd;

	guard(srcu)(&le->gdev->srcu);

	if (!rcu_access_pointer(le->gdev->chip))
		return -ENODEV;

	/*
	 * We can get the value for an event line but not set it,
	 * because it is input by definition.
	 */
	if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
		int val;

		memset(&ghd, 0, sizeof(ghd));

		val = gpiod_get_value_cansleep(le->desc);
		if (val < 0)
			return val;
		ghd.values[0] = val;

		if (copy_to_user(ip, &ghd, sizeof(ghd)))
			return -EFAULT;

		return 0;
	}
	return -EINVAL;
}

#ifdef CONFIG_COMPAT
static long lineevent_ioctl_compat(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	return lineevent_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations lineevent_fileops = {
	.release = lineevent_release,
	.read = lineevent_read,
	.poll = lineevent_poll,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = lineevent_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = lineevent_ioctl_compat,
#endif
};
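
/*
 * For reference, the only ioctl() accepted on a line event fd is a value
 * read, as implemented by lineevent_ioctl() above. A minimal userspace
 * sketch, illustrative only; "event_fd" is assumed to come from a prior
 * GPIO_GET_LINEEVENT_IOCTL.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/gpio.h>
 *
 *	int read_event_line_value(int event_fd)
 *	{
 *		struct gpiohandle_data ghd;
 *
 *		if (ioctl(event_fd, GPIOHANDLE_GET_LINE_VALUES_IOCTL, &ghd) < 0)
 *			return -1;
 *
 *		return ghd.values[0];
 *	}
 */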

static irqreturn_t lineevent_irq_thread(int irq, void *p)
{
	struct lineevent_state *le = p;
	struct gpioevent_data ge;
	int ret;

	/* Do not leak kernel stack to userspace */
	memset(&ge, 0, sizeof(ge));

	/*
	 * We may be running from a nested threaded interrupt in which case
	 * we didn't get the timestamp from lineevent_irq_handler().
	 */
	if (!le->timestamp)
		ge.timestamp = ktime_get_ns();
	else
		ge.timestamp = le->timestamp;

	if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
	    && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
		int level = gpiod_get_value_cansleep(le->desc);

		if (level)
			/* Emit low-to-high event */
			ge.id = GPIOEVENT_EVENT_RISING_EDGE;
		else
			/* Emit high-to-low event */
			ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
	} else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
		/* Emit low-to-high event */
		ge.id = GPIOEVENT_EVENT_RISING_EDGE;
	} else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
		/* Emit high-to-low event */
		ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
	} else {
		return IRQ_NONE;
	}

	ret = kfifo_in_spinlocked_noirqsave(&le->events, &ge,
					    1, &le->wait.lock);
	if (ret)
		wake_up_poll(&le->wait, EPOLLIN);
	else
		pr_debug_ratelimited("event FIFO is full - event dropped\n");

	return IRQ_HANDLED;
}

static irqreturn_t lineevent_irq_handler(int irq, void *p)
{
	struct lineevent_state *le = p;

	/*
	 * Just store the timestamp in hardirq context so we get it as
	 * close in time as possible to the actual event.
	 */
	le->timestamp = ktime_get_ns();

	return IRQ_WAKE_THREAD;
}
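
/*
 * For reference, a minimal userspace sketch of the request that
 * lineevent_create() below services. Illustrative only, not part of this
 * driver; the chip path "/dev/gpiochip0", line offset 5 and consumer
 * label are assumptions.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *	#include <linux/gpio.h>
 *
 *	int request_both_edges(void)
 *	{
 *		struct gpioevent_request req;
 *		int chip_fd = open("/dev/gpiochip0", O_RDONLY);
 *
 *		if (chip_fd < 0)
 *			return -1;
 *
 *		memset(&req, 0, sizeof(req));
 *		req.lineoffset = 5;
 *		req.handleflags = GPIOHANDLE_REQUEST_INPUT;
 *		req.eventflags = GPIOEVENT_REQUEST_BOTH_EDGES;
 *		strcpy(req.consumer_label, "example");
 *
 *		if (ioctl(chip_fd, GPIO_GET_LINEEVENT_IOCTL, &req) < 0) {
 *			close(chip_fd);
 *			return -1;
 *		}
 *		close(chip_fd);
 *
 *		req.fd is the event fd: read() events from it, poll() for more
 *		return req.fd;
 *	}
 */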

static int lineevent_create(struct gpio_device *gdev, void __user *ip)
{
	struct gpioevent_request eventreq;
	struct lineevent_state *le;
	struct gpio_desc *desc;
	struct file *file;
	u32 offset;
	u32 lflags;
	u32 eflags;
	int fd;
	int ret;
	int irq, irqflags = 0;
	char *label;

	if (copy_from_user(&eventreq, ip, sizeof(eventreq)))
		return -EFAULT;

	offset = eventreq.lineoffset;
	lflags = eventreq.handleflags;
	eflags = eventreq.eventflags;

	desc = gpio_device_get_desc(gdev, offset);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	/* Return an error if an unknown flag is set */
	if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
	    (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS))
		return -EINVAL;

	/* This is just wrong: we don't look for events on output lines */
	if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
	    (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
	    (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
		return -EINVAL;

	/* Only one bias flag can be set. */
	if (((lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
	     (lflags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
			GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
	    ((lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
	     (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
		return -EINVAL;

	le = kzalloc(sizeof(*le), GFP_KERNEL);
	if (!le)
		return -ENOMEM;
	le->gdev = gpio_device_get(gdev);

	if (eventreq.consumer_label[0] != '\0') {
		/* label is only initialized if consumer_label is set */
		le->label = kstrndup(eventreq.consumer_label,
				     sizeof(eventreq.consumer_label) - 1,
				     GFP_KERNEL);
		if (!le->label) {
			ret = -ENOMEM;
			goto out_free_le;
		}
	}

	ret = gpiod_request_user(desc, le->label);
	if (ret)
		goto out_free_le;
	le->desc = desc;
	le->eflags = eflags;

	linehandle_flags_to_desc_flags(lflags, &desc->flags);

	ret = gpiod_direction_input(desc);
	if (ret)
		goto out_free_le;

	gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);

	irq = gpiod_to_irq(desc);
	if (irq <= 0) {
		ret = -ENODEV;
		goto out_free_le;
	}

	if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
		irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
			IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
	if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
		irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
			IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
	irqflags |= IRQF_ONESHOT;

	INIT_KFIFO(le->events);
	init_waitqueue_head(&le->wait);

	le->device_unregistered_nb.notifier_call = lineevent_unregistered_notify;
	ret = blocking_notifier_chain_register(&gdev->device_notifier,
					       &le->device_unregistered_nb);
	if (ret)
		goto out_free_le;

	label = make_irq_label(le->label);
	if (IS_ERR(label)) {
		ret = PTR_ERR(label);
		goto out_free_le;
	}

	/* Request a thread to read the events */
	ret = request_threaded_irq(irq,
				   lineevent_irq_handler,
				   lineevent_irq_thread,
				   irqflags,
				   label,
				   le);
	if (ret) {
		free_irq_label(label);
		goto out_free_le;
	}

	le->irq = irq;

	fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto out_free_le;
	}

	file = anon_inode_getfile("gpio-event",
				  &lineevent_fileops,
				  le,
				  O_RDONLY | O_CLOEXEC);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto out_put_unused_fd;
	}

	eventreq.fd = fd;
	if (copy_to_user(ip, &eventreq, sizeof(eventreq))) {
		/*
		 * fput() will trigger the release() callback, so do not go onto
		 * the regular error cleanup path here.
		 */
		fput(file);
		put_unused_fd(fd);
		return -EFAULT;
	}

	fd_install(fd, file);

	return 0;

out_put_unused_fd:
	put_unused_fd(fd);
out_free_le:
	lineevent_free(le);
	return ret;
}

static void gpio_v2_line_info_to_v1(struct gpio_v2_line_info *info_v2,
				    struct gpioline_info *info_v1)
{
	u64 flagsv2 = info_v2->flags;

	memcpy(info_v1->name, info_v2->name, sizeof(info_v1->name));
	memcpy(info_v1->consumer, info_v2->consumer, sizeof(info_v1->consumer));
	info_v1->line_offset = info_v2->offset;
	info_v1->flags = 0;

	if (flagsv2 & GPIO_V2_LINE_FLAG_USED)
		info_v1->flags |= GPIOLINE_FLAG_KERNEL;

	if (flagsv2 & GPIO_V2_LINE_FLAG_OUTPUT)
		info_v1->flags |= GPIOLINE_FLAG_IS_OUT;

	if (flagsv2 & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
		info_v1->flags |= GPIOLINE_FLAG_ACTIVE_LOW;

	if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_DRAIN)
		info_v1->flags |= GPIOLINE_FLAG_OPEN_DRAIN;
	if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_SOURCE)
		info_v1->flags |= GPIOLINE_FLAG_OPEN_SOURCE;

	if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)
		info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_UP;
	if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN)
		info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_DOWN;
	if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_DISABLED)
		info_v1->flags |= GPIOLINE_FLAG_BIAS_DISABLE;
}

static void gpio_v2_line_info_changed_to_v1(
		struct gpio_v2_line_info_changed *lic_v2,
		struct gpioline_info_changed *lic_v1)
{
	memset(lic_v1, 0, sizeof(*lic_v1));
	gpio_v2_line_info_to_v1(&lic_v2->info, &lic_v1->info);
	lic_v1->timestamp = lic_v2->timestamp_ns;
	lic_v1->event_type = lic_v2->event_type;
}

#endif /* CONFIG_GPIO_CDEV_V1 */

static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
				  struct gpio_v2_line_info *info)
{
	unsigned long dflags;
	const char *label;

	CLASS(gpio_chip_guard, guard)(desc);
	if (!guard.gc)
		return;

	memset(info, 0, sizeof(*info));
	info->offset = gpio_chip_hwgpio(desc);

	if (desc->name)
		strscpy(info->name, desc->name, sizeof(info->name));

	dflags = READ_ONCE(desc->flags);

	scoped_guard(srcu, &desc->gdev->desc_srcu) {
		label = gpiod_get_label(desc);
		if (label && test_bit(FLAG_REQUESTED, &dflags))
			strscpy(info->consumer, label,
				sizeof(info->consumer));
	}

	/*
	 * Userspace only needs to know that the kernel is using this GPIO
	 * so that it can't use it.
	 * The calculation of the used flag is slightly racy, as it may read
	 * desc, gc and pinctrl state without a lock covering all three at
	 * once. Worst case if the line is in transition and the calculation
	 * is inconsistent then it looks to the user like they performed the
	 * read on the other side of the transition - but that can always
	 * happen.
	 * The definitive test that a line is available to userspace is to
	 * request it.
	 */
	if (test_bit(FLAG_REQUESTED, &dflags) ||
	    test_bit(FLAG_IS_HOGGED, &dflags) ||
	    test_bit(FLAG_USED_AS_IRQ, &dflags) ||
	    test_bit(FLAG_EXPORT, &dflags) ||
	    test_bit(FLAG_SYSFS, &dflags) ||
	    !gpiochip_line_is_valid(guard.gc, info->offset) ||
	    !pinctrl_gpio_can_use_line(guard.gc, info->offset))
		info->flags |= GPIO_V2_LINE_FLAG_USED;

	if (test_bit(FLAG_IS_OUT, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_OUTPUT;
	else
		info->flags |= GPIO_V2_LINE_FLAG_INPUT;

	if (test_bit(FLAG_ACTIVE_LOW, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_ACTIVE_LOW;

	if (test_bit(FLAG_OPEN_DRAIN, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_OPEN_DRAIN;
	if (test_bit(FLAG_OPEN_SOURCE, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_OPEN_SOURCE;

	if (test_bit(FLAG_BIAS_DISABLE, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_BIAS_DISABLED;
	if (test_bit(FLAG_PULL_DOWN, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN;
	if (test_bit(FLAG_PULL_UP, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_UP;

	if (test_bit(FLAG_EDGE_RISING, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_EDGE_RISING;
	if (test_bit(FLAG_EDGE_FALLING, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_EDGE_FALLING;

	if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
	else if (test_bit(FLAG_EVENT_CLOCK_HTE, &dflags))
		info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
}
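
/*
 * For reference, a minimal userspace sketch of querying the line info
 * that gpio_desc_to_lineinfo() above populates. Illustrative only, not
 * part of this driver; the chip path "/dev/gpiochip0" and offset 5 are
 * assumptions.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *	#include <linux/gpio.h>
 *
 *	int print_line_info(void)
 *	{
 *		struct gpio_v2_line_info info;
 *		int chip_fd = open("/dev/gpiochip0", O_RDONLY);
 *
 *		if (chip_fd < 0)
 *			return -1;
 *
 *		memset(&info, 0, sizeof(info));
 *		info.offset = 5;
 *		if (ioctl(chip_fd, GPIO_V2_GET_LINEINFO_IOCTL, &info) < 0) {
 *			close(chip_fd);
 *			return -1;
 *		}
 *
 *		printf("line 5 (%s): %s, %s\n", info.name,
 *		       info.flags & GPIO_V2_LINE_FLAG_OUTPUT ?
 *				"output" : "input",
 *		       info.flags & GPIO_V2_LINE_FLAG_USED ?
 *				"used" : "unused");
 *		close(chip_fd);
 *		return 0;
 *	}
 */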

struct gpio_chardev_data {
	struct gpio_device *gdev;
	wait_queue_head_t wait;
	DECLARE_KFIFO(events, struct gpio_v2_line_info_changed, 32);
	struct notifier_block lineinfo_changed_nb;
	struct notifier_block device_unregistered_nb;
	unsigned long *watched_lines;
#ifdef CONFIG_GPIO_CDEV_V1
	atomic_t watch_abi_version;
#endif
};

static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip)
{
	struct gpio_device *gdev = cdev->gdev;
	struct gpiochip_info chipinfo;

	memset(&chipinfo, 0, sizeof(chipinfo));

	strscpy(chipinfo.name, dev_name(&gdev->dev), sizeof(chipinfo.name));
	strscpy(chipinfo.label, gdev->label, sizeof(chipinfo.label));
	chipinfo.lines = gdev->ngpio;
	if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
		return -EFAULT;
	return 0;
}
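
/*
 * For reference, a minimal userspace sketch of the chip info query that
 * chipinfo_get() above services. Illustrative only; "/dev/gpiochip0" is
 * an assumption.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *	#include <linux/gpio.h>
 *
 *	int print_chip_info(void)
 *	{
 *		struct gpiochip_info ci;
 *		int fd = open("/dev/gpiochip0", O_RDONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (ioctl(fd, GPIO_GET_CHIPINFO_IOCTL, &ci) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *
 *		printf("%s [%s] has %u lines\n", ci.name, ci.label, ci.lines);
 *		close(fd);
 *		return 0;
 *	}
 */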

#ifdef CONFIG_GPIO_CDEV_V1
/*
 * returns 0 if the versions match, else the previously selected ABI version
 */
static int lineinfo_ensure_abi_version(struct gpio_chardev_data *cdata,
				       unsigned int version)
{
	int abiv = atomic_cmpxchg(&cdata->watch_abi_version, 0, version);

	if (abiv == version)
		return 0;

	return abiv;
}

static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip,
			   bool watch)
{
	struct gpio_desc *desc;
	struct gpioline_info lineinfo;
	struct gpio_v2_line_info lineinfo_v2;

	if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
		return -EFAULT;

	/* this doubles as a range check on line_offset */
	desc = gpio_device_get_desc(cdev->gdev, lineinfo.line_offset);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	if (watch) {
		if (lineinfo_ensure_abi_version(cdev, 1))
			return -EPERM;

		if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
			return -EBUSY;
	}

	gpio_desc_to_lineinfo(desc, &lineinfo_v2);
	gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);

	if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
		if (watch)
			clear_bit(lineinfo.line_offset, cdev->watched_lines);
		return -EFAULT;
	}

	return 0;
}
#endif

static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
			bool watch)
{
	struct gpio_desc *desc;
	struct gpio_v2_line_info lineinfo;

	if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
		return -EFAULT;

	if (memchr_inv(lineinfo.padding, 0, sizeof(lineinfo.padding)))
		return -EINVAL;

	desc = gpio_device_get_desc(cdev->gdev, lineinfo.offset);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	if (watch) {
#ifdef CONFIG_GPIO_CDEV_V1
		if (lineinfo_ensure_abi_version(cdev, 2))
			return -EPERM;
#endif
		if (test_and_set_bit(lineinfo.offset, cdev->watched_lines))
			return -EBUSY;
	}
	gpio_desc_to_lineinfo(desc, &lineinfo);
	supinfo_to_lineinfo(desc, &lineinfo);

	if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
		if (watch)
			clear_bit(lineinfo.offset, cdev->watched_lines);
		return -EFAULT;
	}

	return 0;
}

static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
{
	__u32 offset;

	if (copy_from_user(&offset, ip, sizeof(offset)))
		return -EFAULT;

	if (offset >= cdev->gdev->ngpio)
		return -EINVAL;

	if (!test_and_clear_bit(offset, cdev->watched_lines))
		return -EBUSY;

	return 0;
}
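
/*
 * For reference, a minimal userspace sketch of starting and stopping a
 * line info watch, as serviced by lineinfo_get() and lineinfo_unwatch()
 * above. Illustrative only; "chip_fd" is assumed to be an already opened
 * /dev/gpiochipN and offset 5 is an assumption.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/gpio.h>
 *
 *	int watch_line(int chip_fd)
 *	{
 *		struct gpio_v2_line_info info;
 *		__u32 offset = 5;
 *
 *		memset(&info, 0, sizeof(info));
 *		info.offset = offset;
 *		if (ioctl(chip_fd, GPIO_V2_GET_LINEINFO_WATCH_IOCTL, &info) < 0)
 *			return -1;
 *
 *		consume change events by reading chip_fd, see the sketch
 *		further below, then stop watching:
 *
 *		return ioctl(chip_fd, GPIO_GET_LINEINFO_UNWATCH_IOCTL, &offset);
 *	}
 */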

/*
 * gpio_ioctl() - ioctl handler for the GPIO chardev
 */
static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct gpio_chardev_data *cdev = file->private_data;
	struct gpio_device *gdev = cdev->gdev;
	void __user *ip = (void __user *)arg;

	guard(srcu)(&gdev->srcu);

	/* We fail any subsequent ioctl()s when the chip is gone */
	if (!rcu_access_pointer(gdev->chip))
		return -ENODEV;

	/* Fill in the struct and pass to userspace */
	switch (cmd) {
	case GPIO_GET_CHIPINFO_IOCTL:
		return chipinfo_get(cdev, ip);
#ifdef CONFIG_GPIO_CDEV_V1
	case GPIO_GET_LINEHANDLE_IOCTL:
		return linehandle_create(gdev, ip);
	case GPIO_GET_LINEEVENT_IOCTL:
		return lineevent_create(gdev, ip);
	case GPIO_GET_LINEINFO_IOCTL:
		return lineinfo_get_v1(cdev, ip, false);
	case GPIO_GET_LINEINFO_WATCH_IOCTL:
		return lineinfo_get_v1(cdev, ip, true);
#endif /* CONFIG_GPIO_CDEV_V1 */
	case GPIO_V2_GET_LINEINFO_IOCTL:
		return lineinfo_get(cdev, ip, false);
	case GPIO_V2_GET_LINEINFO_WATCH_IOCTL:
		return lineinfo_get(cdev, ip, true);
	case GPIO_V2_GET_LINE_IOCTL:
		return linereq_create(gdev, ip);
	case GPIO_GET_LINEINFO_UNWATCH_IOCTL:
		return lineinfo_unwatch(cdev, ip);
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static long gpio_ioctl_compat(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	return gpio_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static int lineinfo_changed_notify(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct gpio_chardev_data *cdev =
		container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb);
	struct gpio_v2_line_info_changed chg;
	struct gpio_desc *desc = data;
	int ret;

	if (!test_bit(gpio_chip_hwgpio(desc), cdev->watched_lines))
		return NOTIFY_DONE;

	memset(&chg, 0, sizeof(chg));
	chg.event_type = action;
	chg.timestamp_ns = ktime_get_ns();
	gpio_desc_to_lineinfo(desc, &chg.info);
	supinfo_to_lineinfo(desc, &chg.info);

	ret = kfifo_in_spinlocked(&cdev->events, &chg, 1, &cdev->wait.lock);
	if (ret)
		wake_up_poll(&cdev->wait, EPOLLIN);
	else
		pr_debug_ratelimited("lineinfo event FIFO is full - event dropped\n");

	return NOTIFY_OK;
}

static int gpio_device_unregistered_notify(struct notifier_block *nb,
					   unsigned long action, void *data)
{
	struct gpio_chardev_data *cdev = container_of(nb,
						      struct gpio_chardev_data,
						      device_unregistered_nb);

	wake_up_poll(&cdev->wait, EPOLLIN | EPOLLERR);

	return NOTIFY_OK;
}

static __poll_t lineinfo_watch_poll(struct file *file,
				    struct poll_table_struct *pollt)
{
	struct gpio_chardev_data *cdev = file->private_data;
	__poll_t events = 0;

	guard(srcu)(&cdev->gdev->srcu);

	if (!rcu_access_pointer(cdev->gdev->chip))
		return EPOLLHUP | EPOLLERR;

	poll_wait(file, &cdev->wait, pollt);

	if (!kfifo_is_empty_spinlocked_noirqsave(&cdev->events,
						 &cdev->wait.lock))
		events = EPOLLIN | EPOLLRDNORM;

	return events;
}
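
/*
 * For reference, a minimal userspace sketch of waiting for and reading
 * line info change events from the chardev fd, as serviced by
 * lineinfo_watch_poll() above and lineinfo_watch_read() below.
 * Illustrative only; "chip_fd" is assumed to be an already opened
 * /dev/gpiochipN with at least one line being watched via the v2 ABI.
 *
 *	#include <poll.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <linux/gpio.h>
 *
 *	int wait_for_change(int chip_fd)
 *	{
 *		struct gpio_v2_line_info_changed chg;
 *		struct pollfd pfd = { .fd = chip_fd, .events = POLLIN };
 *
 *		if (poll(&pfd, 1, -1) <= 0)
 *			return -1;
 *
 *		if (read(chip_fd, &chg, sizeof(chg)) != sizeof(chg))
 *			return -1;
 *
 *		printf("line %u changed, event type %u\n",
 *		       chg.info.offset, chg.event_type);
 *		return 0;
 *	}
 */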

static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
				   size_t count, loff_t *off)
{
	struct gpio_chardev_data *cdev = file->private_data;
	struct gpio_v2_line_info_changed event;
	ssize_t bytes_read = 0;
	int ret;
	size_t event_size;

	guard(srcu)(&cdev->gdev->srcu);

	if (!rcu_access_pointer(cdev->gdev->chip))
		return -ENODEV;

#ifndef CONFIG_GPIO_CDEV_V1
	event_size = sizeof(struct gpio_v2_line_info_changed);
	if (count < event_size)
		return -EINVAL;
#endif

	do {
		scoped_guard(spinlock, &cdev->wait.lock) {
			if (kfifo_is_empty(&cdev->events)) {
				if (bytes_read)
					return bytes_read;

				if (file->f_flags & O_NONBLOCK)
					return -EAGAIN;

				ret = wait_event_interruptible_locked(cdev->wait,
						!kfifo_is_empty(&cdev->events));
				if (ret)
					return ret;
			}
#ifdef CONFIG_GPIO_CDEV_V1
			/* must be after kfifo check so watch_abi_version is set */
			if (atomic_read(&cdev->watch_abi_version) == 2)
				event_size = sizeof(struct gpio_v2_line_info_changed);
			else
				event_size = sizeof(struct gpioline_info_changed);
			if (count < event_size)
				return -EINVAL;
#endif
			if (kfifo_out(&cdev->events, &event, 1) != 1) {
				/*
				 * This should never happen - we hold the
				 * lock from the moment we learned the fifo
				 * is no longer empty until now.
				 */
				WARN(1, "failed to read from non-empty kfifo");
				return -EIO;
			}
		}

#ifdef CONFIG_GPIO_CDEV_V1
		if (event_size == sizeof(struct gpio_v2_line_info_changed)) {
			if (copy_to_user(buf + bytes_read, &event, event_size))
				return -EFAULT;
		} else {
			struct gpioline_info_changed event_v1;

			gpio_v2_line_info_changed_to_v1(&event, &event_v1);
			if (copy_to_user(buf + bytes_read, &event_v1,
					 event_size))
				return -EFAULT;
		}
#else
		if (copy_to_user(buf + bytes_read, &event, event_size))
			return -EFAULT;
#endif
		bytes_read += event_size;
	} while (count >= bytes_read + sizeof(event));

	return bytes_read;
}

/**
 * gpio_chrdev_open() - open the chardev for ioctl operations
 * @inode: inode for this chardev
 * @file: file struct for storing private data
 * Returns 0 on success
 */
static int gpio_chrdev_open(struct inode *inode, struct file *file)
{
	struct gpio_device *gdev = container_of(inode->i_cdev,
						struct gpio_device, chrdev);
	struct gpio_chardev_data *cdev;
	int ret = -ENOMEM;

	guard(srcu)(&gdev->srcu);

	/* Fail on open if the backing gpiochip is gone */
	if (!rcu_access_pointer(gdev->chip))
		return -ENODEV;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return -ENOMEM;

	cdev->watched_lines = bitmap_zalloc(gdev->ngpio, GFP_KERNEL);
	if (!cdev->watched_lines)
		goto out_free_cdev;

	init_waitqueue_head(&cdev->wait);
	INIT_KFIFO(cdev->events);
	cdev->gdev = gpio_device_get(gdev);

	cdev->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify;
	ret = blocking_notifier_chain_register(&gdev->line_state_notifier,
					       &cdev->lineinfo_changed_nb);
	if (ret)
		goto out_free_bitmap;

	cdev->device_unregistered_nb.notifier_call =
					gpio_device_unregistered_notify;
	ret = blocking_notifier_chain_register(&gdev->device_notifier,
					       &cdev->device_unregistered_nb);
	if (ret)
		goto out_unregister_line_notifier;

	file->private_data = cdev;

	ret = nonseekable_open(inode, file);
	if (ret)
		goto out_unregister_device_notifier;

	return ret;

out_unregister_device_notifier:
	blocking_notifier_chain_unregister(&gdev->device_notifier,
					   &cdev->device_unregistered_nb);
out_unregister_line_notifier:
	blocking_notifier_chain_unregister(&gdev->line_state_notifier,
					   &cdev->lineinfo_changed_nb);
out_free_bitmap:
	gpio_device_put(gdev);
	bitmap_free(cdev->watched_lines);
out_free_cdev:
	kfree(cdev);
	return ret;
}

/**
 * gpio_chrdev_release() - close chardev after ioctl operations
 * @inode: inode for this chardev
 * @file: file struct for storing private data
 * Returns 0 on success
 */
static int gpio_chrdev_release(struct inode *inode, struct file *file)
{
	struct gpio_chardev_data *cdev = file->private_data;
	struct gpio_device *gdev = cdev->gdev;

	blocking_notifier_chain_unregister(&gdev->device_notifier,
					   &cdev->device_unregistered_nb);
	blocking_notifier_chain_unregister(&gdev->line_state_notifier,
					   &cdev->lineinfo_changed_nb);
	bitmap_free(cdev->watched_lines);
	gpio_device_put(gdev);
	kfree(cdev);

	return 0;
}

static const struct file_operations gpio_fileops = {
	.release = gpio_chrdev_release,
	.open = gpio_chrdev_open,
	.poll = lineinfo_watch_poll,
	.read = lineinfo_watch_read,
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.unlocked_ioctl = gpio_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = gpio_ioctl_compat,
#endif
};

int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt)
{
	struct gpio_chip *gc;
	int ret;

	cdev_init(&gdev->chrdev, &gpio_fileops);
	gdev->chrdev.owner = THIS_MODULE;
	gdev->dev.devt = MKDEV(MAJOR(devt), gdev->id);

	ret = cdev_device_add(&gdev->chrdev, &gdev->dev);
	if (ret)
		return ret;

	guard(srcu)(&gdev->srcu);
	gc = srcu_dereference(gdev->chip, &gdev->srcu);
	if (!gc)
		return -ENODEV;

	chip_dbg(gc, "added GPIO chardev (%d:%d)\n", MAJOR(devt), gdev->id);

	return 0;
}

void gpiolib_cdev_unregister(struct gpio_device *gdev)
{
	cdev_device_del(&gdev->chrdev, &gdev->dev);
	blocking_notifier_call_chain(&gdev->device_notifier, 0, NULL);
}