// SPDX-License-Identifier: GPL-2.0

#include <linux/anon_inodes.h>
#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/build_bug.h>
#include <linux/cdev.h>
#include <linux/compat.h>
#include <linux/compiler.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/hte.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pinctrl/consumer.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/timekeeping.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>

#include <uapi/linux/gpio.h>

#include "gpiolib.h"
#include "gpiolib-cdev.h"

/*
 * Array sizes must ensure 64-bit alignment and not create holes in the
 * struct packing.
 */
static_assert(IS_ALIGNED(GPIO_V2_LINES_MAX, 2));
static_assert(IS_ALIGNED(GPIO_MAX_NAME_SIZE, 8));

/*
 * Check that uAPI structs are 64-bit aligned for 32/64-bit compatibility
 */
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_attribute), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config_attribute), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_request), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info_changed), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_event), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_values), 8));

/* Character device interface to GPIO.
 *
 * The GPIO character device, /dev/gpiochipN, provides userspace an
 * interface to gpiolib GPIOs via ioctl()s.
 */

/*
 * GPIO line handle management
 */

#ifdef CONFIG_GPIO_CDEV_V1
/**
 * struct linehandle_state - contains the state of a userspace handle
 * @gdev: the GPIO device the handle pertains to
 * @label: consumer label used to tag descriptors
 * @descs: the GPIO descriptors held by this handle
 * @num_descs: the number of descriptors held in the descs array
 */
struct linehandle_state {
	struct gpio_device *gdev;
	const char *label;
	struct gpio_desc *descs[GPIOHANDLES_MAX];
	u32 num_descs;
};

#define GPIOHANDLE_REQUEST_VALID_FLAGS \
	(GPIOHANDLE_REQUEST_INPUT | \
	GPIOHANDLE_REQUEST_OUTPUT | \
	GPIOHANDLE_REQUEST_ACTIVE_LOW | \
	GPIOHANDLE_REQUEST_BIAS_PULL_UP | \
	GPIOHANDLE_REQUEST_BIAS_PULL_DOWN | \
	GPIOHANDLE_REQUEST_BIAS_DISABLE | \
	GPIOHANDLE_REQUEST_OPEN_DRAIN | \
	GPIOHANDLE_REQUEST_OPEN_SOURCE)

static int linehandle_validate_flags(u32 flags)
{
	/* Return an error if an unknown flag is set */
	if (flags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
		return -EINVAL;

	/*
	 * Do not allow both INPUT & OUTPUT flags to be set as they are
	 * contradictory.
	 */
	if ((flags & GPIOHANDLE_REQUEST_INPUT) &&
	    (flags & GPIOHANDLE_REQUEST_OUTPUT))
		return -EINVAL;

	/*
	 * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
	 * the hardware actually supports enabling both at the same time the
	 * electrical result would be disastrous.
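	 * (Open drain can only drive the line low and open source can only
	 * drive it high, so requesting both together is contradictory.)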
	 */
	if ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) &&
	    (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
		return -EINVAL;

	/* OPEN_DRAIN and OPEN_SOURCE flags only make sense for output mode. */
	if (!(flags & GPIOHANDLE_REQUEST_OUTPUT) &&
	    ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
	     (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE)))
		return -EINVAL;

	/* Bias flags only allowed for input or output mode. */
	if (!((flags & GPIOHANDLE_REQUEST_INPUT) ||
	      (flags & GPIOHANDLE_REQUEST_OUTPUT)) &&
	    ((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) ||
	     (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP) ||
	     (flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN)))
		return -EINVAL;

	/* Only one bias flag can be set. */
	if (((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
	     (flags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
		       GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
	    ((flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
	     (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
		return -EINVAL;

	return 0;
}

static void linehandle_flags_to_desc_flags(u32 lflags, unsigned long *flagsp)
{
	assign_bit(FLAG_ACTIVE_LOW, flagsp,
		   lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW);
	assign_bit(FLAG_OPEN_DRAIN, flagsp,
		   lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN);
	assign_bit(FLAG_OPEN_SOURCE, flagsp,
		   lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE);
	assign_bit(FLAG_PULL_UP, flagsp,
		   lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP);
	assign_bit(FLAG_PULL_DOWN, flagsp,
		   lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN);
	assign_bit(FLAG_BIAS_DISABLE, flagsp,
		   lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE);
}

static long linehandle_set_config(struct linehandle_state *lh,
				  void __user *ip)
{
	struct gpiohandle_config gcnf;
	struct gpio_desc *desc;
	int i, ret;
	u32 lflags;

	if (copy_from_user(&gcnf, ip, sizeof(gcnf)))
		return -EFAULT;

	lflags = gcnf.flags;
	ret = linehandle_validate_flags(lflags);
	if (ret)
		return ret;

	for (i = 0; i < lh->num_descs; i++) {
		desc = lh->descs[i];
		linehandle_flags_to_desc_flags(gcnf.flags, &desc->flags);

		/*
		 * Lines have to be requested explicitly for input
		 * or output, else the line will be treated "as is".
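		 * (the current direction and value of the line are then left
		 * untouched)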
		 */
		if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
			int val = !!gcnf.default_values[i];

			ret = gpiod_direction_output(desc, val);
			if (ret)
				return ret;
		} else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
			ret = gpiod_direction_input(desc);
			if (ret)
				return ret;
		}

		blocking_notifier_call_chain(&desc->gdev->notifier,
					     GPIO_V2_LINE_CHANGED_CONFIG,
					     desc);
	}
	return 0;
}

static long linehandle_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct linehandle_state *lh = file->private_data;
	void __user *ip = (void __user *)arg;
	struct gpiohandle_data ghd;
	DECLARE_BITMAP(vals, GPIOHANDLES_MAX);
	unsigned int i;
	int ret;

	switch (cmd) {
	case GPIOHANDLE_GET_LINE_VALUES_IOCTL:
		/* NOTE: It's okay to read values of output lines */
		ret = gpiod_get_array_value_complex(false, true,
						    lh->num_descs, lh->descs,
						    NULL, vals);
		if (ret)
			return ret;

		memset(&ghd, 0, sizeof(ghd));
		for (i = 0; i < lh->num_descs; i++)
			ghd.values[i] = test_bit(i, vals);

		if (copy_to_user(ip, &ghd, sizeof(ghd)))
			return -EFAULT;

		return 0;
	case GPIOHANDLE_SET_LINE_VALUES_IOCTL:
		/*
		 * All line descriptors were created at once with the same
		 * flags so just check if the first one is really output.
		 */
		if (!test_bit(FLAG_IS_OUT, &lh->descs[0]->flags))
			return -EPERM;

		if (copy_from_user(&ghd, ip, sizeof(ghd)))
			return -EFAULT;

		/* Clamp all values to [0,1] */
		for (i = 0; i < lh->num_descs; i++)
			__assign_bit(i, vals, ghd.values[i]);

		/* Reuse the array setting function */
		return gpiod_set_array_value_complex(false,
						     true,
						     lh->num_descs,
						     lh->descs,
						     NULL,
						     vals);
	case GPIOHANDLE_SET_CONFIG_IOCTL:
		return linehandle_set_config(lh, ip);
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static long linehandle_ioctl_compat(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	return linehandle_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static void linehandle_free(struct linehandle_state *lh)
{
	int i;

	for (i = 0; i < lh->num_descs; i++)
		if (lh->descs[i])
			gpiod_free(lh->descs[i]);
	kfree(lh->label);
	put_device(&lh->gdev->dev);
	kfree(lh);
}

static int linehandle_release(struct inode *inode, struct file *file)
{
	linehandle_free(file->private_data);
	return 0;
}

static const struct file_operations linehandle_fileops = {
	.release = linehandle_release,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = linehandle_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = linehandle_ioctl_compat,
#endif
};

static int linehandle_create(struct gpio_device *gdev, void __user *ip)
{
	struct gpiohandle_request handlereq;
	struct linehandle_state *lh;
	struct file *file;
	int fd, i, ret;
	u32 lflags;

	if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
		return -EFAULT;
	if ((handlereq.lines == 0) || (handlereq.lines > GPIOHANDLES_MAX))
		return -EINVAL;

	lflags = handlereq.flags;

	ret = linehandle_validate_flags(lflags);
	if (ret)
		return ret;

	lh = kzalloc(sizeof(*lh), GFP_KERNEL);
	if (!lh)
		return -ENOMEM;
	lh->gdev = gdev;
	get_device(&gdev->dev);

	if (handlereq.consumer_label[0] != '\0') {
		/* label is only initialized if consumer_label is set */
		lh->label = kstrndup(handlereq.consumer_label,
				     sizeof(handlereq.consumer_label) - 1,
				     GFP_KERNEL);
		if (!lh->label) {
			ret = -ENOMEM;
			goto out_free_lh;
		}
	}

	lh->num_descs = handlereq.lines;

	/* Request each GPIO */
	for (i = 0; i < handlereq.lines; i++) {
		u32 offset = handlereq.lineoffsets[i];
		struct gpio_desc *desc = gpiochip_get_desc(gdev->chip, offset);

		if (IS_ERR(desc)) {
			ret = PTR_ERR(desc);
			goto out_free_lh;
		}

		ret = gpiod_request_user(desc, lh->label);
		if (ret)
			goto out_free_lh;
		lh->descs[i] = desc;
		linehandle_flags_to_desc_flags(handlereq.flags, &desc->flags);

		ret = gpiod_set_transitory(desc, false);
		if (ret < 0)
			goto out_free_lh;

		/*
		 * Lines have to be requested explicitly for input
		 * or output, else the line will be treated "as is".
		 */
		if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
			int val = !!handlereq.default_values[i];

			ret = gpiod_direction_output(desc, val);
			if (ret)
				goto out_free_lh;
		} else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
			ret = gpiod_direction_input(desc);
			if (ret)
				goto out_free_lh;
		}

		blocking_notifier_call_chain(&desc->gdev->notifier,
					     GPIO_V2_LINE_CHANGED_REQUESTED, desc);

		dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
			offset);
	}

	fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto out_free_lh;
	}

	file = anon_inode_getfile("gpio-linehandle",
				  &linehandle_fileops,
				  lh,
				  O_RDONLY | O_CLOEXEC);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto out_put_unused_fd;
	}

	handlereq.fd = fd;
	if (copy_to_user(ip, &handlereq, sizeof(handlereq))) {
		/*
		 * fput() will trigger the release() callback, so do not go onto
		 * the regular error cleanup path here.
		 */
		fput(file);
		put_unused_fd(fd);
		return -EFAULT;
	}

	fd_install(fd, file);

	dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
		lh->num_descs);

	return 0;

out_put_unused_fd:
	put_unused_fd(fd);
out_free_lh:
	linehandle_free(lh);
	return ret;
}
#endif /* CONFIG_GPIO_CDEV_V1 */

/**
 * struct line - contains the state of a requested line
 * @desc: the GPIO descriptor for this line.
 * @req: the corresponding line request
 * @irq: the interrupt triggered in response to events on this GPIO
 * @edflags: the edge flags, GPIO_V2_LINE_FLAG_EDGE_RISING and/or
 * GPIO_V2_LINE_FLAG_EDGE_FALLING, indicating the edge detection applied
 * @timestamp_ns: cache for the timestamp storing it between hardirq and
 * IRQ thread, used to bring the timestamp close to the actual event
 * @req_seqno: the seqno for the current edge event in the sequence of
 * events for the corresponding line request. This is drawn from the @req.
 * @line_seqno: the seqno for the current edge event in the sequence of
 * events for this line.
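 * For requests containing only a single line this also serves as the seqno
 * reported for the request.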
 * @work: the worker that implements software debouncing
 * @sw_debounced: flag indicating if the software debouncer is active
 * @level: the current debounced physical level of the line
 * @hdesc: the Hardware Timestamp Engine (HTE) descriptor
 * @raw_level: the line level at the time of event
 * @total_discard_seq: the running counter of the discarded events
 * @last_seqno: the last sequence number before debounce period expires
 */
struct line {
	struct gpio_desc *desc;
	/*
	 * -- edge detector specific fields --
	 */
	struct linereq *req;
	unsigned int irq;
	/*
	 * The flags for the active edge detector configuration.
	 *
	 * edflags is set by linereq_create(), linereq_free(), and
	 * linereq_set_config_unlocked(), which are themselves mutually
	 * exclusive, and is accessed by edge_irq_thread(),
	 * process_hw_ts_thread() and debounce_work_func(),
	 * which can all live with a slightly stale value.
	 */
	u64 edflags;
	/*
	 * timestamp_ns and req_seqno are accessed only by
	 * edge_irq_handler() and edge_irq_thread(), which are themselves
	 * mutually exclusive, so no additional protection is necessary.
	 */
	u64 timestamp_ns;
	u32 req_seqno;
	/*
	 * line_seqno is accessed by either edge_irq_thread() or
	 * debounce_work_func(), which are themselves mutually exclusive,
	 * so no additional protection is necessary.
	 */
	u32 line_seqno;
	/*
	 * -- debouncer specific fields --
	 */
	struct delayed_work work;
	/*
	 * sw_debounced is accessed by linereq_set_config(), which is the
	 * only setter, and linereq_get_values(), which can live with a
	 * slightly stale value.
	 */
	unsigned int sw_debounced;
	/*
	 * level is accessed by debounce_work_func(), which is the only
	 * setter, and linereq_get_values() which can live with a slightly
	 * stale value.
	 */
	unsigned int level;
#ifdef CONFIG_HTE
	struct hte_ts_desc hdesc;
	/*
	 * HTE provider sets line level at the time of event. The valid
	 * value is 0 or 1 and negative value for an error.
	 */
	int raw_level;
	/*
	 * when sw_debounce is set on HTE enabled line, this is running
	 * counter of the discarded events.
	 */
	u32 total_discard_seq;
	/*
	 * when sw_debounce is set on HTE enabled line, this variable records
	 * last sequence number before debounce period expires.
	 */
	u32 last_seqno;
#endif /* CONFIG_HTE */
};

/**
 * struct linereq - contains the state of a userspace line request
 * @gdev: the GPIO device the line request pertains to
 * @label: consumer label used to tag GPIO descriptors
 * @num_lines: the number of lines in the lines array
 * @wait: wait queue that handles blocking reads of events
 * @event_buffer_size: the number of elements allocated in @events
 * @events: KFIFO for the GPIO events
 * @seqno: the sequence number for edge events generated on all lines in
 * this line request. Note that this is not used when @num_lines is 1, as
 * the line_seqno is then the same and is cheaper to calculate.
 * @config_mutex: mutex for serializing ioctl() calls to ensure consistency
 * of configuration, particularly multi-step accesses to desc flags.
 * @lines: the lines held by this line request, with @num_lines elements.
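 * @lines is a flexible array member, sized via struct_size() at allocation.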
 */
struct linereq {
	struct gpio_device *gdev;
	const char *label;
	u32 num_lines;
	wait_queue_head_t wait;
	u32 event_buffer_size;
	DECLARE_KFIFO_PTR(events, struct gpio_v2_line_event);
	atomic_t seqno;
	struct mutex config_mutex;
	struct line lines[];
};

#define GPIO_V2_LINE_BIAS_FLAGS \
	(GPIO_V2_LINE_FLAG_BIAS_PULL_UP | \
	 GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN | \
	 GPIO_V2_LINE_FLAG_BIAS_DISABLED)

#define GPIO_V2_LINE_DIRECTION_FLAGS \
	(GPIO_V2_LINE_FLAG_INPUT | \
	 GPIO_V2_LINE_FLAG_OUTPUT)

#define GPIO_V2_LINE_DRIVE_FLAGS \
	(GPIO_V2_LINE_FLAG_OPEN_DRAIN | \
	 GPIO_V2_LINE_FLAG_OPEN_SOURCE)

#define GPIO_V2_LINE_EDGE_FLAGS \
	(GPIO_V2_LINE_FLAG_EDGE_RISING | \
	 GPIO_V2_LINE_FLAG_EDGE_FALLING)

#define GPIO_V2_LINE_FLAG_EDGE_BOTH GPIO_V2_LINE_EDGE_FLAGS

#define GPIO_V2_LINE_VALID_FLAGS \
	(GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
	 GPIO_V2_LINE_DIRECTION_FLAGS | \
	 GPIO_V2_LINE_DRIVE_FLAGS | \
	 GPIO_V2_LINE_EDGE_FLAGS | \
	 GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME | \
	 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
	 GPIO_V2_LINE_BIAS_FLAGS)

/* subset of flags relevant for edge detector configuration */
#define GPIO_V2_LINE_EDGE_DETECTOR_FLAGS \
	(GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
	 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
	 GPIO_V2_LINE_EDGE_FLAGS)

static void linereq_put_event(struct linereq *lr,
			      struct gpio_v2_line_event *le)
{
	bool overflow = false;

	spin_lock(&lr->wait.lock);
	if (kfifo_is_full(&lr->events)) {
		overflow = true;
		kfifo_skip(&lr->events);
	}
	kfifo_in(&lr->events, le, 1);
	spin_unlock(&lr->wait.lock);
	if (!overflow)
		wake_up_poll(&lr->wait, EPOLLIN);
	else
		pr_debug_ratelimited("event FIFO is full - event dropped\n");
}

static u64 line_event_timestamp(struct line *line)
{
	if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
		return ktime_get_real_ns();
	else if (IS_ENABLED(CONFIG_HTE) &&
		 test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
		return line->timestamp_ns;

	return ktime_get_ns();
}

static u32 line_event_id(int level)
{
	return level ? GPIO_V2_LINE_EVENT_RISING_EDGE :
		       GPIO_V2_LINE_EVENT_FALLING_EDGE;
}

#ifdef CONFIG_HTE

static enum hte_return process_hw_ts_thread(void *p)
{
	struct line *line;
	struct linereq *lr;
	struct gpio_v2_line_event le;
	u64 edflags;
	int level;

	if (!p)
		return HTE_CB_HANDLED;

	line = p;
	lr = line->req;

	memset(&le, 0, sizeof(le));

	le.timestamp_ns = line->timestamp_ns;
	edflags = READ_ONCE(line->edflags);

	switch (edflags & GPIO_V2_LINE_EDGE_FLAGS) {
	case GPIO_V2_LINE_FLAG_EDGE_BOTH:
		level = (line->raw_level >= 0) ?
				line->raw_level :
				gpiod_get_raw_value_cansleep(line->desc);

		if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
			level = !level;

		le.id = line_event_id(level);
		break;
	case GPIO_V2_LINE_FLAG_EDGE_RISING:
		le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
		break;
	case GPIO_V2_LINE_FLAG_EDGE_FALLING:
		le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
		break;
	default:
		return HTE_CB_HANDLED;
	}
	le.line_seqno = line->line_seqno;
	le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
	le.offset = gpio_chip_hwgpio(line->desc);

	linereq_put_event(lr, &le);

	return HTE_CB_HANDLED;
}

static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
{
	struct line *line;
	struct linereq *lr;
	int diff_seqno = 0;

	if (!ts || !p)
		return HTE_CB_HANDLED;

	line = p;
	line->timestamp_ns = ts->tsc;
	line->raw_level = ts->raw_level;
	lr = line->req;

	if (READ_ONCE(line->sw_debounced)) {
		line->total_discard_seq++;
		line->last_seqno = ts->seq;
		mod_delayed_work(system_wq, &line->work,
		  usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
	} else {
		if (unlikely(ts->seq < line->line_seqno))
			return HTE_CB_HANDLED;

		diff_seqno = ts->seq - line->line_seqno;
		line->line_seqno = ts->seq;
		if (lr->num_lines != 1)
			line->req_seqno = atomic_add_return(diff_seqno,
							    &lr->seqno);

		return HTE_RUN_SECOND_CB;
	}

	return HTE_CB_HANDLED;
}

static int hte_edge_setup(struct line *line, u64 eflags)
{
	int ret;
	unsigned long flags = 0;
	struct hte_ts_desc *hdesc = &line->hdesc;

	if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
		flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
				 HTE_FALLING_EDGE_TS :
				 HTE_RISING_EDGE_TS;
	if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
		flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
				 HTE_RISING_EDGE_TS :
				 HTE_FALLING_EDGE_TS;

	line->total_discard_seq = 0;

	hte_init_line_attr(hdesc, desc_to_gpio(line->desc), flags, NULL,
			   line->desc);

	ret = hte_ts_get(NULL, hdesc, 0);
	if (ret)
		return ret;

	return hte_request_ts_ns(hdesc, process_hw_ts, process_hw_ts_thread,
				 line);
}

#else

static int hte_edge_setup(struct line *line, u64 eflags)
{
	return 0;
}
#endif /* CONFIG_HTE */

static irqreturn_t edge_irq_thread(int irq, void *p)
{
	struct line *line = p;
	struct linereq *lr = line->req;
	struct gpio_v2_line_event le;

	/* Do not leak kernel stack to userspace */
	memset(&le, 0, sizeof(le));

	if (line->timestamp_ns) {
		le.timestamp_ns = line->timestamp_ns;
	} else {
		/*
		 * We may be running from a nested threaded interrupt in
		 * which case we didn't get the timestamp from
		 * edge_irq_handler().
		 */
		le.timestamp_ns = line_event_timestamp(line);
		if (lr->num_lines != 1)
			line->req_seqno = atomic_inc_return(&lr->seqno);
	}
	line->timestamp_ns = 0;

	switch (READ_ONCE(line->edflags) & GPIO_V2_LINE_EDGE_FLAGS) {
	case GPIO_V2_LINE_FLAG_EDGE_BOTH:
		le.id = line_event_id(gpiod_get_value_cansleep(line->desc));
		break;
	case GPIO_V2_LINE_FLAG_EDGE_RISING:
		le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
		break;
	case GPIO_V2_LINE_FLAG_EDGE_FALLING:
		le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
		break;
	default:
		return IRQ_NONE;
	}
	line->line_seqno++;
	le.line_seqno = line->line_seqno;
	le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
	le.offset = gpio_chip_hwgpio(line->desc);

	linereq_put_event(lr, &le);

	return IRQ_HANDLED;
}

static irqreturn_t edge_irq_handler(int irq, void *p)
{
	struct line *line = p;
	struct linereq *lr = line->req;

	/*
	 * Just store the timestamp in hardirq context so we get it as
	 * close in time as possible to the actual event.
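	 * The event itself is built and queued by edge_irq_thread().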
	 */
	line->timestamp_ns = line_event_timestamp(line);

	if (lr->num_lines != 1)
		line->req_seqno = atomic_inc_return(&lr->seqno);

	return IRQ_WAKE_THREAD;
}

/*
 * returns the current debounced logical value.
 */
static bool debounced_value(struct line *line)
{
	bool value;

	/*
	 * minor race - debouncer may be stopped here, so edge_detector_stop()
	 * must leave the value unchanged so the following will read the level
	 * from when the debouncer was last running.
	 */
	value = READ_ONCE(line->level);

	if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
		value = !value;

	return value;
}

static irqreturn_t debounce_irq_handler(int irq, void *p)
{
	struct line *line = p;

	mod_delayed_work(system_wq, &line->work,
		usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));

	return IRQ_HANDLED;
}

static void debounce_work_func(struct work_struct *work)
{
	struct gpio_v2_line_event le;
	struct line *line = container_of(work, struct line, work.work);
	struct linereq *lr;
	u64 eflags, edflags = READ_ONCE(line->edflags);
	int level = -1;
#ifdef CONFIG_HTE
	int diff_seqno;

	if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
		level = line->raw_level;
#endif
	if (level < 0)
		level = gpiod_get_raw_value_cansleep(line->desc);
	if (level < 0) {
		pr_debug_ratelimited("debouncer failed to read line value\n");
		return;
	}

	if (READ_ONCE(line->level) == level)
		return;

	WRITE_ONCE(line->level, level);

	/* -- edge detection -- */
	eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
	if (!eflags)
		return;

	/* switch from physical level to logical - if they differ */
	if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
		level = !level;

	/* ignore edges that are not being monitored */
	if (((eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) && !level) ||
	    ((eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) && level))
		return;

	/* Do not leak kernel stack to userspace */
	memset(&le, 0, sizeof(le));

	lr = line->req;
	le.timestamp_ns = line_event_timestamp(line);
	le.offset = gpio_chip_hwgpio(line->desc);
#ifdef CONFIG_HTE
	if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) {
		/* discard events except the last one */
		line->total_discard_seq -= 1;
		diff_seqno = line->last_seqno - line->total_discard_seq -
				line->line_seqno;
		line->line_seqno = line->last_seqno - line->total_discard_seq;
		le.line_seqno = line->line_seqno;
		le.seqno = (lr->num_lines == 1) ?
			le.line_seqno : atomic_add_return(diff_seqno, &lr->seqno);
	} else
#endif /* CONFIG_HTE */
	{
		line->line_seqno++;
		le.line_seqno = line->line_seqno;
		le.seqno = (lr->num_lines == 1) ?
			le.line_seqno : atomic_inc_return(&lr->seqno);
	}

	le.id = line_event_id(level);

	linereq_put_event(lr, &le);
}

static int debounce_setup(struct line *line, unsigned int debounce_period_us)
{
	unsigned long irqflags;
	int ret, level, irq;

	/* try hardware */
	ret = gpiod_set_debounce(line->desc, debounce_period_us);
	if (!ret) {
		WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
		return ret;
	}
	if (ret != -ENOTSUPP)
		return ret;

	if (debounce_period_us) {
		/* setup software debounce */
		level = gpiod_get_raw_value_cansleep(line->desc);
		if (level < 0)
			return level;

		if (!(IS_ENABLED(CONFIG_HTE) &&
		      test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))) {
			irq = gpiod_to_irq(line->desc);
			if (irq < 0)
				return -ENXIO;

			irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
			ret = request_irq(irq, debounce_irq_handler, irqflags,
					  line->req->label, line);
			if (ret)
				return ret;
			line->irq = irq;
		} else {
			ret = hte_edge_setup(line, GPIO_V2_LINE_FLAG_EDGE_BOTH);
			if (ret)
				return ret;
		}

		WRITE_ONCE(line->level, level);
		WRITE_ONCE(line->sw_debounced, 1);
	}
	return 0;
}

static bool gpio_v2_line_config_debounced(struct gpio_v2_line_config *lc,
					  unsigned int line_idx)
{
	unsigned int i;
	u64 mask = BIT_ULL(line_idx);

	for (i = 0; i < lc->num_attrs; i++) {
		if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
		    (lc->attrs[i].mask & mask))
			return true;
	}
	return false;
}

static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
					       unsigned int line_idx)
{
	unsigned int i;
	u64 mask = BIT_ULL(line_idx);

	for (i = 0; i < lc->num_attrs; i++) {
		if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
		    (lc->attrs[i].mask & mask))
			return lc->attrs[i].attr.debounce_period_us;
	}
	return 0;
}

static void edge_detector_stop(struct line *line)
{
	if (line->irq) {
		free_irq(line->irq, line);
		line->irq = 0;
	}

#ifdef CONFIG_HTE
	if (READ_ONCE(line->edflags) & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
		hte_ts_put(&line->hdesc);
#endif

	cancel_delayed_work_sync(&line->work);
	WRITE_ONCE(line->sw_debounced, 0);
	WRITE_ONCE(line->edflags, 0);
	if (line->desc)
		WRITE_ONCE(line->desc->debounce_period_us, 0);
	/* do not change line->level - see comment in debounced_value() */
}

static int edge_detector_setup(struct line *line,
			       struct gpio_v2_line_config *lc,
			       unsigned int line_idx, u64 edflags)
{
	u32 debounce_period_us;
	unsigned long irqflags = 0;
	u64 eflags;
	int irq, ret;

	eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
	if (eflags && !kfifo_initialized(&line->req->events)) {
		ret = kfifo_alloc(&line->req->events,
				  line->req->event_buffer_size, GFP_KERNEL);
		if (ret)
			return ret;
	}
	if (gpio_v2_line_config_debounced(lc, line_idx)) {
		debounce_period_us = gpio_v2_line_config_debounce_period(lc, line_idx);
		ret = debounce_setup(line, debounce_period_us);
		if (ret)
			return ret;
		WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
	}

	/* detection disabled or sw debouncer will provide edge detection */
	if (!eflags || READ_ONCE(line->sw_debounced))
		return 0;

	if (IS_ENABLED(CONFIG_HTE) &&
	    (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
		return hte_edge_setup(line, edflags);

	irq = gpiod_to_irq(line->desc);
	if (irq < 0)
		return -ENXIO;

	if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
		irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
			IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
	if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
		irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
			IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
	irqflags |= IRQF_ONESHOT;

	/* Request a thread to read the events */
	ret = request_threaded_irq(irq, edge_irq_handler, edge_irq_thread,
				   irqflags, line->req->label, line);
	if (ret)
		return ret;

	line->irq = irq;
	return 0;
}

static int edge_detector_update(struct line *line,
				struct gpio_v2_line_config *lc,
				unsigned int line_idx, u64 edflags)
{
	u64 active_edflags = READ_ONCE(line->edflags);
	unsigned int debounce_period_us =
			gpio_v2_line_config_debounce_period(lc, line_idx);

	if ((active_edflags == edflags) &&
	    (READ_ONCE(line->desc->debounce_period_us) == debounce_period_us))
		return 0;

	/* sw debounced and still will be...*/
	if (debounce_period_us && READ_ONCE(line->sw_debounced)) {
		WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
		return 0;
	}

	/* reconfiguring edge detection or sw debounce being disabled */
	if ((line->irq && !READ_ONCE(line->sw_debounced)) ||
	    (active_edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) ||
	    (!debounce_period_us && READ_ONCE(line->sw_debounced)))
		edge_detector_stop(line);

	return edge_detector_setup(line, lc, line_idx, edflags);
}

static u64 gpio_v2_line_config_flags(struct gpio_v2_line_config *lc,
				     unsigned int line_idx)
{
	unsigned int i;
	u64 mask = BIT_ULL(line_idx);

	for (i = 0; i < lc->num_attrs; i++) {
		if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_FLAGS) &&
		    (lc->attrs[i].mask & mask))
			return lc->attrs[i].attr.flags;
	}
	return lc->flags;
}

static int gpio_v2_line_config_output_value(struct gpio_v2_line_config *lc,
					    unsigned int line_idx)
{
	unsigned int i;
	u64 mask = BIT_ULL(line_idx);

	for (i = 0; i < lc->num_attrs; i++) {
		if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES) &&
		    (lc->attrs[i].mask & mask))
			return !!(lc->attrs[i].attr.values & mask);
	}
	return 0;
}

static int gpio_v2_line_flags_validate(u64 flags)
{
	/* Return an error if an unknown flag is set */
	if (flags & ~GPIO_V2_LINE_VALID_FLAGS)
		return -EINVAL;

	if (!IS_ENABLED(CONFIG_HTE) &&
	    (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
		return -EOPNOTSUPP;

	/*
	 * Do not allow both INPUT and OUTPUT flags to be set as they are
	 * contradictory.
	 */
	if ((flags & GPIO_V2_LINE_FLAG_INPUT) &&
	    (flags & GPIO_V2_LINE_FLAG_OUTPUT))
		return -EINVAL;

	/* Only allow one event clock source */
	if (IS_ENABLED(CONFIG_HTE) &&
	    (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME) &&
	    (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
		return -EINVAL;

	/* Edge detection requires explicit input. */
	if ((flags & GPIO_V2_LINE_EDGE_FLAGS) &&
	    !(flags & GPIO_V2_LINE_FLAG_INPUT))
		return -EINVAL;

	/*
	 * Do not allow OPEN_SOURCE and OPEN_DRAIN flags in a single
	 * request. If the hardware actually supports enabling both at the
	 * same time the electrical result would be disastrous.
	 */
	if ((flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN) &&
	    (flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE))
		return -EINVAL;

	/* Drive requires explicit output direction. */
	if ((flags & GPIO_V2_LINE_DRIVE_FLAGS) &&
	    !(flags & GPIO_V2_LINE_FLAG_OUTPUT))
		return -EINVAL;

	/* Bias requires explicit direction. */
	if ((flags & GPIO_V2_LINE_BIAS_FLAGS) &&
	    !(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
		return -EINVAL;

	/* Only one bias flag can be set. */
	if (((flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED) &&
	     (flags & (GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN |
		       GPIO_V2_LINE_FLAG_BIAS_PULL_UP))) ||
	    ((flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN) &&
	     (flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)))
		return -EINVAL;

	return 0;
}

static int gpio_v2_line_config_validate(struct gpio_v2_line_config *lc,
					unsigned int num_lines)
{
	unsigned int i;
	u64 flags;
	int ret;

	if (lc->num_attrs > GPIO_V2_LINE_NUM_ATTRS_MAX)
		return -EINVAL;

	if (memchr_inv(lc->padding, 0, sizeof(lc->padding)))
		return -EINVAL;

	for (i = 0; i < num_lines; i++) {
		flags = gpio_v2_line_config_flags(lc, i);
		ret = gpio_v2_line_flags_validate(flags);
		if (ret)
			return ret;

		/* debounce requires explicit input */
		if (gpio_v2_line_config_debounced(lc, i) &&
		    !(flags & GPIO_V2_LINE_FLAG_INPUT))
			return -EINVAL;
	}
	return 0;
}

static void gpio_v2_line_config_flags_to_desc_flags(u64 flags,
						    unsigned long *flagsp)
{
	assign_bit(FLAG_ACTIVE_LOW, flagsp,
		   flags & GPIO_V2_LINE_FLAG_ACTIVE_LOW);

	if (flags & GPIO_V2_LINE_FLAG_OUTPUT)
		set_bit(FLAG_IS_OUT, flagsp);
	else if (flags & GPIO_V2_LINE_FLAG_INPUT)
		clear_bit(FLAG_IS_OUT, flagsp);

	assign_bit(FLAG_EDGE_RISING, flagsp,
		   flags & GPIO_V2_LINE_FLAG_EDGE_RISING);
	assign_bit(FLAG_EDGE_FALLING, flagsp,
		   flags & GPIO_V2_LINE_FLAG_EDGE_FALLING);

	assign_bit(FLAG_OPEN_DRAIN, flagsp,
		   flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN);
	assign_bit(FLAG_OPEN_SOURCE, flagsp,
		   flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE);

	assign_bit(FLAG_PULL_UP, flagsp,
		   flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP);
	assign_bit(FLAG_PULL_DOWN, flagsp,
		   flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN);
	assign_bit(FLAG_BIAS_DISABLE, flagsp,
		   flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED);

	assign_bit(FLAG_EVENT_CLOCK_REALTIME, flagsp,
		   flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME);
	assign_bit(FLAG_EVENT_CLOCK_HTE, flagsp,
		   flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
}

static long linereq_get_values(struct linereq *lr, void __user *ip)
{
	struct gpio_v2_line_values lv;
	DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
	struct gpio_desc **descs;
	unsigned int i, didx, num_get;
	bool val;
	int ret;

	/* NOTE: It's ok to read values of output lines. */
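	/* Unlike the set path below, no output direction check is required. */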
	if (copy_from_user(&lv, ip, sizeof(lv)))
		return -EFAULT;

	for (num_get = 0, i = 0; i < lr->num_lines; i++) {
		if (lv.mask & BIT_ULL(i)) {
			num_get++;
			descs = &lr->lines[i].desc;
		}
	}

	if (num_get == 0)
		return -EINVAL;

	if (num_get != 1) {
		descs = kmalloc_array(num_get, sizeof(*descs), GFP_KERNEL);
		if (!descs)
			return -ENOMEM;
		for (didx = 0, i = 0; i < lr->num_lines; i++) {
			if (lv.mask & BIT_ULL(i)) {
				descs[didx] = lr->lines[i].desc;
				didx++;
			}
		}
	}
	ret = gpiod_get_array_value_complex(false, true, num_get,
					    descs, NULL, vals);

	if (num_get != 1)
		kfree(descs);
	if (ret)
		return ret;

	lv.bits = 0;
	for (didx = 0, i = 0; i < lr->num_lines; i++) {
		if (lv.mask & BIT_ULL(i)) {
			if (lr->lines[i].sw_debounced)
				val = debounced_value(&lr->lines[i]);
			else
				val = test_bit(didx, vals);
			if (val)
				lv.bits |= BIT_ULL(i);
			didx++;
		}
	}

	if (copy_to_user(ip, &lv, sizeof(lv)))
		return -EFAULT;

	return 0;
}

static long linereq_set_values_unlocked(struct linereq *lr,
					struct gpio_v2_line_values *lv)
{
	DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
	struct gpio_desc **descs;
	unsigned int i, didx, num_set;
	int ret;

	bitmap_zero(vals, GPIO_V2_LINES_MAX);
	for (num_set = 0, i = 0; i < lr->num_lines; i++) {
		if (lv->mask & BIT_ULL(i)) {
			if (!test_bit(FLAG_IS_OUT, &lr->lines[i].desc->flags))
				return -EPERM;
			if (lv->bits & BIT_ULL(i))
				__set_bit(num_set, vals);
			num_set++;
			descs = &lr->lines[i].desc;
		}
	}
	if (num_set == 0)
		return -EINVAL;

	if (num_set != 1) {
		/* build compacted desc array and values */
		descs = kmalloc_array(num_set, sizeof(*descs), GFP_KERNEL);
		if (!descs)
			return -ENOMEM;
		for (didx = 0, i = 0; i < lr->num_lines; i++) {
			if (lv->mask & BIT_ULL(i)) {
				descs[didx] = lr->lines[i].desc;
				didx++;
			}
		}
	}
	ret = gpiod_set_array_value_complex(false, true, num_set,
					    descs, NULL, vals);

	if (num_set != 1)
		kfree(descs);
	return ret;
}

static long linereq_set_values(struct linereq *lr, void __user *ip)
{
	struct gpio_v2_line_values lv;
	int ret;

	if (copy_from_user(&lv, ip, sizeof(lv)))
		return -EFAULT;

	mutex_lock(&lr->config_mutex);

	ret = linereq_set_values_unlocked(lr, &lv);

	mutex_unlock(&lr->config_mutex);

	return ret;
}

static long linereq_set_config_unlocked(struct linereq *lr,
					struct gpio_v2_line_config *lc)
{
	struct gpio_desc *desc;
	struct line *line;
	unsigned int i;
	u64 flags, edflags;
	int ret;

	for (i = 0; i < lr->num_lines; i++) {
		line = &lr->lines[i];
		desc = lr->lines[i].desc;
		flags = gpio_v2_line_config_flags(lc, i);
		gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
		edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
		/*
		 * Lines have to be requested explicitly for input
		 * or output, else the line will be treated "as is".
		 */
		if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
			int val = gpio_v2_line_config_output_value(lc, i);

			edge_detector_stop(line);
			ret = gpiod_direction_output(desc, val);
			if (ret)
				return ret;
		} else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
			ret = gpiod_direction_input(desc);
			if (ret)
				return ret;

			ret = edge_detector_update(line, lc, i, edflags);
			if (ret)
				return ret;
		}

		WRITE_ONCE(line->edflags, edflags);

		blocking_notifier_call_chain(&desc->gdev->notifier,
					     GPIO_V2_LINE_CHANGED_CONFIG,
					     desc);
	}
	return 0;
}

static long linereq_set_config(struct linereq *lr, void __user *ip)
{
	struct gpio_v2_line_config lc;
	int ret;

	if (copy_from_user(&lc, ip, sizeof(lc)))
		return -EFAULT;

	ret = gpio_v2_line_config_validate(&lc, lr->num_lines);
	if (ret)
		return ret;

	mutex_lock(&lr->config_mutex);

	ret = linereq_set_config_unlocked(lr, &lc);

	mutex_unlock(&lr->config_mutex);

	return ret;
}

static long linereq_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct linereq *lr = file->private_data;
	void __user *ip = (void __user *)arg;

	switch (cmd) {
	case GPIO_V2_LINE_GET_VALUES_IOCTL:
		return linereq_get_values(lr, ip);
	case GPIO_V2_LINE_SET_VALUES_IOCTL:
		return linereq_set_values(lr, ip);
	case GPIO_V2_LINE_SET_CONFIG_IOCTL:
		return linereq_set_config(lr, ip);
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static long linereq_ioctl_compat(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	return linereq_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static __poll_t linereq_poll(struct file *file,
			     struct poll_table_struct *wait)
{
	struct linereq *lr = file->private_data;
	__poll_t events = 0;

	poll_wait(file, &lr->wait, wait);

	if (!kfifo_is_empty_spinlocked_noirqsave(&lr->events,
						 &lr->wait.lock))
		events = EPOLLIN | EPOLLRDNORM;

	return events;
}

static ssize_t linereq_read(struct file *file,
			    char __user *buf,
			    size_t count,
			    loff_t *f_ps)
{
	struct linereq *lr = file->private_data;
	struct gpio_v2_line_event le;
	ssize_t bytes_read = 0;
	int ret;

	if (count < sizeof(le))
		return -EINVAL;

	do {
		spin_lock(&lr->wait.lock);
		if (kfifo_is_empty(&lr->events)) {
			if (bytes_read) {
				spin_unlock(&lr->wait.lock);
				return bytes_read;
			}

			if (file->f_flags & O_NONBLOCK) {
				spin_unlock(&lr->wait.lock);
				return -EAGAIN;
			}

			ret = wait_event_interruptible_locked(lr->wait,
					!kfifo_is_empty(&lr->events));
			if (ret) {
				spin_unlock(&lr->wait.lock);
				return ret;
			}
		}

		ret = kfifo_out(&lr->events, &le, 1);
		spin_unlock(&lr->wait.lock);
		if (ret != 1) {
			/*
			 * This should never happen - we were holding the
			 * lock from the moment we learned the fifo is no
			 * longer empty until now.
			 */
			ret = -EIO;
			break;
		}

		if (copy_to_user(buf + bytes_read, &le, sizeof(le)))
			return -EFAULT;
		bytes_read += sizeof(le);
	} while (count >= bytes_read + sizeof(le));

	return bytes_read;
}

static void linereq_free(struct linereq *lr)
{
	unsigned int i;

	for (i = 0; i < lr->num_lines; i++) {
		if (lr->lines[i].desc) {
			edge_detector_stop(&lr->lines[i]);
			gpiod_free(lr->lines[i].desc);
		}
	}
	kfifo_free(&lr->events);
	kfree(lr->label);
	put_device(&lr->gdev->dev);
	kfree(lr);
}

static int linereq_release(struct inode *inode, struct file *file)
{
	struct linereq *lr = file->private_data;

	linereq_free(lr);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void linereq_show_fdinfo(struct seq_file *out, struct file *file)
{
	struct linereq *lr = file->private_data;
	struct device *dev = &lr->gdev->dev;
	u16 i;

	seq_printf(out, "gpio-chip:\t%s\n", dev_name(dev));

	for (i = 0; i < lr->num_lines; i++)
		seq_printf(out, "gpio-line:\t%d\n",
			   gpio_chip_hwgpio(lr->lines[i].desc));
}
#endif

static const struct file_operations line_fileops = {
	.release = linereq_release,
	.read = linereq_read,
	.poll = linereq_poll,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = linereq_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = linereq_ioctl_compat,
#endif
#ifdef CONFIG_PROC_FS
	.show_fdinfo = linereq_show_fdinfo,
#endif
};

static int linereq_create(struct gpio_device *gdev, void __user *ip)
{
	struct gpio_v2_line_request ulr;
	struct gpio_v2_line_config *lc;
	struct linereq *lr;
	struct file *file;
	u64 flags, edflags;
	unsigned int i;
	int fd, ret;

	if (copy_from_user(&ulr, ip, sizeof(ulr)))
		return -EFAULT;

	if ((ulr.num_lines == 0) || (ulr.num_lines > GPIO_V2_LINES_MAX))
		return -EINVAL;

	if (memchr_inv(ulr.padding, 0, sizeof(ulr.padding)))
		return -EINVAL;

	lc = &ulr.config;
	ret = gpio_v2_line_config_validate(lc, ulr.num_lines);
	if (ret)
		return ret;

	lr = kzalloc(struct_size(lr, lines, ulr.num_lines), GFP_KERNEL);
	if (!lr)
		return -ENOMEM;

	lr->gdev = gdev;
	get_device(&gdev->dev);

	for (i = 0; i < ulr.num_lines; i++) {
		lr->lines[i].req = lr;
		WRITE_ONCE(lr->lines[i].sw_debounced, 0);
		INIT_DELAYED_WORK(&lr->lines[i].work, debounce_work_func);
	}

	if (ulr.consumer[0] != '\0') {
		/* label is only initialized if consumer is set */
		lr->label = kstrndup(ulr.consumer, sizeof(ulr.consumer) - 1,
				     GFP_KERNEL);
		if (!lr->label) {
			ret = -ENOMEM;
			goto out_free_linereq;
		}
	}

	mutex_init(&lr->config_mutex);
	init_waitqueue_head(&lr->wait);
	lr->event_buffer_size = ulr.event_buffer_size;
	if (lr->event_buffer_size == 0)
		lr->event_buffer_size = ulr.num_lines * 16;
	else if (lr->event_buffer_size > GPIO_V2_LINES_MAX * 16)
		lr->event_buffer_size = GPIO_V2_LINES_MAX * 16;

	atomic_set(&lr->seqno, 0);
	lr->num_lines = ulr.num_lines;

	/* Request each GPIO */
	for (i = 0; i < ulr.num_lines; i++) {
		u32 offset = ulr.offsets[i];
		struct gpio_desc *desc = gpiochip_get_desc(gdev->chip, offset);

		if (IS_ERR(desc)) {
			ret = PTR_ERR(desc);
			goto out_free_linereq;
		}

		ret = gpiod_request_user(desc, lr->label);
		if (ret)
			goto out_free_linereq;

		lr->lines[i].desc = desc;
		flags = gpio_v2_line_config_flags(lc, i);
		gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);

		ret = gpiod_set_transitory(desc, false);
		if (ret < 0)
			goto out_free_linereq;

		edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
		/*
		 * Lines have to be requested explicitly for input
		 * or output, else the line will be treated "as is".
		 */
		if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
			int val = gpio_v2_line_config_output_value(lc, i);

			ret = gpiod_direction_output(desc, val);
			if (ret)
				goto out_free_linereq;
		} else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
			ret = gpiod_direction_input(desc);
			if (ret)
				goto out_free_linereq;

			ret = edge_detector_setup(&lr->lines[i], lc, i,
						  edflags);
			if (ret)
				goto out_free_linereq;
		}

		lr->lines[i].edflags = edflags;

		blocking_notifier_call_chain(&desc->gdev->notifier,
					     GPIO_V2_LINE_CHANGED_REQUESTED, desc);

		dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
			offset);
	}

	fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto out_free_linereq;
	}

	file = anon_inode_getfile("gpio-line", &line_fileops, lr,
				  O_RDONLY | O_CLOEXEC);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto out_put_unused_fd;
	}

	ulr.fd = fd;
	if (copy_to_user(ip, &ulr, sizeof(ulr))) {
		/*
		 * fput() will trigger the release() callback, so do not go onto
		 * the regular error cleanup path here.
		 */
		fput(file);
		put_unused_fd(fd);
		return -EFAULT;
	}

	fd_install(fd, file);

	dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
		lr->num_lines);

	return 0;

out_put_unused_fd:
	put_unused_fd(fd);
out_free_linereq:
	linereq_free(lr);
	return ret;
}

#ifdef CONFIG_GPIO_CDEV_V1

/*
 * GPIO line event management
 */

/**
 * struct lineevent_state - contains the state of a userspace event
 * @gdev: the GPIO device the event pertains to
 * @label: consumer label used to tag descriptors
 * @desc: the GPIO descriptor held by this event
 * @eflags: the event flags this line was requested with
 * @irq: the interrupt triggered in response to events on this GPIO
 * @wait: wait queue that handles blocking reads of events
 * @events: KFIFO for the GPIO events
 * @timestamp: cache for the timestamp storing it between hardirq
 * and IRQ thread, used to bring the timestamp close to the actual
 * event
 */
struct lineevent_state {
	struct gpio_device *gdev;
	const char *label;
	struct gpio_desc *desc;
	u32 eflags;
	int irq;
	wait_queue_head_t wait;
	DECLARE_KFIFO(events, struct gpioevent_data, 16);
	u64 timestamp;
};

#define GPIOEVENT_REQUEST_VALID_FLAGS \
	(GPIOEVENT_REQUEST_RISING_EDGE | \
	GPIOEVENT_REQUEST_FALLING_EDGE)

static __poll_t lineevent_poll(struct file *file,
			       struct poll_table_struct *wait)
{
	struct lineevent_state *le = file->private_data;
	__poll_t events = 0;

	poll_wait(file, &le->wait, wait);

	if (!kfifo_is_empty_spinlocked_noirqsave(&le->events, &le->wait.lock))
		events = EPOLLIN | EPOLLRDNORM;

	return events;
}

struct compat_gpioeevent_data {
	compat_u64 timestamp;
	u32 id;
};

static ssize_t lineevent_read(struct file *file,
			      char __user *buf,
			      size_t count,
			      loff_t *f_ps)
{
	struct lineevent_state *le = file->private_data;
	struct gpioevent_data ge;
	ssize_t bytes_read = 0;
	ssize_t ge_size;
	int ret;

	/*
	 * When a compat system call is being used, struct gpioevent_data has,
	 * at least on ia32, a different size due to the alignment differences.
	 * Because the first member is 64 bits followed by one of 32 bits there
	 * is no gap between them. The only difference is the padding at the
	 * end of the data structure. Hence, we calculate the actual sizeof()
	 * and pass this as an argument to copy_to_user() to drop the unneeded
	 * bytes from the output.
	 */
	if (compat_need_64bit_alignment_fixup())
		ge_size = sizeof(struct compat_gpioeevent_data);
	else
		ge_size = sizeof(struct gpioevent_data);
	if (count < ge_size)
		return -EINVAL;

	do {
		spin_lock(&le->wait.lock);
		if (kfifo_is_empty(&le->events)) {
			if (bytes_read) {
				spin_unlock(&le->wait.lock);
				return bytes_read;
			}

			if (file->f_flags & O_NONBLOCK) {
				spin_unlock(&le->wait.lock);
				return -EAGAIN;
			}

			ret = wait_event_interruptible_locked(le->wait,
					!kfifo_is_empty(&le->events));
			if (ret) {
				spin_unlock(&le->wait.lock);
				return ret;
			}
		}

		ret = kfifo_out(&le->events, &ge, 1);
		spin_unlock(&le->wait.lock);
		if (ret != 1) {
			/*
			 * This should never happen - we were holding the lock
			 * from the moment we learned the fifo is no longer
			 * empty until now.
			 */
			ret = -EIO;
			break;
		}

		if (copy_to_user(buf + bytes_read, &ge, ge_size))
			return -EFAULT;
		bytes_read += ge_size;
	} while (count >= bytes_read + ge_size);

	return bytes_read;
}

static void lineevent_free(struct lineevent_state *le)
{
	if (le->irq)
		free_irq(le->irq, le);
	if (le->desc)
		gpiod_free(le->desc);
	kfree(le->label);
	put_device(&le->gdev->dev);
	kfree(le);
}

static int lineevent_release(struct inode *inode, struct file *file)
{
	lineevent_free(file->private_data);
	return 0;
}

static long lineevent_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	struct lineevent_state *le = file->private_data;
	void __user *ip = (void __user *)arg;
	struct gpiohandle_data ghd;

	/*
	 * We can get the value for an event line but not set it,
	 * because it is input by definition.
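	 * Hence only GPIOHANDLE_GET_LINE_VALUES_IOCTL is handled here.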
	 */
	if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
		int val;

		memset(&ghd, 0, sizeof(ghd));

		val = gpiod_get_value_cansleep(le->desc);
		if (val < 0)
			return val;
		ghd.values[0] = val;

		if (copy_to_user(ip, &ghd, sizeof(ghd)))
			return -EFAULT;

		return 0;
	}
	return -EINVAL;
}

#ifdef CONFIG_COMPAT
static long lineevent_ioctl_compat(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	return lineevent_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations lineevent_fileops = {
	.release = lineevent_release,
	.read = lineevent_read,
	.poll = lineevent_poll,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = lineevent_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = lineevent_ioctl_compat,
#endif
};

static irqreturn_t lineevent_irq_thread(int irq, void *p)
{
	struct lineevent_state *le = p;
	struct gpioevent_data ge;
	int ret;

	/* Do not leak kernel stack to userspace */
	memset(&ge, 0, sizeof(ge));

	/*
	 * We may be running from a nested threaded interrupt in which case
	 * we didn't get the timestamp from lineevent_irq_handler().
	 */
	if (!le->timestamp)
		ge.timestamp = ktime_get_ns();
	else
		ge.timestamp = le->timestamp;

	if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
	    && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
		int level = gpiod_get_value_cansleep(le->desc);

		if (level)
			/* Emit low-to-high event */
			ge.id = GPIOEVENT_EVENT_RISING_EDGE;
		else
			/* Emit high-to-low event */
			ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
	} else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
		/* Emit low-to-high event */
		ge.id = GPIOEVENT_EVENT_RISING_EDGE;
	} else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
		/* Emit high-to-low event */
		ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
	} else {
		return IRQ_NONE;
	}

	ret = kfifo_in_spinlocked_noirqsave(&le->events, &ge,
					    1, &le->wait.lock);
	if (ret)
		wake_up_poll(&le->wait, EPOLLIN);
	else
		pr_debug_ratelimited("event FIFO is full - event dropped\n");

	return IRQ_HANDLED;
}

static irqreturn_t lineevent_irq_handler(int irq, void *p)
{
	struct lineevent_state *le = p;

	/*
	 * Just store the timestamp in hardirq context so we get it as
	 * close in time as possible to the actual event.
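	 * The event itself is assembled and queued by lineevent_irq_thread().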
	 */
	le->timestamp = ktime_get_ns();

	return IRQ_WAKE_THREAD;
}

static int lineevent_create(struct gpio_device *gdev, void __user *ip)
{
	struct gpioevent_request eventreq;
	struct lineevent_state *le;
	struct gpio_desc *desc;
	struct file *file;
	u32 offset;
	u32 lflags;
	u32 eflags;
	int fd;
	int ret;
	int irq, irqflags = 0;

	if (copy_from_user(&eventreq, ip, sizeof(eventreq)))
		return -EFAULT;

	offset = eventreq.lineoffset;
	lflags = eventreq.handleflags;
	eflags = eventreq.eventflags;

	desc = gpiochip_get_desc(gdev->chip, offset);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	/* Return an error if an unknown flag is set */
	if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
	    (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS))
		return -EINVAL;

	/* This is just wrong: we don't look for events on output lines */
	if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
	    (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
	    (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
		return -EINVAL;

	/* Only one bias flag can be set. */
	if (((lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
	     (lflags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
			GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
	    ((lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
	     (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
		return -EINVAL;

	le = kzalloc(sizeof(*le), GFP_KERNEL);
	if (!le)
		return -ENOMEM;
	le->gdev = gdev;
	get_device(&gdev->dev);

	if (eventreq.consumer_label[0] != '\0') {
		/* label is only initialized if consumer_label is set */
		le->label = kstrndup(eventreq.consumer_label,
				     sizeof(eventreq.consumer_label) - 1,
				     GFP_KERNEL);
		if (!le->label) {
			ret = -ENOMEM;
			goto out_free_le;
		}
	}

	ret = gpiod_request_user(desc, le->label);
	if (ret)
		goto out_free_le;
	le->desc = desc;
	le->eflags = eflags;

	linehandle_flags_to_desc_flags(lflags, &desc->flags);

	ret = gpiod_direction_input(desc);
	if (ret)
		goto out_free_le;

	blocking_notifier_call_chain(&desc->gdev->notifier,
				     GPIO_V2_LINE_CHANGED_REQUESTED, desc);

	irq = gpiod_to_irq(desc);
	if (irq <= 0) {
		ret = -ENODEV;
		goto out_free_le;
	}

	if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
		irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
			IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
	if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
		irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
			IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
	irqflags |= IRQF_ONESHOT;

	INIT_KFIFO(le->events);
	init_waitqueue_head(&le->wait);

	/* Request a thread to read the events */
	ret = request_threaded_irq(irq,
				   lineevent_irq_handler,
				   lineevent_irq_thread,
				   irqflags,
				   le->label,
				   le);
	if (ret)
		goto out_free_le;

	le->irq = irq;

	fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto out_free_le;
	}

	file = anon_inode_getfile("gpio-event",
				  &lineevent_fileops,
				  le,
				  O_RDONLY | O_CLOEXEC);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto out_put_unused_fd;
	}

	eventreq.fd = fd;
	if (copy_to_user(ip, &eventreq, sizeof(eventreq))) {
		/*
		 * fput() will trigger the release() callback, so do not go onto
		 * the regular error cleanup path here.
		 */
		fput(file);
		put_unused_fd(fd);
		return -EFAULT;
	}

	fd_install(fd, file);

	return 0;

out_put_unused_fd:
	put_unused_fd(fd);
out_free_le:
	lineevent_free(le);
	return ret;
}

static void gpio_v2_line_info_to_v1(struct gpio_v2_line_info *info_v2,
				    struct gpioline_info *info_v1)
{
	u64 flagsv2 = info_v2->flags;

	memcpy(info_v1->name, info_v2->name, sizeof(info_v1->name));
	memcpy(info_v1->consumer, info_v2->consumer, sizeof(info_v1->consumer));
	info_v1->line_offset = info_v2->offset;
	info_v1->flags = 0;

	if (flagsv2 & GPIO_V2_LINE_FLAG_USED)
		info_v1->flags |= GPIOLINE_FLAG_KERNEL;

	if (flagsv2 & GPIO_V2_LINE_FLAG_OUTPUT)
		info_v1->flags |= GPIOLINE_FLAG_IS_OUT;

	if (flagsv2 & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
		info_v1->flags |= GPIOLINE_FLAG_ACTIVE_LOW;

	if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_DRAIN)
		info_v1->flags |= GPIOLINE_FLAG_OPEN_DRAIN;
	if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_SOURCE)
		info_v1->flags |= GPIOLINE_FLAG_OPEN_SOURCE;

	if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)
		info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_UP;
	if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN)
		info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_DOWN;
	if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_DISABLED)
		info_v1->flags |= GPIOLINE_FLAG_BIAS_DISABLE;
}

static void gpio_v2_line_info_changed_to_v1(
		struct gpio_v2_line_info_changed *lic_v2,
		struct gpioline_info_changed *lic_v1)
{
	memset(lic_v1, 0, sizeof(*lic_v1));
	gpio_v2_line_info_to_v1(&lic_v2->info, &lic_v1->info);
	lic_v1->timestamp = lic_v2->timestamp_ns;
	lic_v1->event_type = lic_v2->event_type;
}

#endif /* CONFIG_GPIO_CDEV_V1 */

static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
				  struct gpio_v2_line_info *info)
{
	struct gpio_chip *gc = desc->gdev->chip;
	bool ok_for_pinctrl;
	unsigned long flags;
	u32 debounce_period_us;
	unsigned int num_attrs = 0;

	memset(info, 0, sizeof(*info));
	info->offset = gpio_chip_hwgpio(desc);

	/*
	 * This function takes a mutex so we must check this before taking
	 * the spinlock.
	 *
	 * FIXME: find a non-racy way to retrieve this information. Maybe a
	 * lock common to both frameworks?
	 */
	ok_for_pinctrl =
		pinctrl_gpio_can_use_line(gc->base + info->offset);

	spin_lock_irqsave(&gpio_lock, flags);

	if (desc->name)
		strscpy(info->name, desc->name, sizeof(info->name));

	if (desc->label)
		strscpy(info->consumer, desc->label, sizeof(info->consumer));

	/*
	 * Userspace only needs to know that the kernel is using this GPIO so
	 * it can't use it.
	 */
	info->flags = 0;
	if (test_bit(FLAG_REQUESTED, &desc->flags) ||
	    test_bit(FLAG_IS_HOGGED, &desc->flags) ||
	    test_bit(FLAG_USED_AS_IRQ, &desc->flags) ||
	    test_bit(FLAG_EXPORT, &desc->flags) ||
	    test_bit(FLAG_SYSFS, &desc->flags) ||
	    !gpiochip_line_is_valid(gc, info->offset) ||
	    !ok_for_pinctrl)
		info->flags |= GPIO_V2_LINE_FLAG_USED;

	if (test_bit(FLAG_IS_OUT, &desc->flags))
		info->flags |= GPIO_V2_LINE_FLAG_OUTPUT;
	else
		info->flags |= GPIO_V2_LINE_FLAG_INPUT;

	if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
		info->flags |= GPIO_V2_LINE_FLAG_ACTIVE_LOW;

	if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
		info->flags |= GPIO_V2_LINE_FLAG_OPEN_DRAIN;
	if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
		info->flags |= GPIO_V2_LINE_FLAG_OPEN_SOURCE;

	if (test_bit(FLAG_BIAS_DISABLE, &desc->flags))
		info->flags |= GPIO_V2_LINE_FLAG_BIAS_DISABLED;
	if (test_bit(FLAG_PULL_DOWN, &desc->flags))
		info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN;
	if (test_bit(FLAG_PULL_UP, &desc->flags))
		info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_UP;

	if (test_bit(FLAG_EDGE_RISING, &desc->flags))
		info->flags |= GPIO_V2_LINE_FLAG_EDGE_RISING;
	if (test_bit(FLAG_EDGE_FALLING, &desc->flags))
		info->flags |= GPIO_V2_LINE_FLAG_EDGE_FALLING;

	if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &desc->flags))
		info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
	else if (test_bit(FLAG_EVENT_CLOCK_HTE, &desc->flags))
		info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;

	debounce_period_us = READ_ONCE(desc->debounce_period_us);
	if (debounce_period_us) {
		info->attrs[num_attrs].id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
		info->attrs[num_attrs].debounce_period_us = debounce_period_us;
		num_attrs++;
	}
	info->num_attrs = num_attrs;

	spin_unlock_irqrestore(&gpio_lock, flags);
}

struct gpio_chardev_data {
	struct gpio_device *gdev;
	wait_queue_head_t wait;
	DECLARE_KFIFO(events, struct gpio_v2_line_info_changed, 32);
	struct notifier_block lineinfo_changed_nb;
	unsigned long *watched_lines;
#ifdef CONFIG_GPIO_CDEV_V1
	atomic_t watch_abi_version;
#endif
};

static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip)
{
	struct gpio_device *gdev = cdev->gdev;
	struct gpiochip_info chipinfo;

	memset(&chipinfo, 0, sizeof(chipinfo));

	strscpy(chipinfo.name, dev_name(&gdev->dev), sizeof(chipinfo.name));
	strscpy(chipinfo.label, gdev->label, sizeof(chipinfo.label));
	chipinfo.lines = gdev->ngpio;
	if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
		return -EFAULT;
	return 0;
}

#ifdef CONFIG_GPIO_CDEV_V1
/*
 * returns 0 if the versions match, else the previously selected ABI version
 */
static int lineinfo_ensure_abi_version(struct gpio_chardev_data *cdata,
				       unsigned int version)
{
	int abiv = atomic_cmpxchg(&cdata->watch_abi_version, 0, version);

	if (abiv == version)
		return 0;

	return abiv;
}

static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip,
			   bool watch)
{
	struct gpio_desc *desc;
	struct gpioline_info lineinfo;
	struct gpio_v2_line_info lineinfo_v2;

	if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
		return -EFAULT;

	/* this doubles as a range check on line_offset */
	desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.line_offset);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	if (watch) {
		if (lineinfo_ensure_abi_version(cdev, 1))
			return -EPERM;

		if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
			return -EBUSY;
	}

	gpio_desc_to_lineinfo(desc, &lineinfo_v2);
	gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);

	if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
		if (watch)
			clear_bit(lineinfo.line_offset, cdev->watched_lines);
		return -EFAULT;
	}

	return 0;
}
#endif

static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
			bool watch)
{
	struct gpio_desc *desc;
	struct gpio_v2_line_info lineinfo;

	if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
		return -EFAULT;

	if (memchr_inv(lineinfo.padding, 0, sizeof(lineinfo.padding)))
		return -EINVAL;

	desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.offset);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	if (watch) {
#ifdef CONFIG_GPIO_CDEV_V1
		if (lineinfo_ensure_abi_version(cdev, 2))
			return -EPERM;
#endif
		if (test_and_set_bit(lineinfo.offset, cdev->watched_lines))
			return -EBUSY;
	}
	gpio_desc_to_lineinfo(desc, &lineinfo);

	if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
		if (watch)
			clear_bit(lineinfo.offset, cdev->watched_lines);
		return -EFAULT;
	}

	return 0;
}

static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
{
	__u32 offset;

	if (copy_from_user(&offset, ip, sizeof(offset)))
		return -EFAULT;

	if (offset >= cdev->gdev->ngpio)
		return -EINVAL;

	if (!test_and_clear_bit(offset, cdev->watched_lines))
		return -EBUSY;

	return 0;
}

/*
 * gpio_ioctl() - ioctl handler for the GPIO chardev
 */
static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct gpio_chardev_data *cdev = file->private_data;
	struct gpio_device *gdev = cdev->gdev;
	void __user *ip = (void __user *)arg;

	/* We fail any subsequent ioctl()s when the chip is gone */
	if (!gdev->chip)
		return -ENODEV;

	/* Fill in the struct and pass to userspace */
	switch (cmd) {
	case GPIO_GET_CHIPINFO_IOCTL:
		return chipinfo_get(cdev, ip);
#ifdef CONFIG_GPIO_CDEV_V1
	case GPIO_GET_LINEHANDLE_IOCTL:
		return linehandle_create(gdev, ip);
	case GPIO_GET_LINEEVENT_IOCTL:
		return lineevent_create(gdev, ip);
	case GPIO_GET_LINEINFO_IOCTL:
		return lineinfo_get_v1(cdev, ip, false);
	case GPIO_GET_LINEINFO_WATCH_IOCTL:
		return lineinfo_get_v1(cdev, ip, true);
#endif /* CONFIG_GPIO_CDEV_V1 */
	case GPIO_V2_GET_LINEINFO_IOCTL:
		return lineinfo_get(cdev, ip, false);
	case GPIO_V2_GET_LINEINFO_WATCH_IOCTL:
		return lineinfo_get(cdev, ip, true);
	case GPIO_V2_GET_LINE_IOCTL:
		return linereq_create(gdev, ip);
	case GPIO_GET_LINEINFO_UNWATCH_IOCTL:
		return lineinfo_unwatch(cdev, ip);
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static long gpio_ioctl_compat(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	return gpio_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static struct gpio_chardev_data *
to_gpio_chardev_data(struct notifier_block *nb)
{
	return container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb);
}

static int lineinfo_changed_notify(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct gpio_chardev_data *cdev = to_gpio_chardev_data(nb);
	struct gpio_v2_line_info_changed chg;
	struct gpio_desc *desc = data;
	int ret;

	if (!test_bit(gpio_chip_hwgpio(desc), cdev->watched_lines))
		return NOTIFY_DONE;

	memset(&chg, 0, sizeof(chg));
	chg.event_type = action;
	chg.timestamp_ns = ktime_get_ns();
	gpio_desc_to_lineinfo(desc, &chg.info);

	ret = kfifo_in_spinlocked(&cdev->events, &chg, 1, &cdev->wait.lock);
	if (ret)
		wake_up_poll(&cdev->wait, EPOLLIN);
	else
		pr_debug_ratelimited("lineinfo event FIFO is full - event dropped\n");

	return NOTIFY_OK;
}

static __poll_t lineinfo_watch_poll(struct file *file,
				    struct poll_table_struct *pollt)
{
	struct gpio_chardev_data *cdev = file->private_data;
	__poll_t events = 0;

	poll_wait(file, &cdev->wait, pollt);

	if (!kfifo_is_empty_spinlocked_noirqsave(&cdev->events,
						 &cdev->wait.lock))
		events = EPOLLIN | EPOLLRDNORM;

	return events;
}

static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
				   size_t count, loff_t *off)
{
	struct gpio_chardev_data *cdev = file->private_data;
	struct gpio_v2_line_info_changed event;
	ssize_t bytes_read = 0;
	int ret;
	size_t event_size;

#ifndef CONFIG_GPIO_CDEV_V1
	event_size = sizeof(struct gpio_v2_line_info_changed);
	if (count < event_size)
		return -EINVAL;
#endif

	do {
		spin_lock(&cdev->wait.lock);
		if (kfifo_is_empty(&cdev->events)) {
			if (bytes_read) {
				spin_unlock(&cdev->wait.lock);
				return bytes_read;
			}

			if (file->f_flags & O_NONBLOCK) {
				spin_unlock(&cdev->wait.lock);
				return -EAGAIN;
			}

			ret = wait_event_interruptible_locked(cdev->wait,
					!kfifo_is_empty(&cdev->events));
			if (ret) {
				spin_unlock(&cdev->wait.lock);
				return ret;
			}
		}
#ifdef CONFIG_GPIO_CDEV_V1
		/* must be after kfifo check so watch_abi_version is set */
		if (atomic_read(&cdev->watch_abi_version) == 2)
			event_size = sizeof(struct gpio_v2_line_info_changed);
		else
			event_size = sizeof(struct gpioline_info_changed);
		if (count < event_size) {
			spin_unlock(&cdev->wait.lock);
			return -EINVAL;
		}
#endif
		ret = kfifo_out(&cdev->events, &event, 1);
		spin_unlock(&cdev->wait.lock);
		if (ret != 1) {
			ret = -EIO;
			break;
			/* We should never get here. See lineevent_read(). */
		}

#ifdef CONFIG_GPIO_CDEV_V1
		if (event_size == sizeof(struct gpio_v2_line_info_changed)) {
			if (copy_to_user(buf + bytes_read, &event, event_size))
				return -EFAULT;
		} else {
			struct gpioline_info_changed event_v1;

			gpio_v2_line_info_changed_to_v1(&event, &event_v1);
			if (copy_to_user(buf + bytes_read, &event_v1,
					 event_size))
				return -EFAULT;
		}
#else
		if (copy_to_user(buf + bytes_read, &event, event_size))
			return -EFAULT;
#endif
		bytes_read += event_size;
	} while (count >= bytes_read + sizeof(event));

	return bytes_read;
}

/**
 * gpio_chrdev_open() - open the chardev for ioctl operations
 * @inode: inode for this chardev
 * @file: file struct for storing private data
 * Returns 0 on success
 */
static int gpio_chrdev_open(struct inode *inode, struct file *file)
{
	struct gpio_device *gdev = container_of(inode->i_cdev,
						struct gpio_device, chrdev);
	struct gpio_chardev_data *cdev;
	int ret = -ENOMEM;

	/* Fail on open if the backing gpiochip is gone */
	if (!gdev->chip)
		return -ENODEV;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return -ENOMEM;

	cdev->watched_lines = bitmap_zalloc(gdev->chip->ngpio, GFP_KERNEL);
	if (!cdev->watched_lines)
		goto out_free_cdev;

	init_waitqueue_head(&cdev->wait);
	INIT_KFIFO(cdev->events);
	cdev->gdev = gdev;

	cdev->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify;
	ret = blocking_notifier_chain_register(&gdev->notifier,
					       &cdev->lineinfo_changed_nb);
	if (ret)
		goto out_free_bitmap;

	get_device(&gdev->dev);
	file->private_data = cdev;

	ret = nonseekable_open(inode, file);
	if (ret)
		goto out_unregister_notifier;

	return ret;

out_unregister_notifier:
	blocking_notifier_chain_unregister(&gdev->notifier,
					   &cdev->lineinfo_changed_nb);
out_free_bitmap:
	bitmap_free(cdev->watched_lines);
out_free_cdev:
	kfree(cdev);
	return ret;
}

/**
 * gpio_chrdev_release() - close chardev after ioctl operations
 * @inode: inode for this chardev
 * @file: file struct for storing private data
 * Returns 0 on success
 */
static int gpio_chrdev_release(struct inode *inode, struct file *file)
{
	struct gpio_chardev_data *cdev = file->private_data;
	struct gpio_device *gdev = cdev->gdev;

	bitmap_free(cdev->watched_lines);
	blocking_notifier_chain_unregister(&gdev->notifier,
					   &cdev->lineinfo_changed_nb);
	put_device(&gdev->dev);
	kfree(cdev);

	return 0;
}

static const struct file_operations gpio_fileops = {
	.release = gpio_chrdev_release,
	.open = gpio_chrdev_open,
	.poll = lineinfo_watch_poll,
	.read = lineinfo_watch_read,
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.unlocked_ioctl = gpio_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = gpio_ioctl_compat,
#endif
};

int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt)
{
	int ret;

	cdev_init(&gdev->chrdev, &gpio_fileops);
	gdev->chrdev.owner = THIS_MODULE;
	gdev->dev.devt = MKDEV(MAJOR(devt), gdev->id);

	ret = cdev_device_add(&gdev->chrdev, &gdev->dev);
	if (ret)
		return ret;

	chip_dbg(gdev->chip, "added GPIO chardev (%d:%d)\n",
		 MAJOR(devt), gdev->id);

	return 0;
}

void gpiolib_cdev_unregister(struct gpio_device *gdev)
{
	cdev_device_del(&gdev->chrdev, &gdev->dev);
}
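
/*
 * Illustrative only: a minimal userspace sketch of how the chardev
 * interface implemented above can be exercised -- query chip info via
 * GPIO_GET_CHIPINFO_IOCTL (handled by chipinfo_get()), install a v2
 * line-info watch via GPIO_V2_GET_LINEINFO_WATCH_IOCTL (handled by
 * lineinfo_get(cdev, ip, true)), then read() the gpio_v2_line_info_changed
 * records queued by lineinfo_changed_notify(). This sketch is not part of
 * the driver and is kept inside a comment so it does not enter the kernel
 * build; the chip path "/dev/gpiochip0" and line offset 0 are assumptions
 * chosen purely for the example.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *	#include <linux/gpio.h>
 *
 *	int main(void)
 *	{
 *		struct gpiochip_info chip;
 *		struct gpio_v2_line_info info;
 *		struct gpio_v2_line_info_changed chg;
 *		int fd = open("/dev/gpiochip0", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *
 *		// Chip info: name, label and number of lines
 *		if (ioctl(fd, GPIO_GET_CHIPINFO_IOCTL, &chip) == 0)
 *			printf("%s: %u lines\n", chip.name, chip.lines);
 *
 *		// Watch line 0; padding must be zeroed (see lineinfo_get())
 *		memset(&info, 0, sizeof(info));
 *		info.offset = 0;
 *		if (ioctl(fd, GPIO_V2_GET_LINEINFO_WATCH_IOCTL, &info) < 0)
 *			return 1;
 *
 *		// Each read() returns whole v2 change records once the v2
 *		// watch ABI has been selected (see lineinfo_watch_read())
 *		while (read(fd, &chg, sizeof(chg)) == sizeof(chg))
 *			printf("line %u changed, event %u\n",
 *			       chg.info.offset, chg.event_type);
 *
 *		close(fd);
 *		return 0;
 *	}
 */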