// SPDX-License-Identifier: GPL-2.0-only
//
// GPIO Aggregator
//
// Copyright (C) 2019-2020 Glider bv

#define DRV_NAME	"gpio-aggregator"
#define pr_fmt(fmt)	DRV_NAME ": " fmt

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/forwarder.h>
#include <linux/gpio/machine.h>

#include "dev-sync-probe.h"

#define AGGREGATOR_MAX_GPIOS 512
#define AGGREGATOR_LEGACY_PREFIX "_sysfs"

/*
 * GPIO Aggregator sysfs interface
 */

/*
 * One aggregator instance: a virtual GPIO device assembled from lines of
 * other GPIO chips.  Instances are tracked in gpio_aggregator_idr by @id.
 */
struct gpio_aggregator {
	struct dev_sync_probe_data probe_data;
	struct config_group group;
	struct gpiod_lookup_table *lookups;	/* lookup table handed to gpiolib on activation */
	struct mutex lock;			/* serializes configfs ops on this instance */
	int id;					/* idr-allocated instance id */

	/* List of gpio_aggregator_line. Always added in order */
	struct list_head list_head;

	/* used by legacy sysfs interface only */
	bool init_via_sysfs;
	char args[];				/* legacy sysfs argument string (flexible array) */
};

/* One virtual line of an aggregator, backed by a configfs group. */
struct gpio_aggregator_line {
	struct config_group group;
	struct gpio_aggregator *parent;
	struct list_head entry;			/* link in parent->list_head, sorted by idx */

	/* Line index within the aggregator device */
	unsigned int idx;

	/* Custom name for the virtual line */
	const char *name;
	/* GPIO chip label or line name */
	const char *key;
	/* Can be negative to indicate lookup by line name */
	int offset;

	enum gpio_lookup_flags flags;
};

/* Platform data distinguishing legacy-sysfs-created devices. */
struct gpio_aggregator_pdev_meta {
	bool init_via_sysfs;
};

static DEFINE_MUTEX(gpio_aggregator_lock);	/* protects idr */
static DEFINE_IDR(gpio_aggregator_idr);

/*
 * Allocate a new aggregator (plus @arg_size bytes for the legacy args
 * string) and register it in the idr.  On success *aggr holds the new
 * instance and 0 is returned; negative errno otherwise.
 */
static int gpio_aggregator_alloc(struct gpio_aggregator **aggr, size_t arg_size)
{
	int ret;

	struct gpio_aggregator *new __free(kfree) = kzalloc(
			sizeof(*new) + arg_size, GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	scoped_guard(mutex, &gpio_aggregator_lock)
		ret = idr_alloc(&gpio_aggregator_idr, new, 0, 0, GFP_KERNEL);

	if (ret < 0)
		return ret;

	new->id = ret;
	INIT_LIST_HEAD(&new->list_head);
	mutex_init(&new->lock);
	*aggr = no_free_ptr(new);
	return 0;
}

/* Unregister @aggr from the idr and release it. */
static void gpio_aggregator_free(struct gpio_aggregator *aggr)
{
	scoped_guard(mutex, &gpio_aggregator_lock)
		idr_remove(&gpio_aggregator_idr, aggr->id);

	mutex_destroy(&aggr->lock);
	kfree(aggr);
}

/*
 * Grow the aggregator's lookup table by one entry mapping (@key, @hwnum)
 * to index *n, then advance *n.  The table is kept zero-terminated.
 */
static int gpio_aggregator_add_gpio(struct gpio_aggregator *aggr,
				    const char *key, int hwnum, unsigned int *n)
{
	struct gpiod_lookup_table *lookups;

	lookups = krealloc(aggr->lookups, struct_size(lookups, table, *n + 2),
			   GFP_KERNEL);
	if (!lookups)
		return -ENOMEM;

	lookups->table[*n] = GPIO_LOOKUP_IDX(key, hwnum, NULL, *n, 0);

	(*n)++;
	/* Keep the terminating all-zero sentinel entry valid. */
	memset(&lookups->table[*n], 0, sizeof(lookups->table[*n]));

	aggr->lookups = lookups;
	return 0;
}

/* True once the platform device exists and has probed (drvdata set). */
static bool gpio_aggregator_is_active(struct gpio_aggregator *aggr)
{
	lockdep_assert_held(&aggr->lock);

	return aggr->probe_data.pdev && platform_get_drvdata(aggr->probe_data.pdev);
}

/* Only aggregators created via legacy sysfs can be "activating". */
static bool gpio_aggregator_is_activating(struct gpio_aggregator *aggr)
{
	lockdep_assert_held(&aggr->lock);

	return aggr->probe_data.pdev && !platform_get_drvdata(aggr->probe_data.pdev);
}

/* Number of configured lines; caller must hold aggr->lock. */
static size_t gpio_aggregator_count_lines(struct gpio_aggregator *aggr)
{
	lockdep_assert_held(&aggr->lock);

	return list_count_nodes(&aggr->list_head);
}

/*
 * Allocate a line descriptor for @parent at index @idx.  @key (chip label
 * or line name) is duplicated if non-NULL.  Returns the line or an
 * ERR_PTR()-encoded negative errno.
 */
static struct gpio_aggregator_line *
gpio_aggregator_line_alloc(struct gpio_aggregator *parent, unsigned int idx,
			   char *key, int offset)
{
	struct gpio_aggregator_line *line;

	line = kzalloc(sizeof(*line), GFP_KERNEL);
	if (!line)
		return ERR_PTR(-ENOMEM);

	if (key) {
		line->key = kstrdup(key, GFP_KERNEL);
		if (!line->key) {
			kfree(line);
			return ERR_PTR(-ENOMEM);
		}
	}

	line->flags = GPIO_LOOKUP_FLAGS_DEFAULT;
	line->parent = parent;
	line->idx = idx;
	line->offset = offset;
	INIT_LIST_HEAD(&line->entry);

	return line;
}

/* Insert @line into @aggr's list, keeping the list sorted by idx. */
static void gpio_aggregator_line_add(struct gpio_aggregator *aggr,
				     struct gpio_aggregator_line *line)
{
	struct gpio_aggregator_line *tmp;

	lockdep_assert_held(&aggr->lock);

	list_for_each_entry(tmp, &aggr->list_head, entry) {
		if (tmp->idx > line->idx) {
			list_add_tail(&line->entry, &tmp->entry);
			return;
		}
	}
	list_add_tail(&line->entry, &aggr->list_head);
}

/* Unlink @line from @aggr's list; caller must hold aggr->lock. */
static void gpio_aggregator_line_del(struct gpio_aggregator *aggr,
				     struct gpio_aggregator_line *line)
{
	lockdep_assert_held(&aggr->lock);

	list_del(&line->entry);
}

/* Unregister and free all lines of @aggr (legacy sysfs teardown path). */
static void gpio_aggregator_free_lines(struct gpio_aggregator *aggr)
{
	struct gpio_aggregator_line *line, *tmp;

	list_for_each_entry_safe(line, tmp, &aggr->list_head, entry) {
		configfs_unregister_group(&line->group);
		/*
		 * Normally, we acquire aggr->lock within the configfs
		 * callback. However, in the legacy sysfs interface case,
		 * calling configfs_(un)register_group while holding
		 * aggr->lock could cause a deadlock. Fortunately, this is
		 * unnecessary because the new_device/delete_device path
		 * and the module unload path are mutually exclusive,
		 * thanks to an explicit try_module_get. That's why this
		 * minimal scoped_guard suffices.
		 */
		scoped_guard(mutex, &aggr->lock)
			gpio_aggregator_line_del(aggr, line);
		kfree(line->key);
		kfree(line->name);
		kfree(line);
	}
}


/*
 * GPIO Forwarder
 */

/* Per-line ramp delays, configurable via the OF xlate hook below. */
struct gpiochip_fwd_timing {
	u32 ramp_up_us;
	u32 ramp_down_us;
};

/*
 * A gpio_chip whose operations are forwarded to an array of foreign
 * GPIO descriptors.  tmp[] is scratch space for the *_multiple ops,
 * protected by mlock or slock depending on chip.can_sleep.
 */
struct gpiochip_fwd {
	struct gpio_chip chip;
	struct gpio_desc **descs;
	union {
		struct mutex mlock;	/* protects tmp[] if can_sleep */
		spinlock_t slock;	/* protects tmp[] if !can_sleep */
	};
	struct gpiochip_fwd_timing *delay_timings;
	void *data;			/* driver-private data, see gpiochip_fwd_get_data() */
	unsigned long *valid_mask;	/* bit set => descs[bit] is populated */
	unsigned long tmp[];		/* values and descs for multiple ops */
};

/* Scratch layout: value bitmap first, then the desc pointer array. */
#define fwd_tmp_values(fwd)	(&(fwd)->tmp[0])
#define fwd_tmp_descs(fwd)	((void *)&(fwd)->tmp[BITS_TO_LONGS((fwd)->chip.ngpio)])

#define fwd_tmp_size(ngpios)	(BITS_TO_LONGS((ngpios)) + (ngpios))

/* .request: only lines with a registered descriptor may be requested. */
static int gpio_fwd_request(struct gpio_chip *chip, unsigned int offset)
{
	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);

	return test_bit(offset, fwd->valid_mask) ? 0 : -ENODEV;
}

/* .get_direction: forward to the backing descriptor. */
static int gpio_fwd_get_direction(struct gpio_chip *chip, unsigned int offset)
{
	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);

	/*
	 * get_direction() is called during gpiochip registration, return
	 * -ENODEV if there is no GPIO desc for the line.
	 */
	if (!test_bit(offset, fwd->valid_mask))
		return -ENODEV;

	return gpiod_get_direction(fwd->descs[offset]);
}

/* .direction_input: forward to the backing descriptor. */
static int gpio_fwd_direction_input(struct gpio_chip *chip, unsigned int offset)
{
	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);

	return gpiod_direction_input(fwd->descs[offset]);
}

/* .direction_output: forward to the backing descriptor. */
static int gpio_fwd_direction_output(struct gpio_chip *chip,
				     unsigned int offset, int value)
{
	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);

	return gpiod_direction_output(fwd->descs[offset], value);
}

/* .get: pick the sleeping/non-sleeping accessor based on can_sleep. */
static int gpio_fwd_get(struct gpio_chip *chip, unsigned int offset)
{
	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);

	return chip->can_sleep ? gpiod_get_value_cansleep(fwd->descs[offset])
			       : gpiod_get_value(fwd->descs[offset]);
}

/*
 * Gather the descriptors selected by @mask into tmp[], read them as an
 * array, then scatter the values back into @bits.  Caller holds the
 * lock protecting tmp[] (see gpio_fwd_get_multiple_locked()).
 */
static int gpio_fwd_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
				 unsigned long *bits)
{
	struct gpio_desc **descs = fwd_tmp_descs(fwd);
	unsigned long *values = fwd_tmp_values(fwd);
	unsigned int i, j = 0;
	int error;

	bitmap_clear(values, 0, fwd->chip.ngpio);
	for_each_set_bit(i, mask, fwd->chip.ngpio)
		descs[j++] = fwd->descs[i];

	if (fwd->chip.can_sleep)
		error = gpiod_get_array_value_cansleep(j, descs, NULL, values);
	else
		error = gpiod_get_array_value(j, descs, NULL, values);
	if (error)
		return error;

	j = 0;
	for_each_set_bit(i, mask, fwd->chip.ngpio)
		__assign_bit(i, bits, test_bit(j++, values));

	return 0;
}

/* .get_multiple: serialize access to tmp[] with the appropriate lock. */
static int gpio_fwd_get_multiple_locked(struct gpio_chip *chip,
					unsigned long *mask, unsigned long *bits)
{
	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
	unsigned long flags;
	int error;

	if (chip->can_sleep) {
		mutex_lock(&fwd->mlock);
		error = gpio_fwd_get_multiple(fwd, mask, bits);
		mutex_unlock(&fwd->mlock);
	} else {
		spin_lock_irqsave(&fwd->slock, flags);
		error = gpio_fwd_get_multiple(fwd, mask, bits);
		spin_unlock_irqrestore(&fwd->slock, flags);
	}

	return error;
}

/*
 * After setting a line, wait for the configured ramp-up (going active)
 * or ramp-down (going inactive) delay, if any.
 */
static void gpio_fwd_delay(struct gpio_chip *chip, unsigned int offset, int value)
{
	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
	const struct gpiochip_fwd_timing *delay_timings;
	bool is_active_low = gpiod_is_active_low(fwd->descs[offset]);
	u32 delay_us;

	delay_timings = &fwd->delay_timings[offset];
	/* Physical rising edge => ramp-up delay; falling => ramp-down. */
	if ((!is_active_low && value) || (is_active_low && !value))
		delay_us = delay_timings->ramp_up_us;
	else
		delay_us = delay_timings->ramp_down_us;
	if (!delay_us)
		return;

	if (chip->can_sleep)
		fsleep(delay_us);
	else
		udelay(delay_us);
}
/* .set: forward the value, then apply any configured ramp delay. */
static int gpio_fwd_set(struct gpio_chip *chip, unsigned int offset, int value)
{
	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
	int ret;

	if (chip->can_sleep)
		ret = gpiod_set_value_cansleep(fwd->descs[offset], value);
	else
		ret = gpiod_set_value(fwd->descs[offset], value);
	if (ret)
		return ret;

	if (fwd->delay_timings)
		gpio_fwd_delay(chip, offset, value);

	return ret;
}

/*
 * Gather descriptors/values selected by @mask into tmp[] and write them
 * as an array.  Caller holds the lock protecting tmp[].
 */
static int gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
				 unsigned long *bits)
{
	struct gpio_desc **descs = fwd_tmp_descs(fwd);
	unsigned long *values = fwd_tmp_values(fwd);
	/*
	 * NOTE(review): ret receives a (possibly negative) int from
	 * gpiod_set_array_value*() but is declared unsigned int here;
	 * the value round-trips through the int return type unchanged,
	 * but declaring it int would be cleaner — confirm upstream.
	 */
	unsigned int i, j = 0, ret;

	for_each_set_bit(i, mask, fwd->chip.ngpio) {
		__assign_bit(j, values, test_bit(i, bits));
		descs[j++] = fwd->descs[i];
	}

	if (fwd->chip.can_sleep)
		ret = gpiod_set_array_value_cansleep(j, descs, NULL, values);
	else
		ret = gpiod_set_array_value(j, descs, NULL, values);

	return ret;
}

/* .set_multiple: serialize access to tmp[] with the appropriate lock. */
static int gpio_fwd_set_multiple_locked(struct gpio_chip *chip,
					unsigned long *mask, unsigned long *bits)
{
	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
	unsigned long flags;
	int ret;

	if (chip->can_sleep) {
		mutex_lock(&fwd->mlock);
		ret = gpio_fwd_set_multiple(fwd, mask, bits);
		mutex_unlock(&fwd->mlock);
	} else {
		spin_lock_irqsave(&fwd->slock, flags);
		ret = gpio_fwd_set_multiple(fwd, mask, bits);
		spin_unlock_irqrestore(&fwd->slock, flags);
	}

	return ret;
}

/* .set_config: forward to the backing descriptor. */
static int gpio_fwd_set_config(struct gpio_chip *chip, unsigned int offset,
			       unsigned long config)
{
	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);

	return gpiod_set_config(fwd->descs[offset], config);
}

/* .to_irq: forward to the backing descriptor. */
static int gpio_fwd_to_irq(struct gpio_chip *chip, unsigned int offset)
{
	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);

	return gpiod_to_irq(fwd->descs[offset]);
}

/*
 * The GPIO delay provides a way to configure platform specific delays
 * for the GPIO ramp-up or ramp-down delays. This can serve the following
 * purposes:
 *   - Open-drain output using an RC filter
 */
#define FWD_FEATURE_DELAY		BIT(0)

#ifdef CONFIG_OF_GPIO
/*
 * OF translate hook for 3-cell GPIO specifiers: cell 0 is the line,
 * cells 1 and 2 are the ramp-up/ramp-down delays in microseconds,
 * recorded in the forwarder's delay_timings table.
 */
static int gpiochip_fwd_delay_of_xlate(struct gpio_chip *chip,
				       const struct of_phandle_args *gpiospec,
				       u32 *flags)
{
	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
	struct gpiochip_fwd_timing *timings;
	u32 line;

	if (gpiospec->args_count != chip->of_gpio_n_cells)
		return -EINVAL;

	line = gpiospec->args[0];
	if (line >= chip->ngpio)
		return -EINVAL;

	timings = &fwd->delay_timings[line];
	timings->ramp_up_us = gpiospec->args[1];
	timings->ramp_down_us = gpiospec->args[2];

	return line;
}

/* Allocate the delay table and install the 3-cell OF translate hook. */
static int gpiochip_fwd_setup_delay_line(struct gpiochip_fwd *fwd)
{
	struct gpio_chip *chip = &fwd->chip;

	fwd->delay_timings = devm_kcalloc(chip->parent, chip->ngpio,
					  sizeof(*fwd->delay_timings),
					  GFP_KERNEL);
	if (!fwd->delay_timings)
		return -ENOMEM;

	chip->of_xlate = gpiochip_fwd_delay_of_xlate;
	chip->of_gpio_n_cells = 3;

	return 0;
}
#else
/* Without OF support the delay feature is a no-op. */
static int gpiochip_fwd_setup_delay_line(struct gpiochip_fwd *fwd)
{
	return 0;
}
#endif	/* !CONFIG_OF_GPIO */

/**
 * gpiochip_fwd_get_gpiochip - Get the GPIO chip for the GPIO forwarder
 * @fwd: GPIO forwarder
 *
 * Returns: The GPIO chip for the GPIO forwarder
 */
struct gpio_chip *gpiochip_fwd_get_gpiochip(struct gpiochip_fwd *fwd)
{
	return &fwd->chip;
}
EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_get_gpiochip, "GPIO_FORWARDER");

/**
 * gpiochip_fwd_get_data - Get driver-private data for the GPIO forwarder
 * @fwd: GPIO forwarder
 *
 * Returns: The driver-private data for the GPIO forwarder
 */
void *gpiochip_fwd_get_data(struct gpiochip_fwd *fwd)
{
	return fwd->data;
}
EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_get_data, "GPIO_FORWARDER");

/**
 * gpiochip_fwd_gpio_request - Request a line of the GPIO forwarder
 * @fwd: GPIO forwarder
 * @offset: the offset of the line to request
 *
 * Returns: 0 on success, or negative errno on failure.
 */
int gpiochip_fwd_gpio_request(struct gpiochip_fwd *fwd, unsigned int offset)
{
	struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);

	return gpio_fwd_request(gc, offset);
}
EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_request, "GPIO_FORWARDER");

/**
 * gpiochip_fwd_gpio_get_direction - Return the current direction of a GPIO forwarder line
 * @fwd: GPIO forwarder
 * @offset: the offset of the line
 *
 * Returns: 0 for output, 1 for input, or an error code in case of error.
 */
int gpiochip_fwd_gpio_get_direction(struct gpiochip_fwd *fwd, unsigned int offset)
{
	struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);

	return gpio_fwd_get_direction(gc, offset);
}
EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_get_direction, "GPIO_FORWARDER");

/**
 * gpiochip_fwd_gpio_direction_output - Set a GPIO forwarder line direction to
 *	output
 * @fwd: GPIO forwarder
 * @offset: the offset of the line
 * @value: value to set
 *
 * Returns: 0 on success, or negative errno on failure.
 */
int gpiochip_fwd_gpio_direction_output(struct gpiochip_fwd *fwd, unsigned int offset,
				       int value)
{
	struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);

	return gpio_fwd_direction_output(gc, offset, value);
}
EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_direction_output, "GPIO_FORWARDER");

/**
 * gpiochip_fwd_gpio_direction_input - Set a GPIO forwarder line direction to input
 * @fwd: GPIO forwarder
 * @offset: the offset of the line
 *
 * Returns: 0 on success, or negative errno on failure.
 */
int gpiochip_fwd_gpio_direction_input(struct gpiochip_fwd *fwd, unsigned int offset)
{
	struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);

	return gpio_fwd_direction_input(gc, offset);
}
EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_direction_input, "GPIO_FORWARDER");

/**
 * gpiochip_fwd_gpio_get - Return a GPIO forwarder line's value
 * @fwd: GPIO forwarder
 * @offset: the offset of the line
 *
 * Returns: The GPIO's logical value, i.e. taking the ACTIVE_LOW status into
 * account, or negative errno on failure.
 */
int gpiochip_fwd_gpio_get(struct gpiochip_fwd *fwd, unsigned int offset)
{
	struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);

	return gpio_fwd_get(gc, offset);
}
EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_get, "GPIO_FORWARDER");

/**
 * gpiochip_fwd_gpio_get_multiple - Get values for multiple GPIO forwarder lines
 * @fwd: GPIO forwarder
 * @mask: bit mask array; one bit per line; BITS_PER_LONG bits per word defines
 *	which lines are to be read
 * @bits: bit value array; one bit per line; BITS_PER_LONG bits per word will
 *	contains the read values for the lines specified by mask
 *
 * Returns: 0 on success, or negative errno on failure.
 */
int gpiochip_fwd_gpio_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
				   unsigned long *bits)
{
	struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);

	return gpio_fwd_get_multiple_locked(gc, mask, bits);
}
EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_get_multiple, "GPIO_FORWARDER");

/**
 * gpiochip_fwd_gpio_set - Assign value to a GPIO forwarder line.
 * @fwd: GPIO forwarder
 * @offset: the offset of the line
 * @value: value to set
 *
 * Returns: 0 on success, or negative errno on failure.
 */
int gpiochip_fwd_gpio_set(struct gpiochip_fwd *fwd, unsigned int offset, int value)
{
	struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);

	return gpio_fwd_set(gc, offset, value);
}
EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_set, "GPIO_FORWARDER");

/**
 * gpiochip_fwd_gpio_set_multiple - Assign values to multiple GPIO forwarder lines
 * @fwd: GPIO forwarder
 * @mask: bit mask array; one bit per output; BITS_PER_LONG bits per word
 *	defines which outputs are to be changed
 * @bits: bit value array; one bit per output; BITS_PER_LONG bits per word
 *	defines the values the outputs specified by mask are to be set to
 *
 * Returns: 0 on success, or negative errno on failure.
 */
int gpiochip_fwd_gpio_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
				   unsigned long *bits)
{
	struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);

	return gpio_fwd_set_multiple_locked(gc, mask, bits);
}
EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_set_multiple, "GPIO_FORWARDER");

/**
 * gpiochip_fwd_gpio_set_config - Set @config for a GPIO forwarder line
 * @fwd: GPIO forwarder
 * @offset: the offset of the line
 * @config: Same packed config format as generic pinconf
 *
 * Returns: 0 on success, %-ENOTSUPP if the controller doesn't support setting
 * the configuration.
 */
int gpiochip_fwd_gpio_set_config(struct gpiochip_fwd *fwd, unsigned int offset,
				 unsigned long config)
{
	struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);

	return gpio_fwd_set_config(gc, offset, config);
}
EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_set_config, "GPIO_FORWARDER");

/**
 * gpiochip_fwd_gpio_to_irq - Return the IRQ corresponding to a GPIO forwarder line
 * @fwd: GPIO forwarder
 * @offset: the offset of the line
 *
 * Returns: The Linux IRQ corresponding to the passed line, or an error code in
 * case of error.
 */
int gpiochip_fwd_gpio_to_irq(struct gpiochip_fwd *fwd, unsigned int offset)
{
	struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);

	return gpio_fwd_to_irq(gc, offset);
}
EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_to_irq, "GPIO_FORWARDER");

/**
 * devm_gpiochip_fwd_alloc - Allocate and initialize a new GPIO forwarder
 * @dev: Parent device pointer
 * @ngpios: Number of GPIOs in the forwarder
 *
 * Returns: An opaque object pointer, or an ERR_PTR()-encoded negative error
 * code on failure.
 */
struct gpiochip_fwd *devm_gpiochip_fwd_alloc(struct device *dev,
					     unsigned int ngpios)
{
	struct gpiochip_fwd *fwd;
	struct gpio_chip *chip;

	fwd = devm_kzalloc(dev, struct_size(fwd, tmp, fwd_tmp_size(ngpios)), GFP_KERNEL);
	if (!fwd)
		return ERR_PTR(-ENOMEM);

	fwd->descs = devm_kcalloc(dev, ngpios, sizeof(*fwd->descs), GFP_KERNEL);
	if (!fwd->descs)
		return ERR_PTR(-ENOMEM);

	fwd->valid_mask = devm_bitmap_zalloc(dev, ngpios, GFP_KERNEL);
	if (!fwd->valid_mask)
		return ERR_PTR(-ENOMEM);

	chip = &fwd->chip;

	chip->label = dev_name(dev);
	chip->parent = dev;
	chip->owner = THIS_MODULE;
	chip->request = gpio_fwd_request;
	chip->get_direction = gpio_fwd_get_direction;
	chip->direction_input = gpio_fwd_direction_input;
	chip->direction_output = gpio_fwd_direction_output;
	chip->get = gpio_fwd_get;
	chip->get_multiple = gpio_fwd_get_multiple_locked;
	chip->set = gpio_fwd_set;
	chip->set_multiple = gpio_fwd_set_multiple_locked;
	chip->to_irq = gpio_fwd_to_irq;
	chip->base = -1;
	chip->ngpio = ngpios;

	return fwd;
}
EXPORT_SYMBOL_NS_GPL(devm_gpiochip_fwd_alloc, "GPIO_FORWARDER");

/**
 * gpiochip_fwd_desc_add - Add a GPIO desc in the forwarder
 * @fwd: GPIO forwarder
 * @desc: GPIO descriptor to register
 * @offset: offset for the GPIO in the forwarder
 *
 * Returns: 0 on success, or negative errno on failure.
 */
int gpiochip_fwd_desc_add(struct gpiochip_fwd *fwd, struct gpio_desc *desc,
			  unsigned int offset)
{
	struct gpio_chip *chip = &fwd->chip;

	if (offset >= chip->ngpio)
		return -EINVAL;

	if (test_and_set_bit(offset, fwd->valid_mask))
		return -EEXIST;

	/*
	 * If any of the GPIO lines are sleeping, then the entire forwarder
	 * will be sleeping.
	 */
	if (gpiod_cansleep(desc))
		chip->can_sleep = true;

	fwd->descs[offset] = desc;

	dev_dbg(chip->parent, "%u => gpio %d irq %d\n", offset,
		desc_to_gpio(desc), gpiod_to_irq(desc));

	return 0;
}
EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_desc_add, "GPIO_FORWARDER");

/**
 * gpiochip_fwd_desc_free - Remove a GPIO desc from the forwarder
 * @fwd: GPIO forwarder
 * @offset: offset of GPIO desc to remove
 */
void gpiochip_fwd_desc_free(struct gpiochip_fwd *fwd, unsigned int offset)
{
	if (test_and_clear_bit(offset, fwd->valid_mask))
		gpiod_put(fwd->descs[offset]);
}
EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_desc_free, "GPIO_FORWARDER");

/**
 * gpiochip_fwd_register - Register a GPIO forwarder
 * @fwd: GPIO forwarder
 * @data: driver-private data associated with this forwarder
 *
 * Returns: 0 on success, or negative errno on failure.
 */
int gpiochip_fwd_register(struct gpiochip_fwd *fwd, void *data)
{
	struct gpio_chip *chip = &fwd->chip;

	/*
	 * Some gpio_desc were not registered. They will be registered at runtime
	 * but we have to suppose they can sleep.
	 */
	if (!bitmap_full(fwd->valid_mask, chip->ngpio))
		chip->can_sleep = true;

	if (chip->can_sleep)
		mutex_init(&fwd->mlock);
	else
		spin_lock_init(&fwd->slock);

	fwd->data = data;

	return devm_gpiochip_add_data(chip->parent, chip, fwd);
}
EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_register, "GPIO_FORWARDER");

/**
 * gpiochip_fwd_create() - Create a new GPIO forwarder
 * @dev: Parent device pointer
 * @ngpios: Number of GPIOs in the forwarder.
 * @descs: Array containing the GPIO descriptors to forward to.
 *	This array must contain @ngpios entries, and can be deallocated
 *	as the forwarder has its own array.
 * @features: Bitwise ORed features as defined with FWD_FEATURE_*.
 *
 * This function creates a new gpiochip, which forwards all GPIO operations to
 * the passed GPIO descriptors.
 *
 * Return: An opaque object pointer, or an ERR_PTR()-encoded negative error
 * code on failure.
 */
static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
						unsigned int ngpios,
						struct gpio_desc *descs[],
						unsigned long features)
{
	struct gpiochip_fwd *fwd;
	unsigned int i;
	int error;

	fwd = devm_gpiochip_fwd_alloc(dev, ngpios);
	if (IS_ERR(fwd))
		return fwd;

	for (i = 0; i < ngpios; i++) {
		error = gpiochip_fwd_desc_add(fwd, descs[i], i);
		if (error)
			return ERR_PTR(error);
	}

	if (features & FWD_FEATURE_DELAY) {
		error = gpiochip_fwd_setup_delay_line(fwd);
		if (error)
			return ERR_PTR(error);
	}

	error = gpiochip_fwd_register(fwd, NULL);
	if (error)
		return ERR_PTR(error);

	return fwd;
}

/*
 * Configfs interface
 */

/* Map a configfs item back to its aggregator. */
static struct gpio_aggregator *
to_gpio_aggregator(struct config_item *item)
{
	struct config_group *group = to_config_group(item);

	return container_of(group, struct gpio_aggregator, group);
}

/* Map a configfs item back to its aggregator line. */
static struct gpio_aggregator_line *
to_gpio_aggregator_line(struct config_item *item)
{
	struct config_group *group = to_config_group(item);

	return container_of(group, struct gpio_aggregator_line, group);
}

/*
 * Build the software node carrying the "gpio-line-names" property for
 * the aggregator's platform device.  Returns NULL when no lines are
 * configured, or an ERR_PTR()-encoded negative errno on failure.
 */
static struct fwnode_handle *
gpio_aggregator_make_device_sw_node(struct gpio_aggregator *aggr)
{
	struct property_entry properties[2];
	struct gpio_aggregator_line *line;
	size_t num_lines;
	int n = 0;

	memset(properties, 0, sizeof(properties));

	num_lines = gpio_aggregator_count_lines(aggr);
	if (num_lines == 0)
		return NULL;

	const char **line_names __free(kfree) = kcalloc(
			num_lines, sizeof(*line_names), GFP_KERNEL);
	if (!line_names)
		return ERR_PTR(-ENOMEM);

	/* The list is always sorted as new elements are inserted in order. */
	list_for_each_entry(line, &aggr->list_head, entry)
		line_names[n++] = line->name ?: "";

	properties[0] = PROPERTY_ENTRY_STRING_ARRAY_LEN(
			"gpio-line-names",
			line_names, num_lines);

	return fwnode_create_software_node(properties, NULL);
}

/*
 * Instantiate the aggregator: build the lookup table from the configured
 * lines, create the software node, register the lookup table with
 * gpiolib, and register the platform device.  Caller holds aggr->lock.
 */
static int gpio_aggregator_activate(struct gpio_aggregator *aggr)
{
	struct platform_device_info pdevinfo;
	struct gpio_aggregator_line *line;
	struct fwnode_handle *swnode;
	unsigned int n = 0;
	int ret = 0;

	if (gpio_aggregator_count_lines(aggr) == 0)
		return -EINVAL;

	aggr->lookups = kzalloc(struct_size(aggr->lookups, table, 1),
				GFP_KERNEL);
	if (!aggr->lookups)
		return -ENOMEM;

	swnode = gpio_aggregator_make_device_sw_node(aggr);
	if (IS_ERR(swnode)) {
		ret = PTR_ERR(swnode);
		goto err_remove_lookups;
	}

	memset(&pdevinfo, 0, sizeof(pdevinfo));
	pdevinfo.name = DRV_NAME;
	pdevinfo.id = aggr->id;
	pdevinfo.fwnode = swnode;

	/* The list is always sorted as new elements are inserted in order. */
	list_for_each_entry(line, &aggr->list_head, entry) {
		/*
		 * - Either GPIO chip label or line name must be configured
		 *   (i.e. line->key must be non-NULL)
		 * - Line directories must be named with sequential numeric
		 *   suffixes starting from 0. (i.e. ./line0, ./line1, ...)
		 */
		if (!line->key || line->idx != n) {
			ret = -EINVAL;
			goto err_remove_swnode;
		}

		/* offset < 0 requests a lookup by line name (U16_MAX). */
		if (line->offset < 0)
			ret = gpio_aggregator_add_gpio(aggr, line->key,
						       U16_MAX, &n);
		else
			ret = gpio_aggregator_add_gpio(aggr, line->key,
						       line->offset, &n);
		if (ret)
			goto err_remove_swnode;
	}

	aggr->lookups->dev_id = kasprintf(GFP_KERNEL, "%s.%d", DRV_NAME, aggr->id);
	if (!aggr->lookups->dev_id) {
		ret = -ENOMEM;
		goto err_remove_swnode;
	}

	gpiod_add_lookup_table(aggr->lookups);

	ret = dev_sync_probe_register(&aggr->probe_data, &pdevinfo);
	if (ret)
		goto err_remove_lookup_table;

	return 0;

err_remove_lookup_table:
	kfree(aggr->lookups->dev_id);
	gpiod_remove_lookup_table(aggr->lookups);
err_remove_swnode:
	fwnode_remove_software_node(swnode);
err_remove_lookups:
	kfree(aggr->lookups);

	return ret;
}

/* Tear down an active aggregator, reversing gpio_aggregator_activate(). */
static void gpio_aggregator_deactivate(struct gpio_aggregator *aggr)
{
	dev_sync_probe_unregister(&aggr->probe_data);
	gpiod_remove_lookup_table(aggr->lookups);
	kfree(aggr->lookups->dev_id);
	kfree(aggr->lookups);
}

/*
 * Pin (lock=true) or unpin (lock=false) the configfs items an active
 * device depends on, so they cannot be rmdir'ed while the device lives.
 */
static void gpio_aggregator_lockup_configfs(struct gpio_aggregator *aggr,
					    bool lock)
{
	struct configfs_subsystem *subsys = aggr->group.cg_subsys;
	struct gpio_aggregator_line *line;

	/*
	 * The device only needs to depend on leaf lines. This is
	 * sufficient to lock up all the configfs entries that the
	 * instantiated, alive device depends on.
	 */
	list_for_each_entry(line, &aggr->list_head, entry) {
		if (lock)
			configfs_depend_item_unlocked(
					subsys, &line->group.cg_item);
		else
			configfs_undepend_item_unlocked(
					&line->group.cg_item);
	}
}

/* Show the line's "key" (chip label or line name). */
static ssize_t
gpio_aggregator_line_key_show(struct config_item *item, char *page)
{
	struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
	struct gpio_aggregator *aggr = line->parent;

	guard(mutex)(&aggr->lock);

	return sysfs_emit(page, "%s\n", line->key ?: "");
}

/* Store a new "key"; rejected while the device is active or activating. */
static ssize_t
gpio_aggregator_line_key_store(struct config_item *item, const char *page,
			       size_t count)
{
	struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
	struct gpio_aggregator *aggr = line->parent;

	char *key __free(kfree) = kstrndup(skip_spaces(page), count,
					   GFP_KERNEL);
	if (!key)
		return -ENOMEM;

	strim(key);

	guard(mutex)(&aggr->lock);

	if (gpio_aggregator_is_activating(aggr) ||
	    gpio_aggregator_is_active(aggr))
		return -EBUSY;

	kfree(line->key);
	line->key = no_free_ptr(key);

	return count;
}
CONFIGFS_ATTR(gpio_aggregator_line_, key);

/* Show the line's custom name. */
static ssize_t
gpio_aggregator_line_name_show(struct config_item *item, char *page)
{
	struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
	struct gpio_aggregator *aggr = line->parent;

	guard(mutex)(&aggr->lock);

	return sysfs_emit(page, "%s\n", line->name ?: "");
}

/* Store a new custom name; rejected while the device is active/activating. */
static ssize_t
gpio_aggregator_line_name_store(struct config_item *item, const char *page,
				size_t count)
{
	struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
	struct gpio_aggregator *aggr = line->parent;

	char *name __free(kfree) = kstrndup(skip_spaces(page), count,
					    GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	strim(name);

	guard(mutex)(&aggr->lock);

	if (gpio_aggregator_is_activating(aggr) ||
	    gpio_aggregator_is_active(aggr))
		return -EBUSY;

	kfree(line->name);
	line->name = no_free_ptr(name);

	return count;
}
CONFIGFS_ATTR(gpio_aggregator_line_, name);

/* Show the line's offset within its chip (-1 = lookup by line name). */
static ssize_t
gpio_aggregator_line_offset_show(struct config_item *item, char *page)
{
	struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
	struct gpio_aggregator *aggr = line->parent;

	guard(mutex)(&aggr->lock);

	return sysfs_emit(page, "%d\n", line->offset);
}

/* Store a new offset; rejected while the device is active/activating. */
static ssize_t
gpio_aggregator_line_offset_store(struct config_item *item, const char *page,
				  size_t count)
{
	struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
	struct gpio_aggregator *aggr = line->parent;
	int offset, ret;

	ret = kstrtoint(page, 0, &offset);
	if (ret)
		return ret;

	/*
	 * When offset == -1, 'key' represents a line name to lookup.
	 * When 0 <= offset < 65535, 'key' represents the label of the chip with
	 * the 'offset' value representing the line within that chip.
	 *
	 * GPIOLIB uses the U16_MAX value to indicate lookup by line name so
	 * the greatest offset we can accept is (U16_MAX - 1).
	 */
	if (offset > (U16_MAX - 1) || offset < -1)
		return -EINVAL;

	guard(mutex)(&aggr->lock);

	if (gpio_aggregator_is_activating(aggr) ||
	    gpio_aggregator_is_active(aggr))
		return -EBUSY;

	line->offset = offset;

	return count;
}
CONFIGFS_ATTR(gpio_aggregator_line_, offset);

static struct configfs_attribute *gpio_aggregator_line_attrs[] = {
	&gpio_aggregator_line_attr_key,
	&gpio_aggregator_line_attr_name,
	&gpio_aggregator_line_attr_offset,
	NULL
};

/* Show the platform device name (actual if probed, predicted otherwise). */
static ssize_t
gpio_aggregator_device_dev_name_show(struct config_item *item, char *page)
{
	struct gpio_aggregator *aggr = to_gpio_aggregator(item);
	struct platform_device *pdev;

	guard(mutex)(&aggr->lock);

	pdev = aggr->probe_data.pdev;
	if (pdev)
		return sysfs_emit(page, "%s\n", dev_name(&pdev->dev));

	return sysfs_emit(page, "%s.%d\n", DRV_NAME, aggr->id);
}
CONFIGFS_ATTR_RO(gpio_aggregator_device_, dev_name);

/* Show '1' when the aggregator device is instantiated and probed. */
static ssize_t
gpio_aggregator_device_live_show(struct config_item *item, char *page)
{
	struct gpio_aggregator *aggr = to_gpio_aggregator(item);

	guard(mutex)(&aggr->lock);

	return sysfs_emit(page, "%c\n",
			  gpio_aggregator_is_active(aggr) ? '1' : '0');
}

/*
 * Writing 1 activates the aggregator, writing 0 deactivates it.  The
 * configfs depend/undepend bracketing keeps line items pinned exactly
 * while the device is (or is becoming) live.
 */
static ssize_t
gpio_aggregator_device_live_store(struct config_item *item, const char *page,
				  size_t count)
{
	struct gpio_aggregator *aggr = to_gpio_aggregator(item);
	int ret = 0;
	bool live;

	ret = kstrtobool(page, &live);
	if (ret)
		return ret;

	if (!try_module_get(THIS_MODULE))
		return -ENOENT;

	if (live && !aggr->init_via_sysfs)
		gpio_aggregator_lockup_configfs(aggr, true);

	scoped_guard(mutex, &aggr->lock) {
		if (gpio_aggregator_is_activating(aggr) ||
		    (live == gpio_aggregator_is_active(aggr)))
			ret = -EPERM;
		else if (live)
			ret = gpio_aggregator_activate(aggr);
		else
			gpio_aggregator_deactivate(aggr);
	}

	/*
	 * Undepend is required only if device disablement (live == 0)
	 * succeeds or if device enablement (live == 1) fails.
	 */
	if (live == !!ret && !aggr->init_via_sysfs)
		gpio_aggregator_lockup_configfs(aggr, false);

	module_put(THIS_MODULE);

	return ret ?: count;
}
CONFIGFS_ATTR(gpio_aggregator_device_, live);

static struct configfs_attribute *gpio_aggregator_device_attrs[] = {
	&gpio_aggregator_device_attr_dev_name,
	&gpio_aggregator_device_attr_live,
	NULL
};

/* configfs release: unlink the line from its aggregator and free it. */
static void
gpio_aggregator_line_release(struct config_item *item)
{
	struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
	struct gpio_aggregator *aggr = line->parent;

	guard(mutex)(&aggr->lock);

	gpio_aggregator_line_del(aggr, line);
	kfree(line->key);
	kfree(line->name);
	kfree(line);
}

static struct configfs_item_operations gpio_aggregator_line_item_ops = {
	.release	= gpio_aggregator_line_release,
};

/* NOTE(review): initializer truncated at the visible end of this chunk. */
static const struct config_item_type gpio_aggregator_line_type = {
	.ct_item_ops	= &gpio_aggregator_line_item_ops,
	.ct_attrs	= gpio_aggregator_line_attrs,
.ct_owner = THIS_MODULE, 1236 }; 1237 1238 static void gpio_aggregator_device_release(struct config_item *item) 1239 { 1240 struct gpio_aggregator *aggr = to_gpio_aggregator(item); 1241 1242 /* 1243 * At this point, aggr is neither active nor activating, 1244 * so calling gpio_aggregator_deactivate() is always unnecessary. 1245 */ 1246 gpio_aggregator_free(aggr); 1247 } 1248 1249 static struct configfs_item_operations gpio_aggregator_device_item_ops = { 1250 .release = gpio_aggregator_device_release, 1251 }; 1252 1253 static struct config_group * 1254 gpio_aggregator_device_make_group(struct config_group *group, const char *name) 1255 { 1256 struct gpio_aggregator *aggr = to_gpio_aggregator(&group->cg_item); 1257 struct gpio_aggregator_line *line; 1258 unsigned int idx; 1259 int ret, nchar; 1260 1261 ret = sscanf(name, "line%u%n", &idx, &nchar); 1262 if (ret != 1 || nchar != strlen(name)) 1263 return ERR_PTR(-EINVAL); 1264 1265 if (aggr->init_via_sysfs) 1266 /* 1267 * Aggregators created via legacy sysfs interface are exposed as 1268 * default groups, which means rmdir(2) is prohibited for them. 1269 * For simplicity, and to avoid confusion, we also prohibit 1270 * mkdir(2). 
1271 */ 1272 return ERR_PTR(-EPERM); 1273 1274 guard(mutex)(&aggr->lock); 1275 1276 if (gpio_aggregator_is_active(aggr)) 1277 return ERR_PTR(-EBUSY); 1278 1279 list_for_each_entry(line, &aggr->list_head, entry) 1280 if (line->idx == idx) 1281 return ERR_PTR(-EINVAL); 1282 1283 line = gpio_aggregator_line_alloc(aggr, idx, NULL, -1); 1284 if (IS_ERR(line)) 1285 return ERR_CAST(line); 1286 1287 config_group_init_type_name(&line->group, name, &gpio_aggregator_line_type); 1288 1289 gpio_aggregator_line_add(aggr, line); 1290 1291 return &line->group; 1292 } 1293 1294 static struct configfs_group_operations gpio_aggregator_device_group_ops = { 1295 .make_group = gpio_aggregator_device_make_group, 1296 }; 1297 1298 static const struct config_item_type gpio_aggregator_device_type = { 1299 .ct_group_ops = &gpio_aggregator_device_group_ops, 1300 .ct_item_ops = &gpio_aggregator_device_item_ops, 1301 .ct_attrs = gpio_aggregator_device_attrs, 1302 .ct_owner = THIS_MODULE, 1303 }; 1304 1305 static struct config_group * 1306 gpio_aggregator_make_group(struct config_group *group, const char *name) 1307 { 1308 struct gpio_aggregator *aggr; 1309 int ret; 1310 1311 /* 1312 * "_sysfs" prefix is reserved for auto-generated config group 1313 * for devices create via legacy sysfs interface. 
1314 */ 1315 if (strncmp(name, AGGREGATOR_LEGACY_PREFIX, 1316 sizeof(AGGREGATOR_LEGACY_PREFIX) - 1) == 0) 1317 return ERR_PTR(-EINVAL); 1318 1319 /* arg space is unneeded */ 1320 ret = gpio_aggregator_alloc(&aggr, 0); 1321 if (ret) 1322 return ERR_PTR(ret); 1323 1324 config_group_init_type_name(&aggr->group, name, &gpio_aggregator_device_type); 1325 dev_sync_probe_init(&aggr->probe_data); 1326 1327 return &aggr->group; 1328 } 1329 1330 static struct configfs_group_operations gpio_aggregator_group_ops = { 1331 .make_group = gpio_aggregator_make_group, 1332 }; 1333 1334 static const struct config_item_type gpio_aggregator_type = { 1335 .ct_group_ops = &gpio_aggregator_group_ops, 1336 .ct_owner = THIS_MODULE, 1337 }; 1338 1339 static struct configfs_subsystem gpio_aggregator_subsys = { 1340 .su_group = { 1341 .cg_item = { 1342 .ci_namebuf = DRV_NAME, 1343 .ci_type = &gpio_aggregator_type, 1344 }, 1345 }, 1346 }; 1347 1348 /* 1349 * Sysfs interface 1350 */ 1351 static int gpio_aggregator_parse(struct gpio_aggregator *aggr) 1352 { 1353 char *args = skip_spaces(aggr->args); 1354 struct gpio_aggregator_line *line; 1355 char name[CONFIGFS_ITEM_NAME_LEN]; 1356 char *key, *offsets, *p; 1357 unsigned int i, n = 0; 1358 int error = 0; 1359 1360 unsigned long *bitmap __free(bitmap) = 1361 bitmap_alloc(AGGREGATOR_MAX_GPIOS, GFP_KERNEL); 1362 if (!bitmap) 1363 return -ENOMEM; 1364 1365 args = next_arg(args, &key, &p); 1366 while (*args) { 1367 args = next_arg(args, &offsets, &p); 1368 1369 p = get_options(offsets, 0, &error); 1370 if (error == 0 || *p) { 1371 /* Named GPIO line */ 1372 scnprintf(name, sizeof(name), "line%u", n); 1373 line = gpio_aggregator_line_alloc(aggr, n, key, -1); 1374 if (IS_ERR(line)) { 1375 error = PTR_ERR(line); 1376 goto err; 1377 } 1378 config_group_init_type_name(&line->group, name, 1379 &gpio_aggregator_line_type); 1380 error = configfs_register_group(&aggr->group, 1381 &line->group); 1382 if (error) 1383 goto err; 1384 scoped_guard(mutex, 
&aggr->lock) 1385 gpio_aggregator_line_add(aggr, line); 1386 1387 error = gpio_aggregator_add_gpio(aggr, key, U16_MAX, &n); 1388 if (error) 1389 goto err; 1390 1391 key = offsets; 1392 continue; 1393 } 1394 1395 /* GPIO chip + offset(s) */ 1396 error = bitmap_parselist(offsets, bitmap, AGGREGATOR_MAX_GPIOS); 1397 if (error) { 1398 pr_err("Cannot parse %s: %d\n", offsets, error); 1399 goto err; 1400 } 1401 1402 for_each_set_bit(i, bitmap, AGGREGATOR_MAX_GPIOS) { 1403 scnprintf(name, sizeof(name), "line%u", n); 1404 line = gpio_aggregator_line_alloc(aggr, n, key, i); 1405 if (IS_ERR(line)) { 1406 error = PTR_ERR(line); 1407 goto err; 1408 } 1409 config_group_init_type_name(&line->group, name, 1410 &gpio_aggregator_line_type); 1411 error = configfs_register_group(&aggr->group, 1412 &line->group); 1413 if (error) 1414 goto err; 1415 scoped_guard(mutex, &aggr->lock) 1416 gpio_aggregator_line_add(aggr, line); 1417 1418 error = gpio_aggregator_add_gpio(aggr, key, i, &n); 1419 if (error) 1420 goto err; 1421 } 1422 1423 args = next_arg(args, &key, &p); 1424 } 1425 1426 if (!n) { 1427 pr_err("No GPIOs specified\n"); 1428 error = -EINVAL; 1429 goto err; 1430 } 1431 1432 return 0; 1433 1434 err: 1435 gpio_aggregator_free_lines(aggr); 1436 return error; 1437 } 1438 1439 static ssize_t gpio_aggregator_new_device_store(struct device_driver *driver, 1440 const char *buf, size_t count) 1441 { 1442 struct gpio_aggregator_pdev_meta meta = { .init_via_sysfs = true }; 1443 char name[CONFIGFS_ITEM_NAME_LEN]; 1444 struct gpio_aggregator *aggr; 1445 struct platform_device *pdev; 1446 int res; 1447 1448 if (!try_module_get(THIS_MODULE)) 1449 return -ENOENT; 1450 1451 /* kernfs guarantees string termination, so count + 1 is safe */ 1452 res = gpio_aggregator_alloc(&aggr, count + 1); 1453 if (res) 1454 goto put_module; 1455 1456 memcpy(aggr->args, buf, count + 1); 1457 1458 aggr->init_via_sysfs = true; 1459 aggr->lookups = kzalloc(struct_size(aggr->lookups, table, 1), 1460 GFP_KERNEL); 1461 
if (!aggr->lookups) { 1462 res = -ENOMEM; 1463 goto free_ga; 1464 } 1465 1466 aggr->lookups->dev_id = kasprintf(GFP_KERNEL, "%s.%d", DRV_NAME, aggr->id); 1467 if (!aggr->lookups->dev_id) { 1468 res = -ENOMEM; 1469 goto free_table; 1470 } 1471 1472 scnprintf(name, sizeof(name), "%s.%d", AGGREGATOR_LEGACY_PREFIX, aggr->id); 1473 config_group_init_type_name(&aggr->group, name, &gpio_aggregator_device_type); 1474 1475 /* 1476 * Since the device created by sysfs might be toggled via configfs 1477 * 'live' attribute later, this initialization is needed. 1478 */ 1479 dev_sync_probe_init(&aggr->probe_data); 1480 1481 /* Expose to configfs */ 1482 res = configfs_register_group(&gpio_aggregator_subsys.su_group, 1483 &aggr->group); 1484 if (res) 1485 goto free_dev_id; 1486 1487 res = gpio_aggregator_parse(aggr); 1488 if (res) 1489 goto unregister_group; 1490 1491 gpiod_add_lookup_table(aggr->lookups); 1492 1493 pdev = platform_device_register_data(NULL, DRV_NAME, aggr->id, &meta, sizeof(meta)); 1494 if (IS_ERR(pdev)) { 1495 res = PTR_ERR(pdev); 1496 goto remove_table; 1497 } 1498 1499 aggr->probe_data.pdev = pdev; 1500 module_put(THIS_MODULE); 1501 return count; 1502 1503 remove_table: 1504 gpiod_remove_lookup_table(aggr->lookups); 1505 unregister_group: 1506 configfs_unregister_group(&aggr->group); 1507 free_dev_id: 1508 kfree(aggr->lookups->dev_id); 1509 free_table: 1510 kfree(aggr->lookups); 1511 free_ga: 1512 gpio_aggregator_free(aggr); 1513 put_module: 1514 module_put(THIS_MODULE); 1515 return res; 1516 } 1517 1518 static struct driver_attribute driver_attr_gpio_aggregator_new_device = 1519 __ATTR(new_device, 0200, NULL, gpio_aggregator_new_device_store); 1520 1521 static void gpio_aggregator_destroy(struct gpio_aggregator *aggr) 1522 { 1523 scoped_guard(mutex, &aggr->lock) { 1524 if (gpio_aggregator_is_activating(aggr) || 1525 gpio_aggregator_is_active(aggr)) 1526 gpio_aggregator_deactivate(aggr); 1527 } 1528 gpio_aggregator_free_lines(aggr); 1529 
configfs_unregister_group(&aggr->group); 1530 kfree(aggr); 1531 } 1532 1533 static ssize_t gpio_aggregator_delete_device_store(struct device_driver *driver, 1534 const char *buf, size_t count) 1535 { 1536 struct gpio_aggregator *aggr; 1537 unsigned int id; 1538 int error; 1539 1540 if (!str_has_prefix(buf, DRV_NAME ".")) 1541 return -EINVAL; 1542 1543 error = kstrtouint(buf + strlen(DRV_NAME "."), 10, &id); 1544 if (error) 1545 return error; 1546 1547 if (!try_module_get(THIS_MODULE)) 1548 return -ENOENT; 1549 1550 mutex_lock(&gpio_aggregator_lock); 1551 aggr = idr_find(&gpio_aggregator_idr, id); 1552 /* 1553 * For simplicity, devices created via configfs cannot be deleted 1554 * via sysfs. 1555 */ 1556 if (aggr && aggr->init_via_sysfs) 1557 idr_remove(&gpio_aggregator_idr, id); 1558 else { 1559 mutex_unlock(&gpio_aggregator_lock); 1560 module_put(THIS_MODULE); 1561 return -ENOENT; 1562 } 1563 mutex_unlock(&gpio_aggregator_lock); 1564 1565 gpio_aggregator_destroy(aggr); 1566 module_put(THIS_MODULE); 1567 return count; 1568 } 1569 1570 static struct driver_attribute driver_attr_gpio_aggregator_delete_device = 1571 __ATTR(delete_device, 0200, NULL, gpio_aggregator_delete_device_store); 1572 1573 static struct attribute *gpio_aggregator_attrs[] = { 1574 &driver_attr_gpio_aggregator_new_device.attr, 1575 &driver_attr_gpio_aggregator_delete_device.attr, 1576 NULL 1577 }; 1578 ATTRIBUTE_GROUPS(gpio_aggregator); 1579 1580 /* 1581 * GPIO Aggregator platform device 1582 */ 1583 1584 static int gpio_aggregator_probe(struct platform_device *pdev) 1585 { 1586 struct gpio_aggregator_pdev_meta *meta; 1587 struct device *dev = &pdev->dev; 1588 bool init_via_sysfs = false; 1589 struct gpio_desc **descs; 1590 struct gpiochip_fwd *fwd; 1591 unsigned long features; 1592 int i, n; 1593 1594 n = gpiod_count(dev, NULL); 1595 if (n < 0) 1596 return n; 1597 1598 descs = devm_kmalloc_array(dev, n, sizeof(*descs), GFP_KERNEL); 1599 if (!descs) 1600 return -ENOMEM; 1601 1602 meta = 
dev_get_platdata(&pdev->dev); 1603 if (meta && meta->init_via_sysfs) 1604 init_via_sysfs = true; 1605 1606 for (i = 0; i < n; i++) { 1607 descs[i] = devm_gpiod_get_index(dev, NULL, i, GPIOD_ASIS); 1608 if (IS_ERR(descs[i])) { 1609 /* 1610 * Deferred probing is not suitable when the aggregator 1611 * is created via configfs. They should just retry later 1612 * whenever they like. For device creation via sysfs, 1613 * error is propagated without overriding for backward 1614 * compatibility. .prevent_deferred_probe is kept unset 1615 * for other cases. 1616 */ 1617 if (!init_via_sysfs && !dev_of_node(dev) && 1618 descs[i] == ERR_PTR(-EPROBE_DEFER)) { 1619 pr_warn("Deferred probe canceled for creation via configfs.\n"); 1620 return -ENODEV; 1621 } 1622 return PTR_ERR(descs[i]); 1623 } 1624 } 1625 1626 features = (uintptr_t)device_get_match_data(dev); 1627 fwd = gpiochip_fwd_create(dev, n, descs, features); 1628 if (IS_ERR(fwd)) 1629 return PTR_ERR(fwd); 1630 1631 platform_set_drvdata(pdev, fwd); 1632 devm_kfree(dev, descs); 1633 return 0; 1634 } 1635 1636 static const struct of_device_id gpio_aggregator_dt_ids[] = { 1637 { 1638 .compatible = "gpio-delay", 1639 .data = (void *)FWD_FEATURE_DELAY, 1640 }, 1641 /* 1642 * Add GPIO-operated devices controlled from userspace below, 1643 * or use "driver_override" in sysfs. 1644 */ 1645 {} 1646 }; 1647 MODULE_DEVICE_TABLE(of, gpio_aggregator_dt_ids); 1648 1649 static struct platform_driver gpio_aggregator_driver = { 1650 .probe = gpio_aggregator_probe, 1651 .driver = { 1652 .name = DRV_NAME, 1653 .groups = gpio_aggregator_groups, 1654 .of_match_table = gpio_aggregator_dt_ids, 1655 }, 1656 }; 1657 1658 static int __exit gpio_aggregator_idr_remove(int id, void *p, void *data) 1659 { 1660 /* 1661 * There should be no aggregator created via configfs, as their 1662 * presence would prevent module unloading. 
1663 */ 1664 gpio_aggregator_destroy(p); 1665 return 0; 1666 } 1667 1668 static void __exit gpio_aggregator_remove_all(void) 1669 { 1670 /* 1671 * Configfs callbacks acquire gpio_aggregator_lock when accessing 1672 * gpio_aggregator_idr, so to prevent lock inversion deadlock, we 1673 * cannot protect idr_for_each invocation here with 1674 * gpio_aggregator_lock, as gpio_aggregator_idr_remove() accesses 1675 * configfs groups. Fortunately, the new_device/delete_device path 1676 * and the module unload path are mutually exclusive, thanks to an 1677 * explicit try_module_get inside of those driver attr handlers. 1678 * Also, when we reach here, no configfs entries present or being 1679 * created. Therefore, no need to protect with gpio_aggregator_lock 1680 * below. 1681 */ 1682 idr_for_each(&gpio_aggregator_idr, gpio_aggregator_idr_remove, NULL); 1683 idr_destroy(&gpio_aggregator_idr); 1684 } 1685 1686 static int __init gpio_aggregator_init(void) 1687 { 1688 int ret = 0; 1689 1690 config_group_init(&gpio_aggregator_subsys.su_group); 1691 mutex_init(&gpio_aggregator_subsys.su_mutex); 1692 ret = configfs_register_subsystem(&gpio_aggregator_subsys); 1693 if (ret) { 1694 pr_err("Failed to register the '%s' configfs subsystem: %d\n", 1695 gpio_aggregator_subsys.su_group.cg_item.ci_namebuf, ret); 1696 mutex_destroy(&gpio_aggregator_subsys.su_mutex); 1697 return ret; 1698 } 1699 1700 /* 1701 * CAVEAT: This must occur after configfs registration. Otherwise, 1702 * a race condition could arise: driver attribute groups might be 1703 * exposed and accessed by users before configfs registration 1704 * completes. new_device_store() does not expect a partially 1705 * initialized configfs state. 
1706 */ 1707 ret = platform_driver_register(&gpio_aggregator_driver); 1708 if (ret) { 1709 pr_err("Failed to register the platform driver: %d\n", ret); 1710 mutex_destroy(&gpio_aggregator_subsys.su_mutex); 1711 configfs_unregister_subsystem(&gpio_aggregator_subsys); 1712 } 1713 1714 return ret; 1715 } 1716 module_init(gpio_aggregator_init); 1717 1718 static void __exit gpio_aggregator_exit(void) 1719 { 1720 gpio_aggregator_remove_all(); 1721 platform_driver_unregister(&gpio_aggregator_driver); 1722 configfs_unregister_subsystem(&gpio_aggregator_subsys); 1723 } 1724 module_exit(gpio_aggregator_exit); 1725 1726 MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>"); 1727 MODULE_DESCRIPTION("GPIO Aggregator"); 1728 MODULE_LICENSE("GPL v2"); 1729