// SPDX-License-Identifier: GPL-2.0-only
//
// GPIO Aggregator
//
// Copyright (C) 2019-2020 Glider bv

#define DRV_NAME	"gpio-aggregator"
#define pr_fmt(fmt)	DRV_NAME ": " fmt

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/machine.h>

#define AGGREGATOR_MAX_GPIOS 512

/*
 * GPIO Aggregator sysfs interface
 */

struct gpio_aggregator {
	struct gpiod_lookup_table *lookups;
	struct platform_device *pdev;
	char args[];
};

static DEFINE_MUTEX(gpio_aggregator_lock);	/* protects idr */
static DEFINE_IDR(gpio_aggregator_idr);

static int aggr_add_gpio(struct gpio_aggregator *aggr, const char *key,
			 int hwnum, unsigned int *n)
{
	struct gpiod_lookup_table *lookups;

	lookups = krealloc(aggr->lookups, struct_size(lookups, table, *n + 2),
			   GFP_KERNEL);
	if (!lookups)
		return -ENOMEM;

	lookups->table[*n] = GPIO_LOOKUP_IDX(key, hwnum, NULL, *n, 0);

	(*n)++;
	memset(&lookups->table[*n], 0, sizeof(lookups->table[*n]));

	aggr->lookups = lookups;
	return 0;
}

static int aggr_parse(struct gpio_aggregator *aggr)
{
	char *args = skip_spaces(aggr->args);
	char *name, *offsets, *p;
	unsigned int i, n = 0;
	int error = 0;

	unsigned long *bitmap __free(bitmap) =
			bitmap_alloc(AGGREGATOR_MAX_GPIOS, GFP_KERNEL);
	if (!bitmap)
		return -ENOMEM;

	args = next_arg(args, &name, &p);
	while (*args) {
		args = next_arg(args, &offsets, &p);

		p = get_options(offsets, 0, &error);
		if (error == 0 || *p) {
			/* Named GPIO line */
			error = aggr_add_gpio(aggr, name, U16_MAX, &n);
			if (error)
				return error;

			name = offsets;
			continue;
		}

		/* GPIO chip + offset(s) */
		error = bitmap_parselist(offsets, bitmap, AGGREGATOR_MAX_GPIOS);
		if (error) {
			pr_err("Cannot parse %s: %d\n", offsets, error);
			return error;
		}

		for_each_set_bit(i, bitmap, AGGREGATOR_MAX_GPIOS) {
			error = aggr_add_gpio(aggr, name, i, &n);
			if (error)
				return error;
		}

		args = next_arg(args, &name, &p);
	}

	if (!n) {
		pr_err("No GPIOs specified\n");
		return -EINVAL;
	}

	return 0;
}

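/*
 * Illustrative sysfs usage (chip labels and offsets below are examples only;
 * see Documentation/admin-guide/gpio/gpio-aggregator.rst for the reference
 * description).  A write to new_device takes pairs of "<chip label>
 * <offset list>" and/or full GPIO line names, as parsed by aggr_parse()
 * above; delete_device takes the resulting "gpio-aggregator.<id>" name:
 *
 *   echo 'e6052000.gpio 19,20-21 e6050000.gpio 0' \
 *	> /sys/bus/platform/drivers/gpio-aggregator/new_device
 *
 *   echo gpio-aggregator.0 \
 *	> /sys/bus/platform/drivers/gpio-aggregator/delete_device
 */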

static ssize_t new_device_store(struct device_driver *driver, const char *buf,
				size_t count)
{
	struct gpio_aggregator *aggr;
	struct platform_device *pdev;
	int res, id;

	/* kernfs guarantees string termination, so count + 1 is safe */
	aggr = kzalloc(sizeof(*aggr) + count + 1, GFP_KERNEL);
	if (!aggr)
		return -ENOMEM;

	memcpy(aggr->args, buf, count + 1);

	aggr->lookups = kzalloc(struct_size(aggr->lookups, table, 1),
				GFP_KERNEL);
	if (!aggr->lookups) {
		res = -ENOMEM;
		goto free_ga;
	}

	mutex_lock(&gpio_aggregator_lock);
	id = idr_alloc(&gpio_aggregator_idr, aggr, 0, 0, GFP_KERNEL);
	mutex_unlock(&gpio_aggregator_lock);

	if (id < 0) {
		res = id;
		goto free_table;
	}

	aggr->lookups->dev_id = kasprintf(GFP_KERNEL, "%s.%d", DRV_NAME, id);
	if (!aggr->lookups->dev_id) {
		res = -ENOMEM;
		goto remove_idr;
	}

	res = aggr_parse(aggr);
	if (res)
		goto free_dev_id;

	gpiod_add_lookup_table(aggr->lookups);

	pdev = platform_device_register_simple(DRV_NAME, id, NULL, 0);
	if (IS_ERR(pdev)) {
		res = PTR_ERR(pdev);
		goto remove_table;
	}

	aggr->pdev = pdev;
	return count;

remove_table:
	gpiod_remove_lookup_table(aggr->lookups);
free_dev_id:
	kfree(aggr->lookups->dev_id);
remove_idr:
	mutex_lock(&gpio_aggregator_lock);
	idr_remove(&gpio_aggregator_idr, id);
	mutex_unlock(&gpio_aggregator_lock);
free_table:
	kfree(aggr->lookups);
free_ga:
	kfree(aggr);
	return res;
}

static DRIVER_ATTR_WO(new_device);

static void gpio_aggregator_free(struct gpio_aggregator *aggr)
{
	platform_device_unregister(aggr->pdev);
	gpiod_remove_lookup_table(aggr->lookups);
	kfree(aggr->lookups->dev_id);
	kfree(aggr->lookups);
	kfree(aggr);
}

static ssize_t delete_device_store(struct device_driver *driver,
				   const char *buf, size_t count)
{
	struct gpio_aggregator *aggr;
	unsigned int id;
	int error;

	if (!str_has_prefix(buf, DRV_NAME "."))
		return -EINVAL;

	error = kstrtouint(buf + strlen(DRV_NAME "."), 10, &id);
	if (error)
		return error;

	mutex_lock(&gpio_aggregator_lock);
	aggr = idr_remove(&gpio_aggregator_idr, id);
	mutex_unlock(&gpio_aggregator_lock);
	if (!aggr)
		return -ENOENT;

	gpio_aggregator_free(aggr);
	return count;
}
static DRIVER_ATTR_WO(delete_device);

static struct attribute *gpio_aggregator_attrs[] = {
	&driver_attr_new_device.attr,
	&driver_attr_delete_device.attr,
	NULL
};
ATTRIBUTE_GROUPS(gpio_aggregator);

static int __exit gpio_aggregator_idr_remove(int id, void *p, void *data)
{
	gpio_aggregator_free(p);
	return 0;
}

static void __exit gpio_aggregator_remove_all(void)
{
	mutex_lock(&gpio_aggregator_lock);
	idr_for_each(&gpio_aggregator_idr, gpio_aggregator_idr_remove, NULL);
	idr_destroy(&gpio_aggregator_idr);
	mutex_unlock(&gpio_aggregator_lock);
}


/*
 * GPIO Forwarder
 */

struct gpiochip_fwd_timing {
	u32 ramp_up_us;
	u32 ramp_down_us;
};

struct gpiochip_fwd {
	struct gpio_chip chip;
	struct gpio_desc **descs;
	union {
		struct mutex mlock;	/* protects tmp[] if can_sleep */
		spinlock_t slock;	/* protects tmp[] if !can_sleep */
	};
	struct gpiochip_fwd_timing *delay_timings;
	unsigned long tmp[];		/* values and descs for multiple ops */
};

#define fwd_tmp_values(fwd)	&(fwd)->tmp[0]
#define fwd_tmp_descs(fwd)	(void *)&(fwd)->tmp[BITS_TO_LONGS((fwd)->chip.ngpio)]

#define fwd_tmp_size(ngpios)	(BITS_TO_LONGS((ngpios)) + (ngpios))

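/*
 * Note on the scratch space above: tmp[] provides fwd_tmp_size(ngpio)
 * unsigned longs.  The first BITS_TO_LONGS(ngpio) longs are the value
 * bitmap returned by fwd_tmp_values(), and the remaining ngpio longs are
 * reused by fwd_tmp_descs() as an array of ngpio struct gpio_desc
 * pointers (relying on sizeof(unsigned long) == sizeof(void *)), so a
 * single allocation serves both the get_multiple and set_multiple paths.
 */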

static int gpio_fwd_get_direction(struct gpio_chip *chip, unsigned int offset)
{
	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);

	return gpiod_get_direction(fwd->descs[offset]);
}

static int gpio_fwd_direction_input(struct gpio_chip *chip, unsigned int offset)
{
	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);

	return gpiod_direction_input(fwd->descs[offset]);
}

static int gpio_fwd_direction_output(struct gpio_chip *chip,
				     unsigned int offset, int value)
{
	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);

	return gpiod_direction_output(fwd->descs[offset], value);
}

static int gpio_fwd_get(struct gpio_chip *chip, unsigned int offset)
{
	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);

	return chip->can_sleep ? gpiod_get_value_cansleep(fwd->descs[offset])
			       : gpiod_get_value(fwd->descs[offset]);
}

static int gpio_fwd_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
				 unsigned long *bits)
{
	struct gpio_desc **descs = fwd_tmp_descs(fwd);
	unsigned long *values = fwd_tmp_values(fwd);
	unsigned int i, j = 0;
	int error;

	bitmap_clear(values, 0, fwd->chip.ngpio);
	for_each_set_bit(i, mask, fwd->chip.ngpio)
		descs[j++] = fwd->descs[i];

	if (fwd->chip.can_sleep)
		error = gpiod_get_array_value_cansleep(j, descs, NULL, values);
	else
		error = gpiod_get_array_value(j, descs, NULL, values);
	if (error)
		return error;

	j = 0;
	for_each_set_bit(i, mask, fwd->chip.ngpio)
		__assign_bit(i, bits, test_bit(j++, values));

	return 0;
}

static int gpio_fwd_get_multiple_locked(struct gpio_chip *chip,
					unsigned long *mask, unsigned long *bits)
{
	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
	unsigned long flags;
	int error;

	if (chip->can_sleep) {
		mutex_lock(&fwd->mlock);
		error = gpio_fwd_get_multiple(fwd, mask, bits);
		mutex_unlock(&fwd->mlock);
	} else {
		spin_lock_irqsave(&fwd->slock, flags);
		error = gpio_fwd_get_multiple(fwd, mask, bits);
		spin_unlock_irqrestore(&fwd->slock, flags);
	}

	return error;
}

static void gpio_fwd_delay(struct gpio_chip *chip, unsigned int offset, int value)
{
	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
	const struct gpiochip_fwd_timing *delay_timings;
	bool is_active_low = gpiod_is_active_low(fwd->descs[offset]);
	u32 delay_us;

	delay_timings = &fwd->delay_timings[offset];
	if ((!is_active_low && value) || (is_active_low && !value))
		delay_us = delay_timings->ramp_up_us;
	else
		delay_us = delay_timings->ramp_down_us;
	if (!delay_us)
		return;

	if (chip->can_sleep)
		fsleep(delay_us);
	else
		udelay(delay_us);
}

static void gpio_fwd_set(struct gpio_chip *chip, unsigned int offset, int value)
{
	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);

	if (chip->can_sleep)
		gpiod_set_value_cansleep(fwd->descs[offset], value);
	else
		gpiod_set_value(fwd->descs[offset], value);

	if (fwd->delay_timings)
		gpio_fwd_delay(chip, offset, value);
}

static void gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
				  unsigned long *bits)
{
	struct gpio_desc **descs = fwd_tmp_descs(fwd);
	unsigned long *values = fwd_tmp_values(fwd);
	unsigned int i, j = 0;

	for_each_set_bit(i, mask, fwd->chip.ngpio) {
		__assign_bit(j, values, test_bit(i, bits));
		descs[j++] = fwd->descs[i];
	}

	if (fwd->chip.can_sleep)
		gpiod_set_array_value_cansleep(j, descs, NULL, values);
	else
		gpiod_set_array_value(j, descs, NULL, values);
}

static void gpio_fwd_set_multiple_locked(struct gpio_chip *chip,
					 unsigned long *mask, unsigned long *bits)
{
	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
	unsigned long flags;

	if (chip->can_sleep) {
		mutex_lock(&fwd->mlock);
		gpio_fwd_set_multiple(fwd, mask, bits);
		mutex_unlock(&fwd->mlock);
	} else {
		spin_lock_irqsave(&fwd->slock, flags);
		gpio_fwd_set_multiple(fwd, mask, bits);
		spin_unlock_irqrestore(&fwd->slock, flags);
	}
}

static int gpio_fwd_set_config(struct gpio_chip *chip, unsigned int offset,
			       unsigned long config)
{
	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);

	return gpiod_set_config(fwd->descs[offset], config);
}

static int gpio_fwd_to_irq(struct gpio_chip *chip, unsigned int offset)
{
	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);

	return gpiod_to_irq(fwd->descs[offset]);
}

/*
 * The GPIO delay feature provides a way to configure platform-specific
 * ramp-up and ramp-down delays for a GPIO line. This can serve the
 * following purposes:
 *   - Open-drain output using an RC filter
 */
#define FWD_FEATURE_DELAY		BIT(0)

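/*
 * Illustrative devicetree fragment for the delay feature (a sketch only;
 * the node names and delay values are made up, see the "gpio-delay"
 * bindings for the reference).  The consumer specifier carries three
 * cells, matching gpiochip_fwd_delay_of_xlate() below: line index,
 * ramp-up delay and ramp-down delay, both in microseconds:
 *
 *	enable_delay: enable-delay {
 *		compatible = "gpio-delay";
 *		#gpio-cells = <3>;
 *		gpio-controller;
 *		gpios = <&gpio2 5 GPIO_ACTIVE_HIGH>;
 *	};
 *
 *	panel {
 *		enable-gpios = <&enable_delay 0 130000 30000>;
 *	};
 */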

#ifdef CONFIG_OF_GPIO
static int gpiochip_fwd_delay_of_xlate(struct gpio_chip *chip,
				       const struct of_phandle_args *gpiospec,
				       u32 *flags)
{
	struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
	struct gpiochip_fwd_timing *timings;
	u32 line;

	if (gpiospec->args_count != chip->of_gpio_n_cells)
		return -EINVAL;

	line = gpiospec->args[0];
	if (line >= chip->ngpio)
		return -EINVAL;

	timings = &fwd->delay_timings[line];
	timings->ramp_up_us = gpiospec->args[1];
	timings->ramp_down_us = gpiospec->args[2];

	return line;
}

static int gpiochip_fwd_setup_delay_line(struct device *dev, struct gpio_chip *chip,
					 struct gpiochip_fwd *fwd)
{
	fwd->delay_timings = devm_kcalloc(dev, chip->ngpio,
					  sizeof(*fwd->delay_timings),
					  GFP_KERNEL);
	if (!fwd->delay_timings)
		return -ENOMEM;

	chip->of_xlate = gpiochip_fwd_delay_of_xlate;
	chip->of_gpio_n_cells = 3;

	return 0;
}
#else
static int gpiochip_fwd_setup_delay_line(struct device *dev, struct gpio_chip *chip,
					 struct gpiochip_fwd *fwd)
{
	return 0;
}
#endif	/* !CONFIG_OF_GPIO */

/**
 * gpiochip_fwd_create() - Create a new GPIO forwarder
 * @dev: Parent device pointer
 * @ngpios: Number of GPIOs in the forwarder.
 * @descs: Array containing the GPIO descriptors to forward to.
 *	   This array must contain @ngpios entries, and must not be deallocated
 *	   before the forwarder has been destroyed again.
 * @features: Bitwise ORed features as defined with FWD_FEATURE_*.
 *
 * This function creates a new gpiochip, which forwards all GPIO operations to
 * the passed GPIO descriptors.
 *
 * Return: An opaque object pointer, or an ERR_PTR()-encoded negative error
 *	   code on failure.
 */
static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
						unsigned int ngpios,
						struct gpio_desc *descs[],
						unsigned long features)
{
	const char *label = dev_name(dev);
	struct gpiochip_fwd *fwd;
	struct gpio_chip *chip;
	unsigned int i;
	int error;

	fwd = devm_kzalloc(dev, struct_size(fwd, tmp, fwd_tmp_size(ngpios)),
			   GFP_KERNEL);
	if (!fwd)
		return ERR_PTR(-ENOMEM);

	chip = &fwd->chip;

	/*
	 * If any of the GPIO lines are sleeping, then the entire forwarder
	 * will be sleeping.
	 * If any of the chips support .set_config(), then the forwarder will
	 * support setting configs.
	 */
	for (i = 0; i < ngpios; i++) {
		struct gpio_chip *parent = gpiod_to_chip(descs[i]);

		dev_dbg(dev, "%u => gpio %d irq %d\n", i,
			desc_to_gpio(descs[i]), gpiod_to_irq(descs[i]));

		if (gpiod_cansleep(descs[i]))
			chip->can_sleep = true;
		if (parent && parent->set_config)
			chip->set_config = gpio_fwd_set_config;
	}

	chip->label = label;
	chip->parent = dev;
	chip->owner = THIS_MODULE;
	chip->get_direction = gpio_fwd_get_direction;
	chip->direction_input = gpio_fwd_direction_input;
	chip->direction_output = gpio_fwd_direction_output;
	chip->get = gpio_fwd_get;
	chip->get_multiple = gpio_fwd_get_multiple_locked;
	chip->set = gpio_fwd_set;
	chip->set_multiple = gpio_fwd_set_multiple_locked;
	chip->to_irq = gpio_fwd_to_irq;
	chip->base = -1;
	chip->ngpio = ngpios;
	fwd->descs = descs;

	if (chip->can_sleep)
		mutex_init(&fwd->mlock);
	else
		spin_lock_init(&fwd->slock);

	if (features & FWD_FEATURE_DELAY) {
		error = gpiochip_fwd_setup_delay_line(dev, chip, fwd);
		if (error)
			return ERR_PTR(error);
	}

	error = devm_gpiochip_add_data(dev, chip, fwd);
	if (error)
		return ERR_PTR(error);

	return fwd;
}


/*
 * GPIO Aggregator platform device
 */

static int gpio_aggregator_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct gpio_desc **descs;
	struct gpiochip_fwd *fwd;
	unsigned long features;
	int i, n;

	n = gpiod_count(dev, NULL);
	if (n < 0)
		return n;

	descs = devm_kmalloc_array(dev, n, sizeof(*descs), GFP_KERNEL);
	if (!descs)
		return -ENOMEM;

	for (i = 0; i < n; i++) {
		descs[i] = devm_gpiod_get_index(dev, NULL, i, GPIOD_ASIS);
		if (IS_ERR(descs[i]))
			return PTR_ERR(descs[i]);
	}

	features = (uintptr_t)device_get_match_data(dev);
	fwd = gpiochip_fwd_create(dev, n, descs, features);
	if (IS_ERR(fwd))
		return PTR_ERR(fwd);

	platform_set_drvdata(pdev, fwd);
	return 0;
}

static const struct of_device_id gpio_aggregator_dt_ids[] = {
	{
		.compatible = "gpio-delay",
		.data = (void *)FWD_FEATURE_DELAY,
	},
	/*
	 * Add GPIO-operated devices controlled from userspace below,
	 * or use "driver_override" in sysfs.
	 */
	{}
};
MODULE_DEVICE_TABLE(of, gpio_aggregator_dt_ids);

static struct platform_driver gpio_aggregator_driver = {
	.probe = gpio_aggregator_probe,
	.driver = {
		.name = DRV_NAME,
		.groups = gpio_aggregator_groups,
		.of_match_table = gpio_aggregator_dt_ids,
	},
};

static int __init gpio_aggregator_init(void)
{
	return platform_driver_register(&gpio_aggregator_driver);
}
module_init(gpio_aggregator_init);

static void __exit gpio_aggregator_exit(void)
{
	gpio_aggregator_remove_all();
	platform_driver_unregister(&gpio_aggregator_driver);
}
module_exit(gpio_aggregator_exit);

MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>");
MODULE_DESCRIPTION("GPIO Aggregator");
MODULE_LICENSE("GPL v2");