// SPDX-License-Identifier: GPL-2.0
//
// regmap based irq_chip
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>

#include <linux/array_size.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/overflow.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include "internal.h"

/*
 * Runtime state for one regmap-based interrupt controller instance,
 * allocated by regmap_add_irq_chip_fwnode() and freed by
 * regmap_del_irq_chip().
 */
struct regmap_irq_chip_data {
	struct mutex lock;		/* serialises bus_lock/bus_sync_unlock */
	struct lock_class_key lock_key;	/* per-instance key for nested regmap-irq chips */
	struct irq_chip irq_chip;	/* per-instance copy of regmap_irq_chip template */

	struct regmap *map;
	const struct regmap_irq_chip *chip;	/* static description from the driver */

	int irq_base;			/* first Linux IRQ number if legacy mapping used */
	struct irq_domain *domain;

	int irq;			/* primary (parent) interrupt */
	int wake_count;			/* pending wake enable/disable delta for the parent */

	/* Raw bulk-read buffer, sized num_regs * val_bytes (see read_irq_data()) */
	void *status_reg_buf;
	unsigned int *main_status_buf;
	unsigned int *status_buf;
	unsigned int *prev_status_buf;	/* previous levels, only for status_is_level chips */
	unsigned int *mask_buf;		/* current mask state, written back on sync_unlock */
	unsigned int *mask_buf_def;	/* union of all IRQ masks per register */
	unsigned int *wake_buf;
	unsigned int *type_buf;
	unsigned int *type_buf_def;
	unsigned int **config_buf;	/* [num_config_bases][num_config_regs] */

	unsigned int irq_reg_stride;

	/* Maps (base, register index) to a register address; driver-overridable */
	unsigned int (*get_irq_reg)(struct regmap_irq_chip_data *data,
				    unsigned int base, int index);

	/* Set when clear_on_unmask requires a status read on the next sync */
	unsigned int clear_status:1;
};

/* Look up the static per-IRQ description for a hardware IRQ number. */
static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
				     int irq)
{
	return &data->chip->irqs[irq];
}

/*
 * Whether the status registers are laid out contiguously enough for a
 * single regmap_bulk_read() to fetch them all.
 */
static bool regmap_irq_can_bulk_read_status(struct regmap_irq_chip_data *data)
{
	struct regmap *map = data->map;

	/*
	 * While possible that a user-defined ->get_irq_reg() callback might
	 * be linear enough to support bulk reads, most of the time it won't.
	 * Therefore only allow them if the default callback is being used.
	 */
	return data->irq_reg_stride == 1 && map->reg_stride == 1 &&
	       data->get_irq_reg == regmap_irq_get_irq_reg_linear &&
	       !map->use_single_read;
}

/* irq_chip ->irq_bus_lock: taken around any mask/type/wake changes. */
static void regmap_irq_lock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);

	mutex_lock(&d->lock);
}

/*
 * irq_chip ->irq_bus_sync_unlock: push all cached mask/wake/type/config
 * state accumulated under the bus lock out to the hardware, then release
 * the lock.  Write order (mask, unmask, wake, ack, config) is deliberate.
 */
static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	int i, j, ret;
	u32 reg;
	u32 val;

	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

	if (d->clear_status) {
		/* Reading the status registers clears them on these chips */
		for (i = 0; i < d->chip->num_regs; i++) {
			reg = d->get_irq_reg(d, d->chip->status_base, i);

			ret = regmap_read(map, reg, &val);
			if (ret)
				dev_err(d->map->dev,
					"Failed to clear the interrupt status bits\n");
		}

		d->clear_status = false;
	}

	/*
	 * If there's been a change in the mask write it back to the
	 * hardware. We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		if (d->chip->handle_mask_sync)
			d->chip->handle_mask_sync(i, d->mask_buf_def[i],
						  d->mask_buf[i],
						  d->chip->irq_drv_data);

		if (d->chip->mask_base && !d->chip->handle_mask_sync) {
			reg = d->get_irq_reg(d, d->chip->mask_base, i);
			ret = regmap_update_bits(d->map, reg,
					d->mask_buf_def[i],
					d->mask_buf[i]);
			if (ret)
				dev_err(d->map->dev, "Failed to sync masks in %x\n", reg);
		}

		/* Unmask registers use inverted polarity vs mask_buf */
		if (d->chip->unmask_base && !d->chip->handle_mask_sync) {
			reg = d->get_irq_reg(d, d->chip->unmask_base, i);
			ret = regmap_update_bits(d->map, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
			if (ret)
				dev_err(d->map->dev, "Failed to sync masks in %x\n",
					reg);
		}

		reg = d->get_irq_reg(d, d->chip->wake_base, i);
		if (d->wake_buf) {
			if (d->chip->wake_invert)
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 ~d->wake_buf[i]);
			else
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev,
					"Failed to sync wakes in %x: %d\n",
					reg, ret);
		}

		if (!d->chip->init_ack_masked)
			continue;
		/*
		 * Ack all the masked interrupts unconditionally,
		 * OR if there is masked interrupt which hasn't been Acked,
		 * it'll be ignored in irq handler, then may introduce irq storm
		 */
		if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
			reg = d->get_irq_reg(d, d->chip->ack_base, i);

			/* some chips ack by write 0 */
			if (d->chip->ack_invert)
				ret = regmap_write(map, reg, ~d->mask_buf[i]);
			else
				ret = regmap_write(map, reg, d->mask_buf[i]);
			if (d->chip->clear_ack) {
				/* clear_ack chips need a second, opposite write */
				if (d->chip->ack_invert && !ret)
					ret = regmap_write(map, reg, UINT_MAX);
				else if (!ret)
					ret = regmap_write(map, reg, 0);
			}
			if (ret != 0)
				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	/* Flush any cached trigger-type configuration registers */
	for (i = 0; i < d->chip->num_config_bases; i++) {
		for (j = 0; j < d->chip->num_config_regs; j++) {
			reg = d->get_irq_reg(d, d->chip->config_base[i], j);
			ret = regmap_write(map, reg, d->config_buf[i][j]);
			if (ret)
				dev_err(d->map->dev,
					"Failed to write config %x: %d\n",
					reg, ret);
		}
	}

	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

	/* If we've changed our wakeup count propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			disable_irq_wake(d->irq);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			enable_irq_wake(d->irq);

	d->wake_count = 0;

	mutex_unlock(&d->lock);
}

/*
 * irq_chip ->irq_enable: clear the IRQ's bit(s) in the cached mask; the
 * hardware write happens later in regmap_irq_sync_unlock().
 */
static void regmap_irq_enable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
	unsigned int reg = irq_data->reg_offset / map->reg_stride;
	unsigned int mask;

	/*
	 * The type_in_mask flag means that the underlying hardware uses
	 * separate mask bits for each interrupt trigger type, but we want
	 * to have a single logical interrupt with a configurable type.
	 *
	 * If the interrupt we're enabling defines any supported types
	 * then instead of using the regular mask bits for this interrupt,
	 * use the value previously written to the type buffer at the
	 * corresponding offset in regmap_irq_set_type().
	 */
	if (d->chip->type_in_mask && irq_data->type.types_supported)
		mask = d->type_buf[reg] & irq_data->mask;
	else
		mask = irq_data->mask;

	if (d->chip->clear_on_unmask)
		d->clear_status = true;

	d->mask_buf[reg] &= ~mask;
}

/* irq_chip ->irq_disable: set the IRQ's bit(s) in the cached mask. */
static void regmap_irq_disable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
}

/*
 * irq_chip ->irq_set_type: record the requested trigger type in the
 * type and/or config buffers; hardware sync happens on bus unlock.
 * Returns 0 if the type is simply unsupported by this IRQ.
 */
static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
	int reg, ret;
	const struct regmap_irq_type *t = &irq_data->type;

	if ((t->types_supported & type) != type)
		return 0;

	reg = t->type_reg_offset / map->reg_stride;

	if (d->chip->type_in_mask) {
		/* Type bits live in the mask register: stage them in type_buf */
		ret = regmap_irq_set_type_config_simple(&d->type_buf, type,
							irq_data, reg,
							d->chip->irq_drv_data);
		if (ret)
			return ret;
	}

	if (d->chip->set_type_config) {
		ret = d->chip->set_type_config(d->config_buf, type, irq_data,
					       reg, d->chip->irq_drv_data);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * irq_chip ->irq_set_wake: stage the wake mask change and count the
 * delta so sync_unlock can propagate it to the parent interrupt.
 */
static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	if (on) {
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				&= ~irq_data->mask;
		d->wake_count++;
	} else {
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				|= irq_data->mask;
		d->wake_count--;
	}

	return 0;
}

/* Template irq_chip; copied per-instance so ->name can be customised. */
static const struct irq_chip regmap_irq_chip = {
	.irq_bus_lock		= regmap_irq_lock,
	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
	.irq_disable		= regmap_irq_disable,
	.irq_enable		= regmap_irq_enable,
	.irq_set_type		= regmap_irq_set_type,
	.irq_set_wake		= regmap_irq_set_wake,
};

/*
 * Read the status register(s) behind main-status bit @b into status_buf.
 * With sub_reg_offsets the driver supplies explicit register offsets,
 * which are not interchangeable with ->get_irq_reg() indices.
 */
static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
				    unsigned int b)
{
	const struct regmap_irq_chip *chip = data->chip;
	const struct regmap_irq_sub_irq_map *subreg;
	struct regmap *map = data->map;
	unsigned int reg;
	int i, ret = 0;

	if (!chip->sub_reg_offsets) {
		reg = data->get_irq_reg(data, chip->status_base, b);
		ret = regmap_read(map, reg, &data->status_buf[b]);
	} else {
		/*
		 * Note we can't use ->get_irq_reg() here because the offsets
		 * in 'subreg' are *not* interchangeable with indices.
		 */
		subreg = &chip->sub_reg_offsets[b];
		for (i = 0; i < subreg->num_regs; i++) {
			unsigned int offset = subreg->offset[i];
			unsigned int index = offset / map->reg_stride;

			ret = regmap_read(map, chip->status_base + offset,
					  &data->status_buf[index]);
			if (ret)
				break;
		}
	}
	return ret;
}

/*
 * Populate data->status_buf with the current interrupt status, choosing
 * the cheapest strategy the chip supports (none / main-status indexed /
 * bulk / register-by-register).  Returns 0 or a negative errno.
 */
static int read_irq_data(struct regmap_irq_chip_data *data)
{
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	int ret, i;
	u32 reg;

	/*
	 * Read only registers with active IRQs if the chip has 'main status
	 * register'. Else read in the statuses, using a single bulk read if
	 * possible in order to reduce the I/O overheads.
	 */

	if (chip->no_status) {
		/* no status register so default to all active */
		memset32(data->status_buf, GENMASK(31, 0), chip->num_regs);
	} else if (chip->num_main_regs) {
		unsigned int max_main_bits;

		max_main_bits = (chip->num_main_status_bits) ?
			chip->num_main_status_bits : chip->num_regs;
		/* Clear the status buf as we don't read all status regs */
		memset32(data->status_buf, 0, chip->num_regs);

		/* We could support bulk read for main status registers
		 * but I don't expect to see devices with really many main
		 * status registers so let's only support single reads for the
		 * sake of simplicity. and add bulk reads only if needed
		 */
		for (i = 0; i < chip->num_main_regs; i++) {
			reg = data->get_irq_reg(data, chip->main_status, i);
			ret = regmap_read(map, reg, &data->main_status_buf[i]);
			if (ret) {
				dev_err(map->dev, "Failed to read IRQ status %d\n", ret);
				return ret;
			}
		}

		/* Read sub registers with active IRQs */
		for (i = 0; i < chip->num_main_regs; i++) {
			unsigned int b;
			const unsigned long mreg = data->main_status_buf[i];

			for_each_set_bit(b, &mreg, map->format.val_bytes * 8) {
				if (i * map->format.val_bytes * 8 + b >
				    max_main_bits)
					break;
				ret = read_sub_irq_data(data, b);

				if (ret != 0) {
					dev_err(map->dev, "Failed to read IRQ status %d\n", ret);
					return ret;
				}
			}

		}
	} else if (regmap_irq_can_bulk_read_status(data)) {

		/* One view per register width; only the matching one is used */
		u8 *buf8 = data->status_reg_buf;
		u16 *buf16 = data->status_reg_buf;
		u32 *buf32 = data->status_reg_buf;

		BUG_ON(!data->status_reg_buf);

		ret = regmap_bulk_read(map, chip->status_base,
				       data->status_reg_buf,
				       chip->num_regs);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n", ret);
			return ret;
		}

		/* Widen the raw values into the unsigned int status_buf */
		for (i = 0; i < data->chip->num_regs; i++) {
			switch (map->format.val_bytes) {
			case 1:
				data->status_buf[i] = buf8[i];
				break;
			case 2:
				data->status_buf[i] = buf16[i];
				break;
			case 4:
				data->status_buf[i] = buf32[i];
				break;
			default:
				BUG();
				return -EIO;
			}
		}

	} else {
		for (i = 0; i < data->chip->num_regs; i++) {
			unsigned int reg = data->get_irq_reg(data,
					data->chip->status_base, i);
			ret = regmap_read(map, reg, &data->status_buf[i]);

			if (ret != 0) {
				dev_err(map->dev, "Failed to read IRQ status: %d\n", ret);
				return ret;
			}
		}
	}

	if (chip->status_invert)
		for (i = 0; i < data->chip->num_regs; i++)
			data->status_buf[i] = ~data->status_buf[i];

	return 0;
}

/*
 * Threaded handler for the primary interrupt: reads status, acks early,
 * then dispatches each pending bit as a nested virtual IRQ.
 */
static irqreturn_t regmap_irq_thread(int irq, void *d)
{
	struct regmap_irq_chip_data *data = d;
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	int ret, i;
	bool handled = false;
	u32 reg;

	if (chip->handle_pre_irq)
		chip->handle_pre_irq(chip->irq_drv_data);

	if (chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0) {
			dev_err(map->dev, "IRQ thread failed to resume: %d\n", ret);
			goto exit;
		}
	}

	ret = read_irq_data(data);
	if (ret < 0)
		goto exit;

	if (chip->status_is_level) {
		/* Level chips: convert levels to edges by XOR with last read */
		for (i = 0; i < data->chip->num_regs; i++) {
			unsigned int val = data->status_buf[i];

			data->status_buf[i] ^= data->prev_status_buf[i];
			data->prev_status_buf[i] = val;
		}
	}

	/*
	 * Ignore masked IRQs and ack if we need to; we ack early so
	 * there is no race between handling and acknowledging the
	 * interrupt. We assume that typically few of the interrupts
	 * will fire simultaneously so don't worry about overhead from
	 * doing a write per register.
	 */
	for (i = 0; i < data->chip->num_regs; i++) {
		data->status_buf[i] &= ~data->mask_buf[i];

		if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = data->get_irq_reg(data, data->chip->ack_base, i);

			if (chip->ack_invert)
				ret = regmap_write(map, reg,
						~data->status_buf[i]);
			else
				ret = regmap_write(map, reg,
						data->status_buf[i]);
			if (chip->clear_ack) {
				/* clear_ack chips need a second, opposite write */
				if (chip->ack_invert && !ret)
					ret = regmap_write(map, reg, UINT_MAX);
				else if (!ret)
					ret = regmap_write(map, reg, 0);
			}
			if (ret != 0)
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	/* Dispatch every unmasked pending bit to its nested handler */
	for (i = 0; i < chip->num_irqs; i++) {
		if (data->status_buf[chip->irqs[i].reg_offset /
				     map->reg_stride] & chip->irqs[i].mask) {
			handle_nested_irq(irq_find_mapping(data->domain, i));
			handled = true;
		}
	}

exit:
	if (chip->handle_post_irq)
		chip->handle_post_irq(chip->irq_drv_data);

	if (chip->runtime_pm)
		pm_runtime_put(map->dev);

	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static struct lock_class_key regmap_irq_lock_class;
static struct lock_class_key regmap_irq_request_class;

/* irq_domain ->map: wire a new virtual IRQ up to this chip instance. */
static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct regmap_irq_chip_data *data = h->host_data;

	irq_set_chip_data(virq, data);
	irq_set_lockdep_class(virq, &regmap_irq_lock_class,
			      &regmap_irq_request_class);
	irq_set_chip(virq, &data->irq_chip);
	/* Handlers run in the primary IRQ's thread context */
	irq_set_nested_thread(virq, 1);
	irq_set_parent(virq, data->irq);
	irq_set_noprobe(virq);

	return 0;
}

static const struct irq_domain_ops regmap_domain_ops = {
	.map	= regmap_irq_map,
	.xlate	= irq_domain_xlate_onetwocell,
};

/**
 * regmap_irq_get_irq_reg_linear() - Linear IRQ register mapping callback.
 * @data: Data for the &struct regmap_irq_chip
 * @base: Base register
 * @index: Register index
 *
 * Returns the register address corresponding to the given @base and @index
 * by the formula ``base + index * regmap_stride * irq_reg_stride``.
 */
unsigned int regmap_irq_get_irq_reg_linear(struct regmap_irq_chip_data *data,
					   unsigned int base, int index)
{
	struct regmap *map = data->map;

	return base + index * map->reg_stride * data->irq_reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_irq_get_irq_reg_linear);

/**
 * regmap_irq_set_type_config_simple() - Simple IRQ type configuration callback.
 * @buf: Buffer containing configuration register values, this is a 2D array of
 *       `num_config_bases` rows, each of `num_config_regs` elements.
 * @type: The requested IRQ type.
 * @irq_data: The IRQ being configured.
 * @idx: Index of the irq's config registers within each array `buf[i]`
 * @irq_drv_data: Driver specific IRQ data
 *
 * This is a &struct regmap_irq_chip->set_type_config callback suitable for
 * chips with one config register. Register values are updated according to
 * the &struct regmap_irq_type data associated with an IRQ.
 */
int regmap_irq_set_type_config_simple(unsigned int **buf, unsigned int type,
				      const struct regmap_irq *irq_data,
				      int idx, void *irq_drv_data)
{
	const struct regmap_irq_type *t = &irq_data->type;

	/* Clear all of this IRQ's type bits before setting the new type */
	if (t->type_reg_mask)
		buf[0][idx] &= ~t->type_reg_mask;
	else
		buf[0][idx] &= ~(t->type_falling_val |
				 t->type_rising_val |
				 t->type_level_low_val |
				 t->type_level_high_val);

	switch (type) {
	case IRQ_TYPE_EDGE_FALLING:
		buf[0][idx] |= t->type_falling_val;
		break;

	case IRQ_TYPE_EDGE_RISING:
		buf[0][idx] |= t->type_rising_val;
		break;

	case IRQ_TYPE_EDGE_BOTH:
		buf[0][idx] |= (t->type_falling_val |
				t->type_rising_val);
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		buf[0][idx] |= t->type_level_high_val;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		buf[0][idx] |= t->type_level_low_val;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_irq_set_type_config_simple);

/*
 * Create the irq_domain covering all of the chip's IRQs, optionally with
 * a legacy linear mapping starting at @irq_base.
 */
static int regmap_irq_create_domain(struct fwnode_handle *fwnode, int irq_base,
				    const struct regmap_irq_chip *chip,
				    struct regmap_irq_chip_data *d)
{
	struct irq_domain_info info = {
		.fwnode = fwnode,
		.size = chip->num_irqs,
		.hwirq_max = chip->num_irqs,
		.virq_base = irq_base,
		.ops = &regmap_domain_ops,
		.host_data = d,
		.name_suffix = chip->domain_suffix,
	};

	d->domain = irq_domain_instantiate(&info);
	if (IS_ERR(d->domain)) {
		dev_err(d->map->dev, "Failed to create IRQ domain\n");
		return PTR_ERR(d->domain);
	}

	return 0;
}


/**
 * regmap_add_irq_chip_fwnode() - Use standard regmap IRQ controller handling
 *
 * @fwnode: The firmware node where the IRQ domain should be added to.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache. The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
			       struct regmap *map, int irq,
			       int irq_flags, int irq_base,
			       const struct regmap_irq_chip *chip,
			       struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data *d;
	int i;
	int ret = -ENOMEM;
	u32 reg;

	/* Validate the chip description before allocating anything */
	if (chip->num_regs <= 0)
		return -EINVAL;

	/* clear_on_unmask (read-to-clear) is incompatible with explicit ack */
	if (chip->clear_on_unmask && (chip->ack_base || chip->use_ack))
		return -EINVAL;

	if (chip->mask_base && chip->unmask_base && !chip->mask_unmask_non_inverted)
		return -EINVAL;

	for (i = 0; i < chip->num_irqs; i++) {
		if (chip->irqs[i].reg_offset % map->reg_stride)
			return -EINVAL;
		if (chip->irqs[i].reg_offset / map->reg_stride >=
		    chip->num_regs)
			return -EINVAL;
	}

	if (irq_base) {
		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
		if (irq_base < 0) {
			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
				 irq_base);
			return irq_base;
		}
	}

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	/* Allocate the per-register bookkeeping buffers */
	if (chip->num_main_regs) {
		d->main_status_buf = kcalloc(chip->num_main_regs,
					     sizeof(*d->main_status_buf),
					     GFP_KERNEL);

		if (!d->main_status_buf)
			goto err_alloc;
	}

	d->status_buf = kcalloc(chip->num_regs, sizeof(*d->status_buf),
				GFP_KERNEL);
	if (!d->status_buf)
		goto err_alloc;

	if (chip->status_is_level) {
		d->prev_status_buf = kcalloc(chip->num_regs,
					     sizeof(*d->prev_status_buf),
					     GFP_KERNEL);
		if (!d->prev_status_buf)
			goto err_alloc;
	}

	d->mask_buf = kcalloc(chip->num_regs, sizeof(*d->mask_buf),
			      GFP_KERNEL);
	if (!d->mask_buf)
		goto err_alloc;

	d->mask_buf_def = kcalloc(chip->num_regs, sizeof(*d->mask_buf_def),
				  GFP_KERNEL);
	if (!d->mask_buf_def)
		goto err_alloc;

	if (chip->wake_base) {
		d->wake_buf = kcalloc(chip->num_regs, sizeof(*d->wake_buf),
				      GFP_KERNEL);
		if (!d->wake_buf)
			goto err_alloc;
	}

	if (chip->type_in_mask) {
		d->type_buf_def = kcalloc(chip->num_regs,
					  sizeof(*d->type_buf_def), GFP_KERNEL);
		if (!d->type_buf_def)
			goto err_alloc;

		d->type_buf = kcalloc(chip->num_regs, sizeof(*d->type_buf), GFP_KERNEL);
		if (!d->type_buf)
			goto err_alloc;
	}

	if (chip->num_config_bases && chip->num_config_regs) {
		/*
		 * Create config_buf[num_config_bases][num_config_regs]
		 */
		d->config_buf = kcalloc(chip->num_config_bases,
					sizeof(*d->config_buf), GFP_KERNEL);
		if (!d->config_buf)
			goto err_alloc;

		for (i = 0; i < chip->num_config_bases; i++) {
			d->config_buf[i] = kcalloc(chip->num_config_regs,
						   sizeof(**d->config_buf),
						   GFP_KERNEL);
			if (!d->config_buf[i])
				goto err_alloc;
		}
	}

	/* Per-instance copy of the template so ->name can differ per chip */
	d->irq_chip = regmap_irq_chip;
	d->irq_chip.name = chip->name;
	d->irq = irq;
	d->map = map;
	d->chip = chip;
	d->irq_base = irq_base;

	if (chip->irq_reg_stride)
		d->irq_reg_stride = chip->irq_reg_stride;
	else
		d->irq_reg_stride = 1;

	if (chip->get_irq_reg)
		d->get_irq_reg = chip->get_irq_reg;
	else
		d->get_irq_reg = regmap_irq_get_irq_reg_linear;

	/* Must come after get_irq_reg/irq_reg_stride are finalised */
	if (regmap_irq_can_bulk_read_status(d)) {
		d->status_reg_buf = kmalloc_array(chip->num_regs,
						  map->format.val_bytes,
						  GFP_KERNEL);
		if (!d->status_reg_buf)
			goto err_alloc;
	}

	/*
	 * If one regmap-irq is the parent of another then we'll try
	 * to lock the child with the parent locked, use an explicit
	 * lock_key so lockdep can figure out what's going on.
	 */
	lockdep_register_key(&d->lock_key);
	mutex_init_with_key(&d->lock, &d->lock_key);

	for (i = 0; i < chip->num_irqs; i++)
		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
			|= chip->irqs[i].mask;

	/* Mask all the interrupts by default */
	for (i = 0; i < chip->num_regs; i++) {
		d->mask_buf[i] = d->mask_buf_def[i];

		if (chip->handle_mask_sync) {
			ret = chip->handle_mask_sync(i, d->mask_buf_def[i],
						     d->mask_buf[i],
						     chip->irq_drv_data);
			if (ret)
				goto err_mutex;
		}

		if (chip->mask_base && !chip->handle_mask_sync) {
			reg = d->get_irq_reg(d, chip->mask_base, i);
			ret = regmap_update_bits(d->map, reg,
						 d->mask_buf_def[i],
						 d->mask_buf[i]);
			if (ret) {
				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
					reg, ret);
				goto err_mutex;
			}
		}

		if (chip->unmask_base && !chip->handle_mask_sync) {
			reg = d->get_irq_reg(d, chip->unmask_base, i);
			ret = regmap_update_bits(d->map, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
			if (ret) {
				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
					reg, ret);
				goto err_mutex;
			}
		}

		if (!chip->init_ack_masked)
			continue;

		/* Ack masked but set interrupts */
		if (d->chip->no_status) {
			/* no status register so default to all active */
			d->status_buf[i] = UINT_MAX;
		} else {
			reg = d->get_irq_reg(d, d->chip->status_base, i);
			ret = regmap_read(map, reg, &d->status_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to read IRQ status: %d\n",
					ret);
				goto err_mutex;
			}
		}

		if (chip->status_invert)
			d->status_buf[i] = ~d->status_buf[i];

		if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = d->get_irq_reg(d, d->chip->ack_base, i);
			if (chip->ack_invert)
				ret = regmap_write(map, reg,
					~(d->status_buf[i] & d->mask_buf[i]));
			else
				ret = regmap_write(map, reg,
					d->status_buf[i] & d->mask_buf[i]);
			if (chip->clear_ack) {
				/* clear_ack chips need a second, opposite write */
				if (chip->ack_invert && !ret)
					ret = regmap_write(map, reg, UINT_MAX);
				else if (!ret)
					ret = regmap_write(map, reg, 0);
			}
			if (ret != 0) {
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
				goto err_mutex;
			}
		}
	}

	/* Wake is disabled by default */
	if (d->wake_buf) {
		for (i = 0; i < chip->num_regs; i++) {
			d->wake_buf[i] = d->mask_buf_def[i];
			reg = d->get_irq_reg(d, d->chip->wake_base, i);

			if (chip->wake_invert)
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 0);
			else
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
					reg, ret);
				goto err_mutex;
			}
		}
	}

	/* Store current levels */
	if (chip->status_is_level) {
		ret = read_irq_data(d);
		if (ret < 0)
			goto err_mutex;

		memcpy(d->prev_status_buf, d->status_buf,
		       array_size(d->chip->num_regs, sizeof(d->prev_status_buf[0])));
	}

	ret = regmap_irq_create_domain(fwnode, irq_base, chip, d);
	if (ret)
		goto err_mutex;

	ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
				   irq_flags | IRQF_ONESHOT,
				   chip->name, d);
	if (ret != 0) {
		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
			irq, chip->name, ret);
		goto err_domain;
	}

	*data = d;

	return 0;

err_domain:
	/* Should really dispose of the domain but... */
err_mutex:
	mutex_destroy(&d->lock);
	lockdep_unregister_key(&d->lock_key);
err_alloc:
	/* kfree(NULL) is a no-op, so unallocated buffers are safe here */
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->main_status_buf);
	kfree(d->status_buf);
	kfree(d->prev_status_buf);
	kfree(d->status_reg_buf);
	if (d->config_buf) {
		for (i = 0; i < chip->num_config_bases; i++)
			kfree(d->config_buf[i]);
		kfree(d->config_buf);
	}
	kfree(d);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip_fwnode);

/**
 * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
 *
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * This is the same as regmap_add_irq_chip_fwnode, except that the firmware
 * node of the regmap is used.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
			int irq_base, const struct regmap_irq_chip *chip,
			struct regmap_irq_chip_data **data)
{
	return regmap_add_irq_chip_fwnode(dev_fwnode(map->dev), map, irq,
					  irq_flags, irq_base, chip, data);
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);

/**
 * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
 *
 * @irq: Primary IRQ for the device
 * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip()
 *
 * This function also disposes of all mapped IRQs on the chip.
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
	unsigned int virq;
	int i, hwirq;

	if (!d)
		return;

	/* Stop the primary handler before tearing down the mappings */
	free_irq(irq, d);

	/* Dispose all virtual irq from irq domain before removing it */
	for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
		/* Ignore hwirq if holes in the IRQ list */
		if (!d->chip->irqs[hwirq].mask)
			continue;

		/*
		 * Find the virtual irq of hwirq on chip and if it is
		 * there then dispose it
		 */
		virq = irq_find_mapping(d->domain, hwirq);
		if (virq)
			irq_dispose_mapping(virq);
	}

	irq_domain_remove(d->domain);
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->main_status_buf);
	kfree(d->status_reg_buf);
	kfree(d->status_buf);
	kfree(d->prev_status_buf);
	if (d->config_buf) {
		for (i = 0; i < d->chip->num_config_bases; i++)
			kfree(d->config_buf[i]);
		kfree(d->config_buf);
	}
	mutex_destroy(&d->lock);
	lockdep_unregister_key(&d->lock_key);
	kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);

/* devres release callback: tear down the chip stored in the devres. */
static void devm_regmap_irq_chip_release(struct device *dev, void *res)
{
	struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;

	regmap_del_irq_chip(d->irq, d);
}

/* devres match callback: match the devres holding @data's chip pointer. */
static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)

{
	struct regmap_irq_chip_data **r = res;

	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}
	return *r == data;
}

/**
 * devm_regmap_add_irq_chip_fwnode() - Resource managed regmap_add_irq_chip_fwnode()
 *
 * @dev: The device pointer on which irq_chip belongs to.
 * @fwnode: The firmware node where the IRQ domain should be added to.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success
 *
 * Returns 0 on success or an errno on failure.
 *
 * The &regmap_irq_chip_data will be automatically released when the device is
 * unbound.
 */
int devm_regmap_add_irq_chip_fwnode(struct device *dev,
				    struct fwnode_handle *fwnode,
				    struct regmap *map, int irq,
				    int irq_flags, int irq_base,
				    const struct regmap_irq_chip *chip,
				    struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data **ptr, *d;
	int ret;

	ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = regmap_add_irq_chip_fwnode(fwnode, map, irq, irq_flags, irq_base,
					 chip, &d);
	if (ret < 0) {
		devres_free(ptr);
		return ret;
	}

	*ptr = d;
	devres_add(dev, ptr);
	*data = d;
	return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_fwnode);

/**
 * devm_regmap_add_irq_chip() - Resource managed regmap_add_irq_chip()
 *
 * @dev: The device pointer on which irq_chip belongs to.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success
 *
 * Returns 0 on success or an errno on failure.
 *
 * The &regmap_irq_chip_data will be automatically released when the device is
 * unbound.
 */
int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
			     int irq_flags, int irq_base,
			     const struct regmap_irq_chip *chip,
			     struct regmap_irq_chip_data **data)
{
	return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(map->dev), map,
					       irq, irq_flags, irq_base, chip,
					       data);
}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);

/**
 * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
 *
 * @dev: Device for which the resource was allocated.
 * @irq: Primary IRQ for the device.
 * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
 *
 * A resource managed version of regmap_del_irq_chip().
 */
void devm_regmap_del_irq_chip(struct device *dev, int irq,
			      struct regmap_irq_chip_data *data)
{
	int rc;

	WARN_ON(irq != data->irq);
	rc = devres_release(dev, devm_regmap_irq_chip_release,
			    devm_regmap_irq_chip_match, data);

	if (rc != 0)
		WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);

/**
 * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
 *
 * @data: regmap irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs.
 */
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
	/* Only meaningful when a legacy irq_base mapping was requested */
	WARN_ON(!data->irq_base);
	return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);

/**
 * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
 *
 * @data: regmap irq controller to operate on.
 * @irq: index of the interrupt requested in the chip IRQs.
 *
 * Useful for drivers to request their own IRQs.
1182 */ 1183 int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq) 1184 { 1185 /* Handle holes in the IRQ list */ 1186 if (!data->chip->irqs[irq].mask) 1187 return -EINVAL; 1188 1189 return irq_create_mapping(data->domain, irq); 1190 } 1191 EXPORT_SYMBOL_GPL(regmap_irq_get_virq); 1192 1193 /** 1194 * regmap_irq_get_domain() - Retrieve the irq_domain for the chip 1195 * 1196 * @data: regmap_irq controller to operate on. 1197 * 1198 * Useful for drivers to request their own IRQs and for integration 1199 * with subsystems. For ease of integration NULL is accepted as a 1200 * domain, allowing devices to just call this even if no domain is 1201 * allocated. 1202 */ 1203 struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data) 1204 { 1205 if (data) 1206 return data->domain; 1207 else 1208 return NULL; 1209 } 1210 EXPORT_SYMBOL_GPL(regmap_irq_get_domain); 1211