1 // SPDX-License-Identifier: GPL-2.0
2 //
3 // regmap based irq_chip
4 //
5 // Copyright 2011 Wolfson Microelectronics plc
6 //
7 // Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
8
9 #include <linux/array_size.h>
10 #include <linux/device.h>
11 #include <linux/export.h>
12 #include <linux/interrupt.h>
13 #include <linux/irq.h>
14 #include <linux/irqdomain.h>
15 #include <linux/overflow.h>
16 #include <linux/pm_runtime.h>
17 #include <linux/regmap.h>
18 #include <linux/slab.h>
19
20 #include "internal.h"
21
/*
 * Runtime state for one regmap-based IRQ controller instance.
 *
 * The cached register images below are indexed by register number
 * (0 .. chip->num_regs - 1).  Updates are staged under @lock by the
 * irq_chip callbacks and flushed to hardware in irq_bus_sync_unlock.
 */
struct regmap_irq_chip_data {
	struct mutex lock;		/* serialises all cached state below */
	struct irq_chip irq_chip;	/* per-instance copy of the template chip */

	struct regmap *map;
	const struct regmap_irq_chip *chip;	/* static description from the driver */

	int irq_base;			/* first Linux IRQ when a fixed range was requested */
	struct irq_domain *domain;

	int irq;			/* primary (parent) interrupt */
	int wake_count;			/* pending enable/disable_irq_wake balance */

	void *status_reg_buf;		/* raw bulk-read buffer, val_bytes per register */
	unsigned int *main_status_buf;	/* latest main status register values */
	unsigned int *status_buf;	/* latest per-register status values */
	unsigned int *prev_status_buf;	/* previous status snapshot, for level chips */
	unsigned int *mask_buf;		/* currently requested mask bits */
	unsigned int *mask_buf_def;	/* all maskable bits per register */
	unsigned int *wake_buf;		/* wake enable image, if wake_base is set */
	unsigned int *type_buf;		/* trigger-type image for type_in_mask chips */
	unsigned int *type_buf_def;	/* default trigger-type image */
	unsigned int **config_buf;	/* [num_config_bases][num_config_regs] */

	unsigned int irq_reg_stride;

	/* Maps (base, register index) to a register address */
	unsigned int (*get_irq_reg)(struct regmap_irq_chip_data *data,
				    unsigned int base, int index);

	unsigned int clear_status:1;	/* statuses must be read-to-clear on next sync */
};
53
54 static inline const
irq_to_regmap_irq(struct regmap_irq_chip_data * data,int irq)55 struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
56 int irq)
57 {
58 return &data->chip->irqs[irq];
59 }
60
regmap_irq_can_bulk_read_status(struct regmap_irq_chip_data * data)61 static bool regmap_irq_can_bulk_read_status(struct regmap_irq_chip_data *data)
62 {
63 struct regmap *map = data->map;
64
65 /*
66 * While possible that a user-defined ->get_irq_reg() callback might
67 * be linear enough to support bulk reads, most of the time it won't.
68 * Therefore only allow them if the default callback is being used.
69 */
70 return data->irq_reg_stride == 1 && map->reg_stride == 1 &&
71 data->get_irq_reg == regmap_irq_get_irq_reg_linear &&
72 !map->use_single_read;
73 }
74
/*
 * irq_bus_lock callback: taken before any irq_chip operation so that
 * mask/type/wake changes can be batched and written out on unlock.
 */
static void regmap_irq_lock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);

	mutex_lock(&d->lock);
}
81
/*
 * irq_bus_sync_unlock callback: flush all state cached while the bus
 * lock was held (masks, wake enables, trigger-type config) out to the
 * hardware, then drop the lock.
 */
static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	int i, j, ret;
	u32 reg;
	u32 val;

	/* Make sure the device is powered before touching registers */
	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

	/*
	 * clear_on_unmask chips clear status by reading it: read every
	 * status register once and discard the values.
	 */
	if (d->clear_status) {
		for (i = 0; i < d->chip->num_regs; i++) {
			reg = d->get_irq_reg(d, d->chip->status_base, i);

			ret = regmap_read(map, reg, &val);
			if (ret)
				dev_err(d->map->dev,
					"Failed to clear the interrupt status bits\n");
		}

		d->clear_status = false;
	}

	/*
	 * If there's been a change in the mask write it back to the
	 * hardware. We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		/* Driver override for mask synchronisation, if provided */
		if (d->chip->handle_mask_sync)
			d->chip->handle_mask_sync(i, d->mask_buf_def[i],
						  d->mask_buf[i],
						  d->chip->irq_drv_data);

		if (d->chip->mask_base && !d->chip->handle_mask_sync) {
			reg = d->get_irq_reg(d, d->chip->mask_base, i);
			ret = regmap_update_bits(d->map, reg,
					d->mask_buf_def[i],
					d->mask_buf[i]);
			if (ret)
				dev_err(d->map->dev, "Failed to sync masks in %x\n", reg);
		}

		/* Unmask registers take the inverted mask image */
		if (d->chip->unmask_base && !d->chip->handle_mask_sync) {
			reg = d->get_irq_reg(d, d->chip->unmask_base, i);
			ret = regmap_update_bits(d->map, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
			if (ret)
				dev_err(d->map->dev, "Failed to sync masks in %x\n",
					reg);
		}

		reg = d->get_irq_reg(d, d->chip->wake_base, i);
		if (d->wake_buf) {
			if (d->chip->wake_invert)
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 ~d->wake_buf[i]);
			else
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev,
					"Failed to sync wakes in %x: %d\n",
					reg, ret);
		}

		if (!d->chip->init_ack_masked)
			continue;
		/*
		 * Ack all the masked interrupts unconditionally,
		 * OR if there is masked interrupt which hasn't been Acked,
		 * it'll be ignored in irq handler, then may introduce irq storm
		 */
		if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
			reg = d->get_irq_reg(d, d->chip->ack_base, i);

			/* some chips ack by write 0 */
			if (d->chip->ack_invert)
				ret = regmap_write(map, reg, ~d->mask_buf[i]);
			else
				ret = regmap_write(map, reg, d->mask_buf[i]);
			/* clear_ack chips need a second write to release the ack */
			if (d->chip->clear_ack) {
				if (d->chip->ack_invert && !ret)
					ret = regmap_write(map, reg, UINT_MAX);
				else if (!ret)
					ret = regmap_write(map, reg, 0);
			}
			if (ret != 0)
				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	/* Push out any staged trigger-type configuration registers */
	for (i = 0; i < d->chip->num_config_bases; i++) {
		for (j = 0; j < d->chip->num_config_regs; j++) {
			reg = d->get_irq_reg(d, d->chip->config_base[i], j);
			ret = regmap_write(map, reg, d->config_buf[i][j]);
			if (ret)
				dev_err(d->map->dev,
					"Failed to write config %x: %d\n",
					reg, ret);
		}
	}

	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

	/* If we've changed our wakeup count propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			disable_irq_wake(d->irq);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			enable_irq_wake(d->irq);

	d->wake_count = 0;

	mutex_unlock(&d->lock);
}
208
regmap_irq_enable(struct irq_data * data)209 static void regmap_irq_enable(struct irq_data *data)
210 {
211 struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
212 struct regmap *map = d->map;
213 const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
214 unsigned int reg = irq_data->reg_offset / map->reg_stride;
215 unsigned int mask;
216
217 /*
218 * The type_in_mask flag means that the underlying hardware uses
219 * separate mask bits for each interrupt trigger type, but we want
220 * to have a single logical interrupt with a configurable type.
221 *
222 * If the interrupt we're enabling defines any supported types
223 * then instead of using the regular mask bits for this interrupt,
224 * use the value previously written to the type buffer at the
225 * corresponding offset in regmap_irq_set_type().
226 */
227 if (d->chip->type_in_mask && irq_data->type.types_supported)
228 mask = d->type_buf[reg] & irq_data->mask;
229 else
230 mask = irq_data->mask;
231
232 if (d->chip->clear_on_unmask)
233 d->clear_status = true;
234
235 d->mask_buf[reg] &= ~mask;
236 }
237
regmap_irq_disable(struct irq_data * data)238 static void regmap_irq_disable(struct irq_data *data)
239 {
240 struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
241 struct regmap *map = d->map;
242 const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
243
244 d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
245 }
246
regmap_irq_set_type(struct irq_data * data,unsigned int type)247 static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
248 {
249 struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
250 struct regmap *map = d->map;
251 const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
252 int reg, ret;
253 const struct regmap_irq_type *t = &irq_data->type;
254
255 if ((t->types_supported & type) != type)
256 return 0;
257
258 reg = t->type_reg_offset / map->reg_stride;
259
260 if (d->chip->type_in_mask) {
261 ret = regmap_irq_set_type_config_simple(&d->type_buf, type,
262 irq_data, reg, d->chip->irq_drv_data);
263 if (ret)
264 return ret;
265 }
266
267 if (d->chip->set_type_config) {
268 ret = d->chip->set_type_config(d->config_buf, type, irq_data,
269 reg, d->chip->irq_drv_data);
270 if (ret)
271 return ret;
272 }
273
274 return 0;
275 }
276
regmap_irq_set_wake(struct irq_data * data,unsigned int on)277 static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
278 {
279 struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
280 struct regmap *map = d->map;
281 const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
282
283 if (on) {
284 if (d->wake_buf)
285 d->wake_buf[irq_data->reg_offset / map->reg_stride]
286 &= ~irq_data->mask;
287 d->wake_count++;
288 } else {
289 if (d->wake_buf)
290 d->wake_buf[irq_data->reg_offset / map->reg_stride]
291 |= irq_data->mask;
292 d->wake_count--;
293 }
294
295 return 0;
296 }
297
/*
 * Template irq_chip, copied into each regmap_irq_chip_data instance so
 * the name can be set per chip.  All operations batch state under the
 * bus lock and flush it in irq_bus_sync_unlock.
 */
static const struct irq_chip regmap_irq_chip = {
	.irq_bus_lock		= regmap_irq_lock,
	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
	.irq_disable		= regmap_irq_disable,
	.irq_enable		= regmap_irq_enable,
	.irq_set_type		= regmap_irq_set_type,
	.irq_set_wake		= regmap_irq_set_wake,
};
306
/*
 * Read the status register(s) behind main-status bit @b into
 * data->status_buf.  Without a sub_reg_offsets table, main bit @b maps
 * 1:1 onto status register @b; with one, a single main bit may fan out
 * to several status registers at explicit offsets.
 */
static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
					   unsigned int b)
{
	const struct regmap_irq_chip *chip = data->chip;
	const struct regmap_irq_sub_irq_map *subreg;
	struct regmap *map = data->map;
	unsigned int reg;
	int i, ret = 0;

	if (!chip->sub_reg_offsets) {
		reg = data->get_irq_reg(data, chip->status_base, b);
		ret = regmap_read(map, reg, &data->status_buf[b]);
	} else {
		/*
		 * Note we can't use ->get_irq_reg() here because the offsets
		 * in 'subreg' are *not* interchangeable with indices.
		 */
		subreg = &chip->sub_reg_offsets[b];
		for (i = 0; i < subreg->num_regs; i++) {
			unsigned int offset = subreg->offset[i];
			unsigned int index = offset / map->reg_stride;

			ret = regmap_read(map, chip->status_base + offset,
					  &data->status_buf[index]);
			if (ret)
				break;
		}
	}
	return ret;
}
337
/*
 * Fill data->status_buf with the current raw interrupt status of every
 * status register, choosing the cheapest access pattern the chip
 * supports.  Returns 0 on success or a negative error code.
 */
static int read_irq_data(struct regmap_irq_chip_data *data)
{
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	int ret, i;
	u32 reg;

	/*
	 * Read only registers with active IRQs if the chip has 'main status
	 * register'. Else read in the statuses, using a single bulk read if
	 * possible in order to reduce the I/O overheads.
	 */

	if (chip->no_status) {
		/* no status register so default to all active */
		memset32(data->status_buf, GENMASK(31, 0), chip->num_regs);
	} else if (chip->num_main_regs) {
		unsigned int max_main_bits;

		max_main_bits = (chip->num_main_status_bits) ?
				chip->num_main_status_bits : chip->num_regs;
		/* Clear the status buf as we don't read all status regs */
		memset32(data->status_buf, 0, chip->num_regs);

		/* We could support bulk read for main status registers
		 * but I don't expect to see devices with really many main
		 * status registers so let's only support single reads for the
		 * sake of simplicity. and add bulk reads only if needed
		 */
		for (i = 0; i < chip->num_main_regs; i++) {
			reg = data->get_irq_reg(data, chip->main_status, i);
			ret = regmap_read(map, reg, &data->main_status_buf[i]);
			if (ret) {
				dev_err(map->dev, "Failed to read IRQ status %d\n", ret);
				return ret;
			}
		}

		/* Read sub registers with active IRQs */
		for (i = 0; i < chip->num_main_regs; i++) {
			unsigned int b;
			const unsigned long mreg = data->main_status_buf[i];

			for_each_set_bit(b, &mreg, map->format.val_bytes * 8) {
				/* Ignore bits past the advertised main-status width */
				if (i * map->format.val_bytes * 8 + b >
				    max_main_bits)
					break;
				ret = read_sub_irq_data(data, b);

				if (ret != 0) {
					dev_err(map->dev, "Failed to read IRQ status %d\n", ret);
					return ret;
				}
			}

		}
	} else if (regmap_irq_can_bulk_read_status(data)) {

		u8 *buf8 = data->status_reg_buf;
		u16 *buf16 = data->status_reg_buf;
		u32 *buf32 = data->status_reg_buf;

		BUG_ON(!data->status_reg_buf);

		ret = regmap_bulk_read(map, chip->status_base,
				       data->status_reg_buf,
				       chip->num_regs);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n", ret);
			return ret;
		}

		/* Widen the raw values into the unsigned int status buffer */
		for (i = 0; i < data->chip->num_regs; i++) {
			switch (map->format.val_bytes) {
			case 1:
				data->status_buf[i] = buf8[i];
				break;
			case 2:
				data->status_buf[i] = buf16[i];
				break;
			case 4:
				data->status_buf[i] = buf32[i];
				break;
			default:
				BUG();
				return -EIO;
			}
		}

	} else {
		/* Fallback: one regmap_read() per status register */
		for (i = 0; i < data->chip->num_regs; i++) {
			unsigned int reg = data->get_irq_reg(data,
					data->chip->status_base, i);
			ret = regmap_read(map, reg, &data->status_buf[i]);

			if (ret != 0) {
				dev_err(map->dev, "Failed to read IRQ status: %d\n", ret);
				return ret;
			}
		}
	}

	/* Active-low status chips: normalise so a set bit means pending */
	if (chip->status_invert)
		for (i = 0; i < data->chip->num_regs; i++)
			data->status_buf[i] = ~data->status_buf[i];

	return 0;
}
446
/*
 * Threaded handler for the primary interrupt: read all statuses, ack
 * pending unmasked interrupts, then dispatch each one as a nested IRQ.
 */
static irqreturn_t regmap_irq_thread(int irq, void *d)
{
	struct regmap_irq_chip_data *data = d;
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	int ret, i;
	bool handled = false;
	u32 reg;

	if (chip->handle_pre_irq)
		chip->handle_pre_irq(chip->irq_drv_data);

	if (chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0) {
			dev_err(map->dev, "IRQ thread failed to resume: %d\n", ret);
			goto exit;
		}
	}

	ret = read_irq_data(data);
	if (ret < 0)
		goto exit;

	/*
	 * Level chips report current levels rather than events: turn
	 * them into edges by XORing against the previous snapshot, then
	 * save the new levels for next time.
	 */
	if (chip->status_is_level) {
		for (i = 0; i < data->chip->num_regs; i++) {
			unsigned int val = data->status_buf[i];

			data->status_buf[i] ^= data->prev_status_buf[i];
			data->prev_status_buf[i] = val;
		}
	}

	/*
	 * Ignore masked IRQs and ack if we need to; we ack early so
	 * there is no race between handling and acknowledging the
	 * interrupt. We assume that typically few of the interrupts
	 * will fire simultaneously so don't worry about overhead from
	 * doing a write per register.
	 */
	for (i = 0; i < data->chip->num_regs; i++) {
		data->status_buf[i] &= ~data->mask_buf[i];

		if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = data->get_irq_reg(data, data->chip->ack_base, i);

			if (chip->ack_invert)
				ret = regmap_write(map, reg,
						~data->status_buf[i]);
			else
				ret = regmap_write(map, reg,
						data->status_buf[i]);
			/* clear_ack chips need a second write to release the ack */
			if (chip->clear_ack) {
				if (chip->ack_invert && !ret)
					ret = regmap_write(map, reg, UINT_MAX);
				else if (!ret)
					ret = regmap_write(map, reg, 0);
			}
			if (ret != 0)
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	/* Dispatch every hwirq whose status bit survived masking */
	for (i = 0; i < chip->num_irqs; i++) {
		if (data->status_buf[chip->irqs[i].reg_offset /
				     map->reg_stride] & chip->irqs[i].mask) {
			handle_nested_irq(irq_find_mapping(data->domain, i));
			handled = true;
		}
	}

exit:
	if (chip->handle_post_irq)
		chip->handle_post_irq(chip->irq_drv_data);

	if (chip->runtime_pm)
		pm_runtime_put(map->dev);

	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
531
/* Shared lockdep classes for all regmap IRQ chips' child interrupts */
static struct lock_class_key regmap_irq_lock_class;
static struct lock_class_key regmap_irq_request_class;
534
/* irq_domain ->map callback: wire a freshly created virq to this chip */
static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct regmap_irq_chip_data *data = h->host_data;

	irq_set_chip_data(virq, data);
	irq_set_lockdep_class(virq, &regmap_irq_lock_class, &regmap_irq_request_class);
	irq_set_chip(virq, &data->irq_chip);
	/* Child handlers run in the parent's threaded handler context */
	irq_set_nested_thread(virq, 1);
	irq_set_parent(virq, data->irq);
	irq_set_noprobe(virq);

	return 0;
}
549
/* Linear hwirq space; accepts one- or two-cell firmware specifiers */
static const struct irq_domain_ops regmap_domain_ops = {
	.map	= regmap_irq_map,
	.xlate	= irq_domain_xlate_onetwocell,
};
554
555 /**
556 * regmap_irq_get_irq_reg_linear() - Linear IRQ register mapping callback.
557 * @data: Data for the &struct regmap_irq_chip
558 * @base: Base register
559 * @index: Register index
560 *
561 * Returns the register address corresponding to the given @base and @index
562 * by the formula ``base + index * regmap_stride * irq_reg_stride``.
563 */
regmap_irq_get_irq_reg_linear(struct regmap_irq_chip_data * data,unsigned int base,int index)564 unsigned int regmap_irq_get_irq_reg_linear(struct regmap_irq_chip_data *data,
565 unsigned int base, int index)
566 {
567 struct regmap *map = data->map;
568
569 return base + index * map->reg_stride * data->irq_reg_stride;
570 }
571 EXPORT_SYMBOL_GPL(regmap_irq_get_irq_reg_linear);
572
573 /**
574 * regmap_irq_set_type_config_simple() - Simple IRQ type configuration callback.
575 * @buf: Buffer containing configuration register values, this is a 2D array of
576 * `num_config_bases` rows, each of `num_config_regs` elements.
577 * @type: The requested IRQ type.
578 * @irq_data: The IRQ being configured.
579 * @idx: Index of the irq's config registers within each array `buf[i]`
580 * @irq_drv_data: Driver specific IRQ data
581 *
582 * This is a &struct regmap_irq_chip->set_type_config callback suitable for
583 * chips with one config register. Register values are updated according to
584 * the &struct regmap_irq_type data associated with an IRQ.
585 */
regmap_irq_set_type_config_simple(unsigned int ** buf,unsigned int type,const struct regmap_irq * irq_data,int idx,void * irq_drv_data)586 int regmap_irq_set_type_config_simple(unsigned int **buf, unsigned int type,
587 const struct regmap_irq *irq_data,
588 int idx, void *irq_drv_data)
589 {
590 const struct regmap_irq_type *t = &irq_data->type;
591
592 if (t->type_reg_mask)
593 buf[0][idx] &= ~t->type_reg_mask;
594 else
595 buf[0][idx] &= ~(t->type_falling_val |
596 t->type_rising_val |
597 t->type_level_low_val |
598 t->type_level_high_val);
599
600 switch (type) {
601 case IRQ_TYPE_EDGE_FALLING:
602 buf[0][idx] |= t->type_falling_val;
603 break;
604
605 case IRQ_TYPE_EDGE_RISING:
606 buf[0][idx] |= t->type_rising_val;
607 break;
608
609 case IRQ_TYPE_EDGE_BOTH:
610 buf[0][idx] |= (t->type_falling_val |
611 t->type_rising_val);
612 break;
613
614 case IRQ_TYPE_LEVEL_HIGH:
615 buf[0][idx] |= t->type_level_high_val;
616 break;
617
618 case IRQ_TYPE_LEVEL_LOW:
619 buf[0][idx] |= t->type_level_low_val;
620 break;
621
622 default:
623 return -EINVAL;
624 }
625
626 return 0;
627 }
628 EXPORT_SYMBOL_GPL(regmap_irq_set_type_config_simple);
629
/*
 * Instantiate the IRQ domain for the chip.  A non-zero @irq_base
 * requests a legacy fixed virq range; otherwise mappings are linear.
 */
static int regmap_irq_create_domain(struct fwnode_handle *fwnode, int irq_base,
				    const struct regmap_irq_chip *chip,
				    struct regmap_irq_chip_data *d)
{
	struct irq_domain_info info = {
		.fwnode = fwnode,
		.size = chip->num_irqs,
		.hwirq_max = chip->num_irqs,
		.virq_base = irq_base,
		.ops = &regmap_domain_ops,
		.host_data = d,
		.name_suffix = chip->domain_suffix,
	};

	d->domain = irq_domain_instantiate(&info);
	if (IS_ERR(d->domain)) {
		dev_err(d->map->dev, "Failed to create IRQ domain\n");
		return PTR_ERR(d->domain);
	}

	return 0;
}
652
653
/**
 * regmap_add_irq_chip_fwnode() - Use standard regmap IRQ controller handling
 *
 * @fwnode: The firmware node where the IRQ domain should be added to.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache. The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
			       struct regmap *map, int irq,
			       int irq_flags, int irq_base,
			       const struct regmap_irq_chip *chip,
			       struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data *d;
	int i;
	int ret = -ENOMEM;
	u32 reg;

	if (chip->num_regs <= 0)
		return -EINVAL;

	/* clear_on_unmask is incompatible with explicit acking */
	if (chip->clear_on_unmask && (chip->ack_base || chip->use_ack))
		return -EINVAL;

	/* Separate mask + unmask registers imply the non-inverted pairing */
	if (chip->mask_base && chip->unmask_base && !chip->mask_unmask_non_inverted)
		return -EINVAL;

	/* Every IRQ must map to an aligned offset inside the register block */
	for (i = 0; i < chip->num_irqs; i++) {
		if (chip->irqs[i].reg_offset % map->reg_stride)
			return -EINVAL;
		if (chip->irqs[i].reg_offset / map->reg_stride >=
		    chip->num_regs)
			return -EINVAL;
	}

	if (irq_base) {
		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
		if (irq_base < 0) {
			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
				 irq_base);
			return irq_base;
		}
	}

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	if (chip->num_main_regs) {
		d->main_status_buf = kcalloc(chip->num_main_regs,
					     sizeof(*d->main_status_buf),
					     GFP_KERNEL);

		if (!d->main_status_buf)
			goto err_alloc;
	}

	d->status_buf = kcalloc(chip->num_regs, sizeof(*d->status_buf),
				GFP_KERNEL);
	if (!d->status_buf)
		goto err_alloc;

	/* Level chips need a previous-status snapshot to synthesise edges */
	if (chip->status_is_level) {
		d->prev_status_buf = kcalloc(chip->num_regs, sizeof(*d->prev_status_buf),
					     GFP_KERNEL);
		if (!d->prev_status_buf)
			goto err_alloc;
	}

	d->mask_buf = kcalloc(chip->num_regs, sizeof(*d->mask_buf),
			      GFP_KERNEL);
	if (!d->mask_buf)
		goto err_alloc;

	d->mask_buf_def = kcalloc(chip->num_regs, sizeof(*d->mask_buf_def),
				  GFP_KERNEL);
	if (!d->mask_buf_def)
		goto err_alloc;

	if (chip->wake_base) {
		d->wake_buf = kcalloc(chip->num_regs, sizeof(*d->wake_buf),
				      GFP_KERNEL);
		if (!d->wake_buf)
			goto err_alloc;
	}

	if (chip->type_in_mask) {
		d->type_buf_def = kcalloc(chip->num_regs,
					  sizeof(*d->type_buf_def), GFP_KERNEL);
		if (!d->type_buf_def)
			goto err_alloc;

		d->type_buf = kcalloc(chip->num_regs, sizeof(*d->type_buf), GFP_KERNEL);
		if (!d->type_buf)
			goto err_alloc;
	}

	if (chip->num_config_bases && chip->num_config_regs) {
		/*
		 * Create config_buf[num_config_bases][num_config_regs]
		 */
		d->config_buf = kcalloc(chip->num_config_bases,
					sizeof(*d->config_buf), GFP_KERNEL);
		if (!d->config_buf)
			goto err_alloc;

		for (i = 0; i < chip->num_config_bases; i++) {
			d->config_buf[i] = kcalloc(chip->num_config_regs,
						   sizeof(**d->config_buf),
						   GFP_KERNEL);
			if (!d->config_buf[i])
				goto err_alloc;
		}
	}

	/* Per-instance copy of the template chip so the name can differ */
	d->irq_chip = regmap_irq_chip;
	d->irq_chip.name = chip->name;
	d->irq = irq;
	d->map = map;
	d->chip = chip;
	d->irq_base = irq_base;

	if (chip->irq_reg_stride)
		d->irq_reg_stride = chip->irq_reg_stride;
	else
		d->irq_reg_stride = 1;

	if (chip->get_irq_reg)
		d->get_irq_reg = chip->get_irq_reg;
	else
		d->get_irq_reg = regmap_irq_get_irq_reg_linear;

	if (regmap_irq_can_bulk_read_status(d)) {
		d->status_reg_buf = kmalloc_array(chip->num_regs,
						  map->format.val_bytes,
						  GFP_KERNEL);
		if (!d->status_reg_buf)
			goto err_alloc;
	}

	mutex_init(&d->lock);

	/* Collect all maskable bits per register from the IRQ table */
	for (i = 0; i < chip->num_irqs; i++)
		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
			|= chip->irqs[i].mask;

	/* Mask all the interrupts by default */
	for (i = 0; i < chip->num_regs; i++) {
		d->mask_buf[i] = d->mask_buf_def[i];

		if (chip->handle_mask_sync) {
			ret = chip->handle_mask_sync(i, d->mask_buf_def[i],
						     d->mask_buf[i],
						     chip->irq_drv_data);
			if (ret)
				goto err_alloc;
		}

		if (chip->mask_base && !chip->handle_mask_sync) {
			reg = d->get_irq_reg(d, chip->mask_base, i);
			ret = regmap_update_bits(d->map, reg,
					d->mask_buf_def[i],
					d->mask_buf[i]);
			if (ret) {
				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}

		if (chip->unmask_base && !chip->handle_mask_sync) {
			reg = d->get_irq_reg(d, chip->unmask_base, i);
			ret = regmap_update_bits(d->map, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
			if (ret) {
				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}

		if (!chip->init_ack_masked)
			continue;

		/* Ack masked but set interrupts */
		if (d->chip->no_status) {
			/* no status register so default to all active */
			d->status_buf[i] = UINT_MAX;
		} else {
			reg = d->get_irq_reg(d, d->chip->status_base, i);
			ret = regmap_read(map, reg, &d->status_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to read IRQ status: %d\n",
					ret);
				goto err_alloc;
			}
		}

		if (chip->status_invert)
			d->status_buf[i] = ~d->status_buf[i];

		if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = d->get_irq_reg(d, d->chip->ack_base, i);
			if (chip->ack_invert)
				ret = regmap_write(map, reg,
					~(d->status_buf[i] & d->mask_buf[i]));
			else
				ret = regmap_write(map, reg,
					d->status_buf[i] & d->mask_buf[i]);
			/* clear_ack chips need a second write to release the ack */
			if (chip->clear_ack) {
				if (chip->ack_invert && !ret)
					ret = regmap_write(map, reg, UINT_MAX);
				else if (!ret)
					ret = regmap_write(map, reg, 0);
			}
			if (ret != 0) {
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	/* Wake is disabled by default */
	if (d->wake_buf) {
		for (i = 0; i < chip->num_regs; i++) {
			d->wake_buf[i] = d->mask_buf_def[i];
			reg = d->get_irq_reg(d, d->chip->wake_base, i);

			if (chip->wake_invert)
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 0);
			else
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	/* Store current levels */
	if (chip->status_is_level) {
		ret = read_irq_data(d);
		if (ret < 0)
			goto err_alloc;

		memcpy(d->prev_status_buf, d->status_buf,
		       array_size(d->chip->num_regs, sizeof(d->prev_status_buf[0])));
	}

	ret = regmap_irq_create_domain(fwnode, irq_base, chip, d);
	if (ret)
		goto err_alloc;

	ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
				   irq_flags | IRQF_ONESHOT,
				   chip->name, d);
	if (ret != 0) {
		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
			irq, chip->name, ret);
		goto err_domain;
	}

	*data = d;

	return 0;

err_domain:
	/* Should really dispose of the domain but... */
err_alloc:
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->main_status_buf);
	kfree(d->status_buf);
	kfree(d->prev_status_buf);
	kfree(d->status_reg_buf);
	if (d->config_buf) {
		for (i = 0; i < chip->num_config_bases; i++)
			kfree(d->config_buf[i]);
		kfree(d->config_buf);
	}
	kfree(d);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip_fwnode);
957
958 /**
959 * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
960 *
961 * @map: The regmap for the device.
962 * @irq: The IRQ the device uses to signal interrupts.
963 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
964 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
965 * @chip: Configuration for the interrupt controller.
966 * @data: Runtime data structure for the controller, allocated on success.
967 *
968 * Returns 0 on success or an errno on failure.
969 *
970 * This is the same as regmap_add_irq_chip_fwnode, except that the firmware
971 * node of the regmap is used.
972 */
regmap_add_irq_chip(struct regmap * map,int irq,int irq_flags,int irq_base,const struct regmap_irq_chip * chip,struct regmap_irq_chip_data ** data)973 int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
974 int irq_base, const struct regmap_irq_chip *chip,
975 struct regmap_irq_chip_data **data)
976 {
977 return regmap_add_irq_chip_fwnode(dev_fwnode(map->dev), map, irq,
978 irq_flags, irq_base, chip, data);
979 }
980 EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
981
/**
 * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
 *
 * @irq: Primary IRQ for the device
 * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip()
 *
 * This function also disposes of all mapped IRQs on the chip.
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
	unsigned int virq;
	int i, hwirq;

	if (!d)
		return;

	/* Quiesce the parent interrupt before tearing the domain down */
	free_irq(irq, d);

	/* Dispose all virtual irq from irq domain before removing it */
	for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
		/* Ignore hwirq if holes in the IRQ list */
		if (!d->chip->irqs[hwirq].mask)
			continue;

		/*
		 * Find the virtual irq of hwirq on chip and if it is
		 * there then dispose it
		 */
		virq = irq_find_mapping(d->domain, hwirq);
		if (virq)
			irq_dispose_mapping(virq);
	}

	irq_domain_remove(d->domain);
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->main_status_buf);
	kfree(d->status_reg_buf);
	kfree(d->status_buf);
	kfree(d->prev_status_buf);
	if (d->config_buf) {
		for (i = 0; i < d->chip->num_config_bases; i++)
			kfree(d->config_buf[i]);
		kfree(d->config_buf);
	}
	kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
1033
devm_regmap_irq_chip_release(struct device * dev,void * res)1034 static void devm_regmap_irq_chip_release(struct device *dev, void *res)
1035 {
1036 struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;
1037
1038 regmap_del_irq_chip(d->irq, d);
1039 }
1040
/* devres match: identify the resource holding a given chip data pointer */
static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)

{
	struct regmap_irq_chip_data **r = res;

	/* WARN_ON() returns its condition, so a corrupt entry never matches */
	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}
1052
1053 /**
1054 * devm_regmap_add_irq_chip_fwnode() - Resource managed regmap_add_irq_chip_fwnode()
1055 *
1056 * @dev: The device pointer on which irq_chip belongs to.
1057 * @fwnode: The firmware node where the IRQ domain should be added to.
1058 * @map: The regmap for the device.
1059 * @irq: The IRQ the device uses to signal interrupts
1060 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
1061 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
1062 * @chip: Configuration for the interrupt controller.
1063 * @data: Runtime data structure for the controller, allocated on success
1064 *
1065 * Returns 0 on success or an errno on failure.
1066 *
1067 * The ®map_irq_chip_data will be automatically released when the device is
1068 * unbound.
1069 */
devm_regmap_add_irq_chip_fwnode(struct device * dev,struct fwnode_handle * fwnode,struct regmap * map,int irq,int irq_flags,int irq_base,const struct regmap_irq_chip * chip,struct regmap_irq_chip_data ** data)1070 int devm_regmap_add_irq_chip_fwnode(struct device *dev,
1071 struct fwnode_handle *fwnode,
1072 struct regmap *map, int irq,
1073 int irq_flags, int irq_base,
1074 const struct regmap_irq_chip *chip,
1075 struct regmap_irq_chip_data **data)
1076 {
1077 struct regmap_irq_chip_data **ptr, *d;
1078 int ret;
1079
1080 ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
1081 GFP_KERNEL);
1082 if (!ptr)
1083 return -ENOMEM;
1084
1085 ret = regmap_add_irq_chip_fwnode(fwnode, map, irq, irq_flags, irq_base,
1086 chip, &d);
1087 if (ret < 0) {
1088 devres_free(ptr);
1089 return ret;
1090 }
1091
1092 *ptr = d;
1093 devres_add(dev, ptr);
1094 *data = d;
1095 return 0;
1096 }
1097 EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_fwnode);
1098
1099 /**
1100 * devm_regmap_add_irq_chip() - Resource managed regmap_add_irq_chip()
1101 *
1102 * @dev: The device pointer on which irq_chip belongs to.
1103 * @map: The regmap for the device.
1104 * @irq: The IRQ the device uses to signal interrupts
1105 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
1106 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
1107 * @chip: Configuration for the interrupt controller.
1108 * @data: Runtime data structure for the controller, allocated on success
1109 *
1110 * Returns 0 on success or an errno on failure.
1111 *
 * The &regmap_irq_chip_data will be automatically released when the device is
1113 * unbound.
1114 */
devm_regmap_add_irq_chip(struct device * dev,struct regmap * map,int irq,int irq_flags,int irq_base,const struct regmap_irq_chip * chip,struct regmap_irq_chip_data ** data)1115 int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
1116 int irq_flags, int irq_base,
1117 const struct regmap_irq_chip *chip,
1118 struct regmap_irq_chip_data **data)
1119 {
1120 return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(map->dev), map,
1121 irq, irq_flags, irq_base, chip,
1122 data);
1123 }
1124 EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
1125
1126 /**
1127 * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
1128 *
1129 * @dev: Device for which the resource was allocated.
1130 * @irq: Primary IRQ for the device.
 * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
1132 *
1133 * A resource managed version of regmap_del_irq_chip().
1134 */
devm_regmap_del_irq_chip(struct device * dev,int irq,struct regmap_irq_chip_data * data)1135 void devm_regmap_del_irq_chip(struct device *dev, int irq,
1136 struct regmap_irq_chip_data *data)
1137 {
1138 int rc;
1139
1140 WARN_ON(irq != data->irq);
1141 rc = devres_release(dev, devm_regmap_irq_chip_release,
1142 devm_regmap_irq_chip_match, data);
1143
1144 if (rc != 0)
1145 WARN_ON(rc);
1146 }
1147 EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);
1148
1149 /**
1150 * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
1151 *
1152 * @data: regmap irq controller to operate on.
1153 *
1154 * Useful for drivers to request their own IRQs.
1155 */
regmap_irq_chip_get_base(struct regmap_irq_chip_data * data)1156 int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
1157 {
1158 WARN_ON(!data->irq_base);
1159 return data->irq_base;
1160 }
1161 EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
1162
1163 /**
1164 * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
1165 *
1166 * @data: regmap irq controller to operate on.
1167 * @irq: index of the interrupt requested in the chip IRQs.
1168 *
1169 * Useful for drivers to request their own IRQs.
1170 */
regmap_irq_get_virq(struct regmap_irq_chip_data * data,int irq)1171 int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
1172 {
1173 /* Handle holes in the IRQ list */
1174 if (!data->chip->irqs[irq].mask)
1175 return -EINVAL;
1176
1177 return irq_create_mapping(data->domain, irq);
1178 }
1179 EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
1180
1181 /**
1182 * regmap_irq_get_domain() - Retrieve the irq_domain for the chip
1183 *
1184 * @data: regmap_irq controller to operate on.
1185 *
1186 * Useful for drivers to request their own IRQs and for integration
1187 * with subsystems. For ease of integration NULL is accepted as a
1188 * domain, allowing devices to just call this even if no domain is
1189 * allocated.
1190 */
regmap_irq_get_domain(struct regmap_irq_chip_data * data)1191 struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
1192 {
1193 if (data)
1194 return data->domain;
1195 else
1196 return NULL;
1197 }
1198 EXPORT_SYMBOL_GPL(regmap_irq_get_domain);
1199