/* drivers/sh/intc/virq.c (revision 93d90ad708b8da6efc0e487b66111aa9db7f70c7) */
/*
 * Support for virtual IRQ subgroups.
 *
 * Copyright (C) 2010  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#define pr_fmt(fmt) "intc: " fmt

#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include "internals.h"

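/*
 * Global IRQ -> (enum_id, controller) translation table. Reverse
 * lookups recover the IRQ number by pointer arithmetic against the
 * entries stashed in each controller's radix tree.
 */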
static struct intc_map_entry intc_irq_xlate[INTC_NR_IRQS];

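/*
 * Each parent IRQ keeps a singly-linked list of the subgroup VIRQs
 * chained off of it, hung off the parent's handler data.
 */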
struct intc_virq_list {
	unsigned int irq;
	struct intc_virq_list *next;
};

#define for_each_virq(entry, head) \
	for (entry = head; entry; entry = entry->next)

/*
 * Tags for the radix tree: INTC_TAG_VIRQ_NEEDS_ALLOC marks subgroup
 * entries that have been registered but not yet assigned a Linux IRQ.
 */
#define INTC_TAG_VIRQ_NEEDS_ALLOC	0

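/*
 * Record the (enum_id, controller) pairing for @irq; updates are
 * serialized against other translation-table writers by intc_big_lock.
 */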
void intc_irq_xlate_set(unsigned int irq, intc_enum id, struct intc_desc_int *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&intc_big_lock, flags);
	intc_irq_xlate[irq].enum_id = id;
	intc_irq_xlate[irq].desc = d;
	raw_spin_unlock_irqrestore(&intc_big_lock, flags);
}

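/* Return the translation table slot for @irq. */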
struct intc_map_entry *intc_irq_xlate_get(unsigned int irq)
{
	return intc_irq_xlate + irq;
}

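/*
 * Map a (chip name, enum_id) pair back to its Linux IRQ, e.g.
 *
 *	int irq = intc_irq_lookup("PINT", 0xabcd);
 *
 * with a hypothetical chip name and ID. Returns -1 if the ID is
 * unknown or if the matching subgroup VIRQ has not been allocated
 * yet (entry still tagged INTC_TAG_VIRQ_NEEDS_ALLOC).
 */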
int intc_irq_lookup(const char *chipname, intc_enum enum_id)
{
	struct intc_map_entry *ptr;
	struct intc_desc_int *d;
	int irq = -1;

	list_for_each_entry(d, &intc_list, list) {
		int tagged;

		if (strcmp(d->chip.name, chipname) != 0)
			continue;

		/*
		 * Catch early lookups for subgroup VIRQs that have not
		 * yet been allocated an IRQ. This already includes a
		 * fast-path out if the tree is untagged, so there is no
		 * need to explicitly test the root tree.
		 */
		tagged = radix_tree_tag_get(&d->tree, enum_id,
					    INTC_TAG_VIRQ_NEEDS_ALLOC);
		if (unlikely(tagged))
			break;

		ptr = radix_tree_lookup(&d->tree, enum_id);
		if (ptr) {
			irq = ptr - intc_irq_xlate;
			break;
		}
	}

	return irq;
}
EXPORT_SYMBOL_GPL(intc_irq_lookup);

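/*
 * Link @virq into the parent @irq's VIRQ list, skipping duplicates.
 * Called under d->lock from intc_subgroup_map(), hence GFP_ATOMIC.
 */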
static int add_virq_to_pirq(unsigned int irq, unsigned int virq)
{
	struct intc_virq_list **last, *entry;
	struct irq_data *data = irq_get_irq_data(irq);

	/* scan for duplicates */
	last = (struct intc_virq_list **)&data->handler_data;
	for_each_virq(entry, data->handler_data) {
		if (entry->irq == virq)
			return 0;
		last = &entry->next;
	}

	entry = kzalloc(sizeof(struct intc_virq_list), GFP_ATOMIC);
	if (!entry) {
		pr_err("can't allocate VIRQ mapping for %d\n", virq);
		return -ENOMEM;
	}

	entry->irq = virq;

	*last = entry;

	return 0;
}

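/*
 * Chained handler for a parent IRQ: mask and ack the parent, test each
 * subgroup VIRQ's register bit, and dispatch the ones that are actually
 * pending before unmasking again.
 */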
static void intc_virq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct intc_virq_list *entry, *vlist = irq_data_get_irq_handler_data(data);
	struct intc_desc_int *d = get_intc_desc(irq);

	chip->irq_mask_ack(data);

	for_each_virq(entry, vlist) {
		unsigned long addr, handle;

		handle = (unsigned long)irq_get_handler_data(entry->irq);
		addr = INTC_REG(d, _INTC_ADDR_E(handle), 0);

		if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0))
			generic_handle_irq(entry->irq);
	}

	chip->irq_unmask(data);
}

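/*
 * Pack the register access handle for one subgroup member: a
 * REG_FN_TEST access sized to the register width, testing a single
 * bit counted down from the register's MSB.
 */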
static unsigned long __init intc_subgroup_data(struct intc_subgroup *subgroup,
					       struct intc_desc_int *d,
					       unsigned int index)
{
	unsigned int fn = REG_FN_TEST_BASE + (subgroup->reg_width >> 3) - 1;

	return _INTC_MK(fn, MODE_ENABLE_REG, intc_get_reg(d, subgroup->reg),
			0, 1, (subgroup->reg_width - 1) - index);
}

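/*
 * Register one subgroup: look up its parent IRQ, then insert a radix
 * tree entry for every member enum_id, tagged as needing a VIRQ so
 * that intc_subgroup_map() can allocate one later.
 */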
static void __init intc_subgroup_init_one(struct intc_desc *desc,
					  struct intc_desc_int *d,
					  struct intc_subgroup *subgroup)
{
	struct intc_map_entry *mapped;
	unsigned int pirq;
	unsigned long flags;
	int i;

	mapped = radix_tree_lookup(&d->tree, subgroup->parent_id);
	if (!mapped) {
		WARN_ON(1);
		return;
	}

	pirq = mapped - intc_irq_xlate;

	raw_spin_lock_irqsave(&d->lock, flags);

	for (i = 0; i < ARRAY_SIZE(subgroup->enum_ids); i++) {
		struct intc_subgroup_entry *entry;
		int err;

		if (!subgroup->enum_ids[i])
			continue;

		entry = kmalloc(sizeof(*entry), GFP_NOWAIT);
		if (!entry)
			break;

		entry->pirq = pirq;
		entry->enum_id = subgroup->enum_ids[i];
		entry->handle = intc_subgroup_data(subgroup, d, i);

		err = radix_tree_insert(&d->tree, entry->enum_id, entry);
		if (unlikely(err < 0))
			break;

		radix_tree_tag_set(&d->tree, entry->enum_id,
				   INTC_TAG_VIRQ_NEEDS_ALLOC);
	}

	raw_spin_unlock_irqrestore(&d->lock, flags);
}

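/* Register all of a controller's subgroups at init time. */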
void __init intc_subgroup_init(struct intc_desc *desc, struct intc_desc_int *d)
{
	int i;

	if (!desc->hw.subgroups)
		return;

	for (i = 0; i < desc->hw.nr_subgroups; i++)
		intc_subgroup_init_one(desc, d, desc->hw.subgroups + i);
}

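/*
 * Walk the tagged entries in @d's radix tree and turn each one into a
 * live VIRQ: allocate an IRQ number, wire it to a simple handler that
 * borrows the parent's chip, chain the parent through
 * intc_virq_handler(), then clear the tag and replace the subgroup
 * entry with its translation-table slot.
 */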
static void __init intc_subgroup_map(struct intc_desc_int *d)
{
	struct intc_subgroup_entry *entries[32];
	unsigned long flags;
	unsigned int nr_found;
	int i;

	raw_spin_lock_irqsave(&d->lock, flags);

restart:
	nr_found = radix_tree_gang_lookup_tag_slot(&d->tree,
			(void ***)entries, 0, ARRAY_SIZE(entries),
			INTC_TAG_VIRQ_NEEDS_ALLOC);

	for (i = 0; i < nr_found; i++) {
		struct intc_subgroup_entry *entry;
		int irq;

		entry = radix_tree_deref_slot((void **)entries[i]);
		if (unlikely(!entry))
			continue;
		if (radix_tree_deref_retry(entry))
			goto restart;

		irq = irq_alloc_desc(numa_node_id());
		if (unlikely(irq < 0)) {
			pr_err("no more free IRQs, bailing..\n");
			break;
		}

		activate_irq(irq);

		pr_info("Setting up a chained VIRQ from %d -> %d\n",
			irq, entry->pirq);

		intc_irq_xlate_set(irq, entry->enum_id, d);

		irq_set_chip_and_handler_name(irq, irq_get_chip(entry->pirq),
					      handle_simple_irq, "virq");
		irq_set_chip_data(irq, irq_get_chip_data(entry->pirq));

		irq_set_handler_data(irq, (void *)entry->handle);

		/*
		 * Set the virtual IRQ as non-threadable.
		 */
		irq_set_nothread(irq);

		irq_set_chained_handler(entry->pirq, intc_virq_handler);
		add_virq_to_pirq(entry->pirq, irq);

		radix_tree_tag_clear(&d->tree, entry->enum_id,
				     INTC_TAG_VIRQ_NEEDS_ALLOC);
		radix_tree_replace_slot((void **)entries[i],
					&intc_irq_xlate[irq]);
	}

	raw_spin_unlock_irqrestore(&d->lock, flags);
}

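/*
 * Late init hook: allocate VIRQs for any controller that still has
 * subgroup entries tagged as waiting for an IRQ.
 */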
void __init intc_finalize(void)
{
	struct intc_desc_int *d;

	list_for_each_entry(d, &intc_list, list)
		if (radix_tree_tagged(&d->tree, INTC_TAG_VIRQ_NEEDS_ALLOC))
			intc_subgroup_map(d);
}