// SPDX-License-Identifier: GPL-2.0
/*
 * Loongson Extended I/O Interrupt Controller support
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#define pr_fmt(fmt) "eiointc: " fmt

#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/syscore_ops.h>
#include <asm/numa.h>

#include "irq-loongson.h"

#define EIOINTC_REG_NODEMAP	0x14a0
#define EIOINTC_REG_IPMAP	0x14c0
#define EIOINTC_REG_ENABLE	0x1600
#define EIOINTC_REG_BOUNCE	0x1680
#define EIOINTC_REG_ISR		0x1800
#define EIOINTC_REG_ROUTE	0x1c00

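/*
 * Registers of the para-virtualized extioi extension; they are only
 * touched after kvm_para_has_feature(KVM_FEATURE_VIRT_EXTIOI) confirms
 * that the hypervisor provides them (see eiointc_init() below).
 */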
#define EXTIOI_VIRT_FEATURES           0x40000000
#define  EXTIOI_HAS_VIRT_EXTENSION     BIT(0)
#define  EXTIOI_HAS_ENABLE_OPTION      BIT(1)
#define  EXTIOI_HAS_INT_ENCODE         BIT(2)
#define  EXTIOI_HAS_CPU_ENCODE         BIT(3)
#define EXTIOI_VIRT_CONFIG             0x40000004
#define  EXTIOI_ENABLE                 BIT(1)
#define  EXTIOI_ENABLE_INT_ENCODE      BIT(2)
#define  EXTIOI_ENABLE_CPU_ENCODE      BIT(3)

#define VEC_REG_COUNT		4
#define VEC_COUNT_PER_REG	64
#define VEC_COUNT		(VEC_REG_COUNT * VEC_COUNT_PER_REG)
#define VEC_REG_IDX(irq_id)	((irq_id) / VEC_COUNT_PER_REG)
#define VEC_REG_BIT(irq_id)	((irq_id) % VEC_COUNT_PER_REG)
#define EIOINTC_ALL_ENABLE	0xffffffff
#define EIOINTC_ALL_ENABLE_VEC_MASK(vector)	(EIOINTC_ALL_ENABLE & ~BIT(vector & 0x1f))
#define EIOINTC_REG_ENABLE_VEC(vector)		(EIOINTC_REG_ENABLE + ((vector >> 5) << 2))
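/* e.g. vector 70: enable register EIOINTC_REG_ENABLE + 0x8 (70 >> 5 == 2), bit 6 (70 & 0x1f) */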
#define EIOINTC_USE_CPU_ENCODE			BIT(0)
#define EIOINTC_ROUTE_MULT_IP			BIT(1)

#define MAX_EIO_NODES		(NR_CPUS / CORES_PER_EIO_NODE)

/*
 * Routing registers are 32-bit, with an 8-bit route setting for each
 * interrupt vector, so one route register holds the routing information
 * for four vectors.
 */
#define EIOINTC_REG_ROUTE_VEC(vector)		(EIOINTC_REG_ROUTE + (vector & ~0x03))
#define EIOINTC_REG_ROUTE_VEC_SHIFT(vector)	((vector & 0x03) << 3)
#define EIOINTC_REG_ROUTE_VEC_MASK(vector)	(0xff << EIOINTC_REG_ROUTE_VEC_SHIFT(vector))
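/* e.g. vector 5: route register EIOINTC_REG_ROUTE + 0x4, byte lane 1 (shift 8, mask 0xff00) */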

static int nr_pics;
struct eiointc_priv;

struct eiointc_ip_route {
	struct eiointc_priv	*priv;
	/* ISR register index range dispatched via the routed destination IP */
	int			start;
	int			end;
};

struct eiointc_priv {
	u32			node;
	u32			vec_count;
	nodemask_t		node_map;
	cpumask_t		cpuspan_map;
	struct fwnode_handle	*domain_handle;
	struct irq_domain	*eiointc_domain;
	int			flags;
	irq_hw_number_t		parent_hwirq;
	struct eiointc_ip_route	route_info[VEC_REG_COUNT];
};

static struct eiointc_priv *eiointc_priv[MAX_IO_PICS];

static void eiointc_enable(void)
{
	uint64_t misc;

	misc = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
	misc |= IOCSR_MISC_FUNC_EXT_IOI_EN;
	iocsr_write64(misc, LOONGARCH_IOCSR_MISC_FUNC);
}

static int cpu_to_eio_node(int cpu)
{
	if (!kvm_para_has_feature(KVM_FEATURE_VIRT_EXTIOI))
		return cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
	else
		return cpu_logical_map(cpu) / CORES_PER_VEIO_NODE;
}

#ifdef CONFIG_SMP
static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, nodemask_t *node_map)
{
	int i, node, cpu_node, route_node;
	unsigned char coremap;
	uint32_t pos_off, data, data_byte, data_mask;

	pos_off = pos & ~3;
	data_byte = pos & 3;
	data_mask = ~BIT_MASK(data_byte) & 0xf;
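	/*
	 * Each 32-bit route register packs four 8-bit entries; data_mask
	 * keeps csr_any_send() from touching the three byte lanes that do
	 * not belong to this vector.
	 */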

	/* Calculate node and coremap of target irq */
	cpu_node = cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
	coremap = BIT(cpu_logical_map(cpu) % CORES_PER_EIO_NODE);

	for_each_online_cpu(i) {
		node = cpu_to_eio_node(i);
		if (!node_isset(node, *node_map))
			continue;

		/* The master EIO node is in charge of inter-node interrupt dispatch */
		route_node = (node == mnode) ? cpu_node : node;
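		/* 8-bit route entry: core bitmap in bits [3:0], node id in bits [7:4] */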
		data = ((coremap | (route_node << 4)) << (data_byte * 8));
		csr_any_send(EIOINTC_REG_ROUTE + pos_off, data, data_mask, node * CORES_PER_EIO_NODE);
	}
}

static void veiointc_set_irq_route(unsigned int vector, unsigned int cpu)
{
	unsigned long reg = EIOINTC_REG_ROUTE_VEC(vector);
	unsigned int data;

	data = iocsr_read32(reg);
	data &= ~EIOINTC_REG_ROUTE_VEC_MASK(vector);
	data |= cpu_logical_map(cpu) << EIOINTC_REG_ROUTE_VEC_SHIFT(vector);
	iocsr_write32(data, reg);
}

static DEFINE_RAW_SPINLOCK(affinity_lock);

static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, bool force)
{
	unsigned int cpu;
	unsigned long flags;
	uint32_t vector, regaddr;
	struct eiointc_priv *priv = d->domain->host_data;

	raw_spin_lock_irqsave(&affinity_lock, flags);

	cpu = cpumask_first_and_and(&priv->cpuspan_map, affinity, cpu_online_mask);
	if (cpu >= nr_cpu_ids) {
		raw_spin_unlock_irqrestore(&affinity_lock, flags);
		return -EINVAL;
	}

	vector = d->hwirq;
	regaddr = EIOINTC_REG_ENABLE_VEC(vector);

	if (priv->flags & EIOINTC_USE_CPU_ENCODE) {
		iocsr_write32(EIOINTC_ALL_ENABLE_VEC_MASK(vector), regaddr);
		veiointc_set_irq_route(vector, cpu);
		iocsr_write32(EIOINTC_ALL_ENABLE, regaddr);
	} else {
		/* Mask target vector */
		csr_any_send(regaddr, EIOINTC_ALL_ENABLE_VEC_MASK(vector),
			     0x0, priv->node * CORES_PER_EIO_NODE);

		/* Set route for target vector */
		eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map);

		/* Unmask target vector */
		csr_any_send(regaddr, EIOINTC_ALL_ENABLE,
			     0x0, priv->node * CORES_PER_EIO_NODE);
	}

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	raw_spin_unlock_irqrestore(&affinity_lock, flags);

	return IRQ_SET_MASK_OK;
}
#endif

static int eiointc_index(int node)
{
	int i;

	for (i = 0; i < nr_pics; i++) {
		if (node_isset(node, eiointc_priv[i]->node_map))
			return i;
	}

	return -1;
}

static int eiointc_router_init(unsigned int cpu)
{
	int i, bit, cores, index, node;
	unsigned int data;
	int hwirq, mask;

	node = cpu_to_eio_node(cpu);
	index = eiointc_index(node);

	if (index < 0) {
		pr_err("Error: invalid nodemap!\n");
		return -EINVAL;
	}

	/* Enable cpu interrupt pin from eiointc */
	hwirq = eiointc_priv[index]->parent_hwirq;
	mask = BIT(hwirq);
	if (eiointc_priv[index]->flags & EIOINTC_ROUTE_MULT_IP)
		mask |= BIT(hwirq + 1) | BIT(hwirq + 2) | BIT(hwirq + 3);
	set_csr_ecfg(mask);

	if (!(eiointc_priv[index]->flags & EIOINTC_USE_CPU_ENCODE))
		cores = CORES_PER_EIO_NODE;
	else
		cores = CORES_PER_VEIO_NODE;

	if ((cpu_logical_map(cpu) % cores) == 0) {
		eiointc_enable();

		for (i = 0; i < eiointc_priv[0]->vec_count / 32; i++) {
			data = (((1 << (i * 2 + 1)) << 16) | (1 << (i * 2)));
			iocsr_write32(data, EIOINTC_REG_NODEMAP + i * 4);
		}

		for (i = 0; i < eiointc_priv[0]->vec_count / 32 / 4; i++) {
			/*
			 * Route to interrupt pin, with a relative offset used
			 * here: offset 0 means routing to IP0, and so on.
			 *
			 * If EIOINTC_ROUTE_MULT_IP is set in flags, every
			 * group of 64 vectors routes to a different
			 * consecutive IP; otherwise all vectors route to the
			 * same IP.
			 */
			if (eiointc_priv[index]->flags & EIOINTC_ROUTE_MULT_IP) {
				/* The first 64 vectors route to hwirq */
				bit = BIT(hwirq++ - INT_HWI0);
				data = bit | (bit << 8);

				/* The second 64 vectors route to hwirq + 1 */
				bit = BIT(hwirq++ - INT_HWI0);
				data |= (bit << 16) | (bit << 24);

				/*
				 * Route to hwirq + 2/hwirq + 3 separately
				 * in the next loop iteration
				 */
			} else {
				bit = BIT(hwirq - INT_HWI0);
				data = bit | (bit << 8) | (bit << 16) | (bit << 24);
			}
			iocsr_write32(data, EIOINTC_REG_IPMAP + i * 4);
		}

		for (i = 0; i < eiointc_priv[0]->vec_count / 4; i++) {
			/* Route to Node-0 Core-0 */
			if (eiointc_priv[index]->flags & EIOINTC_USE_CPU_ENCODE)
				bit = cpu_logical_map(0);
			else if (index == 0)
				bit = BIT(cpu_logical_map(0));
			else
				bit = (eiointc_priv[index]->node << 4) | 1;

			data = bit | (bit << 8) | (bit << 16) | (bit << 24);
			iocsr_write32(data, EIOINTC_REG_ROUTE + i * 4);
		}

		for (i = 0; i < eiointc_priv[0]->vec_count / 32; i++) {
			data = 0xffffffff;
			iocsr_write32(data, EIOINTC_REG_ENABLE + i * 4);
			iocsr_write32(data, EIOINTC_REG_BOUNCE + i * 4);
		}
	}

	return 0;
}

static void eiointc_irq_dispatch(struct irq_desc *desc)
{
	struct eiointc_ip_route *info = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	bool handled = false;
	u64 pending;
	int i;

	chained_irq_enter(chip, desc);

	/*
	 * If EIOINTC_ROUTE_MULT_IP is set, each group of 64 interrupt
	 * vectors in the eiointc interrupt controller routes to a
	 * different CPU interrupt pin.
	 *
	 * Every CPU interrupt pin has its own IRQ handler, so it is
	 * sufficient to read the ISR for these 64 interrupt vectors
	 * rather than for all vectors.
	 */
	for (i = info->start; i < info->end; i++) {
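		/* ISR registers are 64 bits wide, hence the 8-byte (i << 3) stride */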
		pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3));

		/* Skip handling if pending bitmap is zero */
		if (!pending)
			continue;

		/* Clear the IRQs */
		iocsr_write64(pending, EIOINTC_REG_ISR + (i << 3));
		while (pending) {
			int bit = __ffs(pending);
			int irq = bit + VEC_COUNT_PER_REG * i;

			generic_handle_domain_irq(info->priv->eiointc_domain, irq);
			pending &= ~BIT(bit);
			handled = true;
		}
	}

	if (!handled)
		spurious_interrupt();

	chained_irq_exit(chip, desc);
}

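/*
 * The ack/mask/unmask callbacks are intentionally empty: pending bits are
 * cleared in bulk by eiointc_irq_dispatch(), and a vector is only masked
 * transiently while its affinity is being changed.
 */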
static void eiointc_ack_irq(struct irq_data *d)
{
}

static void eiointc_mask_irq(struct irq_data *d)
{
}

static void eiointc_unmask_irq(struct irq_data *d)
{
}

static struct irq_chip eiointc_irq_chip = {
	.name			= "EIOINTC",
	.irq_ack		= eiointc_ack_irq,
	.irq_mask		= eiointc_mask_irq,
	.irq_unmask		= eiointc_unmask_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= eiointc_set_irq_affinity,
#endif
};

static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int ret;
	unsigned int i, type;
	unsigned long hwirq = 0;
	struct eiointc_priv *priv = domain->host_data;

	ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, hwirq + i, &eiointc_irq_chip,
					priv, handle_edge_irq, NULL, NULL);
	}

	return 0;
}

static void eiointc_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}

static const struct irq_domain_ops eiointc_domain_ops = {
	.translate	= irq_domain_translate_onecell,
	.alloc		= eiointc_domain_alloc,
	.free		= eiointc_domain_free,
};

static void acpi_set_vec_parent(int node, struct irq_domain *parent, struct acpi_vector_group *vec_group)
{
	int i;

	for (i = 0; i < MAX_IO_PICS; i++) {
		if (node == vec_group[i].node) {
			vec_group[i].parent = parent;
			return;
		}
	}
}

static struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group *vec_group)
{
	int i;

	for (i = 0; i < MAX_IO_PICS; i++) {
		if (node == vec_group[i].node)
			return vec_group[i].parent;
	}
	return NULL;
}

static int eiointc_suspend(void)
{
	return 0;
}

static void eiointc_resume(void)
{
	eiointc_router_init(0);
}

static struct syscore_ops eiointc_syscore_ops = {
	.suspend = eiointc_suspend,
	.resume = eiointc_resume,
};

static int __init pch_pic_parse_madt(union acpi_subtable_headers *header,
					const unsigned long end)
{
	struct acpi_madt_bio_pic *pchpic_entry = (struct acpi_madt_bio_pic *)header;
	unsigned int node = (pchpic_entry->address >> 44) & 0xf;
	struct irq_domain *parent = acpi_get_vec_parent(node, pch_group);

	if (parent)
		return pch_pic_acpi_init(parent, pchpic_entry);

	return 0;
}

static int __init pch_msi_parse_madt(union acpi_subtable_headers *header,
					const unsigned long end)
{
	struct irq_domain *parent;
	struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header;
	int node;

	if (cpu_has_flatmode)
		node = early_cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE);
	else
		node = eiointc_priv[nr_pics - 1]->node;

	parent = acpi_get_vec_parent(node, msi_group);

	if (parent)
		return pch_msi_acpi_init(parent, pchmsi_entry);

	return 0;
}

static int __init acpi_cascade_irqdomain_init(void)
{
	int r;

	r = acpi_table_parse_madt(ACPI_MADT_TYPE_BIO_PIC, pch_pic_parse_madt, 0);
	if (r < 0)
		return r;

	if (cpu_has_avecint)
		return 0;

	r = acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1);
	if (r < 0)
		return r;

	return 0;
}

static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq,
			       u64 node_map)
{
	int i, val;

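	/* A node_map of 0 means the controller spans every possible EIO node */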
	node_map = node_map ? node_map : -1ULL;
	for_each_possible_cpu(i) {
		if (node_map & (1ULL << (cpu_to_eio_node(i)))) {
			node_set(cpu_to_eio_node(i), priv->node_map);
			cpumask_or(&priv->cpuspan_map, &priv->cpuspan_map,
				   cpumask_of(i));
		}
	}

	priv->eiointc_domain = irq_domain_create_linear(priv->domain_handle,
							priv->vec_count,
							&eiointc_domain_ops,
							priv);
	if (!priv->eiointc_domain) {
		pr_err("loongson-extioi: cannot add IRQ domain\n");
		return -ENOMEM;
	}

	if (kvm_para_has_feature(KVM_FEATURE_VIRT_EXTIOI)) {
		val = iocsr_read32(EXTIOI_VIRT_FEATURES);
		/*
		 * With EXTIOI_ENABLE_CPU_ENCODE set, interrupts can be
		 * routed to up to 256 vCPUs.
		 */
		if (val & EXTIOI_HAS_CPU_ENCODE) {
			val = iocsr_read32(EXTIOI_VIRT_CONFIG);
			val |= EXTIOI_ENABLE_CPU_ENCODE;
			iocsr_write32(val, EXTIOI_VIRT_CONFIG);
			priv->flags = EIOINTC_USE_CPU_ENCODE;
		}
	}

	eiointc_priv[nr_pics++] = priv;
	/*
	 * Only the first eiointc device on a VM supports routing to
	 * different CPU interrupt pins. Should there be multiple eiointc
	 * devices in the future, the later ones use the generic method.
	 */
	if (cpu_has_hypervisor && (nr_pics == 1)) {
		priv->flags |= EIOINTC_ROUTE_MULT_IP;
		priv->parent_hwirq = INT_HWI0;
	}

	if (priv->flags & EIOINTC_ROUTE_MULT_IP) {
		for (i = 0; i < priv->vec_count / VEC_COUNT_PER_REG; i++) {
			priv->route_info[i].start  = priv->parent_hwirq - INT_HWI0 + i;
			priv->route_info[i].end    = priv->route_info[i].start + 1;
			priv->route_info[i].priv   = priv;
			parent_irq = get_percpu_irq(priv->parent_hwirq + i);
			irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch,
							 &priv->route_info[i]);
		}
	} else {
		priv->route_info[0].start  = 0;
		priv->route_info[0].end    = priv->vec_count / VEC_COUNT_PER_REG;
		priv->route_info[0].priv   = priv;
		irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch,
						 &priv->route_info[0]);
	}
	eiointc_router_init(0);

	if (nr_pics == 1) {
		register_syscore_ops(&eiointc_syscore_ops);
		cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_EIOINTC_STARTING,
					  "irqchip/loongarch/eiointc:starting",
					  eiointc_router_init, NULL);
	}

	return 0;
}

int __init eiointc_acpi_init(struct irq_domain *parent,
				     struct acpi_madt_eio_pic *acpi_eiointc)
{
	int parent_irq, ret;
	struct eiointc_priv *priv;
	int node;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->domain_handle = irq_domain_alloc_named_id_fwnode("EIOPIC",
							       acpi_eiointc->node);
	if (!priv->domain_handle) {
		pr_err("Unable to allocate domain handle\n");
		goto out_free_priv;
	}

	priv->vec_count = VEC_COUNT;
	priv->node = acpi_eiointc->node;
	priv->parent_hwirq = acpi_eiointc->cascade;
	parent_irq = irq_create_mapping(parent, acpi_eiointc->cascade);

	ret = eiointc_init(priv, parent_irq, acpi_eiointc->node_map);
	if (ret < 0)
		goto out_free_handle;

	if (cpu_has_flatmode)
		node = early_cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
	else
		node = acpi_eiointc->node;
	acpi_set_vec_parent(node, priv->eiointc_domain, pch_group);
	acpi_set_vec_parent(node, priv->eiointc_domain, msi_group);

	ret = acpi_cascade_irqdomain_init();
	if (ret < 0)
		goto out_free_handle;

	return ret;

out_free_handle:
	irq_domain_free_fwnode(priv->domain_handle);
	priv->domain_handle = NULL;
out_free_priv:
	kfree(priv);

	return -ENOMEM;
}

static int __init eiointc_of_init(struct device_node *of_node,
				  struct device_node *parent)
{
	struct eiointc_priv *priv;
	struct irq_data *irq_data;
	int parent_irq, ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	parent_irq = irq_of_parse_and_map(of_node, 0);
	if (parent_irq <= 0) {
		ret = -ENODEV;
		goto out_free_priv;
	}

	ret = irq_set_handler_data(parent_irq, priv);
	if (ret < 0)
		goto out_free_priv;

	irq_data = irq_get_irq_data(parent_irq);
	if (!irq_data) {
		ret = -ENODEV;
		goto out_free_priv;
	}

	/*
	 * The LS2K0500 extended I/O interrupt controller supports only
	 * 128 interrupt vectors.
	 */
	if (of_device_is_compatible(of_node, "loongson,ls2k0500-eiointc"))
		priv->vec_count = 128;
	else
		priv->vec_count = VEC_COUNT;
	priv->parent_hwirq = irqd_to_hwirq(irq_data);
	priv->node = 0;
	priv->domain_handle = of_fwnode_handle(of_node);

	ret = eiointc_init(priv, parent_irq, 0);
	if (ret < 0)
		goto out_free_priv;

	return 0;

out_free_priv:
	kfree(priv);
	return ret;
}

IRQCHIP_DECLARE(loongson_ls2k0500_eiointc, "loongson,ls2k0500-eiointc", eiointc_of_init);
IRQCHIP_DECLARE(loongson_ls2k2000_eiointc, "loongson,ls2k2000-eiointc", eiointc_of_init);
653