xref: /linux/kernel/irq/irqdomain.c (revision 46e6acfe3501fa938af9c5bd730f0020235b08a2)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #define pr_fmt(fmt)  "irq: " fmt
4 
5 #include <linux/acpi.h>
6 #include <linux/debugfs.h>
7 #include <linux/hardirq.h>
8 #include <linux/interrupt.h>
9 #include <linux/irq.h>
10 #include <linux/irqdesc.h>
11 #include <linux/irqdomain.h>
12 #include <linux/module.h>
13 #include <linux/mutex.h>
14 #include <linux/of.h>
15 #include <linux/of_address.h>
16 #include <linux/of_irq.h>
17 #include <linux/topology.h>
18 #include <linux/seq_file.h>
19 #include <linux/slab.h>
20 #include <linux/smp.h>
21 #include <linux/fs.h>
22 
/* All registered irq domains; additions/removals serialized by irq_domain_mutex. */
static LIST_HEAD(irq_domain_list);
static DEFINE_MUTEX(irq_domain_mutex);

/* Fallback domain used when NULL is passed to the mapping/lookup APIs. */
static struct irq_domain *irq_default_domain;

/* Forward declarations for hierarchy/allocation internals defined later in this file. */
static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base,
					unsigned int nr_irqs, int node, void *arg,
					bool realloc, const struct irq_affinity_desc *affinity);
static void irq_domain_check_hierarchy(struct irq_domain *domain);
static void irq_domain_free_one_irq(struct irq_domain *domain, unsigned int virq);
33 
/*
 * struct irqchip_fwid - Software-constructed fwnode used to identify an irq domain
 * @fwnode:	embedded fwnode_handle handed out to users
 * @type:	IRQCHIP_FWNODE_* type (see linux/irqdomain.h)
 * @name:	dynamically allocated name, freed by irq_domain_free_fwnode()
 * @pa:		optional physical address used for the default "irqchip@%pa" name
 */
struct irqchip_fwid {
	struct fwnode_handle	fwnode;
	unsigned int		type;
	char			*name;
	phys_addr_t		*pa;
};
40 
/* Per-domain debugfs directories exist only with GENERIC_IRQ_DEBUGFS; stub out otherwise. */
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static void debugfs_add_domain_dir(struct irq_domain *d);
static void debugfs_remove_domain_dir(struct irq_domain *d);
#else
static inline void debugfs_add_domain_dir(struct irq_domain *d) { }
static inline void debugfs_remove_domain_dir(struct irq_domain *d) { }
#endif
48 
49 static const char *irqchip_fwnode_get_name(const struct fwnode_handle *fwnode)
50 {
51 	struct irqchip_fwid *fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
52 
53 	return fwid->name;
54 }
55 
/* fwnode operations for irqchip fwnodes; only name retrieval is supported. */
const struct fwnode_operations irqchip_fwnode_ops = {
	.get_name = irqchip_fwnode_get_name,
};
EXPORT_SYMBOL_GPL(irqchip_fwnode_ops);
60 
61 /**
62  * __irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for
63  *                           identifying an irq domain
64  * @type:	Type of irqchip_fwnode. See linux/irqdomain.h
65  * @id:		Optional user provided id if name != NULL
66  * @name:	Optional user provided domain name
67  * @pa:		Optional user-provided physical address
68  *
69  * Allocate a struct irqchip_fwid, and return a pointer to the embedded
70  * fwnode_handle (or NULL on failure).
71  *
72  * Note: The types IRQCHIP_FWNODE_NAMED and IRQCHIP_FWNODE_NAMED_ID are
73  * solely to transport name information to irqdomain creation code. The
74  * node is not stored. For other types the pointer is kept in the irq
75  * domain struct.
76  */
77 struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id,
78 						const char *name,
79 						phys_addr_t *pa)
80 {
81 	struct irqchip_fwid *fwid;
82 	char *n;
83 
84 	fwid = kzalloc(sizeof(*fwid), GFP_KERNEL);
85 
86 	switch (type) {
87 	case IRQCHIP_FWNODE_NAMED:
88 		n = kasprintf(GFP_KERNEL, "%s", name);
89 		break;
90 	case IRQCHIP_FWNODE_NAMED_ID:
91 		n = kasprintf(GFP_KERNEL, "%s-%d", name, id);
92 		break;
93 	default:
94 		n = kasprintf(GFP_KERNEL, "irqchip@%pa", pa);
95 		break;
96 	}
97 
98 	if (!fwid || !n) {
99 		kfree(fwid);
100 		kfree(n);
101 		return NULL;
102 	}
103 
104 	fwid->type = type;
105 	fwid->name = n;
106 	fwid->pa = pa;
107 	fwnode_init(&fwid->fwnode, &irqchip_fwnode_ops);
108 	return &fwid->fwnode;
109 }
110 EXPORT_SYMBOL_GPL(__irq_domain_alloc_fwnode);
111 
112 /**
113  * irq_domain_free_fwnode - Free a non-OF-backed fwnode_handle
114  * @fwnode: fwnode_handle to free
115  *
116  * Free a fwnode_handle allocated with irq_domain_alloc_fwnode.
117  */
118 void irq_domain_free_fwnode(struct fwnode_handle *fwnode)
119 {
120 	struct irqchip_fwid *fwid;
121 
122 	if (!fwnode || WARN_ON(!is_fwnode_irqchip(fwnode)))
123 		return;
124 
125 	fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
126 	kfree(fwid->name);
127 	kfree(fwid);
128 }
129 EXPORT_SYMBOL_GPL(irq_domain_free_fwnode);
130 
/*
 * Derive and assign a (debugfs-friendly) name for @domain from its firmware
 * node and @bus_token. Sets IRQ_DOMAIN_NAME_ALLOCATED whenever the name was
 * dynamically allocated so irq_domain_free() knows to kfree() it.
 *
 * Returns 0 on success or -ENOMEM if a name allocation failed.
 */
static int irq_domain_set_name(struct irq_domain *domain,
			       const struct fwnode_handle *fwnode,
			       enum irq_domain_bus_token bus_token)
{
	/* Monotonic counter used to uniquify nameless domains */
	static atomic_t unknown_domains;
	struct irqchip_fwid *fwid;

	if (is_fwnode_irqchip(fwnode)) {
		fwid = container_of(fwnode, struct irqchip_fwid, fwnode);

		switch (fwid->type) {
		case IRQCHIP_FWNODE_NAMED:
		case IRQCHIP_FWNODE_NAMED_ID:
			/*
			 * NAMED/NAMED_ID fwnodes only transport the name:
			 * always take a private copy (suffixed with the bus
			 * token when non-zero) so the fwid can be freed
			 * independently of the domain.
			 */
			domain->name = bus_token ?
					kasprintf(GFP_KERNEL, "%s-%d",
						  fwid->name, bus_token) :
					kstrdup(fwid->name, GFP_KERNEL);
			if (!domain->name)
				return -ENOMEM;
			domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
			break;
		default:
			/* Borrow the fwid's name unless a bus token requires a suffix */
			domain->name = fwid->name;
			if (bus_token) {
				domain->name = kasprintf(GFP_KERNEL, "%s-%d",
							 fwid->name, bus_token);
				if (!domain->name)
					return -ENOMEM;
				domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
			}
			break;
		}
	} else if (is_of_node(fwnode) || is_acpi_device_node(fwnode) ||
		   is_software_node(fwnode)) {
		char *name;

		/*
		 * fwnode paths contain '/', which debugfs is legitimately
		 * unhappy about. Replace them with ':', which does
		 * the trick and is not as offensive as '\'...
		 */
		name = bus_token ?
			kasprintf(GFP_KERNEL, "%pfw-%d", fwnode, bus_token) :
			kasprintf(GFP_KERNEL, "%pfw", fwnode);
		if (!name)
			return -ENOMEM;

		domain->name = strreplace(name, '/', ':');
		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
	}

	if (!domain->name) {
		/* Last resort: synthesize a unique "unknown-N[-token]" name */
		if (fwnode)
			pr_err("Invalid fwnode type for irqdomain\n");
		domain->name = bus_token ?
				kasprintf(GFP_KERNEL, "unknown-%d-%d",
					  atomic_inc_return(&unknown_domains),
					  bus_token) :
				kasprintf(GFP_KERNEL, "unknown-%d",
					  atomic_inc_return(&unknown_domains));
		if (!domain->name)
			return -ENOMEM;
		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
	}

	return 0;
}
198 
/*
 * Allocate and initialize (but do not publish) an irq domain described by
 * @info. The linear revmap array is allocated inline after the structure
 * (hence struct_size() with info->size entries).
 *
 * Returns the new domain or an ERR_PTR() on invalid parameters / allocation
 * or naming failure.
 */
static struct irq_domain *__irq_domain_create(const struct irq_domain_info *info)
{
	struct irq_domain *domain;
	int err;

	/*
	 * A direct-mapped (nomap) domain cannot have a linear revmap, needs
	 * CONFIG_IRQ_DOMAIN_NOMAP, and must cover exactly hwirq_max irqs.
	 */
	if (WARN_ON((info->size && info->direct_max) ||
		    (!IS_ENABLED(CONFIG_IRQ_DOMAIN_NOMAP) && info->direct_max) ||
		    (info->direct_max && info->direct_max != info->hwirq_max)))
		return ERR_PTR(-EINVAL);

	domain = kzalloc_node(struct_size(domain, revmap, info->size),
			      GFP_KERNEL, of_node_to_nid(to_of_node(info->fwnode)));
	if (!domain)
		return ERR_PTR(-ENOMEM);

	err = irq_domain_set_name(domain, info->fwnode, info->bus_token);
	if (err) {
		kfree(domain);
		return ERR_PTR(err);
	}

	/* Hold a reference on the fwnode for the lifetime of the domain */
	domain->fwnode = fwnode_handle_get(info->fwnode);
	fwnode_dev_initialized(domain->fwnode, true);

	/* Fill structure */
	INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
	domain->ops = info->ops;
	domain->host_data = info->host_data;
	domain->bus_token = info->bus_token;
	domain->hwirq_max = info->hwirq_max;

	if (info->direct_max)
		domain->flags |= IRQ_DOMAIN_FLAG_NO_MAP;

	domain->revmap_size = info->size;

	/*
	 * Hierarchical domains use the domain lock of the root domain
	 * (innermost domain).
	 *
	 * For non-hierarchical domains (as for root domains), the root
	 * pointer is set to the domain itself so that &domain->root->mutex
	 * always points to the right lock.
	 */
	mutex_init(&domain->mutex);
	domain->root = domain;

	irq_domain_check_hierarchy(domain);

	return domain;
}
250 
/*
 * Make @domain globally visible: add its debugfs directory and put it on
 * irq_domain_list, both under irq_domain_mutex.
 */
static void __irq_domain_publish(struct irq_domain *domain)
{
	mutex_lock(&irq_domain_mutex);
	debugfs_add_domain_dir(domain);
	list_add(&domain->link, &irq_domain_list);
	mutex_unlock(&irq_domain_mutex);

	pr_debug("Added domain %s\n", domain->name);
}
260 
/*
 * Release the resources held by a (already unpublished) domain: drop the
 * fwnode reference, free the name if we allocated it, then the domain
 * itself.
 */
static void irq_domain_free(struct irq_domain *domain)
{
	fwnode_dev_initialized(domain->fwnode, false);
	fwnode_handle_put(domain->fwnode);
	/* Only names created by irq_domain_set_name()/update_bus_token() are ours to free */
	if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
		kfree(domain->name);
	kfree(domain);
}
269 
/**
 * irq_domain_instantiate() - Instantiate a new irq domain data structure
 * @info: Domain information pointer pointing to the information for this domain
 *
 * Creates the domain, optionally attaches it to a parent hierarchy, sets up
 * generic chips when requested, runs the caller's init() hook and finally
 * publishes the domain on the global list.
 *
 * Return: A pointer to the instantiated irq domain or an ERR_PTR value.
 */
struct irq_domain *irq_domain_instantiate(const struct irq_domain_info *info)
{
	struct irq_domain *domain;
	int err;

	domain = __irq_domain_create(info);
	if (IS_ERR(domain))
		return domain;

	domain->flags |= info->domain_flags;
	domain->exit = info->exit;

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	/* Child domains share the root domain's lock via domain->root */
	if (info->parent) {
		domain->root = info->parent->root;
		domain->parent = info->parent;
	}
#endif

	if (info->dgc_info) {
		err = irq_domain_alloc_generic_chips(domain, info->dgc_info);
		if (err)
			goto err_domain_free;
	}

	if (info->init) {
		err = info->init(domain);
		if (err)
			goto err_domain_gc_remove;
	}

	__irq_domain_publish(domain);

	return domain;

	/* Unwind in reverse order of construction */
err_domain_gc_remove:
	if (info->dgc_info)
		irq_domain_remove_generic_chips(domain);
err_domain_free:
	irq_domain_free(domain);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(irq_domain_instantiate);
319 
/**
 * irq_domain_remove() - Remove an irq domain.
 * @domain: domain to remove
 *
 * This routine is used to remove an irq domain. The caller must ensure
 * that all mappings within the domain have been disposed of prior to
 * use, depending on the revmap type.
 */
void irq_domain_remove(struct irq_domain *domain)
{
	/* Give the creator a chance to undo its init() work first */
	if (domain->exit)
		domain->exit(domain);

	mutex_lock(&irq_domain_mutex);
	debugfs_remove_domain_dir(domain);

	/* All mappings should have been disposed of by the caller */
	WARN_ON(!radix_tree_empty(&domain->revmap_tree));

	list_del(&domain->link);

	/*
	 * If the going away domain is the default one, reset it.
	 */
	if (unlikely(irq_default_domain == domain))
		irq_set_default_host(NULL);

	mutex_unlock(&irq_domain_mutex);

	if (domain->flags & IRQ_DOMAIN_FLAG_DESTROY_GC)
		irq_domain_remove_generic_chips(domain);

	pr_debug("Removed domain %s\n", domain->name);
	irq_domain_free(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_remove);
355 
/**
 * irq_domain_update_bus_token() - Update the domain's bus token and rename it
 * @domain:	domain to update
 * @bus_token:	new bus token
 *
 * Changes the bus token and re-derives the domain name as
 * "<oldname>-<token>", recreating the debugfs directory to match. A no-op
 * if the token is unchanged; on allocation failure the old name (and
 * debugfs entry) is kept, but the new bus token remains set.
 */
void irq_domain_update_bus_token(struct irq_domain *domain,
				 enum irq_domain_bus_token bus_token)
{
	char *name;

	if (domain->bus_token == bus_token)
		return;

	mutex_lock(&irq_domain_mutex);

	domain->bus_token = bus_token;

	name = kasprintf(GFP_KERNEL, "%s-%d", domain->name, bus_token);
	if (!name) {
		mutex_unlock(&irq_domain_mutex);
		return;
	}

	/* The debugfs dir is keyed by name; drop it before the rename */
	debugfs_remove_domain_dir(domain);

	/* Free the old name only if we own it; either way we own the new one */
	if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
		kfree(domain->name);
	else
		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;

	domain->name = name;
	debugfs_add_domain_dir(domain);

	mutex_unlock(&irq_domain_mutex);
}
EXPORT_SYMBOL_GPL(irq_domain_update_bus_token);
387 
/**
 * irq_domain_create_simple() - Register an irq_domain and optionally map a range of irqs
 * @fwnode: firmware node for the interrupt controller
 * @size: total number of irqs in mapping
 * @first_irq: first number of irq block assigned to the domain,
 *	pass zero to assign irqs on-the-fly. If first_irq is non-zero, then
 *	pre-map all of the irqs in the domain to virqs starting at first_irq.
 * @ops: domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates an irq_domain, and optionally if first_irq is positive then also
 * allocate irq_descs and map all of the hwirqs to virqs starting at first_irq.
 *
 * This is intended to implement the expected behaviour for most
 * interrupt controllers. If device tree is used, then first_irq will be 0 and
 * irqs get mapped dynamically on the fly. However, if the controller requires
 * static virq assignments (non-DT boot) then it will set that up correctly.
 */
struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode,
					    unsigned int size,
					    unsigned int first_irq,
					    const struct irq_domain_ops *ops,
					    void *host_data)
{
	struct irq_domain_info info = {
		.fwnode		= fwnode,
		.size		= size,
		.hwirq_max	= size,
		.ops		= ops,
		.host_data	= host_data,
	};
	struct irq_domain *domain;

	domain = irq_domain_instantiate(&info);
	if (IS_ERR(domain))
		return NULL;

	if (first_irq > 0) {
		if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
			/* attempt to allocate irq_descs */
			int rc = irq_alloc_descs(first_irq, first_irq, size,
						 of_node_to_nid(to_of_node(fwnode)));
			if (rc < 0)
				pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
					first_irq);
		}
		/* Statically map hwirqs [0..size) onto virqs [first_irq..) */
		irq_domain_associate_many(domain, first_irq, 0, size);
	}

	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_create_simple);
440 
/**
 * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in legacy mapping
 * @first_irq: first number of irq block assigned to the domain
 * @first_hwirq: first hwirq number to use for the translation. Should normally
 *               be '0', but a positive integer can be used if the effective
 *               hwirqs numbering does not begin at zero.
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Note: the map() callback will be called before this function returns
 * for all legacy interrupts except 0 (which is always the invalid irq for
 * a legacy controller).
 */
struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 irq_hw_number_t first_hwirq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	/* Thin OF wrapper around the fwnode-based creation path */
	return irq_domain_create_legacy(of_node_to_fwnode(of_node), size,
					first_irq, first_hwirq, ops, host_data);
}
EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
467 
/**
 * irq_domain_create_legacy() - fwnode variant of irq_domain_add_legacy()
 * @fwnode: firmware node for the interrupt controller
 * @size: total number of irqs in legacy mapping
 * @first_irq: first number of irq block assigned to the domain
 * @first_hwirq: first hwirq number to use for the translation
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Creates a domain sized to cover hwirqs [0, first_hwirq + size) and
 * immediately associates hwirqs [first_hwirq, first_hwirq + size) with
 * virqs starting at @first_irq. Returns NULL on failure.
 */
struct irq_domain *irq_domain_create_legacy(struct fwnode_handle *fwnode,
					 unsigned int size,
					 unsigned int first_irq,
					 irq_hw_number_t first_hwirq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain_info info = {
		.fwnode		= fwnode,
		.size		= first_hwirq + size,
		.hwirq_max	= first_hwirq + size,
		.ops		= ops,
		.host_data	= host_data,
	};
	struct irq_domain *domain;

	domain = irq_domain_instantiate(&info);
	if (IS_ERR(domain))
		return NULL;

	irq_domain_associate_many(domain, first_irq, first_hwirq, size);

	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_create_legacy);
493 
/**
 * irq_find_matching_fwspec() - Locates a domain for a given fwspec
 * @fwspec: FW specifier for an interrupt
 * @bus_token: domain-specific data
 *
 * Walks the global domain list and returns the first domain accepting the
 * fwspec, preferring the domain's select()/match() callbacks over a plain
 * fwnode + bus token comparison. Returns NULL when nothing matches.
 */
struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
					    enum irq_domain_bus_token bus_token)
{
	struct irq_domain *h, *found = NULL;
	struct fwnode_handle *fwnode = fwspec->fwnode;
	int rc;

	/*
	 * We might want to match the legacy controller last since
	 * it might potentially be set to match all interrupts in
	 * the absence of a device node. This isn't a problem so far
	 * yet though...
	 *
	 * bus_token == DOMAIN_BUS_ANY matches any domain, any other
	 * values must generate an exact match for the domain to be
	 * selected.
	 */
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(h, &irq_domain_list, link) {
		if (h->ops->select && bus_token != DOMAIN_BUS_ANY)
			rc = h->ops->select(h, fwspec, bus_token);
		else if (h->ops->match)
			rc = h->ops->match(h, to_of_node(fwnode), bus_token);
		else
			/* Default: same fwnode and a compatible bus token */
			rc = ((fwnode != NULL) && (h->fwnode == fwnode) &&
			      ((bus_token == DOMAIN_BUS_ANY) ||
			       (h->bus_token == bus_token)));

		if (rc) {
			found = h;
			break;
		}
	}
	mutex_unlock(&irq_domain_mutex);
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_matching_fwspec);
536 
/**
 * irq_set_default_host() - Set a "default" irq domain
 * @domain: default domain pointer
 *
 * For convenience, it's possible to set a "default" domain that will be used
 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
 * platforms that want to manipulate a few hard coded interrupt numbers that
 * aren't properly represented in the device-tree.
 */
void irq_set_default_host(struct irq_domain *domain)
{
	pr_debug("Default domain set to @0x%p\n", domain);

	irq_default_domain = domain;
}
EXPORT_SYMBOL_GPL(irq_set_default_host);
553 
/**
 * irq_get_default_host() - Retrieve the "default" irq domain
 *
 * Returns: the default domain, if any.
 *
 * Modern code should never use this. This should only be used on
 * systems that cannot implement a firmware->fwnode mapping (which
 * both DT and ACPI provide).
 */
struct irq_domain *irq_get_default_host(void)
{
	return irq_default_domain;
}
EXPORT_SYMBOL_GPL(irq_get_default_host);
568 
569 static bool irq_domain_is_nomap(struct irq_domain *domain)
570 {
571 	return IS_ENABLED(CONFIG_IRQ_DOMAIN_NOMAP) &&
572 	       (domain->flags & IRQ_DOMAIN_FLAG_NO_MAP);
573 }
574 
/*
 * Remove the hwirq -> irq_data reverse mapping. Small hwirqs live in the
 * inline linear revmap array, larger ones in the radix tree. Caller must
 * hold the root domain's mutex.
 */
static void irq_domain_clear_mapping(struct irq_domain *domain,
				     irq_hw_number_t hwirq)
{
	lockdep_assert_held(&domain->root->mutex);

	/* nomap domains keep no reverse map at all */
	if (irq_domain_is_nomap(domain))
		return;

	if (hwirq < domain->revmap_size)
		rcu_assign_pointer(domain->revmap[hwirq], NULL);
	else
		radix_tree_delete(&domain->revmap_tree, hwirq);
}
588 
/*
 * Install the hwirq -> @irq_data reverse mapping, using the linear revmap
 * for hwirqs below revmap_size and the radix tree otherwise. Caller must
 * hold the root domain's mutex.
 */
static void irq_domain_set_mapping(struct irq_domain *domain,
				   irq_hw_number_t hwirq,
				   struct irq_data *irq_data)
{
	/*
	 * This also makes sure that all domains point to the same root when
	 * called from irq_domain_insert_irq() for each domain in a hierarchy.
	 */
	lockdep_assert_held(&domain->root->mutex);

	if (irq_domain_is_nomap(domain))
		return;

	if (hwirq < domain->revmap_size)
		rcu_assign_pointer(domain->revmap[hwirq], irq_data);
	else
		radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
}
607 
/*
 * Undo irq_domain_associate(): quiesce the interrupt, notify the chip via
 * unmap(), detach the irq_data from the domain and clear the reverse map.
 * The teardown order matters: the irq is marked unrequestable and stripped
 * of chip/handler before synchronize_irq() guarantees no handler is still
 * running.
 */
static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
{
	struct irq_data *irq_data = irq_get_irq_data(irq);
	irq_hw_number_t hwirq;

	if (WARN(!irq_data || irq_data->domain != domain,
		 "virq%i doesn't exist; cannot disassociate\n", irq))
		return;

	hwirq = irq_data->hwirq;

	mutex_lock(&domain->root->mutex);

	irq_set_status_flags(irq, IRQ_NOREQUEST);

	/* remove chip and handler */
	irq_set_chip_and_handler(irq, NULL, NULL);

	/* Make sure it's completed */
	synchronize_irq(irq);

	/* Tell the PIC about it */
	if (domain->ops->unmap)
		domain->ops->unmap(domain, irq);
	smp_mb();

	irq_data->domain = NULL;
	irq_data->hwirq = 0;
	domain->mapcount--;

	/* Clear reverse map for this hwirq */
	irq_domain_clear_mapping(domain, hwirq);

	mutex_unlock(&domain->root->mutex);
}
643 
/*
 * Bind @virq to @hwirq within @domain: set up the irq_data, invoke the
 * domain's map() callback and install the reverse mapping. Caller must
 * hold the root domain's mutex.
 *
 * Returns 0 on success or a negative errno (invalid arguments, already
 * associated, or the map() callback rejected the mapping).
 */
static int irq_domain_associate_locked(struct irq_domain *domain, unsigned int virq,
				       irq_hw_number_t hwirq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	int ret;

	if (WARN(hwirq >= domain->hwirq_max,
		 "error: hwirq 0x%x is too large for %s\n", (int)hwirq, domain->name))
		return -EINVAL;
	if (WARN(!irq_data, "error: virq%i is not allocated", virq))
		return -EINVAL;
	if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
		return -EINVAL;

	/* Link first so the map() callback sees a fully-formed irq_data */
	irq_data->hwirq = hwirq;
	irq_data->domain = domain;
	if (domain->ops->map) {
		ret = domain->ops->map(domain, virq, hwirq);
		if (ret != 0) {
			/*
			 * If map() returns -EPERM, this interrupt is protected
			 * by the firmware or some other service and shall not
			 * be mapped. Don't bother telling the user about it.
			 */
			if (ret != -EPERM) {
				pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
				       domain->name, hwirq, virq, ret);
			}
			/* Roll back the partial association */
			irq_data->domain = NULL;
			irq_data->hwirq = 0;
			return ret;
		}
	}

	domain->mapcount++;
	irq_domain_set_mapping(domain, hwirq, irq_data);

	/* The interrupt is now usable by drivers */
	irq_clear_status_flags(virq, IRQ_NOREQUEST);

	return 0;
}
685 
/*
 * Locked wrapper around irq_domain_associate_locked(): bind @virq to
 * @hwirq under the root domain's mutex.
 */
int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq)
{
	int ret;

	mutex_lock(&domain->root->mutex);
	ret = irq_domain_associate_locked(domain, virq, hwirq);
	mutex_unlock(&domain->root->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(irq_domain_associate);
698 
/**
 * irq_domain_associate_many() - Associate a consecutive range of hwirqs/virqs
 * @domain: domain owning the interrupts
 * @irq_base: first Linux irq number of the range
 * @hwirq_base: first hardware irq number of the range
 * @count: number of consecutive interrupts to associate
 *
 * Calls irq_domain_associate() for each (irq_base + i, hwirq_base + i)
 * pair; individual failures are reported by that function but do not stop
 * the remaining associations.
 */
void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
			       irq_hw_number_t hwirq_base, int count)
{
	struct device_node *of_node;
	int i;

	of_node = irq_domain_get_of_node(domain);
	pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
		of_node_full_name(of_node), irq_base, (int)hwirq_base, count);

	for (i = 0; i < count; i++)
		irq_domain_associate(domain, irq_base + i, hwirq_base + i);
}
EXPORT_SYMBOL_GPL(irq_domain_associate_many);
713 
#ifdef CONFIG_IRQ_DOMAIN_NOMAP
/**
 * irq_create_direct_mapping() - Allocate an irq for direct mapping
 * @domain: domain to allocate the irq for or NULL for default domain
 *
 * This routine is used for irq controllers which can choose the hardware
 * interrupt numbers they generate. In such a case it's simplest to use
 * the linux irq as the hardware interrupt number. It still uses the linear
 * or radix tree to store the mapping, but the irq controller can optimize
 * the revmap path by using the hwirq directly.
 */
unsigned int irq_create_direct_mapping(struct irq_domain *domain)
{
	struct device_node *of_node;
	unsigned int virq;

	if (domain == NULL)
		domain = irq_default_domain;

	of_node = irq_domain_get_of_node(domain);
	/* Start at 1: virq 0 is the invalid interrupt */
	virq = irq_alloc_desc_from(1, of_node_to_nid(of_node));
	if (!virq) {
		pr_debug("create_direct virq allocation failed\n");
		return 0;
	}
	/* virq doubles as the hwirq, so it must fit in the domain's range */
	if (virq >= domain->hwirq_max) {
		pr_err("ERROR: no free irqs available below %lu maximum\n",
			domain->hwirq_max);
		irq_free_desc(virq);
		return 0;
	}
	pr_debug("create_direct obtained virq %d\n", virq);

	/* Identity-map: hwirq == virq */
	if (irq_domain_associate(domain, virq, virq)) {
		irq_free_desc(virq);
		return 0;
	}

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
#endif
756 
/*
 * Allocate a fresh virq descriptor and associate it with @hwirq, with the
 * root domain's mutex already held. Returns the new Linux irq number or 0
 * on failure (the descriptor is released again if association fails).
 */
static unsigned int irq_create_mapping_affinity_locked(struct irq_domain *domain,
						       irq_hw_number_t hwirq,
						       const struct irq_affinity_desc *affinity)
{
	struct device_node *of_node = irq_domain_get_of_node(domain);
	int virq;

	pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);

	/* Allocate a virtual interrupt number */
	virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node),
				      affinity);
	if (virq <= 0) {
		pr_debug("-> virq allocation failed\n");
		return 0;
	}

	if (irq_domain_associate_locked(domain, virq, hwirq)) {
		irq_free_desc(virq);
		return 0;
	}

	pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
		hwirq, of_node_full_name(of_node), virq);

	return virq;
}
784 
/**
 * irq_create_mapping_affinity() - Map a hardware interrupt into linux irq space
 * @domain: domain owning this hardware interrupt or NULL for default domain
 * @hwirq: hardware irq number in that domain space
 * @affinity: irq affinity
 *
 * Only one mapping per hardware interrupt is permitted. Returns a linux
 * irq number.
 * If the sense/trigger is to be specified, set_irq_type() should be called
 * on the number returned from that call.
 */
unsigned int irq_create_mapping_affinity(struct irq_domain *domain,
					 irq_hw_number_t hwirq,
					 const struct irq_affinity_desc *affinity)
{
	int virq;

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL) {
		WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq);
		return 0;
	}

	mutex_lock(&domain->root->mutex);

	/* Check if mapping already exists */
	virq = irq_find_mapping(domain, hwirq);
	if (virq) {
		pr_debug("existing mapping on virq %d\n", virq);
		goto out;
	}

	/* No existing mapping: create one under the same lock */
	virq = irq_create_mapping_affinity_locked(domain, hwirq, affinity);
out:
	mutex_unlock(&domain->root->mutex);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping_affinity);
826 
/*
 * Translate @fwspec into a hardware irq number and trigger type, preferring
 * the modern translate() callback (hierarchy-capable) over the OF-style
 * xlate() callback. With neither callback, the first parameter cell is
 * taken as the hwirq and *type is left unchanged.
 */
static int irq_domain_translate(struct irq_domain *d,
				struct irq_fwspec *fwspec,
				irq_hw_number_t *hwirq, unsigned int *type)
{
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	if (d->ops->translate)
		return d->ops->translate(d, fwspec, hwirq, type);
#endif
	if (d->ops->xlate)
		return d->ops->xlate(d, to_of_node(fwspec->fwnode),
				     fwspec->param, fwspec->param_count,
				     hwirq, type);

	/* If domain has no translation, then we assume interrupt line */
	*hwirq = fwspec->param[0];
	return 0;
}
844 
845 void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args,
846 			       unsigned int count, struct irq_fwspec *fwspec)
847 {
848 	int i;
849 
850 	fwspec->fwnode = of_node_to_fwnode(np);
851 	fwspec->param_count = count;
852 
853 	for (i = 0; i < count; i++)
854 		fwspec->param[i] = args[i];
855 }
856 EXPORT_SYMBOL_GPL(of_phandle_args_to_fwspec);
857 
/**
 * irq_create_fwspec_mapping() - Map an interrupt described by a firmware spec
 * @fwspec: firmware interrupt specifier (fwnode plus parameter cells)
 *
 * Locate the matching domain, translate @fwspec to a (hwirq, type) pair,
 * and return an existing mapping when one exists (reconciling the trigger
 * type) or create a new one — via the hierarchy/MSI allocators or the plain
 * mapping path as appropriate. Returns the Linux irq number, or 0 on any
 * failure (no domain, translation error, type conflict, allocation failure).
 */
unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
{
	struct irq_domain *domain;
	struct irq_data *irq_data;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	int virq;

	/* Prefer an exactly-matching wired domain, then any match; no fwnode -> default */
	if (fwspec->fwnode) {
		domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_WIRED);
		if (!domain)
			domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_ANY);
	} else {
		domain = irq_default_domain;
	}

	if (!domain) {
		pr_warn("no irq domain found for %s !\n",
			of_node_full_name(to_of_node(fwspec->fwnode)));
		return 0;
	}

	if (irq_domain_translate(domain, fwspec, &hwirq, &type))
		return 0;

	/*
	 * WARN if the irqchip returns a type with bits
	 * outside the sense mask set and clear these bits.
	 */
	if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK))
		type &= IRQ_TYPE_SENSE_MASK;

	mutex_lock(&domain->root->mutex);

	/*
	 * If we've already configured this interrupt,
	 * don't do it again, or hell will break loose.
	 */
	virq = irq_find_mapping(domain, hwirq);
	if (virq) {
		/*
		 * If the trigger type is not specified or matches the
		 * current trigger type then we are done so return the
		 * interrupt number.
		 */
		if (type == IRQ_TYPE_NONE || type == irq_get_trigger_type(virq))
			goto out;

		/*
		 * If the trigger type has not been set yet, then set
		 * it now and return the interrupt number.
		 */
		if (irq_get_trigger_type(virq) == IRQ_TYPE_NONE) {
			irq_data = irq_get_irq_data(virq);
			if (!irq_data) {
				virq = 0;
				goto out;
			}

			irqd_set_trigger_type(irq_data, type);
			goto out;
		}

		/* Existing mapping with a conflicting, already-set trigger type */
		pr_warn("type mismatch, failed to map hwirq-%lu for %s!\n",
			hwirq, of_node_full_name(to_of_node(fwspec->fwnode)));
		virq = 0;
		goto out;
	}

	if (irq_domain_is_hierarchy(domain)) {
		if (irq_domain_is_msi_device(domain)) {
			/* The MSI allocator takes the lock itself; drop it across the call */
			mutex_unlock(&domain->root->mutex);
			virq = msi_device_domain_alloc_wired(domain, hwirq, type);
			mutex_lock(&domain->root->mutex);
		} else
			virq = irq_domain_alloc_irqs_locked(domain, -1, 1, NUMA_NO_NODE,
							    fwspec, false, NULL);
		if (virq <= 0) {
			virq = 0;
			goto out;
		}
	} else {
		/* Create mapping */
		virq = irq_create_mapping_affinity_locked(domain, hwirq, NULL);
		if (!virq)
			goto out;
	}

	irq_data = irq_get_irq_data(virq);
	if (WARN_ON(!irq_data)) {
		virq = 0;
		goto out;
	}

	/* Store trigger type */
	irqd_set_trigger_type(irq_data, type);
out:
	mutex_unlock(&domain->root->mutex);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_fwspec_mapping);
960 
961 unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
962 {
963 	struct irq_fwspec fwspec;
964 
965 	of_phandle_args_to_fwspec(irq_data->np, irq_data->args,
966 				  irq_data->args_count, &fwspec);
967 
968 	return irq_create_fwspec_mapping(&fwspec);
969 }
970 EXPORT_SYMBOL_GPL(irq_create_of_mapping);
971 
972 /**
973  * irq_dispose_mapping() - Unmap an interrupt
974  * @virq: linux irq number of the interrupt to unmap
975  */
976 void irq_dispose_mapping(unsigned int virq)
977 {
978 	struct irq_data *irq_data;
979 	struct irq_domain *domain;
980 
981 	irq_data = virq ? irq_get_irq_data(virq) : NULL;
982 	if (!irq_data)
983 		return;
984 
985 	domain = irq_data->domain;
986 	if (WARN_ON(domain == NULL))
987 		return;
988 
989 	if (irq_domain_is_hierarchy(domain)) {
990 		irq_domain_free_one_irq(domain, virq);
991 	} else {
992 		irq_domain_disassociate(domain, virq);
993 		irq_free_desc(virq);
994 	}
995 }
996 EXPORT_SYMBOL_GPL(irq_dispose_mapping);
997 
/**
 * __irq_resolve_mapping() - Find a linux irq from a hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 * @irq: optional pointer to return the Linux irq if required
 *
 * Looks @hwirq up in the domain's reverse map (linear array or radix tree,
 * both read under RCU) — or, for nomap domains, checks that the identity
 * mapping exists.
 *
 * Returns the interrupt descriptor.
 */
struct irq_desc *__irq_resolve_mapping(struct irq_domain *domain,
				       irq_hw_number_t hwirq,
				       unsigned int *irq)
{
	struct irq_desc *desc = NULL;
	struct irq_data *data;

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL)
		return desc;

	if (irq_domain_is_nomap(domain)) {
		/* nomap: hwirq == virq, just validate that the mapping exists */
		if (hwirq < domain->hwirq_max) {
			data = irq_domain_get_irq_data(domain, hwirq);
			if (data && data->hwirq == hwirq)
				desc = irq_data_to_desc(data);
			if (irq && desc)
				*irq = hwirq;
		}

		return desc;
	}

	rcu_read_lock();
	/* Check if the hwirq is in the linear revmap. */
	if (hwirq < domain->revmap_size)
		data = rcu_dereference(domain->revmap[hwirq]);
	else
		data = radix_tree_lookup(&domain->revmap_tree, hwirq);

	if (likely(data)) {
		desc = irq_data_to_desc(data);
		if (irq)
			*irq = data->irq;
	}

	rcu_read_unlock();
	return desc;
}
EXPORT_SYMBOL_GPL(__irq_resolve_mapping);
1048 
1049 /**
1050  * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
1051  * @d:		Interrupt domain involved in the translation
1052  * @ctrlr:	The device tree node for the device whose interrupt is translated
1053  * @intspec:	The interrupt specifier data from the device tree
1054  * @intsize:	The number of entries in @intspec
1055  * @out_hwirq:	Pointer to storage for the hardware interrupt number
1056  * @out_type:	Pointer to storage for the interrupt type
1057  *
1058  * Device Tree IRQ specifier translation function which works with one cell
1059  * bindings where the cell value maps directly to the hwirq number.
1060  */
1061 int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
1062 			     const u32 *intspec, unsigned int intsize,
1063 			     unsigned long *out_hwirq, unsigned int *out_type)
1064 {
1065 	if (WARN_ON(intsize < 1))
1066 		return -EINVAL;
1067 	*out_hwirq = intspec[0];
1068 	*out_type = IRQ_TYPE_NONE;
1069 	return 0;
1070 }
1071 EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);
1072 
1073 /**
1074  * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
1075  * @d:		Interrupt domain involved in the translation
1076  * @ctrlr:	The device tree node for the device whose interrupt is translated
1077  * @intspec:	The interrupt specifier data from the device tree
1078  * @intsize:	The number of entries in @intspec
1079  * @out_hwirq:	Pointer to storage for the hardware interrupt number
1080  * @out_type:	Pointer to storage for the interrupt type
1081  *
1082  * Device Tree IRQ specifier translation function which works with two cell
1083  * bindings where the cell values map directly to the hwirq number
1084  * and linux irq flags.
1085  */
1086 int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
1087 			const u32 *intspec, unsigned int intsize,
1088 			irq_hw_number_t *out_hwirq, unsigned int *out_type)
1089 {
1090 	struct irq_fwspec fwspec;
1091 
1092 	of_phandle_args_to_fwspec(ctrlr, intspec, intsize, &fwspec);
1093 	return irq_domain_translate_twocell(d, &fwspec, out_hwirq, out_type);
1094 }
1095 EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);
1096 
1097 /**
1098  * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
1099  * @d:		Interrupt domain involved in the translation
1100  * @ctrlr:	The device tree node for the device whose interrupt is translated
1101  * @intspec:	The interrupt specifier data from the device tree
1102  * @intsize:	The number of entries in @intspec
1103  * @out_hwirq:	Pointer to storage for the hardware interrupt number
1104  * @out_type:	Pointer to storage for the interrupt type
1105  *
1106  * Device Tree IRQ specifier translation function which works with either one
1107  * or two cell bindings where the cell values map directly to the hwirq number
1108  * and linux irq flags.
1109  *
1110  * Note: don't use this function unless your interrupt controller explicitly
1111  * supports both one and two cell bindings.  For the majority of controllers
1112  * the _onecell() or _twocell() variants above should be used.
1113  */
1114 int irq_domain_xlate_onetwocell(struct irq_domain *d,
1115 				struct device_node *ctrlr,
1116 				const u32 *intspec, unsigned int intsize,
1117 				unsigned long *out_hwirq, unsigned int *out_type)
1118 {
1119 	if (WARN_ON(intsize < 1))
1120 		return -EINVAL;
1121 	*out_hwirq = intspec[0];
1122 	if (intsize > 1)
1123 		*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
1124 	else
1125 		*out_type = IRQ_TYPE_NONE;
1126 	return 0;
1127 }
1128 EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);
1129 
/* Default ops for simple domains: handles one- and two-cell DT bindings */
const struct irq_domain_ops irq_domain_simple_ops = {
	.xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
1134 
1135 /**
1136  * irq_domain_translate_onecell() - Generic translate for direct one cell
1137  * bindings
1138  * @d:		Interrupt domain involved in the translation
1139  * @fwspec:	The firmware interrupt specifier to translate
1140  * @out_hwirq:	Pointer to storage for the hardware interrupt number
1141  * @out_type:	Pointer to storage for the interrupt type
1142  */
1143 int irq_domain_translate_onecell(struct irq_domain *d,
1144 				 struct irq_fwspec *fwspec,
1145 				 unsigned long *out_hwirq,
1146 				 unsigned int *out_type)
1147 {
1148 	if (WARN_ON(fwspec->param_count < 1))
1149 		return -EINVAL;
1150 	*out_hwirq = fwspec->param[0];
1151 	*out_type = IRQ_TYPE_NONE;
1152 	return 0;
1153 }
1154 EXPORT_SYMBOL_GPL(irq_domain_translate_onecell);
1155 
1156 /**
1157  * irq_domain_translate_twocell() - Generic translate for direct two cell
1158  * bindings
1159  * @d:		Interrupt domain involved in the translation
1160  * @fwspec:	The firmware interrupt specifier to translate
1161  * @out_hwirq:	Pointer to storage for the hardware interrupt number
1162  * @out_type:	Pointer to storage for the interrupt type
1163  *
1164  * Device Tree IRQ specifier translation function which works with two cell
1165  * bindings where the cell values map directly to the hwirq number
1166  * and linux irq flags.
1167  */
1168 int irq_domain_translate_twocell(struct irq_domain *d,
1169 				 struct irq_fwspec *fwspec,
1170 				 unsigned long *out_hwirq,
1171 				 unsigned int *out_type)
1172 {
1173 	if (WARN_ON(fwspec->param_count < 2))
1174 		return -EINVAL;
1175 	*out_hwirq = fwspec->param[0];
1176 	*out_type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
1177 	return 0;
1178 }
1179 EXPORT_SYMBOL_GPL(irq_domain_translate_twocell);
1180 
1181 int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq,
1182 			   int node, const struct irq_affinity_desc *affinity)
1183 {
1184 	unsigned int hint;
1185 
1186 	if (virq >= 0) {
1187 		virq = __irq_alloc_descs(virq, virq, cnt, node, THIS_MODULE,
1188 					 affinity);
1189 	} else {
1190 		hint = hwirq % nr_irqs;
1191 		if (hint == 0)
1192 			hint++;
1193 		virq = __irq_alloc_descs(-1, hint, cnt, node, THIS_MODULE,
1194 					 affinity);
1195 		if (virq <= 0 && hint > 1) {
1196 			virq = __irq_alloc_descs(-1, 1, cnt, node, THIS_MODULE,
1197 						 affinity);
1198 		}
1199 	}
1200 
1201 	return virq;
1202 }
1203 
1204 /**
1205  * irq_domain_reset_irq_data - Clear hwirq, chip and chip_data in @irq_data
1206  * @irq_data:	The pointer to irq_data
1207  */
1208 void irq_domain_reset_irq_data(struct irq_data *irq_data)
1209 {
1210 	irq_data->hwirq = 0;
1211 	irq_data->chip = &no_irq_chip;
1212 	irq_data->chip_data = NULL;
1213 }
1214 EXPORT_SYMBOL_GPL(irq_domain_reset_irq_data);
1215 
1216 #ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
1217 /**
1218  * irq_domain_create_hierarchy - Add a irqdomain into the hierarchy
1219  * @parent:	Parent irq domain to associate with the new domain
1220  * @flags:	Irq domain flags associated to the domain
1221  * @size:	Size of the domain. See below
1222  * @fwnode:	Optional fwnode of the interrupt controller
1223  * @ops:	Pointer to the interrupt domain callbacks
1224  * @host_data:	Controller private data pointer
1225  *
1226  * If @size is 0 a tree domain is created, otherwise a linear domain.
1227  *
1228  * If successful the parent is associated to the new domain and the
1229  * domain flags are set.
1230  * Returns pointer to IRQ domain, or NULL on failure.
1231  */
1232 struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
1233 					    unsigned int flags,
1234 					    unsigned int size,
1235 					    struct fwnode_handle *fwnode,
1236 					    const struct irq_domain_ops *ops,
1237 					    void *host_data)
1238 {
1239 	struct irq_domain_info info = {
1240 		.fwnode		= fwnode,
1241 		.size		= size,
1242 		.hwirq_max	= size,
1243 		.ops		= ops,
1244 		.host_data	= host_data,
1245 		.domain_flags	= flags,
1246 		.parent		= parent,
1247 	};
1248 	struct irq_domain *d;
1249 
1250 	if (!info.size)
1251 		info.hwirq_max = ~0U;
1252 
1253 	d = irq_domain_instantiate(&info);
1254 	return IS_ERR(d) ? NULL : d;
1255 }
1256 EXPORT_SYMBOL_GPL(irq_domain_create_hierarchy);
1257 
/*
 * Publish @virq in the reverse map of every domain level of its hierarchy
 * and mark the interrupt as requestable.
 */
static void irq_domain_insert_irq(int virq)
{
	struct irq_data *data;

	for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
		struct irq_domain *domain = data->domain;

		domain->mapcount++;
		irq_domain_set_mapping(domain, data->hwirq, data);
	}

	/* The interrupt is fully set up; allow request_irq() on it */
	irq_clear_status_flags(virq, IRQ_NOREQUEST);
}
1271 
/*
 * Remove @virq from the reverse maps of all levels of its hierarchy.
 * The interrupt is quiesced before the mappings are torn down.
 */
static void irq_domain_remove_irq(int virq)
{
	struct irq_data *data;

	/* Block new requests and detach the flow handler first */
	irq_set_status_flags(virq, IRQ_NOREQUEST);
	irq_set_chip_and_handler(virq, NULL, NULL);
	/* Wait for in-flight handlers before clearing the mappings */
	synchronize_irq(virq);
	smp_mb();

	for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
		struct irq_domain *domain = data->domain;
		irq_hw_number_t hwirq = data->hwirq;

		domain->mapcount--;
		irq_domain_clear_mapping(domain, hwirq);
	}
}
1289 
1290 static struct irq_data *irq_domain_insert_irq_data(struct irq_domain *domain,
1291 						   struct irq_data *child)
1292 {
1293 	struct irq_data *irq_data;
1294 
1295 	irq_data = kzalloc_node(sizeof(*irq_data), GFP_KERNEL,
1296 				irq_data_get_node(child));
1297 	if (irq_data) {
1298 		child->parent_data = irq_data;
1299 		irq_data->irq = child->irq;
1300 		irq_data->common = child->common;
1301 		irq_data->domain = domain;
1302 	}
1303 
1304 	return irq_data;
1305 }
1306 
1307 static void __irq_domain_free_hierarchy(struct irq_data *irq_data)
1308 {
1309 	struct irq_data *tmp;
1310 
1311 	while (irq_data) {
1312 		tmp = irq_data;
1313 		irq_data = irq_data->parent_data;
1314 		kfree(tmp);
1315 	}
1316 }
1317 
1318 static void irq_domain_free_irq_data(unsigned int virq, unsigned int nr_irqs)
1319 {
1320 	struct irq_data *irq_data, *tmp;
1321 	int i;
1322 
1323 	for (i = 0; i < nr_irqs; i++) {
1324 		irq_data = irq_get_irq_data(virq + i);
1325 		tmp = irq_data->parent_data;
1326 		irq_data->parent_data = NULL;
1327 		irq_data->domain = NULL;
1328 
1329 		__irq_domain_free_hierarchy(tmp);
1330 	}
1331 }
1332 
1333 /**
1334  * irq_domain_disconnect_hierarchy - Mark the first unused level of a hierarchy
1335  * @domain:	IRQ domain from which the hierarchy is to be disconnected
1336  * @virq:	IRQ number where the hierarchy is to be trimmed
1337  *
1338  * Marks the @virq level belonging to @domain as disconnected.
1339  * Returns -EINVAL if @virq doesn't have a valid irq_data pointing
1340  * to @domain.
1341  *
1342  * Its only use is to be able to trim levels of hierarchy that do not
1343  * have any real meaning for this interrupt, and that the driver marks
1344  * as such from its .alloc() callback.
1345  */
1346 int irq_domain_disconnect_hierarchy(struct irq_domain *domain,
1347 				    unsigned int virq)
1348 {
1349 	struct irq_data *irqd;
1350 
1351 	irqd = irq_domain_get_irq_data(domain, virq);
1352 	if (!irqd)
1353 		return -EINVAL;
1354 
1355 	irqd->chip = ERR_PTR(-ENOTCONN);
1356 	return 0;
1357 }
1358 EXPORT_SYMBOL_GPL(irq_domain_disconnect_hierarchy);
1359 
/*
 * Drop the hierarchy levels below a trim marker (an ERR_PTR(-ENOTCONN)
 * chip installed by irq_domain_disconnect_hierarchy()), after validating
 * that the irq_data chain is well formed. Returns 0 on success (including
 * when there is nothing to trim) or -EINVAL on a malformed hierarchy.
 */
static int irq_domain_trim_hierarchy(unsigned int virq)
{
	struct irq_data *tail, *irqd, *irq_data;

	irq_data = irq_get_irq_data(virq);
	tail = NULL;

	/* The first entry must have a valid irqchip */
	if (!irq_data->chip || IS_ERR(irq_data->chip))
		return -EINVAL;

	/*
	 * Validate that the irq_data chain is sane in the presence of
	 * a hierarchy trimming marker.
	 */
	for (irqd = irq_data->parent_data; irqd; irq_data = irqd, irqd = irqd->parent_data) {
		/* Can't have a valid irqchip after a trim marker */
		if (irqd->chip && tail)
			return -EINVAL;

		/* Can't have an empty irqchip before a trim marker */
		if (!irqd->chip && !tail)
			return -EINVAL;

		if (IS_ERR(irqd->chip)) {
			/* Only -ENOTCONN is a valid trim marker */
			if (PTR_ERR(irqd->chip) != -ENOTCONN)
				return -EINVAL;

			/* Remember the last level above the marker */
			tail = irq_data;
		}
	}

	/* No trim marker, nothing to do */
	if (!tail)
		return 0;

	pr_info("IRQ%d: trimming hierarchy from %s\n",
		virq, tail->parent_data->domain->name);

	/* Sever the inner part of the hierarchy...  */
	irqd = tail;
	tail = tail->parent_data;
	irqd->parent_data = NULL;
	__irq_domain_free_hierarchy(tail);

	return 0;
}
1408 
/*
 * Allocate and link one irq_data per parent domain level for each of the
 * @nr_irqs interrupts starting at @virq. On allocation failure everything
 * built so far is torn down.
 */
static int irq_domain_alloc_irq_data(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irq_data;
	struct irq_domain *parent;
	int i;

	/* The outermost irq_data is embedded in struct irq_desc */
	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_get_irq_data(virq + i);
		irq_data->domain = domain;

		for (parent = domain->parent; parent; parent = parent->parent) {
			irq_data = irq_domain_insert_irq_data(parent, irq_data);
			if (!irq_data) {
				/* i + 1 also frees the partially built chain */
				irq_domain_free_irq_data(virq, i + 1);
				return -ENOMEM;
			}
		}
	}

	return 0;
}
1432 
1433 /**
1434  * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
1435  * @domain:	domain to match
1436  * @virq:	IRQ number to get irq_data
1437  */
1438 struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
1439 					 unsigned int virq)
1440 {
1441 	struct irq_data *irq_data;
1442 
1443 	for (irq_data = irq_get_irq_data(virq); irq_data;
1444 	     irq_data = irq_data->parent_data)
1445 		if (irq_data->domain == domain)
1446 			return irq_data;
1447 
1448 	return NULL;
1449 }
1450 EXPORT_SYMBOL_GPL(irq_domain_get_irq_data);
1451 
1452 /**
1453  * irq_domain_set_hwirq_and_chip - Set hwirq and irqchip of @virq at @domain
1454  * @domain:	Interrupt domain to match
1455  * @virq:	IRQ number
1456  * @hwirq:	The hwirq number
1457  * @chip:	The associated interrupt chip
1458  * @chip_data:	The associated chip data
1459  */
1460 int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq,
1461 				  irq_hw_number_t hwirq,
1462 				  const struct irq_chip *chip,
1463 				  void *chip_data)
1464 {
1465 	struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);
1466 
1467 	if (!irq_data)
1468 		return -ENOENT;
1469 
1470 	irq_data->hwirq = hwirq;
1471 	irq_data->chip = (struct irq_chip *)(chip ? chip : &no_irq_chip);
1472 	irq_data->chip_data = chip_data;
1473 
1474 	return 0;
1475 }
1476 EXPORT_SYMBOL_GPL(irq_domain_set_hwirq_and_chip);
1477 
1478 /**
1479  * irq_domain_set_info - Set the complete data for a @virq in @domain
1480  * @domain:		Interrupt domain to match
1481  * @virq:		IRQ number
1482  * @hwirq:		The hardware interrupt number
1483  * @chip:		The associated interrupt chip
1484  * @chip_data:		The associated interrupt chip data
1485  * @handler:		The interrupt flow handler
1486  * @handler_data:	The interrupt flow handler data
1487  * @handler_name:	The interrupt handler name
1488  */
1489 void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
1490 			 irq_hw_number_t hwirq, const struct irq_chip *chip,
1491 			 void *chip_data, irq_flow_handler_t handler,
1492 			 void *handler_data, const char *handler_name)
1493 {
1494 	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, chip, chip_data);
1495 	__irq_set_handler(virq, handler, 0, handler_name);
1496 	irq_set_handler_data(virq, handler_data);
1497 }
1498 EXPORT_SYMBOL(irq_domain_set_info);
1499 
1500 /**
1501  * irq_domain_free_irqs_common - Clear irq_data and free the parent
1502  * @domain:	Interrupt domain to match
1503  * @virq:	IRQ number to start with
1504  * @nr_irqs:	The number of irqs to free
1505  */
1506 void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq,
1507 				 unsigned int nr_irqs)
1508 {
1509 	struct irq_data *irq_data;
1510 	int i;
1511 
1512 	for (i = 0; i < nr_irqs; i++) {
1513 		irq_data = irq_domain_get_irq_data(domain, virq + i);
1514 		if (irq_data)
1515 			irq_domain_reset_irq_data(irq_data);
1516 	}
1517 	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
1518 }
1519 EXPORT_SYMBOL_GPL(irq_domain_free_irqs_common);
1520 
1521 /**
1522  * irq_domain_free_irqs_top - Clear handler and handler data, clear irqdata and free parent
1523  * @domain:	Interrupt domain to match
1524  * @virq:	IRQ number to start with
1525  * @nr_irqs:	The number of irqs to free
1526  */
1527 void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq,
1528 			      unsigned int nr_irqs)
1529 {
1530 	int i;
1531 
1532 	for (i = 0; i < nr_irqs; i++) {
1533 		irq_set_handler_data(virq + i, NULL);
1534 		irq_set_handler(virq + i, NULL);
1535 	}
1536 	irq_domain_free_irqs_common(domain, virq, nr_irqs);
1537 }
1538 
/*
 * Free @nr_irqs interrupts via the domain's ->free() callback. The
 * interrupts are freed one by one because a trimmed hierarchy may not
 * have an irq_data at this level for every interrupt in the range.
 */
static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain,
					   unsigned int irq_base,
					   unsigned int nr_irqs)
{
	unsigned int i;

	if (!domain->ops->free)
		return;

	for (i = 0; i < nr_irqs; i++) {
		if (irq_domain_get_irq_data(domain, irq_base + i))
			domain->ops->free(domain, irq_base + i, 1);
	}
}
1553 
1554 int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
1555 				    unsigned int irq_base,
1556 				    unsigned int nr_irqs, void *arg)
1557 {
1558 	if (!domain->ops->alloc) {
1559 		pr_debug("domain->ops->alloc() is NULL\n");
1560 		return -ENOSYS;
1561 	}
1562 
1563 	return domain->ops->alloc(domain, irq_base, nr_irqs, arg);
1564 }
1565 
/*
 * Allocate descriptors and the per-level irq_data for @nr_irqs interrupts
 * and run the domain's hierarchical ->alloc() callbacks. Called with the
 * root domain mutex held. Returns the first Linux irq number on success
 * or a negative error code; on failure all partial work is undone.
 */
static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base,
					unsigned int nr_irqs, int node, void *arg,
					bool realloc, const struct irq_affinity_desc *affinity)
{
	int i, ret, virq;

	if (realloc && irq_base >= 0) {
		/* Descriptors already exist (legacy IRQs); reuse them */
		virq = irq_base;
	} else {
		virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node,
					      affinity);
		if (virq < 0) {
			pr_debug("cannot allocate IRQ(base %d, count %d)\n",
				 irq_base, nr_irqs);
			return virq;
		}
	}

	if (irq_domain_alloc_irq_data(domain, virq, nr_irqs)) {
		pr_debug("cannot allocate memory for IRQ%d\n", virq);
		ret = -ENOMEM;
		goto out_free_desc;
	}

	ret = irq_domain_alloc_irqs_hierarchy(domain, virq, nr_irqs, arg);
	if (ret < 0)
		goto out_free_irq_data;

	/* Drop hierarchy levels disconnected by the ->alloc() callbacks */
	for (i = 0; i < nr_irqs; i++) {
		ret = irq_domain_trim_hierarchy(virq + i);
		if (ret)
			goto out_free_irq_data;
	}

	/* Publish the new mappings in the reverse maps */
	for (i = 0; i < nr_irqs; i++)
		irq_domain_insert_irq(virq + i);

	return virq;

out_free_irq_data:
	irq_domain_free_irq_data(virq, nr_irqs);
out_free_desc:
	irq_free_descs(virq, nr_irqs);
	return ret;
}
1611 
1612 /**
1613  * __irq_domain_alloc_irqs - Allocate IRQs from domain
1614  * @domain:	domain to allocate from
1615  * @irq_base:	allocate specified IRQ number if irq_base >= 0
1616  * @nr_irqs:	number of IRQs to allocate
1617  * @node:	NUMA node id for memory allocation
1618  * @arg:	domain specific argument
1619  * @realloc:	IRQ descriptors have already been allocated if true
1620  * @affinity:	Optional irq affinity mask for multiqueue devices
1621  *
1622  * Allocate IRQ numbers and initialized all data structures to support
1623  * hierarchy IRQ domains.
1624  * Parameter @realloc is mainly to support legacy IRQs.
1625  * Returns error code or allocated IRQ number
1626  *
1627  * The whole process to setup an IRQ has been split into two steps.
1628  * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ
1629  * descriptor and required hardware resources. The second step,
1630  * irq_domain_activate_irq(), is to program the hardware with preallocated
1631  * resources. In this way, it's easier to rollback when failing to
1632  * allocate resources.
1633  */
1634 int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
1635 			    unsigned int nr_irqs, int node, void *arg,
1636 			    bool realloc, const struct irq_affinity_desc *affinity)
1637 {
1638 	int ret;
1639 
1640 	if (domain == NULL) {
1641 		domain = irq_default_domain;
1642 		if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n"))
1643 			return -EINVAL;
1644 	}
1645 
1646 	mutex_lock(&domain->root->mutex);
1647 	ret = irq_domain_alloc_irqs_locked(domain, irq_base, nr_irqs, node, arg,
1648 					   realloc, affinity);
1649 	mutex_unlock(&domain->root->mutex);
1650 
1651 	return ret;
1652 }
1653 EXPORT_SYMBOL_GPL(__irq_domain_alloc_irqs);
1654 
/*
 * The irq_data was moved, fix the revmap to refer to the new location.
 * Must be called under the root domain mutex, which serializes revmap
 * updates against concurrent RCU readers.
 */
static void irq_domain_fix_revmap(struct irq_data *d)
{
	void __rcu **slot;

	lockdep_assert_held(&d->domain->root->mutex);

	/* Nomap domains have no reverse map to update */
	if (irq_domain_is_nomap(d->domain))
		return;

	/* Fix up the revmap. */
	if (d->hwirq < d->domain->revmap_size) {
		/* Not using radix tree */
		rcu_assign_pointer(d->domain->revmap[d->hwirq], d);
	} else {
		slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq);
		if (slot)
			radix_tree_replace_slot(&d->domain->revmap_tree, slot, d);
	}
}
1675 
1676 /**
1677  * irq_domain_push_irq() - Push a domain in to the top of a hierarchy.
1678  * @domain:	Domain to push.
1679  * @virq:	Irq to push the domain in to.
1680  * @arg:	Passed to the irq_domain_ops alloc() function.
1681  *
1682  * For an already existing irqdomain hierarchy, as might be obtained
1683  * via a call to pci_enable_msix(), add an additional domain to the
1684  * head of the processing chain.  Must be called before request_irq()
1685  * has been called.
1686  */
1687 int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg)
1688 {
1689 	struct irq_data *irq_data = irq_get_irq_data(virq);
1690 	struct irq_data *parent_irq_data;
1691 	struct irq_desc *desc;
1692 	int rv = 0;
1693 
1694 	/*
1695 	 * Check that no action has been set, which indicates the virq
1696 	 * is in a state where this function doesn't have to deal with
1697 	 * races between interrupt handling and maintaining the
1698 	 * hierarchy.  This will catch gross misuse.  Attempting to
1699 	 * make the check race free would require holding locks across
1700 	 * calls to struct irq_domain_ops->alloc(), which could lead
1701 	 * to deadlock, so we just do a simple check before starting.
1702 	 */
1703 	desc = irq_to_desc(virq);
1704 	if (!desc)
1705 		return -EINVAL;
1706 	if (WARN_ON(desc->action))
1707 		return -EBUSY;
1708 
1709 	if (domain == NULL)
1710 		return -EINVAL;
1711 
1712 	if (WARN_ON(!irq_domain_is_hierarchy(domain)))
1713 		return -EINVAL;
1714 
1715 	if (!irq_data)
1716 		return -EINVAL;
1717 
1718 	if (domain->parent != irq_data->domain)
1719 		return -EINVAL;
1720 
1721 	parent_irq_data = kzalloc_node(sizeof(*parent_irq_data), GFP_KERNEL,
1722 				       irq_data_get_node(irq_data));
1723 	if (!parent_irq_data)
1724 		return -ENOMEM;
1725 
1726 	mutex_lock(&domain->root->mutex);
1727 
1728 	/* Copy the original irq_data. */
1729 	*parent_irq_data = *irq_data;
1730 
1731 	/*
1732 	 * Overwrite the irq_data, which is embedded in struct irq_desc, with
1733 	 * values for this domain.
1734 	 */
1735 	irq_data->parent_data = parent_irq_data;
1736 	irq_data->domain = domain;
1737 	irq_data->mask = 0;
1738 	irq_data->hwirq = 0;
1739 	irq_data->chip = NULL;
1740 	irq_data->chip_data = NULL;
1741 
1742 	/* May (probably does) set hwirq, chip, etc. */
1743 	rv = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
1744 	if (rv) {
1745 		/* Restore the original irq_data. */
1746 		*irq_data = *parent_irq_data;
1747 		kfree(parent_irq_data);
1748 		goto error;
1749 	}
1750 
1751 	irq_domain_fix_revmap(parent_irq_data);
1752 	irq_domain_set_mapping(domain, irq_data->hwirq, irq_data);
1753 error:
1754 	mutex_unlock(&domain->root->mutex);
1755 
1756 	return rv;
1757 }
1758 EXPORT_SYMBOL_GPL(irq_domain_push_irq);
1759 
1760 /**
1761  * irq_domain_pop_irq() - Remove a domain from the top of a hierarchy.
1762  * @domain:	Domain to remove.
1763  * @virq:	Irq to remove the domain from.
1764  *
1765  * Undo the effects of a call to irq_domain_push_irq().  Must be
1766  * called either before request_irq() or after free_irq().
1767  */
1768 int irq_domain_pop_irq(struct irq_domain *domain, int virq)
1769 {
1770 	struct irq_data *irq_data = irq_get_irq_data(virq);
1771 	struct irq_data *parent_irq_data;
1772 	struct irq_data *tmp_irq_data;
1773 	struct irq_desc *desc;
1774 
1775 	/*
1776 	 * Check that no action is set, which indicates the virq is in
1777 	 * a state where this function doesn't have to deal with races
1778 	 * between interrupt handling and maintaining the hierarchy.
1779 	 * This will catch gross misuse.  Attempting to make the check
1780 	 * race free would require holding locks across calls to
1781 	 * struct irq_domain_ops->free(), which could lead to
1782 	 * deadlock, so we just do a simple check before starting.
1783 	 */
1784 	desc = irq_to_desc(virq);
1785 	if (!desc)
1786 		return -EINVAL;
1787 	if (WARN_ON(desc->action))
1788 		return -EBUSY;
1789 
1790 	if (domain == NULL)
1791 		return -EINVAL;
1792 
1793 	if (!irq_data)
1794 		return -EINVAL;
1795 
1796 	tmp_irq_data = irq_domain_get_irq_data(domain, virq);
1797 
1798 	/* We can only "pop" if this domain is at the top of the list */
1799 	if (WARN_ON(irq_data != tmp_irq_data))
1800 		return -EINVAL;
1801 
1802 	if (WARN_ON(irq_data->domain != domain))
1803 		return -EINVAL;
1804 
1805 	parent_irq_data = irq_data->parent_data;
1806 	if (WARN_ON(!parent_irq_data))
1807 		return -EINVAL;
1808 
1809 	mutex_lock(&domain->root->mutex);
1810 
1811 	irq_data->parent_data = NULL;
1812 
1813 	irq_domain_clear_mapping(domain, irq_data->hwirq);
1814 	irq_domain_free_irqs_hierarchy(domain, virq, 1);
1815 
1816 	/* Restore the original irq_data. */
1817 	*irq_data = *parent_irq_data;
1818 
1819 	irq_domain_fix_revmap(irq_data);
1820 
1821 	mutex_unlock(&domain->root->mutex);
1822 
1823 	kfree(parent_irq_data);
1824 
1825 	return 0;
1826 }
1827 EXPORT_SYMBOL_GPL(irq_domain_pop_irq);
1828 
1829 /**
1830  * irq_domain_free_irqs - Free IRQ number and associated data structures
1831  * @virq:	base IRQ number
1832  * @nr_irqs:	number of IRQs to free
1833  */
1834 void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs)
1835 {
1836 	struct irq_data *data = irq_get_irq_data(virq);
1837 	struct irq_domain *domain;
1838 	int i;
1839 
1840 	if (WARN(!data || !data->domain || !data->domain->ops->free,
1841 		 "NULL pointer, cannot free irq\n"))
1842 		return;
1843 
1844 	domain = data->domain;
1845 
1846 	mutex_lock(&domain->root->mutex);
1847 	for (i = 0; i < nr_irqs; i++)
1848 		irq_domain_remove_irq(virq + i);
1849 	irq_domain_free_irqs_hierarchy(domain, virq, nr_irqs);
1850 	mutex_unlock(&domain->root->mutex);
1851 
1852 	irq_domain_free_irq_data(virq, nr_irqs);
1853 	irq_free_descs(virq, nr_irqs);
1854 }
1855 
/* Free a single interrupt, routing MSI device domains to the MSI teardown. */
static void irq_domain_free_one_irq(struct irq_domain *domain, unsigned int virq)
{
	if (!irq_domain_is_msi_device(domain)) {
		irq_domain_free_irqs(virq, 1);
		return;
	}

	msi_device_domain_free_wired(domain, virq);
}
1863 
1864 /**
1865  * irq_domain_alloc_irqs_parent - Allocate interrupts from parent domain
1866  * @domain:	Domain below which interrupts must be allocated
1867  * @irq_base:	Base IRQ number
1868  * @nr_irqs:	Number of IRQs to allocate
1869  * @arg:	Allocation data (arch/domain specific)
1870  */
1871 int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
1872 				 unsigned int irq_base, unsigned int nr_irqs,
1873 				 void *arg)
1874 {
1875 	if (!domain->parent)
1876 		return -ENOSYS;
1877 
1878 	return irq_domain_alloc_irqs_hierarchy(domain->parent, irq_base,
1879 					       nr_irqs, arg);
1880 }
1881 EXPORT_SYMBOL_GPL(irq_domain_alloc_irqs_parent);
1882 
1883 /**
1884  * irq_domain_free_irqs_parent - Free interrupts from parent domain
1885  * @domain:	Domain below which interrupts must be freed
1886  * @irq_base:	Base IRQ number
1887  * @nr_irqs:	Number of IRQs to free
1888  */
1889 void irq_domain_free_irqs_parent(struct irq_domain *domain,
1890 				 unsigned int irq_base, unsigned int nr_irqs)
1891 {
1892 	if (!domain->parent)
1893 		return;
1894 
1895 	irq_domain_free_irqs_hierarchy(domain->parent, irq_base, nr_irqs);
1896 }
1897 EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
1898 
1899 static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
1900 {
1901 	if (irq_data && irq_data->domain) {
1902 		struct irq_domain *domain = irq_data->domain;
1903 
1904 		if (domain->ops->deactivate)
1905 			domain->ops->deactivate(domain, irq_data);
1906 		if (irq_data->parent_data)
1907 			__irq_domain_deactivate_irq(irq_data->parent_data);
1908 	}
1909 }
1910 
/*
 * Activate @irqd bottom-up: the parent levels are programmed before this
 * one. If this level's ->activate() fails, the already-activated parents
 * are rolled back.
 */
static int __irq_domain_activate_irq(struct irq_data *irqd, bool reserve)
{
	int ret = 0;

	if (irqd && irqd->domain) {
		struct irq_domain *domain = irqd->domain;

		if (irqd->parent_data)
			ret = __irq_domain_activate_irq(irqd->parent_data,
							reserve);
		if (!ret && domain->ops->activate) {
			ret = domain->ops->activate(domain, irqd, reserve);
			/* Rollback in case of error */
			if (ret && irqd->parent_data)
				__irq_domain_deactivate_irq(irqd->parent_data);
		}
	}
	return ret;
}
1930 
1931 /**
1932  * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
1933  *			     interrupt
1934  * @irq_data:	Outermost irq_data associated with interrupt
1935  * @reserve:	If set only reserve an interrupt vector instead of assigning one
1936  *
1937  * This is the second step to call domain_ops->activate to program interrupt
1938  * controllers, so the interrupt could actually get delivered.
1939  */
1940 int irq_domain_activate_irq(struct irq_data *irq_data, bool reserve)
1941 {
1942 	int ret = 0;
1943 
1944 	if (!irqd_is_activated(irq_data))
1945 		ret = __irq_domain_activate_irq(irq_data, reserve);
1946 	if (!ret)
1947 		irqd_set_activated(irq_data);
1948 	return ret;
1949 }
1950 
1951 /**
1952  * irq_domain_deactivate_irq - Call domain_ops->deactivate recursively to
1953  *			       deactivate interrupt
1954  * @irq_data: outermost irq_data associated with interrupt
1955  *
1956  * It calls domain_ops->deactivate to program interrupt controllers to disable
1957  * interrupt delivery.
1958  */
1959 void irq_domain_deactivate_irq(struct irq_data *irq_data)
1960 {
1961 	if (irqd_is_activated(irq_data)) {
1962 		__irq_domain_deactivate_irq(irq_data);
1963 		irqd_clr_activated(irq_data);
1964 	}
1965 }
1966 
1967 static void irq_domain_check_hierarchy(struct irq_domain *domain)
1968 {
1969 	/* Hierarchy irq_domains must implement callback alloc() */
1970 	if (domain->ops->alloc)
1971 		domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY;
1972 }
1973 #else	/* CONFIG_IRQ_DOMAIN_HIERARCHY */
1974 /**
1975  * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
1976  * @domain:	domain to match
1977  * @virq:	IRQ number to get irq_data
1978  */
1979 struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
1980 					 unsigned int virq)
1981 {
1982 	struct irq_data *irq_data = irq_get_irq_data(virq);
1983 
1984 	return (irq_data && irq_data->domain == domain) ? irq_data : NULL;
1985 }
1986 EXPORT_SYMBOL_GPL(irq_domain_get_irq_data);
1987 
1988 /**
1989  * irq_domain_set_info - Set the complete data for a @virq in @domain
1990  * @domain:		Interrupt domain to match
1991  * @virq:		IRQ number
1992  * @hwirq:		The hardware interrupt number
1993  * @chip:		The associated interrupt chip
1994  * @chip_data:		The associated interrupt chip data
1995  * @handler:		The interrupt flow handler
1996  * @handler_data:	The interrupt flow handler data
1997  * @handler_name:	The interrupt handler name
1998  */
1999 void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
2000 			 irq_hw_number_t hwirq, const struct irq_chip *chip,
2001 			 void *chip_data, irq_flow_handler_t handler,
2002 			 void *handler_data, const char *handler_name)
2003 {
2004 	irq_set_chip_and_handler_name(virq, chip, handler, handler_name);
2005 	irq_set_chip_data(virq, chip_data);
2006 	irq_set_handler_data(virq, handler_data);
2007 }
2008 
/*
 * Stub for kernels built without CONFIG_IRQ_DOMAIN_HIERARCHY: hierarchical
 * IRQ allocation is unavailable, so any attempt fails with -EINVAL.
 */
static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base,
					unsigned int nr_irqs, int node, void *arg,
					bool realloc, const struct irq_affinity_desc *affinity)
{
	return -EINVAL;
}
2015 
/* No-op stubs when CONFIG_IRQ_DOMAIN_HIERARCHY is disabled */
static void irq_domain_check_hierarchy(struct irq_domain *domain) { }
static void irq_domain_free_one_irq(struct irq_domain *domain, unsigned int virq) { }
2018 
2019 #endif	/* CONFIG_IRQ_DOMAIN_HIERARCHY */
2020 
2021 #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
2022 #include "internals.h"
2023 
2024 static struct dentry *domain_dir;
2025 
/* Human-readable names for the irq_domain::flags bits shown in debugfs */
static const struct irq_bit_descr irqdomain_flags[] = {
	BIT_MASK_DESCR(IRQ_DOMAIN_FLAG_HIERARCHY),
	BIT_MASK_DESCR(IRQ_DOMAIN_NAME_ALLOCATED),
	BIT_MASK_DESCR(IRQ_DOMAIN_FLAG_IPI_PER_CPU),
	BIT_MASK_DESCR(IRQ_DOMAIN_FLAG_IPI_SINGLE),
	BIT_MASK_DESCR(IRQ_DOMAIN_FLAG_MSI),
	BIT_MASK_DESCR(IRQ_DOMAIN_FLAG_ISOLATED_MSI),
	BIT_MASK_DESCR(IRQ_DOMAIN_FLAG_NO_MAP),
	BIT_MASK_DESCR(IRQ_DOMAIN_FLAG_MSI_PARENT),
	BIT_MASK_DESCR(IRQ_DOMAIN_FLAG_MSI_DEVICE),
	BIT_MASK_DESCR(IRQ_DOMAIN_FLAG_NONCORE),
};
2038 
2039 static void irq_domain_debug_show_one(struct seq_file *m, struct irq_domain *d, int ind)
2040 {
2041 	seq_printf(m, "%*sname:   %s\n", ind, "", d->name);
2042 	seq_printf(m, "%*ssize:   %u\n", ind + 1, "", d->revmap_size);
2043 	seq_printf(m, "%*smapped: %u\n", ind + 1, "", d->mapcount);
2044 	seq_printf(m, "%*sflags:  0x%08x\n", ind +1 , "", d->flags);
2045 	irq_debug_show_bits(m, ind, d->flags, irqdomain_flags, ARRAY_SIZE(irqdomain_flags));
2046 	if (d->ops && d->ops->debug_show)
2047 		d->ops->debug_show(m, d, NULL, ind + 1);
2048 #ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
2049 	if (!d->parent)
2050 		return;
2051 	seq_printf(m, "%*sparent: %s\n", ind + 1, "", d->parent->name);
2052 	irq_domain_debug_show_one(m, d->parent, ind + 4);
2053 #endif
2054 }
2055 
2056 static int irq_domain_debug_show(struct seq_file *m, void *p)
2057 {
2058 	struct irq_domain *d = m->private;
2059 
2060 	/* Default domain? Might be NULL */
2061 	if (!d) {
2062 		if (!irq_default_domain)
2063 			return 0;
2064 		d = irq_default_domain;
2065 	}
2066 	irq_domain_debug_show_one(m, d, 0);
2067 	return 0;
2068 }
2069 DEFINE_SHOW_ATTRIBUTE(irq_domain_debug);
2070 
2071 static void debugfs_add_domain_dir(struct irq_domain *d)
2072 {
2073 	if (!d->name || !domain_dir)
2074 		return;
2075 	debugfs_create_file(d->name, 0444, domain_dir, d,
2076 			    &irq_domain_debug_fops);
2077 }
2078 
/* Remove the per-domain debugfs file created by debugfs_add_domain_dir() */
static void debugfs_remove_domain_dir(struct irq_domain *d)
{
	debugfs_lookup_and_remove(d->name, domain_dir);
}
2083 
/*
 * Create the debugfs "domains" directory under @root, the "default" file
 * (which shows irq_default_domain via NULL private data) and one file per
 * already registered domain.  Domains registered later add their files in
 * __irq_domain_publish() via debugfs_add_domain_dir().
 */
void __init irq_domain_debugfs_init(struct dentry *root)
{
	struct irq_domain *d;

	domain_dir = debugfs_create_dir("domains", root);

	debugfs_create_file("default", 0444, domain_dir, NULL,
			    &irq_domain_debug_fops);
	/* Hold irq_domain_mutex so the domain list cannot change underneath */
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(d, &irq_domain_list, link)
		debugfs_add_domain_dir(d);
	mutex_unlock(&irq_domain_mutex);
}
2097 #endif
2098