xref: /linux/drivers/acpi/arm64/iort.c (revision 05bff3419adaa272713be4c07d287756a4b2c5f5)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2016, Semihalf
4  *	Author: Tomasz Nowicki <tn@semihalf.com>
5  *
6  * This file implements early detection/parsing of I/O mapping
7  * reported to OS through firmware via I/O Remapping Table (IORT)
8  * IORT document number: ARM DEN 0049A
9  */
10 
11 #define pr_fmt(fmt)	"ACPI: IORT: " fmt
12 
13 #include <linux/acpi_iort.h>
14 #include <linux/bitfield.h>
15 #include <linux/iommu.h>
16 #include <linux/kernel.h>
17 #include <linux/list.h>
18 #include <linux/pci.h>
19 #include <linux/platform_device.h>
20 #include <linux/slab.h>
21 #include <linux/dma-map-ops.h>
22 #include "init.h"
23 
24 #define IORT_TYPE_MASK(type)	(1 << (type))
25 #define IORT_MSI_TYPE		(1 << ACPI_IORT_NODE_ITS_GROUP)
26 #define IORT_IOMMU_TYPE		((1 << ACPI_IORT_NODE_SMMU) |	\
27 				(1 << ACPI_IORT_NODE_SMMU_V3))
28 
29 struct iort_its_msi_chip {
30 	struct list_head	list;
31 	struct fwnode_handle	*fw_node;
32 	phys_addr_t		base_addr;
33 	u32			translation_id;
34 };
35 
36 struct iort_fwnode {
37 	struct list_head list;
38 	struct acpi_iort_node *iort_node;
39 	struct fwnode_handle *fwnode;
40 };
41 static LIST_HEAD(iort_fwnode_list);
42 static DEFINE_SPINLOCK(iort_fwnode_lock);
43 
44 /**
45  * iort_set_fwnode() - Create iort_fwnode and use it to register
46  *		       iommu data in the iort_fwnode_list
47  *
48  * @iort_node: IORT table node associated with the IOMMU
49  * @fwnode: fwnode associated with the IORT node
50  *
51  * Returns: 0 on success
52  *          <0 on failure
53  */
54 static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
55 				  struct fwnode_handle *fwnode)
56 {
57 	struct iort_fwnode *np;
58 
59 	np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);
60 
61 	if (WARN_ON(!np))
62 		return -ENOMEM;
63 
64 	INIT_LIST_HEAD(&np->list);
65 	np->iort_node = iort_node;
66 	np->fwnode = fwnode;
67 
68 	spin_lock(&iort_fwnode_lock);
69 	list_add_tail(&np->list, &iort_fwnode_list);
70 	spin_unlock(&iort_fwnode_lock);
71 
72 	return 0;
73 }
74 
75 /**
76  * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
77  *
78  * @node: IORT table node to be looked-up
79  *
80  * Returns: fwnode_handle pointer on success, NULL on failure
81  */
82 static inline struct fwnode_handle *iort_get_fwnode(
83 			struct acpi_iort_node *node)
84 {
85 	struct iort_fwnode *curr;
86 	struct fwnode_handle *fwnode = NULL;
87 
88 	spin_lock(&iort_fwnode_lock);
89 	list_for_each_entry(curr, &iort_fwnode_list, list) {
90 		if (curr->iort_node == node) {
91 			fwnode = curr->fwnode;
92 			break;
93 		}
94 	}
95 	spin_unlock(&iort_fwnode_lock);
96 
97 	return fwnode;
98 }
99 
100 /**
101  * iort_delete_fwnode() - Delete fwnode associated with an IORT node
102  *
103  * @node: IORT table node associated with fwnode to delete
104  */
105 static inline void iort_delete_fwnode(struct acpi_iort_node *node)
106 {
107 	struct iort_fwnode *curr, *tmp;
108 
109 	spin_lock(&iort_fwnode_lock);
110 	list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
111 		if (curr->iort_node == node) {
112 			list_del(&curr->list);
113 			kfree(curr);
114 			break;
115 		}
116 	}
117 	spin_unlock(&iort_fwnode_lock);
118 }
119 
120 /**
121  * iort_get_iort_node() - Retrieve iort_node associated with an fwnode
122  *
123  * @fwnode: fwnode associated with device to be looked-up
124  *
125  * Returns: iort_node pointer on success, NULL on failure
126  */
127 static inline struct acpi_iort_node *iort_get_iort_node(
128 			struct fwnode_handle *fwnode)
129 {
130 	struct iort_fwnode *curr;
131 	struct acpi_iort_node *iort_node = NULL;
132 
133 	spin_lock(&iort_fwnode_lock);
134 	list_for_each_entry(curr, &iort_fwnode_list, list) {
135 		if (curr->fwnode == fwnode) {
136 			iort_node = curr->iort_node;
137 			break;
138 		}
139 	}
140 	spin_unlock(&iort_fwnode_lock);
141 
142 	return iort_node;
143 }
144 
145 typedef acpi_status (*iort_find_node_callback)
146 	(struct acpi_iort_node *node, void *context);
147 
148 /* Root pointer to the mapped IORT table */
149 static struct acpi_table_header *iort_table;
150 
151 static LIST_HEAD(iort_msi_chip_list);
152 static DEFINE_SPINLOCK(iort_msi_chip_lock);
153 
154 /**
155  * iort_register_domain_token() - register domain token along with related
156  * ITS ID and base address to the list from where we can get it back later on.
157  * @trans_id: ITS ID.
158  * @base: ITS base address.
159  * @fw_node: Domain token.
160  *
161  * Returns: 0 on success, -ENOMEM if no memory when allocating list element
162  */
163 int iort_register_domain_token(int trans_id, phys_addr_t base,
164 			       struct fwnode_handle *fw_node)
165 {
166 	struct iort_its_msi_chip *its_msi_chip;
167 
168 	its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL);
169 	if (!its_msi_chip)
170 		return -ENOMEM;
171 
172 	its_msi_chip->fw_node = fw_node;
173 	its_msi_chip->translation_id = trans_id;
174 	its_msi_chip->base_addr = base;
175 
176 	spin_lock(&iort_msi_chip_lock);
177 	list_add(&its_msi_chip->list, &iort_msi_chip_list);
178 	spin_unlock(&iort_msi_chip_lock);
179 
180 	return 0;
181 }
182 
183 /**
184  * iort_deregister_domain_token() - Deregister domain token based on ITS ID
185  * @trans_id: ITS ID.
186  *
187  * Returns: none.
188  */
189 void iort_deregister_domain_token(int trans_id)
190 {
191 	struct iort_its_msi_chip *its_msi_chip, *t;
192 
193 	spin_lock(&iort_msi_chip_lock);
194 	list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) {
195 		if (its_msi_chip->translation_id == trans_id) {
196 			list_del(&its_msi_chip->list);
197 			kfree(its_msi_chip);
198 			break;
199 		}
200 	}
201 	spin_unlock(&iort_msi_chip_lock);
202 }
203 
204 /**
205  * iort_find_domain_token() - Find domain token based on given ITS ID
206  * @trans_id: ITS ID.
207  *
208  * Returns: domain token when find on the list, NULL otherwise
209  */
210 struct fwnode_handle *iort_find_domain_token(int trans_id)
211 {
212 	struct fwnode_handle *fw_node = NULL;
213 	struct iort_its_msi_chip *its_msi_chip;
214 
215 	spin_lock(&iort_msi_chip_lock);
216 	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
217 		if (its_msi_chip->translation_id == trans_id) {
218 			fw_node = its_msi_chip->fw_node;
219 			break;
220 		}
221 	}
222 	spin_unlock(&iort_msi_chip_lock);
223 
224 	return fw_node;
225 }
226 
227 static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
228 					     iort_find_node_callback callback,
229 					     void *context)
230 {
231 	struct acpi_iort_node *iort_node, *iort_end;
232 	struct acpi_table_iort *iort;
233 	int i;
234 
235 	if (!iort_table)
236 		return NULL;
237 
238 	/* Get the first IORT node */
239 	iort = (struct acpi_table_iort *)iort_table;
240 	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
241 				 iort->node_offset);
242 	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
243 				iort_table->length);
244 
245 	for (i = 0; i < iort->node_count; i++) {
246 		if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
247 			       "IORT node pointer overflows, bad table!\n"))
248 			return NULL;
249 
250 		if (iort_node->type == type &&
251 		    ACPI_SUCCESS(callback(iort_node, context)))
252 			return iort_node;
253 
254 		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
255 					 iort_node->length);
256 	}
257 
258 	return NULL;
259 }
260 
261 static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
262 					    void *context)
263 {
264 	struct device *dev = context;
265 	acpi_status status = AE_NOT_FOUND;
266 
267 	if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
268 	    node->type == ACPI_IORT_NODE_IWB) {
269 		struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
270 		struct acpi_iort_named_component *ncomp;
271 		struct acpi_iort_iwb *iwb;
272 		struct device *cdev = dev;
273 		struct acpi_device *adev;
274 		const char *device_name;
275 
276 		/*
277 		 * Walk the device tree to find a device with an
278 		 * ACPI companion; there is no point in scanning
279 		 * IORT for a device matching a named component or IWB if
280 		 * the device does not have an ACPI companion to
281 		 * start with.
282 		 */
283 		do {
284 			adev = ACPI_COMPANION(cdev);
285 			if (adev)
286 				break;
287 
288 			cdev = cdev->parent;
289 		} while (cdev);
290 
291 		if (!adev)
292 			goto out;
293 
294 		status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
295 		if (ACPI_FAILURE(status)) {
296 			dev_warn(cdev, "Can't get device full path name\n");
297 			goto out;
298 		}
299 
300 		if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
301 			ncomp = (struct acpi_iort_named_component *)node->node_data;
302 			device_name = ncomp->device_name;
303 		} else {
304 			iwb = (struct acpi_iort_iwb *)node->node_data;
305 			device_name = iwb->device_name;
306 		}
307 		status = !strcmp(device_name, buf.pointer) ?  AE_OK : AE_NOT_FOUND;
308 		acpi_os_free(buf.pointer);
309 	} else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
310 		struct acpi_iort_root_complex *pci_rc;
311 		struct pci_bus *bus;
312 
313 		bus = to_pci_bus(dev);
314 		pci_rc = (struct acpi_iort_root_complex *)node->node_data;
315 
316 		/*
317 		 * It is assumed that PCI segment numbers maps one-to-one
318 		 * with root complexes. Each segment number can represent only
319 		 * one root complex.
320 		 */
321 		status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
322 							AE_OK : AE_NOT_FOUND;
323 	}
324 out:
325 	return status;
326 }
327 
328 static acpi_status iort_match_iwb_callback(struct acpi_iort_node *node, void *context)
329 {
330 	struct acpi_iort_iwb *iwb;
331 	u32 *id = context;
332 
333 	if (node->type != ACPI_IORT_NODE_IWB)
334 		return AE_NOT_FOUND;
335 
336 	iwb = (struct acpi_iort_iwb *)node->node_data;
337 	if (iwb->iwb_index != *id)
338 		return AE_NOT_FOUND;
339 
340 	return AE_OK;
341 }
342 
343 static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
344 		       u32 *rid_out, bool check_overlap)
345 {
346 	/* Single mapping does not care for input id */
347 	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
348 		if (type == ACPI_IORT_NODE_NAMED_COMPONENT ||
349 		    type == ACPI_IORT_NODE_IWB		   ||
350 		    type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
351 			*rid_out = map->output_base;
352 			return 0;
353 		}
354 
355 		pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n",
356 			map, type);
357 		return -ENXIO;
358 	}
359 
360 	if (rid_in < map->input_base ||
361 	    (rid_in > map->input_base + map->id_count))
362 		return -ENXIO;
363 
364 	if (check_overlap) {
365 		/*
366 		 * We already found a mapping for this input ID at the end of
367 		 * another region. If it coincides with the start of this
368 		 * region, we assume the prior match was due to the off-by-1
369 		 * issue mentioned below, and allow it to be superseded.
370 		 * Otherwise, things are *really* broken, and we just disregard
371 		 * duplicate matches entirely to retain compatibility.
372 		 */
373 		pr_err(FW_BUG "[map %p] conflicting mapping for input ID 0x%x\n",
374 		       map, rid_in);
375 		if (rid_in != map->input_base)
376 			return -ENXIO;
377 
378 		pr_err(FW_BUG "applying workaround.\n");
379 	}
380 
381 	*rid_out = map->output_base + (rid_in - map->input_base);
382 
383 	/*
384 	 * Due to confusion regarding the meaning of the id_count field (which
385 	 * carries the number of IDs *minus 1*), we may have to disregard this
386 	 * match if it is at the end of the range, and overlaps with the start
387 	 * of another one.
388 	 */
389 	if (map->id_count > 0 && rid_in == map->input_base + map->id_count)
390 		return -EAGAIN;
391 	return 0;
392 }
393 
394 static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
395 					       u32 *id_out, int index)
396 {
397 	struct acpi_iort_node *parent;
398 	struct acpi_iort_id_mapping *map;
399 
400 	if (!node->mapping_offset || !node->mapping_count ||
401 				     index >= node->mapping_count)
402 		return NULL;
403 
404 	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
405 			   node->mapping_offset + index * sizeof(*map));
406 
407 	/* Firmware bug! */
408 	if (!map->output_reference) {
409 		pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
410 		       node, node->type);
411 		return NULL;
412 	}
413 
414 	parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
415 			       map->output_reference);
416 
417 	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
418 		if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
419 		    node->type == ACPI_IORT_NODE_IWB ||
420 		    node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX ||
421 		    node->type == ACPI_IORT_NODE_SMMU_V3 ||
422 		    node->type == ACPI_IORT_NODE_PMCG) {
423 			*id_out = map->output_base;
424 			return parent;
425 		}
426 	}
427 
428 	return NULL;
429 }
430 
431 #ifndef ACPI_IORT_SMMU_V3_DEVICEID_VALID
432 #define ACPI_IORT_SMMU_V3_DEVICEID_VALID (1 << 4)
433 #endif
434 
435 static int iort_get_id_mapping_index(struct acpi_iort_node *node)
436 {
437 	struct acpi_iort_smmu_v3 *smmu;
438 	struct acpi_iort_pmcg *pmcg;
439 
440 	switch (node->type) {
441 	case ACPI_IORT_NODE_SMMU_V3:
442 		/*
443 		 * SMMUv3 dev ID mapping index was introduced in revision 1
444 		 * table, not available in revision 0
445 		 */
446 		if (node->revision < 1)
447 			return -EINVAL;
448 
449 		smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
450 		/*
451 		 * Until IORT E.e (node rev. 5), the ID mapping index was
452 		 * defined to be valid unless all interrupts are GSIV-based.
453 		 */
454 		if (node->revision < 5) {
455 			if (smmu->event_gsiv && smmu->pri_gsiv &&
456 			    smmu->gerr_gsiv && smmu->sync_gsiv)
457 				return -EINVAL;
458 		} else if (!(smmu->flags & ACPI_IORT_SMMU_V3_DEVICEID_VALID)) {
459 			return -EINVAL;
460 		}
461 
462 		if (smmu->id_mapping_index >= node->mapping_count) {
463 			pr_err(FW_BUG "[node %p type %d] ID mapping index overflows valid mappings\n",
464 			       node, node->type);
465 			return -EINVAL;
466 		}
467 
468 		return smmu->id_mapping_index;
469 	case ACPI_IORT_NODE_PMCG:
470 		pmcg = (struct acpi_iort_pmcg *)node->node_data;
471 		if (pmcg->overflow_gsiv || node->mapping_count == 0)
472 			return -EINVAL;
473 
474 		return 0;
475 	default:
476 		return -EINVAL;
477 	}
478 }
479 
480 static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
481 					       u32 id_in, u32 *id_out,
482 					       u8 type_mask)
483 {
484 	u32 id = id_in;
485 
486 	/* Parse the ID mapping tree to find specified node type */
487 	while (node) {
488 		struct acpi_iort_id_mapping *map;
489 		int i, index, rc = 0;
490 		u32 out_ref = 0, map_id = id;
491 
492 		if (IORT_TYPE_MASK(node->type) & type_mask) {
493 			if (id_out)
494 				*id_out = id;
495 			return node;
496 		}
497 
498 		if (!node->mapping_offset || !node->mapping_count)
499 			goto fail_map;
500 
501 		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
502 				   node->mapping_offset);
503 
504 		/* Firmware bug! */
505 		if (!map->output_reference) {
506 			pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
507 			       node, node->type);
508 			goto fail_map;
509 		}
510 
511 		/*
512 		 * Get the special ID mapping index (if any) and skip its
513 		 * associated ID map to prevent erroneous multi-stage
514 		 * IORT ID translations.
515 		 */
516 		index = iort_get_id_mapping_index(node);
517 
518 		/* Do the ID translation */
519 		for (i = 0; i < node->mapping_count; i++, map++) {
520 			/* if it is special mapping index, skip it */
521 			if (i == index)
522 				continue;
523 
524 			rc = iort_id_map(map, node->type, map_id, &id, out_ref);
525 			if (!rc)
526 				break;
527 			if (rc == -EAGAIN)
528 				out_ref = map->output_reference;
529 		}
530 
531 		if (i == node->mapping_count && !out_ref)
532 			goto fail_map;
533 
534 		node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
535 				    rc ? out_ref : map->output_reference);
536 	}
537 
538 fail_map:
539 	/* Map input ID to output ID unchanged on mapping failure */
540 	if (id_out)
541 		*id_out = id_in;
542 
543 	return NULL;
544 }
545 
546 static struct acpi_iort_node *iort_node_map_platform_id(
547 		struct acpi_iort_node *node, u32 *id_out, u8 type_mask,
548 		int index)
549 {
550 	struct acpi_iort_node *parent;
551 	u32 id;
552 
553 	/* step 1: retrieve the initial dev id */
554 	parent = iort_node_get_id(node, &id, index);
555 	if (!parent)
556 		return NULL;
557 
558 	/*
559 	 * optional step 2: map the initial dev id if its parent is not
560 	 * the target type we want, map it again for the use cases such
561 	 * as NC (named component) -> SMMU -> ITS. If the type is matched,
562 	 * return the initial dev id and its parent pointer directly.
563 	 */
564 	if (!(IORT_TYPE_MASK(parent->type) & type_mask))
565 		parent = iort_node_map_id(parent, id, id_out, type_mask);
566 	else
567 		if (id_out)
568 			*id_out = id;
569 
570 	return parent;
571 }
572 
573 static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
574 {
575 	struct pci_bus *pbus;
576 
577 	if (!dev_is_pci(dev)) {
578 		struct acpi_iort_node *node;
579 		/*
580 		 * scan iort_fwnode_list to see if it's an iort platform
581 		 * device (such as SMMU, PMCG),its iort node already cached
582 		 * and associated with fwnode when iort platform devices
583 		 * were initialized.
584 		 */
585 		node = iort_get_iort_node(dev->fwnode);
586 		if (node)
587 			return node;
588 		/*
589 		 * if not, then it should be a platform device defined in
590 		 * DSDT/SSDT (with Named Component node in IORT) or an
591 		 * IWB device in the DSDT/SSDT.
592 		 */
593 		node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
594 				      iort_match_node_callback, dev);
595 		if (node)
596 			return node;
597 		return iort_scan_node(ACPI_IORT_NODE_IWB,
598 				      iort_match_node_callback, dev);
599 	}
600 
601 	pbus = to_pci_dev(dev)->bus;
602 
603 	return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
604 			      iort_match_node_callback, &pbus->dev);
605 }
606 
607 /**
608  * iort_msi_map_id() - Map a MSI input ID for a device
609  * @dev: The device for which the mapping is to be done.
610  * @input_id: The device input ID.
611  *
612  * Returns: mapped MSI ID on success, input ID otherwise
613  */
614 u32 iort_msi_map_id(struct device *dev, u32 input_id)
615 {
616 	struct acpi_iort_node *node;
617 	u32 dev_id;
618 
619 	node = iort_find_dev_node(dev);
620 	if (!node)
621 		return input_id;
622 
623 	iort_node_map_id(node, input_id, &dev_id, IORT_MSI_TYPE);
624 	return dev_id;
625 }
626 
627 /**
628  * iort_msi_xlate() - Map a MSI input ID for a device
629  * @dev: The device for which the mapping is to be done.
630  * @input_id: The device input ID.
631  * @fwnode: Pointer to store the fwnode.
632  *
633  * Returns: mapped MSI ID on success, input ID otherwise
634  *	    On success, the fwnode pointer is initialized to the MSI
635  *	    controller fwnode handle.
636  */
637 u32 iort_msi_xlate(struct device *dev, u32 input_id, struct fwnode_handle **fwnode)
638 {
639 	struct acpi_iort_its_group *its;
640 	struct acpi_iort_node *node;
641 	u32 dev_id;
642 
643 	node = iort_find_dev_node(dev);
644 	if (!node)
645 		return input_id;
646 
647 	node = iort_node_map_id(node, input_id, &dev_id, IORT_MSI_TYPE);
648 	if (!node)
649 		return input_id;
650 
651 	/* Move to ITS specific data */
652 	its = (struct acpi_iort_its_group *)node->node_data;
653 
654 	*fwnode = iort_find_domain_token(its->identifiers[0]);
655 
656 	return dev_id;
657 }
658 
659 int iort_its_translate_pa(struct fwnode_handle *node, phys_addr_t *base)
660 {
661 	struct iort_its_msi_chip *its_msi_chip;
662 	int ret = -ENODEV;
663 
664 	spin_lock(&iort_msi_chip_lock);
665 	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
666 		if (its_msi_chip->fw_node == node) {
667 			*base = its_msi_chip->base_addr;
668 			ret = 0;
669 			break;
670 		}
671 	}
672 	spin_unlock(&iort_msi_chip_lock);
673 
674 	return ret;
675 }
676 
677 static int __maybe_unused iort_find_its_base(u32 its_id, phys_addr_t *base)
678 {
679 	struct fwnode_handle *fwnode = iort_find_domain_token(its_id);
680 
681 	if (!fwnode)
682 		return -ENODEV;
683 
684 	return iort_its_translate_pa(fwnode, base);
685 }
686 
687 /**
688  * iort_pmsi_get_msi_info() - Get the device id and translate frame PA for a device
689  * @dev: The device for which the mapping is to be done.
690  * @dev_id: The device ID found.
691  * @pa: optional pointer to store translate frame address.
692  *
693  * Returns: 0 for successful devid and pa retrieval, -ENODEV on error
694  */
695 int iort_pmsi_get_msi_info(struct device *dev, u32 *dev_id, phys_addr_t *pa)
696 {
697 	struct acpi_iort_node *node, *parent = NULL;
698 	struct acpi_iort_its_group *its;
699 	int i, index;
700 
701 	node = iort_find_dev_node(dev);
702 	if (!node)
703 		return -ENODEV;
704 
705 	index = iort_get_id_mapping_index(node);
706 	/* if there is a valid index, go get the dev_id directly */
707 	if (index >= 0) {
708 		parent = iort_node_get_id(node, dev_id, index);
709 	} else {
710 		for (i = 0; i < node->mapping_count; i++) {
711 			parent = iort_node_map_platform_id(node, dev_id,
712 						      IORT_MSI_TYPE, i);
713 			if (parent)
714 				break;
715 		}
716 	}
717 
718 	if (!parent)
719 		return -ENODEV;
720 
721 	if (pa) {
722 		int ret;
723 
724 		its = (struct acpi_iort_its_group *)node->node_data;
725 		ret = iort_find_its_base(its->identifiers[0], pa);
726 		if (ret)
727 			return ret;
728 	}
729 
730 	return 0;
731 }
732 
733 /**
734  * iort_dev_find_its_id() - Find the ITS identifier for a device
735  * @dev: The device.
736  * @id: Device's ID
737  * @idx: Index of the ITS identifier list.
738  * @its_id: ITS identifier.
739  *
740  * Returns: 0 on success, appropriate error value otherwise
741  */
742 static int iort_dev_find_its_id(struct device *dev, u32 id,
743 				unsigned int idx, int *its_id)
744 {
745 	struct acpi_iort_its_group *its;
746 	struct acpi_iort_node *node;
747 
748 	node = iort_find_dev_node(dev);
749 	if (!node)
750 		return -ENXIO;
751 
752 	node = iort_node_map_id(node, id, NULL, IORT_MSI_TYPE);
753 	if (!node)
754 		return -ENXIO;
755 
756 	/* Move to ITS specific data */
757 	its = (struct acpi_iort_its_group *)node->node_data;
758 	if (idx >= its->its_count) {
759 		dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
760 			idx, its->its_count);
761 		return -ENXIO;
762 	}
763 
764 	*its_id = its->identifiers[idx];
765 	return 0;
766 }
767 
768 /**
769  * iort_get_device_domain() - Find MSI domain related to a device
770  * @dev: The device.
771  * @id: Requester ID for the device.
772  * @bus_token: irq domain bus token.
773  *
774  * Returns: the MSI domain for this device, NULL otherwise
775  */
776 struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
777 					  enum irq_domain_bus_token bus_token)
778 {
779 	struct fwnode_handle *handle;
780 	int its_id;
781 
782 	if (iort_dev_find_its_id(dev, id, 0, &its_id))
783 		return NULL;
784 
785 	handle = iort_find_domain_token(its_id);
786 	if (!handle)
787 		return NULL;
788 
789 	return irq_find_matching_fwnode(handle, bus_token);
790 }
791 
792 struct fwnode_handle *iort_iwb_handle(u32 iwb_id)
793 {
794 	struct fwnode_handle *fwnode;
795 	struct acpi_iort_node *node;
796 	struct acpi_device *device;
797 	struct acpi_iort_iwb *iwb;
798 	acpi_status status;
799 	acpi_handle handle;
800 
801 	/* find its associated IWB node */
802 	node = iort_scan_node(ACPI_IORT_NODE_IWB, iort_match_iwb_callback, &iwb_id);
803 	if (!node)
804 		return NULL;
805 
806 	iwb = (struct acpi_iort_iwb *)node->node_data;
807 	status = acpi_get_handle(NULL, iwb->device_name, &handle);
808 	if (ACPI_FAILURE(status))
809 		return NULL;
810 
811 	device = acpi_get_acpi_dev(handle);
812 	if (!device)
813 		return NULL;
814 
815 	fwnode = acpi_fwnode_handle(device);
816 	acpi_put_acpi_dev(device);
817 
818 	return fwnode;
819 }
820 
821 static void iort_set_device_domain(struct device *dev,
822 				   struct acpi_iort_node *node)
823 {
824 	struct acpi_iort_its_group *its;
825 	struct acpi_iort_node *msi_parent;
826 	struct acpi_iort_id_mapping *map;
827 	struct fwnode_handle *iort_fwnode;
828 	struct irq_domain *domain;
829 	int index;
830 
831 	index = iort_get_id_mapping_index(node);
832 	if (index < 0)
833 		return;
834 
835 	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
836 			   node->mapping_offset + index * sizeof(*map));
837 
838 	/* Firmware bug! */
839 	if (!map->output_reference ||
840 	    !(map->flags & ACPI_IORT_ID_SINGLE_MAPPING)) {
841 		pr_err(FW_BUG "[node %p type %d] Invalid MSI mapping\n",
842 		       node, node->type);
843 		return;
844 	}
845 
846 	msi_parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
847 				  map->output_reference);
848 
849 	if (!msi_parent || msi_parent->type != ACPI_IORT_NODE_ITS_GROUP)
850 		return;
851 
852 	/* Move to ITS specific data */
853 	its = (struct acpi_iort_its_group *)msi_parent->node_data;
854 
855 	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
856 	if (!iort_fwnode)
857 		return;
858 
859 	domain = irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
860 	if (domain)
861 		dev_set_msi_domain(dev, domain);
862 }
863 
864 /**
865  * iort_get_platform_device_domain() - Find MSI domain related to a
866  * platform device
867  * @dev: the dev pointer associated with the platform device
868  *
869  * Returns: the MSI domain for this device, NULL otherwise
870  */
871 static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
872 {
873 	struct acpi_iort_node *node, *msi_parent = NULL;
874 	struct fwnode_handle *iort_fwnode;
875 	struct acpi_iort_its_group *its;
876 	int i;
877 
878 	/* find its associated iort node */
879 	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
880 			      iort_match_node_callback, dev);
881 	if (!node) {
882 		/* find its associated iort node */
883 		node = iort_scan_node(ACPI_IORT_NODE_IWB,
884 				      iort_match_node_callback, dev);
885 
886 		if (!node)
887 			return NULL;
888 	}
889 
890 	/* then find its msi parent node */
891 	for (i = 0; i < node->mapping_count; i++) {
892 		msi_parent = iort_node_map_platform_id(node, NULL,
893 						       IORT_MSI_TYPE, i);
894 		if (msi_parent)
895 			break;
896 	}
897 
898 	if (!msi_parent)
899 		return NULL;
900 
901 	/* Move to ITS specific data */
902 	its = (struct acpi_iort_its_group *)msi_parent->node_data;
903 
904 	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
905 	if (!iort_fwnode)
906 		return NULL;
907 
908 	return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
909 }
910 
911 void acpi_configure_pmsi_domain(struct device *dev)
912 {
913 	struct irq_domain *msi_domain;
914 
915 	msi_domain = iort_get_platform_device_domain(dev);
916 	if (msi_domain)
917 		dev_set_msi_domain(dev, msi_domain);
918 }
919 
920 #ifdef CONFIG_IOMMU_API
921 static void iort_rmr_free(struct device *dev,
922 			  struct iommu_resv_region *region)
923 {
924 	struct iommu_iort_rmr_data *rmr_data;
925 
926 	rmr_data = container_of(region, struct iommu_iort_rmr_data, rr);
927 	kfree(rmr_data->sids);
928 	kfree(rmr_data);
929 }
930 
931 static struct iommu_iort_rmr_data *iort_rmr_alloc(
932 					struct acpi_iort_rmr_desc *rmr_desc,
933 					int prot, enum iommu_resv_type type,
934 					u32 *sids, u32 num_sids)
935 {
936 	struct iommu_iort_rmr_data *rmr_data;
937 	struct iommu_resv_region *region;
938 	u32 *sids_copy;
939 	u64 addr = rmr_desc->base_address, size = rmr_desc->length;
940 
941 	rmr_data = kmalloc(sizeof(*rmr_data), GFP_KERNEL);
942 	if (!rmr_data)
943 		return NULL;
944 
945 	/* Create a copy of SIDs array to associate with this rmr_data */
946 	sids_copy = kmemdup_array(sids, num_sids, sizeof(*sids), GFP_KERNEL);
947 	if (!sids_copy) {
948 		kfree(rmr_data);
949 		return NULL;
950 	}
951 	rmr_data->sids = sids_copy;
952 	rmr_data->num_sids = num_sids;
953 
954 	if (!IS_ALIGNED(addr, SZ_64K) || !IS_ALIGNED(size, SZ_64K)) {
955 		/* PAGE align base addr and size */
956 		addr &= PAGE_MASK;
957 		size = PAGE_ALIGN(size + offset_in_page(rmr_desc->base_address));
958 
959 		pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] not aligned to 64K, continue with [0x%llx - 0x%llx]\n",
960 		       rmr_desc->base_address,
961 		       rmr_desc->base_address + rmr_desc->length - 1,
962 		       addr, addr + size - 1);
963 	}
964 
965 	region = &rmr_data->rr;
966 	INIT_LIST_HEAD(&region->list);
967 	region->start = addr;
968 	region->length = size;
969 	region->prot = prot;
970 	region->type = type;
971 	region->free = iort_rmr_free;
972 
973 	return rmr_data;
974 }
975 
976 static void iort_rmr_desc_check_overlap(struct acpi_iort_rmr_desc *desc,
977 					u32 count)
978 {
979 	int i, j;
980 
981 	for (i = 0; i < count; i++) {
982 		u64 end, start = desc[i].base_address, length = desc[i].length;
983 
984 		if (!length) {
985 			pr_err(FW_BUG "RMR descriptor[0x%llx] with zero length, continue anyway\n",
986 			       start);
987 			continue;
988 		}
989 
990 		end = start + length - 1;
991 
992 		/* Check for address overlap */
993 		for (j = i + 1; j < count; j++) {
994 			u64 e_start = desc[j].base_address;
995 			u64 e_end = e_start + desc[j].length - 1;
996 
997 			if (start <= e_end && end >= e_start)
998 				pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] overlaps, continue anyway\n",
999 				       start, end);
1000 		}
1001 	}
1002 }
1003 
1004 /*
1005  * Please note, we will keep the already allocated RMR reserve
1006  * regions in case of a memory allocation failure.
1007  */
1008 static void iort_get_rmrs(struct acpi_iort_node *node,
1009 			  struct acpi_iort_node *smmu,
1010 			  u32 *sids, u32 num_sids,
1011 			  struct list_head *head)
1012 {
1013 	struct acpi_iort_rmr *rmr = (struct acpi_iort_rmr *)node->node_data;
1014 	struct acpi_iort_rmr_desc *rmr_desc;
1015 	int i;
1016 
1017 	rmr_desc = ACPI_ADD_PTR(struct acpi_iort_rmr_desc, node,
1018 				rmr->rmr_offset);
1019 
1020 	iort_rmr_desc_check_overlap(rmr_desc, rmr->rmr_count);
1021 
1022 	for (i = 0; i < rmr->rmr_count; i++, rmr_desc++) {
1023 		struct iommu_iort_rmr_data *rmr_data;
1024 		enum iommu_resv_type type;
1025 		int prot = IOMMU_READ | IOMMU_WRITE;
1026 
1027 		if (rmr->flags & ACPI_IORT_RMR_REMAP_PERMITTED)
1028 			type = IOMMU_RESV_DIRECT_RELAXABLE;
1029 		else
1030 			type = IOMMU_RESV_DIRECT;
1031 
1032 		if (rmr->flags & ACPI_IORT_RMR_ACCESS_PRIVILEGE)
1033 			prot |= IOMMU_PRIV;
1034 
1035 		/* Attributes 0x00 - 0x03 represents device memory */
1036 		if (ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags) <=
1037 				ACPI_IORT_RMR_ATTR_DEVICE_GRE)
1038 			prot |= IOMMU_MMIO;
1039 		else if (ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags) ==
1040 				ACPI_IORT_RMR_ATTR_NORMAL_IWB_OWB)
1041 			prot |= IOMMU_CACHE;
1042 
1043 		rmr_data = iort_rmr_alloc(rmr_desc, prot, type,
1044 					  sids, num_sids);
1045 		if (!rmr_data)
1046 			return;
1047 
1048 		list_add_tail(&rmr_data->rr.list, head);
1049 	}
1050 }
1051 
1052 static u32 *iort_rmr_alloc_sids(u32 *sids, u32 count, u32 id_start,
1053 				u32 new_count)
1054 {
1055 	u32 *new_sids;
1056 	u32 total_count = count + new_count;
1057 	int i;
1058 
1059 	new_sids = krealloc_array(sids, count + new_count,
1060 				  sizeof(*new_sids), GFP_KERNEL);
1061 	if (!new_sids) {
1062 		kfree(sids);
1063 		return NULL;
1064 	}
1065 
1066 	for (i = count; i < total_count; i++)
1067 		new_sids[i] = id_start++;
1068 
1069 	return new_sids;
1070 }
1071 
1072 static bool iort_rmr_has_dev(struct device *dev, u32 id_start,
1073 			     u32 id_count)
1074 {
1075 	int i;
1076 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1077 
1078 	/*
1079 	 * Make sure the kernel has preserved the boot firmware PCIe
1080 	 * configuration. This is required to ensure that the RMR PCIe
1081 	 * StreamIDs are still valid (Refer: ARM DEN 0049E.d Section 3.1.1.5).
1082 	 */
1083 	if (dev_is_pci(dev)) {
1084 		struct pci_dev *pdev = to_pci_dev(dev);
1085 		struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);
1086 
1087 		if (!host->preserve_config)
1088 			return false;
1089 	}
1090 
1091 	for (i = 0; i < fwspec->num_ids; i++) {
1092 		if (fwspec->ids[i] >= id_start &&
1093 		    fwspec->ids[i] <= id_start + id_count)
1094 			return true;
1095 	}
1096 
1097 	return false;
1098 }
1099 
1100 static void iort_node_get_rmr_info(struct acpi_iort_node *node,
1101 				   struct acpi_iort_node *iommu,
1102 				   struct device *dev, struct list_head *head)
1103 {
1104 	struct acpi_iort_node *smmu = NULL;
1105 	struct acpi_iort_rmr *rmr;
1106 	struct acpi_iort_id_mapping *map;
1107 	u32 *sids = NULL;
1108 	u32 num_sids = 0;
1109 	int i;
1110 
1111 	if (!node->mapping_offset || !node->mapping_count) {
1112 		pr_err(FW_BUG "Invalid ID mapping, skipping RMR node %p\n",
1113 		       node);
1114 		return;
1115 	}
1116 
1117 	rmr = (struct acpi_iort_rmr *)node->node_data;
1118 	if (!rmr->rmr_offset || !rmr->rmr_count)
1119 		return;
1120 
1121 	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
1122 			   node->mapping_offset);
1123 
1124 	/*
1125 	 * Go through the ID mappings and see if we have a match for SMMU
1126 	 * and dev(if !NULL). If found, get the sids for the Node.
1127 	 * Please note, id_count is equal to the number of IDs  in the
1128 	 * range minus one.
1129 	 */
1130 	for (i = 0; i < node->mapping_count; i++, map++) {
1131 		struct acpi_iort_node *parent;
1132 
1133 		parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
1134 				      map->output_reference);
1135 		if (parent != iommu)
1136 			continue;
1137 
1138 		/* If dev is valid, check RMR node corresponds to the dev SID */
1139 		if (dev && !iort_rmr_has_dev(dev, map->output_base,
1140 					     map->id_count))
1141 			continue;
1142 
1143 		/* Retrieve SIDs associated with the Node. */
1144 		sids = iort_rmr_alloc_sids(sids, num_sids, map->output_base,
1145 					   map->id_count + 1);
1146 		if (!sids)
1147 			return;
1148 
1149 		num_sids += map->id_count + 1;
1150 	}
1151 
1152 	if (!sids)
1153 		return;
1154 
1155 	iort_get_rmrs(node, smmu, sids, num_sids, head);
1156 	kfree(sids);
1157 }
1158 
1159 static void iort_find_rmrs(struct acpi_iort_node *iommu, struct device *dev,
1160 			   struct list_head *head)
1161 {
1162 	struct acpi_table_iort *iort;
1163 	struct acpi_iort_node *iort_node, *iort_end;
1164 	int i;
1165 
1166 	/* Only supports ARM DEN 0049E.d onwards */
1167 	if (iort_table->revision < 5)
1168 		return;
1169 
1170 	iort = (struct acpi_table_iort *)iort_table;
1171 
1172 	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
1173 				 iort->node_offset);
1174 	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
1175 				iort_table->length);
1176 
1177 	for (i = 0; i < iort->node_count; i++) {
1178 		if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
1179 			       "IORT node pointer overflows, bad table!\n"))
1180 			return;
1181 
1182 		if (iort_node->type == ACPI_IORT_NODE_RMR)
1183 			iort_node_get_rmr_info(iort_node, iommu, dev, head);
1184 
1185 		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
1186 					 iort_node->length);
1187 	}
1188 }
1189 
1190 /*
1191  * Populate the RMR list associated with a given IOMMU and dev(if provided).
1192  * If dev is NULL, the function populates all the RMRs associated with the
1193  * given IOMMU.
1194  */
1195 static void iort_iommu_rmr_get_resv_regions(struct fwnode_handle *iommu_fwnode,
1196 					    struct device *dev,
1197 					    struct list_head *head)
1198 {
1199 	struct acpi_iort_node *iommu;
1200 
1201 	iommu = iort_get_iort_node(iommu_fwnode);
1202 	if (!iommu)
1203 		return;
1204 
1205 	iort_find_rmrs(iommu, dev, head);
1206 }
1207 
1208 static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
1209 {
1210 	struct acpi_iort_node *iommu;
1211 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1212 
1213 	iommu = iort_get_iort_node(fwspec->iommu_fwnode);
1214 
1215 	if (iommu && (iommu->type == ACPI_IORT_NODE_SMMU_V3)) {
1216 		struct acpi_iort_smmu_v3 *smmu;
1217 
1218 		smmu = (struct acpi_iort_smmu_v3 *)iommu->node_data;
1219 		if (smmu->model == ACPI_IORT_SMMU_V3_HISILICON_HI161X)
1220 			return iommu;
1221 	}
1222 
1223 	return NULL;
1224 }
1225 
1226 /*
1227  * Retrieve platform specific HW MSI reserve regions.
1228  * The ITS interrupt translation spaces (ITS_base + SZ_64K, SZ_64K)
1229  * associated with the device are the HW MSI reserved regions.
1230  */
1231 static void iort_iommu_msi_get_resv_regions(struct device *dev,
1232 					    struct list_head *head)
1233 {
1234 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1235 	struct acpi_iort_its_group *its;
1236 	struct acpi_iort_node *iommu_node, *its_node = NULL;
1237 	int i;
1238 
1239 	iommu_node = iort_get_msi_resv_iommu(dev);
1240 	if (!iommu_node)
1241 		return;
1242 
1243 	/*
1244 	 * Current logic to reserve ITS regions relies on HW topologies
1245 	 * where a given PCI or named component maps its IDs to only one
1246 	 * ITS group; if a PCI or named component can map its IDs to
1247 	 * different ITS groups through IORT mappings this function has
1248 	 * to be reworked to ensure we reserve regions for all ITS groups
1249 	 * a given PCI or named component may map IDs to.
1250 	 */
1251 
1252 	for (i = 0; i < fwspec->num_ids; i++) {
1253 		its_node = iort_node_map_id(iommu_node,
1254 					fwspec->ids[i],
1255 					NULL, IORT_MSI_TYPE);
1256 		if (its_node)
1257 			break;
1258 	}
1259 
1260 	if (!its_node)
1261 		return;
1262 
1263 	/* Move to ITS specific data */
1264 	its = (struct acpi_iort_its_group *)its_node->node_data;
1265 
1266 	for (i = 0; i < its->its_count; i++) {
1267 		phys_addr_t base;
1268 
1269 		if (!iort_find_its_base(its->identifiers[i], &base)) {
1270 			int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1271 			struct iommu_resv_region *region;
1272 
1273 			region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K,
1274 							 prot, IOMMU_RESV_MSI,
1275 							 GFP_KERNEL);
1276 			if (region)
1277 				list_add_tail(&region->list, head);
1278 		}
1279 	}
1280 }
1281 
1282 /**
1283  * iort_iommu_get_resv_regions - Generic helper to retrieve reserved regions.
1284  * @dev: Device from iommu_get_resv_regions()
1285  * @head: Reserved region list from iommu_get_resv_regions()
1286  */
1287 void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
1288 {
1289 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1290 
1291 	iort_iommu_msi_get_resv_regions(dev, head);
1292 	iort_iommu_rmr_get_resv_regions(fwspec->iommu_fwnode, dev, head);
1293 }
1294 
1295 /**
1296  * iort_get_rmr_sids - Retrieve IORT RMR node reserved regions with
1297  *                     associated StreamIDs information.
1298  * @iommu_fwnode: fwnode associated with IOMMU
1299  * @head: Resereved region list
1300  */
1301 void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode,
1302 		       struct list_head *head)
1303 {
1304 	iort_iommu_rmr_get_resv_regions(iommu_fwnode, NULL, head);
1305 }
1306 EXPORT_SYMBOL_GPL(iort_get_rmr_sids);
1307 
1308 /**
1309  * iort_put_rmr_sids - Free memory allocated for RMR reserved regions.
1310  * @iommu_fwnode: fwnode associated with IOMMU
1311  * @head: Resereved region list
1312  */
1313 void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode,
1314 		       struct list_head *head)
1315 {
1316 	struct iommu_resv_region *entry, *next;
1317 
1318 	list_for_each_entry_safe(entry, next, head, list)
1319 		entry->free(NULL, entry);
1320 }
1321 EXPORT_SYMBOL_GPL(iort_put_rmr_sids);
1322 
1323 static inline bool iort_iommu_driver_enabled(u8 type)
1324 {
1325 	switch (type) {
1326 	case ACPI_IORT_NODE_SMMU_V3:
1327 		return IS_ENABLED(CONFIG_ARM_SMMU_V3);
1328 	case ACPI_IORT_NODE_SMMU:
1329 		return IS_ENABLED(CONFIG_ARM_SMMU);
1330 	default:
1331 		pr_warn("IORT node type %u does not describe an SMMU\n", type);
1332 		return false;
1333 	}
1334 }
1335 
1336 static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node)
1337 {
1338 	struct acpi_iort_root_complex *pci_rc;
1339 
1340 	pci_rc = (struct acpi_iort_root_complex *)node->node_data;
1341 	return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED;
1342 }
1343 
1344 static bool iort_pci_rc_supports_canwbs(struct acpi_iort_node *node)
1345 {
1346 	struct acpi_iort_memory_access *memory_access;
1347 	struct acpi_iort_root_complex *pci_rc;
1348 
1349 	pci_rc = (struct acpi_iort_root_complex *)node->node_data;
1350 	memory_access =
1351 		(struct acpi_iort_memory_access *)&pci_rc->memory_properties;
1352 	return memory_access->memory_flags & ACPI_IORT_MF_CANWBS;
1353 }
1354 
1355 static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
1356 			    u32 streamid)
1357 {
1358 	struct fwnode_handle *iort_fwnode;
1359 
1360 	/* If there's no SMMU driver at all, give up now */
1361 	if (!node || !iort_iommu_driver_enabled(node->type))
1362 		return -ENODEV;
1363 
1364 	iort_fwnode = iort_get_fwnode(node);
1365 	if (!iort_fwnode)
1366 		return -ENODEV;
1367 
1368 	/*
1369 	 * If the SMMU drivers are enabled but not loaded/probed
1370 	 * yet, this will defer.
1371 	 */
1372 	return acpi_iommu_fwspec_init(dev, streamid, iort_fwnode);
1373 }
1374 
1375 struct iort_pci_alias_info {
1376 	struct device *dev;
1377 	struct acpi_iort_node *node;
1378 };
1379 
1380 static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
1381 {
1382 	struct iort_pci_alias_info *info = data;
1383 	struct acpi_iort_node *parent;
1384 	u32 streamid;
1385 
1386 	parent = iort_node_map_id(info->node, alias, &streamid,
1387 				  IORT_IOMMU_TYPE);
1388 	return iort_iommu_xlate(info->dev, parent, streamid);
1389 }
1390 
1391 static void iort_named_component_init(struct device *dev,
1392 				      struct acpi_iort_node *node)
1393 {
1394 	struct property_entry props[3] = {};
1395 	struct acpi_iort_named_component *nc;
1396 
1397 	nc = (struct acpi_iort_named_component *)node->node_data;
1398 	props[0] = PROPERTY_ENTRY_U32("pasid-num-bits",
1399 				      FIELD_GET(ACPI_IORT_NC_PASID_BITS,
1400 						nc->node_flags));
1401 	if (nc->node_flags & ACPI_IORT_NC_STALL_SUPPORTED)
1402 		props[1] = PROPERTY_ENTRY_BOOL("dma-can-stall");
1403 
1404 	if (device_create_managed_software_node(dev, props, NULL))
1405 		dev_warn(dev, "Could not add device properties\n");
1406 }
1407 
1408 static int iort_nc_iommu_map(struct device *dev, struct acpi_iort_node *node)
1409 {
1410 	struct acpi_iort_node *parent;
1411 	int err = -ENODEV, i = 0;
1412 	u32 streamid = 0;
1413 
1414 	do {
1415 
1416 		parent = iort_node_map_platform_id(node, &streamid,
1417 						   IORT_IOMMU_TYPE,
1418 						   i++);
1419 
1420 		if (parent)
1421 			err = iort_iommu_xlate(dev, parent, streamid);
1422 	} while (parent && !err);
1423 
1424 	return err;
1425 }
1426 
1427 static int iort_nc_iommu_map_id(struct device *dev,
1428 				struct acpi_iort_node *node,
1429 				const u32 *in_id)
1430 {
1431 	struct acpi_iort_node *parent;
1432 	u32 streamid;
1433 
1434 	parent = iort_node_map_id(node, *in_id, &streamid, IORT_IOMMU_TYPE);
1435 	if (parent)
1436 		return iort_iommu_xlate(dev, parent, streamid);
1437 
1438 	return -ENODEV;
1439 }
1440 
1441 
1442 /**
1443  * iort_iommu_configure_id - Set-up IOMMU configuration for a device.
1444  *
1445  * @dev: device to configure
1446  * @id_in: optional input id const value pointer
1447  *
1448  * Returns: 0 on success, <0 on failure
1449  */
1450 int iort_iommu_configure_id(struct device *dev, const u32 *id_in)
1451 {
1452 	struct acpi_iort_node *node;
1453 	int err = -ENODEV;
1454 
1455 	if (dev_is_pci(dev)) {
1456 		struct iommu_fwspec *fwspec;
1457 		struct pci_bus *bus = to_pci_dev(dev)->bus;
1458 		struct iort_pci_alias_info info = { .dev = dev };
1459 
1460 		node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
1461 				      iort_match_node_callback, &bus->dev);
1462 		if (!node)
1463 			return -ENODEV;
1464 
1465 		info.node = node;
1466 		err = pci_for_each_dma_alias(to_pci_dev(dev),
1467 					     iort_pci_iommu_init, &info);
1468 
1469 		fwspec = dev_iommu_fwspec_get(dev);
1470 		if (fwspec && iort_pci_rc_supports_ats(node))
1471 			fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
1472 		if (fwspec && iort_pci_rc_supports_canwbs(node))
1473 			fwspec->flags |= IOMMU_FWSPEC_PCI_RC_CANWBS;
1474 	} else {
1475 		node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
1476 				      iort_match_node_callback, dev);
1477 		if (!node)
1478 			return -ENODEV;
1479 
1480 		err = id_in ? iort_nc_iommu_map_id(dev, node, id_in) :
1481 			      iort_nc_iommu_map(dev, node);
1482 
1483 		if (!err)
1484 			iort_named_component_init(dev, node);
1485 	}
1486 
1487 	return err;
1488 }
1489 
1490 #else
1491 void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
1492 { }
1493 int iort_iommu_configure_id(struct device *dev, const u32 *input_id)
1494 { return -ENODEV; }
1495 #endif
1496 
1497 static int nc_dma_get_range(struct device *dev, u64 *limit)
1498 {
1499 	struct acpi_iort_node *node;
1500 	struct acpi_iort_named_component *ncomp;
1501 
1502 	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
1503 			      iort_match_node_callback, dev);
1504 	if (!node)
1505 		return -ENODEV;
1506 
1507 	ncomp = (struct acpi_iort_named_component *)node->node_data;
1508 
1509 	if (!ncomp->memory_address_limit) {
1510 		pr_warn(FW_BUG "Named component missing memory address limit\n");
1511 		return -EINVAL;
1512 	}
1513 
1514 	*limit = ncomp->memory_address_limit >= 64 ? U64_MAX :
1515 			(1ULL << ncomp->memory_address_limit) - 1;
1516 
1517 	return 0;
1518 }
1519 
1520 static int rc_dma_get_range(struct device *dev, u64 *limit)
1521 {
1522 	struct acpi_iort_node *node;
1523 	struct acpi_iort_root_complex *rc;
1524 	struct pci_bus *pbus = to_pci_dev(dev)->bus;
1525 
1526 	node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
1527 			      iort_match_node_callback, &pbus->dev);
1528 	if (!node || node->revision < 1)
1529 		return -ENODEV;
1530 
1531 	rc = (struct acpi_iort_root_complex *)node->node_data;
1532 
1533 	if (!rc->memory_address_limit) {
1534 		pr_warn(FW_BUG "Root complex missing memory address limit\n");
1535 		return -EINVAL;
1536 	}
1537 
1538 	*limit = rc->memory_address_limit >= 64 ? U64_MAX :
1539 			(1ULL << rc->memory_address_limit) - 1;
1540 
1541 	return 0;
1542 }
1543 
1544 /**
1545  * iort_dma_get_ranges() - Look up DMA addressing limit for the device
1546  * @dev: device to lookup
1547  * @limit: DMA limit result pointer
1548  *
1549  * Return: 0 on success, an error otherwise.
1550  */
1551 int iort_dma_get_ranges(struct device *dev, u64 *limit)
1552 {
1553 	if (dev_is_pci(dev))
1554 		return rc_dma_get_range(dev, limit);
1555 	else
1556 		return nc_dma_get_range(dev, limit);
1557 }
1558 
1559 static void __init acpi_iort_register_irq(int hwirq, const char *name,
1560 					  int trigger,
1561 					  struct resource *res)
1562 {
1563 	int irq = acpi_register_gsi(NULL, hwirq, trigger,
1564 				    ACPI_ACTIVE_HIGH);
1565 
1566 	if (irq <= 0) {
1567 		pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
1568 								      name);
1569 		return;
1570 	}
1571 
1572 	res->start = irq;
1573 	res->end = irq;
1574 	res->flags = IORESOURCE_IRQ;
1575 	res->name = name;
1576 }
1577 
1578 static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
1579 {
1580 	struct acpi_iort_smmu_v3 *smmu;
1581 	/* Always present mem resource */
1582 	int num_res = 1;
1583 
1584 	/* Retrieve SMMUv3 specific data */
1585 	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
1586 
1587 	if (smmu->event_gsiv)
1588 		num_res++;
1589 
1590 	if (smmu->pri_gsiv)
1591 		num_res++;
1592 
1593 	if (smmu->gerr_gsiv)
1594 		num_res++;
1595 
1596 	if (smmu->sync_gsiv)
1597 		num_res++;
1598 
1599 	return num_res;
1600 }
1601 
1602 static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu)
1603 {
1604 	/*
1605 	 * Cavium ThunderX2 implementation doesn't not support unique
1606 	 * irq line. Use single irq line for all the SMMUv3 interrupts.
1607 	 */
1608 	if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
1609 		return false;
1610 
1611 	/*
1612 	 * ThunderX2 doesn't support MSIs from the SMMU, so we're checking
1613 	 * SPI numbers here.
1614 	 */
1615 	return smmu->event_gsiv == smmu->pri_gsiv &&
1616 	       smmu->event_gsiv == smmu->gerr_gsiv &&
1617 	       smmu->event_gsiv == smmu->sync_gsiv;
1618 }
1619 
1620 static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu)
1621 {
1622 	/*
1623 	 * Override the size, for Cavium ThunderX2 implementation
1624 	 * which doesn't support the page 1 SMMU register space.
1625 	 */
1626 	if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
1627 		return SZ_64K;
1628 
1629 	return SZ_128K;
1630 }
1631 
1632 static void __init arm_smmu_v3_init_resources(struct resource *res,
1633 					      struct acpi_iort_node *node)
1634 {
1635 	struct acpi_iort_smmu_v3 *smmu;
1636 	int num_res = 0;
1637 
1638 	/* Retrieve SMMUv3 specific data */
1639 	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
1640 
1641 	res[num_res].start = smmu->base_address;
1642 	res[num_res].end = smmu->base_address +
1643 				arm_smmu_v3_resource_size(smmu) - 1;
1644 	res[num_res].flags = IORESOURCE_MEM;
1645 
1646 	num_res++;
1647 	if (arm_smmu_v3_is_combined_irq(smmu)) {
1648 		if (smmu->event_gsiv)
1649 			acpi_iort_register_irq(smmu->event_gsiv, "combined",
1650 					       ACPI_EDGE_SENSITIVE,
1651 					       &res[num_res++]);
1652 	} else {
1653 
1654 		if (smmu->event_gsiv)
1655 			acpi_iort_register_irq(smmu->event_gsiv, "eventq",
1656 					       ACPI_EDGE_SENSITIVE,
1657 					       &res[num_res++]);
1658 
1659 		if (smmu->pri_gsiv)
1660 			acpi_iort_register_irq(smmu->pri_gsiv, "priq",
1661 					       ACPI_EDGE_SENSITIVE,
1662 					       &res[num_res++]);
1663 
1664 		if (smmu->gerr_gsiv)
1665 			acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
1666 					       ACPI_EDGE_SENSITIVE,
1667 					       &res[num_res++]);
1668 
1669 		if (smmu->sync_gsiv)
1670 			acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
1671 					       ACPI_EDGE_SENSITIVE,
1672 					       &res[num_res++]);
1673 	}
1674 }
1675 
1676 static void __init arm_smmu_v3_dma_configure(struct device *dev,
1677 					     struct acpi_iort_node *node)
1678 {
1679 	struct acpi_iort_smmu_v3 *smmu;
1680 	enum dev_dma_attr attr;
1681 
1682 	/* Retrieve SMMUv3 specific data */
1683 	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
1684 
1685 	attr = (smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) ?
1686 			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
1687 
1688 	/* We expect the dma masks to be equivalent for all SMMUv3 set-ups */
1689 	dev->dma_mask = &dev->coherent_dma_mask;
1690 
1691 	/* Configure DMA for the page table walker */
1692 	acpi_dma_configure(dev, attr);
1693 }
1694 
1695 #if defined(CONFIG_ACPI_NUMA)
1696 /*
1697  * set numa proximity domain for smmuv3 device
1698  */
1699 static int  __init arm_smmu_v3_set_proximity(struct device *dev,
1700 					      struct acpi_iort_node *node)
1701 {
1702 	struct acpi_iort_smmu_v3 *smmu;
1703 
1704 	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
1705 	if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
1706 		int dev_node = pxm_to_node(smmu->pxm);
1707 
1708 		if (dev_node != NUMA_NO_NODE && !node_online(dev_node))
1709 			return -EINVAL;
1710 
1711 		set_dev_node(dev, dev_node);
1712 		pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
1713 			smmu->base_address,
1714 			smmu->pxm);
1715 	}
1716 	return 0;
1717 }
1718 #else
1719 #define arm_smmu_v3_set_proximity NULL
1720 #endif
1721 
1722 static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
1723 {
1724 	struct acpi_iort_smmu *smmu;
1725 
1726 	/* Retrieve SMMU specific data */
1727 	smmu = (struct acpi_iort_smmu *)node->node_data;
1728 
1729 	/*
1730 	 * Only consider the global fault interrupt and ignore the
1731 	 * configuration access interrupt.
1732 	 *
1733 	 * MMIO address and global fault interrupt resources are always
1734 	 * present so add them to the context interrupt count as a static
1735 	 * value.
1736 	 */
1737 	return smmu->context_interrupt_count + 2;
1738 }
1739 
1740 static void __init arm_smmu_init_resources(struct resource *res,
1741 					   struct acpi_iort_node *node)
1742 {
1743 	struct acpi_iort_smmu *smmu;
1744 	int i, hw_irq, trigger, num_res = 0;
1745 	u64 *ctx_irq, *glb_irq;
1746 
1747 	/* Retrieve SMMU specific data */
1748 	smmu = (struct acpi_iort_smmu *)node->node_data;
1749 
1750 	res[num_res].start = smmu->base_address;
1751 	res[num_res].end = smmu->base_address + smmu->span - 1;
1752 	res[num_res].flags = IORESOURCE_MEM;
1753 	num_res++;
1754 
1755 	glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
1756 	/* Global IRQs */
1757 	hw_irq = IORT_IRQ_MASK(glb_irq[0]);
1758 	trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);
1759 
1760 	acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
1761 				     &res[num_res++]);
1762 
1763 	/* Context IRQs */
1764 	ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
1765 	for (i = 0; i < smmu->context_interrupt_count; i++) {
1766 		hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
1767 		trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);
1768 
1769 		acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
1770 				       &res[num_res++]);
1771 	}
1772 }
1773 
1774 static void __init arm_smmu_dma_configure(struct device *dev,
1775 					  struct acpi_iort_node *node)
1776 {
1777 	struct acpi_iort_smmu *smmu;
1778 	enum dev_dma_attr attr;
1779 
1780 	/* Retrieve SMMU specific data */
1781 	smmu = (struct acpi_iort_smmu *)node->node_data;
1782 
1783 	attr = (smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) ?
1784 			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
1785 
1786 	/* We expect the dma masks to be equivalent for SMMU set-ups */
1787 	dev->dma_mask = &dev->coherent_dma_mask;
1788 
1789 	/* Configure DMA for the page table walker */
1790 	acpi_dma_configure(dev, attr);
1791 }
1792 
1793 static int __init arm_smmu_v3_pmcg_count_resources(struct acpi_iort_node *node)
1794 {
1795 	struct acpi_iort_pmcg *pmcg;
1796 
1797 	/* Retrieve PMCG specific data */
1798 	pmcg = (struct acpi_iort_pmcg *)node->node_data;
1799 
1800 	/*
1801 	 * There are always 2 memory resources.
1802 	 * If the overflow_gsiv is present then add that for a total of 3.
1803 	 */
1804 	return pmcg->overflow_gsiv ? 3 : 2;
1805 }
1806 
1807 static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res,
1808 						   struct acpi_iort_node *node)
1809 {
1810 	struct acpi_iort_pmcg *pmcg;
1811 
1812 	/* Retrieve PMCG specific data */
1813 	pmcg = (struct acpi_iort_pmcg *)node->node_data;
1814 
1815 	res[0].start = pmcg->page0_base_address;
1816 	res[0].end = pmcg->page0_base_address + SZ_4K - 1;
1817 	res[0].flags = IORESOURCE_MEM;
1818 	/*
1819 	 * The initial version in DEN0049C lacked a way to describe register
1820 	 * page 1, which makes it broken for most PMCG implementations; in
1821 	 * that case, just let the driver fail gracefully if it expects to
1822 	 * find a second memory resource.
1823 	 */
1824 	if (node->revision > 0) {
1825 		res[1].start = pmcg->page1_base_address;
1826 		res[1].end = pmcg->page1_base_address + SZ_4K - 1;
1827 		res[1].flags = IORESOURCE_MEM;
1828 	}
1829 
1830 	if (pmcg->overflow_gsiv)
1831 		acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow",
1832 				       ACPI_EDGE_SENSITIVE, &res[2]);
1833 }
1834 
1835 static struct acpi_platform_list pmcg_plat_info[] __initdata = {
1836 	/* HiSilicon Hip08 Platform */
1837 	{"HISI  ", "HIP08   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
1838 	 "Erratum #162001800, Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP08},
1839 	/* HiSilicon Hip09 Platform */
1840 	{"HISI  ", "HIP09   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
1841 	 "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
1842 	{"HISI  ", "HIP09A  ", 0, ACPI_SIG_IORT, greater_than_or_equal,
1843 	 "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
1844 	/* HiSilicon Hip10/11 Platform uses the same SMMU IP with Hip09 */
1845 	{"HISI  ", "HIP10   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
1846 	 "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
1847 	{"HISI  ", "HIP10C  ", 0, ACPI_SIG_IORT, greater_than_or_equal,
1848 	 "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
1849 	{"HISI  ", "HIP11   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
1850 	 "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
1851 	{ }
1852 };
1853 
1854 static int __init arm_smmu_v3_pmcg_add_platdata(struct platform_device *pdev)
1855 {
1856 	u32 model;
1857 	int idx;
1858 
1859 	idx = acpi_match_platform_list(pmcg_plat_info);
1860 	if (idx >= 0)
1861 		model = pmcg_plat_info[idx].data;
1862 	else
1863 		model = IORT_SMMU_V3_PMCG_GENERIC;
1864 
1865 	return platform_device_add_data(pdev, &model, sizeof(model));
1866 }
1867 
1868 struct iort_dev_config {
1869 	const char *name;
1870 	int (*dev_init)(struct acpi_iort_node *node);
1871 	void (*dev_dma_configure)(struct device *dev,
1872 				  struct acpi_iort_node *node);
1873 	int (*dev_count_resources)(struct acpi_iort_node *node);
1874 	void (*dev_init_resources)(struct resource *res,
1875 				     struct acpi_iort_node *node);
1876 	int (*dev_set_proximity)(struct device *dev,
1877 				    struct acpi_iort_node *node);
1878 	int (*dev_add_platdata)(struct platform_device *pdev);
1879 };
1880 
1881 static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = {
1882 	.name = "arm-smmu-v3",
1883 	.dev_dma_configure = arm_smmu_v3_dma_configure,
1884 	.dev_count_resources = arm_smmu_v3_count_resources,
1885 	.dev_init_resources = arm_smmu_v3_init_resources,
1886 	.dev_set_proximity = arm_smmu_v3_set_proximity,
1887 };
1888 
1889 static const struct iort_dev_config iort_arm_smmu_cfg __initconst = {
1890 	.name = "arm-smmu",
1891 	.dev_dma_configure = arm_smmu_dma_configure,
1892 	.dev_count_resources = arm_smmu_count_resources,
1893 	.dev_init_resources = arm_smmu_init_resources,
1894 };
1895 
1896 static const struct iort_dev_config iort_arm_smmu_v3_pmcg_cfg __initconst = {
1897 	.name = "arm-smmu-v3-pmcg",
1898 	.dev_count_resources = arm_smmu_v3_pmcg_count_resources,
1899 	.dev_init_resources = arm_smmu_v3_pmcg_init_resources,
1900 	.dev_add_platdata = arm_smmu_v3_pmcg_add_platdata,
1901 };
1902 
1903 static __init const struct iort_dev_config *iort_get_dev_cfg(
1904 			struct acpi_iort_node *node)
1905 {
1906 	switch (node->type) {
1907 	case ACPI_IORT_NODE_SMMU_V3:
1908 		return &iort_arm_smmu_v3_cfg;
1909 	case ACPI_IORT_NODE_SMMU:
1910 		return &iort_arm_smmu_cfg;
1911 	case ACPI_IORT_NODE_PMCG:
1912 		return &iort_arm_smmu_v3_pmcg_cfg;
1913 	default:
1914 		return NULL;
1915 	}
1916 }
1917 
1918 /**
1919  * iort_add_platform_device() - Allocate a platform device for IORT node
1920  * @node: Pointer to device ACPI IORT node
1921  * @ops: Pointer to IORT device config struct
1922  *
1923  * Returns: 0 on success, <0 failure
1924  */
1925 static int __init iort_add_platform_device(struct acpi_iort_node *node,
1926 					   const struct iort_dev_config *ops)
1927 {
1928 	struct fwnode_handle *fwnode;
1929 	struct platform_device *pdev;
1930 	struct resource *r;
1931 	int ret, count;
1932 
1933 	pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
1934 	if (!pdev)
1935 		return -ENOMEM;
1936 
1937 	if (ops->dev_set_proximity) {
1938 		ret = ops->dev_set_proximity(&pdev->dev, node);
1939 		if (ret)
1940 			goto dev_put;
1941 	}
1942 
1943 	count = ops->dev_count_resources(node);
1944 
1945 	r = kcalloc(count, sizeof(*r), GFP_KERNEL);
1946 	if (!r) {
1947 		ret = -ENOMEM;
1948 		goto dev_put;
1949 	}
1950 
1951 	ops->dev_init_resources(r, node);
1952 
1953 	ret = platform_device_add_resources(pdev, r, count);
1954 	/*
1955 	 * Resources are duplicated in platform_device_add_resources,
1956 	 * free their allocated memory
1957 	 */
1958 	kfree(r);
1959 
1960 	if (ret)
1961 		goto dev_put;
1962 
1963 	/*
1964 	 * Platform devices based on PMCG nodes uses platform_data to
1965 	 * pass the hardware model info to the driver. For others, add
1966 	 * a copy of IORT node pointer to platform_data to be used to
1967 	 * retrieve IORT data information.
1968 	 */
1969 	if (ops->dev_add_platdata)
1970 		ret = ops->dev_add_platdata(pdev);
1971 	else
1972 		ret = platform_device_add_data(pdev, &node, sizeof(node));
1973 
1974 	if (ret)
1975 		goto dev_put;
1976 
1977 	fwnode = iort_get_fwnode(node);
1978 
1979 	if (!fwnode) {
1980 		ret = -ENODEV;
1981 		goto dev_put;
1982 	}
1983 
1984 	pdev->dev.fwnode = fwnode;
1985 
1986 	if (ops->dev_dma_configure)
1987 		ops->dev_dma_configure(&pdev->dev, node);
1988 
1989 	iort_set_device_domain(&pdev->dev, node);
1990 
1991 	ret = platform_device_add(pdev);
1992 	if (ret)
1993 		goto dma_deconfigure;
1994 
1995 	return 0;
1996 
1997 dma_deconfigure:
1998 	arch_teardown_dma_ops(&pdev->dev);
1999 dev_put:
2000 	platform_device_put(pdev);
2001 
2002 	return ret;
2003 }
2004 
2005 #ifdef CONFIG_PCI
2006 static void __init iort_enable_acs(struct acpi_iort_node *iort_node)
2007 {
2008 	static bool acs_enabled __initdata;
2009 
2010 	if (acs_enabled)
2011 		return;
2012 
2013 	if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
2014 		struct acpi_iort_node *parent;
2015 		struct acpi_iort_id_mapping *map;
2016 		int i;
2017 
2018 		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node,
2019 				   iort_node->mapping_offset);
2020 
2021 		for (i = 0; i < iort_node->mapping_count; i++, map++) {
2022 			if (!map->output_reference)
2023 				continue;
2024 
2025 			parent = ACPI_ADD_PTR(struct acpi_iort_node,
2026 					iort_table,  map->output_reference);
2027 			/*
2028 			 * If we detect a RC->SMMU mapping, make sure
2029 			 * we enable ACS on the system.
2030 			 */
2031 			if ((parent->type == ACPI_IORT_NODE_SMMU) ||
2032 				(parent->type == ACPI_IORT_NODE_SMMU_V3)) {
2033 				pci_request_acs();
2034 				acs_enabled = true;
2035 				return;
2036 			}
2037 		}
2038 	}
2039 }
2040 #else
2041 static inline void iort_enable_acs(struct acpi_iort_node *iort_node) { }
2042 #endif
2043 
2044 static void __init iort_init_platform_devices(void)
2045 {
2046 	struct acpi_iort_node *iort_node, *iort_end;
2047 	struct acpi_table_iort *iort;
2048 	struct fwnode_handle *fwnode;
2049 	int i, ret;
2050 	const struct iort_dev_config *ops;
2051 
2052 	/*
2053 	 * iort_table and iort both point to the start of IORT table, but
2054 	 * have different struct types
2055 	 */
2056 	iort = (struct acpi_table_iort *)iort_table;
2057 
2058 	/* Get the first IORT node */
2059 	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
2060 				 iort->node_offset);
2061 	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
2062 				iort_table->length);
2063 
2064 	for (i = 0; i < iort->node_count; i++) {
2065 		if (iort_node >= iort_end) {
2066 			pr_err("iort node pointer overflows, bad table\n");
2067 			return;
2068 		}
2069 
2070 		iort_enable_acs(iort_node);
2071 
2072 		ops = iort_get_dev_cfg(iort_node);
2073 		if (ops) {
2074 			fwnode = acpi_alloc_fwnode_static();
2075 			if (!fwnode)
2076 				return;
2077 
2078 			iort_set_fwnode(iort_node, fwnode);
2079 
2080 			ret = iort_add_platform_device(iort_node, ops);
2081 			if (ret) {
2082 				iort_delete_fwnode(iort_node);
2083 				acpi_free_fwnode_static(fwnode);
2084 				return;
2085 			}
2086 		}
2087 
2088 		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
2089 					 iort_node->length);
2090 	}
2091 }
2092 
2093 void __init acpi_iort_init(void)
2094 {
2095 	acpi_status status;
2096 
2097 	/* iort_table will be used at runtime after the iort init,
2098 	 * so we don't need to call acpi_put_table() to release
2099 	 * the IORT table mapping.
2100 	 */
2101 	status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
2102 	if (ACPI_FAILURE(status)) {
2103 		if (status != AE_NOT_FOUND) {
2104 			const char *msg = acpi_format_exception(status);
2105 
2106 			pr_err("Failed to get table, %s\n", msg);
2107 		}
2108 
2109 		return;
2110 	}
2111 
2112 	iort_init_platform_devices();
2113 }
2114 
2115 #ifdef CONFIG_ZONE_DMA
2116 /*
2117  * Extract the highest CPU physical address accessible to all DMA masters in
2118  * the system. PHYS_ADDR_MAX is returned when no constrained device is found.
2119  */
2120 phys_addr_t __init acpi_iort_dma_get_max_cpu_address(void)
2121 {
2122 	phys_addr_t limit = PHYS_ADDR_MAX;
2123 	struct acpi_iort_node *node, *end;
2124 	struct acpi_table_iort *iort;
2125 	acpi_status status;
2126 	int i;
2127 
2128 	if (acpi_disabled)
2129 		return limit;
2130 
2131 	status = acpi_get_table(ACPI_SIG_IORT, 0,
2132 				(struct acpi_table_header **)&iort);
2133 	if (ACPI_FAILURE(status))
2134 		return limit;
2135 
2136 	node = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->node_offset);
2137 	end = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->header.length);
2138 
2139 	for (i = 0; i < iort->node_count; i++) {
2140 		if (node >= end)
2141 			break;
2142 
2143 		switch (node->type) {
2144 			struct acpi_iort_named_component *ncomp;
2145 			struct acpi_iort_root_complex *rc;
2146 			phys_addr_t local_limit;
2147 
2148 		case ACPI_IORT_NODE_NAMED_COMPONENT:
2149 			ncomp = (struct acpi_iort_named_component *)node->node_data;
2150 			local_limit = DMA_BIT_MASK(ncomp->memory_address_limit);
2151 			limit = min_not_zero(limit, local_limit);
2152 			break;
2153 
2154 		case ACPI_IORT_NODE_PCI_ROOT_COMPLEX:
2155 			if (node->revision < 1)
2156 				break;
2157 
2158 			rc = (struct acpi_iort_root_complex *)node->node_data;
2159 			local_limit = DMA_BIT_MASK(rc->memory_address_limit);
2160 			limit = min_not_zero(limit, local_limit);
2161 			break;
2162 		}
2163 		node = ACPI_ADD_PTR(struct acpi_iort_node, node, node->length);
2164 	}
2165 	acpi_put_table(&iort->header);
2166 	return limit;
2167 }
2168 #endif
2169