xref: /freebsd/sys/arm64/acpica/acpi_iort.c (revision 32100375a661c1e16588ddfa7b90ca8d26cb9786)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (C) 2018 Marvell International Ltd.
5  *
6  * Author: Jayachandran C Nair <jchandra@freebsd.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include "opt_acpi.h"
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include <sys/param.h>
36 #include <sys/bus.h>
37 #include <sys/kernel.h>
38 #include <sys/malloc.h>
39 
40 #include <machine/intr.h>
41 
42 #include <contrib/dev/acpica/include/acpi.h>
43 #include <contrib/dev/acpica/include/accommon.h>
44 #include <contrib/dev/acpica/include/actables.h>
45 
46 #include <dev/acpica/acpivar.h>
47 
48 /*
49  * Track next XREF available for ITS groups.
50  */
51 static u_int acpi_its_xref = ACPI_MSI_XREF;
52 
53 /*
54  * Some types of IORT nodes have a set of mappings.  Each of them maps
55  * a range of device IDs [base..end] from the current node to another
56  * node. The corresponding device IDs on the destination node start at
57  * outbase.
58  */
59 struct iort_map_entry {
60 	u_int			base;
61 	u_int			end;
62 	u_int			outbase;
63 	u_int			flags;
64 	u_int			out_node_offset;
65 	struct iort_node	*out_node;
66 };
67 
68 /*
69  * The ITS group node does not have any outgoing mappings. It has a
70  * list of GIC ITS blocks which can handle the device ID. We store
71  * the PIC XREF used by each block and the block's proximity data
72  * here, so that they can be retrieved together.
73  */
74 struct iort_its_entry {
75 	u_int			its_id;
76 	u_int			xref;
77 	int			pxm;
78 };
79 
80 /*
81  * IORT node. Each node has some device specific data depending on the
82  * type of the node. The node can also have a set of mappings, or, in
83  * the case of ITS group nodes, a set of ITS entries.
84  * The nodes are kept in a TAILQ by type.
85  */
86 struct iort_node {
87 	TAILQ_ENTRY(iort_node)	next;		/* next entry with same type */
88 	enum AcpiIortNodeType	type;		/* ACPI type */
89 	u_int			node_offset;	/* offset in IORT - node ID */
90 	u_int			nentries;	/* items in array below */
91 	u_int			usecount;	/* for bookkeeping */
92 	u_int			revision;	/* node revision */
93 	union {
94 		ACPI_IORT_ROOT_COMPLEX	pci_rc;		/* PCI root complex */
95 		ACPI_IORT_SMMU		smmu;
96 		ACPI_IORT_SMMU_V3	smmu_v3;
97 	} data;
98 	union {
99 		struct iort_map_entry	*mappings;	/* node mappings  */
100 		struct iort_its_entry	*its;		/* ITS IDs array */
101 	} entries;
102 };
103 
104 /* Lists for each of the types. */
105 static TAILQ_HEAD(, iort_node) pci_nodes = TAILQ_HEAD_INITIALIZER(pci_nodes);
106 static TAILQ_HEAD(, iort_node) smmu_nodes = TAILQ_HEAD_INITIALIZER(smmu_nodes);
107 static TAILQ_HEAD(, iort_node) its_groups = TAILQ_HEAD_INITIALIZER(its_groups);
108 
109 static int
110 iort_entry_get_id_mapping_index(struct iort_node *node)
111 {
112 
113 	switch (node->type) {
114 	case ACPI_IORT_NODE_SMMU_V3:
115 		/* The ID mapping field was added in version 1 */
116 		if (node->revision < 1)
117 			return (-1);
118 
119 		/*
120 		 * If all the control interrupts are GSIV based, the ID
121 		 * mapping field is ignored.
122 		 */
123 		if (node->data.smmu_v3.EventGsiv != 0 &&
124 		    node->data.smmu_v3.PriGsiv != 0 &&
125 		    node->data.smmu_v3.GerrGsiv != 0 &&
126 		    node->data.smmu_v3.SyncGsiv != 0)
127 			return (-1);
128 
129 		if (node->data.smmu_v3.IdMappingIndex >= node->nentries)
130 			return (-1);
131 
132 		return (node->data.smmu_v3.IdMappingIndex);
133 	case ACPI_IORT_NODE_PMCG:
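		/* A PMCG node's first ID mapping describes its own MSI DeviceID. */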
134 		return (0);
135 	default:
136 		break;
137 	}
138 
139 	return (-1);
140 }
141 
142 /*
143  * Look up an ID in the mappings array. If successful, map the input ID
144  * to the output ID and return the output node found.
145  */
146 static struct iort_node *
147 iort_entry_lookup(struct iort_node *node, u_int id, u_int *outid)
148 {
149 	struct iort_map_entry *entry;
150 	int i, id_map;
151 
152 	id_map = iort_entry_get_id_mapping_index(node);
153 	entry = node->entries.mappings;
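	/*
	 * Scan the mappings, skipping the entry reserved for the node's
	 * own MSI DeviceIDs (the ID mapping index), if any.
	 */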
154 	for (i = 0; i < node->nentries; i++, entry++) {
155 		if (i == id_map)
156 			continue;
157 		if (entry->base <= id && id <= entry->end)
158 			break;
159 	}
160 	if (i == node->nentries)
161 		return (NULL);
162 	if ((entry->flags & ACPI_IORT_ID_SINGLE_MAPPING) == 0)
163 		*outid = entry->outbase + (id - entry->base);
164 	else
165 		*outid = entry->outbase;
166 	return (entry->out_node);
167 }
168 
169 /*
170  * Map a PCI RID to a SMMU node or an ITS node, based on outtype.
171  */
172 static struct iort_node *
173 iort_pci_rc_map(u_int seg, u_int rid, u_int outtype, u_int *outid)
174 {
175 	struct iort_node *node, *out_node;
176 	u_int nxtid;
177 
178 	out_node = NULL;
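	/* Find a root complex node for this segment that has a mapping for the RID. */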
179 	TAILQ_FOREACH(node, &pci_nodes, next) {
180 		if (node->data.pci_rc.PciSegmentNumber != seg)
181 			continue;
182 		out_node = iort_entry_lookup(node, rid, &nxtid);
183 		if (out_node != NULL)
184 			break;
185 	}
186 
187 	/* Could not find a PCI RC node matching the segment and device ID. */
188 	if (out_node == NULL)
189 		return (NULL);
190 
191 	/* Node can be SMMU or ITS. If SMMU, we need another lookup. */
192 	if (outtype == ACPI_IORT_NODE_ITS_GROUP &&
193 	    (out_node->type == ACPI_IORT_NODE_SMMU_V3 ||
194 	    out_node->type == ACPI_IORT_NODE_SMMU)) {
195 		out_node = iort_entry_lookup(out_node, nxtid, &nxtid);
196 		if (out_node == NULL)
197 			return (NULL);
198 	}
199 
200 	KASSERT(out_node->type == outtype, ("mapping fail"));
201 	*outid = nxtid;
202 	return (out_node);
203 }
204 
205 #ifdef notyet
206 /*
207  * Not implemented, map a PCIe device to the SMMU it is associated with.
208  */
209 int
210 acpi_iort_map_smmu(u_int seg, u_int devid, void **smmu, u_int *sid)
211 {
212 	/* XXX: convert oref to SMMU device */
213 	return (ENXIO);
214 }
215 #endif
216 
217 /*
218  * Allocate the mapping array for a node and copy the ID mappings from
219  * the IORT node entry into it.
220  */
221 static void
222 iort_copy_data(struct iort_node *node, ACPI_IORT_NODE *node_entry)
223 {
224 	ACPI_IORT_ID_MAPPING *map_entry;
225 	struct iort_map_entry *mapping;
226 	int i;
227 
228 	map_entry = ACPI_ADD_PTR(ACPI_IORT_ID_MAPPING, node_entry,
229 	    node_entry->MappingOffset);
230 	node->nentries = node_entry->MappingCount;
231 	node->usecount = 0;
232 	mapping = malloc(sizeof(*mapping) * node->nentries, M_DEVBUF,
233 	    M_WAITOK | M_ZERO);
234 	node->entries.mappings = mapping;
235 	for (i = 0; i < node->nentries; i++, mapping++, map_entry++) {
236 		mapping->base = map_entry->InputBase;
237 		mapping->end = map_entry->InputBase + map_entry->IdCount - 1;
238 		mapping->outbase = map_entry->OutputBase;
239 		mapping->out_node_offset = map_entry->OutputReference;
240 		mapping->flags = map_entry->Flags;
241 		mapping->out_node = NULL;
242 	}
243 }
244 
245 /*
246  * Allocate and copy an ITS group.
247  */
248 static void
249 iort_copy_its(struct iort_node *node, ACPI_IORT_NODE *node_entry)
250 {
251 	struct iort_its_entry *its;
252 	ACPI_IORT_ITS_GROUP *itsg_entry;
253 	UINT32 *id;
254 	int i;
255 
256 	itsg_entry = (ACPI_IORT_ITS_GROUP *)node_entry->NodeData;
257 	node->nentries = itsg_entry->ItsCount;
258 	node->usecount = 0;
259 	its = malloc(sizeof(*its) * node->nentries, M_DEVBUF, M_WAITOK | M_ZERO);
260 	node->entries.its = its;
261 	id = &itsg_entry->Identifiers[0];
262 	for (i = 0; i < node->nentries; i++, its++, id++) {
263 		its->its_id = *id;
264 		its->pxm = -1;
265 		its->xref = 0;
266 	}
267 }
268 
269 /*
270  * Walk the IORT table and add nodes to corresponding list.
271  */
272 static void
273 iort_add_nodes(ACPI_IORT_NODE *node_entry, u_int node_offset)
274 {
275 	ACPI_IORT_ROOT_COMPLEX *pci_rc;
276 	ACPI_IORT_SMMU *smmu;
277 	ACPI_IORT_SMMU_V3 *smmu_v3;
278 	struct iort_node *node;
279 
280 	node = malloc(sizeof(*node), M_DEVBUF, M_WAITOK | M_ZERO);
281 	node->type = node_entry->Type;
282 	node->node_offset = node_offset;
283 	node->revision = node_entry->Revision;
284 
285 	/* copy nodes depending on type */
286 	switch (node_entry->Type) {
287 	case ACPI_IORT_NODE_PCI_ROOT_COMPLEX:
288 		pci_rc = (ACPI_IORT_ROOT_COMPLEX *)node_entry->NodeData;
289 		memcpy(&node->data.pci_rc, pci_rc, sizeof(*pci_rc));
290 		iort_copy_data(node, node_entry);
291 		TAILQ_INSERT_TAIL(&pci_nodes, node, next);
292 		break;
293 	case ACPI_IORT_NODE_SMMU:
294 		smmu = (ACPI_IORT_SMMU *)node_entry->NodeData;
295 		memcpy(&node->data.smmu, smmu, sizeof(*smmu));
296 		iort_copy_data(node, node_entry);
297 		TAILQ_INSERT_TAIL(&smmu_nodes, node, next);
298 		break;
299 	case ACPI_IORT_NODE_SMMU_V3:
300 		smmu_v3 = (ACPI_IORT_SMMU_V3 *)node_entry->NodeData;
301 		memcpy(&node->data.smmu_v3, smmu_v3, sizeof(*smmu_v3));
302 		iort_copy_data(node, node_entry);
303 		TAILQ_INSERT_TAIL(&smmu_nodes, node, next);
304 		break;
305 	case ACPI_IORT_NODE_ITS_GROUP:
306 		iort_copy_its(node, node_entry);
307 		TAILQ_INSERT_TAIL(&its_groups, node, next);
308 		break;
309 	default:
310 		printf("ACPI: IORT: Dropping unhandled type %u\n",
311 		    node_entry->Type);
312 		free(node, M_DEVBUF);
313 		break;
314 	}
315 }
316 
317 /*
318  * For the given mapping entry, walk through all the possible destination
319  * nodes and resolve the output reference.
320  */
321 static void
322 iort_resolve_node(struct iort_map_entry *entry, int check_smmu)
323 {
324 	struct iort_node *node, *np;
325 
326 	node = NULL;
327 	if (check_smmu) {
328 		TAILQ_FOREACH(np, &smmu_nodes, next) {
329 			if (entry->out_node_offset == np->node_offset) {
330 				node = np;
331 				break;
332 			}
333 		}
334 	}
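	/* Not found among the SMMU nodes (or not checked); try the ITS groups. */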
335 	if (node == NULL) {
336 		TAILQ_FOREACH(np, &its_groups, next) {
337 			if (entry->out_node_offset == np->node_offset) {
338 				node = np;
339 				break;
340 			}
341 		}
342 	}
343 	if (node != NULL) {
344 		node->usecount++;
345 		entry->out_node = node;
346 	} else {
347 		printf("ACPI: IORT: Firmware Bug: no mapping for node %u\n",
348 		    entry->out_node_offset);
349 	}
350 }
351 
352 /*
353  * Resolve all output node references to node pointers.
354  */
355 static void
356 iort_post_process_mappings(void)
357 {
358 	struct iort_node *node;
359 	int i;
360 
361 	TAILQ_FOREACH(node, &pci_nodes, next)
362 		for (i = 0; i < node->nentries; i++)
363 			iort_resolve_node(&node->entries.mappings[i], TRUE);
364 	TAILQ_FOREACH(node, &smmu_nodes, next)
365 		for (i = 0; i < node->nentries; i++)
366 			iort_resolve_node(&node->entries.mappings[i], FALSE);
367 	/* TODO: named nodes */
368 }
369 
370 /*
371  * Walk MADT table, assign PIC xrefs to all ITS entries.
372  */
373 static void
374 madt_resolve_its_xref(ACPI_SUBTABLE_HEADER *entry, void *arg)
375 {
376 	ACPI_MADT_GENERIC_TRANSLATOR *gict;
377 	struct iort_node *its_node;
378 	struct iort_its_entry *its_entry;
379 	u_int xref;
380 	int i, matches;
381 
382 	if (entry->Type != ACPI_MADT_TYPE_GENERIC_TRANSLATOR)
383 		return;
384 
385 	gict = (ACPI_MADT_GENERIC_TRANSLATOR *)entry;
386 	matches = 0;
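	/*
	 * Assign the next available XREF to this ITS block and record it
	 * in every IORT ITS entry with a matching translation ID.
	 */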
387 	xref = acpi_its_xref++;
388 	TAILQ_FOREACH(its_node, &its_groups, next) {
389 		its_entry = its_node->entries.its;
390 		for (i = 0; i < its_node->nentries; i++, its_entry++) {
391 			if (its_entry->its_id == gict->TranslationId) {
392 				its_entry->xref = xref;
393 				matches++;
394 			}
395 		}
396 	}
397 	if (matches == 0)
398 		printf("ACPI: IORT: Unused ITS block, ID %u\n",
399 		    gict->TranslationId);
400 }
401 
402 /*
403  * Walk SRAT, assign proximity to all ITS entries.
404  */
405 static void
406 srat_resolve_its_pxm(ACPI_SUBTABLE_HEADER *entry, void *arg)
407 {
408 	ACPI_SRAT_GIC_ITS_AFFINITY *gicits;
409 	struct iort_node *its_node;
410 	struct iort_its_entry *its_entry;
411 	int *map_counts;
412 	int i, matches, dom;
413 
414 	if (entry->Type != ACPI_SRAT_TYPE_GIC_ITS_AFFINITY)
415 		return;
416 
417 	matches = 0;
418 	map_counts = arg;
419 	gicits = (ACPI_SRAT_GIC_ITS_AFFINITY *)entry;
420 	dom = acpi_map_pxm_to_vm_domainid(gicits->ProximityDomain);
421 
422 	/*
423 	 * Catch firmware and config errors. map_counts keeps a
424 	 * count of ProximityDomain values mapping to a domain ID.
425 	 */
426 #if MAXMEMDOM > 1
427 	if (dom == -1)
428 		printf("Firmware Error: Proximity Domain %d could not be"
429 		    " mapped for GIC ITS ID %d!\n",
430 		    gicits->ProximityDomain, gicits->ItsId);
431 #endif
432 	/* use dom + 1 as index to handle the case where dom == -1 */
433 	i = ++map_counts[dom + 1];
434 	if (i > 1) {
435 #ifdef NUMA
436 		if (dom != -1)
437 			printf("ERROR: Multiple Proximity Domains map to the"
438 			    " same NUMA domain %d!\n", dom);
439 #else
440 		printf("WARNING: multiple Proximity Domains in SRAT but NUMA"
441 		    " NOT enabled!\n");
442 #endif
443 	}
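	/* Record the resolved NUMA domain in every ITS entry matching this ITS ID. */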
444 	TAILQ_FOREACH(its_node, &its_groups, next) {
445 		its_entry = its_node->entries.its;
446 		for (i = 0; i < its_node->nentries; i++, its_entry++) {
447 			if (its_entry->its_id == gicits->ItsId) {
448 				its_entry->pxm = dom;
449 				matches++;
450 			}
451 		}
452 	}
453 	if (matches == 0)
454 		printf("ACPI: IORT: ITS block %u in SRAT not found in IORT!\n",
455 		    gicits->ItsId);
456 }
457 
458 /*
459  * Cross check the ITS Id with MADT and (if available) SRAT.
460  */
461 static int
462 iort_post_process_its(void)
463 {
464 	ACPI_TABLE_MADT *madt;
465 	ACPI_TABLE_SRAT *srat;
466 	vm_paddr_t madt_pa, srat_pa;
467 	int map_counts[MAXMEMDOM + 1] = { 0 };
468 
469 	/* Check ITS block in MADT */
470 	madt_pa = acpi_find_table(ACPI_SIG_MADT);
471 	KASSERT(madt_pa != 0, ("no MADT!"));
472 	madt = acpi_map_table(madt_pa, ACPI_SIG_MADT);
473 	KASSERT(madt != NULL, ("can't map MADT!"));
474 	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
475 	    madt_resolve_its_xref, NULL);
476 	acpi_unmap_table(madt);
477 
478 	/* Get proximity if available */
479 	srat_pa = acpi_find_table(ACPI_SIG_SRAT);
480 	if (srat_pa != 0) {
481 		srat = acpi_map_table(srat_pa, ACPI_SIG_SRAT);
482 		KASSERT(srat != NULL, ("can't map SRAT!"));
483 		acpi_walk_subtables(srat + 1, (char *)srat + srat->Header.Length,
484 		    srat_resolve_its_pxm, map_counts);
485 		acpi_unmap_table(srat);
486 	}
487 	return (0);
488 }
489 
490 /*
491  * Find, parse, and save IO Remapping Table ("IORT").
492  */
493 static int
494 acpi_parse_iort(void *dummy __unused)
495 {
496 	ACPI_TABLE_IORT *iort;
497 	ACPI_IORT_NODE *node_entry;
498 	vm_paddr_t iort_pa;
499 	u_int node_offset;
500 
501 	iort_pa = acpi_find_table(ACPI_SIG_IORT);
502 	if (iort_pa == 0)
503 		return (ENXIO);
504 
505 	iort = acpi_map_table(iort_pa, ACPI_SIG_IORT);
506 	if (iort == NULL) {
507 		printf("ACPI: Unable to map the IORT table!\n");
508 		return (ENXIO);
509 	}
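	/* Walk all IORT nodes and add each one to its per-type list. */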
510 	for (node_offset = iort->NodeOffset;
511 	    node_offset < iort->Header.Length;
512 	    node_offset += node_entry->Length) {
513 		node_entry = ACPI_ADD_PTR(ACPI_IORT_NODE, iort, node_offset);
514 		iort_add_nodes(node_entry, node_offset);
515 	}
516 	acpi_unmap_table(iort);
517 	iort_post_process_mappings();
518 	iort_post_process_its();
519 	return (0);
520 }
521 SYSINIT(acpi_parse_iort, SI_SUB_DRIVERS, SI_ORDER_FIRST, acpi_parse_iort, NULL);
522 
523 /*
524  * Provide ITS ID to PIC xref mapping.
525  */
526 int
527 acpi_iort_its_lookup(u_int its_id, u_int *xref, int *pxm)
528 {
529 	struct iort_node *its_node;
530 	struct iort_its_entry *its_entry;
531 	int i;
532 
533 	TAILQ_FOREACH(its_node, &its_groups, next) {
534 		its_entry = its_node->entries.its;
535 		for (i = 0; i < its_node->nentries; i++, its_entry++) {
536 			if (its_entry->its_id == its_id) {
537 				*xref = its_entry->xref;
538 				*pxm = its_entry->pxm;
539 				return (0);
540 			}
541 		}
542 	}
543 	return (ENOENT);
544 }
545 
546 /*
547  * Find the mapping for a PCIe device given its segment and device ID.
548  * Returns the XREF for MSI interrupt setup and the device ID to use
549  * for the interrupt setup.
550  */
551 int
552 acpi_iort_map_pci_msi(u_int seg, u_int rid, u_int *xref, u_int *devid)
553 {
554 	struct iort_node *node;
555 
556 	node = iort_pci_rc_map(seg, rid, ACPI_IORT_NODE_ITS_GROUP, devid);
557 	if (node == NULL)
558 		return (ENOENT);
559 
560 	/* This should be an ITS node */
561 	KASSERT(node->type == ACPI_IORT_NODE_ITS_GROUP, ("bad group"));
562 
563 	/* Return the first entry's xref; we don't handle more than one now. */
564 	*xref = node->entries.its[0].xref;
565 	return (0);
566 }
567