xref: /illumos-gate/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_acpi.c (revision 67dbe2be0c0f1e2eb428b89088bb5667e8f0b9f6)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include "amd_iommu_acpi.h"
28 #include "amd_iommu_impl.h"
29 
30 static int create_acpi_hash(amd_iommu_acpi_t *acpi);
31 static void amd_iommu_acpi_table_fini(amd_iommu_acpi_t **acpipp);
32 
33 static void dump_acpi_aliases(void);
34 
35 
/*
 * Globals
 */
/* Snapshot of the IVRS IVinfo register fields, filled by create_acpi_hash() */
static amd_iommu_acpi_global_t *amd_iommu_acpi_global;
/*
 * deviceid -> info hashes.  Buckets [0, AMD_IOMMU_ACPI_INFO_HASH_SZ) hold
 * single-deviceid entries; the extra trailing bucket holds range/"all"
 * entries (see create_acpi_hash()).
 */
static amd_iommu_acpi_ivhd_t **amd_iommu_acpi_ivhd_hash;
static amd_iommu_acpi_ivmd_t **amd_iommu_acpi_ivmd_hash;
42 
43 static int
44 type_byte_size(char *cp)
45 {
46 	uint8_t type8 = *((uint8_t *)cp);
47 	uint8_t len_bits;
48 
49 	len_bits = AMD_IOMMU_REG_GET8(&type8, AMD_IOMMU_ACPI_DEVENTRY_LEN);
50 
51 	switch (len_bits) {
52 	case 0:
53 			return (4);
54 	case 1:
55 			return (8);
56 	case 2:
57 			return (16);
58 	case 3:
59 			return (32);
60 	default:
61 			cmn_err(CE_WARN, "%s: Invalid deventry len: %d",
62 			    amd_iommu_modname, len_bits);
63 			return (len_bits);
64 	}
65 	/*NOTREACHED*/
66 }
67 
/*
 * Parse one 4-byte device entry of an IVHD sub-table at "cp".  For the
 * recognized entry types (1-4) an ivhd_deventry_t is appended to the
 * container "c"'s deventry list; padding (type 0) and unknown types are
 * skipped.  For all but RANGE_END entries, the data-settings byte is
 * decoded into the per-device interrupt pass-through fields.
 */
static void
process_4byte_deventry(ivhd_container_t *c, char *cp)
{
	int entry_type = *((uint8_t *)cp);
	ivhd_deventry_t deventry = {0};
	ivhd_deventry_t *devp;
	uint8_t datsetting8;
	align_16_t al = {0};
	int i;

	/* 4 byte entry */
	deventry.idev_len = 4;
	deventry.idev_deviceid = -1;
	deventry.idev_src_deviceid = -1;

	/* bytes 1-2 hold an unaligned 16-bit deviceid; assemble bytewise */
	for (i = 0; i < 2; i++) {
		al.ent8[i] = *((uint8_t *)&cp[i + 1]);
	}

	switch (entry_type) {
	case 1:
		deventry.idev_type = DEVENTRY_ALL;
		break;
	case 2:
		deventry.idev_type = DEVENTRY_SELECT;
		deventry.idev_deviceid = al.ent16;
		break;
	case 3:
		deventry.idev_type = DEVENTRY_RANGE;
		deventry.idev_deviceid = al.ent16;
		break;
	case 4:
		deventry.idev_type = DEVENTRY_RANGE_END;
		deventry.idev_deviceid = al.ent16;
		ASSERT(cp[3] == 0);
		break;
	case 0:
		/* type 0 is padding: all payload bytes must be zero ... */
		ASSERT(al.ent16 == 0);
		ASSERT(cp[3] == 0);
		/* ... then FALLTHROUGH: padding is skipped like unknown types */
	default:
		return;
	}


	devp = kmem_alloc(sizeof (ivhd_deventry_t), KM_SLEEP);
	*devp = deventry;

	/* append to the container's deventry list */
	if (c->ivhdc_first_deventry == NULL)
		c->ivhdc_first_deventry =  devp;
	else
		c->ivhdc_last_deventry->idev_next = devp;

	c->ivhdc_last_deventry = devp;

	/* RANGE_END carries no data settings of its own */
	if (entry_type == 4)
		return;

	datsetting8 = (*((uint8_t *)&cp[3]));

	devp->idev_Lint1Pass = AMD_IOMMU_REG_GET8(&datsetting8,
	    AMD_IOMMU_ACPI_LINT1PASS);

	devp->idev_Lint0Pass = AMD_IOMMU_REG_GET8(&datsetting8,
	    AMD_IOMMU_ACPI_LINT0PASS);

	devp->idev_SysMgt = AMD_IOMMU_REG_GET8(&datsetting8,
	    AMD_IOMMU_ACPI_SYSMGT);

	/* reserved bits of the data-settings byte must be zero */
	ASSERT(AMD_IOMMU_REG_GET8(&datsetting8,
	    AMD_IOMMU_ACPI_DATRSV) == 0);

	devp->idev_NMIPass = AMD_IOMMU_REG_GET8(&datsetting8,
	    AMD_IOMMU_ACPI_NMIPASS);

	devp->idev_ExtIntPass = AMD_IOMMU_REG_GET8(&datsetting8,
	    AMD_IOMMU_ACPI_EXTINTPASS);

	devp->idev_INITPass = AMD_IOMMU_REG_GET8(&datsetting8,
	    AMD_IOMMU_ACPI_INITPASS);
}
148 
149 static void
150 process_8byte_deventry(ivhd_container_t *c, char *cp)
151 {
152 	uint8_t datsetting8;
153 	int entry_type = (uint8_t)*cp;
154 	ivhd_deventry_t deventry = {0};
155 	ivhd_deventry_t *devp;
156 	align_16_t al1 = {0};
157 	align_16_t al2 = {0};
158 	align_32_t al3 = {0};
159 	int i;
160 
161 	/* Length is 8 bytes */
162 	deventry.idev_len = 8;
163 	deventry.idev_deviceid = -1;
164 	deventry.idev_src_deviceid = -1;
165 
166 	for (i = 0; i < 2; i++) {
167 		al1.ent8[i] = *((uint8_t *)&cp[i+1]);
168 		al2.ent8[i] = *((uint8_t *)&cp[i+5]);
169 	}
170 
171 	datsetting8 = *((uint8_t *)&cp[3]);
172 
173 	switch (entry_type) {
174 	case 66:
175 		deventry.idev_type = DEVENTRY_ALIAS_SELECT;
176 		deventry.idev_deviceid = al1.ent16;
177 		deventry.idev_src_deviceid = al2.ent16;
178 		ASSERT(cp[4] == 0);
179 		ASSERT(cp[7] == 0);
180 		break;
181 	case 67:
182 		deventry.idev_type = DEVENTRY_ALIAS_RANGE;
183 		deventry.idev_deviceid = al1.ent16;
184 		deventry.idev_src_deviceid = al2.ent16;
185 		ASSERT(cp[4] == 0);
186 		ASSERT(cp[7] == 0);
187 		break;
188 	case 70:
189 		deventry.idev_type = DEVENTRY_EXTENDED_SELECT;
190 		deventry.idev_deviceid = al1.ent16;
191 		break;
192 	case 71:
193 		deventry.idev_type = DEVENTRY_EXTENDED_RANGE;
194 		deventry.idev_deviceid = al1.ent16;
195 		break;
196 	case 72:
197 		deventry.idev_type = DEVENTRY_SPECIAL_DEVICE;
198 		ASSERT(al1.ent16 == 0);
199 		deventry.idev_deviceid = -1;
200 		deventry.idev_handle = cp[4];
201 		deventry.idev_variety = cp[7];
202 		deventry.idev_src_deviceid = al2.ent16;
203 	default:
204 #ifdef BROKEN_ASSERT
205 		for (i = 0; i < 7; i++) {
206 			ASSERT(cp[i] == 0);
207 		}
208 #endif
209 		return;
210 	}
211 
212 
213 	devp = kmem_alloc(sizeof (ivhd_deventry_t), KM_SLEEP);
214 	*devp = deventry;
215 
216 	if (c->ivhdc_first_deventry == NULL)
217 		c->ivhdc_first_deventry =  devp;
218 	else
219 		c->ivhdc_last_deventry->idev_next = devp;
220 
221 	c->ivhdc_last_deventry = devp;
222 
223 	devp->idev_Lint1Pass = AMD_IOMMU_REG_GET8(&datsetting8,
224 	    AMD_IOMMU_ACPI_LINT1PASS);
225 
226 	devp->idev_Lint0Pass = AMD_IOMMU_REG_GET8(&datsetting8,
227 	    AMD_IOMMU_ACPI_LINT0PASS);
228 
229 	devp->idev_SysMgt = AMD_IOMMU_REG_GET8(&datsetting8,
230 	    AMD_IOMMU_ACPI_SYSMGT);
231 
232 	ASSERT(AMD_IOMMU_REG_GET8(&datsetting8,
233 	    AMD_IOMMU_ACPI_DATRSV) == 0);
234 
235 	devp->idev_NMIPass = AMD_IOMMU_REG_GET8(&datsetting8,
236 	    AMD_IOMMU_ACPI_NMIPASS);
237 
238 	devp->idev_ExtIntPass = AMD_IOMMU_REG_GET8(&datsetting8,
239 	    AMD_IOMMU_ACPI_EXTINTPASS);
240 
241 	devp->idev_INITPass = AMD_IOMMU_REG_GET8(&datsetting8,
242 	    AMD_IOMMU_ACPI_INITPASS);
243 
244 	if (entry_type != 70 && entry_type != 71) {
245 		return;
246 	}
247 
248 	/* Type 70 and 71 */
249 	for (i = 0; i < 4; i++) {
250 		al3.ent8[i] = *((uint8_t *)&cp[i+4]);
251 	}
252 
253 	devp->idev_AtsDisabled = AMD_IOMMU_REG_GET8(&al3.ent32,
254 	    AMD_IOMMU_ACPI_ATSDISABLED);
255 
256 	ASSERT(AMD_IOMMU_REG_GET8(&al3.ent32, AMD_IOMMU_ACPI_EXTDATRSV) == 0);
257 }
258 
259 static void
260 process_ivhd(amd_iommu_acpi_t *acpi, ivhd_t *ivhdp)
261 {
262 	ivhd_container_t *c;
263 	caddr_t ivhd_end;
264 	caddr_t ivhd_tot_end;
265 	caddr_t cp;
266 
267 	ASSERT(ivhdp->ivhd_type == 0x10);
268 
269 	c = kmem_zalloc(sizeof (ivhd_container_t), KM_SLEEP);
270 	c->ivhdc_ivhd = kmem_alloc(sizeof (ivhd_t), KM_SLEEP);
271 	*(c->ivhdc_ivhd) = *ivhdp;
272 
273 	if (acpi->acp_first_ivhdc == NULL)
274 		acpi->acp_first_ivhdc = c;
275 	else
276 		acpi->acp_last_ivhdc->ivhdc_next = c;
277 
278 	acpi->acp_last_ivhdc = c;
279 
280 	ivhd_end = (caddr_t)ivhdp + sizeof (ivhd_t);
281 	ivhd_tot_end = (caddr_t)ivhdp + ivhdp->ivhd_len;
282 
283 	for (cp = ivhd_end; cp < ivhd_tot_end; cp += type_byte_size(cp)) {
284 		/* 16 byte and 32 byte size are currently reserved */
285 		switch (type_byte_size(cp)) {
286 		case 4:
287 			process_4byte_deventry(c, cp);
288 			break;
289 		case 8:
290 			process_8byte_deventry(c, cp);
291 			break;
292 		case 16:
293 		case 32:
294 			/* Reserved */
295 			break;
296 		default:
297 			cmn_err(CE_WARN, "%s: unsupported length for device "
298 			    "entry in ACPI IVRS table's IVHD entry",
299 			    amd_iommu_modname);
300 			break;
301 		}
302 	}
303 }
304 
305 static void
306 process_ivmd(amd_iommu_acpi_t *acpi, ivmd_t *ivmdp)
307 {
308 	ivmd_container_t *c;
309 
310 	ASSERT(ivmdp->ivmd_type != 0x10);
311 
312 	c = kmem_zalloc(sizeof (ivmd_container_t), KM_SLEEP);
313 	c->ivmdc_ivmd = kmem_alloc(sizeof (ivmd_t), KM_SLEEP);
314 	*(c->ivmdc_ivmd) = *ivmdp;
315 
316 	if (acpi->acp_first_ivmdc == NULL)
317 		acpi->acp_first_ivmdc = c;
318 	else
319 		acpi->acp_last_ivmdc->ivmdc_next = c;
320 
321 	acpi->acp_last_ivmdc = c;
322 }
323 
324 int
325 amd_iommu_acpi_init(void)
326 {
327 	ivrs_t *ivrsp;
328 	caddr_t ivrsp_end;
329 	caddr_t table_end;
330 	caddr_t cp;
331 	uint8_t type8;
332 	amd_iommu_acpi_t *acpi;
333 	align_ivhd_t al_vhd = {0};
334 	align_ivmd_t al_vmd = {0};
335 
336 	if (AcpiGetTable(IVRS_SIG, 1, (ACPI_TABLE_HEADER **)&ivrsp) != AE_OK) {
337 		cmn_err(CE_NOTE, "!amd_iommu: No AMD IOMMU ACPI IVRS table");
338 		return (DDI_FAILURE);
339 	}
340 
341 	/*
342 	 * Reserved field must be 0
343 	 */
344 	ASSERT(ivrsp->ivrs_resv == 0);
345 
346 	ASSERT(AMD_IOMMU_REG_GET32(&ivrsp->ivrs_ivinfo,
347 	    AMD_IOMMU_ACPI_IVINFO_RSV1) == 0);
348 	ASSERT(AMD_IOMMU_REG_GET32(&ivrsp->ivrs_ivinfo,
349 	    AMD_IOMMU_ACPI_IVINFO_RSV2) == 0);
350 
351 	ivrsp_end = (caddr_t)ivrsp + sizeof (struct ivrs);
352 	table_end = (caddr_t)ivrsp + ivrsp->ivrs_hdr.Length;
353 
354 	acpi = kmem_zalloc(sizeof (amd_iommu_acpi_t), KM_SLEEP);
355 	acpi->acp_ivrs = kmem_alloc(sizeof (ivrs_t), KM_SLEEP);
356 	*(acpi->acp_ivrs) = *ivrsp;
357 
358 	for (cp = ivrsp_end; cp < table_end; cp += (al_vhd.ivhdp)->ivhd_len) {
359 		al_vhd.cp = cp;
360 		if (al_vhd.ivhdp->ivhd_type == 0x10)
361 			process_ivhd(acpi, al_vhd.ivhdp);
362 	}
363 
364 	for (cp = ivrsp_end; cp < table_end; cp += (al_vmd.ivmdp)->ivmd_len) {
365 		al_vmd.cp = cp;
366 		type8 = al_vmd.ivmdp->ivmd_type;
367 		if (type8 == 0x20 || type8 == 0x21 || type8 == 0x22)
368 			process_ivmd(acpi, al_vmd.ivmdp);
369 	}
370 
371 	if (create_acpi_hash(acpi) != DDI_SUCCESS) {
372 		return (DDI_FAILURE);
373 	}
374 
375 	amd_iommu_acpi_table_fini(&acpi);
376 
377 	ASSERT(acpi == NULL);
378 
379 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_ACPI) {
380 		dump_acpi_aliases();
381 		debug_enter("dump");
382 	}
383 
384 	return (DDI_SUCCESS);
385 }
386 
387 static ivhd_deventry_t *
388 free_ivhd_deventry(ivhd_deventry_t *devp)
389 {
390 	ivhd_deventry_t *next = devp->idev_next;
391 
392 	kmem_free(devp, sizeof (ivhd_deventry_t));
393 
394 	return (next);
395 }
396 
397 static ivhd_container_t *
398 free_ivhd_container(ivhd_container_t *ivhdcp)
399 {
400 	ivhd_container_t *next = ivhdcp->ivhdc_next;
401 	ivhd_deventry_t *devp;
402 
403 	for (devp = ivhdcp->ivhdc_first_deventry; devp; ) {
404 		devp = free_ivhd_deventry(devp);
405 	}
406 
407 	kmem_free(ivhdcp->ivhdc_ivhd, sizeof (ivhd_t));
408 	kmem_free(ivhdcp, sizeof (ivhd_container_t));
409 
410 	return (next);
411 }
412 
413 static ivmd_container_t *
414 free_ivmd_container(ivmd_container_t *ivmdcp)
415 {
416 	ivmd_container_t *next = ivmdcp->ivmdc_next;
417 
418 	kmem_free(ivmdcp->ivmdc_ivmd, sizeof (ivmd_t));
419 	kmem_free(ivmdcp, sizeof (ivmd_container_t));
420 
421 	return (next);
422 }
423 
/*
 * Module-level ACPI teardown hook; currently a no-op.
 *
 * NOTE(review): the global hash tables and IVinfo snapshot allocated in
 * create_acpi_hash() are never freed anywhere visible in this file —
 * confirm whether they are meant to persist for the life of the system
 * or should be released here.
 */
void
amd_iommu_acpi_fini(void)
{
}
428 
429 /*
 * TODO: Do we need to free the ACPI table from GetFirmwareTable()
431  */
432 static void
433 amd_iommu_acpi_table_fini(amd_iommu_acpi_t **acpipp)
434 {
435 	amd_iommu_acpi_t *acpi = *acpipp;
436 	ivhd_container_t *ivhdcp;
437 	ivmd_container_t *ivmdcp;
438 
439 	ASSERT(acpi);
440 
441 	for (ivhdcp = acpi->acp_first_ivhdc; ivhdcp; ) {
442 		ivhdcp = free_ivhd_container(ivhdcp);
443 	}
444 	for (ivmdcp = acpi->acp_first_ivmdc; ivmdcp; ) {
445 		ivmdcp = free_ivmd_container(ivmdcp);
446 	}
447 
448 	kmem_free(acpi->acp_ivrs, sizeof (struct ivrs));
449 	kmem_free(acpi, sizeof (amd_iommu_acpi_t));
450 
451 	*acpipp = NULL;
452 }
453 
454 static uint16_t
455 deviceid_hashfn(uint16_t deviceid)
456 {
457 	return (deviceid % AMD_IOMMU_ACPI_INFO_HASH_SZ);
458 }
459 
460 static void
461 add_deventry_info(ivhd_t *ivhdp, ivhd_deventry_t *deventry,
462     amd_iommu_acpi_ivhd_t **hash)
463 {
464 	static amd_iommu_acpi_ivhd_t *last;
465 	amd_iommu_acpi_ivhd_t *acpi_ivhdp;
466 	uint8_t uint8_flags;
467 	uint16_t uint16_info;
468 	uint16_t idx;
469 
470 	if (deventry->idev_type == DEVENTRY_RANGE_END) {
471 		ASSERT(last);
472 		acpi_ivhdp = last;
473 		last = NULL;
474 		ASSERT(acpi_ivhdp->ach_dev_type == DEVENTRY_RANGE ||
475 		    acpi_ivhdp->ach_dev_type == DEVENTRY_ALIAS_RANGE ||
476 		    acpi_ivhdp->ach_dev_type == DEVENTRY_EXTENDED_RANGE);
477 		ASSERT(acpi_ivhdp->ach_deviceid_end == -1);
478 		acpi_ivhdp->ach_deviceid_end = deventry->idev_deviceid;
479 		/* TODO ASSERT data is 0 */
480 		return;
481 	}
482 
483 	ASSERT(last == NULL);
484 	acpi_ivhdp = kmem_zalloc(sizeof (*acpi_ivhdp), KM_SLEEP);
485 
486 	uint8_flags = ivhdp->ivhd_flags;
487 
488 #ifdef BROKEN_ASSERT
489 	ASSERT(AMD_IOMMU_REG_GET8(&uint8_flags,
490 	    AMD_IOMMU_ACPI_IVHD_FLAGS_RSV) == 0);
491 #endif
492 
493 	acpi_ivhdp->ach_IotlbSup = AMD_IOMMU_REG_GET8(&uint8_flags,
494 	    AMD_IOMMU_ACPI_IVHD_FLAGS_IOTLBSUP);
495 	acpi_ivhdp->ach_Isoc = AMD_IOMMU_REG_GET8(&uint8_flags,
496 	    AMD_IOMMU_ACPI_IVHD_FLAGS_ISOC);
497 	acpi_ivhdp->ach_ResPassPW = AMD_IOMMU_REG_GET8(&uint8_flags,
498 	    AMD_IOMMU_ACPI_IVHD_FLAGS_RESPASSPW);
499 	acpi_ivhdp->ach_PassPW = AMD_IOMMU_REG_GET8(&uint8_flags,
500 	    AMD_IOMMU_ACPI_IVHD_FLAGS_PASSPW);
501 	acpi_ivhdp->ach_HtTunEn = AMD_IOMMU_REG_GET8(&uint8_flags,
502 	    AMD_IOMMU_ACPI_IVHD_FLAGS_HTTUNEN);
503 
504 	/* IVHD fields */
505 	acpi_ivhdp->ach_IOMMU_deviceid = ivhdp->ivhd_deviceid;
506 	acpi_ivhdp->ach_IOMMU_cap_off = ivhdp->ivhd_cap_off;
507 	acpi_ivhdp->ach_IOMMU_reg_base = ivhdp->ivhd_reg_base;
508 	acpi_ivhdp->ach_IOMMU_pci_seg = ivhdp->ivhd_pci_seg;
509 
510 	/* IVHD IOMMU info fields */
511 	uint16_info = ivhdp->ivhd_iommu_info;
512 
513 #ifdef BROKEN_ASSERT
514 	ASSERT(AMD_IOMMU_REG_GET16(&uint16_info,
515 	    AMD_IOMMU_ACPI_IOMMU_INFO_RSV1) == 0);
516 #endif
517 
518 	acpi_ivhdp->ach_IOMMU_UnitID = AMD_IOMMU_REG_GET16(&uint16_info,
519 	    AMD_IOMMU_ACPI_IOMMU_INFO_UNITID);
520 	ASSERT(AMD_IOMMU_REG_GET16(&uint16_info,
521 	    AMD_IOMMU_ACPI_IOMMU_INFO_RSV2) == 0);
522 	acpi_ivhdp->ach_IOMMU_MSInum = AMD_IOMMU_REG_GET16(&uint16_info,
523 	    AMD_IOMMU_ACPI_IOMMU_INFO_MSINUM);
524 
525 	/* Initialize  deviceids to -1 */
526 	acpi_ivhdp->ach_deviceid_start = -1;
527 	acpi_ivhdp->ach_deviceid_end = -1;
528 	acpi_ivhdp->ach_src_deviceid = -1;
529 
530 	/* All range type entries are put on hash entry 0 */
531 	switch (deventry->idev_type) {
532 	case DEVENTRY_ALL:
533 		acpi_ivhdp->ach_deviceid_start = 0;
534 		acpi_ivhdp->ach_deviceid_end = (uint16_t)-1;
535 		acpi_ivhdp->ach_dev_type = DEVENTRY_ALL;
536 		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
537 		break;
538 	case DEVENTRY_SELECT:
539 		acpi_ivhdp->ach_deviceid_start = deventry->idev_deviceid;
540 		acpi_ivhdp->ach_deviceid_end = deventry->idev_deviceid;
541 		acpi_ivhdp->ach_dev_type = DEVENTRY_SELECT;
542 		idx = deviceid_hashfn(deventry->idev_deviceid);
543 		break;
544 	case DEVENTRY_RANGE:
545 		acpi_ivhdp->ach_deviceid_start = deventry->idev_deviceid;
546 		acpi_ivhdp->ach_deviceid_end = -1;
547 		acpi_ivhdp->ach_dev_type = DEVENTRY_RANGE;
548 		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
549 		last = acpi_ivhdp;
550 		break;
551 	case DEVENTRY_RANGE_END:
552 		cmn_err(CE_PANIC, "%s: Unexpected Range End Deventry",
553 		    amd_iommu_modname);
554 		/*NOTREACHED*/
555 	case DEVENTRY_ALIAS_SELECT:
556 		acpi_ivhdp->ach_deviceid_start = deventry->idev_deviceid;
557 		acpi_ivhdp->ach_deviceid_end = deventry->idev_deviceid;
558 		acpi_ivhdp->ach_src_deviceid = deventry->idev_src_deviceid;
559 		acpi_ivhdp->ach_dev_type = DEVENTRY_ALIAS_SELECT;
560 		idx = deviceid_hashfn(deventry->idev_deviceid);
561 		break;
562 	case DEVENTRY_ALIAS_RANGE:
563 		acpi_ivhdp->ach_deviceid_start = deventry->idev_deviceid;
564 		acpi_ivhdp->ach_deviceid_end = -1;
565 		acpi_ivhdp->ach_src_deviceid = deventry->idev_src_deviceid;
566 		acpi_ivhdp->ach_dev_type = DEVENTRY_ALIAS_RANGE;
567 		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
568 		last = acpi_ivhdp;
569 		break;
570 	case DEVENTRY_EXTENDED_SELECT:
571 		acpi_ivhdp->ach_deviceid_start = deventry->idev_deviceid;
572 		acpi_ivhdp->ach_deviceid_end = deventry->idev_deviceid;
573 		acpi_ivhdp->ach_dev_type = DEVENTRY_EXTENDED_SELECT;
574 		idx = deviceid_hashfn(deventry->idev_deviceid);
575 		break;
576 	case DEVENTRY_EXTENDED_RANGE:
577 		acpi_ivhdp->ach_deviceid_start = deventry->idev_deviceid;
578 		acpi_ivhdp->ach_deviceid_end = -1;
579 		acpi_ivhdp->ach_dev_type = DEVENTRY_EXTENDED_RANGE;
580 		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
581 		last = acpi_ivhdp;
582 		break;
583 	case DEVENTRY_SPECIAL_DEVICE:
584 		acpi_ivhdp->ach_deviceid_start = -1;
585 		acpi_ivhdp->ach_deviceid_end = -1;
586 		acpi_ivhdp->ach_src_deviceid = deventry->idev_src_deviceid;
587 		acpi_ivhdp->ach_special_handle = deventry->idev_handle;
588 		acpi_ivhdp->ach_special_variety = deventry->idev_variety;
589 		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
590 	default:
591 		cmn_err(CE_PANIC, "%s: Unsupported deventry type",
592 		    amd_iommu_modname);
593 		/*NOTREACHED*/
594 	}
595 
596 	acpi_ivhdp->ach_Lint1Pass = deventry->idev_Lint1Pass;
597 	acpi_ivhdp->ach_Lint0Pass = deventry->idev_Lint0Pass;
598 	acpi_ivhdp->ach_SysMgt = deventry->idev_SysMgt;
599 	acpi_ivhdp->ach_NMIPass = deventry->idev_NMIPass;
600 	acpi_ivhdp->ach_ExtIntPass = deventry->idev_ExtIntPass;
601 	acpi_ivhdp->ach_INITPass = deventry->idev_INITPass;
602 
603 
604 	/* extended data */
605 	if (acpi_ivhdp->ach_dev_type == DEVENTRY_EXTENDED_SELECT ||
606 	    acpi_ivhdp->ach_dev_type == DEVENTRY_EXTENDED_RANGE) {
607 		acpi_ivhdp->ach_AtsDisabled = deventry->idev_AtsDisabled;
608 	}
609 
610 	/*
611 	 * Now add it to the hash
612 	 */
613 	ASSERT(hash[idx] != acpi_ivhdp);
614 	acpi_ivhdp->ach_next = hash[idx];
615 	hash[idx] = acpi_ivhdp;
616 }
617 
618 static void
619 add_ivhdc_info(ivhd_container_t *ivhdcp, amd_iommu_acpi_ivhd_t **hash)
620 {
621 	ivhd_deventry_t *deventry;
622 	ivhd_t *ivhdp = ivhdcp->ivhdc_ivhd;
623 
624 	for (deventry = ivhdcp->ivhdc_first_deventry; deventry;
625 	    deventry = deventry->idev_next) {
626 		add_deventry_info(ivhdp, deventry, hash);
627 	}
628 }
629 
630 static void
631 add_ivhd_info(amd_iommu_acpi_t *acpi, amd_iommu_acpi_ivhd_t **hash)
632 {
633 	ivhd_container_t *ivhdcp;
634 
635 	for (ivhdcp = acpi->acp_first_ivhdc; ivhdcp;
636 	    ivhdcp = ivhdcp->ivhdc_next) {
637 		add_ivhdc_info(ivhdcp, hash);
638 	}
639 }
640 
641 static void
642 set_ivmd_info(ivmd_t *ivmdp, amd_iommu_acpi_ivmd_t **hash)
643 {
644 	amd_iommu_acpi_ivmd_t *acpi_ivmdp;
645 	uint8_t uint8_flags;
646 	uint16_t idx;
647 
648 	uint8_flags = ivmdp->ivmd_flags;
649 
650 	acpi_ivmdp = kmem_zalloc(sizeof (*acpi_ivmdp), KM_SLEEP);
651 
652 	switch (ivmdp->ivmd_type) {
653 	case 0x20:
654 		acpi_ivmdp->acm_deviceid_start = 0;
655 		acpi_ivmdp->acm_deviceid_end = (uint16_t)-1;
656 		acpi_ivmdp->acm_dev_type = IVMD_DEVICEID_ALL;
657 		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
658 		break;
659 	case 0x21:
660 		acpi_ivmdp->acm_deviceid_start = ivmdp->ivmd_deviceid;
661 		acpi_ivmdp->acm_deviceid_end = ivmdp->ivmd_deviceid;
662 		acpi_ivmdp->acm_dev_type = IVMD_DEVICEID_SELECT;
663 		idx = deviceid_hashfn(ivmdp->ivmd_deviceid);
664 		break;
665 	case 0x22:
666 		acpi_ivmdp->acm_deviceid_start = ivmdp->ivmd_deviceid;
667 		acpi_ivmdp->acm_deviceid_end = ivmdp->ivmd_auxdata;
668 		acpi_ivmdp->acm_dev_type = IVMD_DEVICEID_RANGE;
669 		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
670 		break;
671 	default:
672 		cmn_err(CE_PANIC, "Unknown AMD IOMMU ACPI IVMD deviceid type: "
673 		    "%x", ivmdp->ivmd_type);
674 		/*NOTREACHED*/
675 	}
676 
677 	ASSERT(AMD_IOMMU_REG_GET8(&uint8_flags,
678 	    AMD_IOMMU_ACPI_IVMD_RSV) == 0);
679 
680 	acpi_ivmdp->acm_ExclRange = AMD_IOMMU_REG_GET8(&uint8_flags,
681 	    AMD_IOMMU_ACPI_IVMD_EXCL_RANGE);
682 	acpi_ivmdp->acm_IW = AMD_IOMMU_REG_GET8(&uint8_flags,
683 	    AMD_IOMMU_ACPI_IVMD_IW);
684 	acpi_ivmdp->acm_IR = AMD_IOMMU_REG_GET8(&uint8_flags,
685 	    AMD_IOMMU_ACPI_IVMD_IR);
686 	acpi_ivmdp->acm_Unity = AMD_IOMMU_REG_GET8(&uint8_flags,
687 	    AMD_IOMMU_ACPI_IVMD_UNITY);
688 
689 	acpi_ivmdp->acm_ivmd_phys_start = ivmdp->ivmd_phys_start;
690 	acpi_ivmdp->acm_ivmd_phys_len = ivmdp->ivmd_phys_len;
691 
692 	acpi_ivmdp->acm_next = hash[idx];
693 	hash[idx] = acpi_ivmdp;
694 }
695 
696 static void
697 add_ivmdc_info(ivmd_container_t *ivmdcp, amd_iommu_acpi_ivmd_t **hash)
698 {
699 	set_ivmd_info(ivmdcp->ivmdc_ivmd, hash);
700 }
701 
702 static void
703 add_ivmd_info(amd_iommu_acpi_t *acpi, amd_iommu_acpi_ivmd_t **hash)
704 {
705 	ivmd_container_t *ivmdcp;
706 
707 	for (ivmdcp = acpi->acp_first_ivmdc; ivmdcp;
708 	    ivmdcp = ivmdcp->ivmdc_next) {
709 		add_ivmdc_info(ivmdcp, hash);
710 	}
711 }
712 
713 static void
714 add_global_info(amd_iommu_acpi_t *acpi, amd_iommu_acpi_global_t *global)
715 {
716 	uint32_t ivrs_ivinfo = acpi->acp_ivrs->ivrs_ivinfo;
717 
718 	global->acg_HtAtsResv =
719 	    AMD_IOMMU_REG_GET32(&ivrs_ivinfo, AMD_IOMMU_ACPI_HT_ATSRSV);
720 	global->acg_VAsize =
721 	    AMD_IOMMU_REG_GET32(&ivrs_ivinfo, AMD_IOMMU_ACPI_VA_SIZE);
722 	global->acg_PAsize =
723 	    AMD_IOMMU_REG_GET32(&ivrs_ivinfo, AMD_IOMMU_ACPI_PA_SIZE);
724 }
725 
726 static int
727 create_acpi_hash(amd_iommu_acpi_t *acpi)
728 {
729 	/* Last hash entry is for deviceid ranges including "all" */
730 
731 	amd_iommu_acpi_global = kmem_zalloc(sizeof (amd_iommu_acpi_global_t),
732 	    KM_SLEEP);
733 
734 	amd_iommu_acpi_ivhd_hash = kmem_zalloc(sizeof (amd_iommu_acpi_ivhd_t *)
735 	    * (AMD_IOMMU_ACPI_INFO_HASH_SZ + 1), KM_SLEEP);
736 
737 	amd_iommu_acpi_ivmd_hash = kmem_zalloc(sizeof (amd_iommu_acpi_ivmd_t *)
738 	    * (AMD_IOMMU_ACPI_INFO_HASH_SZ + 1), KM_SLEEP);
739 
740 	add_global_info(acpi, amd_iommu_acpi_global);
741 
742 	add_ivhd_info(acpi, amd_iommu_acpi_ivhd_hash);
743 
744 	add_ivmd_info(acpi, amd_iommu_acpi_ivmd_hash);
745 
746 	return (DDI_SUCCESS);
747 }
748 
749 amd_iommu_acpi_global_t *
750 amd_iommu_lookup_acpi_global(void)
751 {
752 	ASSERT(amd_iommu_acpi_global);
753 
754 	return (amd_iommu_acpi_global);
755 }
756 
757 amd_iommu_acpi_ivhd_t *
758 amd_iommu_lookup_all_ivhd(void)
759 {
760 	amd_iommu_acpi_ivhd_t *hinfop;
761 
762 	hinfop = amd_iommu_acpi_ivhd_hash[AMD_IOMMU_ACPI_INFO_HASH_SZ];
763 	for (; hinfop; hinfop = hinfop->ach_next) {
764 		if (hinfop->ach_deviceid_start == 0 &&
765 		    hinfop->ach_deviceid_end == (uint16_t)-1) {
766 			break;
767 		}
768 	}
769 
770 	return (hinfop);
771 }
772 
773 amd_iommu_acpi_ivmd_t *
774 amd_iommu_lookup_all_ivmd(void)
775 {
776 	amd_iommu_acpi_ivmd_t *minfop;
777 
778 	minfop = amd_iommu_acpi_ivmd_hash[AMD_IOMMU_ACPI_INFO_HASH_SZ];
779 	for (; minfop; minfop = minfop->acm_next) {
780 		if (minfop->acm_deviceid_start == 0 &&
781 		    minfop->acm_deviceid_end == (uint16_t)-1) {
782 			break;
783 		}
784 	}
785 
786 	return (minfop);
787 }
788 
789 amd_iommu_acpi_ivhd_t *
790 amd_iommu_lookup_any_ivhd(void)
791 {
792 	int i;
793 	amd_iommu_acpi_ivhd_t *hinfop;
794 
795 	for (i = AMD_IOMMU_ACPI_INFO_HASH_SZ; i >= 0; i--) {
796 		/*LINTED*/
797 		if (hinfop = amd_iommu_acpi_ivhd_hash[i])
798 			break;
799 	}
800 
801 	return (hinfop);
802 }
803 
804 amd_iommu_acpi_ivmd_t *
805 amd_iommu_lookup_any_ivmd(void)
806 {
807 	int i;
808 	amd_iommu_acpi_ivmd_t *minfop;
809 
810 	for (i = AMD_IOMMU_ACPI_INFO_HASH_SZ; i >= 0; i--) {
811 		/*LINTED*/
812 		if (minfop = amd_iommu_acpi_ivmd_hash[i])
813 			break;
814 	}
815 
816 	return (minfop);
817 }
818 
819 static void
820 dump_acpi_aliases(void)
821 {
822 	amd_iommu_acpi_ivhd_t *hinfop;
823 	uint16_t idx;
824 
825 	for (idx = 0; idx <= AMD_IOMMU_ACPI_INFO_HASH_SZ; idx++) {
826 		hinfop = amd_iommu_acpi_ivhd_hash[idx];
827 		for (; hinfop; hinfop = hinfop->ach_next) {
828 			cmn_err(CE_NOTE, "start=%d, end=%d, src_bdf=%d",
829 			    hinfop->ach_deviceid_start,
830 			    hinfop->ach_deviceid_end,
831 			    hinfop->ach_src_deviceid);
832 		}
833 	}
834 }
835 
/*
 * Look up the IVHD info that applies to "deviceid".  A deviceid of -1
 * requests the special-device entry (hashed with start == end == -1).
 * Otherwise the deviceid's own hash bucket is searched first, then the
 * trailing bucket that holds range and "all" entries; the kernel
 * panics if neither contains a match.
 */
amd_iommu_acpi_ivhd_t *
amd_iommu_lookup_ivhd(int32_t deviceid)
{
	amd_iommu_acpi_ivhd_t *hinfop;
	uint16_t idx;

	if (amd_iommu_debug == AMD_IOMMU_DEBUG_ACPI) {
		cmn_err(CE_NOTE, "Attempting to get ACPI IVHD info "
		    "for deviceid: %d", deviceid);
	}

	ASSERT(amd_iommu_acpi_ivhd_hash);

	/* check if special device */
	if (deviceid == -1) {
		hinfop = amd_iommu_acpi_ivhd_hash[AMD_IOMMU_ACPI_INFO_HASH_SZ];
		for (; hinfop; hinfop = hinfop->ach_next) {
			if (hinfop->ach_deviceid_start  == -1 &&
			    hinfop->ach_deviceid_end == -1) {
				break;
			}
		}
		return (hinfop);
	}

	/* First search for an exact match */

	idx = deviceid_hashfn(deviceid);


range:
	hinfop = amd_iommu_acpi_ivhd_hash[idx];

	for (; hinfop; hinfop = hinfop->ach_next) {
		/* skip entries whose [start, end] range excludes deviceid */
		if (deviceid < hinfop->ach_deviceid_start ||
		    deviceid > hinfop->ach_deviceid_end)
			continue;

		if (amd_iommu_debug == AMD_IOMMU_DEBUG_ACPI) {
			cmn_err(CE_NOTE, "Found ACPI IVHD match: %p, "
			    "actual deviceid = %u, start = %u, end = %u",
			    (void *)hinfop, deviceid,
			    hinfop->ach_deviceid_start,
			    hinfop->ach_deviceid_end);
		}
		goto out;
	}

	/* no exact match: retry once against the range/"all" bucket */
	if (idx !=  AMD_IOMMU_ACPI_INFO_HASH_SZ) {
		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
		goto range;
	} else {
		cmn_err(CE_PANIC, "IVHD not found for deviceid: %x", deviceid);
	}

out:
	if (amd_iommu_debug == AMD_IOMMU_DEBUG_ACPI) {
		cmn_err(CE_NOTE, "%u: %s ACPI IVHD %p", deviceid,
		    hinfop ? "GOT" : "Did NOT get", (void *)hinfop);
	}

	return (hinfop);
}
899 
900 amd_iommu_acpi_ivmd_t *
901 amd_iommu_lookup_ivmd(int32_t deviceid)
902 {
903 	amd_iommu_acpi_ivmd_t *minfop;
904 	uint16_t idx;
905 
906 	if (amd_iommu_debug == AMD_IOMMU_DEBUG_ACPI) {
907 		cmn_err(CE_NOTE, "Attempting to get ACPI IVMD info "
908 		    "for deviceid: %u", deviceid);
909 	}
910 
911 	ASSERT(amd_iommu_acpi_ivmd_hash);
912 
913 	/* First search for an exact match */
914 
915 	idx = deviceid_hashfn(deviceid);
916 
917 
918 range:
919 	minfop = amd_iommu_acpi_ivmd_hash[idx];
920 
921 	for (; minfop; minfop = minfop->acm_next) {
922 		if (deviceid < minfop->acm_deviceid_start &&
923 		    deviceid > minfop->acm_deviceid_end)
924 			continue;
925 
926 		if (amd_iommu_debug == AMD_IOMMU_DEBUG_ACPI) {
927 			cmn_err(CE_NOTE, "Found ACPI IVMD match: %p, "
928 			    "actual deviceid = %u, start = %u, end = %u",
929 			    (void *)minfop, deviceid,
930 			    minfop->acm_deviceid_start,
931 			    minfop->acm_deviceid_end);
932 		}
933 
934 		goto out;
935 	}
936 
937 	if (idx !=  AMD_IOMMU_ACPI_INFO_HASH_SZ) {
938 		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
939 		goto range;
940 	} else {
941 		cmn_err(CE_PANIC, "IVMD not found for deviceid: %x", deviceid);
942 	}
943 
944 out:
945 	if (amd_iommu_debug == AMD_IOMMU_DEBUG_ACPI) {
946 		cmn_err(CE_NOTE, "%u: %s ACPI IVMD info %p", deviceid,
947 		    minfop ? "GOT" : "Did NOT get", (void *)minfop);
948 	}
949 
950 	return (minfop);
951 }
952