xref: /titanic_50/usr/src/uts/i86pc/io/immu_dmar.c (revision 6e1ae2a33c618c4c2b14aec7d2f21743ddea5837)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 
25 /*
26  * Copyright (c) 2009, Intel Corporation.
27  * All rights reserved.
28  */
29 
30 
31 #include <sys/debug.h>
32 #include <sys/sysmacros.h>
33 #include <sys/types.h>
34 #include <sys/kmem.h>
35 #include <sys/sunddi.h>
36 #include <sys/list.h>
37 #include <sys/pci.h>
38 #include <sys/pci_cfgspace.h>
39 #include <sys/pci_impl.h>
40 #include <sys/sunndi.h>
41 #include <sys/ksynch.h>
42 #include <sys/cmn_err.h>
43 #include <sys/bootconf.h>
44 #include <sys/int_fmtio.h>
45 #include <sys/smbios.h>
46 #include <sys/apic.h>
47 #include <sys/acpi/acpi.h>
48 #include <sys/acpica.h>
49 #include <sys/iommulib.h>
50 #include <sys/immu.h>
51 #include <sys/smp_impldefs.h>
52 
53 static void dmar_table_destroy(dmar_table_t *tbl);
54 
55 /*
56  * internal global variables
57  */
58 static char	*dmar_raw;		/* raw DMAR ACPI table */
59 static dmar_table_t *dmar_table;	/* converted form of DMAR table */
60 
61 /*
62  * global variables exported outside this file
63  */
64 boolean_t dmar_print = B_FALSE;
65 kmutex_t ioapic_drhd_lock;
66 list_t ioapic_drhd_list;
67 
68 /* ######################################################################### */
69 
70 /*
71  * helper functions to read the "raw" DMAR table
72  */
73 
74 static uint8_t
75 get_uint8(char *cp)
76 {
77 	uint8_t val = *((uint8_t *)cp);
78 	return (val);
79 }
80 
81 static uint16_t
82 get_uint16(char *cp)
83 {
84 	uint16_t val = *((uint16_t *)cp);
85 	return (val);
86 }
87 
88 static uint32_t
89 get_uint32(char *cp)
90 {
91 	uint32_t val = *((uint32_t *)cp);
92 	return (val);
93 }
94 
95 static uint64_t
96 get_uint64(char *cp)
97 {
98 	uint64_t val = *((uint64_t *)cp);
99 	return (val);
100 }
101 
102 static char *
103 get_str(char *cp, uint_t len)
104 {
105 	char *str = kmem_alloc(len + 1, KM_SLEEP);
106 
107 	(void) strlcpy(str, cp, len + 1);
108 
109 	return (str);
110 }
111 
112 static void
113 scope_list_free(list_t *scope_list)
114 {
115 	scope_t *scope;
116 
117 	if (list_is_empty(scope_list)) {
118 		list_destroy(scope_list);
119 		return;
120 	}
121 
122 	while ((scope = list_remove_head(scope_list)) != NULL) {
123 		kmem_free(scope, sizeof (scope_t));
124 	}
125 
126 	ASSERT(list_is_empty(scope_list));
127 	list_destroy(scope_list);
128 }
129 
130 static void
131 drhd_list_destroy(list_t *drhd_list)
132 {
133 	drhd_t *drhd;
134 
135 	ASSERT(drhd_list);
136 
137 	if (list_is_empty(drhd_list)) {
138 		list_destroy(drhd_list);
139 		return;
140 	}
141 
142 	while ((drhd = list_remove_head(drhd_list)) != NULL) {
143 		scope_list_free(&(drhd->dr_scope_list));
144 		kmem_free(drhd, sizeof (drhd_t));
145 	}
146 
147 	ASSERT(list_is_empty(drhd_list));
148 	list_destroy(drhd_list);
149 }
150 
151 static void
152 rmrr_list_destroy(list_t *rmrr_list)
153 {
154 	rmrr_t *rmrr;
155 
156 	ASSERT(rmrr_list);
157 
158 	if (list_is_empty(rmrr_list)) {
159 		list_destroy(rmrr_list);
160 		return;
161 	}
162 
163 	while ((rmrr = list_remove_head(rmrr_list)) != NULL) {
164 		scope_list_free(&(rmrr->rm_scope_list));
165 		kmem_free(rmrr, sizeof (rmrr_t));
166 	}
167 
168 	ASSERT(list_is_empty(rmrr_list));
169 	list_destroy(rmrr_list);
170 }
171 
172 /*
173  * parse_scope()
174  *      parse a scope structure in the "raw" table
175  */
176 static scope_t *
177 parse_scope(char *shead)
178 {
179 	scope_t *scope;
180 	char *phead;
181 	int bus, dev, func;
182 	uint8_t startbus;
183 	uint8_t len;
184 	int depth;
185 
186 	ASSERT(shead);
187 
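	/*
	 * ACPI DMAR Device Scope Structure layout (per the Intel VT-d spec):
	 *   byte 0:    type (endpoint, subtree/bridge, IOAPIC, HPET)
	 *   byte 1:    length of this structure
	 *   bytes 2-3: reserved
	 *   byte 4:    enumeration id (e.g. IOAPIC id)
	 *   byte 5:    start bus number
	 *   bytes 6+:  path: (device, function) pairs, one per bridge level
	 */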
188 	scope = kmem_zalloc(sizeof (scope_t), KM_SLEEP);
189 	scope->scp_type = get_uint8(&shead[0]);
190 	scope->scp_enumid = get_uint8(&shead[4]);
191 
192 	len = get_uint8(&shead[1]);
193 	startbus = get_uint8(&shead[5]);
194 	depth = (len - 6)/2;
195 	ASSERT(depth >= 1);
196 
197 	phead = &shead[6];
198 
199 	bus = startbus;
200 	dev = get_uint8(phead++);
201 	func = get_uint8(phead++);
202 
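	/*
	 * Walk the remaining (dev, func) pairs in the path. Each hop
	 * crosses a PCI bridge, so read that bridge's secondary bus
	 * number from config space to get the bus at the next level.
	 */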
203 	for (depth--; depth > 0; depth--) {
204 		bus = pci_getb_func(bus, dev, func, PCI_BCNF_SECBUS);
205 		dev = get_uint8(phead++);
206 		func = get_uint8(phead++);
207 	}
208 
209 	ASSERT(bus >= 0 && bus < 256);
210 	ASSERT(dev >= 0 && dev < 32);
211 	ASSERT(func >= 0 && func < 8);
212 
213 	/* ok we got the device BDF */
214 	scope->scp_bus = bus;
215 	scope->scp_dev = dev;
216 	scope->scp_func = func;
217 
218 	return (scope);
219 }
220 
221 
222 /* setup the ioapic_drhd structure */
223 static void
224 ioapic_drhd_setup(void)
225 {
226 	mutex_init(&(ioapic_drhd_lock), NULL, MUTEX_DEFAULT, NULL);
227 
228 	mutex_enter(&(ioapic_drhd_lock));
229 	list_create(&(ioapic_drhd_list), sizeof (ioapic_drhd_t),
230 	    offsetof(ioapic_drhd_t, ioapic_node));
231 	mutex_exit(&(ioapic_drhd_lock));
232 }
233 
234 /* record the ioapic source-id and its drhd for interrupt remapping */
235 static void
236 ioapic_drhd_insert(scope_t *scope, drhd_t *drhd)
237 {
238 	ioapic_drhd_t *idt;
239 
240 	idt = kmem_zalloc(sizeof (ioapic_drhd_t), KM_SLEEP);
241 	idt->ioapic_ioapicid = scope->scp_enumid;
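	/* PCI source-id (requester id): bus[15:8] | device[7:3] | function[2:0] */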
242 	idt->ioapic_sid = ((scope->scp_bus << 8) | (scope->scp_dev << 3) |
243 	    (scope->scp_func));
244 	idt->ioapic_drhd = drhd;
245 
246 	mutex_enter(&ioapic_drhd_lock);
247 	list_insert_tail(&ioapic_drhd_list, idt);
248 	mutex_exit(&ioapic_drhd_lock);
249 }
250 
251 static ioapic_drhd_t *
252 ioapic_drhd_lookup(int ioapicid)
253 {
254 	ioapic_drhd_t *idt;
255 
256 	mutex_enter(&ioapic_drhd_lock);
257 	idt = list_head(&ioapic_drhd_list);
258 	for (; idt; idt = list_next(&ioapic_drhd_list, idt)) {
259 		if (idt->ioapic_ioapicid == ioapicid) {
260 			break;
261 		}
262 	}
263 	mutex_exit(&ioapic_drhd_lock);
264 
265 	return (idt);
266 }
267 
268 static void
269 ioapic_drhd_destroy(void)
270 {
271 	ioapic_drhd_t *idt;
272 
273 	mutex_enter(&ioapic_drhd_lock);
274 	while ((idt = list_remove_head(&ioapic_drhd_list)) != NULL) {
275 		kmem_free(idt, sizeof (ioapic_drhd_t));
276 	}
277 	list_destroy(&ioapic_drhd_list);
278 	mutex_exit(&(ioapic_drhd_lock));
279 
280 	mutex_destroy(&(ioapic_drhd_lock));
281 }
282 
283 /*
284  * parse_drhd()
285  *   parse the drhd units in dmar table
286  */
287 static int
288 parse_drhd(char *uhead, dmar_table_t *tbl)
289 {
290 	drhd_t *drhd;
291 	int seg;
292 	int len;
293 	char *shead;
294 	scope_t *scope;
295 
296 	ASSERT(uhead);
297 	ASSERT(tbl);
298 	ASSERT(get_uint16(&uhead[0]) == DMAR_DRHD);
299 
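	/*
	 * DRHD (DMA Remapping Hardware Unit Definition) unit layout:
	 *   bytes 0-1:  type, bytes 2-3: length
	 *   byte 4:     flags (bit 0 = INCLUDE_PCI_ALL)
	 *   bytes 6-7:  PCI segment number
	 *   bytes 8-15: register base address
	 *   bytes 16+:  device scope structures
	 */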
300 	seg = get_uint16(&uhead[6]);
301 	if (seg < 0 || seg >= IMMU_MAXSEG) {
302 		ddi_err(DER_WARN, NULL, "invalid segment# <%d> "
303 		    "in DRHD unit in ACPI DMAR table", seg);
304 		return (DDI_FAILURE);
305 	}
306 
307 	drhd = kmem_zalloc(sizeof (drhd_t), KM_SLEEP);
308 	mutex_init(&(drhd->dr_lock), NULL, MUTEX_DEFAULT, NULL);
309 	list_create(&(drhd->dr_scope_list), sizeof (scope_t),
310 	    offsetof(scope_t, scp_node));
311 
312 	len = get_uint16(&uhead[2]);
313 	drhd->dr_include_all =
314 	    (get_uint8(&uhead[4]) & DMAR_INCLUDE_ALL) ? B_TRUE : B_FALSE;
315 	drhd->dr_seg = seg;
316 	drhd->dr_regs = get_uint64(&uhead[8]);
317 
318 	/*
319 	 * parse each scope.
320 	 */
321 	shead = &uhead[16];
322 	while (shead < &uhead[len - 1]) {
323 		scope = parse_scope(shead);
324 		if (scope == NULL) {
325 			return (DDI_FAILURE);
326 		}
327 
328 		if (scope->scp_type == DMAR_IOAPIC)  {
329 			ioapic_drhd_insert(scope, drhd);
330 		}
331 
332 		list_insert_tail(&(drhd->dr_scope_list), scope);
333 		shead += get_uint8(&shead[1]);
334 	}
335 
336 	list_insert_tail(&(tbl->tbl_drhd_list[drhd->dr_seg]), drhd);
337 
338 	return (DDI_SUCCESS);
339 }
340 
341 /*
342  * parse_rmrr()
343  *   parse the rmrr units in dmar table
344  */
345 static int
346 parse_rmrr(char *uhead, dmar_table_t *tbl)
347 {
348 	rmrr_t *rmrr;
349 	int seg;
350 	int len;
351 	char *shead;
352 	scope_t *scope;
353 
354 	ASSERT(uhead);
355 	ASSERT(tbl);
356 	ASSERT(get_uint16(&uhead[0]) == DMAR_RMRR);
357 
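	/*
	 * RMRR (Reserved Memory Region Reporting) unit layout:
	 *   bytes 0-1:   type, bytes 2-3: length
	 *   bytes 6-7:   PCI segment number
	 *   bytes 8-15:  region base address
	 *   bytes 16-23: region limit address (inclusive)
	 *   bytes 24+:   device scope structures
	 */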
358 	seg = get_uint16(&uhead[6]);
359 	if (seg < 0 || seg >= IMMU_MAXSEG) {
360 		ddi_err(DER_WARN, NULL, "invalid segment# <%d> "
361 		    "in RMRR unit in ACPI DMAR table", seg);
362 		return (DDI_FAILURE);
363 	}
364 
365 	rmrr = kmem_zalloc(sizeof (rmrr_t), KM_SLEEP);
366 	mutex_init(&(rmrr->rm_lock), NULL, MUTEX_DEFAULT, NULL);
367 	list_create(&(rmrr->rm_scope_list), sizeof (scope_t),
368 	    offsetof(scope_t, scp_node));
369 
370 	/* RMRR region is [base,limit] */
371 	len = get_uint16(&uhead[2]);
372 	rmrr->rm_seg = get_uint16(&uhead[6]);
373 	rmrr->rm_base = get_uint64(&uhead[8]);
374 	rmrr->rm_limit = get_uint64(&uhead[16]);
375 
376 	if (rmrr->rm_base > rmrr->rm_limit) {
377 		ddi_err(DER_WARN, NULL, "IMMU: BIOS bug detected: "
378 		    "RMRR: base (%lx) > limit (%lx)",
379 		    rmrr->rm_base, rmrr->rm_limit);
380 		list_destroy(&(rmrr->rm_scope_list));
381 		mutex_destroy(&(rmrr->rm_lock));
382 		kmem_free(rmrr, sizeof (rmrr_t));
383 		return (DDI_SUCCESS);
384 	}
385 
386 	/*
387 	 * parse each scope in RMRR
388 	 */
389 	shead = &uhead[24];
390 	while (shead < &uhead[len - 1]) {
391 		scope = parse_scope(shead);
392 		if (scope == NULL) {
393 			return (DDI_FAILURE);
394 		}
395 		list_insert_tail(&(rmrr->rm_scope_list), scope);
396 		shead += get_uint8(&shead[1]);
397 	}
398 
399 	list_insert_tail(&(tbl->tbl_rmrr_list[rmrr->rm_seg]), rmrr);
400 
401 	return (DDI_SUCCESS);
402 }
403 
404 #define	TBL_OEM_ID_SZ		(6)
405 #define	TBL_OEM_TBLID_SZ	(8)
406 
407 /*
408  * parse the "raw" DMAR table and convert it
409  * into a useful form.
410  */
411 static int
412 dmar_parse(dmar_table_t **tblpp, char *raw)
413 {
414 	char *uhead;
415 	dmar_table_t *tbl;
416 	int i;
417 	char *unmstr;
418 
419 	ASSERT(raw);
420 	ASSERT(tblpp);
421 
422 	*tblpp = NULL;
423 
424 	/*
425 	 * do a sanity check. make sure the raw table
426 	 * has the right signature
427 	 */
428 	if (raw[0] != 'D' || raw[1] != 'M' ||
429 	    raw[2] != 'A' || raw[3] != 'R') {
430 		ddi_err(DER_WARN, NULL, "IOMMU ACPI "
431 		    "signature != \"DMAR\"");
432 		return (DDI_FAILURE);
433 	}
434 
435 	/*
436 	 * the platform has intel iommu, create processed ACPI struct
437 	 */
438 	tbl = kmem_zalloc(sizeof (dmar_table_t), KM_SLEEP);
439 	mutex_init(&(tbl->tbl_lock), NULL, MUTEX_DEFAULT, NULL);
440 
441 	tbl->tbl_raw = raw;
442 
443 	/*
444 	 * Note we explicitly show offsets for clarity
445 	 */
446 	tbl->tbl_rawlen = get_uint32(&raw[4]);
447 
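	/*
	 * Offsets below follow the standard ACPI table header plus the
	 * DMAR-specific fields: OEMID at 10, OEM Table ID at 16,
	 * OEM Revision at 24, Host Address Width at 36 (stored as
	 * width - 1), Flags at 37; remapping structures start at 48.
	 */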
448 	/* XXX TO DO verify checksum of table */
449 	tbl->tbl_oem_id = get_str(&raw[10], TBL_OEM_ID_SZ);
450 	tbl->tbl_oem_tblid = get_str(&raw[16], TBL_OEM_TBLID_SZ);
451 	tbl->tbl_oem_rev = get_uint32(&raw[24]);
452 	tbl->tbl_haw = get_uint8(&raw[36]) + 1;
453 	tbl->tbl_intrmap = (get_uint8(&raw[37]) & DMAR_INTRMAP_SUPPORT)
454 	    ? B_TRUE : B_FALSE;
455 
456 	/* create lists for DRHD and RMRR */
457 	for (i = 0; i < IMMU_MAXSEG; i++) {
458 		list_create(&(tbl->tbl_drhd_list[i]), sizeof (drhd_t),
459 		    offsetof(drhd_t, dr_node));
460 		list_create(&(tbl->tbl_rmrr_list[i]), sizeof (rmrr_t),
461 		    offsetof(rmrr_t, rm_node));
462 	}
463 
464 	ioapic_drhd_setup();
465 
466 	/*
467 	 * parse each unit. Currently only DRHD and RMRR types
468 	 * are parsed. We ignore all other types of units.
469 	 */
470 	uhead = &raw[48];
471 	while (uhead < &raw[tbl->tbl_rawlen - 1]) {
472 		unmstr = NULL;
473 		switch (get_uint16(uhead)) {
474 		case DMAR_DRHD:
475 			if (parse_drhd(uhead, tbl) != DDI_SUCCESS) {
476 				goto failed;
477 			}
478 			break;
479 		case DMAR_RMRR:
480 			if (parse_rmrr(uhead, tbl) != DDI_SUCCESS) {
481 				goto failed;
482 			}
483 			break;
484 		case DMAR_ATSR:
485 			unmstr = "ATSR";
486 			break;
487 		case DMAR_RHSA:
488 			unmstr = "RHSA";
489 			break;
490 		default:
491 			unmstr = "unknown unit type";
492 			break;
493 		}
494 		if (unmstr) {
495 			ddi_err(DER_NOTE, NULL, "DMAR ACPI table: "
496 			    "skipping unsupported unit type %s", unmstr);
497 		}
498 		uhead += get_uint16(&uhead[2]);
499 	}
500 
501 	*tblpp = tbl;
502 	return (DDI_SUCCESS);
503 
504 failed:
505 	dmar_table_destroy(tbl);
506 	return (DDI_FAILURE);
507 }
508 
509 static char *
510 scope_type(int devtype)
511 {
512 	char *typestr;
513 
514 	switch (devtype) {
515 	case DMAR_ENDPOINT:
516 		typestr = "endpoint-device";
517 		break;
518 	case DMAR_SUBTREE:
519 		typestr = "subtree-device";
520 		break;
521 	case DMAR_IOAPIC:
522 		typestr = "IOAPIC";
523 		break;
524 	case DMAR_HPET:
525 		typestr = "HPET";
526 		break;
527 	default:
528 		typestr = "Unknown device";
529 		break;
530 	}
531 
532 	return (typestr);
533 }
534 
535 static void
536 print_scope_list(list_t *scope_list)
537 {
538 	scope_t *scope;
539 
540 	if (list_is_empty(scope_list))
541 		return;
542 
543 	ddi_err(DER_CONT, NULL, "\tdevice list:\n");
544 
545 	for (scope = list_head(scope_list); scope;
546 	    scope = list_next(scope_list, scope)) {
547 		ddi_err(DER_CONT, NULL, "\t\ttype = %s\n",
548 		    scope_type(scope->scp_type));
549 		ddi_err(DER_CONT, NULL, "\n\t\tbus = %d\n",
550 		    scope->scp_bus);
551 		ddi_err(DER_CONT, NULL, "\t\tdev = %d\n",
552 		    scope->scp_dev);
553 		ddi_err(DER_CONT, NULL, "\t\tfunc = %d\n",
554 		    scope->scp_func);
555 	}
556 }
557 
558 static void
559 print_drhd_list(list_t *drhd_list)
560 {
561 	drhd_t *drhd;
562 
563 	if (list_is_empty(drhd_list))
564 		return;
565 
566 	ddi_err(DER_CONT, NULL, "\ndrhd list:\n");
567 
568 	for (drhd = list_head(drhd_list); drhd;
569 	    drhd = list_next(drhd_list, drhd)) {
570 
571 		ddi_err(DER_CONT, NULL, "\n\tsegment = %d\n",
572 		    drhd->dr_seg);
573 		ddi_err(DER_CONT, NULL, "\treg_base = 0x%" PRIx64 "\n",
574 		    drhd->dr_regs);
575 		ddi_err(DER_CONT, NULL, "\tinclude_all = %s\n",
576 		    drhd->dr_include_all == B_TRUE ? "TRUE" : "FALSE");
577 		ddi_err(DER_CONT, NULL, "\tdip = 0x%p\n",
578 		    (void *)drhd->dr_dip);
579 
580 		print_scope_list(&(drhd->dr_scope_list));
581 	}
582 }
583 
584 
585 static void
586 print_rmrr_list(list_t *rmrr_list)
587 {
588 	rmrr_t *rmrr;
589 
590 	if (list_is_empty(rmrr_list))
591 		return;
592 
593 	ddi_err(DER_CONT, NULL, "\nrmrr list:\n");
594 
595 	for (rmrr = list_head(rmrr_list); rmrr;
596 	    rmrr = list_next(rmrr_list, rmrr)) {
597 
598 		ddi_err(DER_CONT, NULL, "\n\tsegment = %d\n",
599 		    rmrr->rm_seg);
600 		ddi_err(DER_CONT, NULL, "\tbase = 0x%lx\n",
601 		    rmrr->rm_base);
602 		ddi_err(DER_CONT, NULL, "\tlimit = 0x%lx\n",
603 		    rmrr->rm_limit);
604 
605 		print_scope_list(&(rmrr->rm_scope_list));
606 	}
607 }
608 
609 /*
610  * print DMAR table
611  */
612 static void
613 dmar_table_print(dmar_table_t *tbl)
614 {
615 	int i;
616 
617 	if (dmar_print == B_FALSE) {
618 		return;
619 	}
620 
621 	/* print the title */
622 	ddi_err(DER_CONT, NULL, "#### Start of dmar_table ####\n");
623 	ddi_err(DER_CONT, NULL, "\thaw = %d\n", tbl->tbl_haw);
624 	ddi_err(DER_CONT, NULL, "\tintr_remap = %s\n",
625 	    tbl->tbl_intrmap == B_TRUE ? "<true>" : "<false>");
626 
627 	/* print drhd list */
628 	for (i = 0; i < IMMU_MAXSEG; i++) {
629 		print_drhd_list(&(tbl->tbl_drhd_list[i]));
630 	}
631 
632 
633 	/* print rmrr list */
634 	for (i = 0; i < IMMU_MAXSEG; i++) {
635 		print_rmrr_list(&(tbl->tbl_rmrr_list[i]));
636 	}
637 
638 	ddi_err(DER_CONT, NULL, "#### END of dmar_table ####\n");
639 }
640 
641 static void
642 drhd_devi_create(drhd_t *drhd, char *name)
643 {
644 	struct ddi_parent_private_data *pdptr;
645 	struct regspec reg;
646 	dev_info_t *dip;
647 
648 	ndi_devi_alloc_sleep(root_devinfo, name,
649 	    DEVI_SID_NODEID, &dip);
650 
651 	drhd->dr_dip = dip;
652 
653 	reg.regspec_bustype = 0;
654 	reg.regspec_addr = drhd->dr_regs;
655 	reg.regspec_size = IMMU_REGSZ;
656 
657 	/*
658 	 * update the reg properties
659 	 *
660 	 *   reg property will be used for register
661 	 *   set access
662 	 *
663 	 * refer to the bus_map of root nexus driver
664 	 * I/O or memory mapping:
665 	 *
666 	 * <bustype=0, addr=x, len=x>: memory
667 	 * <bustype=1, addr=x, len=x>: i/o
668 	 * <bustype>1, addr=0, len=x>: x86-compatibility i/o
669 	 */
670 	(void) ndi_prop_update_int_array(DDI_DEV_T_NONE,
671 	    dip, "reg", (int *)&reg,
672 	    sizeof (struct regspec) / sizeof (int));
673 
674 	/*
675 	 * This is an artificially constructed dev_info, and we
676 	 * need to set a few more things to be able to use it
677 	 * for ddi_dma_alloc_handle/free_handle.
678 	 */
679 	ddi_set_driver(dip, ddi_get_driver(ddi_root_node()));
680 	DEVI(dip)->devi_bus_dma_allochdl =
681 	    DEVI(ddi_get_driver((ddi_root_node())));
682 
683 	pdptr = kmem_zalloc(sizeof (struct ddi_parent_private_data)
684 	    + sizeof (struct regspec), KM_SLEEP);
685 	pdptr->par_nreg = 1;
686 	pdptr->par_reg = (struct regspec *)(pdptr + 1);
687 	pdptr->par_reg->regspec_bustype = 0;
688 	pdptr->par_reg->regspec_addr = drhd->dr_regs;
689 	pdptr->par_reg->regspec_size = IMMU_REGSZ;
690 	ddi_set_parent_data(dip, pdptr);
691 }
692 
693 /*
694  * dmar_devinfos_create()
695  *
696  *   create a dev_info node in the device tree for
697  *   each drhd unit; the node is a nexus child of
698  *   the root nexus
699  */
700 static void
701 dmar_devinfos_create(dmar_table_t *tbl)
702 {
703 	list_t *drhd_list;
704 	drhd_t *drhd;
705 	char name[IMMU_MAXNAMELEN];
706 	int i, unit;
707 
708 	for (i = 0; i < IMMU_MAXSEG; i++) {
709 
710 		drhd_list = &(tbl->tbl_drhd_list[i]);
711 
712 		if (list_is_empty(drhd_list))
713 			continue;
714 
715 		drhd = list_head(drhd_list);
716 		for (unit = 0; drhd;
717 		    drhd = list_next(drhd_list, drhd), unit++) {
718 			(void) snprintf(name, sizeof (name),
719 			    "drhd%d,%d", i, unit);
720 			drhd_devi_create(drhd, name);
721 		}
722 	}
723 }
724 
725 static void
726 drhd_devi_destroy(drhd_t *drhd)
727 {
728 	dev_info_t *dip;
729 	int count;
730 
731 	dip = drhd->dr_dip;
732 	ASSERT(dip);
733 
734 	ndi_devi_enter(root_devinfo, &count);
735 	if (ndi_devi_offline(dip, NDI_DEVI_REMOVE) != DDI_SUCCESS) {
736 		ddi_err(DER_WARN, dip, "Failed to destroy");
737 	}
738 	ndi_devi_exit(root_devinfo, count);
739 	drhd->dr_dip = NULL;
740 }
741 
742 /*
743  * dmar_devi_destroy()
744  *
745  * destroy dev_info nodes for all drhd units
746  */
747 static void
748 dmar_devi_destroy(dmar_table_t *tbl)
749 {
750 	drhd_t *drhd;
751 	list_t *drhd_list;
752 	int i;
753 
754 	for (i = 0; i < IMMU_MAXSEG; i++) {
755 		drhd_list = &(tbl->tbl_drhd_list[i]);
756 		if (list_is_empty(drhd_list))
757 			continue;
758 
759 		drhd = list_head(drhd_list);
760 		for (; drhd; drhd = list_next(drhd_list, drhd)) {
761 			drhd_devi_destroy(drhd);
762 		}
763 	}
764 }
765 
766 static int
767 match_bdf(dev_info_t *ddip, void *arg)
768 {
769 	immu_arg_t *imarg = (immu_arg_t *)arg;
770 	immu_devi_t *immu_devi;
771 
772 	ASSERT(ddip);
773 	ASSERT(imarg);
774 	ASSERT(imarg->ima_seg == 0);
775 	ASSERT(imarg->ima_bus >= 0);
776 	ASSERT(imarg->ima_devfunc >= 0);
777 	ASSERT(imarg->ima_ddip == NULL);
778 
779 	/* rdip can be NULL */
780 
781 	mutex_enter(&(DEVI(ddip)->devi_lock));
782 
783 	immu_devi = IMMU_DEVI(ddip);
784 	ASSERT(immu_devi);
785 
786 	if (immu_devi->imd_seg == imarg->ima_seg &&
787 	    immu_devi->imd_bus == imarg->ima_bus &&
788 	    immu_devi->imd_devfunc == imarg->ima_devfunc) {
789 		imarg->ima_ddip = ddip;
790 	}
791 
792 	mutex_exit(&(DEVI(ddip)->devi_lock));
793 
794 	return (imarg->ima_ddip ? DDI_WALK_TERMINATE : DDI_WALK_CONTINUE);
795 }
796 static void
797 dmar_table_destroy(dmar_table_t *tbl)
798 {
799 	int i;
800 
801 	ASSERT(tbl);
802 
803 	/* destroy lists for DRHD and RMRR */
804 	for (i = 0; i < IMMU_MAXSEG; i++) {
805 		rmrr_list_destroy(&(tbl->tbl_rmrr_list[i]));
806 		drhd_list_destroy(&(tbl->tbl_drhd_list[i]));
807 	}
808 
809 	/* free strings */
810 	kmem_free(tbl->tbl_oem_tblid, TBL_OEM_TBLID_SZ + 1);
811 	kmem_free(tbl->tbl_oem_id, TBL_OEM_ID_SZ + 1);
812 	tbl->tbl_raw = NULL; /* raw ACPI table doesn't have to be freed */
813 	mutex_destroy(&(tbl->tbl_lock));
814 	kmem_free(tbl, sizeof (dmar_table_t));
815 }
816 
817 /*
818  * #########################################################################
819  * Functions exported by immu_dmar.c
820  * This file deals with reading and processing the DMAR ACPI table
821  * #########################################################################
822  */
823 
824 /*
825  * immu_dmar_setup()
826  *	Check if the system has a DMAR ACPI table. If yes, the system
827  *	has Intel IOMMU hardware
828  */
829 int
830 immu_dmar_setup(void)
831 {
832 	if (AcpiGetTable("DMAR", 1, (ACPI_TABLE_HEADER **)&dmar_raw) != AE_OK) {
833 		ddi_err(DER_LOG, NULL,
834 		    "No DMAR ACPI table. No Intel IOMMU present\n");
835 		dmar_raw = NULL;
836 		return (DDI_FAILURE);
837 	}
838 	ASSERT(dmar_raw);
839 	return (DDI_SUCCESS);
840 }
841 
842 /*
843  * immu_dmar_parse()
844  *  Called by immu.c to parse and convert "raw" ACPI DMAR table
845  */
846 int
847 immu_dmar_parse(void)
848 {
849 	dmar_table_t *tbl = NULL;
850 
851 	/* we should already have found the "raw" table */
852 	ASSERT(dmar_raw);
853 
854 	ddi_err(DER_CONT, NULL, "?Processing DMAR ACPI table\n");
855 
856 	dmar_table = NULL;
857 
858 	/*
859 	 * parse DMAR ACPI table
860 	 */
861 	if (dmar_parse(&tbl, dmar_raw) != DDI_SUCCESS) {
862 		ASSERT(tbl == NULL);
863 		return (DDI_FAILURE);
864 	}
865 
866 	ASSERT(tbl);
867 
868 	/*
869 	 * create one devinfo for every drhd unit
870 	 * in the DMAR table
871 	 */
872 	dmar_devinfos_create(tbl);
873 
874 	/*
875 	 * print the dmar table if the debug option is set
876 	 */
877 	dmar_table_print(tbl);
878 
879 	dmar_table = tbl;
880 
881 	return (DDI_SUCCESS);
882 }
883 
884 void
885 immu_dmar_startup(void)
886 {
887 	/* nothing to do */
888 }
889 
890 void
891 immu_dmar_shutdown(void)
892 {
893 	/* nothing to do */
894 }
895 
896 void
897 immu_dmar_destroy(void)
898 {
899 	dmar_devi_destroy(dmar_table);
900 	dmar_table_destroy(dmar_table);
901 	ioapic_drhd_destroy();
902 	dmar_table = NULL;
903 	dmar_raw = NULL;
904 }
905 
906 boolean_t
907 immu_dmar_blacklisted(char **strptr, uint_t nstrs)
908 {
909 	dmar_table_t *tbl = dmar_table;
910 	int i;
911 	char oem_rev[IMMU_MAXNAMELEN];
912 
913 	ASSERT(tbl);
914 
915 	ASSERT((strptr == NULL) ^ (nstrs != 0));
916 
917 	/*
918 	 * Must be a minimum of 4
919 	 */
920 	if (nstrs < 4) {
921 		return (B_FALSE);
922 	}
923 
924 	ddi_err(DER_CONT, NULL, "?System DMAR ACPI table information:\n");
925 	ddi_err(DER_CONT, NULL, "?OEM-ID = <%s>\n", tbl->tbl_oem_id);
926 	ddi_err(DER_CONT, NULL, "?Table-ID = <%s>\n", tbl->tbl_oem_tblid);
927 	(void) snprintf(oem_rev, sizeof (oem_rev), "%d", tbl->tbl_oem_rev);
928 	ddi_err(DER_CONT, NULL, "?Revision = <%s>\n", oem_rev);
929 
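	/*
	 * Blacklist entries come in groups of four strings:
	 * "DMAR", OEM ID, OEM table ID, OEM revision.
	 */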
930 	for (i = 0; nstrs - i >= 4; i++) {
931 		if (strcmp(*strptr++, "DMAR") == 0) {
932 			if (strcmp(*strptr++, tbl->tbl_oem_id) == 0 &&
933 			    ((char *)strptr == '\0' ||
934 			    strcmp(*strptr++, tbl->tbl_oem_tblid) == 0) &&
935 			    ((char *)strptr == '\0' ||
936 			    strcmp(*strptr++, oem_rev) == 0)) {
937 				return (B_TRUE);
938 			}
939 			i += 3; /* the for loop adds 1 as well, so add only 3 here */
940 		}
941 	}
942 	return (B_FALSE);
943 }
944 
945 void
946 immu_dmar_rmrr_map(void)
947 {
948 	int seg;
949 	int e;
950 	int count;
951 	dev_info_t *rdip;
952 	scope_t *scope;
953 	rmrr_t *rmrr;
954 	dmar_table_t *tbl;
955 
956 	ASSERT(dmar_table);
957 
958 	tbl = dmar_table;
959 
960 	/* called during boot, when kernel is single threaded. No lock */
961 
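	/*
	 * RMRRs describe memory (typically set aside by the BIOS, e.g. for
	 * USB legacy emulation or integrated graphics) that devices are
	 * expected to keep DMA'ing to; such ranges must remain mapped in
	 * the device's domain.
	 */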
962 	/*
963 	 * for each segment, walk the rmrr list looking for an exact match
964 	 */
965 	for (seg = 0; seg < IMMU_MAXSEG; seg++) {
966 		rmrr = list_head(&(tbl->tbl_rmrr_list)[seg]);
967 		for (; rmrr; rmrr = list_next(&(tbl->tbl_rmrr_list)[seg],
968 		    rmrr)) {
969 
970 			/*
971 			 * try to match BDF *exactly* to a device scope.
972 			 */
973 			scope = list_head(&(rmrr->rm_scope_list));
974 			for (; scope;
975 			    scope = list_next(&(rmrr->rm_scope_list), scope)) {
976 				immu_arg_t imarg = {0};
977 				memrng_t mrng = {0};
978 
979 				/* PCI endpoint devices only */
980 				if (scope->scp_type != DMAR_ENDPOINT)
981 					continue;
982 
983 				imarg.ima_seg = seg;
984 				imarg.ima_bus = scope->scp_bus;
985 				imarg.ima_devfunc =
986 				    IMMU_PCI_DEVFUNC(scope->scp_dev,
987 				    scope->scp_func);
988 				imarg.ima_ddip = NULL;
989 				imarg.ima_rdip = NULL;
990 
991 				ASSERT(root_devinfo);
992 				/* XXX should be optimized */
993 				ndi_devi_enter(root_devinfo, &count);
994 				ddi_walk_devs(ddi_get_child(root_devinfo),
995 				    match_bdf, &imarg);
996 				ndi_devi_exit(root_devinfo, count);
997 
998 				if (imarg.ima_ddip == NULL) {
999 					ddi_err(DER_WARN, NULL,
1000 					    "No dip found for "
1001 			    "bus=0x%x, dev=0x%x, func=0x%x",
1002 					    scope->scp_bus, scope->scp_dev,
1003 					    scope->scp_func);
1004 					continue;
1005 				}
1006 
1007 				rdip = imarg.ima_ddip;
1008 				/*
1009 				 * This address must be in the BIOS reserved
1010 				 * map
1011 				 */
1012 				if (!address_in_memlist(bios_rsvd,
1013 				    (uint64_t)rmrr->rm_base, rmrr->rm_limit -
1014 				    rmrr->rm_base + 1)) {
1015 					ddi_err(DER_WARN, rdip, "RMRR range "
1016 					    " [0x%" PRIx64 " - 0x%" PRIx64 "]"
1017 					    " not in BIOS reserved map",
1018 					    rmrr->rm_base, rmrr->rm_limit);
1019 				}
1020 
1021 				/* XXX could be more efficient */
1022 				memlist_read_lock();
1023 				if (address_in_memlist(phys_install,
1024 				    (uint64_t)rmrr->rm_base, rmrr->rm_limit -
1025 				    rmrr->rm_base + 1)) {
1026 					ddi_err(DER_WARN, rdip, "RMRR range "
1027 					    " [0x%" PRIx64 " - 0x%" PRIx64 "]"
1028 					    " is in physinstall map",
1029 					    rmrr->rm_base, rmrr->rm_limit);
1030 				}
1031 				memlist_read_unlock();
1032 
1033 
1034 				ddi_err(DER_LOG, rdip,
1035 				    "IMMU: Mapping RMRR range "
1036 				    "[0x%" PRIx64 " - 0x%"PRIx64 "]",
1037 				    rmrr->rm_base, rmrr->rm_limit);
1038 
1039 				mrng.mrng_start =
1040 				    IMMU_ROUNDOWN((uintptr_t)rmrr->rm_base);
1041 				mrng.mrng_npages =
1042 				    IMMU_ROUNDUP((uintptr_t)rmrr->rm_limit -
1043 				    (uintptr_t)rmrr->rm_base + 1) /
1044 				    IMMU_PAGESIZE;
1045 				e = immu_dvma_map(NULL, NULL, &mrng, 0, rdip,
1046 				    IMMU_FLAGS_READ | IMMU_FLAGS_WRITE |
1047 				    IMMU_FLAGS_MEMRNG);
1048 				/*
1049 				 * dip may have unity domain or xlate domain
1050 				 * If the former, PHYSICAL is returned else
1051 				 * MAPPED is returned.
1052 				 */
1053 				ASSERT(e == DDI_DMA_MAPPED ||
1054 				    e == DDI_DMA_USE_PHYSICAL);
1055 			}
1056 		}
1057 	}
1058 
1059 }
1060 
1061 immu_t *
1062 immu_dmar_get_immu(dev_info_t *rdip)
1063 {
1064 	int seg;
1065 	int tlevel;
1066 	int level;
1067 	drhd_t *drhd;
1068 	drhd_t *tdrhd;
1069 	scope_t *scope;
1070 	dmar_table_t *tbl;
1071 
1072 	ASSERT(dmar_table);
1073 
1074 	tbl = dmar_table;
1075 
1076 	mutex_enter(&(tbl->tbl_lock));
1077 
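	/*
	 * DRHD selection, in order of precedence:
	 *   1. exact endpoint device-scope match for this BDF,
	 *   2. the closest ancestor covered by a subtree (bridge) scope,
	 *   3. the catchall INCLUDE_ALL unit for the segment.
	 */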
1078 	/*
1079 	 * for each segment, walk the drhd list looking for an exact match
1080 	 */
1081 	for (seg = 0; seg < IMMU_MAXSEG; seg++) {
1082 		drhd = list_head(&(tbl->tbl_drhd_list)[seg]);
1083 		for (; drhd; drhd = list_next(&(tbl->tbl_drhd_list)[seg],
1084 		    drhd)) {
1085 
1086 			/*
1087 			 * we are currently searching for exact matches so
1088 			 * skip "include all" (catchall) and subtree matches
1089 			 */
1090 			if (drhd->dr_include_all == B_TRUE)
1091 				continue;
1092 
1093 			/*
1094 			 * try to match BDF *exactly* to a device scope.
1095 			 */
1096 			scope = list_head(&(drhd->dr_scope_list));
1097 			for (; scope;
1098 			    scope = list_next(&(drhd->dr_scope_list), scope)) {
1099 				immu_arg_t imarg = {0};
1100 
1101 				/* PCI endpoint devices only */
1102 				if (scope->scp_type != DMAR_ENDPOINT)
1103 					continue;
1104 
1105 				imarg.ima_seg = seg;
1106 				imarg.ima_bus = scope->scp_bus;
1107 				imarg.ima_devfunc =
1108 				    IMMU_PCI_DEVFUNC(scope->scp_dev,
1109 				    scope->scp_func);
1110 				imarg.ima_ddip = NULL;
1111 				imarg.ima_rdip = rdip;
1112 				level = 0;
1113 				if (immu_walk_ancestor(rdip, NULL, match_bdf,
1114 				    &imarg, &level, IMMU_FLAGS_DONTPASS)
1115 				    != DDI_SUCCESS) {
1116 					/* skip - nothing else we can do */
1117 					continue;
1118 				}
1119 
1120 				/* Should have walked only 1 level i.e. rdip */
1121 				ASSERT(level == 1);
1122 
1123 				if (imarg.ima_ddip) {
1124 					ASSERT(imarg.ima_ddip == rdip);
1125 					goto found;
1126 				}
1127 			}
1128 		}
1129 	}
1130 
1131 	/*
1132 	 * walk the drhd list looking for subtree match
1133 	 * i.e. is the device a descendant of a devscope BDF.
1134 	 * We want the lowest subtree.
1135 	 */
1136 	tdrhd = NULL;
1137 	tlevel = 0;
1138 	for (seg = 0; seg < IMMU_MAXSEG; seg++) {
1139 		drhd = list_head(&(tbl->tbl_drhd_list)[seg]);
1140 		for (; drhd; drhd = list_next(&(tbl->tbl_drhd_list)[seg],
1141 		    drhd)) {
1142 
1143 			/* looking for subtree match */
1144 			if (drhd->dr_include_all == B_TRUE)
1145 				continue;
1146 
1147 			/*
1148 			 * try to match the device scope
1149 			 */
1150 			scope = list_head(&(drhd->dr_scope_list));
1151 			for (; scope;
1152 			    scope = list_next(&(drhd->dr_scope_list), scope)) {
1153 				immu_arg_t imarg = {0};
1154 
1155 				/* PCI subtree only */
1156 				if (scope->scp_type != DMAR_SUBTREE)
1157 					continue;
1158 
1159 				imarg.ima_seg = seg;
1160 				imarg.ima_bus = scope->scp_bus;
1161 				imarg.ima_devfunc =
1162 				    IMMU_PCI_DEVFUNC(scope->scp_dev,
1163 				    scope->scp_func);
1164 
1165 				imarg.ima_ddip = NULL;
1166 				imarg.ima_rdip = rdip;
1167 				level = 0;
1168 				if (immu_walk_ancestor(rdip, NULL, match_bdf,
1169 				    &imarg, &level, 0) != DDI_SUCCESS) {
1170 					/* skip - nothing else we can do */
1171 					continue;
1172 				}
1173 
1174 				/* should have walked at least 1 level */
1175 				ASSERT(level > 0);
1176 
1177 				/* look for lowest ancestor matching drhd */
1178 				if (imarg.ima_ddip && (tdrhd == NULL ||
1179 				    level < tlevel)) {
1180 					tdrhd = drhd;
1181 					tlevel = level;
1182 				}
1183 			}
1184 		}
1185 	}
1186 
1187 	if ((drhd = tdrhd) != NULL) {
1188 		goto found;
1189 	}
1190 
1191 	for (seg = 0; seg < IMMU_MAXSEG; seg++) {
1192 		drhd = list_head(&(tbl->tbl_drhd_list[seg]));
1193 		for (; drhd; drhd = list_next(&(tbl->tbl_drhd_list)[seg],
1194 		    drhd)) {
1195 			/* Look for include all */
1196 			if (drhd->dr_include_all == B_TRUE) {
1197 				break;
1198 			}
1199 		}
1200 	}
1201 
1202 	/*FALLTHRU*/
1203 
1204 found:
1205 	mutex_exit(&(tbl->tbl_lock));
1206 
1207 	/*
1208 	 * No drhd (dmar unit) found for this device in the ACPI DMAR tables.
1209 	 * This may happen with buggy versions of BIOSes. Just warn instead
1210 	 * of panic as we don't want whole system to go down because of one
1211 	 * device.
1212 	 */
1213 	if (drhd == NULL) {
1214 		ddi_err(DER_WARN, rdip, "can't find Intel IOMMU unit for "
1215 		    "device in ACPI DMAR table.");
1216 		return (NULL);
1217 	}
1218 
1219 	return (drhd->dr_immu);
1220 }
1221 
1222 char *
1223 immu_dmar_unit_name(void *dmar_unit)
1224 {
1225 	drhd_t *drhd = (drhd_t *)dmar_unit;
1226 
1227 	ASSERT(drhd->dr_dip);
1228 	return (ddi_node_name(drhd->dr_dip));
1229 }
1230 
1231 dev_info_t *
1232 immu_dmar_unit_dip(void *dmar_unit)
1233 {
1234 	drhd_t *drhd = (drhd_t *)dmar_unit;
1235 	return (drhd->dr_dip);
1236 }
1237 
1238 void *
1239 immu_dmar_walk_units(int seg, void *dmar_unit)
1240 {
1241 	list_t *drhd_list;
1242 	drhd_t *drhd = (drhd_t *)dmar_unit;
1243 
1244 	drhd_list = &(dmar_table->tbl_drhd_list[seg]);
1245 
1246 	if (drhd == NULL) {
1247 		return ((void *)list_head(drhd_list));
1248 	} else {
1249 		return ((void *)list_next(drhd_list, drhd));
1250 	}
1251 }
1252 
1253 void
1254 immu_dmar_set_immu(void *dmar_unit, immu_t *immu)
1255 {
1256 	drhd_t *drhd = (drhd_t *)dmar_unit;
1257 
1258 	ASSERT(drhd);
1259 	ASSERT(immu);
1260 
1261 	drhd->dr_immu = immu;
1262 }
1263 
1264 boolean_t
1265 immu_dmar_intrmap_supported(void)
1266 {
1267 	ASSERT(dmar_table);
1268 	return (dmar_table->tbl_intrmap);
1269 }
1270 
1271 /* for a given ioapicid, find the interrupt source-id */
1272 uint16_t
1273 immu_dmar_ioapic_sid(int ioapic_ix)
1274 {
1275 	ioapic_drhd_t *idt;
1276 
1277 	idt = ioapic_drhd_lookup(psm_get_ioapicid(ioapic_ix));
1278 	if (idt == NULL) {
1279 		ddi_err(DER_PANIC, NULL, "cannot determine source-id for "
1280 		    "IOAPIC (index = %d)", ioapic_ix);
1281 		/*NOTREACHED*/
1282 	}
1283 
1284 	return (idt->ioapic_sid);
1285 }
1286 
1287 /* for a given ioapicid, find the immu unit that owns it */
1288 immu_t *
1289 immu_dmar_ioapic_immu(int ioapic_ix)
1290 {
1291 	ioapic_drhd_t *idt;
1292 
1293 	idt = ioapic_drhd_lookup(psm_get_ioapicid(ioapic_ix));
1294 	if (idt) {
1295 		return (idt->ioapic_drhd ? idt->ioapic_drhd->dr_immu : NULL);
1296 	}
1297 	return (NULL);
1298 }
1299