xref: /titanic_50/usr/src/uts/i86pc/io/immu.c (revision b56bf881a9655cb27b53cba1468312f7c6dfb0a2)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Portions Copyright (c) 2010, Oracle and/or its affiliates.
23  * All rights reserved.
24  */
25 /*
26  * Copyright (c) 2009, Intel Corporation.
27  * All rights reserved.
28  */
29 
30 /*
31  * Intel IOMMU implementation
32  * This file contains Intel IOMMU code exported
33  * to the rest of the system and code that deals
34  * with the Intel IOMMU as a whole.
35  */
36 
37 #include <sys/conf.h>
38 #include <sys/modctl.h>
39 #include <sys/pci.h>
40 #include <sys/pci_impl.h>
41 #include <sys/sysmacros.h>
42 #include <sys/ddi.h>
43 #include <sys/ddidmareq.h>
44 #include <sys/ddi_impldefs.h>
45 #include <sys/ddifm.h>
46 #include <sys/sunndi.h>
47 #include <sys/debug.h>
48 #include <sys/fm/protocol.h>
49 #include <sys/note.h>
50 #include <sys/apic.h>
51 #include <vm/hat_i86.h>
52 #include <sys/smp_impldefs.h>
53 #include <sys/spl.h>
54 #include <sys/archsystm.h>
55 #include <sys/x86_archext.h>
56 #include <sys/rootnex.h>
57 #include <sys/avl.h>
58 #include <sys/bootconf.h>
59 #include <sys/bootinfo.h>
60 #include <sys/atomic.h>
61 #include <sys/immu.h>
62 /* ########################### Globals and tunables ######################## */
63 /*
64  * Global switches (boolean) that can be toggled either via boot options
65  * or via /etc/system or kmdb
66  */
67 
68 /* Various features */
69 boolean_t immu_enable = B_TRUE;
70 boolean_t immu_dvma_enable = B_TRUE;
71 
72 /* accessed in other files so not static */
73 boolean_t immu_gfxdvma_enable = B_TRUE;
74 boolean_t immu_intrmap_enable = B_FALSE;
75 boolean_t immu_qinv_enable = B_FALSE;
76 
77 /* various quirks that need working around */
78 
79 /* XXX We always map page 0 read/write for now */
80 boolean_t immu_quirk_usbpage0 = B_TRUE;
81 boolean_t immu_quirk_usbrmrr = B_TRUE;
82 boolean_t immu_quirk_usbfullpa;
83 boolean_t immu_quirk_mobile4;
84 
85 /* debug messages */
86 boolean_t immu_dmar_print;
87 
88 /* Tunables */
89 int64_t immu_flush_gran = 5;
90 
91 immu_flags_t immu_global_dvma_flags;
92 
93 /* ############  END OPTIONS section ################ */
94 
/*
 * Globals used internally by Intel IOMMU code
 */
98 dev_info_t *root_devinfo;
99 kmutex_t immu_lock;
100 list_t immu_list;
101 void *immu_pgtable_cache;
102 boolean_t immu_setup;
103 boolean_t immu_running;
104 boolean_t immu_quiesced;
105 
106 /* ######################## END Globals and tunables ###################### */
107 /* Globals used only in this file */
108 static char **black_array;
109 static uint_t nblacks;
110 
111 static char **unity_driver_array;
112 static uint_t nunity;
113 static char **xlate_driver_array;
114 static uint_t nxlate;
115 /* ###################### Utility routines ############################# */
116 
117 /*
118  * Check if the device has mobile 4 chipset
119  */
120 static int
121 check_mobile4(dev_info_t *dip, void *arg)
122 {
123 	_NOTE(ARGUNUSED(arg));
124 	int vendor, device;
125 	int *ip = (int *)arg;
126 
127 	ASSERT(arg);
128 
129 	vendor = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
130 	    "vendor-id", -1);
131 	device = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
132 	    "device-id", -1);
133 
134 	if (vendor == 0x8086 && device == 0x2a40) {
135 		*ip = B_TRUE;
136 		ddi_err(DER_NOTE, dip, "IMMU: Mobile 4 chipset detected. "
137 		    "Force setting IOMMU write buffer");
138 		return (DDI_WALK_TERMINATE);
139 	} else {
140 		return (DDI_WALK_CONTINUE);
141 	}
142 }
143 
144 static void
145 map_bios_rsvd_mem(dev_info_t *dip)
146 {
147 	struct memlist *mp;
148 	int e;
149 
150 	memlist_read_lock();
151 
152 	mp = bios_rsvd;
153 	while (mp != NULL) {
154 		memrng_t mrng = {0};
155 
156 		ddi_err(DER_LOG, dip, "IMMU: Mapping BIOS rsvd range "
157 		    "[0x%" PRIx64 " - 0x%"PRIx64 "]\n", mp->ml_address,
158 		    mp->ml_address + mp->ml_size);
159 
160 		mrng.mrng_start = IMMU_ROUNDOWN(mp->ml_address);
161 		mrng.mrng_npages = IMMU_ROUNDUP(mp->ml_size) / IMMU_PAGESIZE;
162 
163 		e = immu_dvma_map(NULL, NULL, &mrng, 0, dip, IMMU_FLAGS_MEMRNG);
164 		ASSERT(e == DDI_DMA_MAPPED || e == DDI_DMA_USE_PHYSICAL);
165 
166 		mp = mp->ml_next;
167 	}
168 
169 	memlist_read_unlock();
170 }
171 
172 
173 /*
174  * Check if the driver requests a specific type of mapping.
175  */
176 /*ARGSUSED*/
177 static void
178 check_conf(dev_info_t *dip, void *arg)
179 {
180 	immu_devi_t *immu_devi;
181 	const char *dname;
182 	uint_t i;
183 	int hasprop = 0;
184 
185 	/*
186 	 * Only PCI devices can use an IOMMU. Legacy ISA devices
187 	 * are handled in check_lpc.
188 	 */
189 	if (!DEVI_IS_PCI(dip))
190 		return;
191 
192 	dname = ddi_driver_name(dip);
193 	if (dname == NULL)
194 		return;
195 	immu_devi = immu_devi_get(dip);
196 
197 	for (i = 0; i < nunity; i++) {
198 		if (strcmp(unity_driver_array[i], dname) == 0) {
199 			hasprop = 1;
200 			immu_devi->imd_dvma_flags |= IMMU_FLAGS_UNITY;
201 		}
202 	}
203 
204 	for (i = 0; i < nxlate; i++) {
205 		if (strcmp(xlate_driver_array[i], dname) == 0) {
206 			hasprop = 1;
207 			immu_devi->imd_dvma_flags &= ~IMMU_FLAGS_UNITY;
208 		}
209 	}
210 
211 	/*
212 	 * Report if we changed the value from the default.
213 	 */
214 	if (hasprop && (immu_devi->imd_dvma_flags ^ immu_global_dvma_flags))
215 		ddi_err(DER_LOG, dip, "using %s DVMA mapping",
216 		    immu_devi->imd_dvma_flags & IMMU_FLAGS_UNITY ?
217 		    DDI_DVMA_MAPTYPE_UNITY : DDI_DVMA_MAPTYPE_XLATE);
218 }
219 
220 /*
221  * Check if the device is USB controller
222  */
223 /*ARGSUSED*/
224 static void
225 check_usb(dev_info_t *dip, void *arg)
226 {
227 	const char *drv = ddi_driver_name(dip);
228 	immu_devi_t *immu_devi;
229 
230 
231 	if (drv == NULL ||
232 	    (strcmp(drv, "uhci") != 0 && strcmp(drv, "ohci") != 0 &&
233 	    strcmp(drv, "ehci") != 0)) {
234 		return;
235 	}
236 
237 	immu_devi = immu_devi_get(dip);
238 
239 	/*
240 	 * If unit mappings are already specified, globally or
241 	 * locally, we're done here, since that covers both
242 	 * quirks below.
243 	 */
244 	if (immu_devi->imd_dvma_flags & IMMU_FLAGS_UNITY)
245 		return;
246 
247 	/* This must come first since it does unity mapping */
248 	if (immu_quirk_usbfullpa == B_TRUE) {
249 		immu_devi->imd_dvma_flags |= IMMU_FLAGS_UNITY;
250 	} else if (immu_quirk_usbrmrr == B_TRUE) {
251 		ddi_err(DER_LOG, dip, "Applying USB RMRR quirk");
252 		map_bios_rsvd_mem(dip);
253 	}
254 }
255 
256 /*
257  * Check if the device is a LPC device
258  */
259 /*ARGSUSED*/
260 static void
261 check_lpc(dev_info_t *dip, void *arg)
262 {
263 	immu_devi_t *immu_devi;
264 
265 	immu_devi = immu_devi_get(dip);
266 	ASSERT(immu_devi);
267 	if (immu_devi->imd_lpc == B_TRUE) {
268 		ddi_err(DER_LOG, dip, "IMMU: Found LPC device");
269 		/* This will put the immu_devi on the LPC "specials" list */
270 		(void) immu_dvma_get_immu(dip, IMMU_FLAGS_SLEEP);
271 	}
272 }
273 
274 /*
275  * Check if the device is a GFX device
276  */
277 /*ARGSUSED*/
278 static void
279 check_gfx(dev_info_t *dip, void *arg)
280 {
281 	immu_devi_t *immu_devi;
282 
283 	immu_devi = immu_devi_get(dip);
284 	ASSERT(immu_devi);
285 	if (immu_devi->imd_display == B_TRUE) {
286 		immu_devi->imd_dvma_flags |= IMMU_FLAGS_UNITY;
287 		ddi_err(DER_LOG, dip, "IMMU: Found GFX device");
288 		/* This will put the immu_devi on the GFX "specials" list */
289 		(void) immu_dvma_get_immu(dip, IMMU_FLAGS_SLEEP);
290 	}
291 }
292 
293 static void
294 walk_tree(int (*f)(dev_info_t *, void *), void *arg)
295 {
296 	int count;
297 
298 	ndi_devi_enter(root_devinfo, &count);
299 	ddi_walk_devs(ddi_get_child(root_devinfo), f, arg);
300 	ndi_devi_exit(root_devinfo, count);
301 }
302 
303 static int
304 check_pre_setup_quirks(dev_info_t *dip, void *arg)
305 {
306 	/* just 1 check right now */
307 	return (check_mobile4(dip, arg));
308 }
309 
310 static int
311 check_pre_startup_quirks(dev_info_t *dip, void *arg)
312 {
313 	if (immu_devi_set(dip, IMMU_FLAGS_SLEEP) != DDI_SUCCESS) {
314 		ddi_err(DER_PANIC, dip, "Failed to get immu_devi");
315 	}
316 
317 	check_gfx(dip, arg);
318 
319 	check_lpc(dip, arg);
320 
321 	check_conf(dip, arg);
322 
323 	check_usb(dip, arg);
324 
325 	return (DDI_WALK_CONTINUE);
326 }
327 
328 static void
329 pre_setup_quirks(void)
330 {
331 	walk_tree(check_pre_setup_quirks, &immu_quirk_mobile4);
332 }
333 
334 static void
335 pre_startup_quirks(void)
336 {
337 	walk_tree(check_pre_startup_quirks, NULL);
338 
339 	immu_dmar_rmrr_map();
340 }
341 
342 static int
343 get_conf_str(char *bopt, char **val)
344 {
345 	int ret;
346 
347 	/*
348 	 * Check the rootnex.conf property
349 	 * Fake up a dev_t since searching the global
350 	 * property list needs it
351 	 */
352 	ret = ddi_prop_lookup_string(
353 	    makedevice(ddi_name_to_major("rootnex"), 0),
354 	    root_devinfo, DDI_PROP_DONTPASS | DDI_PROP_ROOTNEX_GLOBAL,
355 	    bopt, val);
356 
357 	return (ret);
358 }
359 
360 /*
361  * get_conf_opt()
362  * 	get a rootnex.conf setting  (always a boolean)
363  */
364 static void
365 get_conf_opt(char *bopt, boolean_t *kvar)
366 {
367 	char *val = NULL;
368 
369 	ASSERT(bopt);
370 	ASSERT(kvar);
371 
372 	/*
373 	 * Check the rootnex.conf property
374 	 * Fake up a dev_t since searching the global
375 	 * property list needs it
376 	 */
377 
378 	if (get_conf_str(bopt, &val) != DDI_PROP_SUCCESS)
379 		return;
380 
381 	if (strcmp(val, "true") == 0) {
382 		*kvar = B_TRUE;
383 	} else if (strcmp(val, "false") == 0) {
384 		*kvar = B_FALSE;
385 	} else {
386 		ddi_err(DER_WARN, NULL, "rootnex.conf switch %s=\"%s\" ",
387 		    "is not set to true or false. Ignoring option.",
388 		    bopt, val);
389 	}
390 	ddi_prop_free(val);
391 }
392 
393 /*
394  * get_bootopt()
395  * 	check a boot option  (always a boolean)
396  */
397 static int
398 get_boot_str(char *bopt, char **val)
399 {
400 	int ret;
401 
402 	ret = ddi_prop_lookup_string(DDI_DEV_T_ANY, root_devinfo,
403 	    DDI_PROP_DONTPASS, bopt, val);
404 
405 	return (ret);
406 }
407 
408 static void
409 get_bootopt(char *bopt, boolean_t *kvar)
410 {
411 	char *val = NULL;
412 
413 	/*
414 	 * All boot options set at the GRUB menu become
415 	 * properties on the rootnex.
416 	 */
417 	if (get_boot_str(bopt, &val) != DDI_PROP_SUCCESS)
418 		return;
419 
420 	if (strcmp(val, "true") == 0) {
421 		*kvar = B_TRUE;
422 	} else if (strcmp(val, "false") == 0) {
423 		*kvar = B_FALSE;
424 	} else {
425 		ddi_err(DER_WARN, NULL, "boot option %s=\"%s\" ",
426 		    "is not set to true or false. Ignoring option.",
427 		    bopt, val);
428 	}
429 	ddi_prop_free(val);
430 }
431 
432 static void
433 get_boot_dvma_mode(void)
434 {
435 	char *val = NULL;
436 
437 	if (get_boot_str(DDI_DVMA_MAPTYPE_ROOTNEX_PROP, &val)
438 	    != DDI_PROP_SUCCESS)
439 		return;
440 
441 	if (strcmp(val, DDI_DVMA_MAPTYPE_UNITY) == 0) {
442 		immu_global_dvma_flags |= IMMU_FLAGS_UNITY;
443 	} else if (strcmp(val, DDI_DVMA_MAPTYPE_XLATE) == 0) {
444 		immu_global_dvma_flags &= ~IMMU_FLAGS_UNITY;
445 	} else {
446 		ddi_err(DER_WARN, NULL, "bad value \"%s\" for boot option %s",
447 		    val, DDI_DVMA_MAPTYPE_ROOTNEX_PROP);
448 	}
449 	ddi_prop_free(val);
450 }
451 
452 static void
453 get_conf_dvma_mode(void)
454 {
455 	char *val = NULL;
456 
457 	if (get_conf_str(DDI_DVMA_MAPTYPE_ROOTNEX_PROP, &val)
458 	    != DDI_PROP_SUCCESS)
459 		return;
460 
461 	if (strcmp(val, DDI_DVMA_MAPTYPE_UNITY) == 0) {
462 		immu_global_dvma_flags |= IMMU_FLAGS_UNITY;
463 	} else if (strcmp(val, DDI_DVMA_MAPTYPE_XLATE) == 0) {
464 		immu_global_dvma_flags &= ~IMMU_FLAGS_UNITY;
465 	} else {
466 		ddi_err(DER_WARN, NULL, "bad value \"%s\" for rootnex "
467 		    "option %s", val, DDI_DVMA_MAPTYPE_ROOTNEX_PROP);
468 	}
469 	ddi_prop_free(val);
470 }
471 
472 
473 static void
474 get_conf_tunables(char *bopt, int64_t *ivar)
475 {
476 	int64_t	*iarray;
477 	uint_t n;
478 
479 	/*
480 	 * Check the rootnex.conf property
481 	 * Fake up a dev_t since searching the global
482 	 * property list needs it
483 	 */
484 	if (ddi_prop_lookup_int64_array(
485 	    makedevice(ddi_name_to_major("rootnex"), 0), root_devinfo,
486 	    DDI_PROP_DONTPASS | DDI_PROP_ROOTNEX_GLOBAL, bopt,
487 	    &iarray, &n) != DDI_PROP_SUCCESS) {
488 		return;
489 	}
490 
491 	if (n != 1) {
492 		ddi_err(DER_WARN, NULL, "More than one value specified for "
493 		    "%s property. Ignoring and using default",
494 		    "immu-flush-gran");
495 		ddi_prop_free(iarray);
496 		return;
497 	}
498 
499 	if (iarray[0] < 0) {
500 		ddi_err(DER_WARN, NULL, "Negative value specified for "
501 		    "%s property. Inoring and Using default value",
502 		    "immu-flush-gran");
503 		ddi_prop_free(iarray);
504 		return;
505 	}
506 
507 	*ivar = iarray[0];
508 
509 	ddi_prop_free(iarray);
510 }
511 
/*
 * read_conf_options()
 * 	read all Intel IOMMU settings from rootnex.conf.  Called before
 * 	read_boot_options() so boot options can override these values.
 */
static void
read_conf_options(void)
{
	/* enable/disable options */
	get_conf_opt("immu-enable", &immu_enable);
	get_conf_opt("immu-dvma-enable", &immu_dvma_enable);
	get_conf_opt("immu-gfxdvma-enable", &immu_gfxdvma_enable);
	get_conf_opt("immu-intrmap-enable", &immu_intrmap_enable);
	get_conf_opt("immu-qinv-enable", &immu_qinv_enable);

	/* workaround switches */
	get_conf_opt("immu-quirk-usbpage0", &immu_quirk_usbpage0);
	get_conf_opt("immu-quirk-usbfullpa", &immu_quirk_usbfullpa);
	get_conf_opt("immu-quirk-usbrmrr", &immu_quirk_usbrmrr);

	/* debug printing */
	get_conf_opt("immu-dmar-print", &immu_dmar_print);

	/* get tunables */
	get_conf_tunables("immu-flush-gran", &immu_flush_gran);

	/* default DVMA mapping type ("unity"/"xlate") */
	get_conf_dvma_mode();
}
535 
/*
 * read_boot_options()
 * 	read all Intel IOMMU boot (GRUB) options; these override any
 * 	settings previously read from rootnex.conf.
 */
static void
read_boot_options(void)
{
	/* enable/disable options */
	get_bootopt("immu-enable", &immu_enable);
	get_bootopt("immu-dvma-enable", &immu_dvma_enable);
	get_bootopt("immu-gfxdvma-enable", &immu_gfxdvma_enable);
	get_bootopt("immu-intrmap-enable", &immu_intrmap_enable);
	get_bootopt("immu-qinv-enable", &immu_qinv_enable);

	/* workaround switches */
	get_bootopt("immu-quirk-usbpage0", &immu_quirk_usbpage0);
	get_bootopt("immu-quirk-usbfullpa", &immu_quirk_usbfullpa);
	get_bootopt("immu-quirk-usbrmrr", &immu_quirk_usbrmrr);

	/* debug printing */
	get_bootopt("immu-dmar-print", &immu_dmar_print);

	/* default DVMA mapping type ("unity"/"xlate") */
	get_boot_dvma_mode();
}
556 
557 static void
558 mapping_list_setup(void)
559 {
560 	char **string_array;
561 	uint_t nstrings;
562 
563 	if (ddi_prop_lookup_string_array(
564 	    makedevice(ddi_name_to_major("rootnex"), 0), root_devinfo,
565 	    DDI_PROP_DONTPASS | DDI_PROP_ROOTNEX_GLOBAL,
566 	    "immu-dvma-unity-drivers",
567 	    &string_array, &nstrings) == DDI_PROP_SUCCESS) {
568 		unity_driver_array = string_array;
569 		nunity = nstrings;
570 	}
571 
572 	if (ddi_prop_lookup_string_array(
573 	    makedevice(ddi_name_to_major("rootnex"), 0), root_devinfo,
574 	    DDI_PROP_DONTPASS | DDI_PROP_ROOTNEX_GLOBAL,
575 	    "immu-dvma-xlate-drivers",
576 	    &string_array, &nstrings) == DDI_PROP_SUCCESS) {
577 		xlate_driver_array = string_array;
578 		nxlate = nstrings;
579 	}
580 }
581 
582 /*
583  * Note, this will not catch hardware not enumerated
584  * in early boot
585  */
/*
 * blacklisted_driver()
 * 	scan the "immu-blacklist" string array for ("DRIVER", <name>)
 * 	entries; return B_TRUE if any named driver is installed and has
 * 	hardware bound to it.
 */
static boolean_t
blacklisted_driver(void)
{
	char **strptr;
	int i;
	major_t maj;

	/* black_array and nblacks must both be set, or both clear */
	ASSERT((black_array == NULL) ^ (nblacks != 0));

	/* need at least 2 strings */
	if (nblacks < 2) {
		return (B_FALSE);
	}

	/* step through the array while at least one pair remains */
	for (i = 0; nblacks - i > 1; i++) {
		strptr = &black_array[i];
		if (strcmp(*strptr++, "DRIVER") == 0) {
			if ((maj = ddi_name_to_major(*strptr++))
			    != DDI_MAJOR_T_NONE) {
				/* is there hardware bound to this drvr */
				if (devnamesp[maj].dn_head != NULL) {
					return (B_TRUE);
				}
			}
			i += 1;   /* for loop adds 1, so add only 1 here */
		}
	}

	return (B_FALSE);
}
616 
617 static boolean_t
618 blacklisted_smbios(void)
619 {
620 	id_t smid;
621 	smbios_hdl_t *smhdl;
622 	smbios_info_t sminf;
623 	smbios_system_t smsys;
624 	char *mfg, *product, *version;
625 	char **strptr;
626 	int i;
627 
628 	ASSERT((black_array == NULL) ^ (nblacks != 0));
629 
630 	/* need at least 4 strings for this setting */
631 	if (nblacks < 4) {
632 		return (B_FALSE);
633 	}
634 
635 	smhdl = smbios_open(NULL, SMB_VERSION, ksmbios_flags, NULL);
636 	if (smhdl == NULL ||
637 	    (smid = smbios_info_system(smhdl, &smsys)) == SMB_ERR ||
638 	    smbios_info_common(smhdl, smid, &sminf) == SMB_ERR) {
639 		return (B_FALSE);
640 	}
641 
642 	mfg = (char *)sminf.smbi_manufacturer;
643 	product = (char *)sminf.smbi_product;
644 	version = (char *)sminf.smbi_version;
645 
646 	ddi_err(DER_CONT, NULL, "?System SMBIOS information:\n");
647 	ddi_err(DER_CONT, NULL, "?Manufacturer = <%s>\n", mfg);
648 	ddi_err(DER_CONT, NULL, "?Product = <%s>\n", product);
649 	ddi_err(DER_CONT, NULL, "?Version = <%s>\n", version);
650 
651 	for (i = 0; nblacks - i > 3; i++) {
652 		strptr = &black_array[i];
653 		if (strcmp(*strptr++, "SMBIOS") == 0) {
654 			if (strcmp(*strptr++, mfg) == 0 &&
655 			    ((char *)strptr == '\0' ||
656 			    strcmp(*strptr++, product) == 0) &&
657 			    ((char *)strptr == '\0' ||
658 			    strcmp(*strptr++, version) == 0)) {
659 				return (B_TRUE);
660 			}
661 			i += 3;
662 		}
663 	}
664 
665 	return (B_FALSE);
666 }
667 
668 static boolean_t
669 blacklisted_acpi(void)
670 {
671 	ASSERT((black_array == NULL) ^ (nblacks != 0));
672 	if (nblacks == 0) {
673 		return (B_FALSE);
674 	}
675 
676 	return (immu_dmar_blacklisted(black_array, nblacks));
677 }
678 
/*
 * Check if the system is blacklisted by the Intel IOMMU driver,
 * i.e. should the Intel IOMMU be disabled on this system.
 * Currently a system can be blacklisted based on the
 * following criteria:
 *
 * 1. DMAR ACPI table information.
 *    This information includes things like
 *    manufacturer and revision number. If rootnex.conf
 *    has matching info set in its blacklist property
 *    then the Intel IOMMU will be disabled
 *
 * 2. SMBIOS information
 *
 * 3. Driver installed - useful if a particular
 *    driver or hardware is toxic if the Intel IOMMU
 *    is turned on.
 */
697 
698 static void
699 blacklist_setup(void)
700 {
701 	char **string_array;
702 	uint_t nstrings;
703 
704 	/*
705 	 * Check the rootnex.conf blacklist property.
706 	 * Fake up a dev_t since searching the global
707 	 * property list needs it
708 	 */
709 	if (ddi_prop_lookup_string_array(
710 	    makedevice(ddi_name_to_major("rootnex"), 0), root_devinfo,
711 	    DDI_PROP_DONTPASS | DDI_PROP_ROOTNEX_GLOBAL, "immu-blacklist",
712 	    &string_array, &nstrings) != DDI_PROP_SUCCESS) {
713 		return;
714 	}
715 
716 	/* smallest blacklist criteria works with multiples of 2 */
717 	if (nstrings % 2 != 0) {
718 		ddi_err(DER_WARN, NULL, "Invalid IOMMU blacklist "
719 		    "rootnex.conf: number of strings must be a "
720 		    "multiple of 2");
721 		ddi_prop_free(string_array);
722 		return;
723 	}
724 
725 	black_array = string_array;
726 	nblacks = nstrings;
727 }
728 
729 static void
730 blacklist_destroy(void)
731 {
732 	if (black_array) {
733 		ddi_prop_free(black_array);
734 		black_array = NULL;
735 		nblacks = 0;
736 	}
737 
738 	ASSERT(black_array == NULL);
739 	ASSERT(nblacks == 0);
740 }
741 
742 
743 /*
744  * Now set all the fields in the order they are defined
745  * We do this only as a defensive-coding practice, it is
746  * not a correctness issue.
747  */
/*
 * immu_state_alloc()
 * 	allocate and initialize the immu_t state structure for the next
 * 	IOMMU (DMAR) unit in the given PCI segment, insert it on the
 * 	system-wide immu_list, and return the dmar_unit handle for use
 * 	as the iteration cursor (NULL when the segment is exhausted).
 * 	Fields are initialized in declaration order as defensive
 * 	practice (see comment above this function).
 */
static void *
immu_state_alloc(int seg, void *dmar_unit)
{
	immu_t *immu;

	dmar_unit = immu_dmar_walk_units(seg, dmar_unit);
	if (dmar_unit == NULL) {
		/* No more IOMMUs in this segment */
		return (NULL);
	}

	immu = kmem_zalloc(sizeof (immu_t), KM_SLEEP);

	mutex_init(&(immu->immu_lock), NULL, MUTEX_DRIVER, NULL);

	/* hold the unit lock while it is being populated */
	mutex_enter(&(immu->immu_lock));

	immu->immu_dmar_unit = dmar_unit;
	immu->immu_name = ddi_strdup(immu_dmar_unit_name(dmar_unit),
	    KM_SLEEP);
	immu->immu_dip = immu_dmar_unit_dip(dmar_unit);

	/*
	 * the immu_intr_lock mutex is grabbed by the IOMMU
	 * unit's interrupt handler so we need to use an
	 * interrupt cookie for the mutex
	 */
	mutex_init(&(immu->immu_intr_lock), NULL, MUTEX_DRIVER,
	    (void *)ipltospl(IMMU_INTR_IPL));

	/* IOMMU regs related */
	mutex_init(&(immu->immu_regs_lock), NULL, MUTEX_DEFAULT, NULL);
	cv_init(&(immu->immu_regs_cv), NULL, CV_DEFAULT, NULL);
	immu->immu_regs_busy = B_FALSE;

	/* DVMA related */
	immu->immu_dvma_coherent = B_FALSE;

	/* DVMA context related */
	rw_init(&(immu->immu_ctx_rwlock), NULL, RW_DEFAULT, NULL);

	/* DVMA domain related */
	list_create(&(immu->immu_domain_list), sizeof (domain_t),
	    offsetof(domain_t, dom_immu_node));

	/* DVMA special device lists */
	immu->immu_dvma_gfx_only = B_FALSE;
	list_create(&(immu->immu_dvma_lpc_list), sizeof (immu_devi_t),
	    offsetof(immu_devi_t, imd_spc_node));
	list_create(&(immu->immu_dvma_gfx_list), sizeof (immu_devi_t),
	    offsetof(immu_devi_t, imd_spc_node));

	/* interrupt remapping related */
	mutex_init(&(immu->immu_intrmap_lock), NULL, MUTEX_DEFAULT, NULL);

	/* qinv related */
	mutex_init(&(immu->immu_qinv_lock), NULL, MUTEX_DEFAULT, NULL);

	/*
	 * insert this immu unit into the system-wide list
	 */
	list_insert_tail(&immu_list, immu);

	mutex_exit(&(immu->immu_lock));

	ddi_err(DER_LOG, immu->immu_dip, "IMMU: unit setup");

	/* let the DMAR code point back at this state structure */
	immu_dmar_set_immu(dmar_unit, immu);

	return (dmar_unit);
}
819 
820 static void
821 immu_subsystems_setup(void)
822 {
823 	int seg;
824 	void *unit_hdl;
825 
826 	ddi_err(DER_VERB, NULL,
827 	    "Creating state structures for Intel IOMMU units\n");
828 
829 	ASSERT(immu_setup == B_FALSE);
830 	ASSERT(immu_running == B_FALSE);
831 
832 	mutex_init(&immu_lock, NULL, MUTEX_DEFAULT, NULL);
833 	list_create(&immu_list, sizeof (immu_t), offsetof(immu_t, immu_node));
834 
835 	mutex_enter(&immu_lock);
836 
837 	ASSERT(immu_pgtable_cache == NULL);
838 
839 	immu_pgtable_cache = kmem_cache_create("immu_pgtable_cache",
840 	    sizeof (pgtable_t), 0,
841 	    pgtable_ctor, pgtable_dtor, NULL, NULL, NULL, 0);
842 
843 	unit_hdl = NULL;
844 	for (seg = 0; seg < IMMU_MAXSEG; seg++) {
845 		while (unit_hdl = immu_state_alloc(seg, unit_hdl)) {
846 			;
847 		}
848 	}
849 
850 	immu_regs_setup(&immu_list);	/* subsequent code needs this first */
851 	immu_dvma_setup(&immu_list);
852 	immu_intrmap_setup(&immu_list);
853 	immu_qinv_setup(&immu_list);
854 
855 	mutex_exit(&immu_lock);
856 }
857 
858 /*
859  * immu_subsystems_startup()
860  * 	startup all units that were setup
861  */
static void
immu_subsystems_startup(void)
{
	immu_t *immu;

	mutex_enter(&immu_lock);

	/* setup must have completed; nothing may be running yet */
	ASSERT(immu_setup == B_TRUE);
	ASSERT(immu_running == B_FALSE);

	immu_dmar_startup();

	/* start each unit: interrupts, DVMA, intr remapping, qinv, regs */
	immu = list_head(&immu_list);
	for (; immu; immu = list_next(&immu_list, immu)) {

		mutex_enter(&(immu->immu_lock));

		immu_intr_register(immu);
		immu_dvma_startup(immu);
		immu_intrmap_startup(immu);
		immu_qinv_startup(immu);

		/*
		 * Set IOMMU unit's regs to do
		 * the actual startup. This will
		 * set immu->immu_running  field
		 * if the unit is successfully
		 * started
		 */
		immu_regs_startup(immu);

		mutex_exit(&(immu->immu_lock));
	}

	mutex_exit(&immu_lock);
}
898 
899 /* ##################  Intel IOMMU internal interfaces ###################### */
900 
901 /*
902  * Internal interfaces for IOMMU code (i.e. not exported to rootnex
903  * or rest of system)
904  */
905 
906 /*
907  * ddip can be NULL, in which case we walk up until we find the root dip
908  * NOTE: We never visit the root dip since its not a hardware node
909  */
/*
 * immu_walk_ancestor()
 * 	walk from rdip up towards the root, invoking func() on each
 * 	ancestor (and on rdip itself).  The walk stops at ddip (if
 * 	non-NULL), on DDI_WALK_TERMINATE from func, on IMMU_FLAGS_DONTPASS,
 * 	or at the root.  *lvlp (if non-NULL) receives the number of
 * 	levels visited.  Returns DDI_SUCCESS/DDI_FAILURE.
 */
int
immu_walk_ancestor(
	dev_info_t *rdip,
	dev_info_t *ddip,
	int (*func)(dev_info_t *, void *arg),
	void *arg,
	int *lvlp,
	immu_flags_t immu_flags)
{
	dev_info_t *pdip;
	int level;
	int error = DDI_SUCCESS;

	ASSERT(root_devinfo);
	ASSERT(rdip);
	ASSERT(rdip != root_devinfo);
	ASSERT(func);

	/* ddip and immu can be NULL */

	/* Hold rdip so that branch is not detached */
	ndi_hold_devi(rdip);
	for (pdip = rdip, level = 1; pdip && pdip != root_devinfo;
	    pdip = ddi_get_parent(pdip), level++) {

		/* set up the immu_devi before func() may use it */
		if (immu_devi_set(pdip, immu_flags) != DDI_SUCCESS) {
			error = DDI_FAILURE;
			break;
		}
		if (func(pdip, arg) == DDI_WALK_TERMINATE) {
			break;
		}
		if (immu_flags & IMMU_FLAGS_DONTPASS) {
			break;
		}
		if (pdip == ddip) {
			break;
		}
	}

	ndi_rele_devi(rdip);

	if (lvlp)
		*lvlp = level;

	return (error);
}
957 
958 /* ########################  Intel IOMMU entry points ####################### */
959 /*
960  * immu_init()
961  *	called from rootnex_attach(). setup but don't startup the Intel IOMMU
962  *      This is the first function called in Intel IOMMU code
963  */
void
immu_init(void)
{
	/* placeholder data for the phony "reg" property created below */
	char *phony_reg = "A thing of beauty is a joy forever";

	/* Set some global shorthands that are needed by all of IOMMU code */
	ASSERT(root_devinfo == NULL);
	root_devinfo = ddi_root_node();

	/*
	 * Intel IOMMU is only supported if the MMU (CPU) page size is
	 * equal to the IOMMU page size.
	 */
	/*LINTED*/
	if (MMU_PAGESIZE != IMMU_PAGESIZE) {
		ddi_err(DER_WARN, NULL,
		    "MMU page size (%d) is not equal to\n"
		    "IOMMU page size (%d). "
		    "Disabling Intel IOMMU. ",
		    MMU_PAGESIZE, IMMU_PAGESIZE);
		immu_enable = B_FALSE;
		return;
	}

	/*
	 * Read rootnex.conf options. Do this before
	 * boot options so boot options can override .conf options.
	 */
	read_conf_options();

	/*
	 * retrieve the Intel IOMMU boot options.
	 * Do this before parsing immu ACPI table
	 * as a boot option could potentially affect
	 * ACPI parsing.
	 */
	ddi_err(DER_CONT, NULL, "?Reading Intel IOMMU boot options\n");
	read_boot_options();

	/*
	 * Check the IOMMU enable boot-option first.
	 * This is so that we can skip parsing the ACPI table
	 * if necessary because that may cause problems in
	 * systems with buggy BIOS or ACPI tables
	 */
	if (immu_enable == B_FALSE) {
		return;
	}

	/*
	 * Next, check if the system even has an Intel IOMMU
	 * We use the presence or absence of the IOMMU ACPI
	 * table to detect Intel IOMMU.
	 */
	if (immu_dmar_setup() != DDI_SUCCESS) {
		immu_enable = B_FALSE;
		return;
	}

	/* per-driver unity/xlate override lists from rootnex.conf */
	mapping_list_setup();

	/*
	 * Check blacklists
	 */
	blacklist_setup();

	if (blacklisted_smbios() == B_TRUE) {
		blacklist_destroy();
		immu_enable = B_FALSE;
		return;
	}

	if (blacklisted_driver() == B_TRUE) {
		blacklist_destroy();
		immu_enable = B_FALSE;
		return;
	}

	/*
	 * Read the "raw" DMAR ACPI table to get information
	 * and convert into a form we can use.
	 */
	if (immu_dmar_parse() != DDI_SUCCESS) {
		blacklist_destroy();
		immu_enable = B_FALSE;
		return;
	}

	/*
	 * now that we have processed the ACPI table
	 * check if we need to blacklist this system
	 * based on ACPI info
	 */
	if (blacklisted_acpi() == B_TRUE) {
		immu_dmar_destroy();
		blacklist_destroy();
		immu_enable = B_FALSE;
		return;
	}

	/* blacklist no longer needed once all checks have passed */
	blacklist_destroy();

	/*
	 * Check if system has HW quirks.
	 */
	pre_setup_quirks();

	/* Now do the rest of the setup */
	immu_subsystems_setup();

	/*
	 * Now that the IMMU is setup, create a phony
	 * reg prop so that suspend/resume works
	 */
	if (ddi_prop_update_byte_array(DDI_DEV_T_NONE, root_devinfo, "reg",
	    (uchar_t *)phony_reg, strlen(phony_reg) + 1) != DDI_PROP_SUCCESS) {
		ddi_err(DER_PANIC, NULL, "Failed to create reg prop for "
		    "rootnex node");
		/*NOTREACHED*/
	}

	immu_setup = B_TRUE;
}
1087 
1088 /*
1089  * immu_startup()
1090  * 	called directly by boot code to startup
1091  *      all units of the IOMMU
1092  */
1093 void
1094 immu_startup(void)
1095 {
1096 	/*
1097 	 * If IOMMU is disabled, do nothing
1098 	 */
1099 	if (immu_enable == B_FALSE) {
1100 		return;
1101 	}
1102 
1103 	if (immu_setup == B_FALSE) {
1104 		ddi_err(DER_WARN, NULL, "Intel IOMMU not setup, "
1105 		    "skipping IOMU startup");
1106 		return;
1107 	}
1108 
1109 	pre_startup_quirks();
1110 
1111 	ddi_err(DER_CONT, NULL,
1112 	    "?Starting Intel IOMMU (dmar) units...\n");
1113 
1114 	immu_subsystems_startup();
1115 
1116 	immu_running = B_TRUE;
1117 }
1118 
1119 /*
1120  * immu_map_sgl()
1121  * 	called from rootnex_coredma_bindhdl() when Intel
1122  *	IOMMU is enabled to build DVMA cookies and map them.
1123  */
1124 int
1125 immu_map_sgl(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
1126     int prealloc_count, dev_info_t *rdip)
1127 {
1128 	if (immu_running == B_FALSE) {
1129 		return (DDI_DMA_USE_PHYSICAL);
1130 	}
1131 
1132 	return (immu_dvma_map(hp, dmareq, NULL, prealloc_count, rdip,
1133 	    IMMU_FLAGS_DMAHDL));
1134 }
1135 
1136 /*
1137  * immu_unmap_sgl()
1138  * 	called from rootnex_coredma_unbindhdl(), to unmap DVMA
1139  * 	cookies and free them
1140  */
1141 int
1142 immu_unmap_sgl(ddi_dma_impl_t *hp, dev_info_t *rdip)
1143 {
1144 	if (immu_running == B_FALSE) {
1145 		return (DDI_DMA_USE_PHYSICAL);
1146 	}
1147 
1148 	return (immu_dvma_unmap(hp, rdip));
1149 }
1150 
1151 /*
1152  * Hook to notify IOMMU code of device tree changes
1153  */
1154 void
1155 immu_device_tree_changed(void)
1156 {
1157 	if (immu_setup == B_FALSE) {
1158 		return;
1159 	}
1160 
1161 	ddi_err(DER_WARN, NULL, "Intel IOMMU currently "
1162 	    "does not use device tree updates");
1163 }
1164 
1165 /*
1166  * Hook to notify IOMMU code of memory changes
1167  */
1168 void
1169 immu_physmem_update(uint64_t addr, uint64_t size)
1170 {
1171 	if (immu_setup == B_FALSE) {
1172 		return;
1173 	}
1174 	immu_dvma_physmem_update(addr, size);
1175 }
1176 
1177 /*
1178  * immu_quiesce()
1179  * 	quiesce all units that are running
1180  */
1181 int
1182 immu_quiesce(void)
1183 {
1184 	immu_t *immu;
1185 	int ret = DDI_SUCCESS;
1186 
1187 	mutex_enter(&immu_lock);
1188 
1189 	if (immu_running == B_FALSE)
1190 		return (DDI_SUCCESS);
1191 
1192 	ASSERT(immu_setup == B_TRUE);
1193 
1194 	immu = list_head(&immu_list);
1195 	for (; immu; immu = list_next(&immu_list, immu)) {
1196 
1197 		/* if immu is not running, we dont quiesce */
1198 		if (immu->immu_regs_running == B_FALSE)
1199 			continue;
1200 
1201 		/* flush caches */
1202 		rw_enter(&(immu->immu_ctx_rwlock), RW_WRITER);
1203 		immu_regs_context_flush(immu, 0, 0, 0, CONTEXT_GLOBAL);
1204 		rw_exit(&(immu->immu_ctx_rwlock));
1205 		immu_regs_iotlb_flush(immu, 0, 0, 0, 0, IOTLB_GLOBAL);
1206 		immu_regs_wbf_flush(immu);
1207 
1208 		mutex_enter(&(immu->immu_lock));
1209 
1210 		/*
1211 		 * Set IOMMU unit's regs to do
1212 		 * the actual shutdown.
1213 		 */
1214 		immu_regs_shutdown(immu);
1215 		immu_regs_suspend(immu);
1216 
1217 		/* if immu is still running, we failed */
1218 		if (immu->immu_regs_running == B_TRUE)
1219 			ret = DDI_FAILURE;
1220 		else
1221 			immu->immu_regs_quiesced = B_TRUE;
1222 
1223 		mutex_exit(&(immu->immu_lock));
1224 	}
1225 	mutex_exit(&immu_lock);
1226 
1227 	if (ret == DDI_SUCCESS) {
1228 		immu_running = B_FALSE;
1229 		immu_quiesced = B_TRUE;
1230 	}
1231 
1232 	return (ret);
1233 }
1234 
1235 /*
1236  * immu_unquiesce()
1237  * 	unquiesce all units
1238  */
1239 int
1240 immu_unquiesce(void)
1241 {
1242 	immu_t *immu;
1243 	int ret = DDI_SUCCESS;
1244 
1245 	mutex_enter(&immu_lock);
1246 
1247 	if (immu_quiesced == B_FALSE)
1248 		return (DDI_SUCCESS);
1249 
1250 	ASSERT(immu_setup == B_TRUE);
1251 	ASSERT(immu_running == B_FALSE);
1252 
1253 	immu = list_head(&immu_list);
1254 	for (; immu; immu = list_next(&immu_list, immu)) {
1255 
1256 		mutex_enter(&(immu->immu_lock));
1257 
1258 		/* if immu was not quiesced, i.e was not running before */
1259 		if (immu->immu_regs_quiesced == B_FALSE) {
1260 			mutex_exit(&(immu->immu_lock));
1261 			continue;
1262 		}
1263 
1264 		if (immu_regs_resume(immu) != DDI_SUCCESS) {
1265 			ret = DDI_FAILURE;
1266 			mutex_exit(&(immu->immu_lock));
1267 			continue;
1268 		}
1269 
1270 		/* flush caches before unquiesce */
1271 		rw_enter(&(immu->immu_ctx_rwlock), RW_WRITER);
1272 		immu_regs_context_flush(immu, 0, 0, 0, CONTEXT_GLOBAL);
1273 		rw_exit(&(immu->immu_ctx_rwlock));
1274 		immu_regs_iotlb_flush(immu, 0, 0, 0, 0, IOTLB_GLOBAL);
1275 
1276 		/*
1277 		 * Set IOMMU unit's regs to do
1278 		 * the actual startup. This will
1279 		 * set immu->immu_regs_running  field
1280 		 * if the unit is successfully
1281 		 * started
1282 		 */
1283 		immu_regs_startup(immu);
1284 
1285 		if (immu->immu_regs_running == B_FALSE) {
1286 			ret = DDI_FAILURE;
1287 		} else {
1288 			immu_quiesced = B_TRUE;
1289 			immu_running = B_TRUE;
1290 			immu->immu_regs_quiesced = B_FALSE;
1291 		}
1292 
1293 		mutex_exit(&(immu->immu_lock));
1294 	}
1295 
1296 	mutex_exit(&immu_lock);
1297 
1298 	return (ret);
1299 }
1300 
1301 /* ##############  END Intel IOMMU entry points ################## */
1302