xref: /titanic_51/usr/src/uts/i86pc/io/immu.c (revision 3a634bfc9a31448c742688c603d3e76b83b041a0)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Portions Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 /*
26  * Copyright (c) 2009, Intel Corporation.
27  * All rights reserved.
28  */
29 
30 /*
31  * Intel IOMMU implementation
32  * This file contains Intel IOMMU code exported
33  * to the rest of the system and code that deals
34  * with the Intel IOMMU as a whole.
35  */
36 
37 #include <sys/conf.h>
38 #include <sys/modctl.h>
39 #include <sys/pci.h>
40 #include <sys/pci_impl.h>
41 #include <sys/sysmacros.h>
42 #include <sys/ddi.h>
43 #include <sys/ddidmareq.h>
44 #include <sys/ddi_impldefs.h>
45 #include <sys/ddifm.h>
46 #include <sys/sunndi.h>
47 #include <sys/debug.h>
48 #include <sys/fm/protocol.h>
49 #include <sys/note.h>
50 #include <sys/apic.h>
51 #include <vm/hat_i86.h>
52 #include <sys/smp_impldefs.h>
53 #include <sys/spl.h>
54 #include <sys/archsystm.h>
55 #include <sys/x86_archext.h>
56 #include <sys/rootnex.h>
57 #include <sys/avl.h>
58 #include <sys/bootconf.h>
59 #include <sys/bootinfo.h>
60 #include <sys/atomic.h>
61 #include <sys/immu.h>
62 
63 /* ########################### Globals and tunables ######################## */
64 /*
65  * Global switches (boolean) that can be toggled either via boot options
66  * or via /etc/system or kmdb
67  */
68 
69 /* Various features */
70 boolean_t immu_enable = B_TRUE;
71 boolean_t immu_dvma_enable = B_TRUE;
72 
73 /* accessed in other files so not static */
74 boolean_t immu_gfxdvma_enable = B_TRUE;
75 boolean_t immu_intrmap_enable = B_FALSE;
76 boolean_t immu_qinv_enable = B_FALSE;
77 
78 /* various quirks that need working around */
79 
80 /* XXX We always map page 0 read/write for now */
81 boolean_t immu_quirk_usbpage0 = B_TRUE;
82 boolean_t immu_quirk_usbrmrr = B_TRUE;
83 boolean_t immu_quirk_usbfullpa;
84 boolean_t immu_quirk_mobile4;
85 
86 boolean_t immu_mmio_safe = B_TRUE;
87 
88 /* debug messages */
89 boolean_t immu_dmar_print;
90 
91 /* ############  END OPTIONS section ################ */
92 
93 /*
94  * Global used internally by Intel IOMMU code
95  */
96 dev_info_t *root_devinfo;
97 kmutex_t immu_lock;
98 list_t immu_list;
99 boolean_t immu_setup;
100 boolean_t immu_running;
101 boolean_t immu_quiesced;
102 
103 /* ######################## END Globals and tunables ###################### */
104 /* Globals used only in this file */
105 static char **black_array;
106 static uint_t nblacks;
107 /* ###################### Utility routines ############################# */
108 
109 /*
110  * Check if the device has mobile 4 chipset
111  */
112 static int
113 check_mobile4(dev_info_t *dip, void *arg)
114 {
115 	_NOTE(ARGUNUSED(arg));
116 	int vendor, device;
117 	int *ip = (int *)arg;
118 
119 	ASSERT(arg);
120 
121 	vendor = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
122 	    "vendor-id", -1);
123 	device = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
124 	    "device-id", -1);
125 
126 	if (vendor == 0x8086 && device == 0x2a40) {
127 		*ip = B_TRUE;
128 		ddi_err(DER_NOTE, dip, "IMMU: Mobile 4 chipset detected. "
129 		    "Force setting IOMMU write buffer");
130 		return (DDI_WALK_TERMINATE);
131 	} else {
132 		return (DDI_WALK_CONTINUE);
133 	}
134 }
135 
136 static void
137 map_bios_rsvd_mem(dev_info_t *dip)
138 {
139 	struct memlist *mp;
140 	int e;
141 
142 	memlist_read_lock();
143 
144 	mp = bios_rsvd;
145 	while (mp != NULL) {
146 		memrng_t *mrng = {0};
147 
148 		ddi_err(DER_LOG, dip, "IMMU: Mapping BIOS rsvd range "
149 		    "[0x%" PRIx64 " - 0x%"PRIx64 "]\n", mp->ml_address,
150 		    mp->ml_address + mp->ml_size);
151 
152 		mrng->mrng_start = IMMU_ROUNDOWN(mp->ml_address);
153 		mrng->mrng_npages = IMMU_ROUNDUP(mp->ml_size) / IMMU_PAGESIZE;
154 
155 		e = immu_dvma_map(NULL, NULL, mrng, 0, dip, IMMU_FLAGS_MEMRNG);
156 		ASSERT(e == DDI_DMA_MAPPED || e == DDI_DMA_USE_PHYSICAL);
157 
158 		mp = mp->ml_next;
159 	}
160 
161 	memlist_read_unlock();
162 }
163 
164 /*
165  * Check if the device is USB controller
166  */
167 /*ARGSUSED*/
168 static void
169 check_usb(dev_info_t *dip, void *arg)
170 {
171 	const char *drv = ddi_driver_name(dip);
172 
173 	if (drv == NULL ||
174 	    (strcmp(drv, "uhci") != 0 && strcmp(drv, "ohci") != 0 &&
175 	    strcmp(drv, "ehci") != 0)) {
176 		return;
177 	}
178 
179 	/* This must come first since it does unity mapping */
180 	if (immu_quirk_usbfullpa == B_TRUE) {
181 		int e;
182 		ddi_err(DER_NOTE, dip, "Applying USB FULL PA quirk");
183 		e = immu_dvma_map(NULL, NULL, NULL, 0, dip, IMMU_FLAGS_UNITY);
184 		/* for unity mode, map will return USE_PHYSICAL */
185 		ASSERT(e == DDI_DMA_USE_PHYSICAL);
186 	}
187 
188 	if (immu_quirk_usbrmrr == B_TRUE) {
189 		ddi_err(DER_LOG, dip, "Applying USB RMRR quirk");
190 		map_bios_rsvd_mem(dip);
191 	}
192 }
193 
194 /*
195  * Check if the device is a LPC device
196  */
197 /*ARGSUSED*/
198 static void
199 check_lpc(dev_info_t *dip, void *arg)
200 {
201 	immu_devi_t *immu_devi;
202 
203 	immu_devi = immu_devi_get(dip);
204 	ASSERT(immu_devi);
205 	if (immu_devi->imd_lpc == B_TRUE) {
206 		ddi_err(DER_LOG, dip, "IMMU: Found LPC device");
207 		/* This will put the immu_devi on the LPC "specials" list */
208 		(void) immu_dvma_get_immu(dip, IMMU_FLAGS_SLEEP);
209 	}
210 }
211 
212 /*
213  * Check if the device is a GFX device
214  */
215 /*ARGSUSED*/
216 static void
217 check_gfx(dev_info_t *dip, void *arg)
218 {
219 	immu_devi_t *immu_devi;
220 	int e;
221 
222 	immu_devi = immu_devi_get(dip);
223 	ASSERT(immu_devi);
224 	if (immu_devi->imd_display == B_TRUE) {
225 		ddi_err(DER_LOG, dip, "IMMU: Found GFX device");
226 		/* This will put the immu_devi on the GFX "specials" list */
227 		(void) immu_dvma_get_immu(dip, IMMU_FLAGS_SLEEP);
228 		e = immu_dvma_map(NULL, NULL, NULL, 0, dip, IMMU_FLAGS_UNITY);
229 		/* for unity mode, map will return USE_PHYSICAL */
230 		ASSERT(e == DDI_DMA_USE_PHYSICAL);
231 	}
232 }
233 
234 static void
235 walk_tree(int (*f)(dev_info_t *, void *), void *arg)
236 {
237 	int count;
238 
239 	ndi_devi_enter(root_devinfo, &count);
240 	ddi_walk_devs(ddi_get_child(root_devinfo), f, arg);
241 	ndi_devi_exit(root_devinfo, count);
242 }
243 
244 static int
245 check_pre_setup_quirks(dev_info_t *dip, void *arg)
246 {
247 	/* just 1 check right now */
248 	return (check_mobile4(dip, arg));
249 }
250 
251 static int
252 check_pre_startup_quirks(dev_info_t *dip, void *arg)
253 {
254 	if (immu_devi_set(dip, IMMU_FLAGS_SLEEP) != DDI_SUCCESS) {
255 		ddi_err(DER_PANIC, dip, "Failed to get immu_devi");
256 	}
257 
258 	check_gfx(dip, arg);
259 
260 	check_lpc(dip, arg);
261 
262 	check_usb(dip, arg);
263 
264 	return (DDI_WALK_CONTINUE);
265 }
266 
267 static void
268 pre_setup_quirks(void)
269 {
270 	walk_tree(check_pre_setup_quirks, &immu_quirk_mobile4);
271 }
272 
273 static void
274 pre_startup_quirks(void)
275 {
276 	walk_tree(check_pre_startup_quirks, NULL);
277 
278 	immu_dmar_rmrr_map();
279 }
280 
281 /*
282  * get_bootopt()
283  * 	check a boot option  (always a boolean)
284  */
285 static void
286 get_bootopt(char *bopt, boolean_t *kvar)
287 {
288 	char *val = NULL;
289 
290 	ASSERT(bopt);
291 	ASSERT(kvar);
292 
293 	/*
294 	 * All boot options set at the GRUB menu become
295 	 * properties on the rootnex.
296 	 */
297 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, root_devinfo,
298 	    DDI_PROP_DONTPASS, bopt, &val) == DDI_SUCCESS) {
299 		ASSERT(val);
300 		if (strcmp(val, "true") == 0) {
301 			*kvar = B_TRUE;
302 		} else if (strcmp(val, "false") == 0) {
303 			*kvar = B_FALSE;
304 		} else {
305 			ddi_err(DER_WARN, NULL, "boot option %s=\"%s\" ",
306 			    "is not set to true or false. Ignoring option.",
307 			    bopt, val);
308 		}
309 		ddi_prop_free(val);
310 	}
311 }
312 
313 static void
314 read_boot_options(void)
315 {
316 	/* enable/disable options */
317 	get_bootopt("immu-enable", &immu_enable);
318 	get_bootopt("immu-dvma-enable", &immu_dvma_enable);
319 	get_bootopt("immu-gfxdvma-enable", &immu_gfxdvma_enable);
320 	get_bootopt("immu-intrmap-enable", &immu_intrmap_enable);
321 	get_bootopt("immu-qinv-enable", &immu_qinv_enable);
322 	get_bootopt("immu-mmio-safe", &immu_mmio_safe);
323 
324 	/* workaround switches */
325 	get_bootopt("immu-quirk-usbpage0", &immu_quirk_usbpage0);
326 	get_bootopt("immu-quirk-usbfullpa", &immu_quirk_usbfullpa);
327 	get_bootopt("immu-quirk-usbrmrr", &immu_quirk_usbrmrr);
328 
329 	/* debug printing */
330 	get_bootopt("immu-dmar-print", &immu_dmar_print);
331 }
332 
333 /*
334  * Note, this will not catch hardware not enumerated
335  * in early boot
336  */
static boolean_t
blacklisted_driver(void)
{
	char **strptr;
	int i;
	major_t maj;

	/* black_array and nblacks are set (and cleared) together */
	ASSERT((black_array == NULL) ^ (nblacks != 0));

	/* need at least 2 strings */
	if (nblacks < 2) {
		return (B_FALSE);
	}

	/*
	 * Scan the blacklist for ("DRIVER", <drivername>) pairs.
	 * NOTE: strptr advances via side effects of the strcmp()
	 * arguments below, so statement order is significant.
	 */
	strptr = black_array;
	for (i = 0; nblacks - i > 1; i++) {
		if (strcmp(*strptr++, "DRIVER") == 0) {
			if ((maj = ddi_name_to_major(*strptr++))
			    != DDI_MAJOR_T_NONE) {
				/* is there hardware bound to this drvr */
				if (devnamesp[maj].dn_head != NULL) {
					return (B_TRUE);
				}
			}
			i += 1;   /* for loop adds 1, so add only 1 here */
		}
	}

	return (B_FALSE);
}
367 
368 static boolean_t
369 blacklisted_smbios(void)
370 {
371 	id_t smid;
372 	smbios_hdl_t *smhdl;
373 	smbios_info_t sminf;
374 	smbios_system_t smsys;
375 	char *mfg, *product, *version;
376 	char **strptr;
377 	int i;
378 
379 	ASSERT((black_array == NULL) ^ (nblacks != 0));
380 
381 	/* need at least 4 strings for this setting */
382 	if (nblacks < 4) {
383 		return (B_FALSE);
384 	}
385 
386 	smhdl = smbios_open(NULL, SMB_VERSION, ksmbios_flags, NULL);
387 	if (smhdl == NULL ||
388 	    (smid = smbios_info_system(smhdl, &smsys)) == SMB_ERR ||
389 	    smbios_info_common(smhdl, smid, &sminf) == SMB_ERR) {
390 		return (B_FALSE);
391 	}
392 
393 	mfg = (char *)sminf.smbi_manufacturer;
394 	product = (char *)sminf.smbi_product;
395 	version = (char *)sminf.smbi_version;
396 
397 	ddi_err(DER_CONT, NULL, "?System SMBIOS information:\n");
398 	ddi_err(DER_CONT, NULL, "?Manufacturer = <%s>\n", mfg);
399 	ddi_err(DER_CONT, NULL, "?Product = <%s>\n", product);
400 	ddi_err(DER_CONT, NULL, "?Version = <%s>\n", version);
401 
402 	strptr = black_array;
403 	for (i = 0; nblacks - i > 3; i++) {
404 		if (strcmp(*strptr++, "SMBIOS") == 0) {
405 			if (strcmp(*strptr++, mfg) == 0 &&
406 			    ((char *)strptr == '\0' ||
407 			    strcmp(*strptr++, product) == 0) &&
408 			    ((char *)strptr == '\0' ||
409 			    strcmp(*strptr++, version) == 0)) {
410 				return (B_TRUE);
411 			}
412 			i += 3;
413 		}
414 	}
415 
416 	return (B_FALSE);
417 }
418 
419 static boolean_t
420 blacklisted_acpi(void)
421 {
422 	ASSERT((black_array == NULL) ^ (nblacks != 0));
423 	if (nblacks == 0) {
424 		return (B_FALSE);
425 	}
426 
427 	return (immu_dmar_blacklisted(black_array, nblacks));
428 }
429 
430 /*
431  * Check if system is blacklisted by Intel IOMMU driver
432  * i.e. should Intel IOMMU be disabled on this system
 * Currently a system can be blacklisted based on the
434  * following bases:
435  *
436  * 1. DMAR ACPI table information.
437  *    This information includes things like
438  *    manufacturer and revision number. If rootnex.conf
439  *    has matching info set in its blacklist property
 *    then Intel IOMMU will be disabled
441  *
442  * 2. SMBIOS information
443  *
444  * 3. Driver installed - useful if a particular
445  *    driver or hardware is toxic if Intel IOMMU
446  *    is turned on.
447  */
448 
449 static void
450 blacklist_setup(void)
451 {
452 	char **string_array;
453 	uint_t nstrings;
454 
455 	/*
456 	 * Check the rootnex.conf blacklist property.
457 	 * Fake up a dev_t since searching the global
458 	 * property list needs it
459 	 */
460 	if (ddi_prop_lookup_string_array(
461 	    makedevice(ddi_name_to_major("rootnex"), 0), root_devinfo,
462 	    DDI_PROP_DONTPASS | DDI_PROP_ROOTNEX_GLOBAL, "immu-blacklist",
463 	    &string_array, &nstrings) != DDI_PROP_SUCCESS) {
464 		return;
465 	}
466 
467 	/* smallest blacklist criteria works with multiples of 2 */
468 	if (nstrings % 2 != 0) {
469 		ddi_err(DER_WARN, NULL, "Invalid IOMMU blacklist "
470 		    "rootnex.conf: number of strings must be a "
471 		    "multiple of 2");
472 		ddi_prop_free(string_array);
473 		return;
474 	}
475 
476 	black_array = string_array;
477 	nblacks = nstrings;
478 }
479 
480 static void
481 blacklist_destroy(void)
482 {
483 	if (black_array) {
484 		ddi_prop_free(black_array);
485 		black_array = NULL;
486 		nblacks = 0;
487 	}
488 
489 	ASSERT(black_array == NULL);
490 	ASSERT(nblacks == 0);
491 }
492 

/*
 * immu_state_alloc()
 * 	Allocate and initialize the immu_t state structure for the next
 * 	DMAR unit in the given segment, link it onto the system-wide
 * 	immu_list, and return the dmar_unit handle for iteration
 * 	(NULL when the segment has no more units).
 *
 * 	We initialize the fields in the order they are defined.  This
 * 	is only a defensive-coding practice, not a correctness issue.
 */
static void *
immu_state_alloc(int seg, void *dmar_unit)
{
	immu_t *immu;

	dmar_unit = immu_dmar_walk_units(seg, dmar_unit);
	if (dmar_unit == NULL) {
		/* No more IOMMUs in this segment */
		return (NULL);
	}

	immu = kmem_zalloc(sizeof (immu_t), KM_SLEEP);

	mutex_init(&(immu->immu_lock), NULL, MUTEX_DRIVER, NULL);

	/* hold the unit's lock while it is visible on immu_list */
	mutex_enter(&(immu->immu_lock));

	immu->immu_dmar_unit = dmar_unit;
	immu->immu_name = ddi_strdup(immu_dmar_unit_name(dmar_unit),
	    KM_SLEEP);
	immu->immu_dip = immu_dmar_unit_dip(dmar_unit);

	/*
	 * the immu_intr_lock mutex is grabbed by the IOMMU
	 * unit's interrupt handler so we need to use an
	 * interrupt cookie for the mutex
	 */
	mutex_init(&(immu->immu_intr_lock), NULL, MUTEX_DRIVER,
	    (void *)ipltospl(IMMU_INTR_IPL));

	/* IOMMU regs related */
	mutex_init(&(immu->immu_regs_lock), NULL, MUTEX_DEFAULT, NULL);

	/* DVMA related */
	immu->immu_dvma_coherent = B_FALSE;

	/* DVMA context related */
	rw_init(&(immu->immu_ctx_rwlock), NULL, RW_DEFAULT, NULL);

	/* DVMA domain related */
	list_create(&(immu->immu_domain_list), sizeof (domain_t),
	    offsetof(domain_t, dom_immu_node));

	/* DVMA special device lists */
	immu->immu_dvma_gfx_only = B_FALSE;
	list_create(&(immu->immu_dvma_lpc_list), sizeof (immu_devi_t),
	    offsetof(immu_devi_t, imd_spc_node));
	list_create(&(immu->immu_dvma_gfx_list), sizeof (immu_devi_t),
	    offsetof(immu_devi_t, imd_spc_node));

	/* interrupt remapping related */
	mutex_init(&(immu->immu_intrmap_lock), NULL, MUTEX_DEFAULT, NULL);

	/* qinv related */
	mutex_init(&(immu->immu_qinv_lock), NULL, MUTEX_DEFAULT, NULL);

	/*
	 * insert this immu unit into the system-wide list
	 */
	list_insert_tail(&immu_list, immu);

	mutex_exit(&(immu->immu_lock));

	ddi_err(DER_LOG, immu->immu_dip, "IMMU: unit setup");

	/* cross-link: let the DMAR unit point back at this immu_t */
	immu_dmar_set_immu(dmar_unit, immu);

	return (dmar_unit);
}
568 
569 static void
570 immu_subsystems_setup(void)
571 {
572 	int seg;
573 	void *unit_hdl;
574 
575 	ddi_err(DER_VERB, NULL,
576 	    "Creating state structures for Intel IOMMU units\n");
577 
578 	ASSERT(immu_setup == B_FALSE);
579 	ASSERT(immu_running == B_FALSE);
580 
581 	mutex_init(&immu_lock, NULL, MUTEX_DEFAULT, NULL);
582 	list_create(&immu_list, sizeof (immu_t), offsetof(immu_t, immu_node));
583 
584 	mutex_enter(&immu_lock);
585 
586 	unit_hdl = NULL;
587 	for (seg = 0; seg < IMMU_MAXSEG; seg++) {
588 		while (unit_hdl = immu_state_alloc(seg, unit_hdl)) {
589 			;
590 		}
591 	}
592 
593 	immu_regs_setup(&immu_list);	/* subsequent code needs this first */
594 	immu_dvma_setup(&immu_list);
595 	immu_intrmap_setup(&immu_list);
596 	immu_qinv_setup(&immu_list);
597 
598 	mutex_exit(&immu_lock);
599 }
600 
601 /*
602  * immu_subsystems_startup()
603  * 	startup all units that were setup
604  */
605 static void
606 immu_subsystems_startup(void)
607 {
608 	immu_t *immu;
609 
610 	mutex_enter(&immu_lock);
611 
612 	ASSERT(immu_setup == B_TRUE);
613 	ASSERT(immu_running == B_FALSE);
614 
615 	immu_dmar_startup();
616 
617 	immu = list_head(&immu_list);
618 	for (; immu; immu = list_next(&immu_list, immu)) {
619 
620 		mutex_enter(&(immu->immu_lock));
621 
622 		immu_intr_register(immu);
623 		immu_dvma_startup(immu);
624 		immu_intrmap_startup(immu);
625 		immu_qinv_startup(immu);
626 
627 		/*
628 		 * Set IOMMU unit's regs to do
629 		 * the actual startup. This will
630 		 * set immu->immu_running  field
631 		 * if the unit is successfully
632 		 * started
633 		 */
634 		immu_regs_startup(immu);
635 
636 		mutex_exit(&(immu->immu_lock));
637 	}
638 
639 	mutex_exit(&immu_lock);
640 }
641 
642 /* ##################  Intel IOMMU internal interfaces ###################### */
643 
644 /*
645  * Internal interfaces for IOMMU code (i.e. not exported to rootnex
646  * or rest of system)
647  */
648 
/*
 * immu_walk_ancestor()
 * 	Walk from rdip up through its ancestors, calling func() on each
 * 	node, setting up an immu_devi on each node along the way.
 * 	ddip can be NULL, in which case we walk up until we find the root
 * 	dip.  NOTE: We never visit the root dip since its not a hardware
 * 	node.  On return, *lvlp (if non-NULL) holds the number of levels
 * 	visited.  Returns DDI_FAILURE if immu_devi_set() fails on any
 * 	ancestor, DDI_SUCCESS otherwise.
 */
int
immu_walk_ancestor(
	dev_info_t *rdip,
	dev_info_t *ddip,
	int (*func)(dev_info_t *, void *arg),
	void *arg,
	int *lvlp,
	immu_flags_t immu_flags)
{
	dev_info_t *pdip;
	int level;
	int error = DDI_SUCCESS;

	ASSERT(root_devinfo);
	ASSERT(rdip);
	ASSERT(rdip != root_devinfo);
	ASSERT(func);

	/* ddip and immu can be NULL */

	/* Hold rdip so that branch is not detached */
	ndi_hold_devi(rdip);
	for (pdip = rdip, level = 1; pdip && pdip != root_devinfo;
	    pdip = ddi_get_parent(pdip), level++) {

		/* each ancestor must have an immu_devi before the call */
		if (immu_devi_set(pdip, immu_flags) != DDI_SUCCESS) {
			error = DDI_FAILURE;
			break;
		}
		/* callback may terminate the walk early */
		if (func(pdip, arg) == DDI_WALK_TERMINATE) {
			break;
		}
		/* DONTPASS: visit only rdip itself, not its ancestors */
		if (immu_flags & IMMU_FLAGS_DONTPASS) {
			break;
		}
		/* ddip (if given) is the last node to visit */
		if (pdip == ddip) {
			break;
		}
	}

	ndi_rele_devi(rdip);

	if (lvlp)
		*lvlp = level;

	return (error);
}
700 
701 /* ########################  Intel IOMMU entry points ####################### */
/*
 * immu_init()
 *	called from rootnex_attach(). setup but don't startup the Intel IOMMU
 *      This is the first function called in Intel IOMMU code
 */
void
immu_init(void)
{
	/* placeholder "reg" property value; content is arbitrary */
	char *phony_reg = "A thing of beauty is a joy forever";

	/* Set some global shorthands that are needed by all of IOMMU code */
	ASSERT(root_devinfo == NULL);
	root_devinfo = ddi_root_node();

	/*
	 * Intel IOMMU is supported only if MMU(CPU) page size is ==
	 * IOMMU pages size.
	 */
	/*LINTED*/
	if (MMU_PAGESIZE != IMMU_PAGESIZE) {
		ddi_err(DER_WARN, NULL,
		    "MMU page size (%d) is not equal to\n"
		    "IOMMU page size (%d). "
		    "Disabling Intel IOMMU. ",
		    MMU_PAGESIZE, IMMU_PAGESIZE);
		immu_enable = B_FALSE;
		return;
	}

	/*
	 * retrieve the Intel IOMMU boot options.
	 * Do this before parsing immu ACPI table
	 * as a boot option could potentially affect
	 * ACPI parsing.
	 */
	ddi_err(DER_CONT, NULL, "?Reading Intel IOMMU boot options\n");
	read_boot_options();

	/*
	 * Check the IOMMU enable boot-option first.
	 * This is so that we can skip parsing the ACPI table
	 * if necessary because that may cause problems in
	 * systems with buggy BIOS or ACPI tables
	 */
	if (immu_enable == B_FALSE) {
		return;
	}

	/*
	 * Next, check if the system even has an Intel IOMMU
	 * We use the presence or absence of the IOMMU ACPI
	 * table to detect Intel IOMMU.
	 */
	if (immu_dmar_setup() != DDI_SUCCESS) {
		immu_enable = B_FALSE;
		return;
	}

	/*
	 * Check blacklists
	 */
	blacklist_setup();

	if (blacklisted_smbios() == B_TRUE) {
		blacklist_destroy();
		immu_enable = B_FALSE;
		return;
	}

	if (blacklisted_driver() == B_TRUE) {
		blacklist_destroy();
		immu_enable = B_FALSE;
		return;
	}

	/*
	 * Read the "raw" DMAR ACPI table to get information
	 * and convert into a form we can use.
	 */
	if (immu_dmar_parse() != DDI_SUCCESS) {
		blacklist_destroy();
		immu_enable = B_FALSE;
		return;
	}

	/*
	 * now that we have processed the ACPI table
	 * check if we need to blacklist this system
	 * based on ACPI info
	 */
	if (blacklisted_acpi() == B_TRUE) {
		immu_dmar_destroy();
		blacklist_destroy();
		immu_enable = B_FALSE;
		return;
	}

	/* blacklist checks are done; free the string array */
	blacklist_destroy();

	/*
	 * Check if system has HW quirks.
	 */
	pre_setup_quirks();

	/* Now do the rest of the setup */
	immu_subsystems_setup();

	/*
	 * Now that the IMMU is setup, create a phony
	 * reg prop so that suspend/resume works
	 */
	if (ddi_prop_update_byte_array(DDI_DEV_T_NONE, root_devinfo, "reg",
	    (uchar_t *)phony_reg, strlen(phony_reg) + 1) != DDI_PROP_SUCCESS) {
		ddi_err(DER_PANIC, NULL, "Failed to create reg prop for "
		    "rootnex node");
		/*NOTREACHED*/
	}

	immu_setup = B_TRUE;
}
822 
823 /*
824  * immu_startup()
825  * 	called directly by boot code to startup
826  *      all units of the IOMMU
827  */
828 void
829 immu_startup(void)
830 {
831 	/*
832 	 * If IOMMU is disabled, do nothing
833 	 */
834 	if (immu_enable == B_FALSE) {
835 		return;
836 	}
837 
838 	if (immu_setup == B_FALSE) {
839 		ddi_err(DER_WARN, NULL, "Intel IOMMU not setup, "
840 		    "skipping IOMU startup");
841 		return;
842 	}
843 
844 	pre_startup_quirks();
845 
846 	ddi_err(DER_CONT, NULL,
847 	    "?Starting Intel IOMMU (dmar) units...\n");
848 
849 	immu_subsystems_startup();
850 
851 	immu_running = B_TRUE;
852 }
853 
854 /*
855  * immu_map_sgl()
856  * 	called from rootnex_coredma_bindhdl() when Intel
857  *	IOMMU is enabled to build DVMA cookies and map them.
858  */
859 int
860 immu_map_sgl(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
861     int prealloc_count, dev_info_t *rdip)
862 {
863 	if (immu_running == B_FALSE) {
864 		return (DDI_DMA_USE_PHYSICAL);
865 	}
866 
867 	return (immu_dvma_map(hp, dmareq, NULL, prealloc_count, rdip,
868 	    IMMU_FLAGS_DMAHDL));
869 }
870 
871 /*
872  * immu_unmap_sgl()
873  * 	called from rootnex_coredma_unbindhdl(), to unmap DVMA
874  * 	cookies and free them
875  */
876 int
877 immu_unmap_sgl(ddi_dma_impl_t *hp, dev_info_t *rdip)
878 {
879 	if (immu_running == B_FALSE) {
880 		return (DDI_DMA_USE_PHYSICAL);
881 	}
882 
883 	return (immu_dvma_unmap(hp, rdip));
884 }
885 
886 /*
887  * Hook to notify IOMMU code of device tree changes
888  */
889 void
890 immu_device_tree_changed(void)
891 {
892 	if (immu_setup == B_FALSE) {
893 		return;
894 	}
895 
896 	ddi_err(DER_WARN, NULL, "Intel IOMMU currently "
897 	    "does not use device tree updates");
898 }
899 
900 /*
901  * Hook to notify IOMMU code of memory changes
902  */
903 void
904 immu_physmem_update(uint64_t addr, uint64_t size)
905 {
906 	if (immu_setup == B_FALSE) {
907 		return;
908 	}
909 	immu_dvma_physmem_update(addr, size);
910 }
911 
912 /*
913  * immu_quiesce()
914  * 	quiesce all units that are running
915  */
916 int
917 immu_quiesce(void)
918 {
919 	immu_t *immu;
920 	int ret = DDI_SUCCESS;
921 
922 	mutex_enter(&immu_lock);
923 
924 	if (immu_running == B_FALSE)
925 		return (DDI_SUCCESS);
926 
927 	ASSERT(immu_setup == B_TRUE);
928 
929 	immu = list_head(&immu_list);
930 	for (; immu; immu = list_next(&immu_list, immu)) {
931 
932 		/* if immu is not running, we dont quiesce */
933 		if (immu->immu_regs_running == B_FALSE)
934 			continue;
935 
936 		/* flush caches */
937 		rw_enter(&(immu->immu_ctx_rwlock), RW_WRITER);
938 		immu_regs_context_flush(immu, 0, 0, 0, CONTEXT_GLOBAL);
939 		rw_exit(&(immu->immu_ctx_rwlock));
940 		immu_regs_iotlb_flush(immu, 0, 0, 0, 0, IOTLB_GLOBAL);
941 		immu_regs_wbf_flush(immu);
942 
943 		mutex_enter(&(immu->immu_lock));
944 
945 		/*
946 		 * Set IOMMU unit's regs to do
947 		 * the actual shutdown.
948 		 */
949 		immu_regs_shutdown(immu);
950 		immu_regs_suspend(immu);
951 
952 		/* if immu is still running, we failed */
953 		if (immu->immu_regs_running == B_TRUE)
954 			ret = DDI_FAILURE;
955 		else
956 			immu->immu_regs_quiesced = B_TRUE;
957 
958 		mutex_exit(&(immu->immu_lock));
959 	}
960 	mutex_exit(&immu_lock);
961 
962 	if (ret == DDI_SUCCESS) {
963 		immu_running = B_FALSE;
964 		immu_quiesced = B_TRUE;
965 	}
966 
967 	return (ret);
968 }
969 
970 /*
971  * immu_unquiesce()
972  * 	unquiesce all units
973  */
974 int
975 immu_unquiesce(void)
976 {
977 	immu_t *immu;
978 	int ret = DDI_SUCCESS;
979 
980 	mutex_enter(&immu_lock);
981 
982 	if (immu_quiesced == B_FALSE)
983 		return (DDI_SUCCESS);
984 
985 	ASSERT(immu_setup == B_TRUE);
986 	ASSERT(immu_running == B_FALSE);
987 
988 	immu = list_head(&immu_list);
989 	for (; immu; immu = list_next(&immu_list, immu)) {
990 
991 		mutex_enter(&(immu->immu_lock));
992 
993 		/* if immu was not quiesced, i.e was not running before */
994 		if (immu->immu_regs_quiesced == B_FALSE)
995 			continue;
996 
997 		if (immu_regs_resume(immu) != DDI_SUCCESS) {
998 			ret = DDI_FAILURE;
999 			continue;
1000 		}
1001 
1002 		/* flush caches before unquiesce */
1003 		rw_enter(&(immu->immu_ctx_rwlock), RW_WRITER);
1004 		immu_regs_context_flush(immu, 0, 0, 0, CONTEXT_GLOBAL);
1005 		rw_exit(&(immu->immu_ctx_rwlock));
1006 		immu_regs_iotlb_flush(immu, 0, 0, 0, 0, IOTLB_GLOBAL);
1007 
1008 		/*
1009 		 * Set IOMMU unit's regs to do
1010 		 * the actual startup. This will
1011 		 * set immu->immu_regs_running  field
1012 		 * if the unit is successfully
1013 		 * started
1014 		 */
1015 		immu_regs_startup(immu);
1016 
1017 		if (immu->immu_regs_running == B_FALSE) {
1018 			ret = DDI_FAILURE;
1019 		} else {
1020 			immu_quiesced = B_TRUE;
1021 			immu_running = B_TRUE;
1022 			immu->immu_regs_quiesced = B_FALSE;
1023 		}
1024 
1025 		mutex_exit(&(immu->immu_lock));
1026 	}
1027 
1028 	mutex_exit(&immu_lock);
1029 
1030 	return (ret);
1031 }
1032 
1033 /* ##############  END Intel IOMMU entry points ################## */
1034