1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Portions Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * immu_regs.c  - Routines that operate on an IMMU unit's registers
28  */
29 #include <sys/dditypes.h>
30 #include <sys/ddi.h>
31 #include <sys/archsystm.h>
32 #include <sys/x86_archext.h>
33 #include <sys/spl.h>
34 #include <sys/sysmacros.h>
35 #include <sys/immu.h>
36 
37 #define	get_reg32(immu, offset)	ddi_get32((immu)->immu_regs_handle, \
38 		(uint32_t *)(immu->immu_regs_addr + (offset)))
39 #define	get_reg64(immu, offset)	ddi_get64((immu)->immu_regs_handle, \
40 		(uint64_t *)(immu->immu_regs_addr + (offset)))
41 #define	put_reg32(immu, offset, val)	ddi_put32\
42 		((immu)->immu_regs_handle, \
43 		(uint32_t *)(immu->immu_regs_addr + (offset)), val)
44 #define	put_reg64(immu, offset, val)	ddi_put64\
45 		((immu)->immu_regs_handle, \
46 		(uint64_t *)(immu->immu_regs_addr + (offset)), val)
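/*
 * The get_reg/put_reg macros are thin wrappers around the DDI register
 * access routines; "offset" is a byte offset into the unit's
 * memory-mapped register space (immu_regs_addr).
 */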
47 
48 /*
49  * wait at most 60 seconds for hardware command completion
50  */
51 #define	IMMU_MAX_WAIT_TIME		60000000
52 #define	wait_completion(immu, offset, getf, completion, status) \
53 { \
54 	clock_t stick = ddi_get_lbolt(); \
55 	clock_t ntick; \
56 	_NOTE(CONSTCOND) \
57 	while (1) { \
58 		status = getf(immu, offset); \
59 		ntick = ddi_get_lbolt(); \
60 		if (completion) { \
61 			break; \
62 		} \
63 		if (ntick - stick >= drv_usectohz(IMMU_MAX_WAIT_TIME)) { \
64 			ddi_err(DER_PANIC, NULL, \
65 			    "immu wait completion time out");		\
66 			/*NOTREACHED*/   \
67 		} else { \
68 			iommu_cpu_nop();\
69 		}\
70 	}\
71 }
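/*
 * Typical use of wait_completion() (a sketch of the pattern used
 * throughout this file): write a command register, then poll the
 * matching status register until the completion condition holds, e.g.
 *
 *	put_reg32(immu, IMMU_REG_GLOBAL_CMD, cmdval | IMMU_GCMD_TE);
 *	wait_completion(immu, IMMU_REG_GLOBAL_STS, get_reg32,
 *	    (status & IMMU_GSTS_TES), status);
 *
 * "status" must be a variable in the caller's scope; the macro panics
 * via ddi_err(DER_PANIC) if the condition does not become true within
 * IMMU_MAX_WAIT_TIME.
 */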
72 
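/*
 * Access attributes for the memory-mapped register space: accesses must
 * not be byte-swapped and must be strictly ordered.
 */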
73 static ddi_device_acc_attr_t immu_regs_attr = {
74 	DDI_DEVICE_ATTR_V0,
75 	DDI_NEVERSWAP_ACC,
76 	DDI_STRICTORDER_ACC,
77 };
78 
79 /*
80  * iotlb_flush()
81  *   flush the iotlb cache
82  */
83 static void
84 iotlb_flush(immu_t *immu, uint_t domain_id,
85     uint64_t addr, uint_t am, uint_t hint, immu_iotlb_inv_t type)
86 {
87 	uint64_t command = 0, iva = 0;
88 	uint_t iva_offset, iotlb_offset;
89 	uint64_t status = 0;
90 
91 	/* no lock needed since cap and excap fields are RDONLY */
92 	iva_offset = IMMU_ECAP_GET_IRO(immu->immu_regs_excap);
93 	iotlb_offset = iva_offset + 8;
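	/*
	 * The IOTLB invalidation registers live at the offset reported by
	 * the extended capability register: the IVA register first, with
	 * the IOTLB invalidate register 8 bytes after it.
	 */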
94 
95 	/*
96 	 * prepare drain read/write command
97 	 */
98 	if (IMMU_CAP_GET_DWD(immu->immu_regs_cap)) {
99 		command |= TLB_INV_DRAIN_WRITE;
100 	}
101 
102 	if (IMMU_CAP_GET_DRD(immu->immu_regs_cap)) {
103 		command |= TLB_INV_DRAIN_READ;
104 	}
105 
106 	/*
107 	 * Build the invalidation command for the requested type. Callers fall
108 	 * back to domain selective invalidation if PSI is unsupported.
109 	 */
110 	switch (type) {
111 	case IOTLB_PSI:
112 		ASSERT(IMMU_CAP_GET_PSI(immu->immu_regs_cap));
113 		ASSERT(am <= IMMU_CAP_GET_MAMV(immu->immu_regs_cap));
114 		ASSERT(!(addr & IMMU_PAGEOFFSET));
115 		command |= TLB_INV_PAGE | TLB_INV_IVT |
116 		    TLB_INV_DID(domain_id);
117 		iva = addr | am | TLB_IVA_HINT(hint);
118 		break;
119 	case IOTLB_DSI:
120 		command |= TLB_INV_DOMAIN | TLB_INV_IVT |
121 		    TLB_INV_DID(domain_id);
122 		break;
123 	case IOTLB_GLOBAL:
124 		command |= TLB_INV_GLOBAL | TLB_INV_IVT;
125 		break;
126 	default:
127 		ddi_err(DER_MODE, NULL, "%s: incorrect iotlb flush type",
128 		    immu->immu_name);
129 		return;
130 	}
131 
132 	ASSERT(!(status & TLB_INV_IVT));
133 	if (iva)
134 		put_reg64(immu, iva_offset, iva);
135 	put_reg64(immu, iotlb_offset, command);
136 	wait_completion(immu, iotlb_offset, get_reg64,
137 	    (!(status & TLB_INV_IVT)), status);
138 }
139 
140 /*
141  * iotlb_psi()
142  *   iotlb page specific invalidation
143  */
144 static void
145 iotlb_psi(immu_t *immu, uint_t did, uint64_t dvma, uint_t snpages,
146     uint_t hint)
147 {
148 	int dvma_am;
149 	int npg_am;
150 	int max_am;
151 	int am;
152 	uint64_t align;
153 	int npages_left;
154 	int npages;
155 	int i;
156 
157 	ASSERT(IMMU_CAP_GET_PSI(immu->immu_regs_cap));
158 	ASSERT(dvma % IMMU_PAGESIZE == 0);
159 
160 	max_am = IMMU_CAP_GET_MAMV(immu->immu_regs_cap);
161 
162 	mutex_enter(&(immu->immu_regs_lock));
163 
164 	npages_left = snpages;
165 	for (i = 0; i < immu_flush_gran && npages_left > 0; i++) {
166 		/* First calculate alignment of DVMA */
167 
168 		if (dvma == 0) {
169 			dvma_am = max_am;
170 		} else {
171 			for (align = (1 << 12), dvma_am = 1;
172 			    (dvma & align) == 0; align <<= 1, dvma_am++)
173 				;
174 			dvma_am--;
175 		}
176 
177 		/* Calculate the npg_am */
178 		npages = npages_left;
179 		for (npg_am = 0, npages >>= 1; npages; npages >>= 1, npg_am++)
180 			;
181 
182 		am = MIN(max_am, MIN(dvma_am, npg_am));
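		/*
		 * Worked example (hypothetical values): dvma = 0x10000 has
		 * its lowest set bit at bit 16, so dvma_am = 4; with
		 * npages_left = 8, npg_am = 3; thus am = MIN(max_am,
		 * MIN(4, 3)) and, assuming max_am >= 3, a single PSI
		 * command covers 2^3 = 8 pages.
		 */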
183 
184 		iotlb_flush(immu, did, dvma, am, hint, IOTLB_PSI);
185 
186 		npages = (1 << am);
187 		npages_left -= npages;
188 		dvma += (npages * IMMU_PAGESIZE);
189 	}
190 
191 	if (npages_left) {
192 		iotlb_flush(immu, did, 0, 0, 0, IOTLB_DSI);
193 	}
194 	mutex_exit(&(immu->immu_regs_lock));
195 }
196 
197 /*
198  * iotlb_dsi()
199  *	domain specific invalidation
200  */
201 static void
202 iotlb_dsi(immu_t *immu, uint_t domain_id)
203 {
204 	mutex_enter(&(immu->immu_regs_lock));
205 	iotlb_flush(immu, domain_id, 0, 0, 0, IOTLB_DSI);
206 	mutex_exit(&(immu->immu_regs_lock));
207 }
208 
209 /*
210  * iotlb_global()
211  *     global iotlb invalidation
212  */
213 static void
214 iotlb_global(immu_t *immu)
215 {
216 	mutex_enter(&(immu->immu_regs_lock));
217 	iotlb_flush(immu, 0, 0, 0, 0, IOTLB_GLOBAL);
218 	mutex_exit(&(immu->immu_regs_lock));
219 }
220 
221 
222 static int
223 gaw2agaw(int gaw)
224 {
225 	int r, agaw;
226 
227 	r = (gaw - 12) % 9;
228 
229 	if (r == 0)
230 		agaw = gaw;
231 	else
232 		agaw = gaw + 9 - r;
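	/*
	 * e.g. gaw = 40: r = (40 - 12) % 9 = 1, so agaw = 40 + 9 - 1 = 48;
	 * gaw = 48 has r == 0 and is returned unchanged.
	 */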
233 
234 	if (agaw > 64)
235 		agaw = 64;
236 
237 	return (agaw);
238 }
239 
240 /*
241  * set_agaw()
242  * 	calculate the agaw for an IOMMU unit
243  */
244 static int
245 set_agaw(immu_t *immu)
246 {
247 	int mgaw, magaw, agaw;
248 	uint_t bitpos;
249 	int max_sagaw_mask, sagaw_mask, mask;
250 	int nlevels;
251 
252 	/*
253 	 * mgaw is the maximum guest address width.
254 	 * Addresses above this value will be
255 	 * blocked by the IOMMU unit.
256 	 * sagaw is a bitmask that lists all the
257 	 * AGAWs supported by this IOMMU unit.
258 	 */
259 	mgaw = IMMU_CAP_MGAW(immu->immu_regs_cap);
260 	sagaw_mask = IMMU_CAP_SAGAW(immu->immu_regs_cap);
261 
262 	magaw = gaw2agaw(mgaw);
263 
264 	/*
265 	 * Get bitpos corresponding to
266 	 * magaw
267 	 */
268 
269 	/*
270 	 * The maximum SAGAW bitmask is defined
271 	 * by the Intel VT-d spec.
272 	 */
273 	max_sagaw_mask = ((1 << 5) - 1);
274 
275 	if (sagaw_mask > max_sagaw_mask) {
276 		ddi_err(DER_WARN, NULL, "%s: SAGAW bitmask (%x) "
277 		    "is larger than the maximum SAGAW bitmask "
278 		    "(%x) specified by the Intel VT-d spec",
279 		    immu->immu_name, sagaw_mask, max_sagaw_mask);
280 		return (DDI_FAILURE);
281 	}
282 
283 	/*
284 	 * Find a supported AGAW <= magaw
285 	 *
286 	 *	sagaw_mask    bitpos   AGAW (bits)  nlevels
287 	 *	==============================================
288 	 *	0 0 0 0 1	0	30		2
289 	 *	0 0 0 1 0	1	39		3
290 	 *	0 0 1 0 0	2	48		4
291 	 *	0 1 0 0 0	3	57		5
292 	 *	1 0 0 0 0	4	64(66)		6
293 	 */
294 	mask = 1;
295 	nlevels = 0;
296 	agaw = 0;
297 	for (mask = 1, bitpos = 0; bitpos < 5;
298 	    bitpos++, mask <<= 1) {
299 		if (mask & sagaw_mask) {
300 			nlevels = bitpos + 2;
301 			agaw = 30 + (bitpos * 9);
302 		}
303 	}
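	/*
	 * Example (hypothetical value): sagaw_mask = 0x4 (only bit 2 set)
	 * selects a 4-level pagetable with agaw = 30 + 2 * 9 = 48 bits.
	 */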
304 
305 	/* calculated agaw can be > 64 */
306 	agaw = (agaw > 64) ? 64 : agaw;
307 
308 	if (agaw < 30 || agaw > magaw) {
309 		ddi_err(DER_WARN, NULL, "%s: Calculated AGAW (%d) "
310 		    "is outside valid limits [30,%d] specified by the VT-d spec "
311 		    "and magaw",  immu->immu_name, agaw, magaw);
312 		return (DDI_FAILURE);
313 	}
314 
315 	if (nlevels < 2 || nlevels > 6) {
316 		ddi_err(DER_WARN, NULL, "%s: Calculated pagetable "
317 		    "level (%d) is outside valid limits [2,6]",
318 		    immu->immu_name, nlevels);
319 		return (DDI_FAILURE);
320 	}
321 
322 	ddi_err(DER_LOG, NULL, "Calculated pagetable "
323 	    "level (%d), agaw = %d", nlevels, agaw);
324 
325 	immu->immu_dvma_nlevels = nlevels;
326 	immu->immu_dvma_agaw = agaw;
327 
328 	return (DDI_SUCCESS);
329 }
330 
331 static int
332 setup_regs(immu_t *immu)
333 {
334 	int error;
335 
336 	ASSERT(immu);
337 	ASSERT(immu->immu_name);
338 
339 	/*
340 	 * This lock may be acquired by the IOMMU interrupt handler
341 	 */
342 	mutex_init(&(immu->immu_regs_lock), NULL, MUTEX_DRIVER,
343 	    (void *)ipltospl(IMMU_INTR_IPL));
344 
345 	/*
346 	 * map the register address space
347 	 */
348 	error = ddi_regs_map_setup(immu->immu_dip, 0,
349 	    (caddr_t *)&(immu->immu_regs_addr), (offset_t)0,
350 	    (offset_t)IMMU_REGSZ, &immu_regs_attr,
351 	    &(immu->immu_regs_handle));
352 
353 	if (error == DDI_FAILURE) {
354 		ddi_err(DER_WARN, NULL, "%s: Intel IOMMU register map failed",
355 		    immu->immu_name);
356 		mutex_destroy(&(immu->immu_regs_lock));
357 		return (DDI_FAILURE);
358 	}
359 
360 	/*
361 	 * read the capability and extended capability registers
362 	 */
363 	immu->immu_regs_cap = get_reg64(immu, IMMU_REG_CAP);
364 	immu->immu_regs_excap = get_reg64(immu, IMMU_REG_EXCAP);
365 
366 	/*
367 	 * if the hardware access is non-coherent, we need clflush
368 	 */
369 	if (IMMU_ECAP_GET_C(immu->immu_regs_excap)) {
370 		immu->immu_dvma_coherent = B_TRUE;
371 	} else {
372 		immu->immu_dvma_coherent = B_FALSE;
373 		if (!(x86_feature & X86_CLFSH)) {
374 			ddi_err(DER_WARN, NULL,
375 			    "immu unit %s can't be enabled due to "
376 			    "missing clflush functionality", immu->immu_name);
377 			ddi_regs_map_free(&(immu->immu_regs_handle));
378 			mutex_destroy(&(immu->immu_regs_lock));
379 			return (DDI_FAILURE);
380 		}
381 	}
382 
383 	/* Setup SNP and TM reserved fields */
384 	immu->immu_SNP_reserved = immu_regs_is_SNP_reserved(immu);
385 	immu->immu_TM_reserved = immu_regs_is_TM_reserved(immu);
386 
387 	/*
388 	 * Check for Mobile 4 series chipset
389 	 */
390 	if (immu_quirk_mobile4 == B_TRUE &&
391 	    !IMMU_CAP_GET_RWBF(immu->immu_regs_cap)) {
392 		ddi_err(DER_LOG, NULL,
393 		    "IMMU: Mobile 4 chipset quirk detected. "
394 		    "Force-setting RWBF");
395 		IMMU_CAP_SET_RWBF(immu->immu_regs_cap);
396 		ASSERT(IMMU_CAP_GET_RWBF(immu->immu_regs_cap));
397 	}
398 
399 	/*
400 	 * retrieve the maximum number of domains
401 	 */
402 	immu->immu_max_domains = IMMU_CAP_ND(immu->immu_regs_cap);
403 
404 	/*
405 	 * calculate the agaw
406 	 */
407 	if (set_agaw(immu) != DDI_SUCCESS) {
408 		ddi_regs_map_free(&(immu->immu_regs_handle));
409 		mutex_destroy(&(immu->immu_regs_lock));
410 		return (DDI_FAILURE);
411 	}
412 	immu->immu_regs_cmdval = 0;
413 
414 	return (DDI_SUCCESS);
415 }
416 
417 /* ############### Functions exported ################## */
418 
419 /*
420  * immu_regs_setup()
421  *       Set up mappings to an IMMU unit's registers
422  *       so that they can be read/written
423  */
424 void
425 immu_regs_setup(list_t *listp)
426 {
427 	int i;
428 	immu_t *immu;
429 
430 	for (i = 0; i < IMMU_MAXSEG; i++) {
431 		immu = list_head(listp);
432 		for (; immu; immu = list_next(listp, immu)) {
433 			/* do your best, continue on error */
434 			if (setup_regs(immu) != DDI_SUCCESS) {
435 				immu->immu_regs_setup = B_FALSE;
436 			} else {
437 				immu->immu_regs_setup = B_TRUE;
438 			}
439 		}
440 	}
441 }
442 
443 /*
444  * immu_regs_resume()
445  */
446 int
447 immu_regs_resume(immu_t *immu)
448 {
449 	int error;
450 
451 	/*
452 	 * remap the register address space
453 	 */
454 	error = ddi_regs_map_setup(immu->immu_dip, 0,
455 	    (caddr_t *)&(immu->immu_regs_addr), (offset_t)0,
456 	    (offset_t)IMMU_REGSZ, &immu_regs_attr,
457 	    &(immu->immu_regs_handle));
458 	if (error != DDI_SUCCESS) {
459 		return (DDI_FAILURE);
460 	}
461 
462 	immu_regs_set_root_table(immu);
463 
464 	immu_regs_intr_enable(immu, immu->immu_regs_intr_msi_addr,
465 	    immu->immu_regs_intr_msi_data, immu->immu_regs_intr_uaddr);
466 
467 	(void) immu_intr_handler(immu);
468 
469 	immu_regs_intrmap_enable(immu, immu->immu_intrmap_irta_reg);
470 
471 	immu_regs_qinv_enable(immu, immu->immu_qinv_reg_value);
472 
473 
474 	return (error);
475 }
476 
477 /*
478  * immu_regs_suspend()
479  */
480 void
481 immu_regs_suspend(immu_t *immu)
482 {
483 
484 	immu->immu_intrmap_running = B_FALSE;
485 
486 	/* Finally, unmap the regs */
487 	ddi_regs_map_free(&(immu->immu_regs_handle));
488 }
489 
490 /*
491  * immu_regs_startup()
492  *	set an IMMU unit's registers to start up the unit
493  */
494 void
495 immu_regs_startup(immu_t *immu)
496 {
497 	uint32_t status;
498 
499 	if (immu->immu_regs_setup == B_FALSE) {
500 		return;
501 	}
502 
503 	ASSERT(immu->immu_regs_running == B_FALSE);
504 
505 	ASSERT(MUTEX_HELD(&(immu->immu_lock)));
506 
507 	mutex_enter(&(immu->immu_regs_lock));
508 	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
509 	    immu->immu_regs_cmdval | IMMU_GCMD_TE);
510 	wait_completion(immu, IMMU_REG_GLOBAL_STS,
511 	    get_reg32, (status & IMMU_GSTS_TES), status);
512 	immu->immu_regs_cmdval |= IMMU_GCMD_TE;
513 	immu->immu_regs_running = B_TRUE;
514 	mutex_exit(&(immu->immu_regs_lock));
515 
516 	ddi_err(DER_NOTE, NULL, "IMMU %s running", immu->immu_name);
517 }
518 
519 /*
520  * immu_regs_shutdown()
521  *	shut down a unit
522  */
523 void
524 immu_regs_shutdown(immu_t *immu)
525 {
526 	uint32_t status;
527 
528 	if (immu->immu_regs_running == B_FALSE) {
529 		return;
530 	}
531 
532 	ASSERT(immu->immu_regs_setup == B_TRUE);
533 
534 	ASSERT(MUTEX_HELD(&(immu->immu_lock)));
535 
536 	mutex_enter(&(immu->immu_regs_lock));
537 	immu->immu_regs_cmdval &= ~IMMU_GCMD_TE;
538 	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
539 	    immu->immu_regs_cmdval);
540 	wait_completion(immu, IMMU_REG_GLOBAL_STS,
541 	    get_reg32, !(status & IMMU_GSTS_TES), status);
542 	immu->immu_regs_running = B_FALSE;
543 	mutex_exit(&(immu->immu_regs_lock));
544 
545 	ddi_err(DER_NOTE, NULL, "IOMMU %s stopped", immu->immu_name);
546 }
547 
548 /*
549  * immu_regs_intr_enable()
550  *        Set an IMMU unit's fault event registers to set up the
551  *        unit's interrupt handler
552  */
553 void
554 immu_regs_intr_enable(immu_t *immu, uint32_t msi_addr, uint32_t msi_data,
555     uint32_t uaddr)
556 {
557 	mutex_enter(&(immu->immu_regs_lock));
558 	immu->immu_regs_intr_msi_addr = msi_addr;
559 	immu->immu_regs_intr_uaddr = uaddr;
560 	immu->immu_regs_intr_msi_data = msi_data;
561 	put_reg32(immu, IMMU_REG_FEVNT_ADDR, msi_addr);
562 	put_reg32(immu, IMMU_REG_FEVNT_UADDR, uaddr);
563 	put_reg32(immu, IMMU_REG_FEVNT_DATA, msi_data);
564 	put_reg32(immu, IMMU_REG_FEVNT_CON, 0);
565 	mutex_exit(&(immu->immu_regs_lock));
566 }
567 
568 /*
569  * immu_regs_passthru_supported()
570  *       Returns B_TRUE if passthru is supported
571  */
572 boolean_t
573 immu_regs_passthru_supported(immu_t *immu)
574 {
575 	if (IMMU_ECAP_GET_PT(immu->immu_regs_excap)) {
576 		return (B_TRUE);
577 	}
578 
579 	ddi_err(DER_WARN, NULL, "Passthru not supported");
580 	return (B_FALSE);
581 }
582 
583 /*
584  * immu_regs_is_TM_reserved()
585  *       Returns B_TRUE if TM field is reserved
586  */
587 boolean_t
588 immu_regs_is_TM_reserved(immu_t *immu)
589 {
590 	if (IMMU_ECAP_GET_DI(immu->immu_regs_excap) ||
591 	    IMMU_ECAP_GET_CH(immu->immu_regs_excap)) {
592 		return (B_FALSE);
593 	}
594 	return (B_TRUE);
595 }
596 
597 /*
598  * immu_regs_is_SNP_reserved()
599  *       Returns B_TRUE if SNP field is reserved
600  */
601 boolean_t
602 immu_regs_is_SNP_reserved(immu_t *immu)
603 {
604 
605 	return (IMMU_ECAP_GET_SC(immu->immu_regs_excap) ? B_FALSE : B_TRUE);
606 }
607 
608 /*
609  * immu_regs_wbf_flush()
610  *     If required and supported, write to IMMU
611  *     unit's regs to flush DMA write buffer(s)
612  */
613 void
614 immu_regs_wbf_flush(immu_t *immu)
615 {
616 	uint32_t status;
617 
618 	if (!IMMU_CAP_GET_RWBF(immu->immu_regs_cap)) {
619 		return;
620 	}
621 
622 	mutex_enter(&(immu->immu_regs_lock));
623 	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
624 	    immu->immu_regs_cmdval | IMMU_GCMD_WBF);
625 	wait_completion(immu, IMMU_REG_GLOBAL_STS,
626 	    get_reg32, (!(status & IMMU_GSTS_WBFS)), status);
627 	mutex_exit(&(immu->immu_regs_lock));
628 }
629 
630 /*
631  * immu_regs_cpu_flush()
632  * 	flush the CPU cache lines after CPU memory writes, so
633  *      the IOMMU can see the writes
634  */
635 void
636 immu_regs_cpu_flush(immu_t *immu, caddr_t addr, uint_t size)
637 {
638 	uint64_t i;
639 
640 	ASSERT(immu);
641 
642 	if (immu->immu_dvma_coherent == B_TRUE)
643 		return;
644 
645 	for (i = 0; i < size; i += x86_clflush_size, addr += x86_clflush_size) {
646 		clflush_insn(addr);
647 	}
648 
649 	mfence_insn();
650 }
651 
652 void
653 immu_regs_iotlb_flush(immu_t *immu, uint_t domainid, uint64_t dvma,
654     uint64_t count, uint_t hint, immu_iotlb_inv_t type)
655 {
656 	ASSERT(immu);
657 
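	/*
	 * If page selective invalidation was requested but the hardware
	 * does not support it (or when compiled with -DTEST), degrade the
	 * request to a domain selective invalidation.
	 */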
658 #ifndef TEST
659 	if (type == IOTLB_PSI && !IMMU_CAP_GET_PSI(immu->immu_regs_cap)) {
660 		dvma = 0;
661 		count = 0;
662 		hint = 0;
663 		type = IOTLB_DSI;
664 	}
665 #else
666 	if (type == IOTLB_PSI) {
667 		dvma = 0;
668 		count = 0;
669 		hint = 0;
670 		type = IOTLB_DSI;
671 	}
672 #endif
673 
674 
675 	switch (type) {
676 	case IOTLB_PSI:
677 		ASSERT(domainid > 0);
678 		ASSERT(count > 0);
679 		iotlb_psi(immu, domainid, dvma, count, hint);
680 		break;
681 	case IOTLB_DSI:
682 		ASSERT(domainid > 0);
683 		ASSERT(dvma == 0);
684 		ASSERT(count == 0);
685 		ASSERT(hint == 0);
686 		iotlb_dsi(immu, domainid);
687 		break;
688 	case IOTLB_GLOBAL:
689 		ASSERT(domainid == 0);
690 		ASSERT(dvma == 0);
691 		ASSERT(count == 0);
692 		ASSERT(hint == 0);
693 		iotlb_global(immu);
694 		break;
695 	default:
696 		ddi_err(DER_PANIC, NULL, "invalid IOTLB invalidation type: %d",
697 		    type);
698 		/*NOTREACHED*/
699 	}
700 }
701 
702 /*
703  * immu_regs_context_flush()
704  *   flush the context cache
705  */
706 void
707 immu_regs_context_flush(immu_t *immu, uint8_t function_mask,
708     uint16_t sid, uint_t did, immu_context_inv_t type)
709 {
710 	uint64_t command = 0;
711 	uint64_t status;
712 
713 	ASSERT(immu);
714 	ASSERT(rw_write_held(&(immu->immu_ctx_rwlock)));
715 
716 	/*
717 	 * define the command
718 	 */
719 	switch (type) {
720 	case CONTEXT_FSI:
721 		command |= CCMD_INV_ICC | CCMD_INV_DEVICE
722 		    | CCMD_INV_DID(did)
723 		    | CCMD_INV_SID(sid) | CCMD_INV_FM(function_mask);
724 		break;
725 	case CONTEXT_DSI:
726 		ASSERT(function_mask == 0);
727 		ASSERT(sid == 0);
728 		command |= CCMD_INV_ICC | CCMD_INV_DOMAIN
729 		    | CCMD_INV_DID(did);
730 		break;
731 	case CONTEXT_GLOBAL:
732 		ASSERT(function_mask == 0);
733 		ASSERT(sid == 0);
734 		ASSERT(did == 0);
735 		command |= CCMD_INV_ICC | CCMD_INV_GLOBAL;
736 		break;
737 	default:
738 		ddi_err(DER_PANIC, NULL,
739 		    "%s: incorrect context cache flush type",
740 		    immu->immu_name);
741 		/*NOTREACHED*/
742 	}
743 
744 	mutex_enter(&(immu->immu_regs_lock));
745 	ASSERT(!(get_reg64(immu, IMMU_REG_CONTEXT_CMD) & CCMD_INV_ICC));
746 	put_reg64(immu, IMMU_REG_CONTEXT_CMD, command);
747 	wait_completion(immu, IMMU_REG_CONTEXT_CMD, get_reg64,
748 	    (!(status & CCMD_INV_ICC)), status);
749 	mutex_exit(&(immu->immu_regs_lock));
750 }
751 
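/*
 * immu_regs_set_root_table()
 *	program the unit's root-entry table address and latch it
 *	with a "set root table pointer" command
 */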
752 void
753 immu_regs_set_root_table(immu_t *immu)
754 {
755 	uint32_t status;
756 
757 	mutex_enter(&(immu->immu_regs_lock));
758 	put_reg64(immu, IMMU_REG_ROOTENTRY,
759 	    immu->immu_ctx_root->hwpg_paddr);
760 	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
761 	    immu->immu_regs_cmdval | IMMU_GCMD_SRTP);
762 	wait_completion(immu, IMMU_REG_GLOBAL_STS,
763 	    get_reg32, (status & IMMU_GSTS_RTPS), status);
764 	mutex_exit(&(immu->immu_regs_lock));
765 }
766 
767 
768 /* enable queued invalidation interface */
769 void
770 immu_regs_qinv_enable(immu_t *immu, uint64_t qinv_reg_value)
771 {
772 	uint32_t status;
773 
774 	if (immu_qinv_enable == B_FALSE)
775 		return;
776 
777 	mutex_enter(&immu->immu_regs_lock);
778 	immu->immu_qinv_reg_value = qinv_reg_value;
779 	/* Initialize the Invalidation Queue Tail register to zero */
780 	put_reg64(immu, IMMU_REG_INVAL_QT, 0);
781 
782 	/* set invalidation queue base address register */
783 	put_reg64(immu, IMMU_REG_INVAL_QAR, qinv_reg_value);
784 
785 	/* enable queued invalidation interface */
786 	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
787 	    immu->immu_regs_cmdval | IMMU_GCMD_QIE);
788 	wait_completion(immu, IMMU_REG_GLOBAL_STS,
789 	    get_reg32, (status & IMMU_GSTS_QIES), status);
790 	mutex_exit(&immu->immu_regs_lock);
791 
792 	immu->immu_regs_cmdval |= IMMU_GCMD_QIE;
793 	immu->immu_qinv_running = B_TRUE;
794 
795 }
796 
797 /* enable interrupt remapping hardware unit */
798 void
799 immu_regs_intrmap_enable(immu_t *immu, uint64_t irta_reg)
800 {
801 	uint32_t status;
802 
803 	if (immu_intrmap_enable == B_FALSE)
804 		return;
805 
806 	/* set interrupt remap table pointer */
807 	mutex_enter(&(immu->immu_regs_lock));
808 	immu->immu_intrmap_irta_reg = irta_reg;
809 	put_reg64(immu, IMMU_REG_IRTAR, irta_reg);
810 	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
811 	    immu->immu_regs_cmdval | IMMU_GCMD_SIRTP);
812 	wait_completion(immu, IMMU_REG_GLOBAL_STS,
813 	    get_reg32, (status & IMMU_GSTS_IRTPS), status);
814 	mutex_exit(&(immu->immu_regs_lock));
815 
816 	/* globally flush the interrupt entry cache */
817 	if (immu_qinv_enable == B_TRUE)
818 		immu_qinv_intr_global(immu);
819 
820 	/* enable interrupt remapping */
821 	mutex_enter(&(immu->immu_regs_lock));
822 	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
823 	    immu->immu_regs_cmdval | IMMU_GCMD_IRE);
824 	wait_completion(immu, IMMU_REG_GLOBAL_STS,
825 	    get_reg32, (status & IMMU_GSTS_IRES),
826 	    status);
827 	immu->immu_regs_cmdval |= IMMU_GCMD_IRE;
828 
829 	/* set compatible mode */
830 	/* enable compatibility format interrupts */
831 	    immu->immu_regs_cmdval | IMMU_GCMD_CFI);
832 	wait_completion(immu, IMMU_REG_GLOBAL_STS,
833 	    get_reg32, (status & IMMU_GSTS_CFIS),
834 	    status);
835 	immu->immu_regs_cmdval |= IMMU_GCMD_CFI;
836 	mutex_exit(&(immu->immu_regs_lock));
837 
838 	immu->immu_intrmap_running = B_TRUE;
839 }
840 
841 uint64_t
842 immu_regs_get64(immu_t *immu, uint_t reg)
843 {
844 	return (get_reg64(immu, reg));
845 }
846 
847 uint32_t
848 immu_regs_get32(immu_t *immu, uint_t reg)
849 {
850 	return (get_reg32(immu, reg));
851 }
852 
853 void
854 immu_regs_put64(immu_t *immu, uint_t reg, uint64_t val)
855 {
856 	put_reg64(immu, reg, val);
857 }
858 
859 void
860 immu_regs_put32(immu_t *immu, uint_t reg, uint32_t val)
861 {
862 	put_reg32(immu, reg, val);
863 }
864