/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Portions Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * immu_regs.c - File that operates on an IMMU unit's registers
 */
#include <sys/dditypes.h>
#include <sys/ddi.h>
#include <sys/archsystm.h>
#include <sys/x86_archext.h>
#include <sys/spl.h>
#include <sys/immu.h>

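/*
 * Helper macros for 32-bit and 64-bit reads and writes of the IOMMU's
 * memory-mapped register space (mapped in setup_regs() below).
 */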
#define	get_reg32(immu, offset)	ddi_get32((immu)->immu_regs_handle, \
		(uint32_t *)(immu->immu_regs_addr + (offset)))
#define	get_reg64(immu, offset)	ddi_get64((immu)->immu_regs_handle, \
		(uint64_t *)(immu->immu_regs_addr + (offset)))
#define	put_reg32(immu, offset, val)	ddi_put32\
		((immu)->immu_regs_handle, \
		(uint32_t *)(immu->immu_regs_addr + (offset)), val)
#define	put_reg64(immu, offset, val)	ddi_put64\
		((immu)->immu_regs_handle, \
		(uint64_t *)(immu->immu_regs_addr + (offset)), val)

/*
 * wait max 60s for the hardware completion
 */
#define	IMMU_MAX_WAIT_TIME		60000000
#define	wait_completion(immu, offset, getf, completion, status) \
{ \
	clock_t stick = ddi_get_lbolt(); \
	clock_t ntick; \
	_NOTE(CONSTCOND) \
	while (1) { \
		status = getf(immu, offset); \
		ntick = ddi_get_lbolt(); \
		if (completion) { \
			break; \
		} \
		if (ntick - stick >= drv_usectohz(IMMU_MAX_WAIT_TIME)) { \
			ddi_err(DER_PANIC, NULL, \
			    "immu wait completion time out");		\
			/*NOTREACHED*/   \
		} else { \
			iommu_cpu_nop();\
		}\
	}\
}
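
/*
 * Example (illustrative) use of wait_completion(): spin on the global
 * status register until the translation-enable status bit is observed,
 * as done in immu_regs_startup() below:
 *
 *	wait_completion(immu, IMMU_REG_GLOBAL_STS,
 *	    get_reg32, (status & IMMU_GSTS_TES), status);
 */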

static ddi_device_acc_attr_t immu_regs_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
};

/*
 * iotlb_flush()
 *   flush the iotlb cache
 */
static void
iotlb_flush(immu_t *immu, uint_t domain_id,
    uint64_t addr, uint_t am, uint_t hint, immu_iotlb_inv_t type)
{
	uint64_t command = 0, iva = 0;
	uint_t iva_offset, iotlb_offset;
	uint64_t status = 0;

	ASSERT(MUTEX_HELD(&(immu->immu_regs_lock)));

	/* no lock needed since cap and excap fields are RDONLY */
	iva_offset = IMMU_ECAP_GET_IRO(immu->immu_regs_excap);
	iotlb_offset = iva_offset + 8;

	/*
	 * prepare drain read/write command
	 */
	if (IMMU_CAP_GET_DWD(immu->immu_regs_cap)) {
		command |= TLB_INV_DRAIN_WRITE;
	}

	if (IMMU_CAP_GET_DRD(immu->immu_regs_cap)) {
		command |= TLB_INV_DRAIN_READ;
	}

	/*
	 * If the hardware doesn't support page selective invalidation,
	 * fall back to domain selective invalidation.
	 */
	switch (type) {
	case IOTLB_PSI:
		if (!IMMU_CAP_GET_PSI(immu->immu_regs_cap) ||
		    (am > IMMU_CAP_GET_MAMV(immu->immu_regs_cap)) ||
		    (addr & IMMU_PAGEOFFSET)) {
			goto ignore_psi;
		}
		command |= TLB_INV_PAGE | TLB_INV_IVT |
		    TLB_INV_DID(domain_id);
		iva = addr | am | TLB_IVA_HINT(hint);
		break;
ignore_psi:
	case IOTLB_DSI:
		command |= TLB_INV_DOMAIN | TLB_INV_IVT |
		    TLB_INV_DID(domain_id);
		break;
	case IOTLB_GLOBAL:
		command |= TLB_INV_GLOBAL | TLB_INV_IVT;
		break;
	default:
		ddi_err(DER_MODE, NULL, "%s: incorrect iotlb flush type",
		    immu->immu_name);
		return;
	}

	/* verify there is no pending command */
	wait_completion(immu, iotlb_offset, get_reg64,
	    (!(status & TLB_INV_IVT)), status);
	if (iva)
		put_reg64(immu, iva_offset, iva);
	put_reg64(immu, iotlb_offset, command);
	wait_completion(immu, iotlb_offset, get_reg64,
	    (!(status & TLB_INV_IVT)), status);
}

/*
 * iotlb_psi()
 *   iotlb page selective invalidation
 */
static void
iotlb_psi(immu_t *immu, uint_t domain_id,
    uint64_t dvma, uint_t count, uint_t hint)
{
	uint_t am = 0;
	uint_t max_am = 0;
	uint64_t align = 0;
	uint64_t dvma_pg = 0;
	uint_t used_count = 0;

	mutex_enter(&(immu->immu_regs_lock));

	/* choose page selective invalidation */
	if (IMMU_CAP_GET_PSI(immu->immu_regs_cap)) {
		/* MAMV is valid only if PSI is set */
		max_am = IMMU_CAP_GET_MAMV(immu->immu_regs_cap);
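		/*
		 * Invalidate the range in naturally aligned, power-of-two
		 * sized chunks: each chunk of 2^am pages is limited both by
		 * the alignment of the starting DVMA and by the hardware's
		 * maximum address mask value (MAMV).
		 */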
		while (count != 0) {
			/* First calculate alignment of DVMA */
			dvma_pg = IMMU_BTOP(dvma);
			ASSERT(dvma_pg != 0);
			ASSERT(count >= 1);
			for (align = 1; (dvma_pg & align) == 0; align <<= 1)
				;
			/* truncate count to the nearest power of 2 */
			for (used_count = 1, am = 0; count >> used_count != 0;
			    used_count <<= 1, am++)
				;
			if (am > max_am) {
				am = max_am;
				used_count = 1 << am;
			}
			if (align >= used_count) {
				iotlb_flush(immu, domain_id,
				    dvma, am, hint, IOTLB_PSI);
			} else {
				/* align < used_count */
				used_count = align;
				for (am = 0; (1 << am) != used_count; am++)
					;
				iotlb_flush(immu, domain_id,
				    dvma, am, hint, IOTLB_PSI);
			}
			count -= used_count;
			dvma = (dvma_pg + used_count) << IMMU_PAGESHIFT;
		}
	} else {
		/* choose domain invalidation */
		iotlb_flush(immu, domain_id, dvma, 0, 0, IOTLB_DSI);
	}

	mutex_exit(&(immu->immu_regs_lock));
}

/*
 * iotlb_dsi()
 *	domain specific invalidation
 */
static void
iotlb_dsi(immu_t *immu, uint_t domain_id)
{
	mutex_enter(&(immu->immu_regs_lock));
	iotlb_flush(immu, domain_id, 0, 0, 0, IOTLB_DSI);
	mutex_exit(&(immu->immu_regs_lock));
}

/*
 * iotlb_global()
 *     global iotlb invalidation
 */
static void
iotlb_global(immu_t *immu)
{
	mutex_enter(&(immu->immu_regs_lock));
	iotlb_flush(immu, 0, 0, 0, 0, IOTLB_GLOBAL);
	mutex_exit(&(immu->immu_regs_lock));
}


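/*
 * gaw2agaw()
 *	round a guest address width (GAW) up to the nearest adjusted guest
 *	address width (AGAW), i.e. the nearest width for which (agaw - 12)
 *	is a multiple of 9, capped at 64.  For example, gaw2agaw(39) == 39
 *	and gaw2agaw(40) == 48.
 */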
static int
gaw2agaw(int gaw)
{
	int r, agaw;

	r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;

	if (agaw > 64)
		agaw = 64;

	return (agaw);
}

/*
 * set_agaw()
 *	calculate the AGAW for an IOMMU unit
 */
static int
set_agaw(immu_t *immu)
{
	int mgaw, magaw, agaw;
	uint_t bitpos;
	int max_sagaw_mask, sagaw_mask, mask;
	int nlevels;

	/*
	 * mgaw is the maximum guest address width.
	 * Addresses above this value will be
	 * blocked by the IOMMU unit.
	 * sagaw is a bitmask that lists all the
	 * AGAWs supported by this IOMMU unit.
	 */
	mgaw = IMMU_CAP_MGAW(immu->immu_regs_cap);
	sagaw_mask = IMMU_CAP_SAGAW(immu->immu_regs_cap);

	magaw = gaw2agaw(mgaw);

	/*
	 * Get bitpos corresponding to
	 * magaw
	 */

	/*
	 * Maximum SAGAW is specified by
	 * Vt-d spec.
	 */
	max_sagaw_mask = ((1 << 5) - 1);

	if (sagaw_mask > max_sagaw_mask) {
		ddi_err(DER_WARN, NULL, "%s: SAGAW bitmask (%x) "
		    "is larger than maximum SAGAW bitmask "
		    "(%x) specified by Intel Vt-d spec",
		    immu->immu_name, sagaw_mask, max_sagaw_mask);
		return (DDI_FAILURE);
	}

	/*
	 * Find a supported AGAW <= magaw
	 *
	 *	sagaw_mask    bitpos   AGAW (bits)  nlevels
	 *	==============================================
	 *	0 0 0 0 1	0	30		2
	 *	0 0 0 1 0	1	39		3
	 *	0 0 1 0 0	2	48		4
	 *	0 1 0 0 0	3	57		5
	 *	1 0 0 0 0	4	64(66)		6
	 */
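	/* pick the largest AGAW advertised in the SAGAW field */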
	mask = 1;
	nlevels = 0;
	agaw = 0;
	for (mask = 1, bitpos = 0; bitpos < 5;
	    bitpos++, mask <<= 1) {
		if (mask & sagaw_mask) {
			nlevels = bitpos + 2;
			agaw = 30 + (bitpos * 9);
		}
	}

	/* calculated agaw can be > 64 */
	agaw = (agaw > 64) ? 64 : agaw;

	if (agaw < 30 || agaw > magaw) {
		ddi_err(DER_WARN, NULL, "%s: Calculated AGAW (%d) "
		    "is outside valid limits [30,%d] specified by Vt-d spec "
		    "and magaw",  immu->immu_name, agaw, magaw);
		return (DDI_FAILURE);
	}

	if (nlevels < 2 || nlevels > 6) {
		ddi_err(DER_WARN, NULL, "%s: Calculated pagetable "
		    "level (%d) is outside valid limits [2,6]",
		    immu->immu_name, nlevels);
		return (DDI_FAILURE);
	}

	ddi_err(DER_LOG, NULL, "Calculated pagetable "
	    "level (%d), agaw = %d", nlevels, agaw);

	immu->immu_dvma_nlevels = nlevels;
	immu->immu_dvma_agaw = agaw;

	return (DDI_SUCCESS);
}

static int
setup_regs(immu_t *immu)
{
	int error;

	ASSERT(immu);
	ASSERT(immu->immu_name);

	/*
	 * This lock may be acquired by the IOMMU interrupt handler
	 */
	mutex_init(&(immu->immu_regs_lock), NULL, MUTEX_DRIVER,
	    (void *)ipltospl(IMMU_INTR_IPL));

	/*
	 * map the register address space
	 */
	error = ddi_regs_map_setup(immu->immu_dip, 0,
	    (caddr_t *)&(immu->immu_regs_addr), (offset_t)0,
	    (offset_t)IMMU_REGSZ, &immu_regs_attr,
	    &(immu->immu_regs_handle));

	if (error == DDI_FAILURE) {
		ddi_err(DER_WARN, NULL, "%s: Intel IOMMU register map failed",
		    immu->immu_name);
		mutex_destroy(&(immu->immu_regs_lock));
		return (DDI_FAILURE);
	}

	/*
	 * get the register value
	 */
	immu->immu_regs_cap = get_reg64(immu, IMMU_REG_CAP);
	immu->immu_regs_excap = get_reg64(immu, IMMU_REG_EXCAP);

	/*
	 * if the hardware access is non-coherent, we need clflush
	 */
	if (IMMU_ECAP_GET_C(immu->immu_regs_excap)) {
		immu->immu_dvma_coherent = B_TRUE;
	} else {
		immu->immu_dvma_coherent = B_FALSE;
		if (!(x86_feature & X86_CLFSH)) {
			ddi_err(DER_WARN, NULL,
			    "immu unit %s can't be enabled due to "
			    "missing clflush functionality", immu->immu_name);
			ddi_regs_map_free(&(immu->immu_regs_handle));
			mutex_destroy(&(immu->immu_regs_lock));
			return (DDI_FAILURE);
		}
	}

	/*
	 * Check for Mobile 4 series chipset
	 */
	if (immu_quirk_mobile4 == B_TRUE &&
	    !IMMU_CAP_GET_RWBF(immu->immu_regs_cap)) {
		ddi_err(DER_LOG, NULL,
		    "IMMU: Mobile 4 chipset quirk detected. "
		    "Force-setting RWBF");
		IMMU_CAP_SET_RWBF(immu->immu_regs_cap);
		ASSERT(IMMU_CAP_GET_RWBF(immu->immu_regs_cap));
	}

	/*
	 * retrieve the maximum number of domains
	 */
	immu->immu_max_domains = IMMU_CAP_ND(immu->immu_regs_cap);

	/*
	 * calculate the agaw
	 */
	if (set_agaw(immu) != DDI_SUCCESS) {
		ddi_regs_map_free(&(immu->immu_regs_handle));
		mutex_destroy(&(immu->immu_regs_lock));
		return (DDI_FAILURE);
	}
	immu->immu_regs_cmdval = 0;

	return (DDI_SUCCESS);
}

/* ############### Functions exported ################## */

/*
 * immu_regs_setup()
 *       Set up mappings to an IMMU unit's registers
 *       so that they can be read/written
 */
void
immu_regs_setup(list_t *listp)
{
	int i;
	immu_t *immu;

	for (i = 0; i < IMMU_MAXSEG; i++) {
		immu = list_head(listp);
		for (; immu; immu = list_next(listp, immu)) {
			/* do your best, continue on error */
			if (setup_regs(immu) != DDI_SUCCESS) {
				immu->immu_regs_setup = B_FALSE;
			} else {
				immu->immu_regs_setup = B_TRUE;
			}
		}
	}
}

/*
 * immu_regs_resume()
 *	remap the registers and restore the unit's state after a suspend
 */
int
immu_regs_resume(immu_t *immu)
{
	int error;

	/*
	 * remap the register address space
	 */
	error = ddi_regs_map_setup(immu->immu_dip, 0,
	    (caddr_t *)&(immu->immu_regs_addr), (offset_t)0,
	    (offset_t)IMMU_REGSZ, &immu_regs_attr,
	    &(immu->immu_regs_handle));
	if (error != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	immu_regs_set_root_table(immu);

	immu_regs_intr_enable(immu, immu->immu_regs_intr_msi_addr,
	    immu->immu_regs_intr_msi_data, immu->immu_regs_intr_uaddr);

	(void) immu_intr_handler(immu);

	immu_regs_intrmap_enable(immu, immu->immu_intrmap_irta_reg);

	immu_regs_qinv_enable(immu, immu->immu_qinv_reg_value);


	return (error);
}

/*
 * immu_regs_suspend()
 */
void
immu_regs_suspend(immu_t *immu)
{

	immu->immu_intrmap_running = B_FALSE;

	/* Finally, unmap the regs */
	ddi_regs_map_free(&(immu->immu_regs_handle));
}

/*
 * immu_regs_startup()
 *	set an IMMU unit's registers to start up the unit
 */
void
immu_regs_startup(immu_t *immu)
{
	uint32_t status;

	if (immu->immu_regs_setup == B_FALSE) {
		return;
	}

	ASSERT(immu->immu_regs_running == B_FALSE);

	ASSERT(MUTEX_HELD(&(immu->immu_lock)));

	mutex_enter(&(immu->immu_regs_lock));
	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
	    immu->immu_regs_cmdval | IMMU_GCMD_TE);
	wait_completion(immu, IMMU_REG_GLOBAL_STS,
	    get_reg32, (status & IMMU_GSTS_TES), status);
	immu->immu_regs_cmdval |= IMMU_GCMD_TE;
	immu->immu_regs_running = B_TRUE;
	mutex_exit(&(immu->immu_regs_lock));

	ddi_err(DER_NOTE, NULL, "IMMU %s running", immu->immu_name);
}

/*
 * immu_regs_shutdown()
 *	shut down a unit
 */
void
immu_regs_shutdown(immu_t *immu)
{
	uint32_t status;

	if (immu->immu_regs_running == B_FALSE) {
		return;
	}

	ASSERT(immu->immu_regs_setup == B_TRUE);

	ASSERT(MUTEX_HELD(&(immu->immu_lock)));

	mutex_enter(&(immu->immu_regs_lock));
	immu->immu_regs_cmdval &= ~IMMU_GCMD_TE;
	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
	    immu->immu_regs_cmdval);
	wait_completion(immu, IMMU_REG_GLOBAL_STS,
	    get_reg32, !(status & IMMU_GSTS_TES), status);
	immu->immu_regs_running = B_FALSE;
	mutex_exit(&(immu->immu_regs_lock));

	ddi_err(DER_NOTE, NULL, "IOMMU %s stopped", immu->immu_name);
}

/*
 * immu_regs_intr_enable()
 *        Set an IMMU unit's registers to set up the unit's
 *        (fault event) interrupt handler
 */
void
immu_regs_intr_enable(immu_t *immu, uint32_t msi_addr, uint32_t msi_data,
    uint32_t uaddr)
{
	mutex_enter(&(immu->immu_regs_lock));
	immu->immu_regs_intr_msi_addr = msi_addr;
	immu->immu_regs_intr_uaddr = uaddr;
	immu->immu_regs_intr_msi_data = msi_data;
	put_reg32(immu, IMMU_REG_FEVNT_ADDR, msi_addr);
	put_reg32(immu, IMMU_REG_FEVNT_UADDR, uaddr);
	put_reg32(immu, IMMU_REG_FEVNT_DATA, msi_data);
	put_reg32(immu, IMMU_REG_FEVNT_CON, 0);
	mutex_exit(&(immu->immu_regs_lock));
}

/*
 * immu_regs_passthru_supported()
 *       Returns B_TRUE if passthru is supported
 */
boolean_t
immu_regs_passthru_supported(immu_t *immu)
{
	if (IMMU_ECAP_GET_PT(immu->immu_regs_excap)) {
		return (B_TRUE);
	}

	ddi_err(DER_WARN, NULL, "Passthru not supported");
	return (B_FALSE);
}

/*
 * immu_regs_is_TM_reserved()
 *       Returns B_TRUE if TM field is reserved
 */
boolean_t
immu_regs_is_TM_reserved(immu_t *immu)
{
	if (IMMU_ECAP_GET_DI(immu->immu_regs_excap) ||
	    IMMU_ECAP_GET_CH(immu->immu_regs_excap)) {
		return (B_FALSE);
	}
	return (B_TRUE);
}

/*
 * immu_regs_is_SNP_reserved()
 *       Returns B_TRUE if SNP field is reserved
 */
boolean_t
immu_regs_is_SNP_reserved(immu_t *immu)
{

	return (IMMU_ECAP_GET_SC(immu->immu_regs_excap) ? B_FALSE : B_TRUE);
}

/*
 * immu_regs_wbf_flush()
 *     If required and supported, write to IMMU
 *     unit's regs to flush DMA write buffer(s)
 */
void
immu_regs_wbf_flush(immu_t *immu)
{
	uint32_t status;

	if (!IMMU_CAP_GET_RWBF(immu->immu_regs_cap)) {
		return;
	}

	mutex_enter(&(immu->immu_regs_lock));
	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
	    immu->immu_regs_cmdval | IMMU_GCMD_WBF);
	wait_completion(immu, IMMU_REG_GLOBAL_STS,
	    get_reg32, (!(status & IMMU_GSTS_WBFS)), status);
	mutex_exit(&(immu->immu_regs_lock));
}

/*
 * immu_regs_cpu_flush()
 *	flush the CPU cache lines after CPU memory writes, so
 *	the IOMMU can see the writes
 */
void
immu_regs_cpu_flush(immu_t *immu, caddr_t addr, uint_t size)
{
	uint_t i;

	ASSERT(immu);

	if (immu->immu_dvma_coherent == B_TRUE)
		return;

	for (i = 0; i < size; i += x86_clflush_size) {
		clflush_insn(addr+i);
	}

	mfence_insn();
}

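/*
 * immu_regs_iotlb_flush()
 *	dispatch an IOTLB invalidation of the requested type
 *	(page selective, domain selective or global)
 */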
void
immu_regs_iotlb_flush(immu_t *immu, uint_t domainid, uint64_t dvma,
    uint64_t count, uint_t hint, immu_iotlb_inv_t type)
{
	ASSERT(immu);

	switch (type) {
	case IOTLB_PSI:
		ASSERT(domainid > 0);
		ASSERT(dvma > 0);
		ASSERT(count > 0);
		iotlb_psi(immu, domainid, dvma, count, hint);
		break;
	case IOTLB_DSI:
		ASSERT(domainid > 0);
		ASSERT(dvma == 0);
		ASSERT(count == 0);
		ASSERT(hint == 0);
		iotlb_dsi(immu, domainid);
		break;
	case IOTLB_GLOBAL:
		ASSERT(domainid == 0);
		ASSERT(dvma == 0);
		ASSERT(count == 0);
		ASSERT(hint == 0);
		iotlb_global(immu);
		break;
	default:
		ddi_err(DER_PANIC, NULL, "invalid IOTLB invalidation type: %d",
		    type);
		/*NOTREACHED*/
	}
}

/*
 * immu_regs_context_flush()
 *   flush the context cache
 */
void
immu_regs_context_flush(immu_t *immu, uint8_t function_mask,
    uint16_t sid, uint_t did, immu_context_inv_t type)
{
	uint64_t command = 0;
	uint64_t status;

	ASSERT(immu);
	ASSERT(rw_write_held(&(immu->immu_ctx_rwlock)));

	/*
	 * define the command
	 */
	switch (type) {
	case CONTEXT_FSI:
		command |= CCMD_INV_ICC | CCMD_INV_DEVICE
		    | CCMD_INV_DID(did)
		    | CCMD_INV_SID(sid) | CCMD_INV_FM(function_mask);
		break;
	case CONTEXT_DSI:
		ASSERT(function_mask == 0);
		ASSERT(sid == 0);
		command |= CCMD_INV_ICC | CCMD_INV_DOMAIN
		    | CCMD_INV_DID(did);
		break;
	case CONTEXT_GLOBAL:
		ASSERT(function_mask == 0);
		ASSERT(sid == 0);
		ASSERT(did == 0);
		command |= CCMD_INV_ICC | CCMD_INV_GLOBAL;
		break;
	default:
		ddi_err(DER_PANIC, NULL,
		    "%s: incorrect context cache flush type",
		    immu->immu_name);
		/*NOTREACHED*/
	}

	mutex_enter(&(immu->immu_regs_lock));
	/* verify there is no pending command */
	wait_completion(immu, IMMU_REG_CONTEXT_CMD, get_reg64,
	    (!(status & CCMD_INV_ICC)), status);
	put_reg64(immu, IMMU_REG_CONTEXT_CMD, command);
	wait_completion(immu, IMMU_REG_CONTEXT_CMD, get_reg64,
	    (!(status & CCMD_INV_ICC)), status);
	mutex_exit(&(immu->immu_regs_lock));
}

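/*
 * immu_regs_set_root_table()
 *	program the root-entry table address and tell the hardware
 *	to latch it (SRTP)
 */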
void
immu_regs_set_root_table(immu_t *immu)
{
	uint32_t status;

	mutex_enter(&(immu->immu_regs_lock));
	put_reg64(immu, IMMU_REG_ROOTENTRY,
	    immu->immu_ctx_root->hwpg_paddr);
	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
	    immu->immu_regs_cmdval | IMMU_GCMD_SRTP);
	wait_completion(immu, IMMU_REG_GLOBAL_STS,
	    get_reg32, (status & IMMU_GSTS_RTPS), status);
	mutex_exit(&(immu->immu_regs_lock));
}


/* enable queued invalidation interface */
void
immu_regs_qinv_enable(immu_t *immu, uint64_t qinv_reg_value)
{
	uint32_t status;

	if (immu_qinv_enable == B_FALSE)
		return;

	mutex_enter(&immu->immu_regs_lock);
	immu->immu_qinv_reg_value = qinv_reg_value;
	/* Initialize the Invalidation Queue Tail register to zero */
	put_reg64(immu, IMMU_REG_INVAL_QT, 0);

	/* set invalidation queue base address register */
	put_reg64(immu, IMMU_REG_INVAL_QAR, qinv_reg_value);

	/* enable queued invalidation interface */
	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
	    immu->immu_regs_cmdval | IMMU_GCMD_QIE);
	wait_completion(immu, IMMU_REG_GLOBAL_STS,
	    get_reg32, (status & IMMU_GSTS_QIES), status);
	mutex_exit(&immu->immu_regs_lock);

	immu->immu_regs_cmdval |= IMMU_GCMD_QIE;
	immu->immu_qinv_running = B_TRUE;

}

/* enable interrupt remapping hardware unit */
void
immu_regs_intrmap_enable(immu_t *immu, uint64_t irta_reg)
{
	uint32_t status;

	if (immu_intrmap_enable == B_FALSE)
		return;

	/* set interrupt remap table pointer */
	mutex_enter(&(immu->immu_regs_lock));
	immu->immu_intrmap_irta_reg = irta_reg;
	put_reg64(immu, IMMU_REG_IRTAR, irta_reg);
	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
	    immu->immu_regs_cmdval | IMMU_GCMD_SIRTP);
	wait_completion(immu, IMMU_REG_GLOBAL_STS,
	    get_reg32, (status & IMMU_GSTS_IRTPS), status);
	mutex_exit(&(immu->immu_regs_lock));

	/* global flush intr entry cache */
	if (immu_qinv_enable == B_TRUE)
		immu_qinv_intr_global(immu);

	/* enable interrupt remapping */
	mutex_enter(&(immu->immu_regs_lock));
	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
	    immu->immu_regs_cmdval | IMMU_GCMD_IRE);
	wait_completion(immu, IMMU_REG_GLOBAL_STS,
	    get_reg32, (status & IMMU_GSTS_IRES),
	    status);
	immu->immu_regs_cmdval |= IMMU_GCMD_IRE;

	/* set compatible mode */
	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
	    immu->immu_regs_cmdval | IMMU_GCMD_CFI);
	wait_completion(immu, IMMU_REG_GLOBAL_STS,
	    get_reg32, (status & IMMU_GSTS_CFIS),
	    status);
	immu->immu_regs_cmdval |= IMMU_GCMD_CFI;
	mutex_exit(&(immu->immu_regs_lock));

	immu->immu_intrmap_running = B_TRUE;
}

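/*
 * Simple exported accessors for reading and writing IMMU registers.
 */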
uint64_t
immu_regs_get64(immu_t *immu, uint_t reg)
{
	return (get_reg64(immu, reg));
}

uint32_t
immu_regs_get32(immu_t *immu, uint_t reg)
{
	return (get_reg32(immu, reg));
}

void
immu_regs_put64(immu_t *immu, uint_t reg, uint64_t val)
{
	put_reg64(immu, reg, val);
}

void
immu_regs_put32(immu_t *immu, uint_t reg, uint32_t val)
{
	put_reg32(immu, reg, val);
}