xref: /titanic_41/usr/src/uts/i86pc/io/immu_regs.c (revision ba7866cd2cbdf574f47d4e38a1301b90744dd677)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Portions Copyright (c) 2010, Oracle and/or its affiliates.
 * All rights reserved.
 */

/*
 * immu_regs.c - File that operates on an IMMU unit's registers
 */
#include <sys/dditypes.h>
#include <sys/ddi.h>
#include <sys/archsystm.h>
#include <sys/x86_archext.h>
#include <sys/spl.h>
#include <sys/sysmacros.h>
#include <sys/immu.h>

#define	get_reg32(immu, offset)	ddi_get32((immu)->immu_regs_handle, \
		(uint32_t *)(immu->immu_regs_addr + (offset)))
#define	get_reg64(immu, offset)	ddi_get64((immu)->immu_regs_handle, \
		(uint64_t *)(immu->immu_regs_addr + (offset)))
#define	put_reg32(immu, offset, val)	ddi_put32\
		((immu)->immu_regs_handle, \
		(uint32_t *)(immu->immu_regs_addr + (offset)), val)
#define	put_reg64(immu, offset, val)	ddi_put64\
		((immu)->immu_regs_handle, \
		(uint64_t *)(immu->immu_regs_addr + (offset)), val)

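/*
 * Flush operations performed by writing the IOMMU registers directly
 * (as opposed to using the queued-invalidation interface). This ops
 * vector is installed into immu_t's immu_flushops by setup_regs().
 */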
struct immu_flushops immu_regs_flushops = {
	immu_regs_context_fsi,
	immu_regs_context_dsi,
	immu_regs_context_gbl,
	immu_regs_iotlb_psi,
	immu_regs_iotlb_dsi,
	immu_regs_iotlb_gbl
};

/*
 * Wait at most 60 seconds for a hardware operation to complete.
 */
#define	IMMU_MAX_WAIT_TIME		60000000
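/*
 * wait_completion():
 *	Poll the register at `offset' with the accessor `getf' until the
 *	`completion' condition is true, leaving the last value read in
 *	`status'.  Panics if the condition does not become true within
 *	IMMU_MAX_WAIT_TIME microseconds.
 */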
#define	wait_completion(immu, offset, getf, completion, status) \
{ \
	clock_t stick = ddi_get_lbolt(); \
	clock_t ntick; \
	_NOTE(CONSTCOND) \
	while (1) { \
		status = getf(immu, offset); \
		ntick = ddi_get_lbolt(); \
		if (completion) { \
			break; \
		} \
		if (ntick - stick >= drv_usectohz(IMMU_MAX_WAIT_TIME)) { \
			ddi_err(DER_PANIC, NULL, \
			    "immu wait completion time out");		\
			/*NOTREACHED*/   \
		} else { \
			iommu_cpu_nop();\
		}\
	}\
}

static ddi_device_acc_attr_t immu_regs_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
};

/*
 * iotlb_flush()
 *   flush the iotlb cache
 */
static void
iotlb_flush(immu_t *immu, uint_t domain_id,
    uint64_t addr, uint_t am, uint_t hint, immu_iotlb_inv_t type)
{
	uint64_t command = 0, iva = 0;
	uint_t iva_offset, iotlb_offset;
	uint64_t status = 0;

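	/*
	 * The IOTLB invalidation registers are located via the IRO field
	 * of the extended capability register: the invalidate address (IVA)
	 * register is at that offset and the IOTLB invalidate register
	 * follows 8 bytes later.
	 */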
	/* no lock needed since cap and excap fields are RDONLY */
	iva_offset = IMMU_ECAP_GET_IRO(immu->immu_regs_excap);
	iotlb_offset = iva_offset + 8;

	/*
	 * prepare the drain read/write command bits
	 */
	if (IMMU_CAP_GET_DWD(immu->immu_regs_cap)) {
		command |= TLB_INV_DRAIN_WRITE;
	}

	if (IMMU_CAP_GET_DRD(immu->immu_regs_cap)) {
		command |= TLB_INV_DRAIN_READ;
	}

	/*
	 * Build the invalidation command for the requested type. If the
	 * hardware does not support page selective invalidation, callers
	 * fall back to a domain selective flush (see immu_regs_iotlb_psi()).
	 */
	switch (type) {
	case IOTLB_PSI:
		ASSERT(IMMU_CAP_GET_PSI(immu->immu_regs_cap));
		ASSERT(am <= IMMU_CAP_GET_MAMV(immu->immu_regs_cap));
		ASSERT(!(addr & IMMU_PAGEOFFSET));
		command |= TLB_INV_PAGE | TLB_INV_IVT |
		    TLB_INV_DID(domain_id);
		iva = addr | am | TLB_IVA_HINT(hint);
		break;
	case IOTLB_DSI:
		command |= TLB_INV_DOMAIN | TLB_INV_IVT |
		    TLB_INV_DID(domain_id);
		break;
	case IOTLB_GLOBAL:
		command |= TLB_INV_GLOBAL | TLB_INV_IVT;
		break;
	default:
		ddi_err(DER_MODE, NULL, "%s: incorrect iotlb flush type",
		    immu->immu_name);
		return;
	}

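	/*
	 * Submit the invalidation: hardware clears the IVT bit in the
	 * IOTLB invalidate register once the flush has completed, so
	 * poll until it reads back as zero.
	 */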
	if (iva)
		put_reg64(immu, iva_offset, iva);
	put_reg64(immu, iotlb_offset, command);
	wait_completion(immu, iotlb_offset, get_reg64,
	    (!(status & TLB_INV_IVT)), status);
}

/*
 * immu_regs_iotlb_psi()
 *   iotlb page specific invalidation
 */
void
immu_regs_iotlb_psi(immu_t *immu, uint_t did, uint64_t dvma, uint_t snpages,
    uint_t hint)
{
	int dvma_am;
	int npg_am;
	int max_am;
	int am;
	uint64_t align;
	int npages_left;
	int npages;
	int i;

	if (!IMMU_CAP_GET_PSI(immu->immu_regs_cap)) {
		immu_regs_iotlb_dsi(immu, did);
		return;
	}

	ASSERT(dvma % IMMU_PAGESIZE == 0);

	max_am = IMMU_CAP_GET_MAMV(immu->immu_regs_cap);

	mutex_enter(&(immu->immu_regs_lock));

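	/*
	 * Flush the range in naturally aligned power-of-two chunks. The
	 * address mask (am) used for each chunk is limited by the current
	 * DVMA's alignment, the number of pages still to be flushed and
	 * the hardware's maximum address mask (max_am). If the range is
	 * not covered after immu_flush_gran iterations, fall back to a
	 * single domain selective flush below.
	 */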
	npages_left = snpages;
	for (i = 0; i < immu_flush_gran && npages_left > 0; i++) {
		/* First calculate alignment of DVMA */

		if (dvma == 0) {
			dvma_am = max_am;
		} else {
			for (align = (1 << 12), dvma_am = 1;
			    (dvma & align) == 0; align <<= 1, dvma_am++)
				;
			dvma_am--;
		}

		/* Calculate the npg_am */
		npages = npages_left;
		for (npg_am = 0, npages >>= 1; npages; npages >>= 1, npg_am++)
			;

		am = MIN(max_am, MIN(dvma_am, npg_am));

		iotlb_flush(immu, did, dvma, am, hint, IOTLB_PSI);

		npages = (1 << am);
		npages_left -= npages;
		dvma += (npages * IMMU_PAGESIZE);
	}

	if (npages_left) {
		iotlb_flush(immu, did, 0, 0, 0, IOTLB_DSI);
	}
	mutex_exit(&(immu->immu_regs_lock));
}

/*
 * immu_regs_iotlb_dsi()
 *	domain specific invalidation
 */
void
immu_regs_iotlb_dsi(immu_t *immu, uint_t domain_id)
{
	mutex_enter(&(immu->immu_regs_lock));
	iotlb_flush(immu, domain_id, 0, 0, 0, IOTLB_DSI);
	mutex_exit(&(immu->immu_regs_lock));
}

/*
 * immu_regs_iotlb_gbl()
 *     global iotlb invalidation
 */
void
immu_regs_iotlb_gbl(immu_t *immu)
{
	mutex_enter(&(immu->immu_regs_lock));
	iotlb_flush(immu, 0, 0, 0, 0, IOTLB_GLOBAL);
	mutex_exit(&(immu->immu_regs_lock));
}


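/*
 * gaw2agaw()
 *	Round a guest address width (GAW) up to the nearest adjusted guest
 *	address width (AGAW) supported by the VT-d page-table format, i.e.
 *	the nearest value of the form 12 + 9*n (30, 39, 48, 57, 66), capped
 *	at 64 bits. For example, gaw2agaw(36) returns 39 and gaw2agaw(48)
 *	returns 48.
 */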
static int
gaw2agaw(int gaw)
{
	int r, agaw;

	r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;

	if (agaw > 64)
		agaw = 64;

	return (agaw);
}

/*
 * set_agaw()
 * 	calculate the agaw for an IOMMU unit
 */
static int
set_agaw(immu_t *immu)
{
	int mgaw, magaw, agaw;
	uint_t bitpos;
	int max_sagaw_mask, sagaw_mask, mask;
	int nlevels;

	/*
	 * mgaw is the maximum guest address width.
	 * Addresses above this value will be
	 * blocked by the IOMMU unit.
	 * sagaw is a bitmask that lists all the
	 * AGAWs supported by this IOMMU unit.
	 */
	mgaw = IMMU_CAP_MGAW(immu->immu_regs_cap);
	sagaw_mask = IMMU_CAP_SAGAW(immu->immu_regs_cap);

	magaw = gaw2agaw(mgaw);

	/*
	 * The SAGAW field is at most 5 bits wide; its maximum value is
	 * specified by the Intel Vt-d spec.
	 */
	max_sagaw_mask = ((1 << 5) - 1);

	if (sagaw_mask > max_sagaw_mask) {
		ddi_err(DER_WARN, NULL, "%s: SAGAW bitmask (%x) "
		    "is larger than maximum SAGAW bitmask "
		    "(%x) specified by Intel Vt-d spec",
		    immu->immu_name, sagaw_mask, max_sagaw_mask);
		return (DDI_FAILURE);
	}

	/*
	 * Find a supported AGAW <= magaw
	 *
	 *	sagaw_mask    bitpos   AGAW (bits)  nlevels
	 *	==============================================
	 *	0 0 0 0 1	0	30		2
	 *	0 0 0 1 0	1	39		3
	 *	0 0 1 0 0	2	48		4
	 *	0 1 0 0 0	3	57		5
	 *	1 0 0 0 0	4	64(66)		6
	 */
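	/*
	 * The loop below selects the highest bit position set in sagaw_mask,
	 * i.e. the largest AGAW the hardware supports; the checks that
	 * follow reject it if it falls outside [30, magaw].
	 */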
	mask = 1;
	nlevels = 0;
	agaw = 0;
	for (mask = 1, bitpos = 0; bitpos < 5;
	    bitpos++, mask <<= 1) {
		if (mask & sagaw_mask) {
			nlevels = bitpos + 2;
			agaw = 30 + (bitpos * 9);
		}
	}

	/* calculated agaw can be > 64 */
	agaw = (agaw > 64) ? 64 : agaw;

	if (agaw < 30 || agaw > magaw) {
		ddi_err(DER_WARN, NULL, "%s: Calculated AGAW (%d) "
		    "is outside valid limits [30,%d] specified by Vt-d spec "
		    "and magaw",  immu->immu_name, agaw, magaw);
		return (DDI_FAILURE);
	}

	if (nlevels < 2 || nlevels > 6) {
		ddi_err(DER_WARN, NULL, "%s: Calculated pagetable "
		    "level (%d) is outside valid limits [2,6]",
		    immu->immu_name, nlevels);
		return (DDI_FAILURE);
	}

	ddi_err(DER_LOG, NULL, "Calculated pagetable "
	    "level (%d), agaw = %d", nlevels, agaw);

	immu->immu_dvma_nlevels = nlevels;
	immu->immu_dvma_agaw = agaw;

	return (DDI_SUCCESS);
}


static int
setup_regs(immu_t *immu)
{
	int error;

	ASSERT(immu);
	ASSERT(immu->immu_name);

	/*
	 * This lock may be acquired by the IOMMU interrupt handler
	 */
	mutex_init(&(immu->immu_regs_lock), NULL, MUTEX_DRIVER,
	    (void *)ipltospl(IMMU_INTR_IPL));

	/*
	 * map the register address space
	 */
	error = ddi_regs_map_setup(immu->immu_dip, 0,
	    (caddr_t *)&(immu->immu_regs_addr), (offset_t)0,
	    (offset_t)IMMU_REGSZ, &immu_regs_attr,
	    &(immu->immu_regs_handle));

	if (error == DDI_FAILURE) {
		ddi_err(DER_WARN, NULL, "%s: Intel IOMMU register map failed",
		    immu->immu_name);
		mutex_destroy(&(immu->immu_regs_lock));
		return (DDI_FAILURE);
	}

	/*
	 * read the capability and extended capability registers
	 */
	immu->immu_regs_cap = get_reg64(immu, IMMU_REG_CAP);
	immu->immu_regs_excap = get_reg64(immu, IMMU_REG_EXCAP);

	/*
	 * if the hardware access is non-coherent, we need clflush
	 */
	if (IMMU_ECAP_GET_C(immu->immu_regs_excap)) {
		immu->immu_dvma_coherent = B_TRUE;
	} else {
		immu->immu_dvma_coherent = B_FALSE;
		if (!(x86_feature & X86_CLFSH)) {
			ddi_err(DER_WARN, NULL,
			    "immu unit %s can't be enabled due to "
			    "missing clflush functionality", immu->immu_name);
			ddi_regs_map_free(&(immu->immu_regs_handle));
			mutex_destroy(&(immu->immu_regs_lock));
			return (DDI_FAILURE);
		}
	}

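	/*
	 * The TM and SNP bits in DMA page table entries may only be set
	 * when the hardware advertises the corresponding capabilities
	 * (see immu_regs_is_TM_reserved() and immu_regs_is_SNP_reserved()
	 * below); record here whether they must be treated as reserved.
	 */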
	/* Setup SNP and TM reserved fields */
	immu->immu_SNP_reserved = immu_regs_is_SNP_reserved(immu);
	immu->immu_TM_reserved = immu_regs_is_TM_reserved(immu);

	/*
	 * Check for Mobile 4 series chipset
	 */
	if (immu_quirk_mobile4 == B_TRUE &&
	    !IMMU_CAP_GET_RWBF(immu->immu_regs_cap)) {
		ddi_err(DER_LOG, NULL,
		    "IMMU: Mobile 4 chipset quirk detected. "
		    "Force-setting RWBF");
		IMMU_CAP_SET_RWBF(immu->immu_regs_cap);
		ASSERT(IMMU_CAP_GET_RWBF(immu->immu_regs_cap));
	}

	/*
	 * retrieve the maximum number of domains
	 */
	immu->immu_max_domains = IMMU_CAP_ND(immu->immu_regs_cap);

	/*
	 * calculate the agaw
	 */
	if (set_agaw(immu) != DDI_SUCCESS) {
		ddi_regs_map_free(&(immu->immu_regs_handle));
		mutex_destroy(&(immu->immu_regs_lock));
		return (DDI_FAILURE);
	}
	immu->immu_regs_cmdval = 0;

	immu->immu_flushops = &immu_regs_flushops;

	return (DDI_SUCCESS);
}

/* ############### Functions exported ################## */

/*
 * immu_regs_setup()
 *       Set up mappings to an IMMU unit's registers
 *       so that they can be read/written
 */
void
immu_regs_setup(list_t *listp)
{
	int i;
	immu_t *immu;

	for (i = 0; i < IMMU_MAXSEG; i++) {
		immu = list_head(listp);
		for (; immu; immu = list_next(listp, immu)) {
			/* do your best, continue on error */
			if (setup_regs(immu) != DDI_SUCCESS) {
				immu->immu_regs_setup = B_FALSE;
			} else {
				immu->immu_regs_setup = B_TRUE;
			}
		}
	}
}

/*
 * immu_regs_resume()
 *       remap the register address space and restore register state
 *       after a suspend
 */
int
immu_regs_resume(immu_t *immu)
{
	int error;

	/*
	 * remap the register address space
	 */
	error = ddi_regs_map_setup(immu->immu_dip, 0,
	    (caddr_t *)&(immu->immu_regs_addr), (offset_t)0,
	    (offset_t)IMMU_REGSZ, &immu_regs_attr,
	    &(immu->immu_regs_handle));
	if (error != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	immu_regs_set_root_table(immu);

	immu_regs_intr_enable(immu, immu->immu_regs_intr_msi_addr,
	    immu->immu_regs_intr_msi_data, immu->immu_regs_intr_uaddr);

	(void) immu_intr_handler(immu);

	immu_regs_intrmap_enable(immu, immu->immu_intrmap_irta_reg);

	immu_regs_qinv_enable(immu, immu->immu_qinv_reg_value);

	return (error);
}

/*
 * immu_regs_suspend()
 */
void
immu_regs_suspend(immu_t *immu)
{

	immu->immu_intrmap_running = B_FALSE;

	/* Finally, unmap the regs */
	ddi_regs_map_free(&(immu->immu_regs_handle));
}

/*
 * immu_regs_startup()
 *	set an IMMU unit's registers to start up the unit
 */
void
immu_regs_startup(immu_t *immu)
{
	uint32_t status;

	if (immu->immu_regs_setup == B_FALSE) {
		return;
	}

	ASSERT(immu->immu_regs_running == B_FALSE);

	ASSERT(MUTEX_HELD(&(immu->immu_lock)));

	mutex_enter(&(immu->immu_regs_lock));
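	/*
	 * Set the translation enable (TE) bit in the global command
	 * register and wait for the hardware to report TES in the global
	 * status register before declaring the unit running.
	 */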
	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
	    immu->immu_regs_cmdval | IMMU_GCMD_TE);
	wait_completion(immu, IMMU_REG_GLOBAL_STS,
	    get_reg32, (status & IMMU_GSTS_TES), status);
	immu->immu_regs_cmdval |= IMMU_GCMD_TE;
	immu->immu_regs_running = B_TRUE;
	mutex_exit(&(immu->immu_regs_lock));

	ddi_err(DER_NOTE, NULL, "IMMU %s running", immu->immu_name);
}

/*
 * immu_regs_shutdown()
 *	shutdown a unit
 */
void
immu_regs_shutdown(immu_t *immu)
{
	uint32_t status;

	if (immu->immu_regs_running == B_FALSE) {
		return;
	}

	ASSERT(immu->immu_regs_setup == B_TRUE);

	ASSERT(MUTEX_HELD(&(immu->immu_lock)));

	mutex_enter(&(immu->immu_regs_lock));
	immu->immu_regs_cmdval &= ~IMMU_GCMD_TE;
	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
	    immu->immu_regs_cmdval);
	wait_completion(immu, IMMU_REG_GLOBAL_STS,
	    get_reg32, !(status & IMMU_GSTS_TES), status);
	immu->immu_regs_running = B_FALSE;
	mutex_exit(&(immu->immu_regs_lock));

	ddi_err(DER_NOTE, NULL, "IOMMU %s stopped", immu->immu_name);
}

/*
 * immu_regs_intr_enable()
 *        Program an IMMU unit's registers to set up the unit's
 *        fault-event interrupt
 */
void
immu_regs_intr_enable(immu_t *immu, uint32_t msi_addr, uint32_t msi_data,
    uint32_t uaddr)
{
	mutex_enter(&(immu->immu_regs_lock));
	immu->immu_regs_intr_msi_addr = msi_addr;
	immu->immu_regs_intr_uaddr = uaddr;
	immu->immu_regs_intr_msi_data = msi_data;
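	/*
	 * Program the fault event address/data registers with the MSI
	 * address and data for the fault interrupt, then write 0 to the
	 * fault event control register to clear the interrupt mask so
	 * fault events can be delivered.
	 */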
	put_reg32(immu, IMMU_REG_FEVNT_ADDR, msi_addr);
	put_reg32(immu, IMMU_REG_FEVNT_UADDR, uaddr);
	put_reg32(immu, IMMU_REG_FEVNT_DATA, msi_data);
	put_reg32(immu, IMMU_REG_FEVNT_CON, 0);
	mutex_exit(&(immu->immu_regs_lock));
}

/*
 * immu_regs_passthru_supported()
 *       Returns B_TRUE if passthru is supported
 */
boolean_t
immu_regs_passthru_supported(immu_t *immu)
{
	if (IMMU_ECAP_GET_PT(immu->immu_regs_excap)) {
		return (B_TRUE);
	}

	ddi_err(DER_WARN, NULL, "Passthru not supported");
	return (B_FALSE);
}

/*
 * immu_regs_is_TM_reserved()
 *       Returns B_TRUE if TM field is reserved
 */
boolean_t
immu_regs_is_TM_reserved(immu_t *immu)
{
	if (IMMU_ECAP_GET_DI(immu->immu_regs_excap) ||
	    IMMU_ECAP_GET_CH(immu->immu_regs_excap)) {
		return (B_FALSE);
	}
	return (B_TRUE);
}

/*
 * immu_regs_is_SNP_reserved()
 *       Returns B_TRUE if SNP field is reserved
 */
boolean_t
immu_regs_is_SNP_reserved(immu_t *immu)
{

	return (IMMU_ECAP_GET_SC(immu->immu_regs_excap) ? B_FALSE : B_TRUE);
}

/*
 * immu_regs_wbf_flush()
 *     If required and supported, write to the IMMU
 *     unit's registers to flush the DMA write buffer(s)
 */
void
immu_regs_wbf_flush(immu_t *immu)
{
	uint32_t status;

	if (!IMMU_CAP_GET_RWBF(immu->immu_regs_cap)) {
		return;
	}

	mutex_enter(&(immu->immu_regs_lock));
	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
	    immu->immu_regs_cmdval | IMMU_GCMD_WBF);
	wait_completion(immu, IMMU_REG_GLOBAL_STS,
	    get_reg32, (!(status & IMMU_GSTS_WBFS)), status);
	mutex_exit(&(immu->immu_regs_lock));
}

/*
 * immu_regs_cpu_flush()
 * 	flush the CPU cache lines after CPU memory writes, so
 *      the IOMMU can see the writes
 */
void
immu_regs_cpu_flush(immu_t *immu, caddr_t addr, uint_t size)
{
	uint64_t i;

	ASSERT(immu);

	if (immu->immu_dvma_coherent == B_TRUE)
		return;

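	/*
	 * clflush each cache line covering [addr, addr + size), then issue
	 * an mfence so the flushes are globally visible before the IOMMU
	 * reads the memory.
	 */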
	for (i = 0; i < size; i += x86_clflush_size, addr += x86_clflush_size) {
		clflush_insn(addr);
	}

	mfence_insn();
}

/*
 * context_flush()
 *   flush the context cache
 */
static void
context_flush(immu_t *immu, uint8_t function_mask,
    uint16_t sid, uint_t did, immu_context_inv_t type)
{
	uint64_t command = 0;
	uint64_t status;

	ASSERT(immu);
	ASSERT(rw_write_held(&(immu->immu_ctx_rwlock)));

	/*
	 * define the command
	 */
	switch (type) {
	case CONTEXT_FSI:
		command |= CCMD_INV_ICC | CCMD_INV_DEVICE
		    | CCMD_INV_DID(did)
		    | CCMD_INV_SID(sid) | CCMD_INV_FM(function_mask);
		break;
	case CONTEXT_DSI:
		ASSERT(function_mask == 0);
		ASSERT(sid == 0);
		command |= CCMD_INV_ICC | CCMD_INV_DOMAIN
		    | CCMD_INV_DID(did);
		break;
	case CONTEXT_GLOBAL:
		ASSERT(function_mask == 0);
		ASSERT(sid == 0);
		ASSERT(did == 0);
		command |= CCMD_INV_ICC | CCMD_INV_GLOBAL;
		break;
	default:
		ddi_err(DER_PANIC, NULL,
		    "%s: incorrect context cache flush type",
		    immu->immu_name);
		/*NOTREACHED*/
	}

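	/*
	 * Submit the command: the ICC bit must not already be set (no
	 * invalidation may be in flight), and hardware clears ICC in the
	 * context command register when the flush completes.
	 */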
	mutex_enter(&(immu->immu_regs_lock));
	ASSERT(!(get_reg64(immu, IMMU_REG_CONTEXT_CMD) & CCMD_INV_ICC));
	put_reg64(immu, IMMU_REG_CONTEXT_CMD, command);
	wait_completion(immu, IMMU_REG_CONTEXT_CMD, get_reg64,
	    (!(status & CCMD_INV_ICC)), status);
	mutex_exit(&(immu->immu_regs_lock));
}

void
immu_regs_context_fsi(immu_t *immu, uint8_t function_mask,
    uint16_t source_id, uint_t domain_id)
{
	context_flush(immu, function_mask, source_id, domain_id, CONTEXT_FSI);
}

void
immu_regs_context_dsi(immu_t *immu, uint_t domain_id)
{
	context_flush(immu, 0, 0, domain_id, CONTEXT_DSI);
}

void
immu_regs_context_gbl(immu_t *immu)
{
	context_flush(immu, 0, 0, 0, CONTEXT_GLOBAL);
}

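/*
 * immu_regs_set_root_table()
 *	program the root context table address and latch it with the
 *	set root table pointer (SRTP) command
 */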
void
immu_regs_set_root_table(immu_t *immu)
{
	uint32_t status;

	mutex_enter(&(immu->immu_regs_lock));
	put_reg64(immu, IMMU_REG_ROOTENTRY,
	    immu->immu_ctx_root->hwpg_paddr);
	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
	    immu->immu_regs_cmdval | IMMU_GCMD_SRTP);
	wait_completion(immu, IMMU_REG_GLOBAL_STS,
	    get_reg32, (status & IMMU_GSTS_RTPS), status);
	mutex_exit(&(immu->immu_regs_lock));
}


/* enable queued invalidation interface */
void
immu_regs_qinv_enable(immu_t *immu, uint64_t qinv_reg_value)
{
	uint32_t status;

	if (immu_qinv_enable == B_FALSE)
		return;

	mutex_enter(&immu->immu_regs_lock);
	immu->immu_qinv_reg_value = qinv_reg_value;
	/* Initialize the Invalidation Queue Tail register to zero */
	put_reg64(immu, IMMU_REG_INVAL_QT, 0);

	/* set invalidation queue base address register */
	put_reg64(immu, IMMU_REG_INVAL_QAR, qinv_reg_value);

	/* enable queued invalidation interface */
	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
	    immu->immu_regs_cmdval | IMMU_GCMD_QIE);
	wait_completion(immu, IMMU_REG_GLOBAL_STS,
	    get_reg32, (status & IMMU_GSTS_QIES), status);
	mutex_exit(&immu->immu_regs_lock);

	immu->immu_regs_cmdval |= IMMU_GCMD_QIE;
	immu->immu_qinv_running = B_TRUE;
}

/* enable interrupt remapping hardware unit */
void
immu_regs_intrmap_enable(immu_t *immu, uint64_t irta_reg)
{
	uint32_t status;

	if (immu_intrmap_enable == B_FALSE)
		return;

	/* set interrupt remap table pointer */
	mutex_enter(&(immu->immu_regs_lock));
	immu->immu_intrmap_irta_reg = irta_reg;
	put_reg64(immu, IMMU_REG_IRTAR, irta_reg);
	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
	    immu->immu_regs_cmdval | IMMU_GCMD_SIRTP);
	wait_completion(immu, IMMU_REG_GLOBAL_STS,
	    get_reg32, (status & IMMU_GSTS_IRTPS), status);
	mutex_exit(&(immu->immu_regs_lock));

	/* global flush intr entry cache */
	if (immu_qinv_enable == B_TRUE)
		immu_qinv_intr_global(immu);

	/* enable interrupt remapping */
	mutex_enter(&(immu->immu_regs_lock));
	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
	    immu->immu_regs_cmdval | IMMU_GCMD_IRE);
	wait_completion(immu, IMMU_REG_GLOBAL_STS,
	    get_reg32, (status & IMMU_GSTS_IRES),
	    status);
	immu->immu_regs_cmdval |= IMMU_GCMD_IRE;

	/* set compatible mode */
	put_reg32(immu, IMMU_REG_GLOBAL_CMD,
	    immu->immu_regs_cmdval | IMMU_GCMD_CFI);
	wait_completion(immu, IMMU_REG_GLOBAL_STS,
	    get_reg32, (status & IMMU_GSTS_CFIS),
	    status);
	immu->immu_regs_cmdval |= IMMU_GCMD_CFI;
	mutex_exit(&(immu->immu_regs_lock));

	immu->immu_intrmap_running = B_TRUE;
}

uint64_t
immu_regs_get64(immu_t *immu, uint_t reg)
{
	return (get_reg64(immu, reg));
}

uint32_t
immu_regs_get32(immu_t *immu, uint_t reg)
{
	return (get_reg32(immu, reg));
}

void
immu_regs_put64(immu_t *immu, uint_t reg, uint64_t val)
{
	put_reg64(immu, reg, val);
}

void
immu_regs_put32(immu_t *immu, uint_t reg, uint32_t val)
{
	put_reg32(immu, reg, val);
}
847