xref: /titanic_51/usr/src/uts/i86pc/io/immu_intrmap.c (revision 1e49577a7fcde812700ded04431b49d67cc57d6d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
24  */
25 
26 /*
27  * Copyright (c) 2009, Intel Corporation.
28  * All rights reserved.
29  */
30 
31 
32 #include <sys/apic.h>
33 #include <vm/hat_i86.h>
34 #include <sys/sysmacros.h>
35 #include <sys/smp_impldefs.h>
36 #include <sys/immu.h>
37 
38 
/*
 * Per-interrupt private state kept for each remapped interrupt:
 * the owning IOMMU unit, the index of its interrupt remapping
 * table entry (IRTE), and the encoded SID/SVT/SQ source-id
 * verification word placed in the high half of the IRTE.
 */
typedef struct intrmap_private {
	immu_t		*ir_immu;
	uint16_t	ir_idx;
	uint32_t	ir_sid_svt_sq;
} intrmap_private_t;

/* view the opaque handle passed by the PSM as the private state above */
#define	INTRMAP_PRIVATE(intrmap) ((intrmap_private_t *)intrmap)
46 
/* interrupt remapping table entry (IRTE), 128 bits */
typedef struct intrmap_rte {
	uint64_t	lo;
	uint64_t	hi;
} intrmap_rte_t;

/* high 64 bits carry the source-id verification fields (SID/SVT/SQ) */
#define	IRTE_HIGH(sid_svt_sq) (sid_svt_sq)
/*
 * low 64 bits: destination id, vector, delivery mode, trigger mode,
 * redirection hint, destination mode, fault-processing disable and
 * the present bit.
 */
#define	IRTE_LOW(dst, vector, dlm, tm, rh, dm, fpd, p)	\
	    (((uint64_t)(dst) << 32) |  \
	    ((uint64_t)(vector) << 16) | \
	    ((uint64_t)(dlm) << 5) | \
	    ((uint64_t)(tm) << 4) | \
	    ((uint64_t)(rh) << 3) | \
	    ((uint64_t)(dm) << 2) | \
	    ((uint64_t)(fpd) << 1) | \
	    (p))
63 
/* source validation types (SVT field of the IRTE) */
typedef enum {
	SVT_NO_VERIFY = 0, 	/* no verification */
	SVT_ALL_VERIFY,		/* using sid and sq to verify */
	SVT_BUS_VERIFY,		/* verify #startbus and #endbus */
	SVT_RSVD
} intrmap_svt_t;

/* source-id qualifiers (SQ field of the IRTE) */
typedef enum {
	SQ_VERIFY_ALL = 0,	/* verify all 16 bits */
	SQ_VERIFY_IGR_1,	/* ignore bit 3 */
	SQ_VERIFY_IGR_2,	/* ignore bit 2-3 */
	SQ_VERIFY_IGR_3		/* ignore bit 1-3 */
} intrmap_sq_t;
77 
/*
 * S field of the Interrupt Remapping Table Address Register
 * the size of the interrupt remapping table is 1 << (immu_intrmap_irta_s + 1)
 */
static uint_t intrmap_irta_s = INTRMAP_MAX_IRTA_SIZE;

/*
 * If true, arrange to suppress broadcast EOI by setting edge-triggered mode
 * even for level-triggered interrupts in the interrupt-remapping engine.
 * If false, broadcast EOI can still be suppressed if the CPU supports the
 * APIC_SVR_SUPPRESS_BROADCAST_EOI bit.  In both cases, the IOAPIC is still
 * programmed with the correct trigger mode, and pcplusmp must send an EOI
 * to the IOAPIC by writing to the IOAPIC's EOI register to make up for the
 * missing broadcast EOI.
 */
static int intrmap_suppress_brdcst_eoi = 0;

/*
 * Whether to verify the source id of interrupt requests
 * (see get_sid()); disabled by default.
 */
static int intrmap_enable_sid_verify = 0;
99 
/* fault types for DVMA remapping */
static char *immu_dvma_faults[] = {
	"Reserved",
	"The present field in root-entry is Clear",
	"The present field in context-entry is Clear",
	"Hardware detected invalid programming of a context-entry",
	"The DMA request attempted to access an address beyond max support",
	"The Write field in a page-table entry is Clear when DMA write",
	"The Read field in a page-table entry is Clear when DMA read",
	"Access the next level page table resulted in error",
	"Access the root-entry table resulted in error",
	"Access the context-entry table resulted in error",
	"Reserved field not initialized to zero in a present root-entry",
	"Reserved field not initialized to zero in a present context-entry",
	"Reserved field not initialized to zero in a present page-table entry",
	"DMA blocked due to the Translation Type field in context-entry",
	"Incorrect fault event reason number",
};
/*
 * Largest valid index into immu_dvma_faults; out-of-range fault reasons
 * are clamped to the last ("Incorrect fault event reason number") entry.
 * Fully parenthesized so the macro expands safely in any expression
 * (the original `x / y - 1` form broke under e.g. `2 * DVMA_MAX_FAULTS`).
 */
#define	DVMA_MAX_FAULTS \
	((sizeof (immu_dvma_faults) / (sizeof (char *))) - 1)
119 
/* fault types for interrupt remapping */
static char *immu_intrmap_faults[] = {
	"reserved field set in IRTE",
	"interrupt_index exceed the intr-remap table size",
	"present field in IRTE is clear",
	"hardware access intr-remap table address resulted in error",
	"reserved field set in IRTE, include various conditional",
	"hardware blocked an interrupt request in Compatibility format",
	"remappable interrupt request blocked due to verification failure"
};
/*
 * Largest valid index into immu_intrmap_faults; out-of-range reasons are
 * clamped to the last entry.  Fully parenthesized so the macro expands
 * safely in any expression (the original `x / y - 1` form was not).
 */
#define	INTRMAP_MAX_FAULTS \
	((sizeof (immu_intrmap_faults) / (sizeof (char *))) - 1)
132 
/* Function prototypes */
static int immu_intrmap_init(int apic_mode);
static void immu_intrmap_switchon(int suppress_brdcst_eoi);
static void immu_intrmap_alloc(void **intrmap_private_tbl, dev_info_t *dip,
    uint16_t type, int count, uchar_t ioapic_index);
static void immu_intrmap_map(void *intrmap_private, void *intrmap_data,
    uint16_t type, int count);
static void immu_intrmap_free(void **intrmap_privatep);
static void immu_intrmap_rdt(void *intrmap_private, ioapic_rdt_t *irdt);
static void immu_intrmap_msi(void *intrmap_private, msi_regs_t *mregs);

/*
 * Ops vector published to the PSM through psm_vt_ops (see
 * immu_intrmap_setup()); entries must remain in struct member order.
 */
static struct apic_intrmap_ops intrmap_ops = {
	immu_intrmap_init,
	immu_intrmap_switchon,
	immu_intrmap_alloc,
	immu_intrmap_map,
	immu_intrmap_free,
	immu_intrmap_rdt,
	immu_intrmap_msi,
};

/* apic mode, APIC/X2APIC */
static int intrmap_apic_mode = LOCAL_APIC;
156 
157 
158 /*
159  * helper functions
160  */
161 static uint_t
162 bitset_find_free(bitset_t *b, uint_t post)
163 {
164 	uint_t	i;
165 	uint_t	cap = bitset_capacity(b);
166 
167 	if (post == cap)
168 		post = 0;
169 
170 	ASSERT(post < cap);
171 
172 	for (i = post; i < cap; i++) {
173 		if (!bitset_in_set(b, i))
174 			return (i);
175 	}
176 
177 	for (i = 0; i < post; i++) {
178 		if (!bitset_in_set(b, i))
179 			return (i);
180 	}
181 
182 	return (INTRMAP_IDX_FULL);	/* no free index */
183 }
184 
/*
 * helper function to find 'count' contiguous free
 * interrupt remapping table entries; returns the first index of the
 * run, or INTRMAP_IDX_FULL if no such run exists
 */
static uint_t
bitset_find_multi_free(bitset_t *b, uint_t post, uint_t count)
{
	uint_t  i, j;
	uint_t	cap = bitset_capacity(b);

	/* a FULL hint means the bitset is already exhausted */
	if (post == INTRMAP_IDX_FULL) {
		return (INTRMAP_IDX_FULL);
	}

	if (count > cap)
		return (INTRMAP_IDX_FULL);

	ASSERT(post < cap);

	/* scan forward from the hint toward the end of the bitset */
	for (i = post; (i + count) <= cap; i++) {
		for (j = 0; j < count; j++) {
			if (bitset_in_set(b, (i + j))) {
				/* jump past the set bit (outer loop adds 1) */
				i = i + j;
				break;
			}
			if (j == count - 1)
				return (i);
		}
	}

	/* then wrap and scan the region before the hint */
	for (i = 0; (i < post) && ((i + count) <= cap); i++) {
		for (j = 0; j < count; j++) {
			if (bitset_in_set(b, (i + j))) {
				i = i + j;
				break;
			}
			if (j == count - 1)
				return (i);
		}
	}

	return (INTRMAP_IDX_FULL);  		/* no free index */
}
228 
/*
 * Allocate one interrupt remapping table entry.  Returns its index, or
 * INTRMAP_IDX_FULL when the table is full: in that case non-x2apic
 * callers fall back to compatibility-format interrupts, while in
 * x2apic mode (where compatibility format is not allowed) we wait and
 * retry until an entry frees up.
 */
static int
alloc_tbl_entry(intrmap_t *intrmap)
{
	uint32_t idx;

	for (;;) {
		mutex_enter(&intrmap->intrmap_lock);
		idx = intrmap->intrmap_free;
		if (idx != INTRMAP_IDX_FULL) {
			/* claim the cached free slot, recompute the hint */
			bitset_add(&intrmap->intrmap_map, idx);
			intrmap->intrmap_free =
			    bitset_find_free(&intrmap->intrmap_map, idx + 1);
			mutex_exit(&intrmap->intrmap_lock);
			break;
		}

		/* no free intr entry, use compatible format intr */
		mutex_exit(&intrmap->intrmap_lock);

		if (intrmap_apic_mode != LOCAL_X2APIC) {
			break;
		}

		/*
		 * x2apic mode not allowed compatible
		 * interrupt
		 */
		delay(IMMU_ALLOC_RESOURCE_DELAY);
	}

	return (idx);
}
262 
/*
 * Allocate 'cnt' contiguous interrupt remapping table entries.
 * Returns the first index of the run, or INTRMAP_IDX_FULL when no such
 * run exists; in x2apic mode (no compatibility-format fallback) we
 * wait and retry instead of failing.
 */
static int
alloc_tbl_multi_entries(intrmap_t *intrmap, uint_t cnt)
{
	uint_t idx, pos, i;

	for (; ; ) {
		mutex_enter(&intrmap->intrmap_lock);
		pos = intrmap->intrmap_free;
		idx = bitset_find_multi_free(&intrmap->intrmap_map, pos, cnt);

		if (idx != INTRMAP_IDX_FULL) {
			/* recompute the free hint if this run consumed it */
			if (idx <= pos && pos < (idx + cnt)) {
				intrmap->intrmap_free = bitset_find_free(
				    &intrmap->intrmap_map, idx + cnt);
			}
			for (i = 0; i < cnt; i++) {
				bitset_add(&intrmap->intrmap_map, idx + i);
			}
			mutex_exit(&intrmap->intrmap_lock);
			break;
		}

		mutex_exit(&intrmap->intrmap_lock);

		if (intrmap_apic_mode != LOCAL_X2APIC) {
			break;
		}

		/* x2apic mode not allowed compatible interrupt */
		delay(IMMU_ALLOC_RESOURCE_DELAY);
	}

	return (idx);
}
298 
/*
 * init interrupt remapping table for one IOMMU unit: allocate the
 * page-aligned, uncached table via DDI DMA interfaces, record its
 * physical address for the hardware, and set up the allocation bitmap.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
init_unit(immu_t *immu)
{
	intrmap_t *intrmap;
	size_t size;

	/* DMA attributes for the table: page aligned, 64-bit addressable */
	ddi_dma_attr_t intrmap_dma_attr = {
		DMA_ATTR_V0,
		0U,			/* lowest usable address */
		0xffffffffffffffffULL,	/* highest usable address */
		0xffffffffU,		/* maximum DMAable byte count */
		MMU_PAGESIZE,	/* page aligned */
		0x1,
		0x1,
		0xffffffffU,
		0xffffffffffffffffULL,
		1,
		4,
		0
	};

	ddi_device_acc_attr_t intrmap_acc_attr = {
		DDI_DEVICE_ATTR_V0,
		DDI_NEVERSWAP_ACC,
		DDI_STRICTORDER_ACC
	};

	/*
	 * Using interrupt remapping implies using the queue
	 * invalidation interface. According to Intel,
	 * hardware that supports interrupt remapping should
	 * also support QI.
	 */
	ASSERT(IMMU_ECAP_GET_QI(immu->immu_regs_excap));

	/* x2apic mode requires extended interrupt mode (EIM) support */
	if (intrmap_apic_mode == LOCAL_X2APIC) {
		if (!IMMU_ECAP_GET_EIM(immu->immu_regs_excap)) {
			return (DDI_FAILURE);
		}
	}

	/* clamp the tunable to the architectural maximum */
	if (intrmap_irta_s > INTRMAP_MAX_IRTA_SIZE) {
		intrmap_irta_s = INTRMAP_MAX_IRTA_SIZE;
	}

	intrmap =  kmem_zalloc(sizeof (intrmap_t), KM_SLEEP);

	if (ddi_dma_alloc_handle(immu->immu_dip,
	    &intrmap_dma_attr,
	    DDI_DMA_SLEEP,
	    NULL,
	    &(intrmap->intrmap_dma_hdl)) != DDI_SUCCESS) {
		kmem_free(intrmap, sizeof (intrmap_t));
		return (DDI_FAILURE);
	}

	/* table holds 1 << (intrmap_irta_s + 1) entries */
	intrmap->intrmap_size = 1 << (intrmap_irta_s + 1);
	size = intrmap->intrmap_size * INTRMAP_RTE_SIZE;
	if (ddi_dma_mem_alloc(intrmap->intrmap_dma_hdl,
	    size,
	    &intrmap_acc_attr,
	    DDI_DMA_CONSISTENT | IOMEM_DATA_UNCACHED,
	    DDI_DMA_SLEEP,
	    NULL,
	    &(intrmap->intrmap_vaddr),
	    &size,
	    &(intrmap->intrmap_acc_hdl)) != DDI_SUCCESS) {
		ddi_dma_free_handle(&(intrmap->intrmap_dma_hdl));
		kmem_free(intrmap, sizeof (intrmap_t));
		return (DDI_FAILURE);
	}

	ASSERT(!((uintptr_t)intrmap->intrmap_vaddr & MMU_PAGEOFFSET));
	bzero(intrmap->intrmap_vaddr, size);
	/* the hardware is handed the table's physical address */
	intrmap->intrmap_paddr = pfn_to_pa(
	    hat_getpfnum(kas.a_hat, intrmap->intrmap_vaddr));

	mutex_init(&(intrmap->intrmap_lock), NULL, MUTEX_DRIVER, NULL);
	bitset_init(&intrmap->intrmap_map);
	bitset_resize(&intrmap->intrmap_map, intrmap->intrmap_size);
	intrmap->intrmap_free = 0;

	immu->immu_intrmap = intrmap;

	return (DDI_SUCCESS);
}
386 
387 static immu_t *
388 get_immu(dev_info_t *dip, uint16_t type, uchar_t ioapic_index)
389 {
390 	immu_t	*immu = NULL;
391 
392 	if (!DDI_INTR_IS_MSI_OR_MSIX(type)) {
393 		immu = immu_dmar_ioapic_immu(ioapic_index);
394 	} else {
395 		if (dip != NULL)
396 			immu = immu_dmar_get_immu(dip);
397 	}
398 
399 	return (immu);
400 }
401 
402 static int
403 get_top_pcibridge(dev_info_t *dip, void *arg)
404 {
405 	dev_info_t **topdipp = arg;
406 	immu_devi_t *immu_devi;
407 
408 	mutex_enter(&(DEVI(dip)->devi_lock));
409 	immu_devi = DEVI(dip)->devi_iommu;
410 	mutex_exit(&(DEVI(dip)->devi_lock));
411 
412 	if (immu_devi == NULL || immu_devi->imd_pcib_type == IMMU_PCIB_BAD ||
413 	    immu_devi->imd_pcib_type == IMMU_PCIB_ENDPOINT) {
414 		return (DDI_WALK_CONTINUE);
415 	}
416 
417 	*topdipp = dip;
418 
419 	return (DDI_WALK_CONTINUE);
420 }
421 
422 static dev_info_t *
423 intrmap_top_pcibridge(dev_info_t *rdip)
424 {
425 	dev_info_t *top_pcibridge = NULL;
426 
427 	if (immu_walk_ancestor(rdip, NULL, get_top_pcibridge,
428 	    &top_pcibridge, NULL, 0) != DDI_SUCCESS) {
429 		return (NULL);
430 	}
431 
432 	return (top_pcibridge);
433 }
434 
435 /* function to get interrupt request source id */
436 static uint32_t
437 get_sid(dev_info_t *dip, uint16_t type, uchar_t ioapic_index)
438 {
439 	dev_info_t	*pdip;
440 	immu_devi_t	*immu_devi;
441 	uint16_t	sid;
442 	uchar_t		svt, sq;
443 
444 	if (!intrmap_enable_sid_verify) {
445 		return (0);
446 	}
447 
448 	if (!DDI_INTR_IS_MSI_OR_MSIX(type)) {
449 		/* for interrupt through I/O APIC */
450 		sid = immu_dmar_ioapic_sid(ioapic_index);
451 		svt = SVT_ALL_VERIFY;
452 		sq = SQ_VERIFY_ALL;
453 	} else {
454 		/* MSI/MSI-X interrupt */
455 		ASSERT(dip);
456 		pdip = intrmap_top_pcibridge(dip);
457 		ASSERT(pdip);
458 		immu_devi = DEVI(pdip)->devi_iommu;
459 		ASSERT(immu_devi);
460 		if (immu_devi->imd_pcib_type == IMMU_PCIB_PCIE_PCI) {
461 			/* device behind pcie to pci bridge */
462 			sid = (immu_devi->imd_bus << 8) | immu_devi->imd_sec;
463 			svt = SVT_BUS_VERIFY;
464 			sq = SQ_VERIFY_ALL;
465 		} else {
466 			/* pcie device or device behind pci to pci bridge */
467 			sid = (immu_devi->imd_bus << 8) |
468 			    immu_devi->imd_devfunc;
469 			svt = SVT_ALL_VERIFY;
470 			sq = SQ_VERIFY_ALL;
471 		}
472 	}
473 
474 	return (sid | (svt << 18) | (sq << 16));
475 }
476 
477 static void
478 intrmap_enable(immu_t *immu)
479 {
480 	intrmap_t *intrmap;
481 	uint64_t irta_reg;
482 
483 	intrmap = immu->immu_intrmap;
484 
485 	irta_reg = intrmap->intrmap_paddr | intrmap_irta_s;
486 	if (intrmap_apic_mode == LOCAL_X2APIC) {
487 		irta_reg |= (0x1 << 11);
488 	}
489 
490 	immu_regs_intrmap_enable(immu, irta_reg);
491 }
492 
/* ####################################################################### */

/*
 * immu_intr_handler()
 * 	the fault event handler for a single immu unit: drains and logs
 * 	all pending primary fault records, clears them, and returns
 * 	DDI_INTR_CLAIMED, or DDI_INTR_UNCLAIMED if no fault was pending
 */
int
immu_intr_handler(immu_t *immu)
{
	uint32_t status;
	int index, fault_reg_offset;
	int max_fault_index;
	boolean_t found_fault;
	dev_info_t *idip;

	mutex_enter(&(immu->immu_intr_lock));
	mutex_enter(&(immu->immu_regs_lock));

	/* read the fault status */
	status = immu_regs_get32(immu, IMMU_REG_FAULT_STS);

	idip = immu->immu_dip;
	ASSERT(idip);

	/* check if we have a pending fault for this immu unit */
	if ((status & IMMU_FAULT_STS_PPF) == 0) {
		mutex_exit(&(immu->immu_regs_lock));
		mutex_exit(&(immu->immu_intr_lock));
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * handle all primary pending faults
	 */
	index = IMMU_FAULT_GET_INDEX(status);
	max_fault_index =  IMMU_CAP_GET_NFR(immu->immu_regs_cap) - 1;
	fault_reg_offset = IMMU_CAP_GET_FRO(immu->immu_regs_cap);

	found_fault = B_FALSE;
	_NOTE(CONSTCOND)
	while (1) {
		uint64_t val;
		uint8_t fault_reason;
		uint8_t fault_type;
		uint16_t sid;
		uint64_t pg_addr;
		uint64_t idx;

		/* read the higher 64bits (each fault record is 16 bytes) */
		val = immu_regs_get64(immu, fault_reg_offset + index * 16 + 8);

		/* check if this fault register has pending fault */
		if (!IMMU_FRR_GET_F(val)) {
			break;
		}

		found_fault = B_TRUE;

		/* get the fault reason, fault type and sid */
		fault_reason = IMMU_FRR_GET_FR(val);
		fault_type = IMMU_FRR_GET_FT(val);
		sid = IMMU_FRR_GET_SID(val);

		/* read the first 64bits */
		val = immu_regs_get64(immu, fault_reg_offset + index * 16);
		pg_addr = val & IMMU_PAGEMASK;
		idx = val >> 48;

		/* clear the fault by writing 1 to the record's F bit */
		immu_regs_put32(immu, fault_reg_offset + index * 16 + 12,
		    (((uint32_t)1) << 31));

		/*
		 * report the fault info; reasons below 0x20 are DMA
		 * remapping faults, 0x20-0x26 are interrupt remapping
		 * faults, anything higher is unknown
		 */
		if (fault_reason < 0x20) {
			/* immu-remapping fault */
			ddi_err(DER_WARN, idip,
			    "generated a fault event when translating DMA %s\n"
			    "\t on address 0x%" PRIx64 " for PCI(%d, %d, %d), "
			    "the reason is:\n\t %s",
			    fault_type ? "read" : "write", pg_addr,
			    (sid >> 8) & 0xff, (sid >> 3) & 0x1f, sid & 0x7,
			    immu_dvma_faults[MIN(fault_reason,
			    DVMA_MAX_FAULTS)]);
		} else if (fault_reason < 0x27) {
			/* intr-remapping fault */
			ddi_err(DER_WARN, idip,
			    "generated a fault event when translating "
			    "interrupt request\n"
			    "\t on index 0x%" PRIx64 " for PCI(%d, %d, %d), "
			    "the reason is:\n\t %s",
			    idx,
			    (sid >> 8) & 0xff, (sid >> 3) & 0x1f, sid & 0x7,
			    immu_intrmap_faults[MIN((fault_reason - 0x20),
			    INTRMAP_MAX_FAULTS)]);
		} else {
			ddi_err(DER_WARN, idip, "Unknown fault reason: 0x%x",
			    fault_reason);
		}

		/* the fault records form a circular buffer; wrap around */
		index++;
		if (index > max_fault_index)
			index = 0;
	}

	/* Clear the fault */
	if (!found_fault) {
		ddi_err(DER_MODE, idip,
		    "Fault register set but no fault present");
	}
	immu_regs_put32(immu, IMMU_REG_FAULT_STS, 1);
	mutex_exit(&(immu->immu_regs_lock));
	mutex_exit(&(immu->immu_intr_lock));
	return (DDI_INTR_CLAIMED);
}
607 /* ######################################################################### */
608 
609 /*
610  * Interrupt remap entry points
611  */
612 
/*
 * initialize interrupt remapping: set up a remapping table on every
 * running, IR-capable IOMMU unit.  Returns DDI_SUCCESS if remapping is
 * disabled or at least one unit initialized, DDI_FAILURE otherwise.
 */
static int
immu_intrmap_init(int apic_mode)
{
	immu_t *immu;
	int error = DDI_FAILURE;

	if (immu_intrmap_enable == B_FALSE) {
		return (DDI_SUCCESS);
	}

	intrmap_apic_mode = apic_mode;

	immu = list_head(&immu_list);
	for (; immu; immu = list_next(&immu_list, immu)) {
		if ((immu->immu_intrmap_running == B_TRUE) &&
		    IMMU_ECAP_GET_IR(immu->immu_regs_excap)) {
			/* one successful unit is enough to report success */
			if (init_unit(immu) == DDI_SUCCESS) {
				error = DDI_SUCCESS;
			}
		}
	}

	/*
	 * if all IOMMU units disable intr remapping,
	 * return FAILURE
	 */
	return (error);
}
642 
643 
644 
645 /* enable interrupt remapping */
646 static void
647 immu_intrmap_switchon(int suppress_brdcst_eoi)
648 {
649 	immu_t *immu;
650 
651 
652 	intrmap_suppress_brdcst_eoi = suppress_brdcst_eoi;
653 
654 	immu = list_head(&immu_list);
655 	for (; immu; immu = list_next(&immu_list, immu)) {
656 		if (immu->immu_intrmap_setup == B_TRUE) {
657 			intrmap_enable(immu);
658 		}
659 	}
660 }
661 
/*
 * alloc remapping entry (or a contiguous block of 'count' entries) for
 * the interrupt; on any failure the first slot is set to
 * INTRMAP_DISABLE so the interrupt falls back to compatibility format
 */
static void
immu_intrmap_alloc(void **intrmap_private_tbl, dev_info_t *dip,
    uint16_t type, int count, uchar_t ioapic_index)
{
	immu_t	*immu;
	intrmap_t *intrmap;
	uint32_t		idx, i;
	uint32_t		sid_svt_sq;
	intrmap_private_t	*intrmap_private;

	/* already disabled or already allocated: nothing to do */
	if (intrmap_private_tbl[0] == INTRMAP_DISABLE ||
	    intrmap_private_tbl[0] != NULL) {
		return;
	}

	intrmap_private_tbl[0] =
	    kmem_zalloc(sizeof (intrmap_private_t), KM_SLEEP);
	intrmap_private = INTRMAP_PRIVATE(intrmap_private_tbl[0]);

	immu = get_immu(dip, type, ioapic_index);
	if ((immu != NULL) && (immu->immu_intrmap_running == B_TRUE)) {
		intrmap_private->ir_immu = immu;
	} else {
		goto intrmap_disable;
	}

	intrmap = immu->immu_intrmap;

	/* multi-vector requests need contiguous table entries */
	if (count == 1) {
		idx = alloc_tbl_entry(intrmap);
	} else {
		idx = alloc_tbl_multi_entries(intrmap, count);
	}

	if (idx == INTRMAP_IDX_FULL) {
		goto intrmap_disable;
	}

	intrmap_private->ir_idx = idx;

	sid_svt_sq = intrmap_private->ir_sid_svt_sq =
	    get_sid(dip, type, ioapic_index);

	if (count == 1) {
		/* flush any cached copy of the (not-present) entry */
		if (IMMU_CAP_GET_CM(immu->immu_regs_cap)) {
			immu_qinv_intr_one_cache(immu, idx);
		} else {
			immu_regs_wbf_flush(immu);
		}
		return;
	}

	/* hand out one private struct per entry of the block */
	for (i = 1; i < count; i++) {
		intrmap_private_tbl[i] =
		    kmem_zalloc(sizeof (intrmap_private_t), KM_SLEEP);

		INTRMAP_PRIVATE(intrmap_private_tbl[i])->ir_immu = immu;
		INTRMAP_PRIVATE(intrmap_private_tbl[i])->ir_sid_svt_sq =
		    sid_svt_sq;
		INTRMAP_PRIVATE(intrmap_private_tbl[i])->ir_idx = idx + i;
	}

	if (IMMU_CAP_GET_CM(immu->immu_regs_cap)) {
		immu_qinv_intr_caches(immu, idx, count);
	} else {
		immu_regs_wbf_flush(immu);
	}

	return;

intrmap_disable:
	kmem_free(intrmap_private_tbl[0], sizeof (intrmap_private_t));
	intrmap_private_tbl[0] = INTRMAP_DISABLE;
}
737 
738 
739 /* remapping the interrupt */
740 static void
741 immu_intrmap_map(void *intrmap_private, void *intrmap_data, uint16_t type,
742     int count)
743 {
744 	immu_t	*immu;
745 	intrmap_t	*intrmap;
746 	ioapic_rdt_t	*irdt = (ioapic_rdt_t *)intrmap_data;
747 	msi_regs_t	*mregs = (msi_regs_t *)intrmap_data;
748 	intrmap_rte_t	irte;
749 	uint_t		idx, i;
750 	uint32_t	dst, sid_svt_sq;
751 	uchar_t		vector, dlm, tm, rh, dm;
752 
753 	if (intrmap_private == INTRMAP_DISABLE)
754 		return;
755 
756 	idx = INTRMAP_PRIVATE(intrmap_private)->ir_idx;
757 	immu = INTRMAP_PRIVATE(intrmap_private)->ir_immu;
758 	intrmap = immu->immu_intrmap;
759 	sid_svt_sq = INTRMAP_PRIVATE(intrmap_private)->ir_sid_svt_sq;
760 
761 	if (!DDI_INTR_IS_MSI_OR_MSIX(type)) {
762 		dm = RDT_DM(irdt->ir_lo);
763 		rh = 0;
764 		tm = RDT_TM(irdt->ir_lo);
765 		dlm = RDT_DLM(irdt->ir_lo);
766 		dst = irdt->ir_hi;
767 
768 		/*
769 		 * Mark the IRTE's TM as Edge to suppress broadcast EOI.
770 		 */
771 		if (intrmap_suppress_brdcst_eoi) {
772 			tm = TRIGGER_MODE_EDGE;
773 		}
774 
775 		vector = RDT_VECTOR(irdt->ir_lo);
776 	} else {
777 		dm = MSI_ADDR_DM_PHYSICAL;
778 		rh = MSI_ADDR_RH_FIXED;
779 		tm = TRIGGER_MODE_EDGE;
780 		dlm = 0;
781 		dst = mregs->mr_addr;
782 
783 		vector = mregs->mr_data & 0xff;
784 	}
785 
786 	if (intrmap_apic_mode == LOCAL_APIC)
787 		dst = (dst & 0xFF) << 8;
788 
789 	if (count == 1) {
790 		irte.lo = IRTE_LOW(dst, vector, dlm, tm, rh, dm, 0, 1);
791 		irte.hi = IRTE_HIGH(sid_svt_sq);
792 
793 		/* set interrupt remapping table entry */
794 		bcopy(&irte, intrmap->intrmap_vaddr +
795 		    idx * INTRMAP_RTE_SIZE,
796 		    INTRMAP_RTE_SIZE);
797 
798 		immu_qinv_intr_one_cache(immu, idx);
799 
800 	} else {
801 		for (i = 0; i < count; i++) {
802 			irte.lo = IRTE_LOW(dst, vector, dlm, tm, rh, dm, 0, 1);
803 			irte.hi = IRTE_HIGH(sid_svt_sq);
804 
805 			/* set interrupt remapping table entry */
806 			bcopy(&irte, intrmap->intrmap_vaddr +
807 			    idx * INTRMAP_RTE_SIZE,
808 			    INTRMAP_RTE_SIZE);
809 			vector++;
810 			idx++;
811 		}
812 
813 		immu_qinv_intr_caches(immu, idx, count);
814 	}
815 }
816 
817 /* free the remapping entry */
818 static void
819 immu_intrmap_free(void **intrmap_privatep)
820 {
821 	immu_t *immu;
822 	intrmap_t *intrmap;
823 	uint32_t idx;
824 
825 	if (*intrmap_privatep == INTRMAP_DISABLE || *intrmap_privatep == NULL) {
826 		*intrmap_privatep = NULL;
827 		return;
828 	}
829 
830 	immu = INTRMAP_PRIVATE(*intrmap_privatep)->ir_immu;
831 	intrmap = immu->immu_intrmap;
832 	idx = INTRMAP_PRIVATE(*intrmap_privatep)->ir_idx;
833 
834 	bzero(intrmap->intrmap_vaddr + idx * INTRMAP_RTE_SIZE,
835 	    INTRMAP_RTE_SIZE);
836 
837 	immu_qinv_intr_one_cache(immu, idx);
838 
839 	mutex_enter(&intrmap->intrmap_lock);
840 	bitset_del(&intrmap->intrmap_map, idx);
841 	if (intrmap->intrmap_free == INTRMAP_IDX_FULL) {
842 		intrmap->intrmap_free = idx;
843 	}
844 	mutex_exit(&intrmap->intrmap_lock);
845 
846 	kmem_free(*intrmap_privatep, sizeof (intrmap_private_t));
847 	*intrmap_privatep = NULL;
848 }
849 
850 /* record the ioapic rdt entry */
851 static void
852 immu_intrmap_rdt(void *intrmap_private, ioapic_rdt_t *irdt)
853 {
854 	uint32_t rdt_entry, tm, pol, idx, vector;
855 
856 	rdt_entry = irdt->ir_lo;
857 
858 	if (intrmap_private != INTRMAP_DISABLE && intrmap_private != NULL) {
859 		idx = INTRMAP_PRIVATE(intrmap_private)->ir_idx;
860 		tm = RDT_TM(rdt_entry);
861 		pol = RDT_POL(rdt_entry);
862 		vector = RDT_VECTOR(rdt_entry);
863 		irdt->ir_lo = (tm << INTRMAP_IOAPIC_TM_SHIFT) |
864 		    (pol << INTRMAP_IOAPIC_POL_SHIFT) |
865 		    ((idx >> 15) << INTRMAP_IOAPIC_IDX15_SHIFT) |
866 		    vector;
867 		irdt->ir_hi = (idx << INTRMAP_IOAPIC_IDX_SHIFT) |
868 		    (1 << INTRMAP_IOAPIC_FORMAT_SHIFT);
869 	} else {
870 		irdt->ir_hi <<= APIC_ID_BIT_OFFSET;
871 	}
872 }
873 
874 /* record the msi interrupt structure */
875 /*ARGSUSED*/
876 static void
877 immu_intrmap_msi(void *intrmap_private, msi_regs_t *mregs)
878 {
879 	uint_t	idx;
880 
881 	if (intrmap_private != INTRMAP_DISABLE && intrmap_private != NULL) {
882 		idx = INTRMAP_PRIVATE(intrmap_private)->ir_idx;
883 
884 		mregs->mr_data = 0;
885 		mregs->mr_addr = MSI_ADDR_HDR |
886 		    ((idx & 0x7fff) << INTRMAP_MSI_IDX_SHIFT) |
887 		    (1 << INTRMAP_MSI_FORMAT_SHIFT) |
888 		    (1 << INTRMAP_MSI_SHV_SHIFT) |
889 		    ((idx >> 15) << INTRMAP_MSI_IDX15_SHIFT);
890 	} else {
891 		mregs->mr_addr = MSI_ADDR_HDR |
892 		    (MSI_ADDR_RH_FIXED << MSI_ADDR_RH_SHIFT) |
893 		    (MSI_ADDR_DM_PHYSICAL << MSI_ADDR_DM_SHIFT) |
894 		    (mregs->mr_addr << MSI_ADDR_DEST_SHIFT);
895 		mregs->mr_data = (MSI_DATA_TM_EDGE << MSI_DATA_TM_SHIFT) |
896 		    mregs->mr_data;
897 	}
898 }
899 
900 /* ######################################################################### */
901 /*
902  * Functions exported by immu_intr.c
903  */
904 void
905 immu_intrmap_setup(list_t *listp)
906 {
907 	immu_t *immu;
908 
909 	/*
910 	 * Check if ACPI DMAR tables say that
911 	 * interrupt remapping is supported
912 	 */
913 	if (immu_dmar_intrmap_supported() == B_FALSE) {
914 		return;
915 	}
916 
917 	/*
918 	 * Check if interrupt remapping is disabled.
919 	 */
920 	if (immu_intrmap_enable == B_FALSE) {
921 		return;
922 	}
923 
924 	psm_vt_ops = &intrmap_ops;
925 
926 	immu = list_head(listp);
927 	for (; immu; immu = list_next(listp, immu)) {
928 		mutex_init(&(immu->immu_intrmap_lock), NULL,
929 		    MUTEX_DEFAULT, NULL);
930 		mutex_enter(&(immu->immu_intrmap_lock));
931 		immu->immu_intrmap_setup = B_TRUE;
932 		mutex_exit(&(immu->immu_intrmap_lock));
933 	}
934 }
935 
/*
 * Mark a unit's interrupt remapping as running; only units that were
 * previously set up by immu_intrmap_setup() make the transition.
 * (The old "do nothing" comment here was wrong.)
 */
void
immu_intrmap_startup(immu_t *immu)
{
	mutex_enter(&(immu->immu_intrmap_lock));
	if (immu->immu_intrmap_setup == B_TRUE) {
		immu->immu_intrmap_running = B_TRUE;
	}
	mutex_exit(&(immu->immu_intrmap_lock));
}
946 
/*
 * Register a Intel IOMMU unit (i.e. DMAR unit's)
 * interrupt handler: build the MSI address/data pair targeting the
 * boot CPU's local APIC, hook immu_intr_handler() on an IPI vector,
 * and enable the unit's fault interrupt
 */
void
immu_intr_register(immu_t *immu)
{
	int irq, vect;
	char intr_handler_name[IMMU_MAXNAMELEN];
	uint32_t msi_data;
	uint32_t uaddr;
	uint32_t msi_addr;
	uint32_t localapic_id = 0;

	if (psm_get_localapicid)
		localapic_id = psm_get_localapicid(0);

	/* fixed, physical-destination MSI aimed at CPU0's local APIC */
	msi_addr = (MSI_ADDR_HDR |
	    ((localapic_id & 0xFF) << MSI_ADDR_DEST_SHIFT) |
	    (MSI_ADDR_RH_FIXED << MSI_ADDR_RH_SHIFT) |
	    (MSI_ADDR_DM_PHYSICAL << MSI_ADDR_DM_SHIFT));

	/* the upper address register carries the wide id in x2apic mode */
	if (intrmap_apic_mode == LOCAL_X2APIC) {
		uaddr = localapic_id & 0xFFFFFF00;
	} else {
		uaddr = 0;
	}

	/* Dont need to hold immu_intr_lock since we are in boot */
	irq = vect = psm_get_ipivect(IMMU_INTR_IPL, -1);
	if (psm_xlate_vector_by_irq != NULL)
		vect = psm_xlate_vector_by_irq(irq);

	msi_data = ((MSI_DATA_DELIVERY_FIXED <<
	    MSI_DATA_DELIVERY_SHIFT) | vect);

	(void) snprintf(intr_handler_name, sizeof (intr_handler_name),
	    "%s-intr-handler", immu->immu_name);

	(void) add_avintr((void *)NULL, IMMU_INTR_IPL,
	    (avfunc)(immu_intr_handler), intr_handler_name, irq,
	    (caddr_t)immu, NULL, NULL, NULL);

	immu_regs_intr_enable(immu, msi_addr, msi_data, uaddr);

	/* drain any fault that was latched before the handler existed */
	(void) immu_intr_handler(immu);
}
994