xref: /titanic_41/usr/src/uts/i86pc/io/immu_intrmap.c (revision c39526b769298791ff5b0b6c5e761f49aabaeb4e)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Portions Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * Copyright (c) 2009, Intel Corporation.
29  * All rights reserved.
30  */
31 
32 
33 #include <sys/apic.h>
34 #include <vm/hat_i86.h>
35 #include <sys/sysmacros.h>
36 #include <sys/smp_impldefs.h>
37 #include <sys/immu.h>
38 
39 
/*
 * Per-IRQ interrupt-remapping state, hung off an apic_irq_t's
 * airq_intrmap_private field.
 */
typedef struct intrmap_private {
	immu_t		*ir_immu;	/* IOMMU unit remapping this intr */
	uint16_t	ir_idx;		/* index into the remapping table */
	uint32_t	ir_sid_svt_sq;	/* packed source-id | SVT | SQ */
} intrmap_private_t;

/* accessors for the private pointer stored in the apic_irq_t */
#define	INTRMAP_PRIVATE(airq) ((intrmap_private_t *)airq->airq_intrmap_private)
#define	AIRQ_PRIVATE(airq) (airq->airq_intrmap_private)
48 
/*
 * Interrupt remapping table entry (IRTE): two 64-bit words, formatted
 * per the Intel VT-d architecture.
 */
typedef struct intrmap_rte {
	uint64_t	lo;
	uint64_t	hi;
} intrmap_rte_t;

/* high word: source-id plus source-validation type/qualifier bits */
#define	IRTE_HIGH(sid_svt_sq) (sid_svt_sq)
/*
 * low word: destination (dst), vector, delivery mode (dlm), trigger
 * mode (tm), redirection hint (rh), destination mode (dm),
 * fault-processing disable (fpd) and present (p) bits
 */
#define	IRTE_LOW(dst, vector, dlm, tm, rh, dm, fpd, p)	\
	    (((uint64_t)(dst) << 32) |  \
	    ((uint64_t)(vector) << 16) | \
	    ((uint64_t)(dlm) << 5) | \
	    ((uint64_t)(tm) << 4) | \
	    ((uint64_t)(rh) << 3) | \
	    ((uint64_t)(dm) << 2) | \
	    ((uint64_t)(fpd) << 1) | \
	    (p))

/* source-validation type (SVT) values for the IRTE high word */
typedef enum {
	SVT_NO_VERIFY = 0,	/* no verification */
	SVT_ALL_VERIFY,		/* using sid and sq to verify */
	SVT_BUS_VERIFY,		/* verify #startbus and #endbus */
	SVT_RSVD
} intrmap_svt_t;

/* source-id qualifier (SQ) values for the IRTE high word */
typedef enum {
	SQ_VERIFY_ALL = 0,	/* verify all 16 bits */
	SQ_VERIFY_IGR_1,	/* ignore bit 3 */
	SQ_VERIFY_IGR_2,	/* ignore bit 2-3 */
	SQ_VERIFY_IGR_3		/* ignore bit 1-3 */
} intrmap_sq_t;
79 
/*
 * S field of the Interrupt Remapping Table Address Register;
 * the size of the interrupt remapping table is 1 << (intrmap_irta_s + 1).
 * Tunable; clamped to INTRMAP_MAX_IRTA_SIZE in init_unit().
 */
static uint_t intrmap_irta_s = INTRMAP_MAX_IRTA_SIZE;

/*
 * If true, arrange to suppress broadcast EOI by setting edge-triggered mode
 * even for level-triggered interrupts in the interrupt-remapping engine.
 * If false, broadcast EOI can still be suppressed if the CPU supports the
 * APIC_SVR_SUPPRESS_BROADCAST_EOI bit.  In both cases, the IOAPIC is still
 * programmed with the correct trigger mode, and pcplusmp must send an EOI
 * to the IOAPIC by writing to the IOAPIC's EOI register to make up for the
 * missing broadcast EOI.
 */
static int intrmap_suppress_brdcst_eoi = 0;

/*
 * Whether to verify the source id (SID) of each interrupt request
 * against the originating device (0 == no verification).
 */
static int intrmap_enable_sid_verify = 0;
101 
/*
 * Fault-reason strings for DVMA remapping faults (hardware reasons
 * 0x0 - 0xd); the final entry is the catch-all for unknown reasons.
 */
static char *immu_dvma_faults[] = {
	"Reserved",
	"The present field in root-entry is Clear",
	"The present field in context-entry is Clear",
	"Hardware detected invalid programming of a context-entry",
	"The DMA request attempted to access an address beyond max support",
	"The Write field in a page-table entry is Clear when DMA write",
	"The Read field in a page-table entry is Clear when DMA read",
	"Access the next level page table resulted in error",
	"Access the root-entry table resulted in error",
	"Access the context-entry table resulted in error",
	"Reserved field not initialized to zero in a present root-entry",
	"Reserved field not initialized to zero in a present context-entry",
	"Reserved field not initialized to zero in a present page-table entry",
	"DMA blocked due to the Translation Type field in context-entry",
	"Incorrect fault event reason number",
};
/*
 * Highest valid index into immu_dvma_faults.  Fully parenthesized so
 * the macro expands safely inside larger expressions (the old form
 * left "- 1" outside the parentheses).
 */
#define	DVMA_MAX_FAULTS \
	((sizeof (immu_dvma_faults) / (sizeof (char *))) - 1)
121 
/*
 * Fault-reason strings for interrupt-remapping faults (hardware
 * reasons 0x20 - 0x26, stored here rebased to index 0).
 */
static char *immu_intrmap_faults[] = {
	"reserved field set in IRTE",
	"interrupt_index exceed the intr-remap table size",
	"present field in IRTE is clear",
	"hardware access intr-remap table address resulted in error",
	"reserved field set in IRTE, include various conditional",
	"hardware blocked an interrupt request in Compatibility format",
	"remappable interrupt request blocked due to verification failure"
};
/*
 * Highest valid index into immu_intrmap_faults.  Fully parenthesized
 * so the macro expands safely inside larger expressions (the old form
 * left "- 1" outside the parentheses).
 */
#define	INTRMAP_MAX_FAULTS \
	((sizeof (immu_intrmap_faults) / (sizeof (char *))) - 1)
134 
/* Function prototypes */
static int immu_intrmap_init(int apic_mode);
static void immu_intrmap_switchon(int suppress_brdcst_eoi);
static void immu_intrmap_alloc(apic_irq_t *irq_ptr);
static void immu_intrmap_map(apic_irq_t *irq_ptr, void *intrmap_data);
static void immu_intrmap_free(apic_irq_t *irq_ptr);
static void immu_intrmap_rdt(apic_irq_t *irq_ptr, ioapic_rdt_t *irdt);
static void immu_intrmap_msi(apic_irq_t *irq_ptr, msi_regs_t *mregs);

/*
 * Ops vector handed to the PSM (via psm_vt_ops in immu_intrmap_setup())
 * so the APIC code can drive interrupt remapping; entries must stay in
 * the order struct apic_intrmap_ops declares them.
 */
static struct apic_intrmap_ops intrmap_ops = {
	immu_intrmap_init,
	immu_intrmap_switchon,
	immu_intrmap_alloc,
	immu_intrmap_map,
	immu_intrmap_free,
	immu_intrmap_rdt,
	immu_intrmap_msi,
};

/* apic mode, APIC/X2APIC; set from the PSM in immu_intrmap_init() */
static int intrmap_apic_mode = LOCAL_APIC;
156 
157 
158 /*
159  * helper functions
160  */
161 static uint_t
162 bitset_find_free(bitset_t *b, uint_t post)
163 {
164 	uint_t	i;
165 	uint_t	cap = bitset_capacity(b);
166 
167 	if (post == cap)
168 		post = 0;
169 
170 	ASSERT(post < cap);
171 
172 	for (i = post; i < cap; i++) {
173 		if (!bitset_in_set(b, i))
174 			return (i);
175 	}
176 
177 	for (i = 0; i < post; i++) {
178 		if (!bitset_in_set(b, i))
179 			return (i);
180 	}
181 
182 	return (INTRMAP_IDX_FULL);	/* no free index */
183 }
184 
185 /*
186  * helper function to find 'count' contigous free
187  * interrupt remapping table entries
188  */
189 static uint_t
190 bitset_find_multi_free(bitset_t *b, uint_t post, uint_t count)
191 {
192 	uint_t  i, j;
193 	uint_t	cap = bitset_capacity(b);
194 
195 	if (post == INTRMAP_IDX_FULL) {
196 		return (INTRMAP_IDX_FULL);
197 	}
198 
199 	if (count > cap)
200 		return (INTRMAP_IDX_FULL);
201 
202 	ASSERT(post < cap);
203 
204 	for (i = post; (i + count) <= cap; i++) {
205 		for (j = 0; j < count; j++) {
206 			if (bitset_in_set(b, (i + j))) {
207 				i = i + j;
208 				break;
209 			}
210 			if (j == count - 1)
211 				return (i);
212 		}
213 	}
214 
215 	for (i = 0; (i < post) && ((i + count) <= cap); i++) {
216 		for (j = 0; j < count; j++) {
217 			if (bitset_in_set(b, (i + j))) {
218 				i = i + j;
219 				break;
220 			}
221 			if (j == count - 1)
222 				return (i);
223 		}
224 	}
225 
226 	return (INTRMAP_IDX_FULL);  		/* no free index */
227 }
228 
229 /* alloc one interrupt remapping table entry */
230 static int
231 alloc_tbl_entry(intrmap_t *intrmap)
232 {
233 	uint32_t idx;
234 
235 	for (;;) {
236 		mutex_enter(&intrmap->intrmap_lock);
237 		idx = intrmap->intrmap_free;
238 		if (idx != INTRMAP_IDX_FULL) {
239 			bitset_add(&intrmap->intrmap_map, idx);
240 			intrmap->intrmap_free =
241 			    bitset_find_free(&intrmap->intrmap_map, idx + 1);
242 			mutex_exit(&intrmap->intrmap_lock);
243 			break;
244 		}
245 
246 		/* no free intr entry, use compatible format intr */
247 		mutex_exit(&intrmap->intrmap_lock);
248 
249 		if (intrmap_apic_mode != LOCAL_X2APIC) {
250 			break;
251 		}
252 
253 		/*
254 		 * x2apic mode not allowed compatible
255 		 * interrupt
256 		 */
257 		delay(IMMU_ALLOC_RESOURCE_DELAY);
258 	}
259 
260 	return (idx);
261 }
262 
263 /* alloc 'cnt' contigous interrupt remapping table entries */
264 static int
265 alloc_tbl_multi_entries(intrmap_t *intrmap, uint_t cnt)
266 {
267 	uint_t idx, pos, i;
268 
269 	for (; ; ) {
270 		mutex_enter(&intrmap->intrmap_lock);
271 		pos = intrmap->intrmap_free;
272 		idx = bitset_find_multi_free(&intrmap->intrmap_map, pos, cnt);
273 
274 		if (idx != INTRMAP_IDX_FULL) {
275 			if (idx <= pos && pos < (idx + cnt)) {
276 				intrmap->intrmap_free = bitset_find_free(
277 				    &intrmap->intrmap_map, idx + cnt);
278 			}
279 			for (i = 0; i < cnt; i++) {
280 				bitset_add(&intrmap->intrmap_map, idx + i);
281 			}
282 			mutex_exit(&intrmap->intrmap_lock);
283 		}
284 
285 		mutex_exit(&intrmap->intrmap_lock);
286 
287 		if (intrmap_apic_mode != LOCAL_X2APIC) {
288 			break;
289 		}
290 
291 		/* x2apic mode not allowed comapitible interrupt */
292 		delay(IMMU_ALLOC_RESOURCE_DELAY);
293 	}
294 
295 	return (idx);
296 }
297 
/*
 * Initialize the interrupt remapping table for one IOMMU unit:
 * allocate a physically contiguous, page-aligned, uncached table,
 * record its physical address for the hardware, and set up the
 * allocation bitmap.  Returns DDI_SUCCESS/DDI_FAILURE.
 */
static int
init_unit(immu_t *immu)
{
	intrmap_t *intrmap;
	size_t size;

	/*
	 * DMA attributes for the table: 32-bit addressable, page
	 * aligned, single contiguous segment (sgllen == 1).
	 */
	ddi_dma_attr_t intrmap_dma_attr = {
		DMA_ATTR_V0,
		0U,
		0xffffffffU,
		0xffffffffU,
		MMU_PAGESIZE,	/* page aligned */
		0x1,
		0x1,
		0xffffffffU,
		0xffffffffU,
		1,
		4,
		0
	};

	ddi_device_acc_attr_t intrmap_acc_attr = {
		DDI_DEVICE_ATTR_V0,
		DDI_NEVERSWAP_ACC,
		DDI_STRICTORDER_ACC
	};

	/*
	 * x2apic mode requires extended interrupt mode (EIM) support
	 * in this unit's extended capability register.
	 */
	if (intrmap_apic_mode == LOCAL_X2APIC) {
		if (!IMMU_ECAP_GET_EIM(immu->immu_regs_excap)) {
			return (DDI_FAILURE);
		}
	}

	/* clamp the tunable table-size exponent to the maximum */
	if (intrmap_irta_s > INTRMAP_MAX_IRTA_SIZE) {
		intrmap_irta_s = INTRMAP_MAX_IRTA_SIZE;
	}

	intrmap =  kmem_zalloc(sizeof (intrmap_t), KM_SLEEP);

	if (ddi_dma_alloc_handle(immu->immu_dip,
	    &intrmap_dma_attr,
	    DDI_DMA_SLEEP,
	    NULL,
	    &(intrmap->intrmap_dma_hdl)) != DDI_SUCCESS) {
		kmem_free(intrmap, sizeof (intrmap_t));
		return (DDI_FAILURE);
	}

	/* table holds 2^(irta_s + 1) entries of INTRMAP_RTE_SIZE bytes */
	intrmap->intrmap_size = 1 << (intrmap_irta_s + 1);
	size = intrmap->intrmap_size * INTRMAP_RTE_SIZE;
	if (ddi_dma_mem_alloc(intrmap->intrmap_dma_hdl,
	    size,
	    &intrmap_acc_attr,
	    DDI_DMA_CONSISTENT | IOMEM_DATA_UNCACHED,
	    DDI_DMA_SLEEP,
	    NULL,
	    &(intrmap->intrmap_vaddr),
	    &size,
	    &(intrmap->intrmap_acc_hdl)) != DDI_SUCCESS) {
		/* unwind the DMA handle allocated above */
		ddi_dma_free_handle(&(intrmap->intrmap_dma_hdl));
		kmem_free(intrmap, sizeof (intrmap_t));
		return (DDI_FAILURE);
	}

	ASSERT(!((uintptr_t)intrmap->intrmap_vaddr & MMU_PAGEOFFSET));
	bzero(intrmap->intrmap_vaddr, size);
	/* the hardware is programmed with the table's physical address */
	intrmap->intrmap_paddr = pfn_to_pa(
	    hat_getpfnum(kas.a_hat, intrmap->intrmap_vaddr));

	mutex_init(&(intrmap->intrmap_lock), NULL, MUTEX_DRIVER, NULL);
	bitset_init(&intrmap->intrmap_map);
	bitset_resize(&intrmap->intrmap_map, intrmap->intrmap_size);
	intrmap->intrmap_free = 0;	/* hint: first free slot */

	immu->immu_intrmap = intrmap;

	return (DDI_SUCCESS);
}
377 
378 static void
379 get_immu(apic_irq_t *irq_ptr)
380 {
381 	immu_t	*immu = NULL;
382 
383 	ASSERT(INTRMAP_PRIVATE(irq_ptr)->ir_immu == NULL);
384 
385 	if (!APIC_IS_MSI_OR_MSIX_INDEX(irq_ptr->airq_mps_intr_index)) {
386 		immu = immu_dmar_ioapic_immu(irq_ptr->airq_ioapicindex);
387 	} else {
388 		if (irq_ptr->airq_dip != NULL) {
389 			immu = immu_dmar_get_immu(irq_ptr->airq_dip);
390 		}
391 	}
392 
393 	if (immu && (immu->immu_intrmap_running == B_TRUE)) {
394 		INTRMAP_PRIVATE(irq_ptr)->ir_immu = immu;
395 	}
396 }
397 
398 static int
399 get_top_pcibridge(dev_info_t *dip, void *arg)
400 {
401 	dev_info_t **topdipp = arg;
402 	immu_devi_t *immu_devi;
403 
404 	mutex_enter(&(DEVI(dip)->devi_lock));
405 	immu_devi = DEVI(dip)->devi_iommu;
406 	mutex_exit(&(DEVI(dip)->devi_lock));
407 
408 	if (immu_devi == NULL || immu_devi->imd_pcib_type == IMMU_PCIB_BAD ||
409 	    immu_devi->imd_pcib_type == IMMU_PCIB_ENDPOINT) {
410 		return (DDI_WALK_CONTINUE);
411 	}
412 
413 	*topdipp = dip;
414 
415 	return (DDI_WALK_CONTINUE);
416 }
417 
418 static dev_info_t *
419 intrmap_top_pcibridge(dev_info_t *rdip)
420 {
421 	dev_info_t *top_pcibridge = NULL;
422 
423 	if (immu_walk_ancestor(rdip, NULL, get_top_pcibridge,
424 	    &top_pcibridge, NULL, 0) != DDI_SUCCESS) {
425 		return (NULL);
426 	}
427 
428 	return (top_pcibridge);
429 }
430 
431 /* function to get interrupt request source id */
432 static void
433 get_sid(apic_irq_t *irq_ptr)
434 {
435 	dev_info_t	*dip, *pdip;
436 	immu_devi_t	*immu_devi;
437 	uint16_t	sid;
438 	uchar_t		svt, sq;
439 
440 	if (!intrmap_enable_sid_verify) {
441 		return;
442 	}
443 
444 	if (!APIC_IS_MSI_OR_MSIX_INDEX(irq_ptr->airq_mps_intr_index)) {
445 		/* for interrupt through I/O APIC */
446 		sid = immu_dmar_ioapic_sid(irq_ptr->airq_ioapicindex);
447 		svt = SVT_ALL_VERIFY;
448 		sq = SQ_VERIFY_ALL;
449 	} else {
450 		/* MSI/MSI-X interrupt */
451 		dip = irq_ptr->airq_dip;
452 		ASSERT(dip);
453 		pdip = intrmap_top_pcibridge(dip);
454 		ASSERT(pdip);
455 		immu_devi = DEVI(pdip)->devi_iommu;
456 		ASSERT(immu_devi);
457 		if (immu_devi->imd_pcib_type == IMMU_PCIB_PCIE_PCI) {
458 			/* device behind pcie to pci bridge */
459 			sid = (immu_devi->imd_bus << 8) | immu_devi->imd_sec;
460 			svt = SVT_BUS_VERIFY;
461 			sq = SQ_VERIFY_ALL;
462 		} else {
463 			/* pcie device or device behind pci to pci bridge */
464 			sid = (immu_devi->imd_bus << 8) |
465 			    immu_devi->imd_devfunc;
466 			svt = SVT_ALL_VERIFY;
467 			sq = SQ_VERIFY_ALL;
468 		}
469 	}
470 
471 	INTRMAP_PRIVATE(irq_ptr)->ir_sid_svt_sq =
472 	    sid | (svt << 18) | (sq << 16);
473 }
474 
475 static void
476 intrmap_enable(immu_t *immu)
477 {
478 	intrmap_t *intrmap;
479 	uint64_t irta_reg;
480 
481 	intrmap = immu->immu_intrmap;
482 
483 	irta_reg = intrmap->intrmap_paddr | intrmap_irta_s;
484 	if (intrmap_apic_mode == LOCAL_X2APIC) {
485 		irta_reg |= (0x1 << 11);
486 	}
487 
488 	immu_regs_intrmap_enable(immu, irta_reg);
489 }
490 
491 /* ####################################################################### */
492 
/*
 * immu_intr_handler()
 * 	The fault-event interrupt handler for a single IMMU (DMAR) unit.
 * 	Drains all pending primary fault records, logs each one, clears
 * 	it in hardware, and returns DDI_INTR_CLAIMED if any fault was
 * 	pending, DDI_INTR_UNCLAIMED otherwise.
 */
int
immu_intr_handler(immu_t *immu)
{
	uint32_t status;
	int index, fault_reg_offset;
	int max_fault_index;
	boolean_t found_fault;
	dev_info_t *idip;

	mutex_enter(&(immu->immu_intr_lock));
	mutex_enter(&(immu->immu_regs_lock));

	/* read the fault status */
	status = immu_regs_get32(immu, IMMU_REG_FAULT_STS);

	idip = immu->immu_dip;
	ASSERT(idip);

	/* check if we have a pending fault for this immu unit */
	if ((status & IMMU_FAULT_STS_PPF) == 0) {
		mutex_exit(&(immu->immu_regs_lock));
		mutex_exit(&(immu->immu_intr_lock));
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * handle all primary pending faults, starting at the index the
	 * status register reports
	 */
	index = IMMU_FAULT_GET_INDEX(status);
	max_fault_index =  IMMU_CAP_GET_NFR(immu->immu_regs_cap) - 1;
	fault_reg_offset = IMMU_CAP_GET_FRO(immu->immu_regs_cap);

	found_fault = B_FALSE;
	_NOTE(CONSTCOND)
	while (1) {
		uint64_t val;
		uint8_t fault_reason;
		uint8_t fault_type;
		uint16_t sid;
		uint64_t pg_addr;
		uint64_t idx;

		/* read the higher 64bits (each fault record is 128 bits) */
		val = immu_regs_get64(immu, fault_reg_offset + index * 16 + 8);

		/* check if this fault register has pending fault */
		if (!IMMU_FRR_GET_F(val)) {
			break;
		}

		found_fault = B_TRUE;

		/* get the fault reason, fault type and sid */
		fault_reason = IMMU_FRR_GET_FR(val);
		fault_type = IMMU_FRR_GET_FT(val);
		sid = IMMU_FRR_GET_SID(val);

		/* read the first 64bits */
		val = immu_regs_get64(immu, fault_reg_offset + index * 16);
		pg_addr = val & IMMU_PAGEMASK;
		idx = val >> 48;

		/* clear this record's fault by writing 1 to its F bit */
		immu_regs_put32(immu, fault_reg_offset + index * 16 + 12,
		    (((uint32_t)1) << 31));

		/* report the fault info */
		if (fault_reason < 0x20) {
			/* DVMA remapping fault (reasons 0x0 - 0x1f) */
			ddi_err(DER_WARN, idip,
			    "generated a fault event when translating DMA %s\n"
			    "\t on address 0x%" PRIx64 " for PCI(%d, %d, %d), "
			    "the reason is:\n\t %s",
			    fault_type ? "read" : "write", pg_addr,
			    (sid >> 8) & 0xff, (sid >> 3) & 0x1f, sid & 0x7,
			    immu_dvma_faults[MIN(fault_reason,
			    DVMA_MAX_FAULTS)]);
		} else if (fault_reason < 0x27) {
			/* interrupt remapping fault (reasons 0x20 - 0x26) */
			ddi_err(DER_WARN, idip,
			    "generated a fault event when translating "
			    "interrupt request\n"
			    "\t on index 0x%" PRIx64 " for PCI(%d, %d, %d), "
			    "the reason is:\n\t %s",
			    idx,
			    (sid >> 8) & 0xff, (sid >> 3) & 0x1f, sid & 0x7,
			    immu_intrmap_faults[MIN((fault_reason - 0x20),
			    INTRMAP_MAX_FAULTS)]);
		} else {
			ddi_err(DER_WARN, idip, "Unknown fault reason: 0x%x",
			    fault_reason);
		}

		/* the fault registers form a ring; wrap past the last one */
		index++;
		if (index > max_fault_index)
			index = 0;
	}

	/* Clear the overall fault-status (PPF) indication */
	if (!found_fault) {
		ddi_err(DER_MODE, idip,
		    "Fault register set but no fault present");
	}
	immu_regs_put32(immu, IMMU_REG_FAULT_STS, 1);
	mutex_exit(&(immu->immu_regs_lock));
	mutex_exit(&(immu->immu_intr_lock));
	return (DDI_INTR_CLAIMED);
}
605 /* ######################################################################### */
606 
607 /*
608  * Interrupt remap entry points
609  */
610 
611 /* initialize interrupt remapping */
612 static int
613 immu_intrmap_init(int apic_mode)
614 {
615 	immu_t *immu;
616 	int error = DDI_FAILURE;
617 
618 	if (immu_intrmap_enable == B_FALSE) {
619 		return (DDI_SUCCESS);
620 	}
621 
622 	intrmap_apic_mode = apic_mode;
623 
624 	immu = list_head(&immu_list);
625 	for (; immu; immu = list_next(&immu_list, immu)) {
626 		if ((immu->immu_intrmap_running == B_TRUE) &&
627 		    IMMU_ECAP_GET_IR(immu->immu_regs_excap)) {
628 			if (init_unit(immu) == DDI_SUCCESS) {
629 				error = DDI_SUCCESS;
630 			}
631 		}
632 	}
633 
634 	/*
635 	 * if all IOMMU units disable intr remapping,
636 	 * return FAILURE
637 	 */
638 	return (error);
639 }
640 
641 
642 
643 /* enable interrupt remapping */
644 static void
645 immu_intrmap_switchon(int suppress_brdcst_eoi)
646 {
647 	immu_t *immu;
648 
649 
650 	intrmap_suppress_brdcst_eoi = suppress_brdcst_eoi;
651 
652 	immu = list_head(&immu_list);
653 	for (; immu; immu = list_next(&immu_list, immu)) {
654 		if (immu->immu_intrmap_setup == B_TRUE) {
655 			intrmap_enable(immu);
656 		}
657 	}
658 }
659 
/*
 * Allocate remapping table entry/entries for an interrupt: one entry
 * normally, a contiguous block of airq_intin_no entries for
 * multi-vector MSI.  On any failure the IRQ is marked INTRMAP_DISABLE
 * so it falls back to non-remapped delivery.
 */
static void
immu_intrmap_alloc(apic_irq_t *irq_ptr)
{
	immu_t	*immu;
	intrmap_t *intrmap;
	uint32_t		idx, cnt, i;
	uint_t			vector, irqno;
	uint32_t		sid_svt_sq;

	/*
	 * Nothing to do if remapping was already disabled for this IRQ
	 * or private state already exists.
	 * NOTE(review): the INTRMAP_DISABLE comparison looks redundant
	 * if INTRMAP_DISABLE is a non-NULL sentinel (the != NULL test
	 * would subsume it) -- confirm against immu.h.
	 */
	if (AIRQ_PRIVATE(irq_ptr) == INTRMAP_DISABLE ||
	    AIRQ_PRIVATE(irq_ptr) != NULL) {
		return;
	}

	AIRQ_PRIVATE(irq_ptr) =
	    kmem_zalloc(sizeof (intrmap_private_t), KM_SLEEP);

	/* resolve the IOMMU unit that will remap this interrupt */
	get_immu(irq_ptr);

	immu = INTRMAP_PRIVATE(irq_ptr)->ir_immu;
	if (immu == NULL) {
		goto intrmap_disable;
	}

	intrmap = immu->immu_intrmap;

	/* multi-vector MSI needs a contiguous block of entries */
	if (irq_ptr->airq_mps_intr_index == MSI_INDEX) {
		cnt = irq_ptr->airq_intin_no;
	} else {
		cnt = 1;
	}

	if (cnt == 1) {
		idx = alloc_tbl_entry(intrmap);
	} else {
		idx = alloc_tbl_multi_entries(intrmap, cnt);
	}

	if (idx == INTRMAP_IDX_FULL) {
		goto intrmap_disable;
	}

	INTRMAP_PRIVATE(irq_ptr)->ir_idx = idx;

	/* record source-id verification info (no-op unless enabled) */
	get_sid(irq_ptr);

	if (cnt == 1) {
		/* invalidate the IRTE cache (or flush the write buffer) */
		if (IMMU_CAP_GET_CM(immu->immu_regs_cap)) {
			immu_qinv_intr_one_cache(immu, idx);
		} else {
			immu_regs_wbf_flush(immu);
		}
		return;
	}

	sid_svt_sq = INTRMAP_PRIVATE(irq_ptr)->ir_sid_svt_sq;

	vector = irq_ptr->airq_vector;

	/*
	 * Hand out the rest of the contiguous block: each sibling
	 * vector of the multi-vector MSI gets its own private state
	 * pointing at slot idx + i.
	 */
	for (i = 1; i < cnt; i++) {
		irqno = apic_vector_to_irq[vector + i];
		irq_ptr = apic_irq_table[irqno];

		ASSERT(irq_ptr);

		AIRQ_PRIVATE(irq_ptr) =
		    kmem_zalloc(sizeof (intrmap_private_t), KM_SLEEP);

		INTRMAP_PRIVATE(irq_ptr)->ir_immu = immu;
		INTRMAP_PRIVATE(irq_ptr)->ir_sid_svt_sq = sid_svt_sq;
		INTRMAP_PRIVATE(irq_ptr)->ir_idx = idx + i;
	}

	if (IMMU_CAP_GET_CM(immu->immu_regs_cap)) {
		immu_qinv_intr_caches(immu, idx, cnt);
	} else {
		immu_regs_wbf_flush(immu);
	}

	return;

intrmap_disable:
	/*
	 * Only reachable before the sibling loop above, so irq_ptr
	 * still names the original IRQ: free its private state and
	 * mark remapping disabled for it.
	 */
	kmem_free(AIRQ_PRIVATE(irq_ptr), sizeof (intrmap_private_t));
	AIRQ_PRIVATE(irq_ptr) = INTRMAP_DISABLE;
}
746 
747 
748 /* remapping the interrupt */
749 static void
750 immu_intrmap_map(apic_irq_t *irq_ptr, void *intrmap_data)
751 {
752 	immu_t	*immu;
753 	intrmap_t	*intrmap;
754 	ioapic_rdt_t	*irdt = (ioapic_rdt_t *)intrmap_data;
755 	msi_regs_t	*mregs = (msi_regs_t *)intrmap_data;
756 	intrmap_rte_t	irte;
757 	uint_t		idx, i, cnt;
758 	uint32_t	dst, sid_svt_sq;
759 	uchar_t		vector, dlm, tm, rh, dm;
760 
761 	if (AIRQ_PRIVATE(irq_ptr) == INTRMAP_DISABLE) {
762 		return;
763 	}
764 
765 	if (irq_ptr->airq_mps_intr_index == MSI_INDEX) {
766 		cnt = irq_ptr->airq_intin_no;
767 	} else {
768 		cnt = 1;
769 	}
770 
771 	idx = INTRMAP_PRIVATE(irq_ptr)->ir_idx;
772 	immu = INTRMAP_PRIVATE(irq_ptr)->ir_immu;
773 	intrmap = immu->immu_intrmap;
774 	sid_svt_sq = INTRMAP_PRIVATE(irq_ptr)->ir_sid_svt_sq;
775 	vector = irq_ptr->airq_vector;
776 
777 	if (!APIC_IS_MSI_OR_MSIX_INDEX(irq_ptr->airq_mps_intr_index)) {
778 		dm = RDT_DM(irdt->ir_lo);
779 		rh = 0;
780 		tm = RDT_TM(irdt->ir_lo);
781 		dlm = RDT_DLM(irdt->ir_lo);
782 		dst = irdt->ir_hi;
783 
784 		/*
785 		 * Mark the IRTE's TM as Edge to suppress broadcast EOI.
786 		 */
787 		if (intrmap_suppress_brdcst_eoi) {
788 			tm = TRIGGER_MODE_EDGE;
789 		}
790 	} else {
791 		dm = MSI_ADDR_DM_PHYSICAL;
792 		rh = MSI_ADDR_RH_FIXED;
793 		tm = TRIGGER_MODE_EDGE;
794 		dlm = 0;
795 		dst = mregs->mr_addr;
796 	}
797 
798 	if (intrmap_apic_mode == LOCAL_APIC)
799 		dst = (dst & 0xFF) << 8;
800 
801 	if (cnt == 1) {
802 		irte.lo = IRTE_LOW(dst, vector, dlm, tm, rh, dm, 0, 1);
803 		irte.hi = IRTE_HIGH(sid_svt_sq);
804 
805 		/* set interrupt remapping table entry */
806 		bcopy(&irte, intrmap->intrmap_vaddr +
807 		    idx * INTRMAP_RTE_SIZE,
808 		    INTRMAP_RTE_SIZE);
809 
810 		immu_qinv_intr_one_cache(immu, idx);
811 
812 	} else {
813 		vector = irq_ptr->airq_vector;
814 		for (i = 0; i < cnt; i++) {
815 			irte.lo = IRTE_LOW(dst, vector, dlm, tm, rh, dm, 0, 1);
816 			irte.hi = IRTE_HIGH(sid_svt_sq);
817 
818 			/* set interrupt remapping table entry */
819 			bcopy(&irte, intrmap->intrmap_vaddr +
820 			    idx * INTRMAP_RTE_SIZE,
821 			    INTRMAP_RTE_SIZE);
822 			vector++;
823 			idx++;
824 		}
825 
826 		immu_qinv_intr_caches(immu, idx, cnt);
827 	}
828 }
829 
830 /* free the remapping entry */
831 static void
832 immu_intrmap_free(apic_irq_t *irq_ptr)
833 {
834 	immu_t *immu;
835 	intrmap_t *intrmap;
836 	uint32_t idx;
837 
838 	if (AIRQ_PRIVATE(irq_ptr) == INTRMAP_DISABLE) {
839 		AIRQ_PRIVATE(irq_ptr) = NULL;
840 		return;
841 	}
842 
843 	immu = INTRMAP_PRIVATE(irq_ptr)->ir_immu;
844 	intrmap = immu->immu_intrmap;
845 	idx = INTRMAP_PRIVATE(irq_ptr)->ir_idx;
846 
847 	bzero(intrmap->intrmap_vaddr + idx * INTRMAP_RTE_SIZE,
848 	    INTRMAP_RTE_SIZE);
849 
850 	immu_qinv_intr_one_cache(immu, idx);
851 
852 	mutex_enter(&intrmap->intrmap_lock);
853 	bitset_del(&intrmap->intrmap_map, idx);
854 	if (intrmap->intrmap_free == INTRMAP_IDX_FULL) {
855 		intrmap->intrmap_free = idx;
856 	}
857 	mutex_exit(&intrmap->intrmap_lock);
858 
859 	kmem_free(AIRQ_PRIVATE(irq_ptr), sizeof (intrmap_private_t));
860 	AIRQ_PRIVATE(irq_ptr) = NULL;
861 }
862 
863 /* record the ioapic rdt entry */
864 static void
865 immu_intrmap_rdt(apic_irq_t *irq_ptr, ioapic_rdt_t *irdt)
866 {
867 	uint32_t rdt_entry, tm, pol, idx, vector;
868 
869 	rdt_entry = irdt->ir_lo;
870 
871 	if (INTRMAP_PRIVATE(irq_ptr) != NULL) {
872 		idx = INTRMAP_PRIVATE(irq_ptr)->ir_idx;
873 		tm = RDT_TM(rdt_entry);
874 		pol = RDT_POL(rdt_entry);
875 		vector = irq_ptr->airq_vector;
876 		irdt->ir_lo = (tm << INTRMAP_IOAPIC_TM_SHIFT) |
877 		    (pol << INTRMAP_IOAPIC_POL_SHIFT) |
878 		    ((idx >> 15) << INTRMAP_IOAPIC_IDX15_SHIFT) |
879 		    vector;
880 		irdt->ir_hi = (idx << INTRMAP_IOAPIC_IDX_SHIFT) |
881 		    (1 << INTRMAP_IOAPIC_FORMAT_SHIFT);
882 	} else {
883 		irdt->ir_hi <<= APIC_ID_BIT_OFFSET;
884 	}
885 }
886 
887 /* record the msi interrupt structure */
888 /*ARGSUSED*/
889 static void
890 immu_intrmap_msi(apic_irq_t *irq_ptr, msi_regs_t *mregs)
891 {
892 	uint_t	idx;
893 
894 	if (INTRMAP_PRIVATE(irq_ptr) != NULL) {
895 		idx = INTRMAP_PRIVATE(irq_ptr)->ir_idx;
896 
897 		mregs->mr_data = 0;
898 		mregs->mr_addr = MSI_ADDR_HDR |
899 		    ((idx & 0x7fff) << INTRMAP_MSI_IDX_SHIFT) |
900 		    (1 << INTRMAP_MSI_FORMAT_SHIFT) |
901 		    (1 << INTRMAP_MSI_SHV_SHIFT) |
902 		    ((idx >> 15) << INTRMAP_MSI_IDX15_SHIFT);
903 	} else {
904 		mregs->mr_addr = MSI_ADDR_HDR |
905 		    (MSI_ADDR_RH_FIXED << MSI_ADDR_RH_SHIFT) |
906 		    (MSI_ADDR_DM_PHYSICAL << MSI_ADDR_DM_SHIFT) |
907 		    (mregs->mr_addr << MSI_ADDR_DEST_SHIFT);
908 		mregs->mr_data = (MSI_DATA_TM_EDGE << MSI_DATA_TM_SHIFT) |
909 		    mregs->mr_data;
910 	}
911 }
912 
913 /* ######################################################################### */
914 /*
915  * Functions exported by immu_intr.c
916  */
917 void
918 immu_intrmap_setup(list_t *listp)
919 {
920 	immu_t *immu;
921 
922 	/*
923 	 * Check if ACPI DMAR tables say that
924 	 * interrupt remapping is supported
925 	 */
926 	if (immu_dmar_intrmap_supported() == B_FALSE) {
927 		return;
928 	}
929 
930 	/*
931 	 * Check if interrupt remapping is disabled.
932 	 */
933 	if (immu_intrmap_enable == B_FALSE) {
934 		return;
935 	}
936 
937 	psm_vt_ops = &intrmap_ops;
938 
939 	immu = list_head(listp);
940 	for (; immu; immu = list_next(listp, immu)) {
941 		mutex_init(&(immu->immu_intrmap_lock), NULL,
942 		    MUTEX_DEFAULT, NULL);
943 		mutex_enter(&(immu->immu_intrmap_lock));
944 		immu->immu_intrmap_setup = B_TRUE;
945 		mutex_exit(&(immu->immu_intrmap_lock));
946 	}
947 }
948 
949 void
950 immu_intrmap_startup(immu_t *immu)
951 {
952 	/* do nothing */
953 	mutex_enter(&(immu->immu_intrmap_lock));
954 	if (immu->immu_intrmap_setup == B_TRUE) {
955 		immu->immu_intrmap_running = B_TRUE;
956 	}
957 	mutex_exit(&(immu->immu_intrmap_lock));
958 }
959 
960 /*
961  * Register a Intel IOMMU unit (i.e. DMAR unit's)
962  * interrupt handler
963  */
964 void
965 immu_intr_register(immu_t *immu)
966 {
967 	int irq, vect;
968 	char intr_handler_name[IMMU_MAXNAMELEN];
969 	uint32_t msi_data;
970 	uint32_t uaddr;
971 	uint32_t msi_addr;
972 
973 	msi_addr = (MSI_ADDR_HDR |
974 	    apic_cpus[0].aci_local_id & 0xFF) << ((MSI_ADDR_DEST_SHIFT) |
975 	    (MSI_ADDR_RH_FIXED << MSI_ADDR_RH_SHIFT) |
976 	    (MSI_ADDR_DM_PHYSICAL << MSI_ADDR_DM_SHIFT));
977 
978 	if (intrmap_apic_mode == LOCAL_X2APIC) {
979 		uaddr = (apic_cpus[0].aci_local_id & 0xFFFFFF00);
980 	} else {
981 		uaddr = 0;
982 	}
983 
984 	/* Dont need to hold immu_intr_lock since we are in boot */
985 	irq = psm_get_ipivect(IMMU_INTR_IPL, -1);
986 	vect = apic_irq_table[irq]->airq_vector;
987 	msi_data = ((MSI_DATA_DELIVERY_FIXED <<
988 	    MSI_DATA_DELIVERY_SHIFT) | vect);
989 
990 	(void) snprintf(intr_handler_name, sizeof (intr_handler_name),
991 	    "%s-intr-handler", immu->immu_name);
992 
993 	(void) add_avintr((void *)NULL, IMMU_INTR_IPL,
994 	    (avfunc)(immu_intr_handler), intr_handler_name, irq,
995 	    (caddr_t)immu, NULL, NULL, NULL);
996 
997 	immu_regs_intr_enable(immu, msi_addr, msi_data, uaddr);
998 
999 	(void) immu_intr_handler(immu);
1000 }
1001