1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. 24 */ 25 26 /* 27 * Copyright (c) 2009, Intel Corporation. 28 * All rights reserved. 29 */ 30 31 32 #include <sys/apic.h> 33 #include <vm/hat_i86.h> 34 #include <sys/sysmacros.h> 35 #include <sys/smp_impldefs.h> 36 #include <sys/immu.h> 37 38 39 typedef struct intrmap_private { 40 immu_t *ir_immu; 41 uint16_t ir_idx; 42 uint32_t ir_sid_svt_sq; 43 } intrmap_private_t; 44 45 #define INTRMAP_PRIVATE(intrmap) ((intrmap_private_t *)intrmap) 46 47 /* interrupt remapping table entry */ 48 typedef struct intrmap_rte { 49 uint64_t lo; 50 uint64_t hi; 51 } intrmap_rte_t; 52 53 #define IRTE_HIGH(sid_svt_sq) (sid_svt_sq) 54 #define IRTE_LOW(dst, vector, dlm, tm, rh, dm, fpd, p) \ 55 (((uint64_t)(dst) << 32) | \ 56 ((uint64_t)(vector) << 16) | \ 57 ((uint64_t)(dlm) << 5) | \ 58 ((uint64_t)(tm) << 4) | \ 59 ((uint64_t)(rh) << 3) | \ 60 ((uint64_t)(dm) << 2) | \ 61 ((uint64_t)(fpd) << 1) | \ 62 (p)) 63 64 typedef enum { 65 SVT_NO_VERIFY = 0, /* no verification */ 66 SVT_ALL_VERIFY, /* using sid and sq to verify */ 67 SVT_BUS_VERIFY, /* verify #startbus and #endbus */ 68 SVT_RSVD 69 } intrmap_svt_t; 70 71 typedef enum { 72 SQ_VERIFY_ALL = 0, /* verify all 16 bits */ 73 SQ_VERIFY_IGR_1, /* ignore bit 3 */ 74 SQ_VERIFY_IGR_2, /* ignore bit 2-3 */ 75 SQ_VERIFY_IGR_3 /* ignore bit 1-3 */ 76 } intrmap_sq_t; 77 78 /* 79 * S field of the Interrupt Remapping Table Address Register 80 * the size of the interrupt remapping table is 1 << (immu_intrmap_irta_s + 1) 81 */ 82 static uint_t intrmap_irta_s = INTRMAP_MAX_IRTA_SIZE; 83 84 /* 85 * If true, arrange to suppress broadcast EOI by setting edge-triggered mode 86 * even for level-triggered interrupts in the interrupt-remapping engine. 87 * If false, broadcast EOI can still be suppressed if the CPU supports the 88 * APIC_SVR_SUPPRESS_BROADCAST_EOI bit. In both cases, the IOAPIC is still 89 * programmed with the correct trigger mode, and pcplusmp must send an EOI 90 * to the IOAPIC by writing to the IOAPIC's EOI register to make up for the 91 * missing broadcast EOI. 92 */ 93 static int intrmap_suppress_brdcst_eoi = 0; 94 95 /* 96 * whether verify the source id of interrupt request 97 */ 98 static int intrmap_enable_sid_verify = 0; 99 100 /* fault types for DVMA remapping */ 101 static char *immu_dvma_faults[] = { 102 "Reserved", 103 "The present field in root-entry is Clear", 104 "The present field in context-entry is Clear", 105 "Hardware detected invalid programming of a context-entry", 106 "The DMA request attempted to access an address beyond max support", 107 "The Write field in a page-table entry is Clear when DMA write", 108 "The Read field in a page-table entry is Clear when DMA read", 109 "Access the next level page table resulted in error", 110 "Access the root-entry table resulted in error", 111 "Access the context-entry table resulted in error", 112 "Reserved field not initialized to zero in a present root-entry", 113 "Reserved field not initialized to zero in a present context-entry", 114 "Reserved field not initialized to zero in a present page-table entry", 115 "DMA blocked due to the Translation Type field in context-entry", 116 "Incorrect fault event reason number", 117 }; 118 #define DVMA_MAX_FAULTS (sizeof (immu_dvma_faults)/(sizeof (char *))) - 1 119 120 /* fault types for interrupt remapping */ 121 static char *immu_intrmap_faults[] = { 122 "reserved field set in IRTE", 123 "interrupt_index exceed the intr-remap table size", 124 "present field in IRTE is clear", 125 "hardware access intr-remap table address resulted in error", 126 "reserved field set in IRTE, include various conditional", 127 "hardware blocked an interrupt request in Compatibility format", 128 "remappable interrupt request blocked due to verification failure" 129 }; 130 #define INTRMAP_MAX_FAULTS \ 131 (sizeof (immu_intrmap_faults) / (sizeof (char *))) - 1 132 133 /* Function prototypes */ 134 static int immu_intrmap_init(int apic_mode); 135 static void immu_intrmap_switchon(int suppress_brdcst_eoi); 136 static void immu_intrmap_alloc(void **intrmap_private_tbl, dev_info_t *dip, 137 uint16_t type, int count, uchar_t ioapic_index); 138 static void immu_intrmap_map(void *intrmap_private, void *intrmap_data, 139 uint16_t type, int count); 140 static void immu_intrmap_free(void **intrmap_privatep); 141 static void immu_intrmap_rdt(void *intrmap_private, ioapic_rdt_t *irdt); 142 static void immu_intrmap_msi(void *intrmap_private, msi_regs_t *mregs); 143 144 static struct apic_intrmap_ops intrmap_ops = { 145 immu_intrmap_init, 146 immu_intrmap_switchon, 147 immu_intrmap_alloc, 148 immu_intrmap_map, 149 immu_intrmap_free, 150 immu_intrmap_rdt, 151 immu_intrmap_msi, 152 }; 153 154 /* apic mode, APIC/X2APIC */ 155 static int intrmap_apic_mode = LOCAL_APIC; 156 157 158 /* 159 * helper functions 160 */ 161 static uint_t 162 bitset_find_free(bitset_t *b, uint_t post) 163 { 164 uint_t i; 165 uint_t cap = bitset_capacity(b); 166 167 if (post == cap) 168 post = 0; 169 170 ASSERT(post < cap); 171 172 for (i = post; i < cap; i++) { 173 if (!bitset_in_set(b, i)) 174 return (i); 175 } 176 177 for (i = 0; i < post; i++) { 178 if (!bitset_in_set(b, i)) 179 return (i); 180 } 181 182 return (INTRMAP_IDX_FULL); /* no free index */ 183 } 184 185 /* 186 * helper function to find 'count' contigous free 187 * interrupt remapping table entries 188 */ 189 static uint_t 190 bitset_find_multi_free(bitset_t *b, uint_t post, uint_t count) 191 { 192 uint_t i, j; 193 uint_t cap = bitset_capacity(b); 194 195 if (post == INTRMAP_IDX_FULL) { 196 return (INTRMAP_IDX_FULL); 197 } 198 199 if (count > cap) 200 return (INTRMAP_IDX_FULL); 201 202 ASSERT(post < cap); 203 204 for (i = post; (i + count) <= cap; i++) { 205 for (j = 0; j < count; j++) { 206 if (bitset_in_set(b, (i + j))) { 207 i = i + j; 208 break; 209 } 210 if (j == count - 1) 211 return (i); 212 } 213 } 214 215 for (i = 0; (i < post) && ((i + count) <= cap); i++) { 216 for (j = 0; j < count; j++) { 217 if (bitset_in_set(b, (i + j))) { 218 i = i + j; 219 break; 220 } 221 if (j == count - 1) 222 return (i); 223 } 224 } 225 226 return (INTRMAP_IDX_FULL); /* no free index */ 227 } 228 229 /* alloc one interrupt remapping table entry */ 230 static int 231 alloc_tbl_entry(intrmap_t *intrmap) 232 { 233 uint32_t idx; 234 235 for (;;) { 236 mutex_enter(&intrmap->intrmap_lock); 237 idx = intrmap->intrmap_free; 238 if (idx != INTRMAP_IDX_FULL) { 239 bitset_add(&intrmap->intrmap_map, idx); 240 intrmap->intrmap_free = 241 bitset_find_free(&intrmap->intrmap_map, idx + 1); 242 mutex_exit(&intrmap->intrmap_lock); 243 break; 244 } 245 246 /* no free intr entry, use compatible format intr */ 247 mutex_exit(&intrmap->intrmap_lock); 248 249 if (intrmap_apic_mode != LOCAL_X2APIC) { 250 break; 251 } 252 253 /* 254 * x2apic mode not allowed compatible 255 * interrupt 256 */ 257 delay(IMMU_ALLOC_RESOURCE_DELAY); 258 } 259 260 return (idx); 261 } 262 263 /* alloc 'cnt' contigous interrupt remapping table entries */ 264 static int 265 alloc_tbl_multi_entries(intrmap_t *intrmap, uint_t cnt) 266 { 267 uint_t idx, pos, i; 268 269 for (; ; ) { 270 mutex_enter(&intrmap->intrmap_lock); 271 pos = intrmap->intrmap_free; 272 idx = bitset_find_multi_free(&intrmap->intrmap_map, pos, cnt); 273 274 if (idx != INTRMAP_IDX_FULL) { 275 if (idx <= pos && pos < (idx + cnt)) { 276 intrmap->intrmap_free = bitset_find_free( 277 &intrmap->intrmap_map, idx + cnt); 278 } 279 for (i = 0; i < cnt; i++) { 280 bitset_add(&intrmap->intrmap_map, idx + i); 281 } 282 mutex_exit(&intrmap->intrmap_lock); 283 } 284 285 mutex_exit(&intrmap->intrmap_lock); 286 287 if (intrmap_apic_mode != LOCAL_X2APIC) { 288 break; 289 } 290 291 /* x2apic mode not allowed comapitible interrupt */ 292 delay(IMMU_ALLOC_RESOURCE_DELAY); 293 } 294 295 return (idx); 296 } 297 298 /* init interrupt remapping table */ 299 static int 300 init_unit(immu_t *immu) 301 { 302 intrmap_t *intrmap; 303 size_t size; 304 305 ddi_dma_attr_t intrmap_dma_attr = { 306 DMA_ATTR_V0, 307 0U, 308 0xffffffffU, 309 0xffffffffU, 310 MMU_PAGESIZE, /* page aligned */ 311 0x1, 312 0x1, 313 0xffffffffU, 314 0xffffffffU, 315 1, 316 4, 317 0 318 }; 319 320 ddi_device_acc_attr_t intrmap_acc_attr = { 321 DDI_DEVICE_ATTR_V0, 322 DDI_NEVERSWAP_ACC, 323 DDI_STRICTORDER_ACC 324 }; 325 326 /* 327 * Using interrupt remapping implies using the queue 328 * invalidation interface. According to Intel, 329 * hardware that supports interrupt remapping should 330 * also support QI. 331 */ 332 ASSERT(IMMU_ECAP_GET_QI(immu->immu_regs_excap)); 333 334 if (intrmap_apic_mode == LOCAL_X2APIC) { 335 if (!IMMU_ECAP_GET_EIM(immu->immu_regs_excap)) { 336 return (DDI_FAILURE); 337 } 338 } 339 340 if (intrmap_irta_s > INTRMAP_MAX_IRTA_SIZE) { 341 intrmap_irta_s = INTRMAP_MAX_IRTA_SIZE; 342 } 343 344 intrmap = kmem_zalloc(sizeof (intrmap_t), KM_SLEEP); 345 346 if (ddi_dma_alloc_handle(immu->immu_dip, 347 &intrmap_dma_attr, 348 DDI_DMA_SLEEP, 349 NULL, 350 &(intrmap->intrmap_dma_hdl)) != DDI_SUCCESS) { 351 kmem_free(intrmap, sizeof (intrmap_t)); 352 return (DDI_FAILURE); 353 } 354 355 intrmap->intrmap_size = 1 << (intrmap_irta_s + 1); 356 size = intrmap->intrmap_size * INTRMAP_RTE_SIZE; 357 if (ddi_dma_mem_alloc(intrmap->intrmap_dma_hdl, 358 size, 359 &intrmap_acc_attr, 360 DDI_DMA_CONSISTENT | IOMEM_DATA_UNCACHED, 361 DDI_DMA_SLEEP, 362 NULL, 363 &(intrmap->intrmap_vaddr), 364 &size, 365 &(intrmap->intrmap_acc_hdl)) != DDI_SUCCESS) { 366 ddi_dma_free_handle(&(intrmap->intrmap_dma_hdl)); 367 kmem_free(intrmap, sizeof (intrmap_t)); 368 return (DDI_FAILURE); 369 } 370 371 ASSERT(!((uintptr_t)intrmap->intrmap_vaddr & MMU_PAGEOFFSET)); 372 bzero(intrmap->intrmap_vaddr, size); 373 intrmap->intrmap_paddr = pfn_to_pa( 374 hat_getpfnum(kas.a_hat, intrmap->intrmap_vaddr)); 375 376 mutex_init(&(intrmap->intrmap_lock), NULL, MUTEX_DRIVER, NULL); 377 bitset_init(&intrmap->intrmap_map); 378 bitset_resize(&intrmap->intrmap_map, intrmap->intrmap_size); 379 intrmap->intrmap_free = 0; 380 381 immu->immu_intrmap = intrmap; 382 383 return (DDI_SUCCESS); 384 } 385 386 static immu_t * 387 get_immu(dev_info_t *dip, uint16_t type, uchar_t ioapic_index) 388 { 389 immu_t *immu = NULL; 390 391 if (!DDI_INTR_IS_MSI_OR_MSIX(type)) { 392 immu = immu_dmar_ioapic_immu(ioapic_index); 393 } else { 394 if (dip != NULL) 395 immu = immu_dmar_get_immu(dip); 396 } 397 398 return (immu); 399 } 400 401 static int 402 get_top_pcibridge(dev_info_t *dip, void *arg) 403 { 404 dev_info_t **topdipp = arg; 405 immu_devi_t *immu_devi; 406 407 mutex_enter(&(DEVI(dip)->devi_lock)); 408 immu_devi = DEVI(dip)->devi_iommu; 409 mutex_exit(&(DEVI(dip)->devi_lock)); 410 411 if (immu_devi == NULL || immu_devi->imd_pcib_type == IMMU_PCIB_BAD || 412 immu_devi->imd_pcib_type == IMMU_PCIB_ENDPOINT) { 413 return (DDI_WALK_CONTINUE); 414 } 415 416 *topdipp = dip; 417 418 return (DDI_WALK_CONTINUE); 419 } 420 421 static dev_info_t * 422 intrmap_top_pcibridge(dev_info_t *rdip) 423 { 424 dev_info_t *top_pcibridge = NULL; 425 426 if (immu_walk_ancestor(rdip, NULL, get_top_pcibridge, 427 &top_pcibridge, NULL, 0) != DDI_SUCCESS) { 428 return (NULL); 429 } 430 431 return (top_pcibridge); 432 } 433 434 /* function to get interrupt request source id */ 435 static uint32_t 436 get_sid(dev_info_t *dip, uint16_t type, uchar_t ioapic_index) 437 { 438 dev_info_t *pdip; 439 immu_devi_t *immu_devi; 440 uint16_t sid; 441 uchar_t svt, sq; 442 443 if (!intrmap_enable_sid_verify) { 444 return (0); 445 } 446 447 if (!DDI_INTR_IS_MSI_OR_MSIX(type)) { 448 /* for interrupt through I/O APIC */ 449 sid = immu_dmar_ioapic_sid(ioapic_index); 450 svt = SVT_ALL_VERIFY; 451 sq = SQ_VERIFY_ALL; 452 } else { 453 /* MSI/MSI-X interrupt */ 454 ASSERT(dip); 455 pdip = intrmap_top_pcibridge(dip); 456 ASSERT(pdip); 457 immu_devi = DEVI(pdip)->devi_iommu; 458 ASSERT(immu_devi); 459 if (immu_devi->imd_pcib_type == IMMU_PCIB_PCIE_PCI) { 460 /* device behind pcie to pci bridge */ 461 sid = (immu_devi->imd_bus << 8) | immu_devi->imd_sec; 462 svt = SVT_BUS_VERIFY; 463 sq = SQ_VERIFY_ALL; 464 } else { 465 /* pcie device or device behind pci to pci bridge */ 466 sid = (immu_devi->imd_bus << 8) | 467 immu_devi->imd_devfunc; 468 svt = SVT_ALL_VERIFY; 469 sq = SQ_VERIFY_ALL; 470 } 471 } 472 473 return (sid | (svt << 18) | (sq << 16)); 474 } 475 476 static void 477 intrmap_enable(immu_t *immu) 478 { 479 intrmap_t *intrmap; 480 uint64_t irta_reg; 481 482 intrmap = immu->immu_intrmap; 483 484 irta_reg = intrmap->intrmap_paddr | intrmap_irta_s; 485 if (intrmap_apic_mode == LOCAL_X2APIC) { 486 irta_reg |= (0x1 << 11); 487 } 488 489 immu_regs_intrmap_enable(immu, irta_reg); 490 } 491 492 /* ####################################################################### */ 493 494 /* 495 * immu_intr_handler() 496 * the fault event handler for a single immu unit 497 */ 498 int 499 immu_intr_handler(immu_t *immu) 500 { 501 uint32_t status; 502 int index, fault_reg_offset; 503 int max_fault_index; 504 boolean_t found_fault; 505 dev_info_t *idip; 506 507 mutex_enter(&(immu->immu_intr_lock)); 508 mutex_enter(&(immu->immu_regs_lock)); 509 510 /* read the fault status */ 511 status = immu_regs_get32(immu, IMMU_REG_FAULT_STS); 512 513 idip = immu->immu_dip; 514 ASSERT(idip); 515 516 /* check if we have a pending fault for this immu unit */ 517 if ((status & IMMU_FAULT_STS_PPF) == 0) { 518 mutex_exit(&(immu->immu_regs_lock)); 519 mutex_exit(&(immu->immu_intr_lock)); 520 return (DDI_INTR_UNCLAIMED); 521 } 522 523 /* 524 * handle all primary pending faults 525 */ 526 index = IMMU_FAULT_GET_INDEX(status); 527 max_fault_index = IMMU_CAP_GET_NFR(immu->immu_regs_cap) - 1; 528 fault_reg_offset = IMMU_CAP_GET_FRO(immu->immu_regs_cap); 529 530 found_fault = B_FALSE; 531 _NOTE(CONSTCOND) 532 while (1) { 533 uint64_t val; 534 uint8_t fault_reason; 535 uint8_t fault_type; 536 uint16_t sid; 537 uint64_t pg_addr; 538 uint64_t idx; 539 540 /* read the higher 64bits */ 541 val = immu_regs_get64(immu, fault_reg_offset + index * 16 + 8); 542 543 /* check if this fault register has pending fault */ 544 if (!IMMU_FRR_GET_F(val)) { 545 break; 546 } 547 548 found_fault = B_TRUE; 549 550 /* get the fault reason, fault type and sid */ 551 fault_reason = IMMU_FRR_GET_FR(val); 552 fault_type = IMMU_FRR_GET_FT(val); 553 sid = IMMU_FRR_GET_SID(val); 554 555 /* read the first 64bits */ 556 val = immu_regs_get64(immu, fault_reg_offset + index * 16); 557 pg_addr = val & IMMU_PAGEMASK; 558 idx = val >> 48; 559 560 /* clear the fault */ 561 immu_regs_put32(immu, fault_reg_offset + index * 16 + 12, 562 (((uint32_t)1) << 31)); 563 564 /* report the fault info */ 565 if (fault_reason < 0x20) { 566 /* immu-remapping fault */ 567 ddi_err(DER_WARN, idip, 568 "generated a fault event when translating DMA %s\n" 569 "\t on address 0x%" PRIx64 " for PCI(%d, %d, %d), " 570 "the reason is:\n\t %s", 571 fault_type ? "read" : "write", pg_addr, 572 (sid >> 8) & 0xff, (sid >> 3) & 0x1f, sid & 0x7, 573 immu_dvma_faults[MIN(fault_reason, 574 DVMA_MAX_FAULTS)]); 575 } else if (fault_reason < 0x27) { 576 /* intr-remapping fault */ 577 ddi_err(DER_WARN, idip, 578 "generated a fault event when translating " 579 "interrupt request\n" 580 "\t on index 0x%" PRIx64 " for PCI(%d, %d, %d), " 581 "the reason is:\n\t %s", 582 idx, 583 (sid >> 8) & 0xff, (sid >> 3) & 0x1f, sid & 0x7, 584 immu_intrmap_faults[MIN((fault_reason - 0x20), 585 INTRMAP_MAX_FAULTS)]); 586 } else { 587 ddi_err(DER_WARN, idip, "Unknown fault reason: 0x%x", 588 fault_reason); 589 } 590 591 index++; 592 if (index > max_fault_index) 593 index = 0; 594 } 595 596 /* Clear the fault */ 597 if (!found_fault) { 598 ddi_err(DER_MODE, idip, 599 "Fault register set but no fault present"); 600 } 601 immu_regs_put32(immu, IMMU_REG_FAULT_STS, 1); 602 mutex_exit(&(immu->immu_regs_lock)); 603 mutex_exit(&(immu->immu_intr_lock)); 604 return (DDI_INTR_CLAIMED); 605 } 606 /* ######################################################################### */ 607 608 /* 609 * Interrupt remap entry points 610 */ 611 612 /* initialize interrupt remapping */ 613 static int 614 immu_intrmap_init(int apic_mode) 615 { 616 immu_t *immu; 617 int error = DDI_FAILURE; 618 619 if (immu_intrmap_enable == B_FALSE) { 620 return (DDI_SUCCESS); 621 } 622 623 intrmap_apic_mode = apic_mode; 624 625 immu = list_head(&immu_list); 626 for (; immu; immu = list_next(&immu_list, immu)) { 627 if ((immu->immu_intrmap_running == B_TRUE) && 628 IMMU_ECAP_GET_IR(immu->immu_regs_excap)) { 629 if (init_unit(immu) == DDI_SUCCESS) { 630 error = DDI_SUCCESS; 631 } 632 } 633 } 634 635 /* 636 * if all IOMMU units disable intr remapping, 637 * return FAILURE 638 */ 639 return (error); 640 } 641 642 643 644 /* enable interrupt remapping */ 645 static void 646 immu_intrmap_switchon(int suppress_brdcst_eoi) 647 { 648 immu_t *immu; 649 650 651 intrmap_suppress_brdcst_eoi = suppress_brdcst_eoi; 652 653 immu = list_head(&immu_list); 654 for (; immu; immu = list_next(&immu_list, immu)) { 655 if (immu->immu_intrmap_setup == B_TRUE) { 656 intrmap_enable(immu); 657 } 658 } 659 } 660 661 /* alloc remapping entry for the interrupt */ 662 static void 663 immu_intrmap_alloc(void **intrmap_private_tbl, dev_info_t *dip, 664 uint16_t type, int count, uchar_t ioapic_index) 665 { 666 immu_t *immu; 667 intrmap_t *intrmap; 668 uint32_t idx, i; 669 uint32_t sid_svt_sq; 670 intrmap_private_t *intrmap_private; 671 672 if (intrmap_private_tbl[0] == INTRMAP_DISABLE || 673 intrmap_private_tbl[0] != NULL) { 674 return; 675 } 676 677 intrmap_private_tbl[0] = 678 kmem_zalloc(sizeof (intrmap_private_t), KM_SLEEP); 679 intrmap_private = INTRMAP_PRIVATE(intrmap_private_tbl[0]); 680 681 immu = get_immu(dip, type, ioapic_index); 682 if ((immu != NULL) && (immu->immu_intrmap_running == B_TRUE)) { 683 intrmap_private->ir_immu = immu; 684 } else { 685 goto intrmap_disable; 686 } 687 688 intrmap = immu->immu_intrmap; 689 690 if (count == 1) { 691 idx = alloc_tbl_entry(intrmap); 692 } else { 693 idx = alloc_tbl_multi_entries(intrmap, count); 694 } 695 696 if (idx == INTRMAP_IDX_FULL) { 697 goto intrmap_disable; 698 } 699 700 intrmap_private->ir_idx = idx; 701 702 sid_svt_sq = intrmap_private->ir_sid_svt_sq = 703 get_sid(dip, type, ioapic_index); 704 705 if (count == 1) { 706 if (IMMU_CAP_GET_CM(immu->immu_regs_cap)) { 707 immu_qinv_intr_one_cache(immu, idx); 708 } else { 709 immu_regs_wbf_flush(immu); 710 } 711 return; 712 } 713 714 for (i = 1; i < count; i++) { 715 intrmap_private_tbl[i] = 716 kmem_zalloc(sizeof (intrmap_private_t), KM_SLEEP); 717 718 INTRMAP_PRIVATE(intrmap_private_tbl[i])->ir_immu = immu; 719 INTRMAP_PRIVATE(intrmap_private_tbl[i])->ir_sid_svt_sq = 720 sid_svt_sq; 721 INTRMAP_PRIVATE(intrmap_private_tbl[i])->ir_idx = idx + i; 722 } 723 724 if (IMMU_CAP_GET_CM(immu->immu_regs_cap)) { 725 immu_qinv_intr_caches(immu, idx, count); 726 } else { 727 immu_regs_wbf_flush(immu); 728 } 729 730 return; 731 732 intrmap_disable: 733 kmem_free(intrmap_private_tbl[0], sizeof (intrmap_private_t)); 734 intrmap_private_tbl[0] = INTRMAP_DISABLE; 735 } 736 737 738 /* remapping the interrupt */ 739 static void 740 immu_intrmap_map(void *intrmap_private, void *intrmap_data, uint16_t type, 741 int count) 742 { 743 immu_t *immu; 744 intrmap_t *intrmap; 745 ioapic_rdt_t *irdt = (ioapic_rdt_t *)intrmap_data; 746 msi_regs_t *mregs = (msi_regs_t *)intrmap_data; 747 intrmap_rte_t irte; 748 uint_t idx, i; 749 uint32_t dst, sid_svt_sq; 750 uchar_t vector, dlm, tm, rh, dm; 751 752 if (intrmap_private == INTRMAP_DISABLE) 753 return; 754 755 idx = INTRMAP_PRIVATE(intrmap_private)->ir_idx; 756 immu = INTRMAP_PRIVATE(intrmap_private)->ir_immu; 757 intrmap = immu->immu_intrmap; 758 sid_svt_sq = INTRMAP_PRIVATE(intrmap_private)->ir_sid_svt_sq; 759 760 if (!DDI_INTR_IS_MSI_OR_MSIX(type)) { 761 dm = RDT_DM(irdt->ir_lo); 762 rh = 0; 763 tm = RDT_TM(irdt->ir_lo); 764 dlm = RDT_DLM(irdt->ir_lo); 765 dst = irdt->ir_hi; 766 767 /* 768 * Mark the IRTE's TM as Edge to suppress broadcast EOI. 769 */ 770 if (intrmap_suppress_brdcst_eoi) { 771 tm = TRIGGER_MODE_EDGE; 772 } 773 774 vector = RDT_VECTOR(irdt->ir_lo); 775 } else { 776 dm = MSI_ADDR_DM_PHYSICAL; 777 rh = MSI_ADDR_RH_FIXED; 778 tm = TRIGGER_MODE_EDGE; 779 dlm = 0; 780 dst = mregs->mr_addr; 781 782 vector = mregs->mr_data & 0xff; 783 } 784 785 if (intrmap_apic_mode == LOCAL_APIC) 786 dst = (dst & 0xFF) << 8; 787 788 if (count == 1) { 789 irte.lo = IRTE_LOW(dst, vector, dlm, tm, rh, dm, 0, 1); 790 irte.hi = IRTE_HIGH(sid_svt_sq); 791 792 /* set interrupt remapping table entry */ 793 bcopy(&irte, intrmap->intrmap_vaddr + 794 idx * INTRMAP_RTE_SIZE, 795 INTRMAP_RTE_SIZE); 796 797 immu_qinv_intr_one_cache(immu, idx); 798 799 } else { 800 for (i = 0; i < count; i++) { 801 irte.lo = IRTE_LOW(dst, vector, dlm, tm, rh, dm, 0, 1); 802 irte.hi = IRTE_HIGH(sid_svt_sq); 803 804 /* set interrupt remapping table entry */ 805 bcopy(&irte, intrmap->intrmap_vaddr + 806 idx * INTRMAP_RTE_SIZE, 807 INTRMAP_RTE_SIZE); 808 vector++; 809 idx++; 810 } 811 812 immu_qinv_intr_caches(immu, idx, count); 813 } 814 } 815 816 /* free the remapping entry */ 817 static void 818 immu_intrmap_free(void **intrmap_privatep) 819 { 820 immu_t *immu; 821 intrmap_t *intrmap; 822 uint32_t idx; 823 824 if (*intrmap_privatep == INTRMAP_DISABLE || *intrmap_privatep == NULL) { 825 *intrmap_privatep = NULL; 826 return; 827 } 828 829 immu = INTRMAP_PRIVATE(*intrmap_privatep)->ir_immu; 830 intrmap = immu->immu_intrmap; 831 idx = INTRMAP_PRIVATE(*intrmap_privatep)->ir_idx; 832 833 bzero(intrmap->intrmap_vaddr + idx * INTRMAP_RTE_SIZE, 834 INTRMAP_RTE_SIZE); 835 836 immu_qinv_intr_one_cache(immu, idx); 837 838 mutex_enter(&intrmap->intrmap_lock); 839 bitset_del(&intrmap->intrmap_map, idx); 840 if (intrmap->intrmap_free == INTRMAP_IDX_FULL) { 841 intrmap->intrmap_free = idx; 842 } 843 mutex_exit(&intrmap->intrmap_lock); 844 845 kmem_free(*intrmap_privatep, sizeof (intrmap_private_t)); 846 *intrmap_privatep = NULL; 847 } 848 849 /* record the ioapic rdt entry */ 850 static void 851 immu_intrmap_rdt(void *intrmap_private, ioapic_rdt_t *irdt) 852 { 853 uint32_t rdt_entry, tm, pol, idx, vector; 854 855 rdt_entry = irdt->ir_lo; 856 857 if (intrmap_private != INTRMAP_DISABLE && intrmap_private != NULL) { 858 idx = INTRMAP_PRIVATE(intrmap_private)->ir_idx; 859 tm = RDT_TM(rdt_entry); 860 pol = RDT_POL(rdt_entry); 861 vector = RDT_VECTOR(rdt_entry); 862 irdt->ir_lo = (tm << INTRMAP_IOAPIC_TM_SHIFT) | 863 (pol << INTRMAP_IOAPIC_POL_SHIFT) | 864 ((idx >> 15) << INTRMAP_IOAPIC_IDX15_SHIFT) | 865 vector; 866 irdt->ir_hi = (idx << INTRMAP_IOAPIC_IDX_SHIFT) | 867 (1 << INTRMAP_IOAPIC_FORMAT_SHIFT); 868 } else { 869 irdt->ir_hi <<= APIC_ID_BIT_OFFSET; 870 } 871 } 872 873 /* record the msi interrupt structure */ 874 /*ARGSUSED*/ 875 static void 876 immu_intrmap_msi(void *intrmap_private, msi_regs_t *mregs) 877 { 878 uint_t idx; 879 880 if (intrmap_private != INTRMAP_DISABLE && intrmap_private != NULL) { 881 idx = INTRMAP_PRIVATE(intrmap_private)->ir_idx; 882 883 mregs->mr_data = 0; 884 mregs->mr_addr = MSI_ADDR_HDR | 885 ((idx & 0x7fff) << INTRMAP_MSI_IDX_SHIFT) | 886 (1 << INTRMAP_MSI_FORMAT_SHIFT) | 887 (1 << INTRMAP_MSI_SHV_SHIFT) | 888 ((idx >> 15) << INTRMAP_MSI_IDX15_SHIFT); 889 } else { 890 mregs->mr_addr = MSI_ADDR_HDR | 891 (MSI_ADDR_RH_FIXED << MSI_ADDR_RH_SHIFT) | 892 (MSI_ADDR_DM_PHYSICAL << MSI_ADDR_DM_SHIFT) | 893 (mregs->mr_addr << MSI_ADDR_DEST_SHIFT); 894 mregs->mr_data = (MSI_DATA_TM_EDGE << MSI_DATA_TM_SHIFT) | 895 mregs->mr_data; 896 } 897 } 898 899 /* ######################################################################### */ 900 /* 901 * Functions exported by immu_intr.c 902 */ 903 void 904 immu_intrmap_setup(list_t *listp) 905 { 906 immu_t *immu; 907 908 /* 909 * Check if ACPI DMAR tables say that 910 * interrupt remapping is supported 911 */ 912 if (immu_dmar_intrmap_supported() == B_FALSE) { 913 return; 914 } 915 916 /* 917 * Check if interrupt remapping is disabled. 918 */ 919 if (immu_intrmap_enable == B_FALSE) { 920 return; 921 } 922 923 psm_vt_ops = &intrmap_ops; 924 925 immu = list_head(listp); 926 for (; immu; immu = list_next(listp, immu)) { 927 mutex_init(&(immu->immu_intrmap_lock), NULL, 928 MUTEX_DEFAULT, NULL); 929 mutex_enter(&(immu->immu_intrmap_lock)); 930 immu->immu_intrmap_setup = B_TRUE; 931 mutex_exit(&(immu->immu_intrmap_lock)); 932 } 933 } 934 935 void 936 immu_intrmap_startup(immu_t *immu) 937 { 938 /* do nothing */ 939 mutex_enter(&(immu->immu_intrmap_lock)); 940 if (immu->immu_intrmap_setup == B_TRUE) { 941 immu->immu_intrmap_running = B_TRUE; 942 } 943 mutex_exit(&(immu->immu_intrmap_lock)); 944 } 945 946 /* 947 * Register a Intel IOMMU unit (i.e. DMAR unit's) 948 * interrupt handler 949 */ 950 void 951 immu_intr_register(immu_t *immu) 952 { 953 int irq, vect; 954 char intr_handler_name[IMMU_MAXNAMELEN]; 955 uint32_t msi_data; 956 uint32_t uaddr; 957 uint32_t msi_addr; 958 uint32_t localapic_id = 0; 959 960 if (psm_get_localapicid) 961 localapic_id = psm_get_localapicid(0); 962 963 msi_addr = (MSI_ADDR_HDR | 964 ((localapic_id & 0xFF) << MSI_ADDR_DEST_SHIFT) | 965 (MSI_ADDR_RH_FIXED << MSI_ADDR_RH_SHIFT) | 966 (MSI_ADDR_DM_PHYSICAL << MSI_ADDR_DM_SHIFT)); 967 968 if (intrmap_apic_mode == LOCAL_X2APIC) { 969 uaddr = localapic_id & 0xFFFFFF00; 970 } else { 971 uaddr = 0; 972 } 973 974 /* Dont need to hold immu_intr_lock since we are in boot */ 975 irq = vect = psm_get_ipivect(IMMU_INTR_IPL, -1); 976 if (psm_xlate_vector_by_irq != NULL) 977 vect = psm_xlate_vector_by_irq(irq); 978 979 msi_data = ((MSI_DATA_DELIVERY_FIXED << 980 MSI_DATA_DELIVERY_SHIFT) | vect); 981 982 (void) snprintf(intr_handler_name, sizeof (intr_handler_name), 983 "%s-intr-handler", immu->immu_name); 984 985 (void) add_avintr((void *)NULL, IMMU_INTR_IPL, 986 (avfunc)(immu_intr_handler), intr_handler_name, irq, 987 (caddr_t)immu, NULL, NULL, NULL); 988 989 immu_regs_intr_enable(immu, msi_addr, msi_data, uaddr); 990 991 (void) immu_intr_handler(immu); 992 } 993