/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * PCI iommu initialization and configuration
 */

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/async.h>
#include <sys/sysmacros.h>
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/vmem.h>
#include <sys/machsystm.h>	/* lddphys() */
#include <sys/iommutsb.h>
#include <sys/pci/pci_obj.h>

/*LINTLIBRARY*/

static void iommu_tlb_flushall(iommu_t *iommu_p);
static void iommu_preserve_tsb(iommu_t *iommu_p);

void
iommu_create(pci_t *pci_p)
{
	dev_info_t *dip = pci_p->pci_dip;
	iommu_t *iommu_p;
	uintptr_t a;
	size_t cache_size;
	uint32_t tsb_entries;

	char map_name[32];
	extern uint64_t va_to_pa(void *);

	pci_dvma_range_prop_t pci_dvma_range;

	/*
	 * Allocate iommu state structure and link it to the
	 * pci state structure.
	 */
	iommu_p = (iommu_t *)kmem_zalloc(sizeof (iommu_t), KM_SLEEP);
	pci_p->pci_iommu_p = iommu_p;
	iommu_p->iommu_pci_p = pci_p;
	iommu_p->iommu_inst = ddi_get_instance(dip);

	/*
	 * chip specific dvma_end, tsb_size & context support
	 */
	iommu_p->iommu_dvma_end = pci_iommu_dvma_end;
	a = pci_iommu_setup(iommu_p);
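
	/*
	 * NOTE: pci_iommu_setup() is the chip-specific setup hook;
	 * judging by its use below, it returns the base virtual
	 * address to which the COMMON_IOMMU_* register offsets are
	 * applied.
	 */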

	/*
	 * Determine the virtual address of iommu registers.
	 */
	iommu_p->iommu_ctrl_reg =
	    (uint64_t *)(a + COMMON_IOMMU_CTRL_REG_OFFSET);
	iommu_p->iommu_tsb_base_addr_reg =
	    (uint64_t *)(a + COMMON_IOMMU_TSB_BASE_ADDR_REG_OFFSET);
	iommu_p->iommu_flush_page_reg =
	    (uint64_t *)(a + COMMON_IOMMU_FLUSH_PAGE_REG_OFFSET);

	/*
	 * Configure the rest of the iommu parameters according to:
	 * tsb_size and dvma_end
	 */
	iommu_p->iommu_tsb_vaddr = /* retrieve TSB VA reserved by system */
	    iommu_tsb_cookie_to_va(pci_p->pci_tsb_cookie);
	iommu_p->iommu_tsb_entries = tsb_entries =
	    IOMMU_TSBSIZE_TO_TSBENTRIES(iommu_p->iommu_tsb_size);
	iommu_p->iommu_tsb_paddr = va_to_pa((caddr_t)iommu_p->iommu_tsb_vaddr);
	iommu_p->iommu_dvma_cache_locks =
	    kmem_zalloc(pci_dvma_page_cache_entries, KM_SLEEP);

	iommu_p->iommu_dvma_base = iommu_p->iommu_dvma_end + 1
	    - (tsb_entries * IOMMU_PAGE_SIZE);
	iommu_p->dvma_base_pg = IOMMU_BTOP(iommu_p->iommu_dvma_base);
	iommu_p->iommu_dvma_reserve = tsb_entries >> 1;
	iommu_p->dvma_end_pg = IOMMU_BTOP(iommu_p->iommu_dvma_end);
	iommu_p->iommu_dma_bypass_base = COMMON_IOMMU_BYPASS_BASE;
	iommu_p->iommu_dma_bypass_end = COMMON_IOMMU_BYPASS_END;
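
	/*
	 * Sizing example (illustrative only, cf. the size table at
	 * the bottom of this file): each 8-byte TTE maps one 8KB
	 * iommu page (the page size programmed in iommu_configure()),
	 * so a 64KB TSB holds 8K entries and covers 8K * 8KB = 64MB
	 * of DVMA space, placing iommu_dvma_base 64MB below
	 * iommu_dvma_end + 1.  iommu_dvma_reserve is seeded with half
	 * of the TSB entries.
	 */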

	/*
	 * export "virtual-dma" software property to support
	 * child devices needing to know DVMA range
	 */
	pci_dvma_range.dvma_base = (uint32_t)iommu_p->iommu_dvma_base;
	pci_dvma_range.dvma_len = (uint32_t)
	    iommu_p->iommu_dvma_end - iommu_p->iommu_dvma_base + 1;
	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    "virtual-dma", (caddr_t)&pci_dvma_range,
	    sizeof (pci_dvma_range));

	DEBUG2(DBG_ATTACH, dip, "iommu_create: ctrl=%p, tsb=%p\n",
	    iommu_p->iommu_ctrl_reg, iommu_p->iommu_tsb_base_addr_reg);
	DEBUG2(DBG_ATTACH, dip, "iommu_create: page_flush=%p, ctx_flush=%p\n",
	    iommu_p->iommu_flush_page_reg, iommu_p->iommu_flush_ctx_reg);
	DEBUG2(DBG_ATTACH, dip, "iommu_create: tsb vaddr=%p tsb_paddr=%p\n",
	    iommu_p->iommu_tsb_vaddr, iommu_p->iommu_tsb_paddr);
	DEBUG1(DBG_ATTACH, dip, "iommu_create: allocated size=%x\n",
	    iommu_tsb_cookie_to_size(pci_p->pci_tsb_cookie));
	DEBUG2(DBG_ATTACH, dip, "iommu_create: fast tsb tte addr: %x + %x\n",
	    iommu_p->iommu_tsb_vaddr,
	    pci_dvma_page_cache_entries * pci_dvma_page_cache_clustsz);
	DEBUG3(DBG_ATTACH, dip,
	    "iommu_create: tsb size=%x, tsb entries=%x, dvma base=%x\n",
	    iommu_p->iommu_tsb_size, iommu_p->iommu_tsb_entries,
	    iommu_p->iommu_dvma_base);
	DEBUG2(DBG_ATTACH, dip,
	    "iommu_create: dvma_cache_locks=%x cache_entries=%x\n",
	    iommu_p->iommu_dvma_cache_locks, pci_dvma_page_cache_entries);

	/*
	 * zero out the area to be used for iommu tsb
	 */
	bzero(iommu_p->iommu_tsb_vaddr, tsb_entries << 3);

	/*
	 * Create a virtual memory map for dvma address space.
	 * Reserve 'size' bytes of low dvma space for fast track cache.
	 */
	(void) snprintf(map_name, sizeof (map_name), "%s%d_dvma",
	    ddi_driver_name(dip), ddi_get_instance(dip));

	cache_size = IOMMU_PTOB(pci_dvma_page_cache_entries *
	    pci_dvma_page_cache_clustsz);
	iommu_p->iommu_dvma_fast_end = iommu_p->iommu_dvma_base +
	    cache_size - 1;
	iommu_p->iommu_dvma_map = vmem_create(map_name,
	    (void *)(iommu_p->iommu_dvma_fast_end + 1),
	    IOMMU_PTOB(tsb_entries) - cache_size, IOMMU_PAGE_SIZE,
	    NULL, NULL, NULL, IOMMU_PAGE_SIZE, VM_SLEEP);

	mutex_init(&iommu_p->dvma_debug_lock, NULL, MUTEX_DRIVER, NULL);

	/*
	 * On detach, the TSB Base Address Register gets set to zero,
	 * so if it's zero here, there is no need to preserve TTEs.
	 */
	if (pci_preserve_iommu_tsb && *iommu_p->iommu_tsb_base_addr_reg)
		iommu_preserve_tsb(iommu_p);

	iommu_configure(iommu_p);
}

void
iommu_destroy(pci_t *pci_p)
{
#ifdef DEBUG
	dev_info_t *dip = pci_p->pci_dip;
#endif
	iommu_t *iommu_p = pci_p->pci_iommu_p;
	volatile uint64_t ctl_val = *iommu_p->iommu_ctrl_reg;

	DEBUG0(DBG_DETACH, dip, "iommu_destroy:\n");

	/*
	 * Disable the IOMMU by setting the TSB Base Address to zero
	 * and the TSB Table size to the smallest possible.
	 */
	ctl_val = ctl_val & ~(7 << COMMON_IOMMU_CTRL_TSB_SZ_SHIFT);

	*iommu_p->iommu_ctrl_reg = ctl_val;
	*iommu_p->iommu_tsb_base_addr_reg = 0;

	/*
	 * Return the boot time allocated tsb.
	 */
	iommu_tsb_free(pci_p->pci_tsb_cookie);

	/*
	 * Teardown any implementation-specific structures set up in
	 * pci_iommu_setup.
	 */
	pci_iommu_teardown(iommu_p);

	if (DVMA_DBG_ON(iommu_p))
		pci_dvma_debug_fini(iommu_p);
	mutex_destroy(&iommu_p->dvma_debug_lock);

	/*
	 * Free the dvma resource map.
	 */
	vmem_destroy(iommu_p->iommu_dvma_map);

	kmem_free(iommu_p->iommu_dvma_cache_locks,
	    pci_dvma_page_cache_entries);

	/*
	 * Free the iommu state structure.
	 */
	kmem_free(iommu_p, sizeof (iommu_t));
	pci_p->pci_iommu_p = NULL;
}

/*
 * re-program iommu on the fly while preserving on-going dma
 * transactions on the PCI bus.
 */
void
iommu_configure(iommu_t *iommu_p)
{
	pci_t *pci_p = iommu_p->iommu_pci_p;
	uint64_t cfgpa = pci_get_cfg_pabase(pci_p);
	dev_info_t *dip = iommu_p->iommu_pci_p->pci_dip;
	dev_info_t *cdip = NULL;
	volatile uint64_t ctl_val = (uint64_t)
	    ((iommu_p->iommu_tsb_size << COMMON_IOMMU_CTRL_TSB_SZ_SHIFT) |
	    (0 /* 8k page */ << COMMON_IOMMU_CTRL_TBW_SZ_SHIFT) |
	    COMMON_IOMMU_CTRL_ENABLE |
	    COMMON_IOMMU_CTRL_DIAG_ENABLE |
	    (pci_lock_tlb ? COMMON_IOMMU_CTRL_LCK_ENABLE : 0));

	DEBUG2(DBG_ATTACH, dip, "iommu_configure: iommu_ctl=%08x.%08x\n",
	    HI32(ctl_val), LO32(ctl_val));
	if (!pci_preserve_iommu_tsb || !(*iommu_p->iommu_tsb_base_addr_reg)) {
		*iommu_p->iommu_ctrl_reg = COMMON_IOMMU_CTRL_DIAG_ENABLE;
		iommu_tlb_flushall(iommu_p);
		goto config;
	}
	cdip = ddi_get_child(dip);
	for (; cdip; cdip = ddi_get_next_sibling(cdip)) {
		uint32_t *reg_p;
		int reg_len;
		if (ddi_getlongprop(DDI_DEV_T_NONE, cdip, DDI_PROP_DONTPASS,
		    "reg", (caddr_t)&reg_p, &reg_len) != DDI_PROP_SUCCESS)
			continue;
		cfgpa += (*reg_p) & (PCI_CONF_ADDR_MASK ^ PCI_REG_REG_M);
		kmem_free(reg_p, reg_len);
		break;
	}

config:
	pci_iommu_config(iommu_p, ctl_val, cdip ? cfgpa : 0);
}
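
/*
 * The child walk above biases cfgpa, the PA of the bridge's config
 * space, by the first child's bus/dev/func bits taken from its "reg"
 * property; the chip-specific pci_iommu_config() evidently uses that
 * child config address as a probe target while it swaps in the new
 * TSB, and is handed 0 when no child exists or nothing is preserved.
 */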

void
iommu_map_pages(iommu_t *iommu_p, ddi_dma_impl_t *mp,
	dvma_addr_t dvma_pg, size_t npages, size_t pfn_index)
{
	int i;
	dvma_addr_t pg_index = dvma_pg - iommu_p->dvma_base_pg;
	uint64_t *tte_addr = iommu_p->iommu_tsb_vaddr + pg_index;
	size_t pfn_last = pfn_index + npages;
	uint64_t tte = PCI_GET_MP_TTE(mp->dmai_tte);
#ifdef DEBUG
	dev_info_t *dip = iommu_p->iommu_pci_p->pci_dip;
#endif

	ASSERT(pfn_last <= mp->dmai_ndvmapages);

	DEBUG5(DBG_MAP_WIN, dip,
	    "iommu_map_pages:%x+%x=%x npages=0x%x pfn_index=0x%x\n",
	    (uint_t)iommu_p->dvma_base_pg, (uint_t)pg_index, dvma_pg,
	    (uint_t)npages, (uint_t)pfn_index);

	for (i = pfn_index; i < pfn_last; i++, pg_index++, tte_addr++) {
		iopfn_t pfn = PCI_GET_MP_PFN(mp, i);
		volatile uint64_t cur_tte = IOMMU_PTOB(pfn) | tte;

		DEBUG3(DBG_MAP_WIN, dip, "iommu_map_pages: mp=%p pg[%x]=%x\n",
		    mp, i, (uint_t)pfn);
		DEBUG3(DBG_MAP_WIN, dip,
		    "iommu_map_pages: pg_index=%x tte=%08x.%08x\n",
		    pg_index, HI32(cur_tte), LO32(cur_tte));
		ASSERT(TTE_IS_INVALID(*tte_addr));
		*tte_addr = cur_tte;
#ifdef DEBUG
		if (pfn == 0 && pci_warn_pp0)
			cmn_err(CE_WARN, "%s%d <%p> doing DMA to pp0\n",
			    ddi_driver_name(mp->dmai_rdip),
			    ddi_get_instance(mp->dmai_rdip), mp);
#endif
	}
	ASSERT(tte_addr == iommu_p->iommu_tsb_vaddr + pg_index);
#ifdef DEBUG
	if (HAS_REDZONE(mp)) {
		DEBUG1(DBG_MAP_WIN, dip, "iommu_map_pages: redzone pg=%x\n",
		    pg_index);
		ASSERT(TTE_IS_INVALID(iommu_p->iommu_tsb_vaddr[pg_index]));
	}
#endif
	if (DVMA_DBG_ON(iommu_p))
		pci_dvma_alloc_debug(iommu_p, (char *)mp->dmai_mapping,
		    mp->dmai_size, mp);
}
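
/*
 * Worked example of the window arithmetic in iommu_map_window()
 * below, assuming 8KB iommu pages: with dmai_roffset = 0x200 and
 * dmai_winsize = 0x4000, window 0 maps from object offset 0 with an
 * initial page offset of 0x200, while window 1 covers object offset
 * 0x4000 - 0x200 = 0x3e00 onward and loads TTEs starting at pfnlist
 * index IOMMU_BTOP(0x4000) = 2.  Only window 0 carries the page 0
 * offset.
 */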

/*
 * iommu_map_window - map a dvma window into the iommu
 *
 * used by: pci_dma_win(), pci_dma_ctlops() - DDI_DMA_MOVWIN, DDI_DMA_NEXTWIN
 *
 * return value: none
 */
/*ARGSUSED*/
void
iommu_map_window(iommu_t *iommu_p, ddi_dma_impl_t *mp, window_t win_no)
{
	uint32_t obj_pg0_off = mp->dmai_roffset;
	uint32_t win_pg0_off = win_no ? 0 : obj_pg0_off;
	size_t win_size = mp->dmai_winsize;
	size_t pfn_index = win_size * win_no;		/* temp value */
	size_t obj_off = win_no ?
	    pfn_index - obj_pg0_off : 0;		/* xferred sz */
	dvma_addr_t dvma_pg = IOMMU_BTOP(mp->dmai_mapping);
	size_t res_size = mp->dmai_object.dmao_size - obj_off + win_pg0_off;

	ASSERT(!(win_size & IOMMU_PAGE_OFFSET));
	if (win_no >= mp->dmai_nwin)
		return;
	if (res_size < win_size)	/* last window */
		win_size = res_size;	/* mp->dmai_winsize unchanged */

	mp->dmai_mapping = IOMMU_PTOB(dvma_pg) | win_pg0_off;
	mp->dmai_size = win_size - win_pg0_off;	/* cur win xferrable size */
	mp->dmai_offset = obj_off;		/* win offset into object */
	pfn_index = IOMMU_BTOP(pfn_index);	/* index into pfnlist */
	iommu_map_pages(iommu_p, mp, dvma_pg, IOMMU_BTOPR(win_size),
	    pfn_index);
}

void
iommu_unmap_pages(iommu_t *iommu_p, dvma_addr_t dvma_pg, uint_t npages)
{
	dvma_addr_t pg_index = IOMMU_PAGE_INDEX(iommu_p, dvma_pg);

	for (; npages; npages--, dvma_pg++, pg_index++) {
		DEBUG1(DBG_UNMAP_WIN|DBG_CONT, 0, " %x", dvma_pg);
		IOMMU_UNLOAD_TTE(iommu_p, pg_index);

		if (!tm_mtlb_gc)
			IOMMU_PAGE_FLUSH(iommu_p, dvma_pg);
	}
}

void
iommu_remap_pages(iommu_t *iommu_p, ddi_dma_impl_t *mp, dvma_addr_t dvma_pg,
	size_t npages, size_t pfn_index)
{
	iommu_unmap_pages(iommu_p, dvma_pg, npages);
	iommu_map_pages(iommu_p, mp, dvma_pg, npages, pfn_index);
}

/*
 * iommu_unmap_window
 *
 * This routine is called to break down the iommu mappings to a dvma window.
 * Non-partial mappings are viewed as single-window mappings.
 *
 * used by: pci_dma_unbindhdl(), pci_dma_window(),
 *	and pci_dma_ctlops() - DDI_DMA_FREE, DDI_DMA_MOVWIN, DDI_DMA_NEXTWIN
 *
 * return value: none
 */
/*ARGSUSED*/
void
iommu_unmap_window(iommu_t *iommu_p, ddi_dma_impl_t *mp)
{
	dvma_addr_t dvma_pg = IOMMU_BTOP(mp->dmai_mapping);
	dvma_addr_t pg_index = IOMMU_PAGE_INDEX(iommu_p, dvma_pg);
	uint_t npages = IOMMU_BTOP(mp->dmai_winsize);
#ifdef DEBUG
	dev_info_t *dip = iommu_p->iommu_pci_p->pci_dip;
#endif
	/*
	 * Invalidate each page of the mapping in the tsb and flush
	 * it from the tlb.
	 */
	DEBUG2(DBG_UNMAP_WIN, dip, "mp=%p %x pfns:", mp, npages);
	if (mp->dmai_flags & DMAI_FLAGS_CONTEXT) {
		dvma_context_t ctx = MP2CTX(mp);
		for (; npages; npages--, pg_index++) {
			DEBUG1(DBG_UNMAP_WIN|DBG_CONT, dip, " %x", pg_index);
			IOMMU_UNLOAD_TTE(iommu_p, pg_index);
		}
		DEBUG1(DBG_UNMAP_WIN|DBG_CONT, dip, " (context %x)", ctx);
		*iommu_p->iommu_flush_ctx_reg = ctx;
	} else
		iommu_unmap_pages(iommu_p, dvma_pg, npages);

	DEBUG0(DBG_UNMAP_WIN|DBG_CONT, dip, "\n");

	if (DVMA_DBG_ON(iommu_p))
		pci_dvma_free_debug(iommu_p, (char *)mp->dmai_mapping,
		    mp->dmai_size, mp);
}

int
pci_alloc_tsb(pci_t *pci_p)
{
	uint16_t tsbc;

	if ((tsbc = iommu_tsb_alloc(pci_p->pci_id)) == IOMMU_TSB_COOKIE_NONE) {
		cmn_err(CE_WARN, "%s%d: Unable to allocate IOMMU TSB.",
		    ddi_driver_name(pci_p->pci_dip),
		    ddi_get_instance(pci_p->pci_dip));
		return (DDI_FAILURE);
	}
	pci_p->pci_tsb_cookie = tsbc;
	return (DDI_SUCCESS);
}

void
pci_free_tsb(pci_t *pci_p)
{
	iommu_tsb_free(pci_p->pci_tsb_cookie);
}

#if 0
/*
 * The following data structure is used to map a tsb size
 * to a tsb size configuration parameter in the iommu
 * control register.
 * This is a hardware table.  It is here for reference only.
 */
static int pci_iommu_tsb_sizes[] = {
	0x2000,		/* 0 - 8 mb */
	0x4000,		/* 1 - 16 mb */
	0x8000,		/* 2 - 32 mb */
	0x10000,	/* 3 - 64 mb */
	0x20000,	/* 4 - 128 mb */
	0x40000,	/* 5 - 256 mb */
	0x80000,	/* 6 - 512 mb */
	0x100000	/* 7 - 1 gb */
};
#endif

uint_t
iommu_tsb_size_encode(uint_t tsb_bytes)
{
	uint_t i;

	for (i = 7; i && (tsb_bytes < (0x2000 << i)); i--)
		/* empty */;
	return (i);
}
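
/*
 * Example: iommu_tsb_size_encode(0x40000) starts at i = 7 and steps
 * down until 0x2000 << i no longer exceeds tsb_bytes, returning 5,
 * which matches the 256MB row of the table above.  Any tsb_bytes
 * below 0x2000 falls through to encoding 0.
 */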

/*
 * invalidate IOMMU TLB entries through diagnostic registers.
 */
static void
iommu_tlb_flushall(iommu_t *iommu_p)
{
	int i;
	uint64_t base = (uint64_t)(iommu_p->iommu_ctrl_reg) -
	    COMMON_IOMMU_CTRL_REG_OFFSET;
	volatile uint64_t *tlb_tag = (volatile uint64_t *)
	    (base + COMMON_IOMMU_TLB_TAG_DIAG_ACC_OFFSET);
	volatile uint64_t *tlb_data = (volatile uint64_t *)
	    (base + COMMON_IOMMU_TLB_DATA_DIAG_ACC_OFFSET);
	for (i = 0; i < IOMMU_TLB_ENTRIES; i++)
		tlb_tag[i] = tlb_data[i] = 0ull;
}

static void
iommu_preserve_tsb(iommu_t *iommu_p)
{
#ifdef DEBUG
	dev_info_t *dip = iommu_p->iommu_pci_p->pci_dip;
#endif
	uint_t i, obp_tsb_entries, obp_tsb_size, base_pg_index;
	uint64_t ctl = *iommu_p->iommu_ctrl_reg;
	uint64_t obp_tsb_pa = *iommu_p->iommu_tsb_base_addr_reg;
	uint64_t *base_tte_addr;

	DEBUG3(DBG_ATTACH, dip,
	    "iommu_tsb_base_addr_reg=0x%08x (0x%08x.0x%08x)\n",
	    iommu_p->iommu_tsb_base_addr_reg,
	    (uint32_t)(*iommu_p->iommu_tsb_base_addr_reg >> 32),
	    (uint32_t)(*iommu_p->iommu_tsb_base_addr_reg & 0xffffffff));

	obp_tsb_size = IOMMU_CTL_TO_TSBSIZE(ctl);
	obp_tsb_entries = IOMMU_TSBSIZE_TO_TSBENTRIES(obp_tsb_size);
	base_pg_index = iommu_p->dvma_end_pg - obp_tsb_entries + 1;
	base_tte_addr = iommu_p->iommu_tsb_vaddr +
	    (iommu_p->iommu_tsb_entries - obp_tsb_entries);

	/*
	 * old darwin prom does not set tsb size correctly, bail out.
	 */
	if ((obp_tsb_size == IOMMU_DARWIN_BOGUS_TSBSIZE) &&
	    (CHIP_TYPE(iommu_p->iommu_pci_p) == PCI_CHIP_SABRE))
		return;

	DEBUG3(DBG_ATTACH, dip, "iommu_preserve_tsb: kernel info\n"
	    "iommu_tsb_vaddr=%08x copy to base_tte_addr=%08x "
	    "base_pg_index=%x\n", iommu_p->iommu_tsb_vaddr,
	    base_tte_addr, base_pg_index);

	DEBUG3(DBG_ATTACH | DBG_CONT, dip, "iommu_preserve_tsb: obp info "
	    "obp_tsb_entries=0x%x obp_tsb_pa=%08x.%08x\n", obp_tsb_entries,
	    (uint32_t)(obp_tsb_pa >> 32), (uint32_t)obp_tsb_pa);

	for (i = 0; i < obp_tsb_entries; i++) {
		uint64_t tte = lddphys(obp_tsb_pa + i * 8);
		caddr_t va;

		if (TTE_IS_INVALID(tte)) {
			DEBUG0(DBG_ATTACH | DBG_CONT, dip, ".");
			continue;
		}

		base_tte_addr[i] = tte;
		DEBUG3(DBG_ATTACH | DBG_CONT, dip,
		    "\npreserve_tsb: (%x)=%08x.%08x\n", base_tte_addr + i,
		    (uint_t)(tte >> 32), (uint_t)(tte & 0xffffffff));

		/*
		 * permanently reserve this page from dvma address space
		 * resource map
		 */
		va = (caddr_t)(IOMMU_PTOB(base_pg_index + i));
		(void) vmem_xalloc(iommu_p->iommu_dvma_map, IOMMU_PAGE_SIZE,
		    IOMMU_PAGE_SIZE, 0, 0, va, va + IOMMU_PAGE_SIZE,
		    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
	}
}
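
/*
 * Note on iommu_preserve_tsb(): OBP's TTEs describe the highest
 * obp_tsb_entries pages of DVMA space, so they are copied into the
 * tail of the kernel TSB (base_tte_addr) and each still-valid page
 * is reserved with vmem_xalloc() so the dvma map can never hand it
 * out again.  VM_PANIC is presumably deliberate here: failing to
 * reserve a live OBP translation would let a later DVMA allocation
 * silently overwrite it.
 */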