/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright 2012 Garrett D'Amore <garrett@damore.org>. All rights reserved.
 */

/*
 * PCI iommu initialization and configuration
 */

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/async.h>
#include <sys/sysmacros.h>
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/vmem.h>
#include <sys/machsystm.h>	/* lddphys() */
#include <sys/iommutsb.h>
#include <sys/pci/pci_obj.h>

/*LINTLIBRARY*/

static void iommu_tlb_flushall(iommu_t *iommu_p);
static void iommu_preserve_tsb(iommu_t *iommu_p);

void
iommu_create(pci_t *pci_p)
{
	dev_info_t *dip = pci_p->pci_dip;
	iommu_t *iommu_p;
	uintptr_t a;
	size_t cache_size;
	uint32_t tsb_entries;

	char map_name[32];
	extern uint64_t va_to_pa(void *);

	pci_dvma_range_prop_t pci_dvma_range;

	/*
	 * Allocate iommu state structure and link it to the
	 * pci state structure.
	 */
	iommu_p = (iommu_t *)kmem_zalloc(sizeof (iommu_t), KM_SLEEP);
	pci_p->pci_iommu_p = iommu_p;
	iommu_p->iommu_pci_p = pci_p;
	iommu_p->iommu_inst = ddi_get_instance(dip);

	/*
	 * chip specific dvma_end, tsb_size & context support
	 */
	iommu_p->iommu_dvma_end = pci_iommu_dvma_end;
	a = pci_iommu_setup(iommu_p);

	/*
	 * Determine the virtual address of iommu registers.
	 */
	iommu_p->iommu_ctrl_reg =
	    (uint64_t *)(a + COMMON_IOMMU_CTRL_REG_OFFSET);
	iommu_p->iommu_tsb_base_addr_reg =
	    (uint64_t *)(a + COMMON_IOMMU_TSB_BASE_ADDR_REG_OFFSET);
	iommu_p->iommu_flush_page_reg =
	    (uint64_t *)(a + COMMON_IOMMU_FLUSH_PAGE_REG_OFFSET);

	/*
	 * Configure the rest of the iommu parameters according to:
	 * tsb_size and dvma_end
	 */
	iommu_p->iommu_tsb_vaddr =	/* retrieve TSB VA reserved by system */
	    iommu_tsb_cookie_to_va(pci_p->pci_tsb_cookie);
	iommu_p->iommu_tsb_entries = tsb_entries =
	    IOMMU_TSBSIZE_TO_TSBENTRIES(iommu_p->iommu_tsb_size);
	iommu_p->iommu_tsb_paddr = va_to_pa((caddr_t)iommu_p->iommu_tsb_vaddr);
	iommu_p->iommu_dvma_cache_locks =
	    kmem_zalloc(pci_dvma_page_cache_entries, KM_SLEEP);

	iommu_p->iommu_dvma_base = iommu_p->iommu_dvma_end + 1
	    - (tsb_entries * IOMMU_PAGE_SIZE);
	iommu_p->dvma_base_pg = IOMMU_BTOP(iommu_p->iommu_dvma_base);
	iommu_p->iommu_dvma_reserve = tsb_entries >> 1;
	iommu_p->dvma_end_pg = IOMMU_BTOP(iommu_p->iommu_dvma_end);
	iommu_p->iommu_dma_bypass_base = COMMON_IOMMU_BYPASS_BASE;
	iommu_p->iommu_dma_bypass_end = pci_iommu_bypass_end_configure();
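	/*
	 * For illustration only (hypothetical values): with 8K IOMMU pages,
	 * dvma_end = 0xffffffff and a TSB of 0x10000 entries, the mapped
	 * DVMA range spans 0x10000 * 0x2000 = 0x20000000 bytes (512MB),
	 * so dvma_base = 0x100000000 - 0x20000000 = 0xe0000000. The DVMA
	 * window always sits at the top of the chip's DVMA address space,
	 * one IOMMU page per TSB entry.
	 */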

	/*
	 * export "virtual-dma" software property to support
	 * child devices needing to know DVMA range
	 */
	pci_dvma_range.dvma_base = (uint32_t)iommu_p->iommu_dvma_base;
	pci_dvma_range.dvma_len = (uint32_t)
	    iommu_p->iommu_dvma_end - iommu_p->iommu_dvma_base + 1;
	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    "virtual-dma", (caddr_t)&pci_dvma_range,
	    sizeof (pci_dvma_range));
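	/*
	 * A consumer of this property would look it up the same way this
	 * file looks up "reg" below; a minimal sketch, with hypothetical
	 * names (child_dip, rangep) not defined in this file:
	 *
	 *	pci_dvma_range_prop_t *rangep;
	 *	int len;
	 *
	 *	if (ddi_getlongprop(DDI_DEV_T_ANY, child_dip,
	 *	    DDI_PROP_DONTPASS, "virtual-dma", (caddr_t)&rangep,
	 *	    &len) == DDI_PROP_SUCCESS) {
	 *		// use rangep->dvma_base and rangep->dvma_len
	 *		kmem_free(rangep, len);
	 *	}
	 */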
118
119 DEBUG2(DBG_ATTACH, dip, "iommu_create: ctrl=%p, tsb=%p\n",
120 iommu_p->iommu_ctrl_reg, iommu_p->iommu_tsb_base_addr_reg);
121 DEBUG2(DBG_ATTACH, dip, "iommu_create: page_flush=%p, ctx_flush=%p\n",
122 iommu_p->iommu_flush_page_reg, iommu_p->iommu_flush_ctx_reg);
123 DEBUG2(DBG_ATTACH, dip, "iommu_create: tsb vaddr=%p tsb_paddr=%p\n",
124 iommu_p->iommu_tsb_vaddr, iommu_p->iommu_tsb_paddr);
125 DEBUG1(DBG_ATTACH, dip, "iommu_create: allocated size=%x\n",
126 iommu_tsb_cookie_to_size(pci_p->pci_tsb_cookie));
127 DEBUG2(DBG_ATTACH, dip, "iommu_create: fast tsb tte addr: %x + %x\n",
128 iommu_p->iommu_tsb_vaddr,
129 pci_dvma_page_cache_entries * pci_dvma_page_cache_clustsz);
130 DEBUG3(DBG_ATTACH, dip,
131 "iommu_create: tsb size=%x, tsb entries=%x, dvma base=%x\n",
132 iommu_p->iommu_tsb_size, iommu_p->iommu_tsb_entries,
133 iommu_p->iommu_dvma_base);
134 DEBUG2(DBG_ATTACH, dip,
135 "iommu_create: dvma_cache_locks=%x cache_entries=%x\n",
136 iommu_p->iommu_dvma_cache_locks, pci_dvma_page_cache_entries);
137
138 /*
139 * zero out the area to be used for iommu tsb
140 */
141 bzero(iommu_p->iommu_tsb_vaddr, tsb_entries << 3);
142
143 /*
144 * Create a virtual memory map for dvma address space.
145 * Reserve 'size' bytes of low dvma space for fast track cache.
146 */
147 (void) snprintf(map_name, sizeof (map_name), "%s%d_dvma",
148 ddi_driver_name(dip), ddi_get_instance(dip));
149
150 cache_size = IOMMU_PTOB(pci_dvma_page_cache_entries *
151 pci_dvma_page_cache_clustsz);
152 iommu_p->iommu_dvma_fast_end = iommu_p->iommu_dvma_base +
153 cache_size - 1;
154 iommu_p->iommu_dvma_map = vmem_create(map_name,
155 (void *)(iommu_p->iommu_dvma_fast_end + 1),
156 IOMMU_PTOB(tsb_entries) - cache_size, IOMMU_PAGE_SIZE,
157 NULL, NULL, NULL, IOMMU_PAGE_SIZE, VM_SLEEP);
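	/*
	 * Resulting DVMA address space layout (sketch; the fast track
	 * range is tracked through the iommu_dvma_cache_locks array
	 * allocated above, and only the remainder is handed to vmem):
	 *
	 *	dvma_base          dvma_fast_end + 1             dvma_end
	 *	|<-- fast track cache -->|<------- vmem arena ------->|
	 */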

	mutex_init(&iommu_p->dvma_debug_lock, NULL, MUTEX_DRIVER, NULL);

	/*
	 * On detach, the TSB Base Address Register gets set to zero,
	 * so if it's zero here, there is no need to preserve TTEs.
	 */
	if (pci_preserve_iommu_tsb && *iommu_p->iommu_tsb_base_addr_reg)
		iommu_preserve_tsb(iommu_p);

	iommu_configure(iommu_p);
}

void
iommu_destroy(pci_t *pci_p)
{
#ifdef DEBUG
	dev_info_t *dip = pci_p->pci_dip;
#endif
	iommu_t *iommu_p = pci_p->pci_iommu_p;
	volatile uint64_t ctl_val = *iommu_p->iommu_ctrl_reg;

	DEBUG0(DBG_DETACH, dip, "iommu_destroy:\n");

	/*
	 * Disable the IOMMU by setting the TSB Base Address to zero
	 * and the TSB Table size to the smallest possible.
	 */
	ctl_val = ctl_val & ~(7 << COMMON_IOMMU_CTRL_TSB_SZ_SHIFT);

	*iommu_p->iommu_ctrl_reg = ctl_val;
	*iommu_p->iommu_tsb_base_addr_reg = 0;

	/*
	 * Return the boot time allocated tsb.
	 */
	iommu_tsb_free(pci_p->pci_tsb_cookie);

	/*
	 * Teardown any implementation-specific structures set up in
	 * pci_iommu_setup.
	 */
	pci_iommu_teardown(iommu_p);

	if (DVMA_DBG_ON(iommu_p))
		pci_dvma_debug_fini(iommu_p);
	mutex_destroy(&iommu_p->dvma_debug_lock);

	/*
	 * Free the dvma resource map.
	 */
	vmem_destroy(iommu_p->iommu_dvma_map);

	kmem_free(iommu_p->iommu_dvma_cache_locks,
	    pci_dvma_page_cache_entries);

	/*
	 * Free the iommu state structure.
	 */
	kmem_free(iommu_p, sizeof (iommu_t));
	pci_p->pci_iommu_p = NULL;
}

/*
 * re-program iommu on the fly while preserving on-going dma
 * transactions on the PCI bus.
 */
void
iommu_configure(iommu_t *iommu_p)
{
	pci_t *pci_p = iommu_p->iommu_pci_p;
	uint64_t cfgpa = pci_get_cfg_pabase(pci_p);
	dev_info_t *dip = iommu_p->iommu_pci_p->pci_dip;
	dev_info_t *cdip = NULL;
	volatile uint64_t ctl_val = (uint64_t)
	    ((iommu_p->iommu_tsb_size << COMMON_IOMMU_CTRL_TSB_SZ_SHIFT) |
	    (0 /* 8k page */ << COMMON_IOMMU_CTRL_TBW_SZ_SHIFT) |
	    COMMON_IOMMU_CTRL_ENABLE | COMMON_IOMMU_CTRL_DIAG_ENABLE |
	    (pci_lock_tlb ? COMMON_IOMMU_CTRL_LCK_ENABLE : 0));

	DEBUG2(DBG_ATTACH, dip, "iommu_configure: iommu_ctl=%08x.%08x\n",
	    HI32(ctl_val), LO32(ctl_val));
	if (!pci_preserve_iommu_tsb || !(*iommu_p->iommu_tsb_base_addr_reg)) {
		*iommu_p->iommu_ctrl_reg = COMMON_IOMMU_CTRL_DIAG_ENABLE;
		iommu_tlb_flushall(iommu_p);
		goto config;
	}
	cdip = ddi_get_child(dip);
	for (; cdip; cdip = ddi_get_next_sibling(cdip)) {
		uint32_t *reg_p;
		int reg_len;
		if (ddi_getlongprop(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
		    "reg", (caddr_t)&reg_p, &reg_len) != DDI_PROP_SUCCESS)
			continue;
		cfgpa += (*reg_p) & (PCI_CONF_ADDR_MASK ^ PCI_REG_REG_M);
		kmem_free(reg_p, reg_len);
		break;
	}

config:
	pci_iommu_config(iommu_p, ctl_val, cdip ? cfgpa : 0);
}

void
iommu_map_pages(iommu_t *iommu_p, ddi_dma_impl_t *mp,
	dvma_addr_t dvma_pg, size_t npages, size_t pfn_index)
{
	int i;
	dvma_addr_t pg_index = dvma_pg - iommu_p->dvma_base_pg;
	uint64_t *tte_addr = iommu_p->iommu_tsb_vaddr + pg_index;
	size_t pfn_last = pfn_index + npages;
	uint64_t tte = PCI_GET_MP_TTE(mp->dmai_tte);
#ifdef DEBUG
	dev_info_t *dip = iommu_p->iommu_pci_p->pci_dip;
#endif

	ASSERT(pfn_last <= mp->dmai_ndvmapages);

	DEBUG5(DBG_MAP_WIN, dip,
	    "iommu_map_pages:%x+%x=%x npages=0x%x pfn_index=0x%x\n",
	    (uint_t)iommu_p->dvma_base_pg, (uint_t)pg_index, dvma_pg,
	    (uint_t)npages, (uint_t)pfn_index);

	for (i = pfn_index; i < pfn_last; i++, pg_index++, tte_addr++) {
		iopfn_t pfn = PCI_GET_MP_PFN(mp, i);
		volatile uint64_t cur_tte = IOMMU_PTOB(pfn) | tte;

		DEBUG3(DBG_MAP_WIN, dip, "iommu_map_pages: mp=%p pg[%x]=%x\n",
		    mp, i, (uint_t)pfn);
		DEBUG3(DBG_MAP_WIN, dip,
		    "iommu_map_pages: pg_index=%x tte=%08x.%08x\n",
		    pg_index, HI32(cur_tte), LO32(cur_tte));
		ASSERT(TTE_IS_INVALID(*tte_addr));
		*tte_addr = cur_tte;
#ifdef DEBUG
		if (pfn == 0 && pci_warn_pp0)
			cmn_err(CE_WARN, "%s%d <%p> doing DMA to pp0\n",
			    ddi_driver_name(mp->dmai_rdip),
			    ddi_get_instance(mp->dmai_rdip), mp);
#endif
	}
	ASSERT(tte_addr == iommu_p->iommu_tsb_vaddr + pg_index);
#ifdef DEBUG
	if (HAS_REDZONE(mp)) {
		DEBUG1(DBG_MAP_WIN, dip, "iommu_map_pages: redzone pg=%x\n",
		    pg_index);
		ASSERT(TTE_IS_INVALID(iommu_p->iommu_tsb_vaddr[pg_index]));
	}
#endif
	if (DVMA_DBG_ON(iommu_p))
		pci_dvma_alloc_debug(iommu_p, (char *)mp->dmai_mapping,
		    mp->dmai_size, mp);
}
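
/*
 * For illustration only (hypothetical values): with dvma_base_pg = 0x70000
 * and dvma_pg = 0x70005, pg_index starts at 5, so mapping npages = 3
 * starting at pfn_index = 0 writes
 *
 *	tsb_vaddr[5] = IOMMU_PTOB(pfn[0]) | tte
 *	tsb_vaddr[6] = IOMMU_PTOB(pfn[1]) | tte
 *	tsb_vaddr[7] = IOMMU_PTOB(pfn[2]) | tte
 *
 * i.e. each TTE combines the physical page address with the common
 * attribute bits derived from the DMA handle.
 */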

/*
 * iommu_map_window - map a dvma window into the iommu
 *
 * used by: pci_dma_win(), pci_dma_ctlops() - DDI_DMA_MOVWIN
 *
 * return value: none
 */
/*ARGSUSED*/
void
iommu_map_window(iommu_t *iommu_p, ddi_dma_impl_t *mp, window_t win_no)
{
	uint32_t obj_pg0_off = mp->dmai_roffset;
	uint32_t win_pg0_off = win_no ? 0 : obj_pg0_off;
	size_t win_size = mp->dmai_winsize;
	size_t pfn_index = win_size * win_no;	/* temp value */
	size_t obj_off = win_no ? pfn_index - obj_pg0_off : 0; /* xferred sz */
	dvma_addr_t dvma_pg = IOMMU_BTOP(mp->dmai_mapping);
	size_t res_size = mp->dmai_object.dmao_size - obj_off + win_pg0_off;

	ASSERT(!(win_size & IOMMU_PAGE_OFFSET));
	if (win_no >= mp->dmai_nwin)
		return;
	if (res_size < win_size)	/* last window */
		win_size = res_size;	/* mp->dmai_winsize unchanged */

	mp->dmai_mapping = IOMMU_PTOB(dvma_pg) | win_pg0_off;
	mp->dmai_size = win_size - win_pg0_off;	/* cur win xferrable size */
	mp->dmai_offset = obj_off;	/* win offset into object */
	pfn_index = IOMMU_BTOP(pfn_index);	/* index into pfnlist */
	iommu_map_pages(iommu_p, mp, dvma_pg, IOMMU_BTOPR(win_size), pfn_index);
}
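
/*
 * Worked example (hypothetical values): dmao_size = 0x25000,
 * dmai_roffset = 0x200, dmai_winsize = 0x10000 (eight 8K pages):
 *
 *	win 0: win_pg0_off = 0x200, obj_off = 0,       dmai_size = 0xfe00
 *	win 1: win_pg0_off = 0,     obj_off = 0xfe00,  dmai_size = 0x10000
 *	win 2: obj_off = 0x1fe00, res_size = 0x5200 < win_size, so this
 *	       last window transfers only the remaining 0x5200 bytes
 *
 * Total transferred: 0xfe00 + 0x10000 + 0x5200 = 0x25000 = dmao_size.
 */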

void
iommu_unmap_pages(iommu_t *iommu_p, dvma_addr_t dvma_pg, uint_t npages)
{
	dvma_addr_t pg_index = IOMMU_PAGE_INDEX(iommu_p, dvma_pg);

	for (; npages; npages--, dvma_pg++, pg_index++) {
		DEBUG1(DBG_UNMAP_WIN|DBG_CONT, 0, " %x", dvma_pg);
		IOMMU_UNLOAD_TTE(iommu_p, pg_index);

		if (!tm_mtlb_gc)
			IOMMU_PAGE_FLUSH(iommu_p, dvma_pg);
	}
}

void
iommu_remap_pages(iommu_t *iommu_p, ddi_dma_impl_t *mp, dvma_addr_t dvma_pg,
	size_t npages, size_t pfn_index)
{
	iommu_unmap_pages(iommu_p, dvma_pg, npages);
	iommu_map_pages(iommu_p, mp, dvma_pg, npages, pfn_index);
}

/*
 * iommu_unmap_window
 *
 * This routine is called to break down the iommu mappings to a dvma window.
 * Non-partial mappings are viewed as a single-window mapping.
 *
 * used by: pci_dma_unbindhdl(), pci_dma_window(),
 *	and pci_dma_ctlops() - DDI_DMA_FREE, DDI_DMA_MOVWIN, DDI_DMA_NEXTWIN
 *
 * return value: none
 */
/*ARGSUSED*/
void
iommu_unmap_window(iommu_t *iommu_p, ddi_dma_impl_t *mp)
{
	dvma_addr_t dvma_pg = IOMMU_BTOP(mp->dmai_mapping);
	dvma_addr_t pg_index = IOMMU_PAGE_INDEX(iommu_p, dvma_pg);
	uint_t npages = IOMMU_BTOP(mp->dmai_winsize);
#ifdef DEBUG
	dev_info_t *dip = iommu_p->iommu_pci_p->pci_dip;
#endif
	/*
	 * Invalidate each page of the mapping in the tsb and flush
	 * it from the tlb.
	 */
	DEBUG2(DBG_UNMAP_WIN, dip, "mp=%p %x pfns:", mp, npages);
	if (mp->dmai_flags & DMAI_FLAGS_CONTEXT) {
		dvma_context_t ctx = MP2CTX(mp);
		for (; npages; npages--, pg_index++) {
			DEBUG1(DBG_UNMAP_WIN|DBG_CONT, dip, " %x", pg_index);
			IOMMU_UNLOAD_TTE(iommu_p, pg_index);
		}
		DEBUG1(DBG_UNMAP_WIN|DBG_CONT, dip, " (context %x)", ctx);
		*iommu_p->iommu_flush_ctx_reg = ctx;
	} else
		iommu_unmap_pages(iommu_p, dvma_pg, npages);

	DEBUG0(DBG_UNMAP_WIN|DBG_CONT, dip, "\n");

	if (DVMA_DBG_ON(iommu_p))
		pci_dvma_free_debug(iommu_p, (char *)mp->dmai_mapping,
		    mp->dmai_size, mp);
}

int
pci_alloc_tsb(pci_t *pci_p)
{
	uint16_t tsbc;

	if ((tsbc = iommu_tsb_alloc(pci_p->pci_id)) == IOMMU_TSB_COOKIE_NONE) {
		cmn_err(CE_WARN, "%s%d: Unable to allocate IOMMU TSB.",
		    ddi_driver_name(pci_p->pci_dip),
		    ddi_get_instance(pci_p->pci_dip));
		return (DDI_FAILURE);
	}
	pci_p->pci_tsb_cookie = tsbc;
	return (DDI_SUCCESS);
}

void
pci_free_tsb(pci_t *pci_p)
{
	iommu_tsb_free(pci_p->pci_tsb_cookie);
}

#if 0
/*
 * The following data structure is used to map a tsb size
 * to a tsb size configuration parameter in the iommu
 * control register.
 * This is a hardware table. It is here for reference only.
 */
static int pci_iommu_tsb_sizes[] = {
	0x2000,		/* 0 - 8 mb */
	0x4000,		/* 1 - 16 mb */
	0x8000,		/* 2 - 32 mb */
	0x10000,	/* 3 - 64 mb */
	0x20000,	/* 4 - 128 mb */
	0x40000,	/* 5 - 256 mb */
	0x80000,	/* 6 - 512 mb */
	0x100000	/* 7 - 1 gb */
};
#endif

uint_t
iommu_tsb_size_encode(uint_t tsb_bytes)
{
	uint_t i;

	for (i = 7; i && (tsb_bytes < (0x2000 << i)); i--)
		/* empty */;
	return (i);
}
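
/*
 * Example, using values from the reference table above: tsb_bytes = 0x10000
 * (a 64KB TSB, i.e. 8192 eight-byte TTEs covering 64MB of DVMA at 8K pages)
 * passes the test for i = 7 down to 4 and stops at i = 3, since
 * 0x10000 < (0x2000 << 3) is false, so the encoding returned is 3. Any
 * size at or below 0x2000 falls through to the smallest encoding, 0.
 */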

/*
 * invalidate IOMMU TLB entries through diagnostic registers.
 */
static void
iommu_tlb_flushall(iommu_t *iommu_p)
{
	int i;
	uint64_t base = (uint64_t)(iommu_p->iommu_ctrl_reg) -
	    COMMON_IOMMU_CTRL_REG_OFFSET;
	volatile uint64_t *tlb_tag = (volatile uint64_t *)
	    (base + COMMON_IOMMU_TLB_TAG_DIAG_ACC_OFFSET);
	volatile uint64_t *tlb_data = (volatile uint64_t *)
	    (base + COMMON_IOMMU_TLB_DATA_DIAG_ACC_OFFSET);
	for (i = 0; i < IOMMU_TLB_ENTRIES; i++)
		tlb_tag[i] = tlb_data[i] = 0ull;
}

static void
iommu_preserve_tsb(iommu_t *iommu_p)
{
#ifdef DEBUG
	dev_info_t *dip = iommu_p->iommu_pci_p->pci_dip;
#endif
	uint_t i, obp_tsb_entries, obp_tsb_size, base_pg_index;
	uint64_t ctl = *iommu_p->iommu_ctrl_reg;
	uint64_t obp_tsb_pa = *iommu_p->iommu_tsb_base_addr_reg;
	uint64_t *base_tte_addr;

	DEBUG3(DBG_ATTACH, dip,
	    "iommu_tsb_base_addr_reg=0x%08x (0x%08x.0x%08x)\n",
	    iommu_p->iommu_tsb_base_addr_reg,
	    (uint32_t)(*iommu_p->iommu_tsb_base_addr_reg >> 32),
	    (uint32_t)(*iommu_p->iommu_tsb_base_addr_reg & 0xffffffff));

	obp_tsb_size = IOMMU_CTL_TO_TSBSIZE(ctl);
	obp_tsb_entries = IOMMU_TSBSIZE_TO_TSBENTRIES(obp_tsb_size);
	base_pg_index = iommu_p->dvma_end_pg - obp_tsb_entries + 1;
	base_tte_addr = iommu_p->iommu_tsb_vaddr +
	    (iommu_p->iommu_tsb_entries - obp_tsb_entries);

	/*
	 * The old Darwin PROM does not set the tsb size correctly; bail out.
	 */
	if ((obp_tsb_size == IOMMU_DARWIN_BOGUS_TSBSIZE) &&
	    (CHIP_TYPE(iommu_p->iommu_pci_p) == PCI_CHIP_SABRE))
		return;

	DEBUG3(DBG_ATTACH, dip, "iommu_preserve_tsb: kernel info\n"
	    "iommu_tsb_vaddr=%08x copy to base_tte_addr=%08x "
	    "base_pg_index=%x\n", iommu_p->iommu_tsb_vaddr,
	    base_tte_addr, base_pg_index);

	DEBUG3(DBG_ATTACH | DBG_CONT, dip, "iommu_preserve_tsb: obp info "
	    "obp_tsb_entries=0x%x obp_tsb_pa=%08x.%08x\n", obp_tsb_entries,
	    (uint32_t)(obp_tsb_pa >> 32), (uint32_t)obp_tsb_pa);

	for (i = 0; i < obp_tsb_entries; i++) {
		uint64_t tte = lddphys(obp_tsb_pa + i * 8);
		caddr_t va;

		if (TTE_IS_INVALID(tte)) {
			DEBUG0(DBG_ATTACH | DBG_CONT, dip, ".");
			continue;
		}

		base_tte_addr[i] = tte;
		DEBUG3(DBG_ATTACH | DBG_CONT, dip,
		    "\npreserve_tsb: (%x)=%08x.%08x\n", base_tte_addr + i,
		    (uint_t)(tte >> 32), (uint_t)(tte & 0xffffffff));

		/*
		 * permanently reserve this page from the dvma address
		 * space resource map
		 */

		va = (caddr_t)(IOMMU_PTOB(base_pg_index + i));
		(void) vmem_xalloc(iommu_p->iommu_dvma_map, IOMMU_PAGE_SIZE,
		    IOMMU_PAGE_SIZE, 0, 0, va, va + IOMMU_PAGE_SIZE,
		    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
	}
}