1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25 /*
26 * Copyright 2012 Garrett D'Amore <garrett@damore.org>. All rights reserved.
27 */
28
29 #include <sys/types.h>
30 #include <sys/param.h>
31 #include <sys/conf.h>
32 #include <sys/ddi.h>
33 #include <sys/sunddi.h>
34 #include <sys/ddi_impldefs.h>
35 #include <sys/cmn_err.h>
36 #include <sys/kmem.h>
37 #include <sys/vmem.h>
38 #include <sys/sysmacros.h>
39
40 #include <sys/ddidmareq.h>
41 #include <sys/sysiosbus.h>
42 #include <sys/iommu.h>
43 #include <sys/iocache.h>
44 #include <sys/dvma.h>
45
46 #include <vm/as.h>
47 #include <vm/hat.h>
48 #include <vm/page.h>
49 #include <vm/hat_sfmmu.h>
50 #include <sys/machparam.h>
51 #include <sys/machsystm.h>
52 #include <sys/vmsystm.h>
53 #include <sys/iommutsb.h>
54
55 /* Useful debugging Stuff */
56 #include <sys/nexusdebug.h>
57 #include <sys/debug.h>
58 /* Bitfield debugging definitions for this file */
59 #define IOMMU_GETDVMAPAGES_DEBUG 0x1
60 #define IOMMU_DMAMAP_DEBUG 0x2
61 #define IOMMU_DMAMCTL_DEBUG 0x4
62 #define IOMMU_DMAMCTL_SYNC_DEBUG 0x8
63 #define IOMMU_DMAMCTL_HTOC_DEBUG 0x10
64 #define IOMMU_DMAMCTL_KVADDR_DEBUG 0x20
65 #define IOMMU_DMAMCTL_GETERR_DEBUG 0x400
66 #define IOMMU_DMAMCTL_DMA_FREE_DEBUG 0x1000
67 #define IOMMU_REGISTERS_DEBUG 0x2000
68 #define IOMMU_DMA_SETUP_DEBUG 0x4000
69 #define IOMMU_DMA_UNBINDHDL_DEBUG 0x8000
70 #define IOMMU_DMA_BINDHDL_DEBUG 0x10000
71 #define IOMMU_DMA_WIN_DEBUG 0x20000
72 #define IOMMU_DMA_ALLOCHDL_DEBUG 0x40000
73 #define IOMMU_DMA_LIM_SETUP_DEBUG 0x80000
74 #define IOMMU_FASTDMA_RESERVE 0x100000
75 #define IOMMU_FASTDMA_LOAD 0x200000
76 #define IOMMU_INTER_INTRA_XFER 0x400000
77 #define IOMMU_TTE 0x800000
78 #define IOMMU_TLB 0x1000000
79 #define IOMMU_FASTDMA_SYNC 0x2000000
80
81 /* Turn on if you need to keep track of outstanding IOMMU usage */
82 /* #define IO_MEMUSAGE */
83 /* Turn on to debug IOMMU unmapping code */
84 /* #define IO_MEMDEBUG */
85
86 static struct dvma_ops iommu_dvma_ops = {
87 DVMAO_REV,
88 iommu_dvma_kaddr_load,
89 iommu_dvma_unload,
90 iommu_dvma_sync
91 };
92
93 extern void *sbusp; /* sbus soft state hook */
94
95 #define DVMA_MAX_CACHE 65536
96
97 /*
98 * This is the number of pages that a mapping request needs before we force
99 * the TLB flush code to use diagnostic registers. This value was determined
100  * through a series of test runs measuring DMA mapping setup performance.
101 */
102 int tlb_flush_using_diag = 16;
103
104 int sysio_iommu_tsb_sizes[] = {
105 IOMMU_TSB_SIZE_8M,
106 IOMMU_TSB_SIZE_16M,
107 IOMMU_TSB_SIZE_32M,
108 IOMMU_TSB_SIZE_64M,
109 IOMMU_TSB_SIZE_128M,
110 IOMMU_TSB_SIZE_256M,
111 IOMMU_TSB_SIZE_512M,
112 IOMMU_TSB_SIZE_1G
113 };
114
115 static int iommu_map_window(ddi_dma_impl_t *, off_t, size_t);
116
117 int
118 iommu_init(struct sbus_soft_state *softsp, caddr_t address)
119 {
120 int i;
121 char name[40];
122
123 #ifdef DEBUG
124 debug_info = 1;
125 #endif
126
127 /*
128  * Simply add each register's offset to the base address
129 * to calculate the already mapped virtual address of
130 * the device register...
131 *
132 * define a macro for the pointer arithmetic; all registers
133 * are 64 bits wide and are defined as uint64_t's.
134 */
135
136 #define REG_ADDR(b, o) (uint64_t *)((caddr_t)(b) + (o))
137
138 softsp->iommu_ctrl_reg = REG_ADDR(address, OFF_IOMMU_CTRL_REG);
139 softsp->tsb_base_addr = REG_ADDR(address, OFF_TSB_BASE_ADDR);
140 softsp->iommu_flush_reg = REG_ADDR(address, OFF_IOMMU_FLUSH_REG);
141 softsp->iommu_tlb_tag = REG_ADDR(address, OFF_IOMMU_TLB_TAG);
142 softsp->iommu_tlb_data = REG_ADDR(address, OFF_IOMMU_TLB_DATA);
143
144 #undef REG_ADDR
145
146 mutex_init(&softsp->dma_pool_lock, NULL, MUTEX_DEFAULT, NULL);
147 mutex_init(&softsp->intr_poll_list_lock, NULL, MUTEX_DEFAULT, NULL);
148
149 /* Set up the DVMA resource sizes */
150 if ((softsp->iommu_tsb_cookie = iommu_tsb_alloc(softsp->upa_id)) ==
151 IOMMU_TSB_COOKIE_NONE) {
152 cmn_err(CE_WARN, "%s%d: Unable to retrieve IOMMU array.",
153 ddi_driver_name(softsp->dip),
154 ddi_get_instance(softsp->dip));
155 return (DDI_FAILURE);
156 }
157 softsp->soft_tsb_base_addr =
158 iommu_tsb_cookie_to_va(softsp->iommu_tsb_cookie);
159 softsp->iommu_dvma_size =
160 iommu_tsb_cookie_to_size(softsp->iommu_tsb_cookie) <<
161 IOMMU_TSB_TO_RNG;
162 softsp->iommu_dvma_base = (ioaddr_t)
163 (0 - (ioaddr_t)softsp->iommu_dvma_size);
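	/*
	 * Illustrative arithmetic (not from the original source): each
	 * IOTTE is 8 bytes and maps one 8K IOMMU page, so a TSB of N
	 * bytes covers (N / 8) * 8K == N * 1K bytes of DVMA space.  An
	 * 8K TSB, for example, holds 1024 TTEs and covers 8MB, and the
	 * arena is then based at 0 - 8MB == 0xff800000 so that it ends
	 * exactly at the top of the 32-bit IO address space.
	 */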
164
165 (void) snprintf(name, sizeof (name), "%s%d_dvma",
166 ddi_driver_name(softsp->dip), ddi_get_instance(softsp->dip));
167
168 /*
169 * Initialize the DVMA vmem arena.
170 */
171 softsp->dvma_arena = vmem_create(name,
172 (void *)(uintptr_t)softsp->iommu_dvma_base,
173 softsp->iommu_dvma_size, PAGESIZE, NULL, NULL, NULL,
174 DVMA_MAX_CACHE, VM_SLEEP);
175
176 /* Set the limit for dvma_reserve() to 1/2 of the total dvma space */
177 softsp->dma_reserve = iommu_btop(softsp->iommu_dvma_size >> 1);
178
179 #if defined(DEBUG) && defined(IO_MEMUSAGE)
180 mutex_init(&softsp->iomemlock, NULL, MUTEX_DEFAULT, NULL);
181 softsp->iomem = (struct io_mem_list *)0;
182 #endif /* DEBUG && IO_MEMUSAGE */
183 /*
184 * Get the base address of the TSB table and store it in the hardware
185 */
186
187 /*
188 * We plan on the PROM flushing all TLB entries. If this is not the
189 * case, this is where we should flush the hardware TLB.
190 */
191
192 /* Set the IOMMU registers */
193 (void) iommu_resume_init(softsp);
194
195 /* check the convenient copy of TSB base, and flush write buffers */
196 if (*softsp->tsb_base_addr !=
197 va_to_pa((caddr_t)softsp->soft_tsb_base_addr)) {
198 iommu_tsb_free(softsp->iommu_tsb_cookie);
199 return (DDI_FAILURE);
200 }
201
202 softsp->sbus_io_lo_pfn = UINT32_MAX;
203 softsp->sbus_io_hi_pfn = 0;
204 for (i = 0; i < sysio_pd_getnrng(softsp->dip); i++) {
205 struct rangespec *rangep;
206 uint64_t addr;
207 pfn_t hipfn, lopfn;
208
209 rangep = sysio_pd_getrng(softsp->dip, i);
210 addr = (uint64_t)((uint64_t)rangep->rng_bustype << 32);
211 addr |= (uint64_t)rangep->rng_offset;
212 lopfn = (pfn_t)(addr >> MMU_PAGESHIFT);
213 addr += (uint64_t)(rangep->rng_size - 1);
214 hipfn = (pfn_t)(addr >> MMU_PAGESHIFT);
215
216 softsp->sbus_io_lo_pfn = (lopfn < softsp->sbus_io_lo_pfn) ?
217 lopfn : softsp->sbus_io_lo_pfn;
218
219 softsp->sbus_io_hi_pfn = (hipfn > softsp->sbus_io_hi_pfn) ?
220 hipfn : softsp->sbus_io_hi_pfn;
221 }
222
223 DPRINTF(IOMMU_REGISTERS_DEBUG, ("IOMMU Control reg: %p IOMMU TSB "
224 "base reg: %p IOMMU flush reg: %p TSB base addr %p\n",
225 (void *)softsp->iommu_ctrl_reg, (void *)softsp->tsb_base_addr,
226 (void *)softsp->iommu_flush_reg,
227 (void *)softsp->soft_tsb_base_addr));
228
229 return (DDI_SUCCESS);
230 }
231
232 /*
233 * function to uninitialize the iommu and release the tsb back to
234 * the spare pool. See startup.c for tsb spare management.
235 */
236
237 int
238 iommu_uninit(struct sbus_soft_state *softsp)
239 {
240 vmem_destroy(softsp->dvma_arena);
241
242 /* flip off the IOMMU enable switch */
243 *softsp->iommu_ctrl_reg &=
244 (TSB_SIZE << TSB_SIZE_SHIFT | IOMMU_DISABLE);
245
246 iommu_tsb_free(softsp->iommu_tsb_cookie);
247
248 return (DDI_SUCCESS);
249 }
250
251 /*
252 * Initialize iommu hardware registers when the system is being resumed.
253 * (Subset of iommu_init())
254 */
255 int
256 iommu_resume_init(struct sbus_soft_state *softsp)
257 {
258 int i;
259 uint_t tsb_size;
260 uint_t tsb_bytes;
261
262 /*
263 * Reset the base address of the TSB table in the hardware
264 */
265 *softsp->tsb_base_addr = va_to_pa((caddr_t)softsp->soft_tsb_base_addr);
266
267 /*
268 * Figure out the correct size of the IOMMU TSB entries. If we
269 * end up with a size smaller than that needed for 8M of IOMMU
270 * space, default the size to 8M. XXX We could probably panic here
271 */
272 i = sizeof (sysio_iommu_tsb_sizes) / sizeof (sysio_iommu_tsb_sizes[0])
273 - 1;
274
275 tsb_bytes = iommu_tsb_cookie_to_size(softsp->iommu_tsb_cookie);
276
277 while (i > 0) {
278 if (tsb_bytes >= sysio_iommu_tsb_sizes[i])
279 break;
280 i--;
281 }
282
283 tsb_size = i;
284
285 	/* OK, let's flip the "on" switch of the IOMMU */
286 *softsp->iommu_ctrl_reg = (uint64_t)(tsb_size << TSB_SIZE_SHIFT
287 | IOMMU_ENABLE | IOMMU_DIAG_ENABLE);
288
289 return (DDI_SUCCESS);
290 }
291
292 void
293 iommu_tlb_flush(struct sbus_soft_state *softsp, ioaddr_t addr, pgcnt_t npages)
294 {
295 volatile uint64_t tmpreg;
296 volatile uint64_t *vaddr_reg, *valid_bit_reg;
297 ioaddr_t hiaddr, ioaddr;
298 int i, do_flush = 0;
299
300 if (npages == 1) {
301 *softsp->iommu_flush_reg = (uint64_t)addr;
302 tmpreg = *softsp->sbus_ctrl_reg;
303 return;
304 }
305
306 hiaddr = addr + (ioaddr_t)(npages * IOMMU_PAGESIZE);
307 for (i = 0, vaddr_reg = softsp->iommu_tlb_tag,
308 valid_bit_reg = softsp->iommu_tlb_data;
309 i < IOMMU_TLB_ENTRIES; i++, vaddr_reg++, valid_bit_reg++) {
310 tmpreg = *vaddr_reg;
311 ioaddr = (ioaddr_t)((tmpreg & IOMMU_TLBTAG_VA_MASK) <<
312 IOMMU_TLBTAG_VA_SHIFT);
313
314 DPRINTF(IOMMU_TLB, ("Vaddr reg 0x%p, "
315 "TLB vaddr reg %lx, IO addr 0x%x "
316 "Base addr 0x%x, Hi addr 0x%x\n",
317 (void *)vaddr_reg, tmpreg, ioaddr, addr, hiaddr));
318
319 if (ioaddr >= addr && ioaddr <= hiaddr) {
320 tmpreg = *valid_bit_reg;
321
322 DPRINTF(IOMMU_TLB, ("Valid reg addr 0x%p, "
323 "TLB valid reg %lx\n",
324 (void *)valid_bit_reg, tmpreg));
325
326 if (tmpreg & IOMMU_TLB_VALID) {
327 *softsp->iommu_flush_reg = (uint64_t)ioaddr;
328 do_flush = 1;
329 }
330 }
331 }
332
333 if (do_flush)
334 tmpreg = *softsp->sbus_ctrl_reg;
335 }
336
337
338 /*
339 * Shorthand defines
340 */
341
342 #define ALO dma_lim->dlim_addr_lo
343 #define AHI dma_lim->dlim_addr_hi
344 #define OBJSIZE dmareq->dmar_object.dmao_size
345 #define IOTTE_NDX(vaddr, base) (base + \
346 (int)(iommu_btop((vaddr & ~IOMMU_PAGEMASK) - \
347 softsp->iommu_dvma_base)))
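/*
 * For example (illustrative only): the TTE for the n'th IOMMU page of the
 * arena, i.e. for DVMA address iommu_dvma_base + n * IOMMU_PAGESIZE, lives
 * at soft_tsb_base_addr + n; IOTTE_NDX() strips the page offset, rebases
 * the address against iommu_dvma_base and converts bytes to pages to get
 * that index.
 */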
348 /*
349 * If DDI_DMA_PARTIAL flag is set and the request is for
350 * less than MIN_DVMA_WIN_SIZE, it's not worth the hassle so
351 * we turn off the DDI_DMA_PARTIAL flag
352 */
353 #define MIN_DVMA_WIN_SIZE (128)
354
355 /* ARGSUSED */
356 void
357 iommu_remove_mappings(ddi_dma_impl_t *mp)
358 {
359 #if defined(DEBUG) && defined(IO_MEMDEBUG)
360 pgcnt_t npages;
361 ioaddr_t ioaddr;
362 volatile uint64_t *iotte_ptr;
365 struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
366 struct sbus_soft_state *softsp = mppriv->softsp;
367
368 #if defined(IO_MEMUSAGE)
369 struct io_mem_list **prevp, *walk;
370 #endif /* DEBUG && IO_MEMUSAGE */
371
372 ASSERT(softsp != NULL);
373 /*
374 * Run thru the mapped entries and free 'em
375 */
376
377 ioaddr = mp->dmai_mapping & ~IOMMU_PAGEOFFSET;
378 npages = mp->dmai_ndvmapages;
379
380 #if defined(IO_MEMUSAGE)
381 mutex_enter(&softsp->iomemlock);
382 prevp = &softsp->iomem;
383 walk = softsp->iomem;
384
385 while (walk) {
386 if (walk->ioaddr == ioaddr) {
387 *prevp = walk->next;
388 break;
389 }
390
391 prevp = &walk->next;
392 walk = walk->next;
393 }
394 mutex_exit(&softsp->iomemlock);
395
396 kmem_free(walk->pfn, sizeof (pfn_t) * (npages + 1));
397 kmem_free(walk, sizeof (struct io_mem_list));
398 #endif /* IO_MEMUSAGE */
399
400 iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
401
402 while (npages) {
403 DPRINTF(IOMMU_DMAMCTL_DEBUG,
404 ("dma_mctl: freeing ioaddr %x iotte %p\n",
405 ioaddr, iotte_ptr));
406 *iotte_ptr = (uint64_t)0; /* unload tte */
407 iommu_tlb_flush(softsp, ioaddr, 1);
408 npages--;
409 ioaddr += IOMMU_PAGESIZE;
410 iotte_ptr++;
411 }
412 #endif /* DEBUG && IO_MEMDEBUG */
413 }
414
415
416 int
417 iommu_create_vaddr_mappings(ddi_dma_impl_t *mp, uintptr_t addr)
418 {
419 pfn_t pfn;
420 struct as *as = NULL;
421 pgcnt_t npages;
422 ioaddr_t ioaddr;
423 uint_t offset;
424 volatile uint64_t *iotte_ptr;
425 uint64_t tmp_iotte_flag;
426 int rval = DDI_DMA_MAPPED;
427 struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
428 struct sbus_soft_state *softsp = mppriv->softsp;
429 int diag_tlb_flush;
430 #if defined(DEBUG) && defined(IO_MEMUSAGE)
431 struct io_mem_list *iomemp;
432 pfn_t *pfnp;
433 #endif /* DEBUG && IO_MEMUSAGE */
434
435 ASSERT(softsp != NULL);
436
437 /* Set Valid and Cache for mem xfer */
438 tmp_iotte_flag = IOTTE_VALID | IOTTE_CACHE | IOTTE_WRITE | IOTTE_STREAM;
439
440 offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET);
441 npages = iommu_btopr(mp->dmai_size + offset);
442 ioaddr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
443 iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
444 diag_tlb_flush = npages > tlb_flush_using_diag ? 1 : 0;
445
446 as = mp->dmai_object.dmao_obj.virt_obj.v_as;
447 if (as == NULL)
448 as = &kas;
449
450 /*
451 * Set the per object bits of the TTE here. We optimize this for
452 * the memory case so that the while loop overhead is minimal.
453 */
454 /* Turn on NOSYNC if we need consistent mem */
455 if (mp->dmai_rflags & DDI_DMA_CONSISTENT) {
456 mp->dmai_rflags |= DMP_NOSYNC;
457 tmp_iotte_flag ^= IOTTE_STREAM;
458 /* Set streaming mode if not consistent mem */
459 } else if (softsp->stream_buf_off) {
460 tmp_iotte_flag ^= IOTTE_STREAM;
461 }
462
463 #if defined(DEBUG) && defined(IO_MEMUSAGE)
464 iomemp = kmem_alloc(sizeof (struct io_mem_list), KM_SLEEP);
465 iomemp->rdip = mp->dmai_rdip;
466 iomemp->ioaddr = ioaddr;
467 iomemp->addr = addr;
468 iomemp->npages = npages;
469 pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) * (npages + 1),
470 KM_SLEEP);
471 #endif /* DEBUG && IO_MEMUSAGE */
472 /*
473 * Grab the mappings from the dmmu and stick 'em into the
474 * iommu.
475 */
476 ASSERT(npages != 0);
477
478 /* If we're going to flush the TLB using diag mode, do it now. */
479 if (diag_tlb_flush)
480 iommu_tlb_flush(softsp, ioaddr, npages);
481
482 do {
483 uint64_t iotte_flag = tmp_iotte_flag;
484
485 /*
486 * Fetch the pfn for the DMA object
487 */
488
489 ASSERT(as);
490 pfn = hat_getpfnum(as->a_hat, (caddr_t)addr);
491 ASSERT(pfn != PFN_INVALID);
492
493 if (!pf_is_memory(pfn)) {
494 /* DVMA'ing to IO space */
495
496 /* Turn off cache bit if set */
497 if (iotte_flag & IOTTE_CACHE)
498 iotte_flag ^= IOTTE_CACHE;
499
500 /* Turn off stream bit if set */
501 if (iotte_flag & IOTTE_STREAM)
502 iotte_flag ^= IOTTE_STREAM;
503
504 if (IS_INTRA_SBUS(softsp, pfn)) {
505 /* Intra sbus transfer */
506
507 /* Turn on intra flag */
508 iotte_flag |= IOTTE_INTRA;
509
510 DPRINTF(IOMMU_INTER_INTRA_XFER, (
511 "Intra xfer pfnum %lx TTE %lx\n",
512 pfn, iotte_flag));
513 } else {
514 if (pf_is_dmacapable(pfn) == 1) {
515 /*EMPTY*/
516 DPRINTF(IOMMU_INTER_INTRA_XFER,
517 ("Inter xfer pfnum %lx "
518 "tte hi %lx\n",
519 pfn, iotte_flag));
520 } else {
521 rval = DDI_DMA_NOMAPPING;
522 #if defined(DEBUG) && defined(IO_MEMDEBUG)
523 goto bad;
524 #endif /* DEBUG && IO_MEMDEBUG */
525 }
526 }
527 }
528 addr += IOMMU_PAGESIZE;
529
530 DPRINTF(IOMMU_TTE, ("vaddr mapping: tte index %p pfn %lx "
531 "tte flag %lx addr %lx ioaddr %x\n",
532 (void *)iotte_ptr, pfn, iotte_flag, addr, ioaddr));
533
534 /* Flush the IOMMU TLB before loading a new mapping */
535 if (!diag_tlb_flush)
536 iommu_tlb_flush(softsp, ioaddr, 1);
537
538 /* Set the hardware IO TTE */
539 *iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;
540
541 ioaddr += IOMMU_PAGESIZE;
542 npages--;
543 iotte_ptr++;
544 #if defined(DEBUG) && defined(IO_MEMUSAGE)
545 *pfnp = pfn;
546 pfnp++;
547 #endif /* DEBUG && IO_MEMUSAGE */
548 } while (npages != 0);
549
550 #if defined(DEBUG) && defined(IO_MEMUSAGE)
551 mutex_enter(&softsp->iomemlock);
552 iomemp->next = softsp->iomem;
553 softsp->iomem = iomemp;
554 mutex_exit(&softsp->iomemlock);
555 #endif /* DEBUG && IO_MEMUSAGE */
556
557 return (rval);
558
559 #if defined(DEBUG) && defined(IO_MEMDEBUG)
560 bad:
561 /* If we fail a mapping, free up any mapping resources used */
562 iommu_remove_mappings(mp);
563 return (rval);
564 #endif /* DEBUG && IO_MEMDEBUG */
565 }
566
567
568 int
569 iommu_create_pp_mappings(ddi_dma_impl_t *mp, page_t *pp, page_t **pplist)
570 {
571 pfn_t pfn;
572 pgcnt_t npages;
573 ioaddr_t ioaddr;
574 uint_t offset;
575 volatile uint64_t *iotte_ptr;
576 uint64_t tmp_iotte_flag;
577 struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
578 struct sbus_soft_state *softsp = mppriv->softsp;
579 int diag_tlb_flush;
580 #if defined(DEBUG) && defined(IO_MEMUSAGE)
581 struct io_mem_list *iomemp;
582 pfn_t *pfnp;
583 #endif /* DEBUG && IO_MEMUSAGE */
584 int rval = DDI_DMA_MAPPED;
585
586 /* Set Valid and Cache for mem xfer */
587 tmp_iotte_flag = IOTTE_VALID | IOTTE_CACHE | IOTTE_WRITE | IOTTE_STREAM;
588
589 ASSERT(softsp != NULL);
590
591 offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET);
592 npages = iommu_btopr(mp->dmai_size + offset);
593 ioaddr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
594 iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
595 diag_tlb_flush = npages > tlb_flush_using_diag ? 1 : 0;
596
597 /*
598 * Set the per object bits of the TTE here. We optimize this for
599 * the memory case so that the while loop overhead is minimal.
600 */
601 if (mp->dmai_rflags & DDI_DMA_CONSISTENT) {
602 /* Turn on NOSYNC if we need consistent mem */
603 mp->dmai_rflags |= DMP_NOSYNC;
604 tmp_iotte_flag ^= IOTTE_STREAM;
605 } else if (softsp->stream_buf_off) {
606 /* Set streaming mode if not consistent mem */
607 tmp_iotte_flag ^= IOTTE_STREAM;
608 }
609
610 #if defined(DEBUG) && defined(IO_MEMUSAGE)
611 iomemp = kmem_alloc(sizeof (struct io_mem_list), KM_SLEEP);
612 iomemp->rdip = mp->dmai_rdip;
613 iomemp->ioaddr = ioaddr;
614 iomemp->npages = npages;
615 pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) * (npages + 1),
616 KM_SLEEP);
617 #endif /* DEBUG && IO_MEMUSAGE */
618 /*
619 * Grab the mappings from the dmmu and stick 'em into the
620 * iommu.
621 */
622 ASSERT(npages != 0);
623
624 /* If we're going to flush the TLB using diag mode, do it now. */
625 if (diag_tlb_flush)
626 iommu_tlb_flush(softsp, ioaddr, npages);
627
628 do {
629 uint64_t iotte_flag;
630
631 iotte_flag = tmp_iotte_flag;
632
633 if (pp != NULL) {
634 pfn = pp->p_pagenum;
635 pp = pp->p_next;
636 } else {
637 pfn = (*pplist)->p_pagenum;
638 pplist++;
639 }
640
641 DPRINTF(IOMMU_TTE, ("pp mapping TTE index %p pfn %lx "
642 "tte flag %lx ioaddr %x\n", (void *)iotte_ptr,
643 pfn, iotte_flag, ioaddr));
644
645 /* Flush the IOMMU TLB before loading a new mapping */
646 if (!diag_tlb_flush)
647 iommu_tlb_flush(softsp, ioaddr, 1);
648
649 /* Set the hardware IO TTE */
650 *iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;
651
652 ioaddr += IOMMU_PAGESIZE;
653 npages--;
654 iotte_ptr++;
655
656 #if defined(DEBUG) && defined(IO_MEMUSAGE)
657 *pfnp = pfn;
658 pfnp++;
659 #endif /* DEBUG && IO_MEMUSAGE */
660
661 } while (npages != 0);
662
663 #if defined(DEBUG) && defined(IO_MEMUSAGE)
664 mutex_enter(&softsp->iomemlock);
665 iomemp->next = softsp->iomem;
666 softsp->iomem = iomemp;
667 mutex_exit(&softsp->iomemlock);
668 #endif /* DEBUG && IO_MEMUSAGE */
669
670 return (rval);
671 }
672
673
674 int
675 iommu_dma_lim_setup(dev_info_t *dip, dev_info_t *rdip,
676 struct sbus_soft_state *softsp, uint_t *burstsizep, uint_t burstsize64,
677 uint_t *minxferp, uint_t dma_flags)
678 {
679 struct regspec *rp;
680
681 /* Take care of 64 bit limits. */
682 if (!(dma_flags & DDI_DMA_SBUS_64BIT)) {
683 /*
684 * return burst size for 32-bit mode
685 */
686 *burstsizep &= softsp->sbus_burst_sizes;
687 return (DDI_FAILURE);
688 }
689
690 /*
691 * check if SBus supports 64 bit and if caller
692 * is child of SBus. No support through bridges
693 */
694 if (!softsp->sbus64_burst_sizes || (ddi_get_parent(rdip) != dip)) {
695 /*
696 * SBus doesn't support it or bridge. Do 32-bit
697 * xfers
698 */
699 *burstsizep &= softsp->sbus_burst_sizes;
700 return (DDI_FAILURE);
701 }
702
703 rp = ddi_rnumber_to_regspec(rdip, 0);
704 if (rp == NULL) {
705 *burstsizep &= softsp->sbus_burst_sizes;
706 return (DDI_FAILURE);
707 }
708
709 /* Check for old-style 64 bit burstsizes */
710 if (burstsize64 & SYSIO64_BURST_MASK) {
711 		/* Scale back burstsizes if necessary */
712 *burstsizep &= (softsp->sbus64_burst_sizes |
713 softsp->sbus_burst_sizes);
714 } else {
715 /* Get the 64 bit burstsizes. */
716 *burstsizep = burstsize64;
717
718 		/* Scale back burstsizes if necessary */
719 *burstsizep &= (softsp->sbus64_burst_sizes >>
720 SYSIO64_BURST_SHIFT);
721 }
722
723 /*
724 * Set the largest value of the smallest burstsize that the
725 * device or the bus can manage.
726 */
727 *minxferp = MAX(*minxferp,
728 (1 << (ddi_ffs(softsp->sbus64_burst_sizes) - 1)));
729
730 return (DDI_SUCCESS);
731 }
732
733
734 int
735 iommu_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
736 ddi_dma_attr_t *dma_attr, int (*waitfp)(caddr_t), caddr_t arg,
737 ddi_dma_handle_t *handlep)
738 {
739 ioaddr_t addrlow, addrhigh, segalign;
740 ddi_dma_impl_t *mp;
741 struct dma_impl_priv *mppriv;
742 struct sbus_soft_state *softsp = (struct sbus_soft_state *)
743 ddi_get_soft_state(sbusp, ddi_get_instance(dip));
744
745 /*
746 * Setup dma burstsizes and min-xfer counts.
747 */
748 (void) iommu_dma_lim_setup(dip, rdip, softsp,
749 &dma_attr->dma_attr_burstsizes,
750 dma_attr->dma_attr_burstsizes, &dma_attr->dma_attr_minxfer,
751 dma_attr->dma_attr_flags);
752
753 if (dma_attr->dma_attr_burstsizes == 0)
754 return (DDI_DMA_BADATTR);
755
756 addrlow = (ioaddr_t)dma_attr->dma_attr_addr_lo;
757 addrhigh = (ioaddr_t)dma_attr->dma_attr_addr_hi;
758 segalign = (ioaddr_t)dma_attr->dma_attr_seg;
759
760 /*
761 * Check sanity for hi and lo address limits
762 */
763 if ((addrhigh <= addrlow) ||
764 (addrhigh < (ioaddr_t)softsp->iommu_dvma_base)) {
765 return (DDI_DMA_BADATTR);
766 }
767 if (dma_attr->dma_attr_flags & DDI_DMA_FORCE_PHYSICAL)
768 return (DDI_DMA_BADATTR);
769
770 mppriv = kmem_zalloc(sizeof (*mppriv),
771 (waitfp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP);
772
773 if (mppriv == NULL) {
774 if (waitfp != DDI_DMA_DONTWAIT) {
775 ddi_set_callback(waitfp, arg,
776 &softsp->dvma_call_list_id);
777 }
778 return (DDI_DMA_NORESOURCES);
779 }
780 mp = (ddi_dma_impl_t *)mppriv;
781
782 DPRINTF(IOMMU_DMA_ALLOCHDL_DEBUG, ("dma_allochdl: (%s) handle %p "
783 "hi %x lo %x min %x burst %x\n",
784 ddi_get_name(dip), (void *)mp, addrhigh, addrlow,
785 dma_attr->dma_attr_minxfer, dma_attr->dma_attr_burstsizes));
786
787 mp->dmai_rdip = rdip;
788 mp->dmai_minxfer = (uint_t)dma_attr->dma_attr_minxfer;
789 mp->dmai_burstsizes = (uint_t)dma_attr->dma_attr_burstsizes;
790 mp->dmai_attr = *dma_attr;
791 /* See if the DMA engine has any limit restrictions. */
792 if (segalign == (ioaddr_t)UINT32_MAX &&
793 addrhigh == (ioaddr_t)UINT32_MAX &&
794 (dma_attr->dma_attr_align <= IOMMU_PAGESIZE) && addrlow == 0) {
795 mp->dmai_rflags |= DMP_NOLIMIT;
796 }
797 mppriv->softsp = softsp;
798 mppriv->phys_sync_flag = va_to_pa((caddr_t)&mppriv->sync_flag);
799
800 *handlep = (ddi_dma_handle_t)mp;
801 return (DDI_SUCCESS);
802 }
803
804 /*ARGSUSED*/
805 int
806 iommu_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
807 {
808 struct dma_impl_priv *mppriv = (struct dma_impl_priv *)handle;
809 struct sbus_soft_state *softsp = mppriv->softsp;
810 ASSERT(softsp != NULL);
811
812 kmem_free(mppriv, sizeof (*mppriv));
813
814 if (softsp->dvma_call_list_id != 0) {
815 ddi_run_callback(&softsp->dvma_call_list_id);
816 }
817 return (DDI_SUCCESS);
818 }
819
820 static int
821 check_dma_attr(struct ddi_dma_req *dmareq, ddi_dma_attr_t *dma_attr,
822 uint32_t *size)
823 {
824 ioaddr_t addrlow;
825 ioaddr_t addrhigh;
826 uint32_t segalign;
827 uint32_t smask;
828
829 smask = *size - 1;
830 segalign = dma_attr->dma_attr_seg;
831 if (smask > segalign) {
832 if ((dmareq->dmar_flags & DDI_DMA_PARTIAL) == 0)
833 return (DDI_DMA_TOOBIG);
834 *size = segalign + 1;
835 }
836 addrlow = (ioaddr_t)dma_attr->dma_attr_addr_lo;
837 addrhigh = (ioaddr_t)dma_attr->dma_attr_addr_hi;
838 if (addrlow + smask > addrhigh || addrlow + smask < addrlow) {
839 if (!((addrlow + dmareq->dmar_object.dmao_size == 0) &&
840 (addrhigh == (ioaddr_t)-1))) {
841 if ((dmareq->dmar_flags & DDI_DMA_PARTIAL) == 0)
842 return (DDI_DMA_TOOBIG);
843 *size = MIN(addrhigh - addrlow + 1, *size);
844 }
845 }
846 return (DDI_DMA_MAPOK);
847 }
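/*
 * Worked example (illustrative only): with dma_attr_seg == 0xffff (a 64K
 * segment) a 100K (0x19000 byte) request has smask == 0x18fff, which
 * exceeds segalign; if the caller passed DDI_DMA_PARTIAL the request is
 * trimmed to segalign + 1 == 0x10000 bytes, otherwise it fails with
 * DDI_DMA_TOOBIG.
 */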
848
849 int
850 iommu_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
851 ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
852 ddi_dma_cookie_t *cp, uint_t *ccountp)
853 {
854 page_t *pp;
855 uint32_t size;
856 ioaddr_t ioaddr;
857 uint_t offset;
858 uintptr_t addr = 0;
859 pgcnt_t npages;
860 int rval;
861 ddi_dma_attr_t *dma_attr;
862 struct sbus_soft_state *softsp;
863 struct page **pplist = NULL;
864 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
865 struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
866
867 #ifdef lint
868 dip = dip;
869 rdip = rdip;
870 #endif
871
872 if (mp->dmai_inuse)
873 return (DDI_DMA_INUSE);
874
875 dma_attr = &mp->dmai_attr;
876 size = (uint32_t)dmareq->dmar_object.dmao_size;
877 if (!(mp->dmai_rflags & DMP_NOLIMIT)) {
878 rval = check_dma_attr(dmareq, dma_attr, &size);
879 if (rval != DDI_DMA_MAPOK)
880 return (rval);
881 }
882 mp->dmai_inuse = 1;
883 mp->dmai_offset = 0;
884 mp->dmai_rflags = (dmareq->dmar_flags & DMP_DDIFLAGS) |
885 (mp->dmai_rflags & DMP_NOLIMIT);
886
887 switch (dmareq->dmar_object.dmao_type) {
888 case DMA_OTYP_VADDR:
889 case DMA_OTYP_BUFVADDR:
890 addr = (uintptr_t)dmareq->dmar_object.dmao_obj.virt_obj.v_addr;
891 offset = addr & IOMMU_PAGEOFFSET;
892 pplist = dmareq->dmar_object.dmao_obj.virt_obj.v_priv;
893 npages = iommu_btopr(OBJSIZE + offset);
894
895 DPRINTF(IOMMU_DMAMAP_DEBUG, ("dma_map vaddr: %lx pages "
896 "req addr %lx off %x OBJSIZE %x\n",
897 npages, addr, offset, OBJSIZE));
898
899 /* We don't need the addr anymore if we have a shadow list */
900 if (pplist != NULL)
901 addr = (uintptr_t)NULL;
902 pp = NULL;
903 break;
904
905 case DMA_OTYP_PAGES:
906 pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp;
907 offset = dmareq->dmar_object.dmao_obj.pp_obj.pp_offset;
908 npages = iommu_btopr(OBJSIZE + offset);
909 break;
910
911 case DMA_OTYP_PADDR:
912 default:
913 /*
914 * Not a supported type for this implementation
915 */
916 rval = DDI_DMA_NOMAPPING;
917 goto bad;
918 }
919
920 /* Get our soft state once we know we're mapping an object. */
921 softsp = mppriv->softsp;
922 ASSERT(softsp != NULL);
923
924 if (mp->dmai_rflags & DDI_DMA_PARTIAL) {
925 if (size != OBJSIZE) {
926 /*
927 * If the request is for partial mapping arrangement,
928 * the device has to be able to address at least the
929 * size of the window we are establishing.
930 */
931 if (size < iommu_ptob(MIN_DVMA_WIN_SIZE)) {
932 rval = DDI_DMA_NOMAPPING;
933 goto bad;
934 }
935 npages = iommu_btopr(size + offset);
936 }
937 /*
938 		 * If the size requested is less than a moderate amount,
939 		 * skip the partial mapping stuff; it's not worth the
940 		 * effort.
941 */
942 if (npages > MIN_DVMA_WIN_SIZE) {
943 npages = MIN_DVMA_WIN_SIZE + iommu_btopr(offset);
944 size = iommu_ptob(MIN_DVMA_WIN_SIZE);
945 DPRINTF(IOMMU_DMA_SETUP_DEBUG, ("dma_setup: SZ %x pg "
946 "%lx sz %x\n", OBJSIZE, npages, size));
947 if (pplist != NULL) {
948 mp->dmai_minfo = (void *)pplist;
949 mp->dmai_rflags |= DMP_SHADOW;
950 }
951 } else {
952 mp->dmai_rflags ^= DDI_DMA_PARTIAL;
953 }
954 } else {
955 if (npages >= iommu_btop(softsp->iommu_dvma_size) -
956 MIN_DVMA_WIN_SIZE) {
957 rval = DDI_DMA_TOOBIG;
958 goto bad;
959 }
960 }
961
962 /*
963 * save dmareq-object, size and npages into mp
964 */
965 mp->dmai_object = dmareq->dmar_object;
966 mp->dmai_size = size;
967 mp->dmai_ndvmapages = npages;
968
969 if (mp->dmai_rflags & DMP_NOLIMIT) {
970 ioaddr = (ioaddr_t)(uintptr_t)vmem_alloc(softsp->dvma_arena,
971 iommu_ptob(npages),
972 dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP);
973 if (ioaddr == 0) {
974 rval = DDI_DMA_NORESOURCES;
975 goto bad;
976 }
977
978 /*
979 * If we have a 1 page request and we're working with a page
980 * list, we're going to speed load an IOMMU entry.
981 */
982 if (npages == 1 && !addr) {
983 uint64_t iotte_flag = IOTTE_VALID | IOTTE_CACHE |
984 IOTTE_WRITE | IOTTE_STREAM;
985 volatile uint64_t *iotte_ptr;
986 pfn_t pfn;
987 #if defined(DEBUG) && defined(IO_MEMUSAGE)
988 struct io_mem_list *iomemp;
989 pfn_t *pfnp;
990 #endif /* DEBUG && IO_MEMUSAGE */
991
992 iotte_ptr = IOTTE_NDX(ioaddr,
993 softsp->soft_tsb_base_addr);
994
995 if (mp->dmai_rflags & DDI_DMA_CONSISTENT) {
996 mp->dmai_rflags |= DMP_NOSYNC;
997 iotte_flag ^= IOTTE_STREAM;
998 } else if (softsp->stream_buf_off)
999 iotte_flag ^= IOTTE_STREAM;
1000
1001 mp->dmai_rflags ^= DDI_DMA_PARTIAL;
1002
1003 if (pp != NULL)
1004 pfn = pp->p_pagenum;
1005 else
1006 pfn = (*pplist)->p_pagenum;
1007
1008 iommu_tlb_flush(softsp, ioaddr, 1);
1009
1010 *iotte_ptr =
1011 ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;
1012
1013 mp->dmai_mapping = (ioaddr_t)(ioaddr + offset);
1014 mp->dmai_nwin = 0;
1015 if (cp != NULL) {
1016 cp->dmac_notused = 0;
1017 cp->dmac_address = (ioaddr_t)mp->dmai_mapping;
1018 cp->dmac_size = mp->dmai_size;
1019 cp->dmac_type = 0;
1020 *ccountp = 1;
1021 }
1022
1023 DPRINTF(IOMMU_TTE, ("speed loading: TTE index %p "
1024 "pfn %lx tte flag %lx addr %lx ioaddr %x\n",
1025 (void *)iotte_ptr, pfn, iotte_flag, addr, ioaddr));
1026
1027 #if defined(DEBUG) && defined(IO_MEMUSAGE)
1028 iomemp = kmem_alloc(sizeof (struct io_mem_list),
1029 KM_SLEEP);
1030 iomemp->rdip = mp->dmai_rdip;
1031 iomemp->ioaddr = ioaddr;
1032 iomemp->addr = addr;
1033 iomemp->npages = npages;
1034 pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) *
1035 (npages + 1), KM_SLEEP);
1036 *pfnp = pfn;
1037 mutex_enter(&softsp->iomemlock);
1038 iomemp->next = softsp->iomem;
1039 softsp->iomem = iomemp;
1040 mutex_exit(&softsp->iomemlock);
1041 #endif /* DEBUG && IO_MEMUSAGE */
1042
1043 return (DDI_DMA_MAPPED);
1044 }
1045 } else {
1046 ioaddr = (ioaddr_t)(uintptr_t)vmem_xalloc(softsp->dvma_arena,
1047 iommu_ptob(npages),
1048 MAX((uint_t)dma_attr->dma_attr_align, IOMMU_PAGESIZE), 0,
1049 (uint_t)dma_attr->dma_attr_seg + 1,
1050 (void *)(uintptr_t)(ioaddr_t)dma_attr->dma_attr_addr_lo,
1051 (void *)(uintptr_t)
1052 ((ioaddr_t)dma_attr->dma_attr_addr_hi + 1),
1053 dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP);
1054 }
1055
1056 if (ioaddr == 0) {
1057 if (dmareq->dmar_fp == DDI_DMA_SLEEP)
1058 rval = DDI_DMA_NOMAPPING;
1059 else
1060 rval = DDI_DMA_NORESOURCES;
1061 goto bad;
1062 }
1063
1064 mp->dmai_mapping = ioaddr + offset;
1065 ASSERT(mp->dmai_mapping >= softsp->iommu_dvma_base);
1066
1067 /*
1068 * At this point we have a range of virtual address allocated
1069 * with which we now have to map to the requested object.
1070 */
1071 if (addr) {
1072 rval = iommu_create_vaddr_mappings(mp,
1073 addr & ~IOMMU_PAGEOFFSET);
1074 if (rval == DDI_DMA_NOMAPPING)
1075 goto bad_nomap;
1076 } else {
1077 rval = iommu_create_pp_mappings(mp, pp, pplist);
1078 if (rval == DDI_DMA_NOMAPPING)
1079 goto bad_nomap;
1080 }
1081
1082 if (cp) {
1083 cp->dmac_notused = 0;
1084 cp->dmac_address = (ioaddr_t)mp->dmai_mapping;
1085 cp->dmac_size = mp->dmai_size;
1086 cp->dmac_type = 0;
1087 *ccountp = 1;
1088 }
1089 if (mp->dmai_rflags & DDI_DMA_PARTIAL) {
1090 size = iommu_ptob(mp->dmai_ndvmapages - iommu_btopr(offset));
1091 mp->dmai_nwin =
1092 (dmareq->dmar_object.dmao_size + (size - 1)) / size;
1093 return (DDI_DMA_PARTIAL_MAP);
1094 } else {
1095 mp->dmai_nwin = 0;
1096 return (DDI_DMA_MAPPED);
1097 }
1098
1099 bad_nomap:
1100 /*
1101 * Could not create mmu mappings.
1102 */
1103 if (mp->dmai_rflags & DMP_NOLIMIT) {
1104 vmem_free(softsp->dvma_arena, (void *)(uintptr_t)ioaddr,
1105 iommu_ptob(npages));
1106 } else {
1107 vmem_xfree(softsp->dvma_arena, (void *)(uintptr_t)ioaddr,
1108 iommu_ptob(npages));
1109 }
1110
1111 bad:
1112 if (rval == DDI_DMA_NORESOURCES &&
1113 dmareq->dmar_fp != DDI_DMA_DONTWAIT) {
1114 ddi_set_callback(dmareq->dmar_fp,
1115 dmareq->dmar_arg, &softsp->dvma_call_list_id);
1116 }
1117 mp->dmai_inuse = 0;
1118 return (rval);
1119 }
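/*
 * Example (illustrative only, not part of this driver): a leaf driver
 * reaches iommu_dma_allochdl() and iommu_dma_bindhdl() through the generic
 * DDI DMA interfaces.  A minimal sketch, assuming a hypothetical driver
 * soft state "xsp" holding its dev_info pointer and DMA attributes, and a
 * kernel buffer "buf" of "len" bytes:
 *
 *	ddi_dma_handle_t h;
 *	ddi_dma_cookie_t cookie;
 *	uint_t ccount;
 *
 *	if (ddi_dma_alloc_handle(xsp->dip, &xsp->dma_attr,
 *	    DDI_DMA_SLEEP, NULL, &h) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	if (ddi_dma_addr_bind_handle(h, NULL, buf, len,
 *	    DDI_DMA_READ | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
 *	    &cookie, &ccount) != DDI_DMA_MAPPED) {
 *		ddi_dma_free_handle(&h);
 *		return (DDI_FAILURE);
 *	}
 *	... program the device with cookie.dmac_address/dmac_size ...
 *	(void) ddi_dma_unbind_handle(h);
 *	ddi_dma_free_handle(&h);
 */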
1120
1121 /* ARGSUSED */
1122 int
1123 iommu_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
1124 ddi_dma_handle_t handle)
1125 {
1126 ioaddr_t addr;
1127 uint_t npages;
1128 size_t size;
1129 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
1130 struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
1131 struct sbus_soft_state *softsp = mppriv->softsp;
1132 ASSERT(softsp != NULL);
1133
1134 addr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
1135 npages = mp->dmai_ndvmapages;
1136 size = iommu_ptob(npages);
1137
1138 DPRINTF(IOMMU_DMA_UNBINDHDL_DEBUG, ("iommu_dma_unbindhdl: "
1139 "unbinding addr %x for %x pages\n", addr, mp->dmai_ndvmapages));
1140
1141 /* sync the entire object */
1142 if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
1143 /* flush stream write buffers */
1144 sync_stream_buf(softsp, addr, npages, (int *)&mppriv->sync_flag,
1145 mppriv->phys_sync_flag);
1146 }
1147
1148 #if defined(DEBUG) && defined(IO_MEMDEBUG)
1149 /*
1150 * 'Free' the dma mappings.
1151 */
1152 iommu_remove_mappings(mp);
1153 #endif /* DEBUG && IO_MEMDEBUG */
1154
1155 ASSERT(npages > (uint_t)0);
1156 if (mp->dmai_rflags & DMP_NOLIMIT)
1157 vmem_free(softsp->dvma_arena, (void *)(uintptr_t)addr, size);
1158 else
1159 vmem_xfree(softsp->dvma_arena, (void *)(uintptr_t)addr, size);
1160
1161 mp->dmai_ndvmapages = 0;
1162 mp->dmai_inuse = 0;
1163 mp->dmai_minfo = NULL;
1164
1165 if (softsp->dvma_call_list_id != 0)
1166 ddi_run_callback(&softsp->dvma_call_list_id);
1167
1168 return (DDI_SUCCESS);
1169 }
1170
1171 /*ARGSUSED*/
1172 int
1173 iommu_dma_flush(dev_info_t *dip, dev_info_t *rdip,
1174 ddi_dma_handle_t handle, off_t off, size_t len,
1175 uint_t cache_flags)
1176 {
1177 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
1178 struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
1179
1180 if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
1181 sync_stream_buf(mppriv->softsp, mp->dmai_mapping,
1182 mp->dmai_ndvmapages, (int *)&mppriv->sync_flag,
1183 mppriv->phys_sync_flag);
1184 }
1185 return (DDI_SUCCESS);
1186 }
1187
1188 /*ARGSUSED*/
1189 int
1190 iommu_dma_win(dev_info_t *dip, dev_info_t *rdip,
1191 ddi_dma_handle_t handle, uint_t win, off_t *offp,
1192 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
1193 {
1194 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
1195 off_t offset;
1196 uint_t winsize;
1197 uint_t newoff;
1198 int rval;
1199
1200 offset = mp->dmai_mapping & IOMMU_PAGEOFFSET;
1201 winsize = iommu_ptob(mp->dmai_ndvmapages - iommu_btopr(offset));
1202
1203 DPRINTF(IOMMU_DMA_WIN_DEBUG, ("getwin win %d winsize %x\n", win,
1204 winsize));
1205
1206 /*
1207 * win is in the range [0 .. dmai_nwin-1]
1208 */
1209 if (win >= mp->dmai_nwin)
1210 return (DDI_FAILURE);
1211
1212 newoff = win * winsize;
1213 if (newoff > mp->dmai_object.dmao_size - mp->dmai_minxfer)
1214 return (DDI_FAILURE);
1215
1216 ASSERT(cookiep);
1217 cookiep->dmac_notused = 0;
1218 cookiep->dmac_type = 0;
1219 cookiep->dmac_address = (ioaddr_t)mp->dmai_mapping;
1220 cookiep->dmac_size = mp->dmai_size;
1221 *ccountp = 1;
1222 *offp = (off_t)newoff;
1223 *lenp = (uint_t)winsize;
1224
1225 if (newoff == mp->dmai_offset) {
1226 /*
1227 * Nothing to do...
1228 */
1229 return (DDI_SUCCESS);
1230 }
1231
1232 if ((rval = iommu_map_window(mp, newoff, winsize)) != DDI_SUCCESS)
1233 return (rval);
1234
1235 /*
1236 * Set this again in case iommu_map_window() has changed it
1237 */
1238 cookiep->dmac_size = mp->dmai_size;
1239
1240 return (DDI_SUCCESS);
1241 }
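/*
 * Example (illustrative only): a caller that bound with DDI_DMA_PARTIAL and
 * got DDI_DMA_PARTIAL_MAP back walks the windows through ddi_dma_getwin(9F),
 * which vectors here:
 *
 *	off_t off;
 *	size_t len;
 *	uint_t win, nwin, ccount;
 *	ddi_dma_cookie_t cookie;
 *
 *	(void) ddi_dma_numwin(h, &nwin);
 *	for (win = 0; win < nwin; win++) {
 *		if (ddi_dma_getwin(h, win, &off, &len, &cookie,
 *		    &ccount) != DDI_SUCCESS)
 *			break;
 *		... transfer the piece described by cookie ...
 *	}
 */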
1242
1243 static int
1244 iommu_map_window(ddi_dma_impl_t *mp, off_t newoff, size_t winsize)
1245 {
1246 uintptr_t addr = 0;
1247 page_t *pp;
1248 uint_t flags;
1249 struct page **pplist = NULL;
1250
1251 #if defined(DEBUG) && defined(IO_MEMDEBUG)
1252 /* Free mappings for current window */
1253 iommu_remove_mappings(mp);
1254 #endif /* DEBUG && IO_MEMDEBUG */
1255
1256 mp->dmai_offset = newoff;
1257 mp->dmai_size = mp->dmai_object.dmao_size - newoff;
1258 mp->dmai_size = MIN(mp->dmai_size, winsize);
1259
1260 if (mp->dmai_object.dmao_type == DMA_OTYP_VADDR ||
1261 mp->dmai_object.dmao_type == DMA_OTYP_BUFVADDR) {
1262 if (mp->dmai_rflags & DMP_SHADOW) {
1263 pplist = (struct page **)mp->dmai_minfo;
1264 ASSERT(pplist != NULL);
1265 pplist = pplist + (newoff >> MMU_PAGESHIFT);
1266 } else {
1267 addr = (uintptr_t)
1268 mp->dmai_object.dmao_obj.virt_obj.v_addr;
1269 addr = (addr + newoff) & ~IOMMU_PAGEOFFSET;
1270 }
1271 pp = NULL;
1272 } else {
1273 pp = mp->dmai_object.dmao_obj.pp_obj.pp_pp;
1274 flags = 0;
1275 while (flags < newoff) {
1276 pp = pp->p_next;
1277 flags += MMU_PAGESIZE;
1278 }
1279 }
1280
1281 /* Set up mappings for next window */
1282 if (addr) {
1283 if (iommu_create_vaddr_mappings(mp, addr) < 0)
1284 return (DDI_FAILURE);
1285 } else {
1286 if (iommu_create_pp_mappings(mp, pp, pplist) < 0)
1287 return (DDI_FAILURE);
1288 }
1289
1290 /*
1291 * also invalidate read stream buffer
1292 */
1293 if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
1294 struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
1295
1296 sync_stream_buf(mppriv->softsp, mp->dmai_mapping,
1297 mp->dmai_ndvmapages, (int *)&mppriv->sync_flag,
1298 mppriv->phys_sync_flag);
1299 }
1300
1301 return (DDI_SUCCESS);
1302
1303 }
1304
1305
1306 /*ARGSUSED*/
1307 int
1308 iommu_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
1309 ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
1310 off_t *offp, size_t *lenp, caddr_t *objp, uint_t cache_flags)
1311 {
1312 pgcnt_t npages;
1313 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
1314
1315 DPRINTF(IOMMU_DMAMCTL_DEBUG, ("dma_mctl: handle %p ", (void *)mp));
1316 switch (request) {
1317
1318 case DDI_DMA_SET_SBUS64:
1319 {
1320 struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
1321
1322 return (iommu_dma_lim_setup(dip, rdip, mppriv->softsp,
1323 &mp->dmai_burstsizes, (uint_t)*lenp, &mp->dmai_minxfer,
1324 DDI_DMA_SBUS_64BIT));
1325 }
1326
1327 case DDI_DMA_RESERVE:
1328 {
1329 struct ddi_dma_req *dmareq = (struct ddi_dma_req *)offp;
1330 ddi_dma_lim_t *dma_lim;
1331 ddi_dma_handle_t *handlep;
1332 uint_t np;
1333 ioaddr_t ioaddr;
1334 int i;
1335 struct fast_dvma *iommu_fast_dvma;
1336 struct sbus_soft_state *softsp =
1337 (struct sbus_soft_state *)ddi_get_soft_state(sbusp,
1338 ddi_get_instance(dip));
1339
1340 /* Some simple sanity checks */
1341 dma_lim = dmareq->dmar_limits;
1342 if (dma_lim->dlim_burstsizes == 0) {
1343 DPRINTF(IOMMU_FASTDMA_RESERVE,
1344 ("Reserve: bad burstsizes\n"));
1345 return (DDI_DMA_BADLIMITS);
1346 }
1347 if ((AHI <= ALO) || (AHI < softsp->iommu_dvma_base)) {
1348 DPRINTF(IOMMU_FASTDMA_RESERVE,
1349 ("Reserve: bad limits\n"));
1350 return (DDI_DMA_BADLIMITS);
1351 }
1352
1353 np = dmareq->dmar_object.dmao_size;
1354 mutex_enter(&softsp->dma_pool_lock);
1355 if (np > softsp->dma_reserve) {
1356 mutex_exit(&softsp->dma_pool_lock);
1357 DPRINTF(IOMMU_FASTDMA_RESERVE,
1358 ("Reserve: dma_reserve is exhausted\n"));
1359 return (DDI_DMA_NORESOURCES);
1360 }
1361
1362 softsp->dma_reserve -= np;
1363 mutex_exit(&softsp->dma_pool_lock);
1364 mp = kmem_zalloc(sizeof (*mp), KM_SLEEP);
1365 mp->dmai_rflags = DMP_BYPASSNEXUS;
1366 mp->dmai_rdip = rdip;
1367 mp->dmai_minxfer = dma_lim->dlim_minxfer;
1368 mp->dmai_burstsizes = dma_lim->dlim_burstsizes;
1369
1370 ioaddr = (ioaddr_t)(uintptr_t)vmem_xalloc(softsp->dvma_arena,
1371 iommu_ptob(np), IOMMU_PAGESIZE, 0,
1372 dma_lim->dlim_cntr_max + 1,
1373 (void *)(uintptr_t)ALO, (void *)(uintptr_t)(AHI + 1),
1374 dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP);
1375
1376 if (ioaddr == 0) {
1377 mutex_enter(&softsp->dma_pool_lock);
1378 softsp->dma_reserve += np;
1379 mutex_exit(&softsp->dma_pool_lock);
1380 kmem_free(mp, sizeof (*mp));
1381 DPRINTF(IOMMU_FASTDMA_RESERVE,
1382 ("Reserve: No dvma resources available\n"));
1383 return (DDI_DMA_NOMAPPING);
1384 }
1385
1386 /* create a per request structure */
1387 iommu_fast_dvma = kmem_alloc(sizeof (struct fast_dvma),
1388 KM_SLEEP);
1389
1390 /*
1391 * We need to remember the size of the transfer so that
1392 * we can figure the virtual pages to sync when the transfer
1393 * is complete.
1394 */
1395 iommu_fast_dvma->pagecnt = kmem_zalloc(np *
1396 sizeof (uint_t), KM_SLEEP);
1397
1398 /* Allocate a streaming cache sync flag for each index */
1399 iommu_fast_dvma->sync_flag = kmem_zalloc(np *
1400 sizeof (int), KM_SLEEP);
1401
1402 /* Allocate a physical sync flag for each index */
1403 iommu_fast_dvma->phys_sync_flag =
1404 kmem_zalloc(np * sizeof (uint64_t), KM_SLEEP);
1405
1406 for (i = 0; i < np; i++)
1407 iommu_fast_dvma->phys_sync_flag[i] = va_to_pa((caddr_t)
1408 &iommu_fast_dvma->sync_flag[i]);
1409
1410 mp->dmai_mapping = ioaddr;
1411 mp->dmai_ndvmapages = np;
1412 iommu_fast_dvma->ops = &iommu_dvma_ops;
1413 iommu_fast_dvma->softsp = (caddr_t)softsp;
1414 mp->dmai_nexus_private = (caddr_t)iommu_fast_dvma;
1415 handlep = (ddi_dma_handle_t *)objp;
1416 *handlep = (ddi_dma_handle_t)mp;
1417
1418 DPRINTF(IOMMU_FASTDMA_RESERVE,
1419 ("Reserve: mapping object %p base addr %lx size %x\n",
1420 (void *)mp, mp->dmai_mapping, mp->dmai_ndvmapages));
1421
1422 break;
1423 }
1424
1425 case DDI_DMA_RELEASE:
1426 {
1427 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
1428 uint_t np = npages = mp->dmai_ndvmapages;
1429 ioaddr_t ioaddr = mp->dmai_mapping;
1430 volatile uint64_t *iotte_ptr;
1431 struct fast_dvma *iommu_fast_dvma = (struct fast_dvma *)
1432 mp->dmai_nexus_private;
1433 struct sbus_soft_state *softsp = (struct sbus_soft_state *)
1434 iommu_fast_dvma->softsp;
1435
1436 ASSERT(softsp != NULL);
1437
1438 /* Unload stale mappings and flush stale tlb's */
1439 iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
1440
1441 while (npages > (uint_t)0) {
1442 *iotte_ptr = (uint64_t)0; /* unload tte */
1443 iommu_tlb_flush(softsp, ioaddr, 1);
1444
1445 npages--;
1446 iotte_ptr++;
1447 ioaddr += IOMMU_PAGESIZE;
1448 }
1449
1450 ioaddr = (ioaddr_t)mp->dmai_mapping;
1451 mutex_enter(&softsp->dma_pool_lock);
1452 softsp->dma_reserve += np;
1453 mutex_exit(&softsp->dma_pool_lock);
1454
1455 if (mp->dmai_rflags & DMP_NOLIMIT)
1456 vmem_free(softsp->dvma_arena,
1457 (void *)(uintptr_t)ioaddr, iommu_ptob(np));
1458 else
1459 vmem_xfree(softsp->dvma_arena,
1460 (void *)(uintptr_t)ioaddr, iommu_ptob(np));
1461
1462 kmem_free(mp, sizeof (*mp));
1463 kmem_free(iommu_fast_dvma->pagecnt, np * sizeof (uint_t));
1464 kmem_free(iommu_fast_dvma->sync_flag, np * sizeof (int));
1465 kmem_free(iommu_fast_dvma->phys_sync_flag, np *
1466 sizeof (uint64_t));
1467 kmem_free(iommu_fast_dvma, sizeof (struct fast_dvma));
1468
1469
1470 DPRINTF(IOMMU_FASTDMA_RESERVE,
1471 ("Release: Base addr %x size %x\n", ioaddr, np));
1472 /*
1473 * Now that we've freed some resource,
1474 * if there is anybody waiting for it
1475 * try and get them going.
1476 */
1477 if (softsp->dvma_call_list_id != 0)
1478 ddi_run_callback(&softsp->dvma_call_list_id);
1479
1480 break;
1481 }
1482
1483 default:
1484 DPRINTF(IOMMU_DMAMCTL_DEBUG, ("iommu_dma_mctl: unknown option "
1485 "0%x\n", request));
1486
1487 return (DDI_FAILURE);
1488 }
1489 return (DDI_SUCCESS);
1490 }
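/*
 * Example (illustrative only): the DDI_DMA_RESERVE and DDI_DMA_RELEASE
 * cases above back the "fast DVMA" interfaces.  A sketch of the leaf-driver
 * side, where "xsp", "kaddr", "len", "index" and "npages" are the
 * hypothetical caller's own:
 *
 *	ddi_dma_handle_t h;
 *	ddi_dma_cookie_t cookie;
 *
 *	if (dvma_reserve(xsp->dip, &xsp->dma_lim, npages, &h) !=
 *	    DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	dvma_kaddr_load(h, kaddr, len, index, &cookie);
 *	... device DMA using cookie.dmac_address/dmac_size ...
 *	dvma_unload(h, index, DDI_DMA_SYNC_FORCPU);
 *	dvma_release(xsp->dip, h, npages);
 *
 * dvma_kaddr_load(), dvma_unload() and dvma_sync() reach the iommu_dvma_*()
 * routines below through the iommu_dvma_ops vector.
 */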
1491
1492 /*ARGSUSED*/
1493 void
1494 iommu_dvma_kaddr_load(ddi_dma_handle_t h, caddr_t a, uint_t len, uint_t index,
1495 ddi_dma_cookie_t *cp)
1496 {
1497 uintptr_t addr;
1498 ioaddr_t ioaddr;
1499 uint_t offset;
1500 pfn_t pfn;
1501 int npages;
1502 volatile uint64_t *iotte_ptr;
1503 uint64_t iotte_flag = 0;
1504 struct as *as = NULL;
1505 extern struct as kas;
1506 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
1507 struct fast_dvma *iommu_fast_dvma =
1508 (struct fast_dvma *)mp->dmai_nexus_private;
1509 struct sbus_soft_state *softsp = (struct sbus_soft_state *)
1510 iommu_fast_dvma->softsp;
1511 #if defined(DEBUG) && defined(IO_MEMUSAGE)
1512 struct io_mem_list *iomemp;
1513 pfn_t *pfnp;
1514 #endif /* DEBUG && IO_MEMUSAGE */
1515
1516 ASSERT(softsp != NULL);
1517
1518 addr = (uintptr_t)a;
1519 ioaddr = (ioaddr_t)(mp->dmai_mapping + iommu_ptob(index));
1520 offset = (uint_t)(addr & IOMMU_PAGEOFFSET);
1521 iommu_fast_dvma->pagecnt[index] = iommu_btopr(len + offset);
1522 as = &kas;
1523 addr &= ~IOMMU_PAGEOFFSET;
1524 npages = iommu_btopr(len + offset);
1525
1526 #if defined(DEBUG) && defined(IO_MEMUSAGE)
1527 iomemp = kmem_alloc(sizeof (struct io_mem_list), KM_SLEEP);
1528 iomemp->rdip = mp->dmai_rdip;
1529 iomemp->ioaddr = ioaddr;
1530 iomemp->addr = addr;
1531 iomemp->npages = npages;
1532 pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) * (npages + 1),
1533 KM_SLEEP);
1534 #endif /* DEBUG && IO_MEMUSAGE */
1535
1536 cp->dmac_address = ioaddr | offset;
1537 cp->dmac_size = len;
1538
1539 iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
1540 /* read/write and streaming io on */
1541 iotte_flag = IOTTE_VALID | IOTTE_WRITE | IOTTE_CACHE;
1542
1543 if (mp->dmai_rflags & DDI_DMA_CONSISTENT)
1544 mp->dmai_rflags |= DMP_NOSYNC;
1545 else if (!softsp->stream_buf_off)
1546 iotte_flag |= IOTTE_STREAM;
1547
1548 DPRINTF(IOMMU_FASTDMA_LOAD, ("kaddr_load: ioaddr %x "
1549 "size %x offset %x index %x kaddr %lx\n",
1550 ioaddr, len, offset, index, addr));
1551 ASSERT(npages > 0);
1552 do {
1553 pfn = hat_getpfnum(as->a_hat, (caddr_t)addr);
1554 if (pfn == PFN_INVALID) {
1555 DPRINTF(IOMMU_FASTDMA_LOAD, ("kaddr_load: invalid pfn "
1556 "from hat_getpfnum()\n"));
1557 }
1558
1559 iommu_tlb_flush(softsp, ioaddr, 1);
1560
1561 /* load tte */
1562 *iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;
1563
1564 npages--;
1565 iotte_ptr++;
1566
1567 addr += IOMMU_PAGESIZE;
1568 ioaddr += IOMMU_PAGESIZE;
1569
1570 #if defined(DEBUG) && defined(IO_MEMUSAGE)
1571 *pfnp = pfn;
1572 pfnp++;
1573 #endif /* DEBUG && IO_MEMUSAGE */
1574
1575 } while (npages > 0);
1576
1577 #if defined(DEBUG) && defined(IO_MEMUSAGE)
1578 mutex_enter(&softsp->iomemlock);
1579 iomemp->next = softsp->iomem;
1580 softsp->iomem = iomemp;
1581 mutex_exit(&softsp->iomemlock);
1582 #endif /* DEBUG && IO_MEMUSAGE */
1583 }
1584
1585 /*ARGSUSED*/
1586 void
1587 iommu_dvma_unload(ddi_dma_handle_t h, uint_t index, uint_t view)
1588 {
1589 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
1590 ioaddr_t ioaddr;
1591 pgcnt_t npages;
1592 struct fast_dvma *iommu_fast_dvma =
1593 (struct fast_dvma *)mp->dmai_nexus_private;
1594 struct sbus_soft_state *softsp = (struct sbus_soft_state *)
1595 iommu_fast_dvma->softsp;
1596 #if defined(DEBUG) && defined(IO_MEMUSAGE)
1597 struct io_mem_list **prevp, *walk;
1598 #endif /* DEBUG && IO_MEMUSAGE */
1599
1600 ASSERT(softsp != NULL);
1601
1602 ioaddr = (ioaddr_t)(mp->dmai_mapping + iommu_ptob(index));
1603 npages = iommu_fast_dvma->pagecnt[index];
1604
1605 #if defined(DEBUG) && defined(IO_MEMUSAGE)
1606 mutex_enter(&softsp->iomemlock);
1607 prevp = &softsp->iomem;
1608 walk = softsp->iomem;
1609
1610 while (walk != NULL) {
1611 if (walk->ioaddr == ioaddr) {
1612 *prevp = walk->next;
1613 break;
1614 }
1615 prevp = &walk->next;
1616 walk = walk->next;
1617 }
1618 mutex_exit(&softsp->iomemlock);
1619
1620 kmem_free(walk->pfn, sizeof (pfn_t) * (npages + 1));
1621 kmem_free(walk, sizeof (struct io_mem_list));
1622 #endif /* DEBUG && IO_MEMUSAGE */
1623
1624 DPRINTF(IOMMU_FASTDMA_SYNC, ("kaddr_unload: handle %p sync flag "
1625 "addr %p sync flag pfn %llx index %x page count %lx\n", (void *)mp,
1626 (void *)&iommu_fast_dvma->sync_flag[index],
1627 iommu_fast_dvma->phys_sync_flag[index],
1628 index, npages));
1629
1630 if ((mp->dmai_rflags & DMP_NOSYNC) != DMP_NOSYNC) {
1631 sync_stream_buf(softsp, ioaddr, npages,
1632 (int *)&iommu_fast_dvma->sync_flag[index],
1633 iommu_fast_dvma->phys_sync_flag[index]);
1634 }
1635 }
1636
1637 /*ARGSUSED*/
1638 void
1639 iommu_dvma_sync(ddi_dma_handle_t h, uint_t index, uint_t view)
1640 {
1641 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
1642 ioaddr_t ioaddr;
1643 uint_t npages;
1644 struct fast_dvma *iommu_fast_dvma =
1645 (struct fast_dvma *)mp->dmai_nexus_private;
1646 struct sbus_soft_state *softsp = (struct sbus_soft_state *)
1647 iommu_fast_dvma->softsp;
1648
1649 if ((mp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC)
1650 return;
1651
1652 ASSERT(softsp != NULL);
1653 ioaddr = (ioaddr_t)(mp->dmai_mapping + iommu_ptob(index));
1654 npages = iommu_fast_dvma->pagecnt[index];
1655
1656 DPRINTF(IOMMU_FASTDMA_SYNC, ("kaddr_sync: handle %p, "
1657 "sync flag addr %p, sync flag pfn %llx\n", (void *)mp,
1658 (void *)&iommu_fast_dvma->sync_flag[index],
1659 iommu_fast_dvma->phys_sync_flag[index]));
1660
1661 sync_stream_buf(softsp, ioaddr, npages,
1662 (int *)&iommu_fast_dvma->sync_flag[index],
1663 iommu_fast_dvma->phys_sync_flag[index]);
1664 }
1665