/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/sysmacros.h>

#include <sys/ddidmareq.h>
#include <sys/sysiosbus.h>
#include <sys/iommu.h>
#include <sys/iocache.h>
#include <sys/dvma.h>

#include <vm/as.h>
#include <vm/hat.h>
#include <vm/page.h>
#include <vm/hat_sfmmu.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/vmsystm.h>
#include <sys/iommutsb.h>

/* Useful debugging Stuff */
#include <sys/nexusdebug.h>
#include <sys/debug.h>
/* Bitfield debugging definitions for this file */
#define	IOMMU_GETDVMAPAGES_DEBUG	0x1
#define	IOMMU_DMAMAP_DEBUG		0x2
#define	IOMMU_DMAMCTL_DEBUG		0x4
#define	IOMMU_DMAMCTL_SYNC_DEBUG	0x8
#define	IOMMU_DMAMCTL_HTOC_DEBUG	0x10
#define	IOMMU_DMAMCTL_KVADDR_DEBUG	0x20
#define	IOMMU_DMAMCTL_NEXTWIN_DEBUG	0x40
#define	IOMMU_DMAMCTL_NEXTSEG_DEBUG	0x80
#define	IOMMU_DMAMCTL_MOVWIN_DEBUG	0x100
#define	IOMMU_DMAMCTL_REPWIN_DEBUG	0x200
#define	IOMMU_DMAMCTL_GETERR_DEBUG	0x400
#define	IOMMU_DMAMCTL_COFF_DEBUG	0x800
#define	IOMMU_DMAMCTL_DMA_FREE_DEBUG	0x1000
#define	IOMMU_REGISTERS_DEBUG		0x2000
#define	IOMMU_DMA_SETUP_DEBUG		0x4000
#define	IOMMU_DMA_UNBINDHDL_DEBUG	0x8000
#define	IOMMU_DMA_BINDHDL_DEBUG		0x10000
#define	IOMMU_DMA_WIN_DEBUG		0x20000
#define	IOMMU_DMA_ALLOCHDL_DEBUG	0x40000
#define	IOMMU_DMA_LIM_SETUP_DEBUG	0x80000
#define	IOMMU_FASTDMA_RESERVE		0x100000
#define	IOMMU_FASTDMA_LOAD		0x200000
#define	IOMMU_INTER_INTRA_XFER		0x400000
#define	IOMMU_TTE			0x800000
#define	IOMMU_TLB			0x1000000
#define	IOMMU_FASTDMA_SYNC		0x2000000

/* Turn on if you need to keep track of outstanding IOMMU usage */
/* #define	IO_MEMUSAGE */
/* Turn on to debug IOMMU unmapping code */
/* #define	IO_MEMDEBUG */

static struct dvma_ops iommu_dvma_ops = {
	DVMAO_REV,
	iommu_dvma_kaddr_load,
	iommu_dvma_unload,
	iommu_dvma_sync
};

extern void *sbusp;		/* sbus soft state hook */

#define	DVMA_MAX_CACHE	65536

/*
 * This is the number of pages that a mapping request needs before we force
 * the TLB flush code to use diagnostic registers.  This value was determined
 * through a series of test runs measuring dma mapping setup performance.
 */
int tlb_flush_using_diag = 16;

int sysio_iommu_tsb_sizes[] = {
	IOMMU_TSB_SIZE_8M,
	IOMMU_TSB_SIZE_16M,
	IOMMU_TSB_SIZE_32M,
	IOMMU_TSB_SIZE_64M,
	IOMMU_TSB_SIZE_128M,
	IOMMU_TSB_SIZE_256M,
	IOMMU_TSB_SIZE_512M,
	IOMMU_TSB_SIZE_1G
};

static int iommu_map_window(ddi_dma_impl_t *, off_t, size_t);

int
iommu_init(struct sbus_soft_state *softsp, caddr_t address)
{
	int i;
	char name[40];

#ifdef DEBUG
	debug_info = 1;
#endif

	/*
	 * Simply add each register's offset to the base address
	 * to calculate the already mapped virtual address of
	 * the device register...
	 *
	 * define a macro for the pointer arithmetic; all registers
	 * are 64 bits wide and are defined as uint64_t's.
	 */

#define	REG_ADDR(b, o)	(uint64_t *)((caddr_t)(b) + (o))

	softsp->iommu_ctrl_reg = REG_ADDR(address, OFF_IOMMU_CTRL_REG);
	softsp->tsb_base_addr = REG_ADDR(address, OFF_TSB_BASE_ADDR);
	softsp->iommu_flush_reg = REG_ADDR(address, OFF_IOMMU_FLUSH_REG);
	softsp->iommu_tlb_tag = REG_ADDR(address, OFF_IOMMU_TLB_TAG);
	softsp->iommu_tlb_data = REG_ADDR(address, OFF_IOMMU_TLB_DATA);

#undef	REG_ADDR

	mutex_init(&softsp->dma_pool_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&softsp->intr_poll_list_lock, NULL, MUTEX_DEFAULT, NULL);

	/* Set up the DVMA resource sizes */
	if ((softsp->iommu_tsb_cookie = iommu_tsb_alloc(softsp->upa_id)) ==
	    IOMMU_TSB_COOKIE_NONE) {
		cmn_err(CE_WARN, "%s%d: Unable to retrieve IOMMU array.",
		    ddi_driver_name(softsp->dip),
		    ddi_get_instance(softsp->dip));
		return (DDI_FAILURE);
	}
	softsp->soft_tsb_base_addr =
	    iommu_tsb_cookie_to_va(softsp->iommu_tsb_cookie);
	softsp->iommu_dvma_size =
	    iommu_tsb_cookie_to_size(softsp->iommu_tsb_cookie) <<
	    IOMMU_TSB_TO_RNG;
	softsp->iommu_dvma_base = (ioaddr_t)
	    (0 - (ioaddr_t)softsp->iommu_dvma_size);

	(void) snprintf(name, sizeof (name), "%s%d_dvma",
	    ddi_driver_name(softsp->dip), ddi_get_instance(softsp->dip));

	/*
	 * Initialize the DVMA vmem arena.
	 */
	softsp->dvma_arena = vmem_create(name,
	    (void *)(uintptr_t)softsp->iommu_dvma_base,
	    softsp->iommu_dvma_size, PAGESIZE, NULL, NULL, NULL,
	    DVMA_MAX_CACHE, VM_SLEEP);

	/* Set the limit for dvma_reserve() to 1/2 of the total dvma space */
	softsp->dma_reserve = iommu_btop(softsp->iommu_dvma_size >> 1);

#if defined(DEBUG) && defined(IO_MEMUSAGE)
	mutex_init(&softsp->iomemlock, NULL, MUTEX_DEFAULT, NULL);
	softsp->iomem = (struct io_mem_list *)0;
#endif /* DEBUG && IO_MEMUSAGE */
	/*
	 * Get the base address of the TSB table and store it in the hardware
	 */

	/*
	 * We plan on the PROM flushing all TLB entries.  If this is not the
	 * case, this is where we should flush the hardware TLB.
	 */

	/* Set the IOMMU registers */
	(void) iommu_resume_init(softsp);

	/* check the convenient copy of TSB base, and flush write buffers */
	if (*softsp->tsb_base_addr !=
	    va_to_pa((caddr_t)softsp->soft_tsb_base_addr)) {
		iommu_tsb_free(softsp->iommu_tsb_cookie);
		return (DDI_FAILURE);
	}

	softsp->sbus_io_lo_pfn = UINT32_MAX;
	softsp->sbus_io_hi_pfn = 0;
	for (i = 0; i < sysio_pd_getnrng(softsp->dip); i++) {
		struct rangespec *rangep;
		uint64_t addr;
		pfn_t hipfn, lopfn;

		rangep = sysio_pd_getrng(softsp->dip, i);
		addr = (uint64_t)((uint64_t)rangep->rng_bustype << 32);
		addr |= (uint64_t)rangep->rng_offset;
		lopfn = (pfn_t)(addr >> MMU_PAGESHIFT);
		addr += (uint64_t)(rangep->rng_size - 1);
		hipfn = (pfn_t)(addr >> MMU_PAGESHIFT);

		softsp->sbus_io_lo_pfn = (lopfn < softsp->sbus_io_lo_pfn) ?
		    lopfn : softsp->sbus_io_lo_pfn;

		softsp->sbus_io_hi_pfn = (hipfn > softsp->sbus_io_hi_pfn) ?
		    hipfn : softsp->sbus_io_hi_pfn;
	}

	DPRINTF(IOMMU_REGISTERS_DEBUG, ("IOMMU Control reg: %p IOMMU TSB "
	    "base reg: %p IOMMU flush reg: %p TSB base addr %p\n",
	    (void *)softsp->iommu_ctrl_reg, (void *)softsp->tsb_base_addr,
	    (void *)softsp->iommu_flush_reg,
	    (void *)softsp->soft_tsb_base_addr));

	return (DDI_SUCCESS);
}

/*
 * function to uninitialize the iommu and release the tsb back to
 * the spare pool.  See startup.c for tsb spare management.
 */

int
iommu_uninit(struct sbus_soft_state *softsp)
{
	vmem_destroy(softsp->dvma_arena);

	/* flip off the IOMMU enable switch */
	*softsp->iommu_ctrl_reg &=
	    (TSB_SIZE << TSB_SIZE_SHIFT | IOMMU_DISABLE);

	iommu_tsb_free(softsp->iommu_tsb_cookie);

	return (DDI_SUCCESS);
}

/*
 * Initialize iommu hardware registers when the system is being resumed.
 * (Subset of iommu_init())
 */
int
iommu_resume_init(struct sbus_soft_state *softsp)
{
	int i;
	uint_t tsb_size;
	uint_t tsb_bytes;

	/*
	 * Reset the base address of the TSB table in the hardware
	 */
	*softsp->tsb_base_addr = va_to_pa((caddr_t)softsp->soft_tsb_base_addr);

	/*
	 * Figure out the correct size of the IOMMU TSB entries.  If we
	 * end up with a size smaller than that needed for 8M of IOMMU
	 * space, default the size to 8M.  XXX We could probably panic here
	 */
	i = sizeof (sysio_iommu_tsb_sizes) / sizeof (sysio_iommu_tsb_sizes[0])
	    - 1;

	tsb_bytes = iommu_tsb_cookie_to_size(softsp->iommu_tsb_cookie);

	while (i > 0) {
		if (tsb_bytes >= sysio_iommu_tsb_sizes[i])
			break;
		i--;
	}

	tsb_size = i;

	/* OK, let's flip the "on" switch of the IOMMU */
	*softsp->iommu_ctrl_reg = (uint64_t)(tsb_size << TSB_SIZE_SHIFT
	    | IOMMU_ENABLE | IOMMU_DIAG_ENABLE);

	return (DDI_SUCCESS);
}

void
iommu_tlb_flush(struct sbus_soft_state *softsp, ioaddr_t addr, pgcnt_t npages)
{
	volatile uint64_t tmpreg;
	volatile uint64_t *vaddr_reg, *valid_bit_reg;
	ioaddr_t hiaddr, ioaddr;
	int i, do_flush = 0;

	if (npages == 1) {
		*softsp->iommu_flush_reg = (uint64_t)addr;
		tmpreg = *softsp->sbus_ctrl_reg;
		return;
	}

	hiaddr = addr + (ioaddr_t)(npages * IOMMU_PAGESIZE);
	for (i = 0, vaddr_reg = softsp->iommu_tlb_tag,
	    valid_bit_reg = softsp->iommu_tlb_data;
	    i < IOMMU_TLB_ENTRIES; i++, vaddr_reg++, valid_bit_reg++) {
		tmpreg = *vaddr_reg;
		ioaddr = (ioaddr_t)((tmpreg & IOMMU_TLBTAG_VA_MASK) <<
		    IOMMU_TLBTAG_VA_SHIFT);

		DPRINTF(IOMMU_TLB, ("Vaddr reg 0x%p, "
		    "TLB vaddr reg %lx, IO addr 0x%x "
		    "Base addr 0x%x, Hi addr 0x%x\n",
		    (void *)vaddr_reg, tmpreg, ioaddr, addr, hiaddr));

		if (ioaddr >= addr && ioaddr <= hiaddr) {
			tmpreg = *valid_bit_reg;

			DPRINTF(IOMMU_TLB, ("Valid reg addr 0x%p, "
			    "TLB valid reg %lx\n",
			    (void *)valid_bit_reg, tmpreg));

			if (tmpreg & IOMMU_TLB_VALID) {
				*softsp->iommu_flush_reg = (uint64_t)ioaddr;
				do_flush = 1;
			}
		}
	}

	if (do_flush)
		tmpreg = *softsp->sbus_ctrl_reg;
}


/*
 * Shorthand defines
 */

#define	ALO		dma_lim->dlim_addr_lo
#define	AHI		dma_lim->dlim_addr_hi
#define	OBJSIZE		dmareq->dmar_object.dmao_size
#define	IOTTE_NDX(vaddr, base)	(base + \
	(int)(iommu_btop((vaddr & ~IOMMU_PAGEMASK) - \
	softsp->iommu_dvma_base)))
/*
 * If DDI_DMA_PARTIAL flag is set and the request is for
 * less than MIN_DVMA_WIN_SIZE, it's not worth the hassle so
 * we turn off the DDI_DMA_PARTIAL flag
 */
#define	MIN_DVMA_WIN_SIZE	(128)

/* ARGSUSED */
void
iommu_remove_mappings(ddi_dma_impl_t *mp)
{
#if defined(DEBUG) && defined(IO_MEMDEBUG)
	pgcnt_t npages;
	ioaddr_t ioaddr;
	volatile uint64_t *iotte_ptr;
	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
	struct sbus_soft_state *softsp = mppriv->softsp;

#if defined(IO_MEMUSAGE)
	struct io_mem_list **prevp, *walk;
#endif /* IO_MEMUSAGE */

	ASSERT(softsp != NULL);
	/*
	 * Run thru the mapped entries and free 'em
	 */

	ioaddr = mp->dmai_mapping & ~IOMMU_PAGEOFFSET;
	npages = mp->dmai_ndvmapages;

#if defined(IO_MEMUSAGE)
	mutex_enter(&softsp->iomemlock);
	prevp = &softsp->iomem;
	walk = softsp->iomem;

	while (walk) {
		if (walk->ioaddr == ioaddr) {
			*prevp = walk->next;
			break;
		}

		prevp = &walk->next;
		walk = walk->next;
	}
	mutex_exit(&softsp->iomemlock);

	kmem_free(walk->pfn, sizeof (pfn_t) * (npages + 1));
	kmem_free(walk, sizeof (struct io_mem_list));
#endif /* IO_MEMUSAGE */

	iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);

	while (npages) {
		DPRINTF(IOMMU_DMAMCTL_DEBUG,
		    ("dma_mctl: freeing ioaddr %x iotte %p\n",
		    ioaddr, iotte_ptr));
		*iotte_ptr = (uint64_t)0;	/* unload tte */
		iommu_tlb_flush(softsp, ioaddr, 1);
		npages--;
		ioaddr += IOMMU_PAGESIZE;
		iotte_ptr++;
	}
#endif /* DEBUG && IO_MEMDEBUG */
}


int
iommu_create_vaddr_mappings(ddi_dma_impl_t *mp, uintptr_t addr)
{
	pfn_t pfn;
	struct as *as = NULL;
	pgcnt_t npages;
	ioaddr_t ioaddr;
	uint_t offset;
	volatile uint64_t *iotte_ptr;
	uint64_t tmp_iotte_flag;
	int rval = DDI_DMA_MAPPED;
	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
	struct sbus_soft_state *softsp = mppriv->softsp;
	int diag_tlb_flush;
#if defined(DEBUG) && defined(IO_MEMUSAGE)
	struct io_mem_list *iomemp;
	pfn_t *pfnp;
#endif /* DEBUG && IO_MEMUSAGE */

	ASSERT(softsp != NULL);

	/* Set Valid and Cache for mem xfer */
	tmp_iotte_flag = IOTTE_VALID | IOTTE_CACHE | IOTTE_WRITE | IOTTE_STREAM;

	offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET);
	npages = iommu_btopr(mp->dmai_size + offset);
	ioaddr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
	iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
	diag_tlb_flush = npages > tlb_flush_using_diag ? 1 : 0;

	as = mp->dmai_object.dmao_obj.virt_obj.v_as;
	if (as == NULL)
		as = &kas;

	/*
	 * Set the per object bits of the TTE here.  We optimize this for
	 * the memory case so that the while loop overhead is minimal.
	 */
	/* Turn on NOSYNC if we need consistent mem */
	if (mp->dmai_rflags & DDI_DMA_CONSISTENT) {
		mp->dmai_rflags |= DMP_NOSYNC;
		tmp_iotte_flag ^= IOTTE_STREAM;
	/* Set streaming mode if not consistent mem */
	} else if (softsp->stream_buf_off) {
		tmp_iotte_flag ^= IOTTE_STREAM;
	}

#if defined(DEBUG) && defined(IO_MEMUSAGE)
	iomemp = kmem_alloc(sizeof (struct io_mem_list), KM_SLEEP);
	iomemp->rdip = mp->dmai_rdip;
	iomemp->ioaddr = ioaddr;
	iomemp->addr = addr;
	iomemp->npages = npages;
	pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) * (npages + 1),
	    KM_SLEEP);
#endif /* DEBUG && IO_MEMUSAGE */
	/*
	 * Grab the mappings from the dmmu and stick 'em into the
	 * iommu.
	 */
	ASSERT(npages != 0);

	/* If we're going to flush the TLB using diag mode, do it now. */
	if (diag_tlb_flush)
		iommu_tlb_flush(softsp, ioaddr, npages);

	do {
		uint64_t iotte_flag = tmp_iotte_flag;

		/*
		 * Fetch the pfn for the DMA object
		 */

		ASSERT(as);
		pfn = hat_getpfnum(as->a_hat, (caddr_t)addr);
		ASSERT(pfn != PFN_INVALID);

		if (!pf_is_memory(pfn)) {
			/* DVMA'ing to IO space */

			/* Turn off cache bit if set */
			if (iotte_flag & IOTTE_CACHE)
				iotte_flag ^= IOTTE_CACHE;

			/* Turn off stream bit if set */
			if (iotte_flag & IOTTE_STREAM)
				iotte_flag ^= IOTTE_STREAM;

			if (IS_INTRA_SBUS(softsp, pfn)) {
				/* Intra sbus transfer */

				/* Turn on intra flag */
				iotte_flag |= IOTTE_INTRA;

				DPRINTF(IOMMU_INTER_INTRA_XFER, (
				    "Intra xfer pfnum %lx TTE %lx\n",
				    pfn, iotte_flag));
			} else {
				if (pf_is_dmacapable(pfn) == 1) {
					/*EMPTY*/
					DPRINTF(IOMMU_INTER_INTRA_XFER,
					    ("Inter xfer pfnum %lx "
					    "tte hi %lx\n",
					    pfn, iotte_flag));
				} else {
					rval = DDI_DMA_NOMAPPING;
#if defined(DEBUG) && defined(IO_MEMDEBUG)
					goto bad;
#endif /* DEBUG && IO_MEMDEBUG */
				}
			}
		}
		addr += IOMMU_PAGESIZE;

		DPRINTF(IOMMU_TTE, ("vaddr mapping: tte index %p pfn %lx "
		    "tte flag %lx addr %lx ioaddr %x\n",
		    (void *)iotte_ptr, pfn, iotte_flag, addr, ioaddr));

		/* Flush the IOMMU TLB before loading a new mapping */
		if (!diag_tlb_flush)
			iommu_tlb_flush(softsp, ioaddr, 1);

		/* Set the hardware IO TTE */
		*iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;

		ioaddr += IOMMU_PAGESIZE;
		npages--;
		iotte_ptr++;
#if defined(DEBUG) && defined(IO_MEMUSAGE)
		*pfnp = pfn;
		pfnp++;
#endif /* DEBUG && IO_MEMUSAGE */
	} while (npages != 0);

#if defined(DEBUG) && defined(IO_MEMUSAGE)
	mutex_enter(&softsp->iomemlock);
	iomemp->next = softsp->iomem;
	softsp->iomem = iomemp;
	mutex_exit(&softsp->iomemlock);
#endif /* DEBUG && IO_MEMUSAGE */

	return (rval);

#if defined(DEBUG) && defined(IO_MEMDEBUG)
bad:
	/* If we fail a mapping, free up any mapping resources used */
	iommu_remove_mappings(mp);
	return (rval);
#endif /* DEBUG && IO_MEMDEBUG */
}


int
iommu_create_pp_mappings(ddi_dma_impl_t *mp, page_t *pp, page_t **pplist)
{
	pfn_t pfn;
	pgcnt_t npages;
	ioaddr_t ioaddr;
	uint_t offset;
	volatile uint64_t *iotte_ptr;
	uint64_t tmp_iotte_flag;
	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
	struct sbus_soft_state *softsp = mppriv->softsp;
	int diag_tlb_flush;
#if defined(DEBUG) && defined(IO_MEMUSAGE)
	struct io_mem_list *iomemp;
	pfn_t *pfnp;
#endif /* DEBUG && IO_MEMUSAGE */
	int rval = DDI_DMA_MAPPED;

	/* Set Valid and Cache for mem xfer */
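	/*
	 * Descriptive note: the TTE template below starts with every
	 * per-transfer bit set (valid, cacheable, writable, streaming);
	 * the consistent-mode and stream_buf_off checks that follow XOR
	 * IOTTE_STREAM back off whenever the streaming buffer must not be
	 * used for this mapping.
	 */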
	tmp_iotte_flag = IOTTE_VALID | IOTTE_CACHE | IOTTE_WRITE | IOTTE_STREAM;

	ASSERT(softsp != NULL);

	offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET);
	npages = iommu_btopr(mp->dmai_size + offset);
	ioaddr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
	iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
	diag_tlb_flush = npages > tlb_flush_using_diag ? 1 : 0;

	/*
	 * Set the per object bits of the TTE here.  We optimize this for
	 * the memory case so that the while loop overhead is minimal.
	 */
	if (mp->dmai_rflags & DDI_DMA_CONSISTENT) {
		/* Turn on NOSYNC if we need consistent mem */
		mp->dmai_rflags |= DMP_NOSYNC;
		tmp_iotte_flag ^= IOTTE_STREAM;
	} else if (softsp->stream_buf_off) {
		/* Set streaming mode if not consistent mem */
		tmp_iotte_flag ^= IOTTE_STREAM;
	}

#if defined(DEBUG) && defined(IO_MEMUSAGE)
	iomemp = kmem_alloc(sizeof (struct io_mem_list), KM_SLEEP);
	iomemp->rdip = mp->dmai_rdip;
	iomemp->ioaddr = ioaddr;
	iomemp->npages = npages;
	pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) * (npages + 1),
	    KM_SLEEP);
#endif /* DEBUG && IO_MEMUSAGE */
	/*
	 * Grab the mappings from the dmmu and stick 'em into the
	 * iommu.
	 */
	ASSERT(npages != 0);

	/* If we're going to flush the TLB using diag mode, do it now. */
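	/*
	 * Descriptive note: two flush strategies are used.  For small
	 * requests each TTE is flushed individually through the flush
	 * register just before it is rewritten; for requests larger than
	 * tlb_flush_using_diag pages, iommu_tlb_flush() instead walks the
	 * TLB diagnostic tag/data registers once, up front, and flushes
	 * only the valid entries that fall inside the range being remapped.
	 */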
	if (diag_tlb_flush)
		iommu_tlb_flush(softsp, ioaddr, npages);

	do {
		uint64_t iotte_flag;

		iotte_flag = tmp_iotte_flag;

		if (pp != NULL) {
			pfn = pp->p_pagenum;
			pp = pp->p_next;
		} else {
			pfn = (*pplist)->p_pagenum;
			pplist++;
		}

		DPRINTF(IOMMU_TTE, ("pp mapping TTE index %p pfn %lx "
		    "tte flag %lx ioaddr %x\n", (void *)iotte_ptr,
		    pfn, iotte_flag, ioaddr));

		/* Flush the IOMMU TLB before loading a new mapping */
		if (!diag_tlb_flush)
			iommu_tlb_flush(softsp, ioaddr, 1);

		/* Set the hardware IO TTE */
		*iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;

		ioaddr += IOMMU_PAGESIZE;
		npages--;
		iotte_ptr++;

#if defined(DEBUG) && defined(IO_MEMUSAGE)
		*pfnp = pfn;
		pfnp++;
#endif /* DEBUG && IO_MEMUSAGE */

	} while (npages != 0);

#if defined(DEBUG) && defined(IO_MEMUSAGE)
	mutex_enter(&softsp->iomemlock);
	iomemp->next = softsp->iomem;
	softsp->iomem = iomemp;
	mutex_exit(&softsp->iomemlock);
#endif /* DEBUG && IO_MEMUSAGE */

	return (rval);
}


int
iommu_dma_lim_setup(dev_info_t *dip, dev_info_t *rdip,
    struct sbus_soft_state *softsp, uint_t *burstsizep, uint_t burstsize64,
    uint_t *minxferp, uint_t dma_flags)
{
	struct regspec *rp;

	/* Take care of 64 bit limits. */
	if (!(dma_flags & DDI_DMA_SBUS_64BIT)) {
		/*
		 * return burst size for 32-bit mode
		 */
		*burstsizep &= softsp->sbus_burst_sizes;
		return (DDI_FAILURE);
	}

	/*
	 * check if SBus supports 64 bit and if caller
	 * is child of SBus.  No support through bridges
	 */
	if (!softsp->sbus64_burst_sizes || (ddi_get_parent(rdip) != dip)) {
		/*
		 * SBus doesn't support it or bridge.  Do 32-bit
		 * xfers
		 */
		*burstsizep &= softsp->sbus_burst_sizes;
		return (DDI_FAILURE);
	}

	rp = ddi_rnumber_to_regspec(rdip, 0);
	if (rp == NULL) {
		*burstsizep &= softsp->sbus_burst_sizes;
		return (DDI_FAILURE);
	}

	/* Check for old-style 64 bit burstsizes */
	if (burstsize64 & SYSIO64_BURST_MASK) {
		/* Scale back burstsizes if Necessary */
		*burstsizep &= (softsp->sbus64_burst_sizes |
		    softsp->sbus_burst_sizes);
	} else {
		/* Get the 64 bit burstsizes. */
		*burstsizep = burstsize64;

		/* Scale back burstsizes if Necessary */
		*burstsizep &= (softsp->sbus64_burst_sizes >>
		    SYSIO64_BURST_SHIFT);
	}

	/*
	 * Set the largest value of the smallest burstsize that the
	 * device or the bus can manage.
	 */
	*minxferp = MAX(*minxferp,
	    (1 << (ddi_ffs(softsp->sbus64_burst_sizes) - 1)));

	return (DDI_SUCCESS);
}


int
iommu_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_attr_t *dma_attr, int (*waitfp)(caddr_t), caddr_t arg,
    ddi_dma_handle_t *handlep)
{
	ioaddr_t addrlow, addrhigh, segalign;
	ddi_dma_impl_t *mp;
	struct dma_impl_priv *mppriv;
	struct sbus_soft_state *softsp = (struct sbus_soft_state *)
	    ddi_get_soft_state(sbusp, ddi_get_instance(dip));

	/*
	 * Setup dma burstsizes and min-xfer counts.
	 */
	(void) iommu_dma_lim_setup(dip, rdip, softsp,
	    &dma_attr->dma_attr_burstsizes,
	    dma_attr->dma_attr_burstsizes, &dma_attr->dma_attr_minxfer,
	    dma_attr->dma_attr_flags);

	if (dma_attr->dma_attr_burstsizes == 0)
		return (DDI_DMA_BADATTR);

	addrlow = (ioaddr_t)dma_attr->dma_attr_addr_lo;
	addrhigh = (ioaddr_t)dma_attr->dma_attr_addr_hi;
	segalign = (ioaddr_t)dma_attr->dma_attr_seg;

	/*
	 * Check sanity for hi and lo address limits
	 */
	if ((addrhigh <= addrlow) ||
	    (addrhigh < (ioaddr_t)softsp->iommu_dvma_base)) {
		return (DDI_DMA_BADATTR);
	}
	if (dma_attr->dma_attr_flags & DDI_DMA_FORCE_PHYSICAL)
		return (DDI_DMA_BADATTR);

	mppriv = kmem_zalloc(sizeof (*mppriv),
	    (waitfp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP);

	if (mppriv == NULL) {
		if (waitfp != DDI_DMA_DONTWAIT) {
			ddi_set_callback(waitfp, arg,
			    &softsp->dvma_call_list_id);
		}
		return (DDI_DMA_NORESOURCES);
	}
	mp = (ddi_dma_impl_t *)mppriv;

	DPRINTF(IOMMU_DMA_ALLOCHDL_DEBUG, ("dma_allochdl: (%s) handle %p "
	    "hi %x lo %x min %x burst %x\n",
	    ddi_get_name(dip), (void *)mp, addrhigh, addrlow,
	    dma_attr->dma_attr_minxfer, dma_attr->dma_attr_burstsizes));

	mp->dmai_rdip = rdip;
	mp->dmai_minxfer = (uint_t)dma_attr->dma_attr_minxfer;
	mp->dmai_burstsizes = (uint_t)dma_attr->dma_attr_burstsizes;
	mp->dmai_attr = *dma_attr;
	/* See if the DMA engine has any limit restrictions. */
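	/*
	 * Descriptive note: a handle whose attributes impose no real
	 * constraint (full 32-bit address range, a segment spanning the
	 * whole 32-bit space, alignment no stricter than a page) is marked
	 * DMP_NOLIMIT, so bind time can skip the attribute checks and take
	 * the cheap vmem_alloc() path instead of the constrained
	 * vmem_xalloc() path.
	 */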
	if (segalign == (ioaddr_t)UINT32_MAX &&
	    addrhigh == (ioaddr_t)UINT32_MAX &&
	    (dma_attr->dma_attr_align <= IOMMU_PAGESIZE) && addrlow == 0) {
		mp->dmai_rflags |= DMP_NOLIMIT;
	}
	mppriv->softsp = softsp;
	mppriv->phys_sync_flag = va_to_pa((caddr_t)&mppriv->sync_flag);

	*handlep = (ddi_dma_handle_t)mp;
	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
iommu_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)handle;
	struct sbus_soft_state *softsp = mppriv->softsp;
	ASSERT(softsp != NULL);

	kmem_free(mppriv, sizeof (*mppriv));

	if (softsp->dvma_call_list_id != 0) {
		ddi_run_callback(&softsp->dvma_call_list_id);
	}
	return (DDI_SUCCESS);
}

static int
check_dma_attr(struct ddi_dma_req *dmareq, ddi_dma_attr_t *dma_attr,
    uint32_t *size)
{
	ioaddr_t addrlow;
	ioaddr_t addrhigh;
	uint32_t segalign;
	uint32_t smask;

	smask = *size - 1;
	segalign = dma_attr->dma_attr_seg;
	if (smask > segalign) {
		if ((dmareq->dmar_flags & DDI_DMA_PARTIAL) == 0)
			return (DDI_DMA_TOOBIG);
		*size = segalign + 1;
	}
	addrlow = (ioaddr_t)dma_attr->dma_attr_addr_lo;
	addrhigh = (ioaddr_t)dma_attr->dma_attr_addr_hi;
	if (addrlow + smask > addrhigh || addrlow + smask < addrlow) {
		if (!((addrlow + dmareq->dmar_object.dmao_size == 0) &&
		    (addrhigh == (ioaddr_t)-1))) {
			if ((dmareq->dmar_flags & DDI_DMA_PARTIAL) == 0)
				return (DDI_DMA_TOOBIG);
			*size = MIN(addrhigh - addrlow + 1, *size);
		}
	}
	return (DDI_DMA_MAPOK);
}

int
iommu_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	page_t *pp;
	uint32_t size;
	ioaddr_t ioaddr;
	uint_t offset;
	uintptr_t addr = 0;
	pgcnt_t npages;
	int rval;
	ddi_dma_attr_t *dma_attr;
	struct sbus_soft_state *softsp;
	struct page **pplist = NULL;
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;

#ifdef lint
	dip = dip;
	rdip = rdip;
#endif

	if (mp->dmai_inuse)
		return (DDI_DMA_INUSE);

	dma_attr = &mp->dmai_attr;
	size = (uint32_t)dmareq->dmar_object.dmao_size;
	if (!(mp->dmai_rflags & DMP_NOLIMIT)) {
		rval = check_dma_attr(dmareq, dma_attr, &size);
		if (rval != DDI_DMA_MAPOK)
			return (rval);
	}
	mp->dmai_inuse = 1;
	mp->dmai_offset = 0;
	mp->dmai_rflags = (dmareq->dmar_flags & DMP_DDIFLAGS) |
	    (mp->dmai_rflags & DMP_NOLIMIT);

	switch (dmareq->dmar_object.dmao_type) {
	case DMA_OTYP_VADDR:
	case DMA_OTYP_BUFVADDR:
		addr = (uintptr_t)dmareq->dmar_object.dmao_obj.virt_obj.v_addr;
		offset = addr & IOMMU_PAGEOFFSET;
		pplist = dmareq->dmar_object.dmao_obj.virt_obj.v_priv;
		npages = iommu_btopr(OBJSIZE + offset);

		DPRINTF(IOMMU_DMAMAP_DEBUG, ("dma_map vaddr: %lx pages "
		    "req addr %lx off %x OBJSIZE %x\n",
		    npages, addr, offset, OBJSIZE));

		/* We don't need the addr anymore if we have a shadow list */
		if (pplist != NULL)
			addr = NULL;
		pp = NULL;
		break;

	case DMA_OTYP_PAGES:
		pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp;
		offset = dmareq->dmar_object.dmao_obj.pp_obj.pp_offset;
		npages = iommu_btopr(OBJSIZE + offset);
		break;

	case DMA_OTYP_PADDR:
	default:
		/*
		 * Not a supported type for this implementation
		 */
		rval = DDI_DMA_NOMAPPING;
		goto bad;
	}

	/* Get our soft state once we know we're mapping an object. */
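	/*
	 * Descriptive note: for DDI_DMA_PARTIAL requests that are large
	 * enough to be worth windowing, only MIN_DVMA_WIN_SIZE pages of
	 * DVMA are bound at a time and the caller steps through the object
	 * one window at a time; smaller requests simply have the PARTIAL
	 * flag cleared and are mapped in full.
	 */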
	softsp = mppriv->softsp;
	ASSERT(softsp != NULL);

	if (mp->dmai_rflags & DDI_DMA_PARTIAL) {
		if (size != OBJSIZE) {
			/*
			 * If the request is for partial mapping arrangement,
			 * the device has to be able to address at least the
			 * size of the window we are establishing.
			 */
			if (size < iommu_ptob(MIN_DVMA_WIN_SIZE)) {
				rval = DDI_DMA_NOMAPPING;
				goto bad;
			}
			npages = iommu_btopr(size + offset);
		}
		/*
		 * If the size requested is less than a moderate amt,
		 * skip the partial mapping stuff- it's not worth the
		 * effort.
		 */
		if (npages > MIN_DVMA_WIN_SIZE) {
			npages = MIN_DVMA_WIN_SIZE + iommu_btopr(offset);
			size = iommu_ptob(MIN_DVMA_WIN_SIZE);
			DPRINTF(IOMMU_DMA_SETUP_DEBUG, ("dma_setup: SZ %x pg "
			    "%lx sz %x\n", OBJSIZE, npages, size));
			if (pplist != NULL) {
				mp->dmai_minfo = (void *)pplist;
				mp->dmai_rflags |= DMP_SHADOW;
			}
		} else {
			mp->dmai_rflags ^= DDI_DMA_PARTIAL;
		}
	} else {
		if (npages >= iommu_btop(softsp->iommu_dvma_size) -
		    MIN_DVMA_WIN_SIZE) {
			rval = DDI_DMA_TOOBIG;
			goto bad;
		}
	}

	/*
	 * save dmareq-object, size and npages into mp
	 */
	mp->dmai_object = dmareq->dmar_object;
	mp->dmai_size = size;
	mp->dmai_ndvmapages = npages;

	if (mp->dmai_rflags & DMP_NOLIMIT) {
		ioaddr = (ioaddr_t)(uintptr_t)vmem_alloc(softsp->dvma_arena,
		    iommu_ptob(npages),
		    dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP);
		if (ioaddr == 0) {
			rval = DDI_DMA_NORESOURCES;
			goto bad;
		}

		/*
		 * If we have a 1 page request and we're working with a page
		 * list, we're going to speed load an IOMMU entry.
		 */
		if (npages == 1 && !addr) {
			uint64_t iotte_flag = IOTTE_VALID | IOTTE_CACHE |
			    IOTTE_WRITE | IOTTE_STREAM;
			volatile uint64_t *iotte_ptr;
			pfn_t pfn;
#if defined(DEBUG) && defined(IO_MEMUSAGE)
			struct io_mem_list *iomemp;
			pfn_t *pfnp;
#endif /* DEBUG && IO_MEMUSAGE */

			iotte_ptr = IOTTE_NDX(ioaddr,
			    softsp->soft_tsb_base_addr);

			if (mp->dmai_rflags & DDI_DMA_CONSISTENT) {
				mp->dmai_rflags |= DMP_NOSYNC;
				iotte_flag ^= IOTTE_STREAM;
			} else if (softsp->stream_buf_off)
				iotte_flag ^= IOTTE_STREAM;

			mp->dmai_rflags ^= DDI_DMA_PARTIAL;

			if (pp != NULL)
				pfn = pp->p_pagenum;
			else
				pfn = (*pplist)->p_pagenum;

			iommu_tlb_flush(softsp, ioaddr, 1);

			*iotte_ptr =
			    ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;

			mp->dmai_mapping = (ioaddr_t)(ioaddr + offset);
			mp->dmai_nwin = 0;
			if (cp != NULL) {
				cp->dmac_notused = 0;
				cp->dmac_address = (ioaddr_t)mp->dmai_mapping;
				cp->dmac_size = mp->dmai_size;
				cp->dmac_type = 0;
				*ccountp = 1;
			}

			DPRINTF(IOMMU_TTE, ("speed loading: TTE index %p "
			    "pfn %lx tte flag %lx addr %lx ioaddr %x\n",
			    (void *)iotte_ptr, pfn, iotte_flag, addr, ioaddr));

#if defined(DEBUG) && defined(IO_MEMUSAGE)
			iomemp = kmem_alloc(sizeof (struct io_mem_list),
			    KM_SLEEP);
			iomemp->rdip = mp->dmai_rdip;
			iomemp->ioaddr = ioaddr;
			iomemp->addr = addr;
			iomemp->npages = npages;
			pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) *
			    (npages + 1), KM_SLEEP);
			*pfnp = pfn;
			mutex_enter(&softsp->iomemlock);
			iomemp->next = softsp->iomem;
			softsp->iomem = iomemp;
			mutex_exit(&softsp->iomemlock);
#endif /* DEBUG && IO_MEMUSAGE */

			return (DDI_DMA_MAPPED);
		}
	} else {
		ioaddr = (ioaddr_t)(uintptr_t)vmem_xalloc(softsp->dvma_arena,
(ioaddr_t)(uintptr_t)vmem_xalloc(softsp->dvma_arena, 10497c478bd9Sstevel@tonic-gate iommu_ptob(npages), 10507c478bd9Sstevel@tonic-gate MAX((uint_t)dma_attr->dma_attr_align, IOMMU_PAGESIZE), 0, 10517c478bd9Sstevel@tonic-gate (uint_t)dma_attr->dma_attr_seg + 1, 1052ffadc26eSmike_s (void *)(uintptr_t)(ioaddr_t)dma_attr->dma_attr_addr_lo, 1053ffadc26eSmike_s (void *)(uintptr_t) 1054ffadc26eSmike_s ((ioaddr_t)dma_attr->dma_attr_addr_hi + 1), 10557c478bd9Sstevel@tonic-gate dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP); 10567c478bd9Sstevel@tonic-gate } 10577c478bd9Sstevel@tonic-gate 10587c478bd9Sstevel@tonic-gate if (ioaddr == 0) { 10597c478bd9Sstevel@tonic-gate if (dmareq->dmar_fp == DDI_DMA_SLEEP) 10607c478bd9Sstevel@tonic-gate rval = DDI_DMA_NOMAPPING; 10617c478bd9Sstevel@tonic-gate else 10627c478bd9Sstevel@tonic-gate rval = DDI_DMA_NORESOURCES; 10637c478bd9Sstevel@tonic-gate goto bad; 10647c478bd9Sstevel@tonic-gate } 10657c478bd9Sstevel@tonic-gate 10667c478bd9Sstevel@tonic-gate mp->dmai_mapping = ioaddr + offset; 10677c478bd9Sstevel@tonic-gate ASSERT(mp->dmai_mapping >= softsp->iommu_dvma_base); 10687c478bd9Sstevel@tonic-gate 10697c478bd9Sstevel@tonic-gate /* 10707c478bd9Sstevel@tonic-gate * At this point we have a range of virtual address allocated 10717c478bd9Sstevel@tonic-gate * with which we now have to map to the requested object. 10727c478bd9Sstevel@tonic-gate */ 10737c478bd9Sstevel@tonic-gate if (addr) { 10747c478bd9Sstevel@tonic-gate rval = iommu_create_vaddr_mappings(mp, 10757c478bd9Sstevel@tonic-gate addr & ~IOMMU_PAGEOFFSET); 10767c478bd9Sstevel@tonic-gate if (rval == DDI_DMA_NOMAPPING) 10777c478bd9Sstevel@tonic-gate goto bad_nomap; 10787c478bd9Sstevel@tonic-gate } else { 10797c478bd9Sstevel@tonic-gate rval = iommu_create_pp_mappings(mp, pp, pplist); 10807c478bd9Sstevel@tonic-gate if (rval == DDI_DMA_NOMAPPING) 10817c478bd9Sstevel@tonic-gate goto bad_nomap; 10827c478bd9Sstevel@tonic-gate } 10837c478bd9Sstevel@tonic-gate 10847c478bd9Sstevel@tonic-gate if (cp) { 10857c478bd9Sstevel@tonic-gate cp->dmac_notused = 0; 10867c478bd9Sstevel@tonic-gate cp->dmac_address = (ioaddr_t)mp->dmai_mapping; 10877c478bd9Sstevel@tonic-gate cp->dmac_size = mp->dmai_size; 10887c478bd9Sstevel@tonic-gate cp->dmac_type = 0; 10897c478bd9Sstevel@tonic-gate *ccountp = 1; 10907c478bd9Sstevel@tonic-gate } 10917c478bd9Sstevel@tonic-gate if (mp->dmai_rflags & DDI_DMA_PARTIAL) { 10927c478bd9Sstevel@tonic-gate size = iommu_ptob(mp->dmai_ndvmapages - iommu_btopr(offset)); 10937c478bd9Sstevel@tonic-gate mp->dmai_nwin = 10947c478bd9Sstevel@tonic-gate (dmareq->dmar_object.dmao_size + (size - 1)) / size; 10957c478bd9Sstevel@tonic-gate return (DDI_DMA_PARTIAL_MAP); 10967c478bd9Sstevel@tonic-gate } else { 10977c478bd9Sstevel@tonic-gate mp->dmai_nwin = 0; 10987c478bd9Sstevel@tonic-gate return (DDI_DMA_MAPPED); 10997c478bd9Sstevel@tonic-gate } 11007c478bd9Sstevel@tonic-gate 11017c478bd9Sstevel@tonic-gate bad_nomap: 11027c478bd9Sstevel@tonic-gate /* 11037c478bd9Sstevel@tonic-gate * Could not create mmu mappings. 
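	 * Hand the DVMA range back to whichever arena allocation path
	 * produced it before falling into the common error handling below.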
	 */
	if (mp->dmai_rflags & DMP_NOLIMIT) {
		vmem_free(softsp->dvma_arena, (void *)(uintptr_t)ioaddr,
		    iommu_ptob(npages));
	} else {
		vmem_xfree(softsp->dvma_arena, (void *)(uintptr_t)ioaddr,
		    iommu_ptob(npages));
	}

bad:
	if (rval == DDI_DMA_NORESOURCES &&
	    dmareq->dmar_fp != DDI_DMA_DONTWAIT) {
		ddi_set_callback(dmareq->dmar_fp,
		    dmareq->dmar_arg, &softsp->dvma_call_list_id);
	}
	mp->dmai_inuse = 0;
	return (rval);
}

/* ARGSUSED */
int
iommu_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	ioaddr_t addr;
	uint_t npages;
	size_t size;
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
	struct sbus_soft_state *softsp = mppriv->softsp;
	ASSERT(softsp != NULL);

	addr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
	npages = mp->dmai_ndvmapages;
	size = iommu_ptob(npages);

	DPRINTF(IOMMU_DMA_UNBINDHDL_DEBUG, ("iommu_dma_unbindhdl: "
	    "unbinding addr %x for %x pages\n", addr, mp->dmai_ndvmapages));

	/* sync the entire object */
	if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
		/* flush stream write buffers */
		sync_stream_buf(softsp, addr, npages, (int *)&mppriv->sync_flag,
		    mppriv->phys_sync_flag);
	}

#if defined(DEBUG) && defined(IO_MEMDEBUG)
	/*
	 * 'Free' the dma mappings.
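	 * Note that iommu_remove_mappings() is only compiled in under
	 * IO_MEMDEBUG; in normal builds the IOTTEs are left in place and
	 * simply rewritten the next time this DVMA range is allocated.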
	 */
	iommu_remove_mappings(mp);
#endif /* DEBUG && IO_MEMDEBUG */

	ASSERT(npages > (uint_t)0);
	if (mp->dmai_rflags & DMP_NOLIMIT)
		vmem_free(softsp->dvma_arena, (void *)(uintptr_t)addr, size);
	else
		vmem_xfree(softsp->dvma_arena, (void *)(uintptr_t)addr, size);

	mp->dmai_ndvmapages = 0;
	mp->dmai_inuse = 0;
	mp->dmai_minfo = NULL;

	if (softsp->dvma_call_list_id != 0)
		ddi_run_callback(&softsp->dvma_call_list_id);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
iommu_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;

	if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
		sync_stream_buf(mppriv->softsp, mp->dmai_mapping,
		    mp->dmai_ndvmapages, (int *)&mppriv->sync_flag,
		    mppriv->phys_sync_flag);
	}
	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
iommu_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	off_t offset;
	uint_t winsize;
	uint_t newoff;
	int rval;

	offset = mp->dmai_mapping & IOMMU_PAGEOFFSET;
	winsize = iommu_ptob(mp->dmai_ndvmapages - iommu_btopr(offset));

	DPRINTF(IOMMU_DMA_WIN_DEBUG, ("getwin win %d winsize %x\n", win,
	    winsize));

	/*
	 * win is in the range [0 .. dmai_nwin-1]
	 */
	if (win >= mp->dmai_nwin)
		return (DDI_FAILURE);

	newoff = win * winsize;
	if (newoff > mp->dmai_object.dmao_size - mp->dmai_minxfer)
		return (DDI_FAILURE);

	ASSERT(cookiep);
	cookiep->dmac_notused = 0;
	cookiep->dmac_type = 0;
	cookiep->dmac_address = (ioaddr_t)mp->dmai_mapping;
	cookiep->dmac_size = mp->dmai_size;
	*ccountp = 1;
	*offp = (off_t)newoff;
	*lenp = (uint_t)winsize;

	if (newoff == mp->dmai_offset) {
		/*
		 * Nothing to do...
		 */
		return (DDI_SUCCESS);
	}

	if ((rval = iommu_map_window(mp, newoff, winsize)) != DDI_SUCCESS)
		return (rval);

	/*
	 * Set this again in case iommu_map_window() has changed it
	 */
	cookiep->dmac_size = mp->dmai_size;

	return (DDI_SUCCESS);
}

static int
iommu_map_window(ddi_dma_impl_t *mp, off_t newoff, size_t winsize)
{
	uintptr_t addr = 0;
	page_t *pp;
	uint_t flags;
	struct page **pplist = NULL;

#if defined(DEBUG) && defined(IO_MEMDEBUG)
	/* Free mappings for current window */
	iommu_remove_mappings(mp);
#endif /* DEBUG && IO_MEMDEBUG */

	mp->dmai_offset = newoff;
	mp->dmai_size = mp->dmai_object.dmao_size - newoff;
	mp->dmai_size = MIN(mp->dmai_size, winsize);

	if (mp->dmai_object.dmao_type == DMA_OTYP_VADDR ||
	    mp->dmai_object.dmao_type == DMA_OTYP_BUFVADDR) {
		if (mp->dmai_rflags & DMP_SHADOW) {
			pplist = (struct page **)mp->dmai_minfo;
			ASSERT(pplist != NULL);
			pplist = pplist + (newoff >> MMU_PAGESHIFT);
		} else {
			addr = (uintptr_t)
			    mp->dmai_object.dmao_obj.virt_obj.v_addr;
			addr = (addr + newoff) & ~IOMMU_PAGEOFFSET;
		}
		pp = NULL;
	} else {
		pp = mp->dmai_object.dmao_obj.pp_obj.pp_pp;
		flags = 0;
		while (flags < newoff) {
			pp = pp->p_next;
			flags += MMU_PAGESIZE;
		}
	}

	/* Set up mappings for next window */
	if (addr) {
		if (iommu_create_vaddr_mappings(mp, addr) < 0)
			return (DDI_FAILURE);
	} else {
		if (iommu_create_pp_mappings(mp, pp, pplist) < 0)
			return (DDI_FAILURE);
	}

	/*
	 * also invalidate read stream buffer
	 */
	if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
		struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;

		sync_stream_buf(mppriv->softsp, mp->dmai_mapping,
		    mp->dmai_ndvmapages, (int *)&mppriv->sync_flag,
		    mppriv->phys_sync_flag);
	}

	return (DDI_SUCCESS);

}

int
iommu_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep)
{
	ddi_dma_lim_t *dma_lim = dmareq->dmar_limits;
	ddi_dma_impl_t *mp;
	ddi_dma_attr_t *dma_attr;
	struct dma_impl_priv *mppriv;
	ioaddr_t addrlow, addrhigh;
	ioaddr_t segalign;
	int rval;
	struct sbus_soft_state *softsp =
	    (struct sbus_soft_state *)ddi_get_soft_state(sbusp,
	    ddi_get_instance(dip));

	addrlow = dma_lim->dlim_addr_lo;
	addrhigh = dma_lim->dlim_addr_hi;
	if ((addrhigh <= addrlow) ||
	    (addrhigh < (ioaddr_t)softsp->iommu_dvma_base)) {
		return (DDI_DMA_NOMAPPING);
	}

	/*
	 * Setup DMA burstsizes and min-xfer counts.
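	 * iommu_dma_lim_setup() updates dlim_burstsizes and dlim_minxfer
	 * through the pointers passed here; if no burstsizes survive the
	 * check below, the request cannot be mapped.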
	 */
	(void) iommu_dma_lim_setup(dip, rdip, softsp, &dma_lim->dlim_burstsizes,
	    (uint_t)dma_lim->dlim_burstsizes, &dma_lim->dlim_minxfer,
	    dmareq->dmar_flags);

	if (dma_lim->dlim_burstsizes == 0)
		return (DDI_DMA_NOMAPPING);
	/*
	 * If not an advisory call, get a DMA handle
	 */
	if (!handlep) {
		return (DDI_DMA_MAPOK);
	}

	mppriv = kmem_zalloc(sizeof (*mppriv),
	    (dmareq->dmar_fp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP);
	if (mppriv == NULL) {
		if (dmareq->dmar_fp != DDI_DMA_DONTWAIT) {
			ddi_set_callback(dmareq->dmar_fp,
			    dmareq->dmar_arg, &softsp->dvma_call_list_id);
		}
		return (DDI_DMA_NORESOURCES);
	}
	mp = (ddi_dma_impl_t *)mppriv;
	mp->dmai_rdip = rdip;
	mp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS;
	mp->dmai_minxfer = dma_lim->dlim_minxfer;
	mp->dmai_burstsizes = dma_lim->dlim_burstsizes;
	mp->dmai_offset = 0;
	mp->dmai_ndvmapages = 0;
	mp->dmai_minfo = 0;
	mp->dmai_inuse = 0;
	segalign = dma_lim->dlim_cntr_max;
	/* See if the DMA engine has any limit restrictions. */
	if (segalign == UINT32_MAX && addrhigh == UINT32_MAX &&
	    addrlow == 0) {
		mp->dmai_rflags |= DMP_NOLIMIT;
	}
	mppriv->softsp = softsp;
	mppriv->phys_sync_flag = va_to_pa((caddr_t)&mppriv->sync_flag);
	dma_attr = &mp->dmai_attr;
	dma_attr->dma_attr_align = 1;
	dma_attr->dma_attr_addr_lo = addrlow;
	dma_attr->dma_attr_addr_hi = addrhigh;
	dma_attr->dma_attr_seg = segalign;
	dma_attr->dma_attr_burstsizes = dma_lim->dlim_burstsizes;
	rval = iommu_dma_bindhdl(dip, rdip, (ddi_dma_handle_t)mp,
	    dmareq, NULL, NULL);
	if (rval && (rval != DDI_DMA_PARTIAL_MAP)) {
		kmem_free(mppriv, sizeof (*mppriv));
	} else {
		*handlep = (ddi_dma_handle_t)mp;
	}
	return (rval);
}

/*ARGSUSED*/
int
iommu_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t cache_flags)
{
	ioaddr_t addr;
	uint_t offset;
	pgcnt_t npages;
	size_t size;
	ddi_dma_cookie_t *cp;
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;

	DPRINTF(IOMMU_DMAMCTL_DEBUG, ("dma_mctl: handle %p ", (void *)mp));
	switch (request) {
	case DDI_DMA_FREE:
	{
		struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
		struct sbus_soft_state *softsp = mppriv->softsp;
		ASSERT(softsp != NULL);

		/*
		 * 'Free' the dma mappings.
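		 * This mirrors iommu_dma_unbindhdl(): sync any streaming
		 * mapping, return the DVMA range to its arena, and then
		 * free the handle itself as well.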
		 */
		addr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
		npages = mp->dmai_ndvmapages;
		size = iommu_ptob(npages);

		DPRINTF(IOMMU_DMAMCTL_DMA_FREE_DEBUG, ("iommu_dma_mctl dmafree:"
		    "freeing vaddr %x for %x pages.\n", addr,
		    mp->dmai_ndvmapages));
		/* sync the entire object */
		if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
			/* flush stream write buffers */
			sync_stream_buf(softsp, addr, npages,
			    (int *)&mppriv->sync_flag, mppriv->phys_sync_flag);
		}

#if defined(DEBUG) && defined(IO_MEMDEBUG)
		iommu_remove_mappings(mp);
#endif /* DEBUG && IO_MEMDEBUG */

		ASSERT(npages > (uint_t)0);
		if (mp->dmai_rflags & DMP_NOLIMIT)
			vmem_free(softsp->dvma_arena,
			    (void *)(uintptr_t)addr, size);
		else
			vmem_xfree(softsp->dvma_arena,
			    (void *)(uintptr_t)addr, size);

		kmem_free(mppriv, sizeof (*mppriv));

		if (softsp->dvma_call_list_id != 0)
			ddi_run_callback(&softsp->dvma_call_list_id);

		break;
	}

	case DDI_DMA_SET_SBUS64:
	{
		struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;

		return (iommu_dma_lim_setup(dip, rdip, mppriv->softsp,
		    &mp->dmai_burstsizes, (uint_t)*lenp, &mp->dmai_minxfer,
		    DDI_DMA_SBUS_64BIT));
	}

	case DDI_DMA_HTOC:
		DPRINTF(IOMMU_DMAMCTL_HTOC_DEBUG, ("htoc off %lx mapping %lx "
		    "size %x\n", *offp, mp->dmai_mapping,
		    mp->dmai_size));

		if ((uint_t)(*offp) >= mp->dmai_size)
			return (DDI_FAILURE);

		cp = (ddi_dma_cookie_t *)objp;
		cp->dmac_notused = 0;
		cp->dmac_address = (mp->dmai_mapping + (uint_t)(*offp));
		cp->dmac_size =
		    mp->dmai_mapping + mp->dmai_size - cp->dmac_address;
		cp->dmac_type = 0;

		break;

	case DDI_DMA_KVADDR:
		/*
		 * If a physical address mapping has percolated this high,
		 * that is an error (maybe?).
		 */
		if (mp->dmai_rflags & DMP_PHYSADDR) {
			DPRINTF(IOMMU_DMAMCTL_KVADDR_DEBUG, ("kvaddr of phys "
			    "mapping\n"));
			return (DDI_FAILURE);
		}

		return (DDI_FAILURE);

	case DDI_DMA_NEXTWIN:
	{
		ddi_dma_win_t *owin, *nwin;
		uint_t winsize, newoff;
		int rval;

		DPRINTF(IOMMU_DMAMCTL_NEXTWIN_DEBUG, ("nextwin\n"));

		mp = (ddi_dma_impl_t *)handle;
		owin = (ddi_dma_win_t *)offp;
		nwin = (ddi_dma_win_t *)objp;
		if (mp->dmai_rflags & DDI_DMA_PARTIAL) {
			if (*owin == NULL) {
				DPRINTF(IOMMU_DMAMCTL_NEXTWIN_DEBUG,
				    ("nextwin: win == NULL\n"));
				mp->dmai_offset = 0;
				*nwin = (ddi_dma_win_t)mp;
				return (DDI_SUCCESS);
			}

			offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET);
			winsize = iommu_ptob(mp->dmai_ndvmapages -
			    iommu_btopr(offset));

			newoff = (uint_t)(mp->dmai_offset + winsize);
			if (newoff > mp->dmai_object.dmao_size -
			    mp->dmai_minxfer)
				return (DDI_DMA_DONE);

			if ((rval = iommu_map_window(mp, newoff, winsize))
			    != DDI_SUCCESS)
				return (rval);
		} else {
			DPRINTF(IOMMU_DMAMCTL_NEXTWIN_DEBUG, ("nextwin: no "
			    "partial mapping\n"));
			if (*owin != NULL)
				return (DDI_DMA_DONE);
			mp->dmai_offset = 0;
			*nwin = (ddi_dma_win_t)mp;
		}
		break;
	}

	case DDI_DMA_NEXTSEG:
	{
		ddi_dma_seg_t *oseg, *nseg;

		DPRINTF(IOMMU_DMAMCTL_NEXTSEG_DEBUG, ("nextseg:\n"));

		oseg = (ddi_dma_seg_t *)lenp;
		if (*oseg != NULL)
			return (DDI_DMA_DONE);
		nseg = (ddi_dma_seg_t *)objp;
		*nseg = *((ddi_dma_seg_t *)offp);
		break;
	}

	case DDI_DMA_SEGTOC:
	{
		ddi_dma_seg_impl_t *seg;

		seg = (ddi_dma_seg_impl_t *)handle;
		cp = (ddi_dma_cookie_t *)objp;
		cp->dmac_notused = 0;
		cp->dmac_address = (ioaddr_t)seg->dmai_mapping;
		cp->dmac_size = *lenp = seg->dmai_size;
		cp->dmac_type = 0;
		*offp = seg->dmai_offset;
		break;
	}

	case DDI_DMA_MOVWIN:
	{
		uint_t winsize;
		uint_t newoff;
		int rval;

		offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET);
		winsize = iommu_ptob(mp->dmai_ndvmapages - iommu_btopr(offset));

		DPRINTF(IOMMU_DMAMCTL_MOVWIN_DEBUG, ("movwin off %lx len %lx "
		    "winsize %x\n", *offp, *lenp, winsize));

		if ((mp->dmai_rflags & DDI_DMA_PARTIAL) == 0)
			return (DDI_FAILURE);

		if (*lenp != (uint_t)-1 && *lenp != winsize) {
			DPRINTF(IOMMU_DMAMCTL_MOVWIN_DEBUG, ("bad length\n"));
			return (DDI_FAILURE);
		}
		newoff = (uint_t)*offp;
		if (newoff & (winsize - 1)) {
			DPRINTF(IOMMU_DMAMCTL_MOVWIN_DEBUG, ("bad off\n"));
			return (DDI_FAILURE);
		}

		if (newoff == mp->dmai_offset) {
			/*
			 * Nothing to do...
			 */
			break;
		}

		/*
		 * Check out new address...
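		 * The offset has already been checked for window alignment
		 * above; it must also leave at least dmai_minxfer bytes of
		 * the object before we remap the window.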
		 */
		if (newoff > mp->dmai_object.dmao_size - mp->dmai_minxfer) {
			DPRINTF(IOMMU_DMAMCTL_MOVWIN_DEBUG, ("newoff out of "
			    "range\n"));
			return (DDI_FAILURE);
		}

		rval = iommu_map_window(mp, newoff, winsize);
		if (rval != DDI_SUCCESS)
			return (rval);

		if ((cp = (ddi_dma_cookie_t *)objp) != 0) {
			cp->dmac_notused = 0;
			cp->dmac_address = (ioaddr_t)mp->dmai_mapping;
			cp->dmac_size = mp->dmai_size;
			cp->dmac_type = 0;
		}
		*offp = (off_t)newoff;
		*lenp = (uint_t)winsize;
		break;
	}

	case DDI_DMA_REPWIN:
		if ((mp->dmai_rflags & DDI_DMA_PARTIAL) == 0) {
			DPRINTF(IOMMU_DMAMCTL_REPWIN_DEBUG, ("repwin fail\n"));
			return (DDI_FAILURE);
		}

		*offp = (off_t)mp->dmai_offset;

		addr = mp->dmai_ndvmapages -
		    iommu_btopr(mp->dmai_mapping & IOMMU_PAGEOFFSET);

		*lenp = (uint_t)iommu_ptob(addr);

		DPRINTF(IOMMU_DMAMCTL_REPWIN_DEBUG, ("repwin off %lx len %x\n",
		    mp->dmai_offset, mp->dmai_size));

		break;

	case DDI_DMA_GETERR:
		DPRINTF(IOMMU_DMAMCTL_GETERR_DEBUG,
		    ("iommu_dma_mctl: geterr\n"));

		break;

	case DDI_DMA_COFF:
		cp = (ddi_dma_cookie_t *)offp;
		addr = cp->dmac_address;

		if (addr < mp->dmai_mapping ||
		    addr >= mp->dmai_mapping + mp->dmai_size)
			return (DDI_FAILURE);

		*objp = (caddr_t)(addr - mp->dmai_mapping);

		DPRINTF(IOMMU_DMAMCTL_COFF_DEBUG, ("coff off %lx mapping %lx "
		    "size %x\n", (ulong_t)*objp, mp->dmai_mapping,
		    mp->dmai_size));

		break;

	case DDI_DMA_RESERVE:
	{
		struct ddi_dma_req *dmareq = (struct ddi_dma_req *)offp;
		ddi_dma_lim_t *dma_lim;
		ddi_dma_handle_t *handlep;
		uint_t np;
		ioaddr_t ioaddr;
		int i;
		struct fast_dvma *iommu_fast_dvma;
		struct sbus_soft_state *softsp =
		    (struct sbus_soft_state *)ddi_get_soft_state(sbusp,
		    ddi_get_instance(dip));

		/* Some simple sanity checks */
		dma_lim = dmareq->dmar_limits;
		if (dma_lim->dlim_burstsizes == 0) {
			DPRINTF(IOMMU_FASTDMA_RESERVE,
			    ("Reserve: bad burstsizes\n"));
			return (DDI_DMA_BADLIMITS);
		}
		if ((AHI <= ALO) || (AHI < softsp->iommu_dvma_base)) {
			DPRINTF(IOMMU_FASTDMA_RESERVE,
			    ("Reserve: bad limits\n"));
			return (DDI_DMA_BADLIMITS);
		}

		np = dmareq->dmar_object.dmao_size;
		mutex_enter(&softsp->dma_pool_lock);
		if (np > softsp->dma_reserve) {
			mutex_exit(&softsp->dma_pool_lock);
			DPRINTF(IOMMU_FASTDMA_RESERVE,
			    ("Reserve: dma_reserve is exhausted\n"));
			return (DDI_DMA_NORESOURCES);
		}

		softsp->dma_reserve -= np;
		mutex_exit(&softsp->dma_pool_lock);
		mp = kmem_zalloc(sizeof (*mp), KM_SLEEP);
		mp->dmai_rflags = DMP_BYPASSNEXUS;
		mp->dmai_rdip = rdip;
		mp->dmai_minxfer = dma_lim->dlim_minxfer;
		mp->dmai_burstsizes = dma_lim->dlim_burstsizes;

		ioaddr = (ioaddr_t)(uintptr_t)vmem_xalloc(softsp->dvma_arena,
		    iommu_ptob(np), IOMMU_PAGESIZE, 0,
		    dma_lim->dlim_cntr_max + 1,
		    (void *)(uintptr_t)ALO, (void *)(uintptr_t)(AHI + 1),
		    dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP);

		if (ioaddr == 0) {
			mutex_enter(&softsp->dma_pool_lock);
			softsp->dma_reserve += np;
			mutex_exit(&softsp->dma_pool_lock);
			kmem_free(mp, sizeof (*mp));
			DPRINTF(IOMMU_FASTDMA_RESERVE,
			    ("Reserve: No dvma resources available\n"));
			return (DDI_DMA_NOMAPPING);
		}

		/* create a per request structure */
		iommu_fast_dvma = kmem_alloc(sizeof (struct fast_dvma),
		    KM_SLEEP);

		/*
		 * We need to remember the size of each transfer so that
		 * we can figure out which virtual pages to sync when the
		 * transfer is complete.
		 */
		iommu_fast_dvma->pagecnt = kmem_zalloc(np *
		    sizeof (uint_t), KM_SLEEP);

		/* Allocate a streaming cache sync flag for each index */
		iommu_fast_dvma->sync_flag = kmem_zalloc(np *
		    sizeof (int), KM_SLEEP);

		/* Allocate a physical sync flag for each index */
		iommu_fast_dvma->phys_sync_flag =
		    kmem_zalloc(np * sizeof (uint64_t), KM_SLEEP);

		for (i = 0; i < np; i++)
			iommu_fast_dvma->phys_sync_flag[i] = va_to_pa((caddr_t)
			    &iommu_fast_dvma->sync_flag[i]);

		mp->dmai_mapping = ioaddr;
		mp->dmai_ndvmapages = np;
		iommu_fast_dvma->ops = &iommu_dvma_ops;
		iommu_fast_dvma->softsp = (caddr_t)softsp;
		mp->dmai_nexus_private = (caddr_t)iommu_fast_dvma;
		handlep = (ddi_dma_handle_t *)objp;
		*handlep = (ddi_dma_handle_t)mp;

		DPRINTF(IOMMU_FASTDMA_RESERVE,
		    ("Reserve: mapping object %p base addr %lx size %x\n",
		    (void *)mp, mp->dmai_mapping, mp->dmai_ndvmapages));

		break;
	}

	case DDI_DMA_RELEASE:
	{
		ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
		uint_t np = npages = mp->dmai_ndvmapages;
		ioaddr_t ioaddr = mp->dmai_mapping;
		volatile uint64_t *iotte_ptr;
		struct fast_dvma *iommu_fast_dvma = (struct fast_dvma *)
		    mp->dmai_nexus_private;
		struct sbus_soft_state *softsp = (struct sbus_soft_state *)
		    iommu_fast_dvma->softsp;

		ASSERT(softsp != NULL);

		/* Unload stale mappings and flush stale tlb's */
		iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);

		while (npages > (uint_t)0) {
			*iotte_ptr = (uint64_t)0;	/* unload tte */
			iommu_tlb_flush(softsp, ioaddr, 1);

			npages--;
			iotte_ptr++;
			ioaddr += IOMMU_PAGESIZE;
		}

		ioaddr = (ioaddr_t)mp->dmai_mapping;
		mutex_enter(&softsp->dma_pool_lock);
		softsp->dma_reserve += np;
		mutex_exit(&softsp->dma_pool_lock);

		if (mp->dmai_rflags & DMP_NOLIMIT)
			vmem_free(softsp->dvma_arena,
			    (void *)(uintptr_t)ioaddr, iommu_ptob(np));
		else
			vmem_xfree(softsp->dvma_arena,
			    (void *)(uintptr_t)ioaddr, iommu_ptob(np));

		kmem_free(mp, sizeof (*mp));
		kmem_free(iommu_fast_dvma->pagecnt, np * sizeof (uint_t));
		kmem_free(iommu_fast_dvma->sync_flag, np * sizeof (int));
		kmem_free(iommu_fast_dvma->phys_sync_flag, np *
		    sizeof (uint64_t));
		kmem_free(iommu_fast_dvma, sizeof (struct fast_dvma));


		DPRINTF(IOMMU_FASTDMA_RESERVE,
		    ("Release: Base addr %x size %x\n", ioaddr, np));
		/*
		 * Now that we've freed some resource,
		 * if there is anybody waiting for it
		 * try and get them going.
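		 * ddi_run_callback() restarts any request that was queued
		 * on dvma_call_list_id by ddi_set_callback() when an
		 * earlier allocation failed.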
		 */
		if (softsp->dvma_call_list_id != 0)
			ddi_run_callback(&softsp->dvma_call_list_id);

		break;
	}

	default:
		DPRINTF(IOMMU_DMAMCTL_DEBUG, ("iommu_dma_mctl: unknown option "
		    "0%x\n", request));

		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

/*ARGSUSED*/
void
iommu_dvma_kaddr_load(ddi_dma_handle_t h, caddr_t a, uint_t len, uint_t index,
    ddi_dma_cookie_t *cp)
{
	uintptr_t addr;
	ioaddr_t ioaddr;
	uint_t offset;
	pfn_t pfn;
	int npages;
	volatile uint64_t *iotte_ptr;
	uint64_t iotte_flag = 0;
	struct as *as = NULL;
	extern struct as kas;
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
	struct fast_dvma *iommu_fast_dvma =
	    (struct fast_dvma *)mp->dmai_nexus_private;
	struct sbus_soft_state *softsp = (struct sbus_soft_state *)
	    iommu_fast_dvma->softsp;
#if defined(DEBUG) && defined(IO_MEMUSAGE)
	struct io_mem_list *iomemp;
	pfn_t *pfnp;
#endif /* DEBUG && IO_MEMUSAGE */

	ASSERT(softsp != NULL);

	addr = (uintptr_t)a;
	ioaddr = (ioaddr_t)(mp->dmai_mapping + iommu_ptob(index));
	offset = (uint_t)(addr & IOMMU_PAGEOFFSET);
	iommu_fast_dvma->pagecnt[index] = iommu_btopr(len + offset);
	as = &kas;
	addr &= ~IOMMU_PAGEOFFSET;
	npages = iommu_btopr(len + offset);

#if defined(DEBUG) && defined(IO_MEMUSAGE)
	iomemp = kmem_alloc(sizeof (struct io_mem_list), KM_SLEEP);
	iomemp->rdip = mp->dmai_rdip;
	iomemp->ioaddr = ioaddr;
	iomemp->addr = addr;
	iomemp->npages = npages;
	pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) * (npages + 1),
	    KM_SLEEP);
#endif /* DEBUG && IO_MEMUSAGE */

	cp->dmac_address = ioaddr | offset;
	cp->dmac_size = len;

	iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
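	/*
	 * Pick the TTE protection/streaming flags below, then walk the
	 * request one IOMMU page at a time: translate the kernel virtual
	 * address to a pfn with hat_getpfnum(), flush the old TLB entry
	 * for that DVMA page, and write the new TTE into the TSB.
	 */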
	/* read/write and streaming io on */
	iotte_flag = IOTTE_VALID | IOTTE_WRITE | IOTTE_CACHE;

	if (mp->dmai_rflags & DDI_DMA_CONSISTENT)
		mp->dmai_rflags |= DMP_NOSYNC;
	else if (!softsp->stream_buf_off)
		iotte_flag |= IOTTE_STREAM;

	DPRINTF(IOMMU_FASTDMA_LOAD, ("kaddr_load: ioaddr %x "
	    "size %x offset %x index %x kaddr %lx\n",
	    ioaddr, len, offset, index, addr));
	ASSERT(npages > 0);
	do {
		pfn = hat_getpfnum(as->a_hat, (caddr_t)addr);
		if (pfn == PFN_INVALID) {
			DPRINTF(IOMMU_FASTDMA_LOAD, ("kaddr_load: invalid pfn "
			    "from hat_getpfnum()\n"));
		}

		iommu_tlb_flush(softsp, ioaddr, 1);

		/* load tte */
		*iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;

		npages--;
		iotte_ptr++;

		addr += IOMMU_PAGESIZE;
		ioaddr += IOMMU_PAGESIZE;

#if defined(DEBUG) && defined(IO_MEMUSAGE)
		*pfnp = pfn;
		pfnp++;
#endif /* DEBUG && IO_MEMUSAGE */

	} while (npages > 0);

#if defined(DEBUG) && defined(IO_MEMUSAGE)
	mutex_enter(&softsp->iomemlock);
	iomemp->next = softsp->iomem;
	softsp->iomem = iomemp;
	mutex_exit(&softsp->iomemlock);
#endif /* DEBUG && IO_MEMUSAGE */
}

/*ARGSUSED*/
void
iommu_dvma_unload(ddi_dma_handle_t h, uint_t index, uint_t view)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
	ioaddr_t ioaddr;
	pgcnt_t npages;
	struct fast_dvma *iommu_fast_dvma =
	    (struct fast_dvma *)mp->dmai_nexus_private;
	struct sbus_soft_state *softsp = (struct sbus_soft_state *)
	    iommu_fast_dvma->softsp;
#if defined(DEBUG) && defined(IO_MEMUSAGE)
	struct io_mem_list **prevp, *walk;
#endif /* DEBUG && IO_MEMUSAGE */

	ASSERT(softsp != NULL);

	ioaddr = (ioaddr_t)(mp->dmai_mapping + iommu_ptob(index));
	npages = iommu_fast_dvma->pagecnt[index];

#if defined(DEBUG) && defined(IO_MEMUSAGE)
	mutex_enter(&softsp->iomemlock);
	prevp = &softsp->iomem;
	walk = softsp->iomem;

	while (walk != NULL) {
		if (walk->ioaddr == ioaddr) {
			*prevp = walk->next;
			break;
		}
		prevp = &walk->next;
		walk = walk->next;
	}
	mutex_exit(&softsp->iomemlock);

	kmem_free(walk->pfn, sizeof (pfn_t) * (npages + 1));
	kmem_free(walk, sizeof (struct io_mem_list));
#endif /* DEBUG && IO_MEMUSAGE */

	DPRINTF(IOMMU_FASTDMA_SYNC, ("kaddr_unload: handle %p sync flag "
	    "addr %p sync flag pfn %llx index %x page count %lx\n", (void *)mp,
	    (void *)&iommu_fast_dvma->sync_flag[index],
	    iommu_fast_dvma->phys_sync_flag[index],
	    index, npages));

	if ((mp->dmai_rflags & DMP_NOSYNC) != DMP_NOSYNC) {
		sync_stream_buf(softsp, ioaddr, npages,
		    (int *)&iommu_fast_dvma->sync_flag[index],
		    iommu_fast_dvma->phys_sync_flag[index]);
	}
}

/*ARGSUSED*/
void
iommu_dvma_sync(ddi_dma_handle_t h, uint_t index, uint_t view)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
	ioaddr_t ioaddr;
	uint_t npages;
	struct fast_dvma *iommu_fast_dvma =
	    (struct fast_dvma *)mp->dmai_nexus_private;
	struct sbus_soft_state *softsp = (struct sbus_soft_state *)
	    iommu_fast_dvma->softsp;

	if ((mp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC)
		return;

	ASSERT(softsp != NULL);
	ioaddr = (ioaddr_t)(mp->dmai_mapping + iommu_ptob(index));
	npages = iommu_fast_dvma->pagecnt[index];

	DPRINTF(IOMMU_FASTDMA_SYNC, ("kaddr_sync: handle %p, "
	    "sync flag addr %p, sync flag pfn %llx\n", (void *)mp,
	    (void *)&iommu_fast_dvma->sync_flag[index],
	    iommu_fast_dvma->phys_sync_flag[index]));

	sync_stream_buf(softsp, ioaddr, npages,
	    (int *)&iommu_fast_dvma->sync_flag[index],
	    iommu_fast_dvma->phys_sync_flag[index]);
}