/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright 2012 Garrett D'Amore <garrett@damore.org>. All rights reserved.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/sysmacros.h>

#include <sys/ddidmareq.h>
#include <sys/sysiosbus.h>
#include <sys/iommu.h>
#include <sys/iocache.h>
#include <sys/dvma.h>

#include <vm/as.h>
#include <vm/hat.h>
#include <vm/page.h>
#include <vm/hat_sfmmu.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/vmsystm.h>
#include <sys/iommutsb.h>

/* Useful debugging Stuff */
#include <sys/nexusdebug.h>
#include <sys/debug.h>
/* Bitfield debugging definitions for this file */
#define	IOMMU_GETDVMAPAGES_DEBUG	0x1
#define	IOMMU_DMAMAP_DEBUG		0x2
#define	IOMMU_DMAMCTL_DEBUG		0x4
#define	IOMMU_DMAMCTL_SYNC_DEBUG	0x8
#define	IOMMU_DMAMCTL_HTOC_DEBUG	0x10
#define	IOMMU_DMAMCTL_KVADDR_DEBUG	0x20
#define	IOMMU_DMAMCTL_GETERR_DEBUG	0x400
#define	IOMMU_DMAMCTL_DMA_FREE_DEBUG	0x1000
#define	IOMMU_REGISTERS_DEBUG		0x2000
#define	IOMMU_DMA_SETUP_DEBUG		0x4000
#define	IOMMU_DMA_UNBINDHDL_DEBUG	0x8000
#define	IOMMU_DMA_BINDHDL_DEBUG		0x10000
#define	IOMMU_DMA_WIN_DEBUG		0x20000
#define	IOMMU_DMA_ALLOCHDL_DEBUG	0x40000
#define	IOMMU_DMA_LIM_SETUP_DEBUG	0x80000
#define	IOMMU_FASTDMA_RESERVE		0x100000
#define	IOMMU_FASTDMA_LOAD		0x200000
#define	IOMMU_INTER_INTRA_XFER		0x400000
#define	IOMMU_TTE			0x800000
#define	IOMMU_TLB			0x1000000
#define	IOMMU_FASTDMA_SYNC		0x2000000

/* Turn on if you need to keep track of outstanding IOMMU usage */
/* #define	IO_MEMUSAGE */
/* Turn on to debug IOMMU unmapping code */
/* #define	IO_MEMDEBUG */

static struct dvma_ops iommu_dvma_ops = {
	DVMAO_REV,
	iommu_dvma_kaddr_load,
	iommu_dvma_unload,
	iommu_dvma_sync
};

extern void *sbusp;		/* sbus soft state hook */

#define	DVMA_MAX_CACHE	65536

/*
 * This is the number of pages that a mapping request needs before we force
 * the TLB flush code to use diagnostic registers.  This value was determined
 * through a series of test runs measuring dma mapping setup performance.
 */
int tlb_flush_using_diag = 16;

int sysio_iommu_tsb_sizes[] = {
	IOMMU_TSB_SIZE_8M,
	IOMMU_TSB_SIZE_16M,
	IOMMU_TSB_SIZE_32M,
	IOMMU_TSB_SIZE_64M,
	IOMMU_TSB_SIZE_128M,
	IOMMU_TSB_SIZE_256M,
	IOMMU_TSB_SIZE_512M,
	IOMMU_TSB_SIZE_1G
};

static int iommu_map_window(ddi_dma_impl_t *, off_t, size_t);

int
iommu_init(struct sbus_soft_state *softsp, caddr_t address)
{
	int i;
	char name[40];

#ifdef DEBUG
	debug_info = 1;
#endif

	/*
	 * Simply add each register's offset to the base address
	 * to calculate the already mapped virtual address of
	 * the device register...
	 *
	 * define a macro for the pointer arithmetic; all registers
	 * are 64 bits wide and are defined as uint64_t's.
	 */

#define	REG_ADDR(b, o)	(uint64_t *)((caddr_t)(b) + (o))

	softsp->iommu_ctrl_reg = REG_ADDR(address, OFF_IOMMU_CTRL_REG);
	softsp->tsb_base_addr = REG_ADDR(address, OFF_TSB_BASE_ADDR);
	softsp->iommu_flush_reg = REG_ADDR(address, OFF_IOMMU_FLUSH_REG);
	softsp->iommu_tlb_tag = REG_ADDR(address, OFF_IOMMU_TLB_TAG);
	softsp->iommu_tlb_data = REG_ADDR(address, OFF_IOMMU_TLB_DATA);

#undef REG_ADDR

	mutex_init(&softsp->dma_pool_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&softsp->intr_poll_list_lock, NULL, MUTEX_DEFAULT, NULL);

	/* Set up the DVMA resource sizes */
	if ((softsp->iommu_tsb_cookie = iommu_tsb_alloc(softsp->upa_id)) ==
	    IOMMU_TSB_COOKIE_NONE) {
		cmn_err(CE_WARN, "%s%d: Unable to retrieve IOMMU array.",
		    ddi_driver_name(softsp->dip),
		    ddi_get_instance(softsp->dip));
		return (DDI_FAILURE);
	}
	softsp->soft_tsb_base_addr =
	    iommu_tsb_cookie_to_va(softsp->iommu_tsb_cookie);
	softsp->iommu_dvma_size =
	    iommu_tsb_cookie_to_size(softsp->iommu_tsb_cookie) <<
	    IOMMU_TSB_TO_RNG;
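	/*
	 * The DVMA arena sits at the very top of the 32-bit SBus address
	 * space: each TTE in the TSB maps one IOMMU page, so a TSB of N
	 * bytes covers N << IOMMU_TSB_TO_RNG bytes of DVMA space, and the
	 * base is chosen so that base + size wraps to zero.  For example,
	 * assuming the usual 8K IOMMU page and 8-byte TTE (a shift of 10),
	 * a 64K TSB maps 64MB of DVMA space based at 0xfc000000.
	 */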
	softsp->iommu_dvma_base = (ioaddr_t)
	    (0 - (ioaddr_t)softsp->iommu_dvma_size);

	(void) snprintf(name, sizeof (name), "%s%d_dvma",
	    ddi_driver_name(softsp->dip), ddi_get_instance(softsp->dip));

	/*
	 * Initialize the DVMA vmem arena.
	 */
	softsp->dvma_arena = vmem_create(name,
	    (void *)(uintptr_t)softsp->iommu_dvma_base,
	    softsp->iommu_dvma_size, PAGESIZE, NULL, NULL, NULL,
	    DVMA_MAX_CACHE, VM_SLEEP);

	/* Set the limit for dvma_reserve() to 1/2 of the total dvma space */
	softsp->dma_reserve = iommu_btop(softsp->iommu_dvma_size >> 1);

#if defined(DEBUG) && defined(IO_MEMUSAGE)
	mutex_init(&softsp->iomemlock, NULL, MUTEX_DEFAULT, NULL);
	softsp->iomem = (struct io_mem_list *)0;
#endif /* DEBUG && IO_MEMUSAGE */
	/*
	 * Get the base address of the TSB table and store it in the hardware
	 */

	/*
	 * We plan on the PROM flushing all TLB entries.  If this is not the
	 * case, this is where we should flush the hardware TLB.
	 */

	/* Set the IOMMU registers */
	(void) iommu_resume_init(softsp);

	/* check the convenient copy of TSB base, and flush write buffers */
	if (*softsp->tsb_base_addr !=
	    va_to_pa((caddr_t)softsp->soft_tsb_base_addr)) {
		iommu_tsb_free(softsp->iommu_tsb_cookie);
		return (DDI_FAILURE);
	}

	softsp->sbus_io_lo_pfn = UINT32_MAX;
	softsp->sbus_io_hi_pfn = 0;
	for (i = 0; i < sysio_pd_getnrng(softsp->dip); i++) {
		struct rangespec *rangep;
		uint64_t addr;
		pfn_t hipfn, lopfn;

		rangep = sysio_pd_getrng(softsp->dip, i);
		addr = (uint64_t)((uint64_t)rangep->rng_bustype << 32);
		addr |= (uint64_t)rangep->rng_offset;
		lopfn = (pfn_t)(addr >> MMU_PAGESHIFT);
		addr += (uint64_t)(rangep->rng_size - 1);
		hipfn = (pfn_t)(addr >> MMU_PAGESHIFT);

		softsp->sbus_io_lo_pfn = (lopfn < softsp->sbus_io_lo_pfn) ?
		    lopfn : softsp->sbus_io_lo_pfn;

		softsp->sbus_io_hi_pfn = (hipfn > softsp->sbus_io_hi_pfn) ?
		    hipfn : softsp->sbus_io_hi_pfn;
	}

	DPRINTF(IOMMU_REGISTERS_DEBUG, ("IOMMU Control reg: %p IOMMU TSB "
	    "base reg: %p IOMMU flush reg: %p TSB base addr %p\n",
	    (void *)softsp->iommu_ctrl_reg, (void *)softsp->tsb_base_addr,
	    (void *)softsp->iommu_flush_reg,
	    (void *)softsp->soft_tsb_base_addr));

	return (DDI_SUCCESS);
}

/*
 * function to uninitialize the iommu and release the tsb back to
 * the spare pool.  See startup.c for tsb spare management.
 */

int
iommu_uninit(struct sbus_soft_state *softsp)
{
	vmem_destroy(softsp->dvma_arena);

	/* flip off the IOMMU enable switch */
	*softsp->iommu_ctrl_reg &=
	    (TSB_SIZE << TSB_SIZE_SHIFT | IOMMU_DISABLE);

	iommu_tsb_free(softsp->iommu_tsb_cookie);

	return (DDI_SUCCESS);
}

/*
 * Initialize iommu hardware registers when the system is being resumed.
 * (Subset of iommu_init())
 */
int
iommu_resume_init(struct sbus_soft_state *softsp)
{
	int i;
	uint_t tsb_size;
	uint_t tsb_bytes;

	/*
	 * Reset the base address of the TSB table in the hardware
	 */
	*softsp->tsb_base_addr = va_to_pa((caddr_t)softsp->soft_tsb_base_addr);

	/*
	 * Figure out the correct size of the IOMMU TSB entries.  If we
	 * end up with a size smaller than that needed for 8M of IOMMU
	 * space, default the size to 8M.  XXX We could probably panic here
	 */
	i = sizeof (sysio_iommu_tsb_sizes) / sizeof (sysio_iommu_tsb_sizes[0])
	    - 1;

	tsb_bytes = iommu_tsb_cookie_to_size(softsp->iommu_tsb_cookie);

	while (i > 0) {
		if (tsb_bytes >= sysio_iommu_tsb_sizes[i])
			break;
		i--;
	}

	tsb_size = i;

	/* OK, let's flip the "on" switch of the IOMMU */
	*softsp->iommu_ctrl_reg = (uint64_t)(tsb_size << TSB_SIZE_SHIFT
	    | IOMMU_ENABLE | IOMMU_DIAG_ENABLE);

	return (DDI_SUCCESS);
}
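/*
 * Invalidate the IOMMU TLB entries covering [addr, addr + npages pages).
 * A single page is flushed with one write to the flush register.  For a
 * larger range it can be cheaper to walk the IOMMU_TLB_ENTRIES tag/data
 * diagnostic registers and flush only the valid entries that fall inside
 * the range; the mapping routines switch to that one-shot path once a
 * request exceeds tlb_flush_using_diag pages, otherwise they call this
 * function once per page.
 */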
void
iommu_tlb_flush(struct sbus_soft_state *softsp, ioaddr_t addr, pgcnt_t npages)
{
	volatile uint64_t tmpreg;
	volatile uint64_t *vaddr_reg, *valid_bit_reg;
	ioaddr_t hiaddr, ioaddr;
	int i, do_flush = 0;

	if (npages == 1) {
		*softsp->iommu_flush_reg = (uint64_t)addr;
		tmpreg = *softsp->sbus_ctrl_reg;
		return;
	}

	hiaddr = addr + (ioaddr_t)(npages * IOMMU_PAGESIZE);
	for (i = 0, vaddr_reg = softsp->iommu_tlb_tag,
	    valid_bit_reg = softsp->iommu_tlb_data;
	    i < IOMMU_TLB_ENTRIES; i++, vaddr_reg++, valid_bit_reg++) {
		tmpreg = *vaddr_reg;
		ioaddr = (ioaddr_t)((tmpreg & IOMMU_TLBTAG_VA_MASK) <<
		    IOMMU_TLBTAG_VA_SHIFT);

		DPRINTF(IOMMU_TLB, ("Vaddr reg 0x%p, "
		    "TLB vaddr reg %lx, IO addr 0x%x "
		    "Base addr 0x%x, Hi addr 0x%x\n",
		    (void *)vaddr_reg, tmpreg, ioaddr, addr, hiaddr));

		if (ioaddr >= addr && ioaddr <= hiaddr) {
			tmpreg = *valid_bit_reg;

			DPRINTF(IOMMU_TLB, ("Valid reg addr 0x%p, "
			    "TLB valid reg %lx\n",
			    (void *)valid_bit_reg, tmpreg));

			if (tmpreg & IOMMU_TLB_VALID) {
				*softsp->iommu_flush_reg = (uint64_t)ioaddr;
				do_flush = 1;
			}
		}
	}

	if (do_flush)
		tmpreg = *softsp->sbus_ctrl_reg;
}


/*
 * Shorthand defines
 */

#define	ALO		dma_lim->dlim_addr_lo
#define	AHI		dma_lim->dlim_addr_hi
#define	OBJSIZE		dmareq->dmar_object.dmao_size
#define	IOTTE_NDX(vaddr, base) (base + \
	(int)(iommu_btop((vaddr & ~IOMMU_PAGEMASK) - \
	softsp->iommu_dvma_base)))
/*
 * If DDI_DMA_PARTIAL flag is set and the request is for
 * less than MIN_DVMA_WIN_SIZE, it's not worth the hassle so
 * we turn off the DDI_DMA_PARTIAL flag
 */
#define	MIN_DVMA_WIN_SIZE	(128)
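/*
 * IOTTE_NDX converts a page-aligned DVMA address into a pointer to the
 * corresponding TTE in the soft copy of the TSB: the offset of the page
 * from iommu_dvma_base, in pages, indexes the array of TTEs.  For
 * example, assuming the usual 8K IOMMU page size and the 64MB arena
 * based at 0xfc000000 sketched above, DVMA address 0xfc004000 is page 2
 * of the arena and resolves to soft_tsb_base_addr + 2.
 */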
/* ARGSUSED */
void
iommu_remove_mappings(ddi_dma_impl_t *mp)
{
#if defined(DEBUG) && defined(IO_MEMDEBUG)
	pgcnt_t npages;
	ioaddr_t ioaddr;
	volatile uint64_t *iotte_ptr;
	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
	struct sbus_soft_state *softsp = mppriv->softsp;

#if defined(IO_MEMUSAGE)
	struct io_mem_list **prevp, *walk;
#endif /* IO_MEMUSAGE */

	ASSERT(softsp != NULL);
	/*
	 * Run thru the mapped entries and free 'em
	 */

	ioaddr = mp->dmai_mapping & ~IOMMU_PAGEOFFSET;
	npages = mp->dmai_ndvmapages;

#if defined(IO_MEMUSAGE)
	mutex_enter(&softsp->iomemlock);
	prevp = &softsp->iomem;
	walk = softsp->iomem;

	while (walk) {
		if (walk->ioaddr == ioaddr) {
			*prevp = walk->next;
			break;
		}

		prevp = &walk->next;
		walk = walk->next;
	}
	mutex_exit(&softsp->iomemlock);

	kmem_free(walk->pfn, sizeof (pfn_t) * (npages + 1));
	kmem_free(walk, sizeof (struct io_mem_list));
#endif /* IO_MEMUSAGE */

	iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);

	while (npages) {
		DPRINTF(IOMMU_DMAMCTL_DEBUG,
		    ("dma_mctl: freeing ioaddr %x iotte %p\n",
		    ioaddr, iotte_ptr));
		*iotte_ptr = (uint64_t)0;	/* unload tte */
		iommu_tlb_flush(softsp, ioaddr, 1);
		npages--;
		ioaddr += IOMMU_PAGESIZE;
		iotte_ptr++;
	}
#endif /* DEBUG && IO_MEMDEBUG */
}


int
iommu_create_vaddr_mappings(ddi_dma_impl_t *mp, uintptr_t addr)
{
	pfn_t pfn;
	struct as *as = NULL;
	pgcnt_t npages;
	ioaddr_t ioaddr;
	uint_t offset;
	volatile uint64_t *iotte_ptr;
	uint64_t tmp_iotte_flag;
	int rval = DDI_DMA_MAPPED;
	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
	struct sbus_soft_state *softsp = mppriv->softsp;
	int diag_tlb_flush;
#if defined(DEBUG) && defined(IO_MEMUSAGE)
	struct io_mem_list *iomemp;
	pfn_t *pfnp;
#endif /* DEBUG && IO_MEMUSAGE */

	ASSERT(softsp != NULL);

	/* Set Valid and Cache for mem xfer */
	tmp_iotte_flag = IOTTE_VALID | IOTTE_CACHE | IOTTE_WRITE | IOTTE_STREAM;

	offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET);
	npages = iommu_btopr(mp->dmai_size + offset);
	ioaddr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
	iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
	diag_tlb_flush = npages > tlb_flush_using_diag ? 1 : 0;

	as = mp->dmai_object.dmao_obj.virt_obj.v_as;
	if (as == NULL)
		as = &kas;

	/*
	 * Set the per object bits of the TTE here. We optimize this for
	 * the memory case so that the while loop overhead is minimal.
	 */
	/* Turn on NOSYNC if we need consistent mem */
	if (mp->dmai_rflags & DDI_DMA_CONSISTENT) {
		mp->dmai_rflags |= DMP_NOSYNC;
		tmp_iotte_flag ^= IOTTE_STREAM;
	/* Set streaming mode if not consistent mem */
	} else if (softsp->stream_buf_off) {
		tmp_iotte_flag ^= IOTTE_STREAM;
	}
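	/*
	 * With IOTTE_STREAM clear the transfer bypasses the SBus streaming
	 * buffer, so consistent mappings are marked DMP_NOSYNC and need no
	 * explicit sync.  Mappings that keep IOTTE_STREAM are drained later
	 * through sync_stream_buf() (see iommu_dma_unbindhdl()).
	 */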
#if defined(DEBUG) && defined(IO_MEMUSAGE)
	iomemp = kmem_alloc(sizeof (struct io_mem_list), KM_SLEEP);
	iomemp->rdip = mp->dmai_rdip;
	iomemp->ioaddr = ioaddr;
	iomemp->addr = addr;
	iomemp->npages = npages;
	pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) * (npages + 1),
	    KM_SLEEP);
#endif /* DEBUG && IO_MEMUSAGE */
	/*
	 * Grab the mappings from the dmmu and stick 'em into the
	 * iommu.
	 */
	ASSERT(npages != 0);

	/* If we're going to flush the TLB using diag mode, do it now. */
	if (diag_tlb_flush)
		iommu_tlb_flush(softsp, ioaddr, npages);

	do {
		uint64_t iotte_flag = tmp_iotte_flag;

		/*
		 * Fetch the pfn for the DMA object
		 */

		ASSERT(as);
		pfn = hat_getpfnum(as->a_hat, (caddr_t)addr);
		ASSERT(pfn != PFN_INVALID);

		if (!pf_is_memory(pfn)) {
			/* DVMA'ing to IO space */

			/* Turn off cache bit if set */
			if (iotte_flag & IOTTE_CACHE)
				iotte_flag ^= IOTTE_CACHE;

			/* Turn off stream bit if set */
			if (iotte_flag & IOTTE_STREAM)
				iotte_flag ^= IOTTE_STREAM;

			if (IS_INTRA_SBUS(softsp, pfn)) {
				/* Intra sbus transfer */

				/* Turn on intra flag */
				iotte_flag |= IOTTE_INTRA;

				DPRINTF(IOMMU_INTER_INTRA_XFER, (
				    "Intra xfer pfnum %lx TTE %lx\n",
				    pfn, iotte_flag));
			} else {
				if (pf_is_dmacapable(pfn) == 1) {
					/*EMPTY*/
					DPRINTF(IOMMU_INTER_INTRA_XFER,
					    ("Inter xfer pfnum %lx "
					    "tte hi %lx\n",
					    pfn, iotte_flag));
				} else {
					rval = DDI_DMA_NOMAPPING;
#if defined(DEBUG) && defined(IO_MEMDEBUG)
					goto bad;
#endif /* DEBUG && IO_MEMDEBUG */
				}
			}
		}
		addr += IOMMU_PAGESIZE;

		DPRINTF(IOMMU_TTE, ("vaddr mapping: tte index %p pfn %lx "
		    "tte flag %lx addr %lx ioaddr %x\n",
		    (void *)iotte_ptr, pfn, iotte_flag, addr, ioaddr));

		/* Flush the IOMMU TLB before loading a new mapping */
		if (!diag_tlb_flush)
			iommu_tlb_flush(softsp, ioaddr, 1);

		/* Set the hardware IO TTE */
		*iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;

		ioaddr += IOMMU_PAGESIZE;
		npages--;
		iotte_ptr++;
#if defined(DEBUG) && defined(IO_MEMUSAGE)
		*pfnp = pfn;
		pfnp++;
#endif /* DEBUG && IO_MEMUSAGE */
	} while (npages != 0);

#if defined(DEBUG) && defined(IO_MEMUSAGE)
	mutex_enter(&softsp->iomemlock);
	iomemp->next = softsp->iomem;
	softsp->iomem = iomemp;
	mutex_exit(&softsp->iomemlock);
#endif /* DEBUG && IO_MEMUSAGE */

	return (rval);

#if defined(DEBUG) && defined(IO_MEMDEBUG)
bad:
	/* If we fail a mapping, free up any mapping resources used */
	iommu_remove_mappings(mp);
	return (rval);
#endif /* DEBUG && IO_MEMDEBUG */
}

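/*
 * Like iommu_create_vaddr_mappings(), but the object is described by a
 * page_t chain (pp) or a shadow page list (pplist) rather than a virtual
 * address, so the pfns come straight from the page structures instead of
 * hat_getpfnum() and the I/O-space checks done for vaddr mappings do not
 * apply.
 */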
int
iommu_create_pp_mappings(ddi_dma_impl_t *mp, page_t *pp, page_t **pplist)
{
	pfn_t pfn;
	pgcnt_t npages;
	ioaddr_t ioaddr;
	uint_t offset;
	volatile uint64_t *iotte_ptr;
	uint64_t tmp_iotte_flag;
	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
	struct sbus_soft_state *softsp = mppriv->softsp;
	int diag_tlb_flush;
#if defined(DEBUG) && defined(IO_MEMUSAGE)
	struct io_mem_list *iomemp;
	pfn_t *pfnp;
#endif /* DEBUG && IO_MEMUSAGE */
	int rval = DDI_DMA_MAPPED;

	/* Set Valid and Cache for mem xfer */
	tmp_iotte_flag = IOTTE_VALID | IOTTE_CACHE | IOTTE_WRITE | IOTTE_STREAM;

	ASSERT(softsp != NULL);

	offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET);
	npages = iommu_btopr(mp->dmai_size + offset);
	ioaddr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
	iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
	diag_tlb_flush = npages > tlb_flush_using_diag ? 1 : 0;

	/*
	 * Set the per object bits of the TTE here. We optimize this for
	 * the memory case so that the while loop overhead is minimal.
	 */
	if (mp->dmai_rflags & DDI_DMA_CONSISTENT) {
		/* Turn on NOSYNC if we need consistent mem */
		mp->dmai_rflags |= DMP_NOSYNC;
		tmp_iotte_flag ^= IOTTE_STREAM;
	} else if (softsp->stream_buf_off) {
		/* Set streaming mode if not consistent mem */
		tmp_iotte_flag ^= IOTTE_STREAM;
	}

#if defined(DEBUG) && defined(IO_MEMUSAGE)
	iomemp = kmem_alloc(sizeof (struct io_mem_list), KM_SLEEP);
	iomemp->rdip = mp->dmai_rdip;
	iomemp->ioaddr = ioaddr;
	iomemp->npages = npages;
	pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) * (npages + 1),
	    KM_SLEEP);
#endif /* DEBUG && IO_MEMUSAGE */
	/*
	 * Grab the mappings from the dmmu and stick 'em into the
	 * iommu.
	 */
	ASSERT(npages != 0);

	/* If we're going to flush the TLB using diag mode, do it now. */
	if (diag_tlb_flush)
		iommu_tlb_flush(softsp, ioaddr, npages);

	do {
		uint64_t iotte_flag;

		iotte_flag = tmp_iotte_flag;

		if (pp != NULL) {
			pfn = pp->p_pagenum;
			pp = pp->p_next;
		} else {
			pfn = (*pplist)->p_pagenum;
			pplist++;
		}

		DPRINTF(IOMMU_TTE, ("pp mapping TTE index %p pfn %lx "
		    "tte flag %lx ioaddr %x\n", (void *)iotte_ptr,
		    pfn, iotte_flag, ioaddr));

		/* Flush the IOMMU TLB before loading a new mapping */
		if (!diag_tlb_flush)
			iommu_tlb_flush(softsp, ioaddr, 1);

		/* Set the hardware IO TTE */
		*iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;

		ioaddr += IOMMU_PAGESIZE;
		npages--;
		iotte_ptr++;

#if defined(DEBUG) && defined(IO_MEMUSAGE)
		*pfnp = pfn;
		pfnp++;
#endif /* DEBUG && IO_MEMUSAGE */

	} while (npages != 0);

#if defined(DEBUG) && defined(IO_MEMUSAGE)
	mutex_enter(&softsp->iomemlock);
	iomemp->next = softsp->iomem;
	softsp->iomem = iomemp;
	mutex_exit(&softsp->iomemlock);
#endif /* DEBUG && IO_MEMUSAGE */

	return (rval);
}


int
iommu_dma_lim_setup(dev_info_t *dip, dev_info_t *rdip,
    struct sbus_soft_state *softsp, uint_t *burstsizep, uint_t burstsize64,
    uint_t *minxferp, uint_t dma_flags)
{
	struct regspec *rp;

	/* Take care of 64 bit limits. */
	if (!(dma_flags & DDI_DMA_SBUS_64BIT)) {
		/*
		 * return burst size for 32-bit mode
		 */
		*burstsizep &= softsp->sbus_burst_sizes;
		return (DDI_FAILURE);
	}

	/*
	 * check if SBus supports 64 bit and if caller
	 * is child of SBus.  No support through bridges
	 */
	if (!softsp->sbus64_burst_sizes || (ddi_get_parent(rdip) != dip)) {
		/*
		 * SBus doesn't support it or bridge. Do 32-bit
		 * xfers
		 */
		*burstsizep &= softsp->sbus_burst_sizes;
		return (DDI_FAILURE);
	}

	rp = ddi_rnumber_to_regspec(rdip, 0);
	if (rp == NULL) {
		*burstsizep &= softsp->sbus_burst_sizes;
		return (DDI_FAILURE);
	}

	/* Check for old-style 64 bit burstsizes */
	if (burstsize64 & SYSIO64_BURST_MASK) {
		/* Scale back burstsizes if Necessary */
		*burstsizep &= (softsp->sbus64_burst_sizes |
		    softsp->sbus_burst_sizes);
	} else {
		/* Get the 64 bit burstsizes. */
		*burstsizep = burstsize64;

		/* Scale back burstsizes if Necessary */
		*burstsizep &= (softsp->sbus64_burst_sizes >>
		    SYSIO64_BURST_SHIFT);
	}

	/*
	 * Set the largest value of the smallest burstsize that the
	 * device or the bus can manage.
	 */
	*minxferp = MAX(*minxferp,
	    (1 << (ddi_ffs(softsp->sbus64_burst_sizes) - 1)));

	return (DDI_SUCCESS);
}

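/*
 * The iommu_dma_*() routines below are not called directly by leaf
 * drivers; the DDI framework routes the generic DMA interfaces to the
 * corresponding bus_dma ops of this nexus.  A rough sketch of a typical
 * consumer (the "xx" names, attributes and buf pointer are illustrative
 * only):
 *
 *	ddi_dma_handle_t h;
 *	ddi_dma_cookie_t cookie;
 *	uint_t ccount;
 *
 *	(void) ddi_dma_alloc_handle(xx_dip, &xx_dma_attr, DDI_DMA_SLEEP,
 *	    NULL, &h);				-> iommu_dma_allochdl()
 *	(void) ddi_dma_buf_bind_handle(h, xx_bp,
 *	    DDI_DMA_READ | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
 *	    &cookie, &ccount);			-> iommu_dma_bindhdl()
 *	... program the device with cookie.dmac_address ...
 *	(void) ddi_dma_unbind_handle(h);	-> iommu_dma_unbindhdl()
 *	ddi_dma_free_handle(&h);		-> iommu_dma_freehdl()
 */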
int
iommu_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_attr_t *dma_attr, int (*waitfp)(caddr_t), caddr_t arg,
    ddi_dma_handle_t *handlep)
{
	ioaddr_t addrlow, addrhigh, segalign;
	ddi_dma_impl_t *mp;
	struct dma_impl_priv *mppriv;
	struct sbus_soft_state *softsp = (struct sbus_soft_state *)
	    ddi_get_soft_state(sbusp, ddi_get_instance(dip));

	/*
	 * Setup dma burstsizes and min-xfer counts.
	 */
	(void) iommu_dma_lim_setup(dip, rdip, softsp,
	    &dma_attr->dma_attr_burstsizes,
	    dma_attr->dma_attr_burstsizes, &dma_attr->dma_attr_minxfer,
	    dma_attr->dma_attr_flags);

	if (dma_attr->dma_attr_burstsizes == 0)
		return (DDI_DMA_BADATTR);

	addrlow = (ioaddr_t)dma_attr->dma_attr_addr_lo;
	addrhigh = (ioaddr_t)dma_attr->dma_attr_addr_hi;
	segalign = (ioaddr_t)dma_attr->dma_attr_seg;

	/*
	 * Check sanity for hi and lo address limits
	 */
	if ((addrhigh <= addrlow) ||
	    (addrhigh < (ioaddr_t)softsp->iommu_dvma_base)) {
		return (DDI_DMA_BADATTR);
	}
	if (dma_attr->dma_attr_flags & DDI_DMA_FORCE_PHYSICAL)
		return (DDI_DMA_BADATTR);

	mppriv = kmem_zalloc(sizeof (*mppriv),
	    (waitfp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP);

	if (mppriv == NULL) {
		if (waitfp != DDI_DMA_DONTWAIT) {
			ddi_set_callback(waitfp, arg,
			    &softsp->dvma_call_list_id);
		}
		return (DDI_DMA_NORESOURCES);
	}
	mp = (ddi_dma_impl_t *)mppriv;

	DPRINTF(IOMMU_DMA_ALLOCHDL_DEBUG, ("dma_allochdl: (%s) handle %p "
	    "hi %x lo %x min %x burst %x\n",
	    ddi_get_name(dip), (void *)mp, addrhigh, addrlow,
	    dma_attr->dma_attr_minxfer, dma_attr->dma_attr_burstsizes));

	mp->dmai_rdip = rdip;
	mp->dmai_minxfer = (uint_t)dma_attr->dma_attr_minxfer;
	mp->dmai_burstsizes = (uint_t)dma_attr->dma_attr_burstsizes;
	mp->dmai_attr = *dma_attr;
	/* See if the DMA engine has any limit restrictions. */
	if (segalign == (ioaddr_t)UINT32_MAX &&
	    addrhigh == (ioaddr_t)UINT32_MAX &&
	    (dma_attr->dma_attr_align <= IOMMU_PAGESIZE) && addrlow == 0) {
		mp->dmai_rflags |= DMP_NOLIMIT;
	}
	mppriv->softsp = softsp;
	mppriv->phys_sync_flag = va_to_pa((caddr_t)&mppriv->sync_flag);

	*handlep = (ddi_dma_handle_t)mp;
	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
iommu_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)handle;
	struct sbus_soft_state *softsp = mppriv->softsp;
	ASSERT(softsp != NULL);

	kmem_free(mppriv, sizeof (*mppriv));

	if (softsp->dvma_call_list_id != 0) {
		ddi_run_callback(&softsp->dvma_call_list_id);
	}
	return (DDI_SUCCESS);
}

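/*
 * Clip the window size to what the device attributes allow.  The size is
 * limited first by the segment boundary and then by the addressable
 * range; if the object still does not fit and DDI_DMA_PARTIAL was not
 * requested, the bind fails with DDI_DMA_TOOBIG.  For example, a 100K
 * request against dma_attr_seg = 0xffff is cut back to a 64K window when
 * DDI_DMA_PARTIAL is set.
 */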
static int
check_dma_attr(struct ddi_dma_req *dmareq, ddi_dma_attr_t *dma_attr,
    uint32_t *size)
{
	ioaddr_t addrlow;
	ioaddr_t addrhigh;
	uint32_t segalign;
	uint32_t smask;

	smask = *size - 1;
	segalign = dma_attr->dma_attr_seg;
	if (smask > segalign) {
		if ((dmareq->dmar_flags & DDI_DMA_PARTIAL) == 0)
			return (DDI_DMA_TOOBIG);
		*size = segalign + 1;
	}
	addrlow = (ioaddr_t)dma_attr->dma_attr_addr_lo;
	addrhigh = (ioaddr_t)dma_attr->dma_attr_addr_hi;
	if (addrlow + smask > addrhigh || addrlow + smask < addrlow) {
		if (!((addrlow + dmareq->dmar_object.dmao_size == 0) &&
		    (addrhigh == (ioaddr_t)-1))) {
			if ((dmareq->dmar_flags & DDI_DMA_PARTIAL) == 0)
				return (DDI_DMA_TOOBIG);
			*size = MIN(addrhigh - addrlow + 1, *size);
		}
	}
	return (DDI_DMA_MAPOK);
}

int
iommu_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	page_t *pp;
	uint32_t size;
	ioaddr_t ioaddr;
	uint_t offset;
	uintptr_t addr = 0;
	pgcnt_t npages;
	int rval;
	ddi_dma_attr_t *dma_attr;
	struct sbus_soft_state *softsp;
	struct page **pplist = NULL;
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;

#ifdef lint
	dip = dip;
	rdip = rdip;
#endif

	if (mp->dmai_inuse)
		return (DDI_DMA_INUSE);

	dma_attr = &mp->dmai_attr;
	size = (uint32_t)dmareq->dmar_object.dmao_size;
	if (!(mp->dmai_rflags & DMP_NOLIMIT)) {
		rval = check_dma_attr(dmareq, dma_attr, &size);
		if (rval != DDI_DMA_MAPOK)
			return (rval);
	}
	mp->dmai_inuse = 1;
	mp->dmai_offset = 0;
	mp->dmai_rflags = (dmareq->dmar_flags & DMP_DDIFLAGS) |
	    (mp->dmai_rflags & DMP_NOLIMIT);

	switch (dmareq->dmar_object.dmao_type) {
	case DMA_OTYP_VADDR:
	case DMA_OTYP_BUFVADDR:
		addr = (uintptr_t)dmareq->dmar_object.dmao_obj.virt_obj.v_addr;
		offset = addr & IOMMU_PAGEOFFSET;
		pplist = dmareq->dmar_object.dmao_obj.virt_obj.v_priv;
		npages = iommu_btopr(OBJSIZE + offset);

		DPRINTF(IOMMU_DMAMAP_DEBUG, ("dma_map vaddr: %lx pages "
		    "req addr %lx off %x OBJSIZE %x\n",
		    npages, addr, offset, OBJSIZE));

		/* We don't need the addr anymore if we have a shadow list */
		if (pplist != NULL)
			addr = NULL;
		pp = NULL;
		break;

	case DMA_OTYP_PAGES:
		pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp;
		offset = dmareq->dmar_object.dmao_obj.pp_obj.pp_offset;
		npages = iommu_btopr(OBJSIZE + offset);
		break;

	case DMA_OTYP_PADDR:
	default:
		/*
		 * Not a supported type for this implementation
		 */
		rval = DDI_DMA_NOMAPPING;
		goto bad;
	}

	/* Get our soft state once we know we're mapping an object. */
	softsp = mppriv->softsp;
	ASSERT(softsp != NULL);

	if (mp->dmai_rflags & DDI_DMA_PARTIAL) {
		if (size != OBJSIZE) {
			/*
			 * If the request is for partial mapping arrangement,
			 * the device has to be able to address at least the
			 * size of the window we are establishing.
			 */
			if (size < iommu_ptob(MIN_DVMA_WIN_SIZE)) {
				rval = DDI_DMA_NOMAPPING;
				goto bad;
			}
			npages = iommu_btopr(size + offset);
		}
		/*
		 * If the size requested is less than a moderate amt,
		 * skip the partial mapping stuff- it's not worth the
		 * effort.
		 */
		if (npages > MIN_DVMA_WIN_SIZE) {
			npages = MIN_DVMA_WIN_SIZE + iommu_btopr(offset);
			size = iommu_ptob(MIN_DVMA_WIN_SIZE);
			DPRINTF(IOMMU_DMA_SETUP_DEBUG, ("dma_setup: SZ %x pg "
			    "%lx sz %x\n", OBJSIZE, npages, size));
			if (pplist != NULL) {
				mp->dmai_minfo = (void *)pplist;
				mp->dmai_rflags |= DMP_SHADOW;
			}
		} else {
			mp->dmai_rflags ^= DDI_DMA_PARTIAL;
		}
	} else {
		if (npages >= iommu_btop(softsp->iommu_dvma_size) -
		    MIN_DVMA_WIN_SIZE) {
			rval = DDI_DMA_TOOBIG;
			goto bad;
		}
	}

	/*
	 * save dmareq-object, size and npages into mp
	 */
	mp->dmai_object = dmareq->dmar_object;
	mp->dmai_size = size;
	mp->dmai_ndvmapages = npages;

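	/*
	 * Allocate DVMA space.  Handles marked DMP_NOLIMIT (no addressing
	 * restrictions, see iommu_dma_allochdl()) can use plain vmem_alloc()
	 * and benefit from the arena's quantum caches; otherwise
	 * vmem_xalloc() is used so the alignment, segment boundary and
	 * address range constraints of the device are honored.
	 */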
	if (mp->dmai_rflags & DMP_NOLIMIT) {
		ioaddr = (ioaddr_t)(uintptr_t)vmem_alloc(softsp->dvma_arena,
		    iommu_ptob(npages),
		    dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP);
		if (ioaddr == 0) {
			rval = DDI_DMA_NORESOURCES;
			goto bad;
		}

		/*
		 * If we have a 1 page request and we're working with a page
		 * list, we're going to speed load an IOMMU entry.
		 */
		if (npages == 1 && !addr) {
			uint64_t iotte_flag = IOTTE_VALID | IOTTE_CACHE |
			    IOTTE_WRITE | IOTTE_STREAM;
			volatile uint64_t *iotte_ptr;
			pfn_t pfn;
#if defined(DEBUG) && defined(IO_MEMUSAGE)
			struct io_mem_list *iomemp;
			pfn_t *pfnp;
#endif /* DEBUG && IO_MEMUSAGE */

			iotte_ptr = IOTTE_NDX(ioaddr,
			    softsp->soft_tsb_base_addr);

			if (mp->dmai_rflags & DDI_DMA_CONSISTENT) {
				mp->dmai_rflags |= DMP_NOSYNC;
				iotte_flag ^= IOTTE_STREAM;
			} else if (softsp->stream_buf_off)
				iotte_flag ^= IOTTE_STREAM;

			mp->dmai_rflags ^= DDI_DMA_PARTIAL;

			if (pp != NULL)
				pfn = pp->p_pagenum;
			else
				pfn = (*pplist)->p_pagenum;

			iommu_tlb_flush(softsp, ioaddr, 1);

			*iotte_ptr =
			    ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;

			mp->dmai_mapping = (ioaddr_t)(ioaddr + offset);
			mp->dmai_nwin = 0;
			if (cp != NULL) {
				cp->dmac_notused = 0;
				cp->dmac_address = (ioaddr_t)mp->dmai_mapping;
				cp->dmac_size = mp->dmai_size;
				cp->dmac_type = 0;
				*ccountp = 1;
			}

			DPRINTF(IOMMU_TTE, ("speed loading: TTE index %p "
			    "pfn %lx tte flag %lx addr %lx ioaddr %x\n",
			    (void *)iotte_ptr, pfn, iotte_flag, addr, ioaddr));

#if defined(DEBUG) && defined(IO_MEMUSAGE)
			iomemp = kmem_alloc(sizeof (struct io_mem_list),
			    KM_SLEEP);
			iomemp->rdip = mp->dmai_rdip;
			iomemp->ioaddr = ioaddr;
			iomemp->addr = addr;
			iomemp->npages = npages;
			pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) *
			    (npages + 1), KM_SLEEP);
			*pfnp = pfn;
			mutex_enter(&softsp->iomemlock);
			iomemp->next = softsp->iomem;
			softsp->iomem = iomemp;
			mutex_exit(&softsp->iomemlock);
#endif /* DEBUG && IO_MEMUSAGE */

			return (DDI_DMA_MAPPED);
		}
	} else {
(ioaddr_t)(uintptr_t)vmem_xalloc(softsp->dvma_arena, 10477c478bd9Sstevel@tonic-gate iommu_ptob(npages), 10487c478bd9Sstevel@tonic-gate MAX((uint_t)dma_attr->dma_attr_align, IOMMU_PAGESIZE), 0, 10497c478bd9Sstevel@tonic-gate (uint_t)dma_attr->dma_attr_seg + 1, 1050ffadc26eSmike_s (void *)(uintptr_t)(ioaddr_t)dma_attr->dma_attr_addr_lo, 1051ffadc26eSmike_s (void *)(uintptr_t) 1052ffadc26eSmike_s ((ioaddr_t)dma_attr->dma_attr_addr_hi + 1), 10537c478bd9Sstevel@tonic-gate dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP); 10547c478bd9Sstevel@tonic-gate } 10557c478bd9Sstevel@tonic-gate 10567c478bd9Sstevel@tonic-gate if (ioaddr == 0) { 10577c478bd9Sstevel@tonic-gate if (dmareq->dmar_fp == DDI_DMA_SLEEP) 10587c478bd9Sstevel@tonic-gate rval = DDI_DMA_NOMAPPING; 10597c478bd9Sstevel@tonic-gate else 10607c478bd9Sstevel@tonic-gate rval = DDI_DMA_NORESOURCES; 10617c478bd9Sstevel@tonic-gate goto bad; 10627c478bd9Sstevel@tonic-gate } 10637c478bd9Sstevel@tonic-gate 10647c478bd9Sstevel@tonic-gate mp->dmai_mapping = ioaddr + offset; 10657c478bd9Sstevel@tonic-gate ASSERT(mp->dmai_mapping >= softsp->iommu_dvma_base); 10667c478bd9Sstevel@tonic-gate 10677c478bd9Sstevel@tonic-gate /* 10687c478bd9Sstevel@tonic-gate * At this point we have a range of virtual address allocated 10697c478bd9Sstevel@tonic-gate * with which we now have to map to the requested object. 10707c478bd9Sstevel@tonic-gate */ 10717c478bd9Sstevel@tonic-gate if (addr) { 10727c478bd9Sstevel@tonic-gate rval = iommu_create_vaddr_mappings(mp, 10737c478bd9Sstevel@tonic-gate addr & ~IOMMU_PAGEOFFSET); 10747c478bd9Sstevel@tonic-gate if (rval == DDI_DMA_NOMAPPING) 10757c478bd9Sstevel@tonic-gate goto bad_nomap; 10767c478bd9Sstevel@tonic-gate } else { 10777c478bd9Sstevel@tonic-gate rval = iommu_create_pp_mappings(mp, pp, pplist); 10787c478bd9Sstevel@tonic-gate if (rval == DDI_DMA_NOMAPPING) 10797c478bd9Sstevel@tonic-gate goto bad_nomap; 10807c478bd9Sstevel@tonic-gate } 10817c478bd9Sstevel@tonic-gate 10827c478bd9Sstevel@tonic-gate if (cp) { 10837c478bd9Sstevel@tonic-gate cp->dmac_notused = 0; 10847c478bd9Sstevel@tonic-gate cp->dmac_address = (ioaddr_t)mp->dmai_mapping; 10857c478bd9Sstevel@tonic-gate cp->dmac_size = mp->dmai_size; 10867c478bd9Sstevel@tonic-gate cp->dmac_type = 0; 10877c478bd9Sstevel@tonic-gate *ccountp = 1; 10887c478bd9Sstevel@tonic-gate } 10897c478bd9Sstevel@tonic-gate if (mp->dmai_rflags & DDI_DMA_PARTIAL) { 10907c478bd9Sstevel@tonic-gate size = iommu_ptob(mp->dmai_ndvmapages - iommu_btopr(offset)); 10917c478bd9Sstevel@tonic-gate mp->dmai_nwin = 10927c478bd9Sstevel@tonic-gate (dmareq->dmar_object.dmao_size + (size - 1)) / size; 10937c478bd9Sstevel@tonic-gate return (DDI_DMA_PARTIAL_MAP); 10947c478bd9Sstevel@tonic-gate } else { 10957c478bd9Sstevel@tonic-gate mp->dmai_nwin = 0; 10967c478bd9Sstevel@tonic-gate return (DDI_DMA_MAPPED); 10977c478bd9Sstevel@tonic-gate } 10987c478bd9Sstevel@tonic-gate 10997c478bd9Sstevel@tonic-gate bad_nomap: 11007c478bd9Sstevel@tonic-gate /* 11017c478bd9Sstevel@tonic-gate * Could not create mmu mappings. 
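 * The DVMA space just allocated is returned to the arena (vmem_free
 * for the unconstrained DMP_NOLIMIT case, vmem_xfree for the
 * constrained allocation) and control falls into the common error
 * exit, which schedules a DDI callback for DDI_DMA_NORESOURCES
 * failures unless the caller passed DDI_DMA_DONTWAIT, and marks the
 * handle as no longer in use.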
11027c478bd9Sstevel@tonic-gate */ 11037c478bd9Sstevel@tonic-gate if (mp->dmai_rflags & DMP_NOLIMIT) { 1104ffadc26eSmike_s vmem_free(softsp->dvma_arena, (void *)(uintptr_t)ioaddr, 11057c478bd9Sstevel@tonic-gate iommu_ptob(npages)); 11067c478bd9Sstevel@tonic-gate } else { 1107ffadc26eSmike_s vmem_xfree(softsp->dvma_arena, (void *)(uintptr_t)ioaddr, 11087c478bd9Sstevel@tonic-gate iommu_ptob(npages)); 11097c478bd9Sstevel@tonic-gate } 11107c478bd9Sstevel@tonic-gate 11117c478bd9Sstevel@tonic-gate bad: 11127c478bd9Sstevel@tonic-gate if (rval == DDI_DMA_NORESOURCES && 11137c478bd9Sstevel@tonic-gate dmareq->dmar_fp != DDI_DMA_DONTWAIT) { 11147c478bd9Sstevel@tonic-gate ddi_set_callback(dmareq->dmar_fp, 11157c478bd9Sstevel@tonic-gate dmareq->dmar_arg, &softsp->dvma_call_list_id); 11167c478bd9Sstevel@tonic-gate } 11177c478bd9Sstevel@tonic-gate mp->dmai_inuse = 0; 11187c478bd9Sstevel@tonic-gate return (rval); 11197c478bd9Sstevel@tonic-gate } 11207c478bd9Sstevel@tonic-gate 11217c478bd9Sstevel@tonic-gate /* ARGSUSED */ 11227c478bd9Sstevel@tonic-gate int 11237c478bd9Sstevel@tonic-gate iommu_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, 11247c478bd9Sstevel@tonic-gate ddi_dma_handle_t handle) 11257c478bd9Sstevel@tonic-gate { 11267c478bd9Sstevel@tonic-gate ioaddr_t addr; 11277c478bd9Sstevel@tonic-gate uint_t npages; 11287c478bd9Sstevel@tonic-gate size_t size; 11297c478bd9Sstevel@tonic-gate ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 11307c478bd9Sstevel@tonic-gate struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp; 11317c478bd9Sstevel@tonic-gate struct sbus_soft_state *softsp = mppriv->softsp; 11327c478bd9Sstevel@tonic-gate ASSERT(softsp != NULL); 11337c478bd9Sstevel@tonic-gate 11347c478bd9Sstevel@tonic-gate addr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET); 11357c478bd9Sstevel@tonic-gate npages = mp->dmai_ndvmapages; 11367c478bd9Sstevel@tonic-gate size = iommu_ptob(npages); 11377c478bd9Sstevel@tonic-gate 11387c478bd9Sstevel@tonic-gate DPRINTF(IOMMU_DMA_UNBINDHDL_DEBUG, ("iommu_dma_unbindhdl: " 11397c478bd9Sstevel@tonic-gate "unbinding addr %x for %x pages\n", addr, mp->dmai_ndvmapages)); 11407c478bd9Sstevel@tonic-gate 11417c478bd9Sstevel@tonic-gate /* sync the entire object */ 11427c478bd9Sstevel@tonic-gate if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) { 11437c478bd9Sstevel@tonic-gate /* flush stream write buffers */ 11447c478bd9Sstevel@tonic-gate sync_stream_buf(softsp, addr, npages, (int *)&mppriv->sync_flag, 11457c478bd9Sstevel@tonic-gate mppriv->phys_sync_flag); 11467c478bd9Sstevel@tonic-gate } 11477c478bd9Sstevel@tonic-gate 11487c478bd9Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMDEBUG) 11497c478bd9Sstevel@tonic-gate /* 11507c478bd9Sstevel@tonic-gate * 'Free' the dma mappings. 
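 * Tearing down the IOTTEs here is only compiled in for DEBUG kernels
 * built with IO_MEMDEBUG; in normal builds the translations are left
 * in place and are simply overwritten the next time this DVMA range is
 * allocated and loaded.  The DVMA space itself is always handed back
 * to the arena below.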
11517c478bd9Sstevel@tonic-gate */ 11527c478bd9Sstevel@tonic-gate iommu_remove_mappings(mp); 11537c478bd9Sstevel@tonic-gate #endif /* DEBUG && IO_MEMDEBUG */ 11547c478bd9Sstevel@tonic-gate 11557c478bd9Sstevel@tonic-gate ASSERT(npages > (uint_t)0); 11567c478bd9Sstevel@tonic-gate if (mp->dmai_rflags & DMP_NOLIMIT) 1157ffadc26eSmike_s vmem_free(softsp->dvma_arena, (void *)(uintptr_t)addr, size); 11587c478bd9Sstevel@tonic-gate else 1159ffadc26eSmike_s vmem_xfree(softsp->dvma_arena, (void *)(uintptr_t)addr, size); 11607c478bd9Sstevel@tonic-gate 11617c478bd9Sstevel@tonic-gate mp->dmai_ndvmapages = 0; 11627c478bd9Sstevel@tonic-gate mp->dmai_inuse = 0; 11637c478bd9Sstevel@tonic-gate mp->dmai_minfo = NULL; 11647c478bd9Sstevel@tonic-gate 11657c478bd9Sstevel@tonic-gate if (softsp->dvma_call_list_id != 0) 11667c478bd9Sstevel@tonic-gate ddi_run_callback(&softsp->dvma_call_list_id); 11677c478bd9Sstevel@tonic-gate 11687c478bd9Sstevel@tonic-gate return (DDI_SUCCESS); 11697c478bd9Sstevel@tonic-gate } 11707c478bd9Sstevel@tonic-gate 11717c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 11727c478bd9Sstevel@tonic-gate int 11737c478bd9Sstevel@tonic-gate iommu_dma_flush(dev_info_t *dip, dev_info_t *rdip, 11747c478bd9Sstevel@tonic-gate ddi_dma_handle_t handle, off_t off, size_t len, 11757c478bd9Sstevel@tonic-gate uint_t cache_flags) 11767c478bd9Sstevel@tonic-gate { 11777c478bd9Sstevel@tonic-gate ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 11787c478bd9Sstevel@tonic-gate struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp; 11797c478bd9Sstevel@tonic-gate 11807c478bd9Sstevel@tonic-gate if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) { 11817c478bd9Sstevel@tonic-gate sync_stream_buf(mppriv->softsp, mp->dmai_mapping, 11827c478bd9Sstevel@tonic-gate mp->dmai_ndvmapages, (int *)&mppriv->sync_flag, 11837c478bd9Sstevel@tonic-gate mppriv->phys_sync_flag); 11847c478bd9Sstevel@tonic-gate } 11857c478bd9Sstevel@tonic-gate return (DDI_SUCCESS); 11867c478bd9Sstevel@tonic-gate } 11877c478bd9Sstevel@tonic-gate 11887c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 11897c478bd9Sstevel@tonic-gate int 11907c478bd9Sstevel@tonic-gate iommu_dma_win(dev_info_t *dip, dev_info_t *rdip, 11917c478bd9Sstevel@tonic-gate ddi_dma_handle_t handle, uint_t win, off_t *offp, 11927c478bd9Sstevel@tonic-gate size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp) 11937c478bd9Sstevel@tonic-gate { 11947c478bd9Sstevel@tonic-gate ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 11957c478bd9Sstevel@tonic-gate off_t offset; 11967c478bd9Sstevel@tonic-gate uint_t winsize; 11977c478bd9Sstevel@tonic-gate uint_t newoff; 11987c478bd9Sstevel@tonic-gate int rval; 11997c478bd9Sstevel@tonic-gate 12007c478bd9Sstevel@tonic-gate offset = mp->dmai_mapping & IOMMU_PAGEOFFSET; 12017c478bd9Sstevel@tonic-gate winsize = iommu_ptob(mp->dmai_ndvmapages - iommu_btopr(offset)); 12027c478bd9Sstevel@tonic-gate 12037c478bd9Sstevel@tonic-gate DPRINTF(IOMMU_DMA_WIN_DEBUG, ("getwin win %d winsize %x\n", win, 12047c478bd9Sstevel@tonic-gate winsize)); 12057c478bd9Sstevel@tonic-gate 12067c478bd9Sstevel@tonic-gate /* 12077c478bd9Sstevel@tonic-gate * win is in the range [0 .. 
dmai_nwin-1] 12087c478bd9Sstevel@tonic-gate */ 12097c478bd9Sstevel@tonic-gate if (win >= mp->dmai_nwin) 12107c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 12117c478bd9Sstevel@tonic-gate 12127c478bd9Sstevel@tonic-gate newoff = win * winsize; 12137c478bd9Sstevel@tonic-gate if (newoff > mp->dmai_object.dmao_size - mp->dmai_minxfer) 12147c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 12157c478bd9Sstevel@tonic-gate 12167c478bd9Sstevel@tonic-gate ASSERT(cookiep); 12177c478bd9Sstevel@tonic-gate cookiep->dmac_notused = 0; 12187c478bd9Sstevel@tonic-gate cookiep->dmac_type = 0; 12197c478bd9Sstevel@tonic-gate cookiep->dmac_address = (ioaddr_t)mp->dmai_mapping; 12207c478bd9Sstevel@tonic-gate cookiep->dmac_size = mp->dmai_size; 12217c478bd9Sstevel@tonic-gate *ccountp = 1; 12227c478bd9Sstevel@tonic-gate *offp = (off_t)newoff; 12237c478bd9Sstevel@tonic-gate *lenp = (uint_t)winsize; 12247c478bd9Sstevel@tonic-gate 12257c478bd9Sstevel@tonic-gate if (newoff == mp->dmai_offset) { 12267c478bd9Sstevel@tonic-gate /* 12277c478bd9Sstevel@tonic-gate * Nothing to do... 12287c478bd9Sstevel@tonic-gate */ 12297c478bd9Sstevel@tonic-gate return (DDI_SUCCESS); 12307c478bd9Sstevel@tonic-gate } 12317c478bd9Sstevel@tonic-gate 12327c478bd9Sstevel@tonic-gate if ((rval = iommu_map_window(mp, newoff, winsize)) != DDI_SUCCESS) 12337c478bd9Sstevel@tonic-gate return (rval); 12347c478bd9Sstevel@tonic-gate 12357c478bd9Sstevel@tonic-gate /* 12367c478bd9Sstevel@tonic-gate * Set this again in case iommu_map_window() has changed it 12377c478bd9Sstevel@tonic-gate */ 12387c478bd9Sstevel@tonic-gate cookiep->dmac_size = mp->dmai_size; 12397c478bd9Sstevel@tonic-gate 12407c478bd9Sstevel@tonic-gate return (DDI_SUCCESS); 12417c478bd9Sstevel@tonic-gate } 12427c478bd9Sstevel@tonic-gate 12437c478bd9Sstevel@tonic-gate static int 12447c478bd9Sstevel@tonic-gate iommu_map_window(ddi_dma_impl_t *mp, off_t newoff, size_t winsize) 12457c478bd9Sstevel@tonic-gate { 12467c478bd9Sstevel@tonic-gate uintptr_t addr = 0; 12477c478bd9Sstevel@tonic-gate page_t *pp; 12487c478bd9Sstevel@tonic-gate uint_t flags; 12497c478bd9Sstevel@tonic-gate struct page **pplist = NULL; 12507c478bd9Sstevel@tonic-gate 12517c478bd9Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMDEBUG) 12527c478bd9Sstevel@tonic-gate /* Free mappings for current window */ 12537c478bd9Sstevel@tonic-gate iommu_remove_mappings(mp); 12547c478bd9Sstevel@tonic-gate #endif /* DEBUG && IO_MEMDEBUG */ 12557c478bd9Sstevel@tonic-gate 12567c478bd9Sstevel@tonic-gate mp->dmai_offset = newoff; 12577c478bd9Sstevel@tonic-gate mp->dmai_size = mp->dmai_object.dmao_size - newoff; 12587c478bd9Sstevel@tonic-gate mp->dmai_size = MIN(mp->dmai_size, winsize); 12597c478bd9Sstevel@tonic-gate 12607c478bd9Sstevel@tonic-gate if (mp->dmai_object.dmao_type == DMA_OTYP_VADDR || 12617c478bd9Sstevel@tonic-gate mp->dmai_object.dmao_type == DMA_OTYP_BUFVADDR) { 12627c478bd9Sstevel@tonic-gate if (mp->dmai_rflags & DMP_SHADOW) { 12637c478bd9Sstevel@tonic-gate pplist = (struct page **)mp->dmai_minfo; 12647c478bd9Sstevel@tonic-gate ASSERT(pplist != NULL); 12657c478bd9Sstevel@tonic-gate pplist = pplist + (newoff >> MMU_PAGESHIFT); 12667c478bd9Sstevel@tonic-gate } else { 12677c478bd9Sstevel@tonic-gate addr = (uintptr_t) 12687c478bd9Sstevel@tonic-gate mp->dmai_object.dmao_obj.virt_obj.v_addr; 12697c478bd9Sstevel@tonic-gate addr = (addr + newoff) & ~IOMMU_PAGEOFFSET; 12707c478bd9Sstevel@tonic-gate } 12717c478bd9Sstevel@tonic-gate pp = NULL; 12727c478bd9Sstevel@tonic-gate } else { 12737c478bd9Sstevel@tonic-gate pp = 
mp->dmai_object.dmao_obj.pp_obj.pp_pp; 12747c478bd9Sstevel@tonic-gate flags = 0; 12757c478bd9Sstevel@tonic-gate while (flags < newoff) { 12767c478bd9Sstevel@tonic-gate pp = pp->p_next; 12777c478bd9Sstevel@tonic-gate flags += MMU_PAGESIZE; 12787c478bd9Sstevel@tonic-gate } 12797c478bd9Sstevel@tonic-gate } 12807c478bd9Sstevel@tonic-gate 12817c478bd9Sstevel@tonic-gate /* Set up mappings for next window */ 12827c478bd9Sstevel@tonic-gate if (addr) { 12837c478bd9Sstevel@tonic-gate if (iommu_create_vaddr_mappings(mp, addr) < 0) 12847c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 12857c478bd9Sstevel@tonic-gate } else { 12867c478bd9Sstevel@tonic-gate if (iommu_create_pp_mappings(mp, pp, pplist) < 0) 12877c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 12887c478bd9Sstevel@tonic-gate } 12897c478bd9Sstevel@tonic-gate 12907c478bd9Sstevel@tonic-gate /* 12917c478bd9Sstevel@tonic-gate * also invalidate read stream buffer 12927c478bd9Sstevel@tonic-gate */ 12937c478bd9Sstevel@tonic-gate if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) { 12947c478bd9Sstevel@tonic-gate struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp; 12957c478bd9Sstevel@tonic-gate 12967c478bd9Sstevel@tonic-gate sync_stream_buf(mppriv->softsp, mp->dmai_mapping, 12977c478bd9Sstevel@tonic-gate mp->dmai_ndvmapages, (int *)&mppriv->sync_flag, 12987c478bd9Sstevel@tonic-gate mppriv->phys_sync_flag); 12997c478bd9Sstevel@tonic-gate } 13007c478bd9Sstevel@tonic-gate 13017c478bd9Sstevel@tonic-gate return (DDI_SUCCESS); 13027c478bd9Sstevel@tonic-gate 13037c478bd9Sstevel@tonic-gate } 13047c478bd9Sstevel@tonic-gate 13057c478bd9Sstevel@tonic-gate 13067c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 13077c478bd9Sstevel@tonic-gate int 13087c478bd9Sstevel@tonic-gate iommu_dma_mctl(dev_info_t *dip, dev_info_t *rdip, 13097c478bd9Sstevel@tonic-gate ddi_dma_handle_t handle, enum ddi_dma_ctlops request, 13107c478bd9Sstevel@tonic-gate off_t *offp, size_t *lenp, caddr_t *objp, uint_t cache_flags) 13117c478bd9Sstevel@tonic-gate { 13127c478bd9Sstevel@tonic-gate pgcnt_t npages; 13137c478bd9Sstevel@tonic-gate ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 13147c478bd9Sstevel@tonic-gate 13158793b36bSNick Todd DPRINTF(IOMMU_DMAMCTL_DEBUG, ("dma_mctl: handle %p ", (void *)mp)); 13167c478bd9Sstevel@tonic-gate switch (request) { 13177c478bd9Sstevel@tonic-gate 13187c478bd9Sstevel@tonic-gate case DDI_DMA_SET_SBUS64: 13197c478bd9Sstevel@tonic-gate { 13207c478bd9Sstevel@tonic-gate struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp; 13217c478bd9Sstevel@tonic-gate 13227c478bd9Sstevel@tonic-gate return (iommu_dma_lim_setup(dip, rdip, mppriv->softsp, 13237c478bd9Sstevel@tonic-gate &mp->dmai_burstsizes, (uint_t)*lenp, &mp->dmai_minxfer, 13247c478bd9Sstevel@tonic-gate DDI_DMA_SBUS_64BIT)); 13257c478bd9Sstevel@tonic-gate } 13267c478bd9Sstevel@tonic-gate 13277c478bd9Sstevel@tonic-gate case DDI_DMA_RESERVE: 13287c478bd9Sstevel@tonic-gate { 13297c478bd9Sstevel@tonic-gate struct ddi_dma_req *dmareq = (struct ddi_dma_req *)offp; 13307c478bd9Sstevel@tonic-gate ddi_dma_lim_t *dma_lim; 13317c478bd9Sstevel@tonic-gate ddi_dma_handle_t *handlep; 13327c478bd9Sstevel@tonic-gate uint_t np; 13337c478bd9Sstevel@tonic-gate ioaddr_t ioaddr; 13347c478bd9Sstevel@tonic-gate int i; 13357c478bd9Sstevel@tonic-gate struct fast_dvma *iommu_fast_dvma; 13367c478bd9Sstevel@tonic-gate struct sbus_soft_state *softsp = 13377c478bd9Sstevel@tonic-gate (struct sbus_soft_state *)ddi_get_soft_state(sbusp, 13387c478bd9Sstevel@tonic-gate ddi_get_instance(dip)); 13397c478bd9Sstevel@tonic-gate 13407c478bd9Sstevel@tonic-gate 
/* Some simple sanity checks */ 13417c478bd9Sstevel@tonic-gate dma_lim = dmareq->dmar_limits; 13427c478bd9Sstevel@tonic-gate if (dma_lim->dlim_burstsizes == 0) { 13437c478bd9Sstevel@tonic-gate DPRINTF(IOMMU_FASTDMA_RESERVE, 13447c478bd9Sstevel@tonic-gate ("Reserve: bad burstsizes\n")); 13457c478bd9Sstevel@tonic-gate return (DDI_DMA_BADLIMITS); 13467c478bd9Sstevel@tonic-gate } 13477c478bd9Sstevel@tonic-gate if ((AHI <= ALO) || (AHI < softsp->iommu_dvma_base)) { 13487c478bd9Sstevel@tonic-gate DPRINTF(IOMMU_FASTDMA_RESERVE, 13497c478bd9Sstevel@tonic-gate ("Reserve: bad limits\n")); 13507c478bd9Sstevel@tonic-gate return (DDI_DMA_BADLIMITS); 13517c478bd9Sstevel@tonic-gate } 13527c478bd9Sstevel@tonic-gate 13537c478bd9Sstevel@tonic-gate np = dmareq->dmar_object.dmao_size; 13547c478bd9Sstevel@tonic-gate mutex_enter(&softsp->dma_pool_lock); 13557c478bd9Sstevel@tonic-gate if (np > softsp->dma_reserve) { 13567c478bd9Sstevel@tonic-gate mutex_exit(&softsp->dma_pool_lock); 13577c478bd9Sstevel@tonic-gate DPRINTF(IOMMU_FASTDMA_RESERVE, 13587c478bd9Sstevel@tonic-gate ("Reserve: dma_reserve is exhausted\n")); 13597c478bd9Sstevel@tonic-gate return (DDI_DMA_NORESOURCES); 13607c478bd9Sstevel@tonic-gate } 13617c478bd9Sstevel@tonic-gate 13627c478bd9Sstevel@tonic-gate softsp->dma_reserve -= np; 13637c478bd9Sstevel@tonic-gate mutex_exit(&softsp->dma_pool_lock); 13647c478bd9Sstevel@tonic-gate mp = kmem_zalloc(sizeof (*mp), KM_SLEEP); 13657c478bd9Sstevel@tonic-gate mp->dmai_rflags = DMP_BYPASSNEXUS; 13667c478bd9Sstevel@tonic-gate mp->dmai_rdip = rdip; 13677c478bd9Sstevel@tonic-gate mp->dmai_minxfer = dma_lim->dlim_minxfer; 13687c478bd9Sstevel@tonic-gate mp->dmai_burstsizes = dma_lim->dlim_burstsizes; 13697c478bd9Sstevel@tonic-gate 1370ffadc26eSmike_s ioaddr = (ioaddr_t)(uintptr_t)vmem_xalloc(softsp->dvma_arena, 13717c478bd9Sstevel@tonic-gate iommu_ptob(np), IOMMU_PAGESIZE, 0, 1372ffadc26eSmike_s dma_lim->dlim_cntr_max + 1, 1373ffadc26eSmike_s (void *)(uintptr_t)ALO, (void *)(uintptr_t)(AHI + 1), 13747c478bd9Sstevel@tonic-gate dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP); 13757c478bd9Sstevel@tonic-gate 13767c478bd9Sstevel@tonic-gate if (ioaddr == 0) { 13777c478bd9Sstevel@tonic-gate mutex_enter(&softsp->dma_pool_lock); 13787c478bd9Sstevel@tonic-gate softsp->dma_reserve += np; 13797c478bd9Sstevel@tonic-gate mutex_exit(&softsp->dma_pool_lock); 13807c478bd9Sstevel@tonic-gate kmem_free(mp, sizeof (*mp)); 13817c478bd9Sstevel@tonic-gate DPRINTF(IOMMU_FASTDMA_RESERVE, 13827c478bd9Sstevel@tonic-gate ("Reserve: No dvma resources available\n")); 13837c478bd9Sstevel@tonic-gate return (DDI_DMA_NOMAPPING); 13847c478bd9Sstevel@tonic-gate } 13857c478bd9Sstevel@tonic-gate 13867c478bd9Sstevel@tonic-gate /* create a per request structure */ 13877c478bd9Sstevel@tonic-gate iommu_fast_dvma = kmem_alloc(sizeof (struct fast_dvma), 13887c478bd9Sstevel@tonic-gate KM_SLEEP); 13897c478bd9Sstevel@tonic-gate 13907c478bd9Sstevel@tonic-gate /* 13917c478bd9Sstevel@tonic-gate * We need to remember the size of the transfer so that 13927c478bd9Sstevel@tonic-gate * we can figure the virtual pages to sync when the transfer 13937c478bd9Sstevel@tonic-gate * is complete. 
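 * pagecnt[] is indexed by the same slot index the caller later passes
 * to the kaddr_load/unload/sync ops: iommu_dvma_kaddr_load() records
 * how many pages it mapped at that index, and iommu_dvma_unload()/
 * iommu_dvma_sync() use the saved count to decide how many pages of
 * the streaming buffer to flush.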
13947c478bd9Sstevel@tonic-gate */ 13957c478bd9Sstevel@tonic-gate iommu_fast_dvma->pagecnt = kmem_zalloc(np * 13967c478bd9Sstevel@tonic-gate sizeof (uint_t), KM_SLEEP); 13977c478bd9Sstevel@tonic-gate 13987c478bd9Sstevel@tonic-gate /* Allocate a streaming cache sync flag for each index */ 13997c478bd9Sstevel@tonic-gate iommu_fast_dvma->sync_flag = kmem_zalloc(np * 14007c478bd9Sstevel@tonic-gate sizeof (int), KM_SLEEP); 14017c478bd9Sstevel@tonic-gate 14027c478bd9Sstevel@tonic-gate /* Allocate a physical sync flag for each index */ 14037c478bd9Sstevel@tonic-gate iommu_fast_dvma->phys_sync_flag = 14047c478bd9Sstevel@tonic-gate kmem_zalloc(np * sizeof (uint64_t), KM_SLEEP); 14057c478bd9Sstevel@tonic-gate 14067c478bd9Sstevel@tonic-gate for (i = 0; i < np; i++) 14077c478bd9Sstevel@tonic-gate iommu_fast_dvma->phys_sync_flag[i] = va_to_pa((caddr_t) 14087c478bd9Sstevel@tonic-gate &iommu_fast_dvma->sync_flag[i]); 14097c478bd9Sstevel@tonic-gate 14107c478bd9Sstevel@tonic-gate mp->dmai_mapping = ioaddr; 14117c478bd9Sstevel@tonic-gate mp->dmai_ndvmapages = np; 14127c478bd9Sstevel@tonic-gate iommu_fast_dvma->ops = &iommu_dvma_ops; 14137c478bd9Sstevel@tonic-gate iommu_fast_dvma->softsp = (caddr_t)softsp; 14147c478bd9Sstevel@tonic-gate mp->dmai_nexus_private = (caddr_t)iommu_fast_dvma; 14157c478bd9Sstevel@tonic-gate handlep = (ddi_dma_handle_t *)objp; 14167c478bd9Sstevel@tonic-gate *handlep = (ddi_dma_handle_t)mp; 14177c478bd9Sstevel@tonic-gate 14187c478bd9Sstevel@tonic-gate DPRINTF(IOMMU_FASTDMA_RESERVE, 14197c478bd9Sstevel@tonic-gate ("Reserve: mapping object %p base addr %lx size %x\n", 14208793b36bSNick Todd (void *)mp, mp->dmai_mapping, mp->dmai_ndvmapages)); 14217c478bd9Sstevel@tonic-gate 14227c478bd9Sstevel@tonic-gate break; 14237c478bd9Sstevel@tonic-gate } 14247c478bd9Sstevel@tonic-gate 14257c478bd9Sstevel@tonic-gate case DDI_DMA_RELEASE: 14267c478bd9Sstevel@tonic-gate { 14277c478bd9Sstevel@tonic-gate ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 14287c478bd9Sstevel@tonic-gate uint_t np = npages = mp->dmai_ndvmapages; 14297c478bd9Sstevel@tonic-gate ioaddr_t ioaddr = mp->dmai_mapping; 14307c478bd9Sstevel@tonic-gate volatile uint64_t *iotte_ptr; 14317c478bd9Sstevel@tonic-gate struct fast_dvma *iommu_fast_dvma = (struct fast_dvma *) 14327c478bd9Sstevel@tonic-gate mp->dmai_nexus_private; 14337c478bd9Sstevel@tonic-gate struct sbus_soft_state *softsp = (struct sbus_soft_state *) 14347c478bd9Sstevel@tonic-gate iommu_fast_dvma->softsp; 14357c478bd9Sstevel@tonic-gate 14367c478bd9Sstevel@tonic-gate ASSERT(softsp != NULL); 14377c478bd9Sstevel@tonic-gate 14387c478bd9Sstevel@tonic-gate /* Unload stale mappings and flush stale tlb's */ 14397c478bd9Sstevel@tonic-gate iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr); 14407c478bd9Sstevel@tonic-gate 14417c478bd9Sstevel@tonic-gate while (npages > (uint_t)0) { 14427c478bd9Sstevel@tonic-gate *iotte_ptr = (uint64_t)0; /* unload tte */ 14437c478bd9Sstevel@tonic-gate iommu_tlb_flush(softsp, ioaddr, 1); 14447c478bd9Sstevel@tonic-gate 14457c478bd9Sstevel@tonic-gate npages--; 14467c478bd9Sstevel@tonic-gate iotte_ptr++; 14477c478bd9Sstevel@tonic-gate ioaddr += IOMMU_PAGESIZE; 14487c478bd9Sstevel@tonic-gate } 14497c478bd9Sstevel@tonic-gate 14507c478bd9Sstevel@tonic-gate ioaddr = (ioaddr_t)mp->dmai_mapping; 14517c478bd9Sstevel@tonic-gate mutex_enter(&softsp->dma_pool_lock); 14527c478bd9Sstevel@tonic-gate softsp->dma_reserve += np; 14537c478bd9Sstevel@tonic-gate mutex_exit(&softsp->dma_pool_lock); 14547c478bd9Sstevel@tonic-gate 14557c478bd9Sstevel@tonic-gate if 
(mp->dmai_rflags & DMP_NOLIMIT) 1456ffadc26eSmike_s vmem_free(softsp->dvma_arena, 1457ffadc26eSmike_s (void *)(uintptr_t)ioaddr, iommu_ptob(np)); 14587c478bd9Sstevel@tonic-gate else 1459ffadc26eSmike_s vmem_xfree(softsp->dvma_arena, 1460ffadc26eSmike_s (void *)(uintptr_t)ioaddr, iommu_ptob(np)); 14617c478bd9Sstevel@tonic-gate 14627c478bd9Sstevel@tonic-gate kmem_free(mp, sizeof (*mp)); 14637c478bd9Sstevel@tonic-gate kmem_free(iommu_fast_dvma->pagecnt, np * sizeof (uint_t)); 14647c478bd9Sstevel@tonic-gate kmem_free(iommu_fast_dvma->sync_flag, np * sizeof (int)); 14657c478bd9Sstevel@tonic-gate kmem_free(iommu_fast_dvma->phys_sync_flag, np * 14667c478bd9Sstevel@tonic-gate sizeof (uint64_t)); 14677c478bd9Sstevel@tonic-gate kmem_free(iommu_fast_dvma, sizeof (struct fast_dvma)); 14687c478bd9Sstevel@tonic-gate 14697c478bd9Sstevel@tonic-gate 14707c478bd9Sstevel@tonic-gate DPRINTF(IOMMU_FASTDMA_RESERVE, 14717c478bd9Sstevel@tonic-gate ("Release: Base addr %x size %x\n", ioaddr, np)); 14727c478bd9Sstevel@tonic-gate /* 14737c478bd9Sstevel@tonic-gate * Now that we've freed some resource, 14747c478bd9Sstevel@tonic-gate * if there is anybody waiting for it 14757c478bd9Sstevel@tonic-gate * try and get them going. 14767c478bd9Sstevel@tonic-gate */ 14777c478bd9Sstevel@tonic-gate if (softsp->dvma_call_list_id != 0) 14787c478bd9Sstevel@tonic-gate ddi_run_callback(&softsp->dvma_call_list_id); 14797c478bd9Sstevel@tonic-gate 14807c478bd9Sstevel@tonic-gate break; 14817c478bd9Sstevel@tonic-gate } 14827c478bd9Sstevel@tonic-gate 14837c478bd9Sstevel@tonic-gate default: 14847c478bd9Sstevel@tonic-gate DPRINTF(IOMMU_DMAMCTL_DEBUG, ("iommu_dma_mctl: unknown option " 14857c478bd9Sstevel@tonic-gate "0%x\n", request)); 14867c478bd9Sstevel@tonic-gate 14877c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 14887c478bd9Sstevel@tonic-gate } 14897c478bd9Sstevel@tonic-gate return (DDI_SUCCESS); 14907c478bd9Sstevel@tonic-gate } 14917c478bd9Sstevel@tonic-gate 14927c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 14937c478bd9Sstevel@tonic-gate void 14947c478bd9Sstevel@tonic-gate iommu_dvma_kaddr_load(ddi_dma_handle_t h, caddr_t a, uint_t len, uint_t index, 14957c478bd9Sstevel@tonic-gate ddi_dma_cookie_t *cp) 14967c478bd9Sstevel@tonic-gate { 14977c478bd9Sstevel@tonic-gate uintptr_t addr; 14987c478bd9Sstevel@tonic-gate ioaddr_t ioaddr; 14997c478bd9Sstevel@tonic-gate uint_t offset; 15007c478bd9Sstevel@tonic-gate pfn_t pfn; 15017c478bd9Sstevel@tonic-gate int npages; 15027c478bd9Sstevel@tonic-gate volatile uint64_t *iotte_ptr; 15037c478bd9Sstevel@tonic-gate uint64_t iotte_flag = 0; 15047c478bd9Sstevel@tonic-gate struct as *as = NULL; 15057c478bd9Sstevel@tonic-gate extern struct as kas; 15067c478bd9Sstevel@tonic-gate ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h; 15077c478bd9Sstevel@tonic-gate struct fast_dvma *iommu_fast_dvma = 15087c478bd9Sstevel@tonic-gate (struct fast_dvma *)mp->dmai_nexus_private; 15097c478bd9Sstevel@tonic-gate struct sbus_soft_state *softsp = (struct sbus_soft_state *) 15107c478bd9Sstevel@tonic-gate iommu_fast_dvma->softsp; 15117c478bd9Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE) 15127c478bd9Sstevel@tonic-gate struct io_mem_list *iomemp; 15137c478bd9Sstevel@tonic-gate pfn_t *pfnp; 15147c478bd9Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */ 15157c478bd9Sstevel@tonic-gate 15167c478bd9Sstevel@tonic-gate ASSERT(softsp != NULL); 15177c478bd9Sstevel@tonic-gate 15187c478bd9Sstevel@tonic-gate addr = (uintptr_t)a; 15197c478bd9Sstevel@tonic-gate ioaddr = (ioaddr_t)(mp->dmai_mapping + iommu_ptob(index)); 
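	/*
	 * The caller-supplied index selects a page offset inside the
	 * DVMA range reserved by DDI_DMA_RESERVE: this load starts at
	 * dmai_mapping + iommu_ptob(index), so no DVMA allocation is
	 * needed per transfer; the loop below only flushes and rewrites
	 * the IOTTEs for that run of pages.
	 */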
15207c478bd9Sstevel@tonic-gate offset = (uint_t)(addr & IOMMU_PAGEOFFSET); 15217c478bd9Sstevel@tonic-gate iommu_fast_dvma->pagecnt[index] = iommu_btopr(len + offset); 15227c478bd9Sstevel@tonic-gate as = &kas; 15237c478bd9Sstevel@tonic-gate addr &= ~IOMMU_PAGEOFFSET; 15247c478bd9Sstevel@tonic-gate npages = iommu_btopr(len + offset); 15257c478bd9Sstevel@tonic-gate 15267c478bd9Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE) 15277c478bd9Sstevel@tonic-gate iomemp = kmem_alloc(sizeof (struct io_mem_list), KM_SLEEP); 15287c478bd9Sstevel@tonic-gate iomemp->rdip = mp->dmai_rdip; 15297c478bd9Sstevel@tonic-gate iomemp->ioaddr = ioaddr; 15307c478bd9Sstevel@tonic-gate iomemp->addr = addr; 15317c478bd9Sstevel@tonic-gate iomemp->npages = npages; 15327c478bd9Sstevel@tonic-gate pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) * (npages + 1), 15337c478bd9Sstevel@tonic-gate KM_SLEEP); 15347c478bd9Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */ 15357c478bd9Sstevel@tonic-gate 15367c478bd9Sstevel@tonic-gate cp->dmac_address = ioaddr | offset; 15377c478bd9Sstevel@tonic-gate cp->dmac_size = len; 15387c478bd9Sstevel@tonic-gate 15397c478bd9Sstevel@tonic-gate iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr); 15407c478bd9Sstevel@tonic-gate /* read/write and streaming io on */ 15417c478bd9Sstevel@tonic-gate iotte_flag = IOTTE_VALID | IOTTE_WRITE | IOTTE_CACHE; 15427c478bd9Sstevel@tonic-gate 15437c478bd9Sstevel@tonic-gate if (mp->dmai_rflags & DDI_DMA_CONSISTENT) 15447c478bd9Sstevel@tonic-gate mp->dmai_rflags |= DMP_NOSYNC; 15457c478bd9Sstevel@tonic-gate else if (!softsp->stream_buf_off) 15467c478bd9Sstevel@tonic-gate iotte_flag |= IOTTE_STREAM; 15477c478bd9Sstevel@tonic-gate 15487c478bd9Sstevel@tonic-gate DPRINTF(IOMMU_FASTDMA_LOAD, ("kaddr_load: ioaddr %x " 1549ffadc26eSmike_s "size %x offset %x index %x kaddr %lx\n", 15507c478bd9Sstevel@tonic-gate ioaddr, len, offset, index, addr)); 15517c478bd9Sstevel@tonic-gate ASSERT(npages > 0); 15527c478bd9Sstevel@tonic-gate do { 15537c478bd9Sstevel@tonic-gate pfn = hat_getpfnum(as->a_hat, (caddr_t)addr); 15547c478bd9Sstevel@tonic-gate if (pfn == PFN_INVALID) { 15557c478bd9Sstevel@tonic-gate DPRINTF(IOMMU_FASTDMA_LOAD, ("kaddr_load: invalid pfn " 15567c478bd9Sstevel@tonic-gate "from hat_getpfnum()\n")); 15577c478bd9Sstevel@tonic-gate } 15587c478bd9Sstevel@tonic-gate 15597c478bd9Sstevel@tonic-gate iommu_tlb_flush(softsp, ioaddr, 1); 15607c478bd9Sstevel@tonic-gate 15617c478bd9Sstevel@tonic-gate /* load tte */ 15627c478bd9Sstevel@tonic-gate *iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag; 15637c478bd9Sstevel@tonic-gate 15647c478bd9Sstevel@tonic-gate npages--; 15657c478bd9Sstevel@tonic-gate iotte_ptr++; 15667c478bd9Sstevel@tonic-gate 15677c478bd9Sstevel@tonic-gate addr += IOMMU_PAGESIZE; 15687c478bd9Sstevel@tonic-gate ioaddr += IOMMU_PAGESIZE; 15697c478bd9Sstevel@tonic-gate 15707c478bd9Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE) 15717c478bd9Sstevel@tonic-gate *pfnp = pfn; 15727c478bd9Sstevel@tonic-gate pfnp++; 15737c478bd9Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */ 15747c478bd9Sstevel@tonic-gate 15757c478bd9Sstevel@tonic-gate } while (npages > 0); 15767c478bd9Sstevel@tonic-gate 15777c478bd9Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE) 15787c478bd9Sstevel@tonic-gate mutex_enter(&softsp->iomemlock); 15797c478bd9Sstevel@tonic-gate iomemp->next = softsp->iomem; 15807c478bd9Sstevel@tonic-gate softsp->iomem = iomemp; 15817c478bd9Sstevel@tonic-gate mutex_exit(&softsp->iomemlock); 
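	/*
	 * The io_mem_list entry built above (device, DVMA address,
	 * kernel address, page count and per-page pfns) exists only on
	 * DEBUG kernels built with IO_MEMUSAGE; iommu_dvma_unload()
	 * searches softsp->iomem for the matching DVMA address and
	 * frees the entry when the slot is unloaded.
	 */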
15827c478bd9Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */ 15837c478bd9Sstevel@tonic-gate } 15847c478bd9Sstevel@tonic-gate 15857c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 15867c478bd9Sstevel@tonic-gate void 15877c478bd9Sstevel@tonic-gate iommu_dvma_unload(ddi_dma_handle_t h, uint_t index, uint_t view) 15887c478bd9Sstevel@tonic-gate { 15897c478bd9Sstevel@tonic-gate ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h; 15907c478bd9Sstevel@tonic-gate ioaddr_t ioaddr; 15917c478bd9Sstevel@tonic-gate pgcnt_t npages; 15927c478bd9Sstevel@tonic-gate struct fast_dvma *iommu_fast_dvma = 15937c478bd9Sstevel@tonic-gate (struct fast_dvma *)mp->dmai_nexus_private; 15947c478bd9Sstevel@tonic-gate struct sbus_soft_state *softsp = (struct sbus_soft_state *) 15957c478bd9Sstevel@tonic-gate iommu_fast_dvma->softsp; 15967c478bd9Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE) 15977c478bd9Sstevel@tonic-gate struct io_mem_list **prevp, *walk; 15987c478bd9Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */ 15997c478bd9Sstevel@tonic-gate 16007c478bd9Sstevel@tonic-gate ASSERT(softsp != NULL); 16017c478bd9Sstevel@tonic-gate 16027c478bd9Sstevel@tonic-gate ioaddr = (ioaddr_t)(mp->dmai_mapping + iommu_ptob(index)); 16037c478bd9Sstevel@tonic-gate npages = iommu_fast_dvma->pagecnt[index]; 16047c478bd9Sstevel@tonic-gate 16057c478bd9Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE) 16067c478bd9Sstevel@tonic-gate mutex_enter(&softsp->iomemlock); 16077c478bd9Sstevel@tonic-gate prevp = &softsp->iomem; 16087c478bd9Sstevel@tonic-gate walk = softsp->iomem; 16097c478bd9Sstevel@tonic-gate 16107c478bd9Sstevel@tonic-gate while (walk != NULL) { 16117c478bd9Sstevel@tonic-gate if (walk->ioaddr == ioaddr) { 16127c478bd9Sstevel@tonic-gate *prevp = walk->next; 16137c478bd9Sstevel@tonic-gate break; 16147c478bd9Sstevel@tonic-gate } 16157c478bd9Sstevel@tonic-gate prevp = &walk->next; 16167c478bd9Sstevel@tonic-gate walk = walk->next; 16177c478bd9Sstevel@tonic-gate } 16187c478bd9Sstevel@tonic-gate mutex_exit(&softsp->iomemlock); 16197c478bd9Sstevel@tonic-gate 16207c478bd9Sstevel@tonic-gate kmem_free(walk->pfn, sizeof (pfn_t) * (npages + 1)); 16217c478bd9Sstevel@tonic-gate kmem_free(walk, sizeof (struct io_mem_list)); 16227c478bd9Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */ 16237c478bd9Sstevel@tonic-gate 16247c478bd9Sstevel@tonic-gate DPRINTF(IOMMU_FASTDMA_SYNC, ("kaddr_unload: handle %p sync flag " 16258793b36bSNick Todd "addr %p sync flag pfn %llx index %x page count %lx\n", (void *)mp, 16268793b36bSNick Todd (void *)&iommu_fast_dvma->sync_flag[index], 16277c478bd9Sstevel@tonic-gate iommu_fast_dvma->phys_sync_flag[index], 16287c478bd9Sstevel@tonic-gate index, npages)); 16297c478bd9Sstevel@tonic-gate 16307c478bd9Sstevel@tonic-gate if ((mp->dmai_rflags & DMP_NOSYNC) != DMP_NOSYNC) { 16317c478bd9Sstevel@tonic-gate sync_stream_buf(softsp, ioaddr, npages, 16327c478bd9Sstevel@tonic-gate (int *)&iommu_fast_dvma->sync_flag[index], 16337c478bd9Sstevel@tonic-gate iommu_fast_dvma->phys_sync_flag[index]); 16347c478bd9Sstevel@tonic-gate } 16357c478bd9Sstevel@tonic-gate } 16367c478bd9Sstevel@tonic-gate 16377c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 16387c478bd9Sstevel@tonic-gate void 16397c478bd9Sstevel@tonic-gate iommu_dvma_sync(ddi_dma_handle_t h, uint_t index, uint_t view) 16407c478bd9Sstevel@tonic-gate { 16417c478bd9Sstevel@tonic-gate ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h; 16427c478bd9Sstevel@tonic-gate ioaddr_t ioaddr; 16437c478bd9Sstevel@tonic-gate uint_t npages; 16447c478bd9Sstevel@tonic-gate struct fast_dvma *iommu_fast_dvma = 
16457c478bd9Sstevel@tonic-gate (struct fast_dvma *)mp->dmai_nexus_private; 16467c478bd9Sstevel@tonic-gate struct sbus_soft_state *softsp = (struct sbus_soft_state *) 16477c478bd9Sstevel@tonic-gate iommu_fast_dvma->softsp; 16487c478bd9Sstevel@tonic-gate 16497c478bd9Sstevel@tonic-gate if ((mp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC) 16507c478bd9Sstevel@tonic-gate return; 16517c478bd9Sstevel@tonic-gate 16527c478bd9Sstevel@tonic-gate ASSERT(softsp != NULL); 16537c478bd9Sstevel@tonic-gate ioaddr = (ioaddr_t)(mp->dmai_mapping + iommu_ptob(index)); 16547c478bd9Sstevel@tonic-gate npages = iommu_fast_dvma->pagecnt[index]; 16557c478bd9Sstevel@tonic-gate 16567c478bd9Sstevel@tonic-gate DPRINTF(IOMMU_FASTDMA_SYNC, ("kaddr_sync: handle %p, " 16578793b36bSNick Todd "sync flag addr %p, sync flag pfn %llx\n", (void *)mp, 16588793b36bSNick Todd (void *)&iommu_fast_dvma->sync_flag[index], 16597c478bd9Sstevel@tonic-gate iommu_fast_dvma->phys_sync_flag[index])); 16607c478bd9Sstevel@tonic-gate 16617c478bd9Sstevel@tonic-gate sync_stream_buf(softsp, ioaddr, npages, 16627c478bd9Sstevel@tonic-gate (int *)&iommu_fast_dvma->sync_flag[index], 16637c478bd9Sstevel@tonic-gate iommu_fast_dvma->phys_sync_flag[index]); 16647c478bd9Sstevel@tonic-gate } 1665
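/*
 * Illustrative, non-authoritative sketch of how a leaf driver drives the
 * code above.  The dvma_reserve()/dvma_kaddr_load()/dvma_sync()/
 * dvma_unload()/dvma_release() names are the dvma_reserve(9F) family
 * that ends up in the DDI_DMA_RESERVE/DDI_DMA_RELEASE mctl cases and the
 * iommu_dvma_ops vector in this file; the exact signatures are quoted
 * from memory and the xx_* identifiers are invented for the example, so
 * treat this as a sketch rather than documentation.
 *
 * Reserve a fixed pool of DVMA pages once, typically at attach time:
 *
 *	ddi_dma_handle_t fh;
 *	ddi_dma_cookie_t cookie;
 *
 *	if (dvma_reserve(xx_dip, &xx_dma_lim, XX_NSLOTS, &fh) !=
 *	    DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *
 * Per transfer, load a kernel virtual buffer into a slot, program the
 * device with the returned cookie, then sync and unload when the device
 * is done:
 *
 *	dvma_kaddr_load(fh, xx_buf, xx_len, 0, &cookie);
 *	ddi_put32(xx_acc, &xx_regs->dma_addr, cookie.dmac_address);
 *	...
 *	dvma_sync(fh, 0, DDI_DMA_SYNC_FORCPU);
 *	dvma_unload(fh, 0, DDI_DMA_SYNC_FORCPU);
 *
 * And give the reservation back at detach time:
 *
 *	dvma_release(xx_dip, fh, XX_NSLOTS);
 *
 * For the windowed path (DDI_DMA_PARTIAL binds that return
 * DDI_DMA_PARTIAL_MAP and are serviced by iommu_dma_win() above), the
 * standard ddi_dma_getwin(9F) walk applies:
 *
 *	uint_t nwin, win, ccount;
 *	off_t off;
 *	size_t len;
 *
 *	(void) ddi_dma_numwin(handle, &nwin);
 *	for (win = 0; win < nwin; win++) {
 *		if (ddi_dma_getwin(handle, win, &off, &len, &cookie,
 *		    &ccount) != DDI_SUCCESS)
 *			break;
 *		... program the device from cookie, wait for it ...
 *	}
 */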