/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * Internal PCI Fast DVMA implementation
 */
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/async.h>
#include <sys/sysmacros.h>
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/dvma.h>
#include <vm/hat.h>
#include <sys/pci/pci_obj.h>

/*LINTLIBRARY*/

static struct dvma_ops fdvma_ops;

/*
 * The following routines are used to implement the sun4u fast dvma
 * routines on this bus.
 */

/*ARGSUSED*/
static void
pci_fdvma_load(ddi_dma_handle_t h, caddr_t a, uint_t len, uint_t index,
	ddi_dma_cookie_t *cp)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
	fdvma_t *fdvma_p = (fdvma_t *)mp->dmai_fdvma;
	pci_t *pci_p = (pci_t *)fdvma_p->softsp;
	iommu_t *iommu_p = pci_p->pci_iommu_p;
	dev_info_t *dip = pci_p->pci_dip;
	dvma_addr_t dvma_addr, dvma_pg;
	caddr_t baseaddr = (caddr_t)((uintptr_t)a & PAGEMASK);
	uint32_t offset;
	size_t npages, pg_index;
	pfn_t pfn;
	int i;
	uint64_t tte;

	offset = (uint32_t)(uintptr_t)a & IOMMU_PAGE_OFFSET;
	npages = IOMMU_BTOPR(len + offset);
	if (!npages)
		return;

	/* make sure we don't exceed reserved boundary */
	DEBUG3(DBG_FAST_DVMA, dip, "load index=%x: %p+%x ", index, a, len);
	if (index + npages > mp->dmai_ndvmapages) {
		cmn_err(pci_panic_on_fatal_errors ? CE_PANIC : CE_WARN,
		    "%s%d: kaddr_load index(%x)+pgs(%lx) exceeds limit\n",
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    index, npages);
		return;
	}

	/* better have not already loaded something at this address */
	ASSERT(fdvma_p->kvbase[index] == NULL);
	ASSERT(fdvma_p->pagecnt[index] == 0);

	dvma_addr = mp->dmai_mapping + IOMMU_PTOB(index);
	dvma_pg = IOMMU_BTOP(dvma_addr);
	pg_index = dvma_pg - iommu_p->dvma_base_pg;

	/* construct the dma cookie to be returned */
	MAKE_DMA_COOKIE(cp, dvma_addr | offset, len);
	DEBUG2(DBG_FAST_DVMA | DBG_CONT, dip, "cookie: %x+%x\n",
	    cp->dmac_address, cp->dmac_size);

	for (i = 0, a = baseaddr; i < npages; i++, a += IOMMU_PAGE_SIZE) {
		if (pci_dvma_remap_enabled) {
			uint_t flags = HAC_NOSLEEP | HAC_PAGELOCK;

			(void) hat_add_callback(pci_fast_dvma_cbid, a,
			    IOMMU_PAGE_SIZE, flags, mp, &pfn);

			mp->dmai_flags |= DMAI_FLAGS_RELOC;
		} else {
			pfn = hat_getpfnum(kas.a_hat, a);
		}
		if (pfn == PFN_INVALID)
			goto bad_pfn;

		if (i == 0)	/* setup template, all bits except pfn value */
			tte = MAKE_TTE_TEMPLATE((iopfn_t)pfn, mp);

		/* XXX assumes iommu and mmu have the same page size */
		iommu_p->iommu_tsb_vaddr[pg_index + i] = tte | IOMMU_PTOB(pfn);
		IOMMU_PAGE_FLUSH(iommu_p, (dvma_pg + i));
	}

	mp->dmai_flags |= DMAI_FLAGS_MAPPED;
	fdvma_p->kvbase[index] = baseaddr;
	fdvma_p->pagecnt[index] = npages;

	return;
bad_pfn:
	cmn_err(CE_WARN, "%s%d: kaddr_load can't get page frame for vaddr %x",
	    ddi_driver_name(dip), ddi_get_instance(dip), (int)(uintptr_t)a);
}

/*ARGSUSED*/
static void
pci_fdvma_unload(ddi_dma_handle_t h, uint_t index, uint_t sync_flags)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
	fdvma_t *fdvma_p = (fdvma_t *)mp->dmai_fdvma;
	pci_t *pci_p = (pci_t *)fdvma_p->softsp;
	size_t npg = fdvma_p->pagecnt[index];

	dvma_addr_t dvma_pg = IOMMU_BTOP(mp->dmai_mapping + IOMMU_PTOB(index));

	DEBUG5(DBG_FAST_DVMA, pci_p->pci_dip,
	    "unload index=%x flags=%x %x+%x+%x\n", index, sync_flags,
	    mp->dmai_mapping, IOMMU_PTOB(index), IOMMU_PTOB(npg));

	if (!pci_dvma_sync_before_unmap) {
		if (PCI_DMA_CANRELOC(mp))
			pci_fdvma_unregister_callbacks(pci_p, fdvma_p, mp,
			    index);
		fdvma_p->kvbase[index] = NULL;
		iommu_unmap_pages(pci_p->pci_iommu_p, dvma_pg, npg);
	}
	if (sync_flags != -1)
		pci_dma_sync(pci_p->pci_dip, mp->dmai_rdip, h,
		    IOMMU_PTOB(index), IOMMU_PTOB(npg), sync_flags);
	if (pci_dvma_sync_before_unmap) {
		if (PCI_DMA_CANRELOC(mp))
			pci_fdvma_unregister_callbacks(pci_p, fdvma_p, mp,
			    index);
		fdvma_p->kvbase[index] = NULL;
		iommu_unmap_pages(pci_p->pci_iommu_p, dvma_pg, npg);
	}
	fdvma_p->pagecnt[index] = 0;
}

/*ARGSUSED*/
static void
pci_fdvma_sync(ddi_dma_handle_t h, uint_t index, uint_t sync_flags)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
	fdvma_t *fdvma_p = (fdvma_t *)mp->dmai_fdvma;
	pci_t *pci_p = (pci_t *)fdvma_p->softsp;
	size_t npg = fdvma_p->pagecnt[index];

	DEBUG5(DBG_FAST_DVMA, pci_p->pci_dip,
	    "sync index=%x flags=%x %x+%x+%x\n", index, sync_flags,
	    mp->dmai_mapping, IOMMU_PTOB(index), IOMMU_PTOB(npg));
	pci_dma_sync(pci_p->pci_dip, mp->dmai_rdip, h, IOMMU_PTOB(index),
	    IOMMU_PTOB(npg), sync_flags);
}

int
pci_fdvma_reserve(dev_info_t *dip, dev_info_t *rdip, pci_t *pci_p,
	ddi_dma_req_t *dmareq, ddi_dma_handle_t *handlep)
{
	fdvma_t *fdvma_p;
	dvma_addr_t dvma_pg;
	iommu_t *iommu_p = pci_p->pci_iommu_p;
	size_t npages;
	ddi_dma_impl_t *mp;
	ddi_dma_lim_t *lim_p = dmareq->dmar_limits;
	ulong_t hi = lim_p->dlim_addr_hi;
	ulong_t lo = lim_p->dlim_addr_lo;
	size_t counter_max = (lim_p->dlim_cntr_max + 1) & IOMMU_PAGE_MASK;

	if (pci_disable_fdvma)
		return (DDI_FAILURE);

	DEBUG2(DBG_DMA_CTL, dip, "DDI_DMA_RESERVE: rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	/*
	 * Check the limit structure.
	 */
	if ((lo >= hi) || (hi < iommu_p->iommu_dvma_base))
		return (DDI_DMA_BADLIMITS);

	/*
	 * Check the size of the request.
	 */
	npages = dmareq->dmar_object.dmao_size;
	if (npages > iommu_p->iommu_dvma_reserve)
		return (DDI_DMA_NORESOURCES);

	/*
	 * Allocate the dma handle.
	 */
	mp = kmem_zalloc(sizeof (pci_dma_hdl_t), KM_SLEEP);

	/*
	 * Get entries from dvma space map.
	 * (vmem_t *vmp,
	 *	size_t size, size_t align, size_t phase,
	 *	size_t nocross, void *minaddr, void *maxaddr, int vmflag)
	 */
	dvma_pg = IOMMU_BTOP((ulong_t)vmem_xalloc(iommu_p->iommu_dvma_map,
	    IOMMU_PTOB(npages), IOMMU_PAGE_SIZE, 0,
	    counter_max, (void *)lo, (void *)(hi + 1),
	    dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP));
	if (dvma_pg == 0) {
		kmem_free(mp, sizeof (pci_dma_hdl_t));
		return (DDI_DMA_NOMAPPING);
	}
	iommu_p->iommu_dvma_reserve -= npages;

	/*
	 * Create the fast dvma request structure.
	 */
	fdvma_p = kmem_alloc(sizeof (fdvma_t), KM_SLEEP);
	fdvma_p->kvbase = kmem_zalloc(npages * sizeof (caddr_t), KM_SLEEP);
	fdvma_p->pagecnt = kmem_zalloc(npages * sizeof (uint_t), KM_SLEEP);
	fdvma_p->ops = &fdvma_ops;
	fdvma_p->softsp = (caddr_t)pci_p;
	fdvma_p->sync_flag = NULL;

	/*
	 * Initialize the handle.
	 */
	mp->dmai_rdip = rdip;
	mp->dmai_rflags = DMP_BYPASSNEXUS |
	    pci_dma_consist_check(dmareq->dmar_flags, pci_p->pci_pbm_p);
	if (!(dmareq->dmar_flags & DDI_DMA_RDWR))
		mp->dmai_rflags |= DDI_DMA_READ;
	mp->dmai_flags = DMAI_FLAGS_INUSE |
	    (mp->dmai_rflags & DMP_NOSYNC ? DMAI_FLAGS_NOSYNC : 0);
	mp->dmai_minxfer = dmareq->dmar_limits->dlim_minxfer;
	mp->dmai_burstsizes = dmareq->dmar_limits->dlim_burstsizes;
	mp->dmai_mapping = IOMMU_PTOB(dvma_pg);
	mp->dmai_ndvmapages = npages;
	mp->dmai_size = npages * IOMMU_PAGE_SIZE;
	mp->dmai_nwin = 0;
	mp->dmai_fdvma = (caddr_t)fdvma_p;

	DEBUG4(DBG_DMA_CTL, dip,
	    "PCI_DVMA_RESERVE: mp=%p dvma=%x npages=%x private=%p\n",
	    mp, mp->dmai_mapping, npages, fdvma_p);
	*handlep = (ddi_dma_handle_t)mp;
	return (DDI_SUCCESS);
}

int
pci_fdvma_release(dev_info_t *dip, pci_t *pci_p, ddi_dma_impl_t *mp)
{
	iommu_t *iommu_p = pci_p->pci_iommu_p;
	size_t npages;
	fdvma_t *fdvma_p = (fdvma_t *)mp->dmai_fdvma;

	if (pci_disable_fdvma)
		return (DDI_FAILURE);

	/* validate fdvma handle */
	if (!(mp->dmai_rflags & DMP_BYPASSNEXUS)) {
		DEBUG0(DBG_DMA_CTL, dip, "DDI_DMA_RELEASE: not fast dma\n");
		return (DDI_FAILURE);
	}

	/* flush all reserved dvma addresses from iommu */
	pci_dma_sync_unmap(dip, mp->dmai_rdip, mp);

	npages = mp->dmai_ndvmapages;
	pci_vmem_free(iommu_p, mp, (void *)mp->dmai_mapping, npages);

	iommu_p->iommu_dvma_reserve += npages;
	mp->dmai_ndvmapages = 0;

	/* see if there is anyone waiting for dvma space */
	if (iommu_p->iommu_dvma_clid != 0) {
		DEBUG0(DBG_DMA_CTL, dip, "run dvma callback\n");
		ddi_run_callback(&iommu_p->iommu_dvma_clid);
	}

	/* free data structures */
	kmem_free(fdvma_p->kvbase, npages * sizeof (caddr_t));
	kmem_free(fdvma_p->pagecnt, npages * sizeof (uint_t));
	kmem_free(fdvma_p, sizeof (fdvma_t));
	kmem_free(mp, sizeof (pci_dma_hdl_t));

	/* see if there is anyone waiting for kmem */
	if (pci_kmem_clid != 0) {
		DEBUG0(DBG_DMA_CTL, dip, "run handle callback\n");
		ddi_run_callback(&pci_kmem_clid);
	}
	return (DDI_SUCCESS);
}

/*
 * fast dvma ops structure:
 */
static struct dvma_ops fdvma_ops = {
	DVMAO_REV,
	pci_fdvma_load,
	pci_fdvma_unload,
	pci_fdvma_sync
};
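
/*
 * Usage note (illustrative sketch only, not part of the original source):
 * the fdvma_ops vector above is reached through the generic sun4u fast DVMA
 * wrappers dvma_reserve(9F), dvma_kaddr_load(9F), dvma_sync(9F),
 * dvma_unload(9F) and dvma_release(9F).  A leaf driver might exercise this
 * path roughly as follows; the names xdip, xlim, buf, len and npages are
 * hypothetical driver state, not identifiers from this file.
 *
 *	ddi_dma_handle_t h;
 *	ddi_dma_cookie_t cookie;
 *
 *	if (dvma_reserve(xdip, &xlim, npages, &h) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	dvma_kaddr_load(h, buf, len, 0, &cookie);	map buf at index 0
 *	dvma_sync(h, 0, DDI_DMA_SYNC_FORDEV);		before device access
 *	... program the device with cookie.dmac_address / cookie.dmac_size ...
 *	dvma_unload(h, 0, DDI_DMA_SYNC_FORCPU);		tear down the mapping
 *	dvma_release(xdip, h, npages);
 */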