/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/cpu.h>
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/pte.h>
#include <sys/machsystm.h>
#include <sys/mmu.h>
#include <sys/dvma.h>
#include <sys/debug.h>

#define	HD	((ddi_dma_impl_t *)h)->dmai_rdip

unsigned long
dvma_pagesize(dev_info_t *dip)
{
	auto unsigned long dvmapgsz;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_DVMAPAGESIZE,
	    NULL, (void *) &dvmapgsz);
	return (dvmapgsz);
}

int
dvma_reserve(dev_info_t *dip, ddi_dma_lim_t *limp, uint_t pages,
    ddi_dma_handle_t *handlep)
{
	auto ddi_dma_lim_t dma_lim;
	auto ddi_dma_impl_t implhdl;
	struct ddi_dma_req dmareq;
	ddi_dma_handle_t reqhdl;
	ddi_dma_impl_t *mp;
	int ret;

	if (limp == (ddi_dma_lim_t *)0) {
		return (DDI_DMA_BADLIMITS);
	} else {
		dma_lim = *limp;
	}
	bzero(&dmareq, sizeof (dmareq));
	dmareq.dmar_fp = DDI_DMA_DONTWAIT;
	dmareq.dmar_flags = DDI_DMA_RDWR | DDI_DMA_STREAMING;
	dmareq.dmar_limits = &dma_lim;
	dmareq.dmar_object.dmao_size = pages;
	/*
	 * Pass in a dummy handle.  This avoids the problem when
	 * somebody is dereferencing the handle before checking
	 * the operation.  This can be avoided once we separate
	 * handle allocation and actual operation.
	 */
	bzero((caddr_t)&implhdl, sizeof (ddi_dma_impl_t));
	reqhdl = (ddi_dma_handle_t)&implhdl;

	ret = ddi_dma_mctl(dip, dip, reqhdl, DDI_DMA_RESERVE, (off_t *)&dmareq,
	    0, (caddr_t *)handlep, 0);

	if (ret == DDI_SUCCESS) {
		mp = (ddi_dma_impl_t *)(*handlep);
		if (!(mp->dmai_rflags & DMP_BYPASSNEXUS)) {
			uint_t np = mp->dmai_ndvmapages;

			mp->dmai_mapping = (ulong_t)kmem_alloc(
			    sizeof (ddi_dma_lim_t), KM_SLEEP);
			bcopy((char *)&dma_lim, (char *)mp->dmai_mapping,
			    sizeof (ddi_dma_lim_t));
			mp->dmai_minfo = kmem_alloc(
			    np * sizeof (ddi_dma_handle_t), KM_SLEEP);
		}
	}
	return (ret);
}

void
dvma_release(ddi_dma_handle_t h)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
	uint_t np = mp->dmai_ndvmapages;

	if (!(mp->dmai_rflags & DMP_BYPASSNEXUS)) {
		kmem_free((void *)mp->dmai_mapping, sizeof (ddi_dma_lim_t));
		kmem_free(mp->dmai_minfo, np * sizeof (ddi_dma_handle_t));
	}
	(void) ddi_dma_mctl(HD, HD, h, DDI_DMA_RELEASE, 0, 0, 0, 0);
}

void
dvma_kaddr_load(ddi_dma_handle_t h, caddr_t a, uint_t len, uint_t index,
    ddi_dma_cookie_t *cp)
{
	register ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
	struct fast_dvma *nexus_private;
	struct dvma_ops *nexus_funcptr;

	if (mp->dmai_rflags & DMP_BYPASSNEXUS) {
		nexus_private = (struct fast_dvma *)mp->dmai_nexus_private;
		nexus_funcptr = (struct dvma_ops *)nexus_private->ops;
		(void) (*nexus_funcptr->dvma_kaddr_load)(h, a, len, index, cp);
	} else {
		ddi_dma_handle_t handle;
		ddi_dma_lim_t *limp;

		limp = (ddi_dma_lim_t *)mp->dmai_mapping;
		(void) ddi_dma_addr_setup(HD, NULL, a, len, DDI_DMA_RDWR,
		    DDI_DMA_SLEEP, NULL, limp, &handle);
		((ddi_dma_handle_t *)mp->dmai_minfo)[index] = handle;
		(void) ddi_dma_htoc(handle, 0, cp);
	}
}

/*ARGSUSED*/
void
dvma_unload(ddi_dma_handle_t h, uint_t objindex, uint_t type)
{
	register ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
	struct fast_dvma *nexus_private;
	struct dvma_ops *nexus_funcptr;

	if (mp->dmai_rflags & DMP_BYPASSNEXUS) {
		nexus_private = (struct fast_dvma *)mp->dmai_nexus_private;
		nexus_funcptr = (struct dvma_ops *)nexus_private->ops;
		(void) (*nexus_funcptr->dvma_unload)(h, objindex, type);
	} else {
		ddi_dma_handle_t handle;

		handle = ((ddi_dma_handle_t *)mp->dmai_minfo)[objindex];
		(void) ddi_dma_free(handle);
	}
}

void
dvma_sync(ddi_dma_handle_t h, uint_t objindex, uint_t type)
{
	register ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
	struct fast_dvma *nexus_private;
	struct dvma_ops *nexus_funcptr;

	if (mp->dmai_rflags & DMP_BYPASSNEXUS) {
		nexus_private = (struct fast_dvma *)mp->dmai_nexus_private;
		nexus_funcptr = (struct dvma_ops *)nexus_private->ops;
		(void) (*nexus_funcptr->dvma_sync)(h, objindex, type);
	} else {
		ddi_dma_handle_t handle;

		handle = ((ddi_dma_handle_t *)mp->dmai_minfo)[objindex];
		(void) ddi_dma_sync(handle, 0, 0, type);
	}
}
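
/*
 * Illustrative sketch (not part of the original file): a leaf driver
 * typically uses this fast DVMA interface roughly as follows.  The
 * xx_dip, xx_kaddr, xx_len names and the page count are hypothetical.
 *
 *	ddi_dma_lim_t lim;		(filled in with the device's limits)
 *	ddi_dma_handle_t h;
 *	ddi_dma_cookie_t cookie;
 *
 *	At attach time, reserve a range of DVMA pages:
 *		if (dvma_reserve(xx_dip, &lim, 32, &h) != DDI_SUCCESS)
 *			return (DDI_FAILURE);
 *
 *	Per transfer, bind a kernel address into slot 0 and program the
 *	device from cookie.dmac_address:
 *		dvma_kaddr_load(h, xx_kaddr, xx_len, 0, &cookie);
 *		...start the DMA and wait for completion...
 *		dvma_sync(h, 0, DDI_DMA_SYNC_FORCPU);
 *		dvma_unload(h, 0, DDI_DMA_SYNC_FORCPU);
 *
 *	At detach time, release the reserved resources:
 *		dvma_release(h);
 */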