/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

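/*
 * Kernel "fast DVMA" interfaces.  A leaf driver reserves a block of DVMA
 * resources up front with dvma_reserve() and then loads, syncs, and
 * unloads kernel addresses against individual slots of that reservation.
 * When the nexus supports bypass (DMP_BYPASSNEXUS), the calls are handed
 * directly to the nexus dvma_ops vector; otherwise they are emulated
 * here with ordinary DDI DMA handles.
 */
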
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/cpu.h>
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/pte.h>
#include <sys/machsystm.h>
#include <sys/mmu.h>
#include <sys/dvma.h>
#include <sys/debug.h>

#define	HD	((ddi_dma_impl_t *)h)->dmai_rdip

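/*
 * Return the DVMA page size in use above 'dip', obtained from the parent
 * nexus via the DDI_CTLOPS_DVMAPAGESIZE bus ctlops call.
 */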
unsigned long
dvma_pagesize(dev_info_t *dip)
{
	auto unsigned long dvmapgsz;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_DVMAPAGESIZE,
	    NULL, (void *) &dvmapgsz);
	return (dvmapgsz);
}

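/*
 * Reserve 'pages' pages of DVMA space on behalf of 'dip', subject to the
 * limits in 'limp'.  The reservation is made through the nexus with the
 * DDI_DMA_RESERVE mctl.  If the nexus does not support bypass
 * (DMP_BYPASSNEXUS), stash a copy of the limits and an array of per-slot
 * DMA handles on the returned handle so the load/unload/sync routines
 * below can emulate fast DVMA with ordinary DDI DMA operations.
 */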
int
dvma_reserve(dev_info_t *dip, ddi_dma_lim_t *limp, uint_t pages,
    ddi_dma_handle_t *handlep)
{
	auto ddi_dma_lim_t dma_lim;
	auto ddi_dma_impl_t implhdl;
	struct ddi_dma_req dmareq;
	ddi_dma_handle_t reqhdl;
	ddi_dma_impl_t *mp;
	int ret;

	if (limp == (ddi_dma_lim_t *)0) {
		return (DDI_DMA_BADLIMITS);
	} else {
		dma_lim = *limp;
	}
	bzero(&dmareq, sizeof (dmareq));
	dmareq.dmar_fp = DDI_DMA_DONTWAIT;
	dmareq.dmar_flags = DDI_DMA_RDWR | DDI_DMA_STREAMING;
	dmareq.dmar_limits = &dma_lim;
	dmareq.dmar_object.dmao_size = pages;
	/*
	 * Pass in a dummy handle.  This avoids the problem of a nexus
	 * dereferencing the handle before checking the requested
	 * operation.  It can go away once handle allocation is
	 * separated from the actual operation.
	 */
	bzero((caddr_t)&implhdl, sizeof (ddi_dma_impl_t));
	reqhdl = (ddi_dma_handle_t)&implhdl;

	ret = ddi_dma_mctl(dip, dip, reqhdl, DDI_DMA_RESERVE, (off_t *)&dmareq,
	    0, (caddr_t *)handlep, 0);

	if (ret == DDI_SUCCESS) {
		mp = (ddi_dma_impl_t *)(*handlep);
		if (!(mp->dmai_rflags & DMP_BYPASSNEXUS)) {
			uint_t np = mp->dmai_ndvmapages;

			mp->dmai_mapping = (ulong_t)kmem_alloc(
			    sizeof (ddi_dma_lim_t), KM_SLEEP);
			bcopy((char *)&dma_lim, (char *)mp->dmai_mapping,
			    sizeof (ddi_dma_lim_t));
			mp->dmai_minfo = kmem_alloc(
			    np * sizeof (ddi_dma_handle_t), KM_SLEEP);
		}
	}
	return (ret);
}

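/*
 * Release a reservation made by dvma_reserve().  Free the emulation
 * state (saved limits and per-slot handle array) if the nexus bypass
 * path was not used, then hand the release back to the nexus via the
 * DDI_DMA_RELEASE mctl.
 */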
void
dvma_release(ddi_dma_handle_t h)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
	uint_t np = mp->dmai_ndvmapages;

	if (!(mp->dmai_rflags & DMP_BYPASSNEXUS)) {
		kmem_free((void *)mp->dmai_mapping, sizeof (ddi_dma_lim_t));
		kmem_free(mp->dmai_minfo, np * sizeof (ddi_dma_handle_t));
	}
	(void) ddi_dma_mctl(HD, HD, h, DDI_DMA_RELEASE, 0, 0, 0, 0);
}

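/*
 * Load the kernel virtual range [a, a + len) into DVMA slot 'index' and
 * return the resulting cookie in 'cp'.  On a bypass-capable nexus this
 * goes straight to the nexus dvma_kaddr_load entry point; otherwise a
 * DMA handle is allocated with attributes derived from the limits saved
 * at reserve time, bound to the address range, and remembered per slot.
 */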
void
dvma_kaddr_load(ddi_dma_handle_t h, caddr_t a, uint_t len, uint_t index,
    ddi_dma_cookie_t *cp)
{
	register ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
	struct fast_dvma *nexus_private;
	struct dvma_ops *nexus_funcptr;
	ddi_dma_attr_t dma_attr;
	uint_t ccnt;

	if (mp->dmai_rflags & DMP_BYPASSNEXUS) {
		nexus_private = (struct fast_dvma *)mp->dmai_nexus_private;
		nexus_funcptr = (struct dvma_ops *)nexus_private->ops;
		(void) (*nexus_funcptr->dvma_kaddr_load)(h, a, len, index, cp);
	} else {
		ddi_dma_handle_t handle;
		ddi_dma_lim_t *limp;

		limp = (ddi_dma_lim_t *)mp->dmai_mapping;
		dma_attr.dma_attr_version = DMA_ATTR_V0;
		dma_attr.dma_attr_addr_lo = limp->dlim_addr_lo;
		dma_attr.dma_attr_addr_hi = limp->dlim_addr_hi;
		dma_attr.dma_attr_count_max = limp->dlim_cntr_max;
		dma_attr.dma_attr_align = 1;
		dma_attr.dma_attr_burstsizes = limp->dlim_burstsizes;
		dma_attr.dma_attr_minxfer = limp->dlim_minxfer;
		dma_attr.dma_attr_maxxfer = 0xFFFFFFFFull;
		dma_attr.dma_attr_seg = 0xFFFFFFFFull;
		dma_attr.dma_attr_sgllen = 1;
		dma_attr.dma_attr_granular = 1;
		dma_attr.dma_attr_flags = 0;
		(void) ddi_dma_alloc_handle(HD, &dma_attr, DDI_DMA_SLEEP, NULL,
		    &handle);
		(void) ddi_dma_addr_bind_handle(handle, NULL, a, len,
		    DDI_DMA_RDWR, DDI_DMA_SLEEP, NULL, cp, &ccnt);
		((ddi_dma_handle_t *)mp->dmai_minfo)[index] = handle;
	}
}

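/*
 * Unload DVMA slot 'objindex'.  On a bypass-capable nexus this calls the
 * nexus dvma_unload entry point, passing 'type' through; otherwise the
 * per-slot DMA handle set up by dvma_kaddr_load() is unbound and freed,
 * and 'type' is unused.
 */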
/*ARGSUSED*/
void
dvma_unload(ddi_dma_handle_t h, uint_t objindex, uint_t type)
{
	register ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
	struct fast_dvma *nexus_private;
	struct dvma_ops *nexus_funcptr;

	if (mp->dmai_rflags & DMP_BYPASSNEXUS) {
		nexus_private = (struct fast_dvma *)mp->dmai_nexus_private;
		nexus_funcptr = (struct dvma_ops *)nexus_private->ops;
		(void) (*nexus_funcptr->dvma_unload)(h, objindex, type);
	} else {
		ddi_dma_handle_t handle;

		handle = ((ddi_dma_handle_t *)mp->dmai_minfo)[objindex];
		(void) ddi_dma_unbind_handle(handle);
		(void) ddi_dma_free_handle(&handle);
	}
}

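/*
 * Synchronize DVMA slot 'objindex' for the view given by 'type'.  On a
 * bypass-capable nexus this calls the nexus dvma_sync entry point;
 * otherwise the per-slot DMA handle is synced with ddi_dma_sync().
 */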
void
dvma_sync(ddi_dma_handle_t h, uint_t objindex, uint_t type)
{
	register ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
	struct fast_dvma *nexus_private;
	struct dvma_ops *nexus_funcptr;

	if (mp->dmai_rflags & DMP_BYPASSNEXUS) {
		nexus_private = (struct fast_dvma *)mp->dmai_nexus_private;
		nexus_funcptr = (struct dvma_ops *)nexus_private->ops;
		(void) (*nexus_funcptr->dvma_sync)(h, objindex, type);
	} else {
		ddi_dma_handle_t handle;

		handle = ((ddi_dma_handle_t *)mp->dmai_minfo)[objindex];
		(void) ddi_dma_sync(handle, 0, 0, type);
	}
}