/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2019 Joyent, Inc.
 * Copyright 2022 Oxide Computer Company
 */

/*
 * VIRTIO FRAMEWORK: DMA ROUTINES
 *
 * For design and usage documentation, see the comments in "virtio.h".
 */

#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/modctl.h>
#include <sys/autoconf.h>
#include <sys/ddi_impldefs.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/avintr.h>
#include <sys/spl.h>
#include <sys/promif.h>
#include <sys/list.h>
#include <sys/bootconf.h>
#include <sys/bootsvcs.h>
#include <sys/sysmacros.h>
#include <sys/pci.h>

#include "virtio.h"
#include "virtio_impl.h"

typedef int (dma_wait_t)(caddr_t);

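/*
 * Translate a kmem allocation flag (KM_SLEEP, KM_NOSLEEP, or
 * KM_NOSLEEP_LAZY) into the equivalent DDI DMA callback value
 * (DDI_DMA_SLEEP or DDI_DMA_DONTWAIT) for use with the DDI DMA
 * allocation and bind routines.
 */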
static dma_wait_t *
virtio_dma_wait_from_kmflags(int kmflags)
{
	switch (kmflags) {
	case KM_SLEEP:
		return (DDI_DMA_SLEEP);
	case KM_NOSLEEP:
	case KM_NOSLEEP_LAZY:
		return (DDI_DMA_DONTWAIT);
	default:
		panic("unexpected kmflags value 0x%x", kmflags);
	}
}

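/*
 * Synchronise the entire DMA allocation for the requested direction; "flag"
 * is a ddi_dma_sync(9F) type value such as DDI_DMA_SYNC_FORDEV or
 * DDI_DMA_SYNC_FORCPU.
 */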
void
virtio_dma_sync(virtio_dma_t *vidma, int flag)
{
	VERIFY0(ddi_dma_sync(vidma->vidma_dma_handle, 0, 0, flag));
}

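/*
 * Accessors for the memory tracked by a virtio_dma_t: the number of DMA
 * cookies, the total mapping size, the kernel virtual address at a given
 * offset, and the physical address and size of an individual cookie.
 */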
uint_t
virtio_dma_ncookies(virtio_dma_t *vidma)
{
	return (vidma->vidma_dma_ncookies);
}

size_t
virtio_dma_size(virtio_dma_t *vidma)
{
	return (vidma->vidma_size);
}

void *
virtio_dma_va(virtio_dma_t *vidma, size_t offset)
{
	VERIFY3U(offset, <, vidma->vidma_size);

	return (vidma->vidma_va + offset);
}

uint64_t
virtio_dma_cookie_pa(virtio_dma_t *vidma, uint_t cookie)
{
	VERIFY3U(cookie, <, vidma->vidma_dma_ncookies);

	return (vidma->vidma_dma_cookies[cookie].dmac_laddress);
}

size_t
virtio_dma_cookie_size(virtio_dma_t *vidma, uint_t cookie)
{
	VERIFY3U(cookie, <, vidma->vidma_dma_ncookies);

	return (vidma->vidma_dma_cookies[cookie].dmac_size);
}

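/*
 * Allocate a DMA handle for an unused virtio_dma_t tracking object without
 * allocating any backing memory.  This is used by virtio_dma_init(), and by
 * virtio_dma_alloc_nomem() for callers that will later bind their own memory
 * with virtio_dma_bind().
 */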
int
virtio_dma_init_handle(virtio_t *vio, virtio_dma_t *vidma,
    const ddi_dma_attr_t *attr, int kmflags)
{
	int r;
	dev_info_t *dip = vio->vio_dip;
	int (*dma_wait)(caddr_t) = virtio_dma_wait_from_kmflags(kmflags);

	vidma->vidma_virtio = vio;

	/*
	 * Ensure we don't try to allocate a second time using the same
	 * tracking object.
	 */
	VERIFY0(vidma->vidma_level);

	if ((r = ddi_dma_alloc_handle(dip, (ddi_dma_attr_t *)attr, dma_wait,
	    NULL, &vidma->vidma_dma_handle)) != DDI_SUCCESS) {
		dev_err(dip, CE_WARN, "DMA handle allocation failed (%x)", r);
		goto fail;
	}
	vidma->vidma_level |= VIRTIO_DMALEVEL_HANDLE_ALLOC;

	return (DDI_SUCCESS);

fail:
	virtio_dma_fini(vidma);
	return (DDI_FAILURE);
}

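/*
 * Fully initialise a virtio_dma_t: allocate a DMA handle, allocate and zero
 * backing memory of at least "sz" bytes, and bind that memory so that the
 * cookie array is populated.  On failure, any partially constructed state is
 * torn down via virtio_dma_fini().
 */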
int
virtio_dma_init(virtio_t *vio, virtio_dma_t *vidma, size_t sz,
    const ddi_dma_attr_t *attr, int dmaflags, int kmflags)
{
	int r;
	dev_info_t *dip = vio->vio_dip;
	caddr_t va = NULL;
	int (*dma_wait)(caddr_t) = virtio_dma_wait_from_kmflags(kmflags);

	if (virtio_dma_init_handle(vio, vidma, attr, kmflags) !=
	    DDI_SUCCESS) {
		goto fail;
	}

	if ((r = ddi_dma_mem_alloc(vidma->vidma_dma_handle, sz,
	    &virtio_acc_attr,
	    dmaflags & (DDI_DMA_STREAMING | DDI_DMA_CONSISTENT),
	    dma_wait, NULL, &va, &vidma->vidma_real_size,
	    &vidma->vidma_acc_handle)) != DDI_SUCCESS) {
		dev_err(dip, CE_WARN, "DMA memory allocation failed (%x)", r);
		goto fail;
	}
	vidma->vidma_level |= VIRTIO_DMALEVEL_MEMORY_ALLOC;

	/*
	 * Zero the memory to avoid accidental exposure of arbitrary kernel
	 * memory.
	 */
	bzero(va, vidma->vidma_real_size);

	if (virtio_dma_bind(vidma, va, sz, dmaflags, kmflags) != DDI_SUCCESS) {
		goto fail;
	}

	return (DDI_SUCCESS);

fail:
	virtio_dma_fini(vidma);
	return (DDI_FAILURE);
}

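/*
 * Bind a virtual address range to the previously allocated DMA handle and
 * build the array of DMA cookies that describes the mapping.  The handle
 * must already be allocated and must not currently be bound.
 */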
int
virtio_dma_bind(virtio_dma_t *vidma, void *va, size_t sz, int dmaflags,
    int kmflags)
{
	int r;
	dev_info_t *dip = vidma->vidma_virtio->vio_dip;
	ddi_dma_cookie_t dmac;
	int (*dma_wait)(caddr_t) = virtio_dma_wait_from_kmflags(kmflags);

	VERIFY(vidma->vidma_level & VIRTIO_DMALEVEL_HANDLE_ALLOC);
	VERIFY(!(vidma->vidma_level & VIRTIO_DMALEVEL_HANDLE_BOUND));

	vidma->vidma_va = va;
	vidma->vidma_size = sz;

	if ((r = ddi_dma_addr_bind_handle(vidma->vidma_dma_handle, NULL,
	    vidma->vidma_va, vidma->vidma_size, dmaflags, dma_wait, NULL,
	    &dmac, &vidma->vidma_dma_ncookies)) != DDI_DMA_MAPPED) {
		VERIFY3S(r, !=, DDI_DMA_PARTIAL_MAP);
		dev_err(dip, CE_WARN, "DMA handle bind failed (%x)", r);
		goto fail;
	}
	vidma->vidma_level |= VIRTIO_DMALEVEL_HANDLE_BOUND;

	if ((vidma->vidma_dma_cookies = kmem_alloc(
	    vidma->vidma_dma_ncookies * sizeof (ddi_dma_cookie_t),
	    kmflags)) == NULL) {
		dev_err(dip, CE_WARN, "DMA cookie array allocation failure");
		goto fail;
	}
	vidma->vidma_level |= VIRTIO_DMALEVEL_COOKIE_ARRAY;

	vidma->vidma_dma_cookies[0] = dmac;
	for (uint_t n = 1; n < vidma->vidma_dma_ncookies; n++) {
		ddi_dma_nextcookie(vidma->vidma_dma_handle,
		    &vidma->vidma_dma_cookies[n]);
	}

	return (DDI_SUCCESS);

fail:
	virtio_dma_unbind(vidma);
	return (DDI_FAILURE);
}

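/*
 * Allocate a virtio_dma_t tracking object along with DMA memory of at least
 * "sz" bytes, bound and ready for use.  Returns NULL on failure.  The object
 * is released with virtio_dma_free().
 */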
virtio_dma_t *
virtio_dma_alloc(virtio_t *vio, size_t sz, const ddi_dma_attr_t *attr,
    int dmaflags, int kmflags)
{
	virtio_dma_t *vidma;

	if ((vidma = kmem_zalloc(sizeof (*vidma), kmflags)) == NULL) {
		return (NULL);
	}

	if (virtio_dma_init(vio, vidma, sz, attr, dmaflags, kmflags) !=
	    DDI_SUCCESS) {
		kmem_free(vidma, sizeof (*vidma));
		return (NULL);
	}

	return (vidma);
}

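/*
 * Allocate a virtio_dma_t tracking object with a DMA handle but no backing
 * memory; the caller binds its own memory later with virtio_dma_bind().
 * Returns NULL on failure.
 */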
virtio_dma_t *
virtio_dma_alloc_nomem(virtio_t *vio, const ddi_dma_attr_t *attr, int kmflags)
{
	virtio_dma_t *vidma;

	if ((vidma = kmem_zalloc(sizeof (*vidma), kmflags)) == NULL) {
		return (NULL);
	}

	if (virtio_dma_init_handle(vio, vidma, attr, kmflags) != DDI_SUCCESS) {
		kmem_free(vidma, sizeof (*vidma));
		return (NULL);
	}

	return (vidma);
}

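/*
 * Tear down whatever has been constructed in a virtio_dma_t (binding,
 * memory, and handle) based on the level flags, returning the tracking
 * object to its zeroed state.
 */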
void
virtio_dma_fini(virtio_dma_t *vidma)
{
	virtio_dma_unbind(vidma);

	if (vidma->vidma_level & VIRTIO_DMALEVEL_MEMORY_ALLOC) {
		ddi_dma_mem_free(&vidma->vidma_acc_handle);

		vidma->vidma_level &= ~VIRTIO_DMALEVEL_MEMORY_ALLOC;
	}

	if (vidma->vidma_level & VIRTIO_DMALEVEL_HANDLE_ALLOC) {
		ddi_dma_free_handle(&vidma->vidma_dma_handle);

		vidma->vidma_level &= ~VIRTIO_DMALEVEL_HANDLE_ALLOC;
	}

	VERIFY0(vidma->vidma_level);
	bzero(vidma, sizeof (*vidma));
}

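/*
 * Free the cookie array and unbind the DMA handle if it is currently bound,
 * leaving the handle itself (and any allocated memory) intact.
 */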
void
virtio_dma_unbind(virtio_dma_t *vidma)
{
	if (vidma->vidma_level & VIRTIO_DMALEVEL_COOKIE_ARRAY) {
		kmem_free(vidma->vidma_dma_cookies,
		    vidma->vidma_dma_ncookies * sizeof (ddi_dma_cookie_t));

		vidma->vidma_level &= ~VIRTIO_DMALEVEL_COOKIE_ARRAY;
	}

	if (vidma->vidma_level & VIRTIO_DMALEVEL_HANDLE_BOUND) {
		VERIFY3U(ddi_dma_unbind_handle(vidma->vidma_dma_handle), ==,
		    DDI_SUCCESS);

		vidma->vidma_level &= ~VIRTIO_DMALEVEL_HANDLE_BOUND;
	}

	vidma->vidma_va = 0;
	vidma->vidma_size = 0;
}

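/*
 * Tear down and free a virtio_dma_t allocated with virtio_dma_alloc() or
 * virtio_dma_alloc_nomem().
 */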
void
virtio_dma_free(virtio_dma_t *vidma)
{
	virtio_dma_fini(vidma);
	kmem_free(vidma, sizeof (*vidma));
}