xref: /illumos-gate/usr/src/uts/common/io/mlxcx/mlxcx_dma.c (revision aab20b47bd0a2879ccd534e4b5516c6af3f5a1d2)
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2020, The University of Queensland
 * Copyright (c) 2018, Joyent, Inc.
 */

/*
 * DMA allocation and tear down routines.
 */

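/*
 * A mlxcx_dma_buffer_t moves through a small set of states tracked by
 * mxdb_flags: the DMA handle is allocated (MLXCX_DMABUF_HDL_ALLOC), memory
 * is either allocated by us (MLXCX_DMABUF_MEM_ALLOC) or borrowed from an
 * mblk (MLXCX_DMABUF_FOREIGN), and then the handle is bound
 * (MLXCX_DMABUF_BOUND). Tear down happens in the reverse order.
 *
 * As a rough sketch (not lifted from any particular caller; variable names
 * here are purely illustrative), a buffer backed by memory we allocate
 * ourselves looks like this:
 *
 *	mlxcx_dma_buffer_t buf;
 *	ddi_dma_attr_t attr;
 *	ddi_device_acc_attr_t acc;
 *
 *	bzero(&buf, sizeof (buf));
 *	mlxcx_dma_queue_attr(mlxp, &attr);
 *	mlxcx_dma_acc_attr(mlxp, &acc);
 *	if (!mlxcx_dma_alloc(mlxp, &buf, &attr, &acc, B_TRUE, len, B_TRUE))
 *		return (B_FALSE);
 *	... hand buf.mxdb_va and the cookies to the hardware ...
 *	mlxcx_dma_free(&buf);
 *
 * Buffers that borrow their memory from an mblk instead use
 * mlxcx_dma_init() followed by mlxcx_dma_bind_mblk(), and are taken apart
 * again with mlxcx_dma_unbind() or mlxcx_dma_free().
 */
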
#include <mlxcx.h>

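/*
 * Fill in the device access attributes we use for DMA memory: no byte
 * swapping, strictly ordered access, and flag-style (DDI_FLAGERR_ACC) error
 * reporting when FM DMA error reporting is enabled.
 */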
void
mlxcx_dma_acc_attr(mlxcx_t *mlxp, ddi_device_acc_attr_t *accp)
{
	bzero(accp, sizeof (*accp));
	accp->devacc_attr_version = DDI_DEVICE_ATTR_V0;
	accp->devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
	accp->devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	if (DDI_FM_DMA_ERR_CAP(mlxp->mlx_fm_caps)) {
		accp->devacc_attr_access = DDI_FLAGERR_ACC;
	} else {
		accp->devacc_attr_access = DDI_DEFAULT_ACC;
	}
}

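/*
 * DMA attributes for single command (MLXCX_CMD_DMA_PAGE_SIZE) pages: each
 * allocation is exactly one page, page-aligned, and described by a single
 * cookie.
 */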
void
mlxcx_dma_page_attr(mlxcx_t *mlxp, ddi_dma_attr_t *attrp)
{
	bzero(attrp, sizeof (*attrp));
	attrp->dma_attr_version = DMA_ATTR_V0;

	/*
	 * This is a 64-bit PCIe device. We can use the entire address space.
	 */
	attrp->dma_attr_addr_lo = 0x0;
	attrp->dma_attr_addr_hi = UINT64_MAX;

	/*
	 * The count max indicates the total amount that can fit into one
	 * cookie. Because we're creating a single page for tracking purposes,
	 * this can be a page in size. The alignment and segment are related to
	 * this same requirement. The alignment needs to be page aligned and the
	 * segment is the boundary that this can't cross, aka a 4k page.
	 */
	attrp->dma_attr_count_max = MLXCX_CMD_DMA_PAGE_SIZE - 1;
	attrp->dma_attr_align = MLXCX_CMD_DMA_PAGE_SIZE;
	attrp->dma_attr_seg = MLXCX_CMD_DMA_PAGE_SIZE - 1;

	attrp->dma_attr_burstsizes = 0xfff;

	/*
	 * The minimum and maximum sizes that we can send. We cap the maximum
	 * based on how this memory is used: a single page.
	 */
	attrp->dma_attr_minxfer = 0x1;
	attrp->dma_attr_maxxfer = MLXCX_CMD_DMA_PAGE_SIZE;

	/*
	 * This is intended for static data structures, so we keep the
	 * transfer to a single cookie (one page).
	 */
	attrp->dma_attr_sgllen = 1;

	/*
	 * The granularity describes the addressing granularity; that is, the
	 * hardware can ask for chunks in units of this many bytes.
	 */
	attrp->dma_attr_granular = MLXCX_CMD_DMA_PAGE_SIZE;

	if (DDI_FM_DMA_ERR_CAP(mlxp->mlx_fm_caps)) {
		attrp->dma_attr_flags = DDI_DMA_FLAGERR;
	} else {
		attrp->dma_attr_flags = 0;
	}
}

/*
 * DMA attributes for queue memory (EQ, CQ, WQ etc)
 *
 * Queue memory has to be allocated in units of whole pages, but can be
 * multiple pages and doesn't have to be physically contiguous.
 */
void
mlxcx_dma_queue_attr(mlxcx_t *mlxp, ddi_dma_attr_t *attrp)
{
	bzero(attrp, sizeof (*attrp));
	attrp->dma_attr_version = DMA_ATTR_V0;

	/*
	 * This is a 64-bit PCIe device. We can use the entire address space.
	 */
	attrp->dma_attr_addr_lo = 0x0;
	attrp->dma_attr_addr_hi = UINT64_MAX;

	attrp->dma_attr_count_max = MLXCX_QUEUE_DMA_PAGE_SIZE - 1;

	attrp->dma_attr_align = MLXCX_QUEUE_DMA_PAGE_SIZE;

	attrp->dma_attr_burstsizes = 0xfff;

	/*
	 * The minimum and maximum sizes that we can send. Queue memory is
	 * always allocated in whole pages, so the minimum transfer is one page.
	 */
	attrp->dma_attr_minxfer = MLXCX_QUEUE_DMA_PAGE_SIZE;
	attrp->dma_attr_maxxfer = UINT32_MAX;

	attrp->dma_attr_seg = UINT64_MAX;

	attrp->dma_attr_granular = MLXCX_QUEUE_DMA_PAGE_SIZE;

	/* But we can have more than one. */
	attrp->dma_attr_sgllen = MLXCX_CREATE_QUEUE_MAX_PAGES;

	if (DDI_FM_DMA_ERR_CAP(mlxp->mlx_fm_caps)) {
		attrp->dma_attr_flags = DDI_DMA_FLAGERR;
	} else {
		attrp->dma_attr_flags = 0;
	}
}

/*
 * DMA attributes for packet buffers
 */
void
mlxcx_dma_buf_attr(mlxcx_t *mlxp, ddi_dma_attr_t *attrp)
{
	bzero(attrp, sizeof (*attrp));
	attrp->dma_attr_version = DMA_ATTR_V0;

	/*
	 * This is a 64-bit PCIe device. We can use the entire address space.
	 */
	attrp->dma_attr_addr_lo = 0x0;
	attrp->dma_attr_addr_hi = UINT64_MAX;

	/*
	 * Each scatter pointer has a 32-bit length field.
	 */
	attrp->dma_attr_count_max = UINT32_MAX;

	/*
	 * The PRM gives us no alignment requirements for scatter pointers,
	 * but it implies that units smaller than 16 bytes are a bad idea.
	 */
	attrp->dma_attr_align = 16;
	attrp->dma_attr_granular = 1;

	attrp->dma_attr_burstsizes = 0xfff;

	attrp->dma_attr_minxfer = 1;
	attrp->dma_attr_maxxfer = UINT64_MAX;

	attrp->dma_attr_seg = UINT64_MAX;

	/*
	 * We choose how many scatter pointers we're allowed per packet when
	 * we set the recv queue stride. This macro is from mlxcx_reg.h where
	 * we fix that for all of our receive queues.
	 */
	attrp->dma_attr_sgllen = MLXCX_RECVQ_MAX_PTRS;

	if (DDI_FM_DMA_ERR_CAP(mlxp->mlx_fm_caps)) {
		attrp->dma_attr_flags = DDI_DMA_FLAGERR;
	} else {
		attrp->dma_attr_flags = 0;
	}
}

/*
 * DMA attributes for queue doorbells
 */
void
mlxcx_dma_qdbell_attr(mlxcx_t *mlxp, ddi_dma_attr_t *attrp)
{
	bzero(attrp, sizeof (*attrp));
	attrp->dma_attr_version = DMA_ATTR_V0;

	/*
	 * This is a 64-bit PCIe device. We can use the entire address space.
	 */
	attrp->dma_attr_addr_lo = 0x0;
	attrp->dma_attr_addr_hi = UINT64_MAX;

	/*
	 * Queue doorbells are always exactly 16 bytes in length, but
	 * the ddi_dma functions don't like such small values of count_max.
	 *
	 * We tell some lies here.
	 */
	attrp->dma_attr_count_max = MLXCX_QUEUE_DMA_PAGE_SIZE - 1;
	attrp->dma_attr_align = 8;
	attrp->dma_attr_burstsizes = 0x8;
	attrp->dma_attr_minxfer = 1;
	attrp->dma_attr_maxxfer = UINT16_MAX;
	attrp->dma_attr_seg = MLXCX_QUEUE_DMA_PAGE_SIZE - 1;
	attrp->dma_attr_granular = 1;
	attrp->dma_attr_sgllen = 1;

	if (DDI_FM_DMA_ERR_CAP(mlxp->mlx_fm_caps)) {
		attrp->dma_attr_flags = DDI_DMA_FLAGERR;
	} else {
		attrp->dma_attr_flags = 0;
	}
}

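/*
 * Fully tear down a DMA buffer, in the reverse order of setup: unbind the
 * handle if it was bound, release any memory we allocated (memory borrowed
 * from an mblk is left for its owner to free), and finally free the DMA
 * handle itself. On return the buffer is back to its zeroed state.
 */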
void
mlxcx_dma_free(mlxcx_dma_buffer_t *mxdb)
{
	int ret;

	if (mxdb->mxdb_flags & MLXCX_DMABUF_BOUND) {
		VERIFY(mxdb->mxdb_dma_handle != NULL);
		ret = ddi_dma_unbind_handle(mxdb->mxdb_dma_handle);
		VERIFY3S(ret, ==, DDI_SUCCESS);
		mxdb->mxdb_flags &= ~MLXCX_DMABUF_BOUND;
		mxdb->mxdb_ncookies = 0;
	}

	if (mxdb->mxdb_flags & MLXCX_DMABUF_MEM_ALLOC) {
		ddi_dma_mem_free(&mxdb->mxdb_acc_handle);
		mxdb->mxdb_acc_handle = NULL;
		mxdb->mxdb_va = NULL;
		mxdb->mxdb_len = 0;
		mxdb->mxdb_flags &= ~MLXCX_DMABUF_MEM_ALLOC;
	}

	if (mxdb->mxdb_flags & MLXCX_DMABUF_FOREIGN) {
		/* The mblk will be freed separately */
		mxdb->mxdb_va = NULL;
		mxdb->mxdb_len = 0;
		mxdb->mxdb_flags &= ~MLXCX_DMABUF_FOREIGN;
	}

	if (mxdb->mxdb_flags & MLXCX_DMABUF_HDL_ALLOC) {
		ddi_dma_free_handle(&mxdb->mxdb_dma_handle);
		mxdb->mxdb_dma_handle = NULL;
		mxdb->mxdb_flags &= ~MLXCX_DMABUF_HDL_ALLOC;
	}

	ASSERT3U(mxdb->mxdb_flags, ==, 0);
	ASSERT3P(mxdb->mxdb_dma_handle, ==, NULL);
	ASSERT3P(mxdb->mxdb_va, ==, NULL);
	ASSERT3U(mxdb->mxdb_len, ==, 0);
	ASSERT3U(mxdb->mxdb_ncookies, ==, 0);
}

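/*
 * Unbind a buffer from the device while keeping the DMA handle allocated,
 * so that it can be bound again later. Memory borrowed from an mblk is
 * simply forgotten here; the mblk itself is freed by its owner.
 */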
void
mlxcx_dma_unbind(mlxcx_t *mlxp, mlxcx_dma_buffer_t *mxdb)
{
	int ret;

	ASSERT(mxdb->mxdb_flags & MLXCX_DMABUF_HDL_ALLOC);
	ASSERT(mxdb->mxdb_flags & MLXCX_DMABUF_BOUND);

	if (mxdb->mxdb_flags & MLXCX_DMABUF_FOREIGN) {
		/* The mblk will be freed separately */
		mxdb->mxdb_va = NULL;
		mxdb->mxdb_len = 0;
		mxdb->mxdb_flags &= ~MLXCX_DMABUF_FOREIGN;
	}

	ret = ddi_dma_unbind_handle(mxdb->mxdb_dma_handle);
	VERIFY3S(ret, ==, DDI_SUCCESS);
	mxdb->mxdb_flags &= ~MLXCX_DMABUF_BOUND;
	mxdb->mxdb_ncookies = 0;
}

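/*
 * Allocate just the DMA handle for a buffer, with no backing memory and no
 * binding yet. This is typically followed by mlxcx_dma_bind_mblk() for
 * memory that is owned elsewhere (e.g. an mblk).
 */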
boolean_t
mlxcx_dma_init(mlxcx_t *mlxp, mlxcx_dma_buffer_t *mxdb,
    ddi_dma_attr_t *attrp, boolean_t wait)
{
	int ret;
	int (*memcb)(caddr_t);

	if (wait == B_TRUE) {
		memcb = DDI_DMA_SLEEP;
	} else {
		memcb = DDI_DMA_DONTWAIT;
	}

	ASSERT3S(mxdb->mxdb_flags, ==, 0);

	ret = ddi_dma_alloc_handle(mlxp->mlx_dip, attrp, memcb, NULL,
	    &mxdb->mxdb_dma_handle);
	if (ret != 0) {
		mlxcx_warn(mlxp, "!failed to allocate DMA handle: %d", ret);
		mxdb->mxdb_dma_handle = NULL;
		return (B_FALSE);
	}
	mxdb->mxdb_flags |= MLXCX_DMABUF_HDL_ALLOC;

	return (B_TRUE);
}

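/*
 * Bind an mblk's data to an already-initialised DMA buffer, starting at
 * the given offset into the mblk's first data block and running to the end
 * of that block. The binding is for memory-to-device transfer
 * (DDI_DMA_WRITE) and the mblk remains owned by the caller. A very rough
 * sketch of the pairing with mlxcx_dma_init() (mp being the caller's mblk;
 * all names here are illustrative only):
 *
 *	mlxcx_dma_buffer_t buf;
 *	ddi_dma_attr_t attr;
 *
 *	bzero(&buf, sizeof (buf));
 *	mlxcx_dma_buf_attr(mlxp, &attr);
 *	if (!mlxcx_dma_init(mlxp, &buf, &attr, B_FALSE))
 *		return (B_FALSE);
 *	if (!mlxcx_dma_bind_mblk(mlxp, &buf, mp, 0, B_FALSE)) {
 *		mlxcx_dma_free(&buf);
 *		return (B_FALSE);
 *	}
 *	... pass the cookies to the hardware, then later ...
 *	mlxcx_dma_unbind(mlxp, &buf);
 */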
boolean_t
mlxcx_dma_bind_mblk(mlxcx_t *mlxp, mlxcx_dma_buffer_t *mxdb,
    const mblk_t *mp, size_t off, boolean_t wait)
{
	int ret;
	uint_t flags = DDI_DMA_STREAMING;
	int (*memcb)(caddr_t);

	if (wait == B_TRUE) {
		memcb = DDI_DMA_SLEEP;
	} else {
		memcb = DDI_DMA_DONTWAIT;
	}

	ASSERT(mxdb->mxdb_flags & MLXCX_DMABUF_HDL_ALLOC);
	ASSERT0(mxdb->mxdb_flags &
	    (MLXCX_DMABUF_FOREIGN | MLXCX_DMABUF_MEM_ALLOC));
	ASSERT0(mxdb->mxdb_flags & MLXCX_DMABUF_BOUND);

	ASSERT3U(off, <=, MBLKL(mp));
	mxdb->mxdb_va = (caddr_t)(mp->b_rptr + off);
	mxdb->mxdb_len = MBLKL(mp) - off;
	mxdb->mxdb_flags |= MLXCX_DMABUF_FOREIGN;

	ret = ddi_dma_addr_bind_handle(mxdb->mxdb_dma_handle, NULL,
	    mxdb->mxdb_va, mxdb->mxdb_len, DDI_DMA_WRITE | flags, memcb, NULL,
	    NULL, NULL);
	if (ret != DDI_DMA_MAPPED) {
		mxdb->mxdb_va = NULL;
		mxdb->mxdb_len = 0;
		mxdb->mxdb_flags &= ~MLXCX_DMABUF_FOREIGN;
		return (B_FALSE);
	}
	mxdb->mxdb_flags |= MLXCX_DMABUF_BOUND;
	mxdb->mxdb_ncookies = ddi_dma_ncookies(mxdb->mxdb_dma_handle);

	return (B_TRUE);
}

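/*
 * Allocate a DMA handle and size bytes of DMA memory (optionally zeroed),
 * then bind it and record the resulting cookie count. On any failure the
 * partially constructed buffer is torn down again with mlxcx_dma_free().
 */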
boolean_t
mlxcx_dma_alloc(mlxcx_t *mlxp, mlxcx_dma_buffer_t *mxdb,
    ddi_dma_attr_t *attrp, ddi_device_acc_attr_t *accp, boolean_t zero,
    size_t size, boolean_t wait)
{
	int ret;
	uint_t flags = DDI_DMA_CONSISTENT;
	size_t len;
	int (*memcb)(caddr_t);

	if (wait == B_TRUE) {
		memcb = DDI_DMA_SLEEP;
	} else {
		memcb = DDI_DMA_DONTWAIT;
	}

	ASSERT3U(mxdb->mxdb_flags, ==, 0);

	ret = ddi_dma_alloc_handle(mlxp->mlx_dip, attrp, memcb, NULL,
	    &mxdb->mxdb_dma_handle);
	if (ret != 0) {
		mlxcx_warn(mlxp, "!failed to allocate DMA handle: %d", ret);
		mxdb->mxdb_dma_handle = NULL;
		return (B_FALSE);
	}
	mxdb->mxdb_flags |= MLXCX_DMABUF_HDL_ALLOC;

	ret = ddi_dma_mem_alloc(mxdb->mxdb_dma_handle, size, accp, flags, memcb,
	    NULL, &mxdb->mxdb_va, &len, &mxdb->mxdb_acc_handle);
	if (ret != DDI_SUCCESS) {
		mlxcx_warn(mlxp, "!failed to allocate DMA memory: %d", ret);
		mxdb->mxdb_va = NULL;
		mxdb->mxdb_acc_handle = NULL;
		mlxcx_dma_free(mxdb);
		return (B_FALSE);
	}
	mxdb->mxdb_len = size;
	mxdb->mxdb_flags |= MLXCX_DMABUF_MEM_ALLOC;

	if (zero == B_TRUE)
		bzero(mxdb->mxdb_va, len);

	ret = ddi_dma_addr_bind_handle(mxdb->mxdb_dma_handle, NULL,
	    mxdb->mxdb_va, len, DDI_DMA_RDWR | flags, memcb, NULL, NULL,
	    NULL);
	if (ret != 0) {
		mlxcx_warn(mlxp, "!failed to bind DMA memory: %d", ret);
		mlxcx_dma_free(mxdb);
		return (B_FALSE);
	}
	mxdb->mxdb_flags |= MLXCX_DMABUF_BOUND;
	mxdb->mxdb_ncookies = ddi_dma_ncookies(mxdb->mxdb_dma_handle);

	return (B_TRUE);
}

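/*
 * As mlxcx_dma_alloc(), but allocates size + offset bytes and then binds
 * only the region starting offset bytes into the allocation, so that the
 * usable buffer (mxdb_va, mxdb_len) begins at that offset. This variant
 * uses a streaming rather than consistent mapping.
 */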
boolean_t
mlxcx_dma_alloc_offset(mlxcx_t *mlxp, mlxcx_dma_buffer_t *mxdb,
    ddi_dma_attr_t *attrp, ddi_device_acc_attr_t *accp, boolean_t zero,
    size_t size, size_t offset, boolean_t wait)
{
	int ret;
	uint_t flags = DDI_DMA_STREAMING;
	size_t len;
	int (*memcb)(caddr_t);

	if (wait == B_TRUE) {
		memcb = DDI_DMA_SLEEP;
	} else {
		memcb = DDI_DMA_DONTWAIT;
	}

	ASSERT3U(mxdb->mxdb_flags, ==, 0);

	ret = ddi_dma_alloc_handle(mlxp->mlx_dip, attrp, memcb, NULL,
	    &mxdb->mxdb_dma_handle);
	if (ret != 0) {
		mlxcx_warn(mlxp, "!failed to allocate DMA handle: %d", ret);
		mxdb->mxdb_dma_handle = NULL;
		return (B_FALSE);
	}
	mxdb->mxdb_flags |= MLXCX_DMABUF_HDL_ALLOC;

	ret = ddi_dma_mem_alloc(mxdb->mxdb_dma_handle, size + offset, accp,
	    flags, memcb, NULL, &mxdb->mxdb_va, &len, &mxdb->mxdb_acc_handle);
	if (ret != DDI_SUCCESS) {
		mlxcx_warn(mlxp, "!failed to allocate DMA memory: %d", ret);
		mxdb->mxdb_va = NULL;
		mxdb->mxdb_acc_handle = NULL;
		mlxcx_dma_free(mxdb);
		return (B_FALSE);
	}

	if (zero == B_TRUE)
		bzero(mxdb->mxdb_va, len);

	mxdb->mxdb_va += offset;
	len -= offset;
	mxdb->mxdb_len = len;
	mxdb->mxdb_flags |= MLXCX_DMABUF_MEM_ALLOC;

	ret = ddi_dma_addr_bind_handle(mxdb->mxdb_dma_handle, NULL,
	    mxdb->mxdb_va, len, DDI_DMA_RDWR | flags, memcb, NULL, NULL,
	    NULL);
	if (ret != 0) {
		mlxcx_warn(mlxp, "!failed to bind DMA memory: %d", ret);
		mlxcx_dma_free(mxdb);
		return (B_FALSE);
	}
	mxdb->mxdb_flags |= MLXCX_DMABUF_BOUND;
	mxdb->mxdb_ncookies = ddi_dma_ncookies(mxdb->mxdb_dma_handle);

	return (B_TRUE);
}