1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 */
25
26 /*
27 * sun4v LDC Link Layer Shared Memory Routines
28 */
29 #include <sys/types.h>
30 #include <sys/kmem.h>
31 #include <sys/cmn_err.h>
32 #include <sys/ksynch.h>
33 #include <sys/debug.h>
34 #include <sys/cyclic.h>
35 #include <sys/machsystm.h>
36 #include <sys/vm.h>
37 #include <sys/machcpuvar.h>
38 #include <sys/mmu.h>
39 #include <sys/pte.h>
40 #include <vm/hat.h>
41 #include <vm/as.h>
42 #include <vm/hat_sfmmu.h>
43 #include <sys/vm_machparam.h>
44 #include <vm/seg_kmem.h>
45 #include <vm/seg_kpm.h>
46 #include <sys/hypervisor_api.h>
47 #include <sys/ldc.h>
48 #include <sys/ldc_impl.h>
49
50 /* LDC variables used by shared memory routines */
51 extern ldc_soft_state_t *ldcssp;
52 extern int ldc_max_retries;
53 extern clock_t ldc_delay;
54
55 #ifdef DEBUG
56 extern int ldcdbg;
57 #endif
58
59 /* LDC internal functions used by shared memory routines */
60 extern void i_ldc_reset(ldc_chan_t *ldcp, boolean_t force_reset);
61 extern int i_ldc_h2v_error(int h_error);
62
63 #ifdef DEBUG
64 extern void ldcdebug(int64_t id, const char *fmt, ...);
65 #endif
66
67 /* Memory synchronization internal functions */
68 static int i_ldc_mem_acquire_release(ldc_mem_handle_t mhandle,
69 uint8_t direction, uint64_t offset, size_t size);
70 static int i_ldc_dring_acquire_release(ldc_dring_handle_t dhandle,
71 uint8_t direction, uint64_t start, uint64_t end);
72 static int i_ldc_mem_map(ldc_mem_handle_t mhandle, ldc_mem_cookie_t *cookie,
73 uint32_t ccount, uint8_t mtype, uint8_t perm, caddr_t *vaddr,
74 caddr_t *raddr);
75 static int i_ldc_mem_bind_handle(ldc_mem_handle_t mhandle, caddr_t vaddr,
76 size_t len, uint8_t mtype, uint8_t perm, ldc_mem_cookie_t *cookie,
77 uint32_t *ccount);
78
79 /*
80 * The LDC framework supports mapping a remote domain's memory
81 * either directly or via shadow memory pages. Direct map is
82 * permitted when 'ldc_shmem_enabled' is non-zero (the default);
83 * setting it to 0 forces all mappings to use the shadow copy path.
84 */
85 int ldc_shmem_enabled = 1;
86
87 /*
88 * Use of directly mapped shared memory for LDC descriptor
89 * rings is permitted if this variable is non-zero.
90 */
91 int ldc_dring_shmem_enabled = 1;
92
93 /*
94 * The major and minor versions required to use directly
95 * mapped shared memory for LDC descriptor rings. The
96 * ldc_dring_shmem_hv_force variable, if set to a non-zero
97 * value, overrides the hypervisor API version check.
98 */
99 static int ldc_dring_shmem_hv_major = 1;
100 static int ldc_dring_shmem_hv_minor = 1;
101 static int ldc_dring_shmem_hv_force = 0;
102
103 /*
104 * The results of the hypervisor service group API check.
105 * A non-zero value indicates the HV includes support for
106 * descriptor ring shared memory.
107 */
108 static int ldc_dring_shmem_hv_ok = 0;
109
110 /*
111 * Pages exported for remote access over each channel are
112 * maintained in a table registered with the Hypervisor.
113 * The default number of entries in the table is set to
114 * 'ldc_maptable_entries' (LDC_MTBL_ENTRIES).
115 */
116 uint64_t ldc_maptable_entries = LDC_MTBL_ENTRIES;
117
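/*
 * Cookie layout (derived from the macro below and its use in
 * ldc_mem_bind_handle): the low 'pg_shift' bits carry the intra-page
 * offset (OR'ed in by the caller), the bits above that carry the map
 * table index, and the bits at LDC_COOKIE_PGSZC_SHIFT carry the page
 * size code.
 */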
118 #define IDX2COOKIE(idx, pg_szc, pg_shift) \
119 (((pg_szc) << LDC_COOKIE_PGSZC_SHIFT) | ((idx) << (pg_shift)))
120
121 /*
122 * Pages imported over each channel are maintained in a global (per-guest)
123 * mapin table. Starting with HV LDC API version 1.2, HV supports APIs to
124 * obtain information about the total size of the memory that can be direct
125 * mapped through this mapin table. The minimum size of the mapin area that we
126 * expect is defined below.
127 */
128 #define GIGABYTE ((uint64_t)(1 << 30))
129 uint64_t ldc_mapin_size_min = GIGABYTE;
130
131 /* HV LDC API version that supports mapin size info */
132 #define LDC_MAPIN_VER_MAJOR 1
133 #define LDC_MAPIN_VER_MINOR 2
134
135 /*
136 * Sets ldc_dring_shmem_hv_ok to a non-zero value if the HV LDC
137 * API version supports directly mapped shared memory or if it has
138 * been explicitly enabled via ldc_dring_shmem_hv_force.
139 */
140 void
141 i_ldc_mem_set_hsvc_vers(uint64_t major, uint64_t minor)
142 {
143 if ((major == ldc_dring_shmem_hv_major &&
144 minor >= ldc_dring_shmem_hv_minor) ||
145 (major > ldc_dring_shmem_hv_major) ||
146 (ldc_dring_shmem_hv_force != 0)) {
147 ldc_dring_shmem_hv_ok = 1;
148 }
149 }
150
151 /*
152 * Initialize the mapin table.
153 */
154 void
155 i_ldc_init_mapin(ldc_soft_state_t *ldcssp, uint64_t major, uint64_t minor)
156 {
157 int rv;
158 uint64_t sz;
159 uint64_t table_type = LDC_MAPIN_TYPE_REGULAR;
160
161 /* set mapin size to default. */
162 ldcssp->mapin_size = LDC_DIRECT_MAP_SIZE_DEFAULT;
163
164 /* Check if the HV supports mapin size API. */
165 if ((major == LDC_MAPIN_VER_MAJOR &&
166 minor < LDC_MAPIN_VER_MINOR) ||
167 (major < LDC_MAPIN_VER_MAJOR)) {
168 /* Older version of HV. */
169 return;
170 }
171
172 /* Get info about the mapin size supported by HV */
173 rv = hv_ldc_mapin_size_max(table_type, &sz);
174 if (rv != 0) {
175 cmn_err(CE_NOTE, "Failed to get mapin information\n");
176 return;
177 }
178
179 /* Save the table size */
180 ldcssp->mapin_size = sz;
181
182 D1(DBG_ALL_LDCS, "%s: mapin_size read from HV is (0x%llx)\n",
183 __func__, sz);
184 }
185
186 /*
187 * Allocate a memory handle for the channel and link it into the list.
188 * The channel's memory map table is chosen later, when the first
189 * handle is bound to this channel (see i_ldc_mem_bind_handle).
190 */
191 int
192 ldc_mem_alloc_handle(ldc_handle_t handle, ldc_mem_handle_t *mhandle)
193 {
194 ldc_chan_t *ldcp;
195 ldc_mhdl_t *mhdl;
196
197 if (handle == NULL) {
198 DWARN(DBG_ALL_LDCS,
199 "ldc_mem_alloc_handle: invalid channel handle\n");
200 return (EINVAL);
201 }
202 ldcp = (ldc_chan_t *)handle;
203
204 mutex_enter(&ldcp->lock);
205
206 /* check to see if channel is initialized */
207 if ((ldcp->tstate & ~TS_IN_RESET) < TS_INIT) {
208 DWARN(ldcp->id,
209 "ldc_mem_alloc_handle: (0x%llx) channel not initialized\n",
210 ldcp->id);
211 mutex_exit(&ldcp->lock);
212 return (EINVAL);
213 }
214
215 /* allocate handle for channel */
216 mhdl = kmem_cache_alloc(ldcssp->memhdl_cache, KM_SLEEP);
217
218 /* initialize the lock */
219 mutex_init(&mhdl->lock, NULL, MUTEX_DRIVER, NULL);
220
221 mhdl->myshadow = B_FALSE;
222 mhdl->memseg = NULL;
223 mhdl->ldcp = ldcp;
224 mhdl->status = LDC_UNBOUND;
225
226 /* insert memory handle (@ head) into list */
227 if (ldcp->mhdl_list == NULL) {
228 ldcp->mhdl_list = mhdl;
229 mhdl->next = NULL;
230 } else {
231 /* insert @ head */
232 mhdl->next = ldcp->mhdl_list;
233 ldcp->mhdl_list = mhdl;
234 }
235
236 /* return the handle */
237 *mhandle = (ldc_mem_handle_t)mhdl;
238
239 mutex_exit(&ldcp->lock);
240
241 D1(ldcp->id, "ldc_mem_alloc_handle: (0x%llx) allocated handle 0x%llx\n",
242 ldcp->id, mhdl);
243
244 return (0);
245 }
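/*
 * Illustrative exporter-side sequence (a sketch, not driver code; the
 * channel handle 'chan', buffer 'buf' and cookie array are hypothetical):
 *
 *	ldc_mem_handle_t mh;
 *	ldc_mem_cookie_t cookies[MAX_COOKIES];
 *	uint32_t ncookies;
 *	int i;
 *
 *	(void) ldc_mem_alloc_handle(chan, &mh);
 *	(void) ldc_mem_bind_handle(mh, buf, len, LDC_SHADOW_MAP,
 *	    LDC_MEM_RW, &cookies[0], &ncookies);
 *	for (i = 1; i < ncookies; i++)
 *		(void) ldc_mem_nextcookie(mh, &cookies[i]);
 *	... hand the cookies to the peer; when done ...
 *	(void) ldc_mem_unbind_handle(mh);
 *	(void) ldc_mem_free_handle(mh);
 */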
246
247 /*
248 * Free memory handle for the channel and unlink it from the list
249 */
250 int
251 ldc_mem_free_handle(ldc_mem_handle_t mhandle)
252 {
253 ldc_mhdl_t *mhdl, *phdl;
254 ldc_chan_t *ldcp;
255
256 if (mhandle == NULL) {
257 DWARN(DBG_ALL_LDCS,
258 "ldc_mem_free_handle: invalid memory handle\n");
259 return (EINVAL);
260 }
261 mhdl = (ldc_mhdl_t *)mhandle;
262
263 mutex_enter(&mhdl->lock);
264
265 ldcp = mhdl->ldcp;
266
267 if (mhdl->status == LDC_BOUND || mhdl->status == LDC_MAPPED) {
268 DWARN(ldcp->id,
269 "ldc_mem_free_handle: cannot free, 0x%llx hdl bound\n",
270 mhdl);
271 mutex_exit(&mhdl->lock);
272 return (EINVAL);
273 }
274 mutex_exit(&mhdl->lock);
275
276 mutex_enter(&ldcp->mlist_lock);
277
278 phdl = ldcp->mhdl_list;
279
280 /* first handle */
281 if (phdl == mhdl) {
282 ldcp->mhdl_list = mhdl->next;
283 mutex_destroy(&mhdl->lock);
284 kmem_cache_free(ldcssp->memhdl_cache, mhdl);
285
286 D1(ldcp->id,
287 "ldc_mem_free_handle: (0x%llx) freed handle 0x%llx\n",
288 ldcp->id, mhdl);
289 } else {
290 /* walk the list - unlink and free */
291 while (phdl != NULL) {
292 if (phdl->next == mhdl) {
293 phdl->next = mhdl->next;
294 mutex_destroy(&mhdl->lock);
295 kmem_cache_free(ldcssp->memhdl_cache, mhdl);
296 D1(ldcp->id,
297 "ldc_mem_free_handle: (0x%llx) freed "
298 "handle 0x%llx\n", ldcp->id, mhdl);
299 break;
300 }
301 phdl = phdl->next;
302 }
303 }
304
305 if (phdl == NULL) {
306 DWARN(ldcp->id,
307 "ldc_mem_free_handle: invalid handle 0x%llx\n", mhdl);
308 mutex_exit(&ldcp->mlist_lock);
309 return (EINVAL);
310 }
311
312 mutex_exit(&ldcp->mlist_lock);
313
314 return (0);
315 }
316
317 /*
318 * Bind a memory handle to a virtual address.
319 * The virtual address is converted to the corresponding real addresses.
320 * Returns a pointer to the first ldc_mem_cookie and the total number
321 * of cookies for this virtual address. Other cookies can be obtained
322 * using the ldc_mem_nextcookie() call. If the pages are stored in
323 * consecutive locations in the table, a single cookie corresponding to
324 * the first location is returned. The cookie size spans all the entries.
325 *
326 * If the VA corresponds to a page that is already being exported, reuse
327 * the page and do not export it again. Bump the page's use count.
328 */
329 int
330 ldc_mem_bind_handle(ldc_mem_handle_t mhandle, caddr_t vaddr, size_t len,
331 uint8_t mtype, uint8_t perm, ldc_mem_cookie_t *cookie, uint32_t *ccount)
332 {
333 /*
334 * Check if direct shared memory map is enabled, if not change
335 * the mapping type to SHADOW_MAP.
336 */
337 if (ldc_shmem_enabled == 0)
338 mtype = LDC_SHADOW_MAP;
339
340 return (i_ldc_mem_bind_handle(mhandle, vaddr, len, mtype, perm,
341 cookie, ccount));
342 }
343
344 static int
345 i_ldc_mem_bind_handle(ldc_mem_handle_t mhandle, caddr_t vaddr, size_t len,
346 uint8_t mtype, uint8_t perm, ldc_mem_cookie_t *cookie, uint32_t *ccount)
347 {
348 ldc_mhdl_t *mhdl;
349 ldc_chan_t *ldcp;
350 ldc_mtbl_t *mtbl;
351 ldc_memseg_t *memseg;
352 ldc_mte_t tmp_mte;
353 uint64_t index, prev_index = 0;
354 int64_t cookie_idx;
355 uintptr_t raddr, ra_aligned;
356 uint64_t psize, poffset, v_offset;
357 uint64_t pg_shift, pg_size, pg_size_code, pg_mask;
358 pgcnt_t npages;
359 caddr_t v_align, addr;
360 int i, rv;
361
362 if (mhandle == NULL) {
363 DWARN(DBG_ALL_LDCS,
364 "ldc_mem_bind_handle: invalid memory handle\n");
365 return (EINVAL);
366 }
367 mhdl = (ldc_mhdl_t *)mhandle;
368 ldcp = mhdl->ldcp;
369
370 /* clear count */
371 *ccount = 0;
372
373 mutex_enter(&mhdl->lock);
374
375 if (mhdl->status == LDC_BOUND || mhdl->memseg != NULL) {
376 DWARN(ldcp->id,
377 "ldc_mem_bind_handle: (0x%x) handle already bound\n",
378 mhandle);
379 mutex_exit(&mhdl->lock);
380 return (EINVAL);
381 }
382
383 /* Force address and size to be 8-byte aligned */
384 if ((((uintptr_t)vaddr | len) & 0x7) != 0) {
385 DWARN(ldcp->id,
386 "ldc_mem_bind_handle: addr/size is not 8-byte aligned\n");
387 mutex_exit(&mhdl->lock);
388 return (EINVAL);
389 }
390
391 mutex_enter(&ldcp->lock);
392
393 /*
394 * If this channel is binding a memory handle for the
395 * first time, allocate a memory map table for it and initialize it
396 */
397 if ((mtbl = ldcp->mtbl) == NULL) {
398
399 /* Allocate and initialize the map table structure */
400 mtbl = kmem_zalloc(sizeof (ldc_mtbl_t), KM_SLEEP);
401 mtbl->num_entries = mtbl->num_avail = ldc_maptable_entries;
402 mtbl->size = ldc_maptable_entries * sizeof (ldc_mte_slot_t);
403 mtbl->next_entry = NULL;
404 mtbl->contigmem = B_TRUE;
405
406 /* Allocate the table itself */
407 mtbl->table = (ldc_mte_slot_t *)
408 contig_mem_alloc_align(mtbl->size, MMU_PAGESIZE);
409 if (mtbl->table == NULL) {
410
411 /* allocate a page of memory using kmem_alloc */
412 mtbl->table = kmem_alloc(MMU_PAGESIZE, KM_SLEEP);
413 mtbl->size = MMU_PAGESIZE;
414 mtbl->contigmem = B_FALSE;
415 mtbl->num_entries = mtbl->num_avail =
416 mtbl->size / sizeof (ldc_mte_slot_t);
417 DWARN(ldcp->id,
418 "ldc_mem_bind_handle: (0x%llx) reduced tbl size "
419 "to %lx entries\n", ldcp->id, mtbl->num_entries);
420 }
421
422 /* zero out the memory */
423 bzero(mtbl->table, mtbl->size);
424
425 /* initialize the lock */
426 mutex_init(&mtbl->lock, NULL, MUTEX_DRIVER, NULL);
427
428 /* register table for this channel */
429 rv = hv_ldc_set_map_table(ldcp->id,
430 va_to_pa(mtbl->table), mtbl->num_entries);
431 if (rv != 0) {
432 DWARN(DBG_ALL_LDCS,
433 "ldc_mem_bind_handle: (0x%lx) err %d mapping tbl",
434 ldcp->id, rv);
435 if (mtbl->contigmem)
436 contig_mem_free(mtbl->table, mtbl->size);
437 else
438 kmem_free(mtbl->table, mtbl->size);
439 mutex_destroy(&mtbl->lock);
440 kmem_free(mtbl, sizeof (ldc_mtbl_t));
441 mutex_exit(&ldcp->lock);
442 mutex_exit(&mhdl->lock);
443 return (EIO);
444 }
445
446 ldcp->mtbl = mtbl;
447
448 D1(ldcp->id,
449 "ldc_mem_bind_handle: (0x%llx) alloc'd map table 0x%llx\n",
450 ldcp->id, ldcp->mtbl->table);
451 }
452
453 mutex_exit(&ldcp->lock);
454
455 /* FUTURE: get the page size, pgsz code, and shift */
456 pg_size = MMU_PAGESIZE;
457 pg_size_code = page_szc(pg_size);
458 pg_shift = page_get_shift(pg_size_code);
459 pg_mask = ~(pg_size - 1);
460
461 D1(ldcp->id, "ldc_mem_bind_handle: (0x%llx) binding "
462 "va 0x%llx pgsz=0x%llx, pgszc=0x%llx, pg_shift=0x%llx\n",
463 ldcp->id, vaddr, pg_size, pg_size_code, pg_shift);
464
465 /* aligned VA and its offset */
466 v_align = (caddr_t)(((uintptr_t)vaddr) & ~(pg_size - 1));
467 v_offset = ((uintptr_t)vaddr) & (pg_size - 1);
468
469 npages = (len+v_offset)/pg_size;
470 npages = ((len+v_offset)%pg_size == 0) ? npages : npages+1;
471
472 D1(ldcp->id, "ldc_mem_bind_handle: binding "
473 "(0x%llx) v=0x%llx,val=0x%llx,off=0x%x,pgs=0x%x\n",
474 ldcp->id, vaddr, v_align, v_offset, npages);
475
476 /* lock the memory table - exclusive access to channel */
477 mutex_enter(&mtbl->lock);
478
479 if (npages > mtbl->num_avail) {
480 D1(ldcp->id, "ldc_mem_bind_handle: (0x%llx) no table entries\n",
481 ldcp->id);
482 mutex_exit(&mtbl->lock);
483 mutex_exit(&mhdl->lock);
484 return (ENOMEM);
485 }
486
487 /* Allocate a memseg structure */
488 memseg = mhdl->memseg =
489 kmem_cache_alloc(ldcssp->memseg_cache, KM_SLEEP);
490
491 /* Allocate memory to store all pages and cookies */
492 memseg->pages = kmem_zalloc((sizeof (ldc_page_t) * npages), KM_SLEEP);
493 memseg->cookies =
494 kmem_zalloc((sizeof (ldc_mem_cookie_t) * npages), KM_SLEEP);
495
496 D2(ldcp->id, "ldc_mem_bind_handle: (0x%llx) processing 0x%llx pages\n",
497 ldcp->id, npages);
498
499 addr = v_align;
500
501 /*
502 * Table slots are used in a round-robin manner. The algorithm permits
503 * inserting duplicate entries. Slots allocated earlier will typically
504 * get freed before we get back to reusing the slot. Inserting duplicate
505 * entries should be OK as we only look up entries using the cookie addr,
506 * i.e. the tbl index, during export, unexport and copy operations.
507 *
508 * One implementation that was tried searched for a duplicate
509 * page entry first and reused it. The search overhead is very high and
510 * in the vnet case it dropped the perf by almost half, from 50 to 24 mbps.
511 * So it does make sense to avoid searching for duplicates.
512 *
513 * But during the process of searching for a free slot, if we find a
514 * duplicate entry we will go ahead and use it, and bump its use count.
515 */
516
517 /* index to start searching from */
518 index = mtbl->next_entry;
519 cookie_idx = -1;
520
521 tmp_mte.ll = 0;	/* initialize fields to 0 */
522
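/*
 * Each map type uses its own permission bit group in the MTE:
 * LDC_DIRECT_MAP uses mte_r/w/x, LDC_SHADOW_MAP uses mte_cr/cw and
 * LDC_IO_MAP uses mte_ir/iw.  The types are not mutually exclusive,
 * so more than one group may be set for a single binding.
 */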
523 if (mtype & LDC_DIRECT_MAP) {
524 tmp_mte.mte_r = (perm & LDC_MEM_R) ? 1 : 0;
525 tmp_mte.mte_w = (perm & LDC_MEM_W) ? 1 : 0;
526 tmp_mte.mte_x = (perm & LDC_MEM_X) ? 1 : 0;
527 }
528
529 if (mtype & LDC_SHADOW_MAP) {
530 tmp_mte.mte_cr = (perm & LDC_MEM_R) ? 1 : 0;
531 tmp_mte.mte_cw = (perm & LDC_MEM_W) ? 1 : 0;
532 }
533
534 if (mtype & LDC_IO_MAP) {
535 tmp_mte.mte_ir = (perm & LDC_MEM_R) ? 1 : 0;
536 tmp_mte.mte_iw = (perm & LDC_MEM_W) ? 1 : 0;
537 }
538
539 D1(ldcp->id, "ldc_mem_bind_handle mte=0x%llx\n", tmp_mte.ll);
540
541 tmp_mte.mte_pgszc = pg_size_code;
542
543 /* initialize each mem table entry */
544 for (i = 0; i < npages; i++) {
545
546 /* check if slot is available in the table */
547 while (mtbl->table[index].entry.ll != 0) {
548
549 index = (index + 1) % mtbl->num_entries;
550
551 if (index == mtbl->next_entry) {
552 /* we have looped around */
553 DWARN(DBG_ALL_LDCS,
554 "ldc_mem_bind_handle: (0x%llx) cannot find "
555 "entry\n", ldcp->id);
556 *ccount = 0;
557
558 /* NOTE: free memory, remove previous entries */
559 /* this shouldn't happen as num_avail was ok */
560
561 mutex_exit(&mtbl->lock);
562 mutex_exit(&mhdl->lock);
563 return (ENOMEM);
564 }
565 }
566
567 /* get the real address */
568 raddr = va_to_pa((void *)addr);
569 ra_aligned = ((uintptr_t)raddr & pg_mask);
570
571 /* build the mte */
572 tmp_mte.mte_rpfn = ra_aligned >> pg_shift;
573
574 D1(ldcp->id, "ldc_mem_bind_handle mte=0x%llx\n", tmp_mte.ll);
575
576 /* update entry in table */
577 mtbl->table[index].entry = tmp_mte;
578
579 D2(ldcp->id, "ldc_mem_bind_handle: (0x%llx) stored MTE 0x%llx"
580 " into loc 0x%llx\n", ldcp->id, tmp_mte.ll, index);
581
582 /* calculate the size and offset for this export range */
583 if (i == 0) {
584 /* first page */
585 psize = min((pg_size - v_offset), len);
586 poffset = v_offset;
587
588 } else if (i == (npages - 1)) {
589 /* last page */
590 psize = (((uintptr_t)(vaddr + len)) &
591 ((uint64_t)(pg_size-1)));
592 if (psize == 0)
593 psize = pg_size;
594 poffset = 0;
595
596 } else {
597 /* middle pages */
598 psize = pg_size;
599 poffset = 0;
600 }
601
602 /* store entry for this page */
603 memseg->pages[i].index = index;
604 memseg->pages[i].raddr = raddr;
605 memseg->pages[i].mte = &(mtbl->table[index]);
606
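/*
 * Consecutive table slots are coalesced into a single cookie: if this
 * slot immediately follows the previous one, grow the current cookie
 * by this page's size instead of starting a new cookie.
 */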
607 /* create the cookie */
608 if (i == 0 || (index != prev_index + 1)) {
609 cookie_idx++;
610 memseg->cookies[cookie_idx].addr =
611 IDX2COOKIE(index, pg_size_code, pg_shift);
612 memseg->cookies[cookie_idx].addr |= poffset;
613 memseg->cookies[cookie_idx].size = psize;
614
615 } else {
616 memseg->cookies[cookie_idx].size += psize;
617 }
618
619 D1(ldcp->id, "ldc_mem_bind_handle: bound "
620 "(0x%llx) va=0x%llx, idx=0x%llx, "
621 "ra=0x%llx(sz=0x%x,off=0x%x)\n",
622 ldcp->id, addr, index, raddr, psize, poffset);
623
624 /* decrement number of available entries */
625 mtbl->num_avail--;
626
627 /* increment va by page size */
628 addr += pg_size;
629
630 /* increment index */
631 prev_index = index;
632 index = (index + 1) % mtbl->num_entries;
633
634 /* save the next slot */
635 mtbl->next_entry = index;
636 }
637
638 mutex_exit(&mtbl->lock);
639
640 /* memory handle = bound */
641 mhdl->mtype = mtype;
642 mhdl->perm = perm;
643 mhdl->status = LDC_BOUND;
644
645 /* update memseg_t */
646 memseg->vaddr = vaddr;
647 memseg->raddr = memseg->pages[0].raddr;
648 memseg->size = len;
649 memseg->npages = npages;
650 memseg->ncookies = cookie_idx + 1;
651 memseg->next_cookie = (memseg->ncookies > 1) ? 1 : 0;
652
653 /* return count and first cookie */
654 *ccount = memseg->ncookies;
655 cookie->addr = memseg->cookies[0].addr;
656 cookie->size = memseg->cookies[0].size;
657
658 D1(ldcp->id,
659 "ldc_mem_bind_handle: (0x%llx) bound 0x%llx, va=0x%llx, "
660 "pgs=0x%llx cookies=0x%llx\n",
661 ldcp->id, mhdl, vaddr, npages, memseg->ncookies);
662
663 mutex_exit(&mhdl->lock);
664 return (0);
665 }
666
667 /*
668 * Return the next cookie associated with the specified memory handle
669 */
670 int
671 ldc_mem_nextcookie(ldc_mem_handle_t mhandle, ldc_mem_cookie_t *cookie)
672 {
673 ldc_mhdl_t *mhdl;
674 ldc_chan_t *ldcp;
675 ldc_memseg_t *memseg;
676
677 if (mhandle == NULL) {
678 DWARN(DBG_ALL_LDCS,
679 "ldc_mem_nextcookie: invalid memory handle\n");
680 return (EINVAL);
681 }
682 mhdl = (ldc_mhdl_t *)mhandle;
683
684 mutex_enter(&mhdl->lock);
685
686 ldcp = mhdl->ldcp;
687 memseg = mhdl->memseg;
688
689 if (cookie == 0) {
690 DWARN(ldcp->id,
691 "ldc_mem_nextcookie:(0x%llx) invalid cookie arg\n",
692 ldcp->id);
693 mutex_exit(&mhdl->lock);
694 return (EINVAL);
695 }
696
697 if (memseg->next_cookie != 0) {
698 cookie->addr = memseg->cookies[memseg->next_cookie].addr;
699 cookie->size = memseg->cookies[memseg->next_cookie].size;
700 memseg->next_cookie++;
701 if (memseg->next_cookie == memseg->ncookies)
702 memseg->next_cookie = 0;
703
704 } else {
705 DWARN(ldcp->id,
706 "ldc_mem_nextcookie:(0x%llx) no more cookies\n", ldcp->id);
707 cookie->addr = 0;
708 cookie->size = 0;
709 mutex_exit(&mhdl->lock);
710 return (EINVAL);
711 }
712
713 D1(ldcp->id,
714 "ldc_mem_nextcookie: (0x%llx) cookie addr=0x%llx,sz=0x%llx\n",
715 ldcp->id, cookie->addr, cookie->size);
716
717 mutex_exit(&mhdl->lock);
718 return (0);
719 }
720
721 /*
722 * Unbind the virtual memory region associated with the specified
723 * memory handle. All associated cookies are freed and the corresponding
724 * RA space is no longer exported.
725 */
726 int
727 ldc_mem_unbind_handle(ldc_mem_handle_t mhandle)
728 {
729 ldc_mhdl_t *mhdl;
730 ldc_chan_t *ldcp;
731 ldc_mtbl_t *mtbl;
732 ldc_memseg_t *memseg;
733 uint64_t cookie_addr;
734 uint64_t pg_shift, pg_size_code;
735 int i, rv, retries;
736
737 if (mhandle == NULL) {
738 DWARN(DBG_ALL_LDCS,
739 "ldc_mem_unbind_handle: invalid memory handle\n");
740 return (EINVAL);
741 }
742 mhdl = (ldc_mhdl_t *)mhandle;
743
744 mutex_enter(&mhdl->lock);
745
746 if (mhdl->status == LDC_UNBOUND) {
747 DWARN(DBG_ALL_LDCS,
748 "ldc_mem_unbind_handle: (0x%x) handle is not bound\n",
749 mhandle);
750 mutex_exit(&mhdl->lock);
751 return (EINVAL);
752 }
753
754 ldcp = mhdl->ldcp;
755 mtbl = ldcp->mtbl;
756
757 memseg = mhdl->memseg;
758
759 /* lock the memory table - exclusive access to channel */
760 mutex_enter(&mtbl->lock);
761
762 /* undo the pages exported */
763 for (i = 0; i < memseg->npages; i++) {
764
765 /* clear the entry from the table */
766 memseg->pages[i].mte->entry.ll = 0;
767
768 /* check for mapped pages, revocation cookie != 0 */
769 if (memseg->pages[i].mte->cookie) {
770
771 pg_size_code = page_szc(MMU_PAGESIZE);
772 pg_shift = page_get_shift(pg_size_code);
773 cookie_addr = IDX2COOKIE(memseg->pages[i].index,
774 pg_size_code, pg_shift);
775
776 D1(ldcp->id, "ldc_mem_unbind_handle: (0x%llx) revoke "
777 "cookie 0x%llx, rcookie 0x%llx\n", ldcp->id,
778 cookie_addr, memseg->pages[i].mte->cookie);
779
780 retries = 0;
781 do {
782 rv = hv_ldc_revoke(ldcp->id, cookie_addr,
783 memseg->pages[i].mte->cookie);
784
785 if (rv != H_EWOULDBLOCK)
786 break;
787
788 drv_usecwait(ldc_delay);
789
790 } while (retries++ < ldc_max_retries);
791
792 if (rv) {
793 DWARN(ldcp->id,
794 "ldc_mem_unbind_handle: (0x%llx) cannot "
795 "revoke mapping, cookie %llx\n", ldcp->id,
796 cookie_addr);
797 }
798 }
799
800 mtbl->num_avail++;
801 }
802 mutex_exit(&mtbl->lock);
803
804 /* free the allocated memseg and page structures */
805 kmem_free(memseg->pages, (sizeof (ldc_page_t) * memseg->npages));
806 kmem_free(memseg->cookies,
807 (sizeof (ldc_mem_cookie_t) * memseg->npages));
808 kmem_cache_free(ldcssp->memseg_cache, memseg);
809
810 /* uninitialize the memory handle */
811 mhdl->memseg = NULL;
812 mhdl->status = LDC_UNBOUND;
813
814 D1(ldcp->id, "ldc_mem_unbind_handle: (0x%llx) unbound handle 0x%llx\n",
815 ldcp->id, mhdl);
816
817 mutex_exit(&mhdl->lock);
818 return (0);
819 }
820
821 /*
822 * Get information about a memory handle. The base VA and RA of the
823 * bound/mapped memory, along with the map type and permissions, are returned.
824 */
825 int
826 ldc_mem_info(ldc_mem_handle_t mhandle, ldc_mem_info_t *minfo)
827 {
828 ldc_mhdl_t *mhdl;
829
830 if (mhandle == NULL) {
831 DWARN(DBG_ALL_LDCS, "ldc_mem_info: invalid memory handle\n");
832 return (EINVAL);
833 }
834 mhdl = (ldc_mhdl_t *)mhandle;
835
836 if (minfo == NULL) {
837 DWARN(DBG_ALL_LDCS, "ldc_mem_info: invalid args\n");
838 return (EINVAL);
839 }
840
841 mutex_enter(&mhdl->lock);
842
843 minfo->status = mhdl->status;
844 if (mhdl->status == LDC_BOUND || mhdl->status == LDC_MAPPED) {
845 minfo->vaddr = mhdl->memseg->vaddr;
846 minfo->raddr = mhdl->memseg->raddr;
847 minfo->mtype = mhdl->mtype;
848 minfo->perm = mhdl->perm;
849 }
850 mutex_exit(&mhdl->lock);
851
852 return (0);
853 }
854
855 /*
856 * Copy data between the client-specified virtual address space and the
857 * exported memory associated with the cookies.
858 * The direction argument determines whether the data is read from or
859 * written to exported memory.
860 */
861 int
862 ldc_mem_copy(ldc_handle_t handle, caddr_t vaddr, uint64_t off, size_t *size,
863 ldc_mem_cookie_t *cookies, uint32_t ccount, uint8_t direction)
864 {
865 ldc_chan_t *ldcp;
866 uint64_t local_voff, local_valign;
867 uint64_t cookie_addr, cookie_size;
868 uint64_t pg_shift, pg_size, pg_size_code;
869 uint64_t export_caddr, export_poff, export_psize, export_size;
870 uint64_t local_ra, local_poff, local_psize;
871 uint64_t copy_size, copied_len = 0, total_bal = 0, idx = 0;
872 pgcnt_t npages;
873 size_t len = *size;
874 int i, rv = 0;
875
876 uint64_t chid;
877
878 if (handle == NULL) {
879 DWARN(DBG_ALL_LDCS, "ldc_mem_copy: invalid channel handle\n");
880 return (EINVAL);
881 }
882 ldcp = (ldc_chan_t *)handle;
883 chid = ldcp->id;
884
885 /* check to see if channel is UP */
886 if (ldcp->tstate != TS_UP) {
887 DWARN(chid, "ldc_mem_copy: (0x%llx) channel is not UP\n",
888 chid);
889 return (ECONNRESET);
890 }
891
892 /* Force address and size to be 8-byte aligned */
893 if ((((uintptr_t)vaddr | len) & 0x7) != 0) {
894 DWARN(chid,
895 "ldc_mem_copy: addr/sz is not 8-byte aligned\n");
896 return (EINVAL);
897 }
898
899 /* Find the size of the exported memory */
900 export_size = 0;
901 for (i = 0; i < ccount; i++)
902 export_size += cookies[i].size;
903
904 /* check to see if offset is valid */
905 if (off > export_size) {
906 DWARN(chid,
907 "ldc_mem_copy: (0x%llx) start offset > export mem size\n",
908 chid);
909 return (EINVAL);
910 }
911
912 /*
913 * Check to see if the export size is smaller than the size we
914 * are requesting to copy - if so flag an error
915 */
916 if ((export_size - off) < *size) {
917 DWARN(chid,
918 "ldc_mem_copy: (0x%llx) copy size > export mem size\n",
919 chid);
920 return (EINVAL);
921 }
922
923 total_bal = min(export_size, *size);
924
925 /* FUTURE: get the page size, pgsz code, and shift */
926 pg_size = MMU_PAGESIZE;
927 pg_size_code = page_szc(pg_size);
928 pg_shift = page_get_shift(pg_size_code);
929
930 D1(chid, "ldc_mem_copy: copying data "
931 "(0x%llx) va 0x%llx pgsz=0x%llx, pgszc=0x%llx, pg_shift=0x%llx\n",
932 chid, vaddr, pg_size, pg_size_code, pg_shift);
933
934 /* aligned VA and its offset */
935 local_valign = (((uintptr_t)vaddr) & ~(pg_size - 1));
936 local_voff = ((uintptr_t)vaddr) & (pg_size - 1);
937
938 npages = (len+local_voff)/pg_size;
939 npages = ((len+local_voff)%pg_size == 0) ? npages : npages+1;
940
941 D1(chid,
942 "ldc_mem_copy: (0x%llx) v=0x%llx,val=0x%llx,off=0x%x,pgs=0x%x\n",
943 chid, vaddr, local_valign, local_voff, npages);
944
945 local_ra = va_to_pa((void *)local_valign);
946 local_poff = local_voff;
947 local_psize = min(len, (pg_size - local_voff));
948
949 len -= local_psize;
950
951 /*
952 * find the first cookie in the list of cookies
953 * if the offset passed in is not zero
954 */
955 for (idx = 0; idx < ccount; idx++) {
956 cookie_size = cookies[idx].size;
957 if (off < cookie_size)
958 break;
959 off -= cookie_size;
960 }
961
962 cookie_addr = cookies[idx].addr + off;
963 cookie_size = cookies[idx].size - off;
964
965 export_caddr = cookie_addr & ~(pg_size - 1);
966 export_poff = cookie_addr & (pg_size - 1);
967 export_psize = min(cookie_size, (pg_size - export_poff));
968
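/*
 * Walk the exported cookie range and the local VA range in parallel,
 * one page-sized chunk at a time, issuing a hv_ldc_copy() for each
 * overlapping piece until 'total_bal' bytes have been transferred.
 */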
969 for (;;) {
970
971 copy_size = min(export_psize, local_psize);
972
973 D1(chid,
974 "ldc_mem_copy:(0x%llx) dir=0x%x, caddr=0x%llx,"
975 " loc_ra=0x%llx, exp_poff=0x%llx, loc_poff=0x%llx,"
976 " exp_psz=0x%llx, loc_psz=0x%llx, copy_sz=0x%llx,"
977 " total_bal=0x%llx\n",
978 chid, direction, export_caddr, local_ra, export_poff,
979 local_poff, export_psize, local_psize, copy_size,
980 total_bal);
981
982 rv = hv_ldc_copy(chid, direction,
983 (export_caddr + export_poff), (local_ra + local_poff),
984 copy_size, &copied_len);
985
986 if (rv != 0) {
987 int error = EIO;
988 uint64_t rx_hd, rx_tl;
989
990 DWARN(chid,
991 "ldc_mem_copy: (0x%llx) err %d during copy\n",
992 (unsigned long long)chid, rv);
993 DWARN(chid,
994 "ldc_mem_copy: (0x%llx) dir=0x%x, caddr=0x%lx, "
995 "loc_ra=0x%lx, exp_poff=0x%lx, loc_poff=0x%lx,"
996 " exp_psz=0x%lx, loc_psz=0x%lx, copy_sz=0x%lx,"
997 " copied_len=0x%lx, total_bal=0x%lx\n",
998 chid, direction, export_caddr, local_ra,
999 export_poff, local_poff, export_psize, local_psize,
1000 copy_size, copied_len, total_bal);
1001
1002 *size = *size - total_bal;
1003
1004 /*
1005 * check if reason for copy error was due to
1006 * a channel reset. we need to grab the lock
1007 * just in case we have to do a reset.
1008 */
1009 mutex_enter(&ldcp->lock);
1010 mutex_enter(&ldcp->tx_lock);
1011
1012 rv = hv_ldc_rx_get_state(ldcp->id,
1013 &rx_hd, &rx_tl, &(ldcp->link_state));
1014 if (ldcp->link_state == LDC_CHANNEL_DOWN ||
1015 ldcp->link_state == LDC_CHANNEL_RESET) {
1016 i_ldc_reset(ldcp, B_FALSE);
1017 error = ECONNRESET;
1018 }
1019
1020 mutex_exit(&ldcp->tx_lock);
1021 mutex_exit(&ldcp->lock);
1022
1023 return (error);
1024 }
1025
1026 ASSERT(copied_len <= copy_size);
1027
1028 D2(chid, "ldc_mem_copy: copied=0x%llx\n", copied_len);
1029 export_poff += copied_len;
1030 local_poff += copied_len;
1031 export_psize -= copied_len;
1032 local_psize -= copied_len;
1033 cookie_size -= copied_len;
1034
1035 total_bal -= copied_len;
1036
1037 if (copy_size != copied_len)
1038 continue;
1039
1040 if (export_psize == 0 && total_bal != 0) {
1041
1042 if (cookie_size == 0) {
1043 idx++;
1044 cookie_addr = cookies[idx].addr;
1045 cookie_size = cookies[idx].size;
1046
1047 export_caddr = cookie_addr & ~(pg_size - 1);
1048 export_poff = cookie_addr & (pg_size - 1);
1049 export_psize =
1050 min(cookie_size, (pg_size-export_poff));
1051 } else {
1052 export_caddr += pg_size;
1053 export_poff = 0;
1054 export_psize = min(cookie_size, pg_size);
1055 }
1056 }
1057
1058 if (local_psize == 0 && total_bal != 0) {
1059 local_valign += pg_size;
1060 local_ra = va_to_pa((void *)local_valign);
1061 local_poff = 0;
1062 local_psize = min(pg_size, len);
1063 len -= local_psize;
1064 }
1065
1066 /* check if we are all done */
1067 if (total_bal == 0)
1068 break;
1069 }
1070
1071
1072 D1(chid,
1073 "ldc_mem_copy: (0x%llx) done copying sz=0x%llx\n",
1074 chid, *size);
1075
1076 return (0);
1077 }
1078
1079 /*
1080 * Copy data between the client-specified virtual address space and
1081 * HV physical memory.
1082 *
1083 * The direction argument determines whether the data is read from or
1084 * written to HV memory. direction values are LDC_COPY_IN/OUT similar
1085 * to the ldc_mem_copy interface
1086 */
1087 int
1088 ldc_mem_rdwr_cookie(ldc_handle_t handle, caddr_t vaddr, size_t *size,
1089 caddr_t paddr, uint8_t direction)
1090 {
1091 ldc_chan_t *ldcp;
1092 uint64_t local_voff, local_valign;
1093 uint64_t pg_shift, pg_size, pg_size_code;
1094 uint64_t target_pa, target_poff, target_psize, target_size;
1095 uint64_t local_ra, local_poff, local_psize;
1096 uint64_t copy_size, copied_len = 0;
1097 pgcnt_t npages;
1098 size_t len = *size;
1099 int rv = 0;
1100
1101 if (handle == NULL) {
1102 DWARN(DBG_ALL_LDCS,
1103 "ldc_mem_rdwr_cookie: invalid channel handle\n");
1104 return (EINVAL);
1105 }
1106 ldcp = (ldc_chan_t *)handle;
1107
1108 mutex_enter(&ldcp->lock);
1109
1110 /* check to see if channel is UP */
1111 if (ldcp->tstate != TS_UP) {
1112 DWARN(ldcp->id,
1113 "ldc_mem_rdwr_cookie: (0x%llx) channel is not UP\n",
1114 ldcp->id);
1115 mutex_exit(&ldcp->lock);
1116 return (ECONNRESET);
1117 }
1118
1119 /* Force address and size to be 8-byte aligned */
1120 if ((((uintptr_t)vaddr | len) & 0x7) != 0) {
1121 DWARN(ldcp->id,
1122 "ldc_mem_rdwr_cookie: addr/size is not 8-byte aligned\n");
1123 mutex_exit(&ldcp->lock);
1124 return (EINVAL);
1125 }
1126
1127 target_size = *size;
1128
1129 /* FUTURE: get the page size, pgsz code, and shift */
1130 pg_size = MMU_PAGESIZE;
1131 pg_size_code = page_szc(pg_size);
1132 pg_shift = page_get_shift(pg_size_code);
1133
1134 D1(ldcp->id, "ldc_mem_rdwr_cookie: copying data "
1135 "(0x%llx) va 0x%llx pgsz=0x%llx, pgszc=0x%llx, pg_shift=0x%llx\n",
1136 ldcp->id, vaddr, pg_size, pg_size_code, pg_shift);
1137
1138 /* aligned VA and its offset */
1139 local_valign = ((uintptr_t)vaddr) & ~(pg_size - 1);
1140 local_voff = ((uintptr_t)vaddr) & (pg_size - 1);
1141
1142 npages = (len + local_voff) / pg_size;
1143 npages = ((len + local_voff) % pg_size == 0) ? npages : npages+1;
1144
1145 D1(ldcp->id, "ldc_mem_rdwr_cookie: (0x%llx) v=0x%llx, "
1146 "val=0x%llx,off=0x%x,pgs=0x%x\n",
1147 ldcp->id, vaddr, local_valign, local_voff, npages);
1148
1149 local_ra = va_to_pa((void *)local_valign);
1150 local_poff = local_voff;
1151 local_psize = min(len, (pg_size - local_voff));
1152
1153 len -= local_psize;
1154
1155 target_pa = ((uintptr_t)paddr) & ~(pg_size - 1);
1156 target_poff = ((uintptr_t)paddr) & (pg_size - 1);
1157 target_psize = pg_size - target_poff;
1158
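/*
 * Same page-walk structure as the copy loop in ldc_mem_copy(), except
 * that the far side is a flat physical address range rather than a
 * list of exported cookies.
 */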
1159 for (;;) {
1160
1161 copy_size = min(target_psize, local_psize);
1162
1163 D1(ldcp->id,
1164 "ldc_mem_rdwr_cookie: (0x%llx) dir=0x%x, tar_pa=0x%llx,"
1165 " loc_ra=0x%llx, tar_poff=0x%llx, loc_poff=0x%llx,"
1166 " tar_psz=0x%llx, loc_psz=0x%llx, copy_sz=0x%llx,"
1167 " total_bal=0x%llx\n",
1168 ldcp->id, direction, target_pa, local_ra, target_poff,
1169 local_poff, target_psize, local_psize, copy_size,
1170 target_size);
1171
1172 rv = hv_ldc_copy(ldcp->id, direction,
1173 (target_pa + target_poff), (local_ra + local_poff),
1174 copy_size, &copied_len);
1175
1176 if (rv != 0) {
1177 DWARN(DBG_ALL_LDCS,
1178 "ldc_mem_rdwr_cookie: (0x%lx) err %d during copy\n",
1179 ldcp->id, rv);
1180 DWARN(DBG_ALL_LDCS,
1181 "ldc_mem_rdwr_cookie: (0x%llx) dir=%lld, "
1182 "tar_pa=0x%llx, loc_ra=0x%llx, tar_poff=0x%llx, "
1183 "loc_poff=0x%llx, tar_psz=0x%llx, loc_psz=0x%llx, "
1184 "copy_sz=0x%llx, total_bal=0x%llx\n",
1185 ldcp->id, direction, target_pa, local_ra,
1186 target_poff, local_poff, target_psize, local_psize,
1187 copy_size, target_size);
1188
1189 *size = *size - target_size;
1190 mutex_exit(&ldcp->lock);
1191 return (i_ldc_h2v_error(rv));
1192 }
1193
1194 D2(ldcp->id, "ldc_mem_rdwr_cookie: copied=0x%llx\n",
1195 copied_len);
1196 target_poff += copied_len;
1197 local_poff += copied_len;
1198 target_psize -= copied_len;
1199 local_psize -= copied_len;
1200
1201 target_size -= copied_len;
1202
1203 if (copy_size != copied_len)
1204 continue;
1205
1206 if (target_psize == 0 && target_size != 0) {
1207 target_pa += pg_size;
1208 target_poff = 0;
1209 target_psize = min(pg_size, target_size);
1210 }
1211
1212 if (local_psize == 0 && target_size != 0) {
1213 local_valign += pg_size;
1214 local_ra = va_to_pa((void *)local_valign);
1215 local_poff = 0;
1216 local_psize = min(pg_size, len);
1217 len -= local_psize;
1218 }
1219
1220 /* check if we are all done */
1221 if (target_size == 0)
1222 break;
1223 }
1224
1225 mutex_exit(&ldcp->lock);
1226
1227 D1(ldcp->id, "ldc_mem_rdwr_cookie: (0x%llx) done copying sz=0x%llx\n",
1228 ldcp->id, *size);
1229
1230 return (0);
1231 }
1232
1233 /*
1234 * Map an exported memory segment into the local address space. If the
1235 * memory range was exported for direct map access, a HV call is made
1236 * to allocate a RA range. If the map is done via a shadow copy, local
1237 * shadow memory is allocated and the base VA is returned in 'vaddr'. If
1238 * the mapping is a direct map then the RA is returned in 'raddr'.
1239 */
1240 int
1241 ldc_mem_map(ldc_mem_handle_t mhandle, ldc_mem_cookie_t *cookie, uint32_t ccount,
1242 uint8_t mtype, uint8_t perm, caddr_t *vaddr, caddr_t *raddr)
1243 {
1244 /*
1245 * Check if direct map over shared memory is enabled, if not change
1246 * the mapping type to SHADOW_MAP.
1247 */
1248 if (ldc_shmem_enabled == 0)
1249 mtype = LDC_SHADOW_MAP;
1250
1251 return (i_ldc_mem_map(mhandle, cookie, ccount, mtype, perm,
1252 vaddr, raddr));
1253 }
1254
1255 static int
1256 i_ldc_mem_map(ldc_mem_handle_t mhandle, ldc_mem_cookie_t *cookie,
1257 uint32_t ccount, uint8_t mtype, uint8_t perm, caddr_t *vaddr,
1258 caddr_t *raddr)
1259 {
1260
1261 int i, j, idx, rv, retries;
1262 ldc_chan_t *ldcp;
1263 ldc_mhdl_t *mhdl;
1264 ldc_memseg_t *memseg;
1265 caddr_t tmpaddr;
1266 uint64_t map_perm = perm;
1267 uint64_t pg_size, pg_shift, pg_size_code, pg_mask;
1268 uint64_t exp_size = 0, base_off, map_size, npages;
1269 uint64_t cookie_addr, cookie_off, cookie_size;
1270 tte_t ldc_tte;
1271
1272 if (mhandle == NULL) {
1273 DWARN(DBG_ALL_LDCS, "ldc_mem_map: invalid memory handle\n");
1274 return (EINVAL);
1275 }
1276 mhdl = (ldc_mhdl_t *)mhandle;
1277
1278 mutex_enter(&mhdl->lock);
1279
1280 if (mhdl->status == LDC_BOUND || mhdl->status == LDC_MAPPED ||
1281 mhdl->memseg != NULL) {
1282 DWARN(DBG_ALL_LDCS,
1283 "ldc_mem_map: (0x%llx) handle bound/mapped\n", mhandle);
1284 mutex_exit(&mhdl->lock);
1285 return (EINVAL);
1286 }
1287
1288 ldcp = mhdl->ldcp;
1289
1290 mutex_enter(&ldcp->lock);
1291
1292 if (ldcp->tstate != TS_UP) {
1293 DWARN(ldcp->id,
1294 "ldc_mem_dring_map: (0x%llx) channel is not UP\n",
1295 ldcp->id);
1296 mutex_exit(&ldcp->lock);
1297 mutex_exit(&mhdl->lock);
1298 return (ECONNRESET);
1299 }
1300
1301 if ((mtype & (LDC_SHADOW_MAP|LDC_DIRECT_MAP|LDC_IO_MAP)) == 0) {
1302 DWARN(ldcp->id, "ldc_mem_map: invalid map type\n");
1303 mutex_exit(&ldcp->lock);
1304 mutex_exit(&mhdl->lock);
1305 return (EINVAL);
1306 }
1307
1308 D1(ldcp->id, "ldc_mem_map: (0x%llx) cookie = 0x%llx,0x%llx\n",
1309 ldcp->id, cookie->addr, cookie->size);
1310
1311 /* FUTURE: get the page size, pgsz code, and shift */
1312 pg_size = MMU_PAGESIZE;
1313 pg_size_code = page_szc(pg_size);
1314 pg_shift = page_get_shift(pg_size_code);
1315 pg_mask = ~(pg_size - 1);
1316
1317 /* calculate the number of pages in the exported cookie */
1318 base_off = cookie[0].addr & (pg_size - 1);
1319 for (idx = 0; idx < ccount; idx++)
1320 exp_size += cookie[idx].size;
1321 map_size = P2ROUNDUP((exp_size + base_off), pg_size);
1322 npages = (map_size >> pg_shift);
1323
1324 /* Allocate memseg structure */
1325 memseg = mhdl->memseg =
1326 kmem_cache_alloc(ldcssp->memseg_cache, KM_SLEEP);
1327
1328 /* Allocate memory to store all pages and cookies */
1329 memseg->pages = kmem_zalloc((sizeof (ldc_page_t) * npages), KM_SLEEP);
1330 memseg->cookies =
1331 kmem_zalloc((sizeof (ldc_mem_cookie_t) * ccount), KM_SLEEP);
1332
1333 D2(ldcp->id, "ldc_mem_map: (0x%llx) exp_size=0x%llx, map_size=0x%llx,"
1334 "pages=0x%llx\n", ldcp->id, exp_size, map_size, npages);
1335
1336 /*
1337 * Check to see if the client is requesting direct or shadow map
1338 * If direct map is requested, try to map remote memory first,
1339 * and if that fails, revert to shadow map
1340 */
1341 if (mtype == LDC_DIRECT_MAP) {
1342
1343 /* Allocate kernel virtual space for mapping */
1344 memseg->vaddr = vmem_xalloc(heap_arena, map_size,
1345 pg_size, 0, 0, NULL, NULL, VM_NOSLEEP);
1346 if (memseg->vaddr == NULL) {
1347 DWARN(DBG_ALL_LDCS,
1348 "ldc_mem_map: (0x%lx) memory map failed\n",
1349 ldcp->id);
1350 kmem_free(memseg->cookies,
1351 (sizeof (ldc_mem_cookie_t) * ccount));
1352 kmem_free(memseg->pages,
1353 (sizeof (ldc_page_t) * npages));
1354 kmem_cache_free(ldcssp->memseg_cache, memseg);
1355
1356 mutex_exit(&ldcp->lock);
1357 mutex_exit(&mhdl->lock);
1358 return (ENOMEM);
1359 }
1360
1361 /* Unload previous mapping */
1362 hat_unload(kas.a_hat, memseg->vaddr, map_size,
1363 HAT_UNLOAD_NOSYNC | HAT_UNLOAD_UNLOCK);
1364
1365 /* for each cookie passed in - map into address space */
1366 idx = 0;
1367 cookie_size = 0;
1368 tmpaddr = memseg->vaddr;
1369
1370 for (i = 0; i < npages; i++) {
1371
1372 if (cookie_size == 0) {
1373 ASSERT(idx < ccount);
1374 cookie_addr = cookie[idx].addr & pg_mask;
1375 cookie_off = cookie[idx].addr & (pg_size - 1);
1376 cookie_size =
1377 P2ROUNDUP((cookie_off + cookie[idx].size),
1378 pg_size);
1379 idx++;
1380 }
1381
1382 D1(ldcp->id, "ldc_mem_map: (0x%llx) mapping "
1383 "cookie 0x%llx, bal=0x%llx\n", ldcp->id,
1384 cookie_addr, cookie_size);
1385
1386 /* map the cookie into address space */
1387 for (retries = 0; retries < ldc_max_retries;
1388 retries++) {
1389
1390 rv = hv_ldc_mapin(ldcp->id, cookie_addr,
1391 &memseg->pages[i].raddr, &map_perm);
1392 if (rv != H_EWOULDBLOCK && rv != H_ETOOMANY)
1393 break;
1394
1395 drv_usecwait(ldc_delay);
1396 }
1397
1398 if (rv || memseg->pages[i].raddr == 0) {
1399 DWARN(ldcp->id,
1400 "ldc_mem_map: (0x%llx) hv mapin err %d\n",
1401 ldcp->id, rv);
1402
1403 /* remove previous mapins */
1404 hat_unload(kas.a_hat, memseg->vaddr, map_size,
1405 HAT_UNLOAD_NOSYNC | HAT_UNLOAD_UNLOCK);
1406 for (j = 0; j < i; j++) {
1407 rv = hv_ldc_unmap(
1408 memseg->pages[j].raddr);
1409 if (rv) {
1410 DWARN(ldcp->id,
1411 "ldc_mem_map: (0x%llx) "
1412 "cannot unmap ra=0x%llx\n",
1413 ldcp->id,
1414 memseg->pages[j].raddr);
1415 }
1416 }
1417
1418 /* free kernel virtual space */
1419 vmem_free(heap_arena, (void *)memseg->vaddr,
1420 map_size);
1421
1422 /* direct map failed - revert to shadow map */
1423 mtype = LDC_SHADOW_MAP;
1424 break;
1425
1426 } else {
1427
1428 D1(ldcp->id,
1429 "ldc_mem_map: (0x%llx) vtop map 0x%llx -> "
1430 "0x%llx, cookie=0x%llx, perm=0x%llx\n",
1431 ldcp->id, tmpaddr, memseg->pages[i].raddr,
1432 cookie_addr, perm);
1433
1434 /*
1435 * NOTE: Calling hat_devload directly causes it
1436 * to look up the page_t using the pfn. Since this
1437 * addr is beyond the memlist, it treats
1438 * it as non-memory.
1439 */
1440 sfmmu_memtte(&ldc_tte,
1441 (pfn_t)(memseg->pages[i].raddr >> pg_shift),
1442 PROT_READ | PROT_WRITE | HAT_NOSYNC, TTE8K);
1443
1444 D1(ldcp->id,
1445 "ldc_mem_map: (0x%llx) ra 0x%llx -> "
1446 "tte 0x%llx\n", ldcp->id,
1447 memseg->pages[i].raddr, ldc_tte);
1448
1449 sfmmu_tteload(kas.a_hat, &ldc_tte, tmpaddr,
1450 NULL, HAT_LOAD_LOCK);
1451
1452 cookie_size -= pg_size;
1453 cookie_addr += pg_size;
1454 tmpaddr += pg_size;
1455 }
1456 }
1457 }
1458
1459 if (mtype == LDC_SHADOW_MAP) {
1460 if (*vaddr == NULL) {
1461 memseg->vaddr = kmem_zalloc(exp_size, KM_SLEEP);
1462 mhdl->myshadow = B_TRUE;
1463
1464 D1(ldcp->id, "ldc_mem_map: (0x%llx) allocated "
1465 "shadow page va=0x%llx\n", ldcp->id, memseg->vaddr);
1466 } else {
1467 /*
1468 * Use client supplied memory for memseg->vaddr
1469 * WARNING: assuming that client mem is >= exp_size
1470 */
1471 memseg->vaddr = *vaddr;
1472 }
1473
1474 /* Save all page and cookie information */
1475 for (i = 0, tmpaddr = memseg->vaddr; i < npages; i++) {
1476 memseg->pages[i].raddr = va_to_pa(tmpaddr);
1477 tmpaddr += pg_size;
1478 }
1479
1480 }
1481
1482 /* save all cookies */
1483 bcopy(cookie, memseg->cookies, ccount * sizeof (ldc_mem_cookie_t));
1484
1485 /* update memseg_t */
1486 memseg->raddr = memseg->pages[0].raddr;
1487 memseg->size = (mtype == LDC_SHADOW_MAP) ? exp_size : map_size;
1488 memseg->npages = npages;
1489 memseg->ncookies = ccount;
1490 memseg->next_cookie = 0;
1491
1492 /* memory handle = mapped */
1493 mhdl->mtype = mtype;
1494 mhdl->perm = perm;
1495 mhdl->status = LDC_MAPPED;
1496
1497 D1(ldcp->id, "ldc_mem_map: (0x%llx) mapped 0x%llx, ra=0x%llx, "
1498 "va=0x%llx, pgs=0x%llx cookies=0x%llx\n",
1499 ldcp->id, mhdl, memseg->raddr, memseg->vaddr,
1500 memseg->npages, memseg->ncookies);
1501
1502 if (mtype == LDC_SHADOW_MAP)
1503 base_off = 0;
1504 if (raddr)
1505 *raddr = (caddr_t)(memseg->raddr | base_off);
1506 if (vaddr)
1507 *vaddr = (caddr_t)((uintptr_t)memseg->vaddr | base_off);
1508
1509 mutex_exit(&ldcp->lock);
1510 mutex_exit(&mhdl->lock);
1511 return (0);
1512 }
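/*
 * Illustrative importer-side sequence (a sketch, not driver code; the
 * channel handle 'chan' and the peer-supplied cookies are hypothetical):
 *
 *	ldc_mem_handle_t mh;
 *	caddr_t va = NULL;
 *
 *	(void) ldc_mem_alloc_handle(chan, &mh);
 *	(void) ldc_mem_map(mh, cookies, ncookies, LDC_SHADOW_MAP,
 *	    LDC_MEM_RW, &va, NULL);
 *	(void) ldc_mem_acquire(mh, 0, size);
 *	... read/modify the segment through 'va' ...
 *	(void) ldc_mem_release(mh, 0, size);
 *	(void) ldc_mem_unmap(mh);
 *	(void) ldc_mem_free_handle(mh);
 */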
1513
1514 /*
1515 * Unmap a memory segment. Free shadow memory (if any).
1516 */
1517 int
1518 ldc_mem_unmap(ldc_mem_handle_t mhandle)
1519 {
1520 int i, rv;
1521 ldc_mhdl_t *mhdl = (ldc_mhdl_t *)mhandle;
1522 ldc_chan_t *ldcp;
1523 ldc_memseg_t *memseg;
1524
1525 if (mhdl == 0 || mhdl->status != LDC_MAPPED) {
1526 DWARN(DBG_ALL_LDCS,
1527 "ldc_mem_unmap: (0x%llx) handle is not mapped\n",
1528 mhandle);
1529 return (EINVAL);
1530 }
1531
1532 mutex_enter(&mhdl->lock);
1533
1534 ldcp = mhdl->ldcp;
1535 memseg = mhdl->memseg;
1536
1537 D1(ldcp->id, "ldc_mem_unmap: (0x%llx) unmapping handle 0x%llx\n",
1538 ldcp->id, mhdl);
1539
1540 /* if we allocated shadow memory - free it */
1541 if (mhdl->mtype == LDC_SHADOW_MAP && mhdl->myshadow) {
1542 kmem_free(memseg->vaddr, memseg->size);
1543 } else if (mhdl->mtype == LDC_DIRECT_MAP) {
1544
1545 /* unmap in the case of DIRECT_MAP */
1546 hat_unload(kas.a_hat, memseg->vaddr, memseg->size,
1547 HAT_UNLOAD_UNLOCK);
1548
1549 for (i = 0; i < memseg->npages; i++) {
1550 rv = hv_ldc_unmap(memseg->pages[i].raddr);
1551 if (rv) {
1552 DWARN(DBG_ALL_LDCS,
1553 "ldc_mem_map: (0x%lx) hv unmap err %d\n",
1554 ldcp->id, rv);
1555 }
1556 }
1557
1558 vmem_free(heap_arena, (void *)memseg->vaddr, memseg->size);
1559 }
1560
1561 /* free the allocated memseg and page structures */
1562 kmem_free(memseg->pages, (sizeof (ldc_page_t) * memseg->npages));
1563 kmem_free(memseg->cookies,
1564 (sizeof (ldc_mem_cookie_t) * memseg->ncookies));
1565 kmem_cache_free(ldcssp->memseg_cache, memseg);
1566
1567 /* uninitialize the memory handle */
1568 mhdl->memseg = NULL;
1569 mhdl->status = LDC_UNBOUND;
1570
1571 D1(ldcp->id, "ldc_mem_unmap: (0x%llx) unmapped handle 0x%llx\n",
1572 ldcp->id, mhdl);
1573
1574 mutex_exit(&mhdl->lock);
1575 return (0);
1576 }
1577
1578 /*
1579 * Internal entry point for LDC mapped memory entry consistency
1580 * semantics. Acquire copies the contents of the remote memory
1581 * into the local shadow copy. The release operation copies the local
1582 * contents into the remote memory. The offset and size specify the
1583 * bounds for the memory range being synchronized.
1584 */
1585 static int
1586 i_ldc_mem_acquire_release(ldc_mem_handle_t mhandle, uint8_t direction,
1587 uint64_t offset, size_t size)
1588 {
1589 int err;
1590 ldc_mhdl_t *mhdl;
1591 ldc_chan_t *ldcp;
1592 ldc_memseg_t *memseg;
1593 caddr_t local_vaddr;
1594 size_t copy_size;
1595
1596 if (mhandle == NULL) {
1597 DWARN(DBG_ALL_LDCS,
1598 "i_ldc_mem_acquire_release: invalid memory handle\n");
1599 return (EINVAL);
1600 }
1601 mhdl = (ldc_mhdl_t *)mhandle;
1602
1603 mutex_enter(&mhdl->lock);
1604
1605 if (mhdl->status != LDC_MAPPED || mhdl->ldcp == NULL) {
1606 DWARN(DBG_ALL_LDCS,
1607 "i_ldc_mem_acquire_release: not mapped memory\n");
1608 mutex_exit(&mhdl->lock);
1609 return (EINVAL);
1610 }
1611
1612 /* do nothing for direct map */
1613 if (mhdl->mtype == LDC_DIRECT_MAP) {
1614 mutex_exit(&mhdl->lock);
1615 return (0);
1616 }
1617
1618 /* do nothing for COPY_IN without LDC_MEM_R or COPY_OUT without LDC_MEM_W */
1619 if ((direction == LDC_COPY_IN && (mhdl->perm & LDC_MEM_R) == 0) ||
1620 (direction == LDC_COPY_OUT && (mhdl->perm & LDC_MEM_W) == 0)) {
1621 mutex_exit(&mhdl->lock);
1622 return (0);
1623 }
1624
1625 if (offset >= mhdl->memseg->size ||
1626 (offset + size) > mhdl->memseg->size) {
1627 DWARN(DBG_ALL_LDCS,
1628 "i_ldc_mem_acquire_release: memory out of range\n");
1629 mutex_exit(&mhdl->lock);
1630 return (EINVAL);
1631 }
1632
1633 /* get the channel handle and memory segment */
1634 ldcp = mhdl->ldcp;
1635 memseg = mhdl->memseg;
1636
1637 if (mhdl->mtype == LDC_SHADOW_MAP) {
1638
1639 local_vaddr = memseg->vaddr + offset;
1640 copy_size = size;
1641
1642 /* copy to/from remote from/to local memory */
1643 err = ldc_mem_copy((ldc_handle_t)ldcp, local_vaddr, offset,
1644 &copy_size, memseg->cookies, memseg->ncookies,
1645 direction);
1646 if (err || copy_size != size) {
1647 DWARN(ldcp->id,
1648 "i_ldc_mem_acquire_release: copy failed\n");
1649 mutex_exit(&mhdl->lock);
1650 return (err);
1651 }
1652 }
1653
1654 mutex_exit(&mhdl->lock);
1655
1656 return (0);
1657 }
1658
1659 /*
1660 * Ensure that the contents of the local (shadow) memory seg are
1661 * consistent with the contents of the remote segment
1662 */
1663 int
1664 ldc_mem_acquire(ldc_mem_handle_t mhandle, uint64_t offset, uint64_t size)
1665 {
1666 return (i_ldc_mem_acquire_release(mhandle, LDC_COPY_IN, offset, size));
1667 }
1668
1669
1670 /*
1671 * Ensure that the contents of the remote memory seg are consistent
1672 * with the contents of the local (shadow) segment
1673 */
1674 int
1675 ldc_mem_release(ldc_mem_handle_t mhandle, uint64_t offset, uint64_t size)
1676 {
1677 return (i_ldc_mem_acquire_release(mhandle, LDC_COPY_OUT, offset, size));
1678 }
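/*
 * Both ldc_mem_acquire() and ldc_mem_release() are no-ops for
 * LDC_DIRECT_MAP mappings and for directions the mapping's permissions
 * disallow; only shadow-mapped segments are actually copied.
 */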
1679
1680 /*
1681 * Allocate a descriptor ring. The size of each descriptor
1682 * must be 8-byte aligned; the ring as a whole is rounded up to a
1683 * multiple of MMU_PAGESIZE.
1684 */
1685 int
1686 ldc_mem_dring_create(uint32_t len, uint32_t dsize, ldc_dring_handle_t *dhandle)
1687 {
1688 ldc_dring_t *dringp;
1689 size_t size = (dsize * len);
1690
1691 D1(DBG_ALL_LDCS, "ldc_mem_dring_create: len=0x%x, size=0x%x\n",
1692 len, dsize);
1693
1694 if (dhandle == NULL) {
1695 DWARN(DBG_ALL_LDCS, "ldc_mem_dring_create: invalid dhandle\n");
1696 return (EINVAL);
1697 }
1698
1699 if (len == 0) {
1700 DWARN(DBG_ALL_LDCS, "ldc_mem_dring_create: invalid length\n");
1701 return (EINVAL);
1702 }
1703
1704 /* descriptor size should be 8-byte aligned */
1705 if (dsize == 0 || (dsize & 0x7)) {
1706 DWARN(DBG_ALL_LDCS, "ldc_mem_dring_create: invalid size\n");
1707 return (EINVAL);
1708 }
1709
1710 *dhandle = 0;
1711
1712 /* Allocate a desc ring structure */
1713 dringp = kmem_zalloc(sizeof (ldc_dring_t), KM_SLEEP);
1714
1715 /* Initialize dring */
1716 dringp->length = len;
1717 dringp->dsize = dsize;
1718
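/*
 * For example, assuming an 8K MMU_PAGESIZE: 512 descriptors of 64
 * bytes (32K) is already page aligned, while 100 descriptors of 56
 * bytes (5600 bytes) is rounded up to a single 8K page.
 */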
1719 /* round off to multiple of pagesize */
1720 dringp->size = (size & MMU_PAGEMASK);
1721 if (size & MMU_PAGEOFFSET)
1722 dringp->size += MMU_PAGESIZE;
1723
1724 dringp->status = LDC_UNBOUND;
1725
1726 /* allocate descriptor ring memory */
1727 dringp->base = kmem_zalloc(dringp->size, KM_SLEEP);
1728
1729 /* initialize the desc ring lock */
1730 mutex_init(&dringp->lock, NULL, MUTEX_DRIVER, NULL);
1731
1732 /* Add descriptor ring to the head of global list */
1733 mutex_enter(&ldcssp->lock);
1734 dringp->next = ldcssp->dring_list;
1735 ldcssp->dring_list = dringp;
1736 mutex_exit(&ldcssp->lock);
1737
1738 *dhandle = (ldc_dring_handle_t)dringp;
1739
1740 D1(DBG_ALL_LDCS, "ldc_mem_dring_create: dring allocated\n");
1741
1742 return (0);
1743 }
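/*
 * Illustrative dring exporter sequence (a sketch, not driver code;
 * 'chan', 'ndesc' and 'desc_size' are hypothetical):
 *
 *	ldc_dring_handle_t dh;
 *	ldc_mem_cookie_t dcookie;
 *	uint32_t ncookies;
 *
 *	(void) ldc_mem_dring_create(ndesc, desc_size, &dh);
 *	(void) ldc_mem_dring_bind(chan, dh, LDC_SHADOW_MAP, LDC_MEM_RW,
 *	    &dcookie, &ncookies);
 *	... hand dcookie to the peer; when done ...
 *	(void) ldc_mem_dring_unbind(dh);
 *	(void) ldc_mem_dring_destroy(dh);
 */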
1744
1745
1746 /*
1747 * Destroy a descriptor ring.
1748 */
1749 int
1750 ldc_mem_dring_destroy(ldc_dring_handle_t dhandle)
1751 {
1752 ldc_dring_t *dringp;
1753 ldc_dring_t *tmp_dringp;
1754
1755 D1(DBG_ALL_LDCS, "ldc_mem_dring_destroy: entered\n");
1756
1757 if (dhandle == NULL) {
1758 DWARN(DBG_ALL_LDCS,
1759 "ldc_mem_dring_destroy: invalid desc ring handle\n");
1760 return (EINVAL);
1761 }
1762 dringp = (ldc_dring_t *)dhandle;
1763
1764 if (dringp->status == LDC_BOUND) {
1765 DWARN(DBG_ALL_LDCS,
1766 "ldc_mem_dring_destroy: desc ring is bound\n");
1767 return (EACCES);
1768 }
1769
1770 mutex_enter(&dringp->lock);
1771 mutex_enter(&ldcssp->lock);
1772
1773 /* remove from linked list - if not bound */
1774 tmp_dringp = ldcssp->dring_list;
1775 if (tmp_dringp == dringp) {
1776 ldcssp->dring_list = dringp->next;
1777 dringp->next = NULL;
1778
1779 } else {
1780 while (tmp_dringp != NULL) {
1781 if (tmp_dringp->next == dringp) {
1782 tmp_dringp->next = dringp->next;
1783 dringp->next = NULL;
1784 break;
1785 }
1786 tmp_dringp = tmp_dringp->next;
1787 }
1788 if (tmp_dringp == NULL) {
1789 DWARN(DBG_ALL_LDCS,
1790 "ldc_mem_dring_destroy: invalid descriptor\n");
1791 mutex_exit(&ldcssp->lock);
1792 mutex_exit(&dringp->lock);
1793 return (EINVAL);
1794 }
1795 }
1796
1797 mutex_exit(&ldcssp->lock);
1798
1799 /* free the descriptor ring */
1800 kmem_free(dringp->base, dringp->size);
1801
1802 mutex_exit(&dringp->lock);
1803
1804 /* destroy dring lock */
1805 mutex_destroy(&dringp->lock);
1806
1807 /* free desc ring object */
1808 kmem_free(dringp, sizeof (ldc_dring_t));
1809
1810 return (0);
1811 }
1812
1813 /*
1814 * Bind a previously allocated dring to a channel. The channel should
1815 * be OPEN in order to bind the ring to the channel. Returns a
1816 * descriptor ring cookie. The descriptor ring is exported for remote
1817 * access by the client at the other end of the channel. Entries for the
1818 * dring pages are stored in the map table (via i_ldc_mem_bind_handle).
1819 */
1820 int
1821 ldc_mem_dring_bind(ldc_handle_t handle, ldc_dring_handle_t dhandle,
1822 uint8_t mtype, uint8_t perm, ldc_mem_cookie_t *cookie, uint32_t *ccount)
1823 {
1824 int err;
1825 ldc_chan_t *ldcp;
1826 ldc_dring_t *dringp;
1827 ldc_mem_handle_t mhandle;
1828
1829 /* check to see if channel is initialized */
1830 if (handle == NULL) {
1831 DWARN(DBG_ALL_LDCS,
1832 "ldc_mem_dring_bind: invalid channel handle\n");
1833 return (EINVAL);
1834 }
1835 ldcp = (ldc_chan_t *)handle;
1836
1837 if (dhandle == NULL) {
1838 DWARN(DBG_ALL_LDCS,
1839 "ldc_mem_dring_bind: invalid desc ring handle\n");
1840 return (EINVAL);
1841 }
1842 dringp = (ldc_dring_t *)dhandle;
1843
1844 if (cookie == NULL) {
1845 DWARN(ldcp->id,
1846 "ldc_mem_dring_bind: invalid cookie arg\n");
1847 return (EINVAL);
1848 }
1849
1850 /* ensure the mtype is valid */
1851 if ((mtype & (LDC_SHADOW_MAP|LDC_DIRECT_MAP)) == 0) {
1852 DWARN(ldcp->id, "ldc_mem_dring_bind: invalid map type\n");
1853 return (EINVAL);
1854 }
1855
1856 /* no need to bind as direct map if it's not HV supported or enabled */
1857 if (!ldc_dring_shmem_hv_ok || !ldc_dring_shmem_enabled) {
1858 mtype = LDC_SHADOW_MAP;
1859 }
1860
1861 mutex_enter(&dringp->lock);
1862
1863 if (dringp->status == LDC_BOUND) {
1864 DWARN(DBG_ALL_LDCS,
1865 "ldc_mem_dring_bind: (0x%llx) descriptor ring is bound\n",
1866 ldcp->id);
1867 mutex_exit(&dringp->lock);
1868 return (EINVAL);
1869 }
1870
1871 if ((perm & LDC_MEM_RW) == 0) {
1872 DWARN(DBG_ALL_LDCS,
1873 "ldc_mem_dring_bind: invalid permissions\n");
1874 mutex_exit(&dringp->lock);
1875 return (EINVAL);
1876 }
1877
1878 if ((mtype & (LDC_SHADOW_MAP|LDC_DIRECT_MAP|LDC_IO_MAP)) == 0) {
1879 DWARN(DBG_ALL_LDCS, "ldc_mem_dring_bind: invalid type\n");
1880 mutex_exit(&dringp->lock);
1881 return (EINVAL);
1882 }
1883
1884 dringp->ldcp = ldcp;
1885
1886 /* create a memory handle */
1887 err = ldc_mem_alloc_handle(handle, &mhandle);
1888 if (err || mhandle == NULL) {
1889 DWARN(DBG_ALL_LDCS,
1890 "ldc_mem_dring_bind: (0x%llx) error allocating mhandle\n",
1891 ldcp->id);
1892 mutex_exit(&dringp->lock);
1893 return (err);
1894 }
1895 dringp->mhdl = mhandle;
1896
1897 /* bind the descriptor ring to channel */
1898 err = i_ldc_mem_bind_handle(mhandle, dringp->base, dringp->size,
1899 mtype, perm, cookie, ccount);
1900 if (err) {
1901 DWARN(ldcp->id,
1902 "ldc_mem_dring_bind: (0x%llx) error binding mhandle\n",
1903 ldcp->id);
1904 mutex_exit(&dringp->lock);
1905 return (err);
1906 }
1907
1908 /*
1909 * For now return error if we get more than one cookie
1910 * FUTURE: Return multiple cookies ..
1911 */
1912 if (*ccount > 1) {
1913 (void) ldc_mem_unbind_handle(mhandle);
1914 (void) ldc_mem_free_handle(mhandle);
1915
1916 dringp->ldcp = NULL;
1917 dringp->mhdl = NULL;
1918 *ccount = 0;
1919
1920 mutex_exit(&dringp->lock);
1921 return (EAGAIN);
1922 }
1923
1924 /* Add descriptor ring to channel's exported dring list */
1925 mutex_enter(&ldcp->exp_dlist_lock);
1926 dringp->ch_next = ldcp->exp_dring_list;
1927 ldcp->exp_dring_list = dringp;
1928 mutex_exit(&ldcp->exp_dlist_lock);
1929
1930 dringp->status = LDC_BOUND;
1931
1932 mutex_exit(&dringp->lock);
1933
1934 return (0);
1935 }
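/*
 * Illustrative usage sketch, not part of the driver: an exporter
 * typically allocates a ring with ldc_mem_dring_create() (defined
 * earlier in this file, assuming its (len, dsize, &handle) signature)
 * and then binds it to an OPEN channel so that the resulting cookie
 * can be handed to the peer. The names chan_handle, nentries and
 * mydesc_t below are hypothetical, and error handling is abbreviated.
 *
 *	ldc_dring_handle_t dh;
 *	ldc_mem_cookie_t cookie;
 *	uint32_t ccount;
 *
 *	if (ldc_mem_dring_create(nentries, sizeof (mydesc_t), &dh) == 0 &&
 *	    ldc_mem_dring_bind(chan_handle, dh,
 *	    LDC_DIRECT_MAP | LDC_SHADOW_MAP, LDC_MEM_RW,
 *	    &cookie, &ccount) == 0) {
 *		send cookie and ccount to the peer over the channel
 *	}
 */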
1936
1937 /*
1938 * Return the next cookie associated with the specified dring handle
1939 */
1940 int
1941 ldc_mem_dring_nextcookie(ldc_dring_handle_t dhandle, ldc_mem_cookie_t *cookie)
1942 {
1943 int rv = 0;
1944 ldc_dring_t *dringp;
1945 ldc_chan_t *ldcp;
1946
1947 if (dhandle == NULL) {
1948 DWARN(DBG_ALL_LDCS,
1949 "ldc_mem_dring_nextcookie: invalid desc ring handle\n");
1950 return (EINVAL);
1951 }
1952 dringp = (ldc_dring_t *)dhandle;
1953 mutex_enter(&dringp->lock);
1954
1955 if (dringp->status != LDC_BOUND) {
1956 DWARN(DBG_ALL_LDCS,
1957 "ldc_mem_dring_nextcookie: descriptor ring 0x%llx "
1958 "is not bound\n", dringp);
1959 mutex_exit(&dringp->lock);
1960 return (EINVAL);
1961 }
1962
1963 ldcp = dringp->ldcp;
1964
1965 if (cookie == NULL) {
1966 DWARN(ldcp->id,
1967 "ldc_mem_dring_nextcookie:(0x%llx) invalid cookie arg\n",
1968 ldcp->id);
1969 mutex_exit(&dringp->lock);
1970 return (EINVAL);
1971 }
1972
1973 rv = ldc_mem_nextcookie((ldc_mem_handle_t)dringp->mhdl, cookie);
1974 mutex_exit(&dringp->lock);
1975
1976 return (rv);
1977 }
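/*
 * Illustrative sketch only: if a bind ever returned more than one
 * cookie, the remaining cookies would be retrieved one at a time with
 * ldc_mem_dring_nextcookie() and forwarded to the peer. As noted above,
 * ldc_mem_dring_bind() currently fails with EAGAIN in that case, so the
 * loop below (with hypothetical names dh, cookie, ccount) is shown for
 * completeness only.
 *
 *	for (i = 1; i < ccount; i++) {
 *		if (ldc_mem_dring_nextcookie(dh, &cookie) != 0)
 *			break;
 *		forward cookie to the peer
 *	}
 */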
1978
1979 /*
1980 * Unbind a previously bound dring from a channel.
1981 */
1982 int
1983 ldc_mem_dring_unbind(ldc_dring_handle_t dhandle)
1984 {
1985 ldc_dring_t *dringp;
1986 ldc_dring_t *tmp_dringp;
1987 ldc_chan_t *ldcp;
1988
1989 if (dhandle == NULL) {
1990 DWARN(DBG_ALL_LDCS,
1991 "ldc_mem_dring_unbind: invalid desc ring handle\n");
1992 return (EINVAL);
1993 }
1994 dringp = (ldc_dring_t *)dhandle;
1995
1996 mutex_enter(&dringp->lock);
1997
1998 if (dringp->status == LDC_UNBOUND) {
1999 DWARN(DBG_ALL_LDCS,
2000 "ldc_mem_dring_bind: descriptor ring 0x%llx is unbound\n",
2001 dringp);
2002 mutex_exit(&dringp->lock);
2003 return (EINVAL);
2004 }
2005 ldcp = dringp->ldcp;
2006
2007 mutex_enter(&ldcp->exp_dlist_lock);
2008
2009 tmp_dringp = ldcp->exp_dring_list;
2010 if (tmp_dringp == dringp) {
2011 ldcp->exp_dring_list = dringp->ch_next;
2012 dringp->ch_next = NULL;
2013
2014 } else {
2015 while (tmp_dringp != NULL) {
2016 if (tmp_dringp->ch_next == dringp) {
2017 tmp_dringp->ch_next = dringp->ch_next;
2018 dringp->ch_next = NULL;
2019 break;
2020 }
2021 tmp_dringp = tmp_dringp->ch_next;
2022 }
2023 if (tmp_dringp == NULL) {
2024 DWARN(DBG_ALL_LDCS,
2025 "ldc_mem_dring_unbind: invalid descriptor\n");
2026 mutex_exit(&ldcp->exp_dlist_lock);
2027 mutex_exit(&dringp->lock);
2028 return (EINVAL);
2029 }
2030 }
2031
2032 mutex_exit(&ldcp->exp_dlist_lock);
2033
2034 (void) ldc_mem_unbind_handle((ldc_mem_handle_t)dringp->mhdl);
2035 (void) ldc_mem_free_handle((ldc_mem_handle_t)dringp->mhdl);
2036
2037 dringp->ldcp = NULL;
2038 dringp->mhdl = NULL;
2039 dringp->status = LDC_UNBOUND;
2040
2041 mutex_exit(&dringp->lock);
2042
2043 return (0);
2044 }
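/*
 * Illustrative teardown sketch (hypothetical handle dh): once the peer
 * has unmapped the ring, the exporter unbinds it from the channel and
 * then destroys it, reversing the create/bind sequence sketched above.
 *
 *	(void) ldc_mem_dring_unbind(dh);
 *	(void) ldc_mem_dring_destroy(dh);
 */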
2045
2046 #ifdef DEBUG
2047 void
2048 i_ldc_mem_inject_dring_clear(ldc_chan_t *ldcp)
2049 {
2050 ldc_dring_t *dp;
2051 ldc_mhdl_t *mhdl;
2052 ldc_mtbl_t *mtbl;
2053 ldc_memseg_t *memseg;
2054 uint64_t cookie_addr;
2055 uint64_t pg_shift, pg_size_code;
2056 int i, rv, retries;
2057
2058 /* has a map table been allocated? */
2059 if ((mtbl = ldcp->mtbl) == NULL)
2060 return;
2061
2062 /* lock the memory table - exclusive access to channel */
2063 mutex_enter(&mtbl->lock);
2064
2065 /* lock the exported dring list */
2066 mutex_enter(&ldcp->exp_dlist_lock);
2067
2068 for (dp = ldcp->exp_dring_list; dp != NULL; dp = dp->ch_next) {
2069 if ((mhdl = (ldc_mhdl_t *)dp->mhdl) == NULL)
2070 continue;
2071
2072 if ((memseg = mhdl->memseg) == NULL)
2073 continue;
2074
2075 /* undo the pages exported */
2076 for (i = 0; i < memseg->npages; i++) {
2077
2078 /* clear the entry from the table */
2079 memseg->pages[i].mte->entry.ll = 0;
2080
2081 pg_size_code = page_szc(MMU_PAGESIZE);
2082 pg_shift = page_get_shift(pg_size_code);
2083 cookie_addr = IDX2COOKIE(memseg->pages[i].index,
2084 pg_size_code, pg_shift);
2085
2086 retries = 0;
2087 do {
2088 rv = hv_ldc_revoke(ldcp->id, cookie_addr,
2089 memseg->pages[i].mte->cookie);
2090
2091 if (rv != H_EWOULDBLOCK)
2092 break;
2093
2094 drv_usecwait(ldc_delay);
2095
2096 } while (retries++ < ldc_max_retries);
2097
2098 if (rv != 0) {
2099 DWARN(ldcp->id,
2100 "i_ldc_mem_inject_dring_clear(): "
2101 "hv_ldc_revoke failed: "
2102 "channel: 0x%lx, cookie addr: 0x%p,"
2103 "cookie: 0x%lx, rv: %d",
2104 ldcp->id, cookie_addr,
2105 memseg->pages[i].mte->cookie, rv);
2106 }
2107
2108 mtbl->num_avail++;
2109 }
2110 }
2111
2112 mutex_exit(&ldcp->exp_dlist_lock);
2113 mutex_exit(&mtbl->lock);
2114 }
2115 #endif
2116
2117 /*
2118 * Get information about the dring. The base address of the descriptor
2119 * ring is returned, along with its type and permissions.
2120 */
2121 int
2122 ldc_mem_dring_info(ldc_dring_handle_t dhandle, ldc_mem_info_t *minfo)
2123 {
2124 ldc_dring_t *dringp;
2125 int rv;
2126
2127 if (dhandle == NULL) {
2128 DWARN(DBG_ALL_LDCS,
2129 "ldc_mem_dring_info: invalid desc ring handle\n");
2130 return (EINVAL);
2131 }
2132 dringp = (ldc_dring_t *)dhandle;
2133
2134 mutex_enter(&dringp->lock);
2135
2136 if (dringp->mhdl) {
2137 rv = ldc_mem_info(dringp->mhdl, minfo);
2138 if (rv) {
2139 DWARN(DBG_ALL_LDCS,
2140 "ldc_mem_dring_info: error reading mem info\n");
2141 mutex_exit(&dringp->lock);
2142 return (rv);
2143 }
2144 } else {
2145 minfo->vaddr = dringp->base;
2146 minfo->raddr = NULL;
2147 minfo->status = dringp->status;
2148 }
2149
2150 mutex_exit(&dringp->lock);
2151
2152 return (0);
2153 }
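/*
 * Illustrative sketch (hypothetical handle dh): a client can query a
 * bound or mapped ring for its local virtual address and status.
 *
 *	ldc_mem_info_t minfo;
 *
 *	if (ldc_mem_dring_info(dh, &minfo) == 0) {
 *		use minfo.vaddr and minfo.status
 *	}
 */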
2154
2155 /*
2156 * Map an exported descriptor ring into the local address space. If the
2157 * descriptor ring was exported for direct map access, a HV call is made
2158 * to allocate a RA range. If the map is done via a shadow copy, local
2159 * shadow memory is allocated.
2160 */
2161 int
2162 ldc_mem_dring_map(ldc_handle_t handle, ldc_mem_cookie_t *cookie,
2163 uint32_t ccount, uint32_t len, uint32_t dsize, uint8_t mtype,
2164 ldc_dring_handle_t *dhandle)
2165 {
2166 int err;
2167 ldc_chan_t *ldcp = (ldc_chan_t *)handle;
2168 ldc_mem_handle_t mhandle;
2169 ldc_dring_t *dringp;
2170 size_t dring_size;
2171
2172 if (dhandle == NULL) {
2173 DWARN(DBG_ALL_LDCS,
2174 "ldc_mem_dring_map: invalid dhandle\n");
2175 return (EINVAL);
2176 }
2177
2178 /* check to see if channel is initialized */
2179 if (handle == NULL) {
2180 DWARN(DBG_ALL_LDCS,
2181 "ldc_mem_dring_map: invalid channel handle\n");
2182 return (EINVAL);
2183 }
2184 ldcp = (ldc_chan_t *)handle;
2185
2186 if (cookie == NULL) {
2187 DWARN(ldcp->id,
2188 "ldc_mem_dring_map: (0x%llx) invalid cookie\n",
2189 ldcp->id);
2190 return (EINVAL);
2191 }
2192
2193 /* FUTURE: For now we support only one cookie per dring */
2194 ASSERT(ccount == 1);
2195
2196 if (cookie->size < (dsize * len)) {
2197 DWARN(ldcp->id,
2198 "ldc_mem_dring_map: (0x%llx) invalid dsize/len\n",
2199 ldcp->id);
2200 return (EINVAL);
2201 }
2202
2203 /* ensure the mtype is valid */
2204 if ((mtype & (LDC_SHADOW_MAP|LDC_DIRECT_MAP)) == 0) {
2205 DWARN(ldcp->id, "ldc_mem_dring_map: invalid map type\n");
2206 return (EINVAL);
2207 }
2208
2209 /* do not attempt direct map if it's not HV supported or enabled */
2210 if (!ldc_dring_shmem_hv_ok || !ldc_dring_shmem_enabled) {
2211 mtype = LDC_SHADOW_MAP;
2212 }
2213
2214 *dhandle = 0;
2215
2216 /* Allocate a dring structure */
2217 dringp = kmem_zalloc(sizeof (ldc_dring_t), KM_SLEEP);
2218
2219 D1(ldcp->id,
2220 "ldc_mem_dring_map: 0x%x,0x%x,0x%x,0x%llx,0x%llx\n",
2221 mtype, len, dsize, cookie->addr, cookie->size);
2222
2223 /* Initialize dring */
2224 dringp->length = len;
2225 dringp->dsize = dsize;
2226
2227 /* round off to a multiple of the page size */
2228 dring_size = len * dsize;
2229 dringp->size = (dring_size & MMU_PAGEMASK);
2230 if (dring_size & MMU_PAGEOFFSET)
2231 dringp->size += MMU_PAGESIZE;
2232
2233 dringp->ldcp = ldcp;
2234
2235 /* create a memory handle */
2236 err = ldc_mem_alloc_handle(handle, &mhandle);
2237 if (err || mhandle == NULL) {
2238 DWARN(DBG_ALL_LDCS,
2239 "ldc_mem_dring_map: cannot alloc hdl err=%d\n",
2240 err);
2241 kmem_free(dringp, sizeof (ldc_dring_t));
2242 return (ENOMEM);
2243 }
2244
2245 dringp->mhdl = mhandle;
2246 dringp->base = NULL;
2247
2248 /* map the dring into local memory */
2249 err = i_ldc_mem_map(mhandle, cookie, ccount, mtype, LDC_MEM_RW,
2250 &(dringp->base), NULL);
2251 if (err || dringp->base == NULL) {
2252 DWARN(DBG_ALL_LDCS,
2253 "ldc_mem_dring_map: cannot map desc ring err=%d\n", err);
2254 (void) ldc_mem_free_handle(mhandle);
2255 kmem_free(dringp, sizeof (ldc_dring_t));
2256 return (ENOMEM);
2257 }
2258
2259 /* initialize the desc ring lock */
2260 mutex_init(&dringp->lock, NULL, MUTEX_DRIVER, NULL);
2261
2262 /* Add descriptor ring to channel's imported dring list */
2263 mutex_enter(&ldcp->imp_dlist_lock);
2264 dringp->ch_next = ldcp->imp_dring_list;
2265 ldcp->imp_dring_list = dringp;
2266 mutex_exit(&ldcp->imp_dlist_lock);
2267
2268 dringp->status = LDC_MAPPED;
2269
2270 *dhandle = (ldc_dring_handle_t)dringp;
2271
2272 return (0);
2273 }
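/*
 * Illustrative importer-side sketch: map the exported ring described by
 * the cookie received from the peer. Only one cookie per dring is
 * currently supported, so ccount is 1 here. The names chan_handle,
 * nentries and mydesc_t are hypothetical.
 *
 *	ldc_dring_handle_t dh;
 *
 *	err = ldc_mem_dring_map(chan_handle, &cookie, 1, nentries,
 *	    sizeof (mydesc_t), LDC_DIRECT_MAP | LDC_SHADOW_MAP, &dh);
 *	...
 *	(void) ldc_mem_dring_unmap(dh);
 */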
2274
2275 /*
2276 * Unmap a descriptor ring. Free shadow memory (if any).
2277 */
2278 int
2279 ldc_mem_dring_unmap(ldc_dring_handle_t dhandle)
2280 {
2281 ldc_dring_t *dringp;
2282 ldc_dring_t *tmp_dringp;
2283 ldc_chan_t *ldcp;
2284
2285 if (dhandle == NULL) {
2286 DWARN(DBG_ALL_LDCS,
2287 "ldc_mem_dring_unmap: invalid desc ring handle\n");
2288 return (EINVAL);
2289 }
2290 dringp = (ldc_dring_t *)dhandle;
2291
2292 if (dringp->status != LDC_MAPPED) {
2293 DWARN(DBG_ALL_LDCS,
2294 "ldc_mem_dring_unmap: not a mapped desc ring\n");
2295 return (EINVAL);
2296 }
2297
2298 mutex_enter(&dringp->lock);
2299
2300 ldcp = dringp->ldcp;
2301
2302 mutex_enter(&ldcp->imp_dlist_lock);
2303
2304 /* find and unlink the desc ring from channel import list */
2305 tmp_dringp = ldcp->imp_dring_list;
2306 if (tmp_dringp == dringp) {
2307 ldcp->imp_dring_list = dringp->ch_next;
2308 dringp->ch_next = NULL;
2309
2310 } else {
2311 while (tmp_dringp != NULL) {
2312 if (tmp_dringp->ch_next == dringp) {
2313 tmp_dringp->ch_next = dringp->ch_next;
2314 dringp->ch_next = NULL;
2315 break;
2316 }
2317 tmp_dringp = tmp_dringp->ch_next;
2318 }
2319 if (tmp_dringp == NULL) {
2320 DWARN(DBG_ALL_LDCS,
2321 "ldc_mem_dring_unmap: invalid descriptor\n");
2322 mutex_exit(&ldcp->imp_dlist_lock);
2323 mutex_exit(&dringp->lock);
2324 return (EINVAL);
2325 }
2326 }
2327
2328 mutex_exit(&ldcp->imp_dlist_lock);
2329
2330 /* do a LDC memory handle unmap and free */
2331 (void) ldc_mem_unmap(dringp->mhdl);
2332 (void) ldc_mem_free_handle((ldc_mem_handle_t)dringp->mhdl);
2333
2334 dringp->status = 0;
2335 dringp->ldcp = NULL;
2336
2337 mutex_exit(&dringp->lock);
2338
2339 /* destroy dring lock */
2340 mutex_destroy(&dringp->lock);
2341
2342 /* free desc ring object */
2343 kmem_free(dringp, sizeof (ldc_dring_t));
2344
2345 return (0);
2346 }
2347
2348 /*
2349 * Internal entry point for descriptor ring access entry consistency
2350 * semantics. Acquire copies the contents of the remote descriptor ring
2351 * into the local shadow copy. The release operation copies the local
2352 * contents into the remote dring. The start and end locations bound the
2353 * entries being synchronized; if start > end, the range wraps around.
2354 */
2355 static int
2356 i_ldc_dring_acquire_release(ldc_dring_handle_t dhandle,
2357 uint8_t direction, uint64_t start, uint64_t end)
2358 {
2359 int err;
2360 ldc_dring_t *dringp;
2361 ldc_chan_t *ldcp;
2362 ldc_mhdl_t *mhdl;
2363 uint64_t soff;
2364 size_t copy_size;
2365
2366 if (dhandle == NULL) {
2367 DWARN(DBG_ALL_LDCS,
2368 "i_ldc_dring_acquire_release: invalid desc ring handle\n");
2369 return (EINVAL);
2370 }
2371 dringp = (ldc_dring_t *)dhandle;
2372 mutex_enter(&dringp->lock);
2373
2374 if (dringp->status != LDC_MAPPED || dringp->ldcp == NULL) {
2375 DWARN(DBG_ALL_LDCS,
2376 "i_ldc_dring_acquire_release: not a mapped desc ring\n");
2377 mutex_exit(&dringp->lock);
2378 return (EINVAL);
2379 }
2380
2381 if (start >= dringp->length || end >= dringp->length) {
2382 DWARN(DBG_ALL_LDCS,
2383 "i_ldc_dring_acquire_release: index out of range\n");
2384 mutex_exit(&dringp->lock);
2385 return (EINVAL);
2386 }
2387
2388 mhdl = (ldc_mhdl_t *)dringp->mhdl;
2389 if (mhdl == NULL) {
2390 DWARN(DBG_ALL_LDCS,
2391 "i_ldc_dring_acquire_release: invalid memory handle\n");
2392 mutex_exit(&dringp->lock);
2393 return (EINVAL);
2394 }
2395
2396 if (mhdl->mtype != LDC_SHADOW_MAP) {
2397 DWARN(DBG_ALL_LDCS,
2398 "i_ldc_dring_acquire_release: invalid mtype: %d\n",
2399 mhdl->mtype);
2400 mutex_exit(&dringp->lock);
2401 return (EINVAL);
2402 }
2403
2404 /* get the channel handle */
2405 ldcp = dringp->ldcp;
2406
2407 copy_size = (start <= end) ? (((end - start) + 1) * dringp->dsize) :
2408 ((dringp->length - start) * dringp->dsize);
2409
2410 /* Calculate the relative offset for the first desc */
2411 soff = (start * dringp->dsize);
2412
2413 /* copy to/from remote from/to local memory */
2414 D1(ldcp->id, "i_ldc_dring_acquire_release: c1 off=0x%llx sz=0x%llx\n",
2415 soff, copy_size);
2416 err = i_ldc_mem_acquire_release((ldc_mem_handle_t)dringp->mhdl,
2417 direction, soff, copy_size);
2418 if (err) {
2419 DWARN(ldcp->id,
2420 "i_ldc_dring_acquire_release: copy failed\n");
2421 mutex_exit(&dringp->lock);
2422 return (err);
2423 }
2424
2425 /* copy the wrapped-around remainder, if any */
2426 if (start > end) {
2427 copy_size = ((end + 1) * dringp->dsize);
2428 soff = 0;
2429
2430 /* copy to/from remote from/to local memory */
2431 D1(ldcp->id, "i_ldc_dring_acquire_release: c2 "
2432 "off=0x%llx sz=0x%llx\n", soff, copy_size);
2433 err = i_ldc_mem_acquire_release((ldc_mem_handle_t)dringp->mhdl,
2434 direction, soff, copy_size);
2435 if (err) {
2436 DWARN(ldcp->id,
2437 "i_ldc_dring_acquire_release: copy failed\n");
2438 mutex_exit(&dringp->lock);
2439 return (err);
2440 }
2441 }
2442
2443 mutex_exit(&dringp->lock);
2444
2445 return (0);
2446 }
2447
2448 /*
2449 * Ensure that the contents of the local dring are consistent
2450 * with the contents of the remote dring
2451 */
2452 int
2453 ldc_mem_dring_acquire(ldc_dring_handle_t dhandle, uint64_t start, uint64_t end)
2454 {
2455 return (i_ldc_dring_acquire_release(dhandle, LDC_COPY_IN, start, end));
2456 }
2457
2458 /*
2459 * Ensure that the contents of the remote dring are consistent
2460 * with the contents of the local dring
2461 */
2462 int
2463 ldc_mem_dring_release(ldc_dring_handle_t dhandle, uint64_t start, uint64_t end)
2464 {
2465 return (i_ldc_dring_acquire_release(dhandle, LDC_COPY_OUT, start, end));
2466 }
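/*
 * Illustrative sketch (hypothetical names dh, idx): an importer that
 * mapped the ring with LDC_SHADOW_MAP brackets each access with an
 * acquire (copy in from the exporter) before reading a descriptor and a
 * release (copy out to the exporter) after updating it. The internal
 * helper above only accepts shadow mappings, so these calls do not
 * apply to a direct-mapped ring.
 *
 *	if (ldc_mem_dring_acquire(dh, idx, idx) == 0) {
 *		examine or update descriptor idx in the shadow copy
 *		(void) ldc_mem_dring_release(dh, idx, idx);
 *	}
 */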
2467