1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * sun4v LDC Link Layer Shared Memory Routines
31  */
32 #include <sys/types.h>
33 #include <sys/kmem.h>
34 #include <sys/cmn_err.h>
35 #include <sys/ksynch.h>
36 #include <sys/debug.h>
37 #include <sys/cyclic.h>
38 #include <sys/machsystm.h>
39 #include <sys/vm.h>
40 #include <sys/machcpuvar.h>
41 #include <sys/mmu.h>
42 #include <sys/pte.h>
43 #include <vm/hat.h>
44 #include <vm/as.h>
45 #include <vm/hat_sfmmu.h>
46 #include <sys/vm_machparam.h>
47 #include <vm/seg_kmem.h>
48 #include <vm/seg_kpm.h>
49 #include <sys/hypervisor_api.h>
50 #include <sys/ldc.h>
51 #include <sys/ldc_impl.h>
52 
53 /* LDC variables used by shared memory routines */
54 extern ldc_soft_state_t *ldcssp;
55 extern int ldc_max_retries;
56 extern clock_t ldc_delay;
57 
58 #ifdef DEBUG
59 extern int ldcdbg;
60 #endif
61 
62 /* LDC internal functions used by shared memory routines */
63 extern void i_ldc_reset(ldc_chan_t *ldcp, boolean_t force_reset);
64 extern int i_ldc_h2v_error(int h_error);
65 
66 #ifdef DEBUG
67 extern void ldcdebug(int64_t id, const char *fmt, ...);
68 #endif
69 
70 /* Memory synchronization internal functions */
71 static int i_ldc_mem_acquire_release(ldc_mem_handle_t mhandle,
72     uint8_t direction, uint64_t offset, size_t size);
73 static int i_ldc_dring_acquire_release(ldc_dring_handle_t dhandle,
74     uint8_t direction, uint64_t start, uint64_t end);
75 static int i_ldc_mem_map(ldc_mem_handle_t mhandle, ldc_mem_cookie_t *cookie,
76     uint32_t ccount, uint8_t mtype, uint8_t perm, caddr_t *vaddr,
77     caddr_t *raddr);
78 static int i_ldc_mem_bind_handle(ldc_mem_handle_t mhandle, caddr_t vaddr,
79     size_t len, uint8_t mtype, uint8_t perm, ldc_mem_cookie_t *cookie,
80     uint32_t *ccount);
81 
82 /*
83  * The LDC framework supports mapping a remote domain's memory
84  * either directly or via shadow memory pages. The default is
85  * currently the shadow copy. Direct map can be enabled by
86  * setting 'ldc_shmem_enabled' to a non-zero value.
87  */
88 int ldc_shmem_enabled = 0;
89 
90 /*
91  * Use of directly mapped shared memory for LDC descriptor
92  * rings is permitted if this variable is non-zero.
93  */
94 int ldc_dring_shmem_enabled = 1;
95 
96 /*
97  * The major and minor versions required to use directly
98  * mapped shared memory for LDC descriptor rings. The
99  * ldc_dring_shmem_hv_force variable, if set to a non-zero
100  * value, overrides the hypervisor API version check.
101  */
102 static int ldc_dring_shmem_hv_major = 1;
103 static int ldc_dring_shmem_hv_minor = 1;
104 static int ldc_dring_shmem_hv_force = 0;
105 
106 /*
107  * The results of the hypervisor service group API check.
108  * A non-zero value indicates the HV includes support for
109  * descriptor ring shared memory.
110  */
111 static int ldc_dring_shmem_hv_ok = 0;
112 
113 /*
114  * Pages exported for remote access over each channel are
115  * maintained in a table registered with the Hypervisor.
116  * The default number of entries in the table is given by
117  * 'ldc_maptable_entries' (LDC_MTBL_ENTRIES).
118  */
119 uint64_t ldc_maptable_entries = LDC_MTBL_ENTRIES;
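
/*
 * Tuning note (added commentary, not part of the original source): these
 * are plain kernel variables, so on a live system they would typically be
 * changed with mdb -kw or, assuming the standard "ldc" module name,
 * persistently via /etc/system. The values below are hypothetical
 * examples only:
 *
 *	set ldc:ldc_shmem_enabled = 1
 *	set ldc:ldc_maptable_entries = 0x2000
 */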
120 
121 #define	IDX2COOKIE(idx, pg_szc, pg_shift)				\
122 	(((pg_szc) << LDC_COOKIE_PGSZC_SHIFT) | ((idx) << (pg_shift)))
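
/*
 * Cookie layout note (added commentary, derived from the macro above and
 * its use in i_ldc_mem_bind_handle() and ldc_mem_copy()): a cookie address
 * packs the page size code into the high-order bits (shifted by
 * LDC_COOKIE_PGSZC_SHIFT), the map table index into the bits above the
 * page offset (shifted left by pg_shift), and the intra-page offset of the
 * exported range into the low pg_shift bits (OR'ed in by
 * i_ldc_mem_bind_handle). ldc_mem_copy() recovers the offset with
 * (cookie & (pg_size - 1)) and the table index/pgszc part with the mask
 * ~(pg_size - 1).
 */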
123 
124 /*
125  * Sets ldc_dring_shmem_hv_ok to a non-zero value if the HV LDC
126  * API version supports directly mapped shared memory or if it has
127  * been explicitly enabled via ldc_dring_shmem_hv_force.
128  */
129 void
130 i_ldc_mem_set_hsvc_vers(uint64_t major, uint64_t minor)
131 {
132 	if ((major == ldc_dring_shmem_hv_major &&
133 	    minor >= ldc_dring_shmem_hv_minor) ||
134 	    (major > ldc_dring_shmem_hv_major) ||
135 	    (ldc_dring_shmem_hv_force != 0)) {
136 		ldc_dring_shmem_hv_ok = 1;
137 	}
138 }
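
/*
 * For example (illustrative): with the defaults above (major 1, minor 1),
 * a negotiated HV LDC API version of 1.1 or 1.2, or any 2.x version,
 * satisfies the check; version 1.0 does not unless
 * ldc_dring_shmem_hv_force is set.
 */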
139 
140 /*
141  * Allocate a memory handle for the channel and link it into the
142  * channel's list of handles. The channel's memory map table itself
143  * is allocated later, when the first handle is bound.
144  */
145 int
146 ldc_mem_alloc_handle(ldc_handle_t handle, ldc_mem_handle_t *mhandle)
147 {
148 	ldc_chan_t 	*ldcp;
149 	ldc_mhdl_t	*mhdl;
150 
151 	if (handle == NULL) {
152 		DWARN(DBG_ALL_LDCS,
153 		    "ldc_mem_alloc_handle: invalid channel handle\n");
154 		return (EINVAL);
155 	}
156 	ldcp = (ldc_chan_t *)handle;
157 
158 	mutex_enter(&ldcp->lock);
159 
160 	/* check to see if channel is initialized */
161 	if ((ldcp->tstate & ~TS_IN_RESET) < TS_INIT) {
162 		DWARN(ldcp->id,
163 		    "ldc_mem_alloc_handle: (0x%llx) channel not initialized\n",
164 		    ldcp->id);
165 		mutex_exit(&ldcp->lock);
166 		return (EINVAL);
167 	}
168 
169 	/* allocate handle for channel */
170 	mhdl = kmem_cache_alloc(ldcssp->memhdl_cache, KM_SLEEP);
171 
172 	/* initialize the lock */
173 	mutex_init(&mhdl->lock, NULL, MUTEX_DRIVER, NULL);
174 
175 	mhdl->myshadow = B_FALSE;
176 	mhdl->memseg = NULL;
177 	mhdl->ldcp = ldcp;
178 	mhdl->status = LDC_UNBOUND;
179 
180 	/* insert memory handle (@ head) into list */
181 	if (ldcp->mhdl_list == NULL) {
182 		ldcp->mhdl_list = mhdl;
183 		mhdl->next = NULL;
184 	} else {
185 		/* insert @ head */
186 		mhdl->next = ldcp->mhdl_list;
187 		ldcp->mhdl_list = mhdl;
188 	}
189 
190 	/* return the handle */
191 	*mhandle = (ldc_mem_handle_t)mhdl;
192 
193 	mutex_exit(&ldcp->lock);
194 
195 	D1(ldcp->id, "ldc_mem_alloc_handle: (0x%llx) allocated handle 0x%llx\n",
196 	    ldcp->id, mhdl);
197 
198 	return (0);
199 }
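
/*
 * Typical exporter-side usage (a minimal sketch added for illustration;
 * the channel handle 'lh', the buffer and the error handling are
 * hypothetical):
 *
 *	ldc_mem_handle_t mh;
 *	ldc_mem_cookie_t cookie;
 *	uint32_t ccount;
 *	caddr_t buf = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
 *
 *	if (ldc_mem_alloc_handle(lh, &mh) == 0 &&
 *	    ldc_mem_bind_handle(mh, buf, MMU_PAGESIZE, LDC_SHADOW_MAP,
 *	    LDC_MEM_RW, &cookie, &ccount) == 0) {
 *		// send 'cookie'/'ccount' to the peer over the channel;
 *		// once the peer is done with the memory ...
 *		(void) ldc_mem_unbind_handle(mh);
 *		(void) ldc_mem_free_handle(mh);
 *	}
 */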
200 
201 /*
202  * Free memory handle for the channel and unlink it from the list
203  */
204 int
205 ldc_mem_free_handle(ldc_mem_handle_t mhandle)
206 {
207 	ldc_mhdl_t 	*mhdl, *phdl;
208 	ldc_chan_t 	*ldcp;
209 
210 	if (mhandle == NULL) {
211 		DWARN(DBG_ALL_LDCS,
212 		    "ldc_mem_free_handle: invalid memory handle\n");
213 		return (EINVAL);
214 	}
215 	mhdl = (ldc_mhdl_t *)mhandle;
216 
217 	mutex_enter(&mhdl->lock);
218 
219 	ldcp = mhdl->ldcp;
220 
221 	if (mhdl->status == LDC_BOUND || mhdl->status == LDC_MAPPED) {
222 		DWARN(ldcp->id,
223 		    "ldc_mem_free_handle: cannot free, 0x%llx hdl bound\n",
224 		    mhdl);
225 		mutex_exit(&mhdl->lock);
226 		return (EINVAL);
227 	}
228 	mutex_exit(&mhdl->lock);
229 
230 	mutex_enter(&ldcp->mlist_lock);
231 
232 	phdl = ldcp->mhdl_list;
233 
234 	/* first handle */
235 	if (phdl == mhdl) {
236 		ldcp->mhdl_list = mhdl->next;
237 		mutex_destroy(&mhdl->lock);
238 		kmem_cache_free(ldcssp->memhdl_cache, mhdl);
239 
240 		D1(ldcp->id,
241 		    "ldc_mem_free_handle: (0x%llx) freed handle 0x%llx\n",
242 		    ldcp->id, mhdl);
243 	} else {
244 		/* walk the list - unlink and free */
245 		while (phdl != NULL) {
246 			if (phdl->next == mhdl) {
247 				phdl->next = mhdl->next;
248 				mutex_destroy(&mhdl->lock);
249 				kmem_cache_free(ldcssp->memhdl_cache, mhdl);
250 				D1(ldcp->id,
251 				    "ldc_mem_free_handle: (0x%llx) freed "
252 				    "handle 0x%llx\n", ldcp->id, mhdl);
253 				break;
254 			}
255 			phdl = phdl->next;
256 		}
257 	}
258 
259 	if (phdl == NULL) {
260 		DWARN(ldcp->id,
261 		    "ldc_mem_free_handle: invalid handle 0x%llx\n", mhdl);
262 		mutex_exit(&ldcp->mlist_lock);
263 		return (EINVAL);
264 	}
265 
266 	mutex_exit(&ldcp->mlist_lock);
267 
268 	return (0);
269 }
270 
271 /*
272  * Bind a memory handle to a virtual address.
273  * The virtual address is converted to the corresponding real addresses.
274  * Returns pointer to the first ldc_mem_cookie and the total number
275  * of cookies for this virtual address. Other cookies can be obtained
276  * using the ldc_mem_nextcookie() call. If the pages are stored in
277  * consecutive locations in the table, a single cookie corresponding to
278  * the first location is returned. The cookie size spans all the entries.
279  *
280  * If the VA corresponds to a page that is already being exported, reuse
281  * the page and do not export it again. Bump the page's use count.
282  */
283 int
284 ldc_mem_bind_handle(ldc_mem_handle_t mhandle, caddr_t vaddr, size_t len,
285     uint8_t mtype, uint8_t perm, ldc_mem_cookie_t *cookie, uint32_t *ccount)
286 {
287 	/*
288 	 * Check if direct shared memory map is enabled; if not, change
289 	 * the mapping type to LDC_SHADOW_MAP.
290 	 */
291 	if (ldc_shmem_enabled == 0)
292 		mtype = LDC_SHADOW_MAP;
293 
294 	return (i_ldc_mem_bind_handle(mhandle, vaddr, len, mtype, perm,
295 	    cookie, ccount));
296 }
297 
298 static int
299 i_ldc_mem_bind_handle(ldc_mem_handle_t mhandle, caddr_t vaddr, size_t len,
300     uint8_t mtype, uint8_t perm, ldc_mem_cookie_t *cookie, uint32_t *ccount)
301 {
302 	ldc_mhdl_t	*mhdl;
303 	ldc_chan_t 	*ldcp;
304 	ldc_mtbl_t	*mtbl;
305 	ldc_memseg_t	*memseg;
306 	ldc_mte_t	tmp_mte;
307 	uint64_t	index, prev_index = 0;
308 	int64_t		cookie_idx;
309 	uintptr_t	raddr, ra_aligned;
310 	uint64_t	psize, poffset, v_offset;
311 	uint64_t	pg_shift, pg_size, pg_size_code, pg_mask;
312 	pgcnt_t		npages;
313 	caddr_t		v_align, addr;
314 	int 		i, rv;
315 
316 	if (mhandle == NULL) {
317 		DWARN(DBG_ALL_LDCS,
318 		    "ldc_mem_bind_handle: invalid memory handle\n");
319 		return (EINVAL);
320 	}
321 	mhdl = (ldc_mhdl_t *)mhandle;
322 	ldcp = mhdl->ldcp;
323 
324 	/* clear count */
325 	*ccount = 0;
326 
327 	mutex_enter(&mhdl->lock);
328 
329 	if (mhdl->status == LDC_BOUND || mhdl->memseg != NULL) {
330 		DWARN(ldcp->id,
331 		    "ldc_mem_bind_handle: (0x%x) handle already bound\n",
332 		    mhandle);
333 		mutex_exit(&mhdl->lock);
334 		return (EINVAL);
335 	}
336 
337 	/* Force address and size to be 8-byte aligned */
338 	if ((((uintptr_t)vaddr | len) & 0x7) != 0) {
339 		DWARN(ldcp->id,
340 		    "ldc_mem_bind_handle: addr/size is not 8-byte aligned\n");
341 		mutex_exit(&mhdl->lock);
342 		return (EINVAL);
343 	}
344 
345 	mutex_enter(&ldcp->lock);
346 
347 	/*
348 	 * If this channel is binding a memory handle for the
349 	 * first time allocate it a memory map table and initialize it
350 	 */
351 	if ((mtbl = ldcp->mtbl) == NULL) {
352 
353 		/* Allocate and initialize the map table structure */
354 		mtbl = kmem_zalloc(sizeof (ldc_mtbl_t), KM_SLEEP);
355 		mtbl->num_entries = mtbl->num_avail = ldc_maptable_entries;
356 		mtbl->size = ldc_maptable_entries * sizeof (ldc_mte_slot_t);
357 		mtbl->next_entry = NULL;
358 		mtbl->contigmem = B_TRUE;
359 
360 		/* Allocate the table itself */
361 		mtbl->table = (ldc_mte_slot_t *)
362 		    contig_mem_alloc_align(mtbl->size, MMU_PAGESIZE);
363 		if (mtbl->table == NULL) {
364 
365 			/* allocate a page of memory using kmem_alloc */
366 			mtbl->table = kmem_alloc(MMU_PAGESIZE, KM_SLEEP);
367 			mtbl->size = MMU_PAGESIZE;
368 			mtbl->contigmem = B_FALSE;
369 			mtbl->num_entries = mtbl->num_avail =
370 			    mtbl->size / sizeof (ldc_mte_slot_t);
371 			DWARN(ldcp->id,
372 			    "ldc_mem_bind_handle: (0x%llx) reduced tbl size "
373 			    "to %lx entries\n", ldcp->id, mtbl->num_entries);
374 		}
375 
376 		/* zero out the memory */
377 		bzero(mtbl->table, mtbl->size);
378 
379 		/* initialize the lock */
380 		mutex_init(&mtbl->lock, NULL, MUTEX_DRIVER, NULL);
381 
382 		/* register table for this channel */
383 		rv = hv_ldc_set_map_table(ldcp->id,
384 		    va_to_pa(mtbl->table), mtbl->num_entries);
385 		if (rv != 0) {
386 			DWARN(DBG_ALL_LDCS,
387 			    "ldc_mem_bind_handle: (0x%lx) err %d mapping tbl",
388 			    ldcp->id, rv);
389 			if (mtbl->contigmem)
390 				contig_mem_free(mtbl->table, mtbl->size);
391 			else
392 				kmem_free(mtbl->table, mtbl->size);
393 			mutex_destroy(&mtbl->lock);
394 			kmem_free(mtbl, sizeof (ldc_mtbl_t));
395 			mutex_exit(&ldcp->lock);
396 			mutex_exit(&mhdl->lock);
397 			return (EIO);
398 		}
399 
400 		ldcp->mtbl = mtbl;
401 
402 		D1(ldcp->id,
403 		    "ldc_mem_bind_handle: (0x%llx) alloc'd map table 0x%llx\n",
404 		    ldcp->id, ldcp->mtbl->table);
405 	}
406 
407 	mutex_exit(&ldcp->lock);
408 
409 	/* FUTURE: get the page size, pgsz code, and shift */
410 	pg_size = MMU_PAGESIZE;
411 	pg_size_code = page_szc(pg_size);
412 	pg_shift = page_get_shift(pg_size_code);
413 	pg_mask = ~(pg_size - 1);
414 
415 	D1(ldcp->id, "ldc_mem_bind_handle: (0x%llx) binding "
416 	    "va 0x%llx pgsz=0x%llx, pgszc=0x%llx, pg_shift=0x%llx\n",
417 	    ldcp->id, vaddr, pg_size, pg_size_code, pg_shift);
418 
419 	/* aligned VA and its offset */
420 	v_align = (caddr_t)(((uintptr_t)vaddr) & ~(pg_size - 1));
421 	v_offset = ((uintptr_t)vaddr) & (pg_size - 1);
422 
423 	npages = (len+v_offset)/pg_size;
424 	npages = ((len+v_offset)%pg_size == 0) ? npages : npages+1;
425 
426 	D1(ldcp->id, "ldc_mem_bind_handle: binding "
427 	    "(0x%llx) v=0x%llx,val=0x%llx,off=0x%x,pgs=0x%x\n",
428 	    ldcp->id, vaddr, v_align, v_offset, npages);
429 
430 	/* lock the memory table - exclusive access to channel */
431 	mutex_enter(&mtbl->lock);
432 
433 	if (npages > mtbl->num_avail) {
434 		D1(ldcp->id, "ldc_mem_bind_handle: (0x%llx) no table entries\n",
435 		    ldcp->id);
436 		mutex_exit(&mtbl->lock);
437 		mutex_exit(&mhdl->lock);
438 		return (ENOMEM);
439 	}
440 
441 	/* Allocate a memseg structure */
442 	memseg = mhdl->memseg =
443 	    kmem_cache_alloc(ldcssp->memseg_cache, KM_SLEEP);
444 
445 	/* Allocate memory to store all pages and cookies */
446 	memseg->pages = kmem_zalloc((sizeof (ldc_page_t) * npages), KM_SLEEP);
447 	memseg->cookies =
448 	    kmem_zalloc((sizeof (ldc_mem_cookie_t) * npages), KM_SLEEP);
449 
450 	D2(ldcp->id, "ldc_mem_bind_handle: (0x%llx) processing 0x%llx pages\n",
451 	    ldcp->id, npages);
452 
453 	addr = v_align;
454 
455 	/*
456 	 * Table slots are used in a round-robin manner. The algorithm permits
457 	 * inserting duplicate entries. Slots allocated earlier will typically
458 	 * get freed before we get back to reusing the slot. Inserting duplicate
459 	 * entries should be OK, as we only look up entries using the cookie
460 	 * addr, i.e. the tbl index, during export, unexport and copy operations.
461 	 *
462 	 * One implementation that was tried was to search for a duplicate
463 	 * page entry first and reuse it. The search overhead is very high and
464 	 * in the vnet case dropped the perf by almost half, 50 to 24 mbps.
465 	 * So it does make sense to avoid searching for duplicates.
466 	 *
467 	 * But during the process of searching for a free slot, if we find a
468 	 * duplicate entry we will go ahead and use it, and bump its use count.
469 	 */
470 
471 	/* index to start searching from */
472 	index = mtbl->next_entry;
473 	cookie_idx = -1;
474 
475 	tmp_mte.ll = 0;	/* initialise fields to 0 */
476 
477 	if (mtype & LDC_DIRECT_MAP) {
478 		tmp_mte.mte_r = (perm & LDC_MEM_R) ? 1 : 0;
479 		tmp_mte.mte_w = (perm & LDC_MEM_W) ? 1 : 0;
480 		tmp_mte.mte_x = (perm & LDC_MEM_X) ? 1 : 0;
481 	}
482 
483 	if (mtype & LDC_SHADOW_MAP) {
484 		tmp_mte.mte_cr = (perm & LDC_MEM_R) ? 1 : 0;
485 		tmp_mte.mte_cw = (perm & LDC_MEM_W) ? 1 : 0;
486 	}
487 
488 	if (mtype & LDC_IO_MAP) {
489 		tmp_mte.mte_ir = (perm & LDC_MEM_R) ? 1 : 0;
490 		tmp_mte.mte_iw = (perm & LDC_MEM_W) ? 1 : 0;
491 	}
492 
493 	D1(ldcp->id, "ldc_mem_bind_handle mte=0x%llx\n", tmp_mte.ll);
494 
495 	tmp_mte.mte_pgszc = pg_size_code;
496 
497 	/* initialize each mem table entry */
498 	for (i = 0; i < npages; i++) {
499 
500 		/* check if slot is available in the table */
501 		while (mtbl->table[index].entry.ll != 0) {
502 
503 			index = (index + 1) % mtbl->num_entries;
504 
505 			if (index == mtbl->next_entry) {
506 				/* we have looped around */
507 				DWARN(DBG_ALL_LDCS,
508 				    "ldc_mem_bind_handle: (0x%llx) cannot find "
509 				    "entry\n", ldcp->id);
510 				*ccount = 0;
511 
512 				/* NOTE: free memory, remove previous entries */
513 				/* this shouldn't happen as num_avail was ok */
514 
515 				mutex_exit(&mtbl->lock);
516 				mutex_exit(&mhdl->lock);
517 				return (ENOMEM);
518 			}
519 		}
520 
521 		/* get the real address */
522 		raddr = va_to_pa((void *)addr);
523 		ra_aligned = ((uintptr_t)raddr & pg_mask);
524 
525 		/* build the mte */
526 		tmp_mte.mte_rpfn = ra_aligned >> pg_shift;
527 
528 		D1(ldcp->id, "ldc_mem_bind_handle mte=0x%llx\n", tmp_mte.ll);
529 
530 		/* update entry in table */
531 		mtbl->table[index].entry = tmp_mte;
532 
533 		D2(ldcp->id, "ldc_mem_bind_handle: (0x%llx) stored MTE 0x%llx"
534 		    " into loc 0x%llx\n", ldcp->id, tmp_mte.ll, index);
535 
536 		/* calculate the size and offset for this export range */
537 		if (i == 0) {
538 			/* first page */
539 			psize = min((pg_size - v_offset), len);
540 			poffset = v_offset;
541 
542 		} else if (i == (npages - 1)) {
543 			/* last page */
544 			psize =	(((uintptr_t)(vaddr + len)) &
545 			    ((uint64_t)(pg_size-1)));
546 			if (psize == 0)
547 				psize = pg_size;
548 			poffset = 0;
549 
550 		} else {
551 			/* middle pages */
552 			psize = pg_size;
553 			poffset = 0;
554 		}
555 
556 		/* store entry for this page */
557 		memseg->pages[i].index = index;
558 		memseg->pages[i].raddr = raddr;
559 		memseg->pages[i].offset = poffset;
560 		memseg->pages[i].size = psize;
561 		memseg->pages[i].mte = &(mtbl->table[index]);
562 
563 		/* create the cookie */
564 		if (i == 0 || (index != prev_index + 1)) {
565 			cookie_idx++;
566 			memseg->cookies[cookie_idx].addr =
567 			    IDX2COOKIE(index, pg_size_code, pg_shift);
568 			memseg->cookies[cookie_idx].addr |= poffset;
569 			memseg->cookies[cookie_idx].size = psize;
570 
571 		} else {
572 			memseg->cookies[cookie_idx].size += psize;
573 		}
574 
575 		D1(ldcp->id, "ldc_mem_bind_handle: bound "
576 		    "(0x%llx) va=0x%llx, idx=0x%llx, "
577 		    "ra=0x%llx(sz=0x%x,off=0x%x)\n",
578 		    ldcp->id, addr, index, raddr, psize, poffset);
579 
580 		/* decrement number of available entries */
581 		mtbl->num_avail--;
582 
583 		/* increment va by page size */
584 		addr += pg_size;
585 
586 		/* increment index */
587 		prev_index = index;
588 		index = (index + 1) % mtbl->num_entries;
589 
590 		/* save the next slot */
591 		mtbl->next_entry = index;
592 	}
593 
594 	mutex_exit(&mtbl->lock);
595 
596 	/* memory handle = bound */
597 	mhdl->mtype = mtype;
598 	mhdl->perm = perm;
599 	mhdl->status = LDC_BOUND;
600 
601 	/* update memseg_t */
602 	memseg->vaddr = vaddr;
603 	memseg->raddr = memseg->pages[0].raddr;
604 	memseg->size = len;
605 	memseg->npages = npages;
606 	memseg->ncookies = cookie_idx + 1;
607 	memseg->next_cookie = (memseg->ncookies > 1) ? 1 : 0;
608 
609 	/* return count and first cookie */
610 	*ccount = memseg->ncookies;
611 	cookie->addr = memseg->cookies[0].addr;
612 	cookie->size = memseg->cookies[0].size;
613 
614 	D1(ldcp->id,
615 	    "ldc_mem_bind_handle: (0x%llx) bound 0x%llx, va=0x%llx, "
616 	    "pgs=0x%llx cookies=0x%llx\n",
617 	    ldcp->id, mhdl, vaddr, npages, memseg->ncookies);
618 
619 	mutex_exit(&mhdl->lock);
620 	return (0);
621 }
622 
623 /*
624  * Return the next cookie associated with the specified memory handle
625  */
626 int
627 ldc_mem_nextcookie(ldc_mem_handle_t mhandle, ldc_mem_cookie_t *cookie)
628 {
629 	ldc_mhdl_t	*mhdl;
630 	ldc_chan_t 	*ldcp;
631 	ldc_memseg_t	*memseg;
632 
633 	if (mhandle == NULL) {
634 		DWARN(DBG_ALL_LDCS,
635 		    "ldc_mem_nextcookie: invalid memory handle\n");
636 		return (EINVAL);
637 	}
638 	mhdl = (ldc_mhdl_t *)mhandle;
639 
640 	mutex_enter(&mhdl->lock);
641 
642 	ldcp = mhdl->ldcp;
643 	memseg = mhdl->memseg;
644 
645 	if (cookie == 0) {
646 		DWARN(ldcp->id,
647 		    "ldc_mem_nextcookie:(0x%llx) invalid cookie arg\n",
648 		    ldcp->id);
649 		mutex_exit(&mhdl->lock);
650 		return (EINVAL);
651 	}
652 
653 	if (memseg->next_cookie != 0) {
654 		cookie->addr = memseg->cookies[memseg->next_cookie].addr;
655 		cookie->size = memseg->cookies[memseg->next_cookie].size;
656 		memseg->next_cookie++;
657 		if (memseg->next_cookie == memseg->ncookies)
658 			memseg->next_cookie = 0;
659 
660 	} else {
661 		DWARN(ldcp->id,
662 		    "ldc_mem_nextcookie:(0x%llx) no more cookies\n", ldcp->id);
663 		cookie->addr = 0;
664 		cookie->size = 0;
665 		mutex_exit(&mhdl->lock);
666 		return (EINVAL);
667 	}
668 
669 	D1(ldcp->id,
670 	    "ldc_mem_nextcookie: (0x%llx) cookie addr=0x%llx,sz=0x%llx\n",
671 	    ldcp->id, cookie->addr, cookie->size);
672 
673 	mutex_exit(&mhdl->lock);
674 	return (0);
675 }
676 
677 /*
678  * Unbind the virtual memory region associated with the specified
679  * memory handle. All associated cookies are freed and the corresponding
680  * RA space is no longer exported.
681  */
682 int
683 ldc_mem_unbind_handle(ldc_mem_handle_t mhandle)
684 {
685 	ldc_mhdl_t	*mhdl;
686 	ldc_chan_t 	*ldcp;
687 	ldc_mtbl_t	*mtbl;
688 	ldc_memseg_t	*memseg;
689 	uint64_t	cookie_addr;
690 	uint64_t	pg_shift, pg_size_code;
691 	int		i, rv, retries;
692 
693 	if (mhandle == NULL) {
694 		DWARN(DBG_ALL_LDCS,
695 		    "ldc_mem_unbind_handle: invalid memory handle\n");
696 		return (EINVAL);
697 	}
698 	mhdl = (ldc_mhdl_t *)mhandle;
699 
700 	mutex_enter(&mhdl->lock);
701 
702 	if (mhdl->status == LDC_UNBOUND) {
703 		DWARN(DBG_ALL_LDCS,
704 		    "ldc_mem_unbind_handle: (0x%x) handle is not bound\n",
705 		    mhandle);
706 		mutex_exit(&mhdl->lock);
707 		return (EINVAL);
708 	}
709 
710 	ldcp = mhdl->ldcp;
711 	mtbl = ldcp->mtbl;
712 
713 	memseg = mhdl->memseg;
714 
715 	/* lock the memory table - exclusive access to channel */
716 	mutex_enter(&mtbl->lock);
717 
718 	/* undo the pages exported */
719 	for (i = 0; i < memseg->npages; i++) {
720 
721 		/* clear the entry from the table */
722 		memseg->pages[i].mte->entry.ll = 0;
723 
724 		/* check for mapped pages, revocation cookie != 0 */
725 		if (memseg->pages[i].mte->cookie) {
726 
727 			pg_size_code = page_szc(memseg->pages[i].size);
728 			pg_shift = page_get_shift(pg_size_code);
729 			cookie_addr = IDX2COOKIE(memseg->pages[i].index,
730 			    pg_size_code, pg_shift);
731 
732 			D1(ldcp->id, "ldc_mem_unbind_handle: (0x%llx) revoke "
733 			    "cookie 0x%llx, rcookie 0x%llx\n", ldcp->id,
734 			    cookie_addr, memseg->pages[i].mte->cookie);
735 
736 			retries = 0;
737 			do {
738 				rv = hv_ldc_revoke(ldcp->id, cookie_addr,
739 				    memseg->pages[i].mte->cookie);
740 
741 				if (rv != H_EWOULDBLOCK)
742 					break;
743 
744 				drv_usecwait(ldc_delay);
745 
746 			} while (retries++ < ldc_max_retries);
747 
748 			if (rv) {
749 				DWARN(ldcp->id,
750 				    "ldc_mem_unbind_handle: (0x%llx) cannot "
751 				    "revoke mapping, cookie %llx\n", ldcp->id,
752 				    cookie_addr);
753 			}
754 		}
755 
756 		mtbl->num_avail++;
757 	}
758 	mutex_exit(&mtbl->lock);
759 
760 	/* free the allocated memseg and page structures */
761 	kmem_free(memseg->pages, (sizeof (ldc_page_t) * memseg->npages));
762 	kmem_free(memseg->cookies,
763 	    (sizeof (ldc_mem_cookie_t) * memseg->npages));
764 	kmem_cache_free(ldcssp->memseg_cache, memseg);
765 
766 	/* uninitialize the memory handle */
767 	mhdl->memseg = NULL;
768 	mhdl->status = LDC_UNBOUND;
769 
770 	D1(ldcp->id, "ldc_mem_unbind_handle: (0x%llx) unbound handle 0x%llx\n",
771 	    ldcp->id, mhdl);
772 
773 	mutex_exit(&mhdl->lock);
774 	return (0);
775 }
776 
777 /*
778  * Get information about a memory handle: its status and, if it is bound
779  * or mapped, the base VA/RA, map type and permissions are returned.
780  */
781 int
782 ldc_mem_info(ldc_mem_handle_t mhandle, ldc_mem_info_t *minfo)
783 {
784 	ldc_mhdl_t	*mhdl;
785 
786 	if (mhandle == NULL) {
787 		DWARN(DBG_ALL_LDCS, "ldc_mem_info: invalid memory handle\n");
788 		return (EINVAL);
789 	}
790 	mhdl = (ldc_mhdl_t *)mhandle;
791 
792 	if (minfo == NULL) {
793 		DWARN(DBG_ALL_LDCS, "ldc_mem_info: invalid args\n");
794 		return (EINVAL);
795 	}
796 
797 	mutex_enter(&mhdl->lock);
798 
799 	minfo->status = mhdl->status;
800 	if (mhdl->status == LDC_BOUND || mhdl->status == LDC_MAPPED) {
801 		minfo->vaddr = mhdl->memseg->vaddr;
802 		minfo->raddr = mhdl->memseg->raddr;
803 		minfo->mtype = mhdl->mtype;
804 		minfo->perm = mhdl->perm;
805 	}
806 	mutex_exit(&mhdl->lock);
807 
808 	return (0);
809 }
810 
811 /*
812  * Copy data between the client-specified virtual address space and the
813  * exported memory associated with the cookies.
814  * The direction argument determines whether the data is read from or
815  * written to exported memory.
816  */
817 int
818 ldc_mem_copy(ldc_handle_t handle, caddr_t vaddr, uint64_t off, size_t *size,
819     ldc_mem_cookie_t *cookies, uint32_t ccount, uint8_t direction)
820 {
821 	ldc_chan_t 	*ldcp;
822 	uint64_t	local_voff, local_valign;
823 	uint64_t	cookie_addr, cookie_size;
824 	uint64_t	pg_shift, pg_size, pg_size_code;
825 	uint64_t 	export_caddr, export_poff, export_psize, export_size;
826 	uint64_t	local_ra, local_poff, local_psize;
827 	uint64_t	copy_size, copied_len = 0, total_bal = 0, idx = 0;
828 	pgcnt_t		npages;
829 	size_t		len = *size;
830 	int 		i, rv = 0;
831 
832 	uint64_t	chid;
833 
834 	if (handle == NULL) {
835 		DWARN(DBG_ALL_LDCS, "ldc_mem_copy: invalid channel handle\n");
836 		return (EINVAL);
837 	}
838 	ldcp = (ldc_chan_t *)handle;
839 	chid = ldcp->id;
840 
841 	/* check to see if channel is UP */
842 	if (ldcp->tstate != TS_UP) {
843 		DWARN(chid, "ldc_mem_copy: (0x%llx) channel is not UP\n",
844 		    chid);
845 		return (ECONNRESET);
846 	}
847 
848 	/* Force address and size to be 8-byte aligned */
849 	if ((((uintptr_t)vaddr | len) & 0x7) != 0) {
850 		DWARN(chid,
851 		    "ldc_mem_copy: addr/sz is not 8-byte aligned\n");
852 		return (EINVAL);
853 	}
854 
855 	/* Find the size of the exported memory */
856 	export_size = 0;
857 	for (i = 0; i < ccount; i++)
858 		export_size += cookies[i].size;
859 
860 	/* check to see if offset is valid */
861 	if (off > export_size) {
862 		DWARN(chid,
863 		    "ldc_mem_copy: (0x%llx) start offset > export mem size\n",
864 		    chid);
865 		return (EINVAL);
866 	}
867 
868 	/*
869 	 * Check to see if the export size is smaller than the size we
870 	 * are requesting to copy - if so flag an error
871 	 */
872 	if ((export_size - off) < *size) {
873 		DWARN(chid,
874 		    "ldc_mem_copy: (0x%llx) copy size > export mem size\n",
875 		    chid);
876 		return (EINVAL);
877 	}
878 
879 	total_bal = min(export_size, *size);
880 
881 	/* FUTURE: get the page size, pgsz code, and shift */
882 	pg_size = MMU_PAGESIZE;
883 	pg_size_code = page_szc(pg_size);
884 	pg_shift = page_get_shift(pg_size_code);
885 
886 	D1(chid, "ldc_mem_copy: copying data "
887 	    "(0x%llx) va 0x%llx pgsz=0x%llx, pgszc=0x%llx, pg_shift=0x%llx\n",
888 	    chid, vaddr, pg_size, pg_size_code, pg_shift);
889 
890 	/* aligned VA and its offset */
891 	local_valign = (((uintptr_t)vaddr) & ~(pg_size - 1));
892 	local_voff = ((uintptr_t)vaddr) & (pg_size - 1);
893 
894 	npages = (len+local_voff)/pg_size;
895 	npages = ((len+local_voff)%pg_size == 0) ? npages : npages+1;
896 
897 	D1(chid,
898 	    "ldc_mem_copy: (0x%llx) v=0x%llx,val=0x%llx,off=0x%x,pgs=0x%x\n",
899 	    chid, vaddr, local_valign, local_voff, npages);
900 
901 	local_ra = va_to_pa((void *)local_valign);
902 	local_poff = local_voff;
903 	local_psize = min(len, (pg_size - local_voff));
904 
905 	len -= local_psize;
906 
907 	/*
908 	 * find the first cookie in the list of cookies
909 	 * if the offset passed in is not zero
910 	 */
911 	for (idx = 0; idx < ccount; idx++) {
912 		cookie_size = cookies[idx].size;
913 		if (off < cookie_size)
914 			break;
915 		off -= cookie_size;
916 	}
917 
918 	cookie_addr = cookies[idx].addr + off;
919 	cookie_size = cookies[idx].size - off;
920 
921 	export_caddr = cookie_addr & ~(pg_size - 1);
922 	export_poff = cookie_addr & (pg_size - 1);
923 	export_psize = min(cookie_size, (pg_size - export_poff));
924 
925 	for (;;) {
926 
927 		copy_size = min(export_psize, local_psize);
928 
929 		D1(chid,
930 		    "ldc_mem_copy:(0x%llx) dir=0x%x, caddr=0x%llx,"
931 		    " loc_ra=0x%llx, exp_poff=0x%llx, loc_poff=0x%llx,"
932 		    " exp_psz=0x%llx, loc_psz=0x%llx, copy_sz=0x%llx,"
933 		    " total_bal=0x%llx\n",
934 		    chid, direction, export_caddr, local_ra, export_poff,
935 		    local_poff, export_psize, local_psize, copy_size,
936 		    total_bal);
937 
938 		rv = hv_ldc_copy(chid, direction,
939 		    (export_caddr + export_poff), (local_ra + local_poff),
940 		    copy_size, &copied_len);
941 
942 		if (rv != 0) {
943 			int 		error = EIO;
944 			uint64_t	rx_hd, rx_tl;
945 
946 			DWARN(chid,
947 			    "ldc_mem_copy: (0x%llx) err %d during copy\n",
948 			    (unsigned long long)chid, rv);
949 			DWARN(chid,
950 			    "ldc_mem_copy: (0x%llx) dir=0x%x, caddr=0x%lx, "
951 			    "loc_ra=0x%lx, exp_poff=0x%lx, loc_poff=0x%lx,"
952 			    " exp_psz=0x%lx, loc_psz=0x%lx, copy_sz=0x%lx,"
953 			    " copied_len=0x%lx, total_bal=0x%lx\n",
954 			    chid, direction, export_caddr, local_ra,
955 			    export_poff, local_poff, export_psize, local_psize,
956 			    copy_size, copied_len, total_bal);
957 
958 			*size = *size - total_bal;
959 
960 			/*
961 			 * check if reason for copy error was due to
962 			 * a channel reset. we need to grab the lock
963 			 * just in case we have to do a reset.
964 			 */
965 			mutex_enter(&ldcp->lock);
966 			mutex_enter(&ldcp->tx_lock);
967 
968 			rv = hv_ldc_rx_get_state(ldcp->id,
969 			    &rx_hd, &rx_tl, &(ldcp->link_state));
970 			if (ldcp->link_state == LDC_CHANNEL_DOWN ||
971 			    ldcp->link_state == LDC_CHANNEL_RESET) {
972 				i_ldc_reset(ldcp, B_FALSE);
973 				error = ECONNRESET;
974 			}
975 
976 			mutex_exit(&ldcp->tx_lock);
977 			mutex_exit(&ldcp->lock);
978 
979 			return (error);
980 		}
981 
982 		ASSERT(copied_len <= copy_size);
983 
984 		D2(chid, "ldc_mem_copy: copied=0x%llx\n", copied_len);
985 		export_poff += copied_len;
986 		local_poff += copied_len;
987 		export_psize -= copied_len;
988 		local_psize -= copied_len;
989 		cookie_size -= copied_len;
990 
991 		total_bal -= copied_len;
992 
993 		if (copy_size != copied_len)
994 			continue;
995 
996 		if (export_psize == 0 && total_bal != 0) {
997 
998 			if (cookie_size == 0) {
999 				idx++;
1000 				cookie_addr = cookies[idx].addr;
1001 				cookie_size = cookies[idx].size;
1002 
1003 				export_caddr = cookie_addr & ~(pg_size - 1);
1004 				export_poff = cookie_addr & (pg_size - 1);
1005 				export_psize =
1006 				    min(cookie_size, (pg_size-export_poff));
1007 			} else {
1008 				export_caddr += pg_size;
1009 				export_poff = 0;
1010 				export_psize = min(cookie_size, pg_size);
1011 			}
1012 		}
1013 
1014 		if (local_psize == 0 && total_bal != 0) {
1015 			local_valign += pg_size;
1016 			local_ra = va_to_pa((void *)local_valign);
1017 			local_poff = 0;
1018 			local_psize = min(pg_size, len);
1019 			len -= local_psize;
1020 		}
1021 
1022 		/* check if we are all done */
1023 		if (total_bal == 0)
1024 			break;
1025 	}
1026 
1027 
1028 	D1(chid,
1029 	    "ldc_mem_copy: (0x%llx) done copying sz=0x%llx\n",
1030 	    chid, *size);
1031 
1032 	return (0);
1033 }
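
/*
 * Example use of ldc_mem_copy() (an illustrative sketch; 'lh', 'buf' and
 * the peer-supplied 'cookies'/'ccount' are hypothetical). Both the buffer
 * address and the length must be 8-byte aligned, and the channel must be
 * UP:
 *
 *	size_t len = 512;
 *
 *	// write 'len' bytes from the local buffer into the peer's
 *	// exported memory, starting at offset 0 of the export range
 *	rv = ldc_mem_copy(lh, buf, 0, &len, cookies, ccount, LDC_COPY_OUT);
 *
 *	// on return, 'len' reflects the number of bytes actually copied
 *	// (all of them on success)
 */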
1034 
1035 /*
1036  * Copy data between the client-specified virtual address space and
1037  * HV physical memory.
1038  *
1039  * The direction argument determines whether the data is read from or
1040  * written to HV memory. The direction values are LDC_COPY_IN/OUT,
1041  * as in the ldc_mem_copy() interface.
1042  */
1043 int
1044 ldc_mem_rdwr_cookie(ldc_handle_t handle, caddr_t vaddr, size_t *size,
1045     caddr_t paddr, uint8_t direction)
1046 {
1047 	ldc_chan_t 	*ldcp;
1048 	uint64_t	local_voff, local_valign;
1049 	uint64_t	pg_shift, pg_size, pg_size_code;
1050 	uint64_t 	target_pa, target_poff, target_psize, target_size;
1051 	uint64_t	local_ra, local_poff, local_psize;
1052 	uint64_t	copy_size, copied_len = 0;
1053 	pgcnt_t		npages;
1054 	size_t		len = *size;
1055 	int 		rv = 0;
1056 
1057 	if (handle == NULL) {
1058 		DWARN(DBG_ALL_LDCS,
1059 		    "ldc_mem_rdwr_cookie: invalid channel handle\n");
1060 		return (EINVAL);
1061 	}
1062 	ldcp = (ldc_chan_t *)handle;
1063 
1064 	mutex_enter(&ldcp->lock);
1065 
1066 	/* check to see if channel is UP */
1067 	if (ldcp->tstate != TS_UP) {
1068 		DWARN(ldcp->id,
1069 		    "ldc_mem_rdwr_cookie: (0x%llx) channel is not UP\n",
1070 		    ldcp->id);
1071 		mutex_exit(&ldcp->lock);
1072 		return (ECONNRESET);
1073 	}
1074 
1075 	/* Force address and size to be 8-byte aligned */
1076 	if ((((uintptr_t)vaddr | len) & 0x7) != 0) {
1077 		DWARN(ldcp->id,
1078 		    "ldc_mem_rdwr_cookie: addr/size is not 8-byte aligned\n");
1079 		mutex_exit(&ldcp->lock);
1080 		return (EINVAL);
1081 	}
1082 
1083 	target_size = *size;
1084 
1085 	/* FUTURE: get the page size, pgsz code, and shift */
1086 	pg_size = MMU_PAGESIZE;
1087 	pg_size_code = page_szc(pg_size);
1088 	pg_shift = page_get_shift(pg_size_code);
1089 
1090 	D1(ldcp->id, "ldc_mem_rdwr_cookie: copying data "
1091 	    "(0x%llx) va 0x%llx pgsz=0x%llx, pgszc=0x%llx, pg_shift=0x%llx\n",
1092 	    ldcp->id, vaddr, pg_size, pg_size_code, pg_shift);
1093 
1094 	/* aligned VA and its offset */
1095 	local_valign = ((uintptr_t)vaddr) & ~(pg_size - 1);
1096 	local_voff = ((uintptr_t)vaddr) & (pg_size - 1);
1097 
1098 	npages = (len + local_voff) / pg_size;
1099 	npages = ((len + local_voff) % pg_size == 0) ? npages : npages+1;
1100 
1101 	D1(ldcp->id, "ldc_mem_rdwr_cookie: (0x%llx) v=0x%llx, "
1102 	    "val=0x%llx,off=0x%x,pgs=0x%x\n",
1103 	    ldcp->id, vaddr, local_valign, local_voff, npages);
1104 
1105 	local_ra = va_to_pa((void *)local_valign);
1106 	local_poff = local_voff;
1107 	local_psize = min(len, (pg_size - local_voff));
1108 
1109 	len -= local_psize;
1110 
1111 	target_pa = ((uintptr_t)paddr) & ~(pg_size - 1);
1112 	target_poff = ((uintptr_t)paddr) & (pg_size - 1);
1113 	target_psize = pg_size - target_poff;
1114 
1115 	for (;;) {
1116 
1117 		copy_size = min(target_psize, local_psize);
1118 
1119 		D1(ldcp->id,
1120 		    "ldc_mem_rdwr_cookie: (0x%llx) dir=0x%x, tar_pa=0x%llx,"
1121 		    " loc_ra=0x%llx, tar_poff=0x%llx, loc_poff=0x%llx,"
1122 		    " tar_psz=0x%llx, loc_psz=0x%llx, copy_sz=0x%llx,"
1123 		    " total_bal=0x%llx\n",
1124 		    ldcp->id, direction, target_pa, local_ra, target_poff,
1125 		    local_poff, target_psize, local_psize, copy_size,
1126 		    target_size);
1127 
1128 		rv = hv_ldc_copy(ldcp->id, direction,
1129 		    (target_pa + target_poff), (local_ra + local_poff),
1130 		    copy_size, &copied_len);
1131 
1132 		if (rv != 0) {
1133 			DWARN(DBG_ALL_LDCS,
1134 			    "ldc_mem_rdwr_cookie: (0x%lx) err %d during copy\n",
1135 			    ldcp->id, rv);
1136 			DWARN(DBG_ALL_LDCS,
1137 			    "ldc_mem_rdwr_cookie: (0x%llx) dir=%lld, "
1138 			    "tar_pa=0x%llx, loc_ra=0x%llx, tar_poff=0x%llx, "
1139 			    "loc_poff=0x%llx, tar_psz=0x%llx, loc_psz=0x%llx, "
1140 			    "copy_sz=0x%llx, total_bal=0x%llx\n",
1141 			    ldcp->id, direction, target_pa, local_ra,
1142 			    target_poff, local_poff, target_psize, local_psize,
1143 			    copy_size, target_size);
1144 
1145 			*size = *size - target_size;
1146 			mutex_exit(&ldcp->lock);
1147 			return (i_ldc_h2v_error(rv));
1148 		}
1149 
1150 		D2(ldcp->id, "ldc_mem_rdwr_cookie: copied=0x%llx\n",
1151 		    copied_len);
1152 		target_poff += copied_len;
1153 		local_poff += copied_len;
1154 		target_psize -= copied_len;
1155 		local_psize -= copied_len;
1156 
1157 		target_size -= copied_len;
1158 
1159 		if (copy_size != copied_len)
1160 			continue;
1161 
1162 		if (target_psize == 0 && target_size != 0) {
1163 			target_pa += pg_size;
1164 			target_poff = 0;
1165 			target_psize = min(pg_size, target_size);
1166 		}
1167 
1168 		if (local_psize == 0 && target_size != 0) {
1169 			local_valign += pg_size;
1170 			local_ra = va_to_pa((void *)local_valign);
1171 			local_poff = 0;
1172 			local_psize = min(pg_size, len);
1173 			len -= local_psize;
1174 		}
1175 
1176 		/* check if we are all done */
1177 		if (target_size == 0)
1178 			break;
1179 	}
1180 
1181 	mutex_exit(&ldcp->lock);
1182 
1183 	D1(ldcp->id, "ldc_mem_rdwr_cookie: (0x%llx) done copying sz=0x%llx\n",
1184 	    ldcp->id, *size);
1185 
1186 	return (0);
1187 }
1188 
1189 /*
1190  * Map an exported memory segment into the local address space. If the
1191  * memory range was exported for direct map access, an HV call is made
1192  * to allocate a RA range. If the map is done via a shadow copy, local
1193  * shadow memory is allocated and the base VA is returned in 'vaddr'. If
1194  * the mapping is a direct map then the RA is returned in 'raddr'.
1195  */
1196 int
1197 ldc_mem_map(ldc_mem_handle_t mhandle, ldc_mem_cookie_t *cookie, uint32_t ccount,
1198     uint8_t mtype, uint8_t perm, caddr_t *vaddr, caddr_t *raddr)
1199 {
1200 	/*
1201 	 * Check if direct map over shared memory is enabled; if not, change
1202 	 * the mapping type to LDC_SHADOW_MAP.
1203 	 */
1204 	if (ldc_shmem_enabled == 0)
1205 		mtype = LDC_SHADOW_MAP;
1206 
1207 	return (i_ldc_mem_map(mhandle, cookie, ccount, mtype, perm,
1208 	    vaddr, raddr));
1209 }
1210 
1211 static int
1212 i_ldc_mem_map(ldc_mem_handle_t mhandle, ldc_mem_cookie_t *cookie,
1213     uint32_t ccount, uint8_t mtype, uint8_t perm, caddr_t *vaddr,
1214     caddr_t *raddr)
1215 {
1216 
1217 	int		i, j, idx, rv, retries;
1218 	ldc_chan_t 	*ldcp;
1219 	ldc_mhdl_t	*mhdl;
1220 	ldc_memseg_t	*memseg;
1221 	caddr_t		tmpaddr;
1222 	uint64_t	map_perm = perm;
1223 	uint64_t	pg_size, pg_shift, pg_size_code, pg_mask;
1224 	uint64_t	exp_size = 0, base_off, map_size, npages;
1225 	uint64_t	cookie_addr, cookie_off, cookie_size;
1226 	tte_t		ldc_tte;
1227 
1228 	if (mhandle == NULL) {
1229 		DWARN(DBG_ALL_LDCS, "ldc_mem_map: invalid memory handle\n");
1230 		return (EINVAL);
1231 	}
1232 	mhdl = (ldc_mhdl_t *)mhandle;
1233 
1234 	mutex_enter(&mhdl->lock);
1235 
1236 	if (mhdl->status == LDC_BOUND || mhdl->status == LDC_MAPPED ||
1237 	    mhdl->memseg != NULL) {
1238 		DWARN(DBG_ALL_LDCS,
1239 		    "ldc_mem_map: (0x%llx) handle bound/mapped\n", mhandle);
1240 		mutex_exit(&mhdl->lock);
1241 		return (EINVAL);
1242 	}
1243 
1244 	ldcp = mhdl->ldcp;
1245 
1246 	mutex_enter(&ldcp->lock);
1247 
1248 	if (ldcp->tstate != TS_UP) {
1249 		DWARN(ldcp->id,
1250 		    "ldc_mem_map: (0x%llx) channel is not UP\n",
1251 		    ldcp->id);
1252 		mutex_exit(&ldcp->lock);
1253 		mutex_exit(&mhdl->lock);
1254 		return (ECONNRESET);
1255 	}
1256 
1257 	if ((mtype & (LDC_SHADOW_MAP|LDC_DIRECT_MAP|LDC_IO_MAP)) == 0) {
1258 		DWARN(ldcp->id, "ldc_mem_map: invalid map type\n");
1259 		mutex_exit(&ldcp->lock);
1260 		mutex_exit(&mhdl->lock);
1261 		return (EINVAL);
1262 	}
1263 
1264 	D1(ldcp->id, "ldc_mem_map: (0x%llx) cookie = 0x%llx,0x%llx\n",
1265 	    ldcp->id, cookie->addr, cookie->size);
1266 
1267 	/* FUTURE: get the page size, pgsz code, and shift */
1268 	pg_size = MMU_PAGESIZE;
1269 	pg_size_code = page_szc(pg_size);
1270 	pg_shift = page_get_shift(pg_size_code);
1271 	pg_mask = ~(pg_size - 1);
1272 
1273 	/* calculate the number of pages in the exported cookie */
1274 	base_off = cookie[0].addr & (pg_size - 1);
1275 	for (idx = 0; idx < ccount; idx++)
1276 		exp_size += cookie[idx].size;
1277 	map_size = P2ROUNDUP((exp_size + base_off), pg_size);
1278 	npages = (map_size >> pg_shift);
1279 
1280 	/* Allocate memseg structure */
1281 	memseg = mhdl->memseg =
1282 	    kmem_cache_alloc(ldcssp->memseg_cache, KM_SLEEP);
1283 
1284 	/* Allocate memory to store all pages and cookies */
1285 	memseg->pages =	kmem_zalloc((sizeof (ldc_page_t) * npages), KM_SLEEP);
1286 	memseg->cookies =
1287 	    kmem_zalloc((sizeof (ldc_mem_cookie_t) * ccount), KM_SLEEP);
1288 
1289 	D2(ldcp->id, "ldc_mem_map: (0x%llx) exp_size=0x%llx, map_size=0x%llx,"
1290 	    "pages=0x%llx\n", ldcp->id, exp_size, map_size, npages);
1291 
1292 	/*
1293 	 * Check to see if the client is requesting a direct or shadow map.
1294 	 * If direct map is requested, try to map remote memory first,
1295 	 * and if that fails, revert to shadow map
1296 	 */
1297 	if (mtype == LDC_DIRECT_MAP) {
1298 
1299 		/* Allocate kernel virtual space for mapping */
1300 		memseg->vaddr = vmem_xalloc(heap_arena, map_size,
1301 		    pg_size, 0, 0, NULL, NULL, VM_NOSLEEP);
1302 		if (memseg->vaddr == NULL) {
1303 			DWARN(DBG_ALL_LDCS,
1304 			    "ldc_mem_map: (0x%lx) memory map failed\n",
1305 			    ldcp->id);
1306 			kmem_free(memseg->cookies,
1307 			    (sizeof (ldc_mem_cookie_t) * ccount));
1308 			kmem_free(memseg->pages,
1309 			    (sizeof (ldc_page_t) * npages));
1310 			kmem_cache_free(ldcssp->memseg_cache, memseg);
1311 
1312 			mutex_exit(&ldcp->lock);
1313 			mutex_exit(&mhdl->lock);
1314 			return (ENOMEM);
1315 		}
1316 
1317 		/* Unload previous mapping */
1318 		hat_unload(kas.a_hat, memseg->vaddr, map_size,
1319 		    HAT_UNLOAD_NOSYNC | HAT_UNLOAD_UNLOCK);
1320 
1321 		/* for each cookie passed in - map into address space */
1322 		idx = 0;
1323 		cookie_size = 0;
1324 		tmpaddr = memseg->vaddr;
1325 
1326 		for (i = 0; i < npages; i++) {
1327 
1328 			if (cookie_size == 0) {
1329 				ASSERT(idx < ccount);
1330 				cookie_addr = cookie[idx].addr & pg_mask;
1331 				cookie_off = cookie[idx].addr & (pg_size - 1);
1332 				cookie_size =
1333 				    P2ROUNDUP((cookie_off + cookie[idx].size),
1334 				    pg_size);
1335 				idx++;
1336 			}
1337 
1338 			D1(ldcp->id, "ldc_mem_map: (0x%llx) mapping "
1339 			    "cookie 0x%llx, bal=0x%llx\n", ldcp->id,
1340 			    cookie_addr, cookie_size);
1341 
1342 			/* map the cookie into address space */
1343 			for (retries = 0; retries < ldc_max_retries;
1344 			    retries++) {
1345 
1346 				rv = hv_ldc_mapin(ldcp->id, cookie_addr,
1347 				    &memseg->pages[i].raddr, &map_perm);
1348 				if (rv != H_EWOULDBLOCK && rv != H_ETOOMANY)
1349 					break;
1350 
1351 				drv_usecwait(ldc_delay);
1352 			}
1353 
1354 			if (rv || memseg->pages[i].raddr == 0) {
1355 				DWARN(ldcp->id,
1356 				    "ldc_mem_map: (0x%llx) hv mapin err %d\n",
1357 				    ldcp->id, rv);
1358 
1359 				/* remove previous mapins */
1360 				hat_unload(kas.a_hat, memseg->vaddr, map_size,
1361 				    HAT_UNLOAD_NOSYNC | HAT_UNLOAD_UNLOCK);
1362 				for (j = 0; j < i; j++) {
1363 					rv = hv_ldc_unmap(
1364 					    memseg->pages[j].raddr);
1365 					if (rv) {
1366 						DWARN(ldcp->id,
1367 						    "ldc_mem_map: (0x%llx) "
1368 						    "cannot unmap ra=0x%llx\n",
1369 						    ldcp->id,
1370 						    memseg->pages[j].raddr);
1371 					}
1372 				}
1373 
1374 				/* free kernel virtual space */
1375 				vmem_free(heap_arena, (void *)memseg->vaddr,
1376 				    map_size);
1377 
1378 				/* direct map failed - revert to shadow map */
1379 				mtype = LDC_SHADOW_MAP;
1380 				break;
1381 
1382 			} else {
1383 
1384 				D1(ldcp->id,
1385 				    "ldc_mem_map: (0x%llx) vtop map 0x%llx -> "
1386 				    "0x%llx, cookie=0x%llx, perm=0x%llx\n",
1387 				    ldcp->id, tmpaddr, memseg->pages[i].raddr,
1388 				    cookie_addr, perm);
1389 
1390 				/*
1391 				 * NOTE: Calling hat_devload directly causes it
1392 				 * to look for a page_t using the pfn. Since this
1393 				 * addr is greater than the memlist, it treats
1394 				 * it as non-memory.
1395 				 */
1396 				sfmmu_memtte(&ldc_tte,
1397 				    (pfn_t)(memseg->pages[i].raddr >> pg_shift),
1398 				    PROT_READ | PROT_WRITE | HAT_NOSYNC, TTE8K);
1399 
1400 				D1(ldcp->id,
1401 				    "ldc_mem_map: (0x%llx) ra 0x%llx -> "
1402 				    "tte 0x%llx\n", ldcp->id,
1403 				    memseg->pages[i].raddr, ldc_tte);
1404 
1405 				sfmmu_tteload(kas.a_hat, &ldc_tte, tmpaddr,
1406 				    NULL, HAT_LOAD_LOCK);
1407 
1408 				cookie_size -= pg_size;
1409 				cookie_addr += pg_size;
1410 				tmpaddr += pg_size;
1411 			}
1412 		}
1413 	}
1414 
1415 	if (mtype == LDC_SHADOW_MAP) {
1416 		if (*vaddr == NULL) {
1417 			memseg->vaddr = kmem_zalloc(exp_size, KM_SLEEP);
1418 			mhdl->myshadow = B_TRUE;
1419 
1420 			D1(ldcp->id, "ldc_mem_map: (0x%llx) allocated "
1421 			    "shadow page va=0x%llx\n", ldcp->id, memseg->vaddr);
1422 		} else {
1423 			/*
1424 			 * Use client supplied memory for memseg->vaddr
1425 			 * WARNING: assuming that client mem is >= exp_size
1426 			 */
1427 			memseg->vaddr = *vaddr;
1428 		}
1429 
1430 		/* Save all page and cookie information */
1431 		for (i = 0, tmpaddr = memseg->vaddr; i < npages; i++) {
1432 			memseg->pages[i].raddr = va_to_pa(tmpaddr);
1433 			memseg->pages[i].size = pg_size;
1434 			tmpaddr += pg_size;
1435 		}
1436 
1437 	}
1438 
1439 	/* save all cookies */
1440 	bcopy(cookie, memseg->cookies, ccount * sizeof (ldc_mem_cookie_t));
1441 
1442 	/* update memseg_t */
1443 	memseg->raddr = memseg->pages[0].raddr;
1444 	memseg->size = (mtype == LDC_SHADOW_MAP) ? exp_size : map_size;
1445 	memseg->npages = npages;
1446 	memseg->ncookies = ccount;
1447 	memseg->next_cookie = 0;
1448 
1449 	/* memory handle = mapped */
1450 	mhdl->mtype = mtype;
1451 	mhdl->perm = perm;
1452 	mhdl->status = LDC_MAPPED;
1453 
1454 	D1(ldcp->id, "ldc_mem_map: (0x%llx) mapped 0x%llx, ra=0x%llx, "
1455 	    "va=0x%llx, pgs=0x%llx cookies=0x%llx\n",
1456 	    ldcp->id, mhdl, memseg->raddr, memseg->vaddr,
1457 	    memseg->npages, memseg->ncookies);
1458 
1459 	if (mtype == LDC_SHADOW_MAP)
1460 		base_off = 0;
1461 	if (raddr)
1462 		*raddr = (caddr_t)(memseg->raddr | base_off);
1463 	if (vaddr)
1464 		*vaddr = (caddr_t)((uintptr_t)memseg->vaddr | base_off);
1465 
1466 	mutex_exit(&ldcp->lock);
1467 	mutex_exit(&mhdl->lock);
1468 	return (0);
1469 }
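
/*
 * Typical importer-side usage (an illustrative sketch; the channel handle
 * 'lh' and the cookie received from the exporting peer are hypothetical):
 *
 *	ldc_mem_handle_t mh;
 *	ldc_mem_cookie_t peer_cookie;	// received from the exporter
 *	caddr_t va = NULL, ra = NULL;
 *
 *	if (ldc_mem_alloc_handle(lh, &mh) == 0 &&
 *	    ldc_mem_map(mh, &peer_cookie, 1, LDC_DIRECT_MAP, LDC_MEM_RW,
 *	    &va, &ra) == 0) {
 *		// for a shadow mapping, bracket accesses to 'va' with
 *		// ldc_mem_acquire()/ldc_mem_release(); then clean up with
 *		(void) ldc_mem_unmap(mh);
 *		(void) ldc_mem_free_handle(mh);
 *	}
 *
 * Note that ldc_mem_map() downgrades the request to LDC_SHADOW_MAP when
 * ldc_shmem_enabled is 0, and i_ldc_mem_map() itself falls back to a
 * shadow mapping (allocated because 'va' above is NULL) if the HV mapin
 * fails.
 */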
1470 
1471 /*
1472  * Unmap a memory segment. Free shadow memory (if any).
1473  */
1474 int
1475 ldc_mem_unmap(ldc_mem_handle_t mhandle)
1476 {
1477 	int		i, rv;
1478 	ldc_mhdl_t	*mhdl = (ldc_mhdl_t *)mhandle;
1479 	ldc_chan_t 	*ldcp;
1480 	ldc_memseg_t	*memseg;
1481 
1482 	if (mhdl == 0 || mhdl->status != LDC_MAPPED) {
1483 		DWARN(DBG_ALL_LDCS,
1484 		    "ldc_mem_unmap: (0x%llx) handle is not mapped\n",
1485 		    mhandle);
1486 		return (EINVAL);
1487 	}
1488 
1489 	mutex_enter(&mhdl->lock);
1490 
1491 	ldcp = mhdl->ldcp;
1492 	memseg = mhdl->memseg;
1493 
1494 	D1(ldcp->id, "ldc_mem_unmap: (0x%llx) unmapping handle 0x%llx\n",
1495 	    ldcp->id, mhdl);
1496 
1497 	/* if we allocated shadow memory - free it */
1498 	if (mhdl->mtype == LDC_SHADOW_MAP && mhdl->myshadow) {
1499 		kmem_free(memseg->vaddr, memseg->size);
1500 	} else if (mhdl->mtype == LDC_DIRECT_MAP) {
1501 
1502 		/* unmap in the case of DIRECT_MAP */
1503 		hat_unload(kas.a_hat, memseg->vaddr, memseg->size,
1504 		    HAT_UNLOAD_UNLOCK);
1505 
1506 		for (i = 0; i < memseg->npages; i++) {
1507 			rv = hv_ldc_unmap(memseg->pages[i].raddr);
1508 			if (rv) {
1509 				DWARN(DBG_ALL_LDCS,
1510 				    "ldc_mem_map: (0x%lx) hv unmap err %d\n",
1511 				    ldcp->id, rv);
1512 			}
1513 		}
1514 
1515 		vmem_free(heap_arena, (void *)memseg->vaddr, memseg->size);
1516 	}
1517 
1518 	/* free the allocated memseg and page structures */
1519 	kmem_free(memseg->pages, (sizeof (ldc_page_t) * memseg->npages));
1520 	kmem_free(memseg->cookies,
1521 	    (sizeof (ldc_mem_cookie_t) * memseg->ncookies));
1522 	kmem_cache_free(ldcssp->memseg_cache, memseg);
1523 
1524 	/* uninitialize the memory handle */
1525 	mhdl->memseg = NULL;
1526 	mhdl->status = LDC_UNBOUND;
1527 
1528 	D1(ldcp->id, "ldc_mem_unmap: (0x%llx) unmapped handle 0x%llx\n",
1529 	    ldcp->id, mhdl);
1530 
1531 	mutex_exit(&mhdl->lock);
1532 	return (0);
1533 }
1534 
1535 /*
1536  * Internal entry point for LDC mapped memory entry consistency
1537  * semantics. Acquire copies the contents of the remote memory
1538  * into the local shadow copy. The release operation copies the local
1539  * contents into the remote memory. The offset and size specify the
1540  * bounds for the memory range being synchronized.
1541  */
1542 static int
1543 i_ldc_mem_acquire_release(ldc_mem_handle_t mhandle, uint8_t direction,
1544     uint64_t offset, size_t size)
1545 {
1546 	int 		err;
1547 	ldc_mhdl_t	*mhdl;
1548 	ldc_chan_t	*ldcp;
1549 	ldc_memseg_t	*memseg;
1550 	caddr_t		local_vaddr;
1551 	size_t		copy_size;
1552 
1553 	if (mhandle == NULL) {
1554 		DWARN(DBG_ALL_LDCS,
1555 		    "i_ldc_mem_acquire_release: invalid memory handle\n");
1556 		return (EINVAL);
1557 	}
1558 	mhdl = (ldc_mhdl_t *)mhandle;
1559 
1560 	mutex_enter(&mhdl->lock);
1561 
1562 	if (mhdl->status != LDC_MAPPED || mhdl->ldcp == NULL) {
1563 		DWARN(DBG_ALL_LDCS,
1564 		    "i_ldc_mem_acquire_release: not mapped memory\n");
1565 		mutex_exit(&mhdl->lock);
1566 		return (EINVAL);
1567 	}
1568 
1569 	/* do nothing for direct map */
1570 	if (mhdl->mtype == LDC_DIRECT_MAP) {
1571 		mutex_exit(&mhdl->lock);
1572 		return (0);
1573 	}
1574 
1575 	/* do nothing for COPY_IN without MEM_R or COPY_OUT without MEM_W */
1576 	if ((direction == LDC_COPY_IN && (mhdl->perm & LDC_MEM_R) == 0) ||
1577 	    (direction == LDC_COPY_OUT && (mhdl->perm & LDC_MEM_W) == 0)) {
1578 		mutex_exit(&mhdl->lock);
1579 		return (0);
1580 	}
1581 
1582 	if (offset >= mhdl->memseg->size ||
1583 	    (offset + size) > mhdl->memseg->size) {
1584 		DWARN(DBG_ALL_LDCS,
1585 		    "i_ldc_mem_acquire_release: memory out of range\n");
1586 		mutex_exit(&mhdl->lock);
1587 		return (EINVAL);
1588 	}
1589 
1590 	/* get the channel handle and memory segment */
1591 	ldcp = mhdl->ldcp;
1592 	memseg = mhdl->memseg;
1593 
1594 	if (mhdl->mtype == LDC_SHADOW_MAP) {
1595 
1596 		local_vaddr = memseg->vaddr + offset;
1597 		copy_size = size;
1598 
1599 		/* copy to/from remote from/to local memory */
1600 		err = ldc_mem_copy((ldc_handle_t)ldcp, local_vaddr, offset,
1601 		    &copy_size, memseg->cookies, memseg->ncookies,
1602 		    direction);
1603 		if (err || copy_size != size) {
1604 			DWARN(ldcp->id,
1605 			    "i_ldc_mem_acquire_release: copy failed\n");
1606 			mutex_exit(&mhdl->lock);
1607 			return (err);
1608 		}
1609 	}
1610 
1611 	mutex_exit(&mhdl->lock);
1612 
1613 	return (0);
1614 }
1615 
1616 /*
1617  * Ensure that the contents of the local (shadow) memory segment are
1618  * consistent with the contents of the remote memory segment.
1619  */
1620 int
1621 ldc_mem_acquire(ldc_mem_handle_t mhandle, uint64_t offset, uint64_t size)
1622 {
1623 	return (i_ldc_mem_acquire_release(mhandle, LDC_COPY_IN, offset, size));
1624 }
1625 
1626 
1627 /*
1628  * Ensure that the contents of the remote memory segment are consistent
1629  * with the contents of the local memory segment.
1630  */
1631 int
1632 ldc_mem_release(ldc_mem_handle_t mhandle, uint64_t offset, uint64_t size)
1633 {
1634 	return (i_ldc_mem_acquire_release(mhandle, LDC_COPY_OUT, offset, size));
1635 }
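
/*
 * Usage note (added commentary): for a shadow mapping, a consumer would
 * typically call ldc_mem_acquire() before reading the mapped region, to
 * pull the peer's latest contents into the local shadow, and
 * ldc_mem_release() after writing it, to push local updates back to the
 * peer. For direct mappings both calls are no-ops.
 */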
1636 
1637 /*
1638  * Allocate a descriptor ring. The size of each descriptor must be
1639  * 8-byte aligned; the ring as a whole is rounded up to a multiple
1640  * of MMU_PAGESIZE.
1641  */
1642 int
1643 ldc_mem_dring_create(uint32_t len, uint32_t dsize, ldc_dring_handle_t *dhandle)
1644 {
1645 	ldc_dring_t *dringp;
1646 	size_t size = (dsize * len);
1647 
1648 	D1(DBG_ALL_LDCS, "ldc_mem_dring_create: len=0x%x, size=0x%x\n",
1649 	    len, dsize);
1650 
1651 	if (dhandle == NULL) {
1652 		DWARN(DBG_ALL_LDCS, "ldc_mem_dring_create: invalid dhandle\n");
1653 		return (EINVAL);
1654 	}
1655 
1656 	if (len == 0) {
1657 		DWARN(DBG_ALL_LDCS, "ldc_mem_dring_create: invalid length\n");
1658 		return (EINVAL);
1659 	}
1660 
1661 	/* descriptor size should be 8-byte aligned */
1662 	if (dsize == 0 || (dsize & 0x7)) {
1663 		DWARN(DBG_ALL_LDCS, "ldc_mem_dring_create: invalid size\n");
1664 		return (EINVAL);
1665 	}
1666 
1667 	*dhandle = 0;
1668 
1669 	/* Allocate a desc ring structure */
1670 	dringp = kmem_zalloc(sizeof (ldc_dring_t), KM_SLEEP);
1671 
1672 	/* Initialize dring */
1673 	dringp->length = len;
1674 	dringp->dsize = dsize;
1675 
1676 	/* round off to multiple of pagesize */
1677 	dringp->size = (size & MMU_PAGEMASK);
1678 	if (size & MMU_PAGEOFFSET)
1679 		dringp->size += MMU_PAGESIZE;
1680 
1681 	dringp->status = LDC_UNBOUND;
1682 
1683 	/* allocate descriptor ring memory */
1684 	dringp->base = kmem_zalloc(dringp->size, KM_SLEEP);
1685 
1686 	/* initialize the desc ring lock */
1687 	mutex_init(&dringp->lock, NULL, MUTEX_DRIVER, NULL);
1688 
1689 	/* Add descriptor ring to the head of global list */
1690 	mutex_enter(&ldcssp->lock);
1691 	dringp->next = ldcssp->dring_list;
1692 	ldcssp->dring_list = dringp;
1693 	mutex_exit(&ldcssp->lock);
1694 
1695 	*dhandle = (ldc_dring_handle_t)dringp;
1696 
1697 	D1(DBG_ALL_LDCS, "ldc_mem_dring_create: dring allocated\n");
1698 
1699 	return (0);
1700 }
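
/*
 * Sizing example (added for illustration, assuming the 8 KB sun4v base
 * MMU_PAGESIZE): len = 128 descriptors of dsize = 64 bytes gives
 * size = 8192 bytes, already a page multiple, so dringp->size stays at
 * one page; len = 100 descriptors of 64 bytes (6400 bytes) would be
 * rounded up to one full 8 KB page as well.
 */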
1701 
1702 
1703 /*
1704  * Destroy a descriptor ring.
1705  */
1706 int
1707 ldc_mem_dring_destroy(ldc_dring_handle_t dhandle)
1708 {
1709 	ldc_dring_t *dringp;
1710 	ldc_dring_t *tmp_dringp;
1711 
1712 	D1(DBG_ALL_LDCS, "ldc_mem_dring_destroy: entered\n");
1713 
1714 	if (dhandle == NULL) {
1715 		DWARN(DBG_ALL_LDCS,
1716 		    "ldc_mem_dring_destroy: invalid desc ring handle\n");
1717 		return (EINVAL);
1718 	}
1719 	dringp = (ldc_dring_t *)dhandle;
1720 
1721 	if (dringp->status == LDC_BOUND) {
1722 		DWARN(DBG_ALL_LDCS,
1723 		    "ldc_mem_dring_destroy: desc ring is bound\n");
1724 		return (EACCES);
1725 	}
1726 
1727 	mutex_enter(&dringp->lock);
1728 	mutex_enter(&ldcssp->lock);
1729 
1730 	/* remove from linked list - if not bound */
1731 	tmp_dringp = ldcssp->dring_list;
1732 	if (tmp_dringp == dringp) {
1733 		ldcssp->dring_list = dringp->next;
1734 		dringp->next = NULL;
1735 
1736 	} else {
1737 		while (tmp_dringp != NULL) {
1738 			if (tmp_dringp->next == dringp) {
1739 				tmp_dringp->next = dringp->next;
1740 				dringp->next = NULL;
1741 				break;
1742 			}
1743 			tmp_dringp = tmp_dringp->next;
1744 		}
1745 		if (tmp_dringp == NULL) {
1746 			DWARN(DBG_ALL_LDCS,
1747 			    "ldc_mem_dring_destroy: invalid descriptor\n");
1748 			mutex_exit(&ldcssp->lock);
1749 			mutex_exit(&dringp->lock);
1750 			return (EINVAL);
1751 		}
1752 	}
1753 
1754 	mutex_exit(&ldcssp->lock);
1755 
1756 	/* free the descriptor ring */
1757 	kmem_free(dringp->base, dringp->size);
1758 
1759 	mutex_exit(&dringp->lock);
1760 
1761 	/* destroy dring lock */
1762 	mutex_destroy(&dringp->lock);
1763 
1764 	/* free desc ring object */
1765 	kmem_free(dringp, sizeof (ldc_dring_t));
1766 
1767 	return (0);
1768 }
1769 
1770 /*
1771  * Bind a previously allocated dring to a channel. The channel should
1772  * be OPEN in order to bind the ring to the channel. Returns a
1773  * descriptor ring cookie. The descriptor ring is exported for remote
1774  * access by the client at the other end of the channel. Entries for
1775  * the dring pages are stored in the map table (via i_ldc_mem_bind_handle).
1776  */
1777 int
1778 ldc_mem_dring_bind(ldc_handle_t handle, ldc_dring_handle_t dhandle,
1779     uint8_t mtype, uint8_t perm, ldc_mem_cookie_t *cookie, uint32_t *ccount)
1780 {
1781 	int		err;
1782 	ldc_chan_t 	*ldcp;
1783 	ldc_dring_t	*dringp;
1784 	ldc_mem_handle_t mhandle;
1785 
1786 	/* check to see if channel is initialized */
1787 	if (handle == NULL) {
1788 		DWARN(DBG_ALL_LDCS,
1789 		    "ldc_mem_dring_bind: invalid channel handle\n");
1790 		return (EINVAL);
1791 	}
1792 	ldcp = (ldc_chan_t *)handle;
1793 
1794 	if (dhandle == NULL) {
1795 		DWARN(DBG_ALL_LDCS,
1796 		    "ldc_mem_dring_bind: invalid desc ring handle\n");
1797 		return (EINVAL);
1798 	}
1799 	dringp = (ldc_dring_t *)dhandle;
1800 
1801 	if (cookie == NULL) {
1802 		DWARN(ldcp->id,
1803 		    "ldc_mem_dring_bind: invalid cookie arg\n");
1804 		return (EINVAL);
1805 	}
1806 
1807 	/* ensure the mtype is valid */
1808 	if ((mtype & (LDC_SHADOW_MAP|LDC_DIRECT_MAP)) == 0) {
1809 		DWARN(ldcp->id, "ldc_mem_dring_bind: invalid map type\n");
1810 		return (EINVAL);
1811 	}
1812 
1813 	/* no need to bind as direct map if it is not HV-supported or enabled */
1814 	if (!ldc_dring_shmem_hv_ok || !ldc_dring_shmem_enabled) {
1815 		mtype = LDC_SHADOW_MAP;
1816 	}
1817 
1818 	mutex_enter(&dringp->lock);
1819 
1820 	if (dringp->status == LDC_BOUND) {
1821 		DWARN(DBG_ALL_LDCS,
1822 		    "ldc_mem_dring_bind: (0x%llx) descriptor ring is bound\n",
1823 		    ldcp->id);
1824 		mutex_exit(&dringp->lock);
1825 		return (EINVAL);
1826 	}
1827 
1828 	if ((perm & LDC_MEM_RW) == 0) {
1829 		DWARN(DBG_ALL_LDCS,
1830 		    "ldc_mem_dring_bind: invalid permissions\n");
1831 		mutex_exit(&dringp->lock);
1832 		return (EINVAL);
1833 	}
1834 
1835 	if ((mtype & (LDC_SHADOW_MAP|LDC_DIRECT_MAP|LDC_IO_MAP)) == 0) {
1836 		DWARN(DBG_ALL_LDCS, "ldc_mem_dring_bind: invalid type\n");
1837 		mutex_exit(&dringp->lock);
1838 		return (EINVAL);
1839 	}
1840 
1841 	dringp->ldcp = ldcp;
1842 
1843 	/* create a memory handle */
1844 	err = ldc_mem_alloc_handle(handle, &mhandle);
1845 	if (err || mhandle == NULL) {
1846 		DWARN(DBG_ALL_LDCS,
1847 		    "ldc_mem_dring_bind: (0x%llx) error allocating mhandle\n",
1848 		    ldcp->id);
1849 		mutex_exit(&dringp->lock);
1850 		return (err);
1851 	}
1852 	dringp->mhdl = mhandle;
1853 
1854 	/* bind the descriptor ring to channel */
1855 	err = i_ldc_mem_bind_handle(mhandle, dringp->base, dringp->size,
1856 	    mtype, perm, cookie, ccount);
1857 	if (err) {
1858 		DWARN(ldcp->id,
1859 		    "ldc_mem_dring_bind: (0x%llx) error binding mhandle\n",
1860 		    ldcp->id);
1861 		mutex_exit(&dringp->lock);
1862 		return (err);
1863 	}
1864 
1865 	/*
1866 	 * For now return error if we get more than one cookie
1867 	 * FUTURE: Return multiple cookies ..
1868 	 */
1869 	if (*ccount > 1) {
1870 		(void) ldc_mem_unbind_handle(mhandle);
1871 		(void) ldc_mem_free_handle(mhandle);
1872 
1873 		dringp->ldcp = NULL;
1874 		dringp->mhdl = NULL;
1875 		*ccount = 0;
1876 
1877 		mutex_exit(&dringp->lock);
1878 		return (EAGAIN);
1879 	}
1880 
1881 	/* Add descriptor ring to channel's exported dring list */
1882 	mutex_enter(&ldcp->exp_dlist_lock);
1883 	dringp->ch_next = ldcp->exp_dring_list;
1884 	ldcp->exp_dring_list = dringp;
1885 	mutex_exit(&ldcp->exp_dlist_lock);
1886 
1887 	dringp->status = LDC_BOUND;
1888 
1889 	mutex_exit(&dringp->lock);
1890 
1891 	return (0);
1892 }
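
/*
 * Illustrative sketch (not part of the original source): how an exporter
 * might create a ring and bind it to an open channel. It assumes the
 * standard ldc_mem_dring_create() allocator declared in ldc.h, and that
 * the returned cookie is handed to the peer by some out-of-band means.
 * The example_ name is hypothetical.
 */
static int
example_dring_export(ldc_handle_t chan, uint32_t nentries, uint32_t dsize,
    ldc_dring_handle_t *dhp, ldc_mem_cookie_t *cookiep)
{
	uint32_t	ccount;
	int		rv;

	rv = ldc_mem_dring_create(nentries, dsize, dhp);
	if (rv != 0)
		return (rv);

	/* request a direct map; a shadow copy is used if that is disabled */
	rv = ldc_mem_dring_bind(chan, *dhp, LDC_DIRECT_MAP, LDC_MEM_RW,
	    cookiep, &ccount);
	if (rv != 0) {
		(void) ldc_mem_dring_destroy(*dhp);
		return (rv);
	}

	/* only a single cookie is currently returned (ccount == 1) */
	return (0);
}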
1893 
1894 /*
1895  * Return the next cookie associated with the specified dring handle
1896  */
1897 int
1898 ldc_mem_dring_nextcookie(ldc_dring_handle_t dhandle, ldc_mem_cookie_t *cookie)
1899 {
1900 	int		rv = 0;
1901 	ldc_dring_t 	*dringp;
1902 	ldc_chan_t	*ldcp;
1903 
1904 	if (dhandle == NULL) {
1905 		DWARN(DBG_ALL_LDCS,
1906 		    "ldc_mem_dring_nextcookie: invalid desc ring handle\n");
1907 		return (EINVAL);
1908 	}
1909 	dringp = (ldc_dring_t *)dhandle;
1910 	mutex_enter(&dringp->lock);
1911 
1912 	if (dringp->status != LDC_BOUND) {
1913 		DWARN(DBG_ALL_LDCS,
1914 		    "ldc_mem_dring_nextcookie: descriptor ring 0x%llx "
1915 		    "is not bound\n", dringp);
1916 		mutex_exit(&dringp->lock);
1917 		return (EINVAL);
1918 	}
1919 
1920 	ldcp = dringp->ldcp;
1921 
1922 	if (cookie == NULL) {
1923 		DWARN(ldcp->id,
1924 		    "ldc_mem_dring_nextcookie:(0x%llx) invalid cookie arg\n",
1925 		    ldcp->id);
1926 		mutex_exit(&dringp->lock);
1927 		return (EINVAL);
1928 	}
1929 
1930 	rv = ldc_mem_nextcookie((ldc_mem_handle_t)dringp->mhdl, cookie);
1931 	mutex_exit(&dringp->lock);
1932 
1933 	return (rv);
1934 }
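
/*
 * Illustrative sketch (not part of the original source): retrieving any
 * additional cookies after a bind that reported ccount > 1. With the
 * current one-cookie limit for drings the loop body never runs, but it
 * shows the intended calling pattern. The example_ name is hypothetical.
 */
static int
example_dring_cookies(ldc_dring_handle_t dhandle, uint32_t ccount,
    ldc_mem_cookie_t *cookies)
{
	uint32_t	i;
	int		rv;

	/* cookies[0] was filled in by ldc_mem_dring_bind() */
	for (i = 1; i < ccount; i++) {
		rv = ldc_mem_dring_nextcookie(dhandle, &cookies[i]);
		if (rv != 0)
			return (rv);
	}
	return (0);
}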
1935 
1936 /*
1937  * Unbind a previously bound dring from a channel.
1938  */
1939 int
1940 ldc_mem_dring_unbind(ldc_dring_handle_t dhandle)
1941 {
1942 	ldc_dring_t 	*dringp;
1943 	ldc_dring_t	*tmp_dringp;
1944 	ldc_chan_t	*ldcp;
1945 
1946 	if (dhandle == NULL) {
1947 		DWARN(DBG_ALL_LDCS,
1948 		    "ldc_mem_dring_unbind: invalid desc ring handle\n");
1949 		return (EINVAL);
1950 	}
1951 	dringp = (ldc_dring_t *)dhandle;
1952 
1953 	mutex_enter(&dringp->lock);
1954 
1955 	if (dringp->status == LDC_UNBOUND) {
1956 		DWARN(DBG_ALL_LDCS,
1957 		    "ldc_mem_dring_unbind: descriptor ring 0x%llx is unbound\n",
1958 		    dringp);
1959 		mutex_exit(&dringp->lock);
1960 		return (EINVAL);
1961 	}
1962 	ldcp = dringp->ldcp;
1963 
1964 	mutex_enter(&ldcp->exp_dlist_lock);
1965 
1966 	tmp_dringp = ldcp->exp_dring_list;
1967 	if (tmp_dringp == dringp) {
1968 		ldcp->exp_dring_list = dringp->ch_next;
1969 		dringp->ch_next = NULL;
1970 
1971 	} else {
1972 		while (tmp_dringp != NULL) {
1973 			if (tmp_dringp->ch_next == dringp) {
1974 				tmp_dringp->ch_next = dringp->ch_next;
1975 				dringp->ch_next = NULL;
1976 				break;
1977 			}
1978 			tmp_dringp = tmp_dringp->ch_next;
1979 		}
1980 		if (tmp_dringp == NULL) {
1981 			DWARN(DBG_ALL_LDCS,
1982 			    "ldc_mem_dring_unbind: invalid descriptor\n");
1983 			mutex_exit(&ldcp->exp_dlist_lock);
1984 			mutex_exit(&dringp->lock);
1985 			return (EINVAL);
1986 		}
1987 	}
1988 
1989 	mutex_exit(&ldcp->exp_dlist_lock);
1990 
1991 	(void) ldc_mem_unbind_handle((ldc_mem_handle_t)dringp->mhdl);
1992 	(void) ldc_mem_free_handle((ldc_mem_handle_t)dringp->mhdl);
1993 
1994 	dringp->ldcp = NULL;
1995 	dringp->mhdl = NULL;
1996 	dringp->status = LDC_UNBOUND;
1997 
1998 	mutex_exit(&dringp->lock);
1999 
2000 	return (0);
2001 }
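
/*
 * Illustrative sketch (not part of the original source): an exporter
 * undoing a bind, for example on channel reset, while keeping the ring
 * itself around so it can be re-bound later. Unbinding drops the ring
 * from the channel's exported list and frees its memory handle. The
 * example_ name is hypothetical.
 */
static int
example_dring_unexport(ldc_dring_handle_t dhandle)
{
	return (ldc_mem_dring_unbind(dhandle));
}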
2002 
2003 #ifdef	DEBUG
2004 void
2005 i_ldc_mem_inject_dring_clear(ldc_chan_t *ldcp)
2006 {
2007 	ldc_dring_t	*dp;
2008 	ldc_mhdl_t	*mhdl;
2009 	ldc_mtbl_t	*mtbl;
2010 	ldc_memseg_t	*memseg;
2011 	uint64_t	cookie_addr;
2012 	uint64_t	pg_shift, pg_size_code;
2013 	int		i, rv, retries;
2014 
2015 	/* has a map table been allocated? */
2016 	if ((mtbl = ldcp->mtbl) == NULL)
2017 		return;
2018 
2019 	/* lock the memory table - exclusive access to channel */
2020 	mutex_enter(&mtbl->lock);
2021 
2022 	/* lock the exported dring list */
2023 	mutex_enter(&ldcp->exp_dlist_lock);
2024 
2025 	for (dp = ldcp->exp_dring_list; dp != NULL; dp = dp->ch_next) {
2026 		if ((mhdl = (ldc_mhdl_t *)dp->mhdl) == NULL)
2027 			continue;
2028 
2029 		if ((memseg = mhdl->memseg) == NULL)
2030 			continue;
2031 
2032 		/* undo the pages exported */
2033 		for (i = 0; i < memseg->npages; i++) {
2034 
2035 			/* clear the entry from the table */
2036 			memseg->pages[i].mte->entry.ll = 0;
2037 
2038 			pg_size_code = page_szc(memseg->pages[i].size);
2039 			pg_shift = page_get_shift(pg_size_code);
2040 			cookie_addr = IDX2COOKIE(memseg->pages[i].index,
2041 			    pg_size_code, pg_shift);
2042 
2043 			retries = 0;
2044 			do {
2045 				rv = hv_ldc_revoke(ldcp->id, cookie_addr,
2046 				    memseg->pages[i].mte->cookie);
2047 
2048 				if (rv != H_EWOULDBLOCK)
2049 					break;
2050 
2051 				drv_usecwait(ldc_delay);
2052 
2053 			} while (retries++ < ldc_max_retries);
2054 
2055 			if (rv != 0) {
2056 				DWARN(ldcp->id,
2057 				    "i_ldc_mem_inject_dring_clear(): "
2058 				    "hv_ldc_revoke failed: "
2059 				    "channel: 0x%lx, cookie addr: 0x%p, "
2060 				    "cookie: 0x%lx, rv: %d",
2061 				    ldcp->id, cookie_addr,
2062 				    memseg->pages[i].mte->cookie, rv);
2063 			}
2064 
2065 			mtbl->num_avail++;
2066 		}
2067 	}
2068 
2069 	mutex_exit(&ldcp->exp_dlist_lock);
2070 	mutex_exit(&mtbl->lock);
2071 }
2072 #endif
2073 
2074 /*
2075  * Get information about the dring. The base address of the descriptor
2076  * ring along with the type and permission are returned.
2077  */
2078 int
2079 ldc_mem_dring_info(ldc_dring_handle_t dhandle, ldc_mem_info_t *minfo)
2080 {
2081 	ldc_dring_t	*dringp;
2082 	int		rv;
2083 
2084 	if (dhandle == NULL) {
2085 		DWARN(DBG_ALL_LDCS,
2086 		    "ldc_mem_dring_info: invalid desc ring handle\n");
2087 		return (EINVAL);
2088 	}
2089 	dringp = (ldc_dring_t *)dhandle;
2090 
2091 	mutex_enter(&dringp->lock);
2092 
2093 	if (dringp->mhdl) {
2094 		rv = ldc_mem_info(dringp->mhdl, minfo);
2095 		if (rv) {
2096 			DWARN(DBG_ALL_LDCS,
2097 			    "ldc_mem_dring_info: error reading mem info\n");
2098 			mutex_exit(&dringp->lock);
2099 			return (rv);
2100 		}
2101 	} else {
2102 		minfo->vaddr = dringp->base;
2103 		minfo->raddr = NULL;
2104 		minfo->status = dringp->status;
2105 	}
2106 
2107 	mutex_exit(&dringp->lock);
2108 
2109 	return (0);
2110 }
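
/*
 * Illustrative sketch (not part of the original source): querying a
 * ring's state. For a bound or mapped ring the addresses come from the
 * underlying memory handle; for an unbound ring only the local base
 * address and status are filled in. The example_ name is hypothetical.
 */
static int
example_dring_query(ldc_dring_handle_t dhandle, boolean_t *mapped)
{
	ldc_mem_info_t	minfo;
	int		rv;

	rv = ldc_mem_dring_info(dhandle, &minfo);
	if (rv != 0)
		return (rv);

	/* minfo.status is one of LDC_UNBOUND, LDC_BOUND or LDC_MAPPED */
	*mapped = (minfo.status == LDC_MAPPED) ? B_TRUE : B_FALSE;
	return (0);
}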
2111 
2112 /*
2113  * Map an exported descriptor ring into the local address space. If the
2114  * descriptor ring was exported for direct map access, an HV call is made
2115  * to allocate a RA range. If the map is done via a shadow copy, local
2116  * shadow memory is allocated.
2117  */
2118 int
2119 ldc_mem_dring_map(ldc_handle_t handle, ldc_mem_cookie_t *cookie,
2120     uint32_t ccount, uint32_t len, uint32_t dsize, uint8_t mtype,
2121     ldc_dring_handle_t *dhandle)
2122 {
2123 	int		err;
2124 	ldc_chan_t 	*ldcp;
2125 	ldc_mem_handle_t mhandle;
2126 	ldc_dring_t	*dringp;
2127 	size_t		dring_size;
2128 
2129 	if (dhandle == NULL) {
2130 		DWARN(DBG_ALL_LDCS,
2131 		    "ldc_mem_dring_map: invalid dhandle\n");
2132 		return (EINVAL);
2133 	}
2134 
2135 	/* check to see if channel is initialized */
2136 	if (handle == NULL) {
2137 		DWARN(DBG_ALL_LDCS,
2138 		    "ldc_mem_dring_map: invalid channel handle\n");
2139 		return (EINVAL);
2140 	}
2141 	ldcp = (ldc_chan_t *)handle;
2142 
2143 	if (cookie == NULL) {
2144 		DWARN(ldcp->id,
2145 		    "ldc_mem_dring_map: (0x%llx) invalid cookie\n",
2146 		    ldcp->id);
2147 		return (EINVAL);
2148 	}
2149 
2150 	/* FUTURE: For now we support only one cookie per dring */
2151 	ASSERT(ccount == 1);
2152 
2153 	if (cookie->size < (dsize * len)) {
2154 		DWARN(ldcp->id,
2155 		    "ldc_mem_dring_map: (0x%llx) invalid dsize/len\n",
2156 		    ldcp->id);
2157 		return (EINVAL);
2158 	}
2159 
2160 	/* ensure the mtype is valid */
2161 	if ((mtype & (LDC_SHADOW_MAP|LDC_DIRECT_MAP)) == 0) {
2162 		DWARN(ldcp->id, "ldc_mem_dring_map: invalid map type\n");
2163 		return (EINVAL);
2164 	}
2165 
2166 	/* do not attempt direct map if it's not HV supported or enabled */
2167 	if (!ldc_dring_shmem_hv_ok || !ldc_dring_shmem_enabled) {
2168 		mtype = LDC_SHADOW_MAP;
2169 	}
2170 
2171 	*dhandle = 0;
2172 
2173 	/* Allocate a dring structure */
2174 	dringp = kmem_zalloc(sizeof (ldc_dring_t), KM_SLEEP);
2175 
2176 	D1(ldcp->id,
2177 	    "ldc_mem_dring_map: 0x%x,0x%x,0x%x,0x%llx,0x%llx\n",
2178 	    mtype, len, dsize, cookie->addr, cookie->size);
2179 
2180 	/* Initialize dring */
2181 	dringp->length = len;
2182 	dringp->dsize = dsize;
2183 
2184 	/* round up to a multiple of the page size */
2185 	dring_size = len * dsize;
2186 	dringp->size = (dring_size & MMU_PAGEMASK);
2187 	if (dring_size & MMU_PAGEOFFSET)
2188 		dringp->size += MMU_PAGESIZE;
2189 
2190 	dringp->ldcp = ldcp;
2191 
2192 	/* create a memory handle */
2193 	err = ldc_mem_alloc_handle(handle, &mhandle);
2194 	if (err || mhandle == NULL) {
2195 		DWARN(DBG_ALL_LDCS,
2196 		    "ldc_mem_dring_map: cannot alloc hdl err=%d\n",
2197 		    err);
2198 		kmem_free(dringp, sizeof (ldc_dring_t));
2199 		return (ENOMEM);
2200 	}
2201 
2202 	dringp->mhdl = mhandle;
2203 	dringp->base = NULL;
2204 
2205 	/* map the dring into local memory */
2206 	err = i_ldc_mem_map(mhandle, cookie, ccount, mtype, LDC_MEM_RW,
2207 	    &(dringp->base), NULL);
2208 	if (err || dringp->base == NULL) {
2209 		DWARN(DBG_ALL_LDCS,
2210 		    "ldc_mem_dring_map: cannot map desc ring err=%d\n", err);
2211 		(void) ldc_mem_free_handle(mhandle);
2212 		kmem_free(dringp, sizeof (ldc_dring_t));
2213 		return (ENOMEM);
2214 	}
2215 
2216 	/* initialize the desc ring lock */
2217 	mutex_init(&dringp->lock, NULL, MUTEX_DRIVER, NULL);
2218 
2219 	/* Add descriptor ring to channel's imported dring list */
2220 	mutex_enter(&ldcp->imp_dlist_lock);
2221 	dringp->ch_next = ldcp->imp_dring_list;
2222 	ldcp->imp_dring_list = dringp;
2223 	mutex_exit(&ldcp->imp_dlist_lock);
2224 
2225 	dringp->status = LDC_MAPPED;
2226 
2227 	*dhandle = (ldc_dring_handle_t)dringp;
2228 
2229 	return (0);
2230 }
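
/*
 * Illustrative sketch (not part of the original source): how an importer
 * might map a peer's exported ring from the single cookie it received.
 * The entry count and size are assumed to have been agreed upon out of
 * band, e.g. in a handshake message over the same channel. The example_
 * name is hypothetical.
 */
static int
example_dring_import(ldc_handle_t chan, ldc_mem_cookie_t *cookie,
    uint32_t nentries, uint32_t dsize, ldc_dring_handle_t *dhp)
{
	/* one cookie per ring is currently supported (ccount == 1) */
	return (ldc_mem_dring_map(chan, cookie, 1, nentries, dsize,
	    LDC_DIRECT_MAP, dhp));
}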
2231 
2232 /*
2233  * Unmap a descriptor ring. Free shadow memory (if any).
2234  */
2235 int
2236 ldc_mem_dring_unmap(ldc_dring_handle_t dhandle)
2237 {
2238 	ldc_dring_t 	*dringp;
2239 	ldc_dring_t	*tmp_dringp;
2240 	ldc_chan_t	*ldcp;
2241 
2242 	if (dhandle == NULL) {
2243 		DWARN(DBG_ALL_LDCS,
2244 		    "ldc_mem_dring_unmap: invalid desc ring handle\n");
2245 		return (EINVAL);
2246 	}
2247 	dringp = (ldc_dring_t *)dhandle;
2248 
2249 	if (dringp->status != LDC_MAPPED) {
2250 		DWARN(DBG_ALL_LDCS,
2251 		    "ldc_mem_dring_unmap: not a mapped desc ring\n");
2252 		return (EINVAL);
2253 	}
2254 
2255 	mutex_enter(&dringp->lock);
2256 
2257 	ldcp = dringp->ldcp;
2258 
2259 	mutex_enter(&ldcp->imp_dlist_lock);
2260 
2261 	/* find and unlink the desc ring from channel import list */
2262 	tmp_dringp = ldcp->imp_dring_list;
2263 	if (tmp_dringp == dringp) {
2264 		ldcp->imp_dring_list = dringp->ch_next;
2265 		dringp->ch_next = NULL;
2266 
2267 	} else {
2268 		while (tmp_dringp != NULL) {
2269 			if (tmp_dringp->ch_next == dringp) {
2270 				tmp_dringp->ch_next = dringp->ch_next;
2271 				dringp->ch_next = NULL;
2272 				break;
2273 			}
2274 			tmp_dringp = tmp_dringp->ch_next;
2275 		}
2276 		if (tmp_dringp == NULL) {
2277 			DWARN(DBG_ALL_LDCS,
2278 			    "ldc_mem_dring_unmap: invalid descriptor\n");
2279 			mutex_exit(&ldcp->imp_dlist_lock);
2280 			mutex_exit(&dringp->lock);
2281 			return (EINVAL);
2282 		}
2283 	}
2284 
2285 	mutex_exit(&ldcp->imp_dlist_lock);
2286 
2287 	/* do a LDC memory handle unmap and free */
2288 	(void) ldc_mem_unmap(dringp->mhdl);
2289 	(void) ldc_mem_free_handle((ldc_mem_handle_t)dringp->mhdl);
2290 
2291 	dringp->status = 0;
2292 	dringp->ldcp = NULL;
2293 
2294 	mutex_exit(&dringp->lock);
2295 
2296 	/* destroy dring lock */
2297 	mutex_destroy(&dringp->lock);
2298 
2299 	/* free desc ring object */
2300 	kmem_free(dringp, sizeof (ldc_dring_t));
2301 
2302 	return (0);
2303 }
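
/*
 * Illustrative sketch (not part of the original source): an importer
 * tearing down its mapping once it is done with the peer's ring. Unmap
 * frees any shadow buffer, releases the memory handle and frees the
 * ring object, so the handle must not be used afterwards. The example_
 * name is hypothetical.
 */
static void
example_dring_unimport(ldc_dring_handle_t dhandle)
{
	(void) ldc_mem_dring_unmap(dhandle);
}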
2304 
2305 /*
2306  * Internal entry point for descriptor ring access entry consistency
2307  * semantics. Acquire copies the contents of the remote descriptor ring
2308  * into the local shadow copy. The release operation copies the local
2309  * contents into the remote dring. The start and end locations specify
2310  * bounds for the entries being synchronized.
2311  */
2312 static int
2313 i_ldc_dring_acquire_release(ldc_dring_handle_t dhandle,
2314     uint8_t direction, uint64_t start, uint64_t end)
2315 {
2316 	int 			err;
2317 	ldc_dring_t		*dringp;
2318 	ldc_chan_t		*ldcp;
2319 	ldc_mhdl_t		*mhdl;
2320 	uint64_t		soff;
2321 	size_t			copy_size;
2322 
2323 	if (dhandle == NULL) {
2324 		DWARN(DBG_ALL_LDCS,
2325 		    "i_ldc_dring_acquire_release: invalid desc ring handle\n");
2326 		return (EINVAL);
2327 	}
2328 	dringp = (ldc_dring_t *)dhandle;
2329 	mutex_enter(&dringp->lock);
2330 
2331 	if (dringp->status != LDC_MAPPED || dringp->ldcp == NULL) {
2332 		DWARN(DBG_ALL_LDCS,
2333 		    "i_ldc_dring_acquire_release: not a mapped desc ring\n");
2334 		mutex_exit(&dringp->lock);
2335 		return (EINVAL);
2336 	}
2337 
2338 	if (start >= dringp->length || end >= dringp->length) {
2339 		DWARN(DBG_ALL_LDCS,
2340 		    "i_ldc_dring_acquire_release: index out of range\n");
2341 		mutex_exit(&dringp->lock);
2342 		return (EINVAL);
2343 	}
2344 
2345 	mhdl = (ldc_mhdl_t *)dringp->mhdl;
2346 	if (mhdl == NULL) {
2347 		DWARN(DBG_ALL_LDCS,
2348 		    "i_ldc_dring_acquire_release: invalid memory handle\n");
2349 		mutex_exit(&dringp->lock);
2350 		return (EINVAL);
2351 	}
2352 
2353 	if (mhdl->mtype != LDC_SHADOW_MAP) {
2354 		DWARN(DBG_ALL_LDCS,
2355 		    "i_ldc_dring_acquire_release: invalid mtype: %d\n",
2356 		    mhdl->mtype);
2357 		mutex_exit(&dringp->lock);
2358 		return (EINVAL);
2359 	}
2360 
2361 	/* get the channel handle */
2362 	ldcp = dringp->ldcp;
2363 
2364 	copy_size = (start <= end) ? (((end - start) + 1) * dringp->dsize) :
2365 	    ((dringp->length - start) * dringp->dsize);
2366 
2367 	/* Calculate the relative offset for the first desc */
2368 	soff = (start * dringp->dsize);
2369 
2370 	/* copy to/from remote from/to local memory */
2371 	D1(ldcp->id, "i_ldc_dring_acquire_release: c1 off=0x%llx sz=0x%llx\n",
2372 	    soff, copy_size);
2373 	err = i_ldc_mem_acquire_release((ldc_mem_handle_t)dringp->mhdl,
2374 	    direction, soff, copy_size);
2375 	if (err) {
2376 		DWARN(ldcp->id,
2377 		    "i_ldc_dring_acquire_release: copy failed\n");
2378 		mutex_exit(&dringp->lock);
2379 		return (err);
2380 	}
2381 
2382 	/* do the balance */
2383 	if (start > end) {
2384 		copy_size = ((end + 1) * dringp->dsize);
2385 		soff = 0;
2386 
2387 		/* copy to/from remote from/to local memory */
2388 		D1(ldcp->id, "i_ldc_dring_acquire_release: c2 "
2389 		    "off=0x%llx sz=0x%llx\n", soff, copy_size);
2390 		err = i_ldc_mem_acquire_release((ldc_mem_handle_t)dringp->mhdl,
2391 		    direction, soff, copy_size);
2392 		if (err) {
2393 			DWARN(ldcp->id,
2394 			    "i_ldc_dring_acquire_release: copy failed\n");
2395 			mutex_exit(&dringp->lock);
2396 			return (err);
2397 		}
2398 	}
2399 
2400 	mutex_exit(&dringp->lock);
2401 
2402 	return (0);
2403 }
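
/*
 * Worked example for i_ldc_dring_acquire_release() (illustrative, not
 * part of the original source): for a mapped ring with length = 8 and
 * dsize = 64, a request with start = 6 and end = 1 wraps around the
 * ring. The first copy above covers entries 6..7: soff = 6 * 64 = 384,
 * copy_size = (8 - 6) * 64 = 128. The second copy covers entries 0..1:
 * soff = 0, copy_size = (1 + 1) * 64 = 128.
 */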
2404 
2405 /*
2406  * Ensure that the contents of the local dring are consistent
2407  * with the contents of the remote dring
2408  */
2409 int
2410 ldc_mem_dring_acquire(ldc_dring_handle_t dhandle, uint64_t start, uint64_t end)
2411 {
2412 	return (i_ldc_dring_acquire_release(dhandle, LDC_COPY_IN, start, end));
2413 }
2414 
2415 /*
2416  * Ensure that the contents of the remote dring are consistent
2417  * with the contents of the local dring
2418  */
2419 int
2420 ldc_mem_dring_release(ldc_dring_handle_t dhandle, uint64_t start, uint64_t end)
2421 {
2422 	return (i_ldc_dring_acquire_release(dhandle, LDC_COPY_OUT, start, end));
2423 }
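
/*
 * Illustrative sketch (not part of the original source): the calling
 * pattern a shadow-map importer might use around a batch of entries.
 * Acquire pulls the exporter's contents into the local shadow copy
 * before reading; release pushes local updates back after writing.
 * The example_ name is hypothetical.
 */
static int
example_dring_process(ldc_dring_handle_t dhandle, uint64_t start,
    uint64_t end)
{
	int	rv;

	/* sync the local shadow of entries [start, end] with the exporter */
	rv = ldc_mem_dring_acquire(dhandle, start, end);
	if (rv != 0)
		return (rv);

	/* ... examine and update the local copies of the descriptors ... */

	/* propagate the updated entries back to the exporter */
	return (ldc_mem_dring_release(dhandle, start, end));
}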
2424