xref: /titanic_41/usr/src/uts/sun4v/io/ldc_shm.c (revision eb0cc229f19c437a6b538d3ac0d0443268290b7e)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * sun4v LDC Link Layer Shared Memory Routines
31  */
32 #include <sys/types.h>
33 #include <sys/kmem.h>
34 #include <sys/cmn_err.h>
35 #include <sys/ksynch.h>
36 #include <sys/debug.h>
37 #include <sys/cyclic.h>
38 #include <sys/machsystm.h>
39 #include <sys/vm.h>
40 #include <sys/machcpuvar.h>
41 #include <sys/mmu.h>
42 #include <sys/pte.h>
43 #include <vm/hat.h>
44 #include <vm/as.h>
45 #include <vm/hat_sfmmu.h>
46 #include <sys/vm_machparam.h>
47 #include <vm/seg_kmem.h>
48 #include <vm/seg_kpm.h>
49 #include <sys/hypervisor_api.h>
50 #include <sys/ldc.h>
51 #include <sys/ldc_impl.h>
52 
53 /* LDC variables used by shared memory routines */
54 extern ldc_soft_state_t *ldcssp;
55 extern int ldc_max_retries;
56 extern clock_t ldc_delay;
57 
58 #ifdef DEBUG
59 extern int ldcdbg;
60 #endif
61 
62 /* LDC internal functions used by shared memory routines */
63 extern void i_ldc_reset(ldc_chan_t *ldcp, boolean_t force_reset);
64 extern int i_ldc_h2v_error(int h_error);
65 
66 #ifdef DEBUG
67 extern void ldcdebug(int64_t id, const char *fmt, ...);
68 #endif
69 
70 /* Memory synchronization internal functions */
71 static int i_ldc_mem_acquire_release(ldc_mem_handle_t mhandle,
72     uint8_t direction, uint64_t offset, size_t size);
73 static int i_ldc_dring_acquire_release(ldc_dring_handle_t dhandle,
74     uint8_t direction, uint64_t start, uint64_t end);
75 
76 /*
77  * The LDC framework supports mapping a remote domain's memory
78  * either directly or via shadow memory pages; the default is
79  * the shadow copy. Direct map can be enabled by setting
80  * 'ldc_shmem_enabled' to a non-zero value.
81  */
82 int ldc_shmem_enabled = 0;
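
/*
 * A hedged usage note (not from the original source): tunables like
 * this are typically set in /etc/system; assuming the module is
 * named 'ldc', the line would look like:
 *
 *	set ldc:ldc_shmem_enabled = 1
 */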
83 
84 /*
85  * Pages exported for remote access over each channel are
86  * maintained in a table registered with the Hypervisor.
87  * The default number of entries in the table is set by
88  * 'ldc_maptable_entries'.
89  */
90 uint64_t ldc_maptable_entries = LDC_MTBL_ENTRIES;
91 
92 #define	IDX2COOKIE(idx, pg_szc, pg_shift)				\
93 	(((pg_szc) << LDC_COOKIE_PGSZC_SHIFT) | ((idx) << (pg_shift)))
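
/*
 * Worked example (illustrative): with 8K pages, page_szc() yields
 * pg_szc = 0 and pg_shift = 13, so table index 5 maps to cookie
 * (0 << LDC_COOKIE_PGSZC_SHIFT) | (5 << 13) = 0xa000. The caller
 * then ORs the intra-page offset into the low bits as needed.
 */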
94 
95 /*
96  * Allocate a memory handle for the channel and link it into the list
97  * Also choose which memory table to use if this is the first handle
98  * being assigned to this channel
99  */
100 int
101 ldc_mem_alloc_handle(ldc_handle_t handle, ldc_mem_handle_t *mhandle)
102 {
103 	ldc_chan_t 	*ldcp;
104 	ldc_mhdl_t	*mhdl;
105 
106 	if (handle == NULL) {
107 		DWARN(DBG_ALL_LDCS,
108 		    "ldc_mem_alloc_handle: invalid channel handle\n");
109 		return (EINVAL);
110 	}
111 	ldcp = (ldc_chan_t *)handle;
112 
113 	mutex_enter(&ldcp->lock);
114 
115 	/* check to see if channel is initialized */
116 	if ((ldcp->tstate & ~TS_IN_RESET) < TS_INIT) {
117 		DWARN(ldcp->id,
118 		    "ldc_mem_alloc_handle: (0x%llx) channel not initialized\n",
119 		    ldcp->id);
120 		mutex_exit(&ldcp->lock);
121 		return (EINVAL);
122 	}
123 
124 	/* allocate handle for channel */
125 	mhdl = kmem_cache_alloc(ldcssp->memhdl_cache, KM_SLEEP);
126 
127 	/* initialize the lock */
128 	mutex_init(&mhdl->lock, NULL, MUTEX_DRIVER, NULL);
129 
130 	mhdl->myshadow = B_FALSE;
131 	mhdl->memseg = NULL;
132 	mhdl->ldcp = ldcp;
133 	mhdl->status = LDC_UNBOUND;
134 
135 	/* insert memory handle (@ head) into list */
136 	if (ldcp->mhdl_list == NULL) {
137 		ldcp->mhdl_list = mhdl;
138 		mhdl->next = NULL;
139 	} else {
140 		/* insert @ head */
141 		mhdl->next = ldcp->mhdl_list;
142 		ldcp->mhdl_list = mhdl;
143 	}
144 
145 	/* return the handle */
146 	*mhandle = (ldc_mem_handle_t)mhdl;
147 
148 	mutex_exit(&ldcp->lock);
149 
150 	D1(ldcp->id, "ldc_mem_alloc_handle: (0x%llx) allocated handle 0x%llx\n",
151 	    ldcp->id, mhdl);
152 
153 	return (0);
154 }
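
/*
 * Illustrative call sequence (a sketch, not from the original source;
 * 'lh' is assumed to be an ldc_handle_t from ldc_init()/ldc_open()):
 *
 *	ldc_mem_handle_t mh;
 *
 *	if (ldc_mem_alloc_handle(lh, &mh) == 0) {
 *		... bind, use and unbind the handle ...
 *		(void) ldc_mem_free_handle(mh);
 *	}
 */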
155 
156 /*
157  * Free memory handle for the channel and unlink it from the list
158  */
159 int
160 ldc_mem_free_handle(ldc_mem_handle_t mhandle)
161 {
162 	ldc_mhdl_t 	*mhdl, *phdl;
163 	ldc_chan_t 	*ldcp;
164 
165 	if (mhandle == NULL) {
166 		DWARN(DBG_ALL_LDCS,
167 		    "ldc_mem_free_handle: invalid memory handle\n");
168 		return (EINVAL);
169 	}
170 	mhdl = (ldc_mhdl_t *)mhandle;
171 
172 	mutex_enter(&mhdl->lock);
173 
174 	ldcp = mhdl->ldcp;
175 
176 	if (mhdl->status == LDC_BOUND || mhdl->status == LDC_MAPPED) {
177 		DWARN(ldcp->id,
178 		    "ldc_mem_free_handle: cannot free, 0x%llx hdl bound\n",
179 		    mhdl);
180 		mutex_exit(&mhdl->lock);
181 		return (EINVAL);
182 	}
183 	mutex_exit(&mhdl->lock);
184 
185 	mutex_enter(&ldcp->mlist_lock);
186 
187 	phdl = ldcp->mhdl_list;
188 
189 	/* first handle */
190 	if (phdl == mhdl) {
191 		ldcp->mhdl_list = mhdl->next;
192 		mutex_destroy(&mhdl->lock);
193 		kmem_cache_free(ldcssp->memhdl_cache, mhdl);
194 
195 		D1(ldcp->id,
196 		    "ldc_mem_free_handle: (0x%llx) freed handle 0x%llx\n",
197 		    ldcp->id, mhdl);
198 	} else {
199 		/* walk the list - unlink and free */
200 		while (phdl != NULL) {
201 			if (phdl->next == mhdl) {
202 				phdl->next = mhdl->next;
203 				mutex_destroy(&mhdl->lock);
204 				kmem_cache_free(ldcssp->memhdl_cache, mhdl);
205 				D1(ldcp->id,
206 				    "ldc_mem_free_handle: (0x%llx) freed "
207 				    "handle 0x%llx\n", ldcp->id, mhdl);
208 				break;
209 			}
210 			phdl = phdl->next;
211 		}
212 	}
213 
214 	if (phdl == NULL) {
215 		DWARN(ldcp->id,
216 		    "ldc_mem_free_handle: invalid handle 0x%llx\n", mhdl);
217 		mutex_exit(&ldcp->mlist_lock);
218 		return (EINVAL);
219 	}
220 
221 	mutex_exit(&ldcp->mlist_lock);
222 
223 	return (0);
224 }
225 
226 /*
227  * Bind a memory handle to a virtual address.
228  * The virtual address range is converted to corresponding real addresses.
229  * Returns pointer to the first ldc_mem_cookie and the total number
230  * of cookies for this virtual address. Other cookies can be obtained
231  * using the ldc_mem_nextcookie() call. If the pages are stored in
232  * consecutive locations in the table, a single cookie corresponding to
233  * the first location is returned. The cookie size spans all the entries.
234  *
235  * If the VA corresponds to a page that is already being exported, reuse
236  * the page and do not export it again. Bump the page's use count.
237  */
238 int
239 ldc_mem_bind_handle(ldc_mem_handle_t mhandle, caddr_t vaddr, size_t len,
240     uint8_t mtype, uint8_t perm, ldc_mem_cookie_t *cookie, uint32_t *ccount)
241 {
242 	ldc_mhdl_t	*mhdl;
243 	ldc_chan_t 	*ldcp;
244 	ldc_mtbl_t	*mtbl;
245 	ldc_memseg_t	*memseg;
246 	ldc_mte_t	tmp_mte;
247 	uint64_t	index, prev_index = 0;
248 	int64_t		cookie_idx;
249 	uintptr_t	raddr, ra_aligned;
250 	uint64_t	psize, poffset, v_offset;
251 	uint64_t	pg_shift, pg_size, pg_size_code, pg_mask;
252 	pgcnt_t		npages;
253 	caddr_t		v_align, addr;
254 	int 		i, rv;
255 
256 	if (mhandle == NULL) {
257 		DWARN(DBG_ALL_LDCS,
258 		    "ldc_mem_bind_handle: invalid memory handle\n");
259 		return (EINVAL);
260 	}
261 	mhdl = (ldc_mhdl_t *)mhandle;
262 	ldcp = mhdl->ldcp;
263 
264 	/* clear count */
265 	*ccount = 0;
266 
267 	mutex_enter(&mhdl->lock);
268 
269 	if (mhdl->status == LDC_BOUND || mhdl->memseg != NULL) {
270 		DWARN(ldcp->id,
271 		    "ldc_mem_bind_handle: (0x%x) handle already bound\n",
272 		    mhandle);
273 		mutex_exit(&mhdl->lock);
274 		return (EINVAL);
275 	}
276 
277 	/* Force address and size to be 8-byte aligned */
278 	if ((((uintptr_t)vaddr | len) & 0x7) != 0) {
279 		DWARN(ldcp->id,
280 		    "ldc_mem_bind_handle: addr/size is not 8-byte aligned\n");
281 		mutex_exit(&mhdl->lock);
282 		return (EINVAL);
283 	}
284 
285 	/*
286 	 * If this channel is binding a memory handle for the first
287 	 * time, allocate a memory map table for it and initialize it
288 	 */
289 	if ((mtbl = ldcp->mtbl) == NULL) {
290 
291 		mutex_enter(&ldcp->lock);
292 
293 		/* Allocate and initialize the map table structure */
294 		mtbl = kmem_zalloc(sizeof (ldc_mtbl_t), KM_SLEEP);
295 		mtbl->num_entries = mtbl->num_avail = ldc_maptable_entries;
296 		mtbl->size = ldc_maptable_entries * sizeof (ldc_mte_slot_t);
297 		mtbl->next_entry = 0;
298 		mtbl->contigmem = B_TRUE;
299 
300 		/* Allocate the table itself */
301 		mtbl->table = (ldc_mte_slot_t *)
302 		    contig_mem_alloc_align(mtbl->size, MMU_PAGESIZE);
303 		if (mtbl->table == NULL) {
304 
305 			/* allocate a page of memory using kmem_alloc */
306 			mtbl->table = kmem_alloc(MMU_PAGESIZE, KM_SLEEP);
307 			mtbl->size = MMU_PAGESIZE;
308 			mtbl->contigmem = B_FALSE;
309 			mtbl->num_entries = mtbl->num_avail =
310 			    mtbl->size / sizeof (ldc_mte_slot_t);
311 			DWARN(ldcp->id,
312 			    "ldc_mem_bind_handle: (0x%llx) reduced tbl size "
313 			    "to %lx entries\n", ldcp->id, mtbl->num_entries);
314 		}
315 
316 		/* zero out the memory */
317 		bzero(mtbl->table, mtbl->size);
318 
319 		/* initialize the lock */
320 		mutex_init(&mtbl->lock, NULL, MUTEX_DRIVER, NULL);
321 
322 		/* register table for this channel */
323 		rv = hv_ldc_set_map_table(ldcp->id,
324 		    va_to_pa(mtbl->table), mtbl->num_entries);
325 		if (rv != 0) {
326 			cmn_err(CE_WARN,
327 			    "ldc_mem_bind_handle: (0x%lx) err %d mapping tbl",
328 			    ldcp->id, rv);
329 			if (mtbl->contigmem)
330 				contig_mem_free(mtbl->table, mtbl->size);
331 			else
332 				kmem_free(mtbl->table, mtbl->size);
333 			mutex_destroy(&mtbl->lock);
334 			kmem_free(mtbl, sizeof (ldc_mtbl_t));
335 			mutex_exit(&ldcp->lock);
336 			mutex_exit(&mhdl->lock);
337 			return (EIO);
338 		}
339 
340 		ldcp->mtbl = mtbl;
341 		mutex_exit(&ldcp->lock);
342 
343 		D1(ldcp->id,
344 		    "ldc_mem_bind_handle: (0x%llx) alloc'd map table 0x%llx\n",
345 		    ldcp->id, ldcp->mtbl->table);
346 	}
347 
348 	/* FUTURE: get the page size, pgsz code, and shift */
349 	pg_size = MMU_PAGESIZE;
350 	pg_size_code = page_szc(pg_size);
351 	pg_shift = page_get_shift(pg_size_code);
352 	pg_mask = ~(pg_size - 1);
353 
354 	D1(ldcp->id, "ldc_mem_bind_handle: (0x%llx) binding "
355 	    "va 0x%llx pgsz=0x%llx, pgszc=0x%llx, pg_shift=0x%llx\n",
356 	    ldcp->id, vaddr, pg_size, pg_size_code, pg_shift);
357 
358 	/* aligned VA and its offset */
359 	v_align = (caddr_t)(((uintptr_t)vaddr) & ~(pg_size - 1));
360 	v_offset = ((uintptr_t)vaddr) & (pg_size - 1);
361 
362 	npages = (len+v_offset)/pg_size;
363 	npages = ((len+v_offset)%pg_size == 0) ? npages : npages+1;
364 
365 	D1(ldcp->id, "ldc_mem_bind_handle: binding "
366 	    "(0x%llx) v=0x%llx,val=0x%llx,off=0x%x,pgs=0x%x\n",
367 	    ldcp->id, vaddr, v_align, v_offset, npages);
368 
369 	/* lock the memory table - exclusive access to channel */
370 	mutex_enter(&mtbl->lock);
371 
372 	if (npages > mtbl->num_avail) {
373 		D1(ldcp->id, "ldc_mem_bind_handle: (0x%llx) no table entries\n",
374 		    ldcp->id);
375 		mutex_exit(&mtbl->lock);
376 		mutex_exit(&mhdl->lock);
377 		return (ENOMEM);
378 	}
379 
380 	/* Allocate a memseg structure */
381 	memseg = mhdl->memseg =
382 	    kmem_cache_alloc(ldcssp->memseg_cache, KM_SLEEP);
383 
384 	/* Allocate memory to store all pages and cookies */
385 	memseg->pages = kmem_zalloc((sizeof (ldc_page_t) * npages), KM_SLEEP);
386 	memseg->cookies =
387 	    kmem_zalloc((sizeof (ldc_mem_cookie_t) * npages), KM_SLEEP);
388 
389 	D2(ldcp->id, "ldc_mem_bind_handle: (0x%llx) processing 0x%llx pages\n",
390 	    ldcp->id, npages);
391 
392 	addr = v_align;
393 
394 	/*
395 	 * Check if direct shared memory map is enabled, if not change
396 	 * the mapping type to include SHADOW_MAP.
397 	 */
398 	if (ldc_shmem_enabled == 0)
399 		mtype = LDC_SHADOW_MAP;
400 
401 	/*
402 	 * Table slots are used in a round-robin manner. The algorithm permits
403 	 * inserting duplicate entries. Slots allocated earlier will typically
404 	 * get freed before we get back to reusing the slot. Inserting
405 	 * duplicate entries should be OK, as we only look up entries using
406 	 * the cookie addr (i.e. the tbl index) during export, unexport and
407 	 * copy operations.
408 	 *
409 	 * One implementation that was tried searched for a duplicate page
410 	 * entry first and reused it. The search overhead is very high (in the
411 	 * vnet case it halved perf, 50 to 24 mbps), so we avoid the search.
412 	 *
413 	 * But during the process of searching for a free slot, if we find a
414 	 * duplicate entry we will go ahead and use it, and bump its use count.
415 	 */
416 
417 	/* index to start searching from */
418 	index = mtbl->next_entry;
419 	cookie_idx = -1;
420 
421 	tmp_mte.ll = 0;	/* initialise fields to 0 */
422 
423 	if (mtype & LDC_DIRECT_MAP) {
424 		tmp_mte.mte_r = (perm & LDC_MEM_R) ? 1 : 0;
425 		tmp_mte.mte_w = (perm & LDC_MEM_W) ? 1 : 0;
426 		tmp_mte.mte_x = (perm & LDC_MEM_X) ? 1 : 0;
427 	}
428 
429 	if (mtype & LDC_SHADOW_MAP) {
430 		tmp_mte.mte_cr = (perm & LDC_MEM_R) ? 1 : 0;
431 		tmp_mte.mte_cw = (perm & LDC_MEM_W) ? 1 : 0;
432 	}
433 
434 	if (mtype & LDC_IO_MAP) {
435 		tmp_mte.mte_ir = (perm & LDC_MEM_R) ? 1 : 0;
436 		tmp_mte.mte_iw = (perm & LDC_MEM_W) ? 1 : 0;
437 	}
438 
439 	D1(ldcp->id, "ldc_mem_bind_handle mte=0x%llx\n", tmp_mte.ll);
440 
441 	tmp_mte.mte_pgszc = pg_size_code;
442 
443 	/* initialize each mem table entry */
444 	for (i = 0; i < npages; i++) {
445 
446 		/* check if slot is available in the table */
447 		while (mtbl->table[index].entry.ll != 0) {
448 
449 			index = (index + 1) % mtbl->num_entries;
450 
451 			if (index == mtbl->next_entry) {
452 				/* we have looped around */
453 				DWARN(DBG_ALL_LDCS,
454 				    "ldc_mem_bind_handle: (0x%llx) cannot find "
455 				    "entry\n", ldcp->id);
456 				*ccount = 0;
457 
458 				/* NOTE: free memory, remove previous entries */
459 				/* this shouldn't happen as num_avail was ok */
460 
461 				mutex_exit(&mtbl->lock);
462 				mutex_exit(&mhdl->lock);
463 				return (ENOMEM);
464 			}
465 		}
466 
467 		/* get the real address */
468 		raddr = va_to_pa((void *)addr);
469 		ra_aligned = ((uintptr_t)raddr & pg_mask);
470 
471 		/* build the mte */
472 		tmp_mte.mte_rpfn = ra_aligned >> pg_shift;
473 
474 		D1(ldcp->id, "ldc_mem_bind_handle mte=0x%llx\n", tmp_mte.ll);
475 
476 		/* update entry in table */
477 		mtbl->table[index].entry = tmp_mte;
478 
479 		D2(ldcp->id, "ldc_mem_bind_handle: (0x%llx) stored MTE 0x%llx"
480 		    " into loc 0x%llx\n", ldcp->id, tmp_mte.ll, index);
481 
482 		/* calculate the size and offset for this export range */
483 		if (i == 0) {
484 			/* first page */
485 			psize = min((pg_size - v_offset), len);
486 			poffset = v_offset;
487 
488 		} else if (i == (npages - 1)) {
489 			/* last page */
490 			psize =	(((uintptr_t)(vaddr + len)) &
491 			    ((uint64_t)(pg_size-1)));
492 			if (psize == 0)
493 				psize = pg_size;
494 			poffset = 0;
495 
496 		} else {
497 			/* middle pages */
498 			psize = pg_size;
499 			poffset = 0;
500 		}
501 
502 		/* store entry for this page */
503 		memseg->pages[i].index = index;
504 		memseg->pages[i].raddr = raddr;
505 		memseg->pages[i].offset = poffset;
506 		memseg->pages[i].size = psize;
507 		memseg->pages[i].mte = &(mtbl->table[index]);
508 
509 		/* create the cookie */
510 		if (i == 0 || (index != prev_index + 1)) {
511 			cookie_idx++;
512 			memseg->cookies[cookie_idx].addr =
513 			    IDX2COOKIE(index, pg_size_code, pg_shift);
514 			memseg->cookies[cookie_idx].addr |= poffset;
515 			memseg->cookies[cookie_idx].size = psize;
516 
517 		} else {
518 			memseg->cookies[cookie_idx].size += psize;
519 		}
520 
521 		D1(ldcp->id, "ldc_mem_bind_handle: bound "
522 		    "(0x%llx) va=0x%llx, idx=0x%llx, "
523 		    "ra=0x%llx(sz=0x%x,off=0x%x)\n",
524 		    ldcp->id, addr, index, raddr, psize, poffset);
525 
526 		/* decrement number of available entries */
527 		mtbl->num_avail--;
528 
529 		/* increment va by page size */
530 		addr += pg_size;
531 
532 		/* increment index */
533 		prev_index = index;
534 		index = (index + 1) % mtbl->num_entries;
535 
536 		/* save the next slot */
537 		mtbl->next_entry = index;
538 	}
539 
540 	mutex_exit(&mtbl->lock);
541 
542 	/* memory handle = bound */
543 	mhdl->mtype = mtype;
544 	mhdl->perm = perm;
545 	mhdl->status = LDC_BOUND;
546 
547 	/* update memseg_t */
548 	memseg->vaddr = vaddr;
549 	memseg->raddr = memseg->pages[0].raddr;
550 	memseg->size = len;
551 	memseg->npages = npages;
552 	memseg->ncookies = cookie_idx + 1;
553 	memseg->next_cookie = (memseg->ncookies > 1) ? 1 : 0;
554 
555 	/* return count and first cookie */
556 	*ccount = memseg->ncookies;
557 	cookie->addr = memseg->cookies[0].addr;
558 	cookie->size = memseg->cookies[0].size;
559 
560 	D1(ldcp->id,
561 	    "ldc_mem_bind_handle: (0x%llx) bound 0x%llx, va=0x%llx, "
562 	    "pgs=0x%llx cookies=0x%llx\n",
563 	    ldcp->id, mhdl, vaddr, npages, memseg->ncookies);
564 
565 	mutex_exit(&mhdl->lock);
566 	return (0);
567 }
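
/*
 * Export sketch (illustrative; 'lh', 'buf' and 'buflen' are assumed,
 * error handling elided). The address and length must be 8-byte
 * aligned; the returned cookies are what get sent to the peer:
 *
 *	ldc_mem_handle_t mh;
 *	ldc_mem_cookie_t cookie;
 *	uint32_t ccount;
 *
 *	(void) ldc_mem_alloc_handle(lh, &mh);
 *	(void) ldc_mem_bind_handle(mh, buf, buflen,
 *	    LDC_SHADOW_MAP, LDC_MEM_RW, &cookie, &ccount);
 */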
568 
569 /*
570  * Return the next cookie associated with the specified memory handle
571  */
572 int
573 ldc_mem_nextcookie(ldc_mem_handle_t mhandle, ldc_mem_cookie_t *cookie)
574 {
575 	ldc_mhdl_t	*mhdl;
576 	ldc_chan_t 	*ldcp;
577 	ldc_memseg_t	*memseg;
578 
579 	if (mhandle == NULL) {
580 		DWARN(DBG_ALL_LDCS,
581 		    "ldc_mem_nextcookie: invalid memory handle\n");
582 		return (EINVAL);
583 	}
584 	mhdl = (ldc_mhdl_t *)mhandle;
585 
586 	mutex_enter(&mhdl->lock);
587 
588 	ldcp = mhdl->ldcp;
589 	memseg = mhdl->memseg;
590 
591 	if (cookie == NULL) {
592 		DWARN(ldcp->id,
593 		    "ldc_mem_nextcookie:(0x%llx) invalid cookie arg\n",
594 		    ldcp->id);
595 		mutex_exit(&mhdl->lock);
596 		return (EINVAL);
597 	}
598 
599 	if (memseg->next_cookie != 0) {
600 		cookie->addr = memseg->cookies[memseg->next_cookie].addr;
601 		cookie->size = memseg->cookies[memseg->next_cookie].size;
602 		memseg->next_cookie++;
603 		if (memseg->next_cookie == memseg->ncookies)
604 			memseg->next_cookie = 0;
605 
606 	} else {
607 		DWARN(ldcp->id,
608 		    "ldc_mem_nextcookie:(0x%llx) no more cookies\n", ldcp->id);
609 		cookie->addr = 0;
610 		cookie->size = 0;
611 		mutex_exit(&mhdl->lock);
612 		return (EINVAL);
613 	}
614 
615 	D1(ldcp->id,
616 	    "ldc_mem_nextcookie: (0x%llx) cookie addr=0x%llx,sz=0x%llx\n",
617 	    ldcp->id, cookie->addr, cookie->size);
618 
619 	mutex_exit(&mhdl->lock);
620 	return (0);
621 }
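
/*
 * Cookie iteration sketch (illustrative; 'mh', 'cookie' and 'ccount'
 * come from a successful ldc_mem_bind_handle() as shown above):
 *
 *	uint32_t i;
 *
 *	for (i = 1; i < ccount; i++) {
 *		if (ldc_mem_nextcookie(mh, &cookie) != 0)
 *			break;
 *		... send this cookie to the peer as well ...
 *	}
 */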
622 
623 /*
624  * Unbind the virtual memory region associated with the specified
625  * memory handle. All associated cookies are freed and the corresponding
626  * RA space is no longer exported.
627  */
628 int
629 ldc_mem_unbind_handle(ldc_mem_handle_t mhandle)
630 {
631 	ldc_mhdl_t	*mhdl;
632 	ldc_chan_t 	*ldcp;
633 	ldc_mtbl_t	*mtbl;
634 	ldc_memseg_t	*memseg;
635 	uint64_t	cookie_addr;
636 	uint64_t	pg_shift, pg_size_code;
637 	int		i, rv;
638 
639 	if (mhandle == NULL) {
640 		DWARN(DBG_ALL_LDCS,
641 		    "ldc_mem_unbind_handle: invalid memory handle\n");
642 		return (EINVAL);
643 	}
644 	mhdl = (ldc_mhdl_t *)mhandle;
645 
646 	mutex_enter(&mhdl->lock);
647 
648 	if (mhdl->status == LDC_UNBOUND) {
649 		DWARN(DBG_ALL_LDCS,
650 		    "ldc_mem_unbind_handle: (0x%x) handle is not bound\n",
651 		    mhandle);
652 		mutex_exit(&mhdl->lock);
653 		return (EINVAL);
654 	}
655 
656 	ldcp = mhdl->ldcp;
657 	mtbl = ldcp->mtbl;
658 
659 	memseg = mhdl->memseg;
660 
661 	/* lock the memory table - exclusive access to channel */
662 	mutex_enter(&mtbl->lock);
663 
664 	/* undo the pages exported */
665 	for (i = 0; i < memseg->npages; i++) {
666 
667 		/* check for mapped pages, revocation cookie != 0 */
668 		if (memseg->pages[i].mte->cookie) {
669 
670 			pg_size_code = page_szc(memseg->pages[i].size);
671 			pg_shift = page_get_shift(pg_size_code);
672 			cookie_addr = IDX2COOKIE(memseg->pages[i].index,
673 			    pg_size_code, pg_shift);
674 
675 			D1(ldcp->id, "ldc_mem_unbind_handle: (0x%llx) revoke "
676 			    "cookie 0x%llx, rcookie 0x%llx\n", ldcp->id,
677 			    cookie_addr, memseg->pages[i].mte->cookie);
678 			rv = hv_ldc_revoke(ldcp->id, cookie_addr,
679 			    memseg->pages[i].mte->cookie);
680 			if (rv) {
681 				DWARN(ldcp->id,
682 				    "ldc_mem_unbind_handle: (0x%llx) cannot "
683 				    "revoke mapping, cookie %llx\n", ldcp->id,
684 				    cookie_addr);
685 			}
686 		}
687 
688 		/* clear the entry from the table */
689 		memseg->pages[i].mte->entry.ll = 0;
690 		mtbl->num_avail++;
691 	}
692 	mutex_exit(&mtbl->lock);
693 
694 	/* free the allocated memseg and page structures */
695 	kmem_free(memseg->pages, (sizeof (ldc_page_t) * memseg->npages));
696 	kmem_free(memseg->cookies,
697 	    (sizeof (ldc_mem_cookie_t) * memseg->npages));
698 	kmem_cache_free(ldcssp->memseg_cache, memseg);
699 
700 	/* uninitialize the memory handle */
701 	mhdl->memseg = NULL;
702 	mhdl->status = LDC_UNBOUND;
703 
704 	D1(ldcp->id, "ldc_mem_unbind_handle: (0x%llx) unbound handle 0x%llx\n",
705 	    ldcp->id, mhdl);
706 
707 	mutex_exit(&mhdl->lock);
708 	return (0);
709 }
710 
711 /*
712  * Get information about a memory handle. The virtual and real base
713  * addresses, along with the map type and permissions, are returned.
714  */
715 int
716 ldc_mem_info(ldc_mem_handle_t mhandle, ldc_mem_info_t *minfo)
717 {
718 	ldc_mhdl_t	*mhdl;
719 
720 	if (mhandle == NULL) {
721 		DWARN(DBG_ALL_LDCS, "ldc_mem_info: invalid memory handle\n");
722 		return (EINVAL);
723 	}
724 	mhdl = (ldc_mhdl_t *)mhandle;
725 
726 	if (minfo == NULL) {
727 		DWARN(DBG_ALL_LDCS, "ldc_mem_info: invalid args\n");
728 		return (EINVAL);
729 	}
730 
731 	mutex_enter(&mhdl->lock);
732 
733 	minfo->status = mhdl->status;
734 	if (mhdl->status == LDC_BOUND || mhdl->status == LDC_MAPPED) {
735 		minfo->vaddr = mhdl->memseg->vaddr;
736 		minfo->raddr = mhdl->memseg->raddr;
737 		minfo->mtype = mhdl->mtype;
738 		minfo->perm = mhdl->perm;
739 	}
740 	mutex_exit(&mhdl->lock);
741 
742 	return (0);
743 }
744 
745 /*
746  * Copy data between the client-specified virtual address space and
747  * the exported memory associated with the cookies.
748  * The direction argument determines whether the data is read from or
749  * written to exported memory.
750  */
751 int
752 ldc_mem_copy(ldc_handle_t handle, caddr_t vaddr, uint64_t off, size_t *size,
753     ldc_mem_cookie_t *cookies, uint32_t ccount, uint8_t direction)
754 {
755 	ldc_chan_t 	*ldcp;
756 	uint64_t	local_voff, local_valign;
757 	uint64_t	cookie_addr, cookie_size;
758 	uint64_t	pg_shift, pg_size, pg_size_code;
759 	uint64_t 	export_caddr, export_poff, export_psize, export_size;
760 	uint64_t	local_ra, local_poff, local_psize;
761 	uint64_t	copy_size, copied_len = 0, total_bal = 0, idx = 0;
762 	pgcnt_t		npages;
763 	size_t		len = *size;
764 	int 		i, rv = 0;
765 
766 	uint64_t	chid;
767 
768 	if (handle == NULL) {
769 		DWARN(DBG_ALL_LDCS, "ldc_mem_copy: invalid channel handle\n");
770 		return (EINVAL);
771 	}
772 	ldcp = (ldc_chan_t *)handle;
773 	chid = ldcp->id;
774 
775 	/* check to see if channel is UP */
776 	if (ldcp->tstate != TS_UP) {
777 		DWARN(chid, "ldc_mem_copy: (0x%llx) channel is not UP\n",
778 		    chid);
779 		return (ECONNRESET);
780 	}
781 
782 	/* Force address and size to be 8-byte aligned */
783 	if ((((uintptr_t)vaddr | len) & 0x7) != 0) {
784 		DWARN(chid,
785 		    "ldc_mem_copy: addr/sz is not 8-byte aligned\n");
786 		return (EINVAL);
787 	}
788 
789 	/* Find the size of the exported memory */
790 	export_size = 0;
791 	for (i = 0; i < ccount; i++)
792 		export_size += cookies[i].size;
793 
794 	/* check to see if offset is valid */
795 	if (off > export_size) {
796 		DWARN(chid,
797 		    "ldc_mem_copy: (0x%llx) start offset > export mem size\n",
798 		    chid);
799 		return (EINVAL);
800 	}
801 
802 	/*
803 	 * Check to see if the export size is smaller than the size we
804 	 * are requesting to copy - if so flag an error
805 	 */
806 	if ((export_size - off) < *size) {
807 		DWARN(chid,
808 		    "ldc_mem_copy: (0x%llx) copy size > export mem size\n",
809 		    chid);
810 		return (EINVAL);
811 	}
812 
813 	total_bal = min(export_size, *size);
814 
815 	/* FUTURE: get the page size, pgsz code, and shift */
816 	pg_size = MMU_PAGESIZE;
817 	pg_size_code = page_szc(pg_size);
818 	pg_shift = page_get_shift(pg_size_code);
819 
820 	D1(chid, "ldc_mem_copy: copying data "
821 	    "(0x%llx) va 0x%llx pgsz=0x%llx, pgszc=0x%llx, pg_shift=0x%llx\n",
822 	    chid, vaddr, pg_size, pg_size_code, pg_shift);
823 
824 	/* aligned VA and its offset */
825 	local_valign = (((uintptr_t)vaddr) & ~(pg_size - 1));
826 	local_voff = ((uintptr_t)vaddr) & (pg_size - 1);
827 
828 	npages = (len+local_voff)/pg_size;
829 	npages = ((len+local_voff)%pg_size == 0) ? npages : npages+1;
830 
831 	D1(chid,
832 	    "ldc_mem_copy: (0x%llx) v=0x%llx,val=0x%llx,off=0x%x,pgs=0x%x\n",
833 	    chid, vaddr, local_valign, local_voff, npages);
834 
835 	local_ra = va_to_pa((void *)local_valign);
836 	local_poff = local_voff;
837 	local_psize = min(len, (pg_size - local_voff));
838 
839 	len -= local_psize;
840 
841 	/*
842 	 * find the first cookie in the list of cookies
843 	 * if the offset passed in is not zero
844 	 */
845 	for (idx = 0; idx < ccount; idx++) {
846 		cookie_size = cookies[idx].size;
847 		if (off < cookie_size)
848 			break;
849 		off -= cookie_size;
850 	}
851 
852 	cookie_addr = cookies[idx].addr + off;
853 	cookie_size = cookies[idx].size - off;
854 
855 	export_caddr = cookie_addr & ~(pg_size - 1);
856 	export_poff = cookie_addr & (pg_size - 1);
857 	export_psize = min(cookie_size, (pg_size - export_poff));
858 
859 	for (;;) {
860 
861 		copy_size = min(export_psize, local_psize);
862 
863 		D1(chid,
864 		    "ldc_mem_copy:(0x%llx) dir=0x%x, caddr=0x%llx,"
865 		    " loc_ra=0x%llx, exp_poff=0x%llx, loc_poff=0x%llx,"
866 		    " exp_psz=0x%llx, loc_psz=0x%llx, copy_sz=0x%llx,"
867 		    " total_bal=0x%llx\n",
868 		    chid, direction, export_caddr, local_ra, export_poff,
869 		    local_poff, export_psize, local_psize, copy_size,
870 		    total_bal);
871 
872 		rv = hv_ldc_copy(chid, direction,
873 		    (export_caddr + export_poff), (local_ra + local_poff),
874 		    copy_size, &copied_len);
875 
876 		if (rv != 0) {
877 			int 		error = EIO;
878 			uint64_t	rx_hd, rx_tl;
879 
880 			DWARN(chid,
881 			    "ldc_mem_copy: (0x%llx) err %d during copy\n",
882 			    (unsigned long long)chid, rv);
883 			DWARN(chid,
884 			    "ldc_mem_copy: (0x%llx) dir=0x%x, caddr=0x%lx, "
885 			    "loc_ra=0x%lx, exp_poff=0x%lx, loc_poff=0x%lx,"
886 			    " exp_psz=0x%lx, loc_psz=0x%lx, copy_sz=0x%lx,"
887 			    " copied_len=0x%lx, total_bal=0x%lx\n",
888 			    chid, direction, export_caddr, local_ra,
889 			    export_poff, local_poff, export_psize, local_psize,
890 			    copy_size, copied_len, total_bal);
891 
892 			*size = *size - total_bal;
893 
894 			/*
895 			 * check if reason for copy error was due to
896 			 * a channel reset. we need to grab the lock
897 			 * just in case we have to do a reset.
898 			 */
899 			mutex_enter(&ldcp->lock);
900 			mutex_enter(&ldcp->tx_lock);
901 
902 			rv = hv_ldc_rx_get_state(ldcp->id,
903 			    &rx_hd, &rx_tl, &(ldcp->link_state));
904 			if (ldcp->link_state == LDC_CHANNEL_DOWN ||
905 			    ldcp->link_state == LDC_CHANNEL_RESET) {
906 				i_ldc_reset(ldcp, B_FALSE);
907 				error = ECONNRESET;
908 			}
909 
910 			mutex_exit(&ldcp->tx_lock);
911 			mutex_exit(&ldcp->lock);
912 
913 			return (error);
914 		}
915 
916 		ASSERT(copied_len <= copy_size);
917 
918 		D2(chid, "ldc_mem_copy: copied=0x%llx\n", copied_len);
919 		export_poff += copied_len;
920 		local_poff += copied_len;
921 		export_psize -= copied_len;
922 		local_psize -= copied_len;
923 		cookie_size -= copied_len;
924 
925 		total_bal -= copied_len;
926 
927 		if (copy_size != copied_len)
928 			continue;
929 
930 		if (export_psize == 0 && total_bal != 0) {
931 
932 			if (cookie_size == 0) {
933 				idx++;
934 				cookie_addr = cookies[idx].addr;
935 				cookie_size = cookies[idx].size;
936 
937 				export_caddr = cookie_addr & ~(pg_size - 1);
938 				export_poff = cookie_addr & (pg_size - 1);
939 				export_psize =
940 				    min(cookie_size, (pg_size-export_poff));
941 			} else {
942 				export_caddr += pg_size;
943 				export_poff = 0;
944 				export_psize = min(cookie_size, pg_size);
945 			}
946 		}
947 
948 		if (local_psize == 0 && total_bal != 0) {
949 			local_valign += pg_size;
950 			local_ra = va_to_pa((void *)local_valign);
951 			local_poff = 0;
952 			local_psize = min(pg_size, len);
953 			len -= local_psize;
954 		}
955 
956 		/* check if we are all done */
957 		if (total_bal == 0)
958 			break;
959 	}
960 
961 
962 	D1(chid,
963 	    "ldc_mem_copy: (0x%llx) done copying sz=0x%llx\n",
964 	    chid, *size);
965 
966 	return (0);
967 }
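
/*
 * Copy sketch (illustrative; 'lh', 'cookies' and 'ccount' describe a
 * peer's exported range and are assumptions here). LDC_COPY_OUT
 * writes local data into the exported memory; on a partial failure
 * 'len' is updated to the number of bytes actually copied:
 *
 *	uint64_t req[8];
 *	size_t len = sizeof (req);
 *	int rv;
 *
 *	rv = ldc_mem_copy(lh, (caddr_t)req, 0, &len,
 *	    cookies, ccount, LDC_COPY_OUT);
 */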
968 
969 /*
970  * Copy data between the client-specified virtual address space and
971  * HV physical memory.
972  *
973  * The direction argument determines whether the data is read from or
974  * written to HV memory. The direction values, LDC_COPY_IN/OUT, are
975  * the same as for the ldc_mem_copy interface.
976  */
977 int
978 ldc_mem_rdwr_cookie(ldc_handle_t handle, caddr_t vaddr, size_t *size,
979     caddr_t paddr, uint8_t direction)
980 {
981 	ldc_chan_t 	*ldcp;
982 	uint64_t	local_voff, local_valign;
983 	uint64_t	pg_shift, pg_size, pg_size_code;
984 	uint64_t 	target_pa, target_poff, target_psize, target_size;
985 	uint64_t	local_ra, local_poff, local_psize;
986 	uint64_t	copy_size, copied_len = 0;
987 	pgcnt_t		npages;
988 	size_t		len = *size;
989 	int 		rv = 0;
990 
991 	if (handle == NULL) {
992 		DWARN(DBG_ALL_LDCS,
993 		    "ldc_mem_rdwr_cookie: invalid channel handle\n");
994 		return (EINVAL);
995 	}
996 	ldcp = (ldc_chan_t *)handle;
997 
998 	mutex_enter(&ldcp->lock);
999 
1000 	/* check to see if channel is UP */
1001 	if (ldcp->tstate != TS_UP) {
1002 		DWARN(ldcp->id,
1003 		    "ldc_mem_rdwr_cookie: (0x%llx) channel is not UP\n",
1004 		    ldcp->id);
1005 		mutex_exit(&ldcp->lock);
1006 		return (ECONNRESET);
1007 	}
1008 
1009 	/* Force address and size to be 8-byte aligned */
1010 	if ((((uintptr_t)vaddr | len) & 0x7) != 0) {
1011 		DWARN(ldcp->id,
1012 		    "ldc_mem_rdwr_cookie: addr/size is not 8-byte aligned\n");
1013 		mutex_exit(&ldcp->lock);
1014 		return (EINVAL);
1015 	}
1016 
1017 	target_size = *size;
1018 
1019 	/* FUTURE: get the page size, pgsz code, and shift */
1020 	pg_size = MMU_PAGESIZE;
1021 	pg_size_code = page_szc(pg_size);
1022 	pg_shift = page_get_shift(pg_size_code);
1023 
1024 	D1(ldcp->id, "ldc_mem_rdwr_cookie: copying data "
1025 	    "(0x%llx) va 0x%llx pgsz=0x%llx, pgszc=0x%llx, pg_shift=0x%llx\n",
1026 	    ldcp->id, vaddr, pg_size, pg_size_code, pg_shift);
1027 
1028 	/* aligned VA and its offset */
1029 	local_valign = ((uintptr_t)vaddr) & ~(pg_size - 1);
1030 	local_voff = ((uintptr_t)vaddr) & (pg_size - 1);
1031 
1032 	npages = (len + local_voff) / pg_size;
1033 	npages = ((len + local_voff) % pg_size == 0) ? npages : npages+1;
1034 
1035 	D1(ldcp->id, "ldc_mem_rdwr_cookie: (0x%llx) v=0x%llx, "
1036 	    "val=0x%llx,off=0x%x,pgs=0x%x\n",
1037 	    ldcp->id, vaddr, local_valign, local_voff, npages);
1038 
1039 	local_ra = va_to_pa((void *)local_valign);
1040 	local_poff = local_voff;
1041 	local_psize = min(len, (pg_size - local_voff));
1042 
1043 	len -= local_psize;
1044 
1045 	target_pa = ((uintptr_t)paddr) & ~(pg_size - 1);
1046 	target_poff = ((uintptr_t)paddr) & (pg_size - 1);
1047 	target_psize = pg_size - target_poff;
1048 
1049 	for (;;) {
1050 
1051 		copy_size = min(target_psize, local_psize);
1052 
1053 		D1(ldcp->id,
1054 		    "ldc_mem_rdwr_cookie: (0x%llx) dir=0x%x, tar_pa=0x%llx,"
1055 		    " loc_ra=0x%llx, tar_poff=0x%llx, loc_poff=0x%llx,"
1056 		    " tar_psz=0x%llx, loc_psz=0x%llx, copy_sz=0x%llx,"
1057 		    " total_bal=0x%llx\n",
1058 		    ldcp->id, direction, target_pa, local_ra, target_poff,
1059 		    local_poff, target_psize, local_psize, copy_size,
1060 		    target_size);
1061 
1062 		rv = hv_ldc_copy(ldcp->id, direction,
1063 		    (target_pa + target_poff), (local_ra + local_poff),
1064 		    copy_size, &copied_len);
1065 
1066 		if (rv != 0) {
1067 			DWARN(DBG_ALL_LDCS,
1068 			    "ldc_mem_rdwr_cookie: (0x%lx) err %d during copy\n",
1069 			    ldcp->id, rv);
1070 			DWARN(DBG_ALL_LDCS,
1071 			    "ldc_mem_rdwr_cookie: (0x%llx) dir=%lld, "
1072 			    "tar_pa=0x%llx, loc_ra=0x%llx, tar_poff=0x%llx, "
1073 			    "loc_poff=0x%llx, tar_psz=0x%llx, loc_psz=0x%llx, "
1074 			    "copy_sz=0x%llx, total_bal=0x%llx\n",
1075 			    ldcp->id, direction, target_pa, local_ra,
1076 			    target_poff, local_poff, target_psize, local_psize,
1077 			    copy_size, target_size);
1078 
1079 			*size = *size - target_size;
1080 			mutex_exit(&ldcp->lock);
1081 			return (i_ldc_h2v_error(rv));
1082 		}
1083 
1084 		D2(ldcp->id, "ldc_mem_rdwr_cookie: copied=0x%llx\n",
1085 		    copied_len);
1086 		target_poff += copied_len;
1087 		local_poff += copied_len;
1088 		target_psize -= copied_len;
1089 		local_psize -= copied_len;
1090 
1091 		target_size -= copied_len;
1092 
1093 		if (copy_size != copied_len)
1094 			continue;
1095 
1096 		if (target_psize == 0 && target_size != 0) {
1097 			target_pa += pg_size;
1098 			target_poff = 0;
1099 			target_psize = min(pg_size, target_size);
1100 		}
1101 
1102 		if (local_psize == 0 && target_size != 0) {
1103 			local_valign += pg_size;
1104 			local_ra = va_to_pa((void *)local_valign);
1105 			local_poff = 0;
1106 			local_psize = min(pg_size, len);
1107 			len -= local_psize;
1108 		}
1109 
1110 		/* check if we are all done */
1111 		if (target_size == 0)
1112 			break;
1113 	}
1114 
1115 	mutex_exit(&ldcp->lock);
1116 
1117 	D1(ldcp->id, "ldc_mem_rdwr_cookie: (0x%llx) done copying sz=0x%llx\n",
1118 	    ldcp->id, *size);
1119 
1120 	return (0);
1121 }
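
/*
 * Raw read sketch (illustrative; 'lh' and the physical address 'pa'
 * are assumptions). Note 'paddr' is a single physical address, not a
 * cookie list, and vaddr/size must be 8-byte aligned:
 *
 *	uint64_t buf[8];
 *	size_t len = sizeof (buf);
 *	int rv;
 *
 *	rv = ldc_mem_rdwr_cookie(lh, (caddr_t)buf, &len,
 *	    (caddr_t)pa, LDC_COPY_IN);
 */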
1122 
1123 /*
1124  * Map an exported memory segment into the local address space. If the
1125  * memory range was exported for direct map access, a HV call is made
1126  * to allocate a RA range. If the map is done via a shadow copy, local
1127  * shadow memory is allocated and the base VA is returned in 'vaddr'. If
1128  * the mapping is a direct map then the RA is returned in 'raddr'.
1129  */
1130 int
1131 ldc_mem_map(ldc_mem_handle_t mhandle, ldc_mem_cookie_t *cookie, uint32_t ccount,
1132     uint8_t mtype, uint8_t perm, caddr_t *vaddr, caddr_t *raddr)
1133 {
1134 	int		i, j, idx, rv, retries;
1135 	ldc_chan_t 	*ldcp;
1136 	ldc_mhdl_t	*mhdl;
1137 	ldc_memseg_t	*memseg;
1138 	caddr_t		tmpaddr;
1139 	uint64_t	map_perm = perm;
1140 	uint64_t	pg_size, pg_shift, pg_size_code, pg_mask;
1141 	uint64_t	exp_size = 0, base_off, map_size, npages;
1142 	uint64_t	cookie_addr, cookie_off, cookie_size;
1143 	tte_t		ldc_tte;
1144 
1145 	if (mhandle == NULL) {
1146 		DWARN(DBG_ALL_LDCS, "ldc_mem_map: invalid memory handle\n");
1147 		return (EINVAL);
1148 	}
1149 	mhdl = (ldc_mhdl_t *)mhandle;
1150 
1151 	mutex_enter(&mhdl->lock);
1152 
1153 	if (mhdl->status == LDC_BOUND || mhdl->status == LDC_MAPPED ||
1154 	    mhdl->memseg != NULL) {
1155 		DWARN(DBG_ALL_LDCS,
1156 		    "ldc_mem_map: (0x%llx) handle bound/mapped\n", mhandle);
1157 		mutex_exit(&mhdl->lock);
1158 		return (EINVAL);
1159 	}
1160 
1161 	ldcp = mhdl->ldcp;
1162 
1163 	mutex_enter(&ldcp->lock);
1164 
1165 	if (ldcp->tstate != TS_UP) {
1166 		DWARN(ldcp->id,
1167 		    "ldc_mem_dring_map: (0x%llx) channel is not UP\n",
1168 		    ldcp->id);
1169 		mutex_exit(&ldcp->lock);
1170 		mutex_exit(&mhdl->lock);
1171 		return (ECONNRESET);
1172 	}
1173 
1174 	if ((mtype & (LDC_SHADOW_MAP|LDC_DIRECT_MAP|LDC_IO_MAP)) == 0) {
1175 		DWARN(ldcp->id, "ldc_mem_map: invalid map type\n");
1176 		mutex_exit(&ldcp->lock);
1177 		mutex_exit(&mhdl->lock);
1178 		return (EINVAL);
1179 	}
1180 
1181 	D1(ldcp->id, "ldc_mem_map: (0x%llx) cookie = 0x%llx,0x%llx\n",
1182 	    ldcp->id, cookie->addr, cookie->size);
1183 
1184 	/* FUTURE: get the page size, pgsz code, and shift */
1185 	pg_size = MMU_PAGESIZE;
1186 	pg_size_code = page_szc(pg_size);
1187 	pg_shift = page_get_shift(pg_size_code);
1188 	pg_mask = ~(pg_size - 1);
1189 
1190 	/* calculate the number of pages in the exported cookie */
1191 	base_off = cookie[0].addr & (pg_size - 1);
1192 	for (idx = 0; idx < ccount; idx++)
1193 		exp_size += cookie[idx].size;
1194 	map_size = P2ROUNDUP((exp_size + base_off), pg_size);
1195 	npages = (map_size >> pg_shift);
1196 
1197 	/* Allocate memseg structure */
1198 	memseg = mhdl->memseg =
1199 	    kmem_cache_alloc(ldcssp->memseg_cache, KM_SLEEP);
1200 
1201 	/* Allocate memory to store all pages and cookies */
1202 	memseg->pages =	kmem_zalloc((sizeof (ldc_page_t) * npages), KM_SLEEP);
1203 	memseg->cookies =
1204 	    kmem_zalloc((sizeof (ldc_mem_cookie_t) * ccount), KM_SLEEP);
1205 
1206 	D2(ldcp->id, "ldc_mem_map: (0x%llx) exp_size=0x%llx, map_size=0x%llx,"
1207 	    "pages=0x%llx\n", ldcp->id, exp_size, map_size, npages);
1208 
1209 	/*
1210 	 * Check if direct map over shared memory is enabled, if not change
1211 	 * the mapping type to SHADOW_MAP.
1212 	 */
1213 	if (ldc_shmem_enabled == 0)
1214 		mtype = LDC_SHADOW_MAP;
1215 
1216 	/*
1217 	 * Check to see if the client is requesting direct or shadow map
1218 	 * If direct map is requested, try to map remote memory first,
1219 	 * and if that fails, revert to shadow map
1220 	 */
1221 	if (mtype == LDC_DIRECT_MAP) {
1222 
1223 		/* Allocate kernel virtual space for mapping */
1224 		memseg->vaddr = vmem_xalloc(heap_arena, map_size,
1225 		    pg_size, 0, 0, NULL, NULL, VM_NOSLEEP);
1226 		if (memseg->vaddr == NULL) {
1227 			cmn_err(CE_WARN,
1228 			    "ldc_mem_map: (0x%lx) memory map failed\n",
1229 			    ldcp->id);
1230 			kmem_free(memseg->cookies,
1231 			    (sizeof (ldc_mem_cookie_t) * ccount));
1232 			kmem_free(memseg->pages,
1233 			    (sizeof (ldc_page_t) * npages));
1234 			kmem_cache_free(ldcssp->memseg_cache, memseg);
1235 
1236 			mutex_exit(&ldcp->lock);
1237 			mutex_exit(&mhdl->lock);
1238 			return (ENOMEM);
1239 		}
1240 
1241 		/* Unload previous mapping */
1242 		hat_unload(kas.a_hat, memseg->vaddr, map_size,
1243 		    HAT_UNLOAD_NOSYNC | HAT_UNLOAD_UNLOCK);
1244 
1245 		/* for each cookie passed in - map into address space */
1246 		idx = 0;
1247 		cookie_size = 0;
1248 		tmpaddr = memseg->vaddr;
1249 
1250 		for (i = 0; i < npages; i++) {
1251 
1252 			if (cookie_size == 0) {
1253 				ASSERT(idx < ccount);
1254 				cookie_addr = cookie[idx].addr & pg_mask;
1255 				cookie_off = cookie[idx].addr & (pg_size - 1);
1256 				cookie_size =
1257 				    P2ROUNDUP((cookie_off + cookie[idx].size),
1258 				    pg_size);
1259 				idx++;
1260 			}
1261 
1262 			D1(ldcp->id, "ldc_mem_map: (0x%llx) mapping "
1263 			    "cookie 0x%llx, bal=0x%llx\n", ldcp->id,
1264 			    cookie_addr, cookie_size);
1265 
1266 			/* map the cookie into address space */
1267 			for (retries = 0; retries < ldc_max_retries;
1268 			    retries++) {
1269 
1270 				rv = hv_ldc_mapin(ldcp->id, cookie_addr,
1271 				    &memseg->pages[i].raddr, &map_perm);
1272 				if (rv != H_EWOULDBLOCK && rv != H_ETOOMANY)
1273 					break;
1274 
1275 				drv_usecwait(ldc_delay);
1276 			}
1277 
1278 			if (rv || memseg->pages[i].raddr == 0) {
1279 				DWARN(ldcp->id,
1280 				    "ldc_mem_map: (0x%llx) hv mapin err %d\n",
1281 				    ldcp->id, rv);
1282 
1283 				/* remove previous mapins */
1284 				hat_unload(kas.a_hat, memseg->vaddr, map_size,
1285 				    HAT_UNLOAD_NOSYNC | HAT_UNLOAD_UNLOCK);
1286 				for (j = 0; j < i; j++) {
1287 					rv = hv_ldc_unmap(
1288 					    memseg->pages[j].raddr);
1289 					if (rv) {
1290 						DWARN(ldcp->id,
1291 						    "ldc_mem_map: (0x%llx) "
1292 						    "cannot unmap ra=0x%llx\n",
1293 						    ldcp->id,
1294 						    memseg->pages[j].raddr);
1295 					}
1296 				}
1297 
1298 				/* free kernel virtual space */
1299 				vmem_free(heap_arena, (void *)memseg->vaddr,
1300 				    map_size);
1301 
1302 				/* direct map failed - revert to shadow map */
1303 				mtype = LDC_SHADOW_MAP;
1304 				break;
1305 
1306 			} else {
1307 
1308 				D1(ldcp->id,
1309 				    "ldc_mem_map: (0x%llx) vtop map 0x%llx -> "
1310 				    "0x%llx, cookie=0x%llx, perm=0x%llx\n",
1311 				    ldcp->id, tmpaddr, memseg->pages[i].raddr,
1312 				    cookie_addr, perm);
1313 
1314 				/*
1315 				 * NOTE: Calling hat_devload directly causes it
1316 				 * to look up the page_t using the pfn. Since
1317 				 * this addr is beyond the memlist, it treats
1318 				 * it as non-memory.
1319 				 */
1320 				sfmmu_memtte(&ldc_tte,
1321 				    (pfn_t)(memseg->pages[i].raddr >> pg_shift),
1322 				    PROT_READ | PROT_WRITE | HAT_NOSYNC, TTE8K);
1323 
1324 				D1(ldcp->id,
1325 				    "ldc_mem_map: (0x%llx) ra 0x%llx -> "
1326 				    "tte 0x%llx\n", ldcp->id,
1327 				    memseg->pages[i].raddr, ldc_tte);
1328 
1329 				sfmmu_tteload(kas.a_hat, &ldc_tte, tmpaddr,
1330 				    NULL, HAT_LOAD_LOCK);
1331 
1332 				cookie_size -= pg_size;
1333 				cookie_addr += pg_size;
1334 				tmpaddr += pg_size;
1335 			}
1336 		}
1337 	}
1338 
1339 	if (mtype == LDC_SHADOW_MAP) {
1340 		if (*vaddr == NULL) {
1341 			memseg->vaddr = kmem_zalloc(exp_size, KM_SLEEP);
1342 			mhdl->myshadow = B_TRUE;
1343 
1344 			D1(ldcp->id, "ldc_mem_map: (0x%llx) allocated "
1345 			    "shadow page va=0x%llx\n", ldcp->id, memseg->vaddr);
1346 		} else {
1347 			/*
1348 			 * Use client supplied memory for memseg->vaddr
1349 			 * WARNING: assuming that client mem is >= exp_size
1350 			 */
1351 			memseg->vaddr = *vaddr;
1352 		}
1353 
1354 		/* Save all page and cookie information */
1355 		for (i = 0, tmpaddr = memseg->vaddr; i < npages; i++) {
1356 			memseg->pages[i].raddr = va_to_pa(tmpaddr);
1357 			memseg->pages[i].size = pg_size;
1358 			tmpaddr += pg_size;
1359 		}
1360 
1361 	}
1362 
1363 	/* save all cookies */
1364 	bcopy(cookie, memseg->cookies, ccount * sizeof (ldc_mem_cookie_t));
1365 
1366 	/* update memseg_t */
1367 	memseg->raddr = memseg->pages[0].raddr;
1368 	memseg->size = (mtype == LDC_SHADOW_MAP) ? exp_size : map_size;
1369 	memseg->npages = npages;
1370 	memseg->ncookies = ccount;
1371 	memseg->next_cookie = 0;
1372 
1373 	/* memory handle = mapped */
1374 	mhdl->mtype = mtype;
1375 	mhdl->perm = perm;
1376 	mhdl->status = LDC_MAPPED;
1377 
1378 	D1(ldcp->id, "ldc_mem_map: (0x%llx) mapped 0x%llx, ra=0x%llx, "
1379 	    "va=0x%llx, pgs=0x%llx cookies=0x%llx\n",
1380 	    ldcp->id, mhdl, memseg->raddr, memseg->vaddr,
1381 	    memseg->npages, memseg->ncookies);
1382 
1383 	if (mtype == LDC_SHADOW_MAP)
1384 		base_off = 0;
1385 	if (raddr)
1386 		*raddr = (caddr_t)(memseg->raddr | base_off);
1387 	if (vaddr)
1388 		*vaddr = (caddr_t)((uintptr_t)memseg->vaddr | base_off);
1389 
1390 	mutex_exit(&ldcp->lock);
1391 	mutex_exit(&mhdl->lock);
1392 	return (0);
1393 }
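
/*
 * Import sketch (illustrative; the peer's cookies arrive over some
 * out-of-band protocol, 'lh' is assumed). With LDC_SHADOW_MAP and a
 * NULL 'va' the framework allocates the shadow buffer itself:
 *
 *	ldc_mem_handle_t mh;
 *	caddr_t va = NULL, ra = NULL;
 *
 *	(void) ldc_mem_alloc_handle(lh, &mh);
 *	(void) ldc_mem_map(mh, cookies, ccount,
 *	    LDC_SHADOW_MAP, LDC_MEM_RW, &va, &ra);
 */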
1394 
1395 /*
1396  * Unmap a memory segment. Free shadow memory (if any).
1397  */
1398 int
1399 ldc_mem_unmap(ldc_mem_handle_t mhandle)
1400 {
1401 	int		i, rv;
1402 	ldc_mhdl_t	*mhdl = (ldc_mhdl_t *)mhandle;
1403 	ldc_chan_t 	*ldcp;
1404 	ldc_memseg_t	*memseg;
1405 
1406 	if (mhdl == NULL || mhdl->status != LDC_MAPPED) {
1407 		DWARN(DBG_ALL_LDCS,
1408 		    "ldc_mem_unmap: (0x%llx) handle is not mapped\n",
1409 		    mhandle);
1410 		return (EINVAL);
1411 	}
1412 
1413 	mutex_enter(&mhdl->lock);
1414 
1415 	ldcp = mhdl->ldcp;
1416 	memseg = mhdl->memseg;
1417 
1418 	D1(ldcp->id, "ldc_mem_unmap: (0x%llx) unmapping handle 0x%llx\n",
1419 	    ldcp->id, mhdl);
1420 
1421 	/* if we allocated shadow memory - free it */
1422 	if (mhdl->mtype == LDC_SHADOW_MAP && mhdl->myshadow) {
1423 		kmem_free(memseg->vaddr, memseg->size);
1424 	} else if (mhdl->mtype == LDC_DIRECT_MAP) {
1425 
1426 		/* unmap in the case of DIRECT_MAP */
1427 		hat_unload(kas.a_hat, memseg->vaddr, memseg->size,
1428 		    HAT_UNLOAD_UNLOCK);
1429 
1430 		for (i = 0; i < memseg->npages; i++) {
1431 			rv = hv_ldc_unmap(memseg->pages[i].raddr);
1432 			if (rv) {
1433 				cmn_err(CE_WARN,
1434 				    "ldc_mem_map: (0x%lx) hv unmap err %d\n",
1435 				    ldcp->id, rv);
1436 			}
1437 		}
1438 
1439 		vmem_free(heap_arena, (void *)memseg->vaddr, memseg->size);
1440 	}
1441 
1442 	/* free the allocated memseg and page structures */
1443 	kmem_free(memseg->pages, (sizeof (ldc_page_t) * memseg->npages));
1444 	kmem_free(memseg->cookies,
1445 	    (sizeof (ldc_mem_cookie_t) * memseg->ncookies));
1446 	kmem_cache_free(ldcssp->memseg_cache, memseg);
1447 
1448 	/* uninitialize the memory handle */
1449 	mhdl->memseg = NULL;
1450 	mhdl->status = LDC_UNBOUND;
1451 
1452 	D1(ldcp->id, "ldc_mem_unmap: (0x%llx) unmapped handle 0x%llx\n",
1453 	    ldcp->id, mhdl);
1454 
1455 	mutex_exit(&mhdl->lock);
1456 	return (0);
1457 }
1458 
1459 /*
1460  * Internal entry point for LDC mapped memory entry consistency
1461  * semantics. Acquire copies the contents of the remote memory
1462  * into the local shadow copy. The release operation copies the local
1463  * contents into the remote memory. The offset and size specify the
1464  * bounds for the memory range being synchronized.
1465  */
1466 static int
1467 i_ldc_mem_acquire_release(ldc_mem_handle_t mhandle, uint8_t direction,
1468     uint64_t offset, size_t size)
1469 {
1470 	int 		err;
1471 	ldc_mhdl_t	*mhdl;
1472 	ldc_chan_t	*ldcp;
1473 	ldc_memseg_t	*memseg;
1474 	caddr_t		local_vaddr;
1475 	size_t		copy_size;
1476 
1477 	if (mhandle == NULL) {
1478 		DWARN(DBG_ALL_LDCS,
1479 		    "i_ldc_mem_acquire_release: invalid memory handle\n");
1480 		return (EINVAL);
1481 	}
1482 	mhdl = (ldc_mhdl_t *)mhandle;
1483 
1484 	mutex_enter(&mhdl->lock);
1485 
1486 	if (mhdl->status != LDC_MAPPED || mhdl->ldcp == NULL) {
1487 		DWARN(DBG_ALL_LDCS,
1488 		    "i_ldc_mem_acquire_release: not mapped memory\n");
1489 		mutex_exit(&mhdl->lock);
1490 		return (EINVAL);
1491 	}
1492 
1493 	/* do nothing for direct map */
1494 	if (mhdl->mtype == LDC_DIRECT_MAP) {
1495 		mutex_exit(&mhdl->lock);
1496 		return (0);
1497 	}
1498 
1499 	/* do nothing for COPY_IN without MEM_R, or COPY_OUT without MEM_W */
1500 	if ((direction == LDC_COPY_IN && (mhdl->perm & LDC_MEM_R) == 0) ||
1501 	    (direction == LDC_COPY_OUT && (mhdl->perm & LDC_MEM_W) == 0)) {
1502 		mutex_exit(&mhdl->lock);
1503 		return (0);
1504 	}
1505 
1506 	if (offset >= mhdl->memseg->size ||
1507 	    (offset + size) > mhdl->memseg->size) {
1508 		DWARN(DBG_ALL_LDCS,
1509 		    "i_ldc_mem_acquire_release: memory out of range\n");
1510 		mutex_exit(&mhdl->lock);
1511 		return (EINVAL);
1512 	}
1513 
1514 	/* get the channel handle and memory segment */
1515 	ldcp = mhdl->ldcp;
1516 	memseg = mhdl->memseg;
1517 
1518 	if (mhdl->mtype == LDC_SHADOW_MAP) {
1519 
1520 		local_vaddr = memseg->vaddr + offset;
1521 		copy_size = size;
1522 
1523 		/* copy to/from remote from/to local memory */
1524 		err = ldc_mem_copy((ldc_handle_t)ldcp, local_vaddr, offset,
1525 		    &copy_size, memseg->cookies, memseg->ncookies,
1526 		    direction);
1527 		if (err || copy_size != size) {
1528 			DWARN(ldcp->id,
1529 			    "i_ldc_mem_acquire_release: copy failed\n");
1530 			mutex_exit(&mhdl->lock);
1531 			return (err);
1532 		}
1533 	}
1534 
1535 	mutex_exit(&mhdl->lock);
1536 
1537 	return (0);
1538 }
1539 
1540 /*
1541  * Ensure that the contents in the remote memory seg are consistent
1542  * with the contents if of local segment
1543  */
1544 int
1545 ldc_mem_acquire(ldc_mem_handle_t mhandle, uint64_t offset, uint64_t size)
1546 {
1547 	return (i_ldc_mem_acquire_release(mhandle, LDC_COPY_IN, offset, size));
1548 }
1549 
1550 
1551 /*
1552  * Ensure that the contents of the remote memory seg are consistent
1553  * with the contents of the local (shadow) segment
1554  */
1555 int
1556 ldc_mem_release(ldc_mem_handle_t mhandle, uint64_t offset, uint64_t size)
1557 {
1558 	return (i_ldc_mem_acquire_release(mhandle, LDC_COPY_OUT, offset, size));
1559 }
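
/*
 * Typical shadow-map access pattern (a sketch; 'mh', 'va', 'off' and
 * 'sz' are assumptions). Acquire pulls the remote contents into the
 * local shadow before reading; release pushes local updates back:
 *
 *	(void) ldc_mem_acquire(mh, off, sz);
 *	... read/modify the shadow at va + off ...
 *	(void) ldc_mem_release(mh, off, sz);
 */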
1560 
1561 /*
1562  * Allocate a descriptor ring. The size of each descriptor must be
1563  * 8-byte aligned; the total ring size is rounded up to a multiple
1564  * of MMU_PAGESIZE.
1565  */
1566 int
1567 ldc_mem_dring_create(uint32_t len, uint32_t dsize, ldc_dring_handle_t *dhandle)
1568 {
1569 	ldc_dring_t *dringp;
1570 	size_t size = (dsize * len);
1571 
1572 	D1(DBG_ALL_LDCS, "ldc_mem_dring_create: len=0x%x, size=0x%x\n",
1573 	    len, dsize);
1574 
1575 	if (dhandle == NULL) {
1576 		DWARN(DBG_ALL_LDCS, "ldc_mem_dring_create: invalid dhandle\n");
1577 		return (EINVAL);
1578 	}
1579 
1580 	if (len == 0) {
1581 		DWARN(DBG_ALL_LDCS, "ldc_mem_dring_create: invalid length\n");
1582 		return (EINVAL);
1583 	}
1584 
1585 	/* descriptor size should be 8-byte aligned */
1586 	if (dsize == 0 || (dsize & 0x7)) {
1587 		DWARN(DBG_ALL_LDCS, "ldc_mem_dring_create: invalid size\n");
1588 		return (EINVAL);
1589 	}
1590 
1591 	*dhandle = 0;
1592 
1593 	/* Allocate a desc ring structure */
1594 	dringp = kmem_zalloc(sizeof (ldc_dring_t), KM_SLEEP);
1595 
1596 	/* Initialize dring */
1597 	dringp->length = len;
1598 	dringp->dsize = dsize;
1599 
1600 	/* round up to a multiple of pagesize */
1601 	dringp->size = (size & MMU_PAGEMASK);
1602 	if (size & MMU_PAGEOFFSET)
1603 		dringp->size += MMU_PAGESIZE;
1604 
1605 	dringp->status = LDC_UNBOUND;
1606 
1607 	/* allocate descriptor ring memory */
1608 	dringp->base = kmem_zalloc(dringp->size, KM_SLEEP);
1609 
1610 	/* initialize the desc ring lock */
1611 	mutex_init(&dringp->lock, NULL, MUTEX_DRIVER, NULL);
1612 
1613 	/* Add descriptor ring to the head of global list */
1614 	mutex_enter(&ldcssp->lock);
1615 	dringp->next = ldcssp->dring_list;
1616 	ldcssp->dring_list = dringp;
1617 	mutex_exit(&ldcssp->lock);
1618 
1619 	*dhandle = (ldc_dring_handle_t)dringp;
1620 
1621 	D1(DBG_ALL_LDCS, "ldc_mem_dring_create: dring allocated\n");
1622 
1623 	return (0);
1624 }
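
/*
 * Sizing example: 128 descriptors of 64 bytes each need 8192 bytes,
 * exactly one 8K page, so dringp->size stays 0x2000. A ring of
 * 100 x 56-byte descriptors (5600 bytes) is likewise rounded up to
 * a full 8K page.
 */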
1625 
1626 
1627 /*
1628  * Destroy a descriptor ring.
1629  */
1630 int
1631 ldc_mem_dring_destroy(ldc_dring_handle_t dhandle)
1632 {
1633 	ldc_dring_t *dringp;
1634 	ldc_dring_t *tmp_dringp;
1635 
1636 	D1(DBG_ALL_LDCS, "ldc_mem_dring_destroy: entered\n");
1637 
1638 	if (dhandle == NULL) {
1639 		DWARN(DBG_ALL_LDCS,
1640 		    "ldc_mem_dring_destroy: invalid desc ring handle\n");
1641 		return (EINVAL);
1642 	}
1643 	dringp = (ldc_dring_t *)dhandle;
1644 
1645 	if (dringp->status == LDC_BOUND) {
1646 		DWARN(DBG_ALL_LDCS,
1647 		    "ldc_mem_dring_destroy: desc ring is bound\n");
1648 		return (EACCES);
1649 	}
1650 
1651 	mutex_enter(&dringp->lock);
1652 	mutex_enter(&ldcssp->lock);
1653 
1654 	/* remove from linked list - if not bound */
1655 	tmp_dringp = ldcssp->dring_list;
1656 	if (tmp_dringp == dringp) {
1657 		ldcssp->dring_list = dringp->next;
1658 		dringp->next = NULL;
1659 
1660 	} else {
1661 		while (tmp_dringp != NULL) {
1662 			if (tmp_dringp->next == dringp) {
1663 				tmp_dringp->next = dringp->next;
1664 				dringp->next = NULL;
1665 				break;
1666 			}
1667 			tmp_dringp = tmp_dringp->next;
1668 		}
1669 		if (tmp_dringp == NULL) {
1670 			DWARN(DBG_ALL_LDCS,
1671 			    "ldc_mem_dring_destroy: invalid descriptor\n");
1672 			mutex_exit(&ldcssp->lock);
1673 			mutex_exit(&dringp->lock);
1674 			return (EINVAL);
1675 		}
1676 	}
1677 
1678 	mutex_exit(&ldcssp->lock);
1679 
1680 	/* free the descriptor ring */
1681 	kmem_free(dringp->base, dringp->size);
1682 
1683 	mutex_exit(&dringp->lock);
1684 
1685 	/* destroy dring lock */
1686 	mutex_destroy(&dringp->lock);
1687 
1688 	/* free desc ring object */
1689 	kmem_free(dringp, sizeof (ldc_dring_t));
1690 
1691 	return (0);
1692 }
1693 
1694 /*
1695  * Bind a previously allocated dring to a channel. The channel should
1696  * be OPEN in order to bind the ring to the channel. Returns back a
1697  * descriptor ring cookie. The descriptor ring is exported for remote
1698  * access by the client at the other end of the channel. Entries for
1699  * the dring pages are stored in the map table (via ldc_mem_bind_handle).
1700  */
1701 int
1702 ldc_mem_dring_bind(ldc_handle_t handle, ldc_dring_handle_t dhandle,
1703     uint8_t mtype, uint8_t perm, ldc_mem_cookie_t *cookie, uint32_t *ccount)
1704 {
1705 	int		err;
1706 	ldc_chan_t 	*ldcp;
1707 	ldc_dring_t	*dringp;
1708 	ldc_mem_handle_t mhandle;
1709 
1710 	/* check to see if channel is initialized */
1711 	if (handle == NULL) {
1712 		DWARN(DBG_ALL_LDCS,
1713 		    "ldc_mem_dring_bind: invalid channel handle\n");
1714 		return (EINVAL);
1715 	}
1716 	ldcp = (ldc_chan_t *)handle;
1717 
1718 	if (dhandle == NULL) {
1719 		DWARN(DBG_ALL_LDCS,
1720 		    "ldc_mem_dring_bind: invalid desc ring handle\n");
1721 		return (EINVAL);
1722 	}
1723 	dringp = (ldc_dring_t *)dhandle;
1724 
1725 	if (cookie == NULL) {
1726 		DWARN(ldcp->id,
1727 		    "ldc_mem_dring_bind: invalid cookie arg\n");
1728 		return (EINVAL);
1729 	}
1730 
1731 	mutex_enter(&dringp->lock);
1732 
1733 	if (dringp->status == LDC_BOUND) {
1734 		DWARN(DBG_ALL_LDCS,
1735 		    "ldc_mem_dring_bind: (0x%llx) descriptor ring is bound\n",
1736 		    ldcp->id);
1737 		mutex_exit(&dringp->lock);
1738 		return (EINVAL);
1739 	}
1740 
1741 	if ((perm & LDC_MEM_RW) == 0) {
1742 		DWARN(DBG_ALL_LDCS,
1743 		    "ldc_mem_dring_bind: invalid permissions\n");
1744 		mutex_exit(&dringp->lock);
1745 		return (EINVAL);
1746 	}
1747 
1748 	if ((mtype & (LDC_SHADOW_MAP|LDC_DIRECT_MAP|LDC_IO_MAP)) == 0) {
1749 		DWARN(DBG_ALL_LDCS, "ldc_mem_dring_bind: invalid type\n");
1750 		mutex_exit(&dringp->lock);
1751 		return (EINVAL);
1752 	}
1753 
1754 	dringp->ldcp = ldcp;
1755 
1756 	/* create a memory handle */
1757 	err = ldc_mem_alloc_handle(handle, &mhandle);
1758 	if (err || mhandle == NULL) {
1759 		DWARN(DBG_ALL_LDCS,
1760 		    "ldc_mem_dring_bind: (0x%llx) error allocating mhandle\n",
1761 		    ldcp->id);
1762 		mutex_exit(&dringp->lock);
1763 		return (err);
1764 	}
1765 	dringp->mhdl = mhandle;
1766 
1767 	/* bind the descriptor ring to channel */
1768 	err = ldc_mem_bind_handle(mhandle, dringp->base, dringp->size,
1769 	    mtype, perm, cookie, ccount);
1770 	if (err) {
1771 		DWARN(ldcp->id,
1772 		    "ldc_mem_dring_bind: (0x%llx) error binding mhandle\n",
1773 		    ldcp->id);
1774 		mutex_exit(&dringp->lock);
1775 		return (err);
1776 	}
1777 
1778 	/*
1779 	 * For now return error if we get more than one cookie
1780 	 * FUTURE: Return multiple cookies ..
1781 	 */
1782 	if (*ccount > 1) {
1783 		(void) ldc_mem_unbind_handle(mhandle);
1784 		(void) ldc_mem_free_handle(mhandle);
1785 
1786 		dringp->ldcp = NULL;
1787 		dringp->mhdl = NULL;
1788 		*ccount = 0;
1789 
1790 		mutex_exit(&dringp->lock);
1791 		return (EAGAIN);
1792 	}
1793 
1794 	/* Add descriptor ring to channel's exported dring list */
1795 	mutex_enter(&ldcp->exp_dlist_lock);
1796 	dringp->ch_next = ldcp->exp_dring_list;
1797 	ldcp->exp_dring_list = dringp;
1798 	mutex_exit(&ldcp->exp_dlist_lock);
1799 
1800 	dringp->status = LDC_BOUND;
1801 
1802 	mutex_exit(&dringp->lock);
1803 
1804 	return (0);
1805 }
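
/*
 * Dring export sketch (illustrative; 'lh' is assumed, errors elided).
 * Note that a bind yielding more than one cookie is currently undone
 * and EAGAIN returned (FUTURE: multiple cookies), so the caller may
 * need to retry:
 *
 *	ldc_dring_handle_t dh;
 *	ldc_mem_cookie_t dcookie;
 *	uint32_t ccount;
 *
 *	(void) ldc_mem_dring_create(128, 64, &dh);
 *	(void) ldc_mem_dring_bind(lh, dh, LDC_SHADOW_MAP,
 *	    LDC_MEM_RW, &dcookie, &ccount);
 */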

/*
 * Return the next cookie associated with the specified dring handle
 */
int
ldc_mem_dring_nextcookie(ldc_dring_handle_t dhandle, ldc_mem_cookie_t *cookie)
{
	int		rv = 0;
	ldc_dring_t	*dringp;
	ldc_chan_t	*ldcp;

	if (dhandle == NULL) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_dring_nextcookie: invalid desc ring handle\n");
		return (EINVAL);
	}
	dringp = (ldc_dring_t *)dhandle;
	mutex_enter(&dringp->lock);

	if (dringp->status != LDC_BOUND) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_dring_nextcookie: descriptor ring 0x%llx "
		    "is not bound\n", dringp);
		mutex_exit(&dringp->lock);
		return (EINVAL);
	}

	ldcp = dringp->ldcp;

	if (cookie == NULL) {
		DWARN(ldcp->id,
		    "ldc_mem_dring_nextcookie:(0x%llx) invalid cookie arg\n",
		    ldcp->id);
		mutex_exit(&dringp->lock);
		return (EINVAL);
	}

	rv = ldc_mem_nextcookie((ldc_mem_handle_t)dringp->mhdl, cookie);
	mutex_exit(&dringp->lock);

	return (rv);
}
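
/*
 * Example (hypothetical sketch): if a bind were ever to return multiple
 * cookies (see the FUTURE note in ldc_mem_dring_bind()), a caller
 * holding 'ccount' from the bind would fetch the remaining cookies one
 * at a time and forward each to the peer as well; 'dring_hdl' is
 * assumed to be a bound ring handle.
 *
 *	ldc_mem_cookie_t cookie;
 *	uint32_t i;
 *
 *	for (i = 1; i < ccount; i++) {
 *		if (ldc_mem_dring_nextcookie(dring_hdl, &cookie) != 0)
 *			break;
 *	}
 */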

/*
 * Unbind a previously bound dring from a channel.
 */
int
ldc_mem_dring_unbind(ldc_dring_handle_t dhandle)
{
	ldc_dring_t	*dringp;
	ldc_dring_t	*tmp_dringp;
	ldc_chan_t	*ldcp;

	if (dhandle == NULL) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_dring_unbind: invalid desc ring handle\n");
		return (EINVAL);
	}
	dringp = (ldc_dring_t *)dhandle;

	mutex_enter(&dringp->lock);

	if (dringp->status == LDC_UNBOUND) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_dring_unbind: descriptor ring 0x%llx is unbound\n",
		    dringp);
		mutex_exit(&dringp->lock);
		return (EINVAL);
	}
	ldcp = dringp->ldcp;

	mutex_enter(&ldcp->exp_dlist_lock);

	tmp_dringp = ldcp->exp_dring_list;
	if (tmp_dringp == dringp) {
		ldcp->exp_dring_list = dringp->ch_next;
		dringp->ch_next = NULL;

	} else {
		while (tmp_dringp != NULL) {
			if (tmp_dringp->ch_next == dringp) {
				tmp_dringp->ch_next = dringp->ch_next;
				dringp->ch_next = NULL;
				break;
			}
			tmp_dringp = tmp_dringp->ch_next;
		}
		if (tmp_dringp == NULL) {
			DWARN(DBG_ALL_LDCS,
			    "ldc_mem_dring_unbind: invalid descriptor\n");
			mutex_exit(&ldcp->exp_dlist_lock);
			mutex_exit(&dringp->lock);
			return (EINVAL);
		}
	}

	mutex_exit(&ldcp->exp_dlist_lock);

	(void) ldc_mem_unbind_handle((ldc_mem_handle_t)dringp->mhdl);
	(void) ldc_mem_free_handle((ldc_mem_handle_t)dringp->mhdl);

	dringp->ldcp = NULL;
	dringp->mhdl = NULL;
	dringp->status = LDC_UNBOUND;

	mutex_exit(&dringp->lock);

	return (0);
}

/*
 * Get information about the dring. The base address of the descriptor
 * ring, along with its type and permission, is returned.
 */
int
ldc_mem_dring_info(ldc_dring_handle_t dhandle, ldc_mem_info_t *minfo)
{
	ldc_dring_t	*dringp;
	int		rv;

	if (dhandle == NULL) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_dring_info: invalid desc ring handle\n");
		return (EINVAL);
	}
	dringp = (ldc_dring_t *)dhandle;

	mutex_enter(&dringp->lock);

	if (dringp->mhdl) {
		rv = ldc_mem_info(dringp->mhdl, minfo);
		if (rv) {
			DWARN(DBG_ALL_LDCS,
			    "ldc_mem_dring_info: error reading mem info\n");
			mutex_exit(&dringp->lock);
			return (rv);
		}
	} else {
		minfo->vaddr = dringp->base;
		minfo->raddr = NULL;
		minfo->status = dringp->status;
	}

	mutex_exit(&dringp->lock);

	return (0);
}
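
/*
 * Example (an illustrative sketch): a caller can check whether a ring
 * is still bound before tearing it down; 'dring_hdl' is hypothetical.
 *
 *	ldc_mem_info_t minfo;
 *
 *	if (ldc_mem_dring_info(dring_hdl, &minfo) == 0 &&
 *	    minfo.status == LDC_BOUND)
 *		(void) ldc_mem_dring_unbind(dring_hdl);
 */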

/*
 * Map an exported descriptor ring into the local address space. If the
 * descriptor ring was exported for direct map access, a HV call is made
 * to allocate a RA range. If the map is done via a shadow copy, local
 * shadow memory is allocated.
 */
int
ldc_mem_dring_map(ldc_handle_t handle, ldc_mem_cookie_t *cookie,
    uint32_t ccount, uint32_t len, uint32_t dsize, uint8_t mtype,
    ldc_dring_handle_t *dhandle)
{
	int		err;
	ldc_chan_t	*ldcp;
	ldc_mem_handle_t mhandle;
	ldc_dring_t	*dringp;
	size_t		dring_size;

	if (dhandle == NULL) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_dring_map: invalid dhandle\n");
		return (EINVAL);
	}

	/* check to see if channel is initialized */
	if (handle == NULL) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_dring_map: invalid channel handle\n");
		return (EINVAL);
	}
	ldcp = (ldc_chan_t *)handle;

	if (cookie == NULL) {
		DWARN(ldcp->id,
		    "ldc_mem_dring_map: (0x%llx) invalid cookie\n",
		    ldcp->id);
		return (EINVAL);
	}

	/* FUTURE: For now we support only one cookie per dring */
	ASSERT(ccount == 1);

	if (cookie->size < (dsize * len)) {
		DWARN(ldcp->id,
		    "ldc_mem_dring_map: (0x%llx) invalid dsize/len\n",
		    ldcp->id);
		return (EINVAL);
	}

	*dhandle = 0;

	/* Allocate a dring structure */
	dringp = kmem_zalloc(sizeof (ldc_dring_t), KM_SLEEP);

	D1(ldcp->id,
	    "ldc_mem_dring_map: 0x%x,0x%x,0x%x,0x%llx,0x%llx\n",
	    mtype, len, dsize, cookie->addr, cookie->size);

	/* Initialize dring */
	dringp->length = len;
	dringp->dsize = dsize;

	/* round up to a multiple of the page size */
	dring_size = len * dsize;
	dringp->size = (dring_size & MMU_PAGEMASK);
	if (dring_size & MMU_PAGEOFFSET)
		dringp->size += MMU_PAGESIZE;

	dringp->ldcp = ldcp;

	/* create a memory handle */
	err = ldc_mem_alloc_handle(handle, &mhandle);
	if (err || mhandle == NULL) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_dring_map: cannot alloc hdl err=%d\n",
		    err);
		kmem_free(dringp, sizeof (ldc_dring_t));
		return (ENOMEM);
	}

	dringp->mhdl = mhandle;
	dringp->base = NULL;

	/* map the dring into local memory */
	err = ldc_mem_map(mhandle, cookie, ccount, mtype, LDC_MEM_RW,
	    &(dringp->base), NULL);
	if (err || dringp->base == NULL) {
		cmn_err(CE_WARN,
		    "ldc_mem_dring_map: cannot map desc ring err=%d\n", err);
		(void) ldc_mem_free_handle(mhandle);
		kmem_free(dringp, sizeof (ldc_dring_t));
		return (ENOMEM);
	}

	/* initialize the desc ring lock */
	mutex_init(&dringp->lock, NULL, MUTEX_DRIVER, NULL);

	/* Add descriptor ring to channel's imported dring list */
	mutex_enter(&ldcp->imp_dlist_lock);
	dringp->ch_next = ldcp->imp_dring_list;
	ldcp->imp_dring_list = dringp;
	mutex_exit(&ldcp->imp_dlist_lock);

	dringp->status = LDC_MAPPED;

	*dhandle = (ldc_dring_handle_t)dringp;

	return (0);
}
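
/*
 * Example (an illustrative sketch): the importing peer maps the ring
 * using the cookie received over the channel. 'chan_hdl' and 'cookie'
 * are hypothetical, and 'nentries'/'dsize' must match the exporter's
 * ldc_mem_dring_create() parameters.
 *
 *	ldc_dring_handle_t dring_hdl;
 *	int rv;
 *
 *	rv = ldc_mem_dring_map(chan_hdl, &cookie, 1, nentries, dsize,
 *	    LDC_SHADOW_MAP, &dring_hdl);
 *
 * With LDC_SHADOW_MAP, accesses go through the local shadow buffer and
 * must be bracketed by the acquire/release calls below.
 */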

/*
 * Unmap a descriptor ring. Free shadow memory (if any).
 */
int
ldc_mem_dring_unmap(ldc_dring_handle_t dhandle)
{
	ldc_dring_t	*dringp;
	ldc_dring_t	*tmp_dringp;
	ldc_chan_t	*ldcp;

	if (dhandle == NULL) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_dring_unmap: invalid desc ring handle\n");
		return (EINVAL);
	}
	dringp = (ldc_dring_t *)dhandle;

	if (dringp->status != LDC_MAPPED) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_dring_unmap: not a mapped desc ring\n");
		return (EINVAL);
	}

	mutex_enter(&dringp->lock);

	ldcp = dringp->ldcp;

	mutex_enter(&ldcp->imp_dlist_lock);

	/* find and unlink the desc ring from channel import list */
	tmp_dringp = ldcp->imp_dring_list;
	if (tmp_dringp == dringp) {
		ldcp->imp_dring_list = dringp->ch_next;
		dringp->ch_next = NULL;

	} else {
		while (tmp_dringp != NULL) {
			if (tmp_dringp->ch_next == dringp) {
				tmp_dringp->ch_next = dringp->ch_next;
				dringp->ch_next = NULL;
				break;
			}
			tmp_dringp = tmp_dringp->ch_next;
		}
		if (tmp_dringp == NULL) {
			DWARN(DBG_ALL_LDCS,
			    "ldc_mem_dring_unmap: invalid descriptor\n");
			mutex_exit(&ldcp->imp_dlist_lock);
			mutex_exit(&dringp->lock);
			return (EINVAL);
		}
	}

	mutex_exit(&ldcp->imp_dlist_lock);

	/* do an LDC memory handle unmap and free */
	(void) ldc_mem_unmap(dringp->mhdl);
	(void) ldc_mem_free_handle((ldc_mem_handle_t)dringp->mhdl);

	dringp->status = 0;
	dringp->ldcp = NULL;

	mutex_exit(&dringp->lock);

	/* destroy dring lock */
	mutex_destroy(&dringp->lock);

	/* free desc ring object */
	kmem_free(dringp, sizeof (ldc_dring_t));

	return (0);
}

/*
 * Internal entry point for descriptor ring entry consistency semantics.
 * Acquire copies the contents of the remote descriptor ring into the
 * local shadow copy. The release operation copies the local contents
 * into the remote dring. The start and end locations specify bounds
 * for the entries being synchronized; if start is greater than end,
 * the range wraps around the end of the ring.
 */
static int
i_ldc_dring_acquire_release(ldc_dring_handle_t dhandle,
    uint8_t direction, uint64_t start, uint64_t end)
{
	int			err;
	ldc_dring_t		*dringp;
	ldc_chan_t		*ldcp;
	uint64_t		soff;
	size_t			copy_size;

	if (dhandle == NULL) {
		DWARN(DBG_ALL_LDCS,
		    "i_ldc_dring_acquire_release: invalid desc ring handle\n");
		return (EINVAL);
	}
	dringp = (ldc_dring_t *)dhandle;
	mutex_enter(&dringp->lock);

	if (dringp->status != LDC_MAPPED || dringp->ldcp == NULL) {
		DWARN(DBG_ALL_LDCS,
		    "i_ldc_dring_acquire_release: not a mapped desc ring\n");
		mutex_exit(&dringp->lock);
		return (EINVAL);
	}

	if (start >= dringp->length || end >= dringp->length) {
		DWARN(DBG_ALL_LDCS,
		    "i_ldc_dring_acquire_release: index out of range\n");
		mutex_exit(&dringp->lock);
		return (EINVAL);
	}

	/* get the channel handle */
	ldcp = dringp->ldcp;

	copy_size = (start <= end) ? (((end - start) + 1) * dringp->dsize) :
	    ((dringp->length - start) * dringp->dsize);

	/* Calculate the relative offset for the first desc */
	soff = (start * dringp->dsize);

	/* copy to/from remote from/to local memory */
	D1(ldcp->id, "i_ldc_dring_acquire_release: c1 off=0x%llx sz=0x%llx\n",
	    soff, copy_size);
	err = i_ldc_mem_acquire_release((ldc_mem_handle_t)dringp->mhdl,
	    direction, soff, copy_size);
	if (err) {
		DWARN(ldcp->id,
		    "i_ldc_dring_acquire_release: copy failed\n");
		mutex_exit(&dringp->lock);
		return (err);
	}

	/* copy the remaining wrapped-around portion, if any */
	if (start > end) {
		copy_size = ((end + 1) * dringp->dsize);
		soff = 0;

		/* copy to/from remote from/to local memory */
		D1(ldcp->id, "i_ldc_dring_acquire_release: c2 "
		    "off=0x%llx sz=0x%llx\n", soff, copy_size);
		err = i_ldc_mem_acquire_release((ldc_mem_handle_t)dringp->mhdl,
		    direction, soff, copy_size);
		if (err) {
			DWARN(ldcp->id,
			    "i_ldc_dring_acquire_release: copy failed\n");
			mutex_exit(&dringp->lock);
			return (err);
		}
	}

	mutex_exit(&dringp->lock);

	return (0);
}
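
/*
 * Worked example of the two-copy split above: for a ring of length 10
 * (entry size d), a request with start = 8 and end = 2 is synchronized
 * as two copies; first entries 8-9 (soff = 8 * d, copy_size = 2 * d),
 * then entries 0-2 (soff = 0, copy_size = 3 * d).
 */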

/*
 * Ensure that the contents of the local dring are consistent
 * with the contents of the remote dring.
 */
int
ldc_mem_dring_acquire(ldc_dring_handle_t dhandle, uint64_t start, uint64_t end)
{
	return (i_ldc_dring_acquire_release(dhandle, LDC_COPY_IN, start, end));
}

/*
 * Ensure that the contents of the remote dring are consistent
 * with the contents of the local dring.
 */
int
ldc_mem_dring_release(ldc_dring_handle_t dhandle, uint64_t start, uint64_t end)
{
	return (i_ldc_dring_acquire_release(dhandle, LDC_COPY_OUT, start, end));
}
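
/*
 * Example (an illustrative sketch): a consumer working on a mapped
 * ring brackets each descriptor access with acquire/release so the
 * local shadow copy stays consistent with the exporter's memory.
 * 'dring_hdl' and 'idx' are hypothetical, and process_desc() stands
 * in for whatever work the client does on the entry.
 *
 *	if (ldc_mem_dring_acquire(dring_hdl, idx, idx) == 0) {
 *		process_desc(dring_hdl, idx);
 *		(void) ldc_mem_dring_release(dring_hdl, idx, idx);
 *	}
 */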