120ae46ebSha137994 /*
220ae46ebSha137994 * CDDL HEADER START
320ae46ebSha137994 *
420ae46ebSha137994 * The contents of this file are subject to the terms of the
520ae46ebSha137994 * Common Development and Distribution License (the "License").
620ae46ebSha137994 * You may not use this file except in compliance with the License.
720ae46ebSha137994 *
820ae46ebSha137994 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
920ae46ebSha137994 * or http://www.opensolaris.org/os/licensing.
1020ae46ebSha137994 * See the License for the specific language governing permissions
1120ae46ebSha137994 * and limitations under the License.
1220ae46ebSha137994 *
1320ae46ebSha137994 * When distributing Covered Code, include this CDDL HEADER in each
1420ae46ebSha137994 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
1520ae46ebSha137994 * If applicable, add the following below this CDDL HEADER, with the
1620ae46ebSha137994 * fields enclosed by brackets "[]" replaced with your own identifying
1720ae46ebSha137994 * information: Portions Copyright [yyyy] [name of copyright owner]
1820ae46ebSha137994 *
1920ae46ebSha137994 * CDDL HEADER END
2020ae46ebSha137994 */
2120ae46ebSha137994
2220ae46ebSha137994 /*
23*34f94fbcSWENTAO YANG * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
2420ae46ebSha137994 */
2520ae46ebSha137994
2620ae46ebSha137994 /*
2720ae46ebSha137994 * sun4v LDC Link Layer Shared Memory Routines
2820ae46ebSha137994 */
2920ae46ebSha137994 #include <sys/types.h>
3020ae46ebSha137994 #include <sys/kmem.h>
3120ae46ebSha137994 #include <sys/cmn_err.h>
3220ae46ebSha137994 #include <sys/ksynch.h>
3320ae46ebSha137994 #include <sys/debug.h>
3420ae46ebSha137994 #include <sys/cyclic.h>
3520ae46ebSha137994 #include <sys/machsystm.h>
3620ae46ebSha137994 #include <sys/vm.h>
3720ae46ebSha137994 #include <sys/machcpuvar.h>
3820ae46ebSha137994 #include <sys/mmu.h>
3920ae46ebSha137994 #include <sys/pte.h>
4020ae46ebSha137994 #include <vm/hat.h>
4120ae46ebSha137994 #include <vm/as.h>
4220ae46ebSha137994 #include <vm/hat_sfmmu.h>
4320ae46ebSha137994 #include <sys/vm_machparam.h>
4420ae46ebSha137994 #include <vm/seg_kmem.h>
4520ae46ebSha137994 #include <vm/seg_kpm.h>
4620ae46ebSha137994 #include <sys/hypervisor_api.h>
4720ae46ebSha137994 #include <sys/ldc.h>
4820ae46ebSha137994 #include <sys/ldc_impl.h>
4920ae46ebSha137994
5020ae46ebSha137994 /* LDC variables used by shared memory routines */
5120ae46ebSha137994 extern ldc_soft_state_t *ldcssp;
5220ae46ebSha137994 extern int ldc_max_retries;
5320ae46ebSha137994 extern clock_t ldc_delay;
5420ae46ebSha137994
5520ae46ebSha137994 #ifdef DEBUG
5620ae46ebSha137994 extern int ldcdbg;
5720ae46ebSha137994 #endif
5820ae46ebSha137994
5920ae46ebSha137994 /* LDC internal functions used by shared memory routines */
6020ae46ebSha137994 extern void i_ldc_reset(ldc_chan_t *ldcp, boolean_t force_reset);
6120ae46ebSha137994 extern int i_ldc_h2v_error(int h_error);
6220ae46ebSha137994
6320ae46ebSha137994 #ifdef DEBUG
6420ae46ebSha137994 extern void ldcdebug(int64_t id, const char *fmt, ...);
6520ae46ebSha137994 #endif
6620ae46ebSha137994
6720ae46ebSha137994 /* Memory synchronization internal functions */
6820ae46ebSha137994 static int i_ldc_mem_acquire_release(ldc_mem_handle_t mhandle,
6920ae46ebSha137994 uint8_t direction, uint64_t offset, size_t size);
7020ae46ebSha137994 static int i_ldc_dring_acquire_release(ldc_dring_handle_t dhandle,
7120ae46ebSha137994 uint8_t direction, uint64_t start, uint64_t end);
72bbfa0259Sha137994 static int i_ldc_mem_map(ldc_mem_handle_t mhandle, ldc_mem_cookie_t *cookie,
73bbfa0259Sha137994 uint32_t ccount, uint8_t mtype, uint8_t perm, caddr_t *vaddr,
74bbfa0259Sha137994 caddr_t *raddr);
75bbfa0259Sha137994 static int i_ldc_mem_bind_handle(ldc_mem_handle_t mhandle, caddr_t vaddr,
76bbfa0259Sha137994 size_t len, uint8_t mtype, uint8_t perm, ldc_mem_cookie_t *cookie,
77bbfa0259Sha137994 uint32_t *ccount);
7820ae46ebSha137994
7920ae46ebSha137994 /*
8020ae46ebSha137994 * LDC framework supports mapping remote domain's memory
8120ae46ebSha137994 * either directly or via shadow memory pages. Direct map
8220ae46ebSha137994 * access is permitted by default; setting 'ldc_shmem_enabled'
8320ae46ebSha137994 * to zero forces all mappings to use shadow copy.
8420ae46ebSha137994 */
85*34f94fbcSWENTAO YANG int ldc_shmem_enabled = 1;
8620ae46ebSha137994
8720ae46ebSha137994 /*
88bbfa0259Sha137994 * Use of directly mapped shared memory for LDC descriptor
89bbfa0259Sha137994 * rings is permitted if this variable is non-zero.
90bbfa0259Sha137994 */
91bbfa0259Sha137994 int ldc_dring_shmem_enabled = 1;
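/*
 * Illustrative note (not part of the driver): like other kernel tunables,
 * these flags would normally be overridden from /etc/system; assuming the
 * variables live in the "ldc" misc module, something like
 *
 *	set ldc:ldc_shmem_enabled = 0
 *	set ldc:ldc_dring_shmem_enabled = 0
 *
 * would force shadow-copy mappings for both data buffers and descriptor
 * rings on the next boot.
 */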
92bbfa0259Sha137994
93bbfa0259Sha137994 /*
94bbfa0259Sha137994 * The major and minor versions required to use directly
95bbfa0259Sha137994 * mapped shared memory for LDC descriptor rings. The
96bbfa0259Sha137994 * ldc_dring_shmem_hv_force variable, if set to a non-zero
97bbfa0259Sha137994 * value, overrides the hypervisor API version check.
98bbfa0259Sha137994 */
99bbfa0259Sha137994 static int ldc_dring_shmem_hv_major = 1;
100bbfa0259Sha137994 static int ldc_dring_shmem_hv_minor = 1;
101bbfa0259Sha137994 static int ldc_dring_shmem_hv_force = 0;
102bbfa0259Sha137994
103bbfa0259Sha137994 /*
104bbfa0259Sha137994 * The results of the hypervisor service group API check.
105bbfa0259Sha137994 * A non-zero value indicates the HV includes support for
106bbfa0259Sha137994 * descriptor ring shared memory.
107bbfa0259Sha137994 */
108bbfa0259Sha137994 static int ldc_dring_shmem_hv_ok = 0;
109bbfa0259Sha137994
110bbfa0259Sha137994 /*
11120ae46ebSha137994 * Pages exported for remote access over each channel are
11220ae46ebSha137994 * maintained in a table registered with the Hypervisor.
11320ae46ebSha137994 * The default number of entries in the table is set by
11420ae46ebSha137994 * 'ldc_maptable_entries'.
11520ae46ebSha137994 */
11620ae46ebSha137994 uint64_t ldc_maptable_entries = LDC_MTBL_ENTRIES;
11720ae46ebSha137994
11820ae46ebSha137994 #define IDX2COOKIE(idx, pg_szc, pg_shift) \
11920ae46ebSha137994 (((pg_szc) << LDC_COOKIE_PGSZC_SHIFT) | ((idx) << (pg_shift)))
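/*
 * Worked example (illustrative): with the 8K base page size, pg_shift is
 * 13 and page_szc(8K) is 0, so table index 5 encodes as
 *
 *	IDX2COOKIE(5, 0, 13) = (0 << LDC_COOKIE_PGSZC_SHIFT) | (5 << 13)
 *	                     = 0xA000
 *
 * ldc_mem_bind_handle() then ORs the intra-page offset into the low bits
 * of the cookie, and ldc_mem_copy() recovers that offset by masking with
 * (pg_size - 1).
 */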
12020ae46ebSha137994
12120ae46ebSha137994 /*
122*34f94fbcSWENTAO YANG * Pages imported over each channel are maintained in a global (per-guest)
123*34f94fbcSWENTAO YANG * mapin table. Starting with HV LDC API version 1.2, HV supports APIs to
124*34f94fbcSWENTAO YANG * obtain information about the total size of the memory that can be direct
125*34f94fbcSWENTAO YANG * mapped through this mapin table. The minimum size of the mapin area that we
126*34f94fbcSWENTAO YANG * expect is defined below.
127*34f94fbcSWENTAO YANG */
128*34f94fbcSWENTAO YANG #define GIGABYTE ((uint64_t)(1 << 30))
129*34f94fbcSWENTAO YANG uint64_t ldc_mapin_size_min = GIGABYTE;
130*34f94fbcSWENTAO YANG
131*34f94fbcSWENTAO YANG /* HV LDC API version that supports mapin size info */
132*34f94fbcSWENTAO YANG #define LDC_MAPIN_VER_MAJOR 1
133*34f94fbcSWENTAO YANG #define LDC_MAPIN_VER_MINOR 2
134*34f94fbcSWENTAO YANG
135*34f94fbcSWENTAO YANG /*
136bbfa0259Sha137994 * Sets ldc_dring_shmem_hv_ok to a non-zero value if the HV LDC
137bbfa0259Sha137994 * API version supports directly mapped shared memory or if it has
138bbfa0259Sha137994 * been explicitly enabled via ldc_dring_shmem_hv_force.
139bbfa0259Sha137994 */
140bbfa0259Sha137994 void
141bbfa0259Sha137994 i_ldc_mem_set_hsvc_vers(uint64_t major, uint64_t minor)
142bbfa0259Sha137994 {
143bbfa0259Sha137994 if ((major == ldc_dring_shmem_hv_major &&
144bbfa0259Sha137994 minor >= ldc_dring_shmem_hv_minor) ||
145bbfa0259Sha137994 (major > ldc_dring_shmem_hv_major) ||
146bbfa0259Sha137994 (ldc_dring_shmem_hv_force != 0)) {
147bbfa0259Sha137994 ldc_dring_shmem_hv_ok = 1;
148bbfa0259Sha137994 }
149bbfa0259Sha137994 }
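/*
 * Illustrative outcomes of the check above, assuming the default required
 * version of 1.1 and ldc_dring_shmem_hv_force == 0:
 *
 *	negotiated 1.0	-> ldc_dring_shmem_hv_ok stays 0
 *	negotiated 1.1	-> ldc_dring_shmem_hv_ok set to 1
 *	negotiated 1.2	-> ldc_dring_shmem_hv_ok set to 1
 *	negotiated 2.0	-> ldc_dring_shmem_hv_ok set to 1
 *
 * A non-zero ldc_dring_shmem_hv_force yields 1 regardless of the
 * negotiated version.
 */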
150bbfa0259Sha137994
151bbfa0259Sha137994 /*
152*34f94fbcSWENTAO YANG * Initialize the mapin table.
153*34f94fbcSWENTAO YANG */
154*34f94fbcSWENTAO YANG void
155*34f94fbcSWENTAO YANG i_ldc_init_mapin(ldc_soft_state_t *ldcssp, uint64_t major, uint64_t minor)
156*34f94fbcSWENTAO YANG {
157*34f94fbcSWENTAO YANG int rv;
158*34f94fbcSWENTAO YANG uint64_t sz;
159*34f94fbcSWENTAO YANG uint64_t table_type = LDC_MAPIN_TYPE_REGULAR;
160*34f94fbcSWENTAO YANG
161*34f94fbcSWENTAO YANG /* set mapin size to default. */
162*34f94fbcSWENTAO YANG ldcssp->mapin_size = LDC_DIRECT_MAP_SIZE_DEFAULT;
163*34f94fbcSWENTAO YANG
164*34f94fbcSWENTAO YANG /* Check if the HV supports mapin size API. */
165*34f94fbcSWENTAO YANG if ((major == LDC_MAPIN_VER_MAJOR &&
166*34f94fbcSWENTAO YANG minor < LDC_MAPIN_VER_MINOR) ||
167*34f94fbcSWENTAO YANG (major < LDC_MAPIN_VER_MAJOR)) {
168*34f94fbcSWENTAO YANG /* Older version of HV. */
169*34f94fbcSWENTAO YANG return;
170*34f94fbcSWENTAO YANG }
171*34f94fbcSWENTAO YANG
172*34f94fbcSWENTAO YANG /* Get info about the mapin size supported by HV */
173*34f94fbcSWENTAO YANG rv = hv_ldc_mapin_size_max(table_type, &sz);
174*34f94fbcSWENTAO YANG if (rv != 0) {
175*34f94fbcSWENTAO YANG cmn_err(CE_NOTE, "Failed to get mapin information\n");
176*34f94fbcSWENTAO YANG return;
177*34f94fbcSWENTAO YANG }
178*34f94fbcSWENTAO YANG
179*34f94fbcSWENTAO YANG /* Save the table size */
180*34f94fbcSWENTAO YANG ldcssp->mapin_size = sz;
181*34f94fbcSWENTAO YANG
182*34f94fbcSWENTAO YANG D1(DBG_ALL_LDCS, "%s: mapin_size read from HV is (0x%llx)\n",
183*34f94fbcSWENTAO YANG __func__, sz);
184*34f94fbcSWENTAO YANG }
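/*
 * Behavior sketch of the routine above (illustrative): with a negotiated
 * HV LDC API of 1.1 or older, ldcssp->mapin_size stays at
 * LDC_DIRECT_MAP_SIZE_DEFAULT; with 1.2 or newer it is taken from
 * hv_ldc_mapin_size_max(), falling back to the default if that call fails.
 */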
185*34f94fbcSWENTAO YANG
186*34f94fbcSWENTAO YANG /*
18720ae46ebSha137994 * Allocate a memory handle for the channel and link it into the
18820ae46ebSha137994 * channel's list of handles. The channel's memory map table itself
18920ae46ebSha137994 * is allocated later, when the first handle is bound.
19020ae46ebSha137994 */
19120ae46ebSha137994 int
19220ae46ebSha137994 ldc_mem_alloc_handle(ldc_handle_t handle, ldc_mem_handle_t *mhandle)
19320ae46ebSha137994 {
19420ae46ebSha137994 ldc_chan_t *ldcp;
19520ae46ebSha137994 ldc_mhdl_t *mhdl;
19620ae46ebSha137994
19720ae46ebSha137994 if (handle == NULL) {
19820ae46ebSha137994 DWARN(DBG_ALL_LDCS,
19920ae46ebSha137994 "ldc_mem_alloc_handle: invalid channel handle\n");
20020ae46ebSha137994 return (EINVAL);
20120ae46ebSha137994 }
20220ae46ebSha137994 ldcp = (ldc_chan_t *)handle;
20320ae46ebSha137994
20420ae46ebSha137994 mutex_enter(&ldcp->lock);
20520ae46ebSha137994
20620ae46ebSha137994 /* check to see if channel is initialized */
20720ae46ebSha137994 if ((ldcp->tstate & ~TS_IN_RESET) < TS_INIT) {
20820ae46ebSha137994 DWARN(ldcp->id,
20920ae46ebSha137994 "ldc_mem_alloc_handle: (0x%llx) channel not initialized\n",
21020ae46ebSha137994 ldcp->id);
21120ae46ebSha137994 mutex_exit(&ldcp->lock);
21220ae46ebSha137994 return (EINVAL);
21320ae46ebSha137994 }
21420ae46ebSha137994
21520ae46ebSha137994 /* allocate handle for channel */
21620ae46ebSha137994 mhdl = kmem_cache_alloc(ldcssp->memhdl_cache, KM_SLEEP);
21720ae46ebSha137994
21820ae46ebSha137994 /* initialize the lock */
21920ae46ebSha137994 mutex_init(&mhdl->lock, NULL, MUTEX_DRIVER, NULL);
22020ae46ebSha137994
22120ae46ebSha137994 mhdl->myshadow = B_FALSE;
22220ae46ebSha137994 mhdl->memseg = NULL;
22320ae46ebSha137994 mhdl->ldcp = ldcp;
22420ae46ebSha137994 mhdl->status = LDC_UNBOUND;
22520ae46ebSha137994
22620ae46ebSha137994 /* insert memory handle (@ head) into list */
22720ae46ebSha137994 if (ldcp->mhdl_list == NULL) {
22820ae46ebSha137994 ldcp->mhdl_list = mhdl;
22920ae46ebSha137994 mhdl->next = NULL;
23020ae46ebSha137994 } else {
23120ae46ebSha137994 /* insert @ head */
23220ae46ebSha137994 mhdl->next = ldcp->mhdl_list;
23320ae46ebSha137994 ldcp->mhdl_list = mhdl;
23420ae46ebSha137994 }
23520ae46ebSha137994
23620ae46ebSha137994 /* return the handle */
23720ae46ebSha137994 *mhandle = (ldc_mem_handle_t)mhdl;
23820ae46ebSha137994
23920ae46ebSha137994 mutex_exit(&ldcp->lock);
24020ae46ebSha137994
24120ae46ebSha137994 D1(ldcp->id, "ldc_mem_alloc_handle: (0x%llx) allocated handle 0x%llx\n",
24220ae46ebSha137994 ldcp->id, mhdl);
24320ae46ebSha137994
24420ae46ebSha137994 return (0);
24520ae46ebSha137994 }
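/*
 * Exporter-side usage sketch (illustrative only; channel setup and error
 * handling are omitted, 'chan', 'buf' and 'len' are assumed to be supplied
 * by the caller, and both 'buf' and 'len' must be 8-byte aligned):
 *
 *	ldc_mem_handle_t mh;
 *	ldc_mem_cookie_t cookie;
 *	uint32_t ccount;
 *
 *	(void) ldc_mem_alloc_handle(chan, &mh);
 *	(void) ldc_mem_bind_handle(mh, buf, len, LDC_SHADOW_MAP,
 *	    LDC_MEM_R | LDC_MEM_W, &cookie, &ccount);
 *	... hand 'cookie' and 'ccount' to the peer ...
 *	(void) ldc_mem_unbind_handle(mh);
 *	(void) ldc_mem_free_handle(mh);
 */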
24620ae46ebSha137994
24720ae46ebSha137994 /*
24820ae46ebSha137994 * Free memory handle for the channel and unlink it from the list
24920ae46ebSha137994 */
25020ae46ebSha137994 int
25120ae46ebSha137994 ldc_mem_free_handle(ldc_mem_handle_t mhandle)
25220ae46ebSha137994 {
25320ae46ebSha137994 ldc_mhdl_t *mhdl, *phdl;
25420ae46ebSha137994 ldc_chan_t *ldcp;
25520ae46ebSha137994
25620ae46ebSha137994 if (mhandle == NULL) {
25720ae46ebSha137994 DWARN(DBG_ALL_LDCS,
25820ae46ebSha137994 "ldc_mem_free_handle: invalid memory handle\n");
25920ae46ebSha137994 return (EINVAL);
26020ae46ebSha137994 }
26120ae46ebSha137994 mhdl = (ldc_mhdl_t *)mhandle;
26220ae46ebSha137994
26320ae46ebSha137994 mutex_enter(&mhdl->lock);
26420ae46ebSha137994
26520ae46ebSha137994 ldcp = mhdl->ldcp;
26620ae46ebSha137994
26720ae46ebSha137994 if (mhdl->status == LDC_BOUND || mhdl->status == LDC_MAPPED) {
26820ae46ebSha137994 DWARN(ldcp->id,
26920ae46ebSha137994 "ldc_mem_free_handle: cannot free, 0x%llx hdl bound\n",
27020ae46ebSha137994 mhdl);
27120ae46ebSha137994 mutex_exit(&mhdl->lock);
27220ae46ebSha137994 return (EINVAL);
27320ae46ebSha137994 }
27420ae46ebSha137994 mutex_exit(&mhdl->lock);
27520ae46ebSha137994
27620ae46ebSha137994 mutex_enter(&ldcp->mlist_lock);
27720ae46ebSha137994
27820ae46ebSha137994 phdl = ldcp->mhdl_list;
27920ae46ebSha137994
28020ae46ebSha137994 /* first handle */
28120ae46ebSha137994 if (phdl == mhdl) {
28220ae46ebSha137994 ldcp->mhdl_list = mhdl->next;
28320ae46ebSha137994 mutex_destroy(&mhdl->lock);
28420ae46ebSha137994 kmem_cache_free(ldcssp->memhdl_cache, mhdl);
28520ae46ebSha137994
28620ae46ebSha137994 D1(ldcp->id,
28720ae46ebSha137994 "ldc_mem_free_handle: (0x%llx) freed handle 0x%llx\n",
28820ae46ebSha137994 ldcp->id, mhdl);
28920ae46ebSha137994 } else {
29020ae46ebSha137994 /* walk the list - unlink and free */
29120ae46ebSha137994 while (phdl != NULL) {
29220ae46ebSha137994 if (phdl->next == mhdl) {
29320ae46ebSha137994 phdl->next = mhdl->next;
29420ae46ebSha137994 mutex_destroy(&mhdl->lock);
29520ae46ebSha137994 kmem_cache_free(ldcssp->memhdl_cache, mhdl);
29620ae46ebSha137994 D1(ldcp->id,
29720ae46ebSha137994 "ldc_mem_free_handle: (0x%llx) freed "
29820ae46ebSha137994 "handle 0x%llx\n", ldcp->id, mhdl);
29920ae46ebSha137994 break;
30020ae46ebSha137994 }
30120ae46ebSha137994 phdl = phdl->next;
30220ae46ebSha137994 }
30320ae46ebSha137994 }
30420ae46ebSha137994
30520ae46ebSha137994 if (phdl == NULL) {
30620ae46ebSha137994 DWARN(ldcp->id,
30720ae46ebSha137994 "ldc_mem_free_handle: invalid handle 0x%llx\n", mhdl);
30820ae46ebSha137994 mutex_exit(&ldcp->mlist_lock);
30920ae46ebSha137994 return (EINVAL);
31020ae46ebSha137994 }
31120ae46ebSha137994
31220ae46ebSha137994 mutex_exit(&ldcp->mlist_lock);
31320ae46ebSha137994
31420ae46ebSha137994 return (0);
31520ae46ebSha137994 }
31620ae46ebSha137994
31720ae46ebSha137994 /*
31820ae46ebSha137994 * Bind a memory handle to a virtual address.
31920ae46ebSha137994 * The virtual address is converted to the corresponding real addresses.
32020ae46ebSha137994 * Returns pointer to the first ldc_mem_cookie and the total number
32120ae46ebSha137994 * of cookies for this virtual address. Other cookies can be obtained
32220ae46ebSha137994 * using the ldc_mem_nextcookie() call. If the pages are stored in
32320ae46ebSha137994 * consecutive locations in the table, a single cookie corresponding to
32420ae46ebSha137994 * the first location is returned. The cookie size spans all the entries.
32520ae46ebSha137994 *
32620ae46ebSha137994 * If the VA corresponds to a page that is already being exported, reuse
32720ae46ebSha137994 * the page and do not export it again. Bump the page's use count.
32820ae46ebSha137994 */
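/*
 * Coalescing example (illustrative, 8K pages): binding a page-aligned 24K
 * buffer consumes three map table slots. If the slots end up consecutive
 * (say indices 10, 11 and 12), a single cookie is returned whose address
 * encodes index 10 and whose size is 0x6000; if slot 11 were already
 * occupied, the bind would instead produce two cookies and *ccount would
 * be 2.
 */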
32920ae46ebSha137994 int
33020ae46ebSha137994 ldc_mem_bind_handle(ldc_mem_handle_t mhandle, caddr_t vaddr, size_t len,
33120ae46ebSha137994 uint8_t mtype, uint8_t perm, ldc_mem_cookie_t *cookie, uint32_t *ccount)
33220ae46ebSha137994 {
333bbfa0259Sha137994 /*
334bbfa0259Sha137994 * Check if direct shared memory map is enabled, if not change
335bbfa0259Sha137994 * the mapping type to SHADOW_MAP.
336bbfa0259Sha137994 */
337bbfa0259Sha137994 if (ldc_shmem_enabled == 0)
338bbfa0259Sha137994 mtype = LDC_SHADOW_MAP;
339bbfa0259Sha137994
340bbfa0259Sha137994 return (i_ldc_mem_bind_handle(mhandle, vaddr, len, mtype, perm,
341bbfa0259Sha137994 cookie, ccount));
342bbfa0259Sha137994 }
343bbfa0259Sha137994
344bbfa0259Sha137994 static int
345bbfa0259Sha137994 i_ldc_mem_bind_handle(ldc_mem_handle_t mhandle, caddr_t vaddr, size_t len,
346bbfa0259Sha137994 uint8_t mtype, uint8_t perm, ldc_mem_cookie_t *cookie, uint32_t *ccount)
347bbfa0259Sha137994 {
34820ae46ebSha137994 ldc_mhdl_t *mhdl;
34920ae46ebSha137994 ldc_chan_t *ldcp;
35020ae46ebSha137994 ldc_mtbl_t *mtbl;
35120ae46ebSha137994 ldc_memseg_t *memseg;
35220ae46ebSha137994 ldc_mte_t tmp_mte;
35320ae46ebSha137994 uint64_t index, prev_index = 0;
35420ae46ebSha137994 int64_t cookie_idx;
35520ae46ebSha137994 uintptr_t raddr, ra_aligned;
35620ae46ebSha137994 uint64_t psize, poffset, v_offset;
35720ae46ebSha137994 uint64_t pg_shift, pg_size, pg_size_code, pg_mask;
35820ae46ebSha137994 pgcnt_t npages;
35920ae46ebSha137994 caddr_t v_align, addr;
36020ae46ebSha137994 int i, rv;
36120ae46ebSha137994
36220ae46ebSha137994 if (mhandle == NULL) {
36320ae46ebSha137994 DWARN(DBG_ALL_LDCS,
36420ae46ebSha137994 "ldc_mem_bind_handle: invalid memory handle\n");
36520ae46ebSha137994 return (EINVAL);
36620ae46ebSha137994 }
36720ae46ebSha137994 mhdl = (ldc_mhdl_t *)mhandle;
36820ae46ebSha137994 ldcp = mhdl->ldcp;
36920ae46ebSha137994
37020ae46ebSha137994 /* clear count */
37120ae46ebSha137994 *ccount = 0;
37220ae46ebSha137994
37320ae46ebSha137994 mutex_enter(&mhdl->lock);
37420ae46ebSha137994
37520ae46ebSha137994 if (mhdl->status == LDC_BOUND || mhdl->memseg != NULL) {
37620ae46ebSha137994 DWARN(ldcp->id,
37720ae46ebSha137994 "ldc_mem_bind_handle: (0x%x) handle already bound\n",
37820ae46ebSha137994 mhandle);
37920ae46ebSha137994 mutex_exit(&mhdl->lock);
38020ae46ebSha137994 return (EINVAL);
38120ae46ebSha137994 }
38220ae46ebSha137994
38320ae46ebSha137994 /* Force address and size to be 8-byte aligned */
38420ae46ebSha137994 if ((((uintptr_t)vaddr | len) & 0x7) != 0) {
38520ae46ebSha137994 DWARN(ldcp->id,
38620ae46ebSha137994 "ldc_mem_bind_handle: addr/size is not 8-byte aligned\n");
38720ae46ebSha137994 mutex_exit(&mhdl->lock);
38820ae46ebSha137994 return (EINVAL);
38920ae46ebSha137994 }
39020ae46ebSha137994
391bbfa0259Sha137994 mutex_enter(&ldcp->lock);
392bbfa0259Sha137994
39320ae46ebSha137994 /*
39420ae46ebSha137994 * If this channel is binding a memory handle for the
39520ae46ebSha137994 * first time, allocate a memory map table for it and initialize it
39620ae46ebSha137994 */
39720ae46ebSha137994 if ((mtbl = ldcp->mtbl) == NULL) {
39820ae46ebSha137994
39920ae46ebSha137994 /* Allocate and initialize the map table structure */
40020ae46ebSha137994 mtbl = kmem_zalloc(sizeof (ldc_mtbl_t), KM_SLEEP);
40120ae46ebSha137994 mtbl->num_entries = mtbl->num_avail = ldc_maptable_entries;
40220ae46ebSha137994 mtbl->size = ldc_maptable_entries * sizeof (ldc_mte_slot_t);
40320ae46ebSha137994 mtbl->next_entry = NULL;
40420ae46ebSha137994 mtbl->contigmem = B_TRUE;
40520ae46ebSha137994
40620ae46ebSha137994 /* Allocate the table itself */
40720ae46ebSha137994 mtbl->table = (ldc_mte_slot_t *)
40820ae46ebSha137994 contig_mem_alloc_align(mtbl->size, MMU_PAGESIZE);
40920ae46ebSha137994 if (mtbl->table == NULL) {
41020ae46ebSha137994
41120ae46ebSha137994 /* allocate a page of memory using kmem_alloc */
41220ae46ebSha137994 mtbl->table = kmem_alloc(MMU_PAGESIZE, KM_SLEEP);
41320ae46ebSha137994 mtbl->size = MMU_PAGESIZE;
41420ae46ebSha137994 mtbl->contigmem = B_FALSE;
41520ae46ebSha137994 mtbl->num_entries = mtbl->num_avail =
41620ae46ebSha137994 mtbl->size / sizeof (ldc_mte_slot_t);
41720ae46ebSha137994 DWARN(ldcp->id,
41820ae46ebSha137994 "ldc_mem_bind_handle: (0x%llx) reduced tbl size "
41920ae46ebSha137994 "to %lx entries\n", ldcp->id, mtbl->num_entries);
42020ae46ebSha137994 }
42120ae46ebSha137994
42220ae46ebSha137994 /* zero out the memory */
42320ae46ebSha137994 bzero(mtbl->table, mtbl->size);
42420ae46ebSha137994
42520ae46ebSha137994 /* initialize the lock */
42620ae46ebSha137994 mutex_init(&mtbl->lock, NULL, MUTEX_DRIVER, NULL);
42720ae46ebSha137994
42820ae46ebSha137994 /* register table for this channel */
42920ae46ebSha137994 rv = hv_ldc_set_map_table(ldcp->id,
43020ae46ebSha137994 va_to_pa(mtbl->table), mtbl->num_entries);
43120ae46ebSha137994 if (rv != 0) {
432bbfa0259Sha137994 DWARN(DBG_ALL_LDCS,
43320ae46ebSha137994 "ldc_mem_bind_handle: (0x%lx) err %d mapping tbl",
43420ae46ebSha137994 ldcp->id, rv);
43520ae46ebSha137994 if (mtbl->contigmem)
43620ae46ebSha137994 contig_mem_free(mtbl->table, mtbl->size);
43720ae46ebSha137994 else
43820ae46ebSha137994 kmem_free(mtbl->table, mtbl->size);
43920ae46ebSha137994 mutex_destroy(&mtbl->lock);
44020ae46ebSha137994 kmem_free(mtbl, sizeof (ldc_mtbl_t));
44120ae46ebSha137994 mutex_exit(&ldcp->lock);
44220ae46ebSha137994 mutex_exit(&mhdl->lock);
44320ae46ebSha137994 return (EIO);
44420ae46ebSha137994 }
44520ae46ebSha137994
44620ae46ebSha137994 ldcp->mtbl = mtbl;
44720ae46ebSha137994
44820ae46ebSha137994 D1(ldcp->id,
44920ae46ebSha137994 "ldc_mem_bind_handle: (0x%llx) alloc'd map table 0x%llx\n",
45020ae46ebSha137994 ldcp->id, ldcp->mtbl->table);
45120ae46ebSha137994 }
45220ae46ebSha137994
453bbfa0259Sha137994 mutex_exit(&ldcp->lock);
454bbfa0259Sha137994
45520ae46ebSha137994 /* FUTURE: get the page size, pgsz code, and shift */
45620ae46ebSha137994 pg_size = MMU_PAGESIZE;
45720ae46ebSha137994 pg_size_code = page_szc(pg_size);
45820ae46ebSha137994 pg_shift = page_get_shift(pg_size_code);
45920ae46ebSha137994 pg_mask = ~(pg_size - 1);
46020ae46ebSha137994
46120ae46ebSha137994 D1(ldcp->id, "ldc_mem_bind_handle: (0x%llx) binding "
46220ae46ebSha137994 "va 0x%llx pgsz=0x%llx, pgszc=0x%llx, pg_shift=0x%llx\n",
46320ae46ebSha137994 ldcp->id, vaddr, pg_size, pg_size_code, pg_shift);
46420ae46ebSha137994
46520ae46ebSha137994 /* aligned VA and its offset */
46620ae46ebSha137994 v_align = (caddr_t)(((uintptr_t)vaddr) & ~(pg_size - 1));
46720ae46ebSha137994 v_offset = ((uintptr_t)vaddr) & (pg_size - 1);
46820ae46ebSha137994
46920ae46ebSha137994 npages = (len+v_offset)/pg_size;
47020ae46ebSha137994 npages = ((len+v_offset)%pg_size == 0) ? npages : npages+1;
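	/*
	 * Illustrative arithmetic: with 8K pages, a vaddr whose low bits
	 * are 0x100 and a len of 0x2100 give v_offset = 0x100 and
	 * (len + v_offset) = 0x2200, so npages = 2 -- the range straddles
	 * two pages.
	 */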
47120ae46ebSha137994
47220ae46ebSha137994 D1(ldcp->id, "ldc_mem_bind_handle: binding "
47320ae46ebSha137994 "(0x%llx) v=0x%llx,val=0x%llx,off=0x%x,pgs=0x%x\n",
47420ae46ebSha137994 ldcp->id, vaddr, v_align, v_offset, npages);
47520ae46ebSha137994
47620ae46ebSha137994 /* lock the memory table - exclusive access to channel */
47720ae46ebSha137994 mutex_enter(&mtbl->lock);
47820ae46ebSha137994
47920ae46ebSha137994 if (npages > mtbl->num_avail) {
48020ae46ebSha137994 D1(ldcp->id, "ldc_mem_bind_handle: (0x%llx) no table entries\n",
48120ae46ebSha137994 ldcp->id);
48220ae46ebSha137994 mutex_exit(&mtbl->lock);
48320ae46ebSha137994 mutex_exit(&mhdl->lock);
48420ae46ebSha137994 return (ENOMEM);
48520ae46ebSha137994 }
48620ae46ebSha137994
48720ae46ebSha137994 /* Allocate a memseg structure */
48820ae46ebSha137994 memseg = mhdl->memseg =
48920ae46ebSha137994 kmem_cache_alloc(ldcssp->memseg_cache, KM_SLEEP);
49020ae46ebSha137994
49120ae46ebSha137994 /* Allocate memory to store all pages and cookies */
49220ae46ebSha137994 memseg->pages = kmem_zalloc((sizeof (ldc_page_t) * npages), KM_SLEEP);
49320ae46ebSha137994 memseg->cookies =
49420ae46ebSha137994 kmem_zalloc((sizeof (ldc_mem_cookie_t) * npages), KM_SLEEP);
49520ae46ebSha137994
49620ae46ebSha137994 D2(ldcp->id, "ldc_mem_bind_handle: (0x%llx) processing 0x%llx pages\n",
49720ae46ebSha137994 ldcp->id, npages);
49820ae46ebSha137994
49920ae46ebSha137994 addr = v_align;
50020ae46ebSha137994
50120ae46ebSha137994 /*
50220ae46ebSha137994 * Table slots are used in a round-robin manner. The algorithm permits
50320ae46ebSha137994 * inserting duplicate entries. Slots allocated earlier will typically
50420ae46ebSha137994 * get freed before we get back to reusing the slot. Inserting duplicate
50520ae46ebSha137994 * entries should be OK, as we only look up entries using the cookie addr,
50620ae46ebSha137994 * i.e. the tbl index, during export, unexport and copy operations.
50720ae46ebSha137994 *
50820ae46ebSha137994 * One implementation that was tried was to search for a duplicate
50920ae46ebSha137994 * page entry first and reuse it. The search overhead is very high and
51020ae46ebSha137994 * in the vnet case dropped the throughput by almost half, from 50 to 24 Mbps.
51120ae46ebSha137994 * So it does make sense to avoid searching for duplicates.
51220ae46ebSha137994 *
51320ae46ebSha137994 * But during the process of searching for a free slot, if we find a
51420ae46ebSha137994 * duplicate entry we will go ahead and use it, and bump its use count.
51520ae46ebSha137994 */
51620ae46ebSha137994
51720ae46ebSha137994 /* index to start searching from */
51820ae46ebSha137994 index = mtbl->next_entry;
51920ae46ebSha137994 cookie_idx = -1;
52020ae46ebSha137994
52120ae46ebSha137994 tmp_mte.ll = 0; /* initialize fields to 0 */
52220ae46ebSha137994
52320ae46ebSha137994 if (mtype & LDC_DIRECT_MAP) {
52420ae46ebSha137994 tmp_mte.mte_r = (perm & LDC_MEM_R) ? 1 : 0;
52520ae46ebSha137994 tmp_mte.mte_w = (perm & LDC_MEM_W) ? 1 : 0;
52620ae46ebSha137994 tmp_mte.mte_x = (perm & LDC_MEM_X) ? 1 : 0;
52720ae46ebSha137994 }
52820ae46ebSha137994
52920ae46ebSha137994 if (mtype & LDC_SHADOW_MAP) {
53020ae46ebSha137994 tmp_mte.mte_cr = (perm & LDC_MEM_R) ? 1 : 0;
53120ae46ebSha137994 tmp_mte.mte_cw = (perm & LDC_MEM_W) ? 1 : 0;
53220ae46ebSha137994 }
53320ae46ebSha137994
53420ae46ebSha137994 if (mtype & LDC_IO_MAP) {
53520ae46ebSha137994 tmp_mte.mte_ir = (perm & LDC_MEM_R) ? 1 : 0;
53620ae46ebSha137994 tmp_mte.mte_iw = (perm & LDC_MEM_W) ? 1 : 0;
53720ae46ebSha137994 }
53820ae46ebSha137994
53920ae46ebSha137994 D1(ldcp->id, "ldc_mem_bind_handle mte=0x%llx\n", tmp_mte.ll);
54020ae46ebSha137994
54120ae46ebSha137994 tmp_mte.mte_pgszc = pg_size_code;
54220ae46ebSha137994
54320ae46ebSha137994 /* initialize each mem table entry */
54420ae46ebSha137994 for (i = 0; i < npages; i++) {
54520ae46ebSha137994
54620ae46ebSha137994 /* check if slot is available in the table */
54720ae46ebSha137994 while (mtbl->table[index].entry.ll != 0) {
54820ae46ebSha137994
54920ae46ebSha137994 index = (index + 1) % mtbl->num_entries;
55020ae46ebSha137994
55120ae46ebSha137994 if (index == mtbl->next_entry) {
55220ae46ebSha137994 /* we have looped around */
55320ae46ebSha137994 DWARN(DBG_ALL_LDCS,
55420ae46ebSha137994 "ldc_mem_bind_handle: (0x%llx) cannot find "
55520ae46ebSha137994 "entry\n", ldcp->id);
55620ae46ebSha137994 *ccount = 0;
55720ae46ebSha137994
55820ae46ebSha137994 /* NOTE: free memory, remove previous entries */
55920ae46ebSha137994 /* this shouldn't happen as num_avail was OK */
56020ae46ebSha137994
56120ae46ebSha137994 mutex_exit(&mtbl->lock);
56220ae46ebSha137994 mutex_exit(&mhdl->lock);
56320ae46ebSha137994 return (ENOMEM);
56420ae46ebSha137994 }
56520ae46ebSha137994 }
56620ae46ebSha137994
56720ae46ebSha137994 /* get the real address */
56820ae46ebSha137994 raddr = va_to_pa((void *)addr);
56920ae46ebSha137994 ra_aligned = ((uintptr_t)raddr & pg_mask);
57020ae46ebSha137994
57120ae46ebSha137994 /* build the mte */
57220ae46ebSha137994 tmp_mte.mte_rpfn = ra_aligned >> pg_shift;
57320ae46ebSha137994
57420ae46ebSha137994 D1(ldcp->id, "ldc_mem_bind_handle mte=0x%llx\n", tmp_mte.ll);
57520ae46ebSha137994
57620ae46ebSha137994 /* update entry in table */
57720ae46ebSha137994 mtbl->table[index].entry = tmp_mte;
57820ae46ebSha137994
57920ae46ebSha137994 D2(ldcp->id, "ldc_mem_bind_handle: (0x%llx) stored MTE 0x%llx"
58020ae46ebSha137994 " into loc 0x%llx\n", ldcp->id, tmp_mte.ll, index);
58120ae46ebSha137994
58220ae46ebSha137994 /* calculate the size and offset for this export range */
58320ae46ebSha137994 if (i == 0) {
58420ae46ebSha137994 /* first page */
58520ae46ebSha137994 psize = min((pg_size - v_offset), len);
58620ae46ebSha137994 poffset = v_offset;
58720ae46ebSha137994
58820ae46ebSha137994 } else if (i == (npages - 1)) {
58920ae46ebSha137994 /* last page */
59020ae46ebSha137994 psize = (((uintptr_t)(vaddr + len)) &
59120ae46ebSha137994 ((uint64_t)(pg_size-1)));
59220ae46ebSha137994 if (psize == 0)
59320ae46ebSha137994 psize = pg_size;
59420ae46ebSha137994 poffset = 0;
59520ae46ebSha137994
59620ae46ebSha137994 } else {
59720ae46ebSha137994 /* middle pages */
59820ae46ebSha137994 psize = pg_size;
59920ae46ebSha137994 poffset = 0;
60020ae46ebSha137994 }
60120ae46ebSha137994
60220ae46ebSha137994 /* store entry for this page */
60320ae46ebSha137994 memseg->pages[i].index = index;
60420ae46ebSha137994 memseg->pages[i].raddr = raddr;
60520ae46ebSha137994 memseg->pages[i].mte = &(mtbl->table[index]);
60620ae46ebSha137994
60720ae46ebSha137994 /* create the cookie */
60820ae46ebSha137994 if (i == 0 || (index != prev_index + 1)) {
60920ae46ebSha137994 cookie_idx++;
61020ae46ebSha137994 memseg->cookies[cookie_idx].addr =
61120ae46ebSha137994 IDX2COOKIE(index, pg_size_code, pg_shift);
61220ae46ebSha137994 memseg->cookies[cookie_idx].addr |= poffset;
61320ae46ebSha137994 memseg->cookies[cookie_idx].size = psize;
61420ae46ebSha137994
61520ae46ebSha137994 } else {
61620ae46ebSha137994 memseg->cookies[cookie_idx].size += psize;
61720ae46ebSha137994 }
61820ae46ebSha137994
61920ae46ebSha137994 D1(ldcp->id, "ldc_mem_bind_handle: bound "
62020ae46ebSha137994 "(0x%llx) va=0x%llx, idx=0x%llx, "
62120ae46ebSha137994 "ra=0x%llx(sz=0x%x,off=0x%x)\n",
62220ae46ebSha137994 ldcp->id, addr, index, raddr, psize, poffset);
62320ae46ebSha137994
62420ae46ebSha137994 /* decrement number of available entries */
62520ae46ebSha137994 mtbl->num_avail--;
62620ae46ebSha137994
62720ae46ebSha137994 /* increment va by page size */
62820ae46ebSha137994 addr += pg_size;
62920ae46ebSha137994
63020ae46ebSha137994 /* increment index */
63120ae46ebSha137994 prev_index = index;
63220ae46ebSha137994 index = (index + 1) % mtbl->num_entries;
63320ae46ebSha137994
63420ae46ebSha137994 /* save the next slot */
63520ae46ebSha137994 mtbl->next_entry = index;
63620ae46ebSha137994 }
63720ae46ebSha137994
63820ae46ebSha137994 mutex_exit(&mtbl->lock);
63920ae46ebSha137994
64020ae46ebSha137994 /* memory handle = bound */
64120ae46ebSha137994 mhdl->mtype = mtype;
64220ae46ebSha137994 mhdl->perm = perm;
64320ae46ebSha137994 mhdl->status = LDC_BOUND;
64420ae46ebSha137994
64520ae46ebSha137994 /* update memseg_t */
64620ae46ebSha137994 memseg->vaddr = vaddr;
64720ae46ebSha137994 memseg->raddr = memseg->pages[0].raddr;
64820ae46ebSha137994 memseg->size = len;
64920ae46ebSha137994 memseg->npages = npages;
65020ae46ebSha137994 memseg->ncookies = cookie_idx + 1;
65120ae46ebSha137994 memseg->next_cookie = (memseg->ncookies > 1) ? 1 : 0;
65220ae46ebSha137994
65320ae46ebSha137994 /* return count and first cookie */
65420ae46ebSha137994 *ccount = memseg->ncookies;
65520ae46ebSha137994 cookie->addr = memseg->cookies[0].addr;
65620ae46ebSha137994 cookie->size = memseg->cookies[0].size;
65720ae46ebSha137994
65820ae46ebSha137994 D1(ldcp->id,
65920ae46ebSha137994 "ldc_mem_bind_handle: (0x%llx) bound 0x%llx, va=0x%llx, "
66020ae46ebSha137994 "pgs=0x%llx cookies=0x%llx\n",
66120ae46ebSha137994 ldcp->id, mhdl, vaddr, npages, memseg->ncookies);
66220ae46ebSha137994
66320ae46ebSha137994 mutex_exit(&mhdl->lock);
66420ae46ebSha137994 return (0);
66520ae46ebSha137994 }
66620ae46ebSha137994
66720ae46ebSha137994 /*
66820ae46ebSha137994 * Return the next cookie associated with the specified memory handle
66920ae46ebSha137994 */
67020ae46ebSha137994 int
67120ae46ebSha137994 ldc_mem_nextcookie(ldc_mem_handle_t mhandle, ldc_mem_cookie_t *cookie)
67220ae46ebSha137994 {
67320ae46ebSha137994 ldc_mhdl_t *mhdl;
67420ae46ebSha137994 ldc_chan_t *ldcp;
67520ae46ebSha137994 ldc_memseg_t *memseg;
67620ae46ebSha137994
67720ae46ebSha137994 if (mhandle == NULL) {
67820ae46ebSha137994 DWARN(DBG_ALL_LDCS,
67920ae46ebSha137994 "ldc_mem_nextcookie: invalid memory handle\n");
68020ae46ebSha137994 return (EINVAL);
68120ae46ebSha137994 }
68220ae46ebSha137994 mhdl = (ldc_mhdl_t *)mhandle;
68320ae46ebSha137994
68420ae46ebSha137994 mutex_enter(&mhdl->lock);
68520ae46ebSha137994
68620ae46ebSha137994 ldcp = mhdl->ldcp;
68720ae46ebSha137994 memseg = mhdl->memseg;
68820ae46ebSha137994
68920ae46ebSha137994 if (cookie == 0) {
69020ae46ebSha137994 DWARN(ldcp->id,
69120ae46ebSha137994 "ldc_mem_nextcookie:(0x%llx) invalid cookie arg\n",
69220ae46ebSha137994 ldcp->id);
69320ae46ebSha137994 mutex_exit(&mhdl->lock);
69420ae46ebSha137994 return (EINVAL);
69520ae46ebSha137994 }
69620ae46ebSha137994
69720ae46ebSha137994 if (memseg->next_cookie != 0) {
69820ae46ebSha137994 cookie->addr = memseg->cookies[memseg->next_cookie].addr;
69920ae46ebSha137994 cookie->size = memseg->cookies[memseg->next_cookie].size;
70020ae46ebSha137994 memseg->next_cookie++;
70120ae46ebSha137994 if (memseg->next_cookie == memseg->ncookies)
70220ae46ebSha137994 memseg->next_cookie = 0;
70320ae46ebSha137994
70420ae46ebSha137994 } else {
70520ae46ebSha137994 DWARN(ldcp->id,
70620ae46ebSha137994 "ldc_mem_nextcookie:(0x%llx) no more cookies\n", ldcp->id);
70720ae46ebSha137994 cookie->addr = 0;
70820ae46ebSha137994 cookie->size = 0;
70920ae46ebSha137994 mutex_exit(&mhdl->lock);
71020ae46ebSha137994 return (EINVAL);
71120ae46ebSha137994 }
71220ae46ebSha137994
71320ae46ebSha137994 D1(ldcp->id,
71420ae46ebSha137994 "ldc_mem_nextcookie: (0x%llx) cookie addr=0x%llx,sz=0x%llx\n",
71520ae46ebSha137994 ldcp->id, cookie->addr, cookie->size);
71620ae46ebSha137994
71720ae46ebSha137994 mutex_exit(&mhdl->lock);
71820ae46ebSha137994 return (0);
71920ae46ebSha137994 }
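/*
 * Illustrative sketch of walking every cookie of a bound handle, where
 * 'mh', 'first' and 'ccount' are the values returned by
 * ldc_mem_bind_handle() and send_cookie_to_peer() is a hypothetical
 * helper supplied by the client:
 *
 *	ldc_mem_cookie_t c = first;
 *	uint32_t i;
 *
 *	for (i = 0; i < ccount; i++) {
 *		send_cookie_to_peer(c.addr, c.size);
 *		if (i + 1 < ccount)
 *			(void) ldc_mem_nextcookie(mh, &c);
 *	}
 */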
72020ae46ebSha137994
72120ae46ebSha137994 /*
72220ae46ebSha137994 * Unbind the virtual memory region associated with the specified
72320ae46ebSha137994 * memory handle. All associated cookies are freed and the corresponding
72420ae46ebSha137994 * RA space is no longer exported.
72520ae46ebSha137994 */
72620ae46ebSha137994 int
72720ae46ebSha137994 ldc_mem_unbind_handle(ldc_mem_handle_t mhandle)
72820ae46ebSha137994 {
72920ae46ebSha137994 ldc_mhdl_t *mhdl;
73020ae46ebSha137994 ldc_chan_t *ldcp;
73120ae46ebSha137994 ldc_mtbl_t *mtbl;
73220ae46ebSha137994 ldc_memseg_t *memseg;
73320ae46ebSha137994 uint64_t cookie_addr;
73420ae46ebSha137994 uint64_t pg_shift, pg_size_code;
735bbfa0259Sha137994 int i, rv, retries;
73620ae46ebSha137994
73720ae46ebSha137994 if (mhandle == NULL) {
73820ae46ebSha137994 DWARN(DBG_ALL_LDCS,
73920ae46ebSha137994 "ldc_mem_unbind_handle: invalid memory handle\n");
74020ae46ebSha137994 return (EINVAL);
74120ae46ebSha137994 }
74220ae46ebSha137994 mhdl = (ldc_mhdl_t *)mhandle;
74320ae46ebSha137994
74420ae46ebSha137994 mutex_enter(&mhdl->lock);
74520ae46ebSha137994
74620ae46ebSha137994 if (mhdl->status == LDC_UNBOUND) {
74720ae46ebSha137994 DWARN(DBG_ALL_LDCS,
74820ae46ebSha137994 "ldc_mem_unbind_handle: (0x%x) handle is not bound\n",
74920ae46ebSha137994 mhandle);
75020ae46ebSha137994 mutex_exit(&mhdl->lock);
75120ae46ebSha137994 return (EINVAL);
75220ae46ebSha137994 }
75320ae46ebSha137994
75420ae46ebSha137994 ldcp = mhdl->ldcp;
75520ae46ebSha137994 mtbl = ldcp->mtbl;
75620ae46ebSha137994
75720ae46ebSha137994 memseg = mhdl->memseg;
75820ae46ebSha137994
75920ae46ebSha137994 /* lock the memory table - exclusive access to channel */
76020ae46ebSha137994 mutex_enter(&mtbl->lock);
76120ae46ebSha137994
76220ae46ebSha137994 /* undo the pages exported */
76320ae46ebSha137994 for (i = 0; i < memseg->npages; i++) {
76420ae46ebSha137994
765bbfa0259Sha137994 /* clear the entry from the table */
766bbfa0259Sha137994 memseg->pages[i].mte->entry.ll = 0;
767bbfa0259Sha137994
76820ae46ebSha137994 /* check for mapped pages, revocation cookie != 0 */
76920ae46ebSha137994 if (memseg->pages[i].mte->cookie) {
77020ae46ebSha137994
7715b7cb889Sha137994 pg_size_code = page_szc(MMU_PAGESIZE);
772bbfa0259Sha137994 pg_shift = page_get_shift(pg_size_code);
77320ae46ebSha137994 cookie_addr = IDX2COOKIE(memseg->pages[i].index,
77420ae46ebSha137994 pg_size_code, pg_shift);
77520ae46ebSha137994
77620ae46ebSha137994 D1(ldcp->id, "ldc_mem_unbind_handle: (0x%llx) revoke "
77720ae46ebSha137994 "cookie 0x%llx, rcookie 0x%llx\n", ldcp->id,
77820ae46ebSha137994 cookie_addr, memseg->pages[i].mte->cookie);
779bbfa0259Sha137994
780bbfa0259Sha137994 retries = 0;
781bbfa0259Sha137994 do {
78220ae46ebSha137994 rv = hv_ldc_revoke(ldcp->id, cookie_addr,
78320ae46ebSha137994 memseg->pages[i].mte->cookie);
784bbfa0259Sha137994
785bbfa0259Sha137994 if (rv != H_EWOULDBLOCK)
786bbfa0259Sha137994 break;
787bbfa0259Sha137994
788bbfa0259Sha137994 drv_usecwait(ldc_delay);
789bbfa0259Sha137994
790bbfa0259Sha137994 } while (retries++ < ldc_max_retries);
791bbfa0259Sha137994
79220ae46ebSha137994 if (rv) {
79320ae46ebSha137994 DWARN(ldcp->id,
79420ae46ebSha137994 "ldc_mem_unbind_handle: (0x%llx) cannot "
79520ae46ebSha137994 "revoke mapping, cookie %llx\n", ldcp->id,
79620ae46ebSha137994 cookie_addr);
79720ae46ebSha137994 }
79820ae46ebSha137994 }
79920ae46ebSha137994
80020ae46ebSha137994 mtbl->num_avail++;
80120ae46ebSha137994 }
80220ae46ebSha137994 mutex_exit(&mtbl->lock);
80320ae46ebSha137994
80420ae46ebSha137994 /* free the allocated memseg and page structures */
80520ae46ebSha137994 kmem_free(memseg->pages, (sizeof (ldc_page_t) * memseg->npages));
80620ae46ebSha137994 kmem_free(memseg->cookies,
80720ae46ebSha137994 (sizeof (ldc_mem_cookie_t) * memseg->npages));
80820ae46ebSha137994 kmem_cache_free(ldcssp->memseg_cache, memseg);
80920ae46ebSha137994
81020ae46ebSha137994 /* uninitialize the memory handle */
81120ae46ebSha137994 mhdl->memseg = NULL;
81220ae46ebSha137994 mhdl->status = LDC_UNBOUND;
81320ae46ebSha137994
81420ae46ebSha137994 D1(ldcp->id, "ldc_mem_unbind_handle: (0x%llx) unbound handle 0x%llx\n",
81520ae46ebSha137994 ldcp->id, mhdl);
81620ae46ebSha137994
81720ae46ebSha137994 mutex_exit(&mhdl->lock);
81820ae46ebSha137994 return (0);
81920ae46ebSha137994 }
82020ae46ebSha137994
82120ae46ebSha137994 /*
82220ae46ebSha137994 * Get information about a memory handle. The handle's status and, if it
82320ae46ebSha137994 * is bound or mapped, the base VA/RA, mapping type and permissions are returned.
82420ae46ebSha137994 */
82520ae46ebSha137994 int
82620ae46ebSha137994 ldc_mem_info(ldc_mem_handle_t mhandle, ldc_mem_info_t *minfo)
82720ae46ebSha137994 {
82820ae46ebSha137994 ldc_mhdl_t *mhdl;
82920ae46ebSha137994
83020ae46ebSha137994 if (mhandle == NULL) {
83120ae46ebSha137994 DWARN(DBG_ALL_LDCS, "ldc_mem_info: invalid memory handle\n");
83220ae46ebSha137994 return (EINVAL);
83320ae46ebSha137994 }
83420ae46ebSha137994 mhdl = (ldc_mhdl_t *)mhandle;
83520ae46ebSha137994
83620ae46ebSha137994 if (minfo == NULL) {
83720ae46ebSha137994 DWARN(DBG_ALL_LDCS, "ldc_mem_info: invalid args\n");
83820ae46ebSha137994 return (EINVAL);
83920ae46ebSha137994 }
84020ae46ebSha137994
84120ae46ebSha137994 mutex_enter(&mhdl->lock);
84220ae46ebSha137994
84320ae46ebSha137994 minfo->status = mhdl->status;
84420ae46ebSha137994 if (mhdl->status == LDC_BOUND || mhdl->status == LDC_MAPPED) {
84520ae46ebSha137994 minfo->vaddr = mhdl->memseg->vaddr;
84620ae46ebSha137994 minfo->raddr = mhdl->memseg->raddr;
84720ae46ebSha137994 minfo->mtype = mhdl->mtype;
84820ae46ebSha137994 minfo->perm = mhdl->perm;
84920ae46ebSha137994 }
85020ae46ebSha137994 mutex_exit(&mhdl->lock);
85120ae46ebSha137994
85220ae46ebSha137994 return (0);
85320ae46ebSha137994 }
85420ae46ebSha137994
85520ae46ebSha137994 /*
85620ae46ebSha137994 * Copy data between the client-specified virtual address space and the
85720ae46ebSha137994 * exported memory associated with the cookies.
85820ae46ebSha137994 * The direction argument determines whether the data is read from or
85920ae46ebSha137994 * written to exported memory.
86020ae46ebSha137994 */
86120ae46ebSha137994 int
86220ae46ebSha137994 ldc_mem_copy(ldc_handle_t handle, caddr_t vaddr, uint64_t off, size_t *size,
86320ae46ebSha137994 ldc_mem_cookie_t *cookies, uint32_t ccount, uint8_t direction)
86420ae46ebSha137994 {
86520ae46ebSha137994 ldc_chan_t *ldcp;
86620ae46ebSha137994 uint64_t local_voff, local_valign;
86720ae46ebSha137994 uint64_t cookie_addr, cookie_size;
86820ae46ebSha137994 uint64_t pg_shift, pg_size, pg_size_code;
86920ae46ebSha137994 uint64_t export_caddr, export_poff, export_psize, export_size;
87020ae46ebSha137994 uint64_t local_ra, local_poff, local_psize;
87120ae46ebSha137994 uint64_t copy_size, copied_len = 0, total_bal = 0, idx = 0;
87220ae46ebSha137994 pgcnt_t npages;
87320ae46ebSha137994 size_t len = *size;
87420ae46ebSha137994 int i, rv = 0;
87520ae46ebSha137994
87620ae46ebSha137994 uint64_t chid;
87720ae46ebSha137994
87820ae46ebSha137994 if (handle == NULL) {
87920ae46ebSha137994 DWARN(DBG_ALL_LDCS, "ldc_mem_copy: invalid channel handle\n");
88020ae46ebSha137994 return (EINVAL);
88120ae46ebSha137994 }
88220ae46ebSha137994 ldcp = (ldc_chan_t *)handle;
88320ae46ebSha137994 chid = ldcp->id;
88420ae46ebSha137994
88520ae46ebSha137994 /* check to see if channel is UP */
88620ae46ebSha137994 if (ldcp->tstate != TS_UP) {
88720ae46ebSha137994 DWARN(chid, "ldc_mem_copy: (0x%llx) channel is not UP\n",
88820ae46ebSha137994 chid);
88920ae46ebSha137994 return (ECONNRESET);
89020ae46ebSha137994 }
89120ae46ebSha137994
89220ae46ebSha137994 /* Force address and size to be 8-byte aligned */
89320ae46ebSha137994 if ((((uintptr_t)vaddr | len) & 0x7) != 0) {
89420ae46ebSha137994 DWARN(chid,
89520ae46ebSha137994 "ldc_mem_copy: addr/sz is not 8-byte aligned\n");
89620ae46ebSha137994 return (EINVAL);
89720ae46ebSha137994 }
89820ae46ebSha137994
89920ae46ebSha137994 /* Find the size of the exported memory */
90020ae46ebSha137994 export_size = 0;
90120ae46ebSha137994 for (i = 0; i < ccount; i++)
90220ae46ebSha137994 export_size += cookies[i].size;
90320ae46ebSha137994
90420ae46ebSha137994 /* check to see if offset is valid */
90520ae46ebSha137994 if (off > export_size) {
90620ae46ebSha137994 DWARN(chid,
90720ae46ebSha137994 "ldc_mem_copy: (0x%llx) start offset > export mem size\n",
90820ae46ebSha137994 chid);
90920ae46ebSha137994 return (EINVAL);
91020ae46ebSha137994 }
91120ae46ebSha137994
91220ae46ebSha137994 /*
91320ae46ebSha137994 * Check to see if the export size is smaller than the size we
91420ae46ebSha137994 * are requesting to copy - if so flag an error
91520ae46ebSha137994 */
91620ae46ebSha137994 if ((export_size - off) < *size) {
91720ae46ebSha137994 DWARN(chid,
91820ae46ebSha137994 "ldc_mem_copy: (0x%llx) copy size > export mem size\n",
91920ae46ebSha137994 chid);
92020ae46ebSha137994 return (EINVAL);
92120ae46ebSha137994 }
92220ae46ebSha137994
92320ae46ebSha137994 total_bal = min(export_size, *size);
92420ae46ebSha137994
92520ae46ebSha137994 /* FUTURE: get the page size, pgsz code, and shift */
92620ae46ebSha137994 pg_size = MMU_PAGESIZE;
92720ae46ebSha137994 pg_size_code = page_szc(pg_size);
92820ae46ebSha137994 pg_shift = page_get_shift(pg_size_code);
92920ae46ebSha137994
93020ae46ebSha137994 D1(chid, "ldc_mem_copy: copying data "
93120ae46ebSha137994 "(0x%llx) va 0x%llx pgsz=0x%llx, pgszc=0x%llx, pg_shift=0x%llx\n",
93220ae46ebSha137994 chid, vaddr, pg_size, pg_size_code, pg_shift);
93320ae46ebSha137994
93420ae46ebSha137994 /* aligned VA and its offset */
93520ae46ebSha137994 local_valign = (((uintptr_t)vaddr) & ~(pg_size - 1));
93620ae46ebSha137994 local_voff = ((uintptr_t)vaddr) & (pg_size - 1);
93720ae46ebSha137994
93820ae46ebSha137994 npages = (len+local_voff)/pg_size;
93920ae46ebSha137994 npages = ((len+local_voff)%pg_size == 0) ? npages : npages+1;
94020ae46ebSha137994
94120ae46ebSha137994 D1(chid,
94220ae46ebSha137994 "ldc_mem_copy: (0x%llx) v=0x%llx,val=0x%llx,off=0x%x,pgs=0x%x\n",
94320ae46ebSha137994 chid, vaddr, local_valign, local_voff, npages);
94420ae46ebSha137994
94520ae46ebSha137994 local_ra = va_to_pa((void *)local_valign);
94620ae46ebSha137994 local_poff = local_voff;
94720ae46ebSha137994 local_psize = min(len, (pg_size - local_voff));
94820ae46ebSha137994
94920ae46ebSha137994 len -= local_psize;
95020ae46ebSha137994
95120ae46ebSha137994 /*
95220ae46ebSha137994 * find the first cookie in the list of cookies
95320ae46ebSha137994 * if the offset passed in is not zero
95420ae46ebSha137994 */
95520ae46ebSha137994 for (idx = 0; idx < ccount; idx++) {
95620ae46ebSha137994 cookie_size = cookies[idx].size;
95720ae46ebSha137994 if (off < cookie_size)
95820ae46ebSha137994 break;
95920ae46ebSha137994 off -= cookie_size;
96020ae46ebSha137994 }
96120ae46ebSha137994
96220ae46ebSha137994 cookie_addr = cookies[idx].addr + off;
96320ae46ebSha137994 cookie_size = cookies[idx].size - off;
96420ae46ebSha137994
96520ae46ebSha137994 export_caddr = cookie_addr & ~(pg_size - 1);
96620ae46ebSha137994 export_poff = cookie_addr & (pg_size - 1);
96720ae46ebSha137994 export_psize = min(cookie_size, (pg_size - export_poff));
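	/*
	 * Illustrative arithmetic (8K pages): a cookie whose in-page offset
	 * is 0xA00 and whose size is 0x3000 gives export_poff = 0xA00 and
	 * export_psize = min(0x3000, 0x1600) = 0x1600, so only the tail of
	 * the first exported page is copied before the loop below advances
	 * to the next page.
	 */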
96820ae46ebSha137994
96920ae46ebSha137994 for (;;) {
97020ae46ebSha137994
97120ae46ebSha137994 copy_size = min(export_psize, local_psize);
97220ae46ebSha137994
97320ae46ebSha137994 D1(chid,
97420ae46ebSha137994 "ldc_mem_copy:(0x%llx) dir=0x%x, caddr=0x%llx,"
97520ae46ebSha137994 " loc_ra=0x%llx, exp_poff=0x%llx, loc_poff=0x%llx,"
97620ae46ebSha137994 " exp_psz=0x%llx, loc_psz=0x%llx, copy_sz=0x%llx,"
97720ae46ebSha137994 " total_bal=0x%llx\n",
97820ae46ebSha137994 chid, direction, export_caddr, local_ra, export_poff,
97920ae46ebSha137994 local_poff, export_psize, local_psize, copy_size,
98020ae46ebSha137994 total_bal);
98120ae46ebSha137994
98220ae46ebSha137994 rv = hv_ldc_copy(chid, direction,
98320ae46ebSha137994 (export_caddr + export_poff), (local_ra + local_poff),
98420ae46ebSha137994 copy_size, &copied_len);
98520ae46ebSha137994
98620ae46ebSha137994 if (rv != 0) {
98720ae46ebSha137994 int error = EIO;
98820ae46ebSha137994 uint64_t rx_hd, rx_tl;
98920ae46ebSha137994
99020ae46ebSha137994 DWARN(chid,
99120ae46ebSha137994 "ldc_mem_copy: (0x%llx) err %d during copy\n",
99220ae46ebSha137994 (unsigned long long)chid, rv);
99320ae46ebSha137994 DWARN(chid,
99420ae46ebSha137994 "ldc_mem_copy: (0x%llx) dir=0x%x, caddr=0x%lx, "
99520ae46ebSha137994 "loc_ra=0x%lx, exp_poff=0x%lx, loc_poff=0x%lx,"
99620ae46ebSha137994 " exp_psz=0x%lx, loc_psz=0x%lx, copy_sz=0x%lx,"
99720ae46ebSha137994 " copied_len=0x%lx, total_bal=0x%lx\n",
99820ae46ebSha137994 chid, direction, export_caddr, local_ra,
99920ae46ebSha137994 export_poff, local_poff, export_psize, local_psize,
100020ae46ebSha137994 copy_size, copied_len, total_bal);
100120ae46ebSha137994
100220ae46ebSha137994 *size = *size - total_bal;
100320ae46ebSha137994
100420ae46ebSha137994 /*
100520ae46ebSha137994 * check if reason for copy error was due to
100620ae46ebSha137994 * a channel reset. we need to grab the lock
100720ae46ebSha137994 * just in case we have to do a reset.
100820ae46ebSha137994 */
100920ae46ebSha137994 mutex_enter(&ldcp->lock);
101020ae46ebSha137994 mutex_enter(&ldcp->tx_lock);
101120ae46ebSha137994
101220ae46ebSha137994 rv = hv_ldc_rx_get_state(ldcp->id,
101320ae46ebSha137994 &rx_hd, &rx_tl, &(ldcp->link_state));
101420ae46ebSha137994 if (ldcp->link_state == LDC_CHANNEL_DOWN ||
101520ae46ebSha137994 ldcp->link_state == LDC_CHANNEL_RESET) {
101620ae46ebSha137994 i_ldc_reset(ldcp, B_FALSE);
101720ae46ebSha137994 error = ECONNRESET;
101820ae46ebSha137994 }
101920ae46ebSha137994
102020ae46ebSha137994 mutex_exit(&ldcp->tx_lock);
102120ae46ebSha137994 mutex_exit(&ldcp->lock);
102220ae46ebSha137994
102320ae46ebSha137994 return (error);
102420ae46ebSha137994 }
102520ae46ebSha137994
102620ae46ebSha137994 ASSERT(copied_len <= copy_size);
102720ae46ebSha137994
102820ae46ebSha137994 D2(chid, "ldc_mem_copy: copied=0x%llx\n", copied_len);
102920ae46ebSha137994 export_poff += copied_len;
103020ae46ebSha137994 local_poff += copied_len;
103120ae46ebSha137994 export_psize -= copied_len;
103220ae46ebSha137994 local_psize -= copied_len;
103320ae46ebSha137994 cookie_size -= copied_len;
103420ae46ebSha137994
103520ae46ebSha137994 total_bal -= copied_len;
103620ae46ebSha137994
103720ae46ebSha137994 if (copy_size != copied_len)
103820ae46ebSha137994 continue;
103920ae46ebSha137994
104020ae46ebSha137994 if (export_psize == 0 && total_bal != 0) {
104120ae46ebSha137994
104220ae46ebSha137994 if (cookie_size == 0) {
104320ae46ebSha137994 idx++;
104420ae46ebSha137994 cookie_addr = cookies[idx].addr;
104520ae46ebSha137994 cookie_size = cookies[idx].size;
104620ae46ebSha137994
104720ae46ebSha137994 export_caddr = cookie_addr & ~(pg_size - 1);
104820ae46ebSha137994 export_poff = cookie_addr & (pg_size - 1);
104920ae46ebSha137994 export_psize =
105020ae46ebSha137994 min(cookie_size, (pg_size-export_poff));
105120ae46ebSha137994 } else {
105220ae46ebSha137994 export_caddr += pg_size;
105320ae46ebSha137994 export_poff = 0;
105420ae46ebSha137994 export_psize = min(cookie_size, pg_size);
105520ae46ebSha137994 }
105620ae46ebSha137994 }
105720ae46ebSha137994
105820ae46ebSha137994 if (local_psize == 0 && total_bal != 0) {
105920ae46ebSha137994 local_valign += pg_size;
106020ae46ebSha137994 local_ra = va_to_pa((void *)local_valign);
106120ae46ebSha137994 local_poff = 0;
106220ae46ebSha137994 local_psize = min(pg_size, len);
106320ae46ebSha137994 len -= local_psize;
106420ae46ebSha137994 }
106520ae46ebSha137994
106620ae46ebSha137994 /* check if we are all done */
106720ae46ebSha137994 if (total_bal == 0)
106820ae46ebSha137994 break;
106920ae46ebSha137994 }
107020ae46ebSha137994
107120ae46ebSha137994
107220ae46ebSha137994 D1(chid,
107320ae46ebSha137994 "ldc_mem_copy: (0x%llx) done copying sz=0x%llx\n",
107420ae46ebSha137994 chid, *size);
107520ae46ebSha137994
107620ae46ebSha137994 return (0);
107720ae46ebSha137994 }
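/*
 * Usage sketch for ldc_mem_copy() above (illustrative): reading 'len'
 * bytes from the start of a peer's exported segment into a local,
 * 8-byte aligned buffer 'buf', given the cookies received from the peer
 * ('chan', 'cookies' and 'ccount' are assumed to be supplied by the
 * caller):
 *
 *	size_t nbytes = len;
 *	int rv;
 *
 *	rv = ldc_mem_copy(chan, buf, 0, &nbytes, cookies, ccount,
 *	    LDC_COPY_IN);
 *	... on success (rv == 0) the full 'nbytes' were copied into 'buf' ...
 */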
107820ae46ebSha137994
107920ae46ebSha137994 /*
108020ae46ebSha137994 * Copy data between the client-specified virtual address space and
108120ae46ebSha137994 * HV physical memory.
108220ae46ebSha137994 *
108320ae46ebSha137994 * The direction argument determines whether the data is read from or
108420ae46ebSha137994 * written to HV memory. The direction values are LDC_COPY_IN/OUT,
108520ae46ebSha137994 * as for the ldc_mem_copy interface.
108620ae46ebSha137994 */
108720ae46ebSha137994 int
108820ae46ebSha137994 ldc_mem_rdwr_cookie(ldc_handle_t handle, caddr_t vaddr, size_t *size,
108920ae46ebSha137994 caddr_t paddr, uint8_t direction)
109020ae46ebSha137994 {
109120ae46ebSha137994 ldc_chan_t *ldcp;
109220ae46ebSha137994 uint64_t local_voff, local_valign;
109320ae46ebSha137994 uint64_t pg_shift, pg_size, pg_size_code;
109420ae46ebSha137994 uint64_t target_pa, target_poff, target_psize, target_size;
109520ae46ebSha137994 uint64_t local_ra, local_poff, local_psize;
109620ae46ebSha137994 uint64_t copy_size, copied_len = 0;
109720ae46ebSha137994 pgcnt_t npages;
109820ae46ebSha137994 size_t len = *size;
109920ae46ebSha137994 int rv = 0;
110020ae46ebSha137994
110120ae46ebSha137994 if (handle == NULL) {
110220ae46ebSha137994 DWARN(DBG_ALL_LDCS,
110320ae46ebSha137994 "ldc_mem_rdwr_cookie: invalid channel handle\n");
110420ae46ebSha137994 return (EINVAL);
110520ae46ebSha137994 }
110620ae46ebSha137994 ldcp = (ldc_chan_t *)handle;
110720ae46ebSha137994
110820ae46ebSha137994 mutex_enter(&ldcp->lock);
110920ae46ebSha137994
111020ae46ebSha137994 /* check to see if channel is UP */
111120ae46ebSha137994 if (ldcp->tstate != TS_UP) {
111220ae46ebSha137994 DWARN(ldcp->id,
111320ae46ebSha137994 "ldc_mem_rdwr_cookie: (0x%llx) channel is not UP\n",
111420ae46ebSha137994 ldcp->id);
111520ae46ebSha137994 mutex_exit(&ldcp->lock);
111620ae46ebSha137994 return (ECONNRESET);
111720ae46ebSha137994 }
111820ae46ebSha137994
111920ae46ebSha137994 /* Force address and size to be 8-byte aligned */
112020ae46ebSha137994 if ((((uintptr_t)vaddr | len) & 0x7) != 0) {
112120ae46ebSha137994 DWARN(ldcp->id,
112220ae46ebSha137994 "ldc_mem_rdwr_cookie: addr/size is not 8-byte aligned\n");
112320ae46ebSha137994 mutex_exit(&ldcp->lock);
112420ae46ebSha137994 return (EINVAL);
112520ae46ebSha137994 }
112620ae46ebSha137994
112720ae46ebSha137994 target_size = *size;
112820ae46ebSha137994
112920ae46ebSha137994 /* FUTURE: get the page size, pgsz code, and shift */
113020ae46ebSha137994 pg_size = MMU_PAGESIZE;
113120ae46ebSha137994 pg_size_code = page_szc(pg_size);
113220ae46ebSha137994 pg_shift = page_get_shift(pg_size_code);
113320ae46ebSha137994
113420ae46ebSha137994 D1(ldcp->id, "ldc_mem_rdwr_cookie: copying data "
113520ae46ebSha137994 "(0x%llx) va 0x%llx pgsz=0x%llx, pgszc=0x%llx, pg_shift=0x%llx\n",
113620ae46ebSha137994 ldcp->id, vaddr, pg_size, pg_size_code, pg_shift);
113720ae46ebSha137994
113820ae46ebSha137994 /* aligned VA and its offset */
113920ae46ebSha137994 local_valign = ((uintptr_t)vaddr) & ~(pg_size - 1);
114020ae46ebSha137994 local_voff = ((uintptr_t)vaddr) & (pg_size - 1);
114120ae46ebSha137994
114220ae46ebSha137994 npages = (len + local_voff) / pg_size;
114320ae46ebSha137994 npages = ((len + local_voff) % pg_size == 0) ? npages : npages+1;
114420ae46ebSha137994
114520ae46ebSha137994 D1(ldcp->id, "ldc_mem_rdwr_cookie: (0x%llx) v=0x%llx, "
114620ae46ebSha137994 "val=0x%llx,off=0x%x,pgs=0x%x\n",
114720ae46ebSha137994 ldcp->id, vaddr, local_valign, local_voff, npages);
114820ae46ebSha137994
114920ae46ebSha137994 local_ra = va_to_pa((void *)local_valign);
115020ae46ebSha137994 local_poff = local_voff;
115120ae46ebSha137994 local_psize = min(len, (pg_size - local_voff));
115220ae46ebSha137994
115320ae46ebSha137994 len -= local_psize;
115420ae46ebSha137994
115520ae46ebSha137994 target_pa = ((uintptr_t)paddr) & ~(pg_size - 1);
115620ae46ebSha137994 target_poff = ((uintptr_t)paddr) & (pg_size - 1);
115720ae46ebSha137994 target_psize = pg_size - target_poff;
115820ae46ebSha137994
115920ae46ebSha137994 for (;;) {
116020ae46ebSha137994
116120ae46ebSha137994 copy_size = min(target_psize, local_psize);
116220ae46ebSha137994
116320ae46ebSha137994 D1(ldcp->id,
116420ae46ebSha137994 "ldc_mem_rdwr_cookie: (0x%llx) dir=0x%x, tar_pa=0x%llx,"
116520ae46ebSha137994 " loc_ra=0x%llx, tar_poff=0x%llx, loc_poff=0x%llx,"
116620ae46ebSha137994 " tar_psz=0x%llx, loc_psz=0x%llx, copy_sz=0x%llx,"
116720ae46ebSha137994 " total_bal=0x%llx\n",
116820ae46ebSha137994 ldcp->id, direction, target_pa, local_ra, target_poff,
116920ae46ebSha137994 local_poff, target_psize, local_psize, copy_size,
117020ae46ebSha137994 target_size);
117120ae46ebSha137994
117220ae46ebSha137994 rv = hv_ldc_copy(ldcp->id, direction,
117320ae46ebSha137994 (target_pa + target_poff), (local_ra + local_poff),
117420ae46ebSha137994 copy_size, &copied_len);
117520ae46ebSha137994
117620ae46ebSha137994 if (rv != 0) {
117720ae46ebSha137994 DWARN(DBG_ALL_LDCS,
117820ae46ebSha137994 "ldc_mem_rdwr_cookie: (0x%lx) err %d during copy\n",
117920ae46ebSha137994 ldcp->id, rv);
118020ae46ebSha137994 DWARN(DBG_ALL_LDCS,
118120ae46ebSha137994 "ldc_mem_rdwr_cookie: (0x%llx) dir=%lld, "
118220ae46ebSha137994 "tar_pa=0x%llx, loc_ra=0x%llx, tar_poff=0x%llx, "
118320ae46ebSha137994 "loc_poff=0x%llx, tar_psz=0x%llx, loc_psz=0x%llx, "
118420ae46ebSha137994 "copy_sz=0x%llx, total_bal=0x%llx\n",
118520ae46ebSha137994 ldcp->id, direction, target_pa, local_ra,
118620ae46ebSha137994 target_poff, local_poff, target_psize, local_psize,
118720ae46ebSha137994 copy_size, target_size);
118820ae46ebSha137994
118920ae46ebSha137994 *size = *size - target_size;
119020ae46ebSha137994 mutex_exit(&ldcp->lock);
119120ae46ebSha137994 return (i_ldc_h2v_error(rv));
119220ae46ebSha137994 }
119320ae46ebSha137994
119420ae46ebSha137994 D2(ldcp->id, "ldc_mem_rdwr_cookie: copied=0x%llx\n",
119520ae46ebSha137994 copied_len);
119620ae46ebSha137994 target_poff += copied_len;
119720ae46ebSha137994 local_poff += copied_len;
119820ae46ebSha137994 target_psize -= copied_len;
119920ae46ebSha137994 local_psize -= copied_len;
120020ae46ebSha137994
120120ae46ebSha137994 target_size -= copied_len;
120220ae46ebSha137994
120320ae46ebSha137994 if (copy_size != copied_len)
120420ae46ebSha137994 continue;
120520ae46ebSha137994
120620ae46ebSha137994 if (target_psize == 0 && target_size != 0) {
120720ae46ebSha137994 target_pa += pg_size;
120820ae46ebSha137994 target_poff = 0;
120920ae46ebSha137994 target_psize = min(pg_size, target_size);
121020ae46ebSha137994 }
121120ae46ebSha137994
121220ae46ebSha137994 if (local_psize == 0 && target_size != 0) {
121320ae46ebSha137994 local_valign += pg_size;
121420ae46ebSha137994 local_ra = va_to_pa((void *)local_valign);
121520ae46ebSha137994 local_poff = 0;
121620ae46ebSha137994 local_psize = min(pg_size, len);
121720ae46ebSha137994 len -= local_psize;
121820ae46ebSha137994 }
121920ae46ebSha137994
122020ae46ebSha137994 /* check if we are all done */
122120ae46ebSha137994 if (target_size == 0)
122220ae46ebSha137994 break;
122320ae46ebSha137994 }
122420ae46ebSha137994
122520ae46ebSha137994 mutex_exit(&ldcp->lock);
122620ae46ebSha137994
122720ae46ebSha137994 D1(ldcp->id, "ldc_mem_rdwr_cookie: (0x%llx) done copying sz=0x%llx\n",
122820ae46ebSha137994 ldcp->id, *size);
122920ae46ebSha137994
123020ae46ebSha137994 return (0);
123120ae46ebSha137994 }
123220ae46ebSha137994
123320ae46ebSha137994 /*
123420ae46ebSha137994 * Map an exported memory segment into the local address space. If the
123520ae46ebSha137994 * memory range was exported for direct map access, a HV call is made
123620ae46ebSha137994 * to allocate a RA range. If the map is done via a shadow copy, local
123720ae46ebSha137994 * shadow memory is allocated and the base VA is returned in 'vaddr'. If
123820ae46ebSha137994 * the mapping is a direct map then the RA is returned in 'raddr'.
123920ae46ebSha137994 */
124020ae46ebSha137994 int
124120ae46ebSha137994 ldc_mem_map(ldc_mem_handle_t mhandle, ldc_mem_cookie_t *cookie, uint32_t ccount,
124220ae46ebSha137994 uint8_t mtype, uint8_t perm, caddr_t *vaddr, caddr_t *raddr)
124320ae46ebSha137994 {
1244bbfa0259Sha137994 /*
1245bbfa0259Sha137994 * Check if direct map over shared memory is enabled, if not change
1246bbfa0259Sha137994 * the mapping type to SHADOW_MAP.
1247bbfa0259Sha137994 */
1248bbfa0259Sha137994 if (ldc_shmem_enabled == 0)
1249bbfa0259Sha137994 mtype = LDC_SHADOW_MAP;
1250bbfa0259Sha137994
1251bbfa0259Sha137994 return (i_ldc_mem_map(mhandle, cookie, ccount, mtype, perm,
1252bbfa0259Sha137994 vaddr, raddr));
1253bbfa0259Sha137994 }
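
/*
 * Example (sketch only, not used by the driver): importing a segment
 * exported by the peer. The channel handle "chan", the cookie array
 * "cookies"/"ccount" (received in a channel message) and "exp_len"
 * (the exported length) are illustrative names, not part of this file.
 *
 *	ldc_mem_handle_t mh;
 *	caddr_t va = NULL;
 *	int rv;
 *
 *	if (ldc_mem_alloc_handle(chan, &mh) != 0)
 *		return;
 *	rv = ldc_mem_map(mh, cookies, ccount, LDC_DIRECT_MAP,
 *	    LDC_MEM_RW, &va, NULL);
 *	if (rv == 0) {
 *		(void) ldc_mem_acquire(mh, 0, exp_len);
 *		... read or update the data through 'va' ...
 *		(void) ldc_mem_release(mh, 0, exp_len);
 *		(void) ldc_mem_unmap(mh);
 *	}
 *	(void) ldc_mem_free_handle(mh);
 *
 * If the hypervisor mapin fails, i_ldc_mem_map() falls back to a
 * shadow mapping, in which case the acquire/release calls above
 * perform the actual data copies.
 */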
1254bbfa0259Sha137994
1255bbfa0259Sha137994 static int
1256bbfa0259Sha137994 i_ldc_mem_map(ldc_mem_handle_t mhandle, ldc_mem_cookie_t *cookie,
1257bbfa0259Sha137994 uint32_t ccount, uint8_t mtype, uint8_t perm, caddr_t *vaddr,
1258bbfa0259Sha137994 caddr_t *raddr)
1259bbfa0259Sha137994 {
1260bbfa0259Sha137994
126120ae46ebSha137994 int i, j, idx, rv, retries;
126220ae46ebSha137994 ldc_chan_t *ldcp;
126320ae46ebSha137994 ldc_mhdl_t *mhdl;
126420ae46ebSha137994 ldc_memseg_t *memseg;
126520ae46ebSha137994 caddr_t tmpaddr;
126620ae46ebSha137994 uint64_t map_perm = perm;
126720ae46ebSha137994 uint64_t pg_size, pg_shift, pg_size_code, pg_mask;
126820ae46ebSha137994 uint64_t exp_size = 0, base_off, map_size, npages;
126920ae46ebSha137994 uint64_t cookie_addr, cookie_off, cookie_size;
127020ae46ebSha137994 tte_t ldc_tte;
127120ae46ebSha137994
127220ae46ebSha137994 if (mhandle == NULL) {
127320ae46ebSha137994 DWARN(DBG_ALL_LDCS, "ldc_mem_map: invalid memory handle\n");
127420ae46ebSha137994 return (EINVAL);
127520ae46ebSha137994 }
127620ae46ebSha137994 mhdl = (ldc_mhdl_t *)mhandle;
127720ae46ebSha137994
127820ae46ebSha137994 mutex_enter(&mhdl->lock);
127920ae46ebSha137994
128020ae46ebSha137994 if (mhdl->status == LDC_BOUND || mhdl->status == LDC_MAPPED ||
128120ae46ebSha137994 mhdl->memseg != NULL) {
128220ae46ebSha137994 DWARN(DBG_ALL_LDCS,
128320ae46ebSha137994 "ldc_mem_map: (0x%llx) handle bound/mapped\n", mhandle);
128420ae46ebSha137994 mutex_exit(&mhdl->lock);
128520ae46ebSha137994 return (EINVAL);
128620ae46ebSha137994 }
128720ae46ebSha137994
128820ae46ebSha137994 ldcp = mhdl->ldcp;
128920ae46ebSha137994
129020ae46ebSha137994 mutex_enter(&ldcp->lock);
129120ae46ebSha137994
129220ae46ebSha137994 if (ldcp->tstate != TS_UP) {
129320ae46ebSha137994 DWARN(ldcp->id,
129420ae46ebSha137994 "ldc_mem_dring_map: (0x%llx) channel is not UP\n",
129520ae46ebSha137994 ldcp->id);
129620ae46ebSha137994 mutex_exit(&ldcp->lock);
129720ae46ebSha137994 mutex_exit(&mhdl->lock);
129820ae46ebSha137994 return (ECONNRESET);
129920ae46ebSha137994 }
130020ae46ebSha137994
130120ae46ebSha137994 if ((mtype & (LDC_SHADOW_MAP|LDC_DIRECT_MAP|LDC_IO_MAP)) == 0) {
130220ae46ebSha137994 DWARN(ldcp->id, "ldc_mem_map: invalid map type\n");
130320ae46ebSha137994 mutex_exit(&ldcp->lock);
130420ae46ebSha137994 mutex_exit(&mhdl->lock);
130520ae46ebSha137994 return (EINVAL);
130620ae46ebSha137994 }
130720ae46ebSha137994
130820ae46ebSha137994 D1(ldcp->id, "ldc_mem_map: (0x%llx) cookie = 0x%llx,0x%llx\n",
130920ae46ebSha137994 ldcp->id, cookie->addr, cookie->size);
131020ae46ebSha137994
131120ae46ebSha137994 /* FUTURE: get the page size, pgsz code, and shift */
131220ae46ebSha137994 pg_size = MMU_PAGESIZE;
131320ae46ebSha137994 pg_size_code = page_szc(pg_size);
131420ae46ebSha137994 pg_shift = page_get_shift(pg_size_code);
131520ae46ebSha137994 pg_mask = ~(pg_size - 1);
131620ae46ebSha137994
131720ae46ebSha137994 /* calculate the number of pages in the exported cookie */
131820ae46ebSha137994 base_off = cookie[0].addr & (pg_size - 1);
131920ae46ebSha137994 for (idx = 0; idx < ccount; idx++)
132020ae46ebSha137994 exp_size += cookie[idx].size;
132120ae46ebSha137994 map_size = P2ROUNDUP((exp_size + base_off), pg_size);
132220ae46ebSha137994 npages = (map_size >> pg_shift);
132320ae46ebSha137994
132420ae46ebSha137994 /* Allocate memseg structure */
132520ae46ebSha137994 memseg = mhdl->memseg =
132620ae46ebSha137994 kmem_cache_alloc(ldcssp->memseg_cache, KM_SLEEP);
132720ae46ebSha137994
132820ae46ebSha137994 /* Allocate memory to store all pages and cookies */
132920ae46ebSha137994 memseg->pages = kmem_zalloc((sizeof (ldc_page_t) * npages), KM_SLEEP);
133020ae46ebSha137994 memseg->cookies =
133120ae46ebSha137994 kmem_zalloc((sizeof (ldc_mem_cookie_t) * ccount), KM_SLEEP);
133220ae46ebSha137994
133320ae46ebSha137994 D2(ldcp->id, "ldc_mem_map: (0x%llx) exp_size=0x%llx, map_size=0x%llx,"
133420ae46ebSha137994 "pages=0x%llx\n", ldcp->id, exp_size, map_size, npages);
133520ae46ebSha137994
133620ae46ebSha137994 /*
133720ae46ebSha137994 * Check to see if the client is requesting direct or shadow map
133820ae46ebSha137994 * If direct map is requested, try to map remote memory first,
133920ae46ebSha137994 * and if that fails, revert to shadow map
134020ae46ebSha137994 */
134120ae46ebSha137994 if (mtype == LDC_DIRECT_MAP) {
134220ae46ebSha137994
134320ae46ebSha137994 /* Allocate kernel virtual space for mapping */
134420ae46ebSha137994 memseg->vaddr = vmem_xalloc(heap_arena, map_size,
134520ae46ebSha137994 pg_size, 0, 0, NULL, NULL, VM_NOSLEEP);
134620ae46ebSha137994 if (memseg->vaddr == NULL) {
1347bbfa0259Sha137994 DWARN(DBG_ALL_LDCS,
134820ae46ebSha137994 "ldc_mem_map: (0x%lx) memory map failed\n",
134920ae46ebSha137994 ldcp->id);
135020ae46ebSha137994 kmem_free(memseg->cookies,
135120ae46ebSha137994 (sizeof (ldc_mem_cookie_t) * ccount));
135220ae46ebSha137994 kmem_free(memseg->pages,
135320ae46ebSha137994 (sizeof (ldc_page_t) * npages));
135420ae46ebSha137994 kmem_cache_free(ldcssp->memseg_cache, memseg);
135520ae46ebSha137994
135620ae46ebSha137994 mutex_exit(&ldcp->lock);
135720ae46ebSha137994 mutex_exit(&mhdl->lock);
135820ae46ebSha137994 return (ENOMEM);
135920ae46ebSha137994 }
136020ae46ebSha137994
136120ae46ebSha137994 /* Unload previous mapping */
136220ae46ebSha137994 hat_unload(kas.a_hat, memseg->vaddr, map_size,
136320ae46ebSha137994 HAT_UNLOAD_NOSYNC | HAT_UNLOAD_UNLOCK);
136420ae46ebSha137994
136520ae46ebSha137994 /* for each cookie passed in - map into address space */
136620ae46ebSha137994 idx = 0;
136720ae46ebSha137994 cookie_size = 0;
136820ae46ebSha137994 tmpaddr = memseg->vaddr;
136920ae46ebSha137994
137020ae46ebSha137994 for (i = 0; i < npages; i++) {
137120ae46ebSha137994
137220ae46ebSha137994 if (cookie_size == 0) {
137320ae46ebSha137994 ASSERT(idx < ccount);
137420ae46ebSha137994 cookie_addr = cookie[idx].addr & pg_mask;
137520ae46ebSha137994 cookie_off = cookie[idx].addr & (pg_size - 1);
137620ae46ebSha137994 cookie_size =
137720ae46ebSha137994 P2ROUNDUP((cookie_off + cookie[idx].size),
137820ae46ebSha137994 pg_size);
137920ae46ebSha137994 idx++;
138020ae46ebSha137994 }
138120ae46ebSha137994
138220ae46ebSha137994 D1(ldcp->id, "ldc_mem_map: (0x%llx) mapping "
138320ae46ebSha137994 "cookie 0x%llx, bal=0x%llx\n", ldcp->id,
138420ae46ebSha137994 cookie_addr, cookie_size);
138520ae46ebSha137994
138620ae46ebSha137994 /* map the cookie into address space */
138720ae46ebSha137994 for (retries = 0; retries < ldc_max_retries;
138820ae46ebSha137994 retries++) {
138920ae46ebSha137994
139020ae46ebSha137994 rv = hv_ldc_mapin(ldcp->id, cookie_addr,
139120ae46ebSha137994 &memseg->pages[i].raddr, &map_perm);
139220ae46ebSha137994 if (rv != H_EWOULDBLOCK && rv != H_ETOOMANY)
139320ae46ebSha137994 break;
139420ae46ebSha137994
139520ae46ebSha137994 drv_usecwait(ldc_delay);
139620ae46ebSha137994 }
139720ae46ebSha137994
139820ae46ebSha137994 if (rv || memseg->pages[i].raddr == 0) {
139920ae46ebSha137994 DWARN(ldcp->id,
140020ae46ebSha137994 "ldc_mem_map: (0x%llx) hv mapin err %d\n",
140120ae46ebSha137994 ldcp->id, rv);
140220ae46ebSha137994
140320ae46ebSha137994 /* remove previous mapins */
140420ae46ebSha137994 hat_unload(kas.a_hat, memseg->vaddr, map_size,
140520ae46ebSha137994 HAT_UNLOAD_NOSYNC | HAT_UNLOAD_UNLOCK);
140620ae46ebSha137994 for (j = 0; j < i; j++) {
140720ae46ebSha137994 rv = hv_ldc_unmap(
140820ae46ebSha137994 memseg->pages[j].raddr);
140920ae46ebSha137994 if (rv) {
141020ae46ebSha137994 DWARN(ldcp->id,
141120ae46ebSha137994 "ldc_mem_map: (0x%llx) "
141220ae46ebSha137994 "cannot unmap ra=0x%llx\n",
141320ae46ebSha137994 ldcp->id,
141420ae46ebSha137994 memseg->pages[j].raddr);
141520ae46ebSha137994 }
141620ae46ebSha137994 }
141720ae46ebSha137994
141820ae46ebSha137994 /* free kernel virtual space */
141920ae46ebSha137994 vmem_free(heap_arena, (void *)memseg->vaddr,
142020ae46ebSha137994 map_size);
142120ae46ebSha137994
142220ae46ebSha137994 /* direct map failed - revert to shadow map */
142320ae46ebSha137994 mtype = LDC_SHADOW_MAP;
142420ae46ebSha137994 break;
142520ae46ebSha137994
142620ae46ebSha137994 } else {
142720ae46ebSha137994
142820ae46ebSha137994 D1(ldcp->id,
142920ae46ebSha137994 "ldc_mem_map: (0x%llx) vtop map 0x%llx -> "
143020ae46ebSha137994 "0x%llx, cookie=0x%llx, perm=0x%llx\n",
143120ae46ebSha137994 ldcp->id, tmpaddr, memseg->pages[i].raddr,
143220ae46ebSha137994 cookie_addr, perm);
143320ae46ebSha137994
143420ae46ebSha137994 /*
143520ae46ebSha137994 * NOTE: Calling hat_devload directly causes it
143620ae46ebSha137994 * to look up the page_t using the pfn. Since this
143720ae46ebSha137994 * addr is greater than the memlist, it treats
143820ae46ebSha137994 * it as non-memory
143920ae46ebSha137994 */
144020ae46ebSha137994 sfmmu_memtte(&ldc_tte,
144120ae46ebSha137994 (pfn_t)(memseg->pages[i].raddr >> pg_shift),
144220ae46ebSha137994 PROT_READ | PROT_WRITE | HAT_NOSYNC, TTE8K);
144320ae46ebSha137994
144420ae46ebSha137994 D1(ldcp->id,
144520ae46ebSha137994 "ldc_mem_map: (0x%llx) ra 0x%llx -> "
144620ae46ebSha137994 "tte 0x%llx\n", ldcp->id,
144720ae46ebSha137994 memseg->pages[i].raddr, ldc_tte);
144820ae46ebSha137994
144920ae46ebSha137994 sfmmu_tteload(kas.a_hat, &ldc_tte, tmpaddr,
145020ae46ebSha137994 NULL, HAT_LOAD_LOCK);
145120ae46ebSha137994
145220ae46ebSha137994 cookie_size -= pg_size;
145320ae46ebSha137994 cookie_addr += pg_size;
145420ae46ebSha137994 tmpaddr += pg_size;
145520ae46ebSha137994 }
145620ae46ebSha137994 }
145720ae46ebSha137994 }
145820ae46ebSha137994
145920ae46ebSha137994 if (mtype == LDC_SHADOW_MAP) {
146020ae46ebSha137994 if (*vaddr == NULL) {
146120ae46ebSha137994 memseg->vaddr = kmem_zalloc(exp_size, KM_SLEEP);
146220ae46ebSha137994 mhdl->myshadow = B_TRUE;
146320ae46ebSha137994
146420ae46ebSha137994 D1(ldcp->id, "ldc_mem_map: (0x%llx) allocated "
146520ae46ebSha137994 "shadow page va=0x%llx\n", ldcp->id, memseg->vaddr);
146620ae46ebSha137994 } else {
146720ae46ebSha137994 /*
146820ae46ebSha137994 * Use client supplied memory for memseg->vaddr
146920ae46ebSha137994 * WARNING: assuming that client mem is >= exp_size
147020ae46ebSha137994 */
147120ae46ebSha137994 memseg->vaddr = *vaddr;
147220ae46ebSha137994 }
147320ae46ebSha137994
147420ae46ebSha137994 /* Save all page and cookie information */
147520ae46ebSha137994 for (i = 0, tmpaddr = memseg->vaddr; i < npages; i++) {
147620ae46ebSha137994 memseg->pages[i].raddr = va_to_pa(tmpaddr);
147720ae46ebSha137994 tmpaddr += pg_size;
147820ae46ebSha137994 }
147920ae46ebSha137994
148020ae46ebSha137994 }
148120ae46ebSha137994
148220ae46ebSha137994 /* save all cookies */
148320ae46ebSha137994 bcopy(cookie, memseg->cookies, ccount * sizeof (ldc_mem_cookie_t));
148420ae46ebSha137994
148520ae46ebSha137994 /* update memseg_t */
148620ae46ebSha137994 memseg->raddr = memseg->pages[0].raddr;
148720ae46ebSha137994 memseg->size = (mtype == LDC_SHADOW_MAP) ? exp_size : map_size;
148820ae46ebSha137994 memseg->npages = npages;
148920ae46ebSha137994 memseg->ncookies = ccount;
149020ae46ebSha137994 memseg->next_cookie = 0;
149120ae46ebSha137994
149220ae46ebSha137994 /* memory handle = mapped */
149320ae46ebSha137994 mhdl->mtype = mtype;
149420ae46ebSha137994 mhdl->perm = perm;
149520ae46ebSha137994 mhdl->status = LDC_MAPPED;
149620ae46ebSha137994
149720ae46ebSha137994 D1(ldcp->id, "ldc_mem_map: (0x%llx) mapped 0x%llx, ra=0x%llx, "
149820ae46ebSha137994 "va=0x%llx, pgs=0x%llx cookies=0x%llx\n",
149920ae46ebSha137994 ldcp->id, mhdl, memseg->raddr, memseg->vaddr,
150020ae46ebSha137994 memseg->npages, memseg->ncookies);
150120ae46ebSha137994
150220ae46ebSha137994 if (mtype == LDC_SHADOW_MAP)
150320ae46ebSha137994 base_off = 0;
150420ae46ebSha137994 if (raddr)
150520ae46ebSha137994 *raddr = (caddr_t)(memseg->raddr | base_off);
150620ae46ebSha137994 if (vaddr)
150720ae46ebSha137994 *vaddr = (caddr_t)((uintptr_t)memseg->vaddr | base_off);
150820ae46ebSha137994
150920ae46ebSha137994 mutex_exit(&ldcp->lock);
151020ae46ebSha137994 mutex_exit(&mhdl->lock);
151120ae46ebSha137994 return (0);
151220ae46ebSha137994 }
151320ae46ebSha137994
151420ae46ebSha137994 /*
151520ae46ebSha137994 * Unmap a memory segment. Free shadow memory (if any).
151620ae46ebSha137994 */
151720ae46ebSha137994 int
151820ae46ebSha137994 ldc_mem_unmap(ldc_mem_handle_t mhandle)
151920ae46ebSha137994 {
152020ae46ebSha137994 int i, rv;
152120ae46ebSha137994 ldc_mhdl_t *mhdl = (ldc_mhdl_t *)mhandle;
152220ae46ebSha137994 ldc_chan_t *ldcp;
152320ae46ebSha137994 ldc_memseg_t *memseg;
152420ae46ebSha137994
152520ae46ebSha137994 if (mhdl == 0 || mhdl->status != LDC_MAPPED) {
152620ae46ebSha137994 DWARN(DBG_ALL_LDCS,
152720ae46ebSha137994 "ldc_mem_unmap: (0x%llx) handle is not mapped\n",
152820ae46ebSha137994 mhandle);
152920ae46ebSha137994 return (EINVAL);
153020ae46ebSha137994 }
153120ae46ebSha137994
153220ae46ebSha137994 mutex_enter(&mhdl->lock);
153320ae46ebSha137994
153420ae46ebSha137994 ldcp = mhdl->ldcp;
153520ae46ebSha137994 memseg = mhdl->memseg;
153620ae46ebSha137994
153720ae46ebSha137994 D1(ldcp->id, "ldc_mem_unmap: (0x%llx) unmapping handle 0x%llx\n",
153820ae46ebSha137994 ldcp->id, mhdl);
153920ae46ebSha137994
154020ae46ebSha137994 /* if we allocated shadow memory - free it */
154120ae46ebSha137994 if (mhdl->mtype == LDC_SHADOW_MAP && mhdl->myshadow) {
154220ae46ebSha137994 kmem_free(memseg->vaddr, memseg->size);
154320ae46ebSha137994 } else if (mhdl->mtype == LDC_DIRECT_MAP) {
154420ae46ebSha137994
154520ae46ebSha137994 /* unmap in the case of DIRECT_MAP */
154620ae46ebSha137994 hat_unload(kas.a_hat, memseg->vaddr, memseg->size,
154720ae46ebSha137994 HAT_UNLOAD_UNLOCK);
154820ae46ebSha137994
154920ae46ebSha137994 for (i = 0; i < memseg->npages; i++) {
155020ae46ebSha137994 rv = hv_ldc_unmap(memseg->pages[i].raddr);
155120ae46ebSha137994 if (rv) {
1552bbfa0259Sha137994 DWARN(DBG_ALL_LDCS,
155320ae46ebSha137994 "ldc_mem_map: (0x%lx) hv unmap err %d\n",
155420ae46ebSha137994 ldcp->id, rv);
155520ae46ebSha137994 }
155620ae46ebSha137994 }
155720ae46ebSha137994
155820ae46ebSha137994 vmem_free(heap_arena, (void *)memseg->vaddr, memseg->size);
155920ae46ebSha137994 }
156020ae46ebSha137994
156120ae46ebSha137994 /* free the allocated memseg and page structures */
156220ae46ebSha137994 kmem_free(memseg->pages, (sizeof (ldc_page_t) * memseg->npages));
156320ae46ebSha137994 kmem_free(memseg->cookies,
156420ae46ebSha137994 (sizeof (ldc_mem_cookie_t) * memseg->ncookies));
156520ae46ebSha137994 kmem_cache_free(ldcssp->memseg_cache, memseg);
156620ae46ebSha137994
156720ae46ebSha137994 /* uninitialize the memory handle */
156820ae46ebSha137994 mhdl->memseg = NULL;
156920ae46ebSha137994 mhdl->status = LDC_UNBOUND;
157020ae46ebSha137994
157120ae46ebSha137994 D1(ldcp->id, "ldc_mem_unmap: (0x%llx) unmapped handle 0x%llx\n",
157220ae46ebSha137994 ldcp->id, mhdl);
157320ae46ebSha137994
157420ae46ebSha137994 mutex_exit(&mhdl->lock);
157520ae46ebSha137994 return (0);
157620ae46ebSha137994 }
157720ae46ebSha137994
157820ae46ebSha137994 /*
157920ae46ebSha137994 * Internal entry point for LDC mapped memory entry consistency
158020ae46ebSha137994 * semantics. Acquire copies the contents of the remote memory
158120ae46ebSha137994 * into the local shadow copy. The release operation copies the local
158220ae46ebSha137994 * contents into the remote memory. The offset and size specify the
158320ae46ebSha137994 * bounds for the memory range being synchronized.
158420ae46ebSha137994 */
158520ae46ebSha137994 static int
158620ae46ebSha137994 i_ldc_mem_acquire_release(ldc_mem_handle_t mhandle, uint8_t direction,
158720ae46ebSha137994 uint64_t offset, size_t size)
158820ae46ebSha137994 {
158920ae46ebSha137994 int err;
159020ae46ebSha137994 ldc_mhdl_t *mhdl;
159120ae46ebSha137994 ldc_chan_t *ldcp;
159220ae46ebSha137994 ldc_memseg_t *memseg;
159320ae46ebSha137994 caddr_t local_vaddr;
159420ae46ebSha137994 size_t copy_size;
159520ae46ebSha137994
159620ae46ebSha137994 if (mhandle == NULL) {
159720ae46ebSha137994 DWARN(DBG_ALL_LDCS,
159820ae46ebSha137994 "i_ldc_mem_acquire_release: invalid memory handle\n");
159920ae46ebSha137994 return (EINVAL);
160020ae46ebSha137994 }
160120ae46ebSha137994 mhdl = (ldc_mhdl_t *)mhandle;
160220ae46ebSha137994
160320ae46ebSha137994 mutex_enter(&mhdl->lock);
160420ae46ebSha137994
160520ae46ebSha137994 if (mhdl->status != LDC_MAPPED || mhdl->ldcp == NULL) {
160620ae46ebSha137994 DWARN(DBG_ALL_LDCS,
160720ae46ebSha137994 "i_ldc_mem_acquire_release: not mapped memory\n");
160820ae46ebSha137994 mutex_exit(&mhdl->lock);
160920ae46ebSha137994 return (EINVAL);
161020ae46ebSha137994 }
161120ae46ebSha137994
161220ae46ebSha137994 /* do nothing for direct map */
161320ae46ebSha137994 if (mhdl->mtype == LDC_DIRECT_MAP) {
161420ae46ebSha137994 mutex_exit(&mhdl->lock);
161520ae46ebSha137994 return (0);
161620ae46ebSha137994 }
161720ae46ebSha137994
161820ae46ebSha137994 /* do nothing if COPY_IN+MEM_W and COPY_OUT+MEM_R */
161920ae46ebSha137994 if ((direction == LDC_COPY_IN && (mhdl->perm & LDC_MEM_R) == 0) ||
162020ae46ebSha137994 (direction == LDC_COPY_OUT && (mhdl->perm & LDC_MEM_W) == 0)) {
162120ae46ebSha137994 mutex_exit(&mhdl->lock);
162220ae46ebSha137994 return (0);
162320ae46ebSha137994 }
162420ae46ebSha137994
162520ae46ebSha137994 if (offset >= mhdl->memseg->size ||
162620ae46ebSha137994 (offset + size) > mhdl->memseg->size) {
162720ae46ebSha137994 DWARN(DBG_ALL_LDCS,
162820ae46ebSha137994 "i_ldc_mem_acquire_release: memory out of range\n");
162920ae46ebSha137994 mutex_exit(&mhdl->lock);
163020ae46ebSha137994 return (EINVAL);
163120ae46ebSha137994 }
163220ae46ebSha137994
163320ae46ebSha137994 /* get the channel handle and memory segment */
163420ae46ebSha137994 ldcp = mhdl->ldcp;
163520ae46ebSha137994 memseg = mhdl->memseg;
163620ae46ebSha137994
163720ae46ebSha137994 if (mhdl->mtype == LDC_SHADOW_MAP) {
163820ae46ebSha137994
163920ae46ebSha137994 local_vaddr = memseg->vaddr + offset;
164020ae46ebSha137994 copy_size = size;
164120ae46ebSha137994
164220ae46ebSha137994 /* copy to/from remote from/to local memory */
164320ae46ebSha137994 err = ldc_mem_copy((ldc_handle_t)ldcp, local_vaddr, offset,
164420ae46ebSha137994 &copy_size, memseg->cookies, memseg->ncookies,
164520ae46ebSha137994 direction);
164620ae46ebSha137994 if (err || copy_size != size) {
164720ae46ebSha137994 DWARN(ldcp->id,
164820ae46ebSha137994 "i_ldc_mem_acquire_release: copy failed\n");
164920ae46ebSha137994 mutex_exit(&mhdl->lock);
165020ae46ebSha137994 return (err);
165120ae46ebSha137994 }
165220ae46ebSha137994 }
165320ae46ebSha137994
165420ae46ebSha137994 mutex_exit(&mhdl->lock);
165520ae46ebSha137994
165620ae46ebSha137994 return (0);
165720ae46ebSha137994 }
165820ae46ebSha137994
165920ae46ebSha137994 /*
166020ae46ebSha137994 * Ensure that the contents of the local memory seg (shadow copy) are
166120ae46ebSha137994 * consistent with the contents of the remote (exported) segment
166220ae46ebSha137994 */
166320ae46ebSha137994 int
166420ae46ebSha137994 ldc_mem_acquire(ldc_mem_handle_t mhandle, uint64_t offset, uint64_t size)
166520ae46ebSha137994 {
166620ae46ebSha137994 return (i_ldc_mem_acquire_release(mhandle, LDC_COPY_IN, offset, size));
166720ae46ebSha137994 }
166820ae46ebSha137994
166920ae46ebSha137994
167020ae46ebSha137994 /*
167120ae46ebSha137994 * Ensure that the contents of the remote (exported) memory seg are
167220ae46ebSha137994 * consistent with the contents of the local segment
167320ae46ebSha137994 */
167420ae46ebSha137994 int
167520ae46ebSha137994 ldc_mem_release(ldc_mem_handle_t mhandle, uint64_t offset, uint64_t size)
167620ae46ebSha137994 {
167720ae46ebSha137994 return (i_ldc_mem_acquire_release(mhandle, LDC_COPY_OUT, offset, size));
167820ae46ebSha137994 }
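
/*
 * Example (sketch): synchronizing only part of a shadow-mapped
 * segment. "mh" is a handle that was mapped with LDC_SHADOW_MAP, and
 * "hdr_size", "data_off" and "data_len" are illustrative offsets into
 * the exported segment. The acquire pulls the remote header into the
 * local shadow copy; the release pushes the locally updated payload
 * back out to the exporter.
 *
 *	(void) ldc_mem_acquire(mh, 0, hdr_size);
 *	... inspect the header through the shadow VA ...
 *	... update the payload through the shadow VA ...
 *	(void) ldc_mem_release(mh, data_off, data_len);
 *
 * For an LDC_DIRECT_MAP mapping both calls succeed without copying,
 * so the same code path works for either map type.
 */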
167920ae46ebSha137994
168020ae46ebSha137994 /*
168120ae46ebSha137994 * Allocate a descriptor ring. The size of each descriptor must be
168220ae46ebSha137994 * a multiple of 8 bytes and the total ring size is rounded up to a
168320ae46ebSha137994 * multiple of MMU_PAGESIZE.
168420ae46ebSha137994 */
168520ae46ebSha137994 int
168620ae46ebSha137994 ldc_mem_dring_create(uint32_t len, uint32_t dsize, ldc_dring_handle_t *dhandle)
168720ae46ebSha137994 {
168820ae46ebSha137994 ldc_dring_t *dringp;
168920ae46ebSha137994 size_t size = (dsize * len);
169020ae46ebSha137994
169120ae46ebSha137994 D1(DBG_ALL_LDCS, "ldc_mem_dring_create: len=0x%x, size=0x%x\n",
169220ae46ebSha137994 len, dsize);
169320ae46ebSha137994
169420ae46ebSha137994 if (dhandle == NULL) {
169520ae46ebSha137994 DWARN(DBG_ALL_LDCS, "ldc_mem_dring_create: invalid dhandle\n");
169620ae46ebSha137994 return (EINVAL);
169720ae46ebSha137994 }
169820ae46ebSha137994
169920ae46ebSha137994 if (len == 0) {
170020ae46ebSha137994 DWARN(DBG_ALL_LDCS, "ldc_mem_dring_create: invalid length\n");
170120ae46ebSha137994 return (EINVAL);
170220ae46ebSha137994 }
170320ae46ebSha137994
170420ae46ebSha137994 /* descriptor size should be 8-byte aligned */
170520ae46ebSha137994 if (dsize == 0 || (dsize & 0x7)) {
170620ae46ebSha137994 DWARN(DBG_ALL_LDCS, "ldc_mem_dring_create: invalid size\n");
170720ae46ebSha137994 return (EINVAL);
170820ae46ebSha137994 }
170920ae46ebSha137994
171020ae46ebSha137994 *dhandle = 0;
171120ae46ebSha137994
171220ae46ebSha137994 /* Allocate a desc ring structure */
171320ae46ebSha137994 dringp = kmem_zalloc(sizeof (ldc_dring_t), KM_SLEEP);
171420ae46ebSha137994
171520ae46ebSha137994 /* Initialize dring */
171620ae46ebSha137994 dringp->length = len;
171720ae46ebSha137994 dringp->dsize = dsize;
171820ae46ebSha137994
171920ae46ebSha137994 /* round up to a multiple of pagesize */
172020ae46ebSha137994 dringp->size = (size & MMU_PAGEMASK);
172120ae46ebSha137994 if (size & MMU_PAGEOFFSET)
172220ae46ebSha137994 dringp->size += MMU_PAGESIZE;
172320ae46ebSha137994
172420ae46ebSha137994 dringp->status = LDC_UNBOUND;
172520ae46ebSha137994
172620ae46ebSha137994 /* allocate descriptor ring memory */
172720ae46ebSha137994 dringp->base = kmem_zalloc(dringp->size, KM_SLEEP);
172820ae46ebSha137994
172920ae46ebSha137994 /* initialize the desc ring lock */
173020ae46ebSha137994 mutex_init(&dringp->lock, NULL, MUTEX_DRIVER, NULL);
173120ae46ebSha137994
173220ae46ebSha137994 /* Add descriptor ring to the head of global list */
173320ae46ebSha137994 mutex_enter(&ldcssp->lock);
173420ae46ebSha137994 dringp->next = ldcssp->dring_list;
173520ae46ebSha137994 ldcssp->dring_list = dringp;
173620ae46ebSha137994 mutex_exit(&ldcssp->lock);
173720ae46ebSha137994
173820ae46ebSha137994 *dhandle = (ldc_dring_handle_t)dringp;
173920ae46ebSha137994
174020ae46ebSha137994 D1(DBG_ALL_LDCS, "ldc_mem_dring_create: dring allocated\n");
174120ae46ebSha137994
174220ae46ebSha137994 return (0);
174320ae46ebSha137994 }
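
/*
 * Example (sketch): allocating a ring of 128 descriptors. "my_desc_t"
 * is a hypothetical 64-byte descriptor type; any size that is a
 * multiple of 8 bytes is accepted. With 8K pages, 128 * 64 bytes is
 * exactly 0x2000, so the rounded ring size is a single page.
 *
 *	ldc_dring_handle_t dh;
 *	int rv;
 *
 *	rv = ldc_mem_dring_create(128, sizeof (my_desc_t), &dh);
 *	if (rv == 0) {
 *		... bind the ring, use it, unbind it ...
 *		(void) ldc_mem_dring_destroy(dh);
 *	}
 */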
174420ae46ebSha137994
174520ae46ebSha137994
174620ae46ebSha137994 /*
174720ae46ebSha137994 * Destroy a descriptor ring.
174820ae46ebSha137994 */
174920ae46ebSha137994 int
175020ae46ebSha137994 ldc_mem_dring_destroy(ldc_dring_handle_t dhandle)
175120ae46ebSha137994 {
175220ae46ebSha137994 ldc_dring_t *dringp;
175320ae46ebSha137994 ldc_dring_t *tmp_dringp;
175420ae46ebSha137994
175520ae46ebSha137994 D1(DBG_ALL_LDCS, "ldc_mem_dring_destroy: entered\n");
175620ae46ebSha137994
175720ae46ebSha137994 if (dhandle == NULL) {
175820ae46ebSha137994 DWARN(DBG_ALL_LDCS,
175920ae46ebSha137994 "ldc_mem_dring_destroy: invalid desc ring handle\n");
176020ae46ebSha137994 return (EINVAL);
176120ae46ebSha137994 }
176220ae46ebSha137994 dringp = (ldc_dring_t *)dhandle;
176320ae46ebSha137994
176420ae46ebSha137994 if (dringp->status == LDC_BOUND) {
176520ae46ebSha137994 DWARN(DBG_ALL_LDCS,
176620ae46ebSha137994 "ldc_mem_dring_destroy: desc ring is bound\n");
176720ae46ebSha137994 return (EACCES);
176820ae46ebSha137994 }
176920ae46ebSha137994
177020ae46ebSha137994 mutex_enter(&dringp->lock);
177120ae46ebSha137994 mutex_enter(&ldcssp->lock);
177220ae46ebSha137994
177320ae46ebSha137994 /* remove from linked list - if not bound */
177420ae46ebSha137994 tmp_dringp = ldcssp->dring_list;
177520ae46ebSha137994 if (tmp_dringp == dringp) {
177620ae46ebSha137994 ldcssp->dring_list = dringp->next;
177720ae46ebSha137994 dringp->next = NULL;
177820ae46ebSha137994
177920ae46ebSha137994 } else {
178020ae46ebSha137994 while (tmp_dringp != NULL) {
178120ae46ebSha137994 if (tmp_dringp->next == dringp) {
178220ae46ebSha137994 tmp_dringp->next = dringp->next;
178320ae46ebSha137994 dringp->next = NULL;
178420ae46ebSha137994 break;
178520ae46ebSha137994 }
178620ae46ebSha137994 tmp_dringp = tmp_dringp->next;
178720ae46ebSha137994 }
178820ae46ebSha137994 if (tmp_dringp == NULL) {
178920ae46ebSha137994 DWARN(DBG_ALL_LDCS,
179020ae46ebSha137994 "ldc_mem_dring_destroy: invalid descriptor\n");
179120ae46ebSha137994 mutex_exit(&ldcssp->lock);
179220ae46ebSha137994 mutex_exit(&dringp->lock);
179320ae46ebSha137994 return (EINVAL);
179420ae46ebSha137994 }
179520ae46ebSha137994 }
179620ae46ebSha137994
179720ae46ebSha137994 mutex_exit(&ldcssp->lock);
179820ae46ebSha137994
179920ae46ebSha137994 /* free the descriptor ring */
180020ae46ebSha137994 kmem_free(dringp->base, dringp->size);
180120ae46ebSha137994
180220ae46ebSha137994 mutex_exit(&dringp->lock);
180320ae46ebSha137994
180420ae46ebSha137994 /* destroy dring lock */
180520ae46ebSha137994 mutex_destroy(&dringp->lock);
180620ae46ebSha137994
180720ae46ebSha137994 /* free desc ring object */
180820ae46ebSha137994 kmem_free(dringp, sizeof (ldc_dring_t));
180920ae46ebSha137994
181020ae46ebSha137994 return (0);
181120ae46ebSha137994 }
181220ae46ebSha137994
181320ae46ebSha137994 /*
181420ae46ebSha137994 * Bind a previously allocated dring to a channel. The channel should
181520ae46ebSha137994 * be OPEN in order to bind the ring to the channel. Returns a
181620ae46ebSha137994 * descriptor ring cookie. The descriptor ring is exported for remote
181720ae46ebSha137994 * access by the client at the other end of the channel. An entry for
181820ae46ebSha137994 * the dring pages is stored in the map table (via ldc_mem_bind_handle).
181920ae46ebSha137994 */
182020ae46ebSha137994 int
182120ae46ebSha137994 ldc_mem_dring_bind(ldc_handle_t handle, ldc_dring_handle_t dhandle,
182220ae46ebSha137994 uint8_t mtype, uint8_t perm, ldc_mem_cookie_t *cookie, uint32_t *ccount)
182320ae46ebSha137994 {
182420ae46ebSha137994 int err;
182520ae46ebSha137994 ldc_chan_t *ldcp;
182620ae46ebSha137994 ldc_dring_t *dringp;
182720ae46ebSha137994 ldc_mem_handle_t mhandle;
182820ae46ebSha137994
182920ae46ebSha137994 /* check to see if the channel is initialized */
183020ae46ebSha137994 if (handle == NULL) {
183120ae46ebSha137994 DWARN(DBG_ALL_LDCS,
183220ae46ebSha137994 "ldc_mem_dring_bind: invalid channel handle\n");
183320ae46ebSha137994 return (EINVAL);
183420ae46ebSha137994 }
183520ae46ebSha137994 ldcp = (ldc_chan_t *)handle;
183620ae46ebSha137994
183720ae46ebSha137994 if (dhandle == NULL) {
183820ae46ebSha137994 DWARN(DBG_ALL_LDCS,
183920ae46ebSha137994 "ldc_mem_dring_bind: invalid desc ring handle\n");
184020ae46ebSha137994 return (EINVAL);
184120ae46ebSha137994 }
184220ae46ebSha137994 dringp = (ldc_dring_t *)dhandle;
184320ae46ebSha137994
184420ae46ebSha137994 if (cookie == NULL) {
184520ae46ebSha137994 DWARN(ldcp->id,
184620ae46ebSha137994 "ldc_mem_dring_bind: invalid cookie arg\n");
184720ae46ebSha137994 return (EINVAL);
184820ae46ebSha137994 }
184920ae46ebSha137994
1850bbfa0259Sha137994 /* ensure the mtype is valid */
1851bbfa0259Sha137994 if ((mtype & (LDC_SHADOW_MAP|LDC_DIRECT_MAP)) == 0) {
1852bbfa0259Sha137994 DWARN(ldcp->id, "ldc_mem_dring_bind: invalid map type\n");
1853bbfa0259Sha137994 return (EINVAL);
1854bbfa0259Sha137994 }
1855bbfa0259Sha137994
1856bbfa0259Sha137994 /* no need to bind as direct map if it's not HV supported or enabled */
1857bbfa0259Sha137994 if (!ldc_dring_shmem_hv_ok || !ldc_dring_shmem_enabled) {
1858bbfa0259Sha137994 mtype = LDC_SHADOW_MAP;
1859bbfa0259Sha137994 }
1860bbfa0259Sha137994
186120ae46ebSha137994 mutex_enter(&dringp->lock);
186220ae46ebSha137994
186320ae46ebSha137994 if (dringp->status == LDC_BOUND) {
186420ae46ebSha137994 DWARN(DBG_ALL_LDCS,
186520ae46ebSha137994 "ldc_mem_dring_bind: (0x%llx) descriptor ring is bound\n",
186620ae46ebSha137994 ldcp->id);
186720ae46ebSha137994 mutex_exit(&dringp->lock);
186820ae46ebSha137994 return (EINVAL);
186920ae46ebSha137994 }
187020ae46ebSha137994
187120ae46ebSha137994 if ((perm & LDC_MEM_RW) == 0) {
187220ae46ebSha137994 DWARN(DBG_ALL_LDCS,
187320ae46ebSha137994 "ldc_mem_dring_bind: invalid permissions\n");
187420ae46ebSha137994 mutex_exit(&dringp->lock);
187520ae46ebSha137994 return (EINVAL);
187620ae46ebSha137994 }
187720ae46ebSha137994
187820ae46ebSha137994 if ((mtype & (LDC_SHADOW_MAP|LDC_DIRECT_MAP|LDC_IO_MAP)) == 0) {
187920ae46ebSha137994 DWARN(DBG_ALL_LDCS, "ldc_mem_dring_bind: invalid type\n");
188020ae46ebSha137994 mutex_exit(&dringp->lock);
188120ae46ebSha137994 return (EINVAL);
188220ae46ebSha137994 }
188320ae46ebSha137994
188420ae46ebSha137994 dringp->ldcp = ldcp;
188520ae46ebSha137994
188620ae46ebSha137994 /* create a memory handle */
188720ae46ebSha137994 err = ldc_mem_alloc_handle(handle, &mhandle);
188820ae46ebSha137994 if (err || mhandle == NULL) {
188920ae46ebSha137994 DWARN(DBG_ALL_LDCS,
189020ae46ebSha137994 "ldc_mem_dring_bind: (0x%llx) error allocating mhandle\n",
189120ae46ebSha137994 ldcp->id);
189220ae46ebSha137994 mutex_exit(&dringp->lock);
189320ae46ebSha137994 return (err);
189420ae46ebSha137994 }
189520ae46ebSha137994 dringp->mhdl = mhandle;
189620ae46ebSha137994
189720ae46ebSha137994 /* bind the descriptor ring to channel */
1898bbfa0259Sha137994 err = i_ldc_mem_bind_handle(mhandle, dringp->base, dringp->size,
189920ae46ebSha137994 mtype, perm, cookie, ccount);
190020ae46ebSha137994 if (err) {
190120ae46ebSha137994 DWARN(ldcp->id,
190220ae46ebSha137994 "ldc_mem_dring_bind: (0x%llx) error binding mhandle\n",
190320ae46ebSha137994 ldcp->id);
190420ae46ebSha137994 mutex_exit(&dringp->lock);
190520ae46ebSha137994 return (err);
190620ae46ebSha137994 }
190720ae46ebSha137994
190820ae46ebSha137994 /*
190920ae46ebSha137994 * For now return error if we get more than one cookie
191020ae46ebSha137994 * FUTURE: Return multiple cookies ..
191120ae46ebSha137994 */
191220ae46ebSha137994 if (*ccount > 1) {
191320ae46ebSha137994 (void) ldc_mem_unbind_handle(mhandle);
191420ae46ebSha137994 (void) ldc_mem_free_handle(mhandle);
191520ae46ebSha137994
191620ae46ebSha137994 dringp->ldcp = NULL;
191720ae46ebSha137994 dringp->mhdl = NULL;
191820ae46ebSha137994 *ccount = 0;
191920ae46ebSha137994
192020ae46ebSha137994 mutex_exit(&dringp->lock);
192120ae46ebSha137994 return (EAGAIN);
192220ae46ebSha137994 }
192320ae46ebSha137994
192420ae46ebSha137994 /* Add descriptor ring to channel's exported dring list */
192520ae46ebSha137994 mutex_enter(&ldcp->exp_dlist_lock);
192620ae46ebSha137994 dringp->ch_next = ldcp->exp_dring_list;
192720ae46ebSha137994 ldcp->exp_dring_list = dringp;
192820ae46ebSha137994 mutex_exit(&ldcp->exp_dlist_lock);
192920ae46ebSha137994
193020ae46ebSha137994 dringp->status = LDC_BOUND;
193120ae46ebSha137994
193220ae46ebSha137994 mutex_exit(&dringp->lock);
193320ae46ebSha137994
193420ae46ebSha137994 return (0);
193520ae46ebSha137994 }
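
/*
 * Example (sketch): exporter-side setup. "chan" is an illustrative
 * handle for an open channel and "dh" a ring created with
 * ldc_mem_dring_create(). Since only a single cookie is currently
 * supported, one ldc_mem_cookie_t is sufficient; EAGAIN indicates the
 * ring spanned more than one cookie and was automatically unbound.
 *
 *	ldc_mem_cookie_t dcookie;
 *	uint32_t ccount;
 *	int rv;
 *
 *	rv = ldc_mem_dring_bind(chan, dh, LDC_SHADOW_MAP, LDC_MEM_RW,
 *	    &dcookie, &ccount);
 *	if (rv == 0) {
 *		... send dcookie and the ring geometry to the peer ...
 *	}
 *	...
 *	(void) ldc_mem_dring_unbind(dh);
 */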
193620ae46ebSha137994
193720ae46ebSha137994 /*
193820ae46ebSha137994 * Return the next cookie associated with the specified dring handle
193920ae46ebSha137994 */
194020ae46ebSha137994 int
194120ae46ebSha137994 ldc_mem_dring_nextcookie(ldc_dring_handle_t dhandle, ldc_mem_cookie_t *cookie)
194220ae46ebSha137994 {
194320ae46ebSha137994 int rv = 0;
194420ae46ebSha137994 ldc_dring_t *dringp;
194520ae46ebSha137994 ldc_chan_t *ldcp;
194620ae46ebSha137994
194720ae46ebSha137994 if (dhandle == NULL) {
194820ae46ebSha137994 DWARN(DBG_ALL_LDCS,
194920ae46ebSha137994 "ldc_mem_dring_nextcookie: invalid desc ring handle\n");
195020ae46ebSha137994 return (EINVAL);
195120ae46ebSha137994 }
195220ae46ebSha137994 dringp = (ldc_dring_t *)dhandle;
195320ae46ebSha137994 mutex_enter(&dringp->lock);
195420ae46ebSha137994
195520ae46ebSha137994 if (dringp->status != LDC_BOUND) {
195620ae46ebSha137994 DWARN(DBG_ALL_LDCS,
195720ae46ebSha137994 "ldc_mem_dring_nextcookie: descriptor ring 0x%llx "
195820ae46ebSha137994 "is not bound\n", dringp);
195920ae46ebSha137994 mutex_exit(&dringp->lock);
196020ae46ebSha137994 return (EINVAL);
196120ae46ebSha137994 }
196220ae46ebSha137994
196320ae46ebSha137994 ldcp = dringp->ldcp;
196420ae46ebSha137994
196520ae46ebSha137994 if (cookie == NULL) {
196620ae46ebSha137994 DWARN(ldcp->id,
196720ae46ebSha137994 "ldc_mem_dring_nextcookie:(0x%llx) invalid cookie arg\n",
196820ae46ebSha137994 ldcp->id);
196920ae46ebSha137994 mutex_exit(&dringp->lock);
197020ae46ebSha137994 return (EINVAL);
197120ae46ebSha137994 }
197220ae46ebSha137994
197320ae46ebSha137994 rv = ldc_mem_nextcookie((ldc_mem_handle_t)dringp->mhdl, cookie);
197420ae46ebSha137994 mutex_exit(&dringp->lock);
197520ae46ebSha137994
197620ae46ebSha137994 return (rv);
197720ae46ebSha137994 }
1978bbfa0259Sha137994
197920ae46ebSha137994 /*
198020ae46ebSha137994 * Unbind a previously bound dring from a channel.
198120ae46ebSha137994 */
198220ae46ebSha137994 int
198320ae46ebSha137994 ldc_mem_dring_unbind(ldc_dring_handle_t dhandle)
198420ae46ebSha137994 {
198520ae46ebSha137994 ldc_dring_t *dringp;
198620ae46ebSha137994 ldc_dring_t *tmp_dringp;
198720ae46ebSha137994 ldc_chan_t *ldcp;
198820ae46ebSha137994
198920ae46ebSha137994 if (dhandle == NULL) {
199020ae46ebSha137994 DWARN(DBG_ALL_LDCS,
199120ae46ebSha137994 "ldc_mem_dring_unbind: invalid desc ring handle\n");
199220ae46ebSha137994 return (EINVAL);
199320ae46ebSha137994 }
199420ae46ebSha137994 dringp = (ldc_dring_t *)dhandle;
199520ae46ebSha137994
199620ae46ebSha137994 mutex_enter(&dringp->lock);
199720ae46ebSha137994
199820ae46ebSha137994 if (dringp->status == LDC_UNBOUND) {
199920ae46ebSha137994 DWARN(DBG_ALL_LDCS,
200020ae46ebSha137994 "ldc_mem_dring_bind: descriptor ring 0x%llx is unbound\n",
200120ae46ebSha137994 dringp);
200220ae46ebSha137994 mutex_exit(&dringp->lock);
200320ae46ebSha137994 return (EINVAL);
200420ae46ebSha137994 }
200520ae46ebSha137994 ldcp = dringp->ldcp;
200620ae46ebSha137994
200720ae46ebSha137994 mutex_enter(&ldcp->exp_dlist_lock);
200820ae46ebSha137994
200920ae46ebSha137994 tmp_dringp = ldcp->exp_dring_list;
201020ae46ebSha137994 if (tmp_dringp == dringp) {
201120ae46ebSha137994 ldcp->exp_dring_list = dringp->ch_next;
201220ae46ebSha137994 dringp->ch_next = NULL;
201320ae46ebSha137994
201420ae46ebSha137994 } else {
201520ae46ebSha137994 while (tmp_dringp != NULL) {
201620ae46ebSha137994 if (tmp_dringp->ch_next == dringp) {
201720ae46ebSha137994 tmp_dringp->ch_next = dringp->ch_next;
201820ae46ebSha137994 dringp->ch_next = NULL;
201920ae46ebSha137994 break;
202020ae46ebSha137994 }
202120ae46ebSha137994 tmp_dringp = tmp_dringp->ch_next;
202220ae46ebSha137994 }
202320ae46ebSha137994 if (tmp_dringp == NULL) {
202420ae46ebSha137994 DWARN(DBG_ALL_LDCS,
202520ae46ebSha137994 "ldc_mem_dring_unbind: invalid descriptor\n");
202620ae46ebSha137994 mutex_exit(&ldcp->exp_dlist_lock);
202720ae46ebSha137994 mutex_exit(&dringp->lock);
202820ae46ebSha137994 return (EINVAL);
202920ae46ebSha137994 }
203020ae46ebSha137994 }
203120ae46ebSha137994
203220ae46ebSha137994 mutex_exit(&ldcp->exp_dlist_lock);
203320ae46ebSha137994
203420ae46ebSha137994 (void) ldc_mem_unbind_handle((ldc_mem_handle_t)dringp->mhdl);
203520ae46ebSha137994 (void) ldc_mem_free_handle((ldc_mem_handle_t)dringp->mhdl);
203620ae46ebSha137994
203720ae46ebSha137994 dringp->ldcp = NULL;
203820ae46ebSha137994 dringp->mhdl = NULL;
203920ae46ebSha137994 dringp->status = LDC_UNBOUND;
204020ae46ebSha137994
204120ae46ebSha137994 mutex_exit(&dringp->lock);
204220ae46ebSha137994
204320ae46ebSha137994 return (0);
204420ae46ebSha137994 }
204520ae46ebSha137994
2046bbfa0259Sha137994 #ifdef DEBUG
2047bbfa0259Sha137994 void
2048bbfa0259Sha137994 i_ldc_mem_inject_dring_clear(ldc_chan_t *ldcp)
2049bbfa0259Sha137994 {
2050bbfa0259Sha137994 ldc_dring_t *dp;
2051bbfa0259Sha137994 ldc_mhdl_t *mhdl;
2052bbfa0259Sha137994 ldc_mtbl_t *mtbl;
2053bbfa0259Sha137994 ldc_memseg_t *memseg;
2054bbfa0259Sha137994 uint64_t cookie_addr;
2055bbfa0259Sha137994 uint64_t pg_shift, pg_size_code;
2056bbfa0259Sha137994 int i, rv, retries;
2057bbfa0259Sha137994
2058bbfa0259Sha137994 /* has a map table been allocated? */
2059bbfa0259Sha137994 if ((mtbl = ldcp->mtbl) == NULL)
2060bbfa0259Sha137994 return;
2061bbfa0259Sha137994
2062bbfa0259Sha137994 /* lock the memory table - exclusive access to channel */
2063bbfa0259Sha137994 mutex_enter(&mtbl->lock);
2064bbfa0259Sha137994
2065bbfa0259Sha137994 /* lock the exported dring list */
2066bbfa0259Sha137994 mutex_enter(&ldcp->exp_dlist_lock);
2067bbfa0259Sha137994
2068bbfa0259Sha137994 for (dp = ldcp->exp_dring_list; dp != NULL; dp = dp->ch_next) {
2069bbfa0259Sha137994 if ((mhdl = (ldc_mhdl_t *)dp->mhdl) == NULL)
2070bbfa0259Sha137994 continue;
2071bbfa0259Sha137994
2072bbfa0259Sha137994 if ((memseg = mhdl->memseg) == NULL)
2073bbfa0259Sha137994 continue;
2074bbfa0259Sha137994
2075bbfa0259Sha137994 /* undo the pages exported */
2076bbfa0259Sha137994 for (i = 0; i < memseg->npages; i++) {
2077bbfa0259Sha137994
2078bbfa0259Sha137994 /* clear the entry from the table */
2079bbfa0259Sha137994 memseg->pages[i].mte->entry.ll = 0;
2080bbfa0259Sha137994
20815b7cb889Sha137994 pg_size_code = page_szc(MMU_PAGESIZE);
2082bbfa0259Sha137994 pg_shift = page_get_shift(pg_size_code);
2083bbfa0259Sha137994 cookie_addr = IDX2COOKIE(memseg->pages[i].index,
2084bbfa0259Sha137994 pg_size_code, pg_shift);
2085bbfa0259Sha137994
2086bbfa0259Sha137994 retries = 0;
2087bbfa0259Sha137994 do {
2088bbfa0259Sha137994 rv = hv_ldc_revoke(ldcp->id, cookie_addr,
2089bbfa0259Sha137994 memseg->pages[i].mte->cookie);
2090bbfa0259Sha137994
2091bbfa0259Sha137994 if (rv != H_EWOULDBLOCK)
2092bbfa0259Sha137994 break;
2093bbfa0259Sha137994
2094bbfa0259Sha137994 drv_usecwait(ldc_delay);
2095bbfa0259Sha137994
2096bbfa0259Sha137994 } while (retries++ < ldc_max_retries);
2097bbfa0259Sha137994
2098bbfa0259Sha137994 if (rv != 0) {
2099bbfa0259Sha137994 DWARN(ldcp->id,
2100bbfa0259Sha137994 "i_ldc_mem_inject_dring_clear(): "
2101bbfa0259Sha137994 "hv_ldc_revoke failed: "
2102bbfa0259Sha137994 "channel: 0x%lx, cookie addr: 0x%p,"
2103bbfa0259Sha137994 "cookie: 0x%lx, rv: %d",
2104bbfa0259Sha137994 ldcp->id, cookie_addr,
2105bbfa0259Sha137994 memseg->pages[i].mte->cookie, rv);
2106bbfa0259Sha137994 }
2107bbfa0259Sha137994
2108bbfa0259Sha137994 mtbl->num_avail++;
2109bbfa0259Sha137994 }
2110bbfa0259Sha137994 }
2111bbfa0259Sha137994
2112bbfa0259Sha137994 mutex_exit(&ldcp->exp_dlist_lock);
2113bbfa0259Sha137994 mutex_exit(&mtbl->lock);
2114bbfa0259Sha137994 }
2115bbfa0259Sha137994 #endif
2116bbfa0259Sha137994
211720ae46ebSha137994 /*
211820ae46ebSha137994 * Get information about the dring. The base address of the descriptor
211920ae46ebSha137994 * ring along with the type and permission are returned back.
212020ae46ebSha137994 */
212120ae46ebSha137994 int
212220ae46ebSha137994 ldc_mem_dring_info(ldc_dring_handle_t dhandle, ldc_mem_info_t *minfo)
212320ae46ebSha137994 {
212420ae46ebSha137994 ldc_dring_t *dringp;
212520ae46ebSha137994 int rv;
212620ae46ebSha137994
212720ae46ebSha137994 if (dhandle == NULL) {
212820ae46ebSha137994 DWARN(DBG_ALL_LDCS,
212920ae46ebSha137994 "ldc_mem_dring_info: invalid desc ring handle\n");
213020ae46ebSha137994 return (EINVAL);
213120ae46ebSha137994 }
213220ae46ebSha137994 dringp = (ldc_dring_t *)dhandle;
213320ae46ebSha137994
213420ae46ebSha137994 mutex_enter(&dringp->lock);
213520ae46ebSha137994
213620ae46ebSha137994 if (dringp->mhdl) {
213720ae46ebSha137994 rv = ldc_mem_info(dringp->mhdl, minfo);
213820ae46ebSha137994 if (rv) {
213920ae46ebSha137994 DWARN(DBG_ALL_LDCS,
214020ae46ebSha137994 "ldc_mem_dring_info: error reading mem info\n");
214120ae46ebSha137994 mutex_exit(&dringp->lock);
214220ae46ebSha137994 return (rv);
214320ae46ebSha137994 }
214420ae46ebSha137994 } else {
214520ae46ebSha137994 minfo->vaddr = dringp->base;
214620ae46ebSha137994 minfo->raddr = NULL;
214720ae46ebSha137994 minfo->status = dringp->status;
214820ae46ebSha137994 }
214920ae46ebSha137994
215020ae46ebSha137994 mutex_exit(&dringp->lock);
215120ae46ebSha137994
215220ae46ebSha137994 return (0);
215320ae46ebSha137994 }
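
/*
 * Example (sketch): querying a ring's mapping state. "dh" is an
 * illustrative dring handle.
 *
 *	ldc_mem_info_t minfo;
 *
 *	if (ldc_mem_dring_info(dh, &minfo) == 0)
 *		... minfo.vaddr is the ring base, minfo.status its state ...
 */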
215420ae46ebSha137994
215520ae46ebSha137994 /*
215620ae46ebSha137994 * Map an exported descriptor ring into the local address space. If the
215720ae46ebSha137994 * descriptor ring was exported for direct map access, a HV call is made
215820ae46ebSha137994 * to allocate a RA range. If the map is done via a shadow copy, local
215920ae46ebSha137994 * shadow memory is allocated.
216020ae46ebSha137994 */
216120ae46ebSha137994 int
216220ae46ebSha137994 ldc_mem_dring_map(ldc_handle_t handle, ldc_mem_cookie_t *cookie,
216320ae46ebSha137994 uint32_t ccount, uint32_t len, uint32_t dsize, uint8_t mtype,
216420ae46ebSha137994 ldc_dring_handle_t *dhandle)
216520ae46ebSha137994 {
216620ae46ebSha137994 int err;
216720ae46ebSha137994 ldc_chan_t *ldcp = (ldc_chan_t *)handle;
216820ae46ebSha137994 ldc_mem_handle_t mhandle;
216920ae46ebSha137994 ldc_dring_t *dringp;
217020ae46ebSha137994 size_t dring_size;
217120ae46ebSha137994
217220ae46ebSha137994 if (dhandle == NULL) {
217320ae46ebSha137994 DWARN(DBG_ALL_LDCS,
217420ae46ebSha137994 "ldc_mem_dring_map: invalid dhandle\n");
217520ae46ebSha137994 return (EINVAL);
217620ae46ebSha137994 }
217720ae46ebSha137994
217820ae46ebSha137994 /* check to see if the channel is initialized */
217920ae46ebSha137994 if (handle == NULL) {
218020ae46ebSha137994 DWARN(DBG_ALL_LDCS,
218120ae46ebSha137994 "ldc_mem_dring_map: invalid channel handle\n");
218220ae46ebSha137994 return (EINVAL);
218320ae46ebSha137994 }
218420ae46ebSha137994 ldcp = (ldc_chan_t *)handle;
218520ae46ebSha137994
218620ae46ebSha137994 if (cookie == NULL) {
218720ae46ebSha137994 DWARN(ldcp->id,
218820ae46ebSha137994 "ldc_mem_dring_map: (0x%llx) invalid cookie\n",
218920ae46ebSha137994 ldcp->id);
219020ae46ebSha137994 return (EINVAL);
219120ae46ebSha137994 }
219220ae46ebSha137994
219320ae46ebSha137994 /* FUTURE: For now we support only one cookie per dring */
219420ae46ebSha137994 ASSERT(ccount == 1);
219520ae46ebSha137994
219620ae46ebSha137994 if (cookie->size < (dsize * len)) {
219720ae46ebSha137994 DWARN(ldcp->id,
219820ae46ebSha137994 "ldc_mem_dring_map: (0x%llx) invalid dsize/len\n",
219920ae46ebSha137994 ldcp->id);
220020ae46ebSha137994 return (EINVAL);
220120ae46ebSha137994 }
220220ae46ebSha137994
2203bbfa0259Sha137994 /* ensure the mtype is valid */
2204bbfa0259Sha137994 if ((mtype & (LDC_SHADOW_MAP|LDC_DIRECT_MAP)) == 0) {
2205bbfa0259Sha137994 DWARN(ldcp->id, "ldc_mem_dring_map: invalid map type\n");
2206bbfa0259Sha137994 return (EINVAL);
2207bbfa0259Sha137994 }
2208bbfa0259Sha137994
2209bbfa0259Sha137994 /* do not attempt direct map if it's not HV supported or enabled */
2210bbfa0259Sha137994 if (!ldc_dring_shmem_hv_ok || !ldc_dring_shmem_enabled) {
2211bbfa0259Sha137994 mtype = LDC_SHADOW_MAP;
2212bbfa0259Sha137994 }
2213bbfa0259Sha137994
221420ae46ebSha137994 *dhandle = 0;
221520ae46ebSha137994
221620ae46ebSha137994 /* Allocate a dring structure */
221720ae46ebSha137994 dringp = kmem_zalloc(sizeof (ldc_dring_t), KM_SLEEP);
221820ae46ebSha137994
221920ae46ebSha137994 D1(ldcp->id,
222020ae46ebSha137994 "ldc_mem_dring_map: 0x%x,0x%x,0x%x,0x%llx,0x%llx\n",
222120ae46ebSha137994 mtype, len, dsize, cookie->addr, cookie->size);
222220ae46ebSha137994
222320ae46ebSha137994 /* Initialize dring */
222420ae46ebSha137994 dringp->length = len;
222520ae46ebSha137994 dringp->dsize = dsize;
222620ae46ebSha137994
222720ae46ebSha137994 /* round up to a multiple of page size */
222820ae46ebSha137994 dring_size = len * dsize;
222920ae46ebSha137994 dringp->size = (dring_size & MMU_PAGEMASK);
223020ae46ebSha137994 if (dring_size & MMU_PAGEOFFSET)
223120ae46ebSha137994 dringp->size += MMU_PAGESIZE;
223220ae46ebSha137994
223320ae46ebSha137994 dringp->ldcp = ldcp;
223420ae46ebSha137994
223520ae46ebSha137994 /* create a memory handle */
223620ae46ebSha137994 err = ldc_mem_alloc_handle(handle, &mhandle);
223720ae46ebSha137994 if (err || mhandle == NULL) {
223820ae46ebSha137994 DWARN(DBG_ALL_LDCS,
223920ae46ebSha137994 "ldc_mem_dring_map: cannot alloc hdl err=%d\n",
224020ae46ebSha137994 err);
224120ae46ebSha137994 kmem_free(dringp, sizeof (ldc_dring_t));
224220ae46ebSha137994 return (ENOMEM);
224320ae46ebSha137994 }
224420ae46ebSha137994
224520ae46ebSha137994 dringp->mhdl = mhandle;
224620ae46ebSha137994 dringp->base = NULL;
224720ae46ebSha137994
224820ae46ebSha137994 /* map the dring into local memory */
2249bbfa0259Sha137994 err = i_ldc_mem_map(mhandle, cookie, ccount, mtype, LDC_MEM_RW,
225020ae46ebSha137994 &(dringp->base), NULL);
225120ae46ebSha137994 if (err || dringp->base == NULL) {
2252bbfa0259Sha137994 DWARN(DBG_ALL_LDCS,
225320ae46ebSha137994 "ldc_mem_dring_map: cannot map desc ring err=%d\n", err);
225420ae46ebSha137994 (void) ldc_mem_free_handle(mhandle);
225520ae46ebSha137994 kmem_free(dringp, sizeof (ldc_dring_t));
225620ae46ebSha137994 return (ENOMEM);
225720ae46ebSha137994 }
225820ae46ebSha137994
225920ae46ebSha137994 /* initialize the desc ring lock */
226020ae46ebSha137994 mutex_init(&dringp->lock, NULL, MUTEX_DRIVER, NULL);
226120ae46ebSha137994
226220ae46ebSha137994 /* Add descriptor ring to channel's imported dring list */
226320ae46ebSha137994 mutex_enter(&ldcp->imp_dlist_lock);
226420ae46ebSha137994 dringp->ch_next = ldcp->imp_dring_list;
226520ae46ebSha137994 ldcp->imp_dring_list = dringp;
226620ae46ebSha137994 mutex_exit(&ldcp->imp_dlist_lock);
226720ae46ebSha137994
226820ae46ebSha137994 dringp->status = LDC_MAPPED;
226920ae46ebSha137994
227020ae46ebSha137994 *dhandle = (ldc_dring_handle_t)dringp;
227120ae46ebSha137994
227220ae46ebSha137994 return (0);
227320ae46ebSha137994 }
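
/*
 * Example (sketch): importer-side use of an exported ring. "chan",
 * "dcookie" and the ring geometry (128 entries of a hypothetical
 * 64-byte my_desc_t) mirror the exporter example above and are
 * illustrative only.
 *
 *	ldc_dring_handle_t peer_dh;
 *	ldc_mem_info_t minfo;
 *	my_desc_t *ring;
 *	int rv;
 *
 *	rv = ldc_mem_dring_map(chan, &dcookie, 1, 128,
 *	    sizeof (my_desc_t), LDC_SHADOW_MAP, &peer_dh);
 *	if (rv == 0 && ldc_mem_dring_info(peer_dh, &minfo) == 0) {
 *		ring = (my_desc_t *)minfo.vaddr;
 *		(void) ldc_mem_dring_acquire(peer_dh, 0, 127);
 *		... process ring[0] through ring[127] ...
 *		(void) ldc_mem_dring_release(peer_dh, 0, 127);
 *		(void) ldc_mem_dring_unmap(peer_dh);
 *	}
 */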
227420ae46ebSha137994
227520ae46ebSha137994 /*
227620ae46ebSha137994 * Unmap a descriptor ring. Free shadow memory (if any).
227720ae46ebSha137994 */
227820ae46ebSha137994 int
227920ae46ebSha137994 ldc_mem_dring_unmap(ldc_dring_handle_t dhandle)
228020ae46ebSha137994 {
228120ae46ebSha137994 ldc_dring_t *dringp;
228220ae46ebSha137994 ldc_dring_t *tmp_dringp;
228320ae46ebSha137994 ldc_chan_t *ldcp;
228420ae46ebSha137994
228520ae46ebSha137994 if (dhandle == NULL) {
228620ae46ebSha137994 DWARN(DBG_ALL_LDCS,
228720ae46ebSha137994 "ldc_mem_dring_unmap: invalid desc ring handle\n");
228820ae46ebSha137994 return (EINVAL);
228920ae46ebSha137994 }
229020ae46ebSha137994 dringp = (ldc_dring_t *)dhandle;
229120ae46ebSha137994
229220ae46ebSha137994 if (dringp->status != LDC_MAPPED) {
229320ae46ebSha137994 DWARN(DBG_ALL_LDCS,
229420ae46ebSha137994 "ldc_mem_dring_unmap: not a mapped desc ring\n");
229520ae46ebSha137994 return (EINVAL);
229620ae46ebSha137994 }
229720ae46ebSha137994
229820ae46ebSha137994 mutex_enter(&dringp->lock);
229920ae46ebSha137994
230020ae46ebSha137994 ldcp = dringp->ldcp;
230120ae46ebSha137994
230220ae46ebSha137994 mutex_enter(&ldcp->imp_dlist_lock);
230320ae46ebSha137994
230420ae46ebSha137994 /* find and unlink the desc ring from channel import list */
230520ae46ebSha137994 tmp_dringp = ldcp->imp_dring_list;
230620ae46ebSha137994 if (tmp_dringp == dringp) {
230720ae46ebSha137994 ldcp->imp_dring_list = dringp->ch_next;
230820ae46ebSha137994 dringp->ch_next = NULL;
230920ae46ebSha137994
231020ae46ebSha137994 } else {
231120ae46ebSha137994 while (tmp_dringp != NULL) {
231220ae46ebSha137994 if (tmp_dringp->ch_next == dringp) {
231320ae46ebSha137994 tmp_dringp->ch_next = dringp->ch_next;
231420ae46ebSha137994 dringp->ch_next = NULL;
231520ae46ebSha137994 break;
231620ae46ebSha137994 }
231720ae46ebSha137994 tmp_dringp = tmp_dringp->ch_next;
231820ae46ebSha137994 }
231920ae46ebSha137994 if (tmp_dringp == NULL) {
232020ae46ebSha137994 DWARN(DBG_ALL_LDCS,
232120ae46ebSha137994 "ldc_mem_dring_unmap: invalid descriptor\n");
232220ae46ebSha137994 mutex_exit(&ldcp->imp_dlist_lock);
232320ae46ebSha137994 mutex_exit(&dringp->lock);
232420ae46ebSha137994 return (EINVAL);
232520ae46ebSha137994 }
232620ae46ebSha137994 }
232720ae46ebSha137994
232820ae46ebSha137994 mutex_exit(&ldcp->imp_dlist_lock);
232920ae46ebSha137994
233020ae46ebSha137994 /* do a LDC memory handle unmap and free */
233120ae46ebSha137994 (void) ldc_mem_unmap(dringp->mhdl);
233220ae46ebSha137994 (void) ldc_mem_free_handle((ldc_mem_handle_t)dringp->mhdl);
233320ae46ebSha137994
233420ae46ebSha137994 dringp->status = 0;
233520ae46ebSha137994 dringp->ldcp = NULL;
233620ae46ebSha137994
233720ae46ebSha137994 mutex_exit(&dringp->lock);
233820ae46ebSha137994
233920ae46ebSha137994 /* destroy dring lock */
234020ae46ebSha137994 mutex_destroy(&dringp->lock);
234120ae46ebSha137994
234220ae46ebSha137994 /* free desc ring object */
234320ae46ebSha137994 kmem_free(dringp, sizeof (ldc_dring_t));
234420ae46ebSha137994
234520ae46ebSha137994 return (0);
234620ae46ebSha137994 }
234720ae46ebSha137994
234820ae46ebSha137994 /*
234920ae46ebSha137994 * Internal entry point for descriptor ring access entry consistency
235020ae46ebSha137994 * semantics. Acquire copies the contents of the remote descriptor ring
235120ae46ebSha137994 * into the local shadow copy. The release operation copies the local
235220ae46ebSha137994 * contents into the remote dring. The start and end locations specify
235320ae46ebSha137994 * bounds for the entries being synchronized.
235420ae46ebSha137994 */
235520ae46ebSha137994 static int
235620ae46ebSha137994 i_ldc_dring_acquire_release(ldc_dring_handle_t dhandle,
235720ae46ebSha137994 uint8_t direction, uint64_t start, uint64_t end)
235820ae46ebSha137994 {
235920ae46ebSha137994 int err;
236020ae46ebSha137994 ldc_dring_t *dringp;
236120ae46ebSha137994 ldc_chan_t *ldcp;
2362bbfa0259Sha137994 ldc_mhdl_t *mhdl;
236320ae46ebSha137994 uint64_t soff;
236420ae46ebSha137994 size_t copy_size;
236520ae46ebSha137994
236620ae46ebSha137994 if (dhandle == NULL) {
236720ae46ebSha137994 DWARN(DBG_ALL_LDCS,
236820ae46ebSha137994 "i_ldc_dring_acquire_release: invalid desc ring handle\n");
236920ae46ebSha137994 return (EINVAL);
237020ae46ebSha137994 }
237120ae46ebSha137994 dringp = (ldc_dring_t *)dhandle;
237220ae46ebSha137994 mutex_enter(&dringp->lock);
237320ae46ebSha137994
237420ae46ebSha137994 if (dringp->status != LDC_MAPPED || dringp->ldcp == NULL) {
237520ae46ebSha137994 DWARN(DBG_ALL_LDCS,
237620ae46ebSha137994 "i_ldc_dring_acquire_release: not a mapped desc ring\n");
237720ae46ebSha137994 mutex_exit(&dringp->lock);
237820ae46ebSha137994 return (EINVAL);
237920ae46ebSha137994 }
238020ae46ebSha137994
238120ae46ebSha137994 if (start >= dringp->length || end >= dringp->length) {
238220ae46ebSha137994 DWARN(DBG_ALL_LDCS,
238320ae46ebSha137994 "i_ldc_dring_acquire_release: index out of range\n");
238420ae46ebSha137994 mutex_exit(&dringp->lock);
238520ae46ebSha137994 return (EINVAL);
238620ae46ebSha137994 }
238720ae46ebSha137994
2388bbfa0259Sha137994 mhdl = (ldc_mhdl_t *)dringp->mhdl;
2389bbfa0259Sha137994 if (mhdl == NULL) {
2390bbfa0259Sha137994 DWARN(DBG_ALL_LDCS,
2391bbfa0259Sha137994 "i_ldc_dring_acquire_release: invalid memory handle\n");
2392bbfa0259Sha137994 mutex_exit(&dringp->lock);
2393bbfa0259Sha137994 return (EINVAL);
2394bbfa0259Sha137994 }
2395bbfa0259Sha137994
2396bbfa0259Sha137994 if (mhdl->mtype != LDC_SHADOW_MAP) {
2397bbfa0259Sha137994 DWARN(DBG_ALL_LDCS,
2398bbfa0259Sha137994 "i_ldc_dring_acquire_release: invalid mtype: %d\n",
2399bbfa0259Sha137994 mhdl->mtype);
2400bbfa0259Sha137994 mutex_exit(&dringp->lock);
2401bbfa0259Sha137994 return (EINVAL);
2402bbfa0259Sha137994 }
2403bbfa0259Sha137994
240420ae46ebSha137994 /* get the channel handle */
240520ae46ebSha137994 ldcp = dringp->ldcp;
240620ae46ebSha137994
240720ae46ebSha137994 copy_size = (start <= end) ? (((end - start) + 1) * dringp->dsize) :
240820ae46ebSha137994 ((dringp->length - start) * dringp->dsize);
240920ae46ebSha137994
241020ae46ebSha137994 /* Calculate the relative offset for the first desc */
241120ae46ebSha137994 soff = (start * dringp->dsize);
241220ae46ebSha137994
241320ae46ebSha137994 /* copy to/from remote from/to local memory */
241420ae46ebSha137994 D1(ldcp->id, "i_ldc_dring_acquire_release: c1 off=0x%llx sz=0x%llx\n",
241520ae46ebSha137994 soff, copy_size);
241620ae46ebSha137994 err = i_ldc_mem_acquire_release((ldc_mem_handle_t)dringp->mhdl,
241720ae46ebSha137994 direction, soff, copy_size);
241820ae46ebSha137994 if (err) {
241920ae46ebSha137994 DWARN(ldcp->id,
242020ae46ebSha137994 "i_ldc_dring_acquire_release: copy failed\n");
242120ae46ebSha137994 mutex_exit(&dringp->lock);
242220ae46ebSha137994 return (err);
242320ae46ebSha137994 }
242420ae46ebSha137994
242520ae46ebSha137994 /* do the balance */
242620ae46ebSha137994 if (start > end) {
242720ae46ebSha137994 copy_size = ((end + 1) * dringp->dsize);
242820ae46ebSha137994 soff = 0;
242920ae46ebSha137994
243020ae46ebSha137994 /* copy to/from remote from/to local memory */
243120ae46ebSha137994 D1(ldcp->id, "i_ldc_dring_acquire_release: c2 "
243220ae46ebSha137994 "off=0x%llx sz=0x%llx\n", soff, copy_size);
243320ae46ebSha137994 err = i_ldc_mem_acquire_release((ldc_mem_handle_t)dringp->mhdl,
243420ae46ebSha137994 direction, soff, copy_size);
243520ae46ebSha137994 if (err) {
243620ae46ebSha137994 DWARN(ldcp->id,
243720ae46ebSha137994 "i_ldc_dring_acquire_release: copy failed\n");
243820ae46ebSha137994 mutex_exit(&dringp->lock);
243920ae46ebSha137994 return (err);
244020ae46ebSha137994 }
244120ae46ebSha137994 }
244220ae46ebSha137994
244320ae46ebSha137994 mutex_exit(&dringp->lock);
244420ae46ebSha137994
244520ae46ebSha137994 return (0);
244620ae46ebSha137994 }
244720ae46ebSha137994
244820ae46ebSha137994 /*
244920ae46ebSha137994 * Ensure that the contents in the local dring are consistent
245020ae46ebSha137994 * with the contents of the remote dring
245120ae46ebSha137994 */
245220ae46ebSha137994 int
245320ae46ebSha137994 ldc_mem_dring_acquire(ldc_dring_handle_t dhandle, uint64_t start, uint64_t end)
245420ae46ebSha137994 {
245520ae46ebSha137994 return (i_ldc_dring_acquire_release(dhandle, LDC_COPY_IN, start, end));
245620ae46ebSha137994 }
245720ae46ebSha137994
245820ae46ebSha137994 /*
245920ae46ebSha137994 * Ensure that the contents in the remote dring are consistent
246020ae46ebSha137994 * with the contents of the local dring
246120ae46ebSha137994 */
246220ae46ebSha137994 int
246320ae46ebSha137994 ldc_mem_dring_release(ldc_dring_handle_t dhandle, uint64_t start, uint64_t end)
246420ae46ebSha137994 {
246520ae46ebSha137994 return (i_ldc_dring_acquire_release(dhandle, LDC_COPY_OUT, start, end));
246620ae46ebSha137994 }
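
/*
 * Example (sketch): synchronizing a range that wraps around the end of
 * the ring. For a 128-entry ring, acquiring start = 126, end = 2 pulls
 * in entries 126-127 with the first copy and entries 0-2 with the
 * balance copy; ldc_mem_dring_release() handles wrapped ranges the
 * same way. "peer_dh" is the illustrative imported ring handle from
 * the example above.
 *
 *	(void) ldc_mem_dring_acquire(peer_dh, 126, 2);
 */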
2467