xref: /titanic_53/usr/src/uts/sun4v/io/vds.c (revision 4bac220845f606f60663ed6f3a2b88caa00ae87e)
11ae08745Sheppo /*
21ae08745Sheppo  * CDDL HEADER START
31ae08745Sheppo  *
41ae08745Sheppo  * The contents of this file are subject to the terms of the
51ae08745Sheppo  * Common Development and Distribution License (the "License").
61ae08745Sheppo  * You may not use this file except in compliance with the License.
71ae08745Sheppo  *
81ae08745Sheppo  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
91ae08745Sheppo  * or http://www.opensolaris.org/os/licensing.
101ae08745Sheppo  * See the License for the specific language governing permissions
111ae08745Sheppo  * and limitations under the License.
121ae08745Sheppo  *
131ae08745Sheppo  * When distributing Covered Code, include this CDDL HEADER in each
141ae08745Sheppo  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
151ae08745Sheppo  * If applicable, add the following below this CDDL HEADER, with the
161ae08745Sheppo  * fields enclosed by brackets "[]" replaced with your own identifying
171ae08745Sheppo  * information: Portions Copyright [yyyy] [name of copyright owner]
181ae08745Sheppo  *
191ae08745Sheppo  * CDDL HEADER END
201ae08745Sheppo  */
211ae08745Sheppo 
221ae08745Sheppo /*
231ae08745Sheppo  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
241ae08745Sheppo  * Use is subject to license terms.
251ae08745Sheppo  */
261ae08745Sheppo 
271ae08745Sheppo #pragma ident	"%Z%%M%	%I%	%E% SMI"
281ae08745Sheppo 
291ae08745Sheppo /*
301ae08745Sheppo  * Virtual disk server
311ae08745Sheppo  */
321ae08745Sheppo 
331ae08745Sheppo 
341ae08745Sheppo #include <sys/types.h>
351ae08745Sheppo #include <sys/conf.h>
36*4bac2208Snarayan #include <sys/crc32.h>
371ae08745Sheppo #include <sys/ddi.h>
381ae08745Sheppo #include <sys/dkio.h>
391ae08745Sheppo #include <sys/file.h>
401ae08745Sheppo #include <sys/mdeg.h>
411ae08745Sheppo #include <sys/modhash.h>
421ae08745Sheppo #include <sys/note.h>
431ae08745Sheppo #include <sys/pathname.h>
441ae08745Sheppo #include <sys/sunddi.h>
451ae08745Sheppo #include <sys/sunldi.h>
461ae08745Sheppo #include <sys/sysmacros.h>
471ae08745Sheppo #include <sys/vio_common.h>
481ae08745Sheppo #include <sys/vdsk_mailbox.h>
491ae08745Sheppo #include <sys/vdsk_common.h>
501ae08745Sheppo #include <sys/vtoc.h>
511ae08745Sheppo 
521ae08745Sheppo 
531ae08745Sheppo /* Virtual disk server initialization flags */
54d10e4ef2Snarayan #define	VDS_LDI			0x01
55d10e4ef2Snarayan #define	VDS_MDEG		0x02
561ae08745Sheppo 
571ae08745Sheppo /* Virtual disk server tunable parameters */
581ae08745Sheppo #define	VDS_LDC_RETRIES		3
591ae08745Sheppo #define	VDS_NCHAINS		32
601ae08745Sheppo 
611ae08745Sheppo /* Identification parameters for MD, synthetic dkio(7i) structures, etc. */
621ae08745Sheppo #define	VDS_NAME		"virtual-disk-server"
631ae08745Sheppo 
641ae08745Sheppo #define	VD_NAME			"vd"
651ae08745Sheppo #define	VD_VOLUME_NAME		"vdisk"
661ae08745Sheppo #define	VD_ASCIILABEL		"Virtual Disk"
671ae08745Sheppo 
681ae08745Sheppo #define	VD_CHANNEL_ENDPOINT	"channel-endpoint"
691ae08745Sheppo #define	VD_ID_PROP		"id"
701ae08745Sheppo #define	VD_BLOCK_DEVICE_PROP	"vds-block-device"
711ae08745Sheppo 
721ae08745Sheppo /* Virtual disk initialization flags */
731ae08745Sheppo #define	VD_LOCKING		0x01
74d10e4ef2Snarayan #define	VD_LDC			0x02
75d10e4ef2Snarayan #define	VD_DRING		0x04
76d10e4ef2Snarayan #define	VD_SID			0x08
77d10e4ef2Snarayan #define	VD_SEQ_NUM		0x10
781ae08745Sheppo 
791ae08745Sheppo /* Flags for opening/closing backing devices via LDI */
801ae08745Sheppo #define	VD_OPEN_FLAGS		(FEXCL | FREAD | FWRITE)
811ae08745Sheppo 
821ae08745Sheppo /*
831ae08745Sheppo  * By Solaris convention, slice/partition 2 represents the entire disk;
841ae08745Sheppo  * unfortunately, this convention does not appear to be codified.
851ae08745Sheppo  */
861ae08745Sheppo #define	VD_ENTIRE_DISK_SLICE	2
871ae08745Sheppo 
881ae08745Sheppo /* Return a cpp token as a string */
891ae08745Sheppo #define	STRINGIZE(token)	#token
901ae08745Sheppo 
911ae08745Sheppo /*
921ae08745Sheppo  * Print a message prefixed with the current function name to the message log
931ae08745Sheppo  * (and optionally to the console for verbose boots); these macros use cpp's
941ae08745Sheppo  * concatenation of string literals and C99 variable-length-argument-list
951ae08745Sheppo  * macros
961ae08745Sheppo  */
971ae08745Sheppo #define	PRN(...)	_PRN("?%s():  "__VA_ARGS__, "")
981ae08745Sheppo #define	_PRN(format, ...)					\
991ae08745Sheppo 	cmn_err(CE_CONT, format"%s", __func__, __VA_ARGS__)
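/*
 * Illustrative expansion (editorial, not in the original source):  a call
 * such as PRN("bad slice %u", slice) passes through _PRN() and becomes
 *
 *	cmn_err(CE_CONT, "?%s():  " "bad slice %u" "%s", __func__, slice, "");
 *
 * so every message is prefixed with the calling function's name, and the
 * leading '?' sends it to the console only on verbose boots.
 */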
1001ae08745Sheppo 
1011ae08745Sheppo /* Return a pointer to the "i"th vdisk dring element */
1021ae08745Sheppo #define	VD_DRING_ELEM(i)	((vd_dring_entry_t *)(void *)	\
1031ae08745Sheppo 	    (vd->dring + (i)*vd->descriptor_size))
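/*
 * Editorial note:  descriptors are laid out contiguously in the mapped dring
 * buffer, so element "i" starts at byte offset (i * vd->descriptor_size);
 * for example, with a 128-byte descriptor size, VD_DRING_ELEM(3) points at
 * vd->dring + 384.
 */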
1041ae08745Sheppo 
1051ae08745Sheppo /* Return the virtual disk client's type as a string (for use in messages) */
1061ae08745Sheppo #define	VD_CLIENT(vd)							\
1071ae08745Sheppo 	(((vd)->xfer_mode == VIO_DESC_MODE) ? "in-band client" :	\
1081ae08745Sheppo 	    (((vd)->xfer_mode == VIO_DRING_MODE) ? "dring client" :	\
1091ae08745Sheppo 		(((vd)->xfer_mode == 0) ? "null client" :		\
1101ae08745Sheppo 		    "unsupported client")))
1111ae08745Sheppo 
1121ae08745Sheppo /* Debugging macros */
1131ae08745Sheppo #ifdef DEBUG
1141ae08745Sheppo #define	PR0 if (vd_msglevel > 0)	PRN
1151ae08745Sheppo #define	PR1 if (vd_msglevel > 1)	PRN
1161ae08745Sheppo #define	PR2 if (vd_msglevel > 2)	PRN
1171ae08745Sheppo 
1181ae08745Sheppo #define	VD_DUMP_DRING_ELEM(elem)					\
1191ae08745Sheppo 	PRN("dst:%x op:%x st:%u nb:%lx addr:%lx ncook:%u\n",		\
1201ae08745Sheppo 	    elem->hdr.dstate,						\
1211ae08745Sheppo 	    elem->payload.operation,					\
1221ae08745Sheppo 	    elem->payload.status,					\
1231ae08745Sheppo 	    elem->payload.nbytes,					\
1241ae08745Sheppo 	    elem->payload.addr,						\
1251ae08745Sheppo 	    elem->payload.ncookies);
1261ae08745Sheppo 
1271ae08745Sheppo #else	/* !DEBUG */
1281ae08745Sheppo #define	PR0(...)
1291ae08745Sheppo #define	PR1(...)
1301ae08745Sheppo #define	PR2(...)
1311ae08745Sheppo 
1321ae08745Sheppo #define	VD_DUMP_DRING_ELEM(elem)
1331ae08745Sheppo 
1341ae08745Sheppo #endif	/* DEBUG */
1351ae08745Sheppo 
1361ae08745Sheppo 
137d10e4ef2Snarayan /*
138d10e4ef2Snarayan  * Soft state structure for a vds instance
139d10e4ef2Snarayan  */
1401ae08745Sheppo typedef struct vds {
1411ae08745Sheppo 	uint_t		initialized;	/* driver inst initialization flags */
1421ae08745Sheppo 	dev_info_t	*dip;		/* driver inst devinfo pointer */
1431ae08745Sheppo 	ldi_ident_t	ldi_ident;	/* driver's identifier for LDI */
1441ae08745Sheppo 	mod_hash_t	*vd_table;	/* table of virtual disks served */
1451ae08745Sheppo 	mdeg_handle_t	mdeg;		/* handle for MDEG operations  */
1461ae08745Sheppo } vds_t;
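/*
 * Editorial sketch (assumes the usual DDI soft-state pattern, with
 * "vds_state" initialized via ddi_soft_state_init() and an instance
 * allocated at attach time):  a vds_t is typically looked up in an entry
 * point as follows:
 *
 *	int	instance = ddi_get_instance(dip);
 *	vds_t	*vds = ddi_get_soft_state(vds_state, instance);
 *
 *	if (vds == NULL)
 *		return (DDI_FAILURE);
 */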
1471ae08745Sheppo 
148d10e4ef2Snarayan /*
149d10e4ef2Snarayan  * Types of descriptor-processing tasks
150d10e4ef2Snarayan  */
151d10e4ef2Snarayan typedef enum vd_task_type {
152d10e4ef2Snarayan 	VD_NONFINAL_RANGE_TASK,	/* task for intermediate descriptor in range */
153d10e4ef2Snarayan 	VD_FINAL_RANGE_TASK,	/* task for last in a range of descriptors */
154d10e4ef2Snarayan } vd_task_type_t;
155d10e4ef2Snarayan 
156d10e4ef2Snarayan /*
157d10e4ef2Snarayan  * Structure describing the task for processing a descriptor
158d10e4ef2Snarayan  */
159d10e4ef2Snarayan typedef struct vd_task {
160d10e4ef2Snarayan 	struct vd		*vd;		/* vd instance task is for */
161d10e4ef2Snarayan 	vd_task_type_t		type;		/* type of descriptor task */
162d10e4ef2Snarayan 	int			index;		/* dring elem index for task */
163d10e4ef2Snarayan 	vio_msg_t		*msg;		/* VIO message task is for */
164d10e4ef2Snarayan 	size_t			msglen;		/* length of message content */
165d10e4ef2Snarayan 	size_t			msgsize;	/* size of message buffer */
166d10e4ef2Snarayan 	vd_dring_payload_t	*request;	/* request task will perform */
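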
167d10e4ef2Snarayan 	struct buf		buf;		/* buf(9S) for I/O request */
168*4bac2208Snarayan 	ldc_mem_handle_t	mhdl;		/* task memory handle */
169d10e4ef2Snarayan } vd_task_t;
170d10e4ef2Snarayan 
171d10e4ef2Snarayan /*
172d10e4ef2Snarayan  * Soft state structure for a virtual disk instance
173d10e4ef2Snarayan  */
1741ae08745Sheppo typedef struct vd {
1751ae08745Sheppo 	uint_t			initialized;	/* vdisk initialization flags */
1761ae08745Sheppo 	vds_t			*vds;		/* server for this vdisk */
177d10e4ef2Snarayan 	ddi_taskq_t		*startq;	/* queue for I/O start tasks */
178d10e4ef2Snarayan 	ddi_taskq_t		*completionq;	/* queue for completion tasks */
1791ae08745Sheppo 	ldi_handle_t		ldi_handle[V_NUMPAR];	/* LDI slice handles */
1801ae08745Sheppo 	dev_t			dev[V_NUMPAR];	/* dev numbers for slices */
181e1ebb9ecSlm66018 	uint_t			nslices;	/* number of slices */
1821ae08745Sheppo 	size_t			vdisk_size;	/* number of blocks in vdisk */
1831ae08745Sheppo 	vd_disk_type_t		vdisk_type;	/* slice or entire disk */
184*4bac2208Snarayan 	vd_disk_label_t		vdisk_label;	/* EFI or VTOC label */
185e1ebb9ecSlm66018 	ushort_t		max_xfer_sz;	/* max xfer size in DEV_BSIZE */
1861ae08745Sheppo 	boolean_t		pseudo;		/* underlying pseudo dev */
187*4bac2208Snarayan 	struct dk_efi		dk_efi;		/* synthetic for slice type */
1881ae08745Sheppo 	struct dk_geom		dk_geom;	/* synthetic for slice type */
1891ae08745Sheppo 	struct vtoc		vtoc;		/* synthetic for slice type */
1901ae08745Sheppo 	ldc_status_t		ldc_state;	/* LDC connection state */
1911ae08745Sheppo 	ldc_handle_t		ldc_handle;	/* handle for LDC comm */
1921ae08745Sheppo 	size_t			max_msglen;	/* largest LDC message len */
1931ae08745Sheppo 	vd_state_t		state;		/* client handshake state */
1941ae08745Sheppo 	uint8_t			xfer_mode;	/* transfer mode with client */
1951ae08745Sheppo 	uint32_t		sid;		/* client's session ID */
1961ae08745Sheppo 	uint64_t		seq_num;	/* message sequence number */
1971ae08745Sheppo 	uint64_t		dring_ident;	/* identifier of dring */
1981ae08745Sheppo 	ldc_dring_handle_t	dring_handle;	/* handle for dring ops */
1991ae08745Sheppo 	uint32_t		descriptor_size;	/* num bytes in desc */
2001ae08745Sheppo 	uint32_t		dring_len;	/* number of dring elements */
2011ae08745Sheppo 	caddr_t			dring;		/* address of dring */
202d10e4ef2Snarayan 	vd_task_t		inband_task;	/* task for inband descriptor */
203d10e4ef2Snarayan 	vd_task_t		*dring_task;	/* tasks for dring elements */
204d10e4ef2Snarayan 
205d10e4ef2Snarayan 	kmutex_t		lock;		/* protects variables below */
206d10e4ef2Snarayan 	boolean_t		enabled;	/* is vdisk enabled? */
207d10e4ef2Snarayan 	boolean_t		reset_state;	/* reset connection state? */
208d10e4ef2Snarayan 	boolean_t		reset_ldc;	/* reset LDC channel? */
2091ae08745Sheppo } vd_t;
2101ae08745Sheppo 
2111ae08745Sheppo typedef struct vds_operation {
2121ae08745Sheppo 	uint8_t	operation;
213d10e4ef2Snarayan 	int	(*start)(vd_task_t *task);
214d10e4ef2Snarayan 	void	(*complete)(void *arg);
2151ae08745Sheppo } vds_operation_t;
2161ae08745Sheppo 
2170a55fbb7Slm66018 typedef struct vd_ioctl {
2180a55fbb7Slm66018 	uint8_t		operation;		/* vdisk operation */
2190a55fbb7Slm66018 	const char	*operation_name;	/* vdisk operation name */
2200a55fbb7Slm66018 	size_t		nbytes;			/* size of operation buffer */
2210a55fbb7Slm66018 	int		cmd;			/* corresponding ioctl cmd */
2220a55fbb7Slm66018 	const char	*cmd_name;		/* ioctl cmd name */
2230a55fbb7Slm66018 	void		*arg;			/* ioctl cmd argument */
2240a55fbb7Slm66018 	/* convert input vd_buf to output ioctl_arg */
2250a55fbb7Slm66018 	void		(*copyin)(void *vd_buf, void *ioctl_arg);
2260a55fbb7Slm66018 	/* convert input ioctl_arg to output vd_buf */
2270a55fbb7Slm66018 	void		(*copyout)(void *ioctl_arg, void *vd_buf);
2280a55fbb7Slm66018 } vd_ioctl_t;
2290a55fbb7Slm66018 
2300a55fbb7Slm66018 /* Define trivial copyin/copyout conversion function flag */
2310a55fbb7Slm66018 #define	VD_IDENTITY	((void (*)(void *, void *))-1)
2321ae08745Sheppo 
2331ae08745Sheppo 
2341ae08745Sheppo static int	vds_ldc_retries = VDS_LDC_RETRIES;
2351ae08745Sheppo static void	*vds_state;
2361ae08745Sheppo static uint64_t	vds_operations;	/* see vds_operation[] definition below */
2371ae08745Sheppo 
2381ae08745Sheppo static int	vd_open_flags = VD_OPEN_FLAGS;
2391ae08745Sheppo 
2400a55fbb7Slm66018 /*
2410a55fbb7Slm66018  * Supported protocol version pairs, from highest (newest) to lowest (oldest)
2420a55fbb7Slm66018  *
2430a55fbb7Slm66018  * Each supported major version should appear only once, paired with (and only
2440a55fbb7Slm66018  * with) its highest supported minor version number (as the protocol requires
2450a55fbb7Slm66018  * supporting all lower minor version numbers as well)
2460a55fbb7Slm66018  */
2470a55fbb7Slm66018 static const vio_ver_t	vds_version[] = {{1, 0}};
2480a55fbb7Slm66018 static const size_t	vds_num_versions =
2490a55fbb7Slm66018     sizeof (vds_version)/sizeof (vds_version[0]);
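/*
 * Editorial example:  if a hypothetical 2.x protocol revision were supported
 * in addition to 1.0, each major version would still appear exactly once,
 * newest first, paired with its highest supported minor version:
 *
 *	static const vio_ver_t	vds_version[] = {{2, 1}, {1, 0}};
 */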
2500a55fbb7Slm66018 
2511ae08745Sheppo #ifdef DEBUG
2521ae08745Sheppo static int	vd_msglevel;
2531ae08745Sheppo #endif /* DEBUG */
2541ae08745Sheppo 
2551ae08745Sheppo 
2561ae08745Sheppo static int
257d10e4ef2Snarayan vd_start_bio(vd_task_t *task)
2581ae08745Sheppo {
259*4bac2208Snarayan 	int			rv, status = 0;
260d10e4ef2Snarayan 	vd_t			*vd		= task->vd;
261d10e4ef2Snarayan 	vd_dring_payload_t	*request	= task->request;
262d10e4ef2Snarayan 	struct buf		*buf		= &task->buf;
263*4bac2208Snarayan 	uint8_t			mtype;
2641ae08745Sheppo 
265d10e4ef2Snarayan 
266d10e4ef2Snarayan 	ASSERT(vd != NULL);
267d10e4ef2Snarayan 	ASSERT(request != NULL);
268d10e4ef2Snarayan 	ASSERT(request->slice < vd->nslices);
269d10e4ef2Snarayan 	ASSERT((request->operation == VD_OP_BREAD) ||
270d10e4ef2Snarayan 	    (request->operation == VD_OP_BWRITE));
271d10e4ef2Snarayan 
2721ae08745Sheppo 	if (request->nbytes == 0)
2731ae08745Sheppo 		return (EINVAL);	/* no service for trivial requests */
2741ae08745Sheppo 
275d10e4ef2Snarayan 	PR1("%s %lu bytes at block %lu",
276d10e4ef2Snarayan 	    (request->operation == VD_OP_BREAD) ? "Read" : "Write",
277d10e4ef2Snarayan 	    request->nbytes, request->addr);
2781ae08745Sheppo 
279d10e4ef2Snarayan 	bioinit(buf);
280d10e4ef2Snarayan 	buf->b_flags		= B_BUSY;
281d10e4ef2Snarayan 	buf->b_bcount		= request->nbytes;
282d10e4ef2Snarayan 	buf->b_lblkno		= request->addr;
283d10e4ef2Snarayan 	buf->b_edev		= vd->dev[request->slice];
284d10e4ef2Snarayan 
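	/*
	 * In-band (descriptor mode) requests use shadow mapping; dring-mode
	 * requests map the client's exported memory directly.
	 */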
285*4bac2208Snarayan 	mtype = (&vd->inband_task == task) ? LDC_SHADOW_MAP : LDC_DIRECT_MAP;
286*4bac2208Snarayan 
287*4bac2208Snarayan 	/* Map memory exported by client */
288*4bac2208Snarayan 	status = ldc_mem_map(task->mhdl, request->cookie, request->ncookies,
289*4bac2208Snarayan 	    mtype, (request->operation == VD_OP_BREAD) ? LDC_MEM_W : LDC_MEM_R,
290*4bac2208Snarayan 	    &(buf->b_un.b_addr), NULL);
291*4bac2208Snarayan 	if (status != 0) {
292*4bac2208Snarayan 		PRN("ldc_mem_map() returned err %d ", status);
293*4bac2208Snarayan 		biofini(buf);
294*4bac2208Snarayan 		return (status);
295d10e4ef2Snarayan 	}
296d10e4ef2Snarayan 
297*4bac2208Snarayan 	status = ldc_mem_acquire(task->mhdl, 0, buf->b_bcount);
298*4bac2208Snarayan 	if (status != 0) {
299*4bac2208Snarayan 		(void) ldc_mem_unmap(task->mhdl);
300*4bac2208Snarayan 		PRN("ldc_mem_acquire() returned err %d ", status);
301*4bac2208Snarayan 		biofini(buf);
302*4bac2208Snarayan 		return (status);
303*4bac2208Snarayan 	}
304*4bac2208Snarayan 
305*4bac2208Snarayan 	buf->b_flags |= (request->operation == VD_OP_BREAD) ? B_READ : B_WRITE;
306*4bac2208Snarayan 
307d10e4ef2Snarayan 	/* Start the block I/O */
308*4bac2208Snarayan 	if ((status = ldi_strategy(vd->ldi_handle[request->slice], buf)) == 0)
309d10e4ef2Snarayan 		return (EINPROGRESS);	/* will complete on completionq */
310d10e4ef2Snarayan 
311d10e4ef2Snarayan 	/* Clean up after error */
312*4bac2208Snarayan 	rv = ldc_mem_release(task->mhdl, 0, buf->b_bcount);
313*4bac2208Snarayan 	if (rv) {
314*4bac2208Snarayan 		PRN("ldc_mem_release() returned err %d ", rv);
315*4bac2208Snarayan 	}
316*4bac2208Snarayan 	rv = ldc_mem_unmap(task->mhdl);
317*4bac2208Snarayan 	if (rv) {
318*4bac2208Snarayan 		PRN("ldc_mem_unmap() returned err %d ", rv);
319*4bac2208Snarayan 	}
320*4bac2208Snarayan 
321d10e4ef2Snarayan 	biofini(buf);
322d10e4ef2Snarayan 	return (status);
323d10e4ef2Snarayan }
324d10e4ef2Snarayan 
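/*
 * Send a message to the client over the LDC channel, retrying up to
 * vds_ldc_retries times while the write would block; returns 0 on success,
 * the ldc_write() error on failure, or EIO on a partial write.
 */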
325d10e4ef2Snarayan static int
326d10e4ef2Snarayan send_msg(ldc_handle_t ldc_handle, void *msg, size_t msglen)
327d10e4ef2Snarayan {
328d10e4ef2Snarayan 	int	retry, status;
329d10e4ef2Snarayan 	size_t	nbytes;
330d10e4ef2Snarayan 
331d10e4ef2Snarayan 
332d10e4ef2Snarayan 	for (retry = 0, status = EWOULDBLOCK;
333d10e4ef2Snarayan 	    retry < vds_ldc_retries && status == EWOULDBLOCK;
334d10e4ef2Snarayan 	    retry++) {
335d10e4ef2Snarayan 		PR1("ldc_write() attempt %d", (retry + 1));
336d10e4ef2Snarayan 		nbytes = msglen;
337d10e4ef2Snarayan 		status = ldc_write(ldc_handle, msg, &nbytes);
338d10e4ef2Snarayan 	}
339d10e4ef2Snarayan 
340d10e4ef2Snarayan 	if (status != 0) {
341d10e4ef2Snarayan 		PRN("ldc_write() returned errno %d", status);
342d10e4ef2Snarayan 		return (status);
343d10e4ef2Snarayan 	} else if (nbytes != msglen) {
344d10e4ef2Snarayan 		PRN("ldc_write() performed only partial write");
345d10e4ef2Snarayan 		return (EIO);
346d10e4ef2Snarayan 	}
347d10e4ef2Snarayan 
348d10e4ef2Snarayan 	PR1("SENT %lu bytes", msglen);
349d10e4ef2Snarayan 	return (0);
350d10e4ef2Snarayan }
351d10e4ef2Snarayan 
352d10e4ef2Snarayan static void
353d10e4ef2Snarayan vd_need_reset(vd_t *vd, boolean_t reset_ldc)
354d10e4ef2Snarayan {
355d10e4ef2Snarayan 	mutex_enter(&vd->lock);
356d10e4ef2Snarayan 	vd->reset_state	= B_TRUE;
357d10e4ef2Snarayan 	vd->reset_ldc	= reset_ldc;
358d10e4ef2Snarayan 	mutex_exit(&vd->lock);
359d10e4ef2Snarayan }
360d10e4ef2Snarayan 
361d10e4ef2Snarayan /*
362d10e4ef2Snarayan  * Reset the state of the connection with a client, if needed; reset the LDC
363d10e4ef2Snarayan  * transport as well, if needed.  This function should only be called from the
364d10e4ef2Snarayan  * "startq", as it waits for tasks on the "completionq" and will deadlock if
365d10e4ef2Snarayan  * called from that queue.
366d10e4ef2Snarayan  */
367d10e4ef2Snarayan static void
368d10e4ef2Snarayan vd_reset_if_needed(vd_t *vd)
369d10e4ef2Snarayan {
370d10e4ef2Snarayan 	int		status = 0;
371d10e4ef2Snarayan 
372d10e4ef2Snarayan 
373d10e4ef2Snarayan 	mutex_enter(&vd->lock);
374d10e4ef2Snarayan 	if (!vd->reset_state) {
375d10e4ef2Snarayan 		ASSERT(!vd->reset_ldc);
376d10e4ef2Snarayan 		mutex_exit(&vd->lock);
377d10e4ef2Snarayan 		return;
378d10e4ef2Snarayan 	}
379d10e4ef2Snarayan 	mutex_exit(&vd->lock);
380d10e4ef2Snarayan 
381d10e4ef2Snarayan 
382d10e4ef2Snarayan 	PR0("Resetting connection state with %s", VD_CLIENT(vd));
383d10e4ef2Snarayan 
384d10e4ef2Snarayan 	/*
385d10e4ef2Snarayan 	 * Let any asynchronous I/O complete before possibly pulling the rug
386d10e4ef2Snarayan 	 * out from under it; defer checking vd->reset_ldc, as one of the
387d10e4ef2Snarayan 	 * asynchronous tasks might set it
388d10e4ef2Snarayan 	 */
389d10e4ef2Snarayan 	ddi_taskq_wait(vd->completionq);
390d10e4ef2Snarayan 
391d10e4ef2Snarayan 	if ((vd->initialized & VD_DRING) &&
392d10e4ef2Snarayan 	    ((status = ldc_mem_dring_unmap(vd->dring_handle)) != 0))
393d10e4ef2Snarayan 		PRN("ldc_mem_dring_unmap() returned errno %d", status);
394d10e4ef2Snarayan 
395d10e4ef2Snarayan 	if (vd->dring_task != NULL) {
396d10e4ef2Snarayan 		ASSERT(vd->dring_len != 0);
397*4bac2208Snarayan 		/* Free all dring_task memory handles */
398*4bac2208Snarayan 		for (int i = 0; i < vd->dring_len; i++)
399*4bac2208Snarayan 			(void) ldc_mem_free_handle(vd->dring_task[i].mhdl);
400d10e4ef2Snarayan 		kmem_free(vd->dring_task,
401d10e4ef2Snarayan 		    (sizeof (*vd->dring_task)) * vd->dring_len);
402d10e4ef2Snarayan 		vd->dring_task = NULL;
403d10e4ef2Snarayan 	}
404d10e4ef2Snarayan 
405d10e4ef2Snarayan 
406d10e4ef2Snarayan 	mutex_enter(&vd->lock);
407e1ebb9ecSlm66018 	if (vd->reset_ldc && ((status = ldc_down(vd->ldc_handle)) != 0))
408e1ebb9ecSlm66018 		PRN("ldc_down() returned errno %d", status);
409d10e4ef2Snarayan 
410d10e4ef2Snarayan 	vd->initialized	&= ~(VD_SID | VD_SEQ_NUM | VD_DRING);
411d10e4ef2Snarayan 	vd->state	= VD_STATE_INIT;
412d10e4ef2Snarayan 	vd->max_msglen	= sizeof (vio_msg_t);	/* baseline vio message size */
413d10e4ef2Snarayan 
414d10e4ef2Snarayan 	vd->reset_state	= B_FALSE;
415d10e4ef2Snarayan 	vd->reset_ldc	= B_FALSE;
416d10e4ef2Snarayan 	mutex_exit(&vd->lock);
417d10e4ef2Snarayan }
418d10e4ef2Snarayan 
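/*
 * Acquire dring element "idx" and, if the client still has it "accepted",
 * record the operation status and mark the element "done".  Returns 0 on
 * success, EINVAL if the element was no longer accepted, or the error from
 * the dring acquire/release operation.
 */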
419d10e4ef2Snarayan static int
420d10e4ef2Snarayan vd_mark_elem_done(vd_t *vd, int idx, int elem_status)
421d10e4ef2Snarayan {
422d10e4ef2Snarayan 	boolean_t		accepted;
423d10e4ef2Snarayan 	int			status;
424d10e4ef2Snarayan 	vd_dring_entry_t	*elem = VD_DRING_ELEM(idx);
425d10e4ef2Snarayan 
426d10e4ef2Snarayan 
427d10e4ef2Snarayan 	/* Acquire the element */
428d10e4ef2Snarayan 	if ((status = ldc_mem_dring_acquire(vd->dring_handle, idx, idx)) != 0) {
429d10e4ef2Snarayan 		PRN("ldc_mem_dring_acquire() returned errno %d", status);
430d10e4ef2Snarayan 		return (status);
431d10e4ef2Snarayan 	}
432d10e4ef2Snarayan 
433d10e4ef2Snarayan 	/* Set the element's status and mark it done */
434d10e4ef2Snarayan 	accepted = (elem->hdr.dstate == VIO_DESC_ACCEPTED);
435d10e4ef2Snarayan 	if (accepted) {
436d10e4ef2Snarayan 		elem->payload.status	= elem_status;
437d10e4ef2Snarayan 		elem->hdr.dstate	= VIO_DESC_DONE;
438d10e4ef2Snarayan 	} else {
439d10e4ef2Snarayan 		/* Perhaps client timed out waiting for I/O... */
440d10e4ef2Snarayan 		PRN("element %u no longer \"accepted\"", idx);
441d10e4ef2Snarayan 		VD_DUMP_DRING_ELEM(elem);
442d10e4ef2Snarayan 	}
443d10e4ef2Snarayan 	/* Release the element */
444d10e4ef2Snarayan 	if ((status = ldc_mem_dring_release(vd->dring_handle, idx, idx)) != 0) {
445d10e4ef2Snarayan 		PRN("ldc_mem_dring_release() returned errno %d", status);
446d10e4ef2Snarayan 		return (status);
447d10e4ef2Snarayan 	}
448d10e4ef2Snarayan 
449d10e4ef2Snarayan 	return (accepted ? 0 : EINVAL);
450d10e4ef2Snarayan }
451d10e4ef2Snarayan 
452d10e4ef2Snarayan static void
453d10e4ef2Snarayan vd_complete_bio(void *arg)
454d10e4ef2Snarayan {
455d10e4ef2Snarayan 	int			status		= 0;
456d10e4ef2Snarayan 	vd_task_t		*task		= (vd_task_t *)arg;
457d10e4ef2Snarayan 	vd_t			*vd		= task->vd;
458d10e4ef2Snarayan 	vd_dring_payload_t	*request	= task->request;
459d10e4ef2Snarayan 	struct buf		*buf		= &task->buf;
460d10e4ef2Snarayan 
461d10e4ef2Snarayan 
462d10e4ef2Snarayan 	ASSERT(vd != NULL);
463d10e4ef2Snarayan 	ASSERT(request != NULL);
464d10e4ef2Snarayan 	ASSERT(task->msg != NULL);
465d10e4ef2Snarayan 	ASSERT(task->msglen >= sizeof (*task->msg));
466d10e4ef2Snarayan 	ASSERT(task->msgsize >= task->msglen);
467d10e4ef2Snarayan 
468d10e4ef2Snarayan 	/* Wait for the I/O to complete */
469d10e4ef2Snarayan 	request->status = biowait(buf);
470d10e4ef2Snarayan 
471*4bac2208Snarayan 	/* Release the buffer */
472*4bac2208Snarayan 	status = ldc_mem_release(task->mhdl, 0, buf->b_bcount);
473*4bac2208Snarayan 	if (status) {
474*4bac2208Snarayan 		PRN("ldc_mem_release() returned errno %d copying to client",
4751ae08745Sheppo 		    status);
4761ae08745Sheppo 	}
4771ae08745Sheppo 
478*4bac2208Snarayan 	/* Unmap the memory */
479*4bac2208Snarayan 	status = ldc_mem_unmap(task->mhdl);
480*4bac2208Snarayan 	if (status) {
481*4bac2208Snarayan 		PRN("ldc_mem_unmap() returned errno %d copying to client",
482*4bac2208Snarayan 		    status);
483*4bac2208Snarayan 	}
484*4bac2208Snarayan 
485d10e4ef2Snarayan 	biofini(buf);
4861ae08745Sheppo 
487d10e4ef2Snarayan 	/* Update the dring element for a dring client */
488d10e4ef2Snarayan 	if ((status == 0) && (vd->xfer_mode == VIO_DRING_MODE))
489d10e4ef2Snarayan 		status = vd_mark_elem_done(vd, task->index, request->status);
4901ae08745Sheppo 
491d10e4ef2Snarayan 	/*
492d10e4ef2Snarayan 	 * If a transport error occurred, arrange to "nack" the message when
493d10e4ef2Snarayan 	 * the final task in the descriptor element range completes
494d10e4ef2Snarayan 	 */
495d10e4ef2Snarayan 	if (status != 0)
496d10e4ef2Snarayan 		task->msg->tag.vio_subtype = VIO_SUBTYPE_NACK;
4971ae08745Sheppo 
498d10e4ef2Snarayan 	/*
499d10e4ef2Snarayan 	 * Only the final task for a range of elements will respond to and
500d10e4ef2Snarayan 	 * free the message
501d10e4ef2Snarayan 	 */
502d10e4ef2Snarayan 	if (task->type == VD_NONFINAL_RANGE_TASK)
503d10e4ef2Snarayan 		return;
5041ae08745Sheppo 
505d10e4ef2Snarayan 	/*
506d10e4ef2Snarayan 	 * Send the "ack" or "nack" back to the client; if sending the message
507d10e4ef2Snarayan 	 * via LDC fails, arrange to reset both the connection state and LDC
508d10e4ef2Snarayan 	 * itself
509d10e4ef2Snarayan 	 */
510d10e4ef2Snarayan 	PR1("Sending %s",
511d10e4ef2Snarayan 	    (task->msg->tag.vio_subtype == VIO_SUBTYPE_ACK) ? "ACK" : "NACK");
512d10e4ef2Snarayan 	if (send_msg(vd->ldc_handle, task->msg, task->msglen) != 0)
513d10e4ef2Snarayan 		vd_need_reset(vd, B_TRUE);
5141ae08745Sheppo 
515d10e4ef2Snarayan 	/* Free the message now that it has been used for the reply */
516d10e4ef2Snarayan 	kmem_free(task->msg, task->msgsize);
5171ae08745Sheppo }
5181ae08745Sheppo 
5190a55fbb7Slm66018 static void
5200a55fbb7Slm66018 vd_geom2dk_geom(void *vd_buf, void *ioctl_arg)
5210a55fbb7Slm66018 {
5220a55fbb7Slm66018 	VD_GEOM2DK_GEOM((vd_geom_t *)vd_buf, (struct dk_geom *)ioctl_arg);
5230a55fbb7Slm66018 }
5240a55fbb7Slm66018 
5250a55fbb7Slm66018 static void
5260a55fbb7Slm66018 vd_vtoc2vtoc(void *vd_buf, void *ioctl_arg)
5270a55fbb7Slm66018 {
5280a55fbb7Slm66018 	VD_VTOC2VTOC((vd_vtoc_t *)vd_buf, (struct vtoc *)ioctl_arg);
5290a55fbb7Slm66018 }
5300a55fbb7Slm66018 
5310a55fbb7Slm66018 static void
5320a55fbb7Slm66018 dk_geom2vd_geom(void *ioctl_arg, void *vd_buf)
5330a55fbb7Slm66018 {
5340a55fbb7Slm66018 	DK_GEOM2VD_GEOM((struct dk_geom *)ioctl_arg, (vd_geom_t *)vd_buf);
5350a55fbb7Slm66018 }
5360a55fbb7Slm66018 
5370a55fbb7Slm66018 static void
5380a55fbb7Slm66018 vtoc2vd_vtoc(void *ioctl_arg, void *vd_buf)
5390a55fbb7Slm66018 {
5400a55fbb7Slm66018 	VTOC2VD_VTOC((struct vtoc *)ioctl_arg, (vd_vtoc_t *)vd_buf);
5410a55fbb7Slm66018 }
5420a55fbb7Slm66018 
543*4bac2208Snarayan static void
544*4bac2208Snarayan vd_get_efi_in(void *vd_buf, void *ioctl_arg)
545*4bac2208Snarayan {
546*4bac2208Snarayan 	vd_efi_t *vd_efi = (vd_efi_t *)vd_buf;
547*4bac2208Snarayan 	dk_efi_t *dk_efi = (dk_efi_t *)ioctl_arg;
548*4bac2208Snarayan 
549*4bac2208Snarayan 	dk_efi->dki_lba = vd_efi->lba;
550*4bac2208Snarayan 	dk_efi->dki_length = vd_efi->length;
551*4bac2208Snarayan 	dk_efi->dki_data = kmem_zalloc(vd_efi->length, KM_SLEEP);
552*4bac2208Snarayan }
553*4bac2208Snarayan 
554*4bac2208Snarayan static void
555*4bac2208Snarayan vd_get_efi_out(void *ioctl_arg, void *vd_buf)
556*4bac2208Snarayan {
557*4bac2208Snarayan 	int len;
558*4bac2208Snarayan 	vd_efi_t *vd_efi = (vd_efi_t *)vd_buf;
559*4bac2208Snarayan 	dk_efi_t *dk_efi = (dk_efi_t *)ioctl_arg;
560*4bac2208Snarayan 
561*4bac2208Snarayan 	len = vd_efi->length;
562*4bac2208Snarayan 	DK_EFI2VD_EFI(dk_efi, vd_efi);
563*4bac2208Snarayan 	kmem_free(dk_efi->dki_data, len);
564*4bac2208Snarayan }
565*4bac2208Snarayan 
566*4bac2208Snarayan static void
567*4bac2208Snarayan vd_set_efi_in(void *vd_buf, void *ioctl_arg)
568*4bac2208Snarayan {
569*4bac2208Snarayan 	vd_efi_t *vd_efi = (vd_efi_t *)vd_buf;
570*4bac2208Snarayan 	dk_efi_t *dk_efi = (dk_efi_t *)ioctl_arg;
571*4bac2208Snarayan 
572*4bac2208Snarayan 	dk_efi->dki_data = kmem_alloc(vd_efi->length, KM_SLEEP);
573*4bac2208Snarayan 	VD_EFI2DK_EFI(vd_efi, dk_efi);
574*4bac2208Snarayan }
575*4bac2208Snarayan 
576*4bac2208Snarayan static void
577*4bac2208Snarayan vd_set_efi_out(void *ioctl_arg, void *vd_buf)
578*4bac2208Snarayan {
579*4bac2208Snarayan 	vd_efi_t *vd_efi = (vd_efi_t *)vd_buf;
580*4bac2208Snarayan 	dk_efi_t *dk_efi = (dk_efi_t *)ioctl_arg;
581*4bac2208Snarayan 
582*4bac2208Snarayan 	kmem_free(dk_efi->dki_data, vd_efi->length);
583*4bac2208Snarayan }
584*4bac2208Snarayan 
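/*
 * Determine the backing device's label type and read its partition table:
 * try DKIOCGVTOC first and, if the device reports ENOTSUP, fall back to
 * reading the EFI label and converting it to a VTOC for internal use.
 */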
585*4bac2208Snarayan static int
586*4bac2208Snarayan vd_read_vtoc(ldi_handle_t handle, struct vtoc *vtoc, vd_disk_label_t *label)
587*4bac2208Snarayan {
588*4bac2208Snarayan 	int status, rval;
589*4bac2208Snarayan 	struct dk_gpt *efi;
590*4bac2208Snarayan 	size_t efi_len;
591*4bac2208Snarayan 
592*4bac2208Snarayan 	*label = VD_DISK_LABEL_UNK;
593*4bac2208Snarayan 
594*4bac2208Snarayan 	status = ldi_ioctl(handle, DKIOCGVTOC, (intptr_t)vtoc,
595*4bac2208Snarayan 	    (vd_open_flags | FKIOCTL), kcred, &rval);
596*4bac2208Snarayan 
597*4bac2208Snarayan 	if (status == 0) {
598*4bac2208Snarayan 		*label = VD_DISK_LABEL_VTOC;
599*4bac2208Snarayan 		return (0);
600*4bac2208Snarayan 	} else if (status != ENOTSUP) {
601*4bac2208Snarayan 		PRN("ldi_ioctl(DKIOCGVTOC) returned error %d", status);
602*4bac2208Snarayan 		return (status);
603*4bac2208Snarayan 	}
604*4bac2208Snarayan 
605*4bac2208Snarayan 	status = vds_efi_alloc_and_read(handle, &efi, &efi_len);
606*4bac2208Snarayan 
607*4bac2208Snarayan 	if (status) {
608*4bac2208Snarayan 		PRN("vds_efi_alloc_and_read returned error %d", status);
609*4bac2208Snarayan 		return (status);
610*4bac2208Snarayan 	}
611*4bac2208Snarayan 
612*4bac2208Snarayan 	*label = VD_DISK_LABEL_EFI;
613*4bac2208Snarayan 	vd_efi_to_vtoc(efi, vtoc);
614*4bac2208Snarayan 	vd_efi_free(efi, efi_len);
615*4bac2208Snarayan 
616*4bac2208Snarayan 	return (0);
617*4bac2208Snarayan }
618*4bac2208Snarayan 
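/*
 * Service "get" ioctls for a single-slice vdisk from the synthetic label
 * data (dk_geom, vtoc, dk_efi) cached in the vd structure, rather than
 * passing them through to the underlying device.
 */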
6191ae08745Sheppo static int
6200a55fbb7Slm66018 vd_do_slice_ioctl(vd_t *vd, int cmd, void *ioctl_arg)
6211ae08745Sheppo {
622*4bac2208Snarayan 	dk_efi_t *dk_ioc;
623*4bac2208Snarayan 
624*4bac2208Snarayan 	switch (vd->vdisk_label) {
625*4bac2208Snarayan 
626*4bac2208Snarayan 	case VD_DISK_LABEL_VTOC:
627*4bac2208Snarayan 
6281ae08745Sheppo 		switch (cmd) {
6291ae08745Sheppo 		case DKIOCGGEOM:
6300a55fbb7Slm66018 			ASSERT(ioctl_arg != NULL);
6310a55fbb7Slm66018 			bcopy(&vd->dk_geom, ioctl_arg, sizeof (vd->dk_geom));
6321ae08745Sheppo 			return (0);
6331ae08745Sheppo 		case DKIOCGVTOC:
6340a55fbb7Slm66018 			ASSERT(ioctl_arg != NULL);
6350a55fbb7Slm66018 			bcopy(&vd->vtoc, ioctl_arg, sizeof (vd->vtoc));
6361ae08745Sheppo 			return (0);
6371ae08745Sheppo 		default:
6381ae08745Sheppo 			return (ENOTSUP);
6391ae08745Sheppo 		}
640*4bac2208Snarayan 
641*4bac2208Snarayan 	case VD_DISK_LABEL_EFI:
642*4bac2208Snarayan 
643*4bac2208Snarayan 		switch (cmd) {
644*4bac2208Snarayan 		case DKIOCGETEFI:
645*4bac2208Snarayan 			ASSERT(ioctl_arg != NULL);
646*4bac2208Snarayan 			dk_ioc = (dk_efi_t *)ioctl_arg;
647*4bac2208Snarayan 			if (dk_ioc->dki_length < vd->dk_efi.dki_length)
648*4bac2208Snarayan 				return (EINVAL);
649*4bac2208Snarayan 			bcopy(vd->dk_efi.dki_data, dk_ioc->dki_data,
650*4bac2208Snarayan 			    vd->dk_efi.dki_length);
651*4bac2208Snarayan 			return (0);
652*4bac2208Snarayan 		default:
653*4bac2208Snarayan 			return (ENOTSUP);
654*4bac2208Snarayan 		}
655*4bac2208Snarayan 
656*4bac2208Snarayan 	default:
657*4bac2208Snarayan 		return (ENOTSUP);
658*4bac2208Snarayan 	}
6591ae08745Sheppo }
6601ae08745Sheppo 
6611ae08745Sheppo static int
6620a55fbb7Slm66018 vd_do_ioctl(vd_t *vd, vd_dring_payload_t *request, void* buf, vd_ioctl_t *ioctl)
6631ae08745Sheppo {
6641ae08745Sheppo 	int	rval = 0, status;
6651ae08745Sheppo 	size_t	nbytes = request->nbytes;	/* modifiable copy */
6661ae08745Sheppo 
6671ae08745Sheppo 
6681ae08745Sheppo 	ASSERT(request->slice < vd->nslices);
6691ae08745Sheppo 	PR0("Performing %s", ioctl->operation_name);
6701ae08745Sheppo 
6710a55fbb7Slm66018 	/* Get data from client and convert, if necessary */
6720a55fbb7Slm66018 	if (ioctl->copyin != NULL)  {
6731ae08745Sheppo 		ASSERT(nbytes != 0 && buf != NULL);
6741ae08745Sheppo 		PR1("Getting \"arg\" data from client");
6751ae08745Sheppo 		if ((status = ldc_mem_copy(vd->ldc_handle, buf, 0, &nbytes,
6761ae08745Sheppo 			    request->cookie, request->ncookies,
6771ae08745Sheppo 			    LDC_COPY_IN)) != 0) {
6781ae08745Sheppo 			PRN("ldc_mem_copy() returned errno %d "
6791ae08745Sheppo 			    "copying from client", status);
6801ae08745Sheppo 			return (status);
6811ae08745Sheppo 		}
6820a55fbb7Slm66018 
6830a55fbb7Slm66018 		/* Convert client's data, if necessary */
6840a55fbb7Slm66018 		if (ioctl->copyin == VD_IDENTITY)	/* use client buffer */
6850a55fbb7Slm66018 			ioctl->arg = buf;
6860a55fbb7Slm66018 		else	/* convert client vdisk operation data to ioctl data */
6870a55fbb7Slm66018 			(ioctl->copyin)(buf, (void *)ioctl->arg);
6881ae08745Sheppo 	}
6891ae08745Sheppo 
6901ae08745Sheppo 	/*
6911ae08745Sheppo 	 * Handle single-slice block devices internally; otherwise, have the
6921ae08745Sheppo 	 * real driver perform the ioctl()
6931ae08745Sheppo 	 */
6941ae08745Sheppo 	if (vd->vdisk_type == VD_DISK_TYPE_SLICE && !vd->pseudo) {
6950a55fbb7Slm66018 		if ((status = vd_do_slice_ioctl(vd, ioctl->cmd,
6960a55fbb7Slm66018 			    (void *)ioctl->arg)) != 0)
6971ae08745Sheppo 			return (status);
6981ae08745Sheppo 	} else if ((status = ldi_ioctl(vd->ldi_handle[request->slice],
699d10e4ef2Snarayan 		    ioctl->cmd, (intptr_t)ioctl->arg, (vd_open_flags | FKIOCTL),
700d10e4ef2Snarayan 		    kcred, &rval)) != 0) {
7011ae08745Sheppo 		PR0("ldi_ioctl(%s) = errno %d", ioctl->cmd_name, status);
7021ae08745Sheppo 		return (status);
7031ae08745Sheppo 	}
7041ae08745Sheppo #ifdef DEBUG
7051ae08745Sheppo 	if (rval != 0) {
7061ae08745Sheppo 		PRN("%s set rval = %d, which is not being returned to client",
7071ae08745Sheppo 		    ioctl->cmd_name, rval);
7081ae08745Sheppo 	}
7091ae08745Sheppo #endif /* DEBUG */
7101ae08745Sheppo 
7110a55fbb7Slm66018 	/* Convert data and send to client, if necessary */
7120a55fbb7Slm66018 	if (ioctl->copyout != NULL)  {
7131ae08745Sheppo 		ASSERT(nbytes != 0 && buf != NULL);
7141ae08745Sheppo 		PR1("Sending \"arg\" data to client");
7150a55fbb7Slm66018 
7160a55fbb7Slm66018 		/* Convert ioctl data to vdisk operation data, if necessary */
7170a55fbb7Slm66018 		if (ioctl->copyout != VD_IDENTITY)
7180a55fbb7Slm66018 			(ioctl->copyout)((void *)ioctl->arg, buf);
7190a55fbb7Slm66018 
7201ae08745Sheppo 		if ((status = ldc_mem_copy(vd->ldc_handle, buf, 0, &nbytes,
7211ae08745Sheppo 			    request->cookie, request->ncookies,
7221ae08745Sheppo 			    LDC_COPY_OUT)) != 0) {
7231ae08745Sheppo 			PRN("ldc_mem_copy() returned errno %d "
7241ae08745Sheppo 			    "copying to client", status);
7251ae08745Sheppo 			return (status);
7261ae08745Sheppo 		}
7271ae08745Sheppo 	}
7281ae08745Sheppo 
7291ae08745Sheppo 	return (status);
7301ae08745Sheppo }
7311ae08745Sheppo 
7320a55fbb7Slm66018 /*
7330a55fbb7Slm66018  * Open any slices which have become non-empty as a result of performing a
7340a55fbb7Slm66018  * set-VTOC operation for the client.
7350a55fbb7Slm66018  *
7360a55fbb7Slm66018  * When serving a full disk, vds attempts to exclusively open all of the
7370a55fbb7Slm66018  * disk's slices to prevent another thread or process in the service domain
7380a55fbb7Slm66018  * from "stealing" a slice or from performing I/O to a slice while a vds
7390a55fbb7Slm66018  * client is accessing it.  Unfortunately, underlying drivers, such as sd(7d)
7400a55fbb7Slm66018  * and cmdk(7d), return an error when attempting to open the device file for a
7410a55fbb7Slm66018  * slice which is currently empty according to the VTOC.  This driver behavior
7420a55fbb7Slm66018  * means that vds must skip opening empty slices when initializing a vdisk for
7430a55fbb7Slm66018  * full-disk service and try to open slices that become non-empty (via a
7440a55fbb7Slm66018  * set-VTOC operation) during use of the full disk in order to begin serving
7450a55fbb7Slm66018  * such slices to the client.  This approach has an inherent (and therefore
7460a55fbb7Slm66018  * unavoidable) race condition; it also means that failure to open a
7470a55fbb7Slm66018  * newly-non-empty slice has different semantics than failure to open an
7490a55fbb7Slm66018  * newly-non-empty slice:  Due to driver behavior, opening a
7490a55fbb7Slm66018  * newly-non-empty slice is a necessary side effect of vds performing a
7500a55fbb7Slm66018  * (successful) set-VTOC operation for a client on an in-service (and in-use)
7510a55fbb7Slm66018  * disk in order to begin serving the slice; failure of this side-effect
7520a55fbb7Slm66018  * operation does not mean that the client's set-VTOC operation failed or that
7530a55fbb7Slm66018  * operations on other slices must fail.  Therefore, this function prints an
7540a55fbb7Slm66018  * error message on failure to open a slice, but does not return an error to
7550a55fbb7Slm66018  * its caller--unlike failure to open a slice initially, which results in an
7560a55fbb7Slm66018  * error that prevents serving the vdisk (and thereby requires an
7570a55fbb7Slm66018  * administrator to resolve the problem).  Note that, apart from another
7580a55fbb7Slm66018  * thread or process opening a new slice during the race-condition window,
7590a55fbb7Slm66018  * failure to open a slice in this function will likely indicate an underlying
7600a55fbb7Slm66018  * drive problem, which will also likely become evident in errors returned by
7610a55fbb7Slm66018  * operations on other slices, and which will require administrative
7620a55fbb7Slm66018  * intervention and possibly servicing the drive.
7630a55fbb7Slm66018  */
7640a55fbb7Slm66018 static void
7650a55fbb7Slm66018 vd_open_new_slices(vd_t *vd)
7660a55fbb7Slm66018 {
767*4bac2208Snarayan 	int		status;
7680a55fbb7Slm66018 	struct vtoc	vtoc;
7690a55fbb7Slm66018 
770*4bac2208Snarayan 	/* Get the (new) partitions for updated slice sizes */
771*4bac2208Snarayan 	if ((status = vd_read_vtoc(vd->ldi_handle[0], &vtoc,
772*4bac2208Snarayan 	    &vd->vdisk_label)) != 0) {
773*4bac2208Snarayan 		PRN("vd_read_vtoc returned error %d", status);
7740a55fbb7Slm66018 		return;
7750a55fbb7Slm66018 	}
7760a55fbb7Slm66018 
7770a55fbb7Slm66018 	/* Open any newly-non-empty slices */
7780a55fbb7Slm66018 	for (int slice = 0; slice < vd->nslices; slice++) {
7790a55fbb7Slm66018 		/* Skip zero-length slices */
7800a55fbb7Slm66018 		if (vtoc.v_part[slice].p_size == 0) {
7810a55fbb7Slm66018 			if (vd->ldi_handle[slice] != NULL)
7820a55fbb7Slm66018 				PR0("Open slice %u now has zero length", slice);
7830a55fbb7Slm66018 			continue;
7840a55fbb7Slm66018 		}
7850a55fbb7Slm66018 
7860a55fbb7Slm66018 		/* Skip already-open slices */
7870a55fbb7Slm66018 		if (vd->ldi_handle[slice] != NULL)
7880a55fbb7Slm66018 			continue;
7890a55fbb7Slm66018 
7900a55fbb7Slm66018 		PR0("Opening newly-non-empty slice %u", slice);
7910a55fbb7Slm66018 		if ((status = ldi_open_by_dev(&vd->dev[slice], OTYP_BLK,
7920a55fbb7Slm66018 			    vd_open_flags, kcred, &vd->ldi_handle[slice],
7930a55fbb7Slm66018 			    vd->vds->ldi_ident)) != 0) {
7940a55fbb7Slm66018 			PRN("ldi_open_by_dev() returned errno %d "
7950a55fbb7Slm66018 			    "for slice %u", status, slice);
7960a55fbb7Slm66018 		}
7970a55fbb7Slm66018 	}
7980a55fbb7Slm66018 }
7990a55fbb7Slm66018 
8001ae08745Sheppo #define	RNDSIZE(expr) P2ROUNDUP(sizeof (expr), sizeof (uint64_t))
8011ae08745Sheppo static int
802d10e4ef2Snarayan vd_ioctl(vd_task_t *task)
8031ae08745Sheppo {
8041ae08745Sheppo 	int			i, status;
8051ae08745Sheppo 	void			*buf = NULL;
8060a55fbb7Slm66018 	struct dk_geom		dk_geom = {0};
8070a55fbb7Slm66018 	struct vtoc		vtoc = {0};
808*4bac2208Snarayan 	struct dk_efi		dk_efi = {0};
809d10e4ef2Snarayan 	vd_t			*vd		= task->vd;
810d10e4ef2Snarayan 	vd_dring_payload_t	*request	= task->request;
8110a55fbb7Slm66018 	vd_ioctl_t		ioctl[] = {
8120a55fbb7Slm66018 		/* Command (no-copy) operations */
8130a55fbb7Slm66018 		{VD_OP_FLUSH, STRINGIZE(VD_OP_FLUSH), 0,
8140a55fbb7Slm66018 		    DKIOCFLUSHWRITECACHE, STRINGIZE(DKIOCFLUSHWRITECACHE),
8150a55fbb7Slm66018 		    NULL, NULL, NULL},
8160a55fbb7Slm66018 
8170a55fbb7Slm66018 		/* "Get" (copy-out) operations */
8180a55fbb7Slm66018 		{VD_OP_GET_WCE, STRINGIZE(VD_OP_GET_WCE), RNDSIZE(int),
8190a55fbb7Slm66018 		    DKIOCGETWCE, STRINGIZE(DKIOCGETWCE),
820*4bac2208Snarayan 		    NULL, VD_IDENTITY, VD_IDENTITY},
8210a55fbb7Slm66018 		{VD_OP_GET_DISKGEOM, STRINGIZE(VD_OP_GET_DISKGEOM),
8220a55fbb7Slm66018 		    RNDSIZE(vd_geom_t),
8230a55fbb7Slm66018 		    DKIOCGGEOM, STRINGIZE(DKIOCGGEOM),
8240a55fbb7Slm66018 		    &dk_geom, NULL, dk_geom2vd_geom},
8250a55fbb7Slm66018 		{VD_OP_GET_VTOC, STRINGIZE(VD_OP_GET_VTOC), RNDSIZE(vd_vtoc_t),
8260a55fbb7Slm66018 		    DKIOCGVTOC, STRINGIZE(DKIOCGVTOC),
8270a55fbb7Slm66018 		    &vtoc, NULL, vtoc2vd_vtoc},
828*4bac2208Snarayan 		{VD_OP_GET_EFI, STRINGIZE(VD_OP_GET_EFI), RNDSIZE(vd_efi_t),
829*4bac2208Snarayan 		    DKIOCGETEFI, STRINGIZE(DKIOCGETEFI),
830*4bac2208Snarayan 		    &dk_efi, vd_get_efi_in, vd_get_efi_out},
8310a55fbb7Slm66018 
8320a55fbb7Slm66018 		/* "Set" (copy-in) operations */
8330a55fbb7Slm66018 		{VD_OP_SET_WCE, STRINGIZE(VD_OP_SET_WCE), RNDSIZE(int),
8340a55fbb7Slm66018 		    DKIOCSETWCE, STRINGIZE(DKIOCSETWCE),
835*4bac2208Snarayan 		    NULL, VD_IDENTITY, VD_IDENTITY},
8360a55fbb7Slm66018 		{VD_OP_SET_DISKGEOM, STRINGIZE(VD_OP_SET_DISKGEOM),
8370a55fbb7Slm66018 		    RNDSIZE(vd_geom_t),
8380a55fbb7Slm66018 		    DKIOCSGEOM, STRINGIZE(DKIOCSGEOM),
8390a55fbb7Slm66018 		    &dk_geom, vd_geom2dk_geom, NULL},
8400a55fbb7Slm66018 		{VD_OP_SET_VTOC, STRINGIZE(VD_OP_SET_VTOC), RNDSIZE(vd_vtoc_t),
8410a55fbb7Slm66018 		    DKIOCSVTOC, STRINGIZE(DKIOCSVTOC),
8420a55fbb7Slm66018 		    &vtoc, vd_vtoc2vtoc, NULL},
843*4bac2208Snarayan 		{VD_OP_SET_EFI, STRINGIZE(VD_OP_SET_EFI), RNDSIZE(vd_efi_t),
844*4bac2208Snarayan 		    DKIOCSETEFI, STRINGIZE(DKIOCSETEFI),
845*4bac2208Snarayan 		    &dk_efi, vd_set_efi_in, vd_set_efi_out},
8460a55fbb7Slm66018 	};
8471ae08745Sheppo 	size_t		nioctls = (sizeof (ioctl))/(sizeof (ioctl[0]));
8481ae08745Sheppo 
8491ae08745Sheppo 
850d10e4ef2Snarayan 	ASSERT(vd != NULL);
851d10e4ef2Snarayan 	ASSERT(request != NULL);
8521ae08745Sheppo 	ASSERT(request->slice < vd->nslices);
8531ae08745Sheppo 
8541ae08745Sheppo 	/*
8551ae08745Sheppo 	 * Determine ioctl corresponding to caller's "operation" and
8561ae08745Sheppo 	 * validate caller's "nbytes"
8571ae08745Sheppo 	 */
8581ae08745Sheppo 	for (i = 0; i < nioctls; i++) {
8591ae08745Sheppo 		if (request->operation == ioctl[i].operation) {
8600a55fbb7Slm66018 			/* LDC memory operations require 8-byte multiples */
8610a55fbb7Slm66018 			ASSERT(ioctl[i].nbytes % sizeof (uint64_t) == 0);
8620a55fbb7Slm66018 
863*4bac2208Snarayan 			if (request->operation == VD_OP_GET_EFI ||
864*4bac2208Snarayan 			    request->operation == VD_OP_SET_EFI) {
865*4bac2208Snarayan 				if (request->nbytes >= ioctl[i].nbytes)
866*4bac2208Snarayan 					break;
867*4bac2208Snarayan 				PRN("%s:  Expected at least nbytes = %lu, "
868*4bac2208Snarayan 				    "got %lu", ioctl[i].operation_name,
869*4bac2208Snarayan 				    ioctl[i].nbytes, request->nbytes);
870*4bac2208Snarayan 				return (EINVAL);
871*4bac2208Snarayan 			}
872*4bac2208Snarayan 
8730a55fbb7Slm66018 			if (request->nbytes != ioctl[i].nbytes) {
8740a55fbb7Slm66018 				PRN("%s:  Expected nbytes = %lu, got %lu",
8750a55fbb7Slm66018 				    ioctl[i].operation_name, ioctl[i].nbytes,
8760a55fbb7Slm66018 				    request->nbytes);
8771ae08745Sheppo 				return (EINVAL);
8781ae08745Sheppo 			}
8791ae08745Sheppo 
8801ae08745Sheppo 			break;
8811ae08745Sheppo 		}
8821ae08745Sheppo 	}
8831ae08745Sheppo 	ASSERT(i < nioctls);	/* because "operation" already validated */
8841ae08745Sheppo 
8851ae08745Sheppo 	if (request->nbytes)
8861ae08745Sheppo 		buf = kmem_zalloc(request->nbytes, KM_SLEEP);
8871ae08745Sheppo 	status = vd_do_ioctl(vd, request, buf, &ioctl[i]);
8881ae08745Sheppo 	if (request->nbytes)
8891ae08745Sheppo 		kmem_free(buf, request->nbytes);
890*4bac2208Snarayan 	if (vd->vdisk_type == VD_DISK_TYPE_DISK &&
891*4bac2208Snarayan 	    (request->operation == VD_OP_SET_VTOC ||
892*4bac2208Snarayan 	    request->operation == VD_OP_SET_EFI))
8930a55fbb7Slm66018 		vd_open_new_slices(vd);
894d10e4ef2Snarayan 	PR0("Returning %d", status);
8951ae08745Sheppo 	return (status);
8961ae08745Sheppo }
8971ae08745Sheppo 
898*4bac2208Snarayan static int
899*4bac2208Snarayan vd_get_devid(vd_task_t *task)
900*4bac2208Snarayan {
901*4bac2208Snarayan 	vd_t *vd = task->vd;
902*4bac2208Snarayan 	vd_dring_payload_t *request = task->request;
903*4bac2208Snarayan 	vd_devid_t *vd_devid;
904*4bac2208Snarayan 	impl_devid_t *devid;
905*4bac2208Snarayan 	int status, bufid_len, devid_len, len;
906*4bac2208Snarayan 
907*4bac2208Snarayan 	PR1("Get Device ID");
908*4bac2208Snarayan 
909*4bac2208Snarayan 	if (ddi_lyr_get_devid(vd->dev[request->slice],
910*4bac2208Snarayan 	    (ddi_devid_t *)&devid) != DDI_SUCCESS) {
911*4bac2208Snarayan 		/* the most common failure is that no devid is available */
912*4bac2208Snarayan 		return (ENOENT);
913*4bac2208Snarayan 	}
914*4bac2208Snarayan 
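	/*
	 * Editorial note:  the "+ 1" below assumes vd_devid_t ends in a
	 * one-byte placeholder for its variable-length "id" field, so the
	 * space available for the ID is the client's buffer less the fixed
	 * header.
	 */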
915*4bac2208Snarayan 	bufid_len = request->nbytes - sizeof (vd_devid_t) + 1;
916*4bac2208Snarayan 	devid_len = DEVID_GETLEN(devid);
917*4bac2208Snarayan 
918*4bac2208Snarayan 	vd_devid = kmem_zalloc(request->nbytes, KM_SLEEP);
919*4bac2208Snarayan 	vd_devid->length = devid_len;
920*4bac2208Snarayan 	vd_devid->type = DEVID_GETTYPE(devid);
921*4bac2208Snarayan 
922*4bac2208Snarayan 	len = (devid_len > bufid_len)? bufid_len : devid_len;
923*4bac2208Snarayan 
924*4bac2208Snarayan 	bcopy(devid->did_id, vd_devid->id, len);
925*4bac2208Snarayan 
926*4bac2208Snarayan 	/* LDC memory operations require 8-byte multiples */
927*4bac2208Snarayan 	ASSERT(request->nbytes % sizeof (uint64_t) == 0);
928*4bac2208Snarayan 
929*4bac2208Snarayan 	if ((status = ldc_mem_copy(vd->ldc_handle, (caddr_t)vd_devid, 0,
930*4bac2208Snarayan 	    &request->nbytes, request->cookie, request->ncookies,
931*4bac2208Snarayan 	    LDC_COPY_OUT)) != 0) {
932*4bac2208Snarayan 		PRN("ldc_mem_copy() returned errno %d copying to client",
933*4bac2208Snarayan 		    status);
934*4bac2208Snarayan 	}
935*4bac2208Snarayan 
936*4bac2208Snarayan 	kmem_free(vd_devid, request->nbytes);
937*4bac2208Snarayan 	ddi_devid_free((ddi_devid_t)devid);
938*4bac2208Snarayan 
939*4bac2208Snarayan 	return (status);
940*4bac2208Snarayan }
941*4bac2208Snarayan 
9421ae08745Sheppo /*
9431ae08745Sheppo  * Define the supported operations once the functions for performing them have
9441ae08745Sheppo  * been defined
9451ae08745Sheppo  */
9461ae08745Sheppo static const vds_operation_t	vds_operation[] = {
947d10e4ef2Snarayan 	{VD_OP_BREAD,		vd_start_bio,	vd_complete_bio},
948d10e4ef2Snarayan 	{VD_OP_BWRITE,		vd_start_bio,	vd_complete_bio},
949d10e4ef2Snarayan 	{VD_OP_FLUSH,		vd_ioctl,	NULL},
950d10e4ef2Snarayan 	{VD_OP_GET_WCE,		vd_ioctl,	NULL},
951d10e4ef2Snarayan 	{VD_OP_SET_WCE,		vd_ioctl,	NULL},
952d10e4ef2Snarayan 	{VD_OP_GET_VTOC,	vd_ioctl,	NULL},
953d10e4ef2Snarayan 	{VD_OP_SET_VTOC,	vd_ioctl,	NULL},
954d10e4ef2Snarayan 	{VD_OP_GET_DISKGEOM,	vd_ioctl,	NULL},
955*4bac2208Snarayan 	{VD_OP_SET_DISKGEOM,	vd_ioctl,	NULL},
956*4bac2208Snarayan 	{VD_OP_GET_EFI,		vd_ioctl,	NULL},
957*4bac2208Snarayan 	{VD_OP_SET_EFI,		vd_ioctl,	NULL},
958*4bac2208Snarayan 	{VD_OP_GET_DEVID,	vd_get_devid,	NULL},
9591ae08745Sheppo };
9601ae08745Sheppo 
9611ae08745Sheppo static const size_t	vds_noperations =
9621ae08745Sheppo 	(sizeof (vds_operation))/(sizeof (vds_operation[0]));
9631ae08745Sheppo 
9641ae08745Sheppo /*
965d10e4ef2Snarayan  * Process a task specifying a client I/O request
9661ae08745Sheppo  */
9671ae08745Sheppo static int
968d10e4ef2Snarayan vd_process_task(vd_task_t *task)
9691ae08745Sheppo {
970d10e4ef2Snarayan 	int			i, status;
971d10e4ef2Snarayan 	vd_t			*vd		= task->vd;
972d10e4ef2Snarayan 	vd_dring_payload_t	*request	= task->request;
9731ae08745Sheppo 
9741ae08745Sheppo 
975d10e4ef2Snarayan 	ASSERT(vd != NULL);
976d10e4ef2Snarayan 	ASSERT(request != NULL);
9771ae08745Sheppo 
9781ae08745Sheppo 	/* Range-check slice */
9791ae08745Sheppo 	if (request->slice >= vd->nslices) {
9801ae08745Sheppo 		PRN("Invalid \"slice\" %u (max %u) for virtual disk",
9811ae08745Sheppo 		    request->slice, (vd->nslices - 1));
9821ae08745Sheppo 		return (EINVAL);
9831ae08745Sheppo 	}
9841ae08745Sheppo 
985d10e4ef2Snarayan 	/* Find the requested operation */
9861ae08745Sheppo 	for (i = 0; i < vds_noperations; i++)
9871ae08745Sheppo 		if (request->operation == vds_operation[i].operation)
988d10e4ef2Snarayan 			break;
989d10e4ef2Snarayan 	if (i == vds_noperations) {
9901ae08745Sheppo 		PRN("Unsupported operation %u", request->operation);
9911ae08745Sheppo 		return (ENOTSUP);
9921ae08745Sheppo 	}
9931ae08745Sheppo 
994d10e4ef2Snarayan 	/* Start the operation */
995d10e4ef2Snarayan 	if ((status = vds_operation[i].start(task)) != EINPROGRESS) {
996d10e4ef2Snarayan 		request->status = status;	/* op succeeded or failed */
997d10e4ef2Snarayan 		return (0);			/* but request completed */
9981ae08745Sheppo 	}
9991ae08745Sheppo 
1000d10e4ef2Snarayan 	ASSERT(vds_operation[i].complete != NULL);	/* debug case */
1001d10e4ef2Snarayan 	if (vds_operation[i].complete == NULL) {	/* non-debug case */
1002d10e4ef2Snarayan 		PRN("Unexpected return of EINPROGRESS "
1003d10e4ef2Snarayan 		    "with no I/O completion handler");
1004d10e4ef2Snarayan 		request->status = EIO;	/* operation failed */
1005d10e4ef2Snarayan 		return (0);		/* but request completed */
10061ae08745Sheppo 	}
10071ae08745Sheppo 
1008d10e4ef2Snarayan 	/* Queue a task to complete the operation */
1009d10e4ef2Snarayan 	status = ddi_taskq_dispatch(vd->completionq, vds_operation[i].complete,
1010d10e4ef2Snarayan 	    task, DDI_SLEEP);
1011d10e4ef2Snarayan 	/* ddi_taskq_dispatch(9f) guarantees success with DDI_SLEEP */
1012d10e4ef2Snarayan 	ASSERT(status == DDI_SUCCESS);
1013d10e4ef2Snarayan 
1014d10e4ef2Snarayan 	PR1("Operation in progress");
1015d10e4ef2Snarayan 	return (EINPROGRESS);	/* completion handler will finish request */
10161ae08745Sheppo }
10171ae08745Sheppo 
10181ae08745Sheppo /*
10190a55fbb7Slm66018  * Return true if the "type", "subtype", and "env" fields of the "tag" first
10200a55fbb7Slm66018  * argument match the corresponding remaining arguments; otherwise, return false
10211ae08745Sheppo  */
10220a55fbb7Slm66018 boolean_t
10231ae08745Sheppo vd_msgtype(vio_msg_tag_t *tag, int type, int subtype, int env)
10241ae08745Sheppo {
10251ae08745Sheppo 	return ((tag->vio_msgtype == type) &&
10261ae08745Sheppo 		(tag->vio_subtype == subtype) &&
10270a55fbb7Slm66018 		(tag->vio_subtype_env == env)) ? B_TRUE : B_FALSE;
10281ae08745Sheppo }
10291ae08745Sheppo 
10300a55fbb7Slm66018 /*
10310a55fbb7Slm66018  * Check whether the major/minor version specified in "ver_msg" is supported
10320a55fbb7Slm66018  * by this server.
10330a55fbb7Slm66018  */
10340a55fbb7Slm66018 static boolean_t
10350a55fbb7Slm66018 vds_supported_version(vio_ver_msg_t *ver_msg)
10360a55fbb7Slm66018 {
10370a55fbb7Slm66018 	for (int i = 0; i < vds_num_versions; i++) {
10380a55fbb7Slm66018 		ASSERT(vds_version[i].major > 0);
10390a55fbb7Slm66018 		ASSERT((i == 0) ||
10400a55fbb7Slm66018 		    (vds_version[i].major < vds_version[i-1].major));
10410a55fbb7Slm66018 
10420a55fbb7Slm66018 		/*
10430a55fbb7Slm66018 		 * If the major versions match, adjust the minor version, if
10440a55fbb7Slm66018 		 * necessary, down to the highest value supported by this
10450a55fbb7Slm66018 		 * server and return true so this message will get "ack"ed;
10460a55fbb7Slm66018 		 * the client should also support all minor versions lower
10470a55fbb7Slm66018 		 * than the value it sent
10480a55fbb7Slm66018 		 */
10490a55fbb7Slm66018 		if (ver_msg->ver_major == vds_version[i].major) {
10500a55fbb7Slm66018 			if (ver_msg->ver_minor > vds_version[i].minor) {
10510a55fbb7Slm66018 				PR0("Adjusting minor version from %u to %u",
10520a55fbb7Slm66018 				    ver_msg->ver_minor, vds_version[i].minor);
10530a55fbb7Slm66018 				ver_msg->ver_minor = vds_version[i].minor;
10540a55fbb7Slm66018 			}
10550a55fbb7Slm66018 			return (B_TRUE);
10560a55fbb7Slm66018 		}
10570a55fbb7Slm66018 
10580a55fbb7Slm66018 		/*
10590a55fbb7Slm66018 		 * If the message contains a higher major version number, set
10600a55fbb7Slm66018 		 * the message's major/minor versions to the current values
10610a55fbb7Slm66018 		 * and return false, so this message will get "nack"ed with
10620a55fbb7Slm66018 		 * these values, and the client will potentially try again
10630a55fbb7Slm66018 		 * with the same or a lower version
10640a55fbb7Slm66018 		 */
10650a55fbb7Slm66018 		if (ver_msg->ver_major > vds_version[i].major) {
10660a55fbb7Slm66018 			ver_msg->ver_major = vds_version[i].major;
10670a55fbb7Slm66018 			ver_msg->ver_minor = vds_version[i].minor;
10680a55fbb7Slm66018 			return (B_FALSE);
10690a55fbb7Slm66018 		}
10700a55fbb7Slm66018 
10710a55fbb7Slm66018 		/*
10720a55fbb7Slm66018 		 * Otherwise, the message's major version is less than the
10730a55fbb7Slm66018 		 * current major version, so continue the loop to the next
10740a55fbb7Slm66018 		 * (lower) supported version
10750a55fbb7Slm66018 		 */
10760a55fbb7Slm66018 	}
10770a55fbb7Slm66018 
10780a55fbb7Slm66018 	/*
10790a55fbb7Slm66018 	 * No common version was found; "ground" the version pair in the
10800a55fbb7Slm66018 	 * message to terminate negotiation
10810a55fbb7Slm66018 	 */
10820a55fbb7Slm66018 	ver_msg->ver_major = 0;
10830a55fbb7Slm66018 	ver_msg->ver_minor = 0;
10840a55fbb7Slm66018 	return (B_FALSE);
10850a55fbb7Slm66018 }
10860a55fbb7Slm66018 
10870a55fbb7Slm66018 /*
10880a55fbb7Slm66018  * Process a version message from a client.  vds expects to receive version
10890a55fbb7Slm66018  * messages from clients seeking service, but never issues version messages
10900a55fbb7Slm66018  * itself; therefore, vds can ACK or NACK client version messages, but does
10910a55fbb7Slm66018  * not expect to receive version-message ACKs or NACKs (and will treat such
10920a55fbb7Slm66018  * messages as invalid).
10930a55fbb7Slm66018  */
10941ae08745Sheppo static int
10950a55fbb7Slm66018 vd_process_ver_msg(vd_t *vd, vio_msg_t *msg, size_t msglen)
10961ae08745Sheppo {
10971ae08745Sheppo 	vio_ver_msg_t	*ver_msg = (vio_ver_msg_t *)msg;
10981ae08745Sheppo 
10991ae08745Sheppo 
11001ae08745Sheppo 	ASSERT(msglen >= sizeof (msg->tag));
11011ae08745Sheppo 
11021ae08745Sheppo 	if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO,
11031ae08745Sheppo 		VIO_VER_INFO)) {
11041ae08745Sheppo 		return (ENOMSG);	/* not a version message */
11051ae08745Sheppo 	}
11061ae08745Sheppo 
11071ae08745Sheppo 	if (msglen != sizeof (*ver_msg)) {
11081ae08745Sheppo 		PRN("Expected %lu-byte version message; "
11091ae08745Sheppo 		    "received %lu bytes", sizeof (*ver_msg), msglen);
11101ae08745Sheppo 		return (EBADMSG);
11111ae08745Sheppo 	}
11121ae08745Sheppo 
11131ae08745Sheppo 	if (ver_msg->dev_class != VDEV_DISK) {
11141ae08745Sheppo 		PRN("Expected device class %u (disk); received %u",
11151ae08745Sheppo 		    VDEV_DISK, ver_msg->dev_class);
11161ae08745Sheppo 		return (EBADMSG);
11171ae08745Sheppo 	}
11181ae08745Sheppo 
11190a55fbb7Slm66018 	/*
11200a55fbb7Slm66018 	 * We're talking to the expected kind of client; set our device class
11210a55fbb7Slm66018 	 * for "ack/nack" back to the client
11220a55fbb7Slm66018 	 */
11231ae08745Sheppo 	ver_msg->dev_class = VDEV_DISK_SERVER;
11240a55fbb7Slm66018 
11250a55fbb7Slm66018 	/*
11260a55fbb7Slm66018 	 * Check whether the (valid) version message specifies a version
11270a55fbb7Slm66018 	 * supported by this server.  If the version is not supported, return
11280a55fbb7Slm66018 	 * EBADMSG so the message will get "nack"ed; vds_supported_version()
11290a55fbb7Slm66018 	 * will have updated the message with a supported version for the
11300a55fbb7Slm66018 	 * client to consider
11310a55fbb7Slm66018 	 */
11320a55fbb7Slm66018 	if (!vds_supported_version(ver_msg))
11330a55fbb7Slm66018 		return (EBADMSG);
11340a55fbb7Slm66018 
11350a55fbb7Slm66018 
11360a55fbb7Slm66018 	/*
11370a55fbb7Slm66018 	 * A version has been agreed upon; use the client's SID for
11380a55fbb7Slm66018 	 * communication on this channel now
11390a55fbb7Slm66018 	 */
11400a55fbb7Slm66018 	ASSERT(!(vd->initialized & VD_SID));
11410a55fbb7Slm66018 	vd->sid = ver_msg->tag.vio_sid;
11420a55fbb7Slm66018 	vd->initialized |= VD_SID;
11430a55fbb7Slm66018 
11440a55fbb7Slm66018 	/*
11450a55fbb7Slm66018 	 * When multiple versions are supported, this function should store
11460a55fbb7Slm66018 	 * the negotiated major and minor version values in the "vd" data
11470a55fbb7Slm66018 	 * structure to govern further communication; in particular, note that
11480a55fbb7Slm66018 	 * the client might have specified a lower minor version for the
11490a55fbb7Slm66018 	 * agreed major version than specified in the vds_version[] array.  The
11500a55fbb7Slm66018 	 * following assertions should help remind future maintainers to make
11510a55fbb7Slm66018 	 * the appropriate changes to support multiple versions.
11520a55fbb7Slm66018 	 */
11530a55fbb7Slm66018 	ASSERT(vds_num_versions == 1);
11540a55fbb7Slm66018 	ASSERT(ver_msg->ver_major == vds_version[0].major);
11550a55fbb7Slm66018 	ASSERT(ver_msg->ver_minor == vds_version[0].minor);
11560a55fbb7Slm66018 
11570a55fbb7Slm66018 	PR0("Using major version %u, minor version %u",
11580a55fbb7Slm66018 	    ver_msg->ver_major, ver_msg->ver_minor);
11591ae08745Sheppo 	return (0);
11601ae08745Sheppo }
11611ae08745Sheppo 
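/*
 * Process an attribute message from a client:  validate the requested
 * maximum transfer size and transfer mode, record the negotiated transfer
 * mode, and fill in the disk's block size, capacity, type, and supported
 * operations for the reply.
 */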
11621ae08745Sheppo static int
11631ae08745Sheppo vd_process_attr_msg(vd_t *vd, vio_msg_t *msg, size_t msglen)
11641ae08745Sheppo {
11651ae08745Sheppo 	vd_attr_msg_t	*attr_msg = (vd_attr_msg_t *)msg;
11661ae08745Sheppo 
11671ae08745Sheppo 
11681ae08745Sheppo 	ASSERT(msglen >= sizeof (msg->tag));
11691ae08745Sheppo 
11701ae08745Sheppo 	if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO,
11711ae08745Sheppo 		VIO_ATTR_INFO)) {
1172d10e4ef2Snarayan 		PR0("Message is not an attribute message");
1173d10e4ef2Snarayan 		return (ENOMSG);
11741ae08745Sheppo 	}
11751ae08745Sheppo 
11761ae08745Sheppo 	if (msglen != sizeof (*attr_msg)) {
11771ae08745Sheppo 		PRN("Expected %lu-byte attribute message; "
11781ae08745Sheppo 		    "received %lu bytes", sizeof (*attr_msg), msglen);
11791ae08745Sheppo 		return (EBADMSG);
11801ae08745Sheppo 	}
11811ae08745Sheppo 
11821ae08745Sheppo 	if (attr_msg->max_xfer_sz == 0) {
11831ae08745Sheppo 		PRN("Received maximum transfer size of 0 from client");
11841ae08745Sheppo 		return (EBADMSG);
11851ae08745Sheppo 	}
11861ae08745Sheppo 
11871ae08745Sheppo 	if ((attr_msg->xfer_mode != VIO_DESC_MODE) &&
11881ae08745Sheppo 	    (attr_msg->xfer_mode != VIO_DRING_MODE)) {
11891ae08745Sheppo 		PRN("Client requested unsupported transfer mode");
11901ae08745Sheppo 		return (EBADMSG);
11911ae08745Sheppo 	}
11921ae08745Sheppo 
11931ae08745Sheppo 
11941ae08745Sheppo 	/* Success:  valid message and transfer mode */
11951ae08745Sheppo 	vd->xfer_mode = attr_msg->xfer_mode;
11961ae08745Sheppo 	if (vd->xfer_mode == VIO_DESC_MODE) {
11971ae08745Sheppo 		/*
11981ae08745Sheppo 		 * The vd_dring_inband_msg_t contains one cookie; need room
11991ae08745Sheppo 		 * for up to n-1 more cookies, where "n" is the number of full
12001ae08745Sheppo 		 * pages plus possibly one partial page required to cover
12011ae08745Sheppo 		 * "max_xfer_sz".  Add room for one more cookie if
12021ae08745Sheppo 		 * "max_xfer_sz" isn't an integral multiple of the page size.
12031ae08745Sheppo 		 * Must first get the maximum transfer size in bytes.
12041ae08745Sheppo 		 */
12051ae08745Sheppo 		size_t	max_xfer_bytes = attr_msg->vdisk_block_size ?
12061ae08745Sheppo 		    attr_msg->vdisk_block_size*attr_msg->max_xfer_sz :
12071ae08745Sheppo 		    attr_msg->max_xfer_sz;
12081ae08745Sheppo 		size_t	max_inband_msglen =
12091ae08745Sheppo 		    sizeof (vd_dring_inband_msg_t) +
12101ae08745Sheppo 		    ((max_xfer_bytes/PAGESIZE +
12111ae08745Sheppo 			((max_xfer_bytes % PAGESIZE) ? 1 : 0))*
12121ae08745Sheppo 			(sizeof (ldc_mem_cookie_t)));
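		/*
		 * For example, a hypothetical 128 KB maximum transfer with
		 * 8 KB pages spans 16 full pages and no partial page, so
		 * room for 16 cookies is reserved here in addition to the
		 * one already contained in vd_dring_inband_msg_t.
		 */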
12131ae08745Sheppo 
12141ae08745Sheppo 		/*
12151ae08745Sheppo 		 * Set the maximum expected message length to
12161ae08745Sheppo 		 * accommodate in-band-descriptor messages with all
12171ae08745Sheppo 		 * their cookies
12181ae08745Sheppo 		 */
12191ae08745Sheppo 		vd->max_msglen = MAX(vd->max_msglen, max_inband_msglen);
1220d10e4ef2Snarayan 
1221d10e4ef2Snarayan 		/*
1222d10e4ef2Snarayan 		 * Initialize the data structure for processing in-band I/O
1223d10e4ef2Snarayan 		 * request descriptors
1224d10e4ef2Snarayan 		 */
1225d10e4ef2Snarayan 		vd->inband_task.vd	= vd;
1226d10e4ef2Snarayan 		vd->inband_task.index	= 0;
1227d10e4ef2Snarayan 		vd->inband_task.type	= VD_FINAL_RANGE_TASK;	/* range == 1 */
12281ae08745Sheppo 	}
12291ae08745Sheppo 
1230e1ebb9ecSlm66018 	/* Return the device's block size and max transfer size to the client */
1231e1ebb9ecSlm66018 	attr_msg->vdisk_block_size	= DEV_BSIZE;
1232e1ebb9ecSlm66018 	attr_msg->max_xfer_sz		= vd->max_xfer_sz;
1233e1ebb9ecSlm66018 
12341ae08745Sheppo 	attr_msg->vdisk_size = vd->vdisk_size;
12351ae08745Sheppo 	attr_msg->vdisk_type = vd->vdisk_type;
12361ae08745Sheppo 	attr_msg->operations = vds_operations;
12371ae08745Sheppo 	PR0("%s", VD_CLIENT(vd));
12381ae08745Sheppo 	return (0);
12391ae08745Sheppo }
12401ae08745Sheppo 
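/*
 * Process a register-dring message:  map the client's descriptor ring
 * (which must currently fit in a single LDC cookie), record its geometry,
 * and allocate the "shadow" array of per-descriptor tasks used to process
 * I/O requests.
 */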
12411ae08745Sheppo static int
12421ae08745Sheppo vd_process_dring_reg_msg(vd_t *vd, vio_msg_t *msg, size_t msglen)
12431ae08745Sheppo {
12441ae08745Sheppo 	int			status;
12451ae08745Sheppo 	size_t			expected;
12461ae08745Sheppo 	ldc_mem_info_t		dring_minfo;
12471ae08745Sheppo 	vio_dring_reg_msg_t	*reg_msg = (vio_dring_reg_msg_t *)msg;
12481ae08745Sheppo 
12491ae08745Sheppo 
12501ae08745Sheppo 	ASSERT(msglen >= sizeof (msg->tag));
12511ae08745Sheppo 
12521ae08745Sheppo 	if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO,
12531ae08745Sheppo 		VIO_DRING_REG)) {
1254d10e4ef2Snarayan 		PR0("Message is not a register-dring message");
1255d10e4ef2Snarayan 		return (ENOMSG);
12561ae08745Sheppo 	}
12571ae08745Sheppo 
12581ae08745Sheppo 	if (msglen < sizeof (*reg_msg)) {
12591ae08745Sheppo 		PRN("Expected at least %lu-byte register-dring message; "
12601ae08745Sheppo 		    "received %lu bytes", sizeof (*reg_msg), msglen);
12611ae08745Sheppo 		return (EBADMSG);
12621ae08745Sheppo 	}
12631ae08745Sheppo 
12641ae08745Sheppo 	expected = sizeof (*reg_msg) +
12651ae08745Sheppo 	    (reg_msg->ncookies - 1)*(sizeof (reg_msg->cookie[0]));
12661ae08745Sheppo 	if (msglen != expected) {
12671ae08745Sheppo 		PRN("Expected %lu-byte register-dring message; "
12681ae08745Sheppo 		    "received %lu bytes", expected, msglen);
12691ae08745Sheppo 		return (EBADMSG);
12701ae08745Sheppo 	}
12711ae08745Sheppo 
12721ae08745Sheppo 	if (vd->initialized & VD_DRING) {
12731ae08745Sheppo 		PRN("A dring was previously registered; only support one");
12741ae08745Sheppo 		return (EBADMSG);
12751ae08745Sheppo 	}
12761ae08745Sheppo 
1277d10e4ef2Snarayan 	if (reg_msg->num_descriptors > INT32_MAX) {
1278d10e4ef2Snarayan 		PRN("reg_msg->num_descriptors = %u; must be <= %u (%s)",
1279d10e4ef2Snarayan 		    reg_msg->num_descriptors, INT32_MAX, STRINGIZE(INT32_MAX));
1280d10e4ef2Snarayan 		return (EBADMSG);
1281d10e4ef2Snarayan 	}
1282d10e4ef2Snarayan 
12831ae08745Sheppo 	if (reg_msg->ncookies != 1) {
12841ae08745Sheppo 		/*
12851ae08745Sheppo 		 * In addition to fixing the assertion in the success case
12861ae08745Sheppo 		 * below, supporting drings which require more than one
12871ae08745Sheppo 		 * "cookie" requires increasing the value of vd->max_msglen
12881ae08745Sheppo 		 * somewhere in the code path prior to receiving the message
12891ae08745Sheppo 		 * which results in calling this function.  Note that without
12901ae08745Sheppo 		 * making this change, the larger message size required to
12911ae08745Sheppo 		 * accommodate multiple cookies cannot be successfully
12921ae08745Sheppo 		 * received, so this function will not even get called.
12931ae08745Sheppo 		 * Gracefully accommodating more dring cookies might
12941ae08745Sheppo 		 * reasonably demand exchanging an additional attribute or
12951ae08745Sheppo 		 * making a minor protocol adjustment
12961ae08745Sheppo 		 */
12971ae08745Sheppo 		PRN("reg_msg->ncookies = %u != 1", reg_msg->ncookies);
12981ae08745Sheppo 		return (EBADMSG);
12991ae08745Sheppo 	}
13001ae08745Sheppo 
13011ae08745Sheppo 	status = ldc_mem_dring_map(vd->ldc_handle, reg_msg->cookie,
13021ae08745Sheppo 	    reg_msg->ncookies, reg_msg->num_descriptors,
1303*4bac2208Snarayan 	    reg_msg->descriptor_size, LDC_DIRECT_MAP, &vd->dring_handle);
13041ae08745Sheppo 	if (status != 0) {
13051ae08745Sheppo 		PRN("ldc_mem_dring_map() returned errno %d", status);
13061ae08745Sheppo 		return (status);
13071ae08745Sheppo 	}
13081ae08745Sheppo 
13091ae08745Sheppo 	/*
13101ae08745Sheppo 	 * To remove the need for this assertion, must call
13111ae08745Sheppo 	 * ldc_mem_dring_nextcookie() successfully ncookies-1 times after a
13121ae08745Sheppo 	 * successful call to ldc_mem_dring_map()
13131ae08745Sheppo 	 */
13141ae08745Sheppo 	ASSERT(reg_msg->ncookies == 1);
13151ae08745Sheppo 
13161ae08745Sheppo 	if ((status =
13171ae08745Sheppo 		ldc_mem_dring_info(vd->dring_handle, &dring_minfo)) != 0) {
13181ae08745Sheppo 		PRN("ldc_mem_dring_info() returned errno %d", status);
13191ae08745Sheppo 		if ((status = ldc_mem_dring_unmap(vd->dring_handle)) != 0)
13201ae08745Sheppo 			PRN("ldc_mem_dring_unmap() returned errno %d", status);
13211ae08745Sheppo 		return (status);
13221ae08745Sheppo 	}
13231ae08745Sheppo 
13241ae08745Sheppo 	if (dring_minfo.vaddr == NULL) {
13251ae08745Sheppo 		PRN("Descriptor ring virtual address is NULL");
13260a55fbb7Slm66018 		return (ENXIO);
13271ae08745Sheppo 	}
13281ae08745Sheppo 
13291ae08745Sheppo 
1330d10e4ef2Snarayan 	/* Initialize for valid message and mapped dring */
13311ae08745Sheppo 	PR1("descriptor size = %u, dring length = %u",
13321ae08745Sheppo 	    reg_msg->descriptor_size, reg_msg->num_descriptors);
13331ae08745Sheppo 	vd->initialized |= VD_DRING;
13341ae08745Sheppo 	vd->dring_ident = 1;	/* "There Can Be Only One" */
13351ae08745Sheppo 	vd->dring = dring_minfo.vaddr;
13361ae08745Sheppo 	vd->descriptor_size = reg_msg->descriptor_size;
13371ae08745Sheppo 	vd->dring_len = reg_msg->num_descriptors;
13381ae08745Sheppo 	reg_msg->dring_ident = vd->dring_ident;
1339d10e4ef2Snarayan 
1340d10e4ef2Snarayan 	/*
1341d10e4ef2Snarayan 	 * Allocate and initialize a "shadow" array of data structures for
1342d10e4ef2Snarayan 	 * tasks to process I/O requests in dring elements
1343d10e4ef2Snarayan 	 */
1344d10e4ef2Snarayan 	vd->dring_task =
1345d10e4ef2Snarayan 	    kmem_zalloc((sizeof (*vd->dring_task)) * vd->dring_len, KM_SLEEP);
1346d10e4ef2Snarayan 	for (int i = 0; i < vd->dring_len; i++) {
1347d10e4ef2Snarayan 		vd->dring_task[i].vd		= vd;
1348d10e4ef2Snarayan 		vd->dring_task[i].index		= i;
1349d10e4ef2Snarayan 		vd->dring_task[i].request	= &VD_DRING_ELEM(i)->payload;
1350*4bac2208Snarayan 
1351*4bac2208Snarayan 		status = ldc_mem_alloc_handle(vd->ldc_handle,
1352*4bac2208Snarayan 		    &(vd->dring_task[i].mhdl));
1353*4bac2208Snarayan 		if (status) {
1354*4bac2208Snarayan 			PRN("ldc_mem_alloc_handle() returned err %d ", status);
1355*4bac2208Snarayan 			return (ENXIO);
1356*4bac2208Snarayan 		}
1357d10e4ef2Snarayan 	}
1358d10e4ef2Snarayan 
13591ae08745Sheppo 	return (0);
13601ae08745Sheppo }
13611ae08745Sheppo 
13621ae08745Sheppo static int
13631ae08745Sheppo vd_process_dring_unreg_msg(vd_t *vd, vio_msg_t *msg, size_t msglen)
13641ae08745Sheppo {
13651ae08745Sheppo 	vio_dring_unreg_msg_t	*unreg_msg = (vio_dring_unreg_msg_t *)msg;
13661ae08745Sheppo 
13671ae08745Sheppo 
13681ae08745Sheppo 	ASSERT(msglen >= sizeof (msg->tag));
13691ae08745Sheppo 
13701ae08745Sheppo 	if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO,
13711ae08745Sheppo 		VIO_DRING_UNREG)) {
1372d10e4ef2Snarayan 		PR0("Message is not an unregister-dring message");
1373d10e4ef2Snarayan 		return (ENOMSG);
13741ae08745Sheppo 	}
13751ae08745Sheppo 
13761ae08745Sheppo 	if (msglen != sizeof (*unreg_msg)) {
13771ae08745Sheppo 		PRN("Expected %lu-byte unregister-dring message; "
13781ae08745Sheppo 		    "received %lu bytes", sizeof (*unreg_msg), msglen);
13791ae08745Sheppo 		return (EBADMSG);
13801ae08745Sheppo 	}
13811ae08745Sheppo 
13821ae08745Sheppo 	if (unreg_msg->dring_ident != vd->dring_ident) {
13831ae08745Sheppo 		PRN("Expected dring ident %lu; received %lu",
13841ae08745Sheppo 		    vd->dring_ident, unreg_msg->dring_ident);
13851ae08745Sheppo 		return (EBADMSG);
13861ae08745Sheppo 	}
13871ae08745Sheppo 
13881ae08745Sheppo 	return (0);
13891ae08745Sheppo }
13901ae08745Sheppo 
13911ae08745Sheppo static int
13921ae08745Sheppo process_rdx_msg(vio_msg_t *msg, size_t msglen)
13931ae08745Sheppo {
13941ae08745Sheppo 	ASSERT(msglen >= sizeof (msg->tag));
13951ae08745Sheppo 
1396d10e4ef2Snarayan 	if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_RDX)) {
1397d10e4ef2Snarayan 		PR0("Message is not an RDX message");
1398d10e4ef2Snarayan 		return (ENOMSG);
1399d10e4ef2Snarayan 	}
14001ae08745Sheppo 
14011ae08745Sheppo 	if (msglen != sizeof (vio_rdx_msg_t)) {
14021ae08745Sheppo 		PRN("Expected %lu-byte RDX message; received %lu bytes",
14031ae08745Sheppo 		    sizeof (vio_rdx_msg_t), msglen);
14041ae08745Sheppo 		return (EBADMSG);
14051ae08745Sheppo 	}
14061ae08745Sheppo 
1407d10e4ef2Snarayan 	PR0("Valid RDX message");
14081ae08745Sheppo 	return (0);
14091ae08745Sheppo }
14101ae08745Sheppo 
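/*
 * Check that a data message's sequence number immediately follows the last
 * one received; if not, schedule a connection reset.  The first sequence
 * number received simply establishes the sequence.
 */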
14111ae08745Sheppo static int
14121ae08745Sheppo vd_check_seq_num(vd_t *vd, uint64_t seq_num)
14131ae08745Sheppo {
14141ae08745Sheppo 	if ((vd->initialized & VD_SEQ_NUM) && (seq_num != vd->seq_num + 1)) {
14151ae08745Sheppo 		PRN("Received seq_num %lu; expected %lu",
14161ae08745Sheppo 		    seq_num, (vd->seq_num + 1));
1417d10e4ef2Snarayan 		vd_need_reset(vd, B_FALSE);
14181ae08745Sheppo 		return (1);
14191ae08745Sheppo 	}
14201ae08745Sheppo 
14211ae08745Sheppo 	vd->seq_num = seq_num;
14221ae08745Sheppo 	vd->initialized |= VD_SEQ_NUM;	/* superfluous after first time... */
14231ae08745Sheppo 	return (0);
14241ae08745Sheppo }
14251ae08745Sheppo 
14261ae08745Sheppo /*
14271ae08745Sheppo  * Return the expected size of an inband-descriptor message with all the
14281ae08745Sheppo  * cookies it claims to include
14291ae08745Sheppo  */
14301ae08745Sheppo static size_t
14311ae08745Sheppo expected_inband_size(vd_dring_inband_msg_t *msg)
14321ae08745Sheppo {
14331ae08745Sheppo 	return ((sizeof (*msg)) +
14341ae08745Sheppo 	    (msg->payload.ncookies - 1)*(sizeof (msg->payload.cookie[0])));
14351ae08745Sheppo }
14361ae08745Sheppo 
14371ae08745Sheppo /*
14381ae08745Sheppo  * Process an in-band descriptor message:  used with clients like OBP, with
14391ae08745Sheppo  * which vds exchanges descriptors within VIO message payloads, rather than
14401ae08745Sheppo  * operating on them within a descriptor ring
14411ae08745Sheppo  */
14421ae08745Sheppo static int
1443d10e4ef2Snarayan vd_process_desc_msg(vd_t *vd, vio_msg_t *msg, size_t msglen, size_t msgsize)
14441ae08745Sheppo {
14451ae08745Sheppo 	size_t			expected;
14461ae08745Sheppo 	vd_dring_inband_msg_t	*desc_msg = (vd_dring_inband_msg_t *)msg;
14471ae08745Sheppo 
14481ae08745Sheppo 
14491ae08745Sheppo 	ASSERT(msglen >= sizeof (msg->tag));
14501ae08745Sheppo 
14511ae08745Sheppo 	if (!vd_msgtype(&msg->tag, VIO_TYPE_DATA, VIO_SUBTYPE_INFO,
1452d10e4ef2Snarayan 		VIO_DESC_DATA)) {
1453d10e4ef2Snarayan 		PR1("Message is not an in-band-descriptor message");
1454d10e4ef2Snarayan 		return (ENOMSG);
1455d10e4ef2Snarayan 	}
14561ae08745Sheppo 
14571ae08745Sheppo 	if (msglen < sizeof (*desc_msg)) {
14581ae08745Sheppo 		PRN("Expected at least %lu-byte descriptor message; "
14591ae08745Sheppo 		    "received %lu bytes", sizeof (*desc_msg), msglen);
14601ae08745Sheppo 		return (EBADMSG);
14611ae08745Sheppo 	}
14621ae08745Sheppo 
14631ae08745Sheppo 	if (msglen != (expected = expected_inband_size(desc_msg))) {
14641ae08745Sheppo 		PRN("Expected %lu-byte descriptor message; "
14651ae08745Sheppo 		    "received %lu bytes", expected, msglen);
14661ae08745Sheppo 		return (EBADMSG);
14671ae08745Sheppo 	}
14681ae08745Sheppo 
1469d10e4ef2Snarayan 	if (vd_check_seq_num(vd, desc_msg->hdr.seq_num) != 0)
14701ae08745Sheppo 		return (EBADMSG);
14711ae08745Sheppo 
1472d10e4ef2Snarayan 	/*
1473d10e4ef2Snarayan 	 * Valid message:  Set up the in-band descriptor task and process the
1474d10e4ef2Snarayan 	 * request.  Arrange to acknowledge the client's message, unless an
1475d10e4ef2Snarayan 	 * error processing the descriptor task results in setting
1476d10e4ef2Snarayan 	 * VIO_SUBTYPE_NACK
1477d10e4ef2Snarayan 	 */
1478d10e4ef2Snarayan 	PR1("Valid in-band-descriptor message");
1479d10e4ef2Snarayan 	msg->tag.vio_subtype = VIO_SUBTYPE_ACK;
1480d10e4ef2Snarayan 	vd->inband_task.msg	= msg;
1481d10e4ef2Snarayan 	vd->inband_task.msglen	= msglen;
1482d10e4ef2Snarayan 	vd->inband_task.msgsize	= msgsize;
1483d10e4ef2Snarayan 	vd->inband_task.request	= &desc_msg->payload;
1484d10e4ef2Snarayan 	return (vd_process_task(&vd->inband_task));
14851ae08745Sheppo }
14861ae08745Sheppo 
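/*
 * Process a single dring element:  acquire the descriptor, mark a READY
 * descriptor ACCEPTED, release it, and then run the corresponding shadow
 * task; if the task does not complete asynchronously, mark the element
 * done.
 */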
14871ae08745Sheppo static int
1488d10e4ef2Snarayan vd_process_element(vd_t *vd, vd_task_type_t type, uint32_t idx,
1489d10e4ef2Snarayan     vio_msg_t *msg, size_t msglen, size_t msgsize)
14901ae08745Sheppo {
14911ae08745Sheppo 	int			status;
1492d10e4ef2Snarayan 	boolean_t		ready;
1493d10e4ef2Snarayan 	vd_dring_entry_t	*elem = VD_DRING_ELEM(idx);
14941ae08745Sheppo 
14951ae08745Sheppo 
1496d10e4ef2Snarayan 	/* Accept the updated dring element */
1497d10e4ef2Snarayan 	if ((status = ldc_mem_dring_acquire(vd->dring_handle, idx, idx)) != 0) {
14981ae08745Sheppo 		PRN("ldc_mem_dring_acquire() returned errno %d", status);
14991ae08745Sheppo 		return (status);
15001ae08745Sheppo 	}
1501d10e4ef2Snarayan 	ready = (elem->hdr.dstate == VIO_DESC_READY);
1502d10e4ef2Snarayan 	if (ready) {
1503d10e4ef2Snarayan 		elem->hdr.dstate = VIO_DESC_ACCEPTED;
1504d10e4ef2Snarayan 	} else {
1505d10e4ef2Snarayan 		PRN("descriptor %u not ready", idx);
1506d10e4ef2Snarayan 		VD_DUMP_DRING_ELEM(elem);
1507d10e4ef2Snarayan 	}
1508d10e4ef2Snarayan 	if ((status = ldc_mem_dring_release(vd->dring_handle, idx, idx)) != 0) {
15091ae08745Sheppo 		PRN("ldc_mem_dring_release() returned errno %d", status);
15101ae08745Sheppo 		return (status);
15111ae08745Sheppo 	}
1512d10e4ef2Snarayan 	if (!ready)
1513d10e4ef2Snarayan 		return (EBUSY);
15141ae08745Sheppo 
15151ae08745Sheppo 
1516d10e4ef2Snarayan 	/* Initialize a task and process the accepted element */
1517d10e4ef2Snarayan 	PR1("Processing dring element %u", idx);
1518d10e4ef2Snarayan 	vd->dring_task[idx].type	= type;
1519d10e4ef2Snarayan 	vd->dring_task[idx].msg		= msg;
1520d10e4ef2Snarayan 	vd->dring_task[idx].msglen	= msglen;
1521d10e4ef2Snarayan 	vd->dring_task[idx].msgsize	= msgsize;
1522d10e4ef2Snarayan 	if ((status = vd_process_task(&vd->dring_task[idx])) != EINPROGRESS)
1523d10e4ef2Snarayan 		status = vd_mark_elem_done(vd, idx, elem->payload.status);
15241ae08745Sheppo 
15251ae08745Sheppo 	return (status);
15261ae08745Sheppo }
15271ae08745Sheppo 
15281ae08745Sheppo static int
1529d10e4ef2Snarayan vd_process_element_range(vd_t *vd, int start, int end,
1530d10e4ef2Snarayan     vio_msg_t *msg, size_t msglen, size_t msgsize)
1531d10e4ef2Snarayan {
1532d10e4ef2Snarayan 	int		i, n, nelem, status = 0;
1533d10e4ef2Snarayan 	boolean_t	inprogress = B_FALSE;
1534d10e4ef2Snarayan 	vd_task_type_t	type;
1535d10e4ef2Snarayan 
1536d10e4ef2Snarayan 
1537d10e4ef2Snarayan 	ASSERT(start >= 0);
1538d10e4ef2Snarayan 	ASSERT(end >= 0);
1539d10e4ef2Snarayan 
1540d10e4ef2Snarayan 	/*
1541d10e4ef2Snarayan 	 * Arrange to acknowledge the client's message, unless an error
1542d10e4ef2Snarayan 	 * processing one of the dring elements results in setting
1543d10e4ef2Snarayan 	 * VIO_SUBTYPE_NACK
1544d10e4ef2Snarayan 	 */
1545d10e4ef2Snarayan 	msg->tag.vio_subtype = VIO_SUBTYPE_ACK;
1546d10e4ef2Snarayan 
1547d10e4ef2Snarayan 	/*
1548d10e4ef2Snarayan 	 * Process the dring elements in the range
1549d10e4ef2Snarayan 	 */
1550d10e4ef2Snarayan 	nelem = ((end < start) ? end + vd->dring_len : end) - start + 1;
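	/*
	 * The range may wrap past the end of the ring; e.g., with a
	 * 32-entry ring, start = 30 and end = 1 covers elements 30, 31, 0,
	 * and 1, so nelem = 4.
	 */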
1551d10e4ef2Snarayan 	for (i = start, n = nelem; n > 0; i = (i + 1) % vd->dring_len, n--) {
1552d10e4ef2Snarayan 		((vio_dring_msg_t *)msg)->end_idx = i;
1553d10e4ef2Snarayan 		type = (n == 1) ? VD_FINAL_RANGE_TASK : VD_NONFINAL_RANGE_TASK;
1554d10e4ef2Snarayan 		status = vd_process_element(vd, type, i, msg, msglen, msgsize);
1555d10e4ef2Snarayan 		if (status == EINPROGRESS)
1556d10e4ef2Snarayan 			inprogress = B_TRUE;
1557d10e4ef2Snarayan 		else if (status != 0)
1558d10e4ef2Snarayan 			break;
1559d10e4ef2Snarayan 	}
1560d10e4ef2Snarayan 
1561d10e4ef2Snarayan 	/*
1562d10e4ef2Snarayan 	 * If some, but not all, operations of a multi-element range are in
1563d10e4ef2Snarayan 	 * progress, wait for other operations to complete before returning
1564d10e4ef2Snarayan 	 * (which will result in "ack" or "nack" of the message).  Note that
1565d10e4ef2Snarayan 	 * all outstanding operations will need to complete, not just the ones
1566d10e4ef2Snarayan 	 * corresponding to the current range of dring elements; however, as
1567d10e4ef2Snarayan 	 * this situation is an error case, performance is less critical.
1568d10e4ef2Snarayan 	 */
1569d10e4ef2Snarayan 	if ((nelem > 1) && (status != EINPROGRESS) && inprogress)
1570d10e4ef2Snarayan 		ddi_taskq_wait(vd->completionq);
1571d10e4ef2Snarayan 
1572d10e4ef2Snarayan 	return (status);
1573d10e4ef2Snarayan }
1574d10e4ef2Snarayan 
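/*
 * Process a dring-data message:  validate the sequence number, dring
 * ident, and descriptor index range, then process the specified range of
 * dring elements.
 */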
1575d10e4ef2Snarayan static int
1576d10e4ef2Snarayan vd_process_dring_msg(vd_t *vd, vio_msg_t *msg, size_t msglen, size_t msgsize)
15771ae08745Sheppo {
15781ae08745Sheppo 	vio_dring_msg_t	*dring_msg = (vio_dring_msg_t *)msg;
15791ae08745Sheppo 
15801ae08745Sheppo 
15811ae08745Sheppo 	ASSERT(msglen >= sizeof (msg->tag));
15821ae08745Sheppo 
15831ae08745Sheppo 	if (!vd_msgtype(&msg->tag, VIO_TYPE_DATA, VIO_SUBTYPE_INFO,
15841ae08745Sheppo 		VIO_DRING_DATA)) {
1585d10e4ef2Snarayan 		PR1("Message is not a dring-data message");
1586d10e4ef2Snarayan 		return (ENOMSG);
15871ae08745Sheppo 	}
15881ae08745Sheppo 
15891ae08745Sheppo 	if (msglen != sizeof (*dring_msg)) {
15901ae08745Sheppo 		PRN("Expected %lu-byte dring message; received %lu bytes",
15911ae08745Sheppo 		    sizeof (*dring_msg), msglen);
15921ae08745Sheppo 		return (EBADMSG);
15931ae08745Sheppo 	}
15941ae08745Sheppo 
1595d10e4ef2Snarayan 	if (vd_check_seq_num(vd, dring_msg->seq_num) != 0)
15961ae08745Sheppo 		return (EBADMSG);
15971ae08745Sheppo 
15981ae08745Sheppo 	if (dring_msg->dring_ident != vd->dring_ident) {
15991ae08745Sheppo 		PRN("Expected dring ident %lu; received ident %lu",
16001ae08745Sheppo 		    vd->dring_ident, dring_msg->dring_ident);
16011ae08745Sheppo 		return (EBADMSG);
16021ae08745Sheppo 	}
16031ae08745Sheppo 
1604d10e4ef2Snarayan 	if (dring_msg->start_idx >= vd->dring_len) {
1605d10e4ef2Snarayan 		PRN("\"start_idx\" = %u; must be less than %u",
1606d10e4ef2Snarayan 		    dring_msg->start_idx, vd->dring_len);
1607d10e4ef2Snarayan 		return (EBADMSG);
1608d10e4ef2Snarayan 	}
16091ae08745Sheppo 
1610d10e4ef2Snarayan 	if ((dring_msg->end_idx < 0) ||
1611d10e4ef2Snarayan 	    (dring_msg->end_idx >= vd->dring_len)) {
1612d10e4ef2Snarayan 		PRN("\"end_idx\" = %u; must be >= 0 and less than %u",
1613d10e4ef2Snarayan 		    dring_msg->end_idx, vd->dring_len);
1614d10e4ef2Snarayan 		return (EBADMSG);
1615d10e4ef2Snarayan 	}
1616d10e4ef2Snarayan 
1617d10e4ef2Snarayan 	/* Valid message; process range of updated dring elements */
1618d10e4ef2Snarayan 	PR1("Processing descriptor range, start = %u, end = %u",
1619d10e4ef2Snarayan 	    dring_msg->start_idx, dring_msg->end_idx);
1620d10e4ef2Snarayan 	return (vd_process_element_range(vd, dring_msg->start_idx,
1621d10e4ef2Snarayan 		dring_msg->end_idx, msg, msglen, msgsize));
16221ae08745Sheppo }
16231ae08745Sheppo 
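/*
 * Read one message from the LDC channel, retrying a bounded number of
 * times (vds_ldc_retries) if the read times out.  On entry *nbytes holds
 * the buffer size; on success it holds the length of the received message.
 */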
16241ae08745Sheppo static int
16251ae08745Sheppo recv_msg(ldc_handle_t ldc_handle, void *msg, size_t *nbytes)
16261ae08745Sheppo {
16271ae08745Sheppo 	int	retry, status;
16281ae08745Sheppo 	size_t	size = *nbytes;
16291ae08745Sheppo 
16301ae08745Sheppo 
16311ae08745Sheppo 	for (retry = 0, status = ETIMEDOUT;
16321ae08745Sheppo 	    retry < vds_ldc_retries && status == ETIMEDOUT;
16331ae08745Sheppo 	    retry++) {
16341ae08745Sheppo 		PR1("ldc_read() attempt %d", (retry + 1));
16351ae08745Sheppo 		*nbytes = size;
16361ae08745Sheppo 		status = ldc_read(ldc_handle, msg, nbytes);
16371ae08745Sheppo 	}
16381ae08745Sheppo 
16391ae08745Sheppo 	if (status != 0) {
16401ae08745Sheppo 		PRN("ldc_read() returned errno %d", status);
16411ae08745Sheppo 		return (status);
16421ae08745Sheppo 	} else if (*nbytes == 0) {
16431ae08745Sheppo 		PR1("ldc_read() returned 0 and no message read");
16441ae08745Sheppo 		return (ENOMSG);
16451ae08745Sheppo 	}
16461ae08745Sheppo 
16471ae08745Sheppo 	PR1("RCVD %lu-byte message", *nbytes);
16481ae08745Sheppo 	return (0);
16491ae08745Sheppo }
16501ae08745Sheppo 
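/*
 * Process a message according to the current connection state.  The
 * handshake advances from VD_STATE_INIT (expecting a version message)
 * through VD_STATE_VER (expecting attributes) and VD_STATE_ATTR (expecting
 * RDX or a dring registration, depending on the negotiated transfer mode),
 * possibly through VD_STATE_DRING, to VD_STATE_DATA, where data messages
 * are processed.
 */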
16511ae08745Sheppo static int
1652d10e4ef2Snarayan vd_do_process_msg(vd_t *vd, vio_msg_t *msg, size_t msglen, size_t msgsize)
16531ae08745Sheppo {
16541ae08745Sheppo 	int		status;
16551ae08745Sheppo 
16561ae08745Sheppo 
16571ae08745Sheppo 	PR1("Processing (%x/%x/%x) message", msg->tag.vio_msgtype,
16581ae08745Sheppo 	    msg->tag.vio_subtype, msg->tag.vio_subtype_env);
16591ae08745Sheppo 
16601ae08745Sheppo 	/*
16611ae08745Sheppo 	 * Validate session ID up front, since it applies to all messages
16621ae08745Sheppo 	 * once set
16631ae08745Sheppo 	 */
16641ae08745Sheppo 	if ((msg->tag.vio_sid != vd->sid) && (vd->initialized & VD_SID)) {
16651ae08745Sheppo 		PRN("Expected SID %u, received %u", vd->sid,
16661ae08745Sheppo 		    msg->tag.vio_sid);
16671ae08745Sheppo 		return (EBADMSG);
16681ae08745Sheppo 	}
16691ae08745Sheppo 
16701ae08745Sheppo 
16711ae08745Sheppo 	/*
16721ae08745Sheppo 	 * Process the received message based on connection state
16731ae08745Sheppo 	 */
16741ae08745Sheppo 	switch (vd->state) {
16751ae08745Sheppo 	case VD_STATE_INIT:	/* expect version message */
16760a55fbb7Slm66018 		if ((status = vd_process_ver_msg(vd, msg, msglen)) != 0)
16771ae08745Sheppo 			return (status);
16781ae08745Sheppo 
16791ae08745Sheppo 		/* Version negotiated, move to that state */
16801ae08745Sheppo 		vd->state = VD_STATE_VER;
16811ae08745Sheppo 		return (0);
16821ae08745Sheppo 
16831ae08745Sheppo 	case VD_STATE_VER:	/* expect attribute message */
16841ae08745Sheppo 		if ((status = vd_process_attr_msg(vd, msg, msglen)) != 0)
16851ae08745Sheppo 			return (status);
16861ae08745Sheppo 
16871ae08745Sheppo 		/* Attributes exchanged, move to that state */
16881ae08745Sheppo 		vd->state = VD_STATE_ATTR;
16891ae08745Sheppo 		return (0);
16901ae08745Sheppo 
16911ae08745Sheppo 	case VD_STATE_ATTR:
16921ae08745Sheppo 		switch (vd->xfer_mode) {
16931ae08745Sheppo 		case VIO_DESC_MODE:	/* expect RDX message */
16941ae08745Sheppo 			if ((status = process_rdx_msg(msg, msglen)) != 0)
16951ae08745Sheppo 				return (status);
16961ae08745Sheppo 
16971ae08745Sheppo 			/* Ready to receive in-band descriptors */
16981ae08745Sheppo 			vd->state = VD_STATE_DATA;
16991ae08745Sheppo 			return (0);
17001ae08745Sheppo 
17011ae08745Sheppo 		case VIO_DRING_MODE:	/* expect register-dring message */
17021ae08745Sheppo 			if ((status =
17031ae08745Sheppo 				vd_process_dring_reg_msg(vd, msg, msglen)) != 0)
17041ae08745Sheppo 				return (status);
17051ae08745Sheppo 
17061ae08745Sheppo 			/* One dring negotiated, move to that state */
17071ae08745Sheppo 			vd->state = VD_STATE_DRING;
17081ae08745Sheppo 			return (0);
17091ae08745Sheppo 
17101ae08745Sheppo 		default:
17111ae08745Sheppo 			ASSERT("Unsupported transfer mode");
17121ae08745Sheppo 			PRN("Unsupported transfer mode");
17131ae08745Sheppo 			return (ENOTSUP);
17141ae08745Sheppo 		}
17151ae08745Sheppo 
17161ae08745Sheppo 	case VD_STATE_DRING:	/* expect RDX, register-dring, or unreg-dring */
17171ae08745Sheppo 		if ((status = process_rdx_msg(msg, msglen)) == 0) {
17181ae08745Sheppo 			/* Ready to receive data */
17191ae08745Sheppo 			vd->state = VD_STATE_DATA;
17201ae08745Sheppo 			return (0);
17211ae08745Sheppo 		} else if (status != ENOMSG) {
17221ae08745Sheppo 			return (status);
17231ae08745Sheppo 		}
17241ae08745Sheppo 
17251ae08745Sheppo 
17261ae08745Sheppo 		/*
17271ae08745Sheppo 		 * If another register-dring message is received, stay in
17281ae08745Sheppo 		 * dring state in case the client sends RDX; although the
17291ae08745Sheppo 		 * protocol allows multiple drings, this server does not
17301ae08745Sheppo 		 * support using more than one
17311ae08745Sheppo 		 */
17321ae08745Sheppo 		if ((status =
17331ae08745Sheppo 			vd_process_dring_reg_msg(vd, msg, msglen)) != ENOMSG)
17341ae08745Sheppo 			return (status);
17351ae08745Sheppo 
17361ae08745Sheppo 		/*
17371ae08745Sheppo 		 * Acknowledge an unregister-dring message, but reset the
17381ae08745Sheppo 		 * connection anyway:  Although the protocol allows
17391ae08745Sheppo 		 * unregistering drings, this server cannot serve a vdisk
17401ae08745Sheppo 		 * without its only dring
17411ae08745Sheppo 		 */
17421ae08745Sheppo 		status = vd_process_dring_unreg_msg(vd, msg, msglen);
17431ae08745Sheppo 		return ((status == 0) ? ENOTSUP : status);
17441ae08745Sheppo 
17451ae08745Sheppo 	case VD_STATE_DATA:
17461ae08745Sheppo 		switch (vd->xfer_mode) {
17471ae08745Sheppo 		case VIO_DESC_MODE:	/* expect in-band-descriptor message */
1748d10e4ef2Snarayan 			return (vd_process_desc_msg(vd, msg, msglen, msgsize));
17491ae08745Sheppo 
17501ae08745Sheppo 		case VIO_DRING_MODE:	/* expect dring-data or unreg-dring */
17511ae08745Sheppo 			/*
17521ae08745Sheppo 			 * Typically expect dring-data messages, so handle
17531ae08745Sheppo 			 * them first
17541ae08745Sheppo 			 */
17551ae08745Sheppo 			if ((status = vd_process_dring_msg(vd, msg,
1756d10e4ef2Snarayan 				    msglen, msgsize)) != ENOMSG)
17571ae08745Sheppo 				return (status);
17581ae08745Sheppo 
17591ae08745Sheppo 			/*
17601ae08745Sheppo 			 * Acknowledge an unregister-dring message, but reset
17611ae08745Sheppo 			 * the connection anyway:  Although the protocol
17621ae08745Sheppo 			 * allows unregistering drings, this server cannot
17631ae08745Sheppo 			 * serve a vdisk without its only dring
17641ae08745Sheppo 			 */
17651ae08745Sheppo 			status = vd_process_dring_unreg_msg(vd, msg, msglen);
17661ae08745Sheppo 			return ((status == 0) ? ENOTSUP : status);
17671ae08745Sheppo 
17681ae08745Sheppo 		default:
17691ae08745Sheppo 			ASSERT("Unsupported transfer mode");
17701ae08745Sheppo 			PRN("Unsupported transfer mode");
17711ae08745Sheppo 			return (ENOTSUP);
17721ae08745Sheppo 		}
17731ae08745Sheppo 
17741ae08745Sheppo 	default:
17751ae08745Sheppo 		ASSERT("Invalid client connection state");
17761ae08745Sheppo 		PRN("Invalid client connection state");
17771ae08745Sheppo 		return (ENOTSUP);
17781ae08745Sheppo 	}
17791ae08745Sheppo }
17801ae08745Sheppo 
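/*
 * Process a received message and send the resulting "ack" or "nack" back
 * to the client.  A return value of EINPROGRESS means a task will complete
 * the request (and send the response) later; any other failure arranges to
 * reset the connection.
 */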
1781d10e4ef2Snarayan static int
1782d10e4ef2Snarayan vd_process_msg(vd_t *vd, vio_msg_t *msg, size_t msglen, size_t msgsize)
17831ae08745Sheppo {
17841ae08745Sheppo 	int		status;
17851ae08745Sheppo 	boolean_t	reset_ldc = B_FALSE;
17861ae08745Sheppo 
17871ae08745Sheppo 
17881ae08745Sheppo 	/*
17891ae08745Sheppo 	 * Check that the message is at least big enough for a "tag", so that
17901ae08745Sheppo 	 * message processing can proceed based on tag-specified message type
17911ae08745Sheppo 	 */
17921ae08745Sheppo 	if (msglen < sizeof (vio_msg_tag_t)) {
17931ae08745Sheppo 		PRN("Received short (%lu-byte) message", msglen);
17941ae08745Sheppo 		/* Can't "nack" short message, so drop the big hammer */
1795d10e4ef2Snarayan 		vd_need_reset(vd, B_TRUE);
1796d10e4ef2Snarayan 		return (EBADMSG);
17971ae08745Sheppo 	}
17981ae08745Sheppo 
17991ae08745Sheppo 	/*
18001ae08745Sheppo 	 * Process the message
18011ae08745Sheppo 	 */
1802d10e4ef2Snarayan 	switch (status = vd_do_process_msg(vd, msg, msglen, msgsize)) {
18031ae08745Sheppo 	case 0:
18041ae08745Sheppo 		/* "ack" valid, successfully-processed messages */
18051ae08745Sheppo 		msg->tag.vio_subtype = VIO_SUBTYPE_ACK;
18061ae08745Sheppo 		break;
18071ae08745Sheppo 
1808d10e4ef2Snarayan 	case EINPROGRESS:
1809d10e4ef2Snarayan 		/* The completion handler will "ack" or "nack" the message */
1810d10e4ef2Snarayan 		return (EINPROGRESS);
18111ae08745Sheppo 	case ENOMSG:
18121ae08745Sheppo 		PRN("Received unexpected message");
18131ae08745Sheppo 		_NOTE(FALLTHROUGH);
18141ae08745Sheppo 	case EBADMSG:
18151ae08745Sheppo 	case ENOTSUP:
18161ae08745Sheppo 		/* "nack" invalid messages */
18171ae08745Sheppo 		msg->tag.vio_subtype = VIO_SUBTYPE_NACK;
18181ae08745Sheppo 		break;
18191ae08745Sheppo 
18201ae08745Sheppo 	default:
18211ae08745Sheppo 		/* "nack" failed messages */
18221ae08745Sheppo 		msg->tag.vio_subtype = VIO_SUBTYPE_NACK;
18231ae08745Sheppo 		/* An LDC error probably occurred, so try resetting it */
18241ae08745Sheppo 		reset_ldc = B_TRUE;
18251ae08745Sheppo 		break;
18261ae08745Sheppo 	}
18271ae08745Sheppo 
1828d10e4ef2Snarayan 	/* Send the "ack" or "nack" to the client */
18291ae08745Sheppo 	PR1("Sending %s",
18301ae08745Sheppo 	    (msg->tag.vio_subtype == VIO_SUBTYPE_ACK) ? "ACK" : "NACK");
18311ae08745Sheppo 	if (send_msg(vd->ldc_handle, msg, msglen) != 0)
18321ae08745Sheppo 		reset_ldc = B_TRUE;
18331ae08745Sheppo 
1834d10e4ef2Snarayan 	/* Arrange to reset the connection for nack'ed or failed messages */
18351ae08745Sheppo 	if ((status != 0) || reset_ldc)
1836d10e4ef2Snarayan 		vd_need_reset(vd, reset_ldc);
1837d10e4ef2Snarayan 
1838d10e4ef2Snarayan 	return (status);
1839d10e4ef2Snarayan }
1840d10e4ef2Snarayan 
1841d10e4ef2Snarayan static boolean_t
1842d10e4ef2Snarayan vd_enabled(vd_t *vd)
1843d10e4ef2Snarayan {
1844d10e4ef2Snarayan 	boolean_t	enabled;
1845d10e4ef2Snarayan 
1846d10e4ef2Snarayan 
1847d10e4ef2Snarayan 	mutex_enter(&vd->lock);
1848d10e4ef2Snarayan 	enabled = vd->enabled;
1849d10e4ef2Snarayan 	mutex_exit(&vd->lock);
1850d10e4ef2Snarayan 	return (enabled);
18511ae08745Sheppo }
18521ae08745Sheppo 
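/*
 * Task to receive and process incoming messages:  repeatedly allocate a
 * receive buffer, read a message, and process it until no more messages
 * are pending or the vdisk is disabled.  Buffers for requests returning
 * EINPROGRESS are freed later by the completion handler.
 */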
18531ae08745Sheppo static void
18540a55fbb7Slm66018 vd_recv_msg(void *arg)
18551ae08745Sheppo {
18561ae08745Sheppo 	vd_t	*vd = (vd_t *)arg;
18570a55fbb7Slm66018 	int	status = 0;
18581ae08745Sheppo 
18591ae08745Sheppo 
18601ae08745Sheppo 	ASSERT(vd != NULL);
1861d10e4ef2Snarayan 	PR2("New task to receive incoming message(s)");
1862d10e4ef2Snarayan 	while (vd_enabled(vd) && status == 0) {
1863d10e4ef2Snarayan 		size_t		msglen, msgsize;
1864d10e4ef2Snarayan 		vio_msg_t	*vio_msg;
1865d10e4ef2Snarayan 
1866d10e4ef2Snarayan 
18670a55fbb7Slm66018 		/*
1868d10e4ef2Snarayan 		 * Receive and process a message
18690a55fbb7Slm66018 		 */
1870d10e4ef2Snarayan 		vd_reset_if_needed(vd);	/* can change vd->max_msglen */
1871d10e4ef2Snarayan 		msgsize = vd->max_msglen;	/* stable copy for alloc/free */
1872d10e4ef2Snarayan 		msglen	= msgsize;	/* actual length after recv_msg() */
1873d10e4ef2Snarayan 		vio_msg = kmem_alloc(msgsize, KM_SLEEP);
1874d10e4ef2Snarayan 		if ((status = recv_msg(vd->ldc_handle, vio_msg, &msglen)) ==
1875d10e4ef2Snarayan 		    0) {
1876d10e4ef2Snarayan 			if (vd_process_msg(vd, vio_msg, msglen, msgsize) ==
1877d10e4ef2Snarayan 			    EINPROGRESS)
1878d10e4ef2Snarayan 				continue;	/* handler will free msg */
1879d10e4ef2Snarayan 		} else if (status != ENOMSG) {
1880d10e4ef2Snarayan 			/* Probably an LDC failure; arrange to reset it */
1881d10e4ef2Snarayan 			vd_need_reset(vd, B_TRUE);
18820a55fbb7Slm66018 		}
1883d10e4ef2Snarayan 		kmem_free(vio_msg, msgsize);
18841ae08745Sheppo 	}
1885d10e4ef2Snarayan 	PR2("Task finished");
18860a55fbb7Slm66018 }
18870a55fbb7Slm66018 
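/*
 * LDC callback:  ignore events for disabled vdisks, note channel resets,
 * reset the client connection state when the channel comes up, and
 * dispatch a task to receive data when new data arrives.
 */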
18880a55fbb7Slm66018 static uint_t
18891ae08745Sheppo vd_handle_ldc_events(uint64_t event, caddr_t arg)
18901ae08745Sheppo {
18911ae08745Sheppo 	vd_t	*vd = (vd_t *)(void *)arg;
18921ae08745Sheppo 
18931ae08745Sheppo 
18941ae08745Sheppo 	ASSERT(vd != NULL);
1895d10e4ef2Snarayan 
1896d10e4ef2Snarayan 	if (!vd_enabled(vd))
1897d10e4ef2Snarayan 		return (LDC_SUCCESS);
1898d10e4ef2Snarayan 
1899d10e4ef2Snarayan 	if (event & LDC_EVT_RESET) {
1900d10e4ef2Snarayan 		PR0("LDC channel was reset");
1901d10e4ef2Snarayan 		return (LDC_SUCCESS);
1902d10e4ef2Snarayan 	}
1903d10e4ef2Snarayan 
1904d10e4ef2Snarayan 	if (event & LDC_EVT_UP) {
1905d10e4ef2Snarayan 		PR0("LDC channel came up:  Resetting client connection state");
1906d10e4ef2Snarayan 		vd_need_reset(vd, B_FALSE);
1907d10e4ef2Snarayan 	}
1908d10e4ef2Snarayan 
1909d10e4ef2Snarayan 	if (event & LDC_EVT_READ) {
1910d10e4ef2Snarayan 		int	status;
1911d10e4ef2Snarayan 
1912d10e4ef2Snarayan 		PR1("New data available");
1913d10e4ef2Snarayan 		/* Queue a task to receive the new data */
1914d10e4ef2Snarayan 		status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, vd,
1915d10e4ef2Snarayan 		    DDI_SLEEP);
1916d10e4ef2Snarayan 		/* ddi_taskq_dispatch(9f) guarantees success with DDI_SLEEP */
1917d10e4ef2Snarayan 		ASSERT(status == DDI_SUCCESS);
1918d10e4ef2Snarayan 	}
1919d10e4ef2Snarayan 
1920d10e4ef2Snarayan 	return (LDC_SUCCESS);
19211ae08745Sheppo }
19221ae08745Sheppo 
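/*
 * Hash-table walker:  finding any entry means at least one vdisk is still
 * being served, so count it and terminate the walk.
 */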
19231ae08745Sheppo static uint_t
19241ae08745Sheppo vds_check_for_vd(mod_hash_key_t key, mod_hash_val_t *val, void *arg)
19251ae08745Sheppo {
19261ae08745Sheppo 	_NOTE(ARGUNUSED(key, val))
19271ae08745Sheppo 	(*((uint_t *)arg))++;
19281ae08745Sheppo 	return (MH_WALK_TERMINATE);
19291ae08745Sheppo }
19301ae08745Sheppo 
19311ae08745Sheppo 
19321ae08745Sheppo static int
19331ae08745Sheppo vds_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
19341ae08745Sheppo {
19351ae08745Sheppo 	uint_t	vd_present = 0;
19361ae08745Sheppo 	minor_t	instance;
19371ae08745Sheppo 	vds_t	*vds;
19381ae08745Sheppo 
19391ae08745Sheppo 
19401ae08745Sheppo 	switch (cmd) {
19411ae08745Sheppo 	case DDI_DETACH:
19421ae08745Sheppo 		/* the real work happens below */
19431ae08745Sheppo 		break;
19441ae08745Sheppo 	case DDI_SUSPEND:
1945d10e4ef2Snarayan 		PR0("No action required for DDI_SUSPEND");
19461ae08745Sheppo 		return (DDI_SUCCESS);
19471ae08745Sheppo 	default:
1948d10e4ef2Snarayan 		PRN("Unrecognized \"cmd\"");
19491ae08745Sheppo 		return (DDI_FAILURE);
19501ae08745Sheppo 	}
19511ae08745Sheppo 
19521ae08745Sheppo 	ASSERT(cmd == DDI_DETACH);
19531ae08745Sheppo 	instance = ddi_get_instance(dip);
19541ae08745Sheppo 	if ((vds = ddi_get_soft_state(vds_state, instance)) == NULL) {
19551ae08745Sheppo 		PRN("Could not get state for instance %u", instance);
19561ae08745Sheppo 		ddi_soft_state_free(vds_state, instance);
19571ae08745Sheppo 		return (DDI_FAILURE);
19581ae08745Sheppo 	}
19591ae08745Sheppo 
19601ae08745Sheppo 	/* Do not detach when serving any vdisks */
19611ae08745Sheppo 	mod_hash_walk(vds->vd_table, vds_check_for_vd, &vd_present);
19621ae08745Sheppo 	if (vd_present) {
19631ae08745Sheppo 		PR0("Not detaching because serving vdisks");
19641ae08745Sheppo 		return (DDI_FAILURE);
19651ae08745Sheppo 	}
19661ae08745Sheppo 
19671ae08745Sheppo 	PR0("Detaching");
19681ae08745Sheppo 	if (vds->initialized & VDS_MDEG)
19691ae08745Sheppo 		(void) mdeg_unregister(vds->mdeg);
19701ae08745Sheppo 	if (vds->initialized & VDS_LDI)
19711ae08745Sheppo 		(void) ldi_ident_release(vds->ldi_ident);
19721ae08745Sheppo 	mod_hash_destroy_hash(vds->vd_table);
19731ae08745Sheppo 	ddi_soft_state_free(vds_state, instance);
19741ae08745Sheppo 	return (DDI_SUCCESS);
19751ae08745Sheppo }
19761ae08745Sheppo 
19771ae08745Sheppo static boolean_t
19781ae08745Sheppo is_pseudo_device(dev_info_t *dip)
19791ae08745Sheppo {
19801ae08745Sheppo 	dev_info_t	*parent, *root = ddi_root_node();
19811ae08745Sheppo 
19821ae08745Sheppo 
19831ae08745Sheppo 	for (parent = ddi_get_parent(dip); (parent != NULL) && (parent != root);
19841ae08745Sheppo 	    parent = ddi_get_parent(parent)) {
19851ae08745Sheppo 		if (strcmp(ddi_get_name(parent), DEVI_PSEUDO_NEXNAME) == 0)
19861ae08745Sheppo 			return (B_TRUE);
19871ae08745Sheppo 	}
19881ae08745Sheppo 
19891ae08745Sheppo 	return (B_FALSE);
19901ae08745Sheppo }
19911ae08745Sheppo 
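/*
 * Set up a vdisk backed by an entire physical disk:  determine the size of
 * the whole disk, switch to full-disk parameters, and open every non-empty
 * slice so the client can access any of them.
 */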
19921ae08745Sheppo static int
19930a55fbb7Slm66018 vd_setup_full_disk(vd_t *vd)
19940a55fbb7Slm66018 {
19950a55fbb7Slm66018 	int		rval, status;
19960a55fbb7Slm66018 	major_t		major = getmajor(vd->dev[0]);
19970a55fbb7Slm66018 	minor_t		minor = getminor(vd->dev[0]) - VD_ENTIRE_DISK_SLICE;
1998*4bac2208Snarayan 	struct dk_minfo	dk_minfo;
19990a55fbb7Slm66018 
2000*4bac2208Snarayan 	/*
2001*4bac2208Snarayan 	 * At this point, vdisk_size is set to the size of partition 2 but
2002*4bac2208Snarayan 	 * this does not represent the size of the disk because partition 2
2003*4bac2208Snarayan 	 * may not cover the entire disk and its size does not include reserved
2004*4bac2208Snarayan 	 * blocks. So we update vdisk_size to be the size of the entire disk.
2005*4bac2208Snarayan 	 */
2006*4bac2208Snarayan 	if ((status = ldi_ioctl(vd->ldi_handle[0], DKIOCGMEDIAINFO,
2007*4bac2208Snarayan 	    (intptr_t)&dk_minfo, (vd_open_flags | FKIOCTL),
2008*4bac2208Snarayan 	    kcred, &rval)) != 0) {
2009*4bac2208Snarayan 		PRN("ldi_ioctl(DKIOCGMEDIAINFO) returned errno %d",
2010*4bac2208Snarayan 		    status);
20110a55fbb7Slm66018 		return (status);
20120a55fbb7Slm66018 	}
2013*4bac2208Snarayan 	vd->vdisk_size = dk_minfo.dki_capacity;
20140a55fbb7Slm66018 
20150a55fbb7Slm66018 	/* Set full-disk parameters */
20160a55fbb7Slm66018 	vd->vdisk_type	= VD_DISK_TYPE_DISK;
20170a55fbb7Slm66018 	vd->nslices	= (sizeof (vd->dev))/(sizeof (vd->dev[0]));
20180a55fbb7Slm66018 
20190a55fbb7Slm66018 	/* Move dev number and LDI handle to entire-disk-slice array elements */
20200a55fbb7Slm66018 	vd->dev[VD_ENTIRE_DISK_SLICE]		= vd->dev[0];
20210a55fbb7Slm66018 	vd->dev[0]				= 0;
20220a55fbb7Slm66018 	vd->ldi_handle[VD_ENTIRE_DISK_SLICE]	= vd->ldi_handle[0];
20230a55fbb7Slm66018 	vd->ldi_handle[0]			= NULL;
20240a55fbb7Slm66018 
20250a55fbb7Slm66018 	/* Initialize device numbers for remaining slices and open them */
20260a55fbb7Slm66018 	for (int slice = 0; slice < vd->nslices; slice++) {
20270a55fbb7Slm66018 		/*
20280a55fbb7Slm66018 		 * Skip the entire-disk slice, as it's already open and its
20290a55fbb7Slm66018 		 * device known
20300a55fbb7Slm66018 		 */
20310a55fbb7Slm66018 		if (slice == VD_ENTIRE_DISK_SLICE)
20320a55fbb7Slm66018 			continue;
20330a55fbb7Slm66018 		ASSERT(vd->dev[slice] == 0);
20340a55fbb7Slm66018 		ASSERT(vd->ldi_handle[slice] == NULL);
20350a55fbb7Slm66018 
20360a55fbb7Slm66018 		/*
20370a55fbb7Slm66018 		 * Construct the device number for the current slice
20380a55fbb7Slm66018 		 */
20390a55fbb7Slm66018 		vd->dev[slice] = makedevice(major, (minor + slice));
20400a55fbb7Slm66018 
20410a55fbb7Slm66018 		/*
20420a55fbb7Slm66018 		 * At least some underlying drivers refuse to open
20430a55fbb7Slm66018 		 * devices for (currently) zero-length slices, so skip
20440a55fbb7Slm66018 		 * them for now
20450a55fbb7Slm66018 		 */
2046*4bac2208Snarayan 		if (vd->vtoc.v_part[slice].p_size == 0) {
20470a55fbb7Slm66018 			PR0("Skipping zero-length slice %u", slice);
20480a55fbb7Slm66018 			continue;
20490a55fbb7Slm66018 		}
20500a55fbb7Slm66018 
20510a55fbb7Slm66018 		/*
20520a55fbb7Slm66018 		 * Open all non-empty slices of the disk to serve them to the
20530a55fbb7Slm66018 		 * client.  Slices are opened exclusively to prevent other
20540a55fbb7Slm66018 		 * threads or processes in the service domain from performing
20550a55fbb7Slm66018 		 * I/O to slices being accessed by a client.  Failure to open
20560a55fbb7Slm66018 		 * a slice results in vds not serving this disk, as the client
20570a55fbb7Slm66018 		 * could attempt (and should be able) to access any non-empty
20580a55fbb7Slm66018 		 * slice immediately.  Any slices successfully opened before a
20590a55fbb7Slm66018 		 * failure will get closed by vds_destroy_vd() as a result of
20600a55fbb7Slm66018 		 * the error returned by this function.
20610a55fbb7Slm66018 		 */
20620a55fbb7Slm66018 		PR0("Opening device major %u, minor %u = slice %u",
20630a55fbb7Slm66018 		    major, minor, slice);
20640a55fbb7Slm66018 		if ((status = ldi_open_by_dev(&vd->dev[slice], OTYP_BLK,
20650a55fbb7Slm66018 			    vd_open_flags, kcred, &vd->ldi_handle[slice],
20660a55fbb7Slm66018 			    vd->vds->ldi_ident)) != 0) {
20670a55fbb7Slm66018 			PRN("ldi_open_by_dev() returned errno %d "
20680a55fbb7Slm66018 			    "for slice %u", status, slice);
20690a55fbb7Slm66018 			/* vds_destroy_vd() will close any open slices */
20700a55fbb7Slm66018 			return (status);
20710a55fbb7Slm66018 		}
20720a55fbb7Slm66018 	}
20730a55fbb7Slm66018 
20740a55fbb7Slm66018 	return (0);
20750a55fbb7Slm66018 }
20760a55fbb7Slm66018 
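/*
 * Fabricate a minimal EFI label for a single-slice vdisk:  one GPT header
 * plus a single reserved partition entry spanning the whole slice, with
 * the CRCs filled in, stored in vd->dk_efi.
 */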
20770a55fbb7Slm66018 static int
2078*4bac2208Snarayan vd_setup_partition_efi(vd_t *vd)
2079*4bac2208Snarayan {
2080*4bac2208Snarayan 	efi_gpt_t *gpt;
2081*4bac2208Snarayan 	efi_gpe_t *gpe;
2082*4bac2208Snarayan 	struct uuid uuid = EFI_RESERVED;
2083*4bac2208Snarayan 	uint32_t crc;
2084*4bac2208Snarayan 	int length;
2085*4bac2208Snarayan 
2086*4bac2208Snarayan 	length = sizeof (efi_gpt_t) + sizeof (efi_gpe_t);
2087*4bac2208Snarayan 
2088*4bac2208Snarayan 	gpt = kmem_zalloc(length, KM_SLEEP);
2089*4bac2208Snarayan 	gpe = (efi_gpe_t *)(gpt + 1);
2090*4bac2208Snarayan 
2091*4bac2208Snarayan 	gpt->efi_gpt_Signature = LE_64(EFI_SIGNATURE);
2092*4bac2208Snarayan 	gpt->efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
2093*4bac2208Snarayan 	gpt->efi_gpt_HeaderSize = LE_32(sizeof (efi_gpt_t));
2094*4bac2208Snarayan 	gpt->efi_gpt_FirstUsableLBA = LE_64(0ULL);
2095*4bac2208Snarayan 	gpt->efi_gpt_LastUsableLBA = LE_64(vd->vdisk_size - 1);
2096*4bac2208Snarayan 	gpt->efi_gpt_NumberOfPartitionEntries = LE_32(1);
2097*4bac2208Snarayan 	gpt->efi_gpt_SizeOfPartitionEntry = LE_32(sizeof (efi_gpe_t));
2098*4bac2208Snarayan 
2099*4bac2208Snarayan 	UUID_LE_CONVERT(gpe->efi_gpe_PartitionTypeGUID, uuid);
2100*4bac2208Snarayan 	gpe->efi_gpe_StartingLBA = gpt->efi_gpt_FirstUsableLBA;
2101*4bac2208Snarayan 	gpe->efi_gpe_EndingLBA = gpt->efi_gpt_LastUsableLBA;
2102*4bac2208Snarayan 
2103*4bac2208Snarayan 	CRC32(crc, gpe, sizeof (efi_gpe_t), -1U, crc32_table);
2104*4bac2208Snarayan 	gpt->efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
2105*4bac2208Snarayan 
2106*4bac2208Snarayan 	CRC32(crc, gpt, sizeof (efi_gpt_t), -1U, crc32_table);
2107*4bac2208Snarayan 	gpt->efi_gpt_HeaderCRC32 = LE_32(~crc);
2108*4bac2208Snarayan 
2109*4bac2208Snarayan 	vd->dk_efi.dki_lba = 0;
2110*4bac2208Snarayan 	vd->dk_efi.dki_length = length;
2111*4bac2208Snarayan 	vd->dk_efi.dki_data = gpt;
2112*4bac2208Snarayan 
2113*4bac2208Snarayan 	return (0);
2114*4bac2208Snarayan }
2115*4bac2208Snarayan 
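/*
 * Open the backing device named by "device_path" and initialize the vdisk
 * parameters:  device size, label, geometry, and whether the backing
 * device is exported as a pseudo device, a single slice, or a full disk.
 */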
2116*4bac2208Snarayan static int
2117e1ebb9ecSlm66018 vd_setup_vd(char *device_path, vd_t *vd)
21181ae08745Sheppo {
2119e1ebb9ecSlm66018 	int		rval, status;
21201ae08745Sheppo 	dev_info_t	*dip;
21211ae08745Sheppo 	struct dk_cinfo	dk_cinfo;
21221ae08745Sheppo 
2123*4bac2208Snarayan 	/*
2124*4bac2208Snarayan 	 * We need to open with FNDELAY so that opening an empty partition
2125*4bac2208Snarayan 	 * does not fail.
2126*4bac2208Snarayan 	 */
2127*4bac2208Snarayan 	if ((status = ldi_open_by_name(device_path, vd_open_flags | FNDELAY,
2128*4bac2208Snarayan 	    kcred, &vd->ldi_handle[0], vd->vds->ldi_ident)) != 0) {
2129e1ebb9ecSlm66018 		PRN("ldi_open_by_name(%s) = errno %d", device_path, status);
21300a55fbb7Slm66018 		return (status);
21310a55fbb7Slm66018 	}
21320a55fbb7Slm66018 
2133*4bac2208Snarayan 	/*
2134*4bac2208Snarayan 	 * nslices must be updated now so that vds_destroy_vd() will close
2135*4bac2208Snarayan 	 * the slice we have just opened in case of an error.
2136*4bac2208Snarayan 	 */
2137*4bac2208Snarayan 	vd->nslices = 1;
2138*4bac2208Snarayan 
2139e1ebb9ecSlm66018 	/* Get device number and size of backing device */
21400a55fbb7Slm66018 	if ((status = ldi_get_dev(vd->ldi_handle[0], &vd->dev[0])) != 0) {
21411ae08745Sheppo 		PRN("ldi_get_dev() returned errno %d for %s",
2142e1ebb9ecSlm66018 		    status, device_path);
21431ae08745Sheppo 		return (status);
21441ae08745Sheppo 	}
21450a55fbb7Slm66018 	if (ldi_get_size(vd->ldi_handle[0], &vd->vdisk_size) != DDI_SUCCESS) {
2146e1ebb9ecSlm66018 		PRN("ldi_get_size() failed for %s", device_path);
21471ae08745Sheppo 		return (EIO);
21481ae08745Sheppo 	}
2149e1ebb9ecSlm66018 	vd->vdisk_size = lbtodb(vd->vdisk_size);	/* convert to blocks */
21501ae08745Sheppo 
2151e1ebb9ecSlm66018 	/* Verify backing device supports dk_cinfo, dk_geom, and vtoc */
2152e1ebb9ecSlm66018 	if ((status = ldi_ioctl(vd->ldi_handle[0], DKIOCINFO,
2153e1ebb9ecSlm66018 		    (intptr_t)&dk_cinfo, (vd_open_flags | FKIOCTL), kcred,
2154e1ebb9ecSlm66018 		    &rval)) != 0) {
2155e1ebb9ecSlm66018 		PRN("ldi_ioctl(DKIOCINFO) returned errno %d for %s",
2156e1ebb9ecSlm66018 		    status, device_path);
2157e1ebb9ecSlm66018 		return (status);
2158e1ebb9ecSlm66018 	}
2159e1ebb9ecSlm66018 	if (dk_cinfo.dki_partition >= V_NUMPAR) {
2160e1ebb9ecSlm66018 		PRN("slice %u >= maximum slice %u for %s",
2161e1ebb9ecSlm66018 		    dk_cinfo.dki_partition, V_NUMPAR, device_path);
2162e1ebb9ecSlm66018 		return (EIO);
2163e1ebb9ecSlm66018 	}
2164*4bac2208Snarayan 
2165*4bac2208Snarayan 	status = vd_read_vtoc(vd->ldi_handle[0], &vd->vtoc, &vd->vdisk_label);
2166*4bac2208Snarayan 
2167*4bac2208Snarayan 	if (status != 0) {
2168*4bac2208Snarayan 		PRN("vd_read_vtoc returned errno %d for %s",
2169e1ebb9ecSlm66018 		    status, device_path);
2170e1ebb9ecSlm66018 		return (status);
2171e1ebb9ecSlm66018 	}
2172*4bac2208Snarayan 
2173*4bac2208Snarayan 	if (vd->vdisk_label == VD_DISK_LABEL_VTOC &&
2174*4bac2208Snarayan 	    (status = ldi_ioctl(vd->ldi_handle[0], DKIOCGGEOM,
2175*4bac2208Snarayan 	    (intptr_t)&vd->dk_geom, (vd_open_flags | FKIOCTL),
2176*4bac2208Snarayan 	    kcred, &rval)) != 0) {
2177*4bac2208Snarayan 		    PRN("ldi_ioctl(DKIOCGGEOM) returned errno %d for %s",
2178e1ebb9ecSlm66018 			status, device_path);
2179e1ebb9ecSlm66018 		    return (status);
2180e1ebb9ecSlm66018 	}
2181e1ebb9ecSlm66018 
2182e1ebb9ecSlm66018 	/* Store the device's max transfer size for return to the client */
2183e1ebb9ecSlm66018 	vd->max_xfer_sz = dk_cinfo.dki_maxtransfer;
2184e1ebb9ecSlm66018 
2185e1ebb9ecSlm66018 
2186e1ebb9ecSlm66018 	/* Determine if backing device is a pseudo device */
21871ae08745Sheppo 	if ((dip = ddi_hold_devi_by_instance(getmajor(vd->dev[0]),
21881ae08745Sheppo 		    dev_to_instance(vd->dev[0]), 0))  == NULL) {
2189e1ebb9ecSlm66018 		PRN("%s is no longer accessible", device_path);
21901ae08745Sheppo 		return (EIO);
21911ae08745Sheppo 	}
21921ae08745Sheppo 	vd->pseudo = is_pseudo_device(dip);
21931ae08745Sheppo 	ddi_release_devi(dip);
21941ae08745Sheppo 	if (vd->pseudo) {
21951ae08745Sheppo 		vd->vdisk_type	= VD_DISK_TYPE_SLICE;
21961ae08745Sheppo 		vd->nslices	= 1;
21971ae08745Sheppo 		return (0);	/* ...and we're done */
21981ae08745Sheppo 	}
21991ae08745Sheppo 
22001ae08745Sheppo 
22010a55fbb7Slm66018 	/* If slice is entire-disk slice, initialize for full disk */
22020a55fbb7Slm66018 	if (dk_cinfo.dki_partition == VD_ENTIRE_DISK_SLICE)
22030a55fbb7Slm66018 		return (vd_setup_full_disk(vd));
22041ae08745Sheppo 
22050a55fbb7Slm66018 
2206e1ebb9ecSlm66018 	/* Otherwise, we have a non-entire slice of a device */
22071ae08745Sheppo 	vd->vdisk_type	= VD_DISK_TYPE_SLICE;
22081ae08745Sheppo 	vd->nslices	= 1;
22091ae08745Sheppo 
2210*4bac2208Snarayan 	if (vd->vdisk_label == VD_DISK_LABEL_EFI) {
2211*4bac2208Snarayan 		status = vd_setup_partition_efi(vd);
2212*4bac2208Snarayan 		return (status);
2213*4bac2208Snarayan 	}
22141ae08745Sheppo 
2215e1ebb9ecSlm66018 	/* Initialize dk_geom structure for single-slice device */
22161ae08745Sheppo 	if (vd->dk_geom.dkg_nsect == 0) {
2217e1ebb9ecSlm66018 		PRN("%s geometry claims 0 sectors per track", device_path);
22181ae08745Sheppo 		return (EIO);
22191ae08745Sheppo 	}
22201ae08745Sheppo 	if (vd->dk_geom.dkg_nhead == 0) {
2221e1ebb9ecSlm66018 		PRN("%s geometry claims 0 heads", device_path);
22221ae08745Sheppo 		return (EIO);
22231ae08745Sheppo 	}
22241ae08745Sheppo 	vd->dk_geom.dkg_ncyl =
2225e1ebb9ecSlm66018 	    vd->vdisk_size/vd->dk_geom.dkg_nsect/vd->dk_geom.dkg_nhead;
22261ae08745Sheppo 	vd->dk_geom.dkg_acyl = 0;
22271ae08745Sheppo 	vd->dk_geom.dkg_pcyl = vd->dk_geom.dkg_ncyl + vd->dk_geom.dkg_acyl;
22281ae08745Sheppo 
22291ae08745Sheppo 
2230e1ebb9ecSlm66018 	/* Initialize vtoc structure for single-slice device */
22311ae08745Sheppo 	bcopy(VD_VOLUME_NAME, vd->vtoc.v_volume,
22321ae08745Sheppo 	    MIN(sizeof (VD_VOLUME_NAME), sizeof (vd->vtoc.v_volume)));
22331ae08745Sheppo 	bzero(vd->vtoc.v_part, sizeof (vd->vtoc.v_part));
22341ae08745Sheppo 	vd->vtoc.v_nparts = 1;
22351ae08745Sheppo 	vd->vtoc.v_part[0].p_tag = V_UNASSIGNED;
22361ae08745Sheppo 	vd->vtoc.v_part[0].p_flag = 0;
22371ae08745Sheppo 	vd->vtoc.v_part[0].p_start = 0;
2238e1ebb9ecSlm66018 	vd->vtoc.v_part[0].p_size = vd->vdisk_size;
22391ae08745Sheppo 	bcopy(VD_ASCIILABEL, vd->vtoc.v_asciilabel,
22401ae08745Sheppo 	    MIN(sizeof (VD_ASCIILABEL), sizeof (vd->vtoc.v_asciilabel)));
22411ae08745Sheppo 
22421ae08745Sheppo 
22431ae08745Sheppo 	return (0);
22441ae08745Sheppo }
22451ae08745Sheppo 
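/*
 * Initialize the state for a single vdisk:  open the backing device,
 * create the start and completion task queues, bring up the LDC channel,
 * and add the vdisk to the server's hash table.
 */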
22461ae08745Sheppo static int
2247e1ebb9ecSlm66018 vds_do_init_vd(vds_t *vds, uint64_t id, char *device_path, uint64_t ldc_id,
22481ae08745Sheppo     vd_t **vdp)
22491ae08745Sheppo {
22501ae08745Sheppo 	char			tq_name[TASKQ_NAMELEN];
22510a55fbb7Slm66018 	int			status;
22521ae08745Sheppo 	ddi_iblock_cookie_t	iblock = NULL;
22531ae08745Sheppo 	ldc_attr_t		ldc_attr;
22541ae08745Sheppo 	vd_t			*vd;
22551ae08745Sheppo 
22561ae08745Sheppo 
22571ae08745Sheppo 	ASSERT(vds != NULL);
2258e1ebb9ecSlm66018 	ASSERT(device_path != NULL);
22591ae08745Sheppo 	ASSERT(vdp != NULL);
2260e1ebb9ecSlm66018 	PR0("Adding vdisk for %s", device_path);
22611ae08745Sheppo 
22621ae08745Sheppo 	if ((vd = kmem_zalloc(sizeof (*vd), KM_NOSLEEP)) == NULL) {
22631ae08745Sheppo 		PRN("No memory for virtual disk");
22641ae08745Sheppo 		return (EAGAIN);
22651ae08745Sheppo 	}
22661ae08745Sheppo 	*vdp = vd;	/* assign here so vds_destroy_vd() can cleanup later */
22671ae08745Sheppo 	vd->vds = vds;
22681ae08745Sheppo 
22691ae08745Sheppo 
22700a55fbb7Slm66018 	/* Open vdisk and initialize parameters */
2271e1ebb9ecSlm66018 	if ((status = vd_setup_vd(device_path, vd)) != 0)
22721ae08745Sheppo 		return (status);
22731ae08745Sheppo 	ASSERT(vd->nslices > 0 && vd->nslices <= V_NUMPAR);
22741ae08745Sheppo 	PR0("vdisk_type = %s, pseudo = %s, nslices = %u",
22751ae08745Sheppo 	    ((vd->vdisk_type == VD_DISK_TYPE_DISK) ? "disk" : "slice"),
22761ae08745Sheppo 	    (vd->pseudo ? "yes" : "no"), vd->nslices);
22771ae08745Sheppo 
22781ae08745Sheppo 
22791ae08745Sheppo 	/* Initialize locking */
22801ae08745Sheppo 	if (ddi_get_soft_iblock_cookie(vds->dip, DDI_SOFTINT_MED,
22811ae08745Sheppo 		&iblock) != DDI_SUCCESS) {
22821ae08745Sheppo 		PRN("Could not get iblock cookie.");
22831ae08745Sheppo 		return (EIO);
22841ae08745Sheppo 	}
22851ae08745Sheppo 
22861ae08745Sheppo 	mutex_init(&vd->lock, NULL, MUTEX_DRIVER, iblock);
22871ae08745Sheppo 	vd->initialized |= VD_LOCKING;
22881ae08745Sheppo 
22891ae08745Sheppo 
2290d10e4ef2Snarayan 	/* Create start and completion task queues for the vdisk */
2291d10e4ef2Snarayan 	(void) snprintf(tq_name, sizeof (tq_name), "vd_startq%lu", id);
22921ae08745Sheppo 	PR1("tq_name = %s", tq_name);
2293d10e4ef2Snarayan 	if ((vd->startq = ddi_taskq_create(vds->dip, tq_name, 1,
22941ae08745Sheppo 		    TASKQ_DEFAULTPRI, 0)) == NULL) {
22951ae08745Sheppo 		PRN("Could not create task queue");
22961ae08745Sheppo 		return (EIO);
22971ae08745Sheppo 	}
2298d10e4ef2Snarayan 	(void) snprintf(tq_name, sizeof (tq_name), "vd_completionq%lu", id);
2299d10e4ef2Snarayan 	PR1("tq_name = %s", tq_name);
2300d10e4ef2Snarayan 	if ((vd->completionq = ddi_taskq_create(vds->dip, tq_name, 1,
2301d10e4ef2Snarayan 		    TASKQ_DEFAULTPRI, 0)) == NULL) {
2302d10e4ef2Snarayan 		PRN("Could not create task queue");
2303d10e4ef2Snarayan 		return (EIO);
2304d10e4ef2Snarayan 	}
2305d10e4ef2Snarayan 	vd->enabled = 1;	/* before callback can dispatch to startq */
23061ae08745Sheppo 
23071ae08745Sheppo 
23081ae08745Sheppo 	/* Bring up LDC */
23091ae08745Sheppo 	ldc_attr.devclass	= LDC_DEV_BLK_SVC;
23101ae08745Sheppo 	ldc_attr.instance	= ddi_get_instance(vds->dip);
23111ae08745Sheppo 	ldc_attr.mode		= LDC_MODE_UNRELIABLE;
2312e1ebb9ecSlm66018 	ldc_attr.mtu		= VD_LDC_MTU;
23131ae08745Sheppo 	if ((status = ldc_init(ldc_id, &ldc_attr, &vd->ldc_handle)) != 0) {
23141ae08745Sheppo 		PRN("ldc_init(%lu) = errno %d", ldc_id, status);
23151ae08745Sheppo 		return (status);
23161ae08745Sheppo 	}
23171ae08745Sheppo 	vd->initialized |= VD_LDC;
23181ae08745Sheppo 
23191ae08745Sheppo 	if ((status = ldc_reg_callback(vd->ldc_handle, vd_handle_ldc_events,
23201ae08745Sheppo 		(caddr_t)vd)) != 0) {
23211ae08745Sheppo 		PRN("ldc_reg_callback() returned errno %d", status);
23221ae08745Sheppo 		return (status);
23231ae08745Sheppo 	}
23241ae08745Sheppo 
23251ae08745Sheppo 	if ((status = ldc_open(vd->ldc_handle)) != 0) {
23261ae08745Sheppo 		PRN("ldc_open() returned errno %d", status);
23271ae08745Sheppo 		return (status);
23281ae08745Sheppo 	}
23291ae08745Sheppo 
2330*4bac2208Snarayan 	/* Allocate the inband task memory handle */
2331*4bac2208Snarayan 	status = ldc_mem_alloc_handle(vd->ldc_handle, &(vd->inband_task.mhdl));
2332*4bac2208Snarayan 	if (status) {
2333*4bac2208Snarayan 		PRN("ldc_mem_alloc_handle() returned err %d ", status);
2334*4bac2208Snarayan 		return (ENXIO);
2335*4bac2208Snarayan 	}
23361ae08745Sheppo 
23371ae08745Sheppo 	/* Add the successfully-initialized vdisk to the server's table */
23381ae08745Sheppo 	if (mod_hash_insert(vds->vd_table, (mod_hash_key_t)id, vd) != 0) {
23391ae08745Sheppo 		PRN("Error adding vdisk ID %lu to table", id);
23401ae08745Sheppo 		return (EIO);
23411ae08745Sheppo 	}
23421ae08745Sheppo 
23431ae08745Sheppo 	return (0);
23441ae08745Sheppo }
23451ae08745Sheppo 
23461ae08745Sheppo /*
23471ae08745Sheppo  * Destroy the state associated with a virtual disk
23481ae08745Sheppo  */
23491ae08745Sheppo static void
23501ae08745Sheppo vds_destroy_vd(void *arg)
23511ae08745Sheppo {
23521ae08745Sheppo 	vd_t	*vd = (vd_t *)arg;
23531ae08745Sheppo 
23541ae08745Sheppo 
23551ae08745Sheppo 	if (vd == NULL)
23561ae08745Sheppo 		return;
23571ae08745Sheppo 
2358d10e4ef2Snarayan 	PR0("Destroying vdisk state");
2359d10e4ef2Snarayan 
2360*4bac2208Snarayan 	if (vd->dk_efi.dki_data != NULL)
2361*4bac2208Snarayan 		kmem_free(vd->dk_efi.dki_data, vd->dk_efi.dki_length);
2362*4bac2208Snarayan 
23631ae08745Sheppo 	/* Disable queuing requests for the vdisk */
23641ae08745Sheppo 	if (vd->initialized & VD_LOCKING) {
23651ae08745Sheppo 		mutex_enter(&vd->lock);
23661ae08745Sheppo 		vd->enabled = 0;
23671ae08745Sheppo 		mutex_exit(&vd->lock);
23681ae08745Sheppo 	}
23691ae08745Sheppo 
2370d10e4ef2Snarayan 	/* Drain and destroy start queue (*before* destroying completionq) */
2371d10e4ef2Snarayan 	if (vd->startq != NULL)
2372d10e4ef2Snarayan 		ddi_taskq_destroy(vd->startq);	/* waits for queued tasks */
2373d10e4ef2Snarayan 
2374d10e4ef2Snarayan 	/* Drain and destroy completion queue (*before* shutting down LDC) */
2375d10e4ef2Snarayan 	if (vd->completionq != NULL)
2376d10e4ef2Snarayan 		ddi_taskq_destroy(vd->completionq);	/* waits for tasks */
2377d10e4ef2Snarayan 
2378d10e4ef2Snarayan 	if (vd->dring_task != NULL) {
2379d10e4ef2Snarayan 		ASSERT(vd->dring_len != 0);
2380*4bac2208Snarayan 		/* Free all dring_task memory handles */
2381*4bac2208Snarayan 		for (int i = 0; i < vd->dring_len; i++)
2382*4bac2208Snarayan 			(void) ldc_mem_free_handle(vd->dring_task[i].mhdl);
2383d10e4ef2Snarayan 		kmem_free(vd->dring_task,
2384d10e4ef2Snarayan 		    (sizeof (*vd->dring_task)) * vd->dring_len);
2385d10e4ef2Snarayan 	}
23861ae08745Sheppo 
2387*4bac2208Snarayan 	/* Free the inband task memory handle */
2388*4bac2208Snarayan 	(void) ldc_mem_free_handle(vd->inband_task.mhdl);
2389*4bac2208Snarayan 
23901ae08745Sheppo 	/* Shut down LDC */
23911ae08745Sheppo 	if (vd->initialized & VD_LDC) {
23921ae08745Sheppo 		if (vd->initialized & VD_DRING)
23931ae08745Sheppo 			(void) ldc_mem_dring_unmap(vd->dring_handle);
23941ae08745Sheppo 		(void) ldc_unreg_callback(vd->ldc_handle);
23951ae08745Sheppo 		(void) ldc_close(vd->ldc_handle);
23961ae08745Sheppo 		(void) ldc_fini(vd->ldc_handle);
23971ae08745Sheppo 	}
23981ae08745Sheppo 
23991ae08745Sheppo 	/* Close any open backing-device slices */
24001ae08745Sheppo 	for (uint_t slice = 0; slice < vd->nslices; slice++) {
24011ae08745Sheppo 		if (vd->ldi_handle[slice] != NULL) {
24021ae08745Sheppo 			PR0("Closing slice %u", slice);
24031ae08745Sheppo 			(void) ldi_close(vd->ldi_handle[slice],
2404*4bac2208Snarayan 			    vd_open_flags | FNDELAY, kcred);
24051ae08745Sheppo 		}
24061ae08745Sheppo 	}
24071ae08745Sheppo 
24081ae08745Sheppo 	/* Free lock */
24091ae08745Sheppo 	if (vd->initialized & VD_LOCKING)
24101ae08745Sheppo 		mutex_destroy(&vd->lock);
24111ae08745Sheppo 
24121ae08745Sheppo 	/* Finally, free the vdisk structure itself */
24131ae08745Sheppo 	kmem_free(vd, sizeof (*vd));
24141ae08745Sheppo }
24151ae08745Sheppo 
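/*
 * Create the state for a single virtual disk; if initialization fails,
 * tear down whatever partial state vds_do_init_vd() managed to build.
 */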
24161ae08745Sheppo static int
2417e1ebb9ecSlm66018 vds_init_vd(vds_t *vds, uint64_t id, char *device_path, uint64_t ldc_id)
24181ae08745Sheppo {
24191ae08745Sheppo 	int	status;
24201ae08745Sheppo 	vd_t	*vd = NULL;
24211ae08745Sheppo 
24221ae08745Sheppo 
24231ae08745Sheppo #ifdef lint
24241ae08745Sheppo 	(void) vd;
24251ae08745Sheppo #endif	/* lint */
24261ae08745Sheppo 
2427e1ebb9ecSlm66018 	if ((status = vds_do_init_vd(vds, id, device_path, ldc_id, &vd)) != 0)
24281ae08745Sheppo 		vds_destroy_vd(vd);
24291ae08745Sheppo 
24301ae08745Sheppo 	return (status);
24311ae08745Sheppo }
24321ae08745Sheppo 
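/*
 * Find the channel-endpoint children of a vdisk MD node (reached via "fwd"
 * arcs) using the caller-supplied "channel" cookie array, and return the
 * "id" property of the first endpoint in *ldc_id.  Additional endpoints,
 * if any, are ignored with a warning.
 */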
24331ae08745Sheppo static int
24341ae08745Sheppo vds_do_get_ldc_id(md_t *md, mde_cookie_t vd_node, mde_cookie_t *channel,
24351ae08745Sheppo     uint64_t *ldc_id)
24361ae08745Sheppo {
24371ae08745Sheppo 	int	num_channels;
24381ae08745Sheppo 
24391ae08745Sheppo 
24401ae08745Sheppo 	/* Look for channel endpoint child(ren) of the vdisk MD node */
24411ae08745Sheppo 	if ((num_channels = md_scan_dag(md, vd_node,
24421ae08745Sheppo 		    md_find_name(md, VD_CHANNEL_ENDPOINT),
24431ae08745Sheppo 		    md_find_name(md, "fwd"), channel)) <= 0) {
24441ae08745Sheppo 		PRN("No \"%s\" found for virtual disk", VD_CHANNEL_ENDPOINT);
24451ae08745Sheppo 		return (-1);
24461ae08745Sheppo 	}
24471ae08745Sheppo 
24481ae08745Sheppo 	/* Get the "id" value for the first channel endpoint node */
24491ae08745Sheppo 	if (md_get_prop_val(md, channel[0], VD_ID_PROP, ldc_id) != 0) {
24501ae08745Sheppo 		PRN("No \"%s\" property found for \"%s\" of vdisk",
24511ae08745Sheppo 		    VD_ID_PROP, VD_CHANNEL_ENDPOINT);
24521ae08745Sheppo 		return (-1);
24531ae08745Sheppo 	}
24541ae08745Sheppo 
24551ae08745Sheppo 	if (num_channels > 1) {
24561ae08745Sheppo 		PRN("Using ID of first of multiple channels for this vdisk");
24571ae08745Sheppo 	}
24581ae08745Sheppo 
24591ae08745Sheppo 	return (0);
24601ae08745Sheppo }
24611ae08745Sheppo 
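/*
 * Determine the LDC channel ID for a vdisk MD node.  A scratch cookie
 * array large enough for every node in the MD (a worst-case bound) is
 * allocated for the scan and freed before returning.
 */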
24621ae08745Sheppo static int
24631ae08745Sheppo vds_get_ldc_id(md_t *md, mde_cookie_t vd_node, uint64_t *ldc_id)
24641ae08745Sheppo {
24651ae08745Sheppo 	int		num_nodes, status;
24661ae08745Sheppo 	size_t		size;
24671ae08745Sheppo 	mde_cookie_t	*channel;
24681ae08745Sheppo 
24691ae08745Sheppo 
24701ae08745Sheppo 	if ((num_nodes = md_node_count(md)) <= 0) {
24711ae08745Sheppo 		PRN("Invalid node count in Machine Description subtree");
24721ae08745Sheppo 		return (-1);
24731ae08745Sheppo 	}
24741ae08745Sheppo 	size = num_nodes * (sizeof (*channel));
24751ae08745Sheppo 	channel = kmem_zalloc(size, KM_SLEEP);
24761ae08745Sheppo 	status = vds_do_get_ldc_id(md, vd_node, channel, ldc_id);
24771ae08745Sheppo 	kmem_free(channel, size);
24781ae08745Sheppo 
24791ae08745Sheppo 	return (status);
24801ae08745Sheppo }
24811ae08745Sheppo 
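/*
 * Create a vdisk for a newly-added MD node:  read its "id" and
 * "vds-block-device" properties and its LDC channel ID, then initialize
 * the vdisk via vds_init_vd().
 */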
24821ae08745Sheppo static void
24831ae08745Sheppo vds_add_vd(vds_t *vds, md_t *md, mde_cookie_t vd_node)
24841ae08745Sheppo {
2485e1ebb9ecSlm66018 	char		*device_path = NULL;
24861ae08745Sheppo 	uint64_t	id = 0, ldc_id = 0;
24871ae08745Sheppo 
24881ae08745Sheppo 
24891ae08745Sheppo 	if (md_get_prop_val(md, vd_node, VD_ID_PROP, &id) != 0) {
24901ae08745Sheppo 		PRN("Error getting vdisk \"%s\"", VD_ID_PROP);
24911ae08745Sheppo 		return;
24921ae08745Sheppo 	}
24931ae08745Sheppo 	PR0("Adding vdisk ID %lu", id);
24941ae08745Sheppo 	if (md_get_prop_str(md, vd_node, VD_BLOCK_DEVICE_PROP,
2495e1ebb9ecSlm66018 		&device_path) != 0) {
24961ae08745Sheppo 		PRN("Error getting vdisk \"%s\"", VD_BLOCK_DEVICE_PROP);
24971ae08745Sheppo 		return;
24981ae08745Sheppo 	}
24991ae08745Sheppo 
25001ae08745Sheppo 	if (vds_get_ldc_id(md, vd_node, &ldc_id) != 0) {
25011ae08745Sheppo 		PRN("Error getting LDC ID for vdisk %lu", id);
25021ae08745Sheppo 		return;
25031ae08745Sheppo 	}
25041ae08745Sheppo 
2505e1ebb9ecSlm66018 	if (vds_init_vd(vds, id, device_path, ldc_id) != 0) {
25061ae08745Sheppo 		PRN("Failed to add vdisk ID %lu", id);
25071ae08745Sheppo 		return;
25081ae08745Sheppo 	}
25091ae08745Sheppo }
25101ae08745Sheppo 
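/*
 * Remove the vdisk corresponding to a deleted MD node:  destroying its
 * entry in vds->vd_table invokes vds_destroy_vd() on the associated state.
 */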
25111ae08745Sheppo static void
25121ae08745Sheppo vds_remove_vd(vds_t *vds, md_t *md, mde_cookie_t vd_node)
25131ae08745Sheppo {
25141ae08745Sheppo 	uint64_t	id = 0;
25151ae08745Sheppo 
25161ae08745Sheppo 
25171ae08745Sheppo 	if (md_get_prop_val(md, vd_node, VD_ID_PROP, &id) != 0) {
25181ae08745Sheppo 		PRN("Unable to get \"%s\" property from vdisk's MD node",
25191ae08745Sheppo 		    VD_ID_PROP);
25201ae08745Sheppo 		return;
25211ae08745Sheppo 	}
25221ae08745Sheppo 	PR0("Removing vdisk ID %lu", id);
25231ae08745Sheppo 	if (mod_hash_destroy(vds->vd_table, (mod_hash_key_t)id) != 0)
25241ae08745Sheppo 		PRN("No vdisk entry found for vdisk ID %lu", id);
25251ae08745Sheppo }
25261ae08745Sheppo 
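/*
 * Handle a vdisk node present in both the previous and current MD.  Only a
 * change of the backing device path is supported, and it is implemented by
 * destroying the existing vdisk and re-initializing it with the new path;
 * changes to the vdisk ID or LDC ID are rejected, and an unchanged device
 * path requires no action.
 */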
25271ae08745Sheppo static void
25281ae08745Sheppo vds_change_vd(vds_t *vds, md_t *prev_md, mde_cookie_t prev_vd_node,
25291ae08745Sheppo     md_t *curr_md, mde_cookie_t curr_vd_node)
25301ae08745Sheppo {
25311ae08745Sheppo 	char		*curr_dev, *prev_dev;
25321ae08745Sheppo 	uint64_t	curr_id = 0, curr_ldc_id = 0;
25331ae08745Sheppo 	uint64_t	prev_id = 0, prev_ldc_id = 0;
25341ae08745Sheppo 	size_t		len;
25351ae08745Sheppo 
25361ae08745Sheppo 
25371ae08745Sheppo 	/* Validate that vdisk ID has not changed */
25381ae08745Sheppo 	if (md_get_prop_val(prev_md, prev_vd_node, VD_ID_PROP, &prev_id) != 0) {
25391ae08745Sheppo 		PRN("Error getting previous vdisk \"%s\" property",
25401ae08745Sheppo 		    VD_ID_PROP);
25411ae08745Sheppo 		return;
25421ae08745Sheppo 	}
25431ae08745Sheppo 	if (md_get_prop_val(curr_md, curr_vd_node, VD_ID_PROP, &curr_id) != 0) {
25441ae08745Sheppo 		PRN("Error getting current vdisk \"%s\" property", VD_ID_PROP);
25451ae08745Sheppo 		return;
25461ae08745Sheppo 	}
25471ae08745Sheppo 	if (curr_id != prev_id) {
25481ae08745Sheppo 		PRN("Not changing vdisk:  ID changed from %lu to %lu",
25491ae08745Sheppo 		    prev_id, curr_id);
25501ae08745Sheppo 		return;
25511ae08745Sheppo 	}
25521ae08745Sheppo 
25531ae08745Sheppo 	/* Validate that LDC ID has not changed */
25541ae08745Sheppo 	if (vds_get_ldc_id(prev_md, prev_vd_node, &prev_ldc_id) != 0) {
25551ae08745Sheppo 		PRN("Error getting LDC ID for vdisk %lu", prev_id);
25561ae08745Sheppo 		return;
25571ae08745Sheppo 	}
25581ae08745Sheppo 
25591ae08745Sheppo 	if (vds_get_ldc_id(curr_md, curr_vd_node, &curr_ldc_id) != 0) {
25601ae08745Sheppo 		PRN("Error getting LDC ID for vdisk %lu", curr_id);
25611ae08745Sheppo 		return;
25621ae08745Sheppo 	}
25631ae08745Sheppo 	if (curr_ldc_id != prev_ldc_id) {
25640a55fbb7Slm66018 		_NOTE(NOTREACHED);	/* lint is confused */
25651ae08745Sheppo 		PRN("Not changing vdisk:  "
25661ae08745Sheppo 		    "LDC ID changed from %lu to %lu", prev_ldc_id, curr_ldc_id);
25671ae08745Sheppo 		return;
25681ae08745Sheppo 	}
25691ae08745Sheppo 
25701ae08745Sheppo 	/* Determine whether device path has changed */
25711ae08745Sheppo 	if (md_get_prop_str(prev_md, prev_vd_node, VD_BLOCK_DEVICE_PROP,
25721ae08745Sheppo 		&prev_dev) != 0) {
25731ae08745Sheppo 		PRN("Error getting previous vdisk \"%s\"",
25741ae08745Sheppo 		    VD_BLOCK_DEVICE_PROP);
25751ae08745Sheppo 		return;
25761ae08745Sheppo 	}
25771ae08745Sheppo 	if (md_get_prop_str(curr_md, curr_vd_node, VD_BLOCK_DEVICE_PROP,
25781ae08745Sheppo 		&curr_dev) != 0) {
25791ae08745Sheppo 		PRN("Error getting current vdisk \"%s\"", VD_BLOCK_DEVICE_PROP);
25801ae08745Sheppo 		return;
25811ae08745Sheppo 	}
25821ae08745Sheppo 	if (((len = strlen(curr_dev)) == strlen(prev_dev)) &&
25831ae08745Sheppo 	    (strncmp(curr_dev, prev_dev, len) == 0))
25841ae08745Sheppo 		return;	/* no relevant (supported) change */
25851ae08745Sheppo 
25861ae08745Sheppo 	PR0("Changing vdisk ID %lu", prev_id);
25871ae08745Sheppo 	/* Remove old state, which will close vdisk and reset */
25881ae08745Sheppo 	if (mod_hash_destroy(vds->vd_table, (mod_hash_key_t)prev_id) != 0)
25891ae08745Sheppo 		PRN("No entry found for vdisk ID %lu", prev_id);
25901ae08745Sheppo 	/* Re-initialize vdisk with new state */
25911ae08745Sheppo 	if (vds_init_vd(vds, curr_id, curr_dev, curr_ldc_id) != 0) {
25921ae08745Sheppo 		PRN("Failed to change vdisk ID %lu", curr_id);
25931ae08745Sheppo 		return;
25941ae08745Sheppo 	}
25951ae08745Sheppo }
25961ae08745Sheppo 
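/*
 * MDEG callback invoked whenever a registered portion of the MD changes:
 * process removed vdisk nodes first, then nodes present in both MDs (which
 * may have changed), then newly-added nodes.
 */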
25971ae08745Sheppo static int
25981ae08745Sheppo vds_process_md(void *arg, mdeg_result_t *md)
25991ae08745Sheppo {
26001ae08745Sheppo 	int	i;
26011ae08745Sheppo 	vds_t	*vds = arg;
26021ae08745Sheppo 
26031ae08745Sheppo 
26041ae08745Sheppo 	if (md == NULL)
26051ae08745Sheppo 		return (MDEG_FAILURE);
26061ae08745Sheppo 	ASSERT(vds != NULL);
26071ae08745Sheppo 
26081ae08745Sheppo 	for (i = 0; i < md->removed.nelem; i++)
26091ae08745Sheppo 		vds_remove_vd(vds, md->removed.mdp, md->removed.mdep[i]);
26101ae08745Sheppo 	for (i = 0; i < md->match_curr.nelem; i++)
26111ae08745Sheppo 		vds_change_vd(vds, md->match_prev.mdp, md->match_prev.mdep[i],
26121ae08745Sheppo 		    md->match_curr.mdp, md->match_curr.mdep[i]);
26131ae08745Sheppo 	for (i = 0; i < md->added.nelem; i++)
26141ae08745Sheppo 		vds_add_vd(vds, md->added.mdp, md->added.mdep[i]);
26151ae08745Sheppo 
26161ae08745Sheppo 	return (MDEG_SUCCESS);
26171ae08745Sheppo }
26181ae08745Sheppo 
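/*
 * Per-instance attach processing:  verify the devinfo node's "reg"
 * property, allocate the soft state, create the vdisk hash table (with
 * vds_destroy_vd() as its value destructor), obtain an LDI identifier,
 * register with the MDEG framework for updates to this instance's vds
 * node, and disable auto-detach so the driver stays loaded across MD
 * changes.
 *
 * The MDEG registration below matches MD subtrees of roughly this shape
 * (a sketch inferred from the node/property specs and vds_get_ldc_id(),
 * not an authoritative MD layout):
 *
 *	virtual-device (name = "virtual-disk-server", cfg-handle = <reg>)
 *	    virtual-device-port (id = <vdisk ID>)
 *		channel-endpoint (id = <LDC ID>)
 */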
26191ae08745Sheppo static int
26201ae08745Sheppo vds_do_attach(dev_info_t *dip)
26211ae08745Sheppo {
26221ae08745Sheppo 	static char	reg_prop[] = "reg";	/* devinfo ID prop */
26231ae08745Sheppo 
26241ae08745Sheppo 	/* MDEG specification for a (particular) vds node */
26251ae08745Sheppo 	static mdeg_prop_spec_t	vds_prop_spec[] = {
26261ae08745Sheppo 		{MDET_PROP_STR, "name", {VDS_NAME}},
26271ae08745Sheppo 		{MDET_PROP_VAL, "cfg-handle", {0}},
26281ae08745Sheppo 		{MDET_LIST_END, NULL, {0}}};
26291ae08745Sheppo 	static mdeg_node_spec_t	vds_spec = {"virtual-device", vds_prop_spec};
26301ae08745Sheppo 
26311ae08745Sheppo 	/* MDEG specification for matching a vd node */
26321ae08745Sheppo 	static md_prop_match_t	vd_prop_spec[] = {
26331ae08745Sheppo 		{MDET_PROP_VAL, VD_ID_PROP},
26341ae08745Sheppo 		{MDET_LIST_END, NULL}};
26351ae08745Sheppo 	static mdeg_node_match_t vd_spec = {"virtual-device-port",
26361ae08745Sheppo 					    vd_prop_spec};
26371ae08745Sheppo 
26381ae08745Sheppo 	int			status;
26391ae08745Sheppo 	uint64_t		cfg_handle;
26401ae08745Sheppo 	minor_t			instance = ddi_get_instance(dip);
26411ae08745Sheppo 	vds_t			*vds;
26421ae08745Sheppo 
26431ae08745Sheppo 
26441ae08745Sheppo 	/*
26451ae08745Sheppo 	 * The "cfg-handle" property of a vds node in an MD contains the MD's
26461ae08745Sheppo 	 * notion of "instance", or unique identifier, for that node; OBP
26471ae08745Sheppo 	 * stores the value of the "cfg-handle" MD property as the value of
26481ae08745Sheppo 	 * the "reg" property on the node in the device tree it builds from
26491ae08745Sheppo 	 * the MD and passes to Solaris.  Thus, we look up the devinfo node's
26501ae08745Sheppo 	 * "reg" property value to uniquely identify this device instance when
26511ae08745Sheppo 	 * registering with the MD event-generation framework.  If the "reg"
26521ae08745Sheppo 	 * property cannot be found, the device tree state is presumably so
26531ae08745Sheppo 	 * broken that there is no point in continuing.
26541ae08745Sheppo 	 */
26551ae08745Sheppo 	if (!ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, reg_prop)) {
26561ae08745Sheppo 		PRN("vds \"%s\" property does not exist", reg_prop);
26571ae08745Sheppo 		return (DDI_FAILURE);
26581ae08745Sheppo 	}
26591ae08745Sheppo 
26601ae08745Sheppo 	/* Get the MD instance for later MDEG registration */
26611ae08745Sheppo 	cfg_handle = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
26621ae08745Sheppo 	    reg_prop, -1);
26631ae08745Sheppo 
26641ae08745Sheppo 	if (ddi_soft_state_zalloc(vds_state, instance) != DDI_SUCCESS) {
26651ae08745Sheppo 		PRN("Could not allocate state for instance %u", instance);
26661ae08745Sheppo 		return (DDI_FAILURE);
26671ae08745Sheppo 	}
26681ae08745Sheppo 
26691ae08745Sheppo 	if ((vds = ddi_get_soft_state(vds_state, instance)) == NULL) {
26701ae08745Sheppo 		PRN("Could not get state for instance %u", instance);
26711ae08745Sheppo 		ddi_soft_state_free(vds_state, instance);
26721ae08745Sheppo 		return (DDI_FAILURE);
26731ae08745Sheppo 	}
26741ae08745Sheppo 
26751ae08745Sheppo 
26761ae08745Sheppo 	vds->dip	= dip;
26771ae08745Sheppo 	vds->vd_table	= mod_hash_create_ptrhash("vds_vd_table", VDS_NCHAINS,
26781ae08745Sheppo 							vds_destroy_vd,
26791ae08745Sheppo 							sizeof (void *));
26801ae08745Sheppo 	ASSERT(vds->vd_table != NULL);
26811ae08745Sheppo 
26821ae08745Sheppo 	if ((status = ldi_ident_from_dip(dip, &vds->ldi_ident)) != 0) {
26831ae08745Sheppo 		PRN("ldi_ident_from_dip() returned errno %d", status);
26841ae08745Sheppo 		return (DDI_FAILURE);
26851ae08745Sheppo 	}
26861ae08745Sheppo 	vds->initialized |= VDS_LDI;
26871ae08745Sheppo 
26881ae08745Sheppo 	/* Register for MD updates */
26891ae08745Sheppo 	vds_prop_spec[1].ps_val = cfg_handle;
26901ae08745Sheppo 	if (mdeg_register(&vds_spec, &vd_spec, vds_process_md, vds,
26911ae08745Sheppo 		&vds->mdeg) != MDEG_SUCCESS) {
26921ae08745Sheppo 		PRN("Unable to register for MD updates");
26931ae08745Sheppo 		return (DDI_FAILURE);
26941ae08745Sheppo 	}
26951ae08745Sheppo 	vds->initialized |= VDS_MDEG;
26961ae08745Sheppo 
26970a55fbb7Slm66018 	/* Prevent auto-detaching so driver is available whenever MD changes */
26980a55fbb7Slm66018 	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip, DDI_NO_AUTODETACH, 1) !=
26990a55fbb7Slm66018 	    DDI_PROP_SUCCESS) {
27000a55fbb7Slm66018 		PRN("failed to set \"%s\" property for instance %u",
27010a55fbb7Slm66018 		    DDI_NO_AUTODETACH, instance);
27020a55fbb7Slm66018 	}
27030a55fbb7Slm66018 
27041ae08745Sheppo 	ddi_report_dev(dip);
27051ae08745Sheppo 	return (DDI_SUCCESS);
27061ae08745Sheppo }
27071ae08745Sheppo 
27081ae08745Sheppo static int
27091ae08745Sheppo vds_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
27101ae08745Sheppo {
27111ae08745Sheppo 	int	status;
27121ae08745Sheppo 
27131ae08745Sheppo 	switch (cmd) {
27141ae08745Sheppo 	case DDI_ATTACH:
2715d10e4ef2Snarayan 		PR0("Attaching");
27161ae08745Sheppo 		if ((status = vds_do_attach(dip)) != DDI_SUCCESS)
27171ae08745Sheppo 			(void) vds_detach(dip, DDI_DETACH);
27181ae08745Sheppo 		return (status);
27191ae08745Sheppo 	case DDI_RESUME:
2720d10e4ef2Snarayan 		PR0("No action required for DDI_RESUME");
27211ae08745Sheppo 		return (DDI_SUCCESS);
27221ae08745Sheppo 	default:
27231ae08745Sheppo 		return (DDI_FAILURE);
27241ae08745Sheppo 	}
27251ae08745Sheppo }
27261ae08745Sheppo 
27271ae08745Sheppo static struct dev_ops vds_ops = {
27281ae08745Sheppo 	DEVO_REV,	/* devo_rev */
27291ae08745Sheppo 	0,		/* devo_refcnt */
27301ae08745Sheppo 	ddi_no_info,	/* devo_getinfo */
27311ae08745Sheppo 	nulldev,	/* devo_identify */
27321ae08745Sheppo 	nulldev,	/* devo_probe */
27331ae08745Sheppo 	vds_attach,	/* devo_attach */
27341ae08745Sheppo 	vds_detach,	/* devo_detach */
27351ae08745Sheppo 	nodev,		/* devo_reset */
27361ae08745Sheppo 	NULL,		/* devo_cb_ops */
27371ae08745Sheppo 	NULL,		/* devo_bus_ops */
27381ae08745Sheppo 	nulldev		/* devo_power */
27391ae08745Sheppo };
27401ae08745Sheppo 
27411ae08745Sheppo static struct modldrv modldrv = {
27421ae08745Sheppo 	&mod_driverops,
27431ae08745Sheppo 	"virtual disk server v%I%",
27441ae08745Sheppo 	&vds_ops,
27451ae08745Sheppo };
27461ae08745Sheppo 
27471ae08745Sheppo static struct modlinkage modlinkage = {
27481ae08745Sheppo 	MODREV_1,
27491ae08745Sheppo 	&modldrv,
27501ae08745Sheppo 	NULL
27511ae08745Sheppo };
27521ae08745Sheppo 
27531ae08745Sheppo 
27541ae08745Sheppo int
27551ae08745Sheppo _init(void)
27561ae08745Sheppo {
27571ae08745Sheppo 	int		i, status;
27581ae08745Sheppo 
2759d10e4ef2Snarayan 
27601ae08745Sheppo 	if ((status = ddi_soft_state_init(&vds_state, sizeof (vds_t), 1)) != 0)
27611ae08745Sheppo 		return (status);
27621ae08745Sheppo 	if ((status = mod_install(&modlinkage)) != 0) {
27631ae08745Sheppo 		ddi_soft_state_fini(&vds_state);
27641ae08745Sheppo 		return (status);
27651ae08745Sheppo 	}
27661ae08745Sheppo 
27671ae08745Sheppo 	/* Fill in the bit-mask of server-supported operations */
27681ae08745Sheppo 	for (i = 0; i < vds_noperations; i++)
27691ae08745Sheppo 		vds_operations |= 1 << (vds_operation[i].operation - 1);
27701ae08745Sheppo 
27711ae08745Sheppo 	return (0);
27721ae08745Sheppo }
27731ae08745Sheppo 
27741ae08745Sheppo int
27751ae08745Sheppo _info(struct modinfo *modinfop)
27761ae08745Sheppo {
27771ae08745Sheppo 	return (mod_info(&modlinkage, modinfop));
27781ae08745Sheppo }
27791ae08745Sheppo 
27801ae08745Sheppo int
27811ae08745Sheppo _fini(void)
27821ae08745Sheppo {
27831ae08745Sheppo 	int	status;
27841ae08745Sheppo 
2785d10e4ef2Snarayan 
27861ae08745Sheppo 	if ((status = mod_remove(&modlinkage)) != 0)
27871ae08745Sheppo 		return (status);
27881ae08745Sheppo 	ddi_soft_state_fini(&vds_state);
27891ae08745Sheppo 	return (0);
27901ae08745Sheppo }
2791