xref: /titanic_53/usr/src/uts/sun4v/io/vds.c (revision e1ebb9ec908bc2d0a8810f137ebd6566cc8a8061)
11ae08745Sheppo /*
21ae08745Sheppo  * CDDL HEADER START
31ae08745Sheppo  *
41ae08745Sheppo  * The contents of this file are subject to the terms of the
51ae08745Sheppo  * Common Development and Distribution License (the "License").
61ae08745Sheppo  * You may not use this file except in compliance with the License.
71ae08745Sheppo  *
81ae08745Sheppo  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
91ae08745Sheppo  * or http://www.opensolaris.org/os/licensing.
101ae08745Sheppo  * See the License for the specific language governing permissions
111ae08745Sheppo  * and limitations under the License.
121ae08745Sheppo  *
131ae08745Sheppo  * When distributing Covered Code, include this CDDL HEADER in each
141ae08745Sheppo  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
151ae08745Sheppo  * If applicable, add the following below this CDDL HEADER, with the
161ae08745Sheppo  * fields enclosed by brackets "[]" replaced with your own identifying
171ae08745Sheppo  * information: Portions Copyright [yyyy] [name of copyright owner]
181ae08745Sheppo  *
191ae08745Sheppo  * CDDL HEADER END
201ae08745Sheppo  */
211ae08745Sheppo 
221ae08745Sheppo /*
231ae08745Sheppo  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
241ae08745Sheppo  * Use is subject to license terms.
251ae08745Sheppo  */
261ae08745Sheppo 
271ae08745Sheppo #pragma ident	"%Z%%M%	%I%	%E% SMI"
281ae08745Sheppo 
291ae08745Sheppo /*
301ae08745Sheppo  * Virtual disk server
311ae08745Sheppo  */
321ae08745Sheppo 
331ae08745Sheppo 
341ae08745Sheppo #include <sys/types.h>
351ae08745Sheppo #include <sys/conf.h>
361ae08745Sheppo #include <sys/ddi.h>
371ae08745Sheppo #include <sys/dkio.h>
381ae08745Sheppo #include <sys/file.h>
391ae08745Sheppo #include <sys/mdeg.h>
401ae08745Sheppo #include <sys/modhash.h>
411ae08745Sheppo #include <sys/note.h>
421ae08745Sheppo #include <sys/pathname.h>
431ae08745Sheppo #include <sys/sunddi.h>
441ae08745Sheppo #include <sys/sunldi.h>
451ae08745Sheppo #include <sys/sysmacros.h>
461ae08745Sheppo #include <sys/vio_common.h>
471ae08745Sheppo #include <sys/vdsk_mailbox.h>
481ae08745Sheppo #include <sys/vdsk_common.h>
491ae08745Sheppo #include <sys/vtoc.h>
501ae08745Sheppo 
511ae08745Sheppo 
521ae08745Sheppo /* Virtual disk server initialization flags */
53d10e4ef2Snarayan #define	VDS_LDI			0x01
54d10e4ef2Snarayan #define	VDS_MDEG		0x02
551ae08745Sheppo 
561ae08745Sheppo /* Virtual disk server tunable parameters */
571ae08745Sheppo #define	VDS_LDC_RETRIES		3
581ae08745Sheppo #define	VDS_NCHAINS		32
591ae08745Sheppo 
601ae08745Sheppo /* Identification parameters for MD, synthetic dkio(7i) structures, etc. */
611ae08745Sheppo #define	VDS_NAME		"virtual-disk-server"
621ae08745Sheppo 
631ae08745Sheppo #define	VD_NAME			"vd"
641ae08745Sheppo #define	VD_VOLUME_NAME		"vdisk"
651ae08745Sheppo #define	VD_ASCIILABEL		"Virtual Disk"
661ae08745Sheppo 
671ae08745Sheppo #define	VD_CHANNEL_ENDPOINT	"channel-endpoint"
681ae08745Sheppo #define	VD_ID_PROP		"id"
691ae08745Sheppo #define	VD_BLOCK_DEVICE_PROP	"vds-block-device"
701ae08745Sheppo 
711ae08745Sheppo /* Virtual disk initialization flags */
721ae08745Sheppo #define	VD_LOCKING		0x01
73d10e4ef2Snarayan #define	VD_LDC			0x02
74d10e4ef2Snarayan #define	VD_DRING		0x04
75d10e4ef2Snarayan #define	VD_SID			0x08
76d10e4ef2Snarayan #define	VD_SEQ_NUM		0x10
771ae08745Sheppo 
781ae08745Sheppo /* Flags for opening/closing backing devices via LDI */
791ae08745Sheppo #define	VD_OPEN_FLAGS		(FEXCL | FREAD | FWRITE)
801ae08745Sheppo 
811ae08745Sheppo /*
821ae08745Sheppo  * By Solaris convention, slice/partition 2 represents the entire disk;
831ae08745Sheppo  * unfortunately, this convention does not appear to be codified.
841ae08745Sheppo  */
851ae08745Sheppo #define	VD_ENTIRE_DISK_SLICE	2
861ae08745Sheppo 
871ae08745Sheppo /* Return a cpp token as a string */
881ae08745Sheppo #define	STRINGIZE(token)	#token
891ae08745Sheppo 
901ae08745Sheppo /*
911ae08745Sheppo  * Print a message prefixed with the current function name to the message log
921ae08745Sheppo  * (and optionally to the console for verbose boots); these macros use cpp's
931ae08745Sheppo  * concatenation of string literals and C99 variable-length-argument-list
941ae08745Sheppo  * macros
951ae08745Sheppo  */
961ae08745Sheppo #define	PRN(...)	_PRN("?%s():  "__VA_ARGS__, "")
971ae08745Sheppo #define	_PRN(format, ...)					\
981ae08745Sheppo 	cmn_err(CE_CONT, format"%s", __func__, __VA_ARGS__)
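
/*
 * For example, PRN("ldc_write() returned errno %d", status) expands to
 *
 *	cmn_err(CE_CONT, "?%s():  " "ldc_write() returned errno %d" "%s",
 *	    __func__, status, "");
 *
 * so every log entry is prefixed with the calling function's name, and the
 * leading '?' sends the message to the log only (and to the console on
 * verbose boots).
 */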
991ae08745Sheppo 
1001ae08745Sheppo /* Return a pointer to the "i"th vdisk dring element */
1011ae08745Sheppo #define	VD_DRING_ELEM(i)	((vd_dring_entry_t *)(void *)	\
1021ae08745Sheppo 	    (vd->dring + (i)*vd->descriptor_size))
1031ae08745Sheppo 
1041ae08745Sheppo /* Return the virtual disk client's type as a string (for use in messages) */
1051ae08745Sheppo #define	VD_CLIENT(vd)							\
1061ae08745Sheppo 	(((vd)->xfer_mode == VIO_DESC_MODE) ? "in-band client" :	\
1071ae08745Sheppo 	    (((vd)->xfer_mode == VIO_DRING_MODE) ? "dring client" :	\
1081ae08745Sheppo 		(((vd)->xfer_mode == 0) ? "null client" :		\
1091ae08745Sheppo 		    "unsupported client")))
1101ae08745Sheppo 
1111ae08745Sheppo /* Debugging macros */
1121ae08745Sheppo #ifdef DEBUG
1131ae08745Sheppo #define	PR0 if (vd_msglevel > 0)	PRN
1141ae08745Sheppo #define	PR1 if (vd_msglevel > 1)	PRN
1151ae08745Sheppo #define	PR2 if (vd_msglevel > 2)	PRN
1161ae08745Sheppo 
1171ae08745Sheppo #define	VD_DUMP_DRING_ELEM(elem)					\
1181ae08745Sheppo 	PRN("dst:%x op:%x st:%u nb:%lx addr:%lx ncook:%u\n",		\
1191ae08745Sheppo 	    elem->hdr.dstate,						\
1201ae08745Sheppo 	    elem->payload.operation,					\
1211ae08745Sheppo 	    elem->payload.status,					\
1221ae08745Sheppo 	    elem->payload.nbytes,					\
1231ae08745Sheppo 	    elem->payload.addr,						\
1241ae08745Sheppo 	    elem->payload.ncookies);
1251ae08745Sheppo 
1261ae08745Sheppo #else	/* !DEBUG */
1271ae08745Sheppo #define	PR0(...)
1281ae08745Sheppo #define	PR1(...)
1291ae08745Sheppo #define	PR2(...)
1301ae08745Sheppo 
1311ae08745Sheppo #define	VD_DUMP_DRING_ELEM(elem)
1321ae08745Sheppo 
1331ae08745Sheppo #endif	/* DEBUG */
1341ae08745Sheppo 
1351ae08745Sheppo 
136d10e4ef2Snarayan /*
137d10e4ef2Snarayan  * Soft state structure for a vds instance
138d10e4ef2Snarayan  */
1391ae08745Sheppo typedef struct vds {
1401ae08745Sheppo 	uint_t		initialized;	/* driver inst initialization flags */
1411ae08745Sheppo 	dev_info_t	*dip;		/* driver inst devinfo pointer */
1421ae08745Sheppo 	ldi_ident_t	ldi_ident;	/* driver's identifier for LDI */
1431ae08745Sheppo 	mod_hash_t	*vd_table;	/* table of virtual disks served */
1441ae08745Sheppo 	mdeg_handle_t	mdeg;		/* handle for MDEG operations  */
1451ae08745Sheppo } vds_t;
1461ae08745Sheppo 
147d10e4ef2Snarayan /*
148d10e4ef2Snarayan  * Types of descriptor-processing tasks
149d10e4ef2Snarayan  */
150d10e4ef2Snarayan typedef enum vd_task_type {
151d10e4ef2Snarayan 	VD_NONFINAL_RANGE_TASK,	/* task for intermediate descriptor in range */
152d10e4ef2Snarayan 	VD_FINAL_RANGE_TASK,	/* task for last in a range of descriptors */
153d10e4ef2Snarayan } vd_task_type_t;
154d10e4ef2Snarayan 
155d10e4ef2Snarayan /*
156d10e4ef2Snarayan  * Structure describing the task for processing a descriptor
157d10e4ef2Snarayan  */
158d10e4ef2Snarayan typedef struct vd_task {
159d10e4ef2Snarayan 	struct vd		*vd;		/* vd instance task is for */
160d10e4ef2Snarayan 	vd_task_type_t		type;		/* type of descriptor task */
161d10e4ef2Snarayan 	int			index;		/* dring elem index for task */
162d10e4ef2Snarayan 	vio_msg_t		*msg;		/* VIO message task is for */
163d10e4ef2Snarayan 	size_t			msglen;		/* length of message content */
164d10e4ef2Snarayan 	size_t			msgsize;	/* size of message buffer */
165d10e4ef2Snarayan 	vd_dring_payload_t	*request;	/* request task will perform */
166d10e4ef2Snarayan 	struct buf		buf;		/* buf(9s) for I/O request */
167d10e4ef2Snarayan 
168d10e4ef2Snarayan } vd_task_t;
169d10e4ef2Snarayan 
170d10e4ef2Snarayan /*
171d10e4ef2Snarayan  * Soft state structure for a virtual disk instance
172d10e4ef2Snarayan  */
1731ae08745Sheppo typedef struct vd {
1741ae08745Sheppo 	uint_t			initialized;	/* vdisk initialization flags */
1751ae08745Sheppo 	vds_t			*vds;		/* server for this vdisk */
176d10e4ef2Snarayan 	ddi_taskq_t		*startq;	/* queue for I/O start tasks */
177d10e4ef2Snarayan 	ddi_taskq_t		*completionq;	/* queue for completion tasks */
1781ae08745Sheppo 	ldi_handle_t		ldi_handle[V_NUMPAR];	/* LDI slice handles */
1791ae08745Sheppo 	dev_t			dev[V_NUMPAR];	/* dev numbers for slices */
180*e1ebb9ecSlm66018 	uint_t			nslices;	/* number of slices */
1811ae08745Sheppo 	size_t			vdisk_size;	/* number of blocks in vdisk */
1821ae08745Sheppo 	vd_disk_type_t		vdisk_type;	/* slice or entire disk */
183*e1ebb9ecSlm66018 	ushort_t		max_xfer_sz;	/* max xfer size in DEV_BSIZE */
1841ae08745Sheppo 	boolean_t		pseudo;		/* underlying pseudo dev */
1851ae08745Sheppo 	struct dk_geom		dk_geom;	/* synthetic for slice type */
1861ae08745Sheppo 	struct vtoc		vtoc;		/* synthetic for slice type */
1871ae08745Sheppo 	ldc_status_t		ldc_state;	/* LDC connection state */
1881ae08745Sheppo 	ldc_handle_t		ldc_handle;	/* handle for LDC comm */
1891ae08745Sheppo 	size_t			max_msglen;	/* largest LDC message len */
1901ae08745Sheppo 	vd_state_t		state;		/* client handshake state */
1911ae08745Sheppo 	uint8_t			xfer_mode;	/* transfer mode with client */
1921ae08745Sheppo 	uint32_t		sid;		/* client's session ID */
1931ae08745Sheppo 	uint64_t		seq_num;	/* message sequence number */
1941ae08745Sheppo 	uint64_t		dring_ident;	/* identifier of dring */
1951ae08745Sheppo 	ldc_dring_handle_t	dring_handle;	/* handle for dring ops */
1961ae08745Sheppo 	uint32_t		descriptor_size;	/* num bytes in desc */
1971ae08745Sheppo 	uint32_t		dring_len;	/* number of dring elements */
1981ae08745Sheppo 	caddr_t			dring;		/* address of dring */
199d10e4ef2Snarayan 	vd_task_t		inband_task;	/* task for inband descriptor */
200d10e4ef2Snarayan 	vd_task_t		*dring_task;	/* tasks for dring elements */
201d10e4ef2Snarayan 
202d10e4ef2Snarayan 	kmutex_t		lock;		/* protects variables below */
203d10e4ef2Snarayan 	boolean_t		enabled;	/* is vdisk enabled? */
204d10e4ef2Snarayan 	boolean_t		reset_state;	/* reset connection state? */
205d10e4ef2Snarayan 	boolean_t		reset_ldc;	/* reset LDC channel? */
2061ae08745Sheppo } vd_t;
2071ae08745Sheppo 
2081ae08745Sheppo typedef struct vds_operation {
2091ae08745Sheppo 	uint8_t	operation;
210d10e4ef2Snarayan 	int	(*start)(vd_task_t *task);
211d10e4ef2Snarayan 	void	(*complete)(void *arg);
2121ae08745Sheppo } vds_operation_t;
2131ae08745Sheppo 
2140a55fbb7Slm66018 typedef struct vd_ioctl {
2150a55fbb7Slm66018 	uint8_t		operation;		/* vdisk operation */
2160a55fbb7Slm66018 	const char	*operation_name;	/* vdisk operation name */
2170a55fbb7Slm66018 	size_t		nbytes;			/* size of operation buffer */
2180a55fbb7Slm66018 	int		cmd;			/* corresponding ioctl cmd */
2190a55fbb7Slm66018 	const char	*cmd_name;		/* ioctl cmd name */
2200a55fbb7Slm66018 	void		*arg;			/* ioctl cmd argument */
2210a55fbb7Slm66018 	/* convert input vd_buf to output ioctl_arg */
2220a55fbb7Slm66018 	void		(*copyin)(void *vd_buf, void *ioctl_arg);
2230a55fbb7Slm66018 	/* convert input ioctl_arg to output vd_buf */
2240a55fbb7Slm66018 	void		(*copyout)(void *ioctl_arg, void *vd_buf);
2250a55fbb7Slm66018 } vd_ioctl_t;
2260a55fbb7Slm66018 
2270a55fbb7Slm66018 /* Define trivial copyin/copyout conversion function flag */
2280a55fbb7Slm66018 #define	VD_IDENTITY	((void (*)(void *, void *))-1)
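/*
 * For example, VD_OP_SET_WCE in the ioctl[] table of vd_ioctl() below uses
 * VD_IDENTITY as its copyin function, so vd_do_ioctl() passes the client's
 * buffer through unmodified as the ioctl argument rather than calling a
 * conversion routine.
 */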
2291ae08745Sheppo 
2301ae08745Sheppo 
2311ae08745Sheppo static int	vds_ldc_retries = VDS_LDC_RETRIES;
2321ae08745Sheppo static void	*vds_state;
2331ae08745Sheppo static uint64_t	vds_operations;	/* see vds_operation[] definition below */
2341ae08745Sheppo 
2351ae08745Sheppo static int	vd_open_flags = VD_OPEN_FLAGS;
2361ae08745Sheppo 
2370a55fbb7Slm66018 /*
2380a55fbb7Slm66018  * Supported protocol version pairs, from highest (newest) to lowest (oldest)
2390a55fbb7Slm66018  *
2400a55fbb7Slm66018  * Each supported major version should appear only once, paired with (and only
2410a55fbb7Slm66018  * with) its highest supported minor version number (as the protocol requires
2420a55fbb7Slm66018  * supporting all lower minor version numbers as well)
2430a55fbb7Slm66018  */
2440a55fbb7Slm66018 static const vio_ver_t	vds_version[] = {{1, 0}};
2450a55fbb7Slm66018 static const size_t	vds_num_versions =
2460a55fbb7Slm66018     sizeof (vds_version)/sizeof (vds_version[0]);
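
/*
 * For example, if a hypothetical version 1.1 of the protocol were also
 * supported, the table above would become {{1, 1}} rather than
 * {{1, 1}, {1, 0}}, since each major version appears exactly once, paired
 * with its highest supported minor version.
 */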
2470a55fbb7Slm66018 
2481ae08745Sheppo #ifdef DEBUG
2491ae08745Sheppo static int	vd_msglevel;
2501ae08745Sheppo #endif /* DEBUG */
2511ae08745Sheppo 
2521ae08745Sheppo 
2531ae08745Sheppo static int
254d10e4ef2Snarayan vd_start_bio(vd_task_t *task)
2551ae08745Sheppo {
256d10e4ef2Snarayan 	int			status = 0;
257d10e4ef2Snarayan 	vd_t			*vd		= task->vd;
258d10e4ef2Snarayan 	vd_dring_payload_t	*request	= task->request;
259d10e4ef2Snarayan 	struct buf		*buf		= &task->buf;
2601ae08745Sheppo 
261d10e4ef2Snarayan 
262d10e4ef2Snarayan 	ASSERT(vd != NULL);
263d10e4ef2Snarayan 	ASSERT(request != NULL);
264d10e4ef2Snarayan 	ASSERT(request->slice < vd->nslices);
265d10e4ef2Snarayan 	ASSERT((request->operation == VD_OP_BREAD) ||
266d10e4ef2Snarayan 	    (request->operation == VD_OP_BWRITE));
267d10e4ef2Snarayan 
2681ae08745Sheppo 	if (request->nbytes == 0)
2691ae08745Sheppo 		return (EINVAL);	/* no service for trivial requests */
2701ae08745Sheppo 
271d10e4ef2Snarayan 	PR1("%s %lu bytes at block %lu",
272d10e4ef2Snarayan 	    (request->operation == VD_OP_BREAD) ? "Read" : "Write",
273d10e4ef2Snarayan 	    request->nbytes, request->addr);
2741ae08745Sheppo 
275d10e4ef2Snarayan 	bioinit(buf);
276d10e4ef2Snarayan 	buf->b_flags		= B_BUSY;
277d10e4ef2Snarayan 	buf->b_bcount		= request->nbytes;
278d10e4ef2Snarayan 	buf->b_un.b_addr	= kmem_alloc(buf->b_bcount, KM_SLEEP);
279d10e4ef2Snarayan 	buf->b_lblkno		= request->addr;
280d10e4ef2Snarayan 	buf->b_edev		= vd->dev[request->slice];
281d10e4ef2Snarayan 
282d10e4ef2Snarayan 	if (request->operation == VD_OP_BREAD) {
283d10e4ef2Snarayan 		buf->b_flags |= B_READ;
284d10e4ef2Snarayan 	} else {
285d10e4ef2Snarayan 		buf->b_flags |= B_WRITE;
286d10e4ef2Snarayan 		/* Get data to write from client */
287d10e4ef2Snarayan 		if ((status = ldc_mem_copy(vd->ldc_handle, buf->b_un.b_addr, 0,
288d10e4ef2Snarayan 			    &request->nbytes, request->cookie,
289d10e4ef2Snarayan 			    request->ncookies, LDC_COPY_IN)) != 0) {
290d10e4ef2Snarayan 			PRN("ldc_mem_copy() returned errno %d "
291d10e4ef2Snarayan 			    "copying from client", status);
292d10e4ef2Snarayan 		}
293d10e4ef2Snarayan 	}
294d10e4ef2Snarayan 
295d10e4ef2Snarayan 	/* Start the block I/O */
2961ae08745Sheppo 	if ((status == 0) &&
297d10e4ef2Snarayan 	    ((status = ldi_strategy(vd->ldi_handle[request->slice], buf)) == 0))
298d10e4ef2Snarayan 		return (EINPROGRESS);	/* will complete on completionq */
299d10e4ef2Snarayan 
300d10e4ef2Snarayan 	/* Clean up after error */
301d10e4ef2Snarayan 	kmem_free(buf->b_un.b_addr, buf->b_bcount);
302d10e4ef2Snarayan 	biofini(buf);
303d10e4ef2Snarayan 	return (status);
304d10e4ef2Snarayan }
305d10e4ef2Snarayan 
306d10e4ef2Snarayan static int
307d10e4ef2Snarayan send_msg(ldc_handle_t ldc_handle, void *msg, size_t msglen)
308d10e4ef2Snarayan {
309d10e4ef2Snarayan 	int	retry, status;
310d10e4ef2Snarayan 	size_t	nbytes;
311d10e4ef2Snarayan 
312d10e4ef2Snarayan 
313d10e4ef2Snarayan 	for (retry = 0, status = EWOULDBLOCK;
314d10e4ef2Snarayan 	    retry < vds_ldc_retries && status == EWOULDBLOCK;
315d10e4ef2Snarayan 	    retry++) {
316d10e4ef2Snarayan 		PR1("ldc_write() attempt %d", (retry + 1));
317d10e4ef2Snarayan 		nbytes = msglen;
318d10e4ef2Snarayan 		status = ldc_write(ldc_handle, msg, &nbytes);
319d10e4ef2Snarayan 	}
320d10e4ef2Snarayan 
321d10e4ef2Snarayan 	if (status != 0) {
322d10e4ef2Snarayan 		PRN("ldc_write() returned errno %d", status);
323d10e4ef2Snarayan 		return (status);
324d10e4ef2Snarayan 	} else if (nbytes != msglen) {
325d10e4ef2Snarayan 		PRN("ldc_write() performed only partial write");
326d10e4ef2Snarayan 		return (EIO);
327d10e4ef2Snarayan 	}
328d10e4ef2Snarayan 
329d10e4ef2Snarayan 	PR1("SENT %lu bytes", msglen);
330d10e4ef2Snarayan 	return (0);
331d10e4ef2Snarayan }
332d10e4ef2Snarayan 
333d10e4ef2Snarayan static void
334d10e4ef2Snarayan vd_need_reset(vd_t *vd, boolean_t reset_ldc)
335d10e4ef2Snarayan {
336d10e4ef2Snarayan 	mutex_enter(&vd->lock);
337d10e4ef2Snarayan 	vd->reset_state	= B_TRUE;
338d10e4ef2Snarayan 	vd->reset_ldc	= reset_ldc;
339d10e4ef2Snarayan 	mutex_exit(&vd->lock);
340d10e4ef2Snarayan }
341d10e4ef2Snarayan 
342d10e4ef2Snarayan /*
343d10e4ef2Snarayan  * Reset the state of the connection with a client, if needed; reset the LDC
344d10e4ef2Snarayan  * transport as well, if needed.  This function should only be called from the
345d10e4ef2Snarayan  * "startq", as it waits for tasks on the "completionq" and will deadlock if
346d10e4ef2Snarayan  * called from that queue.
347d10e4ef2Snarayan  */
348d10e4ef2Snarayan static void
349d10e4ef2Snarayan vd_reset_if_needed(vd_t *vd)
350d10e4ef2Snarayan {
351d10e4ef2Snarayan 	int		status = 0;
352d10e4ef2Snarayan 
353d10e4ef2Snarayan 
354d10e4ef2Snarayan 	mutex_enter(&vd->lock);
355d10e4ef2Snarayan 	if (!vd->reset_state) {
356d10e4ef2Snarayan 		ASSERT(!vd->reset_ldc);
357d10e4ef2Snarayan 		mutex_exit(&vd->lock);
358d10e4ef2Snarayan 		return;
359d10e4ef2Snarayan 	}
360d10e4ef2Snarayan 	mutex_exit(&vd->lock);
361d10e4ef2Snarayan 
362d10e4ef2Snarayan 
363d10e4ef2Snarayan 	PR0("Resetting connection state with %s", VD_CLIENT(vd));
364d10e4ef2Snarayan 
365d10e4ef2Snarayan 	/*
366d10e4ef2Snarayan 	 * Let any asynchronous I/O complete before possibly pulling the rug
367d10e4ef2Snarayan 	 * out from under it; defer checking vd->reset_ldc, as one of the
368d10e4ef2Snarayan 	 * asynchronous tasks might set it
369d10e4ef2Snarayan 	 */
370d10e4ef2Snarayan 	ddi_taskq_wait(vd->completionq);
371d10e4ef2Snarayan 
372d10e4ef2Snarayan 
373d10e4ef2Snarayan 	if ((vd->initialized & VD_DRING) &&
374d10e4ef2Snarayan 	    ((status = ldc_mem_dring_unmap(vd->dring_handle)) != 0))
375d10e4ef2Snarayan 		PRN("ldc_mem_dring_unmap() returned errno %d", status);
376d10e4ef2Snarayan 
377d10e4ef2Snarayan 	if (vd->dring_task != NULL) {
378d10e4ef2Snarayan 		ASSERT(vd->dring_len != 0);
379d10e4ef2Snarayan 		kmem_free(vd->dring_task,
380d10e4ef2Snarayan 		    (sizeof (*vd->dring_task)) * vd->dring_len);
381d10e4ef2Snarayan 		vd->dring_task = NULL;
382d10e4ef2Snarayan 	}
383d10e4ef2Snarayan 
384d10e4ef2Snarayan 
385d10e4ef2Snarayan 	mutex_enter(&vd->lock);
386*e1ebb9ecSlm66018 	if (vd->reset_ldc && ((status = ldc_down(vd->ldc_handle)) != 0))
387*e1ebb9ecSlm66018 		PRN("ldc_down() returned errno %d", status);
388d10e4ef2Snarayan 
389d10e4ef2Snarayan 	vd->initialized	&= ~(VD_SID | VD_SEQ_NUM | VD_DRING);
390d10e4ef2Snarayan 	vd->state	= VD_STATE_INIT;
391d10e4ef2Snarayan 	vd->max_msglen	= sizeof (vio_msg_t);	/* baseline vio message size */
392d10e4ef2Snarayan 
393d10e4ef2Snarayan 	vd->reset_state	= B_FALSE;
394d10e4ef2Snarayan 	vd->reset_ldc	= B_FALSE;
395d10e4ef2Snarayan 	mutex_exit(&vd->lock);
396d10e4ef2Snarayan }
397d10e4ef2Snarayan 
398d10e4ef2Snarayan static int
399d10e4ef2Snarayan vd_mark_elem_done(vd_t *vd, int idx, int elem_status)
400d10e4ef2Snarayan {
401d10e4ef2Snarayan 	boolean_t		accepted;
402d10e4ef2Snarayan 	int			status;
403d10e4ef2Snarayan 	vd_dring_entry_t	*elem = VD_DRING_ELEM(idx);
404d10e4ef2Snarayan 
405d10e4ef2Snarayan 
406d10e4ef2Snarayan 	/* Acquire the element */
407d10e4ef2Snarayan 	if ((status = ldc_mem_dring_acquire(vd->dring_handle, idx, idx)) != 0) {
408d10e4ef2Snarayan 		PRN("ldc_mem_dring_acquire() returned errno %d", status);
409d10e4ef2Snarayan 		return (status);
410d10e4ef2Snarayan 	}
411d10e4ef2Snarayan 
412d10e4ef2Snarayan 	/* Set the element's status and mark it done */
413d10e4ef2Snarayan 	accepted = (elem->hdr.dstate == VIO_DESC_ACCEPTED);
414d10e4ef2Snarayan 	if (accepted) {
415d10e4ef2Snarayan 		elem->payload.status	= elem_status;
416d10e4ef2Snarayan 		elem->hdr.dstate	= VIO_DESC_DONE;
417d10e4ef2Snarayan 	} else {
418d10e4ef2Snarayan 		/* Perhaps client timed out waiting for I/O... */
419d10e4ef2Snarayan 		PRN("element %u no longer \"accepted\"", idx);
420d10e4ef2Snarayan 		VD_DUMP_DRING_ELEM(elem);
421d10e4ef2Snarayan 	}
422d10e4ef2Snarayan 	/* Release the element */
423d10e4ef2Snarayan 	if ((status = ldc_mem_dring_release(vd->dring_handle, idx, idx)) != 0) {
424d10e4ef2Snarayan 		PRN("ldc_mem_dring_release() returned errno %d", status);
425d10e4ef2Snarayan 		return (status);
426d10e4ef2Snarayan 	}
427d10e4ef2Snarayan 
428d10e4ef2Snarayan 	return (accepted ? 0 : EINVAL);
429d10e4ef2Snarayan }
430d10e4ef2Snarayan 
431d10e4ef2Snarayan static void
432d10e4ef2Snarayan vd_complete_bio(void *arg)
433d10e4ef2Snarayan {
434d10e4ef2Snarayan 	int			status		= 0;
435d10e4ef2Snarayan 	vd_task_t		*task		= (vd_task_t *)arg;
436d10e4ef2Snarayan 	vd_t			*vd		= task->vd;
437d10e4ef2Snarayan 	vd_dring_payload_t	*request	= task->request;
438d10e4ef2Snarayan 	struct buf		*buf		= &task->buf;
439d10e4ef2Snarayan 
440d10e4ef2Snarayan 
441d10e4ef2Snarayan 	ASSERT(vd != NULL);
442d10e4ef2Snarayan 	ASSERT(request != NULL);
443d10e4ef2Snarayan 	ASSERT(task->msg != NULL);
444d10e4ef2Snarayan 	ASSERT(task->msglen >= sizeof (*task->msg));
445d10e4ef2Snarayan 	ASSERT(task->msgsize >= task->msglen);
446d10e4ef2Snarayan 
447d10e4ef2Snarayan 	/* Wait for the I/O to complete */
448d10e4ef2Snarayan 	request->status = biowait(buf);
449d10e4ef2Snarayan 
450d10e4ef2Snarayan 	/* If data was read, copy it to the client */
451d10e4ef2Snarayan 	if ((request->status == 0) && (request->operation == VD_OP_BREAD) &&
452d10e4ef2Snarayan 	    ((status = ldc_mem_copy(vd->ldc_handle, buf->b_un.b_addr, 0,
4531ae08745Sheppo 		    &request->nbytes, request->cookie, request->ncookies,
4541ae08745Sheppo 		    LDC_COPY_OUT)) != 0)) {
4551ae08745Sheppo 		PRN("ldc_mem_copy() returned errno %d copying to client",
4561ae08745Sheppo 		    status);
4571ae08745Sheppo 	}
4581ae08745Sheppo 
459d10e4ef2Snarayan 	/* Release I/O buffer */
460d10e4ef2Snarayan 	kmem_free(buf->b_un.b_addr, buf->b_bcount);
461d10e4ef2Snarayan 	biofini(buf);
4621ae08745Sheppo 
463d10e4ef2Snarayan 	/* Update the dring element for a dring client */
464d10e4ef2Snarayan 	if ((status == 0) && (vd->xfer_mode == VIO_DRING_MODE))
465d10e4ef2Snarayan 		status = vd_mark_elem_done(vd, task->index, request->status);
4661ae08745Sheppo 
467d10e4ef2Snarayan 	/*
468d10e4ef2Snarayan 	 * If a transport error occurred, arrange to "nack" the message when
469d10e4ef2Snarayan 	 * the final task in the descriptor element range completes
470d10e4ef2Snarayan 	 */
471d10e4ef2Snarayan 	if (status != 0)
472d10e4ef2Snarayan 		task->msg->tag.vio_subtype = VIO_SUBTYPE_NACK;
4731ae08745Sheppo 
474d10e4ef2Snarayan 	/*
475d10e4ef2Snarayan 	 * Only the final task for a range of elements will respond to and
476d10e4ef2Snarayan 	 * free the message
477d10e4ef2Snarayan 	 */
478d10e4ef2Snarayan 	if (task->type == VD_NONFINAL_RANGE_TASK)
479d10e4ef2Snarayan 		return;
4801ae08745Sheppo 
481d10e4ef2Snarayan 	/*
482d10e4ef2Snarayan 	 * Send the "ack" or "nack" back to the client; if sending the message
483d10e4ef2Snarayan 	 * via LDC fails, arrange to reset both the connection state and LDC
484d10e4ef2Snarayan 	 * itself
485d10e4ef2Snarayan 	 */
486d10e4ef2Snarayan 	PR1("Sending %s",
487d10e4ef2Snarayan 	    (task->msg->tag.vio_subtype == VIO_SUBTYPE_ACK) ? "ACK" : "NACK");
488d10e4ef2Snarayan 	if (send_msg(vd->ldc_handle, task->msg, task->msglen) != 0)
489d10e4ef2Snarayan 		vd_need_reset(vd, B_TRUE);
4901ae08745Sheppo 
491d10e4ef2Snarayan 	/* Free the message now that it has been used for the reply */
492d10e4ef2Snarayan 	kmem_free(task->msg, task->msgsize);
4931ae08745Sheppo }
4941ae08745Sheppo 
4950a55fbb7Slm66018 static void
4960a55fbb7Slm66018 vd_geom2dk_geom(void *vd_buf, void *ioctl_arg)
4970a55fbb7Slm66018 {
4980a55fbb7Slm66018 	VD_GEOM2DK_GEOM((vd_geom_t *)vd_buf, (struct dk_geom *)ioctl_arg);
4990a55fbb7Slm66018 }
5000a55fbb7Slm66018 
5010a55fbb7Slm66018 static void
5020a55fbb7Slm66018 vd_vtoc2vtoc(void *vd_buf, void *ioctl_arg)
5030a55fbb7Slm66018 {
5040a55fbb7Slm66018 	VD_VTOC2VTOC((vd_vtoc_t *)vd_buf, (struct vtoc *)ioctl_arg);
5050a55fbb7Slm66018 }
5060a55fbb7Slm66018 
5070a55fbb7Slm66018 static void
5080a55fbb7Slm66018 dk_geom2vd_geom(void *ioctl_arg, void *vd_buf)
5090a55fbb7Slm66018 {
5100a55fbb7Slm66018 	DK_GEOM2VD_GEOM((struct dk_geom *)ioctl_arg, (vd_geom_t *)vd_buf);
5110a55fbb7Slm66018 }
5120a55fbb7Slm66018 
5130a55fbb7Slm66018 static void
5140a55fbb7Slm66018 vtoc2vd_vtoc(void *ioctl_arg, void *vd_buf)
5150a55fbb7Slm66018 {
5160a55fbb7Slm66018 	VTOC2VD_VTOC((struct vtoc *)ioctl_arg, (vd_vtoc_t *)vd_buf);
5170a55fbb7Slm66018 }
5180a55fbb7Slm66018 
5191ae08745Sheppo static int
5200a55fbb7Slm66018 vd_do_slice_ioctl(vd_t *vd, int cmd, void *ioctl_arg)
5211ae08745Sheppo {
5221ae08745Sheppo 	switch (cmd) {
5231ae08745Sheppo 	case DKIOCGGEOM:
5240a55fbb7Slm66018 		ASSERT(ioctl_arg != NULL);
5250a55fbb7Slm66018 		bcopy(&vd->dk_geom, ioctl_arg, sizeof (vd->dk_geom));
5261ae08745Sheppo 		return (0);
5271ae08745Sheppo 	case DKIOCGVTOC:
5280a55fbb7Slm66018 		ASSERT(ioctl_arg != NULL);
5290a55fbb7Slm66018 		bcopy(&vd->vtoc, ioctl_arg, sizeof (vd->vtoc));
5301ae08745Sheppo 		return (0);
5311ae08745Sheppo 	default:
5321ae08745Sheppo 		return (ENOTSUP);
5331ae08745Sheppo 	}
5341ae08745Sheppo }
5351ae08745Sheppo 
5361ae08745Sheppo static int
5370a55fbb7Slm66018 vd_do_ioctl(vd_t *vd, vd_dring_payload_t *request, void *buf, vd_ioctl_t *ioctl)
5381ae08745Sheppo {
5391ae08745Sheppo 	int	rval = 0, status;
5401ae08745Sheppo 	size_t	nbytes = request->nbytes;	/* modifiable copy */
5411ae08745Sheppo 
5421ae08745Sheppo 
5431ae08745Sheppo 	ASSERT(request->slice < vd->nslices);
5441ae08745Sheppo 	PR0("Performing %s", ioctl->operation_name);
5451ae08745Sheppo 
5460a55fbb7Slm66018 	/* Get data from client and convert, if necessary */
5470a55fbb7Slm66018 	if (ioctl->copyin != NULL)  {
5481ae08745Sheppo 		ASSERT(nbytes != 0 && buf != NULL);
5491ae08745Sheppo 		PR1("Getting \"arg\" data from client");
5501ae08745Sheppo 		if ((status = ldc_mem_copy(vd->ldc_handle, buf, 0, &nbytes,
5511ae08745Sheppo 			    request->cookie, request->ncookies,
5521ae08745Sheppo 			    LDC_COPY_IN)) != 0) {
5531ae08745Sheppo 			PRN("ldc_mem_copy() returned errno %d "
5541ae08745Sheppo 			    "copying from client", status);
5551ae08745Sheppo 			return (status);
5561ae08745Sheppo 		}
5570a55fbb7Slm66018 
5580a55fbb7Slm66018 		/* Convert client's data, if necessary */
5590a55fbb7Slm66018 		if (ioctl->copyin == VD_IDENTITY)	/* use client buffer */
5600a55fbb7Slm66018 			ioctl->arg = buf;
5610a55fbb7Slm66018 		else	/* convert client vdisk operation data to ioctl data */
5620a55fbb7Slm66018 			(ioctl->copyin)(buf, (void *)ioctl->arg);
5631ae08745Sheppo 	}
5641ae08745Sheppo 
5651ae08745Sheppo 	/*
5661ae08745Sheppo 	 * Handle single-slice block devices internally; otherwise, have the
5671ae08745Sheppo 	 * real driver perform the ioctl()
5681ae08745Sheppo 	 */
5691ae08745Sheppo 	if (vd->vdisk_type == VD_DISK_TYPE_SLICE && !vd->pseudo) {
5700a55fbb7Slm66018 		if ((status = vd_do_slice_ioctl(vd, ioctl->cmd,
5710a55fbb7Slm66018 			    (void *)ioctl->arg)) != 0)
5721ae08745Sheppo 			return (status);
5731ae08745Sheppo 	} else if ((status = ldi_ioctl(vd->ldi_handle[request->slice],
574d10e4ef2Snarayan 		    ioctl->cmd, (intptr_t)ioctl->arg, (vd_open_flags | FKIOCTL),
575d10e4ef2Snarayan 		    kcred, &rval)) != 0) {
5761ae08745Sheppo 		PR0("ldi_ioctl(%s) = errno %d", ioctl->cmd_name, status);
5771ae08745Sheppo 		return (status);
5781ae08745Sheppo 	}
5791ae08745Sheppo #ifdef DEBUG
5801ae08745Sheppo 	if (rval != 0) {
5811ae08745Sheppo 		PRN("%s set rval = %d, which is not being returned to client",
5821ae08745Sheppo 		    ioctl->cmd_name, rval);
5831ae08745Sheppo 	}
5841ae08745Sheppo #endif /* DEBUG */
5851ae08745Sheppo 
5860a55fbb7Slm66018 	/* Convert data and send to client, if necessary */
5870a55fbb7Slm66018 	if (ioctl->copyout != NULL)  {
5881ae08745Sheppo 		ASSERT(nbytes != 0 && buf != NULL);
5891ae08745Sheppo 		PR1("Sending \"arg\" data to client");
5900a55fbb7Slm66018 
5910a55fbb7Slm66018 		/* Convert ioctl data to vdisk operation data, if necessary */
5920a55fbb7Slm66018 		if (ioctl->copyout != VD_IDENTITY)
5930a55fbb7Slm66018 			(ioctl->copyout)((void *)ioctl->arg, buf);
5940a55fbb7Slm66018 
5951ae08745Sheppo 		if ((status = ldc_mem_copy(vd->ldc_handle, buf, 0, &nbytes,
5961ae08745Sheppo 			    request->cookie, request->ncookies,
5971ae08745Sheppo 			    LDC_COPY_OUT)) != 0) {
5981ae08745Sheppo 			PRN("ldc_mem_copy() returned errno %d "
5991ae08745Sheppo 			    "copying to client", status);
6001ae08745Sheppo 			return (status);
6011ae08745Sheppo 		}
6021ae08745Sheppo 	}
6031ae08745Sheppo 
6041ae08745Sheppo 	return (status);
6051ae08745Sheppo }
6061ae08745Sheppo 
6070a55fbb7Slm66018 /*
6080a55fbb7Slm66018  * Open any slices which have become non-empty as a result of performing a
6090a55fbb7Slm66018  * set-VTOC operation for the client.
6100a55fbb7Slm66018  *
6110a55fbb7Slm66018  * When serving a full disk, vds attempts to exclusively open all of the
6120a55fbb7Slm66018  * disk's slices to prevent another thread or process in the service domain
6130a55fbb7Slm66018  * from "stealing" a slice or from performing I/O to a slice while a vds
6140a55fbb7Slm66018  * client is accessing it.  Unfortunately, underlying drivers, such as sd(7d)
6150a55fbb7Slm66018  * and cmdk(7d), return an error when attempting to open the device file for a
6160a55fbb7Slm66018  * slice which is currently empty according to the VTOC.  This driver behavior
6170a55fbb7Slm66018  * means that vds must skip opening empty slices when initializing a vdisk for
6180a55fbb7Slm66018  * full-disk service and try to open slices that become non-empty (via a
6190a55fbb7Slm66018  * set-VTOC operation) during use of the full disk in order to begin serving
6200a55fbb7Slm66018  * such slices to the client.  This approach has an inherent (and therefore
6210a55fbb7Slm66018  * unavoidable) race condition; it also means that failure to open a
6220a55fbb7Slm66018  * newly-non-empty slice has different semantics than failure to open an
6230a55fbb7Slm66018  * initially-non-empty slice:  Due to driver bahavior, opening a
6240a55fbb7Slm66018  * newly-non-empty slice is a necessary side effect of vds performing a
6250a55fbb7Slm66018  * (successful) set-VTOC operation for a client on an in-service (and in-use)
6260a55fbb7Slm66018  * disk in order to begin serving the slice; failure of this side-effect
6270a55fbb7Slm66018  * operation does not mean that the client's set-VTOC operation failed or that
6280a55fbb7Slm66018  * operations on other slices must fail.  Therefore, this function prints an
6290a55fbb7Slm66018  * error message on failure to open a slice, but does not return an error to
6300a55fbb7Slm66018  * its caller--unlike failure to open a slice initially, which results in an
6310a55fbb7Slm66018  * error that prevents serving the vdisk (and thereby requires an
6320a55fbb7Slm66018  * administrator to resolve the problem).  Note that, apart from another
6330a55fbb7Slm66018  * thread or process opening a new slice during the race-condition window,
6340a55fbb7Slm66018  * failure to open a slice in this function will likely indicate an underlying
6350a55fbb7Slm66018  * drive problem, which will also likely become evident in errors returned by
6360a55fbb7Slm66018  * operations on other slices, and which will require administrative
6370a55fbb7Slm66018  * intervention and possibly servicing the drive.
6380a55fbb7Slm66018  */
6390a55fbb7Slm66018 static void
6400a55fbb7Slm66018 vd_open_new_slices(vd_t *vd)
6410a55fbb7Slm66018 {
6420a55fbb7Slm66018 	int		rval, status;
6430a55fbb7Slm66018 	struct vtoc	vtoc;
6440a55fbb7Slm66018 
6450a55fbb7Slm66018 
6460a55fbb7Slm66018 	/* Get the (new) VTOC for updated slice sizes */
6470a55fbb7Slm66018 	if ((status = ldi_ioctl(vd->ldi_handle[0], DKIOCGVTOC, (intptr_t)&vtoc,
648d10e4ef2Snarayan 		    (vd_open_flags | FKIOCTL), kcred, &rval)) != 0) {
6490a55fbb7Slm66018 		PRN("ldi_ioctl(DKIOCGVTOC) returned errno %d", status);
6500a55fbb7Slm66018 		return;
6510a55fbb7Slm66018 	}
6520a55fbb7Slm66018 
6530a55fbb7Slm66018 	/* Open any newly-non-empty slices */
6540a55fbb7Slm66018 	for (int slice = 0; slice < vd->nslices; slice++) {
6550a55fbb7Slm66018 		/* Skip zero-length slices */
6560a55fbb7Slm66018 		if (vtoc.v_part[slice].p_size == 0) {
6570a55fbb7Slm66018 			if (vd->ldi_handle[slice] != NULL)
6580a55fbb7Slm66018 				PR0("Open slice %u now has zero length", slice);
6590a55fbb7Slm66018 			continue;
6600a55fbb7Slm66018 		}
6610a55fbb7Slm66018 
6620a55fbb7Slm66018 		/* Skip already-open slices */
6630a55fbb7Slm66018 		if (vd->ldi_handle[slice] != NULL)
6640a55fbb7Slm66018 			continue;
6650a55fbb7Slm66018 
6660a55fbb7Slm66018 		PR0("Opening newly-non-empty slice %u", slice);
6670a55fbb7Slm66018 		if ((status = ldi_open_by_dev(&vd->dev[slice], OTYP_BLK,
6680a55fbb7Slm66018 			    vd_open_flags, kcred, &vd->ldi_handle[slice],
6690a55fbb7Slm66018 			    vd->vds->ldi_ident)) != 0) {
6700a55fbb7Slm66018 			PRN("ldi_open_by_dev() returned errno %d "
6710a55fbb7Slm66018 			    "for slice %u", status, slice);
6720a55fbb7Slm66018 		}
6730a55fbb7Slm66018 	}
6740a55fbb7Slm66018 }
6750a55fbb7Slm66018 
6761ae08745Sheppo #define	RNDSIZE(expr) P2ROUNDUP(sizeof (expr), sizeof (uint64_t))
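/*
 * For example, RNDSIZE(int) rounds sizeof (int) == 4 up to 8 on this LP64
 * platform, satisfying the requirement (asserted in vd_ioctl() below) that
 * LDC memory operations use multiples of 8 bytes.
 */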
6771ae08745Sheppo static int
678d10e4ef2Snarayan vd_ioctl(vd_task_t *task)
6791ae08745Sheppo {
6801ae08745Sheppo 	int			i, status;
6811ae08745Sheppo 	void			*buf = NULL;
6820a55fbb7Slm66018 	struct dk_geom		dk_geom = {0};
6830a55fbb7Slm66018 	struct vtoc		vtoc = {0};
684d10e4ef2Snarayan 	vd_t			*vd		= task->vd;
685d10e4ef2Snarayan 	vd_dring_payload_t	*request	= task->request;
6860a55fbb7Slm66018 	vd_ioctl_t		ioctl[] = {
6870a55fbb7Slm66018 		/* Command (no-copy) operations */
6880a55fbb7Slm66018 		{VD_OP_FLUSH, STRINGIZE(VD_OP_FLUSH), 0,
6890a55fbb7Slm66018 		    DKIOCFLUSHWRITECACHE, STRINGIZE(DKIOCFLUSHWRITECACHE),
6900a55fbb7Slm66018 		    NULL, NULL, NULL},
6910a55fbb7Slm66018 
6920a55fbb7Slm66018 		/* "Get" (copy-out) operations */
6930a55fbb7Slm66018 		{VD_OP_GET_WCE, STRINGIZE(VD_OP_GET_WCE), RNDSIZE(int),
6940a55fbb7Slm66018 		    DKIOCGETWCE, STRINGIZE(DKIOCGETWCE),
6950a55fbb7Slm66018 		    NULL, NULL, VD_IDENTITY},
6960a55fbb7Slm66018 		{VD_OP_GET_DISKGEOM, STRINGIZE(VD_OP_GET_DISKGEOM),
6970a55fbb7Slm66018 		    RNDSIZE(vd_geom_t),
6980a55fbb7Slm66018 		    DKIOCGGEOM, STRINGIZE(DKIOCGGEOM),
6990a55fbb7Slm66018 		    &dk_geom, NULL, dk_geom2vd_geom},
7000a55fbb7Slm66018 		{VD_OP_GET_VTOC, STRINGIZE(VD_OP_GET_VTOC), RNDSIZE(vd_vtoc_t),
7010a55fbb7Slm66018 		    DKIOCGVTOC, STRINGIZE(DKIOCGVTOC),
7020a55fbb7Slm66018 		    &vtoc, NULL, vtoc2vd_vtoc},
7030a55fbb7Slm66018 
7040a55fbb7Slm66018 		/* "Set" (copy-in) operations */
7050a55fbb7Slm66018 		{VD_OP_SET_WCE, STRINGIZE(VD_OP_SET_WCE), RNDSIZE(int),
7060a55fbb7Slm66018 		    DKIOCSETWCE, STRINGIZE(DKIOCSETWCE),
7070a55fbb7Slm66018 		    NULL, VD_IDENTITY, NULL},
7080a55fbb7Slm66018 		{VD_OP_SET_DISKGEOM, STRINGIZE(VD_OP_SET_DISKGEOM),
7090a55fbb7Slm66018 		    RNDSIZE(vd_geom_t),
7100a55fbb7Slm66018 		    DKIOCSGEOM, STRINGIZE(DKIOCSGEOM),
7110a55fbb7Slm66018 		    &dk_geom, vd_geom2dk_geom, NULL},
7120a55fbb7Slm66018 		{VD_OP_SET_VTOC, STRINGIZE(VD_OP_SET_VTOC), RNDSIZE(vd_vtoc_t),
7130a55fbb7Slm66018 		    DKIOCSVTOC, STRINGIZE(DKIOCSVTOC),
7140a55fbb7Slm66018 		    &vtoc, vd_vtoc2vtoc, NULL},
7150a55fbb7Slm66018 	};
7161ae08745Sheppo 	size_t		nioctls = (sizeof (ioctl))/(sizeof (ioctl[0]));
7171ae08745Sheppo 
7181ae08745Sheppo 
719d10e4ef2Snarayan 	ASSERT(vd != NULL);
720d10e4ef2Snarayan 	ASSERT(request != NULL);
7211ae08745Sheppo 	ASSERT(request->slice < vd->nslices);
7221ae08745Sheppo 
7231ae08745Sheppo 	/*
7241ae08745Sheppo 	 * Determine ioctl corresponding to caller's "operation" and
7251ae08745Sheppo 	 * validate caller's "nbytes"
7261ae08745Sheppo 	 */
7271ae08745Sheppo 	for (i = 0; i < nioctls; i++) {
7281ae08745Sheppo 		if (request->operation == ioctl[i].operation) {
7290a55fbb7Slm66018 			/* LDC memory operations require 8-byte multiples */
7300a55fbb7Slm66018 			ASSERT(ioctl[i].nbytes % sizeof (uint64_t) == 0);
7310a55fbb7Slm66018 
7320a55fbb7Slm66018 			if (request->nbytes != ioctl[i].nbytes) {
7330a55fbb7Slm66018 				PRN("%s:  Expected nbytes = %lu, got %lu",
7340a55fbb7Slm66018 				    ioctl[i].operation_name, ioctl[i].nbytes,
7350a55fbb7Slm66018 				    request->nbytes);
7361ae08745Sheppo 				return (EINVAL);
7371ae08745Sheppo 			}
7381ae08745Sheppo 
7391ae08745Sheppo 			break;
7401ae08745Sheppo 		}
7411ae08745Sheppo 	}
7421ae08745Sheppo 	ASSERT(i < nioctls);	/* because "operation" already validated */
7431ae08745Sheppo 
7441ae08745Sheppo 	if (request->nbytes)
7451ae08745Sheppo 		buf = kmem_zalloc(request->nbytes, KM_SLEEP);
7461ae08745Sheppo 	status = vd_do_ioctl(vd, request, buf, &ioctl[i]);
7471ae08745Sheppo 	if (request->nbytes)
7481ae08745Sheppo 		kmem_free(buf, request->nbytes);
7490a55fbb7Slm66018 	if ((request->operation == VD_OP_SET_VTOC) &&
7500a55fbb7Slm66018 	    (vd->vdisk_type == VD_DISK_TYPE_DISK))
7510a55fbb7Slm66018 		vd_open_new_slices(vd);
752d10e4ef2Snarayan 	PR0("Returning %d", status);
7531ae08745Sheppo 	return (status);
7541ae08745Sheppo }
7551ae08745Sheppo 
7561ae08745Sheppo /*
7571ae08745Sheppo  * Define the supported operations once the functions for performing them have
7581ae08745Sheppo  * been defined
7591ae08745Sheppo  */
7601ae08745Sheppo static const vds_operation_t	vds_operation[] = {
761d10e4ef2Snarayan 	{VD_OP_BREAD,		vd_start_bio,	vd_complete_bio},
762d10e4ef2Snarayan 	{VD_OP_BWRITE,		vd_start_bio,	vd_complete_bio},
763d10e4ef2Snarayan 	{VD_OP_FLUSH,		vd_ioctl,	NULL},
764d10e4ef2Snarayan 	{VD_OP_GET_WCE,		vd_ioctl,	NULL},
765d10e4ef2Snarayan 	{VD_OP_SET_WCE,		vd_ioctl,	NULL},
766d10e4ef2Snarayan 	{VD_OP_GET_VTOC,	vd_ioctl,	NULL},
767d10e4ef2Snarayan 	{VD_OP_SET_VTOC,	vd_ioctl,	NULL},
768d10e4ef2Snarayan 	{VD_OP_GET_DISKGEOM,	vd_ioctl,	NULL},
769d10e4ef2Snarayan 	{VD_OP_SET_DISKGEOM,	vd_ioctl,	NULL}
7701ae08745Sheppo };
7711ae08745Sheppo 
7721ae08745Sheppo static const size_t	vds_noperations =
7731ae08745Sheppo 	(sizeof (vds_operation))/(sizeof (vds_operation[0]));
7741ae08745Sheppo 
7751ae08745Sheppo /*
776d10e4ef2Snarayan  * Process a task specifying a client I/O request
7771ae08745Sheppo  */
7781ae08745Sheppo static int
779d10e4ef2Snarayan vd_process_task(vd_task_t *task)
7801ae08745Sheppo {
781d10e4ef2Snarayan 	int			i, status;
782d10e4ef2Snarayan 	vd_t			*vd		= task->vd;
783d10e4ef2Snarayan 	vd_dring_payload_t	*request	= task->request;
7841ae08745Sheppo 
7851ae08745Sheppo 
786d10e4ef2Snarayan 	ASSERT(vd != NULL);
787d10e4ef2Snarayan 	ASSERT(request != NULL);
7881ae08745Sheppo 
7891ae08745Sheppo 	/* Range-check slice */
7901ae08745Sheppo 	if (request->slice >= vd->nslices) {
7911ae08745Sheppo 		PRN("Invalid \"slice\" %u (max %u) for virtual disk",
7921ae08745Sheppo 		    request->slice, (vd->nslices - 1));
7931ae08745Sheppo 		return (EINVAL);
7941ae08745Sheppo 	}
7951ae08745Sheppo 
796d10e4ef2Snarayan 	/* Find the requested operation */
7971ae08745Sheppo 	for (i = 0; i < vds_noperations; i++)
7981ae08745Sheppo 		if (request->operation == vds_operation[i].operation)
799d10e4ef2Snarayan 			break;
800d10e4ef2Snarayan 	if (i == vds_noperations) {
8011ae08745Sheppo 		PRN("Unsupported operation %u", request->operation);
8021ae08745Sheppo 		return (ENOTSUP);
8031ae08745Sheppo 	}
8041ae08745Sheppo 
805d10e4ef2Snarayan 	/* Start the operation */
806d10e4ef2Snarayan 	if ((status = vds_operation[i].start(task)) != EINPROGRESS) {
807d10e4ef2Snarayan 		request->status = status;	/* op succeeded or failed */
808d10e4ef2Snarayan 		return (0);			/* but request completed */
8091ae08745Sheppo 	}
8101ae08745Sheppo 
811d10e4ef2Snarayan 	ASSERT(vds_operation[i].complete != NULL);	/* debug case */
812d10e4ef2Snarayan 	if (vds_operation[i].complete == NULL) {	/* non-debug case */
813d10e4ef2Snarayan 		PRN("Unexpected return of EINPROGRESS "
814d10e4ef2Snarayan 		    "with no I/O completion handler");
815d10e4ef2Snarayan 		request->status = EIO;	/* operation failed */
816d10e4ef2Snarayan 		return (0);		/* but request completed */
8171ae08745Sheppo 	}
8181ae08745Sheppo 
819d10e4ef2Snarayan 	/* Queue a task to complete the operation */
820d10e4ef2Snarayan 	status = ddi_taskq_dispatch(vd->completionq, vds_operation[i].complete,
821d10e4ef2Snarayan 	    task, DDI_SLEEP);
822d10e4ef2Snarayan 	/* ddi_taskq_dispatch(9f) guarantees success with DDI_SLEEP */
823d10e4ef2Snarayan 	ASSERT(status == DDI_SUCCESS);
824d10e4ef2Snarayan 
825d10e4ef2Snarayan 	PR1("Operation in progress");
826d10e4ef2Snarayan 	return (EINPROGRESS);	/* completion handler will finish request */
8271ae08745Sheppo }
8281ae08745Sheppo 
8291ae08745Sheppo /*
8300a55fbb7Slm66018  * Return true if the "type", "subtype", and "env" fields of "tag" match the
8310a55fbb7Slm66018  * corresponding remaining arguments; otherwise, return false
8321ae08745Sheppo  */
8330a55fbb7Slm66018 boolean_t
8341ae08745Sheppo vd_msgtype(vio_msg_tag_t *tag, int type, int subtype, int env)
8351ae08745Sheppo {
8361ae08745Sheppo 	return ((tag->vio_msgtype == type) &&
8371ae08745Sheppo 		(tag->vio_subtype == subtype) &&
8380a55fbb7Slm66018 		(tag->vio_subtype_env == env)) ? B_TRUE : B_FALSE;
8391ae08745Sheppo }
8401ae08745Sheppo 
8410a55fbb7Slm66018 /*
8420a55fbb7Slm66018  * Check whether the major/minor version specified in "ver_msg" is supported
8430a55fbb7Slm66018  * by this server.
8440a55fbb7Slm66018  */
8450a55fbb7Slm66018 static boolean_t
8460a55fbb7Slm66018 vds_supported_version(vio_ver_msg_t *ver_msg)
8470a55fbb7Slm66018 {
8480a55fbb7Slm66018 	for (int i = 0; i < vds_num_versions; i++) {
8490a55fbb7Slm66018 		ASSERT(vds_version[i].major > 0);
8500a55fbb7Slm66018 		ASSERT((i == 0) ||
8510a55fbb7Slm66018 		    (vds_version[i].major < vds_version[i-1].major));
8520a55fbb7Slm66018 
8530a55fbb7Slm66018 		/*
8540a55fbb7Slm66018 		 * If the major versions match, adjust the minor version, if
8550a55fbb7Slm66018 		 * necessary, down to the highest value supported by this
8560a55fbb7Slm66018 		 * server and return true so this message will get "ack"ed;
8570a55fbb7Slm66018 		 * the client should also support all minor versions lower
8580a55fbb7Slm66018 		 * than the value it sent
8590a55fbb7Slm66018 		 */
8600a55fbb7Slm66018 		if (ver_msg->ver_major == vds_version[i].major) {
8610a55fbb7Slm66018 			if (ver_msg->ver_minor > vds_version[i].minor) {
8620a55fbb7Slm66018 				PR0("Adjusting minor version from %u to %u",
8630a55fbb7Slm66018 				    ver_msg->ver_minor, vds_version[i].minor);
8640a55fbb7Slm66018 				ver_msg->ver_minor = vds_version[i].minor;
8650a55fbb7Slm66018 			}
8660a55fbb7Slm66018 			return (B_TRUE);
8670a55fbb7Slm66018 		}
8680a55fbb7Slm66018 
8690a55fbb7Slm66018 		/*
8700a55fbb7Slm66018 		 * If the message contains a higher major version number, set
8710a55fbb7Slm66018 		 * the message's major/minor versions to the current values
8720a55fbb7Slm66018 		 * and return false, so this message will get "nack"ed with
8730a55fbb7Slm66018 		 * these values, and the client will potentially try again
8740a55fbb7Slm66018 		 * with the same or a lower version
8750a55fbb7Slm66018 		 */
8760a55fbb7Slm66018 		if (ver_msg->ver_major > vds_version[i].major) {
8770a55fbb7Slm66018 			ver_msg->ver_major = vds_version[i].major;
8780a55fbb7Slm66018 			ver_msg->ver_minor = vds_version[i].minor;
8790a55fbb7Slm66018 			return (B_FALSE);
8800a55fbb7Slm66018 		}
8810a55fbb7Slm66018 
8820a55fbb7Slm66018 		/*
8830a55fbb7Slm66018 		 * Otherwise, the message's major version is less than the
8840a55fbb7Slm66018 		 * current major version, so continue the loop to the next
8850a55fbb7Slm66018 		 * (lower) supported version
8860a55fbb7Slm66018 		 */
8870a55fbb7Slm66018 	}
8880a55fbb7Slm66018 
8890a55fbb7Slm66018 	/*
8900a55fbb7Slm66018 	 * No common version was found; "ground" the version pair in the
8910a55fbb7Slm66018 	 * message to terminate negotiation
8920a55fbb7Slm66018 	 */
8930a55fbb7Slm66018 	ver_msg->ver_major = 0;
8940a55fbb7Slm66018 	ver_msg->ver_minor = 0;
8950a55fbb7Slm66018 	return (B_FALSE);
8960a55fbb7Slm66018 }
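
/*
 * With the current vds_version[] of {{1, 0}}, for example:  a client
 * proposing version 1.2 has its minor number adjusted to 0 and is "ack"ed;
 * a client proposing 2.0 has the pair rewritten to 1.0 and is "nack"ed so
 * that it may retry with a lower version; and a client proposing 0.x gets
 * 0.0 back, terminating negotiation.
 */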
8970a55fbb7Slm66018 
8980a55fbb7Slm66018 /*
8990a55fbb7Slm66018  * Process a version message from a client.  vds expects to receive version
9000a55fbb7Slm66018  * messages from clients seeking service, but never issues version messages
9010a55fbb7Slm66018  * itself; therefore, vds can ACK or NACK client version messages, but does
9020a55fbb7Slm66018  * not expect to receive version-message ACKs or NACKs (and will treat such
9030a55fbb7Slm66018  * messages as invalid).
9040a55fbb7Slm66018  */
9051ae08745Sheppo static int
9060a55fbb7Slm66018 vd_process_ver_msg(vd_t *vd, vio_msg_t *msg, size_t msglen)
9071ae08745Sheppo {
9081ae08745Sheppo 	vio_ver_msg_t	*ver_msg = (vio_ver_msg_t *)msg;
9091ae08745Sheppo 
9101ae08745Sheppo 
9111ae08745Sheppo 	ASSERT(msglen >= sizeof (msg->tag));
9121ae08745Sheppo 
9131ae08745Sheppo 	if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO,
9141ae08745Sheppo 		VIO_VER_INFO)) {
9151ae08745Sheppo 		return (ENOMSG);	/* not a version message */
9161ae08745Sheppo 	}
9171ae08745Sheppo 
9181ae08745Sheppo 	if (msglen != sizeof (*ver_msg)) {
9191ae08745Sheppo 		PRN("Expected %lu-byte version message; "
9201ae08745Sheppo 		    "received %lu bytes", sizeof (*ver_msg), msglen);
9211ae08745Sheppo 		return (EBADMSG);
9221ae08745Sheppo 	}
9231ae08745Sheppo 
9241ae08745Sheppo 	if (ver_msg->dev_class != VDEV_DISK) {
9251ae08745Sheppo 		PRN("Expected device class %u (disk); received %u",
9261ae08745Sheppo 		    VDEV_DISK, ver_msg->dev_class);
9271ae08745Sheppo 		return (EBADMSG);
9281ae08745Sheppo 	}
9291ae08745Sheppo 
9300a55fbb7Slm66018 	/*
9310a55fbb7Slm66018 	 * We're talking to the expected kind of client; set our device class
9320a55fbb7Slm66018 	 * for "ack/nack" back to the client
9330a55fbb7Slm66018 	 */
9341ae08745Sheppo 	ver_msg->dev_class = VDEV_DISK_SERVER;
9350a55fbb7Slm66018 
9360a55fbb7Slm66018 	/*
9370a55fbb7Slm66018 	 * Check whether the (valid) version message specifies a version
9380a55fbb7Slm66018 	 * supported by this server.  If the version is not supported, return
9390a55fbb7Slm66018 	 * EBADMSG so the message will get "nack"ed; vds_supported_version()
9400a55fbb7Slm66018 	 * will have updated the message with a supported version for the
9410a55fbb7Slm66018 	 * client to consider
9420a55fbb7Slm66018 	 */
9430a55fbb7Slm66018 	if (!vds_supported_version(ver_msg))
9440a55fbb7Slm66018 		return (EBADMSG);
9450a55fbb7Slm66018 
9460a55fbb7Slm66018 
9470a55fbb7Slm66018 	/*
9480a55fbb7Slm66018 	 * A version has been agreed upon; use the client's SID for
9490a55fbb7Slm66018 	 * communication on this channel now
9500a55fbb7Slm66018 	 */
9510a55fbb7Slm66018 	ASSERT(!(vd->initialized & VD_SID));
9520a55fbb7Slm66018 	vd->sid = ver_msg->tag.vio_sid;
9530a55fbb7Slm66018 	vd->initialized |= VD_SID;
9540a55fbb7Slm66018 
9550a55fbb7Slm66018 	/*
9560a55fbb7Slm66018 	 * When multiple versions are supported, this function should store
9570a55fbb7Slm66018 	 * the negotiated major and minor version values in the "vd" data
9580a55fbb7Slm66018 	 * structure to govern further communication; in particular, note that
9590a55fbb7Slm66018 	 * the client might have specified a lower minor version for the
9600a55fbb7Slm66018 	 * agreed major version than specified in the vds_version[] array.  The
9610a55fbb7Slm66018 	 * following assertions should help remind future maintainers to make
9620a55fbb7Slm66018 	 * the appropriate changes to support multiple versions.
9630a55fbb7Slm66018 	 */
9640a55fbb7Slm66018 	ASSERT(vds_num_versions == 1);
9650a55fbb7Slm66018 	ASSERT(ver_msg->ver_major == vds_version[0].major);
9660a55fbb7Slm66018 	ASSERT(ver_msg->ver_minor == vds_version[0].minor);
9670a55fbb7Slm66018 
9680a55fbb7Slm66018 	PR0("Using major version %u, minor version %u",
9690a55fbb7Slm66018 	    ver_msg->ver_major, ver_msg->ver_minor);
9701ae08745Sheppo 	return (0);
9711ae08745Sheppo }
9721ae08745Sheppo 
9731ae08745Sheppo static int
9741ae08745Sheppo vd_process_attr_msg(vd_t *vd, vio_msg_t *msg, size_t msglen)
9751ae08745Sheppo {
9761ae08745Sheppo 	vd_attr_msg_t	*attr_msg = (vd_attr_msg_t *)msg;
9771ae08745Sheppo 
9781ae08745Sheppo 
9791ae08745Sheppo 	ASSERT(msglen >= sizeof (msg->tag));
9801ae08745Sheppo 
9811ae08745Sheppo 	if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO,
9821ae08745Sheppo 		VIO_ATTR_INFO)) {
983d10e4ef2Snarayan 		PR0("Message is not an attribute message");
984d10e4ef2Snarayan 		return (ENOMSG);
9851ae08745Sheppo 	}
9861ae08745Sheppo 
9871ae08745Sheppo 	if (msglen != sizeof (*attr_msg)) {
9881ae08745Sheppo 		PRN("Expected %lu-byte attribute message; "
9891ae08745Sheppo 		    "received %lu bytes", sizeof (*attr_msg), msglen);
9901ae08745Sheppo 		return (EBADMSG);
9911ae08745Sheppo 	}
9921ae08745Sheppo 
9931ae08745Sheppo 	if (attr_msg->max_xfer_sz == 0) {
9941ae08745Sheppo 		PRN("Received maximum transfer size of 0 from client");
9951ae08745Sheppo 		return (EBADMSG);
9961ae08745Sheppo 	}
9971ae08745Sheppo 
9981ae08745Sheppo 	if ((attr_msg->xfer_mode != VIO_DESC_MODE) &&
9991ae08745Sheppo 	    (attr_msg->xfer_mode != VIO_DRING_MODE)) {
10001ae08745Sheppo 		PRN("Client requested unsupported transfer mode");
10011ae08745Sheppo 		return (EBADMSG);
10021ae08745Sheppo 	}
10031ae08745Sheppo 
10041ae08745Sheppo 
10051ae08745Sheppo 	/* Success:  valid message and transfer mode */
10061ae08745Sheppo 	vd->xfer_mode = attr_msg->xfer_mode;
10071ae08745Sheppo 	if (vd->xfer_mode == VIO_DESC_MODE) {
10081ae08745Sheppo 		/*
10091ae08745Sheppo 		 * The vd_dring_inband_msg_t contains one cookie; need room
10101ae08745Sheppo 		 * for up to n-1 more cookies, where "n" is the number of full
10111ae08745Sheppo 		 * pages plus possibly one partial page required to cover
10121ae08745Sheppo 		 * "max_xfer_sz".  Add room for one more cookie if
10131ae08745Sheppo 		 * "max_xfer_sz" isn't an integral multiple of the page size.
10141ae08745Sheppo 		 * Must first get the maximum transfer size in bytes.
10151ae08745Sheppo 		 */
10161ae08745Sheppo 		size_t	max_xfer_bytes = attr_msg->vdisk_block_size ?
10171ae08745Sheppo 		    attr_msg->vdisk_block_size*attr_msg->max_xfer_sz :
10181ae08745Sheppo 		    attr_msg->max_xfer_sz;
10191ae08745Sheppo 		size_t	max_inband_msglen =
10201ae08745Sheppo 		    sizeof (vd_dring_inband_msg_t) +
10211ae08745Sheppo 		    ((max_xfer_bytes/PAGESIZE +
10221ae08745Sheppo 			((max_xfer_bytes % PAGESIZE) ? 1 : 0))*
10231ae08745Sheppo 			(sizeof (ldc_mem_cookie_t)));
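		/*
		 * For example, assuming an 8 KB PAGESIZE and 20 KB of
		 * max_xfer_bytes:  20 KB covers 2 full pages plus one
		 * partial page, so room for 3 cookies is added beyond the
		 * one already present in vd_dring_inband_msg_t.
		 */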
10241ae08745Sheppo 
10251ae08745Sheppo 		/*
10261ae08745Sheppo 		 * Set the maximum expected message length to
10271ae08745Sheppo 		 * accommodate in-band-descriptor messages with all
10281ae08745Sheppo 		 * their cookies
10291ae08745Sheppo 		 */
10301ae08745Sheppo 		vd->max_msglen = MAX(vd->max_msglen, max_inband_msglen);
1031d10e4ef2Snarayan 
1032d10e4ef2Snarayan 		/*
1033d10e4ef2Snarayan 		 * Initialize the data structure for processing in-band I/O
1034d10e4ef2Snarayan 		 * request descriptors
1035d10e4ef2Snarayan 		 */
1036d10e4ef2Snarayan 		vd->inband_task.vd	= vd;
1037d10e4ef2Snarayan 		vd->inband_task.index	= 0;
1038d10e4ef2Snarayan 		vd->inband_task.type	= VD_FINAL_RANGE_TASK;	/* range == 1 */
10391ae08745Sheppo 	}
10401ae08745Sheppo 
1041*e1ebb9ecSlm66018 	/* Return the device's block size and max transfer size to the client */
1042*e1ebb9ecSlm66018 	attr_msg->vdisk_block_size	= DEV_BSIZE;
1043*e1ebb9ecSlm66018 	attr_msg->max_xfer_sz		= vd->max_xfer_sz;
1044*e1ebb9ecSlm66018 
10451ae08745Sheppo 	attr_msg->vdisk_size = vd->vdisk_size;
10461ae08745Sheppo 	attr_msg->vdisk_type = vd->vdisk_type;
10471ae08745Sheppo 	attr_msg->operations = vds_operations;
10481ae08745Sheppo 	PR0("%s", VD_CLIENT(vd));
10491ae08745Sheppo 	return (0);
10501ae08745Sheppo }
10511ae08745Sheppo 
10521ae08745Sheppo static int
10531ae08745Sheppo vd_process_dring_reg_msg(vd_t *vd, vio_msg_t *msg, size_t msglen)
10541ae08745Sheppo {
10551ae08745Sheppo 	int			status;
10561ae08745Sheppo 	size_t			expected;
10571ae08745Sheppo 	ldc_mem_info_t		dring_minfo;
10581ae08745Sheppo 	vio_dring_reg_msg_t	*reg_msg = (vio_dring_reg_msg_t *)msg;
10591ae08745Sheppo 
10601ae08745Sheppo 
10611ae08745Sheppo 	ASSERT(msglen >= sizeof (msg->tag));
10621ae08745Sheppo 
10631ae08745Sheppo 	if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO,
10641ae08745Sheppo 		VIO_DRING_REG)) {
1065d10e4ef2Snarayan 		PR0("Message is not a register-dring message");
1066d10e4ef2Snarayan 		return (ENOMSG);
10671ae08745Sheppo 	}
10681ae08745Sheppo 
10691ae08745Sheppo 	if (msglen < sizeof (*reg_msg)) {
10701ae08745Sheppo 		PRN("Expected at least %lu-byte register-dring message; "
10711ae08745Sheppo 		    "received %lu bytes", sizeof (*reg_msg), msglen);
10721ae08745Sheppo 		return (EBADMSG);
10731ae08745Sheppo 	}
10741ae08745Sheppo 
10751ae08745Sheppo 	expected = sizeof (*reg_msg) +
10761ae08745Sheppo 	    (reg_msg->ncookies - 1)*(sizeof (reg_msg->cookie[0]));
10771ae08745Sheppo 	if (msglen != expected) {
10781ae08745Sheppo 		PRN("Expected %lu-byte register-dring message; "
10791ae08745Sheppo 		    "received %lu bytes", expected, msglen);
10801ae08745Sheppo 		return (EBADMSG);
10811ae08745Sheppo 	}
10821ae08745Sheppo 
10831ae08745Sheppo 	if (vd->initialized & VD_DRING) {
10841ae08745Sheppo 		PRN("A dring was previously registered; only support one");
10851ae08745Sheppo 		return (EBADMSG);
10861ae08745Sheppo 	}
10871ae08745Sheppo 
1088d10e4ef2Snarayan 	if (reg_msg->num_descriptors > INT32_MAX) {
1089d10e4ef2Snarayan 		PRN("reg_msg->num_descriptors = %u; must be <= %u (%s)",
1090d10e4ef2Snarayan 	    reg_msg->num_descriptors, INT32_MAX, STRINGIZE(INT32_MAX));
1091d10e4ef2Snarayan 		return (EBADMSG);
1092d10e4ef2Snarayan 	}
1093d10e4ef2Snarayan 
10941ae08745Sheppo 	if (reg_msg->ncookies != 1) {
10951ae08745Sheppo 		/*
10961ae08745Sheppo 		 * In addition to fixing the assertion in the success case
10971ae08745Sheppo 		 * below, supporting drings which require more than one
10981ae08745Sheppo 		 * "cookie" requires increasing the value of vd->max_msglen
10991ae08745Sheppo 		 * somewhere in the code path prior to receiving the message
11001ae08745Sheppo 		 * which results in calling this function.  Note that without
11011ae08745Sheppo 		 * making this change, the larger message size required to
11021ae08745Sheppo 		 * accommodate multiple cookies cannot be successfully
11031ae08745Sheppo 		 * received, so this function will not even get called.
11041ae08745Sheppo 		 * Gracefully accommodating more dring cookies might
11051ae08745Sheppo 		 * reasonably demand exchanging an additional attribute or
11061ae08745Sheppo 		 * making a minor protocol adjustment
11071ae08745Sheppo 		 */
11081ae08745Sheppo 		PRN("reg_msg->ncookies = %u != 1", reg_msg->ncookies);
11091ae08745Sheppo 		return (EBADMSG);
11101ae08745Sheppo 	}
11111ae08745Sheppo 
11121ae08745Sheppo 	status = ldc_mem_dring_map(vd->ldc_handle, reg_msg->cookie,
11131ae08745Sheppo 	    reg_msg->ncookies, reg_msg->num_descriptors,
11141ae08745Sheppo 	    reg_msg->descriptor_size, LDC_SHADOW_MAP, &vd->dring_handle);
11151ae08745Sheppo 	if (status != 0) {
11161ae08745Sheppo 		PRN("ldc_mem_dring_map() returned errno %d", status);
11171ae08745Sheppo 		return (status);
11181ae08745Sheppo 	}
11191ae08745Sheppo 
11201ae08745Sheppo 	/*
11211ae08745Sheppo 	 * To remove the need for this assertion, must call
11221ae08745Sheppo 	 * ldc_mem_dring_nextcookie() successfully ncookies-1 times after a
11231ae08745Sheppo 	 * successful call to ldc_mem_dring_map()
11241ae08745Sheppo 	 */
11251ae08745Sheppo 	ASSERT(reg_msg->ncookies == 1);
11261ae08745Sheppo 
11271ae08745Sheppo 	if ((status =
11281ae08745Sheppo 		ldc_mem_dring_info(vd->dring_handle, &dring_minfo)) != 0) {
11291ae08745Sheppo 		PRN("ldc_mem_dring_info() returned errno %d", status);
11301ae08745Sheppo 		if ((status = ldc_mem_dring_unmap(vd->dring_handle)) != 0)
11311ae08745Sheppo 			PRN("ldc_mem_dring_unmap() returned errno %d", status);
11321ae08745Sheppo 		return (status);
11331ae08745Sheppo 	}
11341ae08745Sheppo 
11351ae08745Sheppo 	if (dring_minfo.vaddr == NULL) {
11361ae08745Sheppo 		PRN("Descriptor ring virtual address is NULL");
11370a55fbb7Slm66018 		return (ENXIO);
11381ae08745Sheppo 	}
11391ae08745Sheppo 
11401ae08745Sheppo 
1141d10e4ef2Snarayan 	/* Initialize for valid message and mapped dring */
11421ae08745Sheppo 	PR1("descriptor size = %u, dring length = %u",
11431ae08745Sheppo 	    vd->descriptor_size, vd->dring_len);
11441ae08745Sheppo 	vd->initialized |= VD_DRING;
11451ae08745Sheppo 	vd->dring_ident = 1;	/* "There Can Be Only One" */
11461ae08745Sheppo 	vd->dring = dring_minfo.vaddr;
11471ae08745Sheppo 	vd->descriptor_size = reg_msg->descriptor_size;
11481ae08745Sheppo 	vd->dring_len = reg_msg->num_descriptors;
11491ae08745Sheppo 	reg_msg->dring_ident = vd->dring_ident;
1150d10e4ef2Snarayan 
1151d10e4ef2Snarayan 	/*
1152d10e4ef2Snarayan 	 * Allocate and initialize a "shadow" array of data structures for
1153d10e4ef2Snarayan 	 * tasks to process I/O requests in dring elements
1154d10e4ef2Snarayan 	 */
1155d10e4ef2Snarayan 	vd->dring_task =
1156d10e4ef2Snarayan 	    kmem_zalloc((sizeof (*vd->dring_task)) * vd->dring_len, KM_SLEEP);
1157d10e4ef2Snarayan 	for (int i = 0; i < vd->dring_len; i++) {
1158d10e4ef2Snarayan 		vd->dring_task[i].vd		= vd;
1159d10e4ef2Snarayan 		vd->dring_task[i].index		= i;
1160d10e4ef2Snarayan 		vd->dring_task[i].request	= &VD_DRING_ELEM(i)->payload;
1161d10e4ef2Snarayan 	}
1162d10e4ef2Snarayan 
11631ae08745Sheppo 	return (0);
11641ae08745Sheppo }
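/*
 * Illustrative sizing for the registration handled above (the numbers are
 * hypothetical, not required by the protocol):  a client registering a ring
 * of 512 descriptors of 256 bytes each results in ldc_mem_dring_map() being
 * called with num_descriptors = 512 and descriptor_size = 256, and in this
 * server allocating 512 * sizeof (*vd->dring_task) shadow entries, each
 * pre-wired with its ring index and with
 *
 *	vd->dring_task[i].request == &VD_DRING_ELEM(i)->payload
 */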
11651ae08745Sheppo 
11661ae08745Sheppo static int
11671ae08745Sheppo vd_process_dring_unreg_msg(vd_t *vd, vio_msg_t *msg, size_t msglen)
11681ae08745Sheppo {
11691ae08745Sheppo 	vio_dring_unreg_msg_t	*unreg_msg = (vio_dring_unreg_msg_t *)msg;
11701ae08745Sheppo 
11711ae08745Sheppo 
11721ae08745Sheppo 	ASSERT(msglen >= sizeof (msg->tag));
11731ae08745Sheppo 
11741ae08745Sheppo 	if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO,
11751ae08745Sheppo 		VIO_DRING_UNREG)) {
1176d10e4ef2Snarayan 		PR0("Message is not an unregister-dring message");
1177d10e4ef2Snarayan 		return (ENOMSG);
11781ae08745Sheppo 	}
11791ae08745Sheppo 
11801ae08745Sheppo 	if (msglen != sizeof (*unreg_msg)) {
11811ae08745Sheppo 		PRN("Expected %lu-byte unregister-dring message; "
11821ae08745Sheppo 		    "received %lu bytes", sizeof (*unreg_msg), msglen);
11831ae08745Sheppo 		return (EBADMSG);
11841ae08745Sheppo 	}
11851ae08745Sheppo 
11861ae08745Sheppo 	if (unreg_msg->dring_ident != vd->dring_ident) {
11871ae08745Sheppo 		PRN("Expected dring ident %lu; received %lu",
11881ae08745Sheppo 		    vd->dring_ident, unreg_msg->dring_ident);
11891ae08745Sheppo 		return (EBADMSG);
11901ae08745Sheppo 	}
11911ae08745Sheppo 
11921ae08745Sheppo 	return (0);
11931ae08745Sheppo }
11941ae08745Sheppo 
11951ae08745Sheppo static int
11961ae08745Sheppo process_rdx_msg(vio_msg_t *msg, size_t msglen)
11971ae08745Sheppo {
11981ae08745Sheppo 	ASSERT(msglen >= sizeof (msg->tag));
11991ae08745Sheppo 
1200d10e4ef2Snarayan 	if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_RDX)) {
1201d10e4ef2Snarayan 		PR0("Message is not an RDX message");
1202d10e4ef2Snarayan 		return (ENOMSG);
1203d10e4ef2Snarayan 	}
12041ae08745Sheppo 
12051ae08745Sheppo 	if (msglen != sizeof (vio_rdx_msg_t)) {
12061ae08745Sheppo 		PRN("Expected %lu-byte RDX message; received %lu bytes",
12071ae08745Sheppo 		    sizeof (vio_rdx_msg_t), msglen);
12081ae08745Sheppo 		return (EBADMSG);
12091ae08745Sheppo 	}
12101ae08745Sheppo 
1211d10e4ef2Snarayan 	PR0("Valid RDX message");
12121ae08745Sheppo 	return (0);
12131ae08745Sheppo }
12141ae08745Sheppo 
12151ae08745Sheppo static int
12161ae08745Sheppo vd_check_seq_num(vd_t *vd, uint64_t seq_num)
12171ae08745Sheppo {
12181ae08745Sheppo 	if ((vd->initialized & VD_SEQ_NUM) && (seq_num != vd->seq_num + 1)) {
12191ae08745Sheppo 		PRN("Received seq_num %lu; expected %lu",
12201ae08745Sheppo 		    seq_num, (vd->seq_num + 1));
1221d10e4ef2Snarayan 		vd_need_reset(vd, B_FALSE);
12221ae08745Sheppo 		return (1);
12231ae08745Sheppo 	}
12241ae08745Sheppo 
12251ae08745Sheppo 	vd->seq_num = seq_num;
12261ae08745Sheppo 	vd->initialized |= VD_SEQ_NUM;	/* superfluous after first time... */
12271ae08745Sheppo 	return (0);
12281ae08745Sheppo }
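/*
 * The sequence-number rule enforced above, by example (the values are
 * hypothetical):  the first data message only establishes the baseline, so
 * a seq_num of 10 is accepted and recorded; 11 and then 12 are accepted in
 * turn, but receiving 13 directly after 11, or 11 a second time, fails the
 * (seq_num == vd->seq_num + 1) test and schedules a connection reset via
 * vd_need_reset().
 */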
12291ae08745Sheppo 
12301ae08745Sheppo /*
12311ae08745Sheppo  * Return the expected size of an inband-descriptor message with all the
12321ae08745Sheppo  * cookies it claims to include
12331ae08745Sheppo  */
12341ae08745Sheppo static size_t
12351ae08745Sheppo expected_inband_size(vd_dring_inband_msg_t *msg)
12361ae08745Sheppo {
12371ae08745Sheppo 	return ((sizeof (*msg)) +
12381ae08745Sheppo 	    (msg->payload.ncookies - 1)*(sizeof (msg->payload.cookie[0])));
12391ae08745Sheppo }
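/*
 * For example (sizes are hypothetical):  if sizeof (vd_dring_inband_msg_t)
 * is S bytes and each element of the payload's cookie[] array is C bytes,
 * a message whose payload claims ncookies == 3 must be exactly S + 2*C
 * bytes long, since the structure definition already includes the first
 * cookie.
 */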
12401ae08745Sheppo 
12411ae08745Sheppo /*
12421ae08745Sheppo  * Process an in-band descriptor message:  used with clients like OBP, with
12431ae08745Sheppo  * which vds exchanges descriptors within VIO message payloads, rather than
12441ae08745Sheppo  * operating on them within a descriptor ring
12451ae08745Sheppo  */
12461ae08745Sheppo static int
1247d10e4ef2Snarayan vd_process_desc_msg(vd_t *vd, vio_msg_t *msg, size_t msglen, size_t msgsize)
12481ae08745Sheppo {
12491ae08745Sheppo 	size_t			expected;
12501ae08745Sheppo 	vd_dring_inband_msg_t	*desc_msg = (vd_dring_inband_msg_t *)msg;
12511ae08745Sheppo 
12521ae08745Sheppo 
12531ae08745Sheppo 	ASSERT(msglen >= sizeof (msg->tag));
12541ae08745Sheppo 
12551ae08745Sheppo 	if (!vd_msgtype(&msg->tag, VIO_TYPE_DATA, VIO_SUBTYPE_INFO,
1256d10e4ef2Snarayan 		VIO_DESC_DATA)) {
1257d10e4ef2Snarayan 		PR1("Message is not an in-band-descriptor message");
1258d10e4ef2Snarayan 		return (ENOMSG);
1259d10e4ef2Snarayan 	}
12601ae08745Sheppo 
12611ae08745Sheppo 	if (msglen < sizeof (*desc_msg)) {
12621ae08745Sheppo 		PRN("Expected at least %lu-byte descriptor message; "
12631ae08745Sheppo 		    "received %lu bytes", sizeof (*desc_msg), msglen);
12641ae08745Sheppo 		return (EBADMSG);
12651ae08745Sheppo 	}
12661ae08745Sheppo 
12671ae08745Sheppo 	if (msglen != (expected = expected_inband_size(desc_msg))) {
12681ae08745Sheppo 		PRN("Expected %lu-byte descriptor message; "
12691ae08745Sheppo 		    "received %lu bytes", expected, msglen);
12701ae08745Sheppo 		return (EBADMSG);
12711ae08745Sheppo 	}
12721ae08745Sheppo 
1273d10e4ef2Snarayan 	if (vd_check_seq_num(vd, desc_msg->hdr.seq_num) != 0)
12741ae08745Sheppo 		return (EBADMSG);
12751ae08745Sheppo 
1276d10e4ef2Snarayan 	/*
1277d10e4ef2Snarayan 	 * Valid message:  Set up the in-band descriptor task and process the
1278d10e4ef2Snarayan 	 * request.  Arrange to acknowledge the client's message, unless an
1279d10e4ef2Snarayan 	 * error processing the descriptor task results in setting
1280d10e4ef2Snarayan 	 * VIO_SUBTYPE_NACK
1281d10e4ef2Snarayan 	 */
1282d10e4ef2Snarayan 	PR1("Valid in-band-descriptor message");
1283d10e4ef2Snarayan 	msg->tag.vio_subtype = VIO_SUBTYPE_ACK;
1284d10e4ef2Snarayan 	vd->inband_task.msg	= msg;
1285d10e4ef2Snarayan 	vd->inband_task.msglen	= msglen;
1286d10e4ef2Snarayan 	vd->inband_task.msgsize	= msgsize;
1287d10e4ef2Snarayan 	vd->inband_task.request	= &desc_msg->payload;
1288d10e4ef2Snarayan 	return (vd_process_task(&vd->inband_task));
12891ae08745Sheppo }
12901ae08745Sheppo 
12911ae08745Sheppo static int
1292d10e4ef2Snarayan vd_process_element(vd_t *vd, vd_task_type_t type, uint32_t idx,
1293d10e4ef2Snarayan     vio_msg_t *msg, size_t msglen, size_t msgsize)
12941ae08745Sheppo {
12951ae08745Sheppo 	int			status;
1296d10e4ef2Snarayan 	boolean_t		ready;
1297d10e4ef2Snarayan 	vd_dring_entry_t	*elem = VD_DRING_ELEM(idx);
12981ae08745Sheppo 
12991ae08745Sheppo 
1300d10e4ef2Snarayan 	/* Accept the updated dring element */
1301d10e4ef2Snarayan 	if ((status = ldc_mem_dring_acquire(vd->dring_handle, idx, idx)) != 0) {
13021ae08745Sheppo 		PRN("ldc_mem_dring_acquire() returned errno %d", status);
13031ae08745Sheppo 		return (status);
13041ae08745Sheppo 	}
1305d10e4ef2Snarayan 	ready = (elem->hdr.dstate == VIO_DESC_READY);
1306d10e4ef2Snarayan 	if (ready) {
1307d10e4ef2Snarayan 		elem->hdr.dstate = VIO_DESC_ACCEPTED;
1308d10e4ef2Snarayan 	} else {
1309d10e4ef2Snarayan 		PRN("descriptor %u not ready", idx);
1310d10e4ef2Snarayan 		VD_DUMP_DRING_ELEM(elem);
1311d10e4ef2Snarayan 	}
1312d10e4ef2Snarayan 	if ((status = ldc_mem_dring_release(vd->dring_handle, idx, idx)) != 0) {
13131ae08745Sheppo 		PRN("ldc_mem_dring_release() returned errno %d", status);
13141ae08745Sheppo 		return (status);
13151ae08745Sheppo 	}
1316d10e4ef2Snarayan 	if (!ready)
1317d10e4ef2Snarayan 		return (EBUSY);
13181ae08745Sheppo 
13191ae08745Sheppo 
1320d10e4ef2Snarayan 	/* Initialize a task and process the accepted element */
1321d10e4ef2Snarayan 	PR1("Processing dring element %u", idx);
1322d10e4ef2Snarayan 	vd->dring_task[idx].type	= type;
1323d10e4ef2Snarayan 	vd->dring_task[idx].msg		= msg;
1324d10e4ef2Snarayan 	vd->dring_task[idx].msglen	= msglen;
1325d10e4ef2Snarayan 	vd->dring_task[idx].msgsize	= msgsize;
1326d10e4ef2Snarayan 	if ((status = vd_process_task(&vd->dring_task[idx])) != EINPROGRESS)
1327d10e4ef2Snarayan 		status = vd_mark_elem_done(vd, idx, elem->payload.status);
13281ae08745Sheppo 
13291ae08745Sheppo 	return (status);
13301ae08745Sheppo }
13311ae08745Sheppo 
13321ae08745Sheppo static int
1333d10e4ef2Snarayan vd_process_element_range(vd_t *vd, int start, int end,
1334d10e4ef2Snarayan     vio_msg_t *msg, size_t msglen, size_t msgsize)
1335d10e4ef2Snarayan {
1336d10e4ef2Snarayan 	int		i, n, nelem, status = 0;
1337d10e4ef2Snarayan 	boolean_t	inprogress = B_FALSE;
1338d10e4ef2Snarayan 	vd_task_type_t	type;
1339d10e4ef2Snarayan 
1340d10e4ef2Snarayan 
1341d10e4ef2Snarayan 	ASSERT(start >= 0);
1342d10e4ef2Snarayan 	ASSERT(end >= 0);
1343d10e4ef2Snarayan 
1344d10e4ef2Snarayan 	/*
1345d10e4ef2Snarayan 	 * Arrange to acknowledge the client's message, unless an error
1346d10e4ef2Snarayan 	 * processing one of the dring elements results in setting
1347d10e4ef2Snarayan 	 * VIO_SUBTYPE_NACK
1348d10e4ef2Snarayan 	 */
1349d10e4ef2Snarayan 	msg->tag.vio_subtype = VIO_SUBTYPE_ACK;
1350d10e4ef2Snarayan 
1351d10e4ef2Snarayan 	/*
1352d10e4ef2Snarayan 	 * Process the dring elements in the range
1353d10e4ef2Snarayan 	 */
1354d10e4ef2Snarayan 	nelem = ((end < start) ? end + vd->dring_len : end) - start + 1;
1355d10e4ef2Snarayan 	for (i = start, n = nelem; n > 0; i = (i + 1) % vd->dring_len, n--) {
1356d10e4ef2Snarayan 		((vio_dring_msg_t *)msg)->end_idx = i;
1357d10e4ef2Snarayan 		type = (n == 1) ? VD_FINAL_RANGE_TASK : VD_NONFINAL_RANGE_TASK;
1358d10e4ef2Snarayan 		status = vd_process_element(vd, type, i, msg, msglen, msgsize);
1359d10e4ef2Snarayan 		if (status == EINPROGRESS)
1360d10e4ef2Snarayan 			inprogress = B_TRUE;
1361d10e4ef2Snarayan 		else if (status != 0)
1362d10e4ef2Snarayan 			break;
1363d10e4ef2Snarayan 	}
1364d10e4ef2Snarayan 
1365d10e4ef2Snarayan 	/*
1366d10e4ef2Snarayan 	 * If some, but not all, operations of a multi-element range are in
1367d10e4ef2Snarayan 	 * progress, wait for other operations to complete before returning
1368d10e4ef2Snarayan 	 * (which will result in "ack" or "nack" of the message).  Note that
1369d10e4ef2Snarayan 	 * all outstanding operations will need to complete, not just the ones
1370d10e4ef2Snarayan 	 * corresponding to the current range of dring elements; however, as
1371d10e4ef2Snarayan 	 * this situation is an error case, performance is less critical.
1372d10e4ef2Snarayan 	 */
1373d10e4ef2Snarayan 	if ((nelem > 1) && (status != EINPROGRESS) && inprogress)
1374d10e4ef2Snarayan 		ddi_taskq_wait(vd->completionq);
1375d10e4ef2Snarayan 
1376d10e4ef2Snarayan 	return (status);
1377d10e4ef2Snarayan }
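/*
 * The wrap-around arithmetic above, by example (the values are
 * hypothetical):  with vd->dring_len == 32, start == 30, and end == 1,
 *
 *	nelem = (1 + 32) - 30 + 1 = 4
 *
 * so elements 30, 31, 0, and 1 are processed in that order, and only the
 * last of them is typed VD_FINAL_RANGE_TASK.
 */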
1378d10e4ef2Snarayan 
1379d10e4ef2Snarayan static int
1380d10e4ef2Snarayan vd_process_dring_msg(vd_t *vd, vio_msg_t *msg, size_t msglen, size_t msgsize)
13811ae08745Sheppo {
13821ae08745Sheppo 	vio_dring_msg_t	*dring_msg = (vio_dring_msg_t *)msg;
13831ae08745Sheppo 
13841ae08745Sheppo 
13851ae08745Sheppo 	ASSERT(msglen >= sizeof (msg->tag));
13861ae08745Sheppo 
13871ae08745Sheppo 	if (!vd_msgtype(&msg->tag, VIO_TYPE_DATA, VIO_SUBTYPE_INFO,
13881ae08745Sheppo 		VIO_DRING_DATA)) {
1389d10e4ef2Snarayan 		PR1("Message is not a dring-data message");
1390d10e4ef2Snarayan 		return (ENOMSG);
13911ae08745Sheppo 	}
13921ae08745Sheppo 
13931ae08745Sheppo 	if (msglen != sizeof (*dring_msg)) {
13941ae08745Sheppo 		PRN("Expected %lu-byte dring message; received %lu bytes",
13951ae08745Sheppo 		    sizeof (*dring_msg), msglen);
13961ae08745Sheppo 		return (EBADMSG);
13971ae08745Sheppo 	}
13981ae08745Sheppo 
1399d10e4ef2Snarayan 	if (vd_check_seq_num(vd, dring_msg->seq_num) != 0)
14001ae08745Sheppo 		return (EBADMSG);
14011ae08745Sheppo 
14021ae08745Sheppo 	if (dring_msg->dring_ident != vd->dring_ident) {
14031ae08745Sheppo 		PRN("Expected dring ident %lu; received ident %lu",
14041ae08745Sheppo 		    vd->dring_ident, dring_msg->dring_ident);
14051ae08745Sheppo 		return (EBADMSG);
14061ae08745Sheppo 	}
14071ae08745Sheppo 
1408d10e4ef2Snarayan 	if (dring_msg->start_idx >= vd->dring_len) {
1409d10e4ef2Snarayan 		PRN("\"start_idx\" = %u; must be less than %u",
1410d10e4ef2Snarayan 		    dring_msg->start_idx, vd->dring_len);
1411d10e4ef2Snarayan 		return (EBADMSG);
1412d10e4ef2Snarayan 	}
14131ae08745Sheppo 
1414d10e4ef2Snarayan 	if ((dring_msg->end_idx < 0) ||
1415d10e4ef2Snarayan 	    (dring_msg->end_idx >= vd->dring_len)) {
1416d10e4ef2Snarayan 		PRN("\"end_idx\" = %u; must be >= 0 and less than %u",
1417d10e4ef2Snarayan 		    dring_msg->end_idx, vd->dring_len);
1418d10e4ef2Snarayan 		return (EBADMSG);
1419d10e4ef2Snarayan 	}
1420d10e4ef2Snarayan 
1421d10e4ef2Snarayan 	/* Valid message; process range of updated dring elements */
1422d10e4ef2Snarayan 	PR1("Processing descriptor range, start = %u, end = %u",
1423d10e4ef2Snarayan 	    dring_msg->start_idx, dring_msg->end_idx);
1424d10e4ef2Snarayan 	return (vd_process_element_range(vd, dring_msg->start_idx,
1425d10e4ef2Snarayan 		dring_msg->end_idx, msg, msglen, msgsize));
14261ae08745Sheppo }
14271ae08745Sheppo 
14281ae08745Sheppo static int
14291ae08745Sheppo recv_msg(ldc_handle_t ldc_handle, void *msg, size_t *nbytes)
14301ae08745Sheppo {
14311ae08745Sheppo 	int	retry, status;
14321ae08745Sheppo 	size_t	size = *nbytes;
14331ae08745Sheppo 
14341ae08745Sheppo 
14351ae08745Sheppo 	for (retry = 0, status = ETIMEDOUT;
14361ae08745Sheppo 	    retry < vds_ldc_retries && status == ETIMEDOUT;
14371ae08745Sheppo 	    retry++) {
14381ae08745Sheppo 		PR1("ldc_read() attempt %d", (retry + 1));
14391ae08745Sheppo 		*nbytes = size;
14401ae08745Sheppo 		status = ldc_read(ldc_handle, msg, nbytes);
14411ae08745Sheppo 	}
14421ae08745Sheppo 
14431ae08745Sheppo 	if (status != 0) {
14441ae08745Sheppo 		PRN("ldc_read() returned errno %d", status);
14451ae08745Sheppo 		return (status);
14461ae08745Sheppo 	} else if (*nbytes == 0) {
14471ae08745Sheppo 		PR1("ldc_read() returned 0 and no message read");
14481ae08745Sheppo 		return (ENOMSG);
14491ae08745Sheppo 	}
14501ae08745Sheppo 
14511ae08745Sheppo 	PR1("RCVD %lu-byte message", *nbytes);
14521ae08745Sheppo 	return (0);
14531ae08745Sheppo }
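/*
 * Note on the retry loop above:  ldc_read() overwrites *nbytes with the
 * number of bytes actually read, so the caller's buffer size is saved in
 * "size" and restored before each attempt.  Only ETIMEDOUT is retried (up
 * to vds_ldc_retries attempts), and a zero-length read is reported to the
 * caller as ENOMSG rather than as success.
 */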
14541ae08745Sheppo 
14551ae08745Sheppo static int
1456d10e4ef2Snarayan vd_do_process_msg(vd_t *vd, vio_msg_t *msg, size_t msglen, size_t msgsize)
14571ae08745Sheppo {
14581ae08745Sheppo 	int		status;
14591ae08745Sheppo 
14601ae08745Sheppo 
14611ae08745Sheppo 	PR1("Processing (%x/%x/%x) message", msg->tag.vio_msgtype,
14621ae08745Sheppo 	    msg->tag.vio_subtype, msg->tag.vio_subtype_env);
14631ae08745Sheppo 
14641ae08745Sheppo 	/*
14651ae08745Sheppo 	 * Validate session ID up front, since it applies to all messages
14661ae08745Sheppo 	 * once set
14671ae08745Sheppo 	 */
14681ae08745Sheppo 	if ((msg->tag.vio_sid != vd->sid) && (vd->initialized & VD_SID)) {
14691ae08745Sheppo 		PRN("Expected SID %u, received %u", vd->sid,
14701ae08745Sheppo 		    msg->tag.vio_sid);
14711ae08745Sheppo 		return (EBADMSG);
14721ae08745Sheppo 	}
14731ae08745Sheppo 
14741ae08745Sheppo 
14751ae08745Sheppo 	/*
14761ae08745Sheppo 	 * Process the received message based on connection state
14771ae08745Sheppo 	 */
14781ae08745Sheppo 	switch (vd->state) {
14791ae08745Sheppo 	case VD_STATE_INIT:	/* expect version message */
14800a55fbb7Slm66018 		if ((status = vd_process_ver_msg(vd, msg, msglen)) != 0)
14811ae08745Sheppo 			return (status);
14821ae08745Sheppo 
14831ae08745Sheppo 		/* Version negotiated, move to that state */
14841ae08745Sheppo 		vd->state = VD_STATE_VER;
14851ae08745Sheppo 		return (0);
14861ae08745Sheppo 
14871ae08745Sheppo 	case VD_STATE_VER:	/* expect attribute message */
14881ae08745Sheppo 		if ((status = vd_process_attr_msg(vd, msg, msglen)) != 0)
14891ae08745Sheppo 			return (status);
14901ae08745Sheppo 
14911ae08745Sheppo 		/* Attributes exchanged, move to that state */
14921ae08745Sheppo 		vd->state = VD_STATE_ATTR;
14931ae08745Sheppo 		return (0);
14941ae08745Sheppo 
14951ae08745Sheppo 	case VD_STATE_ATTR:
14961ae08745Sheppo 		switch (vd->xfer_mode) {
14971ae08745Sheppo 		case VIO_DESC_MODE:	/* expect RDX message */
14981ae08745Sheppo 			if ((status = process_rdx_msg(msg, msglen)) != 0)
14991ae08745Sheppo 				return (status);
15001ae08745Sheppo 
15011ae08745Sheppo 			/* Ready to receive in-band descriptors */
15021ae08745Sheppo 			vd->state = VD_STATE_DATA;
15031ae08745Sheppo 			return (0);
15041ae08745Sheppo 
15051ae08745Sheppo 		case VIO_DRING_MODE:	/* expect register-dring message */
15061ae08745Sheppo 			if ((status =
15071ae08745Sheppo 				vd_process_dring_reg_msg(vd, msg, msglen)) != 0)
15081ae08745Sheppo 				return (status);
15091ae08745Sheppo 
15101ae08745Sheppo 			/* One dring negotiated, move to that state */
15111ae08745Sheppo 			vd->state = VD_STATE_DRING;
15121ae08745Sheppo 			return (0);
15131ae08745Sheppo 
15141ae08745Sheppo 		default:
15151ae08745Sheppo 			ASSERT("Unsupported transfer mode");
15161ae08745Sheppo 			PRN("Unsupported transfer mode");
15171ae08745Sheppo 			return (ENOTSUP);
15181ae08745Sheppo 		}
15191ae08745Sheppo 
15201ae08745Sheppo 	case VD_STATE_DRING:	/* expect RDX, register-dring, or unreg-dring */
15211ae08745Sheppo 		if ((status = process_rdx_msg(msg, msglen)) == 0) {
15221ae08745Sheppo 			/* Ready to receive data */
15231ae08745Sheppo 			vd->state = VD_STATE_DATA;
15241ae08745Sheppo 			return (0);
15251ae08745Sheppo 		} else if (status != ENOMSG) {
15261ae08745Sheppo 			return (status);
15271ae08745Sheppo 		}
15281ae08745Sheppo 
15291ae08745Sheppo 
15301ae08745Sheppo 		/*
15311ae08745Sheppo 		 * If another register-dring message is received, stay in
15321ae08745Sheppo 		 * dring state in case the client sends RDX; although the
15331ae08745Sheppo 		 * protocol allows multiple drings, this server does not
15341ae08745Sheppo 		 * support using more than one
15351ae08745Sheppo 		 */
15361ae08745Sheppo 		if ((status =
15371ae08745Sheppo 			vd_process_dring_reg_msg(vd, msg, msglen)) != ENOMSG)
15381ae08745Sheppo 			return (status);
15391ae08745Sheppo 
15401ae08745Sheppo 		/*
15411ae08745Sheppo 		 * Acknowledge an unregister-dring message, but reset the
15421ae08745Sheppo 		 * connection anyway:  Although the protocol allows
15431ae08745Sheppo 		 * unregistering drings, this server cannot serve a vdisk
15441ae08745Sheppo 		 * without its only dring
15451ae08745Sheppo 		 */
15461ae08745Sheppo 		status = vd_process_dring_unreg_msg(vd, msg, msglen);
15471ae08745Sheppo 		return ((status == 0) ? ENOTSUP : status);
15481ae08745Sheppo 
15491ae08745Sheppo 	case VD_STATE_DATA:
15501ae08745Sheppo 		switch (vd->xfer_mode) {
15511ae08745Sheppo 		case VIO_DESC_MODE:	/* expect in-band-descriptor message */
1552d10e4ef2Snarayan 			return (vd_process_desc_msg(vd, msg, msglen, msgsize));
15531ae08745Sheppo 
15541ae08745Sheppo 		case VIO_DRING_MODE:	/* expect dring-data or unreg-dring */
15551ae08745Sheppo 			/*
15561ae08745Sheppo 			 * Typically expect dring-data messages, so handle
15571ae08745Sheppo 			 * them first
15581ae08745Sheppo 			 */
15591ae08745Sheppo 			if ((status = vd_process_dring_msg(vd, msg,
1560d10e4ef2Snarayan 				    msglen, msgsize)) != ENOMSG)
15611ae08745Sheppo 				return (status);
15621ae08745Sheppo 
15631ae08745Sheppo 			/*
15641ae08745Sheppo 			 * Acknowledge an unregister-dring message, but reset
15651ae08745Sheppo 			 * the connection anyway:  Although the protocol
15661ae08745Sheppo 			 * allows unregistering drings, this server cannot
15671ae08745Sheppo 			 * serve a vdisk without its only dring
15681ae08745Sheppo 			 */
15691ae08745Sheppo 			status = vd_process_dring_unreg_msg(vd, msg, msglen);
15701ae08745Sheppo 			return ((status == 0) ? ENOTSUP : status);
15711ae08745Sheppo 
15721ae08745Sheppo 		default:
15731ae08745Sheppo 			ASSERT("Unsupported transfer mode");
15741ae08745Sheppo 			PRN("Unsupported transfer mode");
15751ae08745Sheppo 			return (ENOTSUP);
15761ae08745Sheppo 		}
15771ae08745Sheppo 
15781ae08745Sheppo 	default:
15791ae08745Sheppo 		ASSERT("Invalid client connection state");
15801ae08745Sheppo 		PRN("Invalid client connection state");
15811ae08745Sheppo 		return (ENOTSUP);
15821ae08745Sheppo 	}
15831ae08745Sheppo }
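/*
 * A successful handshake therefore walks the states above as follows (one
 * possible sequence; the client drives the order of messages):
 *
 *	VD_STATE_INIT   --version----->  VD_STATE_VER
 *	VD_STATE_VER    --attributes-->  VD_STATE_ATTR
 *	VD_STATE_ATTR   --reg-dring--->  VD_STATE_DRING	(VIO_DRING_MODE)
 *	VD_STATE_DRING  --RDX--------->  VD_STATE_DATA
 *
 * or, for in-band clients such as OBP (VIO_DESC_MODE):
 *
 *	VD_STATE_ATTR   --RDX--------->  VD_STATE_DATA
 *
 * after which only data messages (and unregister-dring, which results in a
 * connection reset) are expected.
 */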
15841ae08745Sheppo 
1585d10e4ef2Snarayan static int
1586d10e4ef2Snarayan vd_process_msg(vd_t *vd, vio_msg_t *msg, size_t msglen, size_t msgsize)
15871ae08745Sheppo {
15881ae08745Sheppo 	int		status;
15891ae08745Sheppo 	boolean_t	reset_ldc = B_FALSE;
15901ae08745Sheppo 
15911ae08745Sheppo 
15921ae08745Sheppo 	/*
15931ae08745Sheppo 	 * Check that the message is at least big enough for a "tag", so that
15941ae08745Sheppo 	 * message processing can proceed based on tag-specified message type
15951ae08745Sheppo 	 */
15961ae08745Sheppo 	if (msglen < sizeof (vio_msg_tag_t)) {
15971ae08745Sheppo 		PRN("Received short (%lu-byte) message", msglen);
15981ae08745Sheppo 		/* Can't "nack" short message, so drop the big hammer */
1599d10e4ef2Snarayan 		vd_need_reset(vd, B_TRUE);
1600d10e4ef2Snarayan 		return (EBADMSG);
16011ae08745Sheppo 	}
16021ae08745Sheppo 
16031ae08745Sheppo 	/*
16041ae08745Sheppo 	 * Process the message
16051ae08745Sheppo 	 */
1606d10e4ef2Snarayan 	switch (status = vd_do_process_msg(vd, msg, msglen, msgsize)) {
16071ae08745Sheppo 	case 0:
16081ae08745Sheppo 		/* "ack" valid, successfully-processed messages */
16091ae08745Sheppo 		msg->tag.vio_subtype = VIO_SUBTYPE_ACK;
16101ae08745Sheppo 		break;
16111ae08745Sheppo 
1612d10e4ef2Snarayan 	case EINPROGRESS:
1613d10e4ef2Snarayan 		/* The completion handler will "ack" or "nack" the message */
1614d10e4ef2Snarayan 		return (EINPROGRESS);
16151ae08745Sheppo 	case ENOMSG:
16161ae08745Sheppo 		PRN("Received unexpected message");
16171ae08745Sheppo 		_NOTE(FALLTHROUGH);
16181ae08745Sheppo 	case EBADMSG:
16191ae08745Sheppo 	case ENOTSUP:
16201ae08745Sheppo 		/* "nack" invalid messages */
16211ae08745Sheppo 		msg->tag.vio_subtype = VIO_SUBTYPE_NACK;
16221ae08745Sheppo 		break;
16231ae08745Sheppo 
16241ae08745Sheppo 	default:
16251ae08745Sheppo 		/* "nack" failed messages */
16261ae08745Sheppo 		msg->tag.vio_subtype = VIO_SUBTYPE_NACK;
16271ae08745Sheppo 		/* An LDC error probably occurred, so try resetting it */
16281ae08745Sheppo 		reset_ldc = B_TRUE;
16291ae08745Sheppo 		break;
16301ae08745Sheppo 	}
16311ae08745Sheppo 
1632d10e4ef2Snarayan 	/* Send the "ack" or "nack" to the client */
16331ae08745Sheppo 	PR1("Sending %s",
16341ae08745Sheppo 	    (msg->tag.vio_subtype == VIO_SUBTYPE_ACK) ? "ACK" : "NACK");
16351ae08745Sheppo 	if (send_msg(vd->ldc_handle, msg, msglen) != 0)
16361ae08745Sheppo 		reset_ldc = B_TRUE;
16371ae08745Sheppo 
1638d10e4ef2Snarayan 	/* Arrange to reset the connection for nack'ed or failed messages */
16391ae08745Sheppo 	if ((status != 0) || reset_ldc)
1640d10e4ef2Snarayan 		vd_need_reset(vd, reset_ldc);
1641d10e4ef2Snarayan 
1642d10e4ef2Snarayan 	return (status);
1643d10e4ef2Snarayan }
1644d10e4ef2Snarayan 
1645d10e4ef2Snarayan static boolean_t
1646d10e4ef2Snarayan vd_enabled(vd_t *vd)
1647d10e4ef2Snarayan {
1648d10e4ef2Snarayan 	boolean_t	enabled;
1649d10e4ef2Snarayan 
1650d10e4ef2Snarayan 
1651d10e4ef2Snarayan 	mutex_enter(&vd->lock);
1652d10e4ef2Snarayan 	enabled = vd->enabled;
1653d10e4ef2Snarayan 	mutex_exit(&vd->lock);
1654d10e4ef2Snarayan 	return (enabled);
16551ae08745Sheppo }
16561ae08745Sheppo 
16571ae08745Sheppo static void
16580a55fbb7Slm66018 vd_recv_msg(void *arg)
16591ae08745Sheppo {
16601ae08745Sheppo 	vd_t	*vd = (vd_t *)arg;
16610a55fbb7Slm66018 	int	status = 0;
16621ae08745Sheppo 
16631ae08745Sheppo 
16641ae08745Sheppo 	ASSERT(vd != NULL);
1665d10e4ef2Snarayan 	PR2("New task to receive incoming message(s)");
1666d10e4ef2Snarayan 	while (vd_enabled(vd) && status == 0) {
1667d10e4ef2Snarayan 		size_t		msglen, msgsize;
1668d10e4ef2Snarayan 		vio_msg_t	*vio_msg;
1669d10e4ef2Snarayan 
1670d10e4ef2Snarayan 
16710a55fbb7Slm66018 		/*
1672d10e4ef2Snarayan 		 * Receive and process a message
16730a55fbb7Slm66018 		 */
1674d10e4ef2Snarayan 		vd_reset_if_needed(vd);	/* can change vd->max_msglen */
1675d10e4ef2Snarayan 		msgsize = vd->max_msglen;	/* stable copy for alloc/free */
1676d10e4ef2Snarayan 		msglen	= msgsize;	/* actual length after recv_msg() */
1677d10e4ef2Snarayan 		vio_msg = kmem_alloc(msgsize, KM_SLEEP);
1678d10e4ef2Snarayan 		if ((status = recv_msg(vd->ldc_handle, vio_msg, &msglen)) ==
1679d10e4ef2Snarayan 		    0) {
1680d10e4ef2Snarayan 			if (vd_process_msg(vd, vio_msg, msglen, msgsize) ==
1681d10e4ef2Snarayan 			    EINPROGRESS)
1682d10e4ef2Snarayan 				continue;	/* handler will free msg */
1683d10e4ef2Snarayan 		} else if (status != ENOMSG) {
1684d10e4ef2Snarayan 			/* Probably an LDC failure; arrange to reset it */
1685d10e4ef2Snarayan 			vd_need_reset(vd, B_TRUE);
16860a55fbb7Slm66018 		}
1687d10e4ef2Snarayan 		kmem_free(vio_msg, msgsize);
16881ae08745Sheppo 	}
1689d10e4ef2Snarayan 	PR2("Task finished");
16900a55fbb7Slm66018 }
16910a55fbb7Slm66018 
16920a55fbb7Slm66018 static uint_t
16931ae08745Sheppo vd_handle_ldc_events(uint64_t event, caddr_t arg)
16941ae08745Sheppo {
16951ae08745Sheppo 	vd_t	*vd = (vd_t *)(void *)arg;
16961ae08745Sheppo 
16971ae08745Sheppo 
16981ae08745Sheppo 	ASSERT(vd != NULL);
1699d10e4ef2Snarayan 
1700d10e4ef2Snarayan 	if (!vd_enabled(vd))
1701d10e4ef2Snarayan 		return (LDC_SUCCESS);
1702d10e4ef2Snarayan 
1703d10e4ef2Snarayan 	if (event & LDC_EVT_RESET) {
1704d10e4ef2Snarayan 		PR0("LDC channel was reset");
1705d10e4ef2Snarayan 		return (LDC_SUCCESS);
1706d10e4ef2Snarayan 	}
1707d10e4ef2Snarayan 
1708d10e4ef2Snarayan 	if (event & LDC_EVT_UP) {
1709d10e4ef2Snarayan 		PR0("LDC channel came up:  Resetting client connection state");
1710d10e4ef2Snarayan 		vd_need_reset(vd, B_FALSE);
1711d10e4ef2Snarayan 	}
1712d10e4ef2Snarayan 
1713d10e4ef2Snarayan 	if (event & LDC_EVT_READ) {
1714d10e4ef2Snarayan 		int	status;
1715d10e4ef2Snarayan 
1716d10e4ef2Snarayan 		PR1("New data available");
1717d10e4ef2Snarayan 		/* Queue a task to receive the new data */
1718d10e4ef2Snarayan 		status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, vd,
1719d10e4ef2Snarayan 		    DDI_SLEEP);
1720d10e4ef2Snarayan 		/* ddi_taskq_dispatch(9f) guarantees success with DDI_SLEEP */
1721d10e4ef2Snarayan 		ASSERT(status == DDI_SUCCESS);
1722d10e4ef2Snarayan 	}
1723d10e4ef2Snarayan 
1724d10e4ef2Snarayan 	return (LDC_SUCCESS);
17251ae08745Sheppo }
17261ae08745Sheppo 
17271ae08745Sheppo static uint_t
17281ae08745Sheppo vds_check_for_vd(mod_hash_key_t key, mod_hash_val_t *val, void *arg)
17291ae08745Sheppo {
17301ae08745Sheppo 	_NOTE(ARGUNUSED(key, val))
17311ae08745Sheppo 	(*((uint_t *)arg))++;
17321ae08745Sheppo 	return (MH_WALK_TERMINATE);
17331ae08745Sheppo }
17341ae08745Sheppo 
17351ae08745Sheppo 
17361ae08745Sheppo static int
17371ae08745Sheppo vds_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
17381ae08745Sheppo {
17391ae08745Sheppo 	uint_t	vd_present = 0;
17401ae08745Sheppo 	minor_t	instance;
17411ae08745Sheppo 	vds_t	*vds;
17421ae08745Sheppo 
17431ae08745Sheppo 
17441ae08745Sheppo 	switch (cmd) {
17451ae08745Sheppo 	case DDI_DETACH:
17461ae08745Sheppo 		/* the real work happens below */
17471ae08745Sheppo 		break;
17481ae08745Sheppo 	case DDI_SUSPEND:
1749d10e4ef2Snarayan 		PR0("No action required for DDI_SUSPEND");
17501ae08745Sheppo 		return (DDI_SUCCESS);
17511ae08745Sheppo 	default:
1752d10e4ef2Snarayan 		PRN("Unrecognized \"cmd\"");
17531ae08745Sheppo 		return (DDI_FAILURE);
17541ae08745Sheppo 	}
17551ae08745Sheppo 
17561ae08745Sheppo 	ASSERT(cmd == DDI_DETACH);
17571ae08745Sheppo 	instance = ddi_get_instance(dip);
17581ae08745Sheppo 	if ((vds = ddi_get_soft_state(vds_state, instance)) == NULL) {
17591ae08745Sheppo 		PRN("Could not get state for instance %u", instance);
17601ae08745Sheppo 		ddi_soft_state_free(vds_state, instance);
17611ae08745Sheppo 		return (DDI_FAILURE);
17621ae08745Sheppo 	}
17631ae08745Sheppo 
17641ae08745Sheppo 	/* Do not detach when serving any vdisks */
17651ae08745Sheppo 	mod_hash_walk(vds->vd_table, vds_check_for_vd, &vd_present);
17661ae08745Sheppo 	if (vd_present) {
17671ae08745Sheppo 		PR0("Not detaching because serving vdisks");
17681ae08745Sheppo 		return (DDI_FAILURE);
17691ae08745Sheppo 	}
17701ae08745Sheppo 
17711ae08745Sheppo 	PR0("Detaching");
17721ae08745Sheppo 	if (vds->initialized & VDS_MDEG)
17731ae08745Sheppo 		(void) mdeg_unregister(vds->mdeg);
17741ae08745Sheppo 	if (vds->initialized & VDS_LDI)
17751ae08745Sheppo 		(void) ldi_ident_release(vds->ldi_ident);
17761ae08745Sheppo 	mod_hash_destroy_hash(vds->vd_table);
17771ae08745Sheppo 	ddi_soft_state_free(vds_state, instance);
17781ae08745Sheppo 	return (DDI_SUCCESS);
17791ae08745Sheppo }
17801ae08745Sheppo 
17811ae08745Sheppo static boolean_t
17821ae08745Sheppo is_pseudo_device(dev_info_t *dip)
17831ae08745Sheppo {
17841ae08745Sheppo 	dev_info_t	*parent, *root = ddi_root_node();
17851ae08745Sheppo 
17861ae08745Sheppo 
17871ae08745Sheppo 	for (parent = ddi_get_parent(dip); (parent != NULL) && (parent != root);
17881ae08745Sheppo 	    parent = ddi_get_parent(parent)) {
17891ae08745Sheppo 		if (strcmp(ddi_get_name(parent), DEVI_PSEUDO_NEXNAME) == 0)
17901ae08745Sheppo 			return (B_TRUE);
17911ae08745Sheppo 	}
17921ae08745Sheppo 
17931ae08745Sheppo 	return (B_FALSE);
17941ae08745Sheppo }
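/*
 * For example (the paths are hypothetical):  a backing device exported
 * from under /pseudo, such as a ramdisk or a metadevice, has an ancestor
 * devinfo node named DEVI_PSEUDO_NEXNAME and is served by vd_setup_vd()
 * below as a single-slice vdisk, whereas a slice of a physical disk under
 * a /pci nexus is not "pseudo" and may instead be set up as a full disk.
 */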
17951ae08745Sheppo 
17961ae08745Sheppo static int
17970a55fbb7Slm66018 vd_setup_full_disk(vd_t *vd)
17980a55fbb7Slm66018 {
17990a55fbb7Slm66018 	int		rval, status;
18000a55fbb7Slm66018 	major_t		major = getmajor(vd->dev[0]);
18010a55fbb7Slm66018 	minor_t		minor = getminor(vd->dev[0]) - VD_ENTIRE_DISK_SLICE;
18020a55fbb7Slm66018 	struct vtoc	vtoc;
18030a55fbb7Slm66018 
18040a55fbb7Slm66018 
18050a55fbb7Slm66018 	/* Get the VTOC for slice sizes */
18060a55fbb7Slm66018 	if ((status = ldi_ioctl(vd->ldi_handle[0], DKIOCGVTOC, (intptr_t)&vtoc,
1807d10e4ef2Snarayan 		    (vd_open_flags | FKIOCTL), kcred, &rval)) != 0) {
18080a55fbb7Slm66018 		PRN("ldi_ioctl(DKIOCGVTOC) returned errno %d", status);
18090a55fbb7Slm66018 		return (status);
18100a55fbb7Slm66018 	}
18110a55fbb7Slm66018 
18120a55fbb7Slm66018 	/* Set full-disk parameters */
18130a55fbb7Slm66018 	vd->vdisk_type	= VD_DISK_TYPE_DISK;
18140a55fbb7Slm66018 	vd->nslices	= (sizeof (vd->dev))/(sizeof (vd->dev[0]));
18150a55fbb7Slm66018 
18160a55fbb7Slm66018 	/* Move dev number and LDI handle to entire-disk-slice array elements */
18170a55fbb7Slm66018 	vd->dev[VD_ENTIRE_DISK_SLICE]		= vd->dev[0];
18180a55fbb7Slm66018 	vd->dev[0]				= 0;
18190a55fbb7Slm66018 	vd->ldi_handle[VD_ENTIRE_DISK_SLICE]	= vd->ldi_handle[0];
18200a55fbb7Slm66018 	vd->ldi_handle[0]			= NULL;
18210a55fbb7Slm66018 
18220a55fbb7Slm66018 	/* Initialize device numbers for remaining slices and open them */
18230a55fbb7Slm66018 	for (int slice = 0; slice < vd->nslices; slice++) {
18240a55fbb7Slm66018 		/*
18250a55fbb7Slm66018 		 * Skip the entire-disk slice, as it's already open and its
18260a55fbb7Slm66018 		 * device known
18270a55fbb7Slm66018 		 */
18280a55fbb7Slm66018 		if (slice == VD_ENTIRE_DISK_SLICE)
18290a55fbb7Slm66018 			continue;
18300a55fbb7Slm66018 		ASSERT(vd->dev[slice] == 0);
18310a55fbb7Slm66018 		ASSERT(vd->ldi_handle[slice] == NULL);
18320a55fbb7Slm66018 
18330a55fbb7Slm66018 		/*
18340a55fbb7Slm66018 		 * Construct the device number for the current slice
18350a55fbb7Slm66018 		 */
18360a55fbb7Slm66018 		vd->dev[slice] = makedevice(major, (minor + slice));
18370a55fbb7Slm66018 
18380a55fbb7Slm66018 		/*
18390a55fbb7Slm66018 		 * At least some underlying drivers refuse to open
18400a55fbb7Slm66018 		 * devices for (currently) zero-length slices, so skip
18410a55fbb7Slm66018 		 * them for now
18420a55fbb7Slm66018 		 */
18430a55fbb7Slm66018 		if (vtoc.v_part[slice].p_size == 0) {
18440a55fbb7Slm66018 			PR0("Skipping zero-length slice %u", slice);
18450a55fbb7Slm66018 			continue;
18460a55fbb7Slm66018 		}
18470a55fbb7Slm66018 
18480a55fbb7Slm66018 		/*
18490a55fbb7Slm66018 		 * Open all non-empty slices of the disk to serve them to the
18500a55fbb7Slm66018 		 * client.  Slices are opened exclusively to prevent other
18510a55fbb7Slm66018 		 * threads or processes in the service domain from performing
18520a55fbb7Slm66018 		 * I/O to slices being accessed by a client.  Failure to open
18530a55fbb7Slm66018 		 * a slice results in vds not serving this disk, as the client
18540a55fbb7Slm66018 		 * could attempt (and should be able) to access any non-empty
18550a55fbb7Slm66018 		 * slice immediately.  Any slices successfully opened before a
18560a55fbb7Slm66018 		 * failure will get closed by vds_destroy_vd() as a result of
18570a55fbb7Slm66018 		 * the error returned by this function.
18580a55fbb7Slm66018 		 */
18590a55fbb7Slm66018 		PR0("Opening device major %u, minor %u = slice %u",
18600a55fbb7Slm66018 		    major, minor, slice);
18610a55fbb7Slm66018 		if ((status = ldi_open_by_dev(&vd->dev[slice], OTYP_BLK,
18620a55fbb7Slm66018 			    vd_open_flags, kcred, &vd->ldi_handle[slice],
18630a55fbb7Slm66018 			    vd->vds->ldi_ident)) != 0) {
18640a55fbb7Slm66018 			PRN("ldi_open_by_dev() returned errno %d "
18650a55fbb7Slm66018 			    "for slice %u", status, slice);
18660a55fbb7Slm66018 			/* vds_destroy_vd() will close any open slices */
18670a55fbb7Slm66018 			return (status);
18680a55fbb7Slm66018 		}
18690a55fbb7Slm66018 	}
18700a55fbb7Slm66018 
18710a55fbb7Slm66018 	return (0);
18720a55fbb7Slm66018 }
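/*
 * Device-number arithmetic above, by example (the minor numbers are
 * hypothetical, but the code relies on the usual one-minor-per-slice
 * layout):  if the configured backing device is the entire-disk slice with
 * minor number 16, then "minor" is 16 - VD_ENTIRE_DISK_SLICE, i.e. the
 * minor number of slice 0, and each remaining slice s is opened as
 * makedevice(major, minor + s).
 */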
18730a55fbb7Slm66018 
18740a55fbb7Slm66018 static int
1875*e1ebb9ecSlm66018 vd_setup_vd(char *device_path, vd_t *vd)
18761ae08745Sheppo {
1877*e1ebb9ecSlm66018 	int		rval, status;
18781ae08745Sheppo 	dev_info_t	*dip;
18791ae08745Sheppo 	struct dk_cinfo	dk_cinfo;
18801ae08745Sheppo 
18811ae08745Sheppo 
1882*e1ebb9ecSlm66018 	if ((status = ldi_open_by_name(device_path, vd_open_flags, kcred,
18830a55fbb7Slm66018 		    &vd->ldi_handle[0], vd->vds->ldi_ident)) != 0) {
1884*e1ebb9ecSlm66018 		PRN("ldi_open_by_name(%s) = errno %d", device_path, status);
18850a55fbb7Slm66018 		return (status);
18860a55fbb7Slm66018 	}
18870a55fbb7Slm66018 
1888*e1ebb9ecSlm66018 	/* Get device number and size of backing device */
18890a55fbb7Slm66018 	if ((status = ldi_get_dev(vd->ldi_handle[0], &vd->dev[0])) != 0) {
18901ae08745Sheppo 		PRN("ldi_get_dev() returned errno %d for %s",
1891*e1ebb9ecSlm66018 		    status, device_path);
18921ae08745Sheppo 		return (status);
18931ae08745Sheppo 	}
18940a55fbb7Slm66018 	if (ldi_get_size(vd->ldi_handle[0], &vd->vdisk_size) != DDI_SUCCESS) {
1895*e1ebb9ecSlm66018 		PRN("ldi_get_size() failed for %s", device_path);
18961ae08745Sheppo 		return (EIO);
18971ae08745Sheppo 	}
1898*e1ebb9ecSlm66018 	vd->vdisk_size = lbtodb(vd->vdisk_size);	/* convert to blocks */
18991ae08745Sheppo 
1900*e1ebb9ecSlm66018 	/* Verify backing device supports dk_cinfo, dk_geom, and vtoc */
1901*e1ebb9ecSlm66018 	if ((status = ldi_ioctl(vd->ldi_handle[0], DKIOCINFO,
1902*e1ebb9ecSlm66018 		    (intptr_t)&dk_cinfo, (vd_open_flags | FKIOCTL), kcred,
1903*e1ebb9ecSlm66018 		    &rval)) != 0) {
1904*e1ebb9ecSlm66018 		PRN("ldi_ioctl(DKIOCINFO) returned errno %d for %s",
1905*e1ebb9ecSlm66018 		    status, device_path);
1906*e1ebb9ecSlm66018 		return (status);
1907*e1ebb9ecSlm66018 	}
1908*e1ebb9ecSlm66018 	if (dk_cinfo.dki_partition >= V_NUMPAR) {
1909*e1ebb9ecSlm66018 		PRN("slice %u >= maximum slice %u for %s",
1910*e1ebb9ecSlm66018 		    dk_cinfo.dki_partition, V_NUMPAR, device_path);
1911*e1ebb9ecSlm66018 		return (EIO);
1912*e1ebb9ecSlm66018 	}
1913*e1ebb9ecSlm66018 	if ((status = ldi_ioctl(vd->ldi_handle[0], DKIOCGGEOM,
1914*e1ebb9ecSlm66018 		    (intptr_t)&vd->dk_geom, (vd_open_flags | FKIOCTL), kcred,
1915*e1ebb9ecSlm66018 		    &rval)) != 0) {
1916*e1ebb9ecSlm66018 		PRN("ldi_ioctl(DKIOCGGEOM) returned errno %d for %s",
1917*e1ebb9ecSlm66018 		    status, device_path);
1918*e1ebb9ecSlm66018 		return (status);
1919*e1ebb9ecSlm66018 	}
1920*e1ebb9ecSlm66018 	if ((status = ldi_ioctl(vd->ldi_handle[0], DKIOCGVTOC,
1921*e1ebb9ecSlm66018 		    (intptr_t)&vd->vtoc, (vd_open_flags | FKIOCTL), kcred,
1922*e1ebb9ecSlm66018 		    &rval)) != 0) {
1923*e1ebb9ecSlm66018 		PRN("ldi_ioctl(DKIOCGVTOC) returned errno %d for %s",
1924*e1ebb9ecSlm66018 		    status, device_path);
1925*e1ebb9ecSlm66018 		return (status);
1926*e1ebb9ecSlm66018 	}
1927*e1ebb9ecSlm66018 
1928*e1ebb9ecSlm66018 	/* Store the device's max transfer size for return to the client */
1929*e1ebb9ecSlm66018 	vd->max_xfer_sz = dk_cinfo.dki_maxtransfer;
1930*e1ebb9ecSlm66018 
1931*e1ebb9ecSlm66018 
1932*e1ebb9ecSlm66018 	/* Determine if backing device is a pseudo device */
19331ae08745Sheppo 	if ((dip = ddi_hold_devi_by_instance(getmajor(vd->dev[0]),
19341ae08745Sheppo 		    dev_to_instance(vd->dev[0]), 0))  == NULL) {
1935*e1ebb9ecSlm66018 		PRN("%s is no longer accessible", device_path);
19361ae08745Sheppo 		return (EIO);
19371ae08745Sheppo 	}
19381ae08745Sheppo 	vd->pseudo = is_pseudo_device(dip);
19391ae08745Sheppo 	ddi_release_devi(dip);
19401ae08745Sheppo 	if (vd->pseudo) {
19411ae08745Sheppo 		vd->vdisk_type	= VD_DISK_TYPE_SLICE;
19421ae08745Sheppo 		vd->nslices	= 1;
19431ae08745Sheppo 		return (0);	/* ...and we're done */
19441ae08745Sheppo 	}
19451ae08745Sheppo 
19461ae08745Sheppo 
19470a55fbb7Slm66018 	/* If slice is entire-disk slice, initialize for full disk */
19480a55fbb7Slm66018 	if (dk_cinfo.dki_partition == VD_ENTIRE_DISK_SLICE)
19490a55fbb7Slm66018 		return (vd_setup_full_disk(vd));
19501ae08745Sheppo 
19510a55fbb7Slm66018 
1952*e1ebb9ecSlm66018 	/* Otherwise, we have a non-entire slice of a device */
19531ae08745Sheppo 	vd->vdisk_type	= VD_DISK_TYPE_SLICE;
19541ae08745Sheppo 	vd->nslices	= 1;
19551ae08745Sheppo 
19561ae08745Sheppo 
1957*e1ebb9ecSlm66018 	/* Initialize dk_geom structure for single-slice device */
19581ae08745Sheppo 	if (vd->dk_geom.dkg_nsect == 0) {
1959*e1ebb9ecSlm66018 		PRN("%s geometry claims 0 sectors per track", device_path);
19601ae08745Sheppo 		return (EIO);
19611ae08745Sheppo 	}
19621ae08745Sheppo 	if (vd->dk_geom.dkg_nhead == 0) {
1963*e1ebb9ecSlm66018 		PRN("%s geometry claims 0 heads", device_path);
19641ae08745Sheppo 		return (EIO);
19651ae08745Sheppo 	}
19661ae08745Sheppo 	vd->dk_geom.dkg_ncyl =
1967*e1ebb9ecSlm66018 	    vd->vdisk_size/vd->dk_geom.dkg_nsect/vd->dk_geom.dkg_nhead;
19681ae08745Sheppo 	vd->dk_geom.dkg_acyl = 0;
19691ae08745Sheppo 	vd->dk_geom.dkg_pcyl = vd->dk_geom.dkg_ncyl + vd->dk_geom.dkg_acyl;
19701ae08745Sheppo 
19711ae08745Sheppo 
1972*e1ebb9ecSlm66018 	/* Initialize vtoc structure for single-slice device */
19731ae08745Sheppo 	bcopy(VD_VOLUME_NAME, vd->vtoc.v_volume,
19741ae08745Sheppo 	    MIN(sizeof (VD_VOLUME_NAME), sizeof (vd->vtoc.v_volume)));
19751ae08745Sheppo 	bzero(vd->vtoc.v_part, sizeof (vd->vtoc.v_part));
19761ae08745Sheppo 	vd->vtoc.v_nparts = 1;
19771ae08745Sheppo 	vd->vtoc.v_part[0].p_tag = V_UNASSIGNED;
19781ae08745Sheppo 	vd->vtoc.v_part[0].p_flag = 0;
19791ae08745Sheppo 	vd->vtoc.v_part[0].p_start = 0;
1980*e1ebb9ecSlm66018 	vd->vtoc.v_part[0].p_size = vd->vdisk_size;
19811ae08745Sheppo 	bcopy(VD_ASCIILABEL, vd->vtoc.v_asciilabel,
19821ae08745Sheppo 	    MIN(sizeof (VD_ASCIILABEL), sizeof (vd->vtoc.v_asciilabel)));
19831ae08745Sheppo 
19841ae08745Sheppo 
19851ae08745Sheppo 	return (0);
19861ae08745Sheppo }
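/*
 * Example of the synthetic geometry and label computed above for a
 * single-slice vdisk (the values are hypothetical):  a slice of 4194304
 * blocks on a device reporting dkg_nsect == 128 and dkg_nhead == 16 is
 * presented with
 *
 *	dkg_ncyl = 4194304 / 128 / 16 = 2048, dkg_acyl = 0, dkg_pcyl = 2048
 *
 * and a one-partition VTOC whose single slice starts at block 0 and spans
 * the entire vdisk.
 */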
19871ae08745Sheppo 
19881ae08745Sheppo static int
1989*e1ebb9ecSlm66018 vds_do_init_vd(vds_t *vds, uint64_t id, char *device_path, uint64_t ldc_id,
19901ae08745Sheppo     vd_t **vdp)
19911ae08745Sheppo {
19921ae08745Sheppo 	char			tq_name[TASKQ_NAMELEN];
19930a55fbb7Slm66018 	int			status;
19941ae08745Sheppo 	ddi_iblock_cookie_t	iblock = NULL;
19951ae08745Sheppo 	ldc_attr_t		ldc_attr;
19961ae08745Sheppo 	vd_t			*vd;
19971ae08745Sheppo 
19981ae08745Sheppo 
19991ae08745Sheppo 	ASSERT(vds != NULL);
2000*e1ebb9ecSlm66018 	ASSERT(device_path != NULL);
20011ae08745Sheppo 	ASSERT(vdp != NULL);
2002*e1ebb9ecSlm66018 	PR0("Adding vdisk for %s", device_path);
20031ae08745Sheppo 
20041ae08745Sheppo 	if ((vd = kmem_zalloc(sizeof (*vd), KM_NOSLEEP)) == NULL) {
20051ae08745Sheppo 		PRN("No memory for virtual disk");
20061ae08745Sheppo 		return (EAGAIN);
20071ae08745Sheppo 	}
20081ae08745Sheppo 	*vdp = vd;	/* assign here so vds_destroy_vd() can cleanup later */
20091ae08745Sheppo 	vd->vds = vds;
20101ae08745Sheppo 
20111ae08745Sheppo 
20120a55fbb7Slm66018 	/* Open vdisk and initialize parameters */
2013*e1ebb9ecSlm66018 	if ((status = vd_setup_vd(device_path, vd)) != 0)
20141ae08745Sheppo 		return (status);
20151ae08745Sheppo 	ASSERT(vd->nslices > 0 && vd->nslices <= V_NUMPAR);
20161ae08745Sheppo 	PR0("vdisk_type = %s, pseudo = %s, nslices = %u",
20171ae08745Sheppo 	    ((vd->vdisk_type == VD_DISK_TYPE_DISK) ? "disk" : "slice"),
20181ae08745Sheppo 	    (vd->pseudo ? "yes" : "no"), vd->nslices);
20191ae08745Sheppo 
20201ae08745Sheppo 
20211ae08745Sheppo 	/* Initialize locking */
20221ae08745Sheppo 	if (ddi_get_soft_iblock_cookie(vds->dip, DDI_SOFTINT_MED,
20231ae08745Sheppo 		&iblock) != DDI_SUCCESS) {
20241ae08745Sheppo 		PRN("Could not get iblock cookie.");
20251ae08745Sheppo 		return (EIO);
20261ae08745Sheppo 	}
20271ae08745Sheppo 
20281ae08745Sheppo 	mutex_init(&vd->lock, NULL, MUTEX_DRIVER, iblock);
20291ae08745Sheppo 	vd->initialized |= VD_LOCKING;
20301ae08745Sheppo 
20311ae08745Sheppo 
2032d10e4ef2Snarayan 	/* Create start and completion task queues for the vdisk */
2033d10e4ef2Snarayan 	(void) snprintf(tq_name, sizeof (tq_name), "vd_startq%lu", id);
20341ae08745Sheppo 	PR1("tq_name = %s", tq_name);
2035d10e4ef2Snarayan 	if ((vd->startq = ddi_taskq_create(vds->dip, tq_name, 1,
20361ae08745Sheppo 		    TASKQ_DEFAULTPRI, 0)) == NULL) {
20371ae08745Sheppo 		PRN("Could not create task queue");
20381ae08745Sheppo 		return (EIO);
20391ae08745Sheppo 	}
2040d10e4ef2Snarayan 	(void) snprintf(tq_name, sizeof (tq_name), "vd_completionq%lu", id);
2041d10e4ef2Snarayan 	PR1("tq_name = %s", tq_name);
2042d10e4ef2Snarayan 	if ((vd->completionq = ddi_taskq_create(vds->dip, tq_name, 1,
2043d10e4ef2Snarayan 		    TASKQ_DEFAULTPRI, 0)) == NULL) {
2044d10e4ef2Snarayan 		PRN("Could not create task queue");
2045d10e4ef2Snarayan 		return (EIO);
2046d10e4ef2Snarayan 	}
2047d10e4ef2Snarayan 	vd->enabled = 1;	/* before callback can dispatch to startq */
20481ae08745Sheppo 
20491ae08745Sheppo 
20501ae08745Sheppo 	/* Bring up LDC */
20511ae08745Sheppo 	ldc_attr.devclass	= LDC_DEV_BLK_SVC;
20521ae08745Sheppo 	ldc_attr.instance	= ddi_get_instance(vds->dip);
20531ae08745Sheppo 	ldc_attr.mode		= LDC_MODE_UNRELIABLE;
2054*e1ebb9ecSlm66018 	ldc_attr.mtu		= VD_LDC_MTU;
20551ae08745Sheppo 	if ((status = ldc_init(ldc_id, &ldc_attr, &vd->ldc_handle)) != 0) {
20561ae08745Sheppo 		PRN("ldc_init(%lu) = errno %d", ldc_id, status);
20571ae08745Sheppo 		return (status);
20581ae08745Sheppo 	}
20591ae08745Sheppo 	vd->initialized |= VD_LDC;
20601ae08745Sheppo 
20611ae08745Sheppo 	if ((status = ldc_reg_callback(vd->ldc_handle, vd_handle_ldc_events,
20621ae08745Sheppo 		(caddr_t)vd)) != 0) {
20631ae08745Sheppo 		PRN("ldc_reg_callback() returned errno %d", status);
20641ae08745Sheppo 		return (status);
20651ae08745Sheppo 	}
20661ae08745Sheppo 
20671ae08745Sheppo 	if ((status = ldc_open(vd->ldc_handle)) != 0) {
20681ae08745Sheppo 		PRN("ldc_open() returned errno %d", status);
20691ae08745Sheppo 		return (status);
20701ae08745Sheppo 	}
20711ae08745Sheppo 
20721ae08745Sheppo 
20731ae08745Sheppo 	/* Add the successfully-initialized vdisk to the server's table */
20741ae08745Sheppo 	if (mod_hash_insert(vds->vd_table, (mod_hash_key_t)id, vd) != 0) {
20751ae08745Sheppo 		PRN("Error adding vdisk ID %lu to table", id);
20761ae08745Sheppo 		return (EIO);
20771ae08745Sheppo 	}
20781ae08745Sheppo 
20791ae08745Sheppo 	return (0);
20801ae08745Sheppo }
20811ae08745Sheppo 
20821ae08745Sheppo /*
20831ae08745Sheppo  * Destroy the state associated with a virtual disk
20841ae08745Sheppo  */
20851ae08745Sheppo static void
20861ae08745Sheppo vds_destroy_vd(void *arg)
20871ae08745Sheppo {
20881ae08745Sheppo 	vd_t	*vd = (vd_t *)arg;
20891ae08745Sheppo 
20901ae08745Sheppo 
20911ae08745Sheppo 	if (vd == NULL)
20921ae08745Sheppo 		return;
20931ae08745Sheppo 
2094d10e4ef2Snarayan 	PR0("Destroying vdisk state");
2095d10e4ef2Snarayan 
20961ae08745Sheppo 	/* Disable queuing requests for the vdisk */
20971ae08745Sheppo 	if (vd->initialized & VD_LOCKING) {
20981ae08745Sheppo 		mutex_enter(&vd->lock);
20991ae08745Sheppo 		vd->enabled = 0;
21001ae08745Sheppo 		mutex_exit(&vd->lock);
21011ae08745Sheppo 	}
21021ae08745Sheppo 
2103d10e4ef2Snarayan 	/* Drain and destroy start queue (*before* destroying completionq) */
2104d10e4ef2Snarayan 	if (vd->startq != NULL)
2105d10e4ef2Snarayan 		ddi_taskq_destroy(vd->startq);	/* waits for queued tasks */
2106d10e4ef2Snarayan 
2107d10e4ef2Snarayan 	/* Drain and destroy completion queue (*before* shutting down LDC) */
2108d10e4ef2Snarayan 	if (vd->completionq != NULL)
2109d10e4ef2Snarayan 		ddi_taskq_destroy(vd->completionq);	/* waits for tasks */
2110d10e4ef2Snarayan 
2111d10e4ef2Snarayan 	if (vd->dring_task != NULL) {
2112d10e4ef2Snarayan 		ASSERT(vd->dring_len != 0);
2113d10e4ef2Snarayan 		kmem_free(vd->dring_task,
2114d10e4ef2Snarayan 		    (sizeof (*vd->dring_task)) * vd->dring_len);
2115d10e4ef2Snarayan 	}
21161ae08745Sheppo 
21171ae08745Sheppo 	/* Shut down LDC */
21181ae08745Sheppo 	if (vd->initialized & VD_LDC) {
21191ae08745Sheppo 		if (vd->initialized & VD_DRING)
21201ae08745Sheppo 			(void) ldc_mem_dring_unmap(vd->dring_handle);
21211ae08745Sheppo 		(void) ldc_unreg_callback(vd->ldc_handle);
21221ae08745Sheppo 		(void) ldc_close(vd->ldc_handle);
21231ae08745Sheppo 		(void) ldc_fini(vd->ldc_handle);
21241ae08745Sheppo 	}
21251ae08745Sheppo 
21261ae08745Sheppo 	/* Close any open backing-device slices */
21271ae08745Sheppo 	for (uint_t slice = 0; slice < vd->nslices; slice++) {
21281ae08745Sheppo 		if (vd->ldi_handle[slice] != NULL) {
21291ae08745Sheppo 			PR0("Closing slice %u", slice);
21301ae08745Sheppo 			(void) ldi_close(vd->ldi_handle[slice],
21311ae08745Sheppo 			    vd_open_flags, kcred);
21321ae08745Sheppo 		}
21331ae08745Sheppo 	}
21341ae08745Sheppo 
21351ae08745Sheppo 	/* Free lock */
21361ae08745Sheppo 	if (vd->initialized & VD_LOCKING)
21371ae08745Sheppo 		mutex_destroy(&vd->lock);
21381ae08745Sheppo 
21391ae08745Sheppo 	/* Finally, free the vdisk structure itself */
21401ae08745Sheppo 	kmem_free(vd, sizeof (*vd));
21411ae08745Sheppo }
21421ae08745Sheppo 
21431ae08745Sheppo static int
2144*e1ebb9ecSlm66018 vds_init_vd(vds_t *vds, uint64_t id, char *device_path, uint64_t ldc_id)
21451ae08745Sheppo {
21461ae08745Sheppo 	int	status;
21471ae08745Sheppo 	vd_t	*vd = NULL;
21481ae08745Sheppo 
21491ae08745Sheppo 
21501ae08745Sheppo #ifdef lint
21511ae08745Sheppo 	(void) vd;
21521ae08745Sheppo #endif	/* lint */
21531ae08745Sheppo 
2154*e1ebb9ecSlm66018 	if ((status = vds_do_init_vd(vds, id, device_path, ldc_id, &vd)) != 0)
21551ae08745Sheppo 		vds_destroy_vd(vd);
21561ae08745Sheppo 
21571ae08745Sheppo 	return (status);
21581ae08745Sheppo }
21591ae08745Sheppo 
21601ae08745Sheppo static int
21611ae08745Sheppo vds_do_get_ldc_id(md_t *md, mde_cookie_t vd_node, mde_cookie_t *channel,
21621ae08745Sheppo     uint64_t *ldc_id)
21631ae08745Sheppo {
21641ae08745Sheppo 	int	num_channels;
21651ae08745Sheppo 
21661ae08745Sheppo 
21671ae08745Sheppo 	/* Look for channel endpoint child(ren) of the vdisk MD node */
21681ae08745Sheppo 	if ((num_channels = md_scan_dag(md, vd_node,
21691ae08745Sheppo 		    md_find_name(md, VD_CHANNEL_ENDPOINT),
21701ae08745Sheppo 		    md_find_name(md, "fwd"), channel)) <= 0) {
21711ae08745Sheppo 		PRN("No \"%s\" found for virtual disk", VD_CHANNEL_ENDPOINT);
21721ae08745Sheppo 		return (-1);
21731ae08745Sheppo 	}
21741ae08745Sheppo 
21751ae08745Sheppo 	/* Get the "id" value for the first channel endpoint node */
21761ae08745Sheppo 	if (md_get_prop_val(md, channel[0], VD_ID_PROP, ldc_id) != 0) {
21771ae08745Sheppo 		PRN("No \"%s\" property found for \"%s\" of vdisk",
21781ae08745Sheppo 		    VD_ID_PROP, VD_CHANNEL_ENDPOINT);
21791ae08745Sheppo 		return (-1);
21801ae08745Sheppo 	}
21811ae08745Sheppo 
21821ae08745Sheppo 	if (num_channels > 1) {
21831ae08745Sheppo 		PRN("Using ID of first of multiple channels for this vdisk");
21841ae08745Sheppo 	}
21851ae08745Sheppo 
21861ae08745Sheppo 	return (0);
21871ae08745Sheppo }
21881ae08745Sheppo 
21891ae08745Sheppo static int
21901ae08745Sheppo vds_get_ldc_id(md_t *md, mde_cookie_t vd_node, uint64_t *ldc_id)
21911ae08745Sheppo {
21921ae08745Sheppo 	int		num_nodes, status;
21931ae08745Sheppo 	size_t		size;
21941ae08745Sheppo 	mde_cookie_t	*channel;
21951ae08745Sheppo 
21961ae08745Sheppo 
21971ae08745Sheppo 	if ((num_nodes = md_node_count(md)) <= 0) {
21981ae08745Sheppo 		PRN("Invalid node count in Machine Description subtree");
21991ae08745Sheppo 		return (-1);
22001ae08745Sheppo 	}
22011ae08745Sheppo 	size = num_nodes*(sizeof (*channel));
22021ae08745Sheppo 	channel = kmem_zalloc(size, KM_SLEEP);
22031ae08745Sheppo 	status = vds_do_get_ldc_id(md, vd_node, channel, ldc_id);
22041ae08745Sheppo 	kmem_free(channel, size);
22051ae08745Sheppo 
22061ae08745Sheppo 	return (status);
22071ae08745Sheppo }
22081ae08745Sheppo 
22091ae08745Sheppo static void
22101ae08745Sheppo vds_add_vd(vds_t *vds, md_t *md, mde_cookie_t vd_node)
22111ae08745Sheppo {
2212*e1ebb9ecSlm66018 	char		*device_path = NULL;
22131ae08745Sheppo 	uint64_t	id = 0, ldc_id = 0;
22141ae08745Sheppo 
22151ae08745Sheppo 
22161ae08745Sheppo 	if (md_get_prop_val(md, vd_node, VD_ID_PROP, &id) != 0) {
22171ae08745Sheppo 		PRN("Error getting vdisk \"%s\"", VD_ID_PROP);
22181ae08745Sheppo 		return;
22191ae08745Sheppo 	}
22201ae08745Sheppo 	PR0("Adding vdisk ID %lu", id);
22211ae08745Sheppo 	if (md_get_prop_str(md, vd_node, VD_BLOCK_DEVICE_PROP,
2222*e1ebb9ecSlm66018 		&device_path) != 0) {
22231ae08745Sheppo 		PRN("Error getting vdisk \"%s\"", VD_BLOCK_DEVICE_PROP);
22241ae08745Sheppo 		return;
22251ae08745Sheppo 	}
22261ae08745Sheppo 
22271ae08745Sheppo 	if (vds_get_ldc_id(md, vd_node, &ldc_id) != 0) {
22281ae08745Sheppo 		PRN("Error getting LDC ID for vdisk %lu", id);
22291ae08745Sheppo 		return;
22301ae08745Sheppo 	}
22311ae08745Sheppo 
2232*e1ebb9ecSlm66018 	if (vds_init_vd(vds, id, device_path, ldc_id) != 0) {
22331ae08745Sheppo 		PRN("Failed to add vdisk ID %lu", id);
22341ae08745Sheppo 		return;
22351ae08745Sheppo 	}
22361ae08745Sheppo }
22371ae08745Sheppo 
22381ae08745Sheppo static void
22391ae08745Sheppo vds_remove_vd(vds_t *vds, md_t *md, mde_cookie_t vd_node)
22401ae08745Sheppo {
22411ae08745Sheppo 	uint64_t	id = 0;
22421ae08745Sheppo 
22431ae08745Sheppo 
22441ae08745Sheppo 	if (md_get_prop_val(md, vd_node, VD_ID_PROP, &id) != 0) {
22451ae08745Sheppo 		PRN("Unable to get \"%s\" property from vdisk's MD node",
22461ae08745Sheppo 		    VD_ID_PROP);
22471ae08745Sheppo 		return;
22481ae08745Sheppo 	}
22491ae08745Sheppo 	PR0("Removing vdisk ID %lu", id);
22501ae08745Sheppo 	if (mod_hash_destroy(vds->vd_table, (mod_hash_key_t)id) != 0)
22511ae08745Sheppo 		PRN("No vdisk entry found for vdisk ID %lu", id);
22521ae08745Sheppo }
22531ae08745Sheppo 
22541ae08745Sheppo static void
22551ae08745Sheppo vds_change_vd(vds_t *vds, md_t *prev_md, mde_cookie_t prev_vd_node,
22561ae08745Sheppo     md_t *curr_md, mde_cookie_t curr_vd_node)
22571ae08745Sheppo {
22581ae08745Sheppo 	char		*curr_dev, *prev_dev;
22591ae08745Sheppo 	uint64_t	curr_id = 0, curr_ldc_id = 0;
22601ae08745Sheppo 	uint64_t	prev_id = 0, prev_ldc_id = 0;
22611ae08745Sheppo 	size_t		len;
22621ae08745Sheppo 
22631ae08745Sheppo 
22641ae08745Sheppo 	/* Validate that vdisk ID has not changed */
22651ae08745Sheppo 	if (md_get_prop_val(prev_md, prev_vd_node, VD_ID_PROP, &prev_id) != 0) {
22661ae08745Sheppo 		PRN("Error getting previous vdisk \"%s\" property",
22671ae08745Sheppo 		    VD_ID_PROP);
22681ae08745Sheppo 		return;
22691ae08745Sheppo 	}
22701ae08745Sheppo 	if (md_get_prop_val(curr_md, curr_vd_node, VD_ID_PROP, &curr_id) != 0) {
22711ae08745Sheppo 		PRN("Error getting current vdisk \"%s\" property", VD_ID_PROP);
22721ae08745Sheppo 		return;
22731ae08745Sheppo 	}
22741ae08745Sheppo 	if (curr_id != prev_id) {
22751ae08745Sheppo 		PRN("Not changing vdisk:  ID changed from %lu to %lu",
22761ae08745Sheppo 		    prev_id, curr_id);
22771ae08745Sheppo 		return;
22781ae08745Sheppo 	}
22791ae08745Sheppo 
22801ae08745Sheppo 	/* Validate that LDC ID has not changed */
22811ae08745Sheppo 	if (vds_get_ldc_id(prev_md, prev_vd_node, &prev_ldc_id) != 0) {
22821ae08745Sheppo 		PRN("Error getting LDC ID for vdisk %lu", prev_id);
22831ae08745Sheppo 		return;
22841ae08745Sheppo 	}
22851ae08745Sheppo 
22861ae08745Sheppo 	if (vds_get_ldc_id(curr_md, curr_vd_node, &curr_ldc_id) != 0) {
22871ae08745Sheppo 		PRN("Error getting LDC ID for vdisk %lu", curr_id);
22881ae08745Sheppo 		return;
22891ae08745Sheppo 	}
22901ae08745Sheppo 	if (curr_ldc_id != prev_ldc_id) {
22910a55fbb7Slm66018 		_NOTE(NOTREACHED);	/* lint is confused */
22921ae08745Sheppo 		PRN("Not changing vdisk:  "
22931ae08745Sheppo 		    "LDC ID changed from %lu to %lu", prev_ldc_id, curr_ldc_id);
22941ae08745Sheppo 		return;
22951ae08745Sheppo 	}
22961ae08745Sheppo 
22971ae08745Sheppo 	/* Determine whether device path has changed */
22981ae08745Sheppo 	if (md_get_prop_str(prev_md, prev_vd_node, VD_BLOCK_DEVICE_PROP,
22991ae08745Sheppo 		&prev_dev) != 0) {
23001ae08745Sheppo 		PRN("Error getting previous vdisk \"%s\"",
23011ae08745Sheppo 		    VD_BLOCK_DEVICE_PROP);
23021ae08745Sheppo 		return;
23031ae08745Sheppo 	}
23041ae08745Sheppo 	if (md_get_prop_str(curr_md, curr_vd_node, VD_BLOCK_DEVICE_PROP,
23051ae08745Sheppo 		&curr_dev) != 0) {
23061ae08745Sheppo 		PRN("Error getting current vdisk \"%s\"", VD_BLOCK_DEVICE_PROP);
23071ae08745Sheppo 		return;
23081ae08745Sheppo 	}
23091ae08745Sheppo 	if (((len = strlen(curr_dev)) == strlen(prev_dev)) &&
23101ae08745Sheppo 	    (strncmp(curr_dev, prev_dev, len) == 0))
23111ae08745Sheppo 		return;	/* no relevant (supported) change */
23121ae08745Sheppo 
23131ae08745Sheppo 	PR0("Changing vdisk ID %lu", prev_id);
23141ae08745Sheppo 	/* Remove old state, which will close vdisk and reset */
23151ae08745Sheppo 	if (mod_hash_destroy(vds->vd_table, (mod_hash_key_t)prev_id) != 0)
23161ae08745Sheppo 		PRN("No entry found for vdisk ID %lu", prev_id);
23171ae08745Sheppo 	/* Re-initialize vdisk with new state */
23181ae08745Sheppo 	if (vds_init_vd(vds, curr_id, curr_dev, curr_ldc_id) != 0) {
23191ae08745Sheppo 		PRN("Failed to change vdisk ID %lu", curr_id);
23201ae08745Sheppo 		return;
23211ae08745Sheppo 	}
23221ae08745Sheppo }
23231ae08745Sheppo 
23241ae08745Sheppo static int
23251ae08745Sheppo vds_process_md(void *arg, mdeg_result_t *md)
23261ae08745Sheppo {
23271ae08745Sheppo 	int	i;
23281ae08745Sheppo 	vds_t	*vds = arg;
23291ae08745Sheppo 
23301ae08745Sheppo 
23311ae08745Sheppo 	if (md == NULL)
23321ae08745Sheppo 		return (MDEG_FAILURE);
23331ae08745Sheppo 	ASSERT(vds != NULL);
23341ae08745Sheppo 
23351ae08745Sheppo 	for (i = 0; i < md->removed.nelem; i++)
23361ae08745Sheppo 		vds_remove_vd(vds, md->removed.mdp, md->removed.mdep[i]);
23371ae08745Sheppo 	for (i = 0; i < md->match_curr.nelem; i++)
23381ae08745Sheppo 		vds_change_vd(vds, md->match_prev.mdp, md->match_prev.mdep[i],
23391ae08745Sheppo 		    md->match_curr.mdp, md->match_curr.mdep[i]);
23401ae08745Sheppo 	for (i = 0; i < md->added.nelem; i++)
23411ae08745Sheppo 		vds_add_vd(vds, md->added.mdp, md->added.mdep[i]);
23421ae08745Sheppo 
23431ae08745Sheppo 	return (MDEG_SUCCESS);
23441ae08745Sheppo }
23451ae08745Sheppo 
23461ae08745Sheppo static int
23471ae08745Sheppo vds_do_attach(dev_info_t *dip)
23481ae08745Sheppo {
23491ae08745Sheppo 	static char	reg_prop[] = "reg";	/* devinfo ID prop */
23501ae08745Sheppo 
23511ae08745Sheppo 	/* MDEG specification for a (particular) vds node */
23521ae08745Sheppo 	static mdeg_prop_spec_t	vds_prop_spec[] = {
23531ae08745Sheppo 		{MDET_PROP_STR, "name", {VDS_NAME}},
23541ae08745Sheppo 		{MDET_PROP_VAL, "cfg-handle", {0}},
23551ae08745Sheppo 		{MDET_LIST_END, NULL, {0}}};
23561ae08745Sheppo 	static mdeg_node_spec_t	vds_spec = {"virtual-device", vds_prop_spec};
23571ae08745Sheppo 
23581ae08745Sheppo 	/* MDEG specification for matching a vd node */
23591ae08745Sheppo 	static md_prop_match_t	vd_prop_spec[] = {
23601ae08745Sheppo 		{MDET_PROP_VAL, VD_ID_PROP},
23611ae08745Sheppo 		{MDET_LIST_END, NULL}};
23621ae08745Sheppo 	static mdeg_node_match_t vd_spec = {"virtual-device-port",
23631ae08745Sheppo 					    vd_prop_spec};
23641ae08745Sheppo 
23651ae08745Sheppo 	int			status;
23661ae08745Sheppo 	uint64_t		cfg_handle;
23671ae08745Sheppo 	minor_t			instance = ddi_get_instance(dip);
23681ae08745Sheppo 	vds_t			*vds;
23691ae08745Sheppo 
23701ae08745Sheppo 
23711ae08745Sheppo 	/*
23721ae08745Sheppo 	 * The "cfg-handle" property of a vds node in an MD contains the MD's
23731ae08745Sheppo 	 * notion of "instance", or unique identifier, for that node; OBP
23741ae08745Sheppo 	 * stores the value of the "cfg-handle" MD property as the value of
23751ae08745Sheppo 	 * the "reg" property on the node in the device tree it builds from
23761ae08745Sheppo 	 * the MD and passes to Solaris.  Thus, we look up the devinfo node's
23771ae08745Sheppo 	 * "reg" property value to uniquely identify this device instance when
23781ae08745Sheppo 	 * registering with the MD event-generation framework.  If the "reg"
23791ae08745Sheppo 	 * property cannot be found, the device tree state is presumably so
23801ae08745Sheppo 	 * broken that there is no point in continuing.
23811ae08745Sheppo 	 */
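	/*
	 * For illustration (value hypothetical): a vds MD node with
	 * cfg-handle = 1 appears in the devinfo tree with reg = 1, and that
	 * is the value assigned to vds_prop_spec[1].ps_val below so that
	 * MDEG matches only this instance's vds node.
	 */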
23821ae08745Sheppo 	if (!ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, reg_prop)) {
23831ae08745Sheppo 		PRN("vds \"%s\" property does not exist", reg_prop);
23841ae08745Sheppo 		return (DDI_FAILURE);
23851ae08745Sheppo 	}
23861ae08745Sheppo 
23871ae08745Sheppo 	/* Get the MD instance for later MDEG registration */
23881ae08745Sheppo 	cfg_handle = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
23891ae08745Sheppo 	    reg_prop, -1);
23901ae08745Sheppo 
23911ae08745Sheppo 	if (ddi_soft_state_zalloc(vds_state, instance) != DDI_SUCCESS) {
23921ae08745Sheppo 		PRN("Could not allocate state for instance %u", instance);
23931ae08745Sheppo 		return (DDI_FAILURE);
23941ae08745Sheppo 	}
23951ae08745Sheppo 
23961ae08745Sheppo 	if ((vds = ddi_get_soft_state(vds_state, instance)) == NULL) {
23971ae08745Sheppo 		PRN("Could not get state for instance %u", instance);
23981ae08745Sheppo 		ddi_soft_state_free(vds_state, instance);
23991ae08745Sheppo 		return (DDI_FAILURE);
24001ae08745Sheppo 	}
24011ae08745Sheppo 
24021ae08745Sheppo 
24031ae08745Sheppo 	vds->dip = dip;
24041ae08745Sheppo 	vds->vd_table = mod_hash_create_ptrhash("vds_vd_table", VDS_NCHAINS,
24051ae08745Sheppo 	    vds_destroy_vd, sizeof (void *));
24071ae08745Sheppo 	ASSERT(vds->vd_table != NULL);
24081ae08745Sheppo 
24091ae08745Sheppo 	if ((status = ldi_ident_from_dip(dip, &vds->ldi_ident)) != 0) {
24101ae08745Sheppo 		PRN("ldi_ident_from_dip() returned errno %d", status);
24111ae08745Sheppo 		return (DDI_FAILURE);
24121ae08745Sheppo 	}
24131ae08745Sheppo 	vds->initialized |= VDS_LDI;
24141ae08745Sheppo 
24151ae08745Sheppo 	/* Register for MD updates */
24161ae08745Sheppo 	vds_prop_spec[1].ps_val = cfg_handle;
24171ae08745Sheppo 	if (mdeg_register(&vds_spec, &vd_spec, vds_process_md, vds,
24181ae08745Sheppo 	    &vds->mdeg) != MDEG_SUCCESS) {
24191ae08745Sheppo 		PRN("Unable to register for MD updates");
24201ae08745Sheppo 		return (DDI_FAILURE);
24211ae08745Sheppo 	}
24221ae08745Sheppo 	vds->initialized |= VDS_MDEG;
24231ae08745Sheppo 
24240a55fbb7Slm66018 	/* Prevent auto-detaching so the driver stays loaded across MD changes */
24250a55fbb7Slm66018 	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip, DDI_NO_AUTODETACH, 1) !=
24260a55fbb7Slm66018 	    DDI_PROP_SUCCESS) {
24270a55fbb7Slm66018 		PRN("Failed to set \"%s\" property for instance %u",
24280a55fbb7Slm66018 		    DDI_NO_AUTODETACH, instance);
24290a55fbb7Slm66018 	}
24300a55fbb7Slm66018 
24311ae08745Sheppo 	ddi_report_dev(dip);
24321ae08745Sheppo 	return (DDI_SUCCESS);
24331ae08745Sheppo }
24341ae08745Sheppo 
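/*
 * attach(9E) entry point.  DDI_ATTACH delegates to vds_do_attach() and, on
 * failure, invokes vds_detach() to release whatever was partially set up;
 * DDI_RESUME requires no additional work.
 */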
24351ae08745Sheppo static int
24361ae08745Sheppo vds_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
24371ae08745Sheppo {
24381ae08745Sheppo 	int	status;
24391ae08745Sheppo 
24401ae08745Sheppo 	switch (cmd) {
24411ae08745Sheppo 	case DDI_ATTACH:
2442d10e4ef2Snarayan 		PR0("Attaching");
24431ae08745Sheppo 		if ((status = vds_do_attach(dip)) != DDI_SUCCESS)
24441ae08745Sheppo 			(void) vds_detach(dip, DDI_DETACH);
24451ae08745Sheppo 		return (status);
24461ae08745Sheppo 	case DDI_RESUME:
2447d10e4ef2Snarayan 		PR0("No action required for DDI_RESUME");
24481ae08745Sheppo 		return (DDI_SUCCESS);
24491ae08745Sheppo 	default:
24501ae08745Sheppo 		return (DDI_FAILURE);
24511ae08745Sheppo 	}
24521ae08745Sheppo }
24531ae08745Sheppo 
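/*
 * Device and module linkage structures.  devo_cb_ops is NULL: in this
 * version the server appears to be driven entirely by MD updates and LDC
 * traffic rather than by opens of its own device node.
 */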
24541ae08745Sheppo static struct dev_ops vds_ops = {
24551ae08745Sheppo 	DEVO_REV,	/* devo_rev */
24561ae08745Sheppo 	0,		/* devo_refcnt */
24571ae08745Sheppo 	ddi_no_info,	/* devo_getinfo */
24581ae08745Sheppo 	nulldev,	/* devo_identify */
24591ae08745Sheppo 	nulldev,	/* devo_probe */
24601ae08745Sheppo 	vds_attach,	/* devo_attach */
24611ae08745Sheppo 	vds_detach,	/* devo_detach */
24621ae08745Sheppo 	nodev,		/* devo_reset */
24631ae08745Sheppo 	NULL,		/* devo_cb_ops */
24641ae08745Sheppo 	NULL,		/* devo_bus_ops */
24651ae08745Sheppo 	nulldev		/* devo_power */
24661ae08745Sheppo };
24671ae08745Sheppo 
24681ae08745Sheppo static struct modldrv modldrv = {
24691ae08745Sheppo 	&mod_driverops,
24701ae08745Sheppo 	"virtual disk server v%I%",
24711ae08745Sheppo 	&vds_ops,
24721ae08745Sheppo };
24731ae08745Sheppo 
24741ae08745Sheppo static struct modlinkage modlinkage = {
24751ae08745Sheppo 	MODREV_1,
24761ae08745Sheppo 	&modldrv,
24771ae08745Sheppo 	NULL
24781ae08745Sheppo };
24791ae08745Sheppo 
24801ae08745Sheppo 
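/*
 * _init(9E): initialize the soft-state framework, install the module, and
 * build the bit-mask of operations this server supports.  If mod_install()
 * fails, the soft-state framework is torn down so a failed load leaves
 * nothing behind.
 */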
24811ae08745Sheppo int
24821ae08745Sheppo _init(void)
24831ae08745Sheppo {
24841ae08745Sheppo 	int		i, status;
24851ae08745Sheppo 
2486d10e4ef2Snarayan 
24871ae08745Sheppo 	if ((status = ddi_soft_state_init(&vds_state, sizeof (vds_t), 1)) != 0)
24881ae08745Sheppo 		return (status);
24891ae08745Sheppo 	if ((status = mod_install(&modlinkage)) != 0) {
24901ae08745Sheppo 		ddi_soft_state_fini(&vds_state);
24911ae08745Sheppo 		return (status);
24921ae08745Sheppo 	}
24931ae08745Sheppo 
24941ae08745Sheppo 	/* Fill in the bit-mask of server-supported operations */
24951ae08745Sheppo 	for (i = 0; i < vds_noperations; i++)
24961ae08745Sheppo 		vds_operations |= 1 << (vds_operation[i].operation - 1);
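	/*
	 * Each operation code N contributes bit (N - 1); for example, codes
	 * 1 and 3 would yield a mask of 0x1 | 0x4 == 0x5, advertising
	 * exactly the operations present in vds_operation[].
	 */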
24971ae08745Sheppo 
24981ae08745Sheppo 	return (0);
24991ae08745Sheppo }
25001ae08745Sheppo 
25011ae08745Sheppo int
25021ae08745Sheppo _info(struct modinfo *modinfop)
25031ae08745Sheppo {
25041ae08745Sheppo 	return (mod_info(&modlinkage, modinfop));
25051ae08745Sheppo }
25061ae08745Sheppo 
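/*
 * _fini(9E): release the soft-state framework only after mod_remove()
 * succeeds; a failure means the module is still in use and the driver
 * state must remain intact.
 */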
25071ae08745Sheppo int
25081ae08745Sheppo _fini(void)
25091ae08745Sheppo {
25101ae08745Sheppo 	int	status;
25111ae08745Sheppo 
2512d10e4ef2Snarayan 
25131ae08745Sheppo 	if ((status = mod_remove(&modlinkage)) != 0)
25141ae08745Sheppo 		return (status);
25151ae08745Sheppo 	ddi_soft_state_fini(&vds_state);
25161ae08745Sheppo 	return (0);
25171ae08745Sheppo }
2518