/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Virtual disk server
 */


#include <sys/types.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/dkio.h>
#include <sys/file.h>
#include <sys/mdeg.h>
#include <sys/modhash.h>
#include <sys/note.h>
#include <sys/pathname.h>
#include <sys/sunddi.h>
#include <sys/sunldi.h>
#include <sys/sysmacros.h>
#include <sys/vio_common.h>
#include <sys/vdsk_mailbox.h>
#include <sys/vdsk_common.h>
#include <sys/vtoc.h>


/* Virtual disk server initialization flags */
#define	VDS_LDI			0x01
#define	VDS_MDEG		0x02

/* Virtual disk server tunable parameters */
#define	VDS_LDC_RETRIES		3
#define	VDS_NCHAINS		32

/* Identification parameters for MD, synthetic dkio(7i) structures, etc. */
#define	VDS_NAME		"virtual-disk-server"

#define	VD_NAME			"vd"
#define	VD_VOLUME_NAME		"vdisk"
#define	VD_ASCIILABEL		"Virtual Disk"

#define	VD_CHANNEL_ENDPOINT	"channel-endpoint"
#define	VD_ID_PROP		"id"
#define	VD_BLOCK_DEVICE_PROP	"vds-block-device"

/* Virtual disk initialization flags */
#define	VD_LOCKING		0x01
#define	VD_LDC			0x02
#define	VD_DRING		0x04
#define	VD_SID			0x08
#define	VD_SEQ_NUM		0x10

/* Flags for opening/closing backing devices via LDI */
#define	VD_OPEN_FLAGS		(FEXCL | FREAD | FWRITE)

/*
 * By Solaris convention, slice/partition 2 represents the entire disk;
 * unfortunately, this convention does not appear to be codified.
 */
#define	VD_ENTIRE_DISK_SLICE	2
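
/*
 * For example, the slice-2 device node for a disk (e.g. /dev/dsk/c0t0d0s2)
 * addresses every block on that disk; vd_setup_full_disk() below relies on
 * this convention when deriving the minor numbers of the other slices.
 */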

/* Return a cpp token as a string */
#define	STRINGIZE(token)	#token
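
/*
 * For instance, STRINGIZE(DKIOCGVTOC) expands to the string literal
 * "DKIOCGVTOC"; the ioctl table in vd_ioctl() below uses this to pair each
 * operation and ioctl command code with its printable name for messages.
 */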

/*
 * Print a message prefixed with the current function name to the message log
 * (and optionally to the console for verbose boots); these macros use cpp's
 * concatenation of string literals and C99 variable-length-argument-list
 * macros
 */
#define	PRN(...)	_PRN("?%s():  "__VA_ARGS__, "")
#define	_PRN(format, ...)					\
	cmn_err(CE_CONT, format"%s", __func__, __VA_ARGS__)
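
/*
 * As an illustration (vd_example() is a hypothetical caller), the call
 *
 *	PRN("slice %u", slice);
 *
 * inside vd_example() expands via _PRN() to
 *
 *	cmn_err(CE_CONT, "?%s():  " "slice %u" "%s", __func__, slice, "");
 *
 * which logs "vd_example():  slice 0"; the trailing empty-string argument
 * lets PRN() accept a format string with no arguments at all.
 */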

/* Return a pointer to the "i"th vdisk dring element */
#define	VD_DRING_ELEM(i)	((vd_dring_entry_t *)(void *)	\
	    (vd->dring + (i)*vd->descriptor_size))

/* Return the virtual disk client's type as a string (for use in messages) */
#define	VD_CLIENT(vd)							\
	(((vd)->xfer_mode == VIO_DESC_MODE) ? "in-band client" :	\
	    (((vd)->xfer_mode == VIO_DRING_MODE) ? "dring client" :	\
		(((vd)->xfer_mode == 0) ? "null client" :		\
		    "unsupported client")))

/* Debugging macros */
#ifdef DEBUG
#define	PR0 if (vd_msglevel > 0)	PRN
#define	PR1 if (vd_msglevel > 1)	PRN
#define	PR2 if (vd_msglevel > 2)	PRN

#define	VD_DUMP_DRING_ELEM(elem)					\
	PRN("dst:%x op:%x st:%u nb:%lx addr:%lx ncook:%u\n",		\
	    elem->hdr.dstate,						\
	    elem->payload.operation,					\
	    elem->payload.status,					\
	    elem->payload.nbytes,					\
	    elem->payload.addr,						\
	    elem->payload.ncookies);

#else	/* !DEBUG */
#define	PR0(...)
#define	PR1(...)
#define	PR2(...)

#define	VD_DUMP_DRING_ELEM(elem)

#endif	/* DEBUG */


/*
 * Soft state structure for a vds instance
 */
typedef struct vds {
	uint_t		initialized;	/* driver inst initialization flags */
	dev_info_t	*dip;		/* driver inst devinfo pointer */
	ldi_ident_t	ldi_ident;	/* driver's identifier for LDI */
	mod_hash_t	*vd_table;	/* table of virtual disks served */
	mdeg_handle_t	mdeg;		/* handle for MDEG operations  */
} vds_t;

/*
 * Types of descriptor-processing tasks
 */
typedef enum vd_task_type {
	VD_NONFINAL_RANGE_TASK,	/* task for intermediate descriptor in range */
	VD_FINAL_RANGE_TASK,	/* task for last in a range of descriptors */
} vd_task_type_t;

/*
 * Structure describing the task for processing a descriptor
 */
typedef struct vd_task {
	struct vd		*vd;		/* vd instance task is for */
	vd_task_type_t		type;		/* type of descriptor task */
	int			index;		/* dring elem index for task */
	vio_msg_t		*msg;		/* VIO message task is for */
	size_t			msglen;		/* length of message content */
	size_t			msgsize;	/* size of message buffer */
	vd_dring_payload_t	*request;	/* request task will perform */
	struct buf		buf;		/* buf(9s) for I/O request */

} vd_task_t;

/*
 * Soft state structure for a virtual disk instance
 */
typedef struct vd {
	uint_t			initialized;	/* vdisk initialization flags */
	vds_t			*vds;		/* server for this vdisk */
	ddi_taskq_t		*startq;	/* queue for I/O start tasks */
	ddi_taskq_t		*completionq;	/* queue for completion tasks */
	ldi_handle_t		ldi_handle[V_NUMPAR];	/* LDI slice handles */
	dev_t			dev[V_NUMPAR];	/* dev numbers for slices */
	uint_t			nslices;	/* number of slices */
	size_t			vdisk_size;	/* number of blocks in vdisk */
	vd_disk_type_t		vdisk_type;	/* slice or entire disk */
	ushort_t		max_xfer_sz;	/* max xfer size in DEV_BSIZE */
	boolean_t		pseudo;		/* underlying pseudo dev */
	struct dk_geom		dk_geom;	/* synthetic for slice type */
	struct vtoc		vtoc;		/* synthetic for slice type */
	ldc_status_t		ldc_state;	/* LDC connection state */
	ldc_handle_t		ldc_handle;	/* handle for LDC comm */
	size_t			max_msglen;	/* largest LDC message len */
	vd_state_t		state;		/* client handshake state */
	uint8_t			xfer_mode;	/* transfer mode with client */
	uint32_t		sid;		/* client's session ID */
	uint64_t		seq_num;	/* message sequence number */
	uint64_t		dring_ident;	/* identifier of dring */
	ldc_dring_handle_t	dring_handle;	/* handle for dring ops */
	uint32_t		descriptor_size;	/* num bytes in desc */
	uint32_t		dring_len;	/* number of dring elements */
	caddr_t			dring;		/* address of dring */
	vd_task_t		inband_task;	/* task for inband descriptor */
	vd_task_t		*dring_task;	/* tasks for dring elements */

	kmutex_t		lock;		/* protects variables below */
	boolean_t		enabled;	/* is vdisk enabled? */
	boolean_t		reset_state;	/* reset connection state? */
	boolean_t		reset_ldc;	/* reset LDC channel? */
} vd_t;

typedef struct vds_operation {
	uint8_t	operation;
	int	(*start)(vd_task_t *task);
	void	(*complete)(void *arg);
} vds_operation_t;

typedef struct vd_ioctl {
	uint8_t		operation;		/* vdisk operation */
	const char	*operation_name;	/* vdisk operation name */
	size_t		nbytes;			/* size of operation buffer */
	int		cmd;			/* corresponding ioctl cmd */
	const char	*cmd_name;		/* ioctl cmd name */
	void		*arg;			/* ioctl cmd argument */
	/* convert input vd_buf to output ioctl_arg */
	void		(*copyin)(void *vd_buf, void *ioctl_arg);
	/* convert input ioctl_arg to output vd_buf */
	void		(*copyout)(void *ioctl_arg, void *vd_buf);
} vd_ioctl_t;

/* Define trivial copyin/copyout conversion function flag */
#define	VD_IDENTITY	((void (*)(void *, void *))-1)


static int	vds_ldc_retries = VDS_LDC_RETRIES;
static void	*vds_state;
static uint64_t	vds_operations;	/* see vds_operation[] definition below */

static int	vd_open_flags = VD_OPEN_FLAGS;

/*
 * Supported protocol version pairs, from highest (newest) to lowest (oldest)
 *
 * Each supported major version should appear only once, paired with (and only
 * with) its highest supported minor version number (as the protocol requires
 * supporting all lower minor version numbers as well)
 */
static const vio_ver_t	vds_version[] = {{1, 0}};
static const size_t	vds_num_versions =
    sizeof (vds_version)/sizeof (vds_version[0]);
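
/*
 * If, hypothetically, a 2.1 protocol were supported alongside 1.0, the
 * array would list each major version exactly once, newest first:
 *
 *	static const vio_ver_t	vds_version[] = {{2, 1}, {1, 0}};
 *
 * vds_supported_version() below depends on this descending order (and
 * asserts it) while negotiating a version with the client.
 */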

#ifdef DEBUG
static int	vd_msglevel;
#endif /* DEBUG */


static int
vd_start_bio(vd_task_t *task)
{
	int			status = 0;
	vd_t			*vd		= task->vd;
	vd_dring_payload_t	*request	= task->request;
	struct buf		*buf		= &task->buf;


	ASSERT(vd != NULL);
	ASSERT(request != NULL);
	ASSERT(request->slice < vd->nslices);
	ASSERT((request->operation == VD_OP_BREAD) ||
	    (request->operation == VD_OP_BWRITE));

	if (request->nbytes == 0)
		return (EINVAL);	/* no service for trivial requests */

	PR1("%s %lu bytes at block %lu",
	    (request->operation == VD_OP_BREAD) ? "Read" : "Write",
	    request->nbytes, request->addr);

	bioinit(buf);
	buf->b_flags		= B_BUSY;
	buf->b_bcount		= request->nbytes;
	buf->b_un.b_addr	= kmem_alloc(buf->b_bcount, KM_SLEEP);
	buf->b_lblkno		= request->addr;
	buf->b_edev		= vd->dev[request->slice];

	if (request->operation == VD_OP_BREAD) {
		buf->b_flags |= B_READ;
	} else {
		buf->b_flags |= B_WRITE;
		/* Get data to write from client */
		if ((status = ldc_mem_copy(vd->ldc_handle, buf->b_un.b_addr, 0,
			    &request->nbytes, request->cookie,
			    request->ncookies, LDC_COPY_IN)) != 0) {
			PRN("ldc_mem_copy() returned errno %d "
			    "copying from client", status);
		}
	}

	/* Start the block I/O */
	if ((status == 0) &&
	    ((status = ldi_strategy(vd->ldi_handle[request->slice], buf)) == 0))
		return (EINPROGRESS);	/* will complete on completionq */

	/* Clean up after error */
	kmem_free(buf->b_un.b_addr, buf->b_bcount);
	biofini(buf);
	return (status);
}

static int
send_msg(ldc_handle_t ldc_handle, void *msg, size_t msglen)
{
	int	retry, status;
	size_t	nbytes;


	for (retry = 0, status = EWOULDBLOCK;
	    retry < vds_ldc_retries && status == EWOULDBLOCK;
	    retry++) {
		PR1("ldc_write() attempt %d", (retry + 1));
		nbytes = msglen;
		status = ldc_write(ldc_handle, msg, &nbytes);
	}

	if (status != 0) {
		PRN("ldc_write() returned errno %d", status);
		return (status);
	} else if (nbytes != msglen) {
		PRN("ldc_write() performed only partial write");
		return (EIO);
	}

	PR1("SENT %lu bytes", msglen);
	return (0);
}

static void
vd_need_reset(vd_t *vd, boolean_t reset_ldc)
{
	mutex_enter(&vd->lock);
	vd->reset_state	= B_TRUE;
	vd->reset_ldc	= reset_ldc;
	mutex_exit(&vd->lock);
}

/*
 * Reset the state of the connection with a client, if needed; reset the LDC
 * transport as well, if needed.  This function should only be called from the
 * "startq", as it waits for tasks on the "completionq" and will deadlock if
 * called from that queue.
 */
static void
vd_reset_if_needed(vd_t *vd)
{
	int		status = 0;


	mutex_enter(&vd->lock);
	if (!vd->reset_state) {
		ASSERT(!vd->reset_ldc);
		mutex_exit(&vd->lock);
		return;
	}
	mutex_exit(&vd->lock);


	PR0("Resetting connection state with %s", VD_CLIENT(vd));

	/*
	 * Let any asynchronous I/O complete before possibly pulling the rug
	 * out from under it; defer checking vd->reset_ldc, as one of the
	 * asynchronous tasks might set it
	 */
	ddi_taskq_wait(vd->completionq);


	if ((vd->initialized & VD_DRING) &&
	    ((status = ldc_mem_dring_unmap(vd->dring_handle)) != 0))
		PRN("ldc_mem_dring_unmap() returned errno %d", status);

	if (vd->dring_task != NULL) {
		ASSERT(vd->dring_len != 0);
		kmem_free(vd->dring_task,
		    (sizeof (*vd->dring_task)) * vd->dring_len);
		vd->dring_task = NULL;
	}


	mutex_enter(&vd->lock);
	if (vd->reset_ldc && ((status = ldc_down(vd->ldc_handle)) != 0))
		PRN("ldc_down() returned errno %d", status);

	vd->initialized	&= ~(VD_SID | VD_SEQ_NUM | VD_DRING);
	vd->state	= VD_STATE_INIT;
	vd->max_msglen	= sizeof (vio_msg_t);	/* baseline vio message size */

	vd->reset_state	= B_FALSE;
	vd->reset_ldc	= B_FALSE;
	mutex_exit(&vd->lock);
}

static int
vd_mark_elem_done(vd_t *vd, int idx, int elem_status)
{
	boolean_t		accepted;
	int			status;
	vd_dring_entry_t	*elem = VD_DRING_ELEM(idx);


	/* Acquire the element */
	if ((status = ldc_mem_dring_acquire(vd->dring_handle, idx, idx)) != 0) {
		PRN("ldc_mem_dring_acquire() returned errno %d", status);
		return (status);
	}

	/* Set the element's status and mark it done */
	accepted = (elem->hdr.dstate == VIO_DESC_ACCEPTED);
	if (accepted) {
		elem->payload.status	= elem_status;
		elem->hdr.dstate	= VIO_DESC_DONE;
	} else {
		/* Perhaps client timed out waiting for I/O... */
		PRN("element %u no longer \"accepted\"", idx);
		VD_DUMP_DRING_ELEM(elem);
	}
	/* Release the element */
	if ((status = ldc_mem_dring_release(vd->dring_handle, idx, idx)) != 0) {
		PRN("ldc_mem_dring_release() returned errno %d", status);
		return (status);
	}

	return (accepted ? 0 : EINVAL);
}

static void
vd_complete_bio(void *arg)
{
	int			status		= 0;
	vd_task_t		*task		= (vd_task_t *)arg;
	vd_t			*vd		= task->vd;
	vd_dring_payload_t	*request	= task->request;
	struct buf		*buf		= &task->buf;


	ASSERT(vd != NULL);
	ASSERT(request != NULL);
	ASSERT(task->msg != NULL);
	ASSERT(task->msglen >= sizeof (*task->msg));
	ASSERT(task->msgsize >= task->msglen);

	/* Wait for the I/O to complete */
	request->status = biowait(buf);

	/* If data was read, copy it to the client */
	if ((request->status == 0) && (request->operation == VD_OP_BREAD) &&
	    ((status = ldc_mem_copy(vd->ldc_handle, buf->b_un.b_addr, 0,
		    &request->nbytes, request->cookie, request->ncookies,
		    LDC_COPY_OUT)) != 0)) {
		PRN("ldc_mem_copy() returned errno %d copying to client",
		    status);
	}

	/* Release I/O buffer */
	kmem_free(buf->b_un.b_addr, buf->b_bcount);
	biofini(buf);

	/* Update the dring element for a dring client */
	if ((status == 0) && (vd->xfer_mode == VIO_DRING_MODE))
		status = vd_mark_elem_done(vd, task->index, request->status);

	/*
	 * If a transport error occurred, arrange to "nack" the message when
	 * the final task in the descriptor element range completes
	 */
	if (status != 0)
		task->msg->tag.vio_subtype = VIO_SUBTYPE_NACK;

	/*
	 * Only the final task for a range of elements will respond to and
	 * free the message
	 */
	if (task->type == VD_NONFINAL_RANGE_TASK)
		return;

	/*
	 * Send the "ack" or "nack" back to the client; if sending the message
	 * via LDC fails, arrange to reset both the connection state and LDC
	 * itself
	 */
	PR1("Sending %s",
	    (task->msg->tag.vio_subtype == VIO_SUBTYPE_ACK) ? "ACK" : "NACK");
	if (send_msg(vd->ldc_handle, task->msg, task->msglen) != 0)
		vd_need_reset(vd, B_TRUE);

	/* Free the message now that it has been used for the reply */
	kmem_free(task->msg, task->msgsize);
}

static void
vd_geom2dk_geom(void *vd_buf, void *ioctl_arg)
{
	VD_GEOM2DK_GEOM((vd_geom_t *)vd_buf, (struct dk_geom *)ioctl_arg);
}

static void
vd_vtoc2vtoc(void *vd_buf, void *ioctl_arg)
{
	VD_VTOC2VTOC((vd_vtoc_t *)vd_buf, (struct vtoc *)ioctl_arg);
}

static void
dk_geom2vd_geom(void *ioctl_arg, void *vd_buf)
{
	DK_GEOM2VD_GEOM((struct dk_geom *)ioctl_arg, (vd_geom_t *)vd_buf);
}

static void
vtoc2vd_vtoc(void *ioctl_arg, void *vd_buf)
{
	VTOC2VD_VTOC((struct vtoc *)ioctl_arg, (vd_vtoc_t *)vd_buf);
}

static int
vd_do_slice_ioctl(vd_t *vd, int cmd, void *ioctl_arg)
{
	switch (cmd) {
	case DKIOCGGEOM:
		ASSERT(ioctl_arg != NULL);
		bcopy(&vd->dk_geom, ioctl_arg, sizeof (vd->dk_geom));
		return (0);
	case DKIOCGVTOC:
		ASSERT(ioctl_arg != NULL);
		bcopy(&vd->vtoc, ioctl_arg, sizeof (vd->vtoc));
		return (0);
	default:
		return (ENOTSUP);
	}
}

static int
vd_do_ioctl(vd_t *vd, vd_dring_payload_t *request, void* buf, vd_ioctl_t *ioctl)
{
	int	rval = 0, status;
	size_t	nbytes = request->nbytes;	/* modifiable copy */


	ASSERT(request->slice < vd->nslices);
	PR0("Performing %s", ioctl->operation_name);

	/* Get data from client and convert, if necessary */
	if (ioctl->copyin != NULL)  {
		ASSERT(nbytes != 0 && buf != NULL);
		PR1("Getting \"arg\" data from client");
		if ((status = ldc_mem_copy(vd->ldc_handle, buf, 0, &nbytes,
			    request->cookie, request->ncookies,
			    LDC_COPY_IN)) != 0) {
			PRN("ldc_mem_copy() returned errno %d "
			    "copying from client", status);
			return (status);
		}

		/* Convert client's data, if necessary */
		if (ioctl->copyin == VD_IDENTITY)	/* use client buffer */
			ioctl->arg = buf;
		else	/* convert client vdisk operation data to ioctl data */
			(ioctl->copyin)(buf, (void *)ioctl->arg);
	}

	/*
	 * Handle single-slice block devices internally; otherwise, have the
	 * real driver perform the ioctl()
	 */
	if (vd->vdisk_type == VD_DISK_TYPE_SLICE && !vd->pseudo) {
		if ((status = vd_do_slice_ioctl(vd, ioctl->cmd,
			    (void *)ioctl->arg)) != 0)
			return (status);
	} else if ((status = ldi_ioctl(vd->ldi_handle[request->slice],
		    ioctl->cmd, (intptr_t)ioctl->arg, (vd_open_flags | FKIOCTL),
		    kcred, &rval)) != 0) {
		PR0("ldi_ioctl(%s) = errno %d", ioctl->cmd_name, status);
		return (status);
	}
#ifdef DEBUG
	if (rval != 0) {
		PRN("%s set rval = %d, which is not being returned to client",
		    ioctl->cmd_name, rval);
	}
#endif /* DEBUG */

	/* Convert data and send to client, if necessary */
	if (ioctl->copyout != NULL)  {
		ASSERT(nbytes != 0 && buf != NULL);
		PR1("Sending \"arg\" data to client");

		/* Convert ioctl data to vdisk operation data, if necessary */
		if (ioctl->copyout != VD_IDENTITY)
			(ioctl->copyout)((void *)ioctl->arg, buf);

		if ((status = ldc_mem_copy(vd->ldc_handle, buf, 0, &nbytes,
			    request->cookie, request->ncookies,
			    LDC_COPY_OUT)) != 0) {
			PRN("ldc_mem_copy() returned errno %d "
			    "copying to client", status);
			return (status);
		}
	}

	return (status);
}

/*
 * Open any slices which have become non-empty as a result of performing a
 * set-VTOC operation for the client.
 *
 * When serving a full disk, vds attempts to exclusively open all of the
 * disk's slices to prevent another thread or process in the service domain
 * from "stealing" a slice or from performing I/O to a slice while a vds
 * client is accessing it.  Unfortunately, underlying drivers, such as sd(7d)
 * and cmdk(7d), return an error when attempting to open the device file for a
 * slice which is currently empty according to the VTOC.  This driver behavior
 * means that vds must skip opening empty slices when initializing a vdisk for
 * full-disk service and try to open slices that become non-empty (via a
 * set-VTOC operation) during use of the full disk in order to begin serving
 * such slices to the client.  This approach has an inherent (and therefore
 * unavoidable) race condition; it also means that failure to open a
 * newly-non-empty slice has different semantics than failure to open an
 * initially-non-empty slice:  Due to driver behavior, opening a
 * newly-non-empty slice is a necessary side effect of vds performing a
 * (successful) set-VTOC operation for a client on an in-service (and in-use)
 * disk in order to begin serving the slice; failure of this side-effect
 * operation does not mean that the client's set-VTOC operation failed or that
 * operations on other slices must fail.  Therefore, this function prints an
 * error message on failure to open a slice, but does not return an error to
 * its caller--unlike failure to open a slice initially, which results in an
 * error that prevents serving the vdisk (and thereby requires an
 * administrator to resolve the problem).  Note that, apart from another
 * thread or process opening a new slice during the race-condition window,
 * failure to open a slice in this function will likely indicate an underlying
 * drive problem, which will also likely become evident in errors returned by
 * operations on other slices, and which will require administrative
 * intervention and possibly servicing the drive.
 */
static void
vd_open_new_slices(vd_t *vd)
{
	int		rval, status;
	struct vtoc	vtoc;


	/* Get the (new) VTOC for updated slice sizes */
	if ((status = ldi_ioctl(vd->ldi_handle[0], DKIOCGVTOC, (intptr_t)&vtoc,
		    (vd_open_flags | FKIOCTL), kcred, &rval)) != 0) {
		PRN("ldi_ioctl(DKIOCGVTOC) returned errno %d", status);
		return;
	}

	/* Open any newly-non-empty slices */
	for (int slice = 0; slice < vd->nslices; slice++) {
		/* Skip zero-length slices */
		if (vtoc.v_part[slice].p_size == 0) {
			if (vd->ldi_handle[slice] != NULL)
				PR0("Open slice %u now has zero length", slice);
			continue;
		}

		/* Skip already-open slices */
		if (vd->ldi_handle[slice] != NULL)
			continue;

		PR0("Opening newly-non-empty slice %u", slice);
		if ((status = ldi_open_by_dev(&vd->dev[slice], OTYP_BLK,
			    vd_open_flags, kcred, &vd->ldi_handle[slice],
			    vd->vds->ldi_ident)) != 0) {
			PRN("ldi_open_by_dev() returned errno %d "
			    "for slice %u", status, slice);
		}
	}
}

#define	RNDSIZE(expr) P2ROUNDUP(sizeof (expr), sizeof (uint64_t))
static int
vd_ioctl(vd_task_t *task)
{
	int			i, status;
	void			*buf = NULL;
	struct dk_geom		dk_geom = {0};
	struct vtoc		vtoc = {0};
	vd_t			*vd		= task->vd;
	vd_dring_payload_t	*request	= task->request;
	vd_ioctl_t		ioctl[] = {
		/* Command (no-copy) operations */
		{VD_OP_FLUSH, STRINGIZE(VD_OP_FLUSH), 0,
		    DKIOCFLUSHWRITECACHE, STRINGIZE(DKIOCFLUSHWRITECACHE),
		    NULL, NULL, NULL},

		/* "Get" (copy-out) operations */
		{VD_OP_GET_WCE, STRINGIZE(VD_OP_GET_WCE), RNDSIZE(int),
		    DKIOCGETWCE, STRINGIZE(DKIOCGETWCE),
		    NULL, NULL, VD_IDENTITY},
		{VD_OP_GET_DISKGEOM, STRINGIZE(VD_OP_GET_DISKGEOM),
		    RNDSIZE(vd_geom_t),
		    DKIOCGGEOM, STRINGIZE(DKIOCGGEOM),
		    &dk_geom, NULL, dk_geom2vd_geom},
		{VD_OP_GET_VTOC, STRINGIZE(VD_OP_GET_VTOC), RNDSIZE(vd_vtoc_t),
		    DKIOCGVTOC, STRINGIZE(DKIOCGVTOC),
		    &vtoc, NULL, vtoc2vd_vtoc},

		/* "Set" (copy-in) operations */
		{VD_OP_SET_WCE, STRINGIZE(VD_OP_SET_WCE), RNDSIZE(int),
		    DKIOCSETWCE, STRINGIZE(DKIOCSETWCE),
		    NULL, VD_IDENTITY, NULL},
		{VD_OP_SET_DISKGEOM, STRINGIZE(VD_OP_SET_DISKGEOM),
		    RNDSIZE(vd_geom_t),
		    DKIOCSGEOM, STRINGIZE(DKIOCSGEOM),
		    &dk_geom, vd_geom2dk_geom, NULL},
		{VD_OP_SET_VTOC, STRINGIZE(VD_OP_SET_VTOC), RNDSIZE(vd_vtoc_t),
		    DKIOCSVTOC, STRINGIZE(DKIOCSVTOC),
		    &vtoc, vd_vtoc2vtoc, NULL},
	};
	size_t		nioctls = (sizeof (ioctl))/(sizeof (ioctl[0]));


	ASSERT(vd != NULL);
	ASSERT(request != NULL);
	ASSERT(request->slice < vd->nslices);

	/*
	 * Determine ioctl corresponding to caller's "operation" and
	 * validate caller's "nbytes"
	 */
	for (i = 0; i < nioctls; i++) {
		if (request->operation == ioctl[i].operation) {
			/* LDC memory operations require 8-byte multiples */
			ASSERT(ioctl[i].nbytes % sizeof (uint64_t) == 0);

			if (request->nbytes != ioctl[i].nbytes) {
				PRN("%s:  Expected nbytes = %lu, got %lu",
				    ioctl[i].operation_name, ioctl[i].nbytes,
				    request->nbytes);
				return (EINVAL);
			}

			break;
		}
	}
	ASSERT(i < nioctls);	/* because "operation" already validated */

	if (request->nbytes)
		buf = kmem_zalloc(request->nbytes, KM_SLEEP);
	status = vd_do_ioctl(vd, request, buf, &ioctl[i]);
	if (request->nbytes)
		kmem_free(buf, request->nbytes);
	if ((request->operation == VD_OP_SET_VTOC) &&
	    (vd->vdisk_type == VD_DISK_TYPE_DISK))
		vd_open_new_slices(vd);
	PR0("Returning %d", status);
	return (status);
}

/*
 * Define the supported operations once the functions for performing them have
 * been defined
 */
static const vds_operation_t	vds_operation[] = {
	{VD_OP_BREAD,		vd_start_bio,	vd_complete_bio},
	{VD_OP_BWRITE,		vd_start_bio,	vd_complete_bio},
	{VD_OP_FLUSH,		vd_ioctl,	NULL},
	{VD_OP_GET_WCE,		vd_ioctl,	NULL},
	{VD_OP_SET_WCE,		vd_ioctl,	NULL},
	{VD_OP_GET_VTOC,	vd_ioctl,	NULL},
	{VD_OP_SET_VTOC,	vd_ioctl,	NULL},
	{VD_OP_GET_DISKGEOM,	vd_ioctl,	NULL},
	{VD_OP_SET_DISKGEOM,	vd_ioctl,	NULL}
};

static const size_t	vds_noperations =
	(sizeof (vds_operation))/(sizeof (vds_operation[0]));

/*
 * Process a task specifying a client I/O request
 */
static int
vd_process_task(vd_task_t *task)
{
	int			i, status;
	vd_t			*vd		= task->vd;
	vd_dring_payload_t	*request	= task->request;


	ASSERT(vd != NULL);
	ASSERT(request != NULL);

	/* Range-check slice */
	if (request->slice >= vd->nslices) {
		PRN("Invalid \"slice\" %u (max %u) for virtual disk",
		    request->slice, (vd->nslices - 1));
		return (EINVAL);
	}

	/* Find the requested operation */
	for (i = 0; i < vds_noperations; i++)
		if (request->operation == vds_operation[i].operation)
			break;
	if (i == vds_noperations) {
		PRN("Unsupported operation %u", request->operation);
		return (ENOTSUP);
	}

	/* Start the operation */
	if ((status = vds_operation[i].start(task)) != EINPROGRESS) {
		request->status = status;	/* op succeeded or failed */
		return (0);			/* but request completed */
	}

	ASSERT(vds_operation[i].complete != NULL);	/* debug case */
	if (vds_operation[i].complete == NULL) {	/* non-debug case */
		PRN("Unexpected return of EINPROGRESS "
		    "with no I/O completion handler");
		request->status = EIO;	/* operation failed */
		return (0);		/* but request completed */
	}

	/* Queue a task to complete the operation */
	status = ddi_taskq_dispatch(vd->completionq, vds_operation[i].complete,
	    task, DDI_SLEEP);
	/* ddi_taskq_dispatch(9f) guarantees success with DDI_SLEEP */
	ASSERT(status == DDI_SUCCESS);

	PR1("Operation in progress");
	return (EINPROGRESS);	/* completion handler will finish request */
}

/*
 * Return true if the "type", "subtype", and "env" fields of the "tag" first
 * argument match the corresponding remaining arguments; otherwise, return false
 */
boolean_t
vd_msgtype(vio_msg_tag_t *tag, int type, int subtype, int env)
{
	return ((tag->vio_msgtype == type) &&
		(tag->vio_subtype == subtype) &&
		(tag->vio_subtype_env == env)) ? B_TRUE : B_FALSE;
}

/*
 * Check whether the major/minor version specified in "ver_msg" is supported
 * by this server.
 */
static boolean_t
vds_supported_version(vio_ver_msg_t *ver_msg)
{
	for (int i = 0; i < vds_num_versions; i++) {
		ASSERT(vds_version[i].major > 0);
		ASSERT((i == 0) ||
		    (vds_version[i].major < vds_version[i-1].major));

		/*
		 * If the major versions match, adjust the minor version, if
		 * necessary, down to the highest value supported by this
		 * server and return true so this message will get "ack"ed;
		 * the client should also support all minor versions lower
		 * than the value it sent
		 */
		if (ver_msg->ver_major == vds_version[i].major) {
			if (ver_msg->ver_minor > vds_version[i].minor) {
				PR0("Adjusting minor version from %u to %u",
				    ver_msg->ver_minor, vds_version[i].minor);
				ver_msg->ver_minor = vds_version[i].minor;
			}
			return (B_TRUE);
		}

		/*
		 * If the message contains a higher major version number, set
		 * the message's major/minor versions to the current values
		 * and return false, so this message will get "nack"ed with
		 * these values, and the client will potentially try again
		 * with the same or a lower version
		 */
		if (ver_msg->ver_major > vds_version[i].major) {
			ver_msg->ver_major = vds_version[i].major;
			ver_msg->ver_minor = vds_version[i].minor;
			return (B_FALSE);
		}

		/*
		 * Otherwise, the message's major version is less than the
		 * current major version, so continue the loop to the next
		 * (lower) supported version
		 */
	}

	/*
	 * No common version was found; "ground" the version pair in the
	 * message to terminate negotiation
	 */
	ver_msg->ver_major = 0;
	ver_msg->ver_minor = 0;
	return (B_FALSE);
}

/*
 * Process a version message from a client.  vds expects to receive version
 * messages from clients seeking service, but never issues version messages
 * itself; therefore, vds can ACK or NACK client version messages, but does
 * not expect to receive version-message ACKs or NACKs (and will treat such
 * messages as invalid).
 */
static int
vd_process_ver_msg(vd_t *vd, vio_msg_t *msg, size_t msglen)
{
	vio_ver_msg_t	*ver_msg = (vio_ver_msg_t *)msg;


	ASSERT(msglen >= sizeof (msg->tag));

	if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO,
		VIO_VER_INFO)) {
		return (ENOMSG);	/* not a version message */
	}

	if (msglen != sizeof (*ver_msg)) {
		PRN("Expected %lu-byte version message; "
		    "received %lu bytes", sizeof (*ver_msg), msglen);
		return (EBADMSG);
	}

	if (ver_msg->dev_class != VDEV_DISK) {
		PRN("Expected device class %u (disk); received %u",
		    VDEV_DISK, ver_msg->dev_class);
		return (EBADMSG);
	}

	/*
	 * We're talking to the expected kind of client; set our device class
	 * for "ack/nack" back to the client
	 */
	ver_msg->dev_class = VDEV_DISK_SERVER;

	/*
	 * Check whether the (valid) version message specifies a version
	 * supported by this server.  If the version is not supported, return
	 * EBADMSG so the message will get "nack"ed; vds_supported_version()
	 * will have updated the message with a supported version for the
	 * client to consider
	 */
	if (!vds_supported_version(ver_msg))
		return (EBADMSG);


	/*
	 * A version has been agreed upon; use the client's SID for
	 * communication on this channel now
	 */
	ASSERT(!(vd->initialized & VD_SID));
	vd->sid = ver_msg->tag.vio_sid;
	vd->initialized |= VD_SID;

	/*
	 * When multiple versions are supported, this function should store
	 * the negotiated major and minor version values in the "vd" data
	 * structure to govern further communication; in particular, note that
	 * the client might have specified a lower minor version for the
	 * agreed major version than specified in the vds_version[] array.  The
	 * following assertions should help remind future maintainers to make
	 * the appropriate changes to support multiple versions.
	 */
	ASSERT(vds_num_versions == 1);
	ASSERT(ver_msg->ver_major == vds_version[0].major);
	ASSERT(ver_msg->ver_minor == vds_version[0].minor);

	PR0("Using major version %u, minor version %u",
	    ver_msg->ver_major, ver_msg->ver_minor);
	return (0);
}

static int
vd_process_attr_msg(vd_t *vd, vio_msg_t *msg, size_t msglen)
{
	vd_attr_msg_t	*attr_msg = (vd_attr_msg_t *)msg;


	ASSERT(msglen >= sizeof (msg->tag));

	if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO,
		VIO_ATTR_INFO)) {
		PR0("Message is not an attribute message");
		return (ENOMSG);
	}

	if (msglen != sizeof (*attr_msg)) {
		PRN("Expected %lu-byte attribute message; "
		    "received %lu bytes", sizeof (*attr_msg), msglen);
		return (EBADMSG);
	}

	if (attr_msg->max_xfer_sz == 0) {
		PRN("Received maximum transfer size of 0 from client");
		return (EBADMSG);
	}

	if ((attr_msg->xfer_mode != VIO_DESC_MODE) &&
	    (attr_msg->xfer_mode != VIO_DRING_MODE)) {
		PRN("Client requested unsupported transfer mode");
		return (EBADMSG);
	}


	/* Success:  valid message and transfer mode */
	vd->xfer_mode = attr_msg->xfer_mode;
	if (vd->xfer_mode == VIO_DESC_MODE) {
		/*
		 * The vd_dring_inband_msg_t contains one cookie; need room
		 * for up to n-1 more cookies, where "n" is the number of full
		 * pages plus possibly one partial page required to cover
		 * "max_xfer_sz".  Add room for one more cookie if
		 * "max_xfer_sz" isn't an integral multiple of the page size.
		 * Must first get the maximum transfer size in bytes.
		 */
		size_t	max_xfer_bytes = attr_msg->vdisk_block_size ?
		    attr_msg->vdisk_block_size*attr_msg->max_xfer_sz :
		    attr_msg->max_xfer_sz;
		size_t	max_inband_msglen =
		    sizeof (vd_dring_inband_msg_t) +
		    ((max_xfer_bytes/PAGESIZE +
			((max_xfer_bytes % PAGESIZE) ? 1 : 0))*
			(sizeof (ldc_mem_cookie_t)));
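
		/*
		 * Worked example (assuming an 8K PAGESIZE):  a client
		 * specifying vdisk_block_size = 512 and max_xfer_sz = 33
		 * yields max_xfer_bytes = 16896, i.e. two full pages plus
		 * a partial page, so the computation above reserves room
		 * for three cookies beyond the one already embedded in
		 * the message structure.
		 */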

		/*
		 * Set the maximum expected message length to
		 * accommodate in-band-descriptor messages with all
		 * their cookies
		 */
		vd->max_msglen = MAX(vd->max_msglen, max_inband_msglen);

		/*
		 * Initialize the data structure for processing in-band I/O
		 * request descriptors
		 */
		vd->inband_task.vd	= vd;
		vd->inband_task.index	= 0;
		vd->inband_task.type	= VD_FINAL_RANGE_TASK;	/* range == 1 */
	}

	/* Return the device's block size and max transfer size to the client */
	attr_msg->vdisk_block_size	= DEV_BSIZE;
	attr_msg->max_xfer_sz		= vd->max_xfer_sz;

	attr_msg->vdisk_size = vd->vdisk_size;
	attr_msg->vdisk_type = vd->vdisk_type;
	attr_msg->operations = vds_operations;
	PR0("%s", VD_CLIENT(vd));
	return (0);
}

static int
vd_process_dring_reg_msg(vd_t *vd, vio_msg_t *msg, size_t msglen)
{
	int			status;
	size_t			expected;
	ldc_mem_info_t		dring_minfo;
	vio_dring_reg_msg_t	*reg_msg = (vio_dring_reg_msg_t *)msg;


	ASSERT(msglen >= sizeof (msg->tag));

	if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO,
		VIO_DRING_REG)) {
		PR0("Message is not a register-dring message");
		return (ENOMSG);
	}

	if (msglen < sizeof (*reg_msg)) {
		PRN("Expected at least %lu-byte register-dring message; "
		    "received %lu bytes", sizeof (*reg_msg), msglen);
		return (EBADMSG);
	}

	expected = sizeof (*reg_msg) +
	    (reg_msg->ncookies - 1)*(sizeof (reg_msg->cookie[0]));
	if (msglen != expected) {
		PRN("Expected %lu-byte register-dring message; "
		    "received %lu bytes", expected, msglen);
		return (EBADMSG);
	}

	if (vd->initialized & VD_DRING) {
		PRN("A dring was previously registered; only support one");
		return (EBADMSG);
	}

	if (reg_msg->num_descriptors > INT32_MAX) {
		PRN("reg_msg->num_descriptors = %u; must be <= %u (%s)",
		    reg_msg->ncookies, INT32_MAX, STRINGIZE(INT32_MAX));
		return (EBADMSG);
	}

	if (reg_msg->ncookies != 1) {
		/*
		 * In addition to fixing the assertion in the success case
		 * below, supporting drings which require more than one
		 * "cookie" requires increasing the value of vd->max_msglen
		 * somewhere in the code path prior to receiving the message
		 * which results in calling this function.  Note that without
		 * making this change, the larger message size required to
		 * accommodate multiple cookies cannot be successfully
		 * received, so this function will not even get called.
		 * Gracefully accommodating more dring cookies might
		 * reasonably demand exchanging an additional attribute or
		 * making a minor protocol adjustment
		 */
		PRN("reg_msg->ncookies = %u != 1", reg_msg->ncookies);
		return (EBADMSG);
	}

	status = ldc_mem_dring_map(vd->ldc_handle, reg_msg->cookie,
	    reg_msg->ncookies, reg_msg->num_descriptors,
	    reg_msg->descriptor_size, LDC_SHADOW_MAP, &vd->dring_handle);
	if (status != 0) {
		PRN("ldc_mem_dring_map() returned errno %d", status);
		return (status);
	}

	/*
	 * To remove the need for this assertion, must call
	 * ldc_mem_dring_nextcookie() successfully ncookies-1 times after a
	 * successful call to ldc_mem_dring_map()
	 */
	ASSERT(reg_msg->ncookies == 1);

	if ((status =
		ldc_mem_dring_info(vd->dring_handle, &dring_minfo)) != 0) {
		PRN("ldc_mem_dring_info() returned errno %d", status);
		if ((status = ldc_mem_dring_unmap(vd->dring_handle)) != 0)
			PRN("ldc_mem_dring_unmap() returned errno %d", status);
		return (status);
	}

	if (dring_minfo.vaddr == NULL) {
		PRN("Descriptor ring virtual address is NULL");
		return (ENXIO);
	}


	/* Initialize for valid message and mapped dring */
	PR1("descriptor size = %u, dring length = %u",
	    vd->descriptor_size, vd->dring_len);
	vd->initialized |= VD_DRING;
	vd->dring_ident = 1;	/* "There Can Be Only One" */
	vd->dring = dring_minfo.vaddr;
	vd->descriptor_size = reg_msg->descriptor_size;
	vd->dring_len = reg_msg->num_descriptors;
	reg_msg->dring_ident = vd->dring_ident;

	/*
	 * Allocate and initialize a "shadow" array of data structures for
	 * tasks to process I/O requests in dring elements
	 */
	vd->dring_task =
	    kmem_zalloc((sizeof (*vd->dring_task)) * vd->dring_len, KM_SLEEP);
	for (int i = 0; i < vd->dring_len; i++) {
		vd->dring_task[i].vd		= vd;
		vd->dring_task[i].index		= i;
		vd->dring_task[i].request	= &VD_DRING_ELEM(i)->payload;
	}

	return (0);
}

static int
vd_process_dring_unreg_msg(vd_t *vd, vio_msg_t *msg, size_t msglen)
{
	vio_dring_unreg_msg_t	*unreg_msg = (vio_dring_unreg_msg_t *)msg;


	ASSERT(msglen >= sizeof (msg->tag));

	if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO,
		VIO_DRING_UNREG)) {
		PR0("Message is not an unregister-dring message");
		return (ENOMSG);
	}

	if (msglen != sizeof (*unreg_msg)) {
		PRN("Expected %lu-byte unregister-dring message; "
		    "received %lu bytes", sizeof (*unreg_msg), msglen);
		return (EBADMSG);
	}

	if (unreg_msg->dring_ident != vd->dring_ident) {
		PRN("Expected dring ident %lu; received %lu",
		    vd->dring_ident, unreg_msg->dring_ident);
		return (EBADMSG);
	}

	return (0);
}

static int
process_rdx_msg(vio_msg_t *msg, size_t msglen)
{
	ASSERT(msglen >= sizeof (msg->tag));

	if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_RDX)) {
		PR0("Message is not an RDX message");
		return (ENOMSG);
	}

	if (msglen != sizeof (vio_rdx_msg_t)) {
		PRN("Expected %lu-byte RDX message; received %lu bytes",
		    sizeof (vio_rdx_msg_t), msglen);
		return (EBADMSG);
	}

	PR0("Valid RDX message");
	return (0);
}

static int
vd_check_seq_num(vd_t *vd, uint64_t seq_num)
{
	if ((vd->initialized & VD_SEQ_NUM) && (seq_num != vd->seq_num + 1)) {
		PRN("Received seq_num %lu; expected %lu",
		    seq_num, (vd->seq_num + 1));
		vd_need_reset(vd, B_FALSE);
		return (1);
	}

	vd->seq_num = seq_num;
	vd->initialized |= VD_SEQ_NUM;	/* superfluous after first time... */
	return (0);
}

/*
 * Return the expected size of an inband-descriptor message with all the
 * cookies it claims to include
 */
static size_t
expected_inband_size(vd_dring_inband_msg_t *msg)
{
	return ((sizeof (*msg)) +
	    (msg->payload.ncookies - 1)*(sizeof (msg->payload.cookie[0])));
}

/*
 * Process an in-band descriptor message:  used with clients like OBP, with
 * which vds exchanges descriptors within VIO message payloads, rather than
 * operating on them within a descriptor ring
 */
static int
vd_process_desc_msg(vd_t *vd, vio_msg_t *msg, size_t msglen, size_t msgsize)
{
	size_t			expected;
	vd_dring_inband_msg_t	*desc_msg = (vd_dring_inband_msg_t *)msg;


	ASSERT(msglen >= sizeof (msg->tag));

	if (!vd_msgtype(&msg->tag, VIO_TYPE_DATA, VIO_SUBTYPE_INFO,
		VIO_DESC_DATA)) {
		PR1("Message is not an in-band-descriptor message");
		return (ENOMSG);
	}

	if (msglen < sizeof (*desc_msg)) {
		PRN("Expected at least %lu-byte descriptor message; "
		    "received %lu bytes", sizeof (*desc_msg), msglen);
		return (EBADMSG);
	}

	if (msglen != (expected = expected_inband_size(desc_msg))) {
		PRN("Expected %lu-byte descriptor message; "
		    "received %lu bytes", expected, msglen);
		return (EBADMSG);
	}

	if (vd_check_seq_num(vd, desc_msg->hdr.seq_num) != 0)
		return (EBADMSG);

	/*
	 * Valid message:  Set up the in-band descriptor task and process the
	 * request.  Arrange to acknowledge the client's message, unless an
	 * error processing the descriptor task results in setting
	 * VIO_SUBTYPE_NACK
	 */
	PR1("Valid in-band-descriptor message");
	msg->tag.vio_subtype = VIO_SUBTYPE_ACK;
	vd->inband_task.msg	= msg;
	vd->inband_task.msglen	= msglen;
	vd->inband_task.msgsize	= msgsize;
	vd->inband_task.request	= &desc_msg->payload;
	return (vd_process_task(&vd->inband_task));
}

static int
vd_process_element(vd_t *vd, vd_task_type_t type, uint32_t idx,
    vio_msg_t *msg, size_t msglen, size_t msgsize)
{
	int			status;
	boolean_t		ready;
	vd_dring_entry_t	*elem = VD_DRING_ELEM(idx);


	/* Accept the updated dring element */
	if ((status = ldc_mem_dring_acquire(vd->dring_handle, idx, idx)) != 0) {
		PRN("ldc_mem_dring_acquire() returned errno %d", status);
		return (status);
	}
	ready = (elem->hdr.dstate == VIO_DESC_READY);
	if (ready) {
		elem->hdr.dstate = VIO_DESC_ACCEPTED;
	} else {
		PRN("descriptor %u not ready", idx);
		VD_DUMP_DRING_ELEM(elem);
	}
	if ((status = ldc_mem_dring_release(vd->dring_handle, idx, idx)) != 0) {
		PRN("ldc_mem_dring_release() returned errno %d", status);
		return (status);
	}
	if (!ready)
		return (EBUSY);


	/* Initialize a task and process the accepted element */
	PR1("Processing dring element %u", idx);
	vd->dring_task[idx].type	= type;
	vd->dring_task[idx].msg		= msg;
	vd->dring_task[idx].msglen	= msglen;
	vd->dring_task[idx].msgsize	= msgsize;
	if ((status = vd_process_task(&vd->dring_task[idx])) != EINPROGRESS)
		status = vd_mark_elem_done(vd, idx, elem->payload.status);

	return (status);
}

static int
vd_process_element_range(vd_t *vd, int start, int end,
    vio_msg_t *msg, size_t msglen, size_t msgsize)
{
	int		i, n, nelem, status = 0;
	boolean_t	inprogress = B_FALSE;
	vd_task_type_t	type;


	ASSERT(start >= 0);
	ASSERT(end >= 0);

	/*
	 * Arrange to acknowledge the client's message, unless an error
	 * processing one of the dring elements results in setting
	 * VIO_SUBTYPE_NACK
	 */
	msg->tag.vio_subtype = VIO_SUBTYPE_ACK;

	/*
	 * Process the dring elements in the range
	 */
	nelem = ((end < start) ? end + vd->dring_len : end) - start + 1;
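	/*
	 * Example:  with dring_len = 32, start = 30, and end = 1, the range
	 * wraps, so nelem = (1 + 32) - 30 + 1 = 4 and the modular increment
	 * below visits elements 30, 31, 0, and 1 in order.
	 */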
	for (i = start, n = nelem; n > 0; i = (i + 1) % vd->dring_len, n--) {
		((vio_dring_msg_t *)msg)->end_idx = i;
		type = (n == 1) ? VD_FINAL_RANGE_TASK : VD_NONFINAL_RANGE_TASK;
		status = vd_process_element(vd, type, i, msg, msglen, msgsize);
		if (status == EINPROGRESS)
			inprogress = B_TRUE;
		else if (status != 0)
			break;
	}

	/*
	 * If some, but not all, operations of a multi-element range are in
	 * progress, wait for other operations to complete before returning
	 * (which will result in "ack" or "nack" of the message).  Note that
	 * all outstanding operations will need to complete, not just the ones
	 * corresponding to the current range of dring elements; however, as
	 * this situation is an error case, performance is less critical.
	 */
1373 	if ((nelem > 1) && (status != EINPROGRESS) && inprogress)
1374 		ddi_taskq_wait(vd->completionq);
1375 
1376 	return (status);
1377 }
1378 
1379 static int
1380 vd_process_dring_msg(vd_t *vd, vio_msg_t *msg, size_t msglen, size_t msgsize)
1381 {
1382 	vio_dring_msg_t	*dring_msg = (vio_dring_msg_t *)msg;
1383 
1384 
1385 	ASSERT(msglen >= sizeof (msg->tag));
1386 
1387 	if (!vd_msgtype(&msg->tag, VIO_TYPE_DATA, VIO_SUBTYPE_INFO,
1388 		VIO_DRING_DATA)) {
1389 		PR1("Message is not a dring-data message");
1390 		return (ENOMSG);
1391 	}
1392 
1393 	if (msglen != sizeof (*dring_msg)) {
1394 		PRN("Expected %lu-byte dring message; received %lu bytes",
1395 		    sizeof (*dring_msg), msglen);
1396 		return (EBADMSG);
1397 	}
1398 
1399 	if (vd_check_seq_num(vd, dring_msg->seq_num) != 0)
1400 		return (EBADMSG);
1401 
1402 	if (dring_msg->dring_ident != vd->dring_ident) {
1403 		PRN("Expected dring ident %lu; received ident %lu",
1404 		    vd->dring_ident, dring_msg->dring_ident);
1405 		return (EBADMSG);
1406 	}
1407 
1408 	if (dring_msg->start_idx >= vd->dring_len) {
1409 		PRN("\"start_idx\" = %u; must be less than %u",
1410 		    dring_msg->start_idx, vd->dring_len);
1411 		return (EBADMSG);
1412 	}
1413 
1414 	if ((dring_msg->end_idx < 0) ||
1415 	    (dring_msg->end_idx >= vd->dring_len)) {
1416 		PRN("\"end_idx\" = %u; must be >= 0 and less than %u",
1417 		    dring_msg->end_idx, vd->dring_len);
1418 		return (EBADMSG);
1419 	}
1420 
1421 	/* Valid message; process range of updated dring elements */
1422 	PR1("Processing descriptor range, start = %u, end = %u",
1423 	    dring_msg->start_idx, dring_msg->end_idx);
1424 	return (vd_process_element_range(vd, dring_msg->start_idx,
1425 		dring_msg->end_idx, msg, msglen, msgsize));
1426 }
1427 
1428 static int
1429 recv_msg(ldc_handle_t ldc_handle, void *msg, size_t *nbytes)
1430 {
1431 	int	retry, status;
1432 	size_t	size = *nbytes;
1433 
1434 
1435 	for (retry = 0, status = ETIMEDOUT;
1436 	    retry < vds_ldc_retries && status == ETIMEDOUT;
1437 	    retry++) {
1438 		PR1("ldc_read() attempt %d", (retry + 1));
1439 		*nbytes = size;
1440 		status = ldc_read(ldc_handle, msg, nbytes);
1441 	}
1442 
1443 	if (status != 0) {
1444 		PRN("ldc_read() returned errno %d", status);
1445 		return (status);
1446 	} else if (*nbytes == 0) {
1447 		PR1("ldc_read() returned 0 and no message read");
1448 		return (ENOMSG);
1449 	}
1450 
1451 	PR1("RCVD %lu-byte message", *nbytes);
1452 	return (0);
1453 }
1454 
1455 static int
1456 vd_do_process_msg(vd_t *vd, vio_msg_t *msg, size_t msglen, size_t msgsize)
1457 {
1458 	int		status;
1459 
1460 
1461 	PR1("Processing (%x/%x/%x) message", msg->tag.vio_msgtype,
1462 	    msg->tag.vio_subtype, msg->tag.vio_subtype_env);
1463 
1464 	/*
1465 	 * Validate session ID up front, since it applies to all messages
1466 	 * once set
1467 	 */
1468 	if ((msg->tag.vio_sid != vd->sid) && (vd->initialized & VD_SID)) {
1469 		PRN("Expected SID %u, received %u", vd->sid,
1470 		    msg->tag.vio_sid);
1471 		return (EBADMSG);
1472 	}
1473 
1474 
1475 	/*
1476 	 * Process the received message based on connection state
1477 	 */
1478 	switch (vd->state) {
1479 	case VD_STATE_INIT:	/* expect version message */
1480 		if ((status = vd_process_ver_msg(vd, msg, msglen)) != 0)
1481 			return (status);
1482 
1483 		/* Version negotiated, move to that state */
1484 		vd->state = VD_STATE_VER;
1485 		return (0);
1486 
1487 	case VD_STATE_VER:	/* expect attribute message */
1488 		if ((status = vd_process_attr_msg(vd, msg, msglen)) != 0)
1489 			return (status);
1490 
1491 		/* Attributes exchanged, move to that state */
1492 		vd->state = VD_STATE_ATTR;
1493 		return (0);
1494 
1495 	case VD_STATE_ATTR:
1496 		switch (vd->xfer_mode) {
1497 		case VIO_DESC_MODE:	/* expect RDX message */
1498 			if ((status = process_rdx_msg(msg, msglen)) != 0)
1499 				return (status);
1500 
1501 			/* Ready to receive in-band descriptors */
1502 			vd->state = VD_STATE_DATA;
1503 			return (0);
1504 
1505 		case VIO_DRING_MODE:	/* expect register-dring message */
1506 			if ((status =
1507 				vd_process_dring_reg_msg(vd, msg, msglen)) != 0)
1508 				return (status);
1509 
1510 			/* One dring negotiated, move to that state */
1511 			vd->state = VD_STATE_DRING;
1512 			return (0);
1513 
1514 		default:
1515 			ASSERT("Unsupported transfer mode");
1516 			PRN("Unsupported transfer mode");
1517 			return (ENOTSUP);
1518 		}
1519 
1520 	case VD_STATE_DRING:	/* expect RDX, register-dring, or unreg-dring */
1521 		if ((status = process_rdx_msg(msg, msglen)) == 0) {
1522 			/* Ready to receive data */
1523 			vd->state = VD_STATE_DATA;
1524 			return (0);
1525 		} else if (status != ENOMSG) {
1526 			return (status);
1527 		}
1528 
1529 
1530 		/*
1531 		 * If another register-dring message is received, stay in
1532 		 * dring state in case the client sends RDX; although the
1533 		 * protocol allows multiple drings, this server does not
1534 		 * support using more than one
1535 		 */
1536 		if ((status =
1537 			vd_process_dring_reg_msg(vd, msg, msglen)) != ENOMSG)
1538 			return (status);
1539 
1540 		/*
1541 		 * Acknowledge an unregister-dring message, but reset the
1542 		 * connection anyway:  Although the protocol allows
1543 		 * unregistering drings, this server cannot serve a vdisk
1544 		 * without its only dring
1545 		 */
1546 		status = vd_process_dring_unreg_msg(vd, msg, msglen);
1547 		return ((status == 0) ? ENOTSUP : status);
1548 
1549 	case VD_STATE_DATA:
1550 		switch (vd->xfer_mode) {
1551 		case VIO_DESC_MODE:	/* expect in-band-descriptor message */
1552 			return (vd_process_desc_msg(vd, msg, msglen, msgsize));
1553 
1554 		case VIO_DRING_MODE:	/* expect dring-data or unreg-dring */
1555 			/*
1556 			 * Typically expect dring-data messages, so handle
1557 			 * them first
1558 			 */
1559 			if ((status = vd_process_dring_msg(vd, msg,
1560 				    msglen, msgsize)) != ENOMSG)
1561 				return (status);
1562 
1563 			/*
1564 			 * Acknowledge an unregister-dring message, but reset
1565 			 * the connection anyway:  Although the protocol
1566 			 * allows unregistering drings, this server cannot
1567 			 * serve a vdisk without its only dring
1568 			 */
1569 			status = vd_process_dring_unreg_msg(vd, msg, msglen);
1570 			return ((status == 0) ? ENOTSUP : status);
1571 
1572 		default:
1573 			ASSERT("Unsupported transfer mode");
1574 			PRN("Unsupported transfer mode");
1575 			return (ENOTSUP);
1576 		}
1577 
1578 	default:
1579 		ASSERT("Invalid client connection state");
1580 		PRN("Invalid client connection state");
1581 		return (ENOTSUP);
1582 	}
1583 }
1584 
1585 static int
1586 vd_process_msg(vd_t *vd, vio_msg_t *msg, size_t msglen, size_t msgsize)
1587 {
1588 	int		status;
1589 	boolean_t	reset_ldc = B_FALSE;
1590 
1591 
1592 	/*
1593 	 * Check that the message is at least big enough for a "tag", so that
1594 	 * message processing can proceed based on tag-specified message type
1595 	 */
1596 	if (msglen < sizeof (vio_msg_tag_t)) {
1597 		PRN("Received short (%lu-byte) message", msglen);
1598 		/* Can't "nack" short message, so drop the big hammer */
1599 		vd_need_reset(vd, B_TRUE);
1600 		return (EBADMSG);
1601 	}
1602 
1603 	/*
1604 	 * Process the message
1605 	 */
1606 	switch (status = vd_do_process_msg(vd, msg, msglen, msgsize)) {
1607 	case 0:
1608 		/* "ack" valid, successfully-processed messages */
1609 		msg->tag.vio_subtype = VIO_SUBTYPE_ACK;
1610 		break;
1611 
1612 	case EINPROGRESS:
1613 		/* The completion handler will "ack" or "nack" the message */
1614 		return (EINPROGRESS);
1615 	case ENOMSG:
1616 		PRN("Received unexpected message");
1617 		_NOTE(FALLTHROUGH);
1618 	case EBADMSG:
1619 	case ENOTSUP:
1620 		/* "nack" invalid messages */
1621 		msg->tag.vio_subtype = VIO_SUBTYPE_NACK;
1622 		break;
1623 
1624 	default:
1625 		/* "nack" failed messages */
1626 		msg->tag.vio_subtype = VIO_SUBTYPE_NACK;
1627 		/* An LDC error probably occurred, so try resetting it */
1628 		reset_ldc = B_TRUE;
1629 		break;
1630 	}
1631 
1632 	/* Send the "ack" or "nack" to the client */
1633 	PR1("Sending %s",
1634 	    (msg->tag.vio_subtype == VIO_SUBTYPE_ACK) ? "ACK" : "NACK");
1635 	if (send_msg(vd->ldc_handle, msg, msglen) != 0)
1636 		reset_ldc = B_TRUE;
1637 
1638 	/* Arrange to reset the connection for nack'ed or failed messages */
1639 	if ((status != 0) || reset_ldc)
1640 		vd_need_reset(vd, reset_ldc);
1641 
1642 	return (status);
1643 }
1644 
1645 static boolean_t
1646 vd_enabled(vd_t *vd)
1647 {
1648 	boolean_t	enabled;
1649 
1650 
1651 	mutex_enter(&vd->lock);
1652 	enabled = vd->enabled;
1653 	mutex_exit(&vd->lock);
1654 	return (enabled);
1655 }
1656 
1657 static void
1658 vd_recv_msg(void *arg)
1659 {
1660 	vd_t	*vd = (vd_t *)arg;
1661 	int	status = 0;
1662 
1663 
1664 	ASSERT(vd != NULL);
1665 	PR2("New task to receive incoming message(s)");
1666 	while (vd_enabled(vd) && status == 0) {
1667 		size_t		msglen, msgsize;
1668 		vio_msg_t	*vio_msg;
1669 
1670 
1671 		/*
1672 		 * Receive and process a message
1673 		 */
1674 		vd_reset_if_needed(vd);	/* can change vd->max_msglen */
1675 		msgsize = vd->max_msglen;	/* stable copy for alloc/free */
1676 		msglen	= msgsize;	/* actual length after recv_msg() */
1677 		vio_msg = kmem_alloc(msgsize, KM_SLEEP);
1678 		if ((status = recv_msg(vd->ldc_handle, vio_msg, &msglen)) ==
1679 		    0) {
1680 			if (vd_process_msg(vd, vio_msg, msglen, msgsize) ==
1681 			    EINPROGRESS)
1682 				continue;	/* handler will free msg */
1683 		} else if (status != ENOMSG) {
1684 			/* Probably an LDC failure; arrange to reset it */
1685 			vd_need_reset(vd, B_TRUE);
1686 		}
1687 		kmem_free(vio_msg, msgsize);
1688 	}
1689 	PR2("Task finished");
1690 }
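
/*
 * Buffer ownership in the loop above:  one buffer of vd->max_msglen bytes
 * is allocated per iteration, with "msgsize" snapshotted first so that the
 * kmem_free() size always matches the kmem_alloc() size even if a reset
 * changes vd->max_msglen while the message is being processed.  On
 * EINPROGRESS the buffer is handed off to the completion task, which
 * becomes responsible for freeing it; that is why the loop "continue"s
 * around its own kmem_free() in exactly that case.
 */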
1691 
1692 static uint_t
1693 vd_handle_ldc_events(uint64_t event, caddr_t arg)
1694 {
1695 	vd_t	*vd = (vd_t *)(void *)arg;
1696 
1697 
1698 	ASSERT(vd != NULL);
1699 
1700 	if (!vd_enabled(vd))
1701 		return (LDC_SUCCESS);
1702 
1703 	if (event & LDC_EVT_RESET) {
1704 		PR0("LDC channel was reset");
1705 		return (LDC_SUCCESS);
1706 	}
1707 
1708 	if (event & LDC_EVT_UP) {
1709 		PR0("LDC channel came up:  Resetting client connection state");
1710 		vd_need_reset(vd, B_FALSE);
1711 	}
1712 
1713 	if (event & LDC_EVT_READ) {
1714 		int	status;
1715 
1716 		PR1("New data available");
1717 		/* Queue a task to receive the new data */
1718 		status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, vd,
1719 		    DDI_SLEEP);
1720 		/* ddi_taskq_dispatch(9f) guarantees success with DDI_SLEEP */
1721 		ASSERT(status == DDI_SUCCESS);
1722 	}
1723 
1724 	return (LDC_SUCCESS);
1725 }
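
/*
 * LDC can OR several event bits into a single callback invocation, which
 * is why the handler above tests LDC_EVT_RESET, LDC_EVT_UP, and
 * LDC_EVT_READ independently rather than switching on "event".  For a
 * hypothetical combined delivery of (LDC_EVT_UP | LDC_EVT_READ), the
 * handler both resets the client connection state and queues a
 * vd_recv_msg() task for the newly arrived data.
 */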
1726 
1727 static uint_t
1728 vds_check_for_vd(mod_hash_key_t key, mod_hash_val_t *val, void *arg)
1729 {
1730 	_NOTE(ARGUNUSED(key, val))
1731 	(*((uint_t *)arg))++;
1732 	return (MH_WALK_TERMINATE);
1733 }
1734 
1735 
1736 static int
1737 vds_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1738 {
1739 	uint_t	vd_present = 0;
1740 	minor_t	instance;
1741 	vds_t	*vds;
1742 
1743 
1744 	switch (cmd) {
1745 	case DDI_DETACH:
1746 		/* the real work happens below */
1747 		break;
1748 	case DDI_SUSPEND:
1749 		PR0("No action required for DDI_SUSPEND");
1750 		return (DDI_SUCCESS);
1751 	default:
1752 		PRN("Unrecognized \"cmd\"");
1753 		return (DDI_FAILURE);
1754 	}
1755 
1756 	ASSERT(cmd == DDI_DETACH);
1757 	instance = ddi_get_instance(dip);
1758 	if ((vds = ddi_get_soft_state(vds_state, instance)) == NULL) {
1759 		PRN("Could not get state for instance %u", instance);
1760 		ddi_soft_state_free(vds_state, instance);
1761 		return (DDI_FAILURE);
1762 	}
1763 
1764 	/* Do not detach while serving any vdisks */
1765 	mod_hash_walk(vds->vd_table, vds_check_for_vd, &vd_present);
1766 	if (vd_present) {
1767 		PR0("Not detaching because serving vdisks");
1768 		return (DDI_FAILURE);
1769 	}
1770 
1771 	PR0("Detaching");
1772 	if (vds->initialized & VDS_MDEG)
1773 		(void) mdeg_unregister(vds->mdeg);
1774 	if (vds->initialized & VDS_LDI)
1775 		(void) ldi_ident_release(vds->ldi_ident);
1776 	mod_hash_destroy_hash(vds->vd_table);
1777 	ddi_soft_state_free(vds_state, instance);
1778 	return (DDI_SUCCESS);
1779 }
1780 
1781 static boolean_t
1782 is_pseudo_device(dev_info_t *dip)
1783 {
1784 	dev_info_t	*parent, *root = ddi_root_node();
1785 
1786 
1787 	for (parent = ddi_get_parent(dip); (parent != NULL) && (parent != root);
1788 	    parent = ddi_get_parent(parent)) {
1789 		if (strcmp(ddi_get_name(parent), DEVI_PSEUDO_NEXNAME) == 0)
1790 			return (B_TRUE);
1791 	}
1792 
1793 	return (B_FALSE);
1794 }
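
/*
 * is_pseudo_device() walks from the device's parent toward the devinfo
 * root looking for a nexus named DEVI_PSEUDO_NEXNAME ("pseudo").  For a
 * backing device at, hypothetically, /pseudo/foo@0:c the first parent
 * already matches and the walk answers B_TRUE, whereas a physical path
 * such as /pci@.../disk@0,0 never matches and falls out at the root.
 */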
1795 
1796 static int
1797 vd_setup_full_disk(vd_t *vd)
1798 {
1799 	int		rval, status;
1800 	major_t		major = getmajor(vd->dev[0]);
1801 	minor_t		minor = getminor(vd->dev[0]) - VD_ENTIRE_DISK_SLICE;
1802 	struct vtoc	vtoc;
1803 
1804 
1805 	/* Get the VTOC for slice sizes */
1806 	if ((status = ldi_ioctl(vd->ldi_handle[0], DKIOCGVTOC, (intptr_t)&vtoc,
1807 		    (vd_open_flags | FKIOCTL), kcred, &rval)) != 0) {
1808 		PRN("ldi_ioctl(DKIOCGVTOC) returned errno %d", status);
1809 		return (status);
1810 	}
1811 
1812 	/* Set full-disk parameters */
1813 	vd->vdisk_type	= VD_DISK_TYPE_DISK;
1814 	vd->nslices	= (sizeof (vd->dev))/(sizeof (vd->dev[0]));
1815 
1816 	/* Move dev number and LDI handle to entire-disk-slice array elements */
1817 	vd->dev[VD_ENTIRE_DISK_SLICE]		= vd->dev[0];
1818 	vd->dev[0]				= 0;
1819 	vd->ldi_handle[VD_ENTIRE_DISK_SLICE]	= vd->ldi_handle[0];
1820 	vd->ldi_handle[0]			= NULL;
1821 
1822 	/* Initialize device numbers for remaining slices and open them */
1823 	for (int slice = 0; slice < vd->nslices; slice++) {
1824 		/*
1825 		 * Skip the entire-disk slice, as it is already open and its
1826 		 * device number already known
1827 		 */
1828 		if (slice == VD_ENTIRE_DISK_SLICE)
1829 			continue;
1830 		ASSERT(vd->dev[slice] == 0);
1831 		ASSERT(vd->ldi_handle[slice] == NULL);
1832 
1833 		/*
1834 		 * Construct the device number for the current slice
1835 		 */
1836 		vd->dev[slice] = makedevice(major, (minor + slice));
1837 
1838 		/*
1839 		 * At least some underlying drivers refuse to open
1840 		 * devices for (currently) zero-length slices, so skip
1841 		 * them for now
1842 		 */
1843 		if (vtoc.v_part[slice].p_size == 0) {
1844 			PR0("Skipping zero-length slice %u", slice);
1845 			continue;
1846 		}
1847 
1848 		/*
1849 		 * Open all non-empty slices of the disk to serve them to the
1850 		 * client.  Slices are opened exclusively to prevent other
1851 		 * threads or processes in the service domain from performing
1852 		 * I/O to slices being accessed by a client.  Failure to open
1853 		 * a slice results in vds not serving this disk, as the client
1854 		 * could attempt (and should be able) to access any non-empty
1855 		 * slice immediately.  Any slices successfully opened before a
1856 		 * failure will get closed by vds_destroy_vd() as a result of
1857 		 * the error returned by this function.
1858 		 */
1859 		PR0("Opening device major %u, minor %u = slice %u",
1860 		    major, minor + slice, slice);
1861 		if ((status = ldi_open_by_dev(&vd->dev[slice], OTYP_BLK,
1862 			    vd_open_flags, kcred, &vd->ldi_handle[slice],
1863 			    vd->vds->ldi_ident)) != 0) {
1864 			PRN("ldi_open_by_dev() returned errno %d "
1865 			    "for slice %u", status, slice);
1866 			/* vds_destroy_vd() will close any open slices */
1867 			return (status);
1868 		}
1869 	}
1870 
1871 	return (0);
1872 }
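
/*
 * Worked example of the slice arithmetic above (values hypothetical):  if
 * the configured backing device is the entire-disk slice with dev_t minor
 * 10, then "minor" is computed as 10 - VD_ENTIRE_DISK_SLICE = 8, i.e. the
 * minor number of slice 0, and makedevice(major, minor + slice) yields
 * minors 8 through 8 + nslices - 1 for slices 0 through nslices - 1.
 * This relies on the convention that a disk driver assigns consecutive
 * minor numbers to consecutive slices of one disk.
 */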
1873 
1874 static int
1875 vd_setup_vd(char *device_path, vd_t *vd)
1876 {
1877 	int		rval, status;
1878 	dev_info_t	*dip;
1879 	struct dk_cinfo	dk_cinfo;
1880 
1881 
1882 	if ((status = ldi_open_by_name(device_path, vd_open_flags, kcred,
1883 		    &vd->ldi_handle[0], vd->vds->ldi_ident)) != 0) {
1884 		PRN("ldi_open_by_name(%s) = errno %d", device_path, status);
1885 		return (status);
1886 	}
1887 
1888 	/* Get device number and size of backing device */
1889 	if ((status = ldi_get_dev(vd->ldi_handle[0], &vd->dev[0])) != 0) {
1890 		PRN("ldi_get_dev() returned errno %d for %s",
1891 		    status, device_path);
1892 		return (status);
1893 	}
1894 	if (ldi_get_size(vd->ldi_handle[0], &vd->vdisk_size) != DDI_SUCCESS) {
1895 		PRN("ldi_get_size() failed for %s", device_path);
1896 		return (EIO);
1897 	}
1898 	vd->vdisk_size = lbtodb(vd->vdisk_size);	/* convert to blocks */
1899 
1900 	/* Verify backing device supports dk_cinfo, dk_geom, and vtoc */
1901 	if ((status = ldi_ioctl(vd->ldi_handle[0], DKIOCINFO,
1902 		    (intptr_t)&dk_cinfo, (vd_open_flags | FKIOCTL), kcred,
1903 		    &rval)) != 0) {
1904 		PRN("ldi_ioctl(DKIOCINFO) returned errno %d for %s",
1905 		    status, device_path);
1906 		return (status);
1907 	}
1908 	if (dk_cinfo.dki_partition >= V_NUMPAR) {
1909 		PRN("slice %u >= maximum slice %u for %s",
1910 		    dk_cinfo.dki_partition, V_NUMPAR, device_path);
1911 		return (EIO);
1912 	}
1913 	if ((status = ldi_ioctl(vd->ldi_handle[0], DKIOCGGEOM,
1914 		    (intptr_t)&vd->dk_geom, (vd_open_flags | FKIOCTL), kcred,
1915 		    &rval)) != 0) {
1916 		PRN("ldi_ioctl(DKIOCGGEOM) returned errno %d for %s",
1917 		    status, device_path);
1918 		return (status);
1919 	}
1920 	if ((status = ldi_ioctl(vd->ldi_handle[0], DKIOCGVTOC,
1921 		    (intptr_t)&vd->vtoc, (vd_open_flags | FKIOCTL), kcred,
1922 		    &rval)) != 0) {
1923 		PRN("ldi_ioctl(DKIOCGVTOC) returned errno %d for %s",
1924 		    status, device_path);
1925 		return (status);
1926 	}
1927 
1928 	/* Store the device's max transfer size for return to the client */
1929 	vd->max_xfer_sz = dk_cinfo.dki_maxtransfer;
1930 
1931 
1932 	/* Determine if backing device is a pseudo device */
1933 	if ((dip = ddi_hold_devi_by_instance(getmajor(vd->dev[0]),
1934 		    dev_to_instance(vd->dev[0]), 0)) == NULL) {
1935 		PRN("%s is no longer accessible", device_path);
1936 		return (EIO);
1937 	}
1938 	vd->pseudo = is_pseudo_device(dip);
1939 	ddi_release_devi(dip);
1940 	if (vd->pseudo) {
1941 		vd->vdisk_type	= VD_DISK_TYPE_SLICE;
1942 		vd->nslices	= 1;
1943 		return (0);	/* ...and we're done */
1944 	}
1945 
1946 
1947 	/* If slice is entire-disk slice, initialize for full disk */
1948 	if (dk_cinfo.dki_partition == VD_ENTIRE_DISK_SLICE)
1949 		return (vd_setup_full_disk(vd));
1950 
1951 
1952 	/* Otherwise, we have a non-entire slice of a device */
1953 	vd->vdisk_type	= VD_DISK_TYPE_SLICE;
1954 	vd->nslices	= 1;
1955 
1956 
1957 	/* Initialize dk_geom structure for single-slice device */
1958 	if (vd->dk_geom.dkg_nsect == 0) {
1959 		PRN("%s geometry claims 0 sectors per track", device_path);
1960 		return (EIO);
1961 	}
1962 	if (vd->dk_geom.dkg_nhead == 0) {
1963 		PRN("%s geometry claims 0 heads", device_path);
1964 		return (EIO);
1965 	}
1966 	vd->dk_geom.dkg_ncyl =
1967 	    vd->vdisk_size/vd->dk_geom.dkg_nsect/vd->dk_geom.dkg_nhead;
1968 	vd->dk_geom.dkg_acyl = 0;
1969 	vd->dk_geom.dkg_pcyl = vd->dk_geom.dkg_ncyl + vd->dk_geom.dkg_acyl;
1970 
1971 
1972 	/* Initialize vtoc structure for single-slice device */
1973 	bcopy(VD_VOLUME_NAME, vd->vtoc.v_volume,
1974 	    MIN(sizeof (VD_VOLUME_NAME), sizeof (vd->vtoc.v_volume)));
1975 	bzero(vd->vtoc.v_part, sizeof (vd->vtoc.v_part));
1976 	vd->vtoc.v_nparts = 1;
1977 	vd->vtoc.v_part[0].p_tag = V_UNASSIGNED;
1978 	vd->vtoc.v_part[0].p_flag = 0;
1979 	vd->vtoc.v_part[0].p_start = 0;
1980 	vd->vtoc.v_part[0].p_size = vd->vdisk_size;
1981 	bcopy(VD_ASCIILABEL, vd->vtoc.v_asciilabel,
1982 	    MIN(sizeof (VD_ASCIILABEL), sizeof (vd->vtoc.v_asciilabel)));
1983 
1984 
1985 	return (0);
1986 }
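
/*
 * Worked example of the synthesized single-slice geometry above (values
 * hypothetical):  for a 409600-block slice whose underlying disk reports
 * dkg_nsect = 128 and dkg_nhead = 16, the fabricated label claims
 *
 *	dkg_ncyl = 409600 / 128 / 16 = 200 cylinders
 *
 * with no alternate cylinders (dkg_acyl = 0) and a one-partition VTOC
 * whose single slice covers blocks 0 through vdisk_size - 1.
 */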
1987 
1988 static int
1989 vds_do_init_vd(vds_t *vds, uint64_t id, char *device_path, uint64_t ldc_id,
1990     vd_t **vdp)
1991 {
1992 	char			tq_name[TASKQ_NAMELEN];
1993 	int			status;
1994 	ddi_iblock_cookie_t	iblock = NULL;
1995 	ldc_attr_t		ldc_attr;
1996 	vd_t			*vd;
1997 
1998 
1999 	ASSERT(vds != NULL);
2000 	ASSERT(device_path != NULL);
2001 	ASSERT(vdp != NULL);
2002 	PR0("Adding vdisk for %s", device_path);
2003 
2004 	if ((vd = kmem_zalloc(sizeof (*vd), KM_NOSLEEP)) == NULL) {
2005 		PRN("No memory for virtual disk");
2006 		return (EAGAIN);
2007 	}
2008 	*vdp = vd;	/* assign here so vds_destroy_vd() can cleanup later */
2009 	vd->vds = vds;
2010 
2011 
2012 	/* Open vdisk and initialize parameters */
2013 	if ((status = vd_setup_vd(device_path, vd)) != 0)
2014 		return (status);
2015 	ASSERT(vd->nslices > 0 && vd->nslices <= V_NUMPAR);
2016 	PR0("vdisk_type = %s, pseudo = %s, nslices = %u",
2017 	    ((vd->vdisk_type == VD_DISK_TYPE_DISK) ? "disk" : "slice"),
2018 	    (vd->pseudo ? "yes" : "no"), vd->nslices);
2019 
2020 
2021 	/* Initialize locking */
2022 	if (ddi_get_soft_iblock_cookie(vds->dip, DDI_SOFTINT_MED,
2023 		&iblock) != DDI_SUCCESS) {
2024 		PRN("Could not get iblock cookie.");
2025 		return (EIO);
2026 	}
2027 
2028 	mutex_init(&vd->lock, NULL, MUTEX_DRIVER, iblock);
2029 	vd->initialized |= VD_LOCKING;
2030 
2031 
2032 	/* Create start and completion task queues for the vdisk */
2033 	(void) snprintf(tq_name, sizeof (tq_name), "vd_startq%lu", id);
2034 	PR1("tq_name = %s", tq_name);
2035 	if ((vd->startq = ddi_taskq_create(vds->dip, tq_name, 1,
2036 		    TASKQ_DEFAULTPRI, 0)) == NULL) {
2037 		PRN("Could not create task queue");
2038 		return (EIO);
2039 	}
2040 	(void) snprintf(tq_name, sizeof (tq_name), "vd_completionq%lu", id);
2041 	PR1("tq_name = %s", tq_name);
2042 	if ((vd->completionq = ddi_taskq_create(vds->dip, tq_name, 1,
2043 		    TASKQ_DEFAULTPRI, 0)) == NULL) {
2044 		PRN("Could not create task queue");
2045 		return (EIO);
2046 	}
2047 	vd->enabled = 1;	/* before callback can dispatch to startq */
2048 
2049 
2050 	/* Bring up LDC */
2051 	ldc_attr.devclass	= LDC_DEV_BLK_SVC;
2052 	ldc_attr.instance	= ddi_get_instance(vds->dip);
2053 	ldc_attr.mode		= LDC_MODE_UNRELIABLE;
2054 	ldc_attr.mtu		= VD_LDC_MTU;
2055 	if ((status = ldc_init(ldc_id, &ldc_attr, &vd->ldc_handle)) != 0) {
2056 		PRN("ldc_init(%lu) = errno %d", ldc_id, status);
2057 		return (status);
2058 	}
2059 	vd->initialized |= VD_LDC;
2060 
2061 	if ((status = ldc_reg_callback(vd->ldc_handle, vd_handle_ldc_events,
2062 		(caddr_t)vd)) != 0) {
2063 		PRN("ldc_reg_callback() returned errno %d", status);
2064 		return (status);
2065 	}
2066 
2067 	if ((status = ldc_open(vd->ldc_handle)) != 0) {
2068 		PRN("ldc_open() returned errno %d", status);
2069 		return (status);
2070 	}
2071 
2072 
2073 	/* Add the successfully-initialized vdisk to the server's table */
2074 	if (mod_hash_insert(vds->vd_table, (mod_hash_key_t)id, vd) != 0) {
2075 		PRN("Error adding vdisk ID %lu to table", id);
2076 		return (EIO);
2077 	}
2078 
2079 	return (0);
2080 }
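
/*
 * Error handling in vds_do_init_vd() relies on the early "*vdp = vd"
 * assignment:  any failure return leaves the partially-initialized vd_t
 * with the caller, and vds_init_vd() below then invokes vds_destroy_vd(),
 * which consults vd->initialized (VD_LOCKING, VD_LDC, ...) to tear down
 * exactly the resources that were successfully set up.
 */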
2081 
2082 /*
2083  * Destroy the state associated with a virtual disk
2084  */
2085 static void
2086 vds_destroy_vd(void *arg)
2087 {
2088 	vd_t	*vd = (vd_t *)arg;
2089 
2090 
2091 	if (vd == NULL)
2092 		return;
2093 
2094 	PR0("Destroying vdisk state");
2095 
2096 	/* Disable queuing requests for the vdisk */
2097 	if (vd->initialized & VD_LOCKING) {
2098 		mutex_enter(&vd->lock);
2099 		vd->enabled = 0;
2100 		mutex_exit(&vd->lock);
2101 	}
2102 
2103 	/* Drain and destroy start queue (*before* destroying completionq) */
2104 	if (vd->startq != NULL)
2105 		ddi_taskq_destroy(vd->startq);	/* waits for queued tasks */
2106 
2107 	/* Drain and destroy completion queue (*before* shutting down LDC) */
2108 	if (vd->completionq != NULL)
2109 		ddi_taskq_destroy(vd->completionq);	/* waits for tasks */
2110 
2111 	if (vd->dring_task != NULL) {
2112 		ASSERT(vd->dring_len != 0);
2113 		kmem_free(vd->dring_task,
2114 		    (sizeof (*vd->dring_task)) * vd->dring_len);
2115 	}
2116 
2117 	/* Shut down LDC */
2118 	if (vd->initialized & VD_LDC) {
2119 		if (vd->initialized & VD_DRING)
2120 			(void) ldc_mem_dring_unmap(vd->dring_handle);
2121 		(void) ldc_unreg_callback(vd->ldc_handle);
2122 		(void) ldc_close(vd->ldc_handle);
2123 		(void) ldc_fini(vd->ldc_handle);
2124 	}
2125 
2126 	/* Close any open backing-device slices */
2127 	for (uint_t slice = 0; slice < vd->nslices; slice++) {
2128 		if (vd->ldi_handle[slice] != NULL) {
2129 			PR0("Closing slice %u", slice);
2130 			(void) ldi_close(vd->ldi_handle[slice],
2131 			    vd_open_flags, kcred);
2132 		}
2133 	}
2134 
2135 	/* Free lock */
2136 	if (vd->initialized & VD_LOCKING)
2137 		mutex_destroy(&vd->lock);
2138 
2139 	/* Finally, free the vdisk structure itself */
2140 	kmem_free(vd, sizeof (*vd));
2141 }
2142 
2143 static int
2144 vds_init_vd(vds_t *vds, uint64_t id, char *device_path, uint64_t ldc_id)
2145 {
2146 	int	status;
2147 	vd_t	*vd = NULL;
2148 
2149 
2150 #ifdef lint
2151 	(void) vd;
2152 #endif	/* lint */
2153 
2154 	if ((status = vds_do_init_vd(vds, id, device_path, ldc_id, &vd)) != 0)
2155 		vds_destroy_vd(vd);
2156 
2157 	return (status);
2158 }
2159 
2160 static int
2161 vds_do_get_ldc_id(md_t *md, mde_cookie_t vd_node, mde_cookie_t *channel,
2162     uint64_t *ldc_id)
2163 {
2164 	int	num_channels;
2165 
2166 
2167 	/* Look for channel endpoint child(ren) of the vdisk MD node */
2168 	if ((num_channels = md_scan_dag(md, vd_node,
2169 		    md_find_name(md, VD_CHANNEL_ENDPOINT),
2170 		    md_find_name(md, "fwd"), channel)) <= 0) {
2171 		PRN("No \"%s\" found for virtual disk", VD_CHANNEL_ENDPOINT);
2172 		return (-1);
2173 	}
2174 
2175 	/* Get the "id" value for the first channel endpoint node */
2176 	if (md_get_prop_val(md, channel[0], VD_ID_PROP, ldc_id) != 0) {
2177 		PRN("No \"%s\" property found for \"%s\" of vdisk",
2178 		    VD_ID_PROP, VD_CHANNEL_ENDPOINT);
2179 		return (-1);
2180 	}
2181 
2182 	if (num_channels > 1) {
2183 		PRN("Using ID of first of multiple channels for this vdisk");
2184 	}
2185 
2186 	return (0);
2187 }
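
/*
 * The scan above uses md_scan_dag() to collect every "channel-endpoint"
 * node reachable from the vdisk's MD node along "fwd" arcs into the
 * caller-supplied "channel" array (sized for the whole MD by
 * vds_get_ldc_id() below).  Only channel[0] is consulted, so a vdisk with
 * several endpoints is served over the first one and a warning is logged.
 */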
2188 
2189 static int
2190 vds_get_ldc_id(md_t *md, mde_cookie_t vd_node, uint64_t *ldc_id)
2191 {
2192 	int		num_nodes, status;
2193 	size_t		size;
2194 	mde_cookie_t	*channel;
2195 
2196 
2197 	if ((num_nodes = md_node_count(md)) <= 0) {
2198 		PRN("Invalid node count in Machine Description subtree");
2199 		return (-1);
2200 	}
2201 	size = num_nodes*(sizeof (*channel));
2202 	channel = kmem_zalloc(size, KM_SLEEP);
2203 	status = vds_do_get_ldc_id(md, vd_node, channel, ldc_id);
2204 	kmem_free(channel, size);
2205 
2206 	return (status);
2207 }
2208 
2209 static void
2210 vds_add_vd(vds_t *vds, md_t *md, mde_cookie_t vd_node)
2211 {
2212 	char		*device_path = NULL;
2213 	uint64_t	id = 0, ldc_id = 0;
2214 
2215 
2216 	if (md_get_prop_val(md, vd_node, VD_ID_PROP, &id) != 0) {
2217 		PRN("Error getting vdisk \"%s\"", VD_ID_PROP);
2218 		return;
2219 	}
2220 	PR0("Adding vdisk ID %lu", id);
2221 	if (md_get_prop_str(md, vd_node, VD_BLOCK_DEVICE_PROP,
2222 		&device_path) != 0) {
2223 		PRN("Error getting vdisk \"%s\"", VD_BLOCK_DEVICE_PROP);
2224 		return;
2225 	}
2226 
2227 	if (vds_get_ldc_id(md, vd_node, &ldc_id) != 0) {
2228 		PRN("Error getting LDC ID for vdisk %lu", id);
2229 		return;
2230 	}
2231 
2232 	if (vds_init_vd(vds, id, device_path, ldc_id) != 0) {
2233 		PRN("Failed to add vdisk ID %lu", id);
2234 		return;
2235 	}
2236 }
2237 
2238 static void
2239 vds_remove_vd(vds_t *vds, md_t *md, mde_cookie_t vd_node)
2240 {
2241 	uint64_t	id = 0;
2242 
2243 
2244 	if (md_get_prop_val(md, vd_node, VD_ID_PROP, &id) != 0) {
2245 		PRN("Unable to get \"%s\" property from vdisk's MD node",
2246 		    VD_ID_PROP);
2247 		return;
2248 	}
2249 	PR0("Removing vdisk ID %lu", id);
2250 	if (mod_hash_destroy(vds->vd_table, (mod_hash_key_t)id) != 0)
2251 		PRN("No vdisk entry found for vdisk ID %lu", id);
2252 }
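
/*
 * Note that mod_hash_destroy() on vd_table runs the value destructor
 * registered when the hash was created (vds_destroy_vd(), see
 * vds_do_attach()), so removing the entry is also what closes the backing
 * device slices and shuts down the vdisk's LDC channel.
 */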
2253 
2254 static void
2255 vds_change_vd(vds_t *vds, md_t *prev_md, mde_cookie_t prev_vd_node,
2256     md_t *curr_md, mde_cookie_t curr_vd_node)
2257 {
2258 	char		*curr_dev, *prev_dev;
2259 	uint64_t	curr_id = 0, curr_ldc_id = 0;
2260 	uint64_t	prev_id = 0, prev_ldc_id = 0;
2261 	size_t		len;
2262 
2263 
2264 	/* Validate that vdisk ID has not changed */
2265 	if (md_get_prop_val(prev_md, prev_vd_node, VD_ID_PROP, &prev_id) != 0) {
2266 		PRN("Error getting previous vdisk \"%s\" property",
2267 		    VD_ID_PROP);
2268 		return;
2269 	}
2270 	if (md_get_prop_val(curr_md, curr_vd_node, VD_ID_PROP, &curr_id) != 0) {
2271 		PRN("Error getting current vdisk \"%s\" property", VD_ID_PROP);
2272 		return;
2273 	}
2274 	if (curr_id != prev_id) {
2275 		PRN("Not changing vdisk:  ID changed from %lu to %lu",
2276 		    prev_id, curr_id);
2277 		return;
2278 	}
2279 
2280 	/* Validate that LDC ID has not changed */
2281 	if (vds_get_ldc_id(prev_md, prev_vd_node, &prev_ldc_id) != 0) {
2282 		PRN("Error getting LDC ID for vdisk %lu", prev_id);
2283 		return;
2284 	}
2285 
2286 	if (vds_get_ldc_id(curr_md, curr_vd_node, &curr_ldc_id) != 0) {
2287 		PRN("Error getting LDC ID for vdisk %lu", curr_id);
2288 		return;
2289 	}
2290 	if (curr_ldc_id != prev_ldc_id) {
2291 		_NOTE(NOTREACHED);	/* lint is confused */
2292 		PRN("Not changing vdisk:  "
2293 		    "LDC ID changed from %lu to %lu", prev_ldc_id, curr_ldc_id);
2294 		return;
2295 	}
2296 
2297 	/* Determine whether device path has changed */
2298 	if (md_get_prop_str(prev_md, prev_vd_node, VD_BLOCK_DEVICE_PROP,
2299 		&prev_dev) != 0) {
2300 		PRN("Error getting previous vdisk \"%s\"",
2301 		    VD_BLOCK_DEVICE_PROP);
2302 		return;
2303 	}
2304 	if (md_get_prop_str(curr_md, curr_vd_node, VD_BLOCK_DEVICE_PROP,
2305 		&curr_dev) != 0) {
2306 		PRN("Error getting current vdisk \"%s\"", VD_BLOCK_DEVICE_PROP);
2307 		return;
2308 	}
2309 	if (((len = strlen(curr_dev)) == strlen(prev_dev)) &&
2310 	    (strncmp(curr_dev, prev_dev, len) == 0))
2311 		return;	/* no relevant (supported) change */
2312 
2313 	PR0("Changing vdisk ID %lu", prev_id);
2314 	/* Remove old state, which will close vdisk and reset */
2315 	if (mod_hash_destroy(vds->vd_table, (mod_hash_key_t)prev_id) != 0)
2316 		PRN("No entry found for vdisk ID %lu", prev_id);
2317 	/* Re-initialize vdisk with new state */
2318 	if (vds_init_vd(vds, curr_id, curr_dev, curr_ldc_id) != 0) {
2319 		PRN("Failed to change vdisk ID %lu", curr_id);
2320 		return;
2321 	}
2322 }
2323 
2324 static int
2325 vds_process_md(void *arg, mdeg_result_t *md)
2326 {
2327 	int	i;
2328 	vds_t	*vds = arg;
2329 
2330 
2331 	if (md == NULL)
2332 		return (MDEG_FAILURE);
2333 	ASSERT(vds != NULL);
2334 
2335 	for (i = 0; i < md->removed.nelem; i++)
2336 		vds_remove_vd(vds, md->removed.mdp, md->removed.mdep[i]);
2337 	for (i = 0; i < md->match_curr.nelem; i++)
2338 		vds_change_vd(vds, md->match_prev.mdp, md->match_prev.mdep[i],
2339 		    md->match_curr.mdp, md->match_curr.mdep[i]);
2340 	for (i = 0; i < md->added.nelem; i++)
2341 		vds_add_vd(vds, md->added.mdp, md->added.mdep[i]);
2342 
2343 	return (MDEG_SUCCESS);
2344 }
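
/*
 * The reconciliation order above appears deliberate:  removals are
 * processed first, then in-place changes (each implemented by
 * vds_change_vd() as a remove followed by a re-add), then additions, so
 * an old configuration is torn down before any new configuration for the
 * same vdisk is instantiated within a single MD update.
 */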
2345 
2346 static int
2347 vds_do_attach(dev_info_t *dip)
2348 {
2349 	static char	reg_prop[] = "reg";	/* devinfo ID prop */
2350 
2351 	/* MDEG specification for a (particular) vds node */
2352 	static mdeg_prop_spec_t	vds_prop_spec[] = {
2353 		{MDET_PROP_STR, "name", {VDS_NAME}},
2354 		{MDET_PROP_VAL, "cfg-handle", {0}},
2355 		{MDET_LIST_END, NULL, {0}}};
2356 	static mdeg_node_spec_t	vds_spec = {"virtual-device", vds_prop_spec};
2357 
2358 	/* MDEG specification for matching a vd node */
2359 	static md_prop_match_t	vd_prop_spec[] = {
2360 		{MDET_PROP_VAL, VD_ID_PROP},
2361 		{MDET_LIST_END, NULL}};
2362 	static mdeg_node_match_t vd_spec = {"virtual-device-port",
2363 					    vd_prop_spec};
2364 
2365 	int			status;
2366 	uint64_t		cfg_handle;
2367 	minor_t			instance = ddi_get_instance(dip);
2368 	vds_t			*vds;
2369 
2370 
2371 	/*
2372 	 * The "cfg-handle" property of a vds node in an MD contains the MD's
2373 	 * notion of "instance", or unique identifier, for that node; OBP
2374 	 * stores the value of the "cfg-handle" MD property as the value of
2375 	 * the "reg" property on the node in the device tree it builds from
2376 	 * the MD and passes to Solaris.  Thus, we look up the devinfo node's
2377 	 * "reg" property value to uniquely identify this device instance when
2378 	 * registering with the MD event-generation framework.  If the "reg"
2379 	 * property cannot be found, the device tree state is presumably so
2380 	 * broken that there is no point in continuing.
2381 	 */
2382 	if (!ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, reg_prop)) {
2383 		PRN("vds \"%s\" property does not exist", reg_prop);
2384 		return (DDI_FAILURE);
2385 	}
2386 
2387 	/* Get the MD instance for later MDEG registration */
2388 	cfg_handle = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
2389 	    reg_prop, -1);
2390 
2391 	if (ddi_soft_state_zalloc(vds_state, instance) != DDI_SUCCESS) {
2392 		PRN("Could not allocate state for instance %u", instance);
2393 		return (DDI_FAILURE);
2394 	}
2395 
2396 	if ((vds = ddi_get_soft_state(vds_state, instance)) == NULL) {
2397 		PRN("Could not get state for instance %u", instance);
2398 		ddi_soft_state_free(vds_state, instance);
2399 		return (DDI_FAILURE);
2400 	}
2401 
2402 
2403 	vds->dip	= dip;
2404 	vds->vd_table	= mod_hash_create_ptrhash("vds_vd_table", VDS_NCHAINS,
2405 							vds_destroy_vd,
2406 							sizeof (void *));
2407 	ASSERT(vds->vd_table != NULL);
2408 
2409 	if ((status = ldi_ident_from_dip(dip, &vds->ldi_ident)) != 0) {
2410 		PRN("ldi_ident_from_dip() returned errno %d", status);
2411 		return (DDI_FAILURE);
2412 	}
2413 	vds->initialized |= VDS_LDI;
2414 
2415 	/* Register for MD updates */
2416 	vds_prop_spec[1].ps_val = cfg_handle;
2417 	if (mdeg_register(&vds_spec, &vd_spec, vds_process_md, vds,
2418 		&vds->mdeg) != MDEG_SUCCESS) {
2419 		PRN("Unable to register for MD updates");
2420 		return (DDI_FAILURE);
2421 	}
2422 	vds->initialized |= VDS_MDEG;
2423 
2424 	/* Prevent auto-detaching so driver is available whenever MD changes */
2425 	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip, DDI_NO_AUTODETACH, 1) !=
2426 	    DDI_PROP_SUCCESS) {
2427 		PRN("failed to set \"%s\" property for instance %u",
2428 		    DDI_NO_AUTODETACH, instance);
2429 	}
2430 
2431 	ddi_report_dev(dip);
2432 	return (DDI_SUCCESS);
2433 }
2434 
2435 static int
2436 vds_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
2437 {
2438 	int	status;
2439 
2440 	switch (cmd) {
2441 	case DDI_ATTACH:
2442 		PR0("Attaching");
2443 		if ((status = vds_do_attach(dip)) != DDI_SUCCESS)
2444 			(void) vds_detach(dip, DDI_DETACH);
2445 		return (status);
2446 	case DDI_RESUME:
2447 		PR0("No action required for DDI_RESUME");
2448 		return (DDI_SUCCESS);
2449 	default:
2450 		return (DDI_FAILURE);
2451 	}
2452 }
2453 
2454 static struct dev_ops vds_ops = {
2455 	DEVO_REV,	/* devo_rev */
2456 	0,		/* devo_refcnt */
2457 	ddi_no_info,	/* devo_getinfo */
2458 	nulldev,	/* devo_identify */
2459 	nulldev,	/* devo_probe */
2460 	vds_attach,	/* devo_attach */
2461 	vds_detach,	/* devo_detach */
2462 	nodev,		/* devo_reset */
2463 	NULL,		/* devo_cb_ops */
2464 	NULL,		/* devo_bus_ops */
2465 	nulldev		/* devo_power */
2466 };
2467 
2468 static struct modldrv modldrv = {
2469 	&mod_driverops,
2470 	"virtual disk server v%I%",
2471 	&vds_ops,
2472 };
2473 
2474 static struct modlinkage modlinkage = {
2475 	MODREV_1,
2476 	&modldrv,
2477 	NULL
2478 };
2479 
2480 
2481 int
2482 _init(void)
2483 {
2484 	int		i, status;
2485 
2486 
2487 	if ((status = ddi_soft_state_init(&vds_state, sizeof (vds_t), 1)) != 0)
2488 		return (status);
2489 	if ((status = mod_install(&modlinkage)) != 0) {
2490 		ddi_soft_state_fini(&vds_state);
2491 		return (status);
2492 	}
2493 
2494 	/* Fill in the bit-mask of server-supported operations */
2495 	for (i = 0; i < vds_noperations; i++)
2496 		vds_operations |= 1 << (vds_operation[i].operation - 1);
2497 
2498 	return (0);
2499 }
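
/*
 * Worked example of the bit-mask computed above (operation numbers
 * hypothetical):  if vds_operation[] contained operations numbered 1, 2,
 * and 5, then vds_operations would end up as
 *
 *	(1 << 0) | (1 << 1) | (1 << 4) = 0x13
 *
 * i.e. one bit per supported operation, shifted by (operation - 1).
 */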
2500 
2501 int
2502 _info(struct modinfo *modinfop)
2503 {
2504 	return (mod_info(&modlinkage, modinfop));
2505 }
2506 
2507 int
2508 _fini(void)
2509 {
2510 	int	status;
2511 
2512 
2513 	if ((status = mod_remove(&modlinkage)) != 0)
2514 		return (status);
2515 	ddi_soft_state_fini(&vds_state);
2516 	return (0);
2517 }
2518