/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Virtual disk server
 */


#include <sys/types.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/dkio.h>
#include <sys/file.h>
#include <sys/mdeg.h>
#include <sys/modhash.h>
#include <sys/note.h>
#include <sys/pathname.h>
#include <sys/sunddi.h>
#include <sys/sunldi.h>
#include <sys/sysmacros.h>
#include <sys/vio_common.h>
#include <sys/vdsk_mailbox.h>
#include <sys/vdsk_common.h>
#include <sys/vtoc.h>


/* Virtual disk server initialization flags */
#define	VDS_LDI			0x01
#define	VDS_MDEG		0x02

/* Virtual disk server tunable parameters */
#define	VDS_LDC_RETRIES		3
#define	VDS_NCHAINS		32

/* Identification parameters for MD, synthetic dkio(7i) structures, etc. */
#define	VDS_NAME		"virtual-disk-server"

#define	VD_NAME			"vd"
#define	VD_VOLUME_NAME		"vdisk"
#define	VD_ASCIILABEL		"Virtual Disk"

#define	VD_CHANNEL_ENDPOINT	"channel-endpoint"
#define	VD_ID_PROP		"id"
#define	VD_BLOCK_DEVICE_PROP	"vds-block-device"

/* Virtual disk initialization flags */
#define	VD_LOCKING		0x01
#define	VD_LDC			0x02
#define	VD_DRING		0x04
#define	VD_SID			0x08
#define	VD_SEQ_NUM		0x10

/* Flags for opening/closing backing devices via LDI */
#define	VD_OPEN_FLAGS		(FEXCL | FREAD | FWRITE)

/*
 * By Solaris convention, slice/partition 2 represents the entire disk;
 * unfortunately, this convention does not appear to be codified.
 */
#define	VD_ENTIRE_DISK_SLICE	2

/* Return a cpp token as a string */
#define	STRINGIZE(token)	#token

/*
 * Print a message prefixed with the current function name to the message log
 * (and optionally to the console for verbose boots); these macros use cpp's
 * concatenation of string literals and C99 variable-length-argument-list
 * macros
 */
#define	PRN(...)	_PRN("?%s(): "__VA_ARGS__, "")
#define	_PRN(format, ...)					\
	cmn_err(CE_CONT, format"%s", __func__, __VA_ARGS__)
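
/*
 * For example, PRN("slice %u", slice) expands (via _PRN and string-literal
 * concatenation) to cmn_err(CE_CONT, "?%s(): slice %u" "%s", __func__,
 * slice, ""); the trailing empty-string argument lets PRN() also be called
 * with a format string and no additional arguments.
 */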

/* Return a pointer to the "i"th vdisk dring element */
#define	VD_DRING_ELEM(i)	((vd_dring_entry_t *)(void *)	\
			(vd->dring + (i)*vd->descriptor_size))

/* Return the virtual disk client's type as a string (for use in messages) */
#define	VD_CLIENT(vd)							\
	(((vd)->xfer_mode == VIO_DESC_MODE) ? "in-band client" :	\
	    (((vd)->xfer_mode == VIO_DRING_MODE) ? "dring client" :	\
		(((vd)->xfer_mode == 0) ? "null client" :		\
		    "unsupported client")))

/* Debugging macros */
#ifdef DEBUG
#define	PR0 if (vd_msglevel > 0)	PRN
#define	PR1 if (vd_msglevel > 1)	PRN
#define	PR2 if (vd_msglevel > 2)	PRN

#define	VD_DUMP_DRING_ELEM(elem)					\
	PRN("dst:%x op:%x st:%u nb:%lx addr:%lx ncook:%u\n",		\
	    elem->hdr.dstate,						\
	    elem->payload.operation,					\
	    elem->payload.status,					\
	    elem->payload.nbytes,					\
	    elem->payload.addr,						\
	    elem->payload.ncookies);

#else	/* !DEBUG */
#define	PR0(...)
#define	PR1(...)
#define	PR2(...)

#define	VD_DUMP_DRING_ELEM(elem)

#endif	/* DEBUG */


/*
 * Soft state structure for a vds instance
 */
typedef struct vds {
	uint_t		initialized;	/* driver inst initialization flags */
	dev_info_t	*dip;		/* driver inst devinfo pointer */
	ldi_ident_t	ldi_ident;	/* driver's identifier for LDI */
	mod_hash_t	*vd_table;	/* table of virtual disks served */
	mdeg_handle_t	mdeg;		/* handle for MDEG operations */
} vds_t;

/*
 * Types of descriptor-processing tasks
 */
typedef enum vd_task_type {
	VD_NONFINAL_RANGE_TASK,	/* task for intermediate descriptor in range */
	VD_FINAL_RANGE_TASK,	/* task for last in a range of descriptors */
} vd_task_type_t;

/*
 * Structure describing the task for processing a descriptor
 */
typedef struct vd_task {
	struct vd		*vd;		/* vd instance task is for */
	vd_task_type_t		type;		/* type of descriptor task */
	int			index;		/* dring elem index for task */
	vio_msg_t		*msg;		/* VIO message task is for */
	size_t			msglen;		/* length of message content */
	size_t			msgsize;	/* size of message buffer */
	vd_dring_payload_t	*request;	/* request task will perform */
	struct buf		buf;		/* buf(9s) for I/O request */
} vd_task_t;

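/*
 * One vd_task_t is preallocated for each dring element when a client
 * registers its descriptor ring (see the dring_task array in vd_t below);
 * in-band (descriptor-in-message) clients instead reuse the single
 * inband_task embedded in the vd_t.
 */
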
/*
 * Soft state structure for a virtual disk instance
 */
typedef struct vd {
	uint_t			initialized;	/* vdisk initialization flags */
	vds_t			*vds;		/* server for this vdisk */
	ddi_taskq_t		*startq;	/* queue for I/O start tasks */
	ddi_taskq_t		*completionq;	/* queue for completion tasks */
	ldi_handle_t		ldi_handle[V_NUMPAR];	/* LDI slice handles */
	dev_t			dev[V_NUMPAR];	/* dev numbers for slices */
	uint_t			nslices;	/* number of slices */
	size_t			vdisk_size;	/* number of blocks in vdisk */
	vd_disk_type_t		vdisk_type;	/* slice or entire disk */
	boolean_t		pseudo;		/* underlying pseudo dev */
	struct dk_geom		dk_geom;	/* synthetic for slice type */
	struct vtoc		vtoc;		/* synthetic for slice type */
	ldc_status_t		ldc_state;	/* LDC connection state */
	ldc_handle_t		ldc_handle;	/* handle for LDC comm */
	size_t			max_msglen;	/* largest LDC message len */
	vd_state_t		state;		/* client handshake state */
	uint8_t			xfer_mode;	/* transfer mode with client */
	uint32_t		sid;		/* client's session ID */
	uint64_t		seq_num;	/* message sequence number */
	uint64_t		dring_ident;	/* identifier of dring */
	ldc_dring_handle_t	dring_handle;	/* handle for dring ops */
	uint32_t		descriptor_size;	/* num bytes in desc */
	uint32_t		dring_len;	/* number of dring elements */
	caddr_t			dring;		/* address of dring */
	vd_task_t		inband_task;	/* task for inband descriptor */
	vd_task_t		*dring_task;	/* tasks for dring elements */

	kmutex_t		lock;		/* protects variables below */
	boolean_t		enabled;	/* is vdisk enabled? */
	boolean_t		reset_state;	/* reset connection state? */
	boolean_t		reset_ldc;	/* reset LDC channel? */
} vd_t;

typedef struct vds_operation {
	uint8_t	operation;
	int	(*start)(vd_task_t *task);
	void	(*complete)(void *arg);
} vds_operation_t;

typedef struct vd_ioctl {
	uint8_t		operation;		/* vdisk operation */
	const char	*operation_name;	/* vdisk operation name */
	size_t		nbytes;			/* size of operation buffer */
	int		cmd;			/* corresponding ioctl cmd */
	const char	*cmd_name;		/* ioctl cmd name */
	void		*arg;			/* ioctl cmd argument */
	/* convert input vd_buf to output ioctl_arg */
	void		(*copyin)(void *vd_buf, void *ioctl_arg);
	/* convert input ioctl_arg to output vd_buf */
	void		(*copyout)(void *ioctl_arg, void *vd_buf);
} vd_ioctl_t;

/* Define trivial copyin/copyout conversion function flag */
#define	VD_IDENTITY	((void (*)(void *, void *))-1)


static int	vds_ldc_retries = VDS_LDC_RETRIES;
static void	*vds_state;
static uint64_t	vds_operations;	/* see vds_operation[] definition below */

static int	vd_open_flags = VD_OPEN_FLAGS;

/*
 * Supported protocol version pairs, from highest (newest) to lowest (oldest)
 *
 * Each supported major version should appear only once, paired with (and only
 * with) its highest supported minor version number (as the protocol requires
 * supporting all lower minor version numbers as well)
 */
static const vio_ver_t	vds_version[] = {{1, 0}};
static const size_t	vds_num_versions =
    sizeof (vds_version)/sizeof (vds_version[0]);

#ifdef DEBUG
static int	vd_msglevel;
#endif /* DEBUG */

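
/*
 * Request processing overview:  each supported VD_OP_* operation has a
 * "start" function (see vds_operation[] below).  If the start function
 * completes the request synchronously, its status is recorded immediately;
 * if it returns EINPROGRESS (as vd_start_bio() does once the block I/O has
 * been issued), the corresponding "complete" function is dispatched on the
 * vdisk's completionq to wait for the I/O, copy any data back to the
 * client, and send the ack or nack reply.
 */
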
static int
vd_start_bio(vd_task_t *task)
{
	int			status		= 0;
	vd_t			*vd		= task->vd;
	vd_dring_payload_t	*request	= task->request;
	struct buf		*buf		= &task->buf;


	ASSERT(vd != NULL);
	ASSERT(request != NULL);
	ASSERT(request->slice < vd->nslices);
	ASSERT((request->operation == VD_OP_BREAD) ||
	    (request->operation == VD_OP_BWRITE));

	if (request->nbytes == 0)
		return (EINVAL);	/* no service for trivial requests */

	PR1("%s %lu bytes at block %lu",
	    (request->operation == VD_OP_BREAD) ? "Read" : "Write",
	    request->nbytes, request->addr);

	bioinit(buf);
	buf->b_flags		= B_BUSY;
	buf->b_bcount		= request->nbytes;
	buf->b_un.b_addr	= kmem_alloc(buf->b_bcount, KM_SLEEP);
	buf->b_lblkno		= request->addr;
	buf->b_edev		= vd->dev[request->slice];

	if (request->operation == VD_OP_BREAD) {
		buf->b_flags |= B_READ;
	} else {
		buf->b_flags |= B_WRITE;
		/* Get data to write from client */
		if ((status = ldc_mem_copy(vd->ldc_handle, buf->b_un.b_addr, 0,
		    &request->nbytes, request->cookie,
		    request->ncookies, LDC_COPY_IN)) != 0) {
			PRN("ldc_mem_copy() returned errno %d "
			    "copying from client", status);
		}
	}

	/* Start the block I/O */
	if ((status == 0) &&
	    ((status = ldi_strategy(vd->ldi_handle[request->slice], buf)) == 0))
		return (EINPROGRESS);	/* will complete on completionq */

	/* Clean up after error */
	kmem_free(buf->b_un.b_addr, buf->b_bcount);
	biofini(buf);
	return (status);
}

static int
send_msg(ldc_handle_t ldc_handle, void *msg, size_t msglen)
{
	int	retry, status;
	size_t	nbytes;


	for (retry = 0, status = EWOULDBLOCK;
	    retry < vds_ldc_retries && status == EWOULDBLOCK;
	    retry++) {
		PR1("ldc_write() attempt %d", (retry + 1));
		nbytes = msglen;
		status = ldc_write(ldc_handle, msg, &nbytes);
	}

	if (status != 0) {
		PRN("ldc_write() returned errno %d", status);
		return (status);
	} else if (nbytes != msglen) {
		PRN("ldc_write() performed only partial write");
		return (EIO);
	}

	PR1("SENT %lu bytes", msglen);
	return (0);
}

static void
vd_need_reset(vd_t *vd, boolean_t reset_ldc)
{
	mutex_enter(&vd->lock);
	vd->reset_state	= B_TRUE;
	vd->reset_ldc	= reset_ldc;
	mutex_exit(&vd->lock);
}

/*
 * Reset the state of the connection with a client, if needed; reset the LDC
 * transport as well, if needed.  This function should only be called from the
 * "startq", as it waits for tasks on the "completionq" and will deadlock if
 * called from that queue.
 */
static void
vd_reset_if_needed(vd_t *vd)
{
	int	status = 0;


	mutex_enter(&vd->lock);
	if (!vd->reset_state) {
		ASSERT(!vd->reset_ldc);
		mutex_exit(&vd->lock);
		return;
	}
	mutex_exit(&vd->lock);


	PR0("Resetting connection state with %s", VD_CLIENT(vd));

	/*
	 * Let any asynchronous I/O complete before possibly pulling the rug
	 * out from under it; defer checking vd->reset_ldc, as one of the
	 * asynchronous tasks might set it
	 */
	ddi_taskq_wait(vd->completionq);


	if ((vd->initialized & VD_DRING) &&
	    ((status = ldc_mem_dring_unmap(vd->dring_handle)) != 0))
		PRN("ldc_mem_dring_unmap() returned errno %d", status);

	if (vd->dring_task != NULL) {
		ASSERT(vd->dring_len != 0);
		kmem_free(vd->dring_task,
		    (sizeof (*vd->dring_task)) * vd->dring_len);
		vd->dring_task = NULL;
	}


	mutex_enter(&vd->lock);
	if (vd->reset_ldc && ((status = ldc_reset(vd->ldc_handle)) != 0))
		PRN("ldc_reset() returned errno %d", status);

	vd->initialized	&= ~(VD_SID | VD_SEQ_NUM | VD_DRING);
	vd->state	= VD_STATE_INIT;
	vd->max_msglen	= sizeof (vio_msg_t);	/* baseline vio message size */

	vd->reset_state	= B_FALSE;
	vd->reset_ldc	= B_FALSE;
	mutex_exit(&vd->lock);
}

static int
vd_mark_elem_done(vd_t *vd, int idx, int elem_status)
{
	boolean_t		accepted;
	int			status;
	vd_dring_entry_t	*elem = VD_DRING_ELEM(idx);


	/* Acquire the element */
	if ((status = ldc_mem_dring_acquire(vd->dring_handle, idx, idx)) != 0) {
		PRN("ldc_mem_dring_acquire() returned errno %d", status);
		return (status);
	}

	/* Set the element's status and mark it done */
	accepted = (elem->hdr.dstate == VIO_DESC_ACCEPTED);
	if (accepted) {
		elem->payload.status	= elem_status;
		elem->hdr.dstate	= VIO_DESC_DONE;
	} else {
		/* Perhaps client timed out waiting for I/O... */
		PRN("element %u no longer \"accepted\"", idx);
		VD_DUMP_DRING_ELEM(elem);
	}
	/* Release the element */
	if ((status = ldc_mem_dring_release(vd->dring_handle, idx, idx)) != 0) {
		PRN("ldc_mem_dring_release() returned errno %d", status);
		return (status);
	}

	return (accepted ? 0 : EINVAL);
}

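/*
 * Dring descriptor states as the server sees them:  the client marks an
 * element VIO_DESC_READY, vd_process_element() moves it to VIO_DESC_ACCEPTED
 * when it picks the element up, and vd_mark_elem_done() above moves it to
 * VIO_DESC_DONE (recording the request status) once the operation finishes.
 */
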
static void
vd_complete_bio(void *arg)
{
	int			status		= 0;
	vd_task_t		*task		= (vd_task_t *)arg;
	vd_t			*vd		= task->vd;
	vd_dring_payload_t	*request	= task->request;
	struct buf		*buf		= &task->buf;


	ASSERT(vd != NULL);
	ASSERT(request != NULL);
	ASSERT(task->msg != NULL);
	ASSERT(task->msglen >= sizeof (*task->msg));
	ASSERT(task->msgsize >= task->msglen);

	/* Wait for the I/O to complete */
	request->status = biowait(buf);

	/* If data was read, copy it to the client */
	if ((request->status == 0) && (request->operation == VD_OP_BREAD) &&
	    ((status = ldc_mem_copy(vd->ldc_handle, buf->b_un.b_addr, 0,
	    &request->nbytes, request->cookie, request->ncookies,
	    LDC_COPY_OUT)) != 0)) {
		PRN("ldc_mem_copy() returned errno %d copying to client",
		    status);
	}

	/* Release I/O buffer */
	kmem_free(buf->b_un.b_addr, buf->b_bcount);
	biofini(buf);

	/* Update the dring element for a dring client */
	if ((status == 0) && (vd->xfer_mode == VIO_DRING_MODE))
		status = vd_mark_elem_done(vd, task->index, request->status);

	/*
	 * If a transport error occurred, arrange to "nack" the message when
	 * the final task in the descriptor element range completes
	 */
	if (status != 0)
		task->msg->tag.vio_subtype = VIO_SUBTYPE_NACK;

	/*
	 * Only the final task for a range of elements will respond to and
	 * free the message
	 */
	if (task->type == VD_NONFINAL_RANGE_TASK)
		return;

	/*
	 * Send the "ack" or "nack" back to the client; if sending the message
	 * via LDC fails, arrange to reset both the connection state and LDC
	 * itself
	 */
	PR1("Sending %s",
	    (task->msg->tag.vio_subtype == VIO_SUBTYPE_ACK) ? "ACK" : "NACK");
	if (send_msg(vd->ldc_handle, task->msg, task->msglen) != 0)
		vd_need_reset(vd, B_TRUE);

	/* Free the message now that it has been used for the reply */
	kmem_free(task->msg, task->msgsize);
}

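/*
 * The helpers below convert between the on-the-wire vdisk structures
 * (vd_geom_t, vd_vtoc_t) and the native dkio(7i) structures (struct dk_geom,
 * struct vtoc) passed to ldi_ioctl(); they are installed as the "copyin" and
 * "copyout" handlers in the vd_ioctl_t table built by vd_ioctl().
 */
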
"ACK" : "NACK"); 487*d10e4ef2Snarayan if (send_msg(vd->ldc_handle, task->msg, task->msglen) != 0) 488*d10e4ef2Snarayan vd_need_reset(vd, B_TRUE); 4891ae08745Sheppo 490*d10e4ef2Snarayan /* Free the message now that it has been used for the reply */ 491*d10e4ef2Snarayan kmem_free(task->msg, task->msgsize); 4921ae08745Sheppo } 4931ae08745Sheppo 4940a55fbb7Slm66018 static void 4950a55fbb7Slm66018 vd_geom2dk_geom(void *vd_buf, void *ioctl_arg) 4960a55fbb7Slm66018 { 4970a55fbb7Slm66018 VD_GEOM2DK_GEOM((vd_geom_t *)vd_buf, (struct dk_geom *)ioctl_arg); 4980a55fbb7Slm66018 } 4990a55fbb7Slm66018 5000a55fbb7Slm66018 static void 5010a55fbb7Slm66018 vd_vtoc2vtoc(void *vd_buf, void *ioctl_arg) 5020a55fbb7Slm66018 { 5030a55fbb7Slm66018 VD_VTOC2VTOC((vd_vtoc_t *)vd_buf, (struct vtoc *)ioctl_arg); 5040a55fbb7Slm66018 } 5050a55fbb7Slm66018 5060a55fbb7Slm66018 static void 5070a55fbb7Slm66018 dk_geom2vd_geom(void *ioctl_arg, void *vd_buf) 5080a55fbb7Slm66018 { 5090a55fbb7Slm66018 DK_GEOM2VD_GEOM((struct dk_geom *)ioctl_arg, (vd_geom_t *)vd_buf); 5100a55fbb7Slm66018 } 5110a55fbb7Slm66018 5120a55fbb7Slm66018 static void 5130a55fbb7Slm66018 vtoc2vd_vtoc(void *ioctl_arg, void *vd_buf) 5140a55fbb7Slm66018 { 5150a55fbb7Slm66018 VTOC2VD_VTOC((struct vtoc *)ioctl_arg, (vd_vtoc_t *)vd_buf); 5160a55fbb7Slm66018 } 5170a55fbb7Slm66018 5181ae08745Sheppo static int 5190a55fbb7Slm66018 vd_do_slice_ioctl(vd_t *vd, int cmd, void *ioctl_arg) 5201ae08745Sheppo { 5211ae08745Sheppo switch (cmd) { 5221ae08745Sheppo case DKIOCGGEOM: 5230a55fbb7Slm66018 ASSERT(ioctl_arg != NULL); 5240a55fbb7Slm66018 bcopy(&vd->dk_geom, ioctl_arg, sizeof (vd->dk_geom)); 5251ae08745Sheppo return (0); 5261ae08745Sheppo case DKIOCGVTOC: 5270a55fbb7Slm66018 ASSERT(ioctl_arg != NULL); 5280a55fbb7Slm66018 bcopy(&vd->vtoc, ioctl_arg, sizeof (vd->vtoc)); 5291ae08745Sheppo return (0); 5301ae08745Sheppo default: 5311ae08745Sheppo return (ENOTSUP); 5321ae08745Sheppo } 5331ae08745Sheppo } 5341ae08745Sheppo 5351ae08745Sheppo static int 5360a55fbb7Slm66018 vd_do_ioctl(vd_t *vd, vd_dring_payload_t *request, void* buf, vd_ioctl_t *ioctl) 5371ae08745Sheppo { 5381ae08745Sheppo int rval = 0, status; 5391ae08745Sheppo size_t nbytes = request->nbytes; /* modifiable copy */ 5401ae08745Sheppo 5411ae08745Sheppo 5421ae08745Sheppo ASSERT(request->slice < vd->nslices); 5431ae08745Sheppo PR0("Performing %s", ioctl->operation_name); 5441ae08745Sheppo 5450a55fbb7Slm66018 /* Get data from client and convert, if necessary */ 5460a55fbb7Slm66018 if (ioctl->copyin != NULL) { 5471ae08745Sheppo ASSERT(nbytes != 0 && buf != NULL); 5481ae08745Sheppo PR1("Getting \"arg\" data from client"); 5491ae08745Sheppo if ((status = ldc_mem_copy(vd->ldc_handle, buf, 0, &nbytes, 5501ae08745Sheppo request->cookie, request->ncookies, 5511ae08745Sheppo LDC_COPY_IN)) != 0) { 5521ae08745Sheppo PRN("ldc_mem_copy() returned errno %d " 5531ae08745Sheppo "copying from client", status); 5541ae08745Sheppo return (status); 5551ae08745Sheppo } 5560a55fbb7Slm66018 5570a55fbb7Slm66018 /* Convert client's data, if necessary */ 5580a55fbb7Slm66018 if (ioctl->copyin == VD_IDENTITY) /* use client buffer */ 5590a55fbb7Slm66018 ioctl->arg = buf; 5600a55fbb7Slm66018 else /* convert client vdisk operation data to ioctl data */ 5610a55fbb7Slm66018 (ioctl->copyin)(buf, (void *)ioctl->arg); 5621ae08745Sheppo } 5631ae08745Sheppo 5641ae08745Sheppo /* 5651ae08745Sheppo * Handle single-slice block devices internally; otherwise, have the 5661ae08745Sheppo * real driver perform the ioctl() 5671ae08745Sheppo */ 5681ae08745Sheppo if 
	if (vd->vdisk_type == VD_DISK_TYPE_SLICE && !vd->pseudo) {
		if ((status = vd_do_slice_ioctl(vd, ioctl->cmd,
		    (void *)ioctl->arg)) != 0)
			return (status);
	} else if ((status = ldi_ioctl(vd->ldi_handle[request->slice],
	    ioctl->cmd, (intptr_t)ioctl->arg, (vd_open_flags | FKIOCTL),
	    kcred, &rval)) != 0) {
		PR0("ldi_ioctl(%s) = errno %d", ioctl->cmd_name, status);
		return (status);
	}
#ifdef DEBUG
	if (rval != 0) {
		PRN("%s set rval = %d, which is not being returned to client",
		    ioctl->cmd_name, rval);
	}
#endif /* DEBUG */

	/* Convert data and send to client, if necessary */
	if (ioctl->copyout != NULL) {
		ASSERT(nbytes != 0 && buf != NULL);
		PR1("Sending \"arg\" data to client");

		/* Convert ioctl data to vdisk operation data, if necessary */
		if (ioctl->copyout != VD_IDENTITY)
			(ioctl->copyout)((void *)ioctl->arg, buf);

		if ((status = ldc_mem_copy(vd->ldc_handle, buf, 0, &nbytes,
		    request->cookie, request->ncookies,
		    LDC_COPY_OUT)) != 0) {
			PRN("ldc_mem_copy() returned errno %d "
			    "copying to client", status);
			return (status);
		}
	}

	return (status);
}

/*
 * Open any slices which have become non-empty as a result of performing a
 * set-VTOC operation for the client.
 *
 * When serving a full disk, vds attempts to exclusively open all of the
 * disk's slices to prevent another thread or process in the service domain
 * from "stealing" a slice or from performing I/O to a slice while a vds
 * client is accessing it.  Unfortunately, underlying drivers, such as sd(7d)
 * and cmdk(7d), return an error when attempting to open the device file for a
 * slice which is currently empty according to the VTOC.  This driver behavior
 * means that vds must skip opening empty slices when initializing a vdisk for
 * full-disk service and try to open slices that become non-empty (via a
 * set-VTOC operation) during use of the full disk in order to begin serving
 * such slices to the client.  This approach has an inherent (and therefore
 * unavoidable) race condition; it also means that failure to open a
 * newly-non-empty slice has different semantics than failure to open an
 * initially-non-empty slice:  Due to driver behavior, opening a
 * newly-non-empty slice is a necessary side effect of vds performing a
 * (successful) set-VTOC operation for a client on an in-service (and in-use)
 * disk in order to begin serving the slice; failure of this side-effect
 * operation does not mean that the client's set-VTOC operation failed or that
 * operations on other slices must fail.  Therefore, this function prints an
 * error message on failure to open a slice, but does not return an error to
 * its caller--unlike failure to open a slice initially, which results in an
 * error that prevents serving the vdisk (and thereby requires an
 * administrator to resolve the problem).  Note that, apart from another
 * thread or process opening a new slice during the race-condition window,
 * failure to open a slice in this function will likely indicate an underlying
 * drive problem, which will also likely become evident in errors returned by
 * operations on other slices, and which will require administrative
 * intervention and possibly servicing the drive.
 */
static void
vd_open_new_slices(vd_t *vd)
{
	int		rval, status;
	struct vtoc	vtoc;


	/* Get the (new) VTOC for updated slice sizes */
	if ((status = ldi_ioctl(vd->ldi_handle[0], DKIOCGVTOC, (intptr_t)&vtoc,
	    (vd_open_flags | FKIOCTL), kcred, &rval)) != 0) {
		PRN("ldi_ioctl(DKIOCGVTOC) returned errno %d", status);
		return;
	}

	/* Open any newly-non-empty slices */
	for (int slice = 0; slice < vd->nslices; slice++) {
		/* Skip zero-length slices */
		if (vtoc.v_part[slice].p_size == 0) {
			if (vd->ldi_handle[slice] != NULL)
				PR0("Open slice %u now has zero length", slice);
			continue;
		}

		/* Skip already-open slices */
		if (vd->ldi_handle[slice] != NULL)
			continue;

		PR0("Opening newly-non-empty slice %u", slice);
		if ((status = ldi_open_by_dev(&vd->dev[slice], OTYP_BLK,
		    vd_open_flags, kcred, &vd->ldi_handle[slice],
		    vd->vds->ldi_ident)) != 0) {
			PRN("ldi_open_by_dev() returned errno %d "
			    "for slice %u", status, slice);
		}
	}
}

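/*
 * vd_ioctl() below maps each non-block vdisk operation to the dkio(7i)
 * ioctl that implements it, along with the expected payload size and
 * optional conversion functions.  Payload sizes are rounded up to a
 * multiple of 8 bytes (RNDSIZE) because LDC memory operations require
 * 8-byte multiples.
 */
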
#define	RNDSIZE(expr) P2ROUNDUP(sizeof (expr), sizeof (uint64_t))
static int
vd_ioctl(vd_task_t *task)
{
	int		i, status;
	void		*buf = NULL;
	struct dk_geom	dk_geom = {0};
	struct vtoc	vtoc = {0};
	vd_t		*vd		= task->vd;
	vd_dring_payload_t	*request	= task->request;
	vd_ioctl_t	ioctl[] = {
		/* Command (no-copy) operations */
		{VD_OP_FLUSH, STRINGIZE(VD_OP_FLUSH), 0,
		    DKIOCFLUSHWRITECACHE, STRINGIZE(DKIOCFLUSHWRITECACHE),
		    NULL, NULL, NULL},

		/* "Get" (copy-out) operations */
		{VD_OP_GET_WCE, STRINGIZE(VD_OP_GET_WCE), RNDSIZE(int),
		    DKIOCGETWCE, STRINGIZE(DKIOCGETWCE),
		    NULL, NULL, VD_IDENTITY},
		{VD_OP_GET_DISKGEOM, STRINGIZE(VD_OP_GET_DISKGEOM),
		    RNDSIZE(vd_geom_t),
		    DKIOCGGEOM, STRINGIZE(DKIOCGGEOM),
		    &dk_geom, NULL, dk_geom2vd_geom},
		{VD_OP_GET_VTOC, STRINGIZE(VD_OP_GET_VTOC), RNDSIZE(vd_vtoc_t),
		    DKIOCGVTOC, STRINGIZE(DKIOCGVTOC),
		    &vtoc, NULL, vtoc2vd_vtoc},

		/* "Set" (copy-in) operations */
		{VD_OP_SET_WCE, STRINGIZE(VD_OP_SET_WCE), RNDSIZE(int),
		    DKIOCSETWCE, STRINGIZE(DKIOCSETWCE),
		    NULL, VD_IDENTITY, NULL},
		{VD_OP_SET_DISKGEOM, STRINGIZE(VD_OP_SET_DISKGEOM),
		    RNDSIZE(vd_geom_t),
		    DKIOCSGEOM, STRINGIZE(DKIOCSGEOM),
		    &dk_geom, vd_geom2dk_geom, NULL},
		{VD_OP_SET_VTOC, STRINGIZE(VD_OP_SET_VTOC), RNDSIZE(vd_vtoc_t),
		    DKIOCSVTOC, STRINGIZE(DKIOCSVTOC),
		    &vtoc, vd_vtoc2vtoc, NULL},
	};
	size_t		nioctls = (sizeof (ioctl))/(sizeof (ioctl[0]));


	ASSERT(vd != NULL);
	ASSERT(request != NULL);
	ASSERT(request->slice < vd->nslices);

	/*
	 * Determine ioctl corresponding to caller's "operation" and
	 * validate caller's "nbytes"
	 */
	for (i = 0; i < nioctls; i++) {
		if (request->operation == ioctl[i].operation) {
			/* LDC memory operations require 8-byte multiples */
			ASSERT(ioctl[i].nbytes % sizeof (uint64_t) == 0);

			if (request->nbytes != ioctl[i].nbytes) {
				PRN("%s:  Expected nbytes = %lu, got %lu",
				    ioctl[i].operation_name, ioctl[i].nbytes,
				    request->nbytes);
				return (EINVAL);
			}

			break;
		}
	}
	ASSERT(i < nioctls);	/* because "operation" already validated */

	if (request->nbytes)
		buf = kmem_zalloc(request->nbytes, KM_SLEEP);
	status = vd_do_ioctl(vd, request, buf, &ioctl[i]);
	if (request->nbytes)
		kmem_free(buf, request->nbytes);
	if ((request->operation == VD_OP_SET_VTOC) &&
	    (vd->vdisk_type == VD_DISK_TYPE_DISK))
		vd_open_new_slices(vd);
	PR0("Returning %d", status);
	return (status);
}

/*
 * Define the supported operations once the functions for performing them have
 * been defined
 */
static const vds_operation_t	vds_operation[] = {
	{VD_OP_BREAD,		vd_start_bio,	vd_complete_bio},
	{VD_OP_BWRITE,		vd_start_bio,	vd_complete_bio},
	{VD_OP_FLUSH,		vd_ioctl,	NULL},
	{VD_OP_GET_WCE,		vd_ioctl,	NULL},
	{VD_OP_SET_WCE,		vd_ioctl,	NULL},
	{VD_OP_GET_VTOC,	vd_ioctl,	NULL},
	{VD_OP_SET_VTOC,	vd_ioctl,	NULL},
	{VD_OP_GET_DISKGEOM,	vd_ioctl,	NULL},
	{VD_OP_SET_DISKGEOM,	vd_ioctl,	NULL}
};

static const size_t	vds_noperations =
	(sizeof (vds_operation))/(sizeof (vds_operation[0]));

/*
 * Process a task specifying a client I/O request
 */
static int
vd_process_task(vd_task_t *task)
{
	int	i, status;
	vd_t	*vd		= task->vd;
	vd_dring_payload_t	*request	= task->request;


	ASSERT(vd != NULL);
	ASSERT(request != NULL);

	/* Range-check slice */
	if (request->slice >= vd->nslices) {
		PRN("Invalid \"slice\" %u (max %u) for virtual disk",
		    request->slice, (vd->nslices - 1));
		return (EINVAL);
	}

	/* Find the requested operation */
	for (i = 0; i < vds_noperations; i++)
		if (request->operation == vds_operation[i].operation)
			break;
	if (i == vds_noperations) {
		PRN("Unsupported operation %u", request->operation);
		return (ENOTSUP);
	}

	/* Start the operation */
	if ((status = vds_operation[i].start(task)) != EINPROGRESS) {
		request->status = status;	/* op succeeded or failed */
		return (0);			/* but request completed */
	}

	ASSERT(vds_operation[i].complete != NULL);	/* debug case */
	if (vds_operation[i].complete == NULL) {	/* non-debug case */
		PRN("Unexpected return of EINPROGRESS "
		    "with no I/O completion handler");
		request->status = EIO;	/* operation failed */
		return (0);		/* but request completed */
	}

	/* Queue a task to complete the operation */
	status = ddi_taskq_dispatch(vd->completionq, vds_operation[i].complete,
	    task, DDI_SLEEP);
	/* ddi_taskq_dispatch(9f) guarantees success with DDI_SLEEP */
	ASSERT(status == DDI_SUCCESS);

	PR1("Operation in progress");
	return (EINPROGRESS);	/* completion handler will finish request */
}

/*
 * Return true if the "type", "subtype", and "env" fields of the "tag" first
 * argument match the corresponding remaining arguments; otherwise, return false
 */
boolean_t
vd_msgtype(vio_msg_tag_t *tag, int type, int subtype, int env)
{
	return ((tag->vio_msgtype == type) &&
	    (tag->vio_subtype == subtype) &&
	    (tag->vio_subtype_env == env)) ? B_TRUE : B_FALSE;
}

/*
 * Check whether the major/minor version specified in "ver_msg" is supported
 * by this server.
 */
static boolean_t
vds_supported_version(vio_ver_msg_t *ver_msg)
{
	for (int i = 0; i < vds_num_versions; i++) {
		ASSERT(vds_version[i].major > 0);
		ASSERT((i == 0) ||
		    (vds_version[i].major < vds_version[i-1].major));

		/*
		 * If the major versions match, adjust the minor version, if
		 * necessary, down to the highest value supported by this
		 * server and return true so this message will get "ack"ed;
		 * the client should also support all minor versions lower
		 * than the value it sent
		 */
		if (ver_msg->ver_major == vds_version[i].major) {
			if (ver_msg->ver_minor > vds_version[i].minor) {
				PR0("Adjusting minor version from %u to %u",
				    ver_msg->ver_minor, vds_version[i].minor);
				ver_msg->ver_minor = vds_version[i].minor;
			}
			return (B_TRUE);
		}

		/*
		 * If the message contains a higher major version number, set
		 * the message's major/minor versions to the current values
		 * and return false, so this message will get "nack"ed with
		 * these values, and the client will potentially try again
		 * with the same or a lower version
		 */
		if (ver_msg->ver_major > vds_version[i].major) {
			ver_msg->ver_major = vds_version[i].major;
			ver_msg->ver_minor = vds_version[i].minor;
			return (B_FALSE);
		}

		/*
		 * Otherwise, the message's major version is less than the
		 * current major version, so continue the loop to the next
		 * (lower) supported version
		 */
	}

	/*
	 * No common version was found; "ground" the version pair in the
	 * message to terminate negotiation
	 */
	ver_msg->ver_major = 0;
	ver_msg->ver_minor = 0;
	return (B_FALSE);
}

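/*
 * Example negotiation against the single supported version pair {1, 0}:  a
 * client proposing 2.x is nacked with the message rewritten to 1.0 (so it
 * may retry with a lower version); a client proposing 1.2 is acked with the
 * minor version adjusted down to 0; a client proposing a major version of 0
 * is nacked with the version pair grounded to 0.0, ending negotiation.
 */
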
/*
 * Process a version message from a client.  vds expects to receive version
 * messages from clients seeking service, but never issues version messages
 * itself; therefore, vds can ACK or NACK client version messages, but does
 * not expect to receive version-message ACKs or NACKs (and will treat such
 * messages as invalid).
 */
static int
vd_process_ver_msg(vd_t *vd, vio_msg_t *msg, size_t msglen)
{
	vio_ver_msg_t	*ver_msg = (vio_ver_msg_t *)msg;


	ASSERT(msglen >= sizeof (msg->tag));

	if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO,
	    VIO_VER_INFO)) {
		return (ENOMSG);	/* not a version message */
	}

	if (msglen != sizeof (*ver_msg)) {
		PRN("Expected %lu-byte version message; "
		    "received %lu bytes", sizeof (*ver_msg), msglen);
		return (EBADMSG);
	}

	if (ver_msg->dev_class != VDEV_DISK) {
		PRN("Expected device class %u (disk); received %u",
		    VDEV_DISK, ver_msg->dev_class);
		return (EBADMSG);
	}

	/*
	 * We're talking to the expected kind of client; set our device class
	 * for "ack/nack" back to the client
	 */
	ver_msg->dev_class = VDEV_DISK_SERVER;

	/*
	 * Check whether the (valid) version message specifies a version
	 * supported by this server.  If the version is not supported, return
	 * EBADMSG so the message will get "nack"ed; vds_supported_version()
	 * will have updated the message with a supported version for the
	 * client to consider
	 */
	if (!vds_supported_version(ver_msg))
		return (EBADMSG);


	/*
	 * A version has been agreed upon; use the client's SID for
	 * communication on this channel now
	 */
	ASSERT(!(vd->initialized & VD_SID));
	vd->sid = ver_msg->tag.vio_sid;
	vd->initialized |= VD_SID;

	/*
	 * When multiple versions are supported, this function should store
	 * the negotiated major and minor version values in the "vd" data
	 * structure to govern further communication; in particular, note that
	 * the client might have specified a lower minor version for the
	 * agreed major version than specified in the vds_version[] array.  The
	 * following assertions should help remind future maintainers to make
	 * the appropriate changes to support multiple versions.
	 */
	ASSERT(vds_num_versions == 1);
	ASSERT(ver_msg->ver_major == vds_version[0].major);
	ASSERT(ver_msg->ver_minor == vds_version[0].minor);

	PR0("Using major version %u, minor version %u",
	    ver_msg->ver_major, ver_msg->ver_minor);
	return (0);
}

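/*
 * The remaining handshake phases are handled below:  after version
 * negotiation the client sends its attributes (vd_process_attr_msg()),
 * registers a descriptor ring if it uses dring mode
 * (vd_process_dring_reg_msg()), and finally sends RDX (process_rdx_msg())
 * to indicate that it is ready to exchange data.
 */
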
static int
vd_process_attr_msg(vd_t *vd, vio_msg_t *msg, size_t msglen)
{
	vd_attr_msg_t	*attr_msg = (vd_attr_msg_t *)msg;


	ASSERT(msglen >= sizeof (msg->tag));

	if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO,
	    VIO_ATTR_INFO)) {
		PR0("Message is not an attribute message");
		return (ENOMSG);
	}

	if (msglen != sizeof (*attr_msg)) {
		PRN("Expected %lu-byte attribute message; "
		    "received %lu bytes", sizeof (*attr_msg), msglen);
		return (EBADMSG);
	}

	if (attr_msg->max_xfer_sz == 0) {
		PRN("Received maximum transfer size of 0 from client");
		return (EBADMSG);
	}

	if ((attr_msg->xfer_mode != VIO_DESC_MODE) &&
	    (attr_msg->xfer_mode != VIO_DRING_MODE)) {
		PRN("Client requested unsupported transfer mode");
		return (EBADMSG);
	}


	/* Success:  valid message and transfer mode */
	vd->xfer_mode = attr_msg->xfer_mode;
	if (vd->xfer_mode == VIO_DESC_MODE) {
		/*
		 * The vd_dring_inband_msg_t contains one cookie; need room
		 * for up to n-1 more cookies, where "n" is the number of full
		 * pages plus possibly one partial page required to cover
		 * "max_xfer_sz".  Add room for one more cookie if
		 * "max_xfer_sz" isn't an integral multiple of the page size.
		 * Must first get the maximum transfer size in bytes.
		 */
		size_t max_xfer_bytes = attr_msg->vdisk_block_size ?
		    attr_msg->vdisk_block_size*attr_msg->max_xfer_sz :
		    attr_msg->max_xfer_sz;
		size_t max_inband_msglen =
		    sizeof (vd_dring_inband_msg_t) +
		    ((max_xfer_bytes/PAGESIZE +
		    ((max_xfer_bytes % PAGESIZE) ? 1 : 0))*
		    (sizeof (ldc_mem_cookie_t)));

		/*
		 * Set the maximum expected message length to
		 * accommodate in-band-descriptor messages with all
		 * their cookies
		 */
		vd->max_msglen = MAX(vd->max_msglen, max_inband_msglen);

		/*
		 * Initialize the data structure for processing in-band I/O
		 * request descriptors
		 */
		vd->inband_task.vd	= vd;
		vd->inband_task.index	= 0;
		vd->inband_task.type	= VD_FINAL_RANGE_TASK;	/* range == 1 */
	}

	attr_msg->vdisk_size = vd->vdisk_size;
	attr_msg->vdisk_type = vd->vdisk_type;
	attr_msg->operations = vds_operations;
	PR0("%s", VD_CLIENT(vd));
	return (0);
}

static int
vd_process_dring_reg_msg(vd_t *vd, vio_msg_t *msg, size_t msglen)
{
	int		status;
	size_t		expected;
	ldc_mem_info_t	dring_minfo;
	vio_dring_reg_msg_t	*reg_msg = (vio_dring_reg_msg_t *)msg;


	ASSERT(msglen >= sizeof (msg->tag));

	if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO,
	    VIO_DRING_REG)) {
		PR0("Message is not a register-dring message");
		return (ENOMSG);
	}

	if (msglen < sizeof (*reg_msg)) {
		PRN("Expected at least %lu-byte register-dring message; "
		    "received %lu bytes", sizeof (*reg_msg), msglen);
		return (EBADMSG);
	}

	expected = sizeof (*reg_msg) +
	    (reg_msg->ncookies - 1)*(sizeof (reg_msg->cookie[0]));
	if (msglen != expected) {
		PRN("Expected %lu-byte register-dring message; "
		    "received %lu bytes", expected, msglen);
		return (EBADMSG);
	}

	if (vd->initialized & VD_DRING) {
		PRN("A dring was previously registered; only support one");
		return (EBADMSG);
	}

	if (reg_msg->num_descriptors > INT32_MAX) {
		PRN("reg_msg->num_descriptors = %u; must be <= %u (%s)",
		    reg_msg->num_descriptors, INT32_MAX, STRINGIZE(INT32_MAX));
		return (EBADMSG);
	}

	if (reg_msg->ncookies != 1) {
		/*
		 * In addition to fixing the assertion in the success case
		 * below, supporting drings which require more than one
		 * "cookie" requires increasing the value of vd->max_msglen
		 * somewhere in the code path prior to receiving the message
		 * which results in calling this function.  Note that without
		 * making this change, the larger message size required to
		 * accommodate multiple cookies cannot be successfully
		 * received, so this function will not even get called.
		 * Gracefully accommodating more dring cookies might
		 * reasonably demand exchanging an additional attribute or
		 * making a minor protocol adjustment
		 */
		PRN("reg_msg->ncookies = %u != 1", reg_msg->ncookies);
		return (EBADMSG);
	}

	status = ldc_mem_dring_map(vd->ldc_handle, reg_msg->cookie,
	    reg_msg->ncookies, reg_msg->num_descriptors,
	    reg_msg->descriptor_size, LDC_SHADOW_MAP, &vd->dring_handle);
	if (status != 0) {
		PRN("ldc_mem_dring_map() returned errno %d", status);
		return (status);
	}

	/*
	 * To remove the need for this assertion, must call
	 * ldc_mem_dring_nextcookie() successfully ncookies-1 times after a
	 * successful call to ldc_mem_dring_map()
	 */
	ASSERT(reg_msg->ncookies == 1);

	if ((status =
	    ldc_mem_dring_info(vd->dring_handle, &dring_minfo)) != 0) {
		PRN("ldc_mem_dring_info() returned errno %d", status);
		if ((status = ldc_mem_dring_unmap(vd->dring_handle)) != 0)
			PRN("ldc_mem_dring_unmap() returned errno %d", status);
		return (status);
	}

	if (dring_minfo.vaddr == NULL) {
		PRN("Descriptor ring virtual address is NULL");
		return (ENXIO);
	}


	/* Initialize for valid message and mapped dring */
	PR1("descriptor size = %u, dring length = %u",
	    reg_msg->descriptor_size, reg_msg->num_descriptors);
	vd->initialized |= VD_DRING;
	vd->dring_ident = 1;	/* "There Can Be Only One" */
	vd->dring = dring_minfo.vaddr;
	vd->descriptor_size = reg_msg->descriptor_size;
	vd->dring_len = reg_msg->num_descriptors;
	reg_msg->dring_ident = vd->dring_ident;

	/*
	 * Allocate and initialize a "shadow" array of data structures for
	 * tasks to process I/O requests in dring elements
	 */
	vd->dring_task =
	    kmem_zalloc((sizeof (*vd->dring_task)) * vd->dring_len, KM_SLEEP);
	for (int i = 0; i < vd->dring_len; i++) {
		vd->dring_task[i].vd		= vd;
		vd->dring_task[i].index		= i;
		vd->dring_task[i].request	= &VD_DRING_ELEM(i)->payload;
	}

	return (0);
}

static int
vd_process_dring_unreg_msg(vd_t *vd, vio_msg_t *msg, size_t msglen)
{
	vio_dring_unreg_msg_t	*unreg_msg = (vio_dring_unreg_msg_t *)msg;

	ASSERT(msglen >= sizeof (msg->tag));

	if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO,
	    VIO_DRING_UNREG)) {
		PR0("Message is not an unregister-dring message");
		return (ENOMSG);
	}

	if (msglen != sizeof (*unreg_msg)) {
		PRN("Expected %lu-byte unregister-dring message; "
		    "received %lu bytes", sizeof (*unreg_msg), msglen);
		return (EBADMSG);
	}

	if (unreg_msg->dring_ident != vd->dring_ident) {
		PRN("Expected dring ident %lu; received %lu",
		    vd->dring_ident, unreg_msg->dring_ident);
		return (EBADMSG);
	}

	return (0);
}

static int
process_rdx_msg(vio_msg_t *msg, size_t msglen)
{
	ASSERT(msglen >= sizeof (msg->tag));

	if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_RDX)) {
		PR0("Message is not an RDX message");
		return (ENOMSG);
	}

	if (msglen != sizeof (vio_rdx_msg_t)) {
		PRN("Expected %lu-byte RDX message; received %lu bytes",
		    sizeof (vio_rdx_msg_t), msglen);
		return (EBADMSG);
	}

	PR0("Valid RDX message");
	return (0);
}

static int
vd_check_seq_num(vd_t *vd, uint64_t seq_num)
{
	if ((vd->initialized & VD_SEQ_NUM) && (seq_num != vd->seq_num + 1)) {
		PRN("Received seq_num %lu; expected %lu",
		    seq_num, (vd->seq_num + 1));
		vd_need_reset(vd, B_FALSE);
		return (1);
	}

	vd->seq_num = seq_num;
	vd->initialized |= VD_SEQ_NUM;	/* superfluous after first time... */
*/ 12221ae08745Sheppo return (0); 12231ae08745Sheppo } 12241ae08745Sheppo 12251ae08745Sheppo /* 12261ae08745Sheppo * Return the expected size of an inband-descriptor message with all the 12271ae08745Sheppo * cookies it claims to include 12281ae08745Sheppo */ 12291ae08745Sheppo static size_t 12301ae08745Sheppo expected_inband_size(vd_dring_inband_msg_t *msg) 12311ae08745Sheppo { 12321ae08745Sheppo return ((sizeof (*msg)) + 12331ae08745Sheppo (msg->payload.ncookies - 1)*(sizeof (msg->payload.cookie[0]))); 12341ae08745Sheppo } 12351ae08745Sheppo 12361ae08745Sheppo /* 12371ae08745Sheppo * Process an in-band descriptor message: used with clients like OBP, with 12381ae08745Sheppo * which vds exchanges descriptors within VIO message payloads, rather than 12391ae08745Sheppo * operating on them within a descriptor ring 12401ae08745Sheppo */ 12411ae08745Sheppo static int 1242*d10e4ef2Snarayan vd_process_desc_msg(vd_t *vd, vio_msg_t *msg, size_t msglen, size_t msgsize) 12431ae08745Sheppo { 12441ae08745Sheppo size_t expected; 12451ae08745Sheppo vd_dring_inband_msg_t *desc_msg = (vd_dring_inband_msg_t *)msg; 12461ae08745Sheppo 12471ae08745Sheppo 12481ae08745Sheppo ASSERT(msglen >= sizeof (msg->tag)); 12491ae08745Sheppo 12501ae08745Sheppo if (!vd_msgtype(&msg->tag, VIO_TYPE_DATA, VIO_SUBTYPE_INFO, 1251*d10e4ef2Snarayan VIO_DESC_DATA)) { 1252*d10e4ef2Snarayan PR1("Message is not an in-band-descriptor message"); 1253*d10e4ef2Snarayan return (ENOMSG); 1254*d10e4ef2Snarayan } 12551ae08745Sheppo 12561ae08745Sheppo if (msglen < sizeof (*desc_msg)) { 12571ae08745Sheppo PRN("Expected at least %lu-byte descriptor message; " 12581ae08745Sheppo "received %lu bytes", sizeof (*desc_msg), msglen); 12591ae08745Sheppo return (EBADMSG); 12601ae08745Sheppo } 12611ae08745Sheppo 12621ae08745Sheppo if (msglen != (expected = expected_inband_size(desc_msg))) { 12631ae08745Sheppo PRN("Expected %lu-byte descriptor message; " 12641ae08745Sheppo "received %lu bytes", expected, msglen); 12651ae08745Sheppo return (EBADMSG); 12661ae08745Sheppo } 12671ae08745Sheppo 1268*d10e4ef2Snarayan if (vd_check_seq_num(vd, desc_msg->hdr.seq_num) != 0) 12691ae08745Sheppo return (EBADMSG); 12701ae08745Sheppo 1271*d10e4ef2Snarayan /* 1272*d10e4ef2Snarayan * Valid message: Set up the in-band descriptor task and process the 1273*d10e4ef2Snarayan * request. 
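	 * The request is handed to the vd structure's single, embedded
	 * inband_task rather than to one of the per-dring-element tasks,
	 * since an in-band client has no descriptor ring registered.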
Arrange to acknowledge the client's message, unless an 1274*d10e4ef2Snarayan * error processing the descriptor task results in setting 1275*d10e4ef2Snarayan * VIO_SUBTYPE_NACK 1276*d10e4ef2Snarayan */ 1277*d10e4ef2Snarayan PR1("Valid in-band-descriptor message"); 1278*d10e4ef2Snarayan msg->tag.vio_subtype = VIO_SUBTYPE_ACK; 1279*d10e4ef2Snarayan vd->inband_task.msg = msg; 1280*d10e4ef2Snarayan vd->inband_task.msglen = msglen; 1281*d10e4ef2Snarayan vd->inband_task.msgsize = msgsize; 1282*d10e4ef2Snarayan vd->inband_task.request = &desc_msg->payload; 1283*d10e4ef2Snarayan return (vd_process_task(&vd->inband_task)); 12841ae08745Sheppo } 12851ae08745Sheppo 12861ae08745Sheppo static int 1287*d10e4ef2Snarayan vd_process_element(vd_t *vd, vd_task_type_t type, uint32_t idx, 1288*d10e4ef2Snarayan vio_msg_t *msg, size_t msglen, size_t msgsize) 12891ae08745Sheppo { 12901ae08745Sheppo int status; 1291*d10e4ef2Snarayan boolean_t ready; 1292*d10e4ef2Snarayan vd_dring_entry_t *elem = VD_DRING_ELEM(idx); 12931ae08745Sheppo 12941ae08745Sheppo 1295*d10e4ef2Snarayan /* Accept the updated dring element */ 1296*d10e4ef2Snarayan if ((status = ldc_mem_dring_acquire(vd->dring_handle, idx, idx)) != 0) { 12971ae08745Sheppo PRN("ldc_mem_dring_acquire() returned errno %d", status); 12981ae08745Sheppo return (status); 12991ae08745Sheppo } 1300*d10e4ef2Snarayan ready = (elem->hdr.dstate == VIO_DESC_READY); 1301*d10e4ef2Snarayan if (ready) { 1302*d10e4ef2Snarayan elem->hdr.dstate = VIO_DESC_ACCEPTED; 1303*d10e4ef2Snarayan } else { 1304*d10e4ef2Snarayan PRN("descriptor %u not ready", idx); 1305*d10e4ef2Snarayan VD_DUMP_DRING_ELEM(elem); 1306*d10e4ef2Snarayan } 1307*d10e4ef2Snarayan if ((status = ldc_mem_dring_release(vd->dring_handle, idx, idx)) != 0) { 13081ae08745Sheppo PRN("ldc_mem_dring_release() returned errno %d", status); 13091ae08745Sheppo return (status); 13101ae08745Sheppo } 1311*d10e4ef2Snarayan if (!ready) 1312*d10e4ef2Snarayan return (EBUSY); 13131ae08745Sheppo 13141ae08745Sheppo 1315*d10e4ef2Snarayan /* Initialize a task and process the accepted element */ 1316*d10e4ef2Snarayan PR1("Processing dring element %u", idx); 1317*d10e4ef2Snarayan vd->dring_task[idx].type = type; 1318*d10e4ef2Snarayan vd->dring_task[idx].msg = msg; 1319*d10e4ef2Snarayan vd->dring_task[idx].msglen = msglen; 1320*d10e4ef2Snarayan vd->dring_task[idx].msgsize = msgsize; 1321*d10e4ef2Snarayan if ((status = vd_process_task(&vd->dring_task[idx])) != EINPROGRESS) 1322*d10e4ef2Snarayan status = vd_mark_elem_done(vd, idx, elem->payload.status); 13231ae08745Sheppo 13241ae08745Sheppo return (status); 13251ae08745Sheppo } 13261ae08745Sheppo 13271ae08745Sheppo static int 1328*d10e4ef2Snarayan vd_process_element_range(vd_t *vd, int start, int end, 1329*d10e4ef2Snarayan vio_msg_t *msg, size_t msglen, size_t msgsize) 1330*d10e4ef2Snarayan { 1331*d10e4ef2Snarayan int i, n, nelem, status = 0; 1332*d10e4ef2Snarayan boolean_t inprogress = B_FALSE; 1333*d10e4ef2Snarayan vd_task_type_t type; 1334*d10e4ef2Snarayan 1335*d10e4ef2Snarayan 1336*d10e4ef2Snarayan ASSERT(start >= 0); 1337*d10e4ef2Snarayan ASSERT(end >= 0); 1338*d10e4ef2Snarayan 1339*d10e4ef2Snarayan /* 1340*d10e4ef2Snarayan * Arrange to acknowledge the client's message, unless an error 1341*d10e4ef2Snarayan * processing one of the dring elements results in setting 1342*d10e4ef2Snarayan * VIO_SUBTYPE_NACK 1343*d10e4ef2Snarayan */ 1344*d10e4ef2Snarayan msg->tag.vio_subtype = VIO_SUBTYPE_ACK; 1345*d10e4ef2Snarayan 1346*d10e4ef2Snarayan /* 1347*d10e4ef2Snarayan * Process the dring elements in the 
range
1348*d10e4ef2Snarayan 	 */
1349*d10e4ef2Snarayan 	nelem = ((end < start) ? end + vd->dring_len : end) - start + 1;
1350*d10e4ef2Snarayan 	for (i = start, n = nelem; n > 0; i = (i + 1) % vd->dring_len, n--) {
1351*d10e4ef2Snarayan 		((vio_dring_msg_t *)msg)->end_idx = i;
1352*d10e4ef2Snarayan 		type = (n == 1) ? VD_FINAL_RANGE_TASK : VD_NONFINAL_RANGE_TASK;
1353*d10e4ef2Snarayan 		status = vd_process_element(vd, type, i, msg, msglen, msgsize);
1354*d10e4ef2Snarayan 		if (status == EINPROGRESS)
1355*d10e4ef2Snarayan 			inprogress = B_TRUE;
1356*d10e4ef2Snarayan 		else if (status != 0)
1357*d10e4ef2Snarayan 			break;
1358*d10e4ef2Snarayan 	}
1359*d10e4ef2Snarayan 
1360*d10e4ef2Snarayan 	/*
1361*d10e4ef2Snarayan 	 * If some, but not all, operations of a multi-element range are in
1362*d10e4ef2Snarayan 	 * progress, wait for other operations to complete before returning
1363*d10e4ef2Snarayan 	 * (which will result in "ack" or "nack" of the message). Note that
1364*d10e4ef2Snarayan 	 * all outstanding operations will need to complete, not just the ones
1365*d10e4ef2Snarayan 	 * corresponding to the current range of dring elements; however, as
1366*d10e4ef2Snarayan 	 * this situation is an error case, performance is less critical.
1367*d10e4ef2Snarayan 	 */
1368*d10e4ef2Snarayan 	if ((nelem > 1) && (status != EINPROGRESS) && inprogress)
1369*d10e4ef2Snarayan 		ddi_taskq_wait(vd->completionq);
1370*d10e4ef2Snarayan 
1371*d10e4ef2Snarayan 	return (status);
1372*d10e4ef2Snarayan }
1373*d10e4ef2Snarayan 
1374*d10e4ef2Snarayan static int
1375*d10e4ef2Snarayan vd_process_dring_msg(vd_t *vd, vio_msg_t *msg, size_t msglen, size_t msgsize)
13761ae08745Sheppo {
13771ae08745Sheppo 	vio_dring_msg_t *dring_msg = (vio_dring_msg_t *)msg;
13781ae08745Sheppo 
13791ae08745Sheppo 
13801ae08745Sheppo 	ASSERT(msglen >= sizeof (msg->tag));
13811ae08745Sheppo 
13821ae08745Sheppo 	if (!vd_msgtype(&msg->tag, VIO_TYPE_DATA, VIO_SUBTYPE_INFO,
13831ae08745Sheppo 	    VIO_DRING_DATA)) {
1384*d10e4ef2Snarayan 		PR1("Message is not a dring-data message");
1385*d10e4ef2Snarayan 		return (ENOMSG);
13861ae08745Sheppo 	}
13871ae08745Sheppo 
13881ae08745Sheppo 	if (msglen != sizeof (*dring_msg)) {
13891ae08745Sheppo 		PRN("Expected %lu-byte dring message; received %lu bytes",
13901ae08745Sheppo 		    sizeof (*dring_msg), msglen);
13911ae08745Sheppo 		return (EBADMSG);
13921ae08745Sheppo 	}
13931ae08745Sheppo 
1394*d10e4ef2Snarayan 	if (vd_check_seq_num(vd, dring_msg->seq_num) != 0)
13951ae08745Sheppo 		return (EBADMSG);
13961ae08745Sheppo 
13971ae08745Sheppo 	if (dring_msg->dring_ident != vd->dring_ident) {
13981ae08745Sheppo 		PRN("Expected dring ident %lu; received ident %lu",
13991ae08745Sheppo 		    vd->dring_ident, dring_msg->dring_ident);
14001ae08745Sheppo 		return (EBADMSG);
14011ae08745Sheppo 	}
14021ae08745Sheppo 
1403*d10e4ef2Snarayan 	if (dring_msg->start_idx >= vd->dring_len) {
1404*d10e4ef2Snarayan 		PRN("\"start_idx\" = %u; must be less than %u",
1405*d10e4ef2Snarayan 		    dring_msg->start_idx, vd->dring_len);
1406*d10e4ef2Snarayan 		return (EBADMSG);
1407*d10e4ef2Snarayan 	}
14081ae08745Sheppo 
1409*d10e4ef2Snarayan 	if ((dring_msg->end_idx < 0) ||
1410*d10e4ef2Snarayan 	    (dring_msg->end_idx >= vd->dring_len)) {
1411*d10e4ef2Snarayan 		PRN("\"end_idx\" = %u; must be >= 0 and less than %u",
1412*d10e4ef2Snarayan 		    dring_msg->end_idx, vd->dring_len);
1413*d10e4ef2Snarayan 		return (EBADMSG);
1414*d10e4ef2Snarayan 	}
1415*d10e4ef2Snarayan 
1416*d10e4ef2Snarayan 	/* Valid message; process range of updated dring elements */
1417*d10e4ef2Snarayan 	PR1("Processing descriptor range, start = %u, end = %u",
1418*d10e4ef2Snarayan
dring_msg->start_idx, dring_msg->end_idx); 1419*d10e4ef2Snarayan return (vd_process_element_range(vd, dring_msg->start_idx, 1420*d10e4ef2Snarayan dring_msg->end_idx, msg, msglen, msgsize)); 14211ae08745Sheppo } 14221ae08745Sheppo 14231ae08745Sheppo static int 14241ae08745Sheppo recv_msg(ldc_handle_t ldc_handle, void *msg, size_t *nbytes) 14251ae08745Sheppo { 14261ae08745Sheppo int retry, status; 14271ae08745Sheppo size_t size = *nbytes; 14281ae08745Sheppo 14291ae08745Sheppo 14301ae08745Sheppo for (retry = 0, status = ETIMEDOUT; 14311ae08745Sheppo retry < vds_ldc_retries && status == ETIMEDOUT; 14321ae08745Sheppo retry++) { 14331ae08745Sheppo PR1("ldc_read() attempt %d", (retry + 1)); 14341ae08745Sheppo *nbytes = size; 14351ae08745Sheppo status = ldc_read(ldc_handle, msg, nbytes); 14361ae08745Sheppo } 14371ae08745Sheppo 14381ae08745Sheppo if (status != 0) { 14391ae08745Sheppo PRN("ldc_read() returned errno %d", status); 14401ae08745Sheppo return (status); 14411ae08745Sheppo } else if (*nbytes == 0) { 14421ae08745Sheppo PR1("ldc_read() returned 0 and no message read"); 14431ae08745Sheppo return (ENOMSG); 14441ae08745Sheppo } 14451ae08745Sheppo 14461ae08745Sheppo PR1("RCVD %lu-byte message", *nbytes); 14471ae08745Sheppo return (0); 14481ae08745Sheppo } 14491ae08745Sheppo 14501ae08745Sheppo static int 1451*d10e4ef2Snarayan vd_do_process_msg(vd_t *vd, vio_msg_t *msg, size_t msglen, size_t msgsize) 14521ae08745Sheppo { 14531ae08745Sheppo int status; 14541ae08745Sheppo 14551ae08745Sheppo 14561ae08745Sheppo PR1("Processing (%x/%x/%x) message", msg->tag.vio_msgtype, 14571ae08745Sheppo msg->tag.vio_subtype, msg->tag.vio_subtype_env); 14581ae08745Sheppo 14591ae08745Sheppo /* 14601ae08745Sheppo * Validate session ID up front, since it applies to all messages 14611ae08745Sheppo * once set 14621ae08745Sheppo */ 14631ae08745Sheppo if ((msg->tag.vio_sid != vd->sid) && (vd->initialized & VD_SID)) { 14641ae08745Sheppo PRN("Expected SID %u, received %u", vd->sid, 14651ae08745Sheppo msg->tag.vio_sid); 14661ae08745Sheppo return (EBADMSG); 14671ae08745Sheppo } 14681ae08745Sheppo 14691ae08745Sheppo 14701ae08745Sheppo /* 14711ae08745Sheppo * Process the received message based on connection state 14721ae08745Sheppo */ 14731ae08745Sheppo switch (vd->state) { 14741ae08745Sheppo case VD_STATE_INIT: /* expect version message */ 14750a55fbb7Slm66018 if ((status = vd_process_ver_msg(vd, msg, msglen)) != 0) 14761ae08745Sheppo return (status); 14771ae08745Sheppo 14781ae08745Sheppo /* Version negotiated, move to that state */ 14791ae08745Sheppo vd->state = VD_STATE_VER; 14801ae08745Sheppo return (0); 14811ae08745Sheppo 14821ae08745Sheppo case VD_STATE_VER: /* expect attribute message */ 14831ae08745Sheppo if ((status = vd_process_attr_msg(vd, msg, msglen)) != 0) 14841ae08745Sheppo return (status); 14851ae08745Sheppo 14861ae08745Sheppo /* Attributes exchanged, move to that state */ 14871ae08745Sheppo vd->state = VD_STATE_ATTR; 14881ae08745Sheppo return (0); 14891ae08745Sheppo 14901ae08745Sheppo case VD_STATE_ATTR: 14911ae08745Sheppo switch (vd->xfer_mode) { 14921ae08745Sheppo case VIO_DESC_MODE: /* expect RDX message */ 14931ae08745Sheppo if ((status = process_rdx_msg(msg, msglen)) != 0) 14941ae08745Sheppo return (status); 14951ae08745Sheppo 14961ae08745Sheppo /* Ready to receive in-band descriptors */ 14971ae08745Sheppo vd->state = VD_STATE_DATA; 14981ae08745Sheppo return (0); 14991ae08745Sheppo 15001ae08745Sheppo case VIO_DRING_MODE: /* expect register-dring message */ 15011ae08745Sheppo if ((status = 15021ae08745Sheppo 
vd_process_dring_reg_msg(vd, msg, msglen)) != 0) 15031ae08745Sheppo return (status); 15041ae08745Sheppo 15051ae08745Sheppo /* One dring negotiated, move to that state */ 15061ae08745Sheppo vd->state = VD_STATE_DRING; 15071ae08745Sheppo return (0); 15081ae08745Sheppo 15091ae08745Sheppo default: 15101ae08745Sheppo ASSERT("Unsupported transfer mode"); 15111ae08745Sheppo PRN("Unsupported transfer mode"); 15121ae08745Sheppo return (ENOTSUP); 15131ae08745Sheppo } 15141ae08745Sheppo 15151ae08745Sheppo case VD_STATE_DRING: /* expect RDX, register-dring, or unreg-dring */ 15161ae08745Sheppo if ((status = process_rdx_msg(msg, msglen)) == 0) { 15171ae08745Sheppo /* Ready to receive data */ 15181ae08745Sheppo vd->state = VD_STATE_DATA; 15191ae08745Sheppo return (0); 15201ae08745Sheppo } else if (status != ENOMSG) { 15211ae08745Sheppo return (status); 15221ae08745Sheppo } 15231ae08745Sheppo 15241ae08745Sheppo 15251ae08745Sheppo /* 15261ae08745Sheppo * If another register-dring message is received, stay in 15271ae08745Sheppo * dring state in case the client sends RDX; although the 15281ae08745Sheppo * protocol allows multiple drings, this server does not 15291ae08745Sheppo * support using more than one 15301ae08745Sheppo */ 15311ae08745Sheppo if ((status = 15321ae08745Sheppo vd_process_dring_reg_msg(vd, msg, msglen)) != ENOMSG) 15331ae08745Sheppo return (status); 15341ae08745Sheppo 15351ae08745Sheppo /* 15361ae08745Sheppo * Acknowledge an unregister-dring message, but reset the 15371ae08745Sheppo * connection anyway: Although the protocol allows 15381ae08745Sheppo * unregistering drings, this server cannot serve a vdisk 15391ae08745Sheppo * without its only dring 15401ae08745Sheppo */ 15411ae08745Sheppo status = vd_process_dring_unreg_msg(vd, msg, msglen); 15421ae08745Sheppo return ((status == 0) ? ENOTSUP : status); 15431ae08745Sheppo 15441ae08745Sheppo case VD_STATE_DATA: 15451ae08745Sheppo switch (vd->xfer_mode) { 15461ae08745Sheppo case VIO_DESC_MODE: /* expect in-band-descriptor message */ 1547*d10e4ef2Snarayan return (vd_process_desc_msg(vd, msg, msglen, msgsize)); 15481ae08745Sheppo 15491ae08745Sheppo case VIO_DRING_MODE: /* expect dring-data or unreg-dring */ 15501ae08745Sheppo /* 15511ae08745Sheppo * Typically expect dring-data messages, so handle 15521ae08745Sheppo * them first 15531ae08745Sheppo */ 15541ae08745Sheppo if ((status = vd_process_dring_msg(vd, msg, 1555*d10e4ef2Snarayan msglen, msgsize)) != ENOMSG) 15561ae08745Sheppo return (status); 15571ae08745Sheppo 15581ae08745Sheppo /* 15591ae08745Sheppo * Acknowledge an unregister-dring message, but reset 15601ae08745Sheppo * the connection anyway: Although the protocol 15611ae08745Sheppo * allows unregistering drings, this server cannot 15621ae08745Sheppo * serve a vdisk without its only dring 15631ae08745Sheppo */ 15641ae08745Sheppo status = vd_process_dring_unreg_msg(vd, msg, msglen); 15651ae08745Sheppo return ((status == 0) ? 
ENOTSUP : status); 15661ae08745Sheppo 15671ae08745Sheppo default: 15681ae08745Sheppo ASSERT("Unsupported transfer mode"); 15691ae08745Sheppo PRN("Unsupported transfer mode"); 15701ae08745Sheppo return (ENOTSUP); 15711ae08745Sheppo } 15721ae08745Sheppo 15731ae08745Sheppo default: 15741ae08745Sheppo ASSERT("Invalid client connection state"); 15751ae08745Sheppo PRN("Invalid client connection state"); 15761ae08745Sheppo return (ENOTSUP); 15771ae08745Sheppo } 15781ae08745Sheppo } 15791ae08745Sheppo 1580*d10e4ef2Snarayan static int 1581*d10e4ef2Snarayan vd_process_msg(vd_t *vd, vio_msg_t *msg, size_t msglen, size_t msgsize) 15821ae08745Sheppo { 15831ae08745Sheppo int status; 15841ae08745Sheppo boolean_t reset_ldc = B_FALSE; 15851ae08745Sheppo 15861ae08745Sheppo 15871ae08745Sheppo /* 15881ae08745Sheppo * Check that the message is at least big enough for a "tag", so that 15891ae08745Sheppo * message processing can proceed based on tag-specified message type 15901ae08745Sheppo */ 15911ae08745Sheppo if (msglen < sizeof (vio_msg_tag_t)) { 15921ae08745Sheppo PRN("Received short (%lu-byte) message", msglen); 15931ae08745Sheppo /* Can't "nack" short message, so drop the big hammer */ 1594*d10e4ef2Snarayan vd_need_reset(vd, B_TRUE); 1595*d10e4ef2Snarayan return (EBADMSG); 15961ae08745Sheppo } 15971ae08745Sheppo 15981ae08745Sheppo /* 15991ae08745Sheppo * Process the message 16001ae08745Sheppo */ 1601*d10e4ef2Snarayan switch (status = vd_do_process_msg(vd, msg, msglen, msgsize)) { 16021ae08745Sheppo case 0: 16031ae08745Sheppo /* "ack" valid, successfully-processed messages */ 16041ae08745Sheppo msg->tag.vio_subtype = VIO_SUBTYPE_ACK; 16051ae08745Sheppo break; 16061ae08745Sheppo 1607*d10e4ef2Snarayan case EINPROGRESS: 1608*d10e4ef2Snarayan /* The completion handler will "ack" or "nack" the message */ 1609*d10e4ef2Snarayan return (EINPROGRESS); 16101ae08745Sheppo case ENOMSG: 16111ae08745Sheppo PRN("Received unexpected message"); 16121ae08745Sheppo _NOTE(FALLTHROUGH); 16131ae08745Sheppo case EBADMSG: 16141ae08745Sheppo case ENOTSUP: 16151ae08745Sheppo /* "nack" invalid messages */ 16161ae08745Sheppo msg->tag.vio_subtype = VIO_SUBTYPE_NACK; 16171ae08745Sheppo break; 16181ae08745Sheppo 16191ae08745Sheppo default: 16201ae08745Sheppo /* "nack" failed messages */ 16211ae08745Sheppo msg->tag.vio_subtype = VIO_SUBTYPE_NACK; 16221ae08745Sheppo /* An LDC error probably occurred, so try resetting it */ 16231ae08745Sheppo reset_ldc = B_TRUE; 16241ae08745Sheppo break; 16251ae08745Sheppo } 16261ae08745Sheppo 1627*d10e4ef2Snarayan /* Send the "ack" or "nack" to the client */ 16281ae08745Sheppo PR1("Sending %s", 16291ae08745Sheppo (msg->tag.vio_subtype == VIO_SUBTYPE_ACK) ? 
"ACK" : "NACK"); 16301ae08745Sheppo if (send_msg(vd->ldc_handle, msg, msglen) != 0) 16311ae08745Sheppo reset_ldc = B_TRUE; 16321ae08745Sheppo 1633*d10e4ef2Snarayan /* Arrange to reset the connection for nack'ed or failed messages */ 16341ae08745Sheppo if ((status != 0) || reset_ldc) 1635*d10e4ef2Snarayan vd_need_reset(vd, reset_ldc); 1636*d10e4ef2Snarayan 1637*d10e4ef2Snarayan return (status); 1638*d10e4ef2Snarayan } 1639*d10e4ef2Snarayan 1640*d10e4ef2Snarayan static boolean_t 1641*d10e4ef2Snarayan vd_enabled(vd_t *vd) 1642*d10e4ef2Snarayan { 1643*d10e4ef2Snarayan boolean_t enabled; 1644*d10e4ef2Snarayan 1645*d10e4ef2Snarayan 1646*d10e4ef2Snarayan mutex_enter(&vd->lock); 1647*d10e4ef2Snarayan enabled = vd->enabled; 1648*d10e4ef2Snarayan mutex_exit(&vd->lock); 1649*d10e4ef2Snarayan return (enabled); 16501ae08745Sheppo } 16511ae08745Sheppo 16521ae08745Sheppo static void 16530a55fbb7Slm66018 vd_recv_msg(void *arg) 16541ae08745Sheppo { 16551ae08745Sheppo vd_t *vd = (vd_t *)arg; 16560a55fbb7Slm66018 int status = 0; 16571ae08745Sheppo 16581ae08745Sheppo 16591ae08745Sheppo ASSERT(vd != NULL); 1660*d10e4ef2Snarayan PR2("New task to receive incoming message(s)"); 1661*d10e4ef2Snarayan while (vd_enabled(vd) && status == 0) { 1662*d10e4ef2Snarayan size_t msglen, msgsize; 1663*d10e4ef2Snarayan vio_msg_t *vio_msg; 1664*d10e4ef2Snarayan 1665*d10e4ef2Snarayan 16660a55fbb7Slm66018 /* 1667*d10e4ef2Snarayan * Receive and process a message 16680a55fbb7Slm66018 */ 1669*d10e4ef2Snarayan vd_reset_if_needed(vd); /* can change vd->max_msglen */ 1670*d10e4ef2Snarayan msgsize = vd->max_msglen; /* stable copy for alloc/free */ 1671*d10e4ef2Snarayan msglen = msgsize; /* actual length after recv_msg() */ 1672*d10e4ef2Snarayan vio_msg = kmem_alloc(msgsize, KM_SLEEP); 1673*d10e4ef2Snarayan if ((status = recv_msg(vd->ldc_handle, vio_msg, &msglen)) == 1674*d10e4ef2Snarayan 0) { 1675*d10e4ef2Snarayan if (vd_process_msg(vd, vio_msg, msglen, msgsize) == 1676*d10e4ef2Snarayan EINPROGRESS) 1677*d10e4ef2Snarayan continue; /* handler will free msg */ 1678*d10e4ef2Snarayan } else if (status != ENOMSG) { 1679*d10e4ef2Snarayan /* Probably an LDC failure; arrange to reset it */ 1680*d10e4ef2Snarayan vd_need_reset(vd, B_TRUE); 16810a55fbb7Slm66018 } 1682*d10e4ef2Snarayan kmem_free(vio_msg, msgsize); 16831ae08745Sheppo } 1684*d10e4ef2Snarayan PR2("Task finished"); 16850a55fbb7Slm66018 } 16860a55fbb7Slm66018 16870a55fbb7Slm66018 static uint_t 16881ae08745Sheppo vd_handle_ldc_events(uint64_t event, caddr_t arg) 16891ae08745Sheppo { 16901ae08745Sheppo vd_t *vd = (vd_t *)(void *)arg; 16911ae08745Sheppo 16921ae08745Sheppo 16931ae08745Sheppo ASSERT(vd != NULL); 1694*d10e4ef2Snarayan 1695*d10e4ef2Snarayan if (!vd_enabled(vd)) 1696*d10e4ef2Snarayan return (LDC_SUCCESS); 1697*d10e4ef2Snarayan 1698*d10e4ef2Snarayan if (event & LDC_EVT_RESET) { 1699*d10e4ef2Snarayan PR0("LDC channel was reset"); 1700*d10e4ef2Snarayan return (LDC_SUCCESS); 1701*d10e4ef2Snarayan } 1702*d10e4ef2Snarayan 1703*d10e4ef2Snarayan if (event & LDC_EVT_UP) { 1704*d10e4ef2Snarayan PR0("LDC channel came up: Resetting client connection state"); 1705*d10e4ef2Snarayan vd_need_reset(vd, B_FALSE); 1706*d10e4ef2Snarayan } 1707*d10e4ef2Snarayan 1708*d10e4ef2Snarayan if (event & LDC_EVT_READ) { 1709*d10e4ef2Snarayan int status; 1710*d10e4ef2Snarayan 1711*d10e4ef2Snarayan PR1("New data available"); 1712*d10e4ef2Snarayan /* Queue a task to receive the new data */ 1713*d10e4ef2Snarayan status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, vd, 1714*d10e4ef2Snarayan DDI_SLEEP); 
1715*d10e4ef2Snarayan /* ddi_taskq_dispatch(9f) guarantees success with DDI_SLEEP */ 1716*d10e4ef2Snarayan ASSERT(status == DDI_SUCCESS); 1717*d10e4ef2Snarayan } 1718*d10e4ef2Snarayan 1719*d10e4ef2Snarayan return (LDC_SUCCESS); 17201ae08745Sheppo } 17211ae08745Sheppo 17221ae08745Sheppo static uint_t 17231ae08745Sheppo vds_check_for_vd(mod_hash_key_t key, mod_hash_val_t *val, void *arg) 17241ae08745Sheppo { 17251ae08745Sheppo _NOTE(ARGUNUSED(key, val)) 17261ae08745Sheppo (*((uint_t *)arg))++; 17271ae08745Sheppo return (MH_WALK_TERMINATE); 17281ae08745Sheppo } 17291ae08745Sheppo 17301ae08745Sheppo 17311ae08745Sheppo static int 17321ae08745Sheppo vds_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 17331ae08745Sheppo { 17341ae08745Sheppo uint_t vd_present = 0; 17351ae08745Sheppo minor_t instance; 17361ae08745Sheppo vds_t *vds; 17371ae08745Sheppo 17381ae08745Sheppo 17391ae08745Sheppo switch (cmd) { 17401ae08745Sheppo case DDI_DETACH: 17411ae08745Sheppo /* the real work happens below */ 17421ae08745Sheppo break; 17431ae08745Sheppo case DDI_SUSPEND: 1744*d10e4ef2Snarayan PR0("No action required for DDI_SUSPEND"); 17451ae08745Sheppo return (DDI_SUCCESS); 17461ae08745Sheppo default: 1747*d10e4ef2Snarayan PRN("Unrecognized \"cmd\""); 17481ae08745Sheppo return (DDI_FAILURE); 17491ae08745Sheppo } 17501ae08745Sheppo 17511ae08745Sheppo ASSERT(cmd == DDI_DETACH); 17521ae08745Sheppo instance = ddi_get_instance(dip); 17531ae08745Sheppo if ((vds = ddi_get_soft_state(vds_state, instance)) == NULL) { 17541ae08745Sheppo PRN("Could not get state for instance %u", instance); 17551ae08745Sheppo ddi_soft_state_free(vds_state, instance); 17561ae08745Sheppo return (DDI_FAILURE); 17571ae08745Sheppo } 17581ae08745Sheppo 17591ae08745Sheppo /* Do no detach when serving any vdisks */ 17601ae08745Sheppo mod_hash_walk(vds->vd_table, vds_check_for_vd, &vd_present); 17611ae08745Sheppo if (vd_present) { 17621ae08745Sheppo PR0("Not detaching because serving vdisks"); 17631ae08745Sheppo return (DDI_FAILURE); 17641ae08745Sheppo } 17651ae08745Sheppo 17661ae08745Sheppo PR0("Detaching"); 17671ae08745Sheppo if (vds->initialized & VDS_MDEG) 17681ae08745Sheppo (void) mdeg_unregister(vds->mdeg); 17691ae08745Sheppo if (vds->initialized & VDS_LDI) 17701ae08745Sheppo (void) ldi_ident_release(vds->ldi_ident); 17711ae08745Sheppo mod_hash_destroy_hash(vds->vd_table); 17721ae08745Sheppo ddi_soft_state_free(vds_state, instance); 17731ae08745Sheppo return (DDI_SUCCESS); 17741ae08745Sheppo } 17751ae08745Sheppo 17761ae08745Sheppo static boolean_t 17771ae08745Sheppo is_pseudo_device(dev_info_t *dip) 17781ae08745Sheppo { 17791ae08745Sheppo dev_info_t *parent, *root = ddi_root_node(); 17801ae08745Sheppo 17811ae08745Sheppo 17821ae08745Sheppo for (parent = ddi_get_parent(dip); (parent != NULL) && (parent != root); 17831ae08745Sheppo parent = ddi_get_parent(parent)) { 17841ae08745Sheppo if (strcmp(ddi_get_name(parent), DEVI_PSEUDO_NEXNAME) == 0) 17851ae08745Sheppo return (B_TRUE); 17861ae08745Sheppo } 17871ae08745Sheppo 17881ae08745Sheppo return (B_FALSE); 17891ae08745Sheppo } 17901ae08745Sheppo 17911ae08745Sheppo static int 17920a55fbb7Slm66018 vd_setup_full_disk(vd_t *vd) 17930a55fbb7Slm66018 { 17940a55fbb7Slm66018 int rval, status; 17950a55fbb7Slm66018 major_t major = getmajor(vd->dev[0]); 17960a55fbb7Slm66018 minor_t minor = getminor(vd->dev[0]) - VD_ENTIRE_DISK_SLICE; 17970a55fbb7Slm66018 struct vtoc vtoc; 17980a55fbb7Slm66018 17990a55fbb7Slm66018 18000a55fbb7Slm66018 /* Get the VTOC for slice sizes */ 18010a55fbb7Slm66018 if ((status = 
ldi_ioctl(vd->ldi_handle[0], DKIOCGVTOC, (intptr_t)&vtoc, 1802*d10e4ef2Snarayan (vd_open_flags | FKIOCTL), kcred, &rval)) != 0) { 18030a55fbb7Slm66018 PRN("ldi_ioctl(DKIOCGVTOC) returned errno %d", status); 18040a55fbb7Slm66018 return (status); 18050a55fbb7Slm66018 } 18060a55fbb7Slm66018 18070a55fbb7Slm66018 /* Set full-disk parameters */ 18080a55fbb7Slm66018 vd->vdisk_type = VD_DISK_TYPE_DISK; 18090a55fbb7Slm66018 vd->nslices = (sizeof (vd->dev))/(sizeof (vd->dev[0])); 18100a55fbb7Slm66018 18110a55fbb7Slm66018 /* Move dev number and LDI handle to entire-disk-slice array elements */ 18120a55fbb7Slm66018 vd->dev[VD_ENTIRE_DISK_SLICE] = vd->dev[0]; 18130a55fbb7Slm66018 vd->dev[0] = 0; 18140a55fbb7Slm66018 vd->ldi_handle[VD_ENTIRE_DISK_SLICE] = vd->ldi_handle[0]; 18150a55fbb7Slm66018 vd->ldi_handle[0] = NULL; 18160a55fbb7Slm66018 18170a55fbb7Slm66018 /* Initialize device numbers for remaining slices and open them */ 18180a55fbb7Slm66018 for (int slice = 0; slice < vd->nslices; slice++) { 18190a55fbb7Slm66018 /* 18200a55fbb7Slm66018 * Skip the entire-disk slice, as it's already open and its 18210a55fbb7Slm66018 * device known 18220a55fbb7Slm66018 */ 18230a55fbb7Slm66018 if (slice == VD_ENTIRE_DISK_SLICE) 18240a55fbb7Slm66018 continue; 18250a55fbb7Slm66018 ASSERT(vd->dev[slice] == 0); 18260a55fbb7Slm66018 ASSERT(vd->ldi_handle[slice] == NULL); 18270a55fbb7Slm66018 18280a55fbb7Slm66018 /* 18290a55fbb7Slm66018 * Construct the device number for the current slice 18300a55fbb7Slm66018 */ 18310a55fbb7Slm66018 vd->dev[slice] = makedevice(major, (minor + slice)); 18320a55fbb7Slm66018 18330a55fbb7Slm66018 /* 18340a55fbb7Slm66018 * At least some underlying drivers refuse to open 18350a55fbb7Slm66018 * devices for (currently) zero-length slices, so skip 18360a55fbb7Slm66018 * them for now 18370a55fbb7Slm66018 */ 18380a55fbb7Slm66018 if (vtoc.v_part[slice].p_size == 0) { 18390a55fbb7Slm66018 PR0("Skipping zero-length slice %u", slice); 18400a55fbb7Slm66018 continue; 18410a55fbb7Slm66018 } 18420a55fbb7Slm66018 18430a55fbb7Slm66018 /* 18440a55fbb7Slm66018 * Open all non-empty slices of the disk to serve them to the 18450a55fbb7Slm66018 * client. Slices are opened exclusively to prevent other 18460a55fbb7Slm66018 * threads or processes in the service domain from performing 18470a55fbb7Slm66018 * I/O to slices being accessed by a client. Failure to open 18480a55fbb7Slm66018 * a slice results in vds not serving this disk, as the client 18490a55fbb7Slm66018 * could attempt (and should be able) to access any non-empty 18500a55fbb7Slm66018 * slice immediately. Any slices successfully opened before a 18510a55fbb7Slm66018 * failure will get closed by vds_destroy_vd() as a result of 18520a55fbb7Slm66018 * the error returned by this function. 
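		 *
		 * As a worked example (the starting minor number 18 is
		 * hypothetical): if the entire-disk slice (s2) was opened at
		 * minor 18, then "minor" above is 18 - VD_ENTIRE_DISK_SLICE =
		 * 16, so slice 0 is opened at makedevice(major, 16), slice 1
		 * at minor 17, and so on, relying on the convention that a
		 * disk's slices occupy consecutive minor numbers.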
18530a55fbb7Slm66018 */ 18540a55fbb7Slm66018 PR0("Opening device major %u, minor %u = slice %u", 18550a55fbb7Slm66018 major, minor, slice); 18560a55fbb7Slm66018 if ((status = ldi_open_by_dev(&vd->dev[slice], OTYP_BLK, 18570a55fbb7Slm66018 vd_open_flags, kcred, &vd->ldi_handle[slice], 18580a55fbb7Slm66018 vd->vds->ldi_ident)) != 0) { 18590a55fbb7Slm66018 PRN("ldi_open_by_dev() returned errno %d " 18600a55fbb7Slm66018 "for slice %u", status, slice); 18610a55fbb7Slm66018 /* vds_destroy_vd() will close any open slices */ 18620a55fbb7Slm66018 return (status); 18630a55fbb7Slm66018 } 18640a55fbb7Slm66018 } 18650a55fbb7Slm66018 18660a55fbb7Slm66018 return (0); 18670a55fbb7Slm66018 } 18680a55fbb7Slm66018 18690a55fbb7Slm66018 static int 18700a55fbb7Slm66018 vd_setup_vd(char *block_device, vd_t *vd) 18711ae08745Sheppo { 18721ae08745Sheppo int otyp, rval, status; 18731ae08745Sheppo dev_info_t *dip; 18741ae08745Sheppo struct dk_cinfo dk_cinfo; 18751ae08745Sheppo 18761ae08745Sheppo 18770a55fbb7Slm66018 if ((status = ldi_open_by_name(block_device, vd_open_flags, kcred, 18780a55fbb7Slm66018 &vd->ldi_handle[0], vd->vds->ldi_ident)) != 0) { 18790a55fbb7Slm66018 PRN("ldi_open_by_name(%s) = errno %d", block_device, status); 18800a55fbb7Slm66018 return (status); 18810a55fbb7Slm66018 } 18820a55fbb7Slm66018 18831ae08745Sheppo /* Get block device's device number, otyp, and size */ 18840a55fbb7Slm66018 if ((status = ldi_get_dev(vd->ldi_handle[0], &vd->dev[0])) != 0) { 18851ae08745Sheppo PRN("ldi_get_dev() returned errno %d for %s", 18861ae08745Sheppo status, block_device); 18871ae08745Sheppo return (status); 18881ae08745Sheppo } 18890a55fbb7Slm66018 if ((status = ldi_get_otyp(vd->ldi_handle[0], &otyp)) != 0) { 18901ae08745Sheppo PRN("ldi_get_otyp() returned errno %d for %s", 18911ae08745Sheppo status, block_device); 18921ae08745Sheppo return (status); 18931ae08745Sheppo } 18941ae08745Sheppo if (otyp != OTYP_BLK) { 18951ae08745Sheppo PRN("Cannot serve non-block device %s", block_device); 18961ae08745Sheppo return (ENOTBLK); 18971ae08745Sheppo } 18980a55fbb7Slm66018 if (ldi_get_size(vd->ldi_handle[0], &vd->vdisk_size) != DDI_SUCCESS) { 18991ae08745Sheppo PRN("ldi_get_size() failed for %s", block_device); 19001ae08745Sheppo return (EIO); 19011ae08745Sheppo } 19021ae08745Sheppo 19031ae08745Sheppo /* Determine if backing block device is a pseudo device */ 19041ae08745Sheppo if ((dip = ddi_hold_devi_by_instance(getmajor(vd->dev[0]), 19051ae08745Sheppo dev_to_instance(vd->dev[0]), 0)) == NULL) { 19061ae08745Sheppo PRN("%s is no longer accessible", block_device); 19071ae08745Sheppo return (EIO); 19081ae08745Sheppo } 19091ae08745Sheppo vd->pseudo = is_pseudo_device(dip); 19101ae08745Sheppo ddi_release_devi(dip); 19111ae08745Sheppo if (vd->pseudo) { 19121ae08745Sheppo vd->vdisk_type = VD_DISK_TYPE_SLICE; 19131ae08745Sheppo vd->nslices = 1; 19141ae08745Sheppo return (0); /* ...and we're done */ 19151ae08745Sheppo } 19161ae08745Sheppo 19171ae08745Sheppo /* Get dk_cinfo to determine slice of backing block device */ 19180a55fbb7Slm66018 if ((status = ldi_ioctl(vd->ldi_handle[0], DKIOCINFO, 1919*d10e4ef2Snarayan (intptr_t)&dk_cinfo, (vd_open_flags | FKIOCTL), kcred, 1920*d10e4ef2Snarayan &rval)) != 0) { 19211ae08745Sheppo PRN("ldi_ioctl(DKIOCINFO) returned errno %d for %s", 19221ae08745Sheppo status, block_device); 19231ae08745Sheppo return (status); 19241ae08745Sheppo } 19251ae08745Sheppo 19261ae08745Sheppo if (dk_cinfo.dki_partition >= V_NUMPAR) { 19271ae08745Sheppo PRN("slice %u >= maximum slice %u for %s", 19281ae08745Sheppo 
dk_cinfo.dki_partition, V_NUMPAR, block_device); 19291ae08745Sheppo return (EIO); 19301ae08745Sheppo } 19311ae08745Sheppo 19321ae08745Sheppo 19330a55fbb7Slm66018 /* If slice is entire-disk slice, initialize for full disk */ 19340a55fbb7Slm66018 if (dk_cinfo.dki_partition == VD_ENTIRE_DISK_SLICE) 19350a55fbb7Slm66018 return (vd_setup_full_disk(vd)); 19361ae08745Sheppo 19370a55fbb7Slm66018 19380a55fbb7Slm66018 /* Otherwise, we have a non-entire slice of a block device */ 19391ae08745Sheppo vd->vdisk_type = VD_DISK_TYPE_SLICE; 19401ae08745Sheppo vd->nslices = 1; 19411ae08745Sheppo 19421ae08745Sheppo 19431ae08745Sheppo /* Initialize dk_geom structure for single-slice block device */ 19440a55fbb7Slm66018 if ((status = ldi_ioctl(vd->ldi_handle[0], DKIOCGGEOM, 1945*d10e4ef2Snarayan (intptr_t)&vd->dk_geom, (vd_open_flags | FKIOCTL), kcred, 1946*d10e4ef2Snarayan &rval)) != 0) { 19471ae08745Sheppo PRN("ldi_ioctl(DKIOCGEOM) returned errno %d for %s", 19481ae08745Sheppo status, block_device); 19491ae08745Sheppo return (status); 19501ae08745Sheppo } 19511ae08745Sheppo if (vd->dk_geom.dkg_nsect == 0) { 19521ae08745Sheppo PRN("%s geometry claims 0 sectors per track", block_device); 19531ae08745Sheppo return (EIO); 19541ae08745Sheppo } 19551ae08745Sheppo if (vd->dk_geom.dkg_nhead == 0) { 19561ae08745Sheppo PRN("%s geometry claims 0 heads", block_device); 19571ae08745Sheppo return (EIO); 19581ae08745Sheppo } 19591ae08745Sheppo vd->dk_geom.dkg_ncyl = 19601ae08745Sheppo lbtodb(vd->vdisk_size)/vd->dk_geom.dkg_nsect/vd->dk_geom.dkg_nhead; 19611ae08745Sheppo vd->dk_geom.dkg_acyl = 0; 19621ae08745Sheppo vd->dk_geom.dkg_pcyl = vd->dk_geom.dkg_ncyl + vd->dk_geom.dkg_acyl; 19631ae08745Sheppo 19641ae08745Sheppo 19651ae08745Sheppo /* Initialize vtoc structure for single-slice block device */ 19660a55fbb7Slm66018 if ((status = ldi_ioctl(vd->ldi_handle[0], DKIOCGVTOC, 1967*d10e4ef2Snarayan (intptr_t)&vd->vtoc, (vd_open_flags | FKIOCTL), kcred, 1968*d10e4ef2Snarayan &rval)) != 0) { 19691ae08745Sheppo PRN("ldi_ioctl(DKIOCGVTOC) returned errno %d for %s", 19701ae08745Sheppo status, block_device); 19711ae08745Sheppo return (status); 19721ae08745Sheppo } 19731ae08745Sheppo bcopy(VD_VOLUME_NAME, vd->vtoc.v_volume, 19741ae08745Sheppo MIN(sizeof (VD_VOLUME_NAME), sizeof (vd->vtoc.v_volume))); 19751ae08745Sheppo bzero(vd->vtoc.v_part, sizeof (vd->vtoc.v_part)); 19761ae08745Sheppo vd->vtoc.v_nparts = 1; 19771ae08745Sheppo vd->vtoc.v_part[0].p_tag = V_UNASSIGNED; 19781ae08745Sheppo vd->vtoc.v_part[0].p_flag = 0; 19791ae08745Sheppo vd->vtoc.v_part[0].p_start = 0; 19801ae08745Sheppo vd->vtoc.v_part[0].p_size = lbtodb(vd->vdisk_size); 19811ae08745Sheppo bcopy(VD_ASCIILABEL, vd->vtoc.v_asciilabel, 19821ae08745Sheppo MIN(sizeof (VD_ASCIILABEL), sizeof (vd->vtoc.v_asciilabel))); 19831ae08745Sheppo 19841ae08745Sheppo 19851ae08745Sheppo return (0); 19861ae08745Sheppo } 19871ae08745Sheppo 19881ae08745Sheppo static int 19891ae08745Sheppo vds_do_init_vd(vds_t *vds, uint64_t id, char *block_device, uint64_t ldc_id, 19901ae08745Sheppo vd_t **vdp) 19911ae08745Sheppo { 19921ae08745Sheppo char tq_name[TASKQ_NAMELEN]; 19930a55fbb7Slm66018 int status; 19941ae08745Sheppo ddi_iblock_cookie_t iblock = NULL; 19951ae08745Sheppo ldc_attr_t ldc_attr; 19961ae08745Sheppo vd_t *vd; 19971ae08745Sheppo 19981ae08745Sheppo 19991ae08745Sheppo ASSERT(vds != NULL); 20001ae08745Sheppo ASSERT(block_device != NULL); 20011ae08745Sheppo ASSERT(vdp != NULL); 20021ae08745Sheppo PR0("Adding vdisk for %s", block_device); 20031ae08745Sheppo 20041ae08745Sheppo if ((vd = 
kmem_zalloc(sizeof (*vd), KM_NOSLEEP)) == NULL) { 20051ae08745Sheppo PRN("No memory for virtual disk"); 20061ae08745Sheppo return (EAGAIN); 20071ae08745Sheppo } 20081ae08745Sheppo *vdp = vd; /* assign here so vds_destroy_vd() can cleanup later */ 20091ae08745Sheppo vd->vds = vds; 20101ae08745Sheppo 20111ae08745Sheppo 20120a55fbb7Slm66018 /* Open vdisk and initialize parameters */ 20130a55fbb7Slm66018 if ((status = vd_setup_vd(block_device, vd)) != 0) 20141ae08745Sheppo return (status); 20151ae08745Sheppo ASSERT(vd->nslices > 0 && vd->nslices <= V_NUMPAR); 20161ae08745Sheppo PR0("vdisk_type = %s, pseudo = %s, nslices = %u", 20171ae08745Sheppo ((vd->vdisk_type == VD_DISK_TYPE_DISK) ? "disk" : "slice"), 20181ae08745Sheppo (vd->pseudo ? "yes" : "no"), vd->nslices); 20191ae08745Sheppo 20201ae08745Sheppo 20211ae08745Sheppo /* Initialize locking */ 20221ae08745Sheppo if (ddi_get_soft_iblock_cookie(vds->dip, DDI_SOFTINT_MED, 20231ae08745Sheppo &iblock) != DDI_SUCCESS) { 20241ae08745Sheppo PRN("Could not get iblock cookie."); 20251ae08745Sheppo return (EIO); 20261ae08745Sheppo } 20271ae08745Sheppo 20281ae08745Sheppo mutex_init(&vd->lock, NULL, MUTEX_DRIVER, iblock); 20291ae08745Sheppo vd->initialized |= VD_LOCKING; 20301ae08745Sheppo 20311ae08745Sheppo 2032*d10e4ef2Snarayan /* Create start and completion task queues for the vdisk */ 2033*d10e4ef2Snarayan (void) snprintf(tq_name, sizeof (tq_name), "vd_startq%lu", id); 20341ae08745Sheppo PR1("tq_name = %s", tq_name); 2035*d10e4ef2Snarayan if ((vd->startq = ddi_taskq_create(vds->dip, tq_name, 1, 20361ae08745Sheppo TASKQ_DEFAULTPRI, 0)) == NULL) { 20371ae08745Sheppo PRN("Could not create task queue"); 20381ae08745Sheppo return (EIO); 20391ae08745Sheppo } 2040*d10e4ef2Snarayan (void) snprintf(tq_name, sizeof (tq_name), "vd_completionq%lu", id); 2041*d10e4ef2Snarayan PR1("tq_name = %s", tq_name); 2042*d10e4ef2Snarayan if ((vd->completionq = ddi_taskq_create(vds->dip, tq_name, 1, 2043*d10e4ef2Snarayan TASKQ_DEFAULTPRI, 0)) == NULL) { 2044*d10e4ef2Snarayan PRN("Could not create task queue"); 2045*d10e4ef2Snarayan return (EIO); 2046*d10e4ef2Snarayan } 2047*d10e4ef2Snarayan vd->enabled = 1; /* before callback can dispatch to startq */ 20481ae08745Sheppo 20491ae08745Sheppo 20501ae08745Sheppo /* Bring up LDC */ 20511ae08745Sheppo ldc_attr.devclass = LDC_DEV_BLK_SVC; 20521ae08745Sheppo ldc_attr.instance = ddi_get_instance(vds->dip); 20531ae08745Sheppo ldc_attr.mode = LDC_MODE_UNRELIABLE; 20541ae08745Sheppo ldc_attr.qlen = VD_LDC_QLEN; 20551ae08745Sheppo if ((status = ldc_init(ldc_id, &ldc_attr, &vd->ldc_handle)) != 0) { 20561ae08745Sheppo PRN("ldc_init(%lu) = errno %d", ldc_id, status); 20571ae08745Sheppo return (status); 20581ae08745Sheppo } 20591ae08745Sheppo vd->initialized |= VD_LDC; 20601ae08745Sheppo 20611ae08745Sheppo if ((status = ldc_reg_callback(vd->ldc_handle, vd_handle_ldc_events, 20621ae08745Sheppo (caddr_t)vd)) != 0) { 20631ae08745Sheppo PRN("ldc_reg_callback() returned errno %d", status); 20641ae08745Sheppo return (status); 20651ae08745Sheppo } 20661ae08745Sheppo 20671ae08745Sheppo if ((status = ldc_open(vd->ldc_handle)) != 0) { 20681ae08745Sheppo PRN("ldc_open() returned errno %d", status); 20691ae08745Sheppo return (status); 20701ae08745Sheppo } 20711ae08745Sheppo 20721ae08745Sheppo 20731ae08745Sheppo /* Add the successfully-initialized vdisk to the server's table */ 20741ae08745Sheppo if (mod_hash_insert(vds->vd_table, (mod_hash_key_t)id, vd) != 0) { 20751ae08745Sheppo PRN("Error adding vdisk ID %lu to table", id); 20761ae08745Sheppo return 
(EIO); 20771ae08745Sheppo } 20781ae08745Sheppo 20791ae08745Sheppo return (0); 20801ae08745Sheppo } 20811ae08745Sheppo 20821ae08745Sheppo /* 20831ae08745Sheppo * Destroy the state associated with a virtual disk 20841ae08745Sheppo */ 20851ae08745Sheppo static void 20861ae08745Sheppo vds_destroy_vd(void *arg) 20871ae08745Sheppo { 20881ae08745Sheppo vd_t *vd = (vd_t *)arg; 20891ae08745Sheppo 20901ae08745Sheppo 20911ae08745Sheppo if (vd == NULL) 20921ae08745Sheppo return; 20931ae08745Sheppo 2094*d10e4ef2Snarayan PR0("Destroying vdisk state"); 2095*d10e4ef2Snarayan 20961ae08745Sheppo /* Disable queuing requests for the vdisk */ 20971ae08745Sheppo if (vd->initialized & VD_LOCKING) { 20981ae08745Sheppo mutex_enter(&vd->lock); 20991ae08745Sheppo vd->enabled = 0; 21001ae08745Sheppo mutex_exit(&vd->lock); 21011ae08745Sheppo } 21021ae08745Sheppo 2103*d10e4ef2Snarayan /* Drain and destroy start queue (*before* destroying completionq) */ 2104*d10e4ef2Snarayan if (vd->startq != NULL) 2105*d10e4ef2Snarayan ddi_taskq_destroy(vd->startq); /* waits for queued tasks */ 2106*d10e4ef2Snarayan 2107*d10e4ef2Snarayan /* Drain and destroy completion queue (*before* shutting down LDC) */ 2108*d10e4ef2Snarayan if (vd->completionq != NULL) 2109*d10e4ef2Snarayan ddi_taskq_destroy(vd->completionq); /* waits for tasks */ 2110*d10e4ef2Snarayan 2111*d10e4ef2Snarayan if (vd->dring_task != NULL) { 2112*d10e4ef2Snarayan ASSERT(vd->dring_len != 0); 2113*d10e4ef2Snarayan kmem_free(vd->dring_task, 2114*d10e4ef2Snarayan (sizeof (*vd->dring_task)) * vd->dring_len); 2115*d10e4ef2Snarayan } 21161ae08745Sheppo 21171ae08745Sheppo /* Shut down LDC */ 21181ae08745Sheppo if (vd->initialized & VD_LDC) { 21191ae08745Sheppo if (vd->initialized & VD_DRING) 21201ae08745Sheppo (void) ldc_mem_dring_unmap(vd->dring_handle); 21211ae08745Sheppo (void) ldc_unreg_callback(vd->ldc_handle); 21221ae08745Sheppo (void) ldc_close(vd->ldc_handle); 21231ae08745Sheppo (void) ldc_fini(vd->ldc_handle); 21241ae08745Sheppo } 21251ae08745Sheppo 21261ae08745Sheppo /* Close any open backing-device slices */ 21271ae08745Sheppo for (uint_t slice = 0; slice < vd->nslices; slice++) { 21281ae08745Sheppo if (vd->ldi_handle[slice] != NULL) { 21291ae08745Sheppo PR0("Closing slice %u", slice); 21301ae08745Sheppo (void) ldi_close(vd->ldi_handle[slice], 21311ae08745Sheppo vd_open_flags, kcred); 21321ae08745Sheppo } 21331ae08745Sheppo } 21341ae08745Sheppo 21351ae08745Sheppo /* Free lock */ 21361ae08745Sheppo if (vd->initialized & VD_LOCKING) 21371ae08745Sheppo mutex_destroy(&vd->lock); 21381ae08745Sheppo 21391ae08745Sheppo /* Finally, free the vdisk structure itself */ 21401ae08745Sheppo kmem_free(vd, sizeof (*vd)); 21411ae08745Sheppo } 21421ae08745Sheppo 21431ae08745Sheppo static int 21441ae08745Sheppo vds_init_vd(vds_t *vds, uint64_t id, char *block_device, uint64_t ldc_id) 21451ae08745Sheppo { 21461ae08745Sheppo int status; 21471ae08745Sheppo vd_t *vd = NULL; 21481ae08745Sheppo 21491ae08745Sheppo 21501ae08745Sheppo #ifdef lint 21511ae08745Sheppo (void) vd; 21521ae08745Sheppo #endif /* lint */ 21531ae08745Sheppo 21541ae08745Sheppo if ((status = vds_do_init_vd(vds, id, block_device, ldc_id, &vd)) != 0) 21551ae08745Sheppo vds_destroy_vd(vd); 21561ae08745Sheppo 21571ae08745Sheppo return (status); 21581ae08745Sheppo } 21591ae08745Sheppo 21601ae08745Sheppo static int 21611ae08745Sheppo vds_do_get_ldc_id(md_t *md, mde_cookie_t vd_node, mde_cookie_t *channel, 21621ae08745Sheppo uint64_t *ldc_id) 21631ae08745Sheppo { 21641ae08745Sheppo int num_channels; 21651ae08745Sheppo 
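	/*
	 * "channel" is scratch space supplied by vds_get_ldc_id() below,
	 * sized to hold one cookie for every node in the MD subtree;
	 * md_scan_dag() fills it with the channel-endpoint children of the
	 * vdisk node, and only the first entry found is used.
	 */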
21661ae08745Sheppo 21671ae08745Sheppo /* Look for channel endpoint child(ren) of the vdisk MD node */ 21681ae08745Sheppo if ((num_channels = md_scan_dag(md, vd_node, 21691ae08745Sheppo md_find_name(md, VD_CHANNEL_ENDPOINT), 21701ae08745Sheppo md_find_name(md, "fwd"), channel)) <= 0) { 21711ae08745Sheppo PRN("No \"%s\" found for virtual disk", VD_CHANNEL_ENDPOINT); 21721ae08745Sheppo return (-1); 21731ae08745Sheppo } 21741ae08745Sheppo 21751ae08745Sheppo /* Get the "id" value for the first channel endpoint node */ 21761ae08745Sheppo if (md_get_prop_val(md, channel[0], VD_ID_PROP, ldc_id) != 0) { 21771ae08745Sheppo PRN("No \"%s\" property found for \"%s\" of vdisk", 21781ae08745Sheppo VD_ID_PROP, VD_CHANNEL_ENDPOINT); 21791ae08745Sheppo return (-1); 21801ae08745Sheppo } 21811ae08745Sheppo 21821ae08745Sheppo if (num_channels > 1) { 21831ae08745Sheppo PRN("Using ID of first of multiple channels for this vdisk"); 21841ae08745Sheppo } 21851ae08745Sheppo 21861ae08745Sheppo return (0); 21871ae08745Sheppo } 21881ae08745Sheppo 21891ae08745Sheppo static int 21901ae08745Sheppo vds_get_ldc_id(md_t *md, mde_cookie_t vd_node, uint64_t *ldc_id) 21911ae08745Sheppo { 21921ae08745Sheppo int num_nodes, status; 21931ae08745Sheppo size_t size; 21941ae08745Sheppo mde_cookie_t *channel; 21951ae08745Sheppo 21961ae08745Sheppo 21971ae08745Sheppo if ((num_nodes = md_node_count(md)) <= 0) { 21981ae08745Sheppo PRN("Invalid node count in Machine Description subtree"); 21991ae08745Sheppo return (-1); 22001ae08745Sheppo } 22011ae08745Sheppo size = num_nodes*(sizeof (*channel)); 22021ae08745Sheppo channel = kmem_zalloc(size, KM_SLEEP); 22031ae08745Sheppo status = vds_do_get_ldc_id(md, vd_node, channel, ldc_id); 22041ae08745Sheppo kmem_free(channel, size); 22051ae08745Sheppo 22061ae08745Sheppo return (status); 22071ae08745Sheppo } 22081ae08745Sheppo 22091ae08745Sheppo static void 22101ae08745Sheppo vds_add_vd(vds_t *vds, md_t *md, mde_cookie_t vd_node) 22111ae08745Sheppo { 22121ae08745Sheppo char *block_device = NULL; 22131ae08745Sheppo uint64_t id = 0, ldc_id = 0; 22141ae08745Sheppo 22151ae08745Sheppo 22161ae08745Sheppo if (md_get_prop_val(md, vd_node, VD_ID_PROP, &id) != 0) { 22171ae08745Sheppo PRN("Error getting vdisk \"%s\"", VD_ID_PROP); 22181ae08745Sheppo return; 22191ae08745Sheppo } 22201ae08745Sheppo PR0("Adding vdisk ID %lu", id); 22211ae08745Sheppo if (md_get_prop_str(md, vd_node, VD_BLOCK_DEVICE_PROP, 22221ae08745Sheppo &block_device) != 0) { 22231ae08745Sheppo PRN("Error getting vdisk \"%s\"", VD_BLOCK_DEVICE_PROP); 22241ae08745Sheppo return; 22251ae08745Sheppo } 22261ae08745Sheppo 22271ae08745Sheppo if (vds_get_ldc_id(md, vd_node, &ldc_id) != 0) { 22281ae08745Sheppo PRN("Error getting LDC ID for vdisk %lu", id); 22291ae08745Sheppo return; 22301ae08745Sheppo } 22311ae08745Sheppo 22321ae08745Sheppo if (vds_init_vd(vds, id, block_device, ldc_id) != 0) { 22331ae08745Sheppo PRN("Failed to add vdisk ID %lu", id); 22341ae08745Sheppo return; 22351ae08745Sheppo } 22361ae08745Sheppo } 22371ae08745Sheppo 22381ae08745Sheppo static void 22391ae08745Sheppo vds_remove_vd(vds_t *vds, md_t *md, mde_cookie_t vd_node) 22401ae08745Sheppo { 22411ae08745Sheppo uint64_t id = 0; 22421ae08745Sheppo 22431ae08745Sheppo 22441ae08745Sheppo if (md_get_prop_val(md, vd_node, VD_ID_PROP, &id) != 0) { 22451ae08745Sheppo PRN("Unable to get \"%s\" property from vdisk's MD node", 22461ae08745Sheppo VD_ID_PROP); 22471ae08745Sheppo return; 22481ae08745Sheppo } 22491ae08745Sheppo PR0("Removing vdisk ID %lu", id); 22501ae08745Sheppo if 
(mod_hash_destroy(vds->vd_table, (mod_hash_key_t)id) != 0) 22511ae08745Sheppo PRN("No vdisk entry found for vdisk ID %lu", id); 22521ae08745Sheppo } 22531ae08745Sheppo 22541ae08745Sheppo static void 22551ae08745Sheppo vds_change_vd(vds_t *vds, md_t *prev_md, mde_cookie_t prev_vd_node, 22561ae08745Sheppo md_t *curr_md, mde_cookie_t curr_vd_node) 22571ae08745Sheppo { 22581ae08745Sheppo char *curr_dev, *prev_dev; 22591ae08745Sheppo uint64_t curr_id = 0, curr_ldc_id = 0; 22601ae08745Sheppo uint64_t prev_id = 0, prev_ldc_id = 0; 22611ae08745Sheppo size_t len; 22621ae08745Sheppo 22631ae08745Sheppo 22641ae08745Sheppo /* Validate that vdisk ID has not changed */ 22651ae08745Sheppo if (md_get_prop_val(prev_md, prev_vd_node, VD_ID_PROP, &prev_id) != 0) { 22661ae08745Sheppo PRN("Error getting previous vdisk \"%s\" property", 22671ae08745Sheppo VD_ID_PROP); 22681ae08745Sheppo return; 22691ae08745Sheppo } 22701ae08745Sheppo if (md_get_prop_val(curr_md, curr_vd_node, VD_ID_PROP, &curr_id) != 0) { 22711ae08745Sheppo PRN("Error getting current vdisk \"%s\" property", VD_ID_PROP); 22721ae08745Sheppo return; 22731ae08745Sheppo } 22741ae08745Sheppo if (curr_id != prev_id) { 22751ae08745Sheppo PRN("Not changing vdisk: ID changed from %lu to %lu", 22761ae08745Sheppo prev_id, curr_id); 22771ae08745Sheppo return; 22781ae08745Sheppo } 22791ae08745Sheppo 22801ae08745Sheppo /* Validate that LDC ID has not changed */ 22811ae08745Sheppo if (vds_get_ldc_id(prev_md, prev_vd_node, &prev_ldc_id) != 0) { 22821ae08745Sheppo PRN("Error getting LDC ID for vdisk %lu", prev_id); 22831ae08745Sheppo return; 22841ae08745Sheppo } 22851ae08745Sheppo 22861ae08745Sheppo if (vds_get_ldc_id(curr_md, curr_vd_node, &curr_ldc_id) != 0) { 22871ae08745Sheppo PRN("Error getting LDC ID for vdisk %lu", curr_id); 22881ae08745Sheppo return; 22891ae08745Sheppo } 22901ae08745Sheppo if (curr_ldc_id != prev_ldc_id) { 22910a55fbb7Slm66018 _NOTE(NOTREACHED); /* lint is confused */ 22921ae08745Sheppo PRN("Not changing vdisk: " 22931ae08745Sheppo "LDC ID changed from %lu to %lu", prev_ldc_id, curr_ldc_id); 22941ae08745Sheppo return; 22951ae08745Sheppo } 22961ae08745Sheppo 22971ae08745Sheppo /* Determine whether device path has changed */ 22981ae08745Sheppo if (md_get_prop_str(prev_md, prev_vd_node, VD_BLOCK_DEVICE_PROP, 22991ae08745Sheppo &prev_dev) != 0) { 23001ae08745Sheppo PRN("Error getting previous vdisk \"%s\"", 23011ae08745Sheppo VD_BLOCK_DEVICE_PROP); 23021ae08745Sheppo return; 23031ae08745Sheppo } 23041ae08745Sheppo if (md_get_prop_str(curr_md, curr_vd_node, VD_BLOCK_DEVICE_PROP, 23051ae08745Sheppo &curr_dev) != 0) { 23061ae08745Sheppo PRN("Error getting current vdisk \"%s\"", VD_BLOCK_DEVICE_PROP); 23071ae08745Sheppo return; 23081ae08745Sheppo } 23091ae08745Sheppo if (((len = strlen(curr_dev)) == strlen(prev_dev)) && 23101ae08745Sheppo (strncmp(curr_dev, prev_dev, len) == 0)) 23111ae08745Sheppo return; /* no relevant (supported) change */ 23121ae08745Sheppo 23131ae08745Sheppo PR0("Changing vdisk ID %lu", prev_id); 23141ae08745Sheppo /* Remove old state, which will close vdisk and reset */ 23151ae08745Sheppo if (mod_hash_destroy(vds->vd_table, (mod_hash_key_t)prev_id) != 0) 23161ae08745Sheppo PRN("No entry found for vdisk ID %lu", prev_id); 23171ae08745Sheppo /* Re-initialize vdisk with new state */ 23181ae08745Sheppo if (vds_init_vd(vds, curr_id, curr_dev, curr_ldc_id) != 0) { 23191ae08745Sheppo PRN("Failed to change vdisk ID %lu", curr_id); 23201ae08745Sheppo return; 23211ae08745Sheppo } 23221ae08745Sheppo } 23231ae08745Sheppo 
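/*
 * vds_add_vd() and vds_change_vd() above repeat the same per-vdisk lookups:
 * the "id" property, the backing block-device path, and the LDC ID.  The
 * helper below is only an illustrative sketch of how those lookups could be
 * shared (the name vds_get_vd_props() is hypothetical and not part of this
 * driver); it uses only interfaces already called above.
 */
#if 0	/* illustrative sketch, not compiled */
static int
vds_get_vd_props(md_t *md, mde_cookie_t vd_node, uint64_t *id,
    char **block_device, uint64_t *ldc_id)
{
	if (md_get_prop_val(md, vd_node, VD_ID_PROP, id) != 0) {
		PRN("Error getting vdisk \"%s\"", VD_ID_PROP);
		return (EINVAL);
	}
	if (md_get_prop_str(md, vd_node, VD_BLOCK_DEVICE_PROP,
	    block_device) != 0) {
		PRN("Error getting vdisk \"%s\"", VD_BLOCK_DEVICE_PROP);
		return (EINVAL);
	}
	if (vds_get_ldc_id(md, vd_node, ldc_id) != 0) {
		PRN("Error getting LDC ID for vdisk %lu", *id);
		return (EINVAL);
	}
	return (0);
}
#endif
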
23241ae08745Sheppo static int 23251ae08745Sheppo vds_process_md(void *arg, mdeg_result_t *md) 23261ae08745Sheppo { 23271ae08745Sheppo int i; 23281ae08745Sheppo vds_t *vds = arg; 23291ae08745Sheppo 23301ae08745Sheppo 23311ae08745Sheppo if (md == NULL) 23321ae08745Sheppo return (MDEG_FAILURE); 23331ae08745Sheppo ASSERT(vds != NULL); 23341ae08745Sheppo 23351ae08745Sheppo for (i = 0; i < md->removed.nelem; i++) 23361ae08745Sheppo vds_remove_vd(vds, md->removed.mdp, md->removed.mdep[i]); 23371ae08745Sheppo for (i = 0; i < md->match_curr.nelem; i++) 23381ae08745Sheppo vds_change_vd(vds, md->match_prev.mdp, md->match_prev.mdep[i], 23391ae08745Sheppo md->match_curr.mdp, md->match_curr.mdep[i]); 23401ae08745Sheppo for (i = 0; i < md->added.nelem; i++) 23411ae08745Sheppo vds_add_vd(vds, md->added.mdp, md->added.mdep[i]); 23421ae08745Sheppo 23431ae08745Sheppo return (MDEG_SUCCESS); 23441ae08745Sheppo } 23451ae08745Sheppo 23461ae08745Sheppo static int 23471ae08745Sheppo vds_do_attach(dev_info_t *dip) 23481ae08745Sheppo { 23491ae08745Sheppo static char reg_prop[] = "reg"; /* devinfo ID prop */ 23501ae08745Sheppo 23511ae08745Sheppo /* MDEG specification for a (particular) vds node */ 23521ae08745Sheppo static mdeg_prop_spec_t vds_prop_spec[] = { 23531ae08745Sheppo {MDET_PROP_STR, "name", {VDS_NAME}}, 23541ae08745Sheppo {MDET_PROP_VAL, "cfg-handle", {0}}, 23551ae08745Sheppo {MDET_LIST_END, NULL, {0}}}; 23561ae08745Sheppo static mdeg_node_spec_t vds_spec = {"virtual-device", vds_prop_spec}; 23571ae08745Sheppo 23581ae08745Sheppo /* MDEG specification for matching a vd node */ 23591ae08745Sheppo static md_prop_match_t vd_prop_spec[] = { 23601ae08745Sheppo {MDET_PROP_VAL, VD_ID_PROP}, 23611ae08745Sheppo {MDET_LIST_END, NULL}}; 23621ae08745Sheppo static mdeg_node_match_t vd_spec = {"virtual-device-port", 23631ae08745Sheppo vd_prop_spec}; 23641ae08745Sheppo 23651ae08745Sheppo int status; 23661ae08745Sheppo uint64_t cfg_handle; 23671ae08745Sheppo minor_t instance = ddi_get_instance(dip); 23681ae08745Sheppo vds_t *vds; 23691ae08745Sheppo 23701ae08745Sheppo 23711ae08745Sheppo /* 23721ae08745Sheppo * The "cfg-handle" property of a vds node in an MD contains the MD's 23731ae08745Sheppo * notion of "instance", or unique identifier, for that node; OBP 23741ae08745Sheppo * stores the value of the "cfg-handle" MD property as the value of 23751ae08745Sheppo * the "reg" property on the node in the device tree it builds from 23761ae08745Sheppo * the MD and passes to Solaris. Thus, we look up the devinfo node's 23771ae08745Sheppo * "reg" property value to uniquely identify this device instance when 23781ae08745Sheppo * registering with the MD event-generation framework. If the "reg" 23791ae08745Sheppo * property cannot be found, the device tree state is presumably so 23801ae08745Sheppo * broken that there is no point in continuing. 
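	 *
	 * The value read here is the one stored into vds_prop_spec[1] below
	 * and passed to mdeg_register(), so this instance receives MD-update
	 * callbacks only for the vds node whose "cfg-handle" matches.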
23811ae08745Sheppo */ 23821ae08745Sheppo if (!ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, reg_prop)) { 23831ae08745Sheppo PRN("vds \"%s\" property does not exist", reg_prop); 23841ae08745Sheppo return (DDI_FAILURE); 23851ae08745Sheppo } 23861ae08745Sheppo 23871ae08745Sheppo /* Get the MD instance for later MDEG registration */ 23881ae08745Sheppo cfg_handle = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 23891ae08745Sheppo reg_prop, -1); 23901ae08745Sheppo 23911ae08745Sheppo if (ddi_soft_state_zalloc(vds_state, instance) != DDI_SUCCESS) { 23921ae08745Sheppo PRN("Could not allocate state for instance %u", instance); 23931ae08745Sheppo return (DDI_FAILURE); 23941ae08745Sheppo } 23951ae08745Sheppo 23961ae08745Sheppo if ((vds = ddi_get_soft_state(vds_state, instance)) == NULL) { 23971ae08745Sheppo PRN("Could not get state for instance %u", instance); 23981ae08745Sheppo ddi_soft_state_free(vds_state, instance); 23991ae08745Sheppo return (DDI_FAILURE); 24001ae08745Sheppo } 24011ae08745Sheppo 24021ae08745Sheppo 24031ae08745Sheppo vds->dip = dip; 24041ae08745Sheppo vds->vd_table = mod_hash_create_ptrhash("vds_vd_table", VDS_NCHAINS, 24051ae08745Sheppo vds_destroy_vd, 24061ae08745Sheppo sizeof (void *)); 24071ae08745Sheppo ASSERT(vds->vd_table != NULL); 24081ae08745Sheppo 24091ae08745Sheppo if ((status = ldi_ident_from_dip(dip, &vds->ldi_ident)) != 0) { 24101ae08745Sheppo PRN("ldi_ident_from_dip() returned errno %d", status); 24111ae08745Sheppo return (DDI_FAILURE); 24121ae08745Sheppo } 24131ae08745Sheppo vds->initialized |= VDS_LDI; 24141ae08745Sheppo 24151ae08745Sheppo /* Register for MD updates */ 24161ae08745Sheppo vds_prop_spec[1].ps_val = cfg_handle; 24171ae08745Sheppo if (mdeg_register(&vds_spec, &vd_spec, vds_process_md, vds, 24181ae08745Sheppo &vds->mdeg) != MDEG_SUCCESS) { 24191ae08745Sheppo PRN("Unable to register for MD updates"); 24201ae08745Sheppo return (DDI_FAILURE); 24211ae08745Sheppo } 24221ae08745Sheppo vds->initialized |= VDS_MDEG; 24231ae08745Sheppo 24240a55fbb7Slm66018 /* Prevent auto-detaching so driver is available whenever MD changes */ 24250a55fbb7Slm66018 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip, DDI_NO_AUTODETACH, 1) != 24260a55fbb7Slm66018 DDI_PROP_SUCCESS) { 24270a55fbb7Slm66018 PRN("failed to set \"%s\" property for instance %u", 24280a55fbb7Slm66018 DDI_NO_AUTODETACH, instance); 24290a55fbb7Slm66018 } 24300a55fbb7Slm66018 24311ae08745Sheppo ddi_report_dev(dip); 24321ae08745Sheppo return (DDI_SUCCESS); 24331ae08745Sheppo } 24341ae08745Sheppo 24351ae08745Sheppo static int 24361ae08745Sheppo vds_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 24371ae08745Sheppo { 24381ae08745Sheppo int status; 24391ae08745Sheppo 24401ae08745Sheppo switch (cmd) { 24411ae08745Sheppo case DDI_ATTACH: 2442*d10e4ef2Snarayan PR0("Attaching"); 24431ae08745Sheppo if ((status = vds_do_attach(dip)) != DDI_SUCCESS) 24441ae08745Sheppo (void) vds_detach(dip, DDI_DETACH); 24451ae08745Sheppo return (status); 24461ae08745Sheppo case DDI_RESUME: 2447*d10e4ef2Snarayan PR0("No action required for DDI_RESUME"); 24481ae08745Sheppo return (DDI_SUCCESS); 24491ae08745Sheppo default: 24501ae08745Sheppo return (DDI_FAILURE); 24511ae08745Sheppo } 24521ae08745Sheppo } 24531ae08745Sheppo 24541ae08745Sheppo static struct dev_ops vds_ops = { 24551ae08745Sheppo DEVO_REV, /* devo_rev */ 24561ae08745Sheppo 0, /* devo_refcnt */ 24571ae08745Sheppo ddi_no_info, /* devo_getinfo */ 24581ae08745Sheppo nulldev, /* devo_identify */ 24591ae08745Sheppo nulldev, /* devo_probe */ 24601ae08745Sheppo vds_attach, 
/* devo_attach */ 24611ae08745Sheppo vds_detach, /* devo_detach */ 24621ae08745Sheppo nodev, /* devo_reset */ 24631ae08745Sheppo NULL, /* devo_cb_ops */ 24641ae08745Sheppo NULL, /* devo_bus_ops */ 24651ae08745Sheppo nulldev /* devo_power */ 24661ae08745Sheppo }; 24671ae08745Sheppo 24681ae08745Sheppo static struct modldrv modldrv = { 24691ae08745Sheppo &mod_driverops, 24701ae08745Sheppo "virtual disk server v%I%", 24711ae08745Sheppo &vds_ops, 24721ae08745Sheppo }; 24731ae08745Sheppo 24741ae08745Sheppo static struct modlinkage modlinkage = { 24751ae08745Sheppo MODREV_1, 24761ae08745Sheppo &modldrv, 24771ae08745Sheppo NULL 24781ae08745Sheppo }; 24791ae08745Sheppo 24801ae08745Sheppo 24811ae08745Sheppo int 24821ae08745Sheppo _init(void) 24831ae08745Sheppo { 24841ae08745Sheppo int i, status; 24851ae08745Sheppo 2486*d10e4ef2Snarayan 24871ae08745Sheppo if ((status = ddi_soft_state_init(&vds_state, sizeof (vds_t), 1)) != 0) 24881ae08745Sheppo return (status); 24891ae08745Sheppo if ((status = mod_install(&modlinkage)) != 0) { 24901ae08745Sheppo ddi_soft_state_fini(&vds_state); 24911ae08745Sheppo return (status); 24921ae08745Sheppo } 24931ae08745Sheppo 24941ae08745Sheppo /* Fill in the bit-mask of server-supported operations */ 24951ae08745Sheppo for (i = 0; i < vds_noperations; i++) 24961ae08745Sheppo vds_operations |= 1 << (vds_operation[i].operation - 1); 24971ae08745Sheppo 24981ae08745Sheppo return (0); 24991ae08745Sheppo } 25001ae08745Sheppo 25011ae08745Sheppo int 25021ae08745Sheppo _info(struct modinfo *modinfop) 25031ae08745Sheppo { 25041ae08745Sheppo return (mod_info(&modlinkage, modinfop)); 25051ae08745Sheppo } 25061ae08745Sheppo 25071ae08745Sheppo int 25081ae08745Sheppo _fini(void) 25091ae08745Sheppo { 25101ae08745Sheppo int status; 25111ae08745Sheppo 2512*d10e4ef2Snarayan 25131ae08745Sheppo if ((status = mod_remove(&modlinkage)) != 0) 25141ae08745Sheppo return (status); 25151ae08745Sheppo ddi_soft_state_fini(&vds_state); 25161ae08745Sheppo return (0); 25171ae08745Sheppo } 2518
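/*
 * A worked example of the bit-mask computed in _init() above, using
 * hypothetical operation numbers: handlers for operations 1, 2, and 5 in
 * vds_operation[] would set bits (1 << 0), (1 << 1), and (1 << 4), giving
 * vds_operations == 0x13.
 */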