/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Virtual disk server
 */


#include <sys/types.h>
#include <sys/conf.h>
#include <sys/crc32.h>
#include <sys/ddi.h>
#include <sys/dkio.h>
#include <sys/file.h>
#include <sys/fs/hsfs_isospec.h>
#include <sys/mdeg.h>
#include <sys/mhd.h>
#include <sys/modhash.h>
#include <sys/note.h>
#include <sys/pathname.h>
#include <sys/sdt.h>
#include <sys/sunddi.h>
#include <sys/sunldi.h>
#include <sys/sysmacros.h>
#include <sys/vio_common.h>
#include <sys/vio_util.h>
#include <sys/vdsk_mailbox.h>
#include <sys/vdsk_common.h>
#include <sys/vtoc.h>
#include <sys/vfs.h>
#include <sys/stat.h>
#include <sys/scsi/impl/uscsi.h>
#include <vm/seg_map.h>

/* Virtual disk server initialization flags */
#define	VDS_LDI			0x01
#define	VDS_MDEG		0x02

/* Virtual disk server tunable parameters */
#define	VDS_RETRIES		5
#define	VDS_LDC_DELAY		1000		/* 1 msec */
#define	VDS_DEV_DELAY		10000000	/* 10 secs */
#define	VDS_NCHAINS		32

/* Identification parameters for MD, synthetic dkio(7i) structures, etc. */
#define	VDS_NAME		"virtual-disk-server"

#define	VD_NAME			"vd"
#define	VD_VOLUME_NAME		"vdisk"
#define	VD_ASCIILABEL		"Virtual Disk"

#define	VD_CHANNEL_ENDPOINT	"channel-endpoint"
#define	VD_ID_PROP		"id"
#define	VD_BLOCK_DEVICE_PROP	"vds-block-device"
#define	VD_BLOCK_DEVICE_OPTS	"vds-block-device-opts"
#define	VD_REG_PROP		"reg"

/* Virtual disk initialization flags */
#define	VD_DISK_READY		0x01
#define	VD_LOCKING		0x02
#define	VD_LDC			0x04
#define	VD_DRING		0x08
#define	VD_SID			0x10
#define	VD_SEQ_NUM		0x20
#define	VD_SETUP_ERROR		0x40

/* Flags for writing to a vdisk which is a file */
#define	VD_FILE_WRITE_FLAGS	SM_ASYNC

/* Number of backup labels */
#define	VD_FILE_NUM_BACKUP	5

/* Timeout for SCSI I/O */
#define	VD_SCSI_RDWR_TIMEOUT	30	/* 30 secs */

/*
 * By Solaris convention, slice/partition 2 represents the entire disk;
 * unfortunately, this convention does not appear to be codified.
 */
#define	VD_ENTIRE_DISK_SLICE	2

/* Return a cpp token as a string */
#define	STRINGIZE(token)	#token

/*
 * Print a message prefixed with the current function name to the message log
 * (and optionally to the console for verbose boots); these macros use cpp's
 * concatenation of string literals and C99 variable-length-argument-list
 * macros
 */
#define	PRN(...)	_PRN("?%s(): "__VA_ARGS__, "")
#define	_PRN(format, ...)					\
	cmn_err(CE_CONT, format"%s", __func__, __VA_ARGS__)
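/*
 * For example, PRN("status %d", s) expands (via _PRN) to
 * cmn_err(CE_CONT, "?%s(): status %d%s", __func__, s, "");
 * the trailing "" argument keeps the variadic argument list non-empty
 * even when PRN() is invoked with a format string alone.
 */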
_PRN("?%s(): "__VA_ARGS__, "") 117 #define _PRN(format, ...) \ 118 cmn_err(CE_CONT, format"%s", __func__, __VA_ARGS__) 119 120 /* Return a pointer to the "i"th vdisk dring element */ 121 #define VD_DRING_ELEM(i) ((vd_dring_entry_t *)(void *) \ 122 (vd->dring + (i)*vd->descriptor_size)) 123 124 /* Return the virtual disk client's type as a string (for use in messages) */ 125 #define VD_CLIENT(vd) \ 126 (((vd)->xfer_mode == VIO_DESC_MODE) ? "in-band client" : \ 127 (((vd)->xfer_mode == VIO_DRING_MODE) ? "dring client" : \ 128 (((vd)->xfer_mode == 0) ? "null client" : \ 129 "unsupported client"))) 130 131 /* Read disk label from a disk on file */ 132 #define VD_FILE_LABEL_READ(vd, labelp) \ 133 vd_file_rw(vd, VD_SLICE_NONE, VD_OP_BREAD, (caddr_t)labelp, \ 134 0, sizeof (struct dk_label)) 135 136 /* Write disk label to a disk on file */ 137 #define VD_FILE_LABEL_WRITE(vd, labelp) \ 138 vd_file_rw(vd, VD_SLICE_NONE, VD_OP_BWRITE, (caddr_t)labelp, \ 139 0, sizeof (struct dk_label)) 140 141 /* Message for disk access rights reset failure */ 142 #define VD_RESET_ACCESS_FAILURE_MSG \ 143 "Fail to reset disk access rights for disk %s" 144 145 /* 146 * Specification of an MD node passed to the MDEG to filter any 147 * 'vport' nodes that do not belong to the specified node. This 148 * template is copied for each vds instance and filled in with 149 * the appropriate 'cfg-handle' value before being passed to the MDEG. 150 */ 151 static mdeg_prop_spec_t vds_prop_template[] = { 152 { MDET_PROP_STR, "name", VDS_NAME }, 153 { MDET_PROP_VAL, "cfg-handle", NULL }, 154 { MDET_LIST_END, NULL, NULL } 155 }; 156 157 #define VDS_SET_MDEG_PROP_INST(specp, val) (specp)[1].ps_val = (val); 158 159 /* 160 * Matching criteria passed to the MDEG to register interest 161 * in changes to 'virtual-device-port' nodes identified by their 162 * 'id' property. 163 */ 164 static md_prop_match_t vd_prop_match[] = { 165 { MDET_PROP_VAL, VD_ID_PROP }, 166 { MDET_LIST_END, NULL } 167 }; 168 169 static mdeg_node_match_t vd_match = {"virtual-device-port", 170 vd_prop_match}; 171 172 /* 173 * Options for the VD_BLOCK_DEVICE_OPTS property. 
/* Debugging macros */
#ifdef DEBUG

static int	vd_msglevel = 0;

#define	PR0 if (vd_msglevel > 0)	PRN
#define	PR1 if (vd_msglevel > 1)	PRN
#define	PR2 if (vd_msglevel > 2)	PRN

#define	VD_DUMP_DRING_ELEM(elem)					\
	PR0("dst:%x op:%x st:%u nb:%lx addr:%lx ncook:%u\n",		\
	    elem->hdr.dstate,						\
	    elem->payload.operation,					\
	    elem->payload.status,					\
	    elem->payload.nbytes,					\
	    elem->payload.addr,						\
	    elem->payload.ncookies);

char *
vd_decode_state(int state)
{
	char *str;

#define	CASE_STATE(_s)	case _s: str = #_s; break;

	switch (state) {
	CASE_STATE(VD_STATE_INIT)
	CASE_STATE(VD_STATE_VER)
	CASE_STATE(VD_STATE_ATTR)
	CASE_STATE(VD_STATE_DRING)
	CASE_STATE(VD_STATE_RDX)
	CASE_STATE(VD_STATE_DATA)
	default: str = "unknown"; break;
	}

#undef CASE_STATE

	return (str);
}

void
vd_decode_tag(vio_msg_t *msg)
{
	char *tstr, *sstr, *estr;

#define	CASE_TYPE(_s)	case _s: tstr = #_s; break;

	switch (msg->tag.vio_msgtype) {
	CASE_TYPE(VIO_TYPE_CTRL)
	CASE_TYPE(VIO_TYPE_DATA)
	CASE_TYPE(VIO_TYPE_ERR)
	default: tstr = "unknown"; break;
	}

#undef CASE_TYPE

#define	CASE_SUBTYPE(_s) case _s: sstr = #_s; break;

	switch (msg->tag.vio_subtype) {
	CASE_SUBTYPE(VIO_SUBTYPE_INFO)
	CASE_SUBTYPE(VIO_SUBTYPE_ACK)
	CASE_SUBTYPE(VIO_SUBTYPE_NACK)
	default: sstr = "unknown"; break;
	}

#undef CASE_SUBTYPE

#define	CASE_ENV(_s)	case _s: estr = #_s; break;

	switch (msg->tag.vio_subtype_env) {
	CASE_ENV(VIO_VER_INFO)
	CASE_ENV(VIO_ATTR_INFO)
	CASE_ENV(VIO_DRING_REG)
	CASE_ENV(VIO_DRING_UNREG)
	CASE_ENV(VIO_RDX)
	CASE_ENV(VIO_PKT_DATA)
	CASE_ENV(VIO_DESC_DATA)
	CASE_ENV(VIO_DRING_DATA)
	default: estr = "unknown"; break;
	}

#undef CASE_ENV

	PR1("(%x/%x/%x) message : (%s/%s/%s)",
	    msg->tag.vio_msgtype, msg->tag.vio_subtype,
	    msg->tag.vio_subtype_env, tstr, sstr, estr);
}

#else	/* !DEBUG */

#define	PR0(...)
#define	PR1(...)
#define	PR2(...)

#define	VD_DUMP_DRING_ELEM(elem)

#define	vd_decode_state(_s)	(NULL)
#define	vd_decode_tag(_s)	(NULL)

#endif	/* DEBUG */
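/*
 * On a DEBUG kernel the verbosity above can be raised at runtime by
 * patching vd_msglevel, for example (assuming the standard mechanism
 * for writing driver variables with mdb):
 *
 *	echo 'vd_msglevel/W 2' | mdb -kw
 */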
/*
 * Soft state structure for a vds instance
 */
typedef struct vds {
	uint_t		initialized;	/* driver inst initialization flags */
	dev_info_t	*dip;		/* driver inst devinfo pointer */
	ldi_ident_t	ldi_ident;	/* driver's identifier for LDI */
	mod_hash_t	*vd_table;	/* table of virtual disks served */
	mdeg_node_spec_t *ispecp;	/* mdeg node specification */
	mdeg_handle_t	mdeg;		/* handle for MDEG operations */
} vds_t;

/*
 * Types of descriptor-processing tasks
 */
typedef enum vd_task_type {
	VD_NONFINAL_RANGE_TASK,	/* task for intermediate descriptor in range */
	VD_FINAL_RANGE_TASK,	/* task for last in a range of descriptors */
} vd_task_type_t;

/*
 * Structure describing the task for processing a descriptor
 */
typedef struct vd_task {
	struct vd		*vd;		/* vd instance task is for */
	vd_task_type_t		type;		/* type of descriptor task */
	int			index;		/* dring elem index for task */
	vio_msg_t		*msg;		/* VIO message task is for */
	size_t			msglen;		/* length of message content */
	vd_dring_payload_t	*request;	/* request task will perform */
	struct buf		buf;		/* buf(9s) for I/O request */
	ldc_mem_handle_t	mhdl;		/* task memory handle */
	int			status;		/* status of processing task */
	int	(*completef)(struct vd_task *task); /* completion func ptr */
} vd_task_t;

/*
 * Soft state structure for a virtual disk instance
 */
typedef struct vd {
	uint_t		initialized;	/* vdisk initialization flags */
	uint64_t	operations;	/* bitmask of VD_OPs exported */
	vio_ver_t	version;	/* ver negotiated with client */
	vds_t		*vds;		/* server for this vdisk */
	ddi_taskq_t	*startq;	/* queue for I/O start tasks */
	ddi_taskq_t	*completionq;	/* queue for completion tasks */
	ldi_handle_t	ldi_handle[V_NUMPAR];	/* LDI slice handles */
	char		device_path[MAXPATHLEN + 1]; /* vdisk device */
	dev_t		dev[V_NUMPAR];	/* dev numbers for slices */
	int		open_flags;	/* open flags */
	uint_t		nslices;	/* number of slices */
	size_t		vdisk_size;	/* number of blocks in vdisk */
	size_t		vdisk_block_size; /* size of each vdisk block */
	vd_disk_type_t	vdisk_type;	/* slice or entire disk */
	vd_disk_label_t	vdisk_label;	/* EFI or VTOC label */
	vd_media_t	vdisk_media;	/* media type of backing dev. */
	boolean_t	is_atapi_dev;	/* Is this an IDE CD-ROM dev? */
	ushort_t	max_xfer_sz;	/* max xfer size in DEV_BSIZE */
	size_t		block_size;	/* blk size of actual device */
	boolean_t	pseudo;		/* underlying pseudo dev */
	boolean_t	file;		/* is vDisk backed by a file? */
	boolean_t	scsi;		/* is vDisk backed by scsi? */
	vnode_t		*file_vnode;	/* file vnode */
	size_t		file_size;	/* file size */
	ddi_devid_t	file_devid;	/* devid for disk image */
	struct dk_efi	dk_efi;		/* synthetic for slice type */
	struct dk_geom	dk_geom;	/* synthetic for slice type */
	struct dk_minfo	dk_minfo;	/* synthetic for slice type */
	struct vtoc	vtoc;		/* synthetic for slice type */
	boolean_t	ownership;	/* disk ownership status */
	ldc_status_t	ldc_state;	/* LDC connection state */
	ldc_handle_t	ldc_handle;	/* handle for LDC comm */
	size_t		max_msglen;	/* largest LDC message len */
	vd_state_t	state;		/* client handshake state */
	uint8_t		xfer_mode;	/* transfer mode with client */
	uint32_t	sid;		/* client's session ID */
	uint64_t	seq_num;	/* message sequence number */
	uint64_t	dring_ident;	/* identifier of dring */
	ldc_dring_handle_t dring_handle; /* handle for dring ops */
	uint32_t	descriptor_size; /* num bytes in desc */
	uint32_t	dring_len;	/* number of dring elements */
	caddr_t		dring;		/* address of dring */
	caddr_t		vio_msgp;	/* vio msg staging buffer */
	vd_task_t	inband_task;	/* task for inband descriptor */
	vd_task_t	*dring_task;	/* tasks dring elements */

	kmutex_t	lock;		/* protects variables below */
	boolean_t	enabled;	/* is vdisk enabled? */
	boolean_t	reset_state;	/* reset connection state? */
	boolean_t	reset_ldc;	/* reset LDC channel? */
} vd_t;

typedef struct vds_operation {
	char	*namep;
	uint8_t	operation;
	int	(*start)(vd_task_t *task);
	int	(*complete)(vd_task_t *task);
} vds_operation_t;

typedef struct vd_ioctl {
	uint8_t		operation;		/* vdisk operation */
	const char	*operation_name;	/* vdisk operation name */
	size_t		nbytes;			/* size of operation buffer */
	int		cmd;			/* corresponding ioctl cmd */
	const char	*cmd_name;		/* ioctl cmd name */
	void		*arg;			/* ioctl cmd argument */
	/* convert input vd_buf to output ioctl_arg */
	int		(*copyin)(void *vd_buf, size_t, void *ioctl_arg);
	/* convert input ioctl_arg to output vd_buf */
	void		(*copyout)(void *ioctl_arg, void *vd_buf);
	/* write is true if the operation writes any data to the backend */
	boolean_t	write;
} vd_ioctl_t;

/* Define trivial copyin/copyout conversion function flag */
#define	VD_IDENTITY_IN	((int (*)(void *, size_t, void *))-1)
#define	VD_IDENTITY_OUT	((void (*)(void *, void *))-1)


static int	vds_ldc_retries = VDS_RETRIES;
static int	vds_ldc_delay = VDS_LDC_DELAY;
static int	vds_dev_retries = VDS_RETRIES;
static int	vds_dev_delay = VDS_DEV_DELAY;
static void	*vds_state;

static uint_t	vd_file_write_flags = VD_FILE_WRITE_FLAGS;

static short	vd_scsi_rdwr_timeout = VD_SCSI_RDWR_TIMEOUT;
static int	vd_scsi_debug = USCSI_SILENT;

/*
 * Tunable to define the behavior of the service domain if the vdisk server
 * fails to reset disk exclusive access when an LDC channel is reset. When an
 * LDC channel is reset the vdisk server will try to reset disk exclusive
 * access by releasing any SCSI-2 reservation or resetting the disk. If these
 * actions fail then the default behavior (vd_reset_access_failure = 0) is to
 * print a warning message. This default behavior can be changed by setting
 * the vd_reset_access_failure variable to A_REBOOT (= 0x1), which will cause
 * the service domain to reboot, or to A_DUMP (= 0x5), which will cause the
 * service domain to panic. In both cases, the reset of the service domain
 * should trigger a reset of the SCSI buses and hopefully clear any SCSI-2
 * reservation.
 */
static int	vd_reset_access_failure = 0;
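/*
 * For example (assuming the usual /etc/system mechanism for setting
 * driver variables), a service domain could be made to reboot when the
 * access-rights reset fails:
 *
 *	set vds:vd_reset_access_failure = 0x1
 */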
/*
 * Tunable for backward compatibility. When this variable is set to B_TRUE,
 * all disk volumes (ZFS, SVM, VxVM volumes) will be exported as single
 * slice disks whether or not they have the "slice" option set. This is
 * to provide a simple backward compatibility mechanism when upgrading
 * the vds driver and using a domain configuration created before the
 * "slice" option was available.
 */
static boolean_t vd_volume_force_slice = B_FALSE;

/*
 * Supported protocol version pairs, from highest (newest) to lowest (oldest)
 *
 * Each supported major version should appear only once, paired with (and only
 * with) its highest supported minor version number (as the protocol requires
 * supporting all lower minor version numbers as well)
 */
static const vio_ver_t	vds_version[] = {{1, 1}};
static const size_t	vds_num_versions =
    sizeof (vds_version)/sizeof (vds_version[0]);

static void vd_free_dring_task(vd_t *vdp);
static int vd_setup_vd(vd_t *vd);
static int vd_setup_single_slice_disk(vd_t *vd);
static int vd_setup_mediainfo(vd_t *vd);
static boolean_t vd_enabled(vd_t *vd);
static ushort_t vd_lbl2cksum(struct dk_label *label);
static int vd_file_validate_geometry(vd_t *vd);
static boolean_t vd_file_is_iso_image(vd_t *vd);
static void vd_set_exported_operations(vd_t *vd);
static void vd_reset_access(vd_t *vd);

/*
 * Function:
 *	vd_file_rw
 *
 * Description:
 *	Read or write to a disk on file.
 *
 * Parameters:
 *	vd		- disk on which the operation is performed.
 *	slice		- slice on which the operation is performed,
 *			  VD_SLICE_NONE indicates that the operation
 *			  is done using an absolute disk offset.
 *	operation	- operation to execute: read (VD_OP_BREAD) or
 *			  write (VD_OP_BWRITE).
 *	data		- buffer where data are read to (for a read)
 *			  or written from (for a write).
 *	blk		- starting block for the operation.
 *	len		- number of bytes to read or write.
 *
 * Return Code:
 *	n >= 0		- success, n indicates the number of bytes read
 *			  or written.
 *	-1		- error.
 */
static ssize_t
vd_file_rw(vd_t *vd, int slice, int operation, caddr_t data, size_t blk,
    size_t len)
{
	caddr_t	maddr;
	size_t offset, maxlen, moffset, mlen, n;
	uint_t smflags;
	enum seg_rw srw;

	ASSERT(vd->file);
	ASSERT(len > 0);

	/*
	 * If a file is exported as a slice then we don't care about the vtoc.
	 * In that case, the vtoc is a fake mainly to make newfs happy and we
	 * handle any I/O as a raw disk access so that we can have access to
	 * the entire backend.
	 */
	if (vd->vdisk_type == VD_DISK_TYPE_SLICE || slice == VD_SLICE_NONE) {
		/* raw disk access */
		offset = blk * DEV_BSIZE;
	} else {
		ASSERT(slice >= 0 && slice < V_NUMPAR);

		/*
		 * v1.0 vDisk clients depended on the server not verifying
		 * the label of an unformatted disk. This "feature" is
		 * maintained for backward compatibility but all versions
		 * from v1.1 onwards must do the right thing.
		 */
		if (vd->vdisk_label == VD_DISK_LABEL_UNK &&
		    vio_ver_is_supported(vd->version, 1, 1) &&
		    vd_file_validate_geometry(vd) != 0) {
			PR0("Unknown disk label, can't do I/O from slice %d",
			    slice);
			return (-1);
		}

		if (blk >= vd->vtoc.v_part[slice].p_size) {
			/* address past the end of the slice */
			PR0("req_addr (0x%lx) > psize (0x%lx)",
			    blk, vd->vtoc.v_part[slice].p_size);
			return (0);
		}

		offset = (vd->vtoc.v_part[slice].p_start + blk) * DEV_BSIZE;

		/*
		 * If the requested size is greater than the size
		 * of the partition, truncate the read/write.
		 */
		maxlen = (vd->vtoc.v_part[slice].p_size - blk) * DEV_BSIZE;

		if (len > maxlen) {
			PR0("I/O size truncated to %lu bytes from %lu bytes",
			    maxlen, len);
			len = maxlen;
		}
	}

	/*
	 * We have to ensure that we are reading/writing into the mmap
	 * range. If we have a partial disk image (e.g. an image of
	 * s0 instead of s2) the system can try to access slices that
	 * are not included in the disk image.
	 */
	if ((offset + len) >= vd->file_size) {
		PR0("offset + nbytes (0x%lx + 0x%lx) >= "
		    "file_size (0x%lx)", offset, len, vd->file_size);
		return (-1);
	}

	srw = (operation == VD_OP_BREAD) ? S_READ : S_WRITE;
	smflags = (operation == VD_OP_BREAD) ? 0 :
	    (SM_WRITE | vd_file_write_flags);
	n = len;

	do {
		/*
		 * segmap_getmapflt() returns a MAXBSIZE chunk which is
		 * MAXBSIZE aligned.
		 */
		moffset = offset & MAXBOFFSET;
		mlen = MIN(MAXBSIZE - moffset, n);
		maddr = segmap_getmapflt(segkmap, vd->file_vnode, offset,
		    mlen, 1, srw);
		/*
		 * Fault in the pages so we can check for error and ensure
		 * that we can safely use the mapped address.
		 */
		if (segmap_fault(kas.a_hat, segkmap, maddr, mlen,
		    F_SOFTLOCK, srw) != 0) {
			(void) segmap_release(segkmap, maddr, 0);
			return (-1);
		}

		if (operation == VD_OP_BREAD)
			bcopy(maddr + moffset, data, mlen);
		else
			bcopy(data, maddr + moffset, mlen);

		if (segmap_fault(kas.a_hat, segkmap, maddr, mlen,
		    F_SOFTUNLOCK, srw) != 0) {
			(void) segmap_release(segkmap, maddr, 0);
			return (-1);
		}
		if (segmap_release(segkmap, maddr, smflags) != 0)
			return (-1);
		n -= mlen;
		offset += mlen;
		data += mlen;

	} while (n > 0);

	return (len);
}
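/*
 * A worked example of the windowing above (assuming MAXBSIZE is 8192,
 * so MAXBOFFSET is 8191): for offset = 4608 and len = 16384, the first
 * pass maps the window covering bytes 0-8191 with moffset = 4608 and
 * copies mlen = 3584 bytes; the second pass copies a full 8192-byte
 * window starting at offset 8192; the third copies the remaining 4608
 * bytes from the window starting at offset 16384.
 */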
/*
 * Function:
 *	vd_file_build_default_label
 *
 * Description:
 *	Return a default label for the given disk. This is used when the disk
 *	does not have a valid VTOC so that the user can get a valid default
 *	configuration. The default label has all slice sizes set to 0 (except
 *	slice 2 which is the entire disk) to force the user to write a valid
 *	label onto the disk image.
 *
 * Parameters:
 *	vd		- disk on which the operation is performed.
 *	label		- the returned default label.
 *
 * Return Code:
 *	none.
 */
static void
vd_file_build_default_label(vd_t *vd, struct dk_label *label)
{
	size_t size;
	char prefix;
	int slice, nparts;
	uint16_t tag;

	ASSERT(vd->file);

	/*
	 * We must have a reasonable number of cylinders and sectors so
	 * that newfs can run using default values.
	 *
	 * if (disk_size < 2MB)
	 *	phys_cylinders = disk_size / 100K
	 * else
	 *	phys_cylinders = disk_size / 300K
	 *
	 * phys_cylinders = (phys_cylinders == 0) ? 1 : phys_cylinders
	 * alt_cylinders = (phys_cylinders > 2) ? 2 : 0;
	 * data_cylinders = phys_cylinders - alt_cylinders
	 *
	 * sectors = disk_size / (phys_cylinders * blk_size)
	 *
	 * The file size test is an attempt to not have too few cylinders
	 * for a small file, or so many on a big file that you waste space
	 * for backup superblocks or cylinder group structures.
	 */
	if (vd->file_size < (2 * 1024 * 1024))
		label->dkl_pcyl = vd->file_size / (100 * 1024);
	else
		label->dkl_pcyl = vd->file_size / (300 * 1024);

	if (label->dkl_pcyl == 0)
		label->dkl_pcyl = 1;

	label->dkl_acyl = 0;

	if (vd->vdisk_type == VD_DISK_TYPE_SLICE) {
		nparts = 1;
		slice = 0;
		tag = V_UNASSIGNED;
	} else {
		if (label->dkl_pcyl > 2)
			label->dkl_acyl = 2;
		nparts = V_NUMPAR;
		slice = VD_ENTIRE_DISK_SLICE;
		tag = V_BACKUP;
	}

	label->dkl_nsect = vd->file_size /
	    (DEV_BSIZE * label->dkl_pcyl);
	label->dkl_ncyl = label->dkl_pcyl - label->dkl_acyl;
	label->dkl_nhead = 1;
	label->dkl_write_reinstruct = 0;
	label->dkl_read_reinstruct = 0;
	label->dkl_rpm = 7200;
	label->dkl_apc = 0;
	label->dkl_intrlv = 0;

	PR0("requested disk size: %ld bytes\n", vd->file_size);
	PR0("setup: ncyl=%d nhead=%d nsec=%d\n", label->dkl_pcyl,
	    label->dkl_nhead, label->dkl_nsect);
	PR0("provided disk size: %ld bytes\n", (uint64_t)
	    (label->dkl_pcyl * label->dkl_nhead *
	    label->dkl_nsect * DEV_BSIZE));

	if (vd->file_size < (1ULL << 20)) {
		size = vd->file_size >> 10;
		prefix = 'K'; /* Kilobyte */
	} else if (vd->file_size < (1ULL << 30)) {
		size = vd->file_size >> 20;
		prefix = 'M'; /* Megabyte */
	} else if (vd->file_size < (1ULL << 40)) {
		size = vd->file_size >> 30;
		prefix = 'G'; /* Gigabyte */
	} else {
		size = vd->file_size >> 40;
		prefix = 'T'; /* Terabyte */
	}

	/*
	 * We must have a correct label name otherwise format(1M) will
	 * not recognize the disk as labeled.
	 */
	(void) snprintf(label->dkl_asciilabel, LEN_DKL_ASCII,
	    "SUN-DiskImage-%ld%cB cyl %d alt %d hd %d sec %d",
	    size, prefix,
	    label->dkl_ncyl, label->dkl_acyl, label->dkl_nhead,
	    label->dkl_nsect);

	/* default VTOC */
	label->dkl_vtoc.v_version = V_VERSION;
	label->dkl_vtoc.v_nparts = nparts;
	label->dkl_vtoc.v_sanity = VTOC_SANE;
	label->dkl_vtoc.v_part[slice].p_tag = tag;
	label->dkl_map[slice].dkl_cylno = 0;
	label->dkl_map[slice].dkl_nblk = label->dkl_ncyl *
	    label->dkl_nhead * label->dkl_nsect;
	label->dkl_cksum = vd_lbl2cksum(label);
}
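/*
 * A worked example of the geometry computation above: for a 100 MB
 * (104857600-byte) disk image exported as a full disk, dkl_pcyl =
 * 104857600 / 307200 = 341, dkl_acyl = 2 (since pcyl > 2), dkl_ncyl =
 * 339, dkl_nhead = 1 and dkl_nsect = 104857600 / (512 * 341) = 600,
 * for a usable capacity of 339 * 1 * 600 * 512 = 104140800 bytes.
 */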
/*
 * Function:
 *	vd_file_set_vtoc
 *
 * Description:
 *	Set the vtoc of a disk image by writing the label and backup
 *	labels into the disk image backend.
 *
 * Parameters:
 *	vd		- disk on which the operation is performed.
 *	label		- the data to be written.
 *
 * Return Code:
 *	0		- success.
 *	n > 0		- error, n indicates the errno code.
 */
static int
vd_file_set_vtoc(vd_t *vd, struct dk_label *label)
{
	int blk, sec, cyl, head, cnt;

	ASSERT(vd->file);

	if (VD_FILE_LABEL_WRITE(vd, label) < 0) {
		PR0("failed to write disk label");
		return (EIO);
	}

	/*
	 * Backup labels are on the last alternate cylinder's
	 * first five odd sectors.
	 */
	if (label->dkl_acyl == 0) {
		PR0("no alternate cylinder, cannot store backup labels");
		return (0);
	}

	cyl = label->dkl_ncyl + label->dkl_acyl - 1;
	head = label->dkl_nhead - 1;

	blk = (cyl * ((label->dkl_nhead * label->dkl_nsect) -
	    label->dkl_apc)) + (head * label->dkl_nsect);

	/*
	 * Write the backup labels. Make sure we don't try to write past
	 * the last cylinder.
	 */
	sec = 1;

	for (cnt = 0; cnt < VD_FILE_NUM_BACKUP; cnt++) {

		if (sec >= label->dkl_nsect) {
			PR0("not enough sectors to store all backup labels");
			return (0);
		}

		if (vd_file_rw(vd, VD_SLICE_NONE, VD_OP_BWRITE,
		    (caddr_t)label, blk + sec,
		    sizeof (struct dk_label)) < 0) {
			PR0("error writing backup label at block %d\n",
			    blk + sec);
			return (EIO);
		}

		PR1("wrote backup label at block %d\n", blk + sec);

		sec += 2;
	}

	return (0);
}

/*
 * Function:
 *	vd_file_get_devid_block
 *
 * Description:
 *	Return the block number where the device id is stored.
 *
 * Parameters:
 *	vd		- disk on which the operation is performed.
 *	blkp		- pointer to the block number
 *
 * Return Code:
 *	0		- success
 *	ENOSPC		- disk has no space to store a device id
 */
static int
vd_file_get_devid_block(vd_t *vd, size_t *blkp)
{
	diskaddr_t spc, head, cyl;

	ASSERT(vd->file);
	ASSERT(vd->vdisk_label == VD_DISK_LABEL_VTOC);

	/* this geometry doesn't allow us to have a devid */
	if (vd->dk_geom.dkg_acyl < 2) {
		PR0("not enough alternate cylinders available for devid "
		    "(acyl=%u)", vd->dk_geom.dkg_acyl);
		return (ENOSPC);
	}

	/* the devid is on the track next to the last cylinder */
	cyl = vd->dk_geom.dkg_ncyl + vd->dk_geom.dkg_acyl - 2;
	spc = vd->dk_geom.dkg_nhead * vd->dk_geom.dkg_nsect;
	head = vd->dk_geom.dkg_nhead - 1;

	*blkp = (cyl * (spc - vd->dk_geom.dkg_apc)) +
	    (head * vd->dk_geom.dkg_nsect) + 1;

	return (0);
}

/*
 * Return the checksum of a disk block containing an on-disk devid.
 */
static uint_t
vd_dkdevid2cksum(struct dk_devid *dkdevid)
{
	uint_t chksum, *ip;
	int i;

	chksum = 0;
	ip = (uint_t *)dkdevid;
	for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int)); i++)
		chksum ^= ip[i];

	return (chksum);
}
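/*
 * Note that the loop above XORs every 32-bit word of the DEV_BSIZE
 * devid block except the last one, which holds the stored checksum
 * accessed via DKD_GETCHKSUM/DKD_FORMCHKSUM. XOR makes the checksum
 * order-independent and cheap to recompute: a block is consistent
 * exactly when the recomputed value matches the stored one.
 */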
/*
 * Function:
 *	vd_file_read_devid
 *
 * Description:
 *	Read the device id stored on a disk image.
 *
 * Parameters:
 *	vd		- disk on which the operation is performed.
 *	devid		- the return address of the device ID.
 *
 * Return Code:
 *	0		- success
 *	EIO		- I/O error while trying to access the disk image
 *	EINVAL		- no valid device id was found
 *	ENOSPC		- disk has no space to store a device id
 */
static int
vd_file_read_devid(vd_t *vd, ddi_devid_t *devid)
{
	struct dk_devid *dkdevid;
	size_t blk;
	uint_t chksum;
	int status, sz;

	if ((status = vd_file_get_devid_block(vd, &blk)) != 0)
		return (status);

	dkdevid = kmem_zalloc(DEV_BSIZE, KM_SLEEP);

	/* get the devid */
	if ((vd_file_rw(vd, VD_SLICE_NONE, VD_OP_BREAD, (caddr_t)dkdevid, blk,
	    DEV_BSIZE)) < 0) {
		PR0("error reading devid block at %lu", blk);
		status = EIO;
		goto done;
	}

	/* validate the revision */
	if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) ||
	    (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) {
		PR0("invalid devid found at block %lu (bad revision)", blk);
		status = EINVAL;
		goto done;
	}

	/* compute checksum */
	chksum = vd_dkdevid2cksum(dkdevid);

	/* compare the checksums */
	if (DKD_GETCHKSUM(dkdevid) != chksum) {
		PR0("invalid devid found at block %lu (bad checksum)", blk);
		status = EINVAL;
		goto done;
	}

	/* validate the device id */
	if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) {
		PR0("invalid devid found at block %lu", blk);
		status = EINVAL;
		goto done;
	}

	PR1("devid read at block %lu", blk);

	sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid);
	*devid = kmem_alloc(sz, KM_SLEEP);
	bcopy(&dkdevid->dkd_devid, *devid, sz);

done:
	kmem_free(dkdevid, DEV_BSIZE);
	return (status);

}

/*
 * Function:
 *	vd_file_write_devid
 *
 * Description:
 *	Write a device id into the disk image.
 *
 * Parameters:
 *	vd		- disk on which the operation is performed.
 *	devid		- the device ID to store.
 *
 * Return Code:
 *	0		- success
 *	EIO		- I/O error while trying to access the disk image
 *	ENOSPC		- disk has no space to store a device id
 */
static int
vd_file_write_devid(vd_t *vd, ddi_devid_t devid)
{
	struct dk_devid *dkdevid;
	uint_t chksum;
	size_t blk;
	int status;

	if ((status = vd_file_get_devid_block(vd, &blk)) != 0)
		return (status);

	dkdevid = kmem_zalloc(DEV_BSIZE, KM_SLEEP);

	/* set revision */
	dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB;
	dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB;

	/* copy devid */
	bcopy(devid, &dkdevid->dkd_devid, ddi_devid_sizeof(devid));

	/* compute checksum */
	chksum = vd_dkdevid2cksum(dkdevid);

	/* set checksum */
	DKD_FORMCHKSUM(chksum, dkdevid);

	/* store the devid */
	if ((status = vd_file_rw(vd, VD_SLICE_NONE, VD_OP_BWRITE,
	    (caddr_t)dkdevid, blk, DEV_BSIZE)) < 0) {
		PR0("Error writing devid block at %lu", blk);
		status = EIO;
	} else {
		PR1("devid written at block %lu", blk);
		status = 0;
	}

	kmem_free(dkdevid, DEV_BSIZE);
	return (status);
}

/*
 * Function:
 *	vd_do_scsi_rdwr
 *
 * Description:
 *	Read or write to a SCSI disk using an absolute disk offset.
 *
 * Parameters:
 *	vd		- disk on which the operation is performed.
 *	operation	- operation to execute: read (VD_OP_BREAD) or
 *			  write (VD_OP_BWRITE).
 *	data		- buffer where data are read to (for a read)
 *			  or written from (for a write).
 *	blk		- starting block for the operation.
 *	len		- number of bytes to read or write.
 *
 * Return Code:
 *	0		- success
 *	n != 0		- error.
 */
static int
vd_do_scsi_rdwr(vd_t *vd, int operation, caddr_t data, size_t blk, size_t len)
{
	struct uscsi_cmd ucmd;
	union scsi_cdb cdb;
	int nsectors, nblk;
	int max_sectors;
	int status, rval;

	ASSERT(!vd->file);
	ASSERT(vd->vdisk_block_size > 0);

	max_sectors = vd->max_xfer_sz;
	nblk = (len / vd->vdisk_block_size);

	if (len % vd->vdisk_block_size != 0)
		return (EINVAL);

	/*
	 * Build and execute the uscsi ioctl. We build a group0, group1
	 * or group4 command as necessary, since some targets
	 * do not support group1 commands.
	 */
	while (nblk) {

		bzero(&ucmd, sizeof (ucmd));
		bzero(&cdb, sizeof (cdb));

		nsectors = (max_sectors < nblk) ? max_sectors : nblk;

		/*
		 * Some of the optical drives on sun4v machines are ATAPI
		 * devices which use Group 1 Read/Write commands so we need
		 * to explicitly check a flag which is set when a domain
		 * is bound.
		 */
		if (blk < (2 << 20) && nsectors <= 0xff && !vd->is_atapi_dev) {
			FORMG0ADDR(&cdb, blk);
			FORMG0COUNT(&cdb, nsectors);
			ucmd.uscsi_cdblen = CDB_GROUP0;
		} else if (blk > 0xffffffff) {
			FORMG4LONGADDR(&cdb, blk);
			FORMG4COUNT(&cdb, nsectors);
			ucmd.uscsi_cdblen = CDB_GROUP4;
			cdb.scc_cmd |= SCMD_GROUP4;
		} else {
			FORMG1ADDR(&cdb, blk);
			FORMG1COUNT(&cdb, nsectors);
			ucmd.uscsi_cdblen = CDB_GROUP1;
			cdb.scc_cmd |= SCMD_GROUP1;
		}
		ucmd.uscsi_cdb = (caddr_t)&cdb;
		ucmd.uscsi_bufaddr = data;
		ucmd.uscsi_buflen = nsectors * vd->block_size;
		ucmd.uscsi_timeout = vd_scsi_rdwr_timeout;
		/*
		 * Set flags so that the command is isolated from normal
		 * commands and no error message is printed.
		 */
		ucmd.uscsi_flags = USCSI_ISOLATE | USCSI_SILENT;

		if (operation == VD_OP_BREAD) {
			cdb.scc_cmd |= SCMD_READ;
			ucmd.uscsi_flags |= USCSI_READ;
		} else {
			cdb.scc_cmd |= SCMD_WRITE;
		}

		status = ldi_ioctl(vd->ldi_handle[VD_ENTIRE_DISK_SLICE],
		    USCSICMD, (intptr_t)&ucmd, (vd->open_flags | FKIOCTL),
		    kcred, &rval);

		if (status == 0)
			status = ucmd.uscsi_status;

		if (status != 0)
			break;

		/*
		 * Check if partial DMA breakup is required. If so, reduce
		 * the request size by half and retry the last request.
		 */
		if (ucmd.uscsi_resid == ucmd.uscsi_buflen) {
			max_sectors >>= 1;
			if (max_sectors <= 0) {
				status = EIO;
				break;
			}
			continue;
		}

		if (ucmd.uscsi_resid != 0) {
			status = EIO;
			break;
		}

		blk += nsectors;
		nblk -= nsectors;
		data += nsectors * vd->vdisk_block_size; /* SECSIZE */
	}

	return (status);
}
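/*
 * The CDB group selection above follows the SCSI READ/WRITE command
 * formats: a Group 0 (6-byte) command carries a 21-bit LBA and an
 * 8-bit count (hence the blk < (2 << 20) and nsectors <= 0xff tests),
 * a Group 1 (10-byte) command a 32-bit LBA with a 16-bit count, and a
 * Group 4 (16-byte) command a 64-bit LBA for blocks beyond 0xffffffff.
 * ATAPI devices are steered to Group 1 because many of them do not
 * implement the 6-byte read/write commands at all.
 */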
/*
 * Function:
 *	vd_scsi_rdwr
 *
 * Description:
 *	Wrapper function to read or write to a SCSI disk using an absolute
 *	disk offset. It checks the blocksize of the underlying device and,
 *	if necessary, adjusts the buffers accordingly before calling
 *	vd_do_scsi_rdwr() to do the actual read or write.
 *
 * Parameters:
 *	vd		- disk on which the operation is performed.
 *	operation	- operation to execute: read (VD_OP_BREAD) or
 *			  write (VD_OP_BWRITE).
 *	data		- buffer where data are read to (for a read)
 *			  or written from (for a write).
 *	vblk		- starting virtual block for the operation.
 *	vlen		- number of bytes to read or write.
 *
 * Return Code:
 *	0		- success
 *	n != 0		- error.
 */
static int
vd_scsi_rdwr(vd_t *vd, int operation, caddr_t data, size_t vblk, size_t vlen)
{
	int rv;

	size_t	pblk;	/* physical device block number of data on device */
	size_t	delta;	/* relative offset between pblk and vblk */
	size_t	pnblk;	/* number of physical blocks to be read from device */
	size_t	plen;	/* length of data to be read from physical device */
	char	*buf;	/* buffer area to fit physical device's block size */

	if (vd->block_size == 0) {
		/*
		 * The block size was not available during the attach,
		 * try to update it now.
		 */
		if (vd_setup_mediainfo(vd) != 0)
			return (EIO);
	}

	/*
	 * If the vdisk block size and the block size of the underlying device
	 * match we can skip straight to vd_do_scsi_rdwr(), otherwise we need
	 * to create a buffer large enough to handle the device's block size
	 * and adjust the block to be read from and the amount of data to
	 * read to correspond with the device's block size.
	 */
	if (vd->vdisk_block_size == vd->block_size)
		return (vd_do_scsi_rdwr(vd, operation, data, vblk, vlen));

	if (vd->vdisk_block_size > vd->block_size)
		return (EINVAL);

	/*
	 * Writing of physical block sizes larger than the virtual block size
	 * is not supported. This would be added if/when support for guests
	 * writing to DVDs is implemented.
	 */
	if (operation == VD_OP_BWRITE)
		return (ENOTSUP);

	/* BEGIN CSTYLED */
	/*
	 * Below is a diagram showing the relationship between the physical
	 * and virtual blocks. If the virtual blocks marked by 'X' below are
	 * requested, then the physical blocks denoted by 'Y' are read.
	 *
	 *            vblk
	 *              |      vlen
	 *              |<--------------->|
	 *              v                 v
	 *  --+--+--+--+--+--+--+--+--+--+--+--+--+--+--+-   virtual disk:
	 *    |  |  |  |XX|XX|XX|XX|XX|XX|  |  |  |  |  |  } block size is
	 *  --+--+--+--+--+--+--+--+--+--+--+--+--+--+--+-   vd->vdisk_block_size
	 *          :  :                 :  :
	 *         >:==:< delta          :  :
	 *          :  :                 :  :
	 *  --+-----+-----+-----+-----+-----+-----+-----+--  physical disk:
	 *    |     |YY:YY|YYYYY|YYYYY|YY:YY|     |     |  } block size is
	 *  --+-----+-----+-----+-----+-----+-----+-----+--  vd->block_size
	 *          ^                       ^
	 *          |<--------------------->|
	 *          |          plen
	 *         pblk
	 */
	/* END CSTYLED */
	pblk = (vblk * vd->vdisk_block_size) / vd->block_size;
	delta = (vblk * vd->vdisk_block_size) - (pblk * vd->block_size);
	pnblk = ((delta + vlen - 1) / vd->block_size) + 1;
	plen = pnblk * vd->block_size;

	PR2("vblk %lx:pblk %lx: vlen %ld:plen %ld", vblk, pblk, vlen, plen);

	buf = kmem_zalloc(plen, KM_SLEEP);
	rv = vd_do_scsi_rdwr(vd, operation, (caddr_t)buf, pblk, plen);
	bcopy(buf + delta, data, vlen);

	kmem_free(buf, plen);

	return (rv);
}
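/*
 * A worked example of the block-size conversion above: with
 * vdisk_block_size = 512 and a 2048-byte physical block size (a DVD,
 * say), a request for vblk = 3, vlen = 2048 gives pblk = 1536/2048 = 0,
 * delta = 1536, pnblk = ((1536 + 2047) / 2048) + 1 = 2 and plen = 4096;
 * the two physical blocks are read into the staging buffer and the
 * 2048 requested bytes are copied out starting at offset delta.
 */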
/*
 * Return Values
 *	EINPROGRESS	- operation was successfully started
 *	EIO		- encountered an LDC error (aka. task error)
 *	0		- operation completed successfully
 *
 * Side Effect
 *	sets request->status = <disk operation status>
 */
static int
vd_start_bio(vd_task_t *task)
{
	int			rv, status = 0;
	vd_t			*vd = task->vd;
	vd_dring_payload_t	*request = task->request;
	struct buf		*buf = &task->buf;
	uint8_t			mtype;
	int			slice;
	char			*bufaddr = 0;
	size_t			buflen;

	ASSERT(vd != NULL);
	ASSERT(request != NULL);

	slice = request->slice;

	ASSERT(slice == VD_SLICE_NONE || slice < vd->nslices);
	ASSERT((request->operation == VD_OP_BREAD) ||
	    (request->operation == VD_OP_BWRITE));

	if (request->nbytes == 0) {
		/* no service for trivial requests */
		request->status = EINVAL;
		return (0);
	}

	PR1("%s %lu bytes at block %lu",
	    (request->operation == VD_OP_BREAD) ? "Read" : "Write",
	    request->nbytes, request->addr);

	/*
	 * We have to check the open flags because the functions processing
	 * the read/write request will not do it.
	 */
	if (request->operation == VD_OP_BWRITE && !(vd->open_flags & FWRITE)) {
		PR0("write fails because backend is opened read-only");
		request->nbytes = 0;
		request->status = EROFS;
		return (0);
	}

	mtype = (&vd->inband_task == task) ? LDC_SHADOW_MAP : LDC_DIRECT_MAP;

	/* Map memory exported by client */
	status = ldc_mem_map(task->mhdl, request->cookie, request->ncookies,
	    mtype, (request->operation == VD_OP_BREAD) ? LDC_MEM_W : LDC_MEM_R,
	    &bufaddr, NULL);
	if (status != 0) {
		PR0("ldc_mem_map() returned err %d ", status);
		return (EIO);
	}

	buflen = request->nbytes;

	status = ldc_mem_acquire(task->mhdl, 0, buflen);
	if (status != 0) {
		(void) ldc_mem_unmap(task->mhdl);
		PR0("ldc_mem_acquire() returned err %d ", status);
		return (EIO);
	}

	/* Start the block I/O */
	if (vd->file) {
		rv = vd_file_rw(vd, slice, request->operation, bufaddr,
		    request->addr, request->nbytes);
		if (rv < 0) {
			request->nbytes = 0;
			request->status = EIO;
		} else {
			request->nbytes = rv;
			request->status = 0;
		}
	} else {
		if (slice == VD_SLICE_NONE) {
			/*
			 * This is not a disk image so it is a real disk. We
			 * assume that the underlying device driver supports
			 * USCSICMD ioctls. This is the case of all SCSI
			 * devices (sd, ssd...).
			 *
			 * In the future if we have non-SCSI disks we would
			 * need to invoke the appropriate function to do I/O
			 * using an absolute disk offset (for example using
			 * DIOCTL_RWCMD for IDE disks).
			 */
			rv = vd_scsi_rdwr(vd, request->operation, bufaddr,
			    request->addr, request->nbytes);
			if (rv != 0) {
				request->nbytes = 0;
				request->status = EIO;
			} else {
				request->status = 0;
			}
		} else {
			bioinit(buf);
			buf->b_flags = B_BUSY;
			buf->b_bcount = request->nbytes;
			buf->b_lblkno = request->addr;
			buf->b_edev = vd->dev[slice];
			buf->b_un.b_addr = bufaddr;
			buf->b_flags |= (request->operation == VD_OP_BREAD) ?
			    B_READ : B_WRITE;

			request->status =
			    ldi_strategy(vd->ldi_handle[slice], buf);

			/*
			 * This is to indicate to the caller that the request
			 * needs to be finished by vd_complete_bio() by calling
			 * biowait() there and waiting for that to return
			 * before triggering the notification of the vDisk
			 * client.
			 *
			 * This is necessary when writing to real disks as
			 * otherwise calls to ldi_strategy() would be
			 * serialized behind the calls to biowait() and
			 * performance would suffer.
			 */
			if (request->status == 0)
				return (EINPROGRESS);

			biofini(buf);
		}
	}

	/* Clean up after error */
	rv = ldc_mem_release(task->mhdl, 0, buflen);
	if (rv) {
		PR0("ldc_mem_release() returned err %d ", rv);
		status = EIO;
	}
	rv = ldc_mem_unmap(task->mhdl);
	if (rv) {
		PR0("ldc_mem_unmap() returned err %d ", rv);
		status = EIO;
	}

	return (status);
}

/*
 * This function should only be called from vd_notify to ensure that requests
 * are responded to in the order that they are received.
 */
static int
send_msg(ldc_handle_t ldc_handle, void *msg, size_t msglen)
{
	int	status;
	size_t	nbytes;

	do {
		nbytes = msglen;
		status = ldc_write(ldc_handle, msg, &nbytes);
		if (status != EWOULDBLOCK)
			break;
		drv_usecwait(vds_ldc_delay);
	} while (status == EWOULDBLOCK);

	if (status != 0) {
		if (status != ECONNRESET)
			PR0("ldc_write() returned errno %d", status);
		return (status);
	} else if (nbytes != msglen) {
		PR0("ldc_write() performed only partial write");
		return (EIO);
	}

	PR1("SENT %lu bytes", msglen);
	return (0);
}

static void
vd_need_reset(vd_t *vd, boolean_t reset_ldc)
{
	mutex_enter(&vd->lock);
	vd->reset_state	= B_TRUE;
	vd->reset_ldc	= reset_ldc;
	mutex_exit(&vd->lock);
}

/*
 * Reset the state of the connection with a client, if needed; reset the LDC
 * transport as well, if needed. This function should only be called from
 * "vd_recv_msg", as it waits for tasks - otherwise a deadlock can occur.
 */
static void
vd_reset_if_needed(vd_t *vd)
{
	int	status = 0;

	mutex_enter(&vd->lock);
	if (!vd->reset_state) {
		ASSERT(!vd->reset_ldc);
		mutex_exit(&vd->lock);
		return;
	}
	mutex_exit(&vd->lock);

	PR0("Resetting connection state with %s", VD_CLIENT(vd));

	/*
	 * Let any asynchronous I/O complete before possibly pulling the rug
	 * out from under it; defer checking vd->reset_ldc, as one of the
	 * asynchronous tasks might set it
	 */
	ddi_taskq_wait(vd->completionq);

	if (vd->file) {
		status = VOP_FSYNC(vd->file_vnode, FSYNC, kcred, NULL);
		if (status) {
			PR0("VOP_FSYNC returned errno %d", status);
		}
	}

	if ((vd->initialized & VD_DRING) &&
	    ((status = ldc_mem_dring_unmap(vd->dring_handle)) != 0))
		PR0("ldc_mem_dring_unmap() returned errno %d", status);

	vd_free_dring_task(vd);

	/* Free the staging buffer for msgs */
	if (vd->vio_msgp != NULL) {
		kmem_free(vd->vio_msgp, vd->max_msglen);
		vd->vio_msgp = NULL;
	}

	/* Free the inband message buffer */
	if (vd->inband_task.msg != NULL) {
		kmem_free(vd->inband_task.msg, vd->max_msglen);
		vd->inband_task.msg = NULL;
	}

	mutex_enter(&vd->lock);

	if (vd->reset_ldc)
		PR0("taking down LDC channel");
	if (vd->reset_ldc && ((status = ldc_down(vd->ldc_handle)) != 0))
		PR0("ldc_down() returned errno %d", status);

	/* Reset exclusive access rights */
	vd_reset_access(vd);

	vd->initialized	&= ~(VD_SID | VD_SEQ_NUM | VD_DRING);
	vd->state	= VD_STATE_INIT;
	vd->max_msglen	= sizeof (vio_msg_t);	/* baseline vio message size */

	/* Allocate the staging buffer */
	vd->vio_msgp = kmem_alloc(vd->max_msglen, KM_SLEEP);

	PR0("calling ldc_up\n");
	(void) ldc_up(vd->ldc_handle);

	vd->reset_state	= B_FALSE;
	vd->reset_ldc	= B_FALSE;

	mutex_exit(&vd->lock);
}

static void vd_recv_msg(void *arg);

static void
vd_mark_in_reset(vd_t *vd)
{
	int status;

	PR0("vd_mark_in_reset: marking vd in reset\n");

	vd_need_reset(vd, B_FALSE);
	status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, vd, DDI_SLEEP);
	if (status == DDI_FAILURE) {
		PR0("cannot schedule task to recv msg\n");
		vd_need_reset(vd, B_TRUE);
		return;
	}
}

static int
vd_mark_elem_done(vd_t *vd, int idx, int elem_status, int elem_nbytes)
{
	boolean_t accepted;
	int status;
	vd_dring_entry_t *elem = VD_DRING_ELEM(idx);

	if (vd->reset_state)
		return (0);

	/* Acquire the element */
	if (!vd->reset_state &&
	    (status = ldc_mem_dring_acquire(vd->dring_handle, idx, idx)) != 0) {
		if (status == ECONNRESET) {
			vd_mark_in_reset(vd);
			return (0);
		} else {
			PR0("ldc_mem_dring_acquire() returned errno %d",
			    status);
			return (status);
		}
	}

	/* Set the element's status and mark it done */
	accepted = (elem->hdr.dstate == VIO_DESC_ACCEPTED);
	if (accepted) {
		elem->payload.nbytes	= elem_nbytes;
		elem->payload.status	= elem_status;
		elem->hdr.dstate	= VIO_DESC_DONE;
	} else {
		/* Perhaps client timed out waiting for I/O... */
		PR0("element %u no longer \"accepted\"", idx);
		VD_DUMP_DRING_ELEM(elem);
	}
	/* Release the element */
	if (!vd->reset_state &&
	    (status = ldc_mem_dring_release(vd->dring_handle, idx, idx)) != 0) {
		if (status == ECONNRESET) {
			vd_mark_in_reset(vd);
			return (0);
		} else {
			PR0("ldc_mem_dring_release() returned errno %d",
			    status);
			return (status);
		}
	}

	return (accepted ? 0 : EINVAL);
}

/*
 * Return Values
 *	0	- operation completed successfully
 *	EIO	- encountered LDC / task error
 *
 * Side Effect
 *	sets request->status = <disk operation status>
 */
static int
vd_complete_bio(vd_task_t *task)
{
	int			status = 0;
	int			rv = 0;
	vd_t			*vd = task->vd;
	vd_dring_payload_t	*request = task->request;
	struct buf		*buf = &task->buf;


	ASSERT(vd != NULL);
	ASSERT(request != NULL);
	ASSERT(task->msg != NULL);
	ASSERT(task->msglen >= sizeof (*task->msg));
	ASSERT(!vd->file);
	ASSERT(request->slice != VD_SLICE_NONE);

	/* Wait for the I/O to complete [ call to ldi_strategy(9f) ] */
	request->status = biowait(buf);

	/* Return back the number of bytes read/written */
	request->nbytes = buf->b_bcount - buf->b_resid;

	/* Release the buffer */
	if (!vd->reset_state)
		status = ldc_mem_release(task->mhdl, 0, buf->b_bcount);
	if (status) {
		PR0("ldc_mem_release() returned errno %d copying to "
		    "client", status);
		if (status == ECONNRESET) {
			vd_mark_in_reset(vd);
		}
		rv = EIO;
	}

	/* Unmap the memory, even if in reset */
	status = ldc_mem_unmap(task->mhdl);
	if (status) {
		PR0("ldc_mem_unmap() returned errno %d copying to client",
		    status);
		if (status == ECONNRESET) {
			vd_mark_in_reset(vd);
		}
		rv = EIO;
	}

	biofini(buf);

	return (rv);
}

/*
 * Description:
 *	This function is called by the two functions called by a taskq
 *	[ vd_complete_notify() and vd_serial_notify() ] to send the
 *	message to the client.
 *
 * Parameters:
 *	task - structure containing the task to be completed
 *
 * Return Values
 *	None
 */
static void
vd_notify(vd_task_t *task)
{
	int status;

	ASSERT(task != NULL);
	ASSERT(task->vd != NULL);

	if (task->vd->reset_state)
		return;

	/*
	 * Send the "ack" or "nack" back to the client; if sending the
	 * message via LDC fails, arrange to reset both the connection
	 * state and LDC itself
	 */
	PR2("Sending %s",
	    (task->msg->tag.vio_subtype == VIO_SUBTYPE_ACK) ? "ACK" : "NACK");
"ACK" : "NACK"); 1642 1643 status = send_msg(task->vd->ldc_handle, task->msg, task->msglen); 1644 switch (status) { 1645 case 0: 1646 break; 1647 case ECONNRESET: 1648 vd_mark_in_reset(task->vd); 1649 break; 1650 default: 1651 PR0("initiating full reset"); 1652 vd_need_reset(task->vd, B_TRUE); 1653 break; 1654 } 1655 1656 DTRACE_PROBE1(task__end, vd_task_t *, task); 1657 } 1658 1659 /* 1660 * Description: 1661 * Mark the Dring entry as Done and (if necessary) send an ACK/NACK to 1662 * the vDisk client 1663 * 1664 * Parameters: 1665 * task - structure containing the request sent from client 1666 * 1667 * Return Values 1668 * None 1669 */ 1670 static void 1671 vd_complete_notify(vd_task_t *task) 1672 { 1673 int status = 0; 1674 vd_t *vd = task->vd; 1675 vd_dring_payload_t *request = task->request; 1676 1677 /* Update the dring element for a dring client */ 1678 if (!vd->reset_state && (vd->xfer_mode == VIO_DRING_MODE)) { 1679 status = vd_mark_elem_done(vd, task->index, 1680 request->status, request->nbytes); 1681 if (status == ECONNRESET) 1682 vd_mark_in_reset(vd); 1683 } 1684 1685 /* 1686 * If a transport error occurred while marking the element done or 1687 * previously while executing the task, arrange to "nack" the message 1688 * when the final task in the descriptor element range completes 1689 */ 1690 if ((status != 0) || (task->status != 0)) 1691 task->msg->tag.vio_subtype = VIO_SUBTYPE_NACK; 1692 1693 /* 1694 * Only the final task for a range of elements will respond to and 1695 * free the message 1696 */ 1697 if (task->type == VD_NONFINAL_RANGE_TASK) { 1698 return; 1699 } 1700 1701 vd_notify(task); 1702 } 1703 1704 /* 1705 * Description: 1706 * This is the basic completion function called to handle inband data 1707 * requests and handshake messages. All it needs to do is trigger a 1708 * message to the client that the request is completed. 
 *
 * Parameters:
 *	arg - opaque pointer to structure containing task to be completed
 *
 * Return Values
 *	None
 */
static void
vd_serial_notify(void *arg)
{
	vd_task_t *task = (vd_task_t *)arg;

	ASSERT(task != NULL);
	vd_notify(task);
}

/* ARGSUSED */
static int
vd_geom2dk_geom(void *vd_buf, size_t vd_buf_len, void *ioctl_arg)
{
	VD_GEOM2DK_GEOM((vd_geom_t *)vd_buf, (struct dk_geom *)ioctl_arg);
	return (0);
}

/* ARGSUSED */
static int
vd_vtoc2vtoc(void *vd_buf, size_t vd_buf_len, void *ioctl_arg)
{
	VD_VTOC2VTOC((vd_vtoc_t *)vd_buf, (struct vtoc *)ioctl_arg);
	return (0);
}

static void
dk_geom2vd_geom(void *ioctl_arg, void *vd_buf)
{
	DK_GEOM2VD_GEOM((struct dk_geom *)ioctl_arg, (vd_geom_t *)vd_buf);
}

static void
vtoc2vd_vtoc(void *ioctl_arg, void *vd_buf)
{
	VTOC2VD_VTOC((struct vtoc *)ioctl_arg, (vd_vtoc_t *)vd_buf);
}

static int
vd_get_efi_in(void *vd_buf, size_t vd_buf_len, void *ioctl_arg)
{
	vd_efi_t *vd_efi = (vd_efi_t *)vd_buf;
	dk_efi_t *dk_efi = (dk_efi_t *)ioctl_arg;
	size_t data_len;

	data_len = vd_buf_len - (sizeof (vd_efi_t) - sizeof (uint64_t));
	if (vd_efi->length > data_len)
		return (EINVAL);

	dk_efi->dki_lba = vd_efi->lba;
	dk_efi->dki_length = vd_efi->length;
	dk_efi->dki_data = kmem_zalloc(vd_efi->length, KM_SLEEP);
	return (0);
}

static void
vd_get_efi_out(void *ioctl_arg, void *vd_buf)
{
	int len;
	vd_efi_t *vd_efi = (vd_efi_t *)vd_buf;
	dk_efi_t *dk_efi = (dk_efi_t *)ioctl_arg;

	len = vd_efi->length;
	DK_EFI2VD_EFI(dk_efi, vd_efi);
	kmem_free(dk_efi->dki_data, len);
}

static int
vd_set_efi_in(void *vd_buf, size_t vd_buf_len, void *ioctl_arg)
{
	vd_efi_t *vd_efi = (vd_efi_t *)vd_buf;
	dk_efi_t *dk_efi = (dk_efi_t *)ioctl_arg;
	size_t data_len;

	data_len = vd_buf_len - (sizeof (vd_efi_t) - sizeof (uint64_t));
	if (vd_efi->length > data_len)
		return (EINVAL);

	dk_efi->dki_data = kmem_alloc(vd_efi->length, KM_SLEEP);
	VD_EFI2DK_EFI(vd_efi, dk_efi);
	return (0);
}

static void
vd_set_efi_out(void *ioctl_arg, void *vd_buf)
{
	vd_efi_t *vd_efi = (vd_efi_t *)vd_buf;
	dk_efi_t *dk_efi = (dk_efi_t *)ioctl_arg;

	kmem_free(dk_efi->dki_data, vd_efi->length);
}

static int
vd_scsicmd_in(void *vd_buf, size_t vd_buf_len, void *ioctl_arg)
{
	size_t vd_scsi_len;
	vd_scsi_t *vd_scsi = (vd_scsi_t *)vd_buf;
	struct uscsi_cmd *uscsi = (struct uscsi_cmd *)ioctl_arg;

	/* check buffer size */
	vd_scsi_len = VD_SCSI_SIZE;
	vd_scsi_len += P2ROUNDUP(vd_scsi->cdb_len, sizeof (uint64_t));
	vd_scsi_len += P2ROUNDUP(vd_scsi->sense_len, sizeof (uint64_t));
	vd_scsi_len += P2ROUNDUP(vd_scsi->datain_len, sizeof (uint64_t));
	vd_scsi_len += P2ROUNDUP(vd_scsi->dataout_len, sizeof (uint64_t));

	ASSERT(vd_scsi_len % sizeof (uint64_t) == 0);

	if (vd_buf_len < vd_scsi_len)
		return (EINVAL);

	/* set flags */
	uscsi->uscsi_flags = vd_scsi_debug;

	if (vd_scsi->options & VD_SCSI_OPT_NORETRY) {
		uscsi->uscsi_flags |= USCSI_ISOLATE;
		uscsi->uscsi_flags |= USCSI_DIAGNOSE;
	}

	/* task attribute */
	switch (vd_scsi->task_attribute) {
	case VD_SCSI_TASK_ACA:
		uscsi->uscsi_flags |= USCSI_HEAD;
		break;
	case VD_SCSI_TASK_HQUEUE:
		uscsi->uscsi_flags |= USCSI_HTAG;
		break;
	case VD_SCSI_TASK_ORDERED:
		uscsi->uscsi_flags |= USCSI_OTAG;
		break;
	default:
		uscsi->uscsi_flags |= USCSI_NOTAG;
		break;
	}

	/* timeout */
	uscsi->uscsi_timeout = vd_scsi->timeout;

	/* cdb data */
	uscsi->uscsi_cdb = (caddr_t)VD_SCSI_DATA_CDB(vd_scsi);
	uscsi->uscsi_cdblen = vd_scsi->cdb_len;

	/* sense buffer */
	if (vd_scsi->sense_len != 0) {
		uscsi->uscsi_flags |= USCSI_RQENABLE;
		uscsi->uscsi_rqbuf = (caddr_t)VD_SCSI_DATA_SENSE(vd_scsi);
		uscsi->uscsi_rqlen = vd_scsi->sense_len;
	}

	if (vd_scsi->datain_len != 0 && vd_scsi->dataout_len != 0) {
		/* uscsi does not support a combined read/write request */
		return (EINVAL);
	}

	/* request data-in */
	if (vd_scsi->datain_len != 0) {
		uscsi->uscsi_flags |= USCSI_READ;
		uscsi->uscsi_buflen = vd_scsi->datain_len;
		uscsi->uscsi_bufaddr = (char *)VD_SCSI_DATA_IN(vd_scsi);
	}

	/* request data-out */
	if (vd_scsi->dataout_len != 0) {
		uscsi->uscsi_buflen = vd_scsi->dataout_len;
		uscsi->uscsi_bufaddr = (char *)VD_SCSI_DATA_OUT(vd_scsi);
	}

	return (0);
}

static void
vd_scsicmd_out(void *ioctl_arg, void *vd_buf)
{
	vd_scsi_t *vd_scsi = (vd_scsi_t *)vd_buf;
	struct uscsi_cmd *uscsi = (struct uscsi_cmd *)ioctl_arg;

	/* output fields */
	vd_scsi->cmd_status = uscsi->uscsi_status;

	/* sense data */
	if ((uscsi->uscsi_flags & USCSI_RQENABLE) &&
	    (uscsi->uscsi_status == STATUS_CHECK ||
	    uscsi->uscsi_status == STATUS_TERMINATED)) {
		vd_scsi->sense_status = uscsi->uscsi_rqstatus;
		if (uscsi->uscsi_rqstatus == STATUS_GOOD)
			vd_scsi->sense_len -= uscsi->uscsi_resid;
		else
			vd_scsi->sense_len = 0;
	} else {
		vd_scsi->sense_len = 0;
	}

	if (uscsi->uscsi_status != STATUS_GOOD) {
		vd_scsi->dataout_len = 0;
		vd_scsi->datain_len = 0;
		return;
	}

	if (uscsi->uscsi_flags & USCSI_READ) {
		/* request data (read) */
		vd_scsi->datain_len -= uscsi->uscsi_resid;
		vd_scsi->dataout_len = 0;
	} else {
		/* request data (write) */
		vd_scsi->datain_len = 0;
		vd_scsi->dataout_len -= uscsi->uscsi_resid;
	}
}

static vd_disk_label_t
vd_read_vtoc(vd_t *vd, struct vtoc *vtoc)
{
	int status, rval;
	struct dk_gpt *efi;
	size_t efi_len;

	ASSERT(vd->ldi_handle[0] != NULL);

	status = ldi_ioctl(vd->ldi_handle[0], DKIOCGVTOC, (intptr_t)vtoc,
	    (vd->open_flags | FKIOCTL), kcred, &rval);

	if (status == 0) {
		return (VD_DISK_LABEL_VTOC);
	} else if (status != ENOTSUP) {
		PR0("ldi_ioctl(DKIOCGVTOC) returned error %d", status);
		return (VD_DISK_LABEL_UNK);
	}

	status = vds_efi_alloc_and_read(vd->ldi_handle[0], &efi, &efi_len);

	if (status) {
		PR0("vds_efi_alloc_and_read returned error %d", status);
		return (VD_DISK_LABEL_UNK);
	}

	vd_efi_to_vtoc(efi, vtoc);
	vd_efi_free(efi, efi_len);

	return (VD_DISK_LABEL_EFI);
}

static ushort_t
vd_lbl2cksum(struct dk_label *label)
{
	int	count;
	ushort_t sum, *sp;

	count = (sizeof (struct dk_label)) / (sizeof (short)) - 1;
	sp = (ushort_t *)label;
	sum = 0;
	while (count--) {
		sum ^= *sp++;
	}

	return (sum);
}
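/*
 * Note that dkl_cksum is the last 16-bit word of struct dk_label, so
 * the loop above XORs every word of the label except the stored
 * checksum itself; a label is valid when the recomputed value matches
 * dkl_cksum, which is what vd_file_validate_geometry() checks below.
 */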
1968 1969 return (sum); 1970 } 1971 1972 /* 1973 * Handle ioctls to a disk slice. 1974 * 1975 * Return Values 1976 * 0 - Indicates that there are no errors in disk operations 1977 * ENOTSUP - Unknown disk label type or unsupported DKIO ioctl 1978 * EINVAL - Not enough room to copy the EFI label 1979 * 1980 */ 1981 static int 1982 vd_do_slice_ioctl(vd_t *vd, int cmd, void *ioctl_arg) 1983 { 1984 dk_efi_t *dk_ioc; 1985 1986 switch (vd->vdisk_label) { 1987 1988 /* ioctls for a slice from a disk with a VTOC label */ 1989 case VD_DISK_LABEL_VTOC: 1990 1991 switch (cmd) { 1992 case DKIOCGGEOM: 1993 ASSERT(ioctl_arg != NULL); 1994 bcopy(&vd->dk_geom, ioctl_arg, sizeof (vd->dk_geom)); 1995 return (0); 1996 case DKIOCGVTOC: 1997 ASSERT(ioctl_arg != NULL); 1998 bcopy(&vd->vtoc, ioctl_arg, sizeof (vd->vtoc)); 1999 return (0); 2000 default: 2001 return (ENOTSUP); 2002 } 2003 2004 /* ioctls for a slice from a disk with an EFI label */ 2005 case VD_DISK_LABEL_EFI: 2006 2007 switch (cmd) { 2008 case DKIOCGETEFI: 2009 ASSERT(ioctl_arg != NULL); 2010 dk_ioc = (dk_efi_t *)ioctl_arg; 2011 if (dk_ioc->dki_length < vd->dk_efi.dki_length) 2012 return (EINVAL); 2013 bcopy(vd->dk_efi.dki_data, dk_ioc->dki_data, 2014 vd->dk_efi.dki_length); 2015 return (0); 2016 default: 2017 return (ENOTSUP); 2018 } 2019 2020 default: 2021 /* Unknown disk label type */ 2022 return (ENOTSUP); 2023 } 2024 } 2025 2026 /* 2027 * Function: 2028 * vd_file_validate_geometry 2029 * 2030 * Description: 2031 * Read the label and validate the geometry of a disk image. The driver 2032 * label, vtoc and geometry information are updated according to the 2033 * label read from the disk image. 2034 * 2035 * If no valid label is found, the label is set to unknown and the 2036 * function returns EINVAL, but a default vtoc and geometry are provided 2037 * to the driver. 2038 * 2039 * Parameters: 2040 * vd - disk on which the operation is performed. 2041 * 2042 * Return Code: 2043 * 0 - success. 2044 * EIO - error reading the label from the disk image. 2045 * EINVAL - unknown disk label. 2046 */ 2047 static int 2048 vd_file_validate_geometry(vd_t *vd) 2049 { 2050 struct dk_label label; 2051 struct dk_geom *geom = &vd->dk_geom; 2052 struct vtoc *vtoc = &vd->vtoc; 2053 int i; 2054 int status = 0; 2055 2056 ASSERT(vd->file); 2057 2058 if (vd->vdisk_type == VD_DISK_TYPE_SLICE) { 2059 /* 2060 * For single slice disk we always fake the geometry, and we 2061 * only need to do it once because the geometry will never 2062 * change. 
2063 */ 2064 if (vd->vdisk_label == VD_DISK_LABEL_VTOC) 2065 /* geometry was already validated */ 2066 return (0); 2067 2068 ASSERT(vd->vdisk_label == VD_DISK_LABEL_UNK); 2069 vd_file_build_default_label(vd, &label); 2070 vd->vdisk_label = VD_DISK_LABEL_VTOC; 2071 } else { 2072 if (VD_FILE_LABEL_READ(vd, &label) < 0) 2073 return (EIO); 2074 2075 if (label.dkl_magic != DKL_MAGIC || 2076 label.dkl_cksum != vd_lbl2cksum(&label) || 2077 label.dkl_vtoc.v_sanity != VTOC_SANE || 2078 label.dkl_vtoc.v_nparts != V_NUMPAR) { 2079 vd->vdisk_label = VD_DISK_LABEL_UNK; 2080 vd_file_build_default_label(vd, &label); 2081 status = EINVAL; 2082 } else { 2083 vd->vdisk_label = VD_DISK_LABEL_VTOC; 2084 } 2085 } 2086 2087 /* Update the driver geometry */ 2088 bzero(geom, sizeof (struct dk_geom)); 2089 2090 geom->dkg_ncyl = label.dkl_ncyl; 2091 geom->dkg_acyl = label.dkl_acyl; 2092 geom->dkg_nhead = label.dkl_nhead; 2093 geom->dkg_nsect = label.dkl_nsect; 2094 geom->dkg_intrlv = label.dkl_intrlv; 2095 geom->dkg_apc = label.dkl_apc; 2096 geom->dkg_rpm = label.dkl_rpm; 2097 geom->dkg_pcyl = label.dkl_pcyl; 2098 geom->dkg_write_reinstruct = label.dkl_write_reinstruct; 2099 geom->dkg_read_reinstruct = label.dkl_read_reinstruct; 2100 2101 /* Update the driver vtoc */ 2102 bzero(vtoc, sizeof (struct vtoc)); 2103 2104 vtoc->v_sanity = label.dkl_vtoc.v_sanity; 2105 vtoc->v_version = label.dkl_vtoc.v_version; 2106 vtoc->v_sectorsz = DEV_BSIZE; 2107 vtoc->v_nparts = label.dkl_vtoc.v_nparts; 2108 2109 for (i = 0; i < vtoc->v_nparts; i++) { 2110 vtoc->v_part[i].p_tag = 2111 label.dkl_vtoc.v_part[i].p_tag; 2112 vtoc->v_part[i].p_flag = 2113 label.dkl_vtoc.v_part[i].p_flag; 2114 vtoc->v_part[i].p_start = 2115 label.dkl_map[i].dkl_cylno * 2116 (label.dkl_nhead * label.dkl_nsect); 2117 vtoc->v_part[i].p_size = label.dkl_map[i].dkl_nblk; 2118 vtoc->timestamp[i] = 2119 label.dkl_vtoc.v_timestamp[i]; 2120 } 2121 /* 2122 * The bootinfo array can not be copied with bcopy() because 2123 * elements are of type long in vtoc (so 64-bit) and of type 2124 * int in dk_vtoc (so 32-bit). 2125 */ 2126 vtoc->v_bootinfo[0] = label.dkl_vtoc.v_bootinfo[0]; 2127 vtoc->v_bootinfo[1] = label.dkl_vtoc.v_bootinfo[1]; 2128 vtoc->v_bootinfo[2] = label.dkl_vtoc.v_bootinfo[2]; 2129 bcopy(label.dkl_asciilabel, vtoc->v_asciilabel, 2130 LEN_DKL_ASCII); 2131 bcopy(label.dkl_vtoc.v_volume, vtoc->v_volume, 2132 LEN_DKL_VVOL); 2133 2134 return (status); 2135 } 2136 2137 /* 2138 * Handle ioctls to a disk image (file-based). 
2139  *
2140  * Return Values
2141  *	0 - Indicates that there are no errors
2142  *	!= 0 - Disk operation returned an error
2143  */
2144 static int
2145 vd_do_file_ioctl(vd_t *vd, int cmd, void *ioctl_arg)
2146 {
2147 	struct dk_label label;
2148 	struct dk_geom *geom;
2149 	struct vtoc *vtoc;
2150 	int i, rc;
2151 
2152 	ASSERT(vd->file);
2153 
2154 	switch (cmd) {
2155 
2156 	case DKIOCGGEOM:
2157 		ASSERT(ioctl_arg != NULL);
2158 		geom = (struct dk_geom *)ioctl_arg;
2159 
2160 		rc = vd_file_validate_geometry(vd);
2161 		if (rc != 0 && rc != EINVAL) {
2162 			ASSERT(vd->vdisk_type != VD_DISK_TYPE_SLICE);
2163 			return (rc);
2164 		}
2165 
2166 		bcopy(&vd->dk_geom, geom, sizeof (struct dk_geom));
2167 		return (0);
2168 
2169 	case DKIOCGVTOC:
2170 		ASSERT(ioctl_arg != NULL);
2171 		vtoc = (struct vtoc *)ioctl_arg;
2172 
2173 		rc = vd_file_validate_geometry(vd);
2174 		if (rc != 0 && rc != EINVAL) {
2175 			ASSERT(vd->vdisk_type != VD_DISK_TYPE_SLICE);
2176 			return (rc);
2177 		}
2178 
2179 		bcopy(&vd->vtoc, vtoc, sizeof (struct vtoc));
2180 		return (0);
2181 
2182 	case DKIOCSGEOM:
2183 		ASSERT(ioctl_arg != NULL);
2184 		geom = (struct dk_geom *)ioctl_arg;
2185 
2186 		/* geometry can only be changed for full disk */
2187 		if (vd->vdisk_type != VD_DISK_TYPE_DISK)
2188 			return (ENOTSUP);
2189 
2190 		if (geom->dkg_nhead == 0 || geom->dkg_nsect == 0)
2191 			return (EINVAL);
2192 
2193 		/*
2194 		 * The current device geometry is not updated, just the driver
2195 		 * "notion" of it. The device geometry will be effectively
2196 		 * updated when a label is written to the device during the
2197 		 * next DKIOCSVTOC.
2198 		 */
2199 		bcopy(ioctl_arg, &vd->dk_geom, sizeof (vd->dk_geom));
2200 		return (0);
2201 
2202 	case DKIOCSVTOC:
2203 		ASSERT(ioctl_arg != NULL);
2204 		ASSERT(vd->dk_geom.dkg_nhead != 0 &&
2205 		    vd->dk_geom.dkg_nsect != 0);
2206 		vtoc = (struct vtoc *)ioctl_arg;
2207 
2208 		/* vtoc can only be changed for full disk */
2209 		if (vd->vdisk_type != VD_DISK_TYPE_DISK)
2210 			return (ENOTSUP);
2211 
2212 		if (vtoc->v_sanity != VTOC_SANE ||
2213 		    vtoc->v_sectorsz != DEV_BSIZE ||
2214 		    vtoc->v_nparts != V_NUMPAR)
2215 			return (EINVAL);
2216 
2217 		bzero(&label, sizeof (label));
2218 		label.dkl_ncyl = vd->dk_geom.dkg_ncyl;
2219 		label.dkl_acyl = vd->dk_geom.dkg_acyl;
2220 		label.dkl_pcyl = vd->dk_geom.dkg_pcyl;
2221 		label.dkl_nhead = vd->dk_geom.dkg_nhead;
2222 		label.dkl_nsect = vd->dk_geom.dkg_nsect;
2223 		label.dkl_intrlv = vd->dk_geom.dkg_intrlv;
2224 		label.dkl_apc = vd->dk_geom.dkg_apc;
2225 		label.dkl_rpm = vd->dk_geom.dkg_rpm;
2226 		label.dkl_write_reinstruct = vd->dk_geom.dkg_write_reinstruct;
2227 		label.dkl_read_reinstruct = vd->dk_geom.dkg_read_reinstruct;
2228 
2229 		label.dkl_vtoc.v_nparts = V_NUMPAR;
2230 		label.dkl_vtoc.v_sanity = VTOC_SANE;
2231 		label.dkl_vtoc.v_version = vtoc->v_version;
2232 		for (i = 0; i < V_NUMPAR; i++) {
2233 			label.dkl_vtoc.v_timestamp[i] =
2234 			    vtoc->timestamp[i];
2235 			label.dkl_vtoc.v_part[i].p_tag =
2236 			    vtoc->v_part[i].p_tag;
2237 			label.dkl_vtoc.v_part[i].p_flag =
2238 			    vtoc->v_part[i].p_flag;
2239 			label.dkl_map[i].dkl_cylno =
2240 			    vtoc->v_part[i].p_start /
2241 			    (label.dkl_nhead * label.dkl_nsect);
2242 			label.dkl_map[i].dkl_nblk =
2243 			    vtoc->v_part[i].p_size;
2244 		}
2245 		/*
2246 		 * The bootinfo array can not be copied with bcopy() because
2247 		 * elements are of type long in vtoc (so 64-bit) and of type
2248 		 * int in dk_vtoc (so 32-bit).
2249 		 */
2249 */ 2250 label.dkl_vtoc.v_bootinfo[0] = vtoc->v_bootinfo[0]; 2251 label.dkl_vtoc.v_bootinfo[1] = vtoc->v_bootinfo[1]; 2252 label.dkl_vtoc.v_bootinfo[2] = vtoc->v_bootinfo[2]; 2253 bcopy(vtoc->v_asciilabel, label.dkl_asciilabel, 2254 LEN_DKL_ASCII); 2255 bcopy(vtoc->v_volume, label.dkl_vtoc.v_volume, 2256 LEN_DKL_VVOL); 2257 2258 /* re-compute checksum */ 2259 label.dkl_magic = DKL_MAGIC; 2260 label.dkl_cksum = vd_lbl2cksum(&label); 2261 2262 /* write label to the disk image */ 2263 if ((rc = vd_file_set_vtoc(vd, &label)) != 0) 2264 return (rc); 2265 2266 /* check the geometry and update the driver info */ 2267 if ((rc = vd_file_validate_geometry(vd)) != 0) 2268 return (rc); 2269 2270 /* 2271 * The disk geometry may have changed, so we need to write 2272 * the devid (if there is one) so that it is stored at the 2273 * right location. 2274 */ 2275 if (vd->file_devid != NULL && 2276 vd_file_write_devid(vd, vd->file_devid) != 0) { 2277 PR0("Fail to write devid"); 2278 } 2279 2280 return (0); 2281 2282 case DKIOCFLUSHWRITECACHE: 2283 return (VOP_FSYNC(vd->file_vnode, FSYNC, kcred, NULL)); 2284 2285 default: 2286 return (ENOTSUP); 2287 } 2288 } 2289 2290 /* 2291 * Description: 2292 * This is the function that processes the ioctl requests (farming it 2293 * out to functions that handle slices, files or whole disks) 2294 * 2295 * Return Values 2296 * 0 - ioctl operation completed successfully 2297 * != 0 - The LDC error value encountered 2298 * (propagated back up the call stack as a task error) 2299 * 2300 * Side Effect 2301 * sets request->status to the return value of the ioctl function. 2302 */ 2303 static int 2304 vd_do_ioctl(vd_t *vd, vd_dring_payload_t *request, void* buf, vd_ioctl_t *ioctl) 2305 { 2306 int rval = 0, status = 0; 2307 size_t nbytes = request->nbytes; /* modifiable copy */ 2308 2309 2310 ASSERT(request->slice < vd->nslices); 2311 PR0("Performing %s", ioctl->operation_name); 2312 2313 /* Get data from client and convert, if necessary */ 2314 if (ioctl->copyin != NULL) { 2315 ASSERT(nbytes != 0 && buf != NULL); 2316 PR1("Getting \"arg\" data from client"); 2317 if ((status = ldc_mem_copy(vd->ldc_handle, buf, 0, &nbytes, 2318 request->cookie, request->ncookies, 2319 LDC_COPY_IN)) != 0) { 2320 PR0("ldc_mem_copy() returned errno %d " 2321 "copying from client", status); 2322 return (status); 2323 } 2324 2325 /* Convert client's data, if necessary */ 2326 if (ioctl->copyin == VD_IDENTITY_IN) { 2327 /* use client buffer */ 2328 ioctl->arg = buf; 2329 } else { 2330 /* convert client vdisk operation data to ioctl data */ 2331 status = (ioctl->copyin)(buf, nbytes, 2332 (void *)ioctl->arg); 2333 if (status != 0) { 2334 request->status = status; 2335 return (0); 2336 } 2337 } 2338 } 2339 2340 if (ioctl->operation == VD_OP_SCSICMD) { 2341 struct uscsi_cmd *uscsi = (struct uscsi_cmd *)ioctl->arg; 2342 2343 /* check write permission */ 2344 if (!(vd->open_flags & FWRITE) && 2345 !(uscsi->uscsi_flags & USCSI_READ)) { 2346 PR0("uscsi fails because backend is opened read-only"); 2347 request->status = EROFS; 2348 return (0); 2349 } 2350 } 2351 2352 /* 2353 * Handle single-slice block devices internally; otherwise, have the 2354 * real driver perform the ioctl() 2355 */ 2356 if (vd->file) { 2357 request->status = 2358 vd_do_file_ioctl(vd, ioctl->cmd, (void *)ioctl->arg); 2359 2360 } else if (vd->vdisk_type == VD_DISK_TYPE_SLICE && !vd->pseudo) { 2361 request->status = 2362 vd_do_slice_ioctl(vd, ioctl->cmd, (void *)ioctl->arg); 2363 2364 } else { 2365 request->status = 
		    ldi_ioctl(vd->ldi_handle[request->slice],
2366 		    ioctl->cmd, (intptr_t)ioctl->arg, vd->open_flags | FKIOCTL,
2367 		    kcred, &rval);
2368 
2369 #ifdef DEBUG
2370 		if (rval != 0) {
2371 			PR0("%s set rval = %d, which is not being returned to"
2372 			    " client", ioctl->cmd_name, rval);
2373 		}
2374 #endif /* DEBUG */
2375 	}
2376 
2377 	if (request->status != 0) {
2378 		PR0("ioctl(%s) = errno %d", ioctl->cmd_name, request->status);
2379 		if (ioctl->operation == VD_OP_SCSICMD &&
2380 		    ((struct uscsi_cmd *)ioctl->arg)->uscsi_status != 0)
2381 			/*
2382 			 * USCSICMD has reported an error and the uscsi_status
2383 			 * field is not zero. This means that the SCSI command
2384 			 * has completed but with an error. So we should
2385 			 * mark the VD operation as successfully completed
2386 			 * so that clients can check the SCSI status field for
2387 			 * SCSI errors.
2388 			 */
2389 			request->status = 0;
2390 		else
2391 			return (0);
2392 	}
2393 
2394 	/* Convert data and send to client, if necessary */
2395 	if (ioctl->copyout != NULL) {
2396 		ASSERT(nbytes != 0 && buf != NULL);
2397 		PR1("Sending \"arg\" data to client");
2398 
2399 		/* Convert ioctl data to vdisk operation data, if necessary */
2400 		if (ioctl->copyout != VD_IDENTITY_OUT)
2401 			(ioctl->copyout)((void *)ioctl->arg, buf);
2402 
2403 		if ((status = ldc_mem_copy(vd->ldc_handle, buf, 0, &nbytes,
2404 		    request->cookie, request->ncookies,
2405 		    LDC_COPY_OUT)) != 0) {
2406 			PR0("ldc_mem_copy() returned errno %d "
2407 			    "copying to client", status);
2408 			return (status);
2409 		}
2410 	}
2411 
2412 	return (status);
2413 }
2414 
2415 #define	RNDSIZE(expr) P2ROUNDUP(sizeof (expr), sizeof (uint64_t))
2416 
2417 /*
2418  * Description:
2419  *	This generic function is called by the task queue to complete
2420  *	the processing of the tasks. The specific completion function
2421  *	is passed in as a field in the task pointer.
2422 * 2423 * Parameters: 2424 * arg - opaque pointer to structure containing task to be completed 2425 * 2426 * Return Values 2427 * None 2428 */ 2429 static void 2430 vd_complete(void *arg) 2431 { 2432 vd_task_t *task = (vd_task_t *)arg; 2433 2434 ASSERT(task != NULL); 2435 ASSERT(task->status == EINPROGRESS); 2436 ASSERT(task->completef != NULL); 2437 2438 task->status = task->completef(task); 2439 if (task->status) 2440 PR0("%s: Error %d completing task", __func__, task->status); 2441 2442 /* Now notify the vDisk client */ 2443 vd_complete_notify(task); 2444 } 2445 2446 static int 2447 vd_ioctl(vd_task_t *task) 2448 { 2449 int i, status; 2450 void *buf = NULL; 2451 struct dk_geom dk_geom = {0}; 2452 struct vtoc vtoc = {0}; 2453 struct dk_efi dk_efi = {0}; 2454 struct uscsi_cmd uscsi = {0}; 2455 vd_t *vd = task->vd; 2456 vd_dring_payload_t *request = task->request; 2457 vd_ioctl_t ioctl[] = { 2458 /* Command (no-copy) operations */ 2459 {VD_OP_FLUSH, STRINGIZE(VD_OP_FLUSH), 0, 2460 DKIOCFLUSHWRITECACHE, STRINGIZE(DKIOCFLUSHWRITECACHE), 2461 NULL, NULL, NULL, B_TRUE}, 2462 2463 /* "Get" (copy-out) operations */ 2464 {VD_OP_GET_WCE, STRINGIZE(VD_OP_GET_WCE), RNDSIZE(int), 2465 DKIOCGETWCE, STRINGIZE(DKIOCGETWCE), 2466 NULL, VD_IDENTITY_IN, VD_IDENTITY_OUT, B_FALSE}, 2467 {VD_OP_GET_DISKGEOM, STRINGIZE(VD_OP_GET_DISKGEOM), 2468 RNDSIZE(vd_geom_t), 2469 DKIOCGGEOM, STRINGIZE(DKIOCGGEOM), 2470 &dk_geom, NULL, dk_geom2vd_geom, B_FALSE}, 2471 {VD_OP_GET_VTOC, STRINGIZE(VD_OP_GET_VTOC), RNDSIZE(vd_vtoc_t), 2472 DKIOCGVTOC, STRINGIZE(DKIOCGVTOC), 2473 &vtoc, NULL, vtoc2vd_vtoc, B_FALSE}, 2474 {VD_OP_GET_EFI, STRINGIZE(VD_OP_GET_EFI), RNDSIZE(vd_efi_t), 2475 DKIOCGETEFI, STRINGIZE(DKIOCGETEFI), 2476 &dk_efi, vd_get_efi_in, vd_get_efi_out, B_FALSE}, 2477 2478 /* "Set" (copy-in) operations */ 2479 {VD_OP_SET_WCE, STRINGIZE(VD_OP_SET_WCE), RNDSIZE(int), 2480 DKIOCSETWCE, STRINGIZE(DKIOCSETWCE), 2481 NULL, VD_IDENTITY_IN, VD_IDENTITY_OUT, B_TRUE}, 2482 {VD_OP_SET_DISKGEOM, STRINGIZE(VD_OP_SET_DISKGEOM), 2483 RNDSIZE(vd_geom_t), 2484 DKIOCSGEOM, STRINGIZE(DKIOCSGEOM), 2485 &dk_geom, vd_geom2dk_geom, NULL, B_TRUE}, 2486 {VD_OP_SET_VTOC, STRINGIZE(VD_OP_SET_VTOC), RNDSIZE(vd_vtoc_t), 2487 DKIOCSVTOC, STRINGIZE(DKIOCSVTOC), 2488 &vtoc, vd_vtoc2vtoc, NULL, B_TRUE}, 2489 {VD_OP_SET_EFI, STRINGIZE(VD_OP_SET_EFI), RNDSIZE(vd_efi_t), 2490 DKIOCSETEFI, STRINGIZE(DKIOCSETEFI), 2491 &dk_efi, vd_set_efi_in, vd_set_efi_out, B_TRUE}, 2492 2493 {VD_OP_SCSICMD, STRINGIZE(VD_OP_SCSICMD), RNDSIZE(vd_scsi_t), 2494 USCSICMD, STRINGIZE(USCSICMD), 2495 &uscsi, vd_scsicmd_in, vd_scsicmd_out, B_FALSE}, 2496 }; 2497 size_t nioctls = (sizeof (ioctl))/(sizeof (ioctl[0])); 2498 2499 2500 ASSERT(vd != NULL); 2501 ASSERT(request != NULL); 2502 ASSERT(request->slice < vd->nslices); 2503 2504 /* 2505 * Determine ioctl corresponding to caller's "operation" and 2506 * validate caller's "nbytes" 2507 */ 2508 for (i = 0; i < nioctls; i++) { 2509 if (request->operation == ioctl[i].operation) { 2510 /* LDC memory operations require 8-byte multiples */ 2511 ASSERT(ioctl[i].nbytes % sizeof (uint64_t) == 0); 2512 2513 if (request->operation == VD_OP_GET_EFI || 2514 request->operation == VD_OP_SET_EFI || 2515 request->operation == VD_OP_SCSICMD) { 2516 if (request->nbytes >= ioctl[i].nbytes) 2517 break; 2518 PR0("%s: Expected at least nbytes = %lu, " 2519 "got %lu", ioctl[i].operation_name, 2520 ioctl[i].nbytes, request->nbytes); 2521 return (EINVAL); 2522 } 2523 2524 if (request->nbytes != ioctl[i].nbytes) { 2525 PR0("%s: Expected nbytes = 
%lu, got %lu", 2526 ioctl[i].operation_name, ioctl[i].nbytes, 2527 request->nbytes); 2528 return (EINVAL); 2529 } 2530 2531 break; 2532 } 2533 } 2534 ASSERT(i < nioctls); /* because "operation" already validated */ 2535 2536 if (!(vd->open_flags & FWRITE) && ioctl[i].write) { 2537 PR0("%s fails because backend is opened read-only", 2538 ioctl[i].operation_name); 2539 request->status = EROFS; 2540 return (0); 2541 } 2542 2543 if (request->nbytes) 2544 buf = kmem_zalloc(request->nbytes, KM_SLEEP); 2545 status = vd_do_ioctl(vd, request, buf, &ioctl[i]); 2546 if (request->nbytes) 2547 kmem_free(buf, request->nbytes); 2548 2549 return (status); 2550 } 2551 2552 static int 2553 vd_get_devid(vd_task_t *task) 2554 { 2555 vd_t *vd = task->vd; 2556 vd_dring_payload_t *request = task->request; 2557 vd_devid_t *vd_devid; 2558 impl_devid_t *devid; 2559 int status, bufid_len, devid_len, len, sz; 2560 int bufbytes; 2561 2562 PR1("Get Device ID, nbytes=%ld", request->nbytes); 2563 2564 if (vd->file) { 2565 if (vd->file_devid == NULL) { 2566 PR2("No Device ID"); 2567 request->status = ENOENT; 2568 return (0); 2569 } else { 2570 sz = ddi_devid_sizeof(vd->file_devid); 2571 devid = kmem_alloc(sz, KM_SLEEP); 2572 bcopy(vd->file_devid, devid, sz); 2573 } 2574 } else { 2575 if (ddi_lyr_get_devid(vd->dev[request->slice], 2576 (ddi_devid_t *)&devid) != DDI_SUCCESS) { 2577 PR2("No Device ID"); 2578 request->status = ENOENT; 2579 return (0); 2580 } 2581 } 2582 2583 bufid_len = request->nbytes - sizeof (vd_devid_t) + 1; 2584 devid_len = DEVID_GETLEN(devid); 2585 2586 /* 2587 * Save the buffer size here for use in deallocation. 2588 * The actual number of bytes copied is returned in 2589 * the 'nbytes' field of the request structure. 2590 */ 2591 bufbytes = request->nbytes; 2592 2593 vd_devid = kmem_zalloc(bufbytes, KM_SLEEP); 2594 vd_devid->length = devid_len; 2595 vd_devid->type = DEVID_GETTYPE(devid); 2596 2597 len = (devid_len > bufid_len)? 
bufid_len : devid_len; 2598 2599 bcopy(devid->did_id, vd_devid->id, len); 2600 2601 request->status = 0; 2602 2603 /* LDC memory operations require 8-byte multiples */ 2604 ASSERT(request->nbytes % sizeof (uint64_t) == 0); 2605 2606 if ((status = ldc_mem_copy(vd->ldc_handle, (caddr_t)vd_devid, 0, 2607 &request->nbytes, request->cookie, request->ncookies, 2608 LDC_COPY_OUT)) != 0) { 2609 PR0("ldc_mem_copy() returned errno %d copying to client", 2610 status); 2611 } 2612 PR1("post mem_copy: nbytes=%ld", request->nbytes); 2613 2614 kmem_free(vd_devid, bufbytes); 2615 ddi_devid_free((ddi_devid_t)devid); 2616 2617 return (status); 2618 } 2619 2620 static int 2621 vd_scsi_reset(vd_t *vd) 2622 { 2623 int rval, status; 2624 struct uscsi_cmd uscsi = { 0 }; 2625 2626 uscsi.uscsi_flags = vd_scsi_debug | USCSI_RESET; 2627 uscsi.uscsi_timeout = vd_scsi_rdwr_timeout; 2628 2629 status = ldi_ioctl(vd->ldi_handle[0], USCSICMD, (intptr_t)&uscsi, 2630 (vd->open_flags | FKIOCTL), kcred, &rval); 2631 2632 return (status); 2633 } 2634 2635 static int 2636 vd_reset(vd_task_t *task) 2637 { 2638 vd_t *vd = task->vd; 2639 vd_dring_payload_t *request = task->request; 2640 2641 ASSERT(request->operation == VD_OP_RESET); 2642 ASSERT(vd->scsi); 2643 2644 PR0("Performing VD_OP_RESET"); 2645 2646 if (request->nbytes != 0) { 2647 PR0("VD_OP_RESET: Expected nbytes = 0, got %lu", 2648 request->nbytes); 2649 return (EINVAL); 2650 } 2651 2652 request->status = vd_scsi_reset(vd); 2653 2654 return (0); 2655 } 2656 2657 static int 2658 vd_get_capacity(vd_task_t *task) 2659 { 2660 int rv; 2661 size_t nbytes; 2662 vd_t *vd = task->vd; 2663 vd_dring_payload_t *request = task->request; 2664 vd_capacity_t vd_cap = { 0 }; 2665 2666 ASSERT(request->operation == VD_OP_GET_CAPACITY); 2667 ASSERT(vd->scsi); 2668 2669 PR0("Performing VD_OP_GET_CAPACITY"); 2670 2671 nbytes = request->nbytes; 2672 2673 if (nbytes != RNDSIZE(vd_capacity_t)) { 2674 PR0("VD_OP_GET_CAPACITY: Expected nbytes = %lu, got %lu", 2675 RNDSIZE(vd_capacity_t), nbytes); 2676 return (EINVAL); 2677 } 2678 2679 if (vd->vdisk_size == VD_SIZE_UNKNOWN) { 2680 if (vd_setup_mediainfo(vd) != 0) 2681 ASSERT(vd->vdisk_size == VD_SIZE_UNKNOWN); 2682 } 2683 2684 ASSERT(vd->vdisk_size != 0); 2685 2686 request->status = 0; 2687 2688 vd_cap.vdisk_block_size = vd->vdisk_block_size; 2689 vd_cap.vdisk_size = vd->vdisk_size; 2690 2691 if ((rv = ldc_mem_copy(vd->ldc_handle, (char *)&vd_cap, 0, &nbytes, 2692 request->cookie, request->ncookies, LDC_COPY_OUT)) != 0) { 2693 PR0("ldc_mem_copy() returned errno %d copying to client", rv); 2694 return (rv); 2695 } 2696 2697 return (0); 2698 } 2699 2700 static int 2701 vd_get_access(vd_task_t *task) 2702 { 2703 uint64_t access; 2704 int rv, rval = 0; 2705 size_t nbytes; 2706 vd_t *vd = task->vd; 2707 vd_dring_payload_t *request = task->request; 2708 2709 ASSERT(request->operation == VD_OP_GET_ACCESS); 2710 ASSERT(vd->scsi); 2711 2712 PR0("Performing VD_OP_GET_ACCESS"); 2713 2714 nbytes = request->nbytes; 2715 2716 if (nbytes != sizeof (uint64_t)) { 2717 PR0("VD_OP_GET_ACCESS: Expected nbytes = %lu, got %lu", 2718 sizeof (uint64_t), nbytes); 2719 return (EINVAL); 2720 } 2721 2722 request->status = ldi_ioctl(vd->ldi_handle[request->slice], MHIOCSTATUS, 2723 NULL, (vd->open_flags | FKIOCTL), kcred, &rval); 2724 2725 if (request->status != 0) 2726 return (0); 2727 2728 access = (rval == 0)? 
VD_ACCESS_ALLOWED : VD_ACCESS_DENIED;
2729 
2730 	if ((rv = ldc_mem_copy(vd->ldc_handle, (char *)&access, 0, &nbytes,
2731 	    request->cookie, request->ncookies, LDC_COPY_OUT)) != 0) {
2732 		PR0("ldc_mem_copy() returned errno %d copying to client", rv);
2733 		return (rv);
2734 	}
2735 
2736 	return (0);
2737 }
2738 
2739 static int
2740 vd_set_access(vd_task_t *task)
2741 {
2742 	uint64_t flags;
2743 	int rv, rval;
2744 	size_t nbytes;
2745 	vd_t *vd = task->vd;
2746 	vd_dring_payload_t *request = task->request;
2747 
2748 	ASSERT(request->operation == VD_OP_SET_ACCESS);
2749 	ASSERT(vd->scsi);
2750 
2751 	nbytes = request->nbytes;
2752 
2753 	if (nbytes != sizeof (uint64_t)) {
2754 		PR0("VD_OP_SET_ACCESS: Expected nbytes = %lu, got %lu",
2755 		    sizeof (uint64_t), nbytes);
2756 		return (EINVAL);
2757 	}
2758 
2759 	if ((rv = ldc_mem_copy(vd->ldc_handle, (char *)&flags, 0, &nbytes,
2760 	    request->cookie, request->ncookies, LDC_COPY_IN)) != 0) {
2761 		PR0("ldc_mem_copy() returned errno %d copying from client", rv);
2762 		return (rv);
2763 	}
2764 
2765 	if (flags == VD_ACCESS_SET_CLEAR) {
2766 		PR0("Performing VD_OP_SET_ACCESS (CLEAR)");
2767 		request->status = ldi_ioctl(vd->ldi_handle[request->slice],
2768 		    MHIOCRELEASE, NULL, (vd->open_flags | FKIOCTL), kcred,
2769 		    &rval);
2770 		if (request->status == 0)
2771 			vd->ownership = B_FALSE;
2772 		return (0);
2773 	}
2774 
2775 	/*
2776 	 * As per the VIO spec, the PREEMPT and PRESERVE flags are only valid
2777 	 * when the EXCLUSIVE flag is set.
2778 	 */
2779 	if (!(flags & VD_ACCESS_SET_EXCLUSIVE)) {
2780 		PR0("Invalid VD_OP_SET_ACCESS flags: 0x%lx", flags);
2781 		request->status = EINVAL;
2782 		return (0);
2783 	}
2784 
2785 	switch (flags & (VD_ACCESS_SET_PREEMPT | VD_ACCESS_SET_PRESERVE)) {
2786 
2787 	case VD_ACCESS_SET_PREEMPT | VD_ACCESS_SET_PRESERVE:
2788 		/*
2789 		 * Flags EXCLUSIVE and PREEMPT and PRESERVE. We have to
2790 		 * acquire exclusive access rights, preserve them and we
2791 		 * can use preemption. So we can use the MHIOCTKOWN ioctl.
2792 		 */
2793 		PR0("Performing VD_OP_SET_ACCESS (EXCLUSIVE|PREEMPT|PRESERVE)");
2794 		request->status = ldi_ioctl(vd->ldi_handle[request->slice],
2795 		    MHIOCTKOWN, NULL, (vd->open_flags | FKIOCTL), kcred, &rval);
2796 		break;
2797 
2798 	case VD_ACCESS_SET_PRESERVE:
2799 		/*
2800 		 * Flags EXCLUSIVE and PRESERVE. We have to acquire exclusive
2801 		 * access rights and preserve them, but not preempt any other
2802 		 * host. So we need to use the MHIOCTKOWN ioctl to enable the
2803 		 * "preserve" feature, but we cannot call it directly
2804 		 * because it uses preemption. So before that, we use the
2805 		 * MHIOCQRESERVE ioctl to ensure we can get exclusive rights
2806 		 * without preempting anyone.
2807 		 */
2808 		PR0("Performing VD_OP_SET_ACCESS (EXCLUSIVE|PRESERVE)");
2809 		request->status = ldi_ioctl(vd->ldi_handle[request->slice],
2810 		    MHIOCQRESERVE, NULL, (vd->open_flags | FKIOCTL), kcred,
2811 		    &rval);
2812 		if (request->status != 0)
2813 			break;
2814 		request->status = ldi_ioctl(vd->ldi_handle[request->slice],
2815 		    MHIOCTKOWN, NULL, (vd->open_flags | FKIOCTL), kcred, &rval);
2816 		break;
2817 
2818 	case VD_ACCESS_SET_PREEMPT:
2819 		/*
2820 		 * Flags EXCLUSIVE and PREEMPT. We have to acquire exclusive
2821 		 * access rights and we can use preemption. So we try to do
2822 		 * a SCSI reservation; if it fails, we reset the disk to clear
2823 		 * any reservation and then try to reserve again.
2824 */ 2825 PR0("Performing VD_OP_SET_ACCESS (EXCLUSIVE|PREEMPT)"); 2826 request->status = ldi_ioctl(vd->ldi_handle[request->slice], 2827 MHIOCQRESERVE, NULL, (vd->open_flags | FKIOCTL), kcred, 2828 &rval); 2829 if (request->status == 0) 2830 break; 2831 2832 /* reset the disk */ 2833 (void) vd_scsi_reset(vd); 2834 2835 /* try again even if the reset has failed */ 2836 request->status = ldi_ioctl(vd->ldi_handle[request->slice], 2837 MHIOCQRESERVE, NULL, (vd->open_flags | FKIOCTL), kcred, 2838 &rval); 2839 break; 2840 2841 case 0: 2842 /* Flag EXCLUSIVE only. Just issue a SCSI reservation */ 2843 PR0("Performing VD_OP_SET_ACCESS (EXCLUSIVE)"); 2844 request->status = ldi_ioctl(vd->ldi_handle[request->slice], 2845 MHIOCQRESERVE, NULL, (vd->open_flags | FKIOCTL), kcred, 2846 &rval); 2847 break; 2848 } 2849 2850 if (request->status == 0) 2851 vd->ownership = B_TRUE; 2852 else 2853 PR0("VD_OP_SET_ACCESS: error %d", request->status); 2854 2855 return (0); 2856 } 2857 2858 static void 2859 vd_reset_access(vd_t *vd) 2860 { 2861 int status, rval; 2862 2863 if (vd->file || !vd->ownership) 2864 return; 2865 2866 PR0("Releasing disk ownership"); 2867 status = ldi_ioctl(vd->ldi_handle[0], MHIOCRELEASE, NULL, 2868 (vd->open_flags | FKIOCTL), kcred, &rval); 2869 2870 /* 2871 * An EACCES failure means that there is a reservation conflict, 2872 * so we are not the owner of the disk anymore. 2873 */ 2874 if (status == 0 || status == EACCES) { 2875 vd->ownership = B_FALSE; 2876 return; 2877 } 2878 2879 PR0("Fail to release ownership, error %d", status); 2880 2881 /* 2882 * We have failed to release the ownership, try to reset the disk 2883 * to release reservations. 2884 */ 2885 PR0("Resetting disk"); 2886 status = vd_scsi_reset(vd); 2887 2888 if (status != 0) 2889 PR0("Fail to reset disk, error %d", status); 2890 2891 /* whatever the result of the reset is, we try the release again */ 2892 status = ldi_ioctl(vd->ldi_handle[0], MHIOCRELEASE, NULL, 2893 (vd->open_flags | FKIOCTL), kcred, &rval); 2894 2895 if (status == 0 || status == EACCES) { 2896 vd->ownership = B_FALSE; 2897 return; 2898 } 2899 2900 PR0("Fail to release ownership, error %d", status); 2901 2902 /* 2903 * At this point we have done our best to try to reset the 2904 * access rights to the disk and we don't know if we still 2905 * own a reservation and if any mechanism to preserve the 2906 * ownership is still in place. The ultimate solution would 2907 * be to reset the system but this is usually not what we 2908 * want to happen. 
2909 */ 2910 2911 if (vd_reset_access_failure == A_REBOOT) { 2912 cmn_err(CE_WARN, VD_RESET_ACCESS_FAILURE_MSG 2913 ", rebooting the system", vd->device_path); 2914 (void) uadmin(A_SHUTDOWN, AD_BOOT, NULL); 2915 } else if (vd_reset_access_failure == A_DUMP) { 2916 panic(VD_RESET_ACCESS_FAILURE_MSG, vd->device_path); 2917 } 2918 2919 cmn_err(CE_WARN, VD_RESET_ACCESS_FAILURE_MSG, vd->device_path); 2920 } 2921 2922 /* 2923 * Define the supported operations once the functions for performing them have 2924 * been defined 2925 */ 2926 static const vds_operation_t vds_operation[] = { 2927 #define X(_s) #_s, _s 2928 {X(VD_OP_BREAD), vd_start_bio, vd_complete_bio}, 2929 {X(VD_OP_BWRITE), vd_start_bio, vd_complete_bio}, 2930 {X(VD_OP_FLUSH), vd_ioctl, NULL}, 2931 {X(VD_OP_GET_WCE), vd_ioctl, NULL}, 2932 {X(VD_OP_SET_WCE), vd_ioctl, NULL}, 2933 {X(VD_OP_GET_VTOC), vd_ioctl, NULL}, 2934 {X(VD_OP_SET_VTOC), vd_ioctl, NULL}, 2935 {X(VD_OP_GET_DISKGEOM), vd_ioctl, NULL}, 2936 {X(VD_OP_SET_DISKGEOM), vd_ioctl, NULL}, 2937 {X(VD_OP_GET_EFI), vd_ioctl, NULL}, 2938 {X(VD_OP_SET_EFI), vd_ioctl, NULL}, 2939 {X(VD_OP_GET_DEVID), vd_get_devid, NULL}, 2940 {X(VD_OP_SCSICMD), vd_ioctl, NULL}, 2941 {X(VD_OP_RESET), vd_reset, NULL}, 2942 {X(VD_OP_GET_CAPACITY), vd_get_capacity, NULL}, 2943 {X(VD_OP_SET_ACCESS), vd_set_access, NULL}, 2944 {X(VD_OP_GET_ACCESS), vd_get_access, NULL}, 2945 #undef X 2946 }; 2947 2948 static const size_t vds_noperations = 2949 (sizeof (vds_operation))/(sizeof (vds_operation[0])); 2950 2951 /* 2952 * Process a task specifying a client I/O request 2953 * 2954 * Parameters: 2955 * task - structure containing the request sent from client 2956 * 2957 * Return Value 2958 * 0 - success 2959 * ENOTSUP - Unknown/Unsupported VD_OP_XXX operation 2960 * EINVAL - Invalid disk slice 2961 * != 0 - some other non-zero return value from start function 2962 */ 2963 static int 2964 vd_do_process_task(vd_task_t *task) 2965 { 2966 int i; 2967 vd_t *vd = task->vd; 2968 vd_dring_payload_t *request = task->request; 2969 2970 ASSERT(vd != NULL); 2971 ASSERT(request != NULL); 2972 2973 /* Find the requested operation */ 2974 for (i = 0; i < vds_noperations; i++) { 2975 if (request->operation == vds_operation[i].operation) { 2976 /* all operations should have a start func */ 2977 ASSERT(vds_operation[i].start != NULL); 2978 2979 task->completef = vds_operation[i].complete; 2980 break; 2981 } 2982 } 2983 2984 /* 2985 * We need to check that the requested operation is permitted 2986 * for the particular client that sent it or that the loop above 2987 * did not complete without finding the operation type (indicating 2988 * that the requested operation is unknown/unimplemented) 2989 */ 2990 if ((VD_OP_SUPPORTED(vd->operations, request->operation) == B_FALSE) || 2991 (i == vds_noperations)) { 2992 PR0("Unsupported operation %u", request->operation); 2993 request->status = ENOTSUP; 2994 return (0); 2995 } 2996 2997 /* Range-check slice */ 2998 if (request->slice >= vd->nslices && 2999 (vd->vdisk_type != VD_DISK_TYPE_DISK || 3000 request->slice != VD_SLICE_NONE)) { 3001 PR0("Invalid \"slice\" %u (max %u) for virtual disk", 3002 request->slice, (vd->nslices - 1)); 3003 return (EINVAL); 3004 } 3005 3006 /* 3007 * Call the function pointer that starts the operation. 
3008 	 */
3009 	return (vds_operation[i].start(task));
3010 }
3011 
3012 /*
3013  * Description:
3014  *	This function is called by both the in-band and descriptor ring
3015  *	message processing paths to actually execute the task requested by
3016  *	the vDisk client. It in turn calls its worker function,
3017  *	vd_do_process_task(), to carry out the request.
3018  *
3019  *	Any transport errors (e.g. LDC errors, vDisk protocol errors) are
3020  *	saved in the 'status' field of the task and are propagated back
3021  *	up the call stack to trigger a NACK
3022  *
3023  *	Any request errors (e.g. ENOTTY from an ioctl) are saved in
3024  *	the 'status' field of the request and result in an ACK being sent
3025  *	by the completion handler.
3026  *
3027  * Parameters:
3028  *	task		- structure containing the request sent from client
3029  *
3030  * Return Value
3031  *	0		- successful synchronous request.
3032  *	!= 0		- transport error (e.g. LDC errors, vDisk protocol)
3033  *	EINPROGRESS	- task will be finished in a completion handler
3034  */
3035 static int
3036 vd_process_task(vd_task_t *task)
3037 {
3038 	vd_t *vd = task->vd;
3039 	int status;
3040 
3041 	DTRACE_PROBE1(task__start, vd_task_t *, task);
3042 
3043 	task->status = vd_do_process_task(task);
3044 
3045 	/*
3046 	 * If the task processing function returned EINPROGRESS indicating
3047 	 * that the task needs completing then schedule a taskq entry to
3048 	 * finish it now.
3049 	 *
3050 	 * Otherwise the task processing function returned either zero
3051 	 * indicating that the task was finished in the start function (and we
3052 	 * don't need to wait in a completion function) or the start function
3053 	 * returned an error - in both cases all that needs to happen is the
3054 	 * notification to the vDisk client higher up the call stack.
3055 	 * If the task was using a Descriptor Ring, we need to mark it as done
3056 	 * at this stage.
3057 	 */
3058 	if (task->status == EINPROGRESS) {
3059 		/* Queue a task to complete the operation */
3060 		(void) ddi_taskq_dispatch(vd->completionq, vd_complete,
3061 		    task, DDI_SLEEP);
3062 
3063 	} else if (!vd->reset_state && (vd->xfer_mode == VIO_DRING_MODE)) {
3064 		/* Update the dring element if it's a dring client */
3065 		status = vd_mark_elem_done(vd, task->index,
3066 		    task->request->status, task->request->nbytes);
3067 		if (status == ECONNRESET)
3068 			vd_mark_in_reset(vd);
3069 	}
3070 
3071 	return (task->status);
3072 }
3073 
3074 /*
3075  * Return true if the "type", "subtype", and "env" fields of the "tag" first
3076  * argument match the corresponding remaining arguments; otherwise, return false
3077  */
3078 boolean_t
3079 vd_msgtype(vio_msg_tag_t *tag, int type, int subtype, int env)
3080 {
3081 	return ((tag->vio_msgtype == type) &&
3082 	    (tag->vio_subtype == subtype) &&
3083 	    (tag->vio_subtype_env == env)) ? B_TRUE : B_FALSE;
3084 }
3085 
3086 /*
3087  * Check whether the major/minor version specified in "ver_msg" is supported
3088  * by this server.
3089 */ 3090 static boolean_t 3091 vds_supported_version(vio_ver_msg_t *ver_msg) 3092 { 3093 for (int i = 0; i < vds_num_versions; i++) { 3094 ASSERT(vds_version[i].major > 0); 3095 ASSERT((i == 0) || 3096 (vds_version[i].major < vds_version[i-1].major)); 3097 3098 /* 3099 * If the major versions match, adjust the minor version, if 3100 * necessary, down to the highest value supported by this 3101 * server and return true so this message will get "ack"ed; 3102 * the client should also support all minor versions lower 3103 * than the value it sent 3104 */ 3105 if (ver_msg->ver_major == vds_version[i].major) { 3106 if (ver_msg->ver_minor > vds_version[i].minor) { 3107 PR0("Adjusting minor version from %u to %u", 3108 ver_msg->ver_minor, vds_version[i].minor); 3109 ver_msg->ver_minor = vds_version[i].minor; 3110 } 3111 return (B_TRUE); 3112 } 3113 3114 /* 3115 * If the message contains a higher major version number, set 3116 * the message's major/minor versions to the current values 3117 * and return false, so this message will get "nack"ed with 3118 * these values, and the client will potentially try again 3119 * with the same or a lower version 3120 */ 3121 if (ver_msg->ver_major > vds_version[i].major) { 3122 ver_msg->ver_major = vds_version[i].major; 3123 ver_msg->ver_minor = vds_version[i].minor; 3124 return (B_FALSE); 3125 } 3126 3127 /* 3128 * Otherwise, the message's major version is less than the 3129 * current major version, so continue the loop to the next 3130 * (lower) supported version 3131 */ 3132 } 3133 3134 /* 3135 * No common version was found; "ground" the version pair in the 3136 * message to terminate negotiation 3137 */ 3138 ver_msg->ver_major = 0; 3139 ver_msg->ver_minor = 0; 3140 return (B_FALSE); 3141 } 3142 3143 /* 3144 * Process a version message from a client. vds expects to receive version 3145 * messages from clients seeking service, but never issues version messages 3146 * itself; therefore, vds can ACK or NACK client version messages, but does 3147 * not expect to receive version-message ACKs or NACKs (and will treat such 3148 * messages as invalid). 3149 */ 3150 static int 3151 vd_process_ver_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 3152 { 3153 vio_ver_msg_t *ver_msg = (vio_ver_msg_t *)msg; 3154 3155 3156 ASSERT(msglen >= sizeof (msg->tag)); 3157 3158 if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, 3159 VIO_VER_INFO)) { 3160 return (ENOMSG); /* not a version message */ 3161 } 3162 3163 if (msglen != sizeof (*ver_msg)) { 3164 PR0("Expected %lu-byte version message; " 3165 "received %lu bytes", sizeof (*ver_msg), msglen); 3166 return (EBADMSG); 3167 } 3168 3169 if (ver_msg->dev_class != VDEV_DISK) { 3170 PR0("Expected device class %u (disk); received %u", 3171 VDEV_DISK, ver_msg->dev_class); 3172 return (EBADMSG); 3173 } 3174 3175 /* 3176 * We're talking to the expected kind of client; set our device class 3177 * for "ack/nack" back to the client 3178 */ 3179 ver_msg->dev_class = VDEV_DISK_SERVER; 3180 3181 /* 3182 * Check whether the (valid) version message specifies a version 3183 * supported by this server. 
If the version is not supported, return 3184 * EBADMSG so the message will get "nack"ed; vds_supported_version() 3185 * will have updated the message with a supported version for the 3186 * client to consider 3187 */ 3188 if (!vds_supported_version(ver_msg)) 3189 return (EBADMSG); 3190 3191 3192 /* 3193 * A version has been agreed upon; use the client's SID for 3194 * communication on this channel now 3195 */ 3196 ASSERT(!(vd->initialized & VD_SID)); 3197 vd->sid = ver_msg->tag.vio_sid; 3198 vd->initialized |= VD_SID; 3199 3200 /* 3201 * Store the negotiated major and minor version values in the "vd" data 3202 * structure so that we can check if certain operations are supported 3203 * by the client. 3204 */ 3205 vd->version.major = ver_msg->ver_major; 3206 vd->version.minor = ver_msg->ver_minor; 3207 3208 PR0("Using major version %u, minor version %u", 3209 ver_msg->ver_major, ver_msg->ver_minor); 3210 return (0); 3211 } 3212 3213 static void 3214 vd_set_exported_operations(vd_t *vd) 3215 { 3216 vd->operations = 0; /* clear field */ 3217 3218 /* 3219 * We need to check from the highest version supported to the 3220 * lowest because versions with a higher minor number implicitly 3221 * support versions with a lower minor number. 3222 */ 3223 if (vio_ver_is_supported(vd->version, 1, 1)) { 3224 ASSERT(vd->open_flags & FREAD); 3225 vd->operations |= VD_OP_MASK_READ; 3226 3227 if (vd->open_flags & FWRITE) 3228 vd->operations |= VD_OP_MASK_WRITE; 3229 3230 if (vd->scsi) 3231 vd->operations |= VD_OP_MASK_SCSI; 3232 3233 if (vd->file && vd_file_is_iso_image(vd)) { 3234 /* 3235 * can't write to ISO images, make sure that write 3236 * support is not set in case administrator did not 3237 * use "options=ro" when doing an ldm add-vdsdev 3238 */ 3239 vd->operations &= ~VD_OP_MASK_WRITE; 3240 } 3241 } else if (vio_ver_is_supported(vd->version, 1, 0)) { 3242 vd->operations = VD_OP_MASK_READ | VD_OP_MASK_WRITE; 3243 } 3244 3245 /* we should have already agreed on a version */ 3246 ASSERT(vd->operations != 0); 3247 } 3248 3249 static int 3250 vd_process_attr_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 3251 { 3252 vd_attr_msg_t *attr_msg = (vd_attr_msg_t *)msg; 3253 int status, retry = 0; 3254 3255 3256 ASSERT(msglen >= sizeof (msg->tag)); 3257 3258 if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, 3259 VIO_ATTR_INFO)) { 3260 PR0("Message is not an attribute message"); 3261 return (ENOMSG); 3262 } 3263 3264 if (msglen != sizeof (*attr_msg)) { 3265 PR0("Expected %lu-byte attribute message; " 3266 "received %lu bytes", sizeof (*attr_msg), msglen); 3267 return (EBADMSG); 3268 } 3269 3270 if (attr_msg->max_xfer_sz == 0) { 3271 PR0("Received maximum transfer size of 0 from client"); 3272 return (EBADMSG); 3273 } 3274 3275 if ((attr_msg->xfer_mode != VIO_DESC_MODE) && 3276 (attr_msg->xfer_mode != VIO_DRING_MODE)) { 3277 PR0("Client requested unsupported transfer mode"); 3278 return (EBADMSG); 3279 } 3280 3281 /* 3282 * check if the underlying disk is ready, if not try accessing 3283 * the device again. 
Open the vdisk device and extract info 3284 * about it, as this is needed to respond to the attr info msg 3285 */ 3286 if ((vd->initialized & VD_DISK_READY) == 0) { 3287 PR0("Retry setting up disk (%s)", vd->device_path); 3288 do { 3289 status = vd_setup_vd(vd); 3290 if (status != EAGAIN || ++retry > vds_dev_retries) 3291 break; 3292 3293 /* incremental delay */ 3294 delay(drv_usectohz(vds_dev_delay)); 3295 3296 /* if vdisk is no longer enabled - return error */ 3297 if (!vd_enabled(vd)) 3298 return (ENXIO); 3299 3300 } while (status == EAGAIN); 3301 3302 if (status) 3303 return (ENXIO); 3304 3305 vd->initialized |= VD_DISK_READY; 3306 ASSERT(vd->nslices > 0 && vd->nslices <= V_NUMPAR); 3307 PR0("vdisk_type = %s, pseudo = %s, file = %s, nslices = %u", 3308 ((vd->vdisk_type == VD_DISK_TYPE_DISK) ? "disk" : "slice"), 3309 (vd->pseudo ? "yes" : "no"), 3310 (vd->file ? "yes" : "no"), 3311 vd->nslices); 3312 } 3313 3314 /* Success: valid message and transfer mode */ 3315 vd->xfer_mode = attr_msg->xfer_mode; 3316 3317 if (vd->xfer_mode == VIO_DESC_MODE) { 3318 3319 /* 3320 * The vd_dring_inband_msg_t contains one cookie; need room 3321 * for up to n-1 more cookies, where "n" is the number of full 3322 * pages plus possibly one partial page required to cover 3323 * "max_xfer_sz". Add room for one more cookie if 3324 * "max_xfer_sz" isn't an integral multiple of the page size. 3325 * Must first get the maximum transfer size in bytes. 3326 */ 3327 size_t max_xfer_bytes = attr_msg->vdisk_block_size ? 3328 attr_msg->vdisk_block_size*attr_msg->max_xfer_sz : 3329 attr_msg->max_xfer_sz; 3330 size_t max_inband_msglen = 3331 sizeof (vd_dring_inband_msg_t) + 3332 ((max_xfer_bytes/PAGESIZE + 3333 ((max_xfer_bytes % PAGESIZE) ? 1 : 0))* 3334 (sizeof (ldc_mem_cookie_t))); 3335 3336 /* 3337 * Set the maximum expected message length to 3338 * accommodate in-band-descriptor messages with all 3339 * their cookies 3340 */ 3341 vd->max_msglen = MAX(vd->max_msglen, max_inband_msglen); 3342 3343 /* 3344 * Initialize the data structure for processing in-band I/O 3345 * request descriptors 3346 */ 3347 vd->inband_task.vd = vd; 3348 vd->inband_task.msg = kmem_alloc(vd->max_msglen, KM_SLEEP); 3349 vd->inband_task.index = 0; 3350 vd->inband_task.type = VD_FINAL_RANGE_TASK; /* range == 1 */ 3351 } 3352 3353 /* Return the device's block size and max transfer size to the client */ 3354 attr_msg->vdisk_block_size = vd->vdisk_block_size; 3355 attr_msg->max_xfer_sz = vd->max_xfer_sz; 3356 3357 attr_msg->vdisk_size = vd->vdisk_size; 3358 attr_msg->vdisk_type = vd->vdisk_type; 3359 attr_msg->vdisk_media = vd->vdisk_media; 3360 3361 /* Discover and save the list of supported VD_OP_XXX operations */ 3362 vd_set_exported_operations(vd); 3363 attr_msg->operations = vd->operations; 3364 3365 PR0("%s", VD_CLIENT(vd)); 3366 3367 ASSERT(vd->dring_task == NULL); 3368 3369 return (0); 3370 } 3371 3372 static int 3373 vd_process_dring_reg_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 3374 { 3375 int status; 3376 size_t expected; 3377 ldc_mem_info_t dring_minfo; 3378 vio_dring_reg_msg_t *reg_msg = (vio_dring_reg_msg_t *)msg; 3379 3380 3381 ASSERT(msglen >= sizeof (msg->tag)); 3382 3383 if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, 3384 VIO_DRING_REG)) { 3385 PR0("Message is not a register-dring message"); 3386 return (ENOMSG); 3387 } 3388 3389 if (msglen < sizeof (*reg_msg)) { 3390 PR0("Expected at least %lu-byte register-dring message; " 3391 "received %lu bytes", sizeof (*reg_msg), msglen); 3392 return (EBADMSG); 3393 } 3394 
3395 	expected = sizeof (*reg_msg) +
3396 	    (reg_msg->ncookies - 1)*(sizeof (reg_msg->cookie[0]));
3397 	if (msglen != expected) {
3398 		PR0("Expected %lu-byte register-dring message; "
3399 		    "received %lu bytes", expected, msglen);
3400 		return (EBADMSG);
3401 	}
3402 
3403 	if (vd->initialized & VD_DRING) {
3404 		PR0("A dring was previously registered; only support one");
3405 		return (EBADMSG);
3406 	}
3407 
3408 	if (reg_msg->num_descriptors > INT32_MAX) {
3409 		PR0("reg_msg->num_descriptors = %u; must be <= %u (%s)",
3410 		    reg_msg->num_descriptors, INT32_MAX, STRINGIZE(INT32_MAX));
3411 		return (EBADMSG);
3412 	}
3413 
3414 	if (reg_msg->ncookies != 1) {
3415 		/*
3416 		 * In addition to fixing the assertion in the success case
3417 		 * below, supporting drings which require more than one
3418 		 * "cookie" requires increasing the value of vd->max_msglen
3419 		 * somewhere in the code path prior to receiving the message
3420 		 * which results in calling this function. Note that without
3421 		 * making this change, the larger message size required to
3422 		 * accommodate multiple cookies cannot be successfully
3423 		 * received, so this function will not even get called.
3424 		 * Gracefully accommodating more dring cookies might
3425 		 * reasonably demand exchanging an additional attribute or
3426 		 * making a minor protocol adjustment
3427 		 */
3428 		PR0("reg_msg->ncookies = %u != 1", reg_msg->ncookies);
3429 		return (EBADMSG);
3430 	}
3431 
3432 	status = ldc_mem_dring_map(vd->ldc_handle, reg_msg->cookie,
3433 	    reg_msg->ncookies, reg_msg->num_descriptors,
3434 	    reg_msg->descriptor_size, LDC_DIRECT_MAP, &vd->dring_handle);
3435 	if (status != 0) {
3436 		PR0("ldc_mem_dring_map() returned errno %d", status);
3437 		return (status);
3438 	}
3439 
3440 	/*
3441 	 * To remove the need for this assertion, must call
3442 	 * ldc_mem_dring_nextcookie() successfully ncookies-1 times after a
3443 	 * successful call to ldc_mem_dring_map()
3444 	 */
3445 	ASSERT(reg_msg->ncookies == 1);
3446 
3447 	if ((status =
3448 	    ldc_mem_dring_info(vd->dring_handle, &dring_minfo)) != 0) {
3449 		PR0("ldc_mem_dring_info() returned errno %d", status);
3450 		if ((status = ldc_mem_dring_unmap(vd->dring_handle)) != 0)
3451 			PR0("ldc_mem_dring_unmap() returned errno %d", status);
3452 		return (status);
3453 	}
3454 
3455 	if (dring_minfo.vaddr == NULL) {
3456 		PR0("Descriptor ring virtual address is NULL");
3457 		return (ENXIO);
3458 	}
3459 
3460 
3461 	/* Initialize for valid message and mapped dring */
3462 	vd->initialized |= VD_DRING;
3463 	vd->dring_ident = 1;	/* "There Can Be Only One" */
3464 	vd->dring = dring_minfo.vaddr;
3465 	vd->descriptor_size = reg_msg->descriptor_size;
3466 	vd->dring_len = reg_msg->num_descriptors;
3467 	reg_msg->dring_ident = vd->dring_ident;
3468 	PR1("descriptor size = %u, dring length = %u",
3469 	    vd->descriptor_size, vd->dring_len);
3470 
3471 	/*
3472 	 * Allocate and initialize a "shadow" array of data structures for
3473 	 * tasks to process I/O requests in dring elements
3474 	 */
3475 	vd->dring_task =
3476 	    kmem_zalloc((sizeof (*vd->dring_task)) * vd->dring_len, KM_SLEEP);
3477 	for (int i = 0; i < vd->dring_len; i++) {
3478 		vd->dring_task[i].vd = vd;
3479 		vd->dring_task[i].index = i;
3480 		vd->dring_task[i].request = &VD_DRING_ELEM(i)->payload;
3481 
3482 		status = ldc_mem_alloc_handle(vd->ldc_handle,
3483 		    &(vd->dring_task[i].mhdl));
3484 		if (status) {
3485 			PR0("ldc_mem_alloc_handle() returned err %d ", status);
3486 			return (ENXIO);
3487 		}
3488 
3489 		vd->dring_task[i].msg = kmem_alloc(vd->max_msglen, KM_SLEEP);
3490 	}
3491 
3492 	return (0);
3493 }
3494 
3495 static int
3496 
vd_process_dring_unreg_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 3497 { 3498 vio_dring_unreg_msg_t *unreg_msg = (vio_dring_unreg_msg_t *)msg; 3499 3500 3501 ASSERT(msglen >= sizeof (msg->tag)); 3502 3503 if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, 3504 VIO_DRING_UNREG)) { 3505 PR0("Message is not an unregister-dring message"); 3506 return (ENOMSG); 3507 } 3508 3509 if (msglen != sizeof (*unreg_msg)) { 3510 PR0("Expected %lu-byte unregister-dring message; " 3511 "received %lu bytes", sizeof (*unreg_msg), msglen); 3512 return (EBADMSG); 3513 } 3514 3515 if (unreg_msg->dring_ident != vd->dring_ident) { 3516 PR0("Expected dring ident %lu; received %lu", 3517 vd->dring_ident, unreg_msg->dring_ident); 3518 return (EBADMSG); 3519 } 3520 3521 return (0); 3522 } 3523 3524 static int 3525 process_rdx_msg(vio_msg_t *msg, size_t msglen) 3526 { 3527 ASSERT(msglen >= sizeof (msg->tag)); 3528 3529 if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_RDX)) { 3530 PR0("Message is not an RDX message"); 3531 return (ENOMSG); 3532 } 3533 3534 if (msglen != sizeof (vio_rdx_msg_t)) { 3535 PR0("Expected %lu-byte RDX message; received %lu bytes", 3536 sizeof (vio_rdx_msg_t), msglen); 3537 return (EBADMSG); 3538 } 3539 3540 PR0("Valid RDX message"); 3541 return (0); 3542 } 3543 3544 static int 3545 vd_check_seq_num(vd_t *vd, uint64_t seq_num) 3546 { 3547 if ((vd->initialized & VD_SEQ_NUM) && (seq_num != vd->seq_num + 1)) { 3548 PR0("Received seq_num %lu; expected %lu", 3549 seq_num, (vd->seq_num + 1)); 3550 PR0("initiating soft reset"); 3551 vd_need_reset(vd, B_FALSE); 3552 return (1); 3553 } 3554 3555 vd->seq_num = seq_num; 3556 vd->initialized |= VD_SEQ_NUM; /* superfluous after first time... */ 3557 return (0); 3558 } 3559 3560 /* 3561 * Return the expected size of an inband-descriptor message with all the 3562 * cookies it claims to include 3563 */ 3564 static size_t 3565 expected_inband_size(vd_dring_inband_msg_t *msg) 3566 { 3567 return ((sizeof (*msg)) + 3568 (msg->payload.ncookies - 1)*(sizeof (msg->payload.cookie[0]))); 3569 } 3570 3571 /* 3572 * Process an in-band descriptor message: used with clients like OBP, with 3573 * which vds exchanges descriptors within VIO message payloads, rather than 3574 * operating on them within a descriptor ring 3575 */ 3576 static int 3577 vd_process_desc_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 3578 { 3579 size_t expected; 3580 vd_dring_inband_msg_t *desc_msg = (vd_dring_inband_msg_t *)msg; 3581 3582 3583 ASSERT(msglen >= sizeof (msg->tag)); 3584 3585 if (!vd_msgtype(&msg->tag, VIO_TYPE_DATA, VIO_SUBTYPE_INFO, 3586 VIO_DESC_DATA)) { 3587 PR1("Message is not an in-band-descriptor message"); 3588 return (ENOMSG); 3589 } 3590 3591 if (msglen < sizeof (*desc_msg)) { 3592 PR0("Expected at least %lu-byte descriptor message; " 3593 "received %lu bytes", sizeof (*desc_msg), msglen); 3594 return (EBADMSG); 3595 } 3596 3597 if (msglen != (expected = expected_inband_size(desc_msg))) { 3598 PR0("Expected %lu-byte descriptor message; " 3599 "received %lu bytes", expected, msglen); 3600 return (EBADMSG); 3601 } 3602 3603 if (vd_check_seq_num(vd, desc_msg->hdr.seq_num) != 0) 3604 return (EBADMSG); 3605 3606 /* 3607 * Valid message: Set up the in-band descriptor task and process the 3608 * request. 
Arrange to acknowledge the client's message, unless an
3609  * error processing the descriptor task results in setting
3610  * VIO_SUBTYPE_NACK
3611  */
3612 	PR1("Valid in-band-descriptor message");
3613 	msg->tag.vio_subtype = VIO_SUBTYPE_ACK;
3614 
3615 	ASSERT(vd->inband_task.msg != NULL);
3616 
3617 	bcopy(msg, vd->inband_task.msg, msglen);
3618 	vd->inband_task.msglen = msglen;
3619 
3620 	/*
3621 	 * The task request is now the payload of the message
3622 	 * that was just copied into the body of the task.
3623 	 */
3624 	desc_msg = (vd_dring_inband_msg_t *)vd->inband_task.msg;
3625 	vd->inband_task.request = &desc_msg->payload;
3626 
3627 	return (vd_process_task(&vd->inband_task));
3628 }
3629 
3630 static int
3631 vd_process_element(vd_t *vd, vd_task_type_t type, uint32_t idx,
3632     vio_msg_t *msg, size_t msglen)
3633 {
3634 	int status;
3635 	boolean_t ready;
3636 	vd_dring_entry_t *elem = VD_DRING_ELEM(idx);
3637 
3638 
3639 	/* Accept the updated dring element */
3640 	if ((status = ldc_mem_dring_acquire(vd->dring_handle, idx, idx)) != 0) {
3641 		PR0("ldc_mem_dring_acquire() returned errno %d", status);
3642 		return (status);
3643 	}
3644 	ready = (elem->hdr.dstate == VIO_DESC_READY);
3645 	if (ready) {
3646 		elem->hdr.dstate = VIO_DESC_ACCEPTED;
3647 	} else {
3648 		PR0("descriptor %u not ready", idx);
3649 		VD_DUMP_DRING_ELEM(elem);
3650 	}
3651 	if ((status = ldc_mem_dring_release(vd->dring_handle, idx, idx)) != 0) {
3652 		PR0("ldc_mem_dring_release() returned errno %d", status);
3653 		return (status);
3654 	}
3655 	if (!ready)
3656 		return (EBUSY);
3657 
3658 
3659 	/* Initialize a task and process the accepted element */
3660 	PR1("Processing dring element %u", idx);
3661 	vd->dring_task[idx].type = type;
3662 
3663 	/* duplicate msg buf for cookies etc. */
3664 	bcopy(msg, vd->dring_task[idx].msg, msglen);
3665 
3666 	vd->dring_task[idx].msglen = msglen;
3667 	return (vd_process_task(&vd->dring_task[idx]));
3668 }
3669 
3670 static int
3671 vd_process_element_range(vd_t *vd, int start, int end,
3672     vio_msg_t *msg, size_t msglen)
3673 {
3674 	int i, n, nelem, status = 0;
3675 	boolean_t inprogress = B_FALSE;
3676 	vd_task_type_t type;
3677 
3678 
3679 	ASSERT(start >= 0);
3680 	ASSERT(end >= 0);
3681 
3682 	/*
3683 	 * Arrange to acknowledge the client's message, unless an error
3684 	 * processing one of the dring elements results in setting
3685 	 * VIO_SUBTYPE_NACK
3686 	 */
3687 	msg->tag.vio_subtype = VIO_SUBTYPE_ACK;
3688 
3689 	/*
3690 	 * Process the dring elements in the range
3691 	 */
3692 	nelem = ((end < start) ? end + vd->dring_len : end) - start + 1;
3693 	for (i = start, n = nelem; n > 0; i = (i + 1) % vd->dring_len, n--) {
3694 		((vio_dring_msg_t *)msg)->end_idx = i;
3695 		type = (n == 1) ? VD_FINAL_RANGE_TASK : VD_NONFINAL_RANGE_TASK;
3696 		status = vd_process_element(vd, type, i, msg, msglen);
3697 		if (status == EINPROGRESS)
3698 			inprogress = B_TRUE;
3699 		else if (status != 0)
3700 			break;
3701 	}
3702 
3703 	/*
3704 	 * If some, but not all, operations of a multi-element range are in
3705 	 * progress, wait for other operations to complete before returning
3706 	 * (which will result in "ack" or "nack" of the message). Note that
3707 	 * all outstanding operations will need to complete, not just the ones
3708 	 * corresponding to the current range of dring elements; however, as
3709 	 * this situation is an error case, performance is less critical.
3710 */ 3711 if ((nelem > 1) && (status != EINPROGRESS) && inprogress) 3712 ddi_taskq_wait(vd->completionq); 3713 3714 return (status); 3715 } 3716 3717 static int 3718 vd_process_dring_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 3719 { 3720 vio_dring_msg_t *dring_msg = (vio_dring_msg_t *)msg; 3721 3722 3723 ASSERT(msglen >= sizeof (msg->tag)); 3724 3725 if (!vd_msgtype(&msg->tag, VIO_TYPE_DATA, VIO_SUBTYPE_INFO, 3726 VIO_DRING_DATA)) { 3727 PR1("Message is not a dring-data message"); 3728 return (ENOMSG); 3729 } 3730 3731 if (msglen != sizeof (*dring_msg)) { 3732 PR0("Expected %lu-byte dring message; received %lu bytes", 3733 sizeof (*dring_msg), msglen); 3734 return (EBADMSG); 3735 } 3736 3737 if (vd_check_seq_num(vd, dring_msg->seq_num) != 0) 3738 return (EBADMSG); 3739 3740 if (dring_msg->dring_ident != vd->dring_ident) { 3741 PR0("Expected dring ident %lu; received ident %lu", 3742 vd->dring_ident, dring_msg->dring_ident); 3743 return (EBADMSG); 3744 } 3745 3746 if (dring_msg->start_idx >= vd->dring_len) { 3747 PR0("\"start_idx\" = %u; must be less than %u", 3748 dring_msg->start_idx, vd->dring_len); 3749 return (EBADMSG); 3750 } 3751 3752 if ((dring_msg->end_idx < 0) || 3753 (dring_msg->end_idx >= vd->dring_len)) { 3754 PR0("\"end_idx\" = %u; must be >= 0 and less than %u", 3755 dring_msg->end_idx, vd->dring_len); 3756 return (EBADMSG); 3757 } 3758 3759 /* Valid message; process range of updated dring elements */ 3760 PR1("Processing descriptor range, start = %u, end = %u", 3761 dring_msg->start_idx, dring_msg->end_idx); 3762 return (vd_process_element_range(vd, dring_msg->start_idx, 3763 dring_msg->end_idx, msg, msglen)); 3764 } 3765 3766 static int 3767 recv_msg(ldc_handle_t ldc_handle, void *msg, size_t *nbytes) 3768 { 3769 int retry, status; 3770 size_t size = *nbytes; 3771 3772 3773 for (retry = 0, status = ETIMEDOUT; 3774 retry < vds_ldc_retries && status == ETIMEDOUT; 3775 retry++) { 3776 PR1("ldc_read() attempt %d", (retry + 1)); 3777 *nbytes = size; 3778 status = ldc_read(ldc_handle, msg, nbytes); 3779 } 3780 3781 if (status) { 3782 PR0("ldc_read() returned errno %d", status); 3783 if (status != ECONNRESET) 3784 return (ENOMSG); 3785 return (status); 3786 } else if (*nbytes == 0) { 3787 PR1("ldc_read() returned 0 and no message read"); 3788 return (ENOMSG); 3789 } 3790 3791 PR1("RCVD %lu-byte message", *nbytes); 3792 return (0); 3793 } 3794 3795 static int 3796 vd_do_process_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 3797 { 3798 int status; 3799 3800 3801 PR1("Processing (%x/%x/%x) message", msg->tag.vio_msgtype, 3802 msg->tag.vio_subtype, msg->tag.vio_subtype_env); 3803 #ifdef DEBUG 3804 vd_decode_tag(msg); 3805 #endif 3806 3807 /* 3808 * Validate session ID up front, since it applies to all messages 3809 * once set 3810 */ 3811 if ((msg->tag.vio_sid != vd->sid) && (vd->initialized & VD_SID)) { 3812 PR0("Expected SID %u, received %u", vd->sid, 3813 msg->tag.vio_sid); 3814 return (EBADMSG); 3815 } 3816 3817 PR1("\tWhile in state %d (%s)", vd->state, vd_decode_state(vd->state)); 3818 3819 /* 3820 * Process the received message based on connection state 3821 */ 3822 switch (vd->state) { 3823 case VD_STATE_INIT: /* expect version message */ 3824 if ((status = vd_process_ver_msg(vd, msg, msglen)) != 0) 3825 return (status); 3826 3827 /* Version negotiated, move to that state */ 3828 vd->state = VD_STATE_VER; 3829 return (0); 3830 3831 case VD_STATE_VER: /* expect attribute message */ 3832 if ((status = vd_process_attr_msg(vd, msg, msglen)) != 0) 3833 return (status); 3834 
3835 /* Attributes exchanged, move to that state */ 3836 vd->state = VD_STATE_ATTR; 3837 return (0); 3838 3839 case VD_STATE_ATTR: 3840 switch (vd->xfer_mode) { 3841 case VIO_DESC_MODE: /* expect RDX message */ 3842 if ((status = process_rdx_msg(msg, msglen)) != 0) 3843 return (status); 3844 3845 /* Ready to receive in-band descriptors */ 3846 vd->state = VD_STATE_DATA; 3847 return (0); 3848 3849 case VIO_DRING_MODE: /* expect register-dring message */ 3850 if ((status = 3851 vd_process_dring_reg_msg(vd, msg, msglen)) != 0) 3852 return (status); 3853 3854 /* One dring negotiated, move to that state */ 3855 vd->state = VD_STATE_DRING; 3856 return (0); 3857 3858 default: 3859 ASSERT("Unsupported transfer mode"); 3860 PR0("Unsupported transfer mode"); 3861 return (ENOTSUP); 3862 } 3863 3864 case VD_STATE_DRING: /* expect RDX, register-dring, or unreg-dring */ 3865 if ((status = process_rdx_msg(msg, msglen)) == 0) { 3866 /* Ready to receive data */ 3867 vd->state = VD_STATE_DATA; 3868 return (0); 3869 } else if (status != ENOMSG) { 3870 return (status); 3871 } 3872 3873 3874 /* 3875 * If another register-dring message is received, stay in 3876 * dring state in case the client sends RDX; although the 3877 * protocol allows multiple drings, this server does not 3878 * support using more than one 3879 */ 3880 if ((status = 3881 vd_process_dring_reg_msg(vd, msg, msglen)) != ENOMSG) 3882 return (status); 3883 3884 /* 3885 * Acknowledge an unregister-dring message, but reset the 3886 * connection anyway: Although the protocol allows 3887 * unregistering drings, this server cannot serve a vdisk 3888 * without its only dring 3889 */ 3890 status = vd_process_dring_unreg_msg(vd, msg, msglen); 3891 return ((status == 0) ? ENOTSUP : status); 3892 3893 case VD_STATE_DATA: 3894 switch (vd->xfer_mode) { 3895 case VIO_DESC_MODE: /* expect in-band-descriptor message */ 3896 return (vd_process_desc_msg(vd, msg, msglen)); 3897 3898 case VIO_DRING_MODE: /* expect dring-data or unreg-dring */ 3899 /* 3900 * Typically expect dring-data messages, so handle 3901 * them first 3902 */ 3903 if ((status = vd_process_dring_msg(vd, msg, 3904 msglen)) != ENOMSG) 3905 return (status); 3906 3907 /* 3908 * Acknowledge an unregister-dring message, but reset 3909 * the connection anyway: Although the protocol 3910 * allows unregistering drings, this server cannot 3911 * serve a vdisk without its only dring 3912 */ 3913 status = vd_process_dring_unreg_msg(vd, msg, msglen); 3914 return ((status == 0) ? 
ENOTSUP : status); 3915 3916 default: 3917 ASSERT("Unsupported transfer mode"); 3918 PR0("Unsupported transfer mode"); 3919 return (ENOTSUP); 3920 } 3921 3922 default: 3923 ASSERT("Invalid client connection state"); 3924 PR0("Invalid client connection state"); 3925 return (ENOTSUP); 3926 } 3927 } 3928 3929 static int 3930 vd_process_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 3931 { 3932 int status; 3933 boolean_t reset_ldc = B_FALSE; 3934 vd_task_t task; 3935 3936 /* 3937 * Check that the message is at least big enough for a "tag", so that 3938 * message processing can proceed based on tag-specified message type 3939 */ 3940 if (msglen < sizeof (vio_msg_tag_t)) { 3941 PR0("Received short (%lu-byte) message", msglen); 3942 /* Can't "nack" short message, so drop the big hammer */ 3943 PR0("initiating full reset"); 3944 vd_need_reset(vd, B_TRUE); 3945 return (EBADMSG); 3946 } 3947 3948 /* 3949 * Process the message 3950 */ 3951 switch (status = vd_do_process_msg(vd, msg, msglen)) { 3952 case 0: 3953 /* "ack" valid, successfully-processed messages */ 3954 msg->tag.vio_subtype = VIO_SUBTYPE_ACK; 3955 break; 3956 3957 case EINPROGRESS: 3958 /* The completion handler will "ack" or "nack" the message */ 3959 return (EINPROGRESS); 3960 case ENOMSG: 3961 PR0("Received unexpected message"); 3962 _NOTE(FALLTHROUGH); 3963 case EBADMSG: 3964 case ENOTSUP: 3965 /* "transport" error will cause NACK of invalid messages */ 3966 msg->tag.vio_subtype = VIO_SUBTYPE_NACK; 3967 break; 3968 3969 default: 3970 /* "transport" error will cause NACK of invalid messages */ 3971 msg->tag.vio_subtype = VIO_SUBTYPE_NACK; 3972 /* An LDC error probably occurred, so try resetting it */ 3973 reset_ldc = B_TRUE; 3974 break; 3975 } 3976 3977 PR1("\tResulting in state %d (%s)", vd->state, 3978 vd_decode_state(vd->state)); 3979 3980 /* populate the task so we can dispatch it on the taskq */ 3981 task.vd = vd; 3982 task.msg = msg; 3983 task.msglen = msglen; 3984 3985 /* 3986 * Queue a task to send the notification that the operation completed. 3987 * We need to ensure that requests are responded to in the correct 3988 * order and since the taskq is processed serially this ordering 3989 * is maintained. 3990 */ 3991 (void) ddi_taskq_dispatch(vd->completionq, vd_serial_notify, 3992 &task, DDI_SLEEP); 3993 3994 /* 3995 * To ensure handshake negotiations do not happen out of order, such 3996 * requests that come through this path should not be done in parallel 3997 * so we need to wait here until the response is sent to the client. 3998 */ 3999 ddi_taskq_wait(vd->completionq); 4000 4001 /* Arrange to reset the connection for nack'ed or failed messages */ 4002 if ((status != 0) || reset_ldc) { 4003 PR0("initiating %s reset", 4004 (reset_ldc) ? 
"full" : "soft"); 4005 vd_need_reset(vd, reset_ldc); 4006 } 4007 4008 return (status); 4009 } 4010 4011 static boolean_t 4012 vd_enabled(vd_t *vd) 4013 { 4014 boolean_t enabled; 4015 4016 mutex_enter(&vd->lock); 4017 enabled = vd->enabled; 4018 mutex_exit(&vd->lock); 4019 return (enabled); 4020 } 4021 4022 static void 4023 vd_recv_msg(void *arg) 4024 { 4025 vd_t *vd = (vd_t *)arg; 4026 int rv = 0, status = 0; 4027 4028 ASSERT(vd != NULL); 4029 4030 PR2("New task to receive incoming message(s)"); 4031 4032 4033 while (vd_enabled(vd) && status == 0) { 4034 size_t msglen, msgsize; 4035 ldc_status_t lstatus; 4036 4037 /* 4038 * Receive and process a message 4039 */ 4040 vd_reset_if_needed(vd); /* can change vd->max_msglen */ 4041 4042 /* 4043 * check if channel is UP - else break out of loop 4044 */ 4045 status = ldc_status(vd->ldc_handle, &lstatus); 4046 if (lstatus != LDC_UP) { 4047 PR0("channel not up (status=%d), exiting recv loop\n", 4048 lstatus); 4049 break; 4050 } 4051 4052 ASSERT(vd->max_msglen != 0); 4053 4054 msgsize = vd->max_msglen; /* stable copy for alloc/free */ 4055 msglen = msgsize; /* actual len after recv_msg() */ 4056 4057 status = recv_msg(vd->ldc_handle, vd->vio_msgp, &msglen); 4058 switch (status) { 4059 case 0: 4060 rv = vd_process_msg(vd, (vio_msg_t *)vd->vio_msgp, 4061 msglen); 4062 /* check if max_msglen changed */ 4063 if (msgsize != vd->max_msglen) { 4064 PR0("max_msglen changed 0x%lx to 0x%lx bytes\n", 4065 msgsize, vd->max_msglen); 4066 kmem_free(vd->vio_msgp, msgsize); 4067 vd->vio_msgp = 4068 kmem_alloc(vd->max_msglen, KM_SLEEP); 4069 } 4070 if (rv == EINPROGRESS) 4071 continue; 4072 break; 4073 4074 case ENOMSG: 4075 break; 4076 4077 case ECONNRESET: 4078 PR0("initiating soft reset (ECONNRESET)\n"); 4079 vd_need_reset(vd, B_FALSE); 4080 status = 0; 4081 break; 4082 4083 default: 4084 /* Probably an LDC failure; arrange to reset it */ 4085 PR0("initiating full reset (status=0x%x)", status); 4086 vd_need_reset(vd, B_TRUE); 4087 break; 4088 } 4089 } 4090 4091 PR2("Task finished"); 4092 } 4093 4094 static uint_t 4095 vd_handle_ldc_events(uint64_t event, caddr_t arg) 4096 { 4097 vd_t *vd = (vd_t *)(void *)arg; 4098 int status; 4099 4100 ASSERT(vd != NULL); 4101 4102 if (!vd_enabled(vd)) 4103 return (LDC_SUCCESS); 4104 4105 if (event & LDC_EVT_DOWN) { 4106 PR0("LDC_EVT_DOWN: LDC channel went down"); 4107 4108 vd_need_reset(vd, B_TRUE); 4109 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, vd, 4110 DDI_SLEEP); 4111 if (status == DDI_FAILURE) { 4112 PR0("cannot schedule task to recv msg\n"); 4113 vd_need_reset(vd, B_TRUE); 4114 } 4115 } 4116 4117 if (event & LDC_EVT_RESET) { 4118 PR0("LDC_EVT_RESET: LDC channel was reset"); 4119 4120 if (vd->state != VD_STATE_INIT) { 4121 PR0("scheduling full reset"); 4122 vd_need_reset(vd, B_FALSE); 4123 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, 4124 vd, DDI_SLEEP); 4125 if (status == DDI_FAILURE) { 4126 PR0("cannot schedule task to recv msg\n"); 4127 vd_need_reset(vd, B_TRUE); 4128 } 4129 4130 } else { 4131 PR0("channel already reset, ignoring...\n"); 4132 PR0("doing ldc up...\n"); 4133 (void) ldc_up(vd->ldc_handle); 4134 } 4135 4136 return (LDC_SUCCESS); 4137 } 4138 4139 if (event & LDC_EVT_UP) { 4140 PR0("EVT_UP: LDC is up\nResetting client connection state"); 4141 PR0("initiating soft reset"); 4142 vd_need_reset(vd, B_FALSE); 4143 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, 4144 vd, DDI_SLEEP); 4145 if (status == DDI_FAILURE) { 4146 PR0("cannot schedule task to recv msg\n"); 4147 vd_need_reset(vd, 
B_TRUE); 4148 return (LDC_SUCCESS); 4149 } 4150 } 4151 4152 if (event & LDC_EVT_READ) { 4153 int status; 4154 4155 PR1("New data available"); 4156 /* Queue a task to receive the new data */ 4157 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, vd, 4158 DDI_SLEEP); 4159 4160 if (status == DDI_FAILURE) { 4161 PR0("cannot schedule task to recv msg\n"); 4162 vd_need_reset(vd, B_TRUE); 4163 } 4164 } 4165 4166 return (LDC_SUCCESS); 4167 } 4168 4169 static uint_t 4170 vds_check_for_vd(mod_hash_key_t key, mod_hash_val_t *val, void *arg) 4171 { 4172 _NOTE(ARGUNUSED(key, val)) 4173 (*((uint_t *)arg))++; 4174 return (MH_WALK_TERMINATE); 4175 } 4176 4177 4178 static int 4179 vds_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 4180 { 4181 uint_t vd_present = 0; 4182 minor_t instance; 4183 vds_t *vds; 4184 4185 4186 switch (cmd) { 4187 case DDI_DETACH: 4188 /* the real work happens below */ 4189 break; 4190 case DDI_SUSPEND: 4191 PR0("No action required for DDI_SUSPEND"); 4192 return (DDI_SUCCESS); 4193 default: 4194 PR0("Unrecognized \"cmd\""); 4195 return (DDI_FAILURE); 4196 } 4197 4198 ASSERT(cmd == DDI_DETACH); 4199 instance = ddi_get_instance(dip); 4200 if ((vds = ddi_get_soft_state(vds_state, instance)) == NULL) { 4201 PR0("Could not get state for instance %u", instance); 4202 ddi_soft_state_free(vds_state, instance); 4203 return (DDI_FAILURE); 4204 } 4205 4206 /* Do not detach while serving any vdisks */ 4207 mod_hash_walk(vds->vd_table, vds_check_for_vd, &vd_present); 4208 if (vd_present) { 4209 PR0("Not detaching because serving vdisks"); 4210 return (DDI_FAILURE); 4211 } 4212 4213 PR0("Detaching"); 4214 if (vds->initialized & VDS_MDEG) { 4215 (void) mdeg_unregister(vds->mdeg); 4216 kmem_free(vds->ispecp->specp, sizeof (vds_prop_template)); 4217 kmem_free(vds->ispecp, sizeof (mdeg_node_spec_t)); 4218 vds->ispecp = NULL; 4219 vds->mdeg = NULL; 4220 } 4221 4222 if (vds->initialized & VDS_LDI) 4223 (void) ldi_ident_release(vds->ldi_ident); 4224 mod_hash_destroy_hash(vds->vd_table); 4225 ddi_soft_state_free(vds_state, instance); 4226 return (DDI_SUCCESS); 4227 } 4228 4229 static boolean_t 4230 is_pseudo_device(dev_info_t *dip) 4231 { 4232 dev_info_t *parent, *root = ddi_root_node(); 4233 4234 4235 for (parent = ddi_get_parent(dip); (parent != NULL) && (parent != root); 4236 parent = ddi_get_parent(parent)) { 4237 if (strcmp(ddi_get_name(parent), DEVI_PSEUDO_NEXNAME) == 0) 4238 return (B_TRUE); 4239 } 4240 4241 return (B_FALSE); 4242 } 4243 4244 /* 4245 * Description: 4246 * This function checks to see if the file being used as a 4247 * virtual disk is an ISO image. An ISO image is a special 4248 * case that can be booted or installed from, like a CD/DVD. 4249 * 4250 * Parameters: 4251 * vd - disk on which the operation is performed. 4252 * 4253 * Return Code: 4254 * B_TRUE - The file is an ISO 9660 compliant image 4255 * B_FALSE - just a regular disk image file 4256 */ 4257 static boolean_t 4258 vd_file_is_iso_image(vd_t *vd) 4259 { 4260 char iso_buf[ISO_SECTOR_SIZE]; 4261 int i, rv; 4262 uint_t sec; 4263 4264 ASSERT(vd->file); 4265 4266 /* 4267 * If we have already discovered and saved this info we can 4268 * short-circuit the check and avoid reading the file. 4269 */ 4270 if (vd->vdisk_media == VD_MEDIA_DVD || vd->vdisk_media == VD_MEDIA_CD) 4271 return (B_TRUE); 4272 4273 /* 4274 * We wish to read the sector that should contain the 2nd ISO volume 4275 * descriptor.
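 * (An ISO sector is ISO_SECTOR_SIZE bytes, i.e. 2048 bytes, so the
 * sector number is scaled to vd->vdisk_block_size before reading.)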
The second field in this descriptor is called the 4276 * Standard Identifier and is set to CD001 for a CD-ROM compliant 4277 * with the ISO 9660 standard. 4278 */ 4279 sec = (ISO_VOLDESC_SEC * ISO_SECTOR_SIZE) / vd->vdisk_block_size; 4280 rv = vd_file_rw(vd, VD_SLICE_NONE, VD_OP_BREAD, (caddr_t)iso_buf, 4281 sec, ISO_SECTOR_SIZE); 4282 4283 if (rv < 0) 4284 return (B_FALSE); 4285 4286 for (i = 0; i < ISO_ID_STRLEN; i++) { 4287 if (ISO_STD_ID(iso_buf)[i] != ISO_ID_STRING[i]) 4288 return (B_FALSE); 4289 } 4290 4291 return (B_TRUE); 4292 } 4293 4294 /* 4295 * Description: 4296 * This function checks to see if the virtual device is an ATAPI 4297 * device. ATAPI devices use Group 1 Read/Write commands, so 4298 * any USCSI calls vds makes need to take this into account. 4299 * 4300 * Parameters: 4301 * vd - disk on which the operation is performed. 4302 * 4303 * Return Code: 4304 * B_TRUE - The virtual disk is backed by an ATAPI device 4305 * B_FALSE - not an ATAPI device (presumably SCSI) 4306 */ 4307 static boolean_t 4308 vd_is_atapi_device(vd_t *vd) 4309 { 4310 boolean_t is_atapi = B_FALSE; 4311 char *variantp; 4312 int rv; 4313 4314 ASSERT(vd->ldi_handle[0] != NULL); 4315 ASSERT(!vd->file); 4316 4317 rv = ldi_prop_lookup_string(vd->ldi_handle[0], 4318 (LDI_DEV_T_ANY | DDI_PROP_DONTPASS), "variant", &variantp); 4319 if (rv == DDI_PROP_SUCCESS) { 4320 PR0("'variant' property exists for %s", vd->device_path); 4321 if (strcmp(variantp, "atapi") == 0) 4322 is_atapi = B_TRUE; 4323 ddi_prop_free(variantp); 4324 } 4325 4326 rv = ldi_prop_exists(vd->ldi_handle[0], LDI_DEV_T_ANY, "atapi"); 4327 if (rv) { 4328 PR0("'atapi' property exists for %s", vd->device_path); 4329 is_atapi = B_TRUE; 4330 } 4331 4332 return (is_atapi); 4333 } 4334 4335 static int 4336 vd_setup_mediainfo(vd_t *vd) 4337 { 4338 int status, rval; 4339 struct dk_minfo dk_minfo; 4340 4341 ASSERT(vd->ldi_handle[0] != NULL); 4342 ASSERT(vd->vdisk_block_size != 0); 4343 4344 if ((status = ldi_ioctl(vd->ldi_handle[0], DKIOCGMEDIAINFO, 4345 (intptr_t)&dk_minfo, (vd->open_flags | FKIOCTL), 4346 kcred, &rval)) != 0) 4347 return (status); 4348 4349 ASSERT(dk_minfo.dki_lbsize % vd->vdisk_block_size == 0); 4350 4351 vd->block_size = dk_minfo.dki_lbsize; 4352 vd->vdisk_size = (dk_minfo.dki_capacity * dk_minfo.dki_lbsize) / 4353 vd->vdisk_block_size; 4354 vd->vdisk_media = DK_MEDIATYPE2VD_MEDIATYPE(dk_minfo.dki_media_type); 4355 return (0); 4356 } 4357 4358 static int 4359 vd_setup_full_disk(vd_t *vd) 4360 { 4361 int status; 4362 major_t major = getmajor(vd->dev[0]); 4363 minor_t minor = getminor(vd->dev[0]) - VD_ENTIRE_DISK_SLICE; 4364 4365 ASSERT(vd->vdisk_type == VD_DISK_TYPE_DISK); 4366 4367 vd->vdisk_block_size = DEV_BSIZE; 4368 4369 /* 4370 * At this point, vdisk_size is set to the size of partition 2 but 4371 * this does not represent the size of the disk because partition 2 4372 * may not cover the entire disk and its size does not include reserved 4373 * blocks. So we call vd_setup_mediainfo() to update this information 4374 * and set the block size and the media type of the disk. 4375 */ 4376 status = vd_setup_mediainfo(vd); 4377 4378 if (status != 0) { 4379 if (!vd->scsi) { 4380 /* unexpected failure */ 4381 PRN("ldi_ioctl(DKIOCGMEDIAINFO) returned errno %d", 4382 status); 4383 return (status); 4384 } 4385 4386 /* 4387 * The function can fail for SCSI disks which are present but 4388 * reserved by another system. In that case, we don't know the 4389 * size of the disk and the block size.
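 * In that case we flag the size as unknown and default the media
 * type to VD_MEDIA_FIXED.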
4390 */ 4391 vd->vdisk_size = VD_SIZE_UNKNOWN; 4392 vd->block_size = 0; 4393 vd->vdisk_media = VD_MEDIA_FIXED; 4394 } 4395 4396 /* Move dev number and LDI handle to entire-disk-slice array elements */ 4397 vd->dev[VD_ENTIRE_DISK_SLICE] = vd->dev[0]; 4398 vd->dev[0] = 0; 4399 vd->ldi_handle[VD_ENTIRE_DISK_SLICE] = vd->ldi_handle[0]; 4400 vd->ldi_handle[0] = NULL; 4401 4402 /* Initialize device numbers for remaining slices and open them */ 4403 for (int slice = 0; slice < vd->nslices; slice++) { 4404 /* 4405 * Skip the entire-disk slice, as it's already open and its 4406 * device known 4407 */ 4408 if (slice == VD_ENTIRE_DISK_SLICE) 4409 continue; 4410 ASSERT(vd->dev[slice] == 0); 4411 ASSERT(vd->ldi_handle[slice] == NULL); 4412 4413 /* 4414 * Construct the device number for the current slice 4415 */ 4416 vd->dev[slice] = makedevice(major, (minor + slice)); 4417 4418 /* 4419 * Open all slices of the disk to serve them to the client. 4420 * Slices are opened exclusively to prevent other threads or 4421 * processes in the service domain from performing I/O to 4422 * slices being accessed by a client. Failure to open a slice 4423 * results in vds not serving this disk, as the client could 4424 * attempt (and should be able) to access any slice immediately. 4425 * Any slices successfully opened before a failure will get 4426 * closed by vds_destroy_vd() as a result of the error returned 4427 * by this function. 4428 */ 4432 PR0("Opening device major %u, minor %u = slice %u", 4433 major, minor, slice); 4434 4435 /* 4436 * Try to open the device. This can fail, for example, if we 4437 * are opening an empty slice, so in case of a failure we retry 4438 * the open with the FNDELAY flag.
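 * FNDELAY lets the open succeed even when the slice is empty or the
 * device is not ready (O_NDELAY semantics for device opens); actual
 * I/O to an unusable slice will still fail later.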
4439 */ 4440 status = ldi_open_by_dev(&vd->dev[slice], OTYP_BLK, 4441 vd->open_flags, kcred, &vd->ldi_handle[slice], 4442 vd->vds->ldi_ident); 4443 4444 if (status != 0) { 4445 status = ldi_open_by_dev(&vd->dev[slice], OTYP_BLK, 4446 vd->open_flags | FNDELAY, kcred, 4447 &vd->ldi_handle[slice], vd->vds->ldi_ident); 4448 } 4449 4450 if (status != 0) { 4451 PRN("ldi_open_by_dev() returned errno %d " 4452 "for slice %u", status, slice); 4453 /* vds_destroy_vd() will close any open slices */ 4454 vd->ldi_handle[slice] = NULL; 4455 return (status); 4456 } 4457 } 4458 4459 return (0); 4460 } 4461 4462 static int 4463 vd_setup_partition_vtoc(vd_t *vd) 4464 { 4465 int rval, status; 4466 char *device_path = vd->device_path; 4467 4468 status = ldi_ioctl(vd->ldi_handle[0], DKIOCGGEOM, 4469 (intptr_t)&vd->dk_geom, (vd->open_flags | FKIOCTL), kcred, &rval); 4470 4471 if (status != 0) { 4472 PRN("ldi_ioctl(DKIOCGGEOM) returned errno %d for %s", 4473 status, device_path); 4474 return (status); 4475 } 4476 4477 /* Initialize dk_geom structure for single-slice device */ 4478 if (vd->dk_geom.dkg_nsect == 0) { 4479 PRN("%s geometry claims 0 sectors per track", device_path); 4480 return (EIO); 4481 } 4482 if (vd->dk_geom.dkg_nhead == 0) { 4483 PRN("%s geometry claims 0 heads", device_path); 4484 return (EIO); 4485 } 4486 vd->dk_geom.dkg_ncyl = vd->vdisk_size / vd->dk_geom.dkg_nsect / 4487 vd->dk_geom.dkg_nhead; 4488 vd->dk_geom.dkg_acyl = 0; 4489 vd->dk_geom.dkg_pcyl = vd->dk_geom.dkg_ncyl + vd->dk_geom.dkg_acyl; 4490 4491 4492 /* Initialize vtoc structure for single-slice device */ 4493 bcopy(VD_VOLUME_NAME, vd->vtoc.v_volume, 4494 MIN(sizeof (VD_VOLUME_NAME), sizeof (vd->vtoc.v_volume))); 4495 bzero(vd->vtoc.v_part, sizeof (vd->vtoc.v_part)); 4496 vd->vtoc.v_nparts = 1; 4497 vd->vtoc.v_part[0].p_tag = V_UNASSIGNED; 4498 vd->vtoc.v_part[0].p_flag = 0; 4499 vd->vtoc.v_part[0].p_start = 0; 4500 vd->vtoc.v_part[0].p_size = vd->vdisk_size; 4501 bcopy(VD_ASCIILABEL, vd->vtoc.v_asciilabel, 4502 MIN(sizeof (VD_ASCIILABEL), sizeof (vd->vtoc.v_asciilabel))); 4503 4504 return (0); 4505 } 4506 4507 static int 4508 vd_setup_partition_efi(vd_t *vd) 4509 { 4510 efi_gpt_t *gpt; 4511 efi_gpe_t *gpe; 4512 struct uuid uuid = EFI_RESERVED; 4513 uint32_t crc; 4514 int length; 4515 4516 length = sizeof (efi_gpt_t) + sizeof (efi_gpe_t); 4517 4518 gpt = kmem_zalloc(length, KM_SLEEP); 4519 gpe = (efi_gpe_t *)(gpt + 1); 4520 4521 gpt->efi_gpt_Signature = LE_64(EFI_SIGNATURE); 4522 gpt->efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT); 4523 gpt->efi_gpt_HeaderSize = LE_32(sizeof (efi_gpt_t)); 4524 gpt->efi_gpt_FirstUsableLBA = LE_64(0ULL); 4525 gpt->efi_gpt_LastUsableLBA = LE_64(vd->vdisk_size - 1); 4526 gpt->efi_gpt_NumberOfPartitionEntries = LE_32(1); 4527 gpt->efi_gpt_SizeOfPartitionEntry = LE_32(sizeof (efi_gpe_t)); 4528 4529 UUID_LE_CONVERT(gpe->efi_gpe_PartitionTypeGUID, uuid); 4530 gpe->efi_gpe_StartingLBA = gpt->efi_gpt_FirstUsableLBA; 4531 gpe->efi_gpe_EndingLBA = gpt->efi_gpt_LastUsableLBA; 4532 4533 CRC32(crc, gpe, sizeof (efi_gpe_t), -1U, crc32_table); 4534 gpt->efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc); 4535 4536 CRC32(crc, gpt, sizeof (efi_gpt_t), -1U, crc32_table); 4537 gpt->efi_gpt_HeaderCRC32 = LE_32(~crc); 4538 4539 vd->dk_efi.dki_lba = 0; 4540 vd->dk_efi.dki_length = length; 4541 vd->dk_efi.dki_data = gpt; 4542 4543 return (0); 4544 } 4545 4546 /* 4547 * Setup for a virtual disk whose backend is a file (exported as a single slice 4548 * or as a full disk) or a pseudo device (for example a ZFS, SVM or VxVM volume)
4549 * exported as a full disk. In these cases, the backend is accessed using the 4550 * vnode interface. 4551 */ 4552 static int 4553 vd_setup_backend_vnode(vd_t *vd) 4554 { 4555 int rval, status; 4556 vattr_t vattr; 4557 dev_t dev; 4558 char *file_path = vd->device_path; 4559 char dev_path[MAXPATHLEN + 1]; 4560 ldi_handle_t lhandle; 4561 struct dk_cinfo dk_cinfo; 4562 4563 if ((status = vn_open(file_path, UIO_SYSSPACE, vd->open_flags | FOFFMAX, 4564 0, &vd->file_vnode, 0, 0)) != 0) { 4565 PRN("vn_open(%s) = errno %d", file_path, status); 4566 return (status); 4567 } 4568 4569 /* 4570 * We set vd->file now so that vds_destroy_vd will take care of 4571 * closing the file and releasing the vnode in case of an error. 4572 */ 4573 vd->file = B_TRUE; 4574 4575 vattr.va_mask = AT_SIZE; 4576 if ((status = VOP_GETATTR(vd->file_vnode, &vattr, 0, kcred, NULL)) 4577 != 0) { 4578 PRN("VOP_GETATTR(%s) = errno %d", file_path, status); 4579 return (EIO); 4580 } 4581 4582 vd->file_size = vattr.va_size; 4583 /* size should be at least sizeof(dk_label) */ 4584 if (vd->file_size < sizeof (struct dk_label)) { 4585 PRN("Size of file must be at least %lu bytes", 4586 sizeof (struct dk_label)); 4587 return (EIO); 4588 } 4589 4590 if (vd->file_vnode->v_flag & VNOMAP) { 4591 PRN("File %s cannot be mapped", file_path); 4592 return (EIO); 4593 } 4594 4595 /* 4596 * Find and validate the geometry of a disk image. For a single slice 4597 * disk image, this will build a fake geometry and vtoc. 4598 */ 4599 status = vd_file_validate_geometry(vd); 4600 if (status != 0 && status != EINVAL) { 4601 PRN("Failed to read label from %s", file_path); 4602 return (EIO); 4603 } 4604 4605 /* sector size = block size = DEV_BSIZE */ 4606 vd->block_size = DEV_BSIZE; 4607 vd->vdisk_block_size = DEV_BSIZE; 4608 vd->vdisk_size = vd->file_size / DEV_BSIZE; 4609 vd->max_xfer_sz = maxphys / DEV_BSIZE; /* default transfer size */ 4610 4611 if (vd_file_is_iso_image(vd)) { 4612 /* 4613 * Indicate whether to call this a CD or DVD from the size 4614 * of the ISO image (images for both drive types are stored 4615 * in the ISO 9660 format). CDs can store up to just under 1 GB 4616 */ 4617 if ((vd->vdisk_size * vd->vdisk_block_size) > 4618 (1024 * 1024 * 1024)) 4619 vd->vdisk_media = VD_MEDIA_DVD; 4620 else 4621 vd->vdisk_media = VD_MEDIA_CD; 4622 } else { 4623 vd->vdisk_media = VD_MEDIA_FIXED; 4624 } 4625 4626 /* 4627 * Get max_xfer_sz from the device where the file resides, or from the 4628 * device itself if we have a pseudo device. 4629 */ 4630 dev_path[0] = '\0'; 4631 4632 if (vd->pseudo) { 4633 status = ldi_open_by_name(file_path, FREAD, kcred, &lhandle, 4634 vd->vds->ldi_ident); 4635 } else { 4636 dev = vd->file_vnode->v_vfsp->vfs_dev; 4637 if (ddi_dev_pathname(dev, S_IFBLK, dev_path) == DDI_SUCCESS) { 4638 PR0("underlying device = %s\n", dev_path); 4639 } 4640 4641 status = ldi_open_by_dev(&dev, OTYP_BLK, FREAD, kcred, &lhandle, 4642 vd->vds->ldi_ident); 4643 } 4644 4645 if (status != 0) { 4646 PR0("ldi_open() returned errno %d for device %s", 4647 status, (dev_path[0] == '\0')?
file_path : dev_path); 4648 } else { 4649 if ((status = ldi_ioctl(lhandle, DKIOCINFO, 4650 (intptr_t)&dk_cinfo, (vd->open_flags | FKIOCTL), kcred, 4651 &rval)) != 0) { 4652 PR0("ldi_ioctl(DKIOCINFO) returned errno %d for %s", 4653 status, dev_path); 4654 } else { 4655 /* 4656 * Store the device's max transfer size for 4657 * return to the client 4658 */ 4659 vd->max_xfer_sz = dk_cinfo.dki_maxtransfer; 4660 } 4661 4662 PR0("close the device %s", dev_path); 4663 (void) ldi_close(lhandle, FREAD, kcred); 4664 } 4665 4666 PR0("using file %s, dev %s, max_xfer = %u blks", 4667 file_path, dev_path, vd->max_xfer_sz); 4668 4669 /* Setup devid for the disk image */ 4670 4671 if (vd->vdisk_type == VD_DISK_TYPE_SLICE) 4672 return (0); 4673 4674 if (vd->vdisk_label != VD_DISK_LABEL_UNK) { 4675 4676 status = vd_file_read_devid(vd, &vd->file_devid); 4677 4678 if (status == 0) { 4679 /* a valid devid was found */ 4680 return (0); 4681 } 4682 4683 if (status != EINVAL) { 4684 /* 4685 * There was an error while trying to read the devid. 4686 * So this disk image may have a devid but we are 4687 * unable to read it. 4688 */ 4689 PR0("cannot read devid for %s", file_path); 4690 vd->file_devid = NULL; 4691 return (0); 4692 } 4693 } 4694 4695 /* 4696 * No valid device id was found so we create one. Note that a failure 4697 * to create a device id is not fatal and does not prevent the disk 4698 * image from being attached. 4699 */ 4700 PR1("creating devid for %s", file_path); 4701 4702 if (ddi_devid_init(vd->vds->dip, DEVID_FAB, NULL, 0, 4703 &vd->file_devid) != DDI_SUCCESS) { 4704 PR0("failed to create devid for %s", file_path); 4705 vd->file_devid = NULL; 4706 return (0); 4707 } 4708 4709 /* 4710 * Write devid to the disk image. The devid is stored into the disk 4711 * image if we have a valid label; otherwise the devid will be stored 4712 * when the user writes a valid label. 4713 */ 4714 if (vd->vdisk_label != VD_DISK_LABEL_UNK) { 4715 if (vd_file_write_devid(vd, vd->file_devid) != 0) { 4716 PR0("failed to write devid for %s", file_path); 4717 ddi_devid_free(vd->file_devid); 4718 vd->file_devid = NULL; 4719 } 4720 } 4721 4722 return (0); 4723 } 4724 4725 4726 /* 4727 * Description: 4728 * Open a device using its device path (supplied by ldm(1m)) 4729 * 4730 * Parameters: 4731 * vd - pointer to structure containing the vDisk info 4732 * 4733 * Return Value 4734 * 0 - success 4735 * EIO - Invalid number of partitions 4736 * != 0 - some other non-zero return value from ldi(9F) functions 4737 */ 4738 static int 4739 vd_open_using_ldi_by_name(vd_t *vd) 4740 { 4741 int rval, status, open_flags; 4742 struct dk_cinfo dk_cinfo; 4743 char *device_path = vd->device_path; 4744 4745 /* 4746 * Try to open the device. If the flags indicate that the device should 4747 * be opened write-enabled, we first try to open it "read-only" 4748 * to see if we have an optical device such as a CD-ROM which, for 4749 * now, we do not permit writes to and thus should not export write 4750 * operations to the client. 4751 * 4752 * Future: if/when we implement support for guest domains writing to 4753 * optical devices we will need to do further checking of the media type 4754 * to distinguish between read-only and writable discs.
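 * (e.g. distinguishing pressed CD-ROM media from writable CD-R/RW or
 * DVD media before exporting write access).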
4755 */ 4756 if (vd->open_flags & FWRITE) { 4757 open_flags = vd->open_flags & ~FWRITE; 4758 status = ldi_open_by_name(device_path, open_flags, kcred, 4759 &vd->ldi_handle[0], vd->vds->ldi_ident); 4760 4761 if (status == 0) { 4762 /* Verify backing device supports dk_cinfo */ 4763 status = ldi_ioctl(vd->ldi_handle[0], DKIOCINFO, 4764 (intptr_t)&dk_cinfo, (open_flags | FKIOCTL), 4765 kcred, &rval); 4766 if (status != 0) { 4767 PRN("ldi_ioctl(DKIOCINFO) returned errno %d for" 4768 " %s opened as RO", status, device_path); 4769 return (status); 4770 } 4771 4772 if (dk_cinfo.dki_partition >= V_NUMPAR) { 4773 PRN("slice %u >= maximum slice %u for %s", 4774 dk_cinfo.dki_partition, V_NUMPAR, 4775 device_path); 4776 return (EIO); 4777 } 4778 4779 /* 4780 * If this is an optical device then we disable 4781 * write access and return, otherwise we close 4782 * the device and try again with writes enabled. 4783 */ 4784 if (dk_cinfo.dki_ctype == DKC_CDROM) { 4785 vd->open_flags = open_flags; 4786 return (0); 4787 } else { 4788 (void) ldi_close(vd->ldi_handle[0], 4789 open_flags, kcred); 4790 } 4791 } 4792 } 4793 4794 /* Attempt to (re)open device with the requested flags */ 4795 status = ldi_open_by_name(device_path, vd->open_flags, kcred, 4796 &vd->ldi_handle[0], vd->vds->ldi_ident); 4797 4798 /* 4799 * The open can fail for example if we are opening an empty slice. 4800 * In case of a failure, we try the open again but this time with 4801 * the FNDELAY flag. 4802 */ 4803 if (status != 0) 4804 status = ldi_open_by_name(device_path, vd->open_flags | FNDELAY, 4805 kcred, &vd->ldi_handle[0], vd->vds->ldi_ident); 4806 4807 if (status != 0) { 4808 PR0("ldi_open_by_name(%s) = errno %d", device_path, status); 4809 vd->ldi_handle[0] = NULL; 4810 return (status); 4811 } 4812 4813 /* Verify backing device supports dk_cinfo */ 4814 if ((status = ldi_ioctl(vd->ldi_handle[0], DKIOCINFO, 4815 (intptr_t)&dk_cinfo, (vd->open_flags | FKIOCTL), kcred, 4816 &rval)) != 0) { 4817 PRN("ldi_ioctl(DKIOCINFO) returned errno %d for %s", 4818 status, device_path); 4819 return (status); 4820 } 4821 if (dk_cinfo.dki_partition >= V_NUMPAR) { 4822 PRN("slice %u >= maximum slice %u for %s", 4823 dk_cinfo.dki_partition, V_NUMPAR, device_path); 4824 return (EIO); 4825 } 4826 4827 return (0); 4828 } 4829 4830 4831 /* 4832 * Setup for a virtual disk whose backend is a device (a physical disk, 4833 * slice or pseudo device) that is directly exported either as a full disk 4834 * for a physical disk or as a slice for a pseudo device or a disk slice. 4835 * In these cases, the backend is accessed using the LDI interface.
4836 */ 4837 static int 4838 vd_setup_backend_ldi(vd_t *vd) 4839 { 4840 int rval, status; 4841 struct dk_cinfo dk_cinfo; 4842 char *device_path = vd->device_path; 4843 4844 status = vd_open_using_ldi_by_name(vd); 4845 if (status != 0) { 4846 PR0("Failed to open (%s) = errno %d", device_path, status); 4847 return (status); 4848 } 4849 4850 vd->file = B_FALSE; 4851 4852 /* Get device number of backing device */ 4853 if ((status = ldi_get_dev(vd->ldi_handle[0], &vd->dev[0])) != 0) { 4854 PRN("ldi_get_dev() returned errno %d for %s", 4855 status, device_path); 4856 return (status); 4857 } 4858 4859 /* Verify backing device supports dk_cinfo */ 4860 if ((status = ldi_ioctl(vd->ldi_handle[0], DKIOCINFO, 4861 (intptr_t)&dk_cinfo, (vd->open_flags | FKIOCTL), kcred, 4862 &rval)) != 0) { 4863 PRN("ldi_ioctl(DKIOCINFO) returned errno %d for %s", 4864 status, device_path); 4865 return (status); 4866 } 4867 if (dk_cinfo.dki_partition >= V_NUMPAR) { 4868 PRN("slice %u >= maximum slice %u for %s", 4869 dk_cinfo.dki_partition, V_NUMPAR, device_path); 4870 return (EIO); 4871 } 4872 4873 vd->vdisk_label = vd_read_vtoc(vd, &vd->vtoc); 4874 4875 /* Store the device's max transfer size for return to the client */ 4876 vd->max_xfer_sz = dk_cinfo.dki_maxtransfer; 4877 4878 /* 4879 * We need to work out if it's an ATAPI (IDE CD-ROM) or SCSI device so 4880 * that we can use the correct CDB group when sending USCSI commands. 4881 */ 4882 vd->is_atapi_dev = vd_is_atapi_device(vd); 4883 4884 /* 4885 * Export a full disk. 4886 * 4887 * When we use the LDI interface, we export a device as a full disk 4888 * if we have an entire disk slice (slice 2) and if this slice is 4889 * exported as a full disk and not as a single slice disk. 4890 * Similarly, we want to use LDI if we are accessing a CD or DVD 4891 * device (even if it isn't s2) 4892 * 4893 * Note that pseudo devices are exported as full disks using the vnode 4894 * interface, not the LDI interface. 4895 */ 4896 if ((dk_cinfo.dki_partition == VD_ENTIRE_DISK_SLICE && 4897 vd->vdisk_type == VD_DISK_TYPE_DISK) || 4898 dk_cinfo.dki_ctype == DKC_CDROM) { 4899 ASSERT(!vd->pseudo); 4900 if (dk_cinfo.dki_ctype == DKC_SCSI_CCS) 4901 vd->scsi = B_TRUE; 4902 return (vd_setup_full_disk(vd)); 4903 } 4904 4905 /* 4906 * Export a single slice disk. 4907 * 4908 * The exported device can be either a pseudo device or a disk slice. If 4909 * it is a disk slice different from slice 2 then it is always exported 4910 * as a single slice disk even if the "slice" option is not specified. 4911 * If it is disk slice 2 or a pseudo device then it is exported as a 4912 * single slice disk only if the "slice" option is specified. 4913 */ 4914 return (vd_setup_single_slice_disk(vd)); 4915 } 4916 4917 static int 4918 vd_setup_single_slice_disk(vd_t *vd) 4919 { 4920 int status; 4921 char *device_path = vd->device_path; 4922 4923 /* Get size of backing device */ 4924 if (ldi_get_size(vd->ldi_handle[0], &vd->vdisk_size) != DDI_SUCCESS) { 4925 PRN("ldi_get_size() failed for %s", device_path); 4926 return (EIO); 4927 } 4928 vd->vdisk_size = lbtodb(vd->vdisk_size); /* convert to blocks */ 4929 vd->block_size = DEV_BSIZE; 4930 vd->vdisk_block_size = DEV_BSIZE; 4931 vd->vdisk_media = VD_MEDIA_FIXED; 4932 4933 if (vd->pseudo) { 4934 4935 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE); 4936 4937 /* 4938 * Currently we only support exporting pseudo devices which 4939 * provide a valid disk label. 
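 * A disk label is considered valid here if vd_read_vtoc() identified
 * it as either a VTOC or an EFI label when the backend was opened.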
4940 */ 4941 if (vd->vdisk_label == VD_DISK_LABEL_UNK) { 4942 PRN("%s is a pseudo device with an invalid disk " 4943 "label\n", device_path); 4944 return (EINVAL); 4945 } 4946 return (0); /* ...and we're done */ 4947 } 4948 4949 /* We can only export a slice if the disk has a valid label */ 4950 if (vd->vdisk_label == VD_DISK_LABEL_UNK) { 4951 PRN("%s is a slice from a disk with an unknown disk label\n", 4952 device_path); 4953 return (EINVAL); 4954 } 4955 4956 /* 4957 * We export the slice as a single slice disk even if the "slice" 4958 * option was not specified. 4959 */ 4960 vd->vdisk_type = VD_DISK_TYPE_SLICE; 4961 vd->nslices = 1; 4962 4963 if (vd->vdisk_label == VD_DISK_LABEL_EFI) { 4964 /* Slice from a disk with an EFI label */ 4965 status = vd_setup_partition_efi(vd); 4966 } else { 4967 /* Slice from a disk with a VTOC label */ 4968 ASSERT(vd->vdisk_label == VD_DISK_LABEL_VTOC); 4969 status = vd_setup_partition_vtoc(vd); 4970 } 4971 4972 return (status); 4973 } 4974 4975 static int 4976 vd_setup_vd(vd_t *vd) 4977 { 4978 int status; 4979 dev_info_t *dip; 4980 vnode_t *vnp; 4981 char *path = vd->device_path; 4982 4983 /* make sure the vdisk backend is valid */ 4984 if ((status = lookupname(path, UIO_SYSSPACE, 4985 FOLLOW, NULLVPP, &vnp)) != 0) { 4986 PR0("Cannot lookup %s errno %d", path, status); 4987 goto done; 4988 } 4989 4990 switch (vnp->v_type) { 4991 case VREG: 4992 /* 4993 * Backend is a file so it is exported as a full disk or as a 4994 * single slice disk using the vnode interface. 4995 */ 4996 VN_RELE(vnp); 4997 vd->pseudo = B_FALSE; 4998 status = vd_setup_backend_vnode(vd); 4999 break; 5000 5001 case VBLK: 5002 case VCHR: 5003 /* 5004 * Backend is a device. The way it is exported depends on the 5005 * type of the device. 5006 * 5007 * - A pseudo device is exported as a full disk using the vnode 5008 * interface or as a single slice disk using the LDI 5009 * interface. 5010 * 5011 * - A disk (represented by the slice 2 of that disk) is 5012 * exported as a full disk using the LDI interface. 5013 * 5014 * - A disk slice (different from slice 2) is always exported 5015 * as a single slice disk using the LDI interface. 5016 * 5017 * - Slice 2 of a disk is exported as a single slice disk if 5018 * the "slice" option is specified; otherwise the entire 5019 * disk is exported. In any case, the LDI interface is 5020 * used. 5021 */ 5022 5023 /* check if this is a pseudo device */ 5024 if ((dip = ddi_hold_devi_by_instance(getmajor(vnp->v_rdev), 5025 dev_to_instance(vnp->v_rdev), 0)) == NULL) { 5026 PRN("%s is no longer accessible", path); 5027 VN_RELE(vnp); 5028 status = EIO; 5029 break; 5030 } 5031 vd->pseudo = is_pseudo_device(dip); 5032 ddi_release_devi(dip); 5033 VN_RELE(vnp); 5034 5035 if (!vd->pseudo) { 5036 status = vd_setup_backend_ldi(vd); 5037 break; 5038 } 5039 5040 /* 5041 * If this is a pseudo device then how it is exported depends 5042 * on whether the "slice" option is set. If the "slice" option 5043 * is set then the pseudo device is exported as a single slice 5044 * disk, otherwise it is exported as a full disk. 5045 * 5046 * For backward compatibility, if vd_volume_force_slice is set 5047 * then we always export pseudo devices as slices.
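 * (vd_volume_force_slice is a driver global defined earlier in this
 * file; like other driver tunables, it can presumably be set via
 * /etc/system.)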
5048 */ 5049 if (vd_volume_force_slice) { 5050 vd->vdisk_type = VD_DISK_TYPE_SLICE; 5051 vd->nslices = 1; 5052 } 5053 5054 if (vd->vdisk_type == VD_DISK_TYPE_DISK) 5055 status = vd_setup_backend_vnode(vd); 5056 else 5057 status = vd_setup_backend_ldi(vd); 5058 break; 5059 5060 default: 5061 PRN("Unsupported vdisk backend %s", path); 5062 VN_RELE(vnp); 5063 status = EBADF; 5064 } 5065 5066 done: 5067 if (status != 0) { 5068 /* 5069 * If the error is retryable print an error message only 5070 * during the first try. 5071 */ 5072 if (status == ENXIO || status == ENODEV || 5073 status == ENOENT || status == EROFS) { 5074 if (!(vd->initialized & VD_SETUP_ERROR)) { 5075 PRN("%s is currently inaccessible (error %d)", 5076 path, status); 5077 } 5078 status = EAGAIN; 5079 } else { 5080 PRN("%s can not be exported as a virtual disk " 5081 "(error %d)", path, status); 5082 } 5083 vd->initialized |= VD_SETUP_ERROR; 5084 5085 } else if (vd->initialized & VD_SETUP_ERROR) { 5086 /* print a message only if we previously had an error */ 5087 PRN("%s is now online", path); 5088 vd->initialized &= ~VD_SETUP_ERROR; 5089 } 5090 5091 return (status); 5092 } 5093 5094 static int 5095 vds_do_init_vd(vds_t *vds, uint64_t id, char *device_path, uint64_t options, 5096 uint64_t ldc_id, vd_t **vdp) 5097 { 5098 char tq_name[TASKQ_NAMELEN]; 5099 int status; 5100 ddi_iblock_cookie_t iblock = NULL; 5101 ldc_attr_t ldc_attr; 5102 vd_t *vd; 5103 5104 5105 ASSERT(vds != NULL); 5106 ASSERT(device_path != NULL); 5107 ASSERT(vdp != NULL); 5108 PR0("Adding vdisk for %s", device_path); 5109 5110 if ((vd = kmem_zalloc(sizeof (*vd), KM_NOSLEEP)) == NULL) { 5111 PRN("No memory for virtual disk"); 5112 return (EAGAIN); 5113 } 5114 *vdp = vd; /* assign here so vds_destroy_vd() can cleanup later */ 5115 vd->vds = vds; 5116 (void) strncpy(vd->device_path, device_path, MAXPATHLEN); 5117 5118 /* Setup open flags */ 5119 vd->open_flags = FREAD; 5120 5121 if (!(options & VD_OPT_RDONLY)) 5122 vd->open_flags |= FWRITE; 5123 5124 if (options & VD_OPT_EXCLUSIVE) 5125 vd->open_flags |= FEXCL; 5126 5127 /* Setup disk type */ 5128 if (options & VD_OPT_SLICE) { 5129 vd->vdisk_type = VD_DISK_TYPE_SLICE; 5130 vd->nslices = 1; 5131 } else { 5132 vd->vdisk_type = VD_DISK_TYPE_DISK; 5133 vd->nslices = V_NUMPAR; 5134 } 5135 5136 /* default disk label */ 5137 vd->vdisk_label = VD_DISK_LABEL_UNK; 5138 5139 /* Open vdisk and initialize parameters */ 5140 if ((status = vd_setup_vd(vd)) == 0) { 5141 vd->initialized |= VD_DISK_READY; 5142 5143 ASSERT(vd->nslices > 0 && vd->nslices <= V_NUMPAR); 5144 PR0("vdisk_type = %s, pseudo = %s, file = %s, nslices = %u", 5145 ((vd->vdisk_type == VD_DISK_TYPE_DISK) ? "disk" : "slice"), 5146 (vd->pseudo ? "yes" : "no"), (vd->file ? 
"yes" : "no"), 5147 vd->nslices); 5148 } else { 5149 if (status != EAGAIN) 5150 return (status); 5151 } 5152 5153 /* Initialize locking */ 5154 if (ddi_get_soft_iblock_cookie(vds->dip, DDI_SOFTINT_MED, 5155 &iblock) != DDI_SUCCESS) { 5156 PRN("Could not get iblock cookie."); 5157 return (EIO); 5158 } 5159 5160 mutex_init(&vd->lock, NULL, MUTEX_DRIVER, iblock); 5161 vd->initialized |= VD_LOCKING; 5162 5163 5164 /* Create start and completion task queues for the vdisk */ 5165 (void) snprintf(tq_name, sizeof (tq_name), "vd_startq%lu", id); 5166 PR1("tq_name = %s", tq_name); 5167 if ((vd->startq = ddi_taskq_create(vds->dip, tq_name, 1, 5168 TASKQ_DEFAULTPRI, 0)) == NULL) { 5169 PRN("Could not create task queue"); 5170 return (EIO); 5171 } 5172 (void) snprintf(tq_name, sizeof (tq_name), "vd_completionq%lu", id); 5173 PR1("tq_name = %s", tq_name); 5174 if ((vd->completionq = ddi_taskq_create(vds->dip, tq_name, 1, 5175 TASKQ_DEFAULTPRI, 0)) == NULL) { 5176 PRN("Could not create task queue"); 5177 return (EIO); 5178 } 5179 vd->enabled = 1; /* before callback can dispatch to startq */ 5180 5181 5182 /* Bring up LDC */ 5183 ldc_attr.devclass = LDC_DEV_BLK_SVC; 5184 ldc_attr.instance = ddi_get_instance(vds->dip); 5185 ldc_attr.mode = LDC_MODE_UNRELIABLE; 5186 ldc_attr.mtu = VD_LDC_MTU; 5187 if ((status = ldc_init(ldc_id, &ldc_attr, &vd->ldc_handle)) != 0) { 5188 PRN("Could not initialize LDC channel %lx, " 5189 "init failed with error %d", ldc_id, status); 5190 return (status); 5191 } 5192 vd->initialized |= VD_LDC; 5193 5194 if ((status = ldc_reg_callback(vd->ldc_handle, vd_handle_ldc_events, 5195 (caddr_t)vd)) != 0) { 5196 PRN("Could not initialize LDC channel %lu," 5197 "reg_callback failed with error %d", ldc_id, status); 5198 return (status); 5199 } 5200 5201 if ((status = ldc_open(vd->ldc_handle)) != 0) { 5202 PRN("Could not initialize LDC channel %lu," 5203 "open failed with error %d", ldc_id, status); 5204 return (status); 5205 } 5206 5207 if ((status = ldc_up(vd->ldc_handle)) != 0) { 5208 PR0("ldc_up() returned errno %d", status); 5209 } 5210 5211 /* Allocate the inband task memory handle */ 5212 status = ldc_mem_alloc_handle(vd->ldc_handle, &(vd->inband_task.mhdl)); 5213 if (status) { 5214 PRN("Could not initialize LDC channel %lu," 5215 "alloc_handle failed with error %d", ldc_id, status); 5216 return (ENXIO); 5217 } 5218 5219 /* Add the successfully-initialized vdisk to the server's table */ 5220 if (mod_hash_insert(vds->vd_table, (mod_hash_key_t)id, vd) != 0) { 5221 PRN("Error adding vdisk ID %lu to table", id); 5222 return (EIO); 5223 } 5224 5225 /* Allocate the staging buffer */ 5226 vd->max_msglen = sizeof (vio_msg_t); /* baseline vio message size */ 5227 vd->vio_msgp = kmem_alloc(vd->max_msglen, KM_SLEEP); 5228 5229 /* store initial state */ 5230 vd->state = VD_STATE_INIT; 5231 5232 return (0); 5233 } 5234 5235 static void 5236 vd_free_dring_task(vd_t *vdp) 5237 { 5238 if (vdp->dring_task != NULL) { 5239 ASSERT(vdp->dring_len != 0); 5240 /* Free all dring_task memory handles */ 5241 for (int i = 0; i < vdp->dring_len; i++) { 5242 (void) ldc_mem_free_handle(vdp->dring_task[i].mhdl); 5243 kmem_free(vdp->dring_task[i].msg, vdp->max_msglen); 5244 vdp->dring_task[i].msg = NULL; 5245 } 5246 kmem_free(vdp->dring_task, 5247 (sizeof (*vdp->dring_task)) * vdp->dring_len); 5248 vdp->dring_task = NULL; 5249 } 5250 } 5251 5252 /* 5253 * Destroy the state associated with a virtual disk 5254 */ 5255 static void 5256 vds_destroy_vd(void *arg) 5257 { 5258 vd_t *vd = (vd_t *)arg; 5259 int retry = 0, 
rv; 5260 5261 if (vd == NULL) 5262 return; 5263 5264 PR0("Destroying vdisk state"); 5265 5266 if (vd->dk_efi.dki_data != NULL) 5267 kmem_free(vd->dk_efi.dki_data, vd->dk_efi.dki_length); 5268 5269 /* Disable queuing requests for the vdisk */ 5270 if (vd->initialized & VD_LOCKING) { 5271 mutex_enter(&vd->lock); 5272 vd->enabled = 0; 5273 mutex_exit(&vd->lock); 5274 } 5275 5276 /* Drain and destroy start queue (*before* destroying completionq) */ 5277 if (vd->startq != NULL) 5278 ddi_taskq_destroy(vd->startq); /* waits for queued tasks */ 5279 5280 /* Drain and destroy completion queue (*before* shutting down LDC) */ 5281 if (vd->completionq != NULL) 5282 ddi_taskq_destroy(vd->completionq); /* waits for tasks */ 5283 5284 vd_free_dring_task(vd); 5285 5286 /* Free the inband task memory handle */ 5287 (void) ldc_mem_free_handle(vd->inband_task.mhdl); 5288 5289 /* Shut down LDC */ 5290 if (vd->initialized & VD_LDC) { 5291 /* unmap the dring */ 5292 if (vd->initialized & VD_DRING) 5293 (void) ldc_mem_dring_unmap(vd->dring_handle); 5294 5295 /* close LDC channel - retry on EAGAIN */ 5296 while ((rv = ldc_close(vd->ldc_handle)) == EAGAIN) { 5297 if (++retry > vds_ldc_retries) { 5298 PR0("Timed out closing channel"); 5299 break; 5300 } 5301 drv_usecwait(vds_ldc_delay); 5302 } 5303 if (rv == 0) { 5304 (void) ldc_unreg_callback(vd->ldc_handle); 5305 (void) ldc_fini(vd->ldc_handle); 5306 } else { 5307 /* 5308 * Closing the LDC channel has failed. Ideally we should 5309 * fail here but there is no Zeus level infrastructure 5310 * to handle this. The MD has already been changed and 5311 * we have to do the close. So we try to do as much 5312 * clean up as we can. 5313 */ 5314 (void) ldc_set_cb_mode(vd->ldc_handle, LDC_CB_DISABLE); 5315 while (ldc_unreg_callback(vd->ldc_handle) == EAGAIN) 5316 drv_usecwait(vds_ldc_delay); 5317 } 5318 } 5319 5320 /* Free the staging buffer for msgs */ 5321 if (vd->vio_msgp != NULL) { 5322 kmem_free(vd->vio_msgp, vd->max_msglen); 5323 vd->vio_msgp = NULL; 5324 } 5325 5326 /* Free the inband message buffer */ 5327 if (vd->inband_task.msg != NULL) { 5328 kmem_free(vd->inband_task.msg, vd->max_msglen); 5329 vd->inband_task.msg = NULL; 5330 } 5331 5332 if (vd->file) { 5333 /* Close file */ 5334 (void) VOP_CLOSE(vd->file_vnode, vd->open_flags, 1, 5335 0, kcred, NULL); 5336 VN_RELE(vd->file_vnode); 5337 if (vd->file_devid != NULL) 5338 ddi_devid_free(vd->file_devid); 5339 } else { 5340 /* Close any open backing-device slices */ 5341 for (uint_t slice = 0; slice < vd->nslices; slice++) { 5342 if (vd->ldi_handle[slice] != NULL) { 5343 PR0("Closing slice %u", slice); 5344 (void) ldi_close(vd->ldi_handle[slice], 5345 vd->open_flags, kcred); 5346 } 5347 } 5348 } 5349 5350 /* Free lock */ 5351 if (vd->initialized & VD_LOCKING) 5352 mutex_destroy(&vd->lock); 5353 5354 /* Finally, free the vdisk structure itself */ 5355 kmem_free(vd, sizeof (*vd)); 5356 } 5357 5358 static int 5359 vds_init_vd(vds_t *vds, uint64_t id, char *device_path, uint64_t options, 5360 uint64_t ldc_id) 5361 { 5362 int status; 5363 vd_t *vd = NULL; 5364 5365 5366 if ((status = vds_do_init_vd(vds, id, device_path, options, 5367 ldc_id, &vd)) != 0) 5368 vds_destroy_vd(vd); 5369 5370 return (status); 5371 } 5372 5373 static int 5374 vds_do_get_ldc_id(md_t *md, mde_cookie_t vd_node, mde_cookie_t *channel, 5375 uint64_t *ldc_id) 5376 { 5377 int num_channels; 5378 5379 5380 /* Look for channel endpoint child(ren) of the vdisk MD node */ 5381 if ((num_channels = md_scan_dag(md, vd_node, 5382 md_find_name(md, 
VD_CHANNEL_ENDPOINT), 5383 md_find_name(md, "fwd"), channel)) <= 0) { 5384 PRN("No \"%s\" found for virtual disk", VD_CHANNEL_ENDPOINT); 5385 return (-1); 5386 } 5387 5388 /* Get the "id" value for the first channel endpoint node */ 5389 if (md_get_prop_val(md, channel[0], VD_ID_PROP, ldc_id) != 0) { 5390 PRN("No \"%s\" property found for \"%s\" of vdisk", 5391 VD_ID_PROP, VD_CHANNEL_ENDPOINT); 5392 return (-1); 5393 } 5394 5395 if (num_channels > 1) { 5396 PRN("Using ID of first of multiple channels for this vdisk"); 5397 } 5398 5399 return (0); 5400 } 5401 5402 static int 5403 vds_get_ldc_id(md_t *md, mde_cookie_t vd_node, uint64_t *ldc_id) 5404 { 5405 int num_nodes, status; 5406 size_t size; 5407 mde_cookie_t *channel; 5408 5409 5410 if ((num_nodes = md_node_count(md)) <= 0) { 5411 PRN("Invalid node count in Machine Description subtree"); 5412 return (-1); 5413 } 5414 size = num_nodes*(sizeof (*channel)); 5415 channel = kmem_zalloc(size, KM_SLEEP); 5416 status = vds_do_get_ldc_id(md, vd_node, channel, ldc_id); 5417 kmem_free(channel, size); 5418 5419 return (status); 5420 } 5421 5422 /* 5423 * Function: 5424 * vds_get_options 5425 * 5426 * Description: 5427 * Parse the options of a vds node. Options are defined as an array 5428 * of strings in the vds-block-device-opts property of the vds node 5429 * in the machine description. Options are returned as a bitmask. The 5430 * mapping between the bitmask options and the options strings from the 5431 * machine description is defined in the vd_bdev_options[] array. 5432 * 5433 * The vds-block-device-opts property is optional. If a vds has no such 5434 * property then no option is defined. 5435 * 5436 * Parameters: 5437 * md - machine description. 5438 * vd_node - vds node in the machine description for which 5439 * options have to be parsed. 5440 * options - the returned options. 5441 * 5442 * Return Code: 5443 * none. 
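 *
 * For example, assuming vd_bdev_options[] has an entry mapping the
 * option name "ro" to VD_OPT_RDONLY, a vds-block-device-opts list
 * containing "ro" would cause VD_OPT_RDONLY to be set in *options.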
5444 */ 5445 static void 5446 vds_get_options(md_t *md, mde_cookie_t vd_node, uint64_t *options) 5447 { 5448 char *optstr, *opt; 5449 int len, n, i; 5450 5451 *options = 0; 5452 5453 if (md_get_prop_data(md, vd_node, VD_BLOCK_DEVICE_OPTS, 5454 (uint8_t **)&optstr, &len) != 0) { 5455 PR0("No options found"); 5456 return; 5457 } 5458 5459 /* parse options */ 5460 opt = optstr; 5461 n = sizeof (vd_bdev_options) / sizeof (vd_option_t); 5462 5463 while (opt < optstr + len) { 5464 for (i = 0; i < n; i++) { 5465 if (strncmp(vd_bdev_options[i].vdo_name, 5466 opt, VD_OPTION_NLEN) == 0) { 5467 *options |= vd_bdev_options[i].vdo_value; 5468 break; 5469 } 5470 } 5471 5472 if (i < n) { 5473 PR0("option: %s", opt); 5474 } else { 5475 PRN("option %s is unknown or unsupported", opt); 5476 } 5477 5478 opt += strlen(opt) + 1; 5479 } 5480 } 5481 5482 static void 5483 vds_add_vd(vds_t *vds, md_t *md, mde_cookie_t vd_node) 5484 { 5485 char *device_path = NULL; 5486 uint64_t id = 0, ldc_id = 0, options = 0; 5487 5488 if (md_get_prop_val(md, vd_node, VD_ID_PROP, &id) != 0) { 5489 PRN("Error getting vdisk \"%s\"", VD_ID_PROP); 5490 return; 5491 } 5492 PR0("Adding vdisk ID %lu", id); 5493 if (md_get_prop_str(md, vd_node, VD_BLOCK_DEVICE_PROP, 5494 &device_path) != 0) { 5495 PRN("Error getting vdisk \"%s\"", VD_BLOCK_DEVICE_PROP); 5496 return; 5497 } 5498 5499 vds_get_options(md, vd_node, &options); 5500 5501 if (vds_get_ldc_id(md, vd_node, &ldc_id) != 0) { 5502 PRN("Error getting LDC ID for vdisk %lu", id); 5503 return; 5504 } 5505 5506 if (vds_init_vd(vds, id, device_path, options, ldc_id) != 0) { 5507 PRN("Failed to add vdisk ID %lu", id); 5508 if (mod_hash_destroy(vds->vd_table, (mod_hash_key_t)id) != 0) 5509 PRN("No vdisk entry found for vdisk ID %lu", id); 5510 return; 5511 } 5512 } 5513 5514 static void 5515 vds_remove_vd(vds_t *vds, md_t *md, mde_cookie_t vd_node) 5516 { 5517 uint64_t id = 0; 5518 5519 5520 if (md_get_prop_val(md, vd_node, VD_ID_PROP, &id) != 0) { 5521 PRN("Unable to get \"%s\" property from vdisk's MD node", 5522 VD_ID_PROP); 5523 return; 5524 } 5525 PR0("Removing vdisk ID %lu", id); 5526 if (mod_hash_destroy(vds->vd_table, (mod_hash_key_t)id) != 0) 5527 PRN("No vdisk entry found for vdisk ID %lu", id); 5528 } 5529 5530 static void 5531 vds_change_vd(vds_t *vds, md_t *prev_md, mde_cookie_t prev_vd_node, 5532 md_t *curr_md, mde_cookie_t curr_vd_node) 5533 { 5534 char *curr_dev, *prev_dev; 5535 uint64_t curr_id = 0, curr_ldc_id = 0, curr_options = 0; 5536 uint64_t prev_id = 0, prev_ldc_id = 0, prev_options = 0; 5537 size_t len; 5538 5539 5540 /* Validate that vdisk ID has not changed */ 5541 if (md_get_prop_val(prev_md, prev_vd_node, VD_ID_PROP, &prev_id) != 0) { 5542 PRN("Error getting previous vdisk \"%s\" property", 5543 VD_ID_PROP); 5544 return; 5545 } 5546 if (md_get_prop_val(curr_md, curr_vd_node, VD_ID_PROP, &curr_id) != 0) { 5547 PRN("Error getting current vdisk \"%s\" property", VD_ID_PROP); 5548 return; 5549 } 5550 if (curr_id != prev_id) { 5551 PRN("Not changing vdisk: ID changed from %lu to %lu", 5552 prev_id, curr_id); 5553 return; 5554 } 5555 5556 /* Validate that LDC ID has not changed */ 5557 if (vds_get_ldc_id(prev_md, prev_vd_node, &prev_ldc_id) != 0) { 5558 PRN("Error getting LDC ID for vdisk %lu", prev_id); 5559 return; 5560 } 5561 5562 if (vds_get_ldc_id(curr_md, curr_vd_node, &curr_ldc_id) != 0) { 5563 PRN("Error getting LDC ID for vdisk %lu", curr_id); 5564 return; 5565 } 5566 if (curr_ldc_id != prev_ldc_id) { 5567 _NOTE(NOTREACHED); /* lint is confused */ 5568
PRN("Not changing vdisk: " 5569 "LDC ID changed from %lu to %lu", prev_ldc_id, curr_ldc_id); 5570 return; 5571 } 5572 5573 /* Determine whether device path has changed */ 5574 if (md_get_prop_str(prev_md, prev_vd_node, VD_BLOCK_DEVICE_PROP, 5575 &prev_dev) != 0) { 5576 PRN("Error getting previous vdisk \"%s\"", 5577 VD_BLOCK_DEVICE_PROP); 5578 return; 5579 } 5580 if (md_get_prop_str(curr_md, curr_vd_node, VD_BLOCK_DEVICE_PROP, 5581 &curr_dev) != 0) { 5582 PRN("Error getting current vdisk \"%s\"", VD_BLOCK_DEVICE_PROP); 5583 return; 5584 } 5585 if (((len = strlen(curr_dev)) == strlen(prev_dev)) && 5586 (strncmp(curr_dev, prev_dev, len) == 0)) 5587 return; /* no relevant (supported) change */ 5588 5589 /* Validate that options have not changed */ 5590 vds_get_options(prev_md, prev_vd_node, &prev_options); 5591 vds_get_options(curr_md, curr_vd_node, &curr_options); 5592 if (prev_options != curr_options) { 5593 PRN("Not changing vdisk: options changed from %lx to %lx", 5594 prev_options, curr_options); 5595 return; 5596 } 5597 5598 PR0("Changing vdisk ID %lu", prev_id); 5599 5600 /* Remove old state, which will close vdisk and reset */ 5601 if (mod_hash_destroy(vds->vd_table, (mod_hash_key_t)prev_id) != 0) 5602 PRN("No entry found for vdisk ID %lu", prev_id); 5603 5604 /* Re-initialize vdisk with new state */ 5605 if (vds_init_vd(vds, curr_id, curr_dev, curr_options, 5606 curr_ldc_id) != 0) { 5607 PRN("Failed to change vdisk ID %lu", curr_id); 5608 return; 5609 } 5610 } 5611 5612 static int 5613 vds_process_md(void *arg, mdeg_result_t *md) 5614 { 5615 int i; 5616 vds_t *vds = arg; 5617 5618 5619 if (md == NULL) 5620 return (MDEG_FAILURE); 5621 ASSERT(vds != NULL); 5622 5623 for (i = 0; i < md->removed.nelem; i++) 5624 vds_remove_vd(vds, md->removed.mdp, md->removed.mdep[i]); 5625 for (i = 0; i < md->match_curr.nelem; i++) 5626 vds_change_vd(vds, md->match_prev.mdp, md->match_prev.mdep[i], 5627 md->match_curr.mdp, md->match_curr.mdep[i]); 5628 for (i = 0; i < md->added.nelem; i++) 5629 vds_add_vd(vds, md->added.mdp, md->added.mdep[i]); 5630 5631 return (MDEG_SUCCESS); 5632 } 5633 5634 5635 static int 5636 vds_do_attach(dev_info_t *dip) 5637 { 5638 int status, sz; 5639 int cfg_handle; 5640 minor_t instance = ddi_get_instance(dip); 5641 vds_t *vds; 5642 mdeg_prop_spec_t *pspecp; 5643 mdeg_node_spec_t *ispecp; 5644 5645 /* 5646 * The "cfg-handle" property of a vds node in an MD contains the MD's 5647 * notion of "instance", or unique identifier, for that node; OBP 5648 * stores the value of the "cfg-handle" MD property as the value of 5649 * the "reg" property on the node in the device tree it builds from 5650 * the MD and passes to Solaris. Thus, we look up the devinfo node's 5651 * "reg" property value to uniquely identify this device instance when 5652 * registering with the MD event-generation framework. If the "reg" 5653 * property cannot be found, the device tree state is presumably so 5654 * broken that there is no point in continuing. 
5655 */ 5656 if (!ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 5657 VD_REG_PROP)) { 5658 PRN("vds \"%s\" property does not exist", VD_REG_PROP); 5659 return (DDI_FAILURE); 5660 } 5661 5662 /* Get the MD instance for later MDEG registration */ 5663 cfg_handle = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 5664 VD_REG_PROP, -1); 5665 5666 if (ddi_soft_state_zalloc(vds_state, instance) != DDI_SUCCESS) { 5667 PRN("Could not allocate state for instance %u", instance); 5668 return (DDI_FAILURE); 5669 } 5670 5671 if ((vds = ddi_get_soft_state(vds_state, instance)) == NULL) { 5672 PRN("Could not get state for instance %u", instance); 5673 ddi_soft_state_free(vds_state, instance); 5674 return (DDI_FAILURE); 5675 } 5676 5677 vds->dip = dip; 5678 vds->vd_table = mod_hash_create_ptrhash("vds_vd_table", VDS_NCHAINS, 5679 vds_destroy_vd, sizeof (void *)); 5680 5681 ASSERT(vds->vd_table != NULL); 5682 5683 if ((status = ldi_ident_from_dip(dip, &vds->ldi_ident)) != 0) { 5684 PRN("ldi_ident_from_dip() returned errno %d", status); 5685 return (DDI_FAILURE); 5686 } 5687 vds->initialized |= VDS_LDI; 5688 5689 /* Register for MD updates */ 5690 sz = sizeof (vds_prop_template); 5691 pspecp = kmem_alloc(sz, KM_SLEEP); 5692 bcopy(vds_prop_template, pspecp, sz); 5693 5694 VDS_SET_MDEG_PROP_INST(pspecp, cfg_handle); 5695 5696 /* initialize the complete prop spec structure */ 5697 ispecp = kmem_zalloc(sizeof (mdeg_node_spec_t), KM_SLEEP); 5698 ispecp->namep = "virtual-device"; 5699 ispecp->specp = pspecp; 5700 5701 if (mdeg_register(ispecp, &vd_match, vds_process_md, vds, 5702 &vds->mdeg) != MDEG_SUCCESS) { 5703 PRN("Unable to register for MD updates"); 5704 kmem_free(ispecp, sizeof (mdeg_node_spec_t)); 5705 kmem_free(pspecp, sz); 5706 return (DDI_FAILURE); 5707 } 5708 5709 vds->ispecp = ispecp; 5710 vds->initialized |= VDS_MDEG; 5711 5712 /* Prevent auto-detaching so driver is available whenever MD changes */ 5713 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip, DDI_NO_AUTODETACH, 1) != 5714 DDI_PROP_SUCCESS) { 5715 PRN("failed to set \"%s\" property for instance %u", 5716 DDI_NO_AUTODETACH, instance); 5717 } 5718 5719 ddi_report_dev(dip); 5720 return (DDI_SUCCESS); 5721 } 5722 5723 static int 5724 vds_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 5725 { 5726 int status; 5727 5728 switch (cmd) { 5729 case DDI_ATTACH: 5730 PR0("Attaching"); 5731 if ((status = vds_do_attach(dip)) != DDI_SUCCESS) 5732 (void) vds_detach(dip, DDI_DETACH); 5733 return (status); 5734 case DDI_RESUME: 5735 PR0("No action required for DDI_RESUME"); 5736 return (DDI_SUCCESS); 5737 default: 5738 return (DDI_FAILURE); 5739 } 5740 } 5741 5742 static struct dev_ops vds_ops = { 5743 DEVO_REV, /* devo_rev */ 5744 0, /* devo_refcnt */ 5745 ddi_no_info, /* devo_getinfo */ 5746 nulldev, /* devo_identify */ 5747 nulldev, /* devo_probe */ 5748 vds_attach, /* devo_attach */ 5749 vds_detach, /* devo_detach */ 5750 nodev, /* devo_reset */ 5751 NULL, /* devo_cb_ops */ 5752 NULL, /* devo_bus_ops */ 5753 nulldev /* devo_power */ 5754 }; 5755 5756 static struct modldrv modldrv = { 5757 &mod_driverops, 5758 "virtual disk server", 5759 &vds_ops, 5760 }; 5761 5762 static struct modlinkage modlinkage = { 5763 MODREV_1, 5764 &modldrv, 5765 NULL 5766 }; 5767 5768 5769 int 5770 _init(void) 5771 { 5772 int status; 5773 5774 if ((status = ddi_soft_state_init(&vds_state, sizeof (vds_t), 1)) != 0) 5775 return (status); 5776 5777 if ((status = mod_install(&modlinkage)) != 0) { 5778 ddi_soft_state_fini(&vds_state); 5779 return (status); 5780 } 5781 5782 
return (0); 5783 } 5784 5785 int 5786 _info(struct modinfo *modinfop) 5787 { 5788 return (mod_info(&modlinkage, modinfop)); 5789 } 5790 5791 int 5792 _fini(void) 5793 { 5794 int status; 5795 5796 if ((status = mod_remove(&modlinkage)) != 0) 5797 return (status); 5798 ddi_soft_state_fini(&vds_state); 5799 return (0); 5800 } 5801