1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 /* 28 * Virtual disk server 29 */ 30 31 32 #include <sys/types.h> 33 #include <sys/conf.h> 34 #include <sys/crc32.h> 35 #include <sys/ddi.h> 36 #include <sys/dkio.h> 37 #include <sys/file.h> 38 #include <sys/fs/hsfs_isospec.h> 39 #include <sys/mdeg.h> 40 #include <sys/mhd.h> 41 #include <sys/modhash.h> 42 #include <sys/note.h> 43 #include <sys/pathname.h> 44 #include <sys/sdt.h> 45 #include <sys/sunddi.h> 46 #include <sys/sunldi.h> 47 #include <sys/sysmacros.h> 48 #include <sys/vio_common.h> 49 #include <sys/vio_util.h> 50 #include <sys/vdsk_mailbox.h> 51 #include <sys/vdsk_common.h> 52 #include <sys/vtoc.h> 53 #include <sys/vfs.h> 54 #include <sys/stat.h> 55 #include <sys/scsi/impl/uscsi.h> 56 #include <sys/ontrap.h> 57 #include <vm/seg_map.h> 58 59 #define ONE_MEGABYTE (1ULL << 20) 60 #define ONE_GIGABYTE (1ULL << 30) 61 #define ONE_TERABYTE (1ULL << 40) 62 63 /* Virtual disk server initialization flags */ 64 #define VDS_LDI 0x01 65 #define VDS_MDEG 0x02 66 67 /* Virtual disk server tunable parameters */ 68 #define VDS_RETRIES 5 69 #define VDS_LDC_DELAY 1000 /* 1 msecs */ 70 #define VDS_DEV_DELAY 10000000 /* 10 secs */ 71 #define VDS_NCHAINS 32 72 73 /* Identification parameters for MD, synthetic dkio(7i) structures, etc. */ 74 #define VDS_NAME "virtual-disk-server" 75 76 #define VD_NAME "vd" 77 #define VD_VOLUME_NAME "vdisk" 78 #define VD_ASCIILABEL "Virtual Disk" 79 80 #define VD_CHANNEL_ENDPOINT "channel-endpoint" 81 #define VD_ID_PROP "id" 82 #define VD_BLOCK_DEVICE_PROP "vds-block-device" 83 #define VD_BLOCK_DEVICE_OPTS "vds-block-device-opts" 84 #define VD_REG_PROP "reg" 85 86 /* Virtual disk initialization flags */ 87 #define VD_DISK_READY 0x01 88 #define VD_LOCKING 0x02 89 #define VD_LDC 0x04 90 #define VD_DRING 0x08 91 #define VD_SID 0x10 92 #define VD_SEQ_NUM 0x20 93 #define VD_SETUP_ERROR 0x40 94 95 /* Flags for writing to a vdisk which is a file */ 96 #define VD_FILE_WRITE_FLAGS SM_ASYNC 97 98 /* Number of backup labels */ 99 #define VD_DSKIMG_NUM_BACKUP 5 100 101 /* Timeout for SCSI I/O */ 102 #define VD_SCSI_RDWR_TIMEOUT 30 /* 30 secs */ 103 104 /* Maximum number of logical partitions */ 105 #define VD_MAXPART (NDKMAP + 1) 106 107 /* 108 * By Solaris convention, slice/partition 2 represents the entire disk; 109 * unfortunately, this convention does not appear to be codified. 
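 * For example, the default VTOC label built by vd_build_default_label()
 * below tags slice 2 (VD_ENTIRE_DISK_SLICE) as V_BACKUP and sizes it to
 * cover all data cylinders (ncyl * nhead * nsect blocks).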
110 */ 111 #define VD_ENTIRE_DISK_SLICE 2 112 113 /* Logical block address for EFI */ 114 #define VD_EFI_LBA_GPT 1 /* LBA of the GPT */ 115 #define VD_EFI_LBA_GPE 2 /* LBA of the GPE */ 116 117 /* Driver types */ 118 typedef enum vd_driver { 119 VD_DRIVER_UNKNOWN = 0, /* driver type unknown */ 120 VD_DRIVER_DISK, /* disk driver */ 121 VD_DRIVER_VOLUME /* volume driver */ 122 } vd_driver_t; 123 124 #define VD_DRIVER_NAME_LEN 64 125 126 #define VDS_NUM_DRIVERS (sizeof (vds_driver_types) / sizeof (vd_driver_type_t)) 127 128 typedef struct vd_driver_type { 129 char name[VD_DRIVER_NAME_LEN]; /* driver name */ 130 vd_driver_t type; /* driver type (disk or volume) */ 131 } vd_driver_type_t; 132 133 /* 134 * There is no reliable way to determine if a device is representing a disk 135 * or a volume, especially with pseudo devices. So we maintain a list of well 136 * known drivers and the type of device they represent (either a disk or a 137 * volume). 138 * 139 * The list can be extended by adding a "driver-type-list" entry in vds.conf 140 * with the following syntax: 141 * 142 * driver-type-list="<driver>:<type>", ... ,"<driver>:<type>"; 143 * 144 * Where: 145 * <driver> is the name of a driver (limited to 64 characters) 146 * <type> is either the string "disk" or "volume" 147 * 148 * Invalid entries in "driver-type-list" will be ignored. 149 * 150 * For example, the following line in vds.conf: 151 * 152 * driver-type-list="foo:disk","bar:volume"; 153 * 154 * defines that "foo" is a disk driver, and driver "bar" is a volume driver. 155 * 156 * When a list is defined in vds.conf, it is checked before the built-in list 157 * (vds_driver_types[]) so that any definition from this list can be overriden 158 * using vds.conf. 159 */ 160 vd_driver_type_t vds_driver_types[] = { 161 { "dad", VD_DRIVER_DISK }, /* Solaris */ 162 { "did", VD_DRIVER_DISK }, /* Sun Cluster */ 163 { "emcp", VD_DRIVER_DISK }, /* EMC Powerpath */ 164 { "lofi", VD_DRIVER_VOLUME }, /* Solaris */ 165 { "md", VD_DRIVER_VOLUME }, /* Solaris - SVM */ 166 { "sd", VD_DRIVER_DISK }, /* Solaris */ 167 { "ssd", VD_DRIVER_DISK }, /* Solaris */ 168 { "vdc", VD_DRIVER_DISK }, /* Solaris */ 169 { "vxdmp", VD_DRIVER_DISK }, /* Veritas */ 170 { "vxio", VD_DRIVER_VOLUME }, /* Veritas - VxVM */ 171 { "zfs", VD_DRIVER_VOLUME } /* Solaris */ 172 }; 173 174 /* Return a cpp token as a string */ 175 #define STRINGIZE(token) #token 176 177 /* 178 * Print a message prefixed with the current function name to the message log 179 * (and optionally to the console for verbose boots); these macros use cpp's 180 * concatenation of string literals and C99 variable-length-argument-list 181 * macros 182 */ 183 #define PRN(...) _PRN("?%s(): "__VA_ARGS__, "") 184 #define _PRN(format, ...) \ 185 cmn_err(CE_CONT, format"%s", __func__, __VA_ARGS__) 186 187 /* Return a pointer to the "i"th vdisk dring element */ 188 #define VD_DRING_ELEM(i) ((vd_dring_entry_t *)(void *) \ 189 (vd->dring + (i)*vd->descriptor_size)) 190 191 /* Return the virtual disk client's type as a string (for use in messages) */ 192 #define VD_CLIENT(vd) \ 193 (((vd)->xfer_mode == VIO_DESC_MODE) ? "in-band client" : \ 194 (((vd)->xfer_mode == VIO_DRING_MODE_V1_0) ? "dring client" : \ 195 (((vd)->xfer_mode == 0) ? 
"null client" : \ 196 "unsupported client"))) 197 198 /* Read disk label from a disk image */ 199 #define VD_DSKIMG_LABEL_READ(vd, labelp) \ 200 vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BREAD, (caddr_t)labelp, \ 201 0, sizeof (struct dk_label)) 202 203 /* Write disk label to a disk image */ 204 #define VD_DSKIMG_LABEL_WRITE(vd, labelp) \ 205 vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BWRITE, (caddr_t)labelp, \ 206 0, sizeof (struct dk_label)) 207 208 /* Identify if a backend is a disk image */ 209 #define VD_DSKIMG(vd) ((vd)->vdisk_type == VD_DISK_TYPE_DISK && \ 210 ((vd)->file || (vd)->volume)) 211 212 /* Message for disk access rights reset failure */ 213 #define VD_RESET_ACCESS_FAILURE_MSG \ 214 "Fail to reset disk access rights for disk %s" 215 216 /* 217 * Specification of an MD node passed to the MDEG to filter any 218 * 'vport' nodes that do not belong to the specified node. This 219 * template is copied for each vds instance and filled in with 220 * the appropriate 'cfg-handle' value before being passed to the MDEG. 221 */ 222 static mdeg_prop_spec_t vds_prop_template[] = { 223 { MDET_PROP_STR, "name", VDS_NAME }, 224 { MDET_PROP_VAL, "cfg-handle", NULL }, 225 { MDET_LIST_END, NULL, NULL } 226 }; 227 228 #define VDS_SET_MDEG_PROP_INST(specp, val) (specp)[1].ps_val = (val); 229 230 /* 231 * Matching criteria passed to the MDEG to register interest 232 * in changes to 'virtual-device-port' nodes identified by their 233 * 'id' property. 234 */ 235 static md_prop_match_t vd_prop_match[] = { 236 { MDET_PROP_VAL, VD_ID_PROP }, 237 { MDET_LIST_END, NULL } 238 }; 239 240 static mdeg_node_match_t vd_match = {"virtual-device-port", 241 vd_prop_match}; 242 243 /* 244 * Options for the VD_BLOCK_DEVICE_OPTS property. 245 */ 246 #define VD_OPT_RDONLY 0x1 /* read-only */ 247 #define VD_OPT_SLICE 0x2 /* single slice */ 248 #define VD_OPT_EXCLUSIVE 0x4 /* exclusive access */ 249 250 #define VD_OPTION_NLEN 128 251 252 typedef struct vd_option { 253 char vdo_name[VD_OPTION_NLEN]; 254 uint64_t vdo_value; 255 } vd_option_t; 256 257 vd_option_t vd_bdev_options[] = { 258 { "ro", VD_OPT_RDONLY }, 259 { "slice", VD_OPT_SLICE }, 260 { "excl", VD_OPT_EXCLUSIVE } 261 }; 262 263 /* Debugging macros */ 264 #ifdef DEBUG 265 266 static int vd_msglevel = 0; 267 268 #define PR0 if (vd_msglevel > 0) PRN 269 #define PR1 if (vd_msglevel > 1) PRN 270 #define PR2 if (vd_msglevel > 2) PRN 271 272 #define VD_DUMP_DRING_ELEM(elem) \ 273 PR0("dst:%x op:%x st:%u nb:%lx addr:%lx ncook:%u\n", \ 274 elem->hdr.dstate, \ 275 elem->payload.operation, \ 276 elem->payload.status, \ 277 elem->payload.nbytes, \ 278 elem->payload.addr, \ 279 elem->payload.ncookies); 280 281 char * 282 vd_decode_state(int state) 283 { 284 char *str; 285 286 #define CASE_STATE(_s) case _s: str = #_s; break; 287 288 switch (state) { 289 CASE_STATE(VD_STATE_INIT) 290 CASE_STATE(VD_STATE_VER) 291 CASE_STATE(VD_STATE_ATTR) 292 CASE_STATE(VD_STATE_DRING) 293 CASE_STATE(VD_STATE_RDX) 294 CASE_STATE(VD_STATE_DATA) 295 default: str = "unknown"; break; 296 } 297 298 #undef CASE_STATE 299 300 return (str); 301 } 302 303 void 304 vd_decode_tag(vio_msg_t *msg) 305 { 306 char *tstr, *sstr, *estr; 307 308 #define CASE_TYPE(_s) case _s: tstr = #_s; break; 309 310 switch (msg->tag.vio_msgtype) { 311 CASE_TYPE(VIO_TYPE_CTRL) 312 CASE_TYPE(VIO_TYPE_DATA) 313 CASE_TYPE(VIO_TYPE_ERR) 314 default: tstr = "unknown"; break; 315 } 316 317 #undef CASE_TYPE 318 319 #define CASE_SUBTYPE(_s) case _s: sstr = #_s; break; 320 321 switch (msg->tag.vio_subtype) { 322 
CASE_SUBTYPE(VIO_SUBTYPE_INFO) 323 CASE_SUBTYPE(VIO_SUBTYPE_ACK) 324 CASE_SUBTYPE(VIO_SUBTYPE_NACK) 325 default: sstr = "unknown"; break; 326 } 327 328 #undef CASE_SUBTYPE 329 330 #define CASE_ENV(_s) case _s: estr = #_s; break; 331 332 switch (msg->tag.vio_subtype_env) { 333 CASE_ENV(VIO_VER_INFO) 334 CASE_ENV(VIO_ATTR_INFO) 335 CASE_ENV(VIO_DRING_REG) 336 CASE_ENV(VIO_DRING_UNREG) 337 CASE_ENV(VIO_RDX) 338 CASE_ENV(VIO_PKT_DATA) 339 CASE_ENV(VIO_DESC_DATA) 340 CASE_ENV(VIO_DRING_DATA) 341 default: estr = "unknown"; break; 342 } 343 344 #undef CASE_ENV 345 346 PR1("(%x/%x/%x) message : (%s/%s/%s)", 347 msg->tag.vio_msgtype, msg->tag.vio_subtype, 348 msg->tag.vio_subtype_env, tstr, sstr, estr); 349 } 350 351 #else /* !DEBUG */ 352 353 #define PR0(...) 354 #define PR1(...) 355 #define PR2(...) 356 357 #define VD_DUMP_DRING_ELEM(elem) 358 359 #define vd_decode_state(_s) (NULL) 360 #define vd_decode_tag(_s) (NULL) 361 362 #endif /* DEBUG */ 363 364 365 /* 366 * Soft state structure for a vds instance 367 */ 368 typedef struct vds { 369 uint_t initialized; /* driver inst initialization flags */ 370 dev_info_t *dip; /* driver inst devinfo pointer */ 371 ldi_ident_t ldi_ident; /* driver's identifier for LDI */ 372 mod_hash_t *vd_table; /* table of virtual disks served */ 373 mdeg_node_spec_t *ispecp; /* mdeg node specification */ 374 mdeg_handle_t mdeg; /* handle for MDEG operations */ 375 vd_driver_type_t *driver_types; /* extra driver types (from vds.conf) */ 376 int num_drivers; /* num of extra driver types */ 377 } vds_t; 378 379 /* 380 * Types of descriptor-processing tasks 381 */ 382 typedef enum vd_task_type { 383 VD_NONFINAL_RANGE_TASK, /* task for intermediate descriptor in range */ 384 VD_FINAL_RANGE_TASK, /* task for last in a range of descriptors */ 385 } vd_task_type_t; 386 387 /* 388 * Structure describing the task for processing a descriptor 389 */ 390 typedef struct vd_task { 391 struct vd *vd; /* vd instance task is for */ 392 vd_task_type_t type; /* type of descriptor task */ 393 int index; /* dring elem index for task */ 394 vio_msg_t *msg; /* VIO message task is for */ 395 size_t msglen; /* length of message content */ 396 vd_dring_payload_t *request; /* request task will perform */ 397 struct buf buf; /* buf(9s) for I/O request */ 398 ldc_mem_handle_t mhdl; /* task memory handle */ 399 int status; /* status of processing task */ 400 int (*completef)(struct vd_task *task); /* completion func ptr */ 401 } vd_task_t; 402 403 /* 404 * Soft state structure for a virtual disk instance 405 */ 406 typedef struct vd { 407 uint_t initialized; /* vdisk initialization flags */ 408 uint64_t operations; /* bitmask of VD_OPs exported */ 409 vio_ver_t version; /* ver negotiated with client */ 410 vds_t *vds; /* server for this vdisk */ 411 ddi_taskq_t *startq; /* queue for I/O start tasks */ 412 ddi_taskq_t *completionq; /* queue for completion tasks */ 413 ldi_handle_t ldi_handle[V_NUMPAR]; /* LDI slice handles */ 414 char device_path[MAXPATHLEN + 1]; /* vdisk device */ 415 dev_t dev[V_NUMPAR]; /* dev numbers for slices */ 416 int open_flags; /* open flags */ 417 uint_t nslices; /* number of slices we export */ 418 size_t vdisk_size; /* number of blocks in vdisk */ 419 size_t vdisk_block_size; /* size of each vdisk block */ 420 vd_disk_type_t vdisk_type; /* slice or entire disk */ 421 vd_disk_label_t vdisk_label; /* EFI or VTOC label */ 422 vd_media_t vdisk_media; /* media type of backing dev. */ 423 boolean_t is_atapi_dev; /* Is this an IDE CD-ROM dev? 
*/ 424 ushort_t max_xfer_sz; /* max xfer size in DEV_BSIZE */ 425 size_t block_size; /* blk size of actual device */ 426 boolean_t volume; /* is vDisk backed by volume */ 427 boolean_t zvol; /* is vDisk backed by a zvol */ 428 boolean_t file; /* is vDisk backed by a file? */ 429 boolean_t scsi; /* is vDisk backed by scsi? */ 430 vnode_t *file_vnode; /* file vnode */ 431 size_t dskimg_size; /* size of disk image */ 432 ddi_devid_t dskimg_devid; /* devid for disk image */ 433 int efi_reserved; /* EFI reserved slice */ 434 caddr_t flabel; /* fake label for slice type */ 435 uint_t flabel_size; /* fake label size */ 436 uint_t flabel_limit; /* limit of the fake label */ 437 struct dk_geom dk_geom; /* synthetic for slice type */ 438 struct extvtoc vtoc; /* synthetic for slice type */ 439 vd_slice_t slices[VD_MAXPART]; /* logical partitions */ 440 boolean_t ownership; /* disk ownership status */ 441 ldc_status_t ldc_state; /* LDC connection state */ 442 ldc_handle_t ldc_handle; /* handle for LDC comm */ 443 size_t max_msglen; /* largest LDC message len */ 444 vd_state_t state; /* client handshake state */ 445 uint8_t xfer_mode; /* transfer mode with client */ 446 uint32_t sid; /* client's session ID */ 447 uint64_t seq_num; /* message sequence number */ 448 uint64_t dring_ident; /* identifier of dring */ 449 ldc_dring_handle_t dring_handle; /* handle for dring ops */ 450 uint32_t descriptor_size; /* num bytes in desc */ 451 uint32_t dring_len; /* number of dring elements */ 452 uint8_t dring_mtype; /* dring mem map type */ 453 caddr_t dring; /* address of dring */ 454 caddr_t vio_msgp; /* vio msg staging buffer */ 455 vd_task_t inband_task; /* task for inband descriptor */ 456 vd_task_t *dring_task; /* tasks dring elements */ 457 458 kmutex_t lock; /* protects variables below */ 459 boolean_t enabled; /* is vdisk enabled? */ 460 boolean_t reset_state; /* reset connection state? */ 461 boolean_t reset_ldc; /* reset LDC channel? */ 462 } vd_t; 463 464 /* 465 * Macros to manipulate the fake label (flabel) for single slice disks. 466 * 467 * If we fake a VTOC label then the fake label consists of only one block 468 * containing the VTOC label (struct dk_label). 469 * 470 * If we fake an EFI label then the fake label consists of a blank block 471 * followed by a GPT (efi_gpt_t) and a GPE (efi_gpe_t). 
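 * The VD_LABEL_VTOC_SIZE and VD_LABEL_EFI_SIZE macros below size these fake
 * labels accordingly; in the EFI case one DEV_BSIZE block is reserved in
 * front of the GPT, which is followed by VD_MAXPART GPEs, and the total is
 * rounded up to a multiple of DEV_BSIZE.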
 *
 */
#define VD_LABEL_VTOC_SIZE \
        P2ROUNDUP(sizeof (struct dk_label), DEV_BSIZE)

#define VD_LABEL_EFI_SIZE \
        P2ROUNDUP(DEV_BSIZE + sizeof (efi_gpt_t) + \
            sizeof (efi_gpe_t) * VD_MAXPART, DEV_BSIZE)

#define VD_LABEL_VTOC(vd) \
        ((struct dk_label *)(void *)((vd)->flabel))

#define VD_LABEL_EFI_GPT(vd) \
        ((efi_gpt_t *)(void *)((vd)->flabel + DEV_BSIZE))
#define VD_LABEL_EFI_GPE(vd) \
        ((efi_gpe_t *)(void *)((vd)->flabel + DEV_BSIZE + \
            sizeof (efi_gpt_t)))


typedef struct vds_operation {
        char            *namep;
        uint8_t         operation;
        int             (*start)(vd_task_t *task);
        int             (*complete)(vd_task_t *task);
} vds_operation_t;

typedef struct vd_ioctl {
        uint8_t         operation;              /* vdisk operation */
        const char      *operation_name;        /* vdisk operation name */
        size_t          nbytes;                 /* size of operation buffer */
        int             cmd;                    /* corresponding ioctl cmd */
        const char      *cmd_name;              /* ioctl cmd name */
        void            *arg;                   /* ioctl cmd argument */
        /* convert input vd_buf to output ioctl_arg */
        int             (*copyin)(void *vd_buf, size_t, void *ioctl_arg);
        /* convert input ioctl_arg to output vd_buf */
        void            (*copyout)(void *ioctl_arg, void *vd_buf);
        /* write is true if the operation writes any data to the backend */
        boolean_t       write;
} vd_ioctl_t;

/* Define trivial copyin/copyout conversion function flag */
#define VD_IDENTITY_IN  ((int (*)(void *, size_t, void *))-1)
#define VD_IDENTITY_OUT ((void (*)(void *, void *))-1)


static int      vds_ldc_retries = VDS_RETRIES;
static int      vds_ldc_delay = VDS_LDC_DELAY;
static int      vds_dev_retries = VDS_RETRIES;
static int      vds_dev_delay = VDS_DEV_DELAY;
static void     *vds_state;

static uint_t   vd_file_write_flags = VD_FILE_WRITE_FLAGS;

static short    vd_scsi_rdwr_timeout = VD_SCSI_RDWR_TIMEOUT;
static int      vd_scsi_debug = USCSI_SILENT;

/*
 * Tunable to define the behavior of the service domain if the vdisk server
 * fails to reset disk exclusive access when an LDC channel is reset. When an
 * LDC channel is reset the vdisk server will try to reset disk exclusive
 * access by releasing any SCSI-2 reservation or resetting the disk. If these
 * actions fail then the default behavior (vd_reset_access_failure = 0) is to
 * print a warning message. This default behavior can be changed by setting
 * the vd_reset_access_failure variable to A_REBOOT (= 0x1), which will cause
 * the service domain to reboot, or to A_DUMP (= 0x5), which will cause the
 * service domain to panic. In both cases, the reset of the service domain
 * should trigger a reset of the SCSI buses and hopefully clear any SCSI-2
 * reservation.
 */
static int      vd_reset_access_failure = 0;

/*
 * Tunable for backward compatibility. When this variable is set to B_TRUE,
 * all disk volumes (ZFS, SVM, VxVM volumes) will be exported as single
 * slice disks whether or not they have the "slice" option set. This is
 * to provide a simple backward compatibility mechanism when upgrading
 * the vds driver and using a domain configuration created before the
 * "slice" option was available.
 */
static boolean_t vd_volume_force_slice = B_FALSE;

/*
 * The label of disk images created with some earlier versions of the virtual
 * disk software is not entirely correct and has an incorrect v_sanity field
 * (usually 0) instead of VTOC_SANE. This creates a compatibility problem with
 * these images because we are now validating that the disk label (and the
 * sanity) is correct when a disk image is opened.
 *
 * This tunable is set to false to not validate the sanity field and ensure
 * compatibility. If the tunable is set to true, we will do a strict checking
 * of the sanity but this can create compatibility problems with old disk
 * images.
 */
static boolean_t vd_dskimg_validate_sanity = B_FALSE;

/*
 * Enables the use of LDC_DIRECT_MAP when mapping in imported descriptor rings.
 */
static boolean_t vd_direct_mapped_drings = B_TRUE;

/*
 * When a backend is exported as a single-slice disk then we entirely fake
 * its disk label. So it can be exported either with a VTOC label or with
 * an EFI label. If vd_slice_label is set to VD_DISK_LABEL_VTOC then all
 * single-slice disks will be exported with a VTOC label; and if it is set
 * to VD_DISK_LABEL_EFI then all single-slice disks will be exported with
 * an EFI label.
 *
 * If vd_slice_label is set to VD_DISK_LABEL_UNK and the backend is a disk
 * or volume device then it will be exported with the same type of label as
 * defined on the device. Otherwise if the backend is a file then it will be
 * exported with the disk label type set in the vd_file_slice_label variable.
 *
 * Note that if the backend size is greater than 1TB then it will always be
 * exported with an EFI label no matter what the setting is.
 */
static vd_disk_label_t vd_slice_label = VD_DISK_LABEL_UNK;

static vd_disk_label_t vd_file_slice_label = VD_DISK_LABEL_VTOC;

/*
 * Tunable for backward compatibility. If this variable is set to B_TRUE then
 * single-slice disks are exported as disks with only one slice instead of
 * faking a complete disk partitioning.
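 * (In either case the backend itself is presented as slice 0 of the exported
 * disk, while the fake label supplies the rest of the partitioning that
 * clients expect to see.)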
596 */ 597 static boolean_t vd_slice_single_slice = B_FALSE; 598 599 /* 600 * Supported protocol version pairs, from highest (newest) to lowest (oldest) 601 * 602 * Each supported major version should appear only once, paired with (and only 603 * with) its highest supported minor version number (as the protocol requires 604 * supporting all lower minor version numbers as well) 605 */ 606 static const vio_ver_t vds_version[] = {{1, 1}}; 607 static const size_t vds_num_versions = 608 sizeof (vds_version)/sizeof (vds_version[0]); 609 610 static void vd_free_dring_task(vd_t *vdp); 611 static int vd_setup_vd(vd_t *vd); 612 static int vd_setup_single_slice_disk(vd_t *vd); 613 static int vd_setup_slice_image(vd_t *vd); 614 static int vd_setup_disk_image(vd_t *vd); 615 static int vd_backend_check_size(vd_t *vd); 616 static boolean_t vd_enabled(vd_t *vd); 617 static ushort_t vd_lbl2cksum(struct dk_label *label); 618 static int vd_dskimg_validate_geometry(vd_t *vd); 619 static boolean_t vd_dskimg_is_iso_image(vd_t *vd); 620 static void vd_set_exported_operations(vd_t *vd); 621 static void vd_reset_access(vd_t *vd); 622 static int vd_backend_ioctl(vd_t *vd, int cmd, caddr_t arg); 623 static int vds_efi_alloc_and_read(vd_t *, efi_gpt_t **, efi_gpe_t **); 624 static void vds_efi_free(vd_t *, efi_gpt_t *, efi_gpe_t *); 625 static void vds_driver_types_free(vds_t *vds); 626 static void vd_vtocgeom_to_label(struct extvtoc *vtoc, struct dk_geom *geom, 627 struct dk_label *label); 628 static void vd_label_to_vtocgeom(struct dk_label *label, struct extvtoc *vtoc, 629 struct dk_geom *geom); 630 static boolean_t vd_slice_geom_isvalid(vd_t *vd, struct dk_geom *geom); 631 static boolean_t vd_slice_vtoc_isvalid(vd_t *vd, struct extvtoc *vtoc); 632 633 extern int is_pseudo_device(dev_info_t *); 634 635 /* 636 * Function: 637 * vd_get_readable_size 638 * 639 * Description: 640 * Convert a given size in bytes to a human readable format in 641 * kilobytes, megabytes, gigabytes or terabytes. 642 * 643 * Parameters: 644 * full_size - the size to convert in bytes. 645 * size - the converted size. 646 * unit - the unit of the converted size: 'K' (kilobyte), 647 * 'M' (Megabyte), 'G' (Gigabyte), 'T' (Terabyte). 648 * 649 * Return Code: 650 * none 651 */ 652 static void 653 vd_get_readable_size(size_t full_size, size_t *size, char *unit) 654 { 655 if (full_size < (1ULL << 20)) { 656 *size = full_size >> 10; 657 *unit = 'K'; /* Kilobyte */ 658 } else if (full_size < (1ULL << 30)) { 659 *size = full_size >> 20; 660 *unit = 'M'; /* Megabyte */ 661 } else if (full_size < (1ULL << 40)) { 662 *size = full_size >> 30; 663 *unit = 'G'; /* Gigabyte */ 664 } else { 665 *size = full_size >> 40; 666 *unit = 'T'; /* Terabyte */ 667 } 668 } 669 670 /* 671 * Function: 672 * vd_dskimg_io_params 673 * 674 * Description: 675 * Convert virtual disk I/O parameters (slice, block, length) to 676 * (offset, length) relative to the disk image and according to 677 * the virtual disk partitioning. 678 * 679 * Parameters: 680 * vd - disk on which the operation is performed. 681 * slice - slice to which is the I/O parameters apply. 682 * VD_SLICE_NONE indicates that parameters are 683 * are relative to the entire virtual disk. 684 * blkp - pointer to the starting block relative to the 685 * slice; return the starting block relative to 686 * the disk image. 687 * lenp - pointer to the number of bytes requested; return 688 * the number of bytes that can effectively be used. 
689 * 690 * Return Code: 691 * 0 - I/O parameters have been successfully converted; 692 * blkp and lenp point to the converted values. 693 * ENODATA - no data are available for the given I/O parameters; 694 * This occurs if the starting block is past the limit 695 * of the slice. 696 * EINVAL - I/O parameters are invalid. 697 */ 698 static int 699 vd_dskimg_io_params(vd_t *vd, int slice, size_t *blkp, size_t *lenp) 700 { 701 size_t blk = *blkp; 702 size_t len = *lenp; 703 size_t offset, maxlen; 704 705 ASSERT(vd->file || VD_DSKIMG(vd)); 706 ASSERT(len > 0); 707 708 /* 709 * If a file is exported as a slice then we don't care about the vtoc. 710 * In that case, the vtoc is a fake mainly to make newfs happy and we 711 * handle any I/O as a raw disk access so that we can have access to the 712 * entire backend. 713 */ 714 if (vd->vdisk_type == VD_DISK_TYPE_SLICE || slice == VD_SLICE_NONE) { 715 /* raw disk access */ 716 offset = blk * DEV_BSIZE; 717 if (offset >= vd->dskimg_size) { 718 /* offset past the end of the disk */ 719 PR0("offset (0x%lx) >= size (0x%lx)", 720 offset, vd->dskimg_size); 721 return (ENODATA); 722 } 723 maxlen = vd->dskimg_size - offset; 724 } else { 725 ASSERT(slice >= 0 && slice < V_NUMPAR); 726 727 /* 728 * v1.0 vDisk clients depended on the server not verifying 729 * the label of a unformatted disk. This "feature" is 730 * maintained for backward compatibility but all versions 731 * from v1.1 onwards must do the right thing. 732 */ 733 if (vd->vdisk_label == VD_DISK_LABEL_UNK && 734 vio_ver_is_supported(vd->version, 1, 1)) { 735 (void) vd_dskimg_validate_geometry(vd); 736 if (vd->vdisk_label == VD_DISK_LABEL_UNK) { 737 PR0("Unknown disk label, can't do I/O " 738 "from slice %d", slice); 739 return (EINVAL); 740 } 741 } 742 743 if (vd->vdisk_label == VD_DISK_LABEL_VTOC) { 744 ASSERT(vd->vtoc.v_sectorsz == DEV_BSIZE); 745 } else { 746 ASSERT(vd->vdisk_label == VD_DISK_LABEL_EFI); 747 ASSERT(vd->vdisk_block_size == DEV_BSIZE); 748 } 749 750 if (blk >= vd->slices[slice].nblocks) { 751 /* address past the end of the slice */ 752 PR0("req_addr (0x%lx) >= psize (0x%lx)", 753 blk, vd->slices[slice].nblocks); 754 return (ENODATA); 755 } 756 757 offset = (vd->slices[slice].start + blk) * DEV_BSIZE; 758 maxlen = (vd->slices[slice].nblocks - blk) * DEV_BSIZE; 759 } 760 761 /* 762 * If the requested size is greater than the size 763 * of the partition, truncate the read/write. 764 */ 765 if (len > maxlen) { 766 PR0("I/O size truncated to %lu bytes from %lu bytes", 767 maxlen, len); 768 len = maxlen; 769 } 770 771 /* 772 * We have to ensure that we are reading/writing into the mmap 773 * range. If we have a partial disk image (e.g. an image of 774 * s0 instead s2) the system can try to access slices that 775 * are not included into the disk image. 776 */ 777 if ((offset + len) > vd->dskimg_size) { 778 PR0("offset + nbytes (0x%lx + 0x%lx) > " 779 "dskimg_size (0x%lx)", offset, len, vd->dskimg_size); 780 return (EINVAL); 781 } 782 783 *blkp = offset / DEV_BSIZE; 784 *lenp = len; 785 786 return (0); 787 } 788 789 /* 790 * Function: 791 * vd_dskimg_rw 792 * 793 * Description: 794 * Read or write to a disk image. It handles the case where the disk 795 * image is a file or a volume exported as a full disk or a file 796 * exported as single-slice disk. Read or write to volumes exported as 797 * single slice disks are done by directly using the ldi interface. 798 * 799 * Parameters: 800 * vd - disk on which the operation is performed. 
801 * slice - slice on which the operation is performed, 802 * VD_SLICE_NONE indicates that the operation 803 * is done using an absolute disk offset. 804 * operation - operation to execute: read (VD_OP_BREAD) or 805 * write (VD_OP_BWRITE). 806 * data - buffer where data are read to or written from. 807 * blk - starting block for the operation. 808 * len - number of bytes to read or write. 809 * 810 * Return Code: 811 * n >= 0 - success, n indicates the number of bytes read 812 * or written. 813 * -1 - error. 814 */ 815 static ssize_t 816 vd_dskimg_rw(vd_t *vd, int slice, int operation, caddr_t data, size_t offset, 817 size_t len) 818 { 819 caddr_t maddr; 820 size_t moffset, mlen, n; 821 uint_t smflags; 822 enum seg_rw srw; 823 ssize_t resid; 824 struct buf buf; 825 int status; 826 827 ASSERT(vd->file || VD_DSKIMG(vd)); 828 ASSERT(len > 0); 829 830 if ((status = vd_dskimg_io_params(vd, slice, &offset, &len)) != 0) 831 return ((status == ENODATA)? 0: -1); 832 833 offset *= DEV_BSIZE; 834 835 if (vd->volume) { 836 ASSERT(offset % DEV_BSIZE == 0); 837 838 bioinit(&buf); 839 buf.b_flags = B_BUSY | 840 ((operation == VD_OP_BREAD)? B_READ : B_WRITE); 841 buf.b_bcount = len; 842 buf.b_lblkno = offset / DEV_BSIZE; 843 buf.b_edev = vd->dev[0]; 844 buf.b_un.b_addr = data; 845 846 /* 847 * We use ldi_strategy() and not ldi_read()/ldi_write() because 848 * the read/write functions of the underlying driver may try to 849 * lock pages of the data buffer, and this requires the data 850 * buffer to be kmem_alloc'ed (and not allocated on the stack). 851 * 852 * Also using ldi_strategy() ensures that writes are immediatly 853 * commited and not cached as this may be the case with 854 * ldi_write() (for example with a ZFS volume). 855 */ 856 if (ldi_strategy(vd->ldi_handle[0], &buf) != 0) { 857 biofini(&buf); 858 return (-1); 859 } 860 861 if (biowait(&buf) != 0) { 862 biofini(&buf); 863 return (-1); 864 } 865 866 resid = buf.b_resid; 867 biofini(&buf); 868 869 ASSERT(resid <= len); 870 return (len - resid); 871 } 872 873 ASSERT(vd->file); 874 875 srw = (operation == VD_OP_BREAD)? S_READ : S_WRITE; 876 smflags = (operation == VD_OP_BREAD)? 0 : 877 (SM_WRITE | vd_file_write_flags); 878 n = len; 879 880 do { 881 /* 882 * segmap_getmapflt() returns a MAXBSIZE chunk which is 883 * MAXBSIZE aligned. 884 */ 885 moffset = offset & MAXBOFFSET; 886 mlen = MIN(MAXBSIZE - moffset, n); 887 maddr = segmap_getmapflt(segkmap, vd->file_vnode, offset, 888 mlen, 1, srw); 889 /* 890 * Fault in the pages so we can check for error and ensure 891 * that we can safely used the mapped address. 892 */ 893 if (segmap_fault(kas.a_hat, segkmap, maddr, mlen, 894 F_SOFTLOCK, srw) != 0) { 895 (void) segmap_release(segkmap, maddr, 0); 896 return (-1); 897 } 898 899 if (operation == VD_OP_BREAD) 900 bcopy(maddr + moffset, data, mlen); 901 else 902 bcopy(data, maddr + moffset, mlen); 903 904 if (segmap_fault(kas.a_hat, segkmap, maddr, mlen, 905 F_SOFTUNLOCK, srw) != 0) { 906 (void) segmap_release(segkmap, maddr, 0); 907 return (-1); 908 } 909 if (segmap_release(segkmap, maddr, smflags) != 0) 910 return (-1); 911 n -= mlen; 912 offset += mlen; 913 data += mlen; 914 915 } while (n > 0); 916 917 return (len); 918 } 919 920 /* 921 * Function: 922 * vd_build_default_label 923 * 924 * Description: 925 * Return a default label for a given disk size. This is used when the disk 926 * does not have a valid VTOC so that the user can get a valid default 927 * configuration. 
The default label has all slice sizes set to 0 (except
 * slice 2 which is the entire disk) to force the user to write a valid
 * label onto the disk image.
 *
 * Parameters:
 *      disk_size       - the disk size in bytes
 *      label           - the returned default label.
 *
 * Return Code:
 *      none.
 */
static void
vd_build_default_label(size_t disk_size, struct dk_label *label)
{
        size_t size;
        char unit;

        bzero(label, sizeof (struct dk_label));

        /*
         * Ideally we would like the cylinder size (nsect * nhead) to be the
         * same whatever the disk size is. That way the VTOC label could be
         * easily updated in case the disk size is increased (keeping the
         * same cylinder size allows preserving the existing partitioning
         * when updating the VTOC label). But it is not possible to have
         * a fixed cylinder size and to cover all disk sizes.
         *
         * So we define different cylinder sizes depending on the disk size.
         * The cylinder size is chosen so that we don't have too few cylinders
         * for a small disk image, or so many on a big disk image that you
         * waste space for backup superblocks or cylinder group structures.
         * Also we must have a reasonable number of cylinders and sectors so
         * that newfs can run using default values.
         *
         * +-----------+--------+---------+--------+
         * | disk_size |  < 2MB | 2MB-8GB | >= 8GB |
         * +-----------+--------+---------+--------+
         * | nhead     |    1   |    1    |   96   |
         * | nsect     |   200  |   600   |  768   |
         * +-----------+--------+---------+--------+
         *
         * Other parameters are computed from these values:
         *
         *      pcyl = disk_size / (nhead * nsect * 512)
         *      acyl = (pcyl > 2)? 2 : 0
         *      ncyl = pcyl - acyl
         *
         * The maximum number of cylinders is 65535 so this allows defining
         * a geometry for a disk size up to 65535 * 96 * 768 * 512 = 2.24 TB
         * which is more than enough to cover the maximum size allowed by the
         * extended VTOC format (2TB).
         */

        if (disk_size >= 8 * ONE_GIGABYTE) {

                label->dkl_nhead = 96;
                label->dkl_nsect = 768;

        } else if (disk_size >= 2 * ONE_MEGABYTE) {

                label->dkl_nhead = 1;
                label->dkl_nsect = 600;

        } else {

                label->dkl_nhead = 1;
                label->dkl_nsect = 200;
        }

        label->dkl_pcyl = disk_size /
            (label->dkl_nsect * label->dkl_nhead * DEV_BSIZE);

        if (label->dkl_pcyl == 0)
                label->dkl_pcyl = 1;

        label->dkl_acyl = 0;

        if (label->dkl_pcyl > 2)
                label->dkl_acyl = 2;

        label->dkl_ncyl = label->dkl_pcyl - label->dkl_acyl;
        label->dkl_write_reinstruct = 0;
        label->dkl_read_reinstruct = 0;
        label->dkl_rpm = 7200;
        label->dkl_apc = 0;
        label->dkl_intrlv = 0;

        PR0("requested disk size: %ld bytes\n", disk_size);
        PR0("setup: ncyl=%d nhead=%d nsec=%d\n", label->dkl_pcyl,
            label->dkl_nhead, label->dkl_nsect);
        PR0("provided disk size: %ld bytes\n", (uint64_t)
            (label->dkl_pcyl * label->dkl_nhead *
            label->dkl_nsect * DEV_BSIZE));

        vd_get_readable_size(disk_size, &size, &unit);

        /*
         * We must have a correct label name otherwise format(1m) will
         * not recognize the disk as labeled.
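         * For example (illustrative): an 8 GB disk image gets nhead=96 and
         * nsect=768, so dkl_pcyl = 8589934592 / (96 * 768 * 512) = 227,
         * dkl_acyl = 2 and dkl_ncyl = 225, and the label built below reads
         * "SUN-DiskImage-8GB cyl 225 alt 2 hd 96 sec 768".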
1026 */ 1027 (void) snprintf(label->dkl_asciilabel, LEN_DKL_ASCII, 1028 "SUN-DiskImage-%ld%cB cyl %d alt %d hd %d sec %d", 1029 size, unit, 1030 label->dkl_ncyl, label->dkl_acyl, label->dkl_nhead, 1031 label->dkl_nsect); 1032 1033 /* default VTOC */ 1034 label->dkl_vtoc.v_version = V_EXTVERSION; 1035 label->dkl_vtoc.v_nparts = V_NUMPAR; 1036 label->dkl_vtoc.v_sanity = VTOC_SANE; 1037 label->dkl_vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_tag = V_BACKUP; 1038 label->dkl_map[VD_ENTIRE_DISK_SLICE].dkl_cylno = 0; 1039 label->dkl_map[VD_ENTIRE_DISK_SLICE].dkl_nblk = label->dkl_ncyl * 1040 label->dkl_nhead * label->dkl_nsect; 1041 label->dkl_magic = DKL_MAGIC; 1042 label->dkl_cksum = vd_lbl2cksum(label); 1043 } 1044 1045 /* 1046 * Function: 1047 * vd_dskimg_set_vtoc 1048 * 1049 * Description: 1050 * Set the vtoc of a disk image by writing the label and backup 1051 * labels into the disk image backend. 1052 * 1053 * Parameters: 1054 * vd - disk on which the operation is performed. 1055 * label - the data to be written. 1056 * 1057 * Return Code: 1058 * 0 - success. 1059 * n > 0 - error, n indicates the errno code. 1060 */ 1061 static int 1062 vd_dskimg_set_vtoc(vd_t *vd, struct dk_label *label) 1063 { 1064 size_t blk, sec, cyl, head, cnt; 1065 1066 ASSERT(VD_DSKIMG(vd)); 1067 1068 if (VD_DSKIMG_LABEL_WRITE(vd, label) < 0) { 1069 PR0("fail to write disk label"); 1070 return (EIO); 1071 } 1072 1073 /* 1074 * Backup labels are on the last alternate cylinder's 1075 * first five odd sectors. 1076 */ 1077 if (label->dkl_acyl == 0) { 1078 PR0("no alternate cylinder, can not store backup labels"); 1079 return (0); 1080 } 1081 1082 cyl = label->dkl_ncyl + label->dkl_acyl - 1; 1083 head = label->dkl_nhead - 1; 1084 1085 blk = (cyl * ((label->dkl_nhead * label->dkl_nsect) - label->dkl_apc)) + 1086 (head * label->dkl_nsect); 1087 1088 /* 1089 * Write the backup labels. Make sure we don't try to write past 1090 * the last cylinder. 1091 */ 1092 sec = 1; 1093 1094 for (cnt = 0; cnt < VD_DSKIMG_NUM_BACKUP; cnt++) { 1095 1096 if (sec >= label->dkl_nsect) { 1097 PR0("not enough sector to store all backup labels"); 1098 return (0); 1099 } 1100 1101 if (vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BWRITE, 1102 (caddr_t)label, blk + sec, sizeof (struct dk_label)) < 0) { 1103 PR0("error writing backup label at block %lu\n", 1104 blk + sec); 1105 return (EIO); 1106 } 1107 1108 PR1("wrote backup label at block %lu\n", blk + sec); 1109 1110 sec += 2; 1111 } 1112 1113 return (0); 1114 } 1115 1116 /* 1117 * Function: 1118 * vd_dskimg_get_devid_block 1119 * 1120 * Description: 1121 * Return the block number where the device id is stored. 1122 * 1123 * Parameters: 1124 * vd - disk on which the operation is performed. 1125 * blkp - pointer to the block number 1126 * 1127 * Return Code: 1128 * 0 - success 1129 * ENOSPC - disk has no space to store a device id 1130 */ 1131 static int 1132 vd_dskimg_get_devid_block(vd_t *vd, size_t *blkp) 1133 { 1134 diskaddr_t spc, head, cyl; 1135 1136 ASSERT(VD_DSKIMG(vd)); 1137 1138 if (vd->vdisk_label == VD_DISK_LABEL_UNK) { 1139 /* 1140 * If no label is defined we don't know where to find 1141 * a device id. 
1142 */ 1143 return (ENOSPC); 1144 } 1145 1146 if (vd->vdisk_label == VD_DISK_LABEL_EFI) { 1147 /* 1148 * For an EFI disk, the devid is at the beginning of 1149 * the reserved slice 1150 */ 1151 if (vd->efi_reserved == -1) { 1152 PR0("EFI disk has no reserved slice"); 1153 return (ENOSPC); 1154 } 1155 1156 *blkp = vd->slices[vd->efi_reserved].start; 1157 return (0); 1158 } 1159 1160 ASSERT(vd->vdisk_label == VD_DISK_LABEL_VTOC); 1161 1162 /* this geometry doesn't allow us to have a devid */ 1163 if (vd->dk_geom.dkg_acyl < 2) { 1164 PR0("not enough alternate cylinder available for devid " 1165 "(acyl=%u)", vd->dk_geom.dkg_acyl); 1166 return (ENOSPC); 1167 } 1168 1169 /* the devid is in on the track next to the last cylinder */ 1170 cyl = vd->dk_geom.dkg_ncyl + vd->dk_geom.dkg_acyl - 2; 1171 spc = vd->dk_geom.dkg_nhead * vd->dk_geom.dkg_nsect; 1172 head = vd->dk_geom.dkg_nhead - 1; 1173 1174 *blkp = (cyl * (spc - vd->dk_geom.dkg_apc)) + 1175 (head * vd->dk_geom.dkg_nsect) + 1; 1176 1177 return (0); 1178 } 1179 1180 /* 1181 * Return the checksum of a disk block containing an on-disk devid. 1182 */ 1183 static uint_t 1184 vd_dkdevid2cksum(struct dk_devid *dkdevid) 1185 { 1186 uint_t chksum, *ip; 1187 int i; 1188 1189 chksum = 0; 1190 ip = (void *)dkdevid; 1191 for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int)); i++) 1192 chksum ^= ip[i]; 1193 1194 return (chksum); 1195 } 1196 1197 /* 1198 * Function: 1199 * vd_dskimg_read_devid 1200 * 1201 * Description: 1202 * Read the device id stored on a disk image. 1203 * 1204 * Parameters: 1205 * vd - disk on which the operation is performed. 1206 * devid - the return address of the device ID. 1207 * 1208 * Return Code: 1209 * 0 - success 1210 * EIO - I/O error while trying to access the disk image 1211 * EINVAL - no valid device id was found 1212 * ENOSPC - disk has no space to store a device id 1213 */ 1214 static int 1215 vd_dskimg_read_devid(vd_t *vd, ddi_devid_t *devid) 1216 { 1217 struct dk_devid *dkdevid; 1218 size_t blk; 1219 uint_t chksum; 1220 int status, sz; 1221 1222 if ((status = vd_dskimg_get_devid_block(vd, &blk)) != 0) 1223 return (status); 1224 1225 dkdevid = kmem_zalloc(DEV_BSIZE, KM_SLEEP); 1226 1227 /* get the devid */ 1228 if ((vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BREAD, (caddr_t)dkdevid, blk, 1229 DEV_BSIZE)) < 0) { 1230 PR0("error reading devid block at %lu", blk); 1231 status = EIO; 1232 goto done; 1233 } 1234 1235 /* validate the revision */ 1236 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 1237 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 1238 PR0("invalid devid found at block %lu (bad revision)", blk); 1239 status = EINVAL; 1240 goto done; 1241 } 1242 1243 /* compute checksum */ 1244 chksum = vd_dkdevid2cksum(dkdevid); 1245 1246 /* compare the checksums */ 1247 if (DKD_GETCHKSUM(dkdevid) != chksum) { 1248 PR0("invalid devid found at block %lu (bad checksum)", blk); 1249 status = EINVAL; 1250 goto done; 1251 } 1252 1253 /* validate the device id */ 1254 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 1255 PR0("invalid devid found at block %lu", blk); 1256 status = EINVAL; 1257 goto done; 1258 } 1259 1260 PR1("devid read at block %lu", blk); 1261 1262 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 1263 *devid = kmem_alloc(sz, KM_SLEEP); 1264 bcopy(&dkdevid->dkd_devid, *devid, sz); 1265 1266 done: 1267 kmem_free(dkdevid, DEV_BSIZE); 1268 return (status); 1269 1270 } 1271 1272 /* 1273 * Function: 1274 * vd_dskimg_write_devid 1275 * 1276 * Description: 1277 * Write a device id into 
disk image. 1278 * 1279 * Parameters: 1280 * vd - disk on which the operation is performed. 1281 * devid - the device ID to store. 1282 * 1283 * Return Code: 1284 * 0 - success 1285 * EIO - I/O error while trying to access the disk image 1286 * ENOSPC - disk has no space to store a device id 1287 */ 1288 static int 1289 vd_dskimg_write_devid(vd_t *vd, ddi_devid_t devid) 1290 { 1291 struct dk_devid *dkdevid; 1292 uint_t chksum; 1293 size_t blk; 1294 int status; 1295 1296 if (devid == NULL) { 1297 /* nothing to write */ 1298 return (0); 1299 } 1300 1301 if ((status = vd_dskimg_get_devid_block(vd, &blk)) != 0) 1302 return (status); 1303 1304 dkdevid = kmem_zalloc(DEV_BSIZE, KM_SLEEP); 1305 1306 /* set revision */ 1307 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB; 1308 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB; 1309 1310 /* copy devid */ 1311 bcopy(devid, &dkdevid->dkd_devid, ddi_devid_sizeof(devid)); 1312 1313 /* compute checksum */ 1314 chksum = vd_dkdevid2cksum(dkdevid); 1315 1316 /* set checksum */ 1317 DKD_FORMCHKSUM(chksum, dkdevid); 1318 1319 /* store the devid */ 1320 if ((status = vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BWRITE, 1321 (caddr_t)dkdevid, blk, DEV_BSIZE)) < 0) { 1322 PR0("Error writing devid block at %lu", blk); 1323 status = EIO; 1324 } else { 1325 PR1("devid written at block %lu", blk); 1326 status = 0; 1327 } 1328 1329 kmem_free(dkdevid, DEV_BSIZE); 1330 return (status); 1331 } 1332 1333 /* 1334 * Function: 1335 * vd_do_scsi_rdwr 1336 * 1337 * Description: 1338 * Read or write to a SCSI disk using an absolute disk offset. 1339 * 1340 * Parameters: 1341 * vd - disk on which the operation is performed. 1342 * operation - operation to execute: read (VD_OP_BREAD) or 1343 * write (VD_OP_BWRITE). 1344 * data - buffer where data are read to or written from. 1345 * blk - starting block for the operation. 1346 * len - number of bytes to read or write. 1347 * 1348 * Return Code: 1349 * 0 - success 1350 * n != 0 - error. 1351 */ 1352 static int 1353 vd_do_scsi_rdwr(vd_t *vd, int operation, caddr_t data, size_t blk, size_t len) 1354 { 1355 struct uscsi_cmd ucmd; 1356 union scsi_cdb cdb; 1357 int nsectors, nblk; 1358 int max_sectors; 1359 int status, rval; 1360 1361 ASSERT(!vd->file); 1362 ASSERT(!vd->volume); 1363 ASSERT(vd->vdisk_block_size > 0); 1364 1365 max_sectors = vd->max_xfer_sz; 1366 nblk = (len / vd->vdisk_block_size); 1367 1368 if (len % vd->vdisk_block_size != 0) 1369 return (EINVAL); 1370 1371 /* 1372 * Build and execute the uscsi ioctl. We build a group0, group1 1373 * or group4 command as necessary, since some targets 1374 * do not support group1 commands. 1375 */ 1376 while (nblk) { 1377 1378 bzero(&ucmd, sizeof (ucmd)); 1379 bzero(&cdb, sizeof (cdb)); 1380 1381 nsectors = (max_sectors < nblk) ? max_sectors : nblk; 1382 1383 /* 1384 * Some of the optical drives on sun4v machines are ATAPI 1385 * devices which use Group 1 Read/Write commands so we need 1386 * to explicitly check a flag which is set when a domain 1387 * is bound. 
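                 *
                 * Group 0 (6-byte) CDBs can only address the first 2^21
                 * blocks and carry an 8-bit transfer length, group 1
                 * (10-byte) CDBs use a 32-bit LBA with a 16-bit transfer
                 * length, and group 4 (16-byte) CDBs are needed once the
                 * LBA no longer fits in 32 bits; this matches the checks
                 * below.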
1388 */ 1389 if (blk < (2 << 20) && nsectors <= 0xff && !vd->is_atapi_dev) { 1390 FORMG0ADDR(&cdb, blk); 1391 FORMG0COUNT(&cdb, (uchar_t)nsectors); 1392 ucmd.uscsi_cdblen = CDB_GROUP0; 1393 } else if (blk > 0xffffffff) { 1394 FORMG4LONGADDR(&cdb, blk); 1395 FORMG4COUNT(&cdb, nsectors); 1396 ucmd.uscsi_cdblen = CDB_GROUP4; 1397 cdb.scc_cmd |= SCMD_GROUP4; 1398 } else { 1399 FORMG1ADDR(&cdb, blk); 1400 FORMG1COUNT(&cdb, nsectors); 1401 ucmd.uscsi_cdblen = CDB_GROUP1; 1402 cdb.scc_cmd |= SCMD_GROUP1; 1403 } 1404 ucmd.uscsi_cdb = (caddr_t)&cdb; 1405 ucmd.uscsi_bufaddr = data; 1406 ucmd.uscsi_buflen = nsectors * vd->block_size; 1407 ucmd.uscsi_timeout = vd_scsi_rdwr_timeout; 1408 /* 1409 * Set flags so that the command is isolated from normal 1410 * commands and no error message is printed. 1411 */ 1412 ucmd.uscsi_flags = USCSI_ISOLATE | USCSI_SILENT; 1413 1414 if (operation == VD_OP_BREAD) { 1415 cdb.scc_cmd |= SCMD_READ; 1416 ucmd.uscsi_flags |= USCSI_READ; 1417 } else { 1418 cdb.scc_cmd |= SCMD_WRITE; 1419 } 1420 1421 status = ldi_ioctl(vd->ldi_handle[VD_ENTIRE_DISK_SLICE], 1422 USCSICMD, (intptr_t)&ucmd, (vd->open_flags | FKIOCTL), 1423 kcred, &rval); 1424 1425 if (status == 0) 1426 status = ucmd.uscsi_status; 1427 1428 if (status != 0) 1429 break; 1430 1431 /* 1432 * Check if partial DMA breakup is required. If so, reduce 1433 * the request size by half and retry the last request. 1434 */ 1435 if (ucmd.uscsi_resid == ucmd.uscsi_buflen) { 1436 max_sectors >>= 1; 1437 if (max_sectors <= 0) { 1438 status = EIO; 1439 break; 1440 } 1441 continue; 1442 } 1443 1444 if (ucmd.uscsi_resid != 0) { 1445 status = EIO; 1446 break; 1447 } 1448 1449 blk += nsectors; 1450 nblk -= nsectors; 1451 data += nsectors * vd->vdisk_block_size; /* SECSIZE */ 1452 } 1453 1454 return (status); 1455 } 1456 1457 /* 1458 * Function: 1459 * vd_scsi_rdwr 1460 * 1461 * Description: 1462 * Wrapper function to read or write to a SCSI disk using an absolute 1463 * disk offset. It checks the blocksize of the underlying device and, 1464 * if necessary, adjusts the buffers accordingly before calling 1465 * vd_do_scsi_rdwr() to do the actual read or write. 1466 * 1467 * Parameters: 1468 * vd - disk on which the operation is performed. 1469 * operation - operation to execute: read (VD_OP_BREAD) or 1470 * write (VD_OP_BWRITE). 1471 * data - buffer where data are read to or written from. 1472 * blk - starting block for the operation. 1473 * len - number of bytes to read or write. 1474 * 1475 * Return Code: 1476 * 0 - success 1477 * n != 0 - error. 1478 */ 1479 static int 1480 vd_scsi_rdwr(vd_t *vd, int operation, caddr_t data, size_t vblk, size_t vlen) 1481 { 1482 int rv; 1483 1484 size_t pblk; /* physical device block number of data on device */ 1485 size_t delta; /* relative offset between pblk and vblk */ 1486 size_t pnblk; /* number of physical blocks to be read from device */ 1487 size_t plen; /* length of data to be read from physical device */ 1488 char *buf; /* buffer area to fit physical device's block size */ 1489 1490 if (vd->block_size == 0) { 1491 /* 1492 * The block size was not available during the attach, 1493 * try to update it now. 
1494 */ 1495 if (vd_backend_check_size(vd) != 0) 1496 return (EIO); 1497 } 1498 1499 /* 1500 * If the vdisk block size and the block size of the underlying device 1501 * match we can skip straight to vd_do_scsi_rdwr(), otherwise we need 1502 * to create a buffer large enough to handle the device's block size 1503 * and adjust the block to be read from and the amount of data to 1504 * read to correspond with the device's block size. 1505 */ 1506 if (vd->vdisk_block_size == vd->block_size) 1507 return (vd_do_scsi_rdwr(vd, operation, data, vblk, vlen)); 1508 1509 if (vd->vdisk_block_size > vd->block_size) 1510 return (EINVAL); 1511 1512 /* 1513 * Writing of physical block sizes larger than the virtual block size 1514 * is not supported. This would be added if/when support for guests 1515 * writing to DVDs is implemented. 1516 */ 1517 if (operation == VD_OP_BWRITE) 1518 return (ENOTSUP); 1519 1520 /* BEGIN CSTYLED */ 1521 /* 1522 * Below is a diagram showing the relationship between the physical 1523 * and virtual blocks. If the virtual blocks marked by 'X' below are 1524 * requested, then the physical blocks denoted by 'Y' are read. 1525 * 1526 * vblk 1527 * | vlen 1528 * |<--------------->| 1529 * v v 1530 * --+--+--+--+--+--+--+--+--+--+--+--+--+--+--+- virtual disk: 1531 * | | | |XX|XX|XX|XX|XX|XX| | | | | | } block size is 1532 * --+--+--+--+--+--+--+--+--+--+--+--+--+--+--+- vd->vdisk_block_size 1533 * : : : : 1534 * >:==:< delta : : 1535 * : : : : 1536 * --+-----+-----+-----+-----+-----+-----+-----+-- physical disk: 1537 * | |YY:YY|YYYYY|YYYYY|YY:YY| | | } block size is 1538 * --+-----+-----+-----+-----+-----+-----+-----+-- vd->block_size 1539 * ^ ^ 1540 * |<--------------------->| 1541 * | plen 1542 * pblk 1543 */ 1544 /* END CSTYLED */ 1545 pblk = (vblk * vd->vdisk_block_size) / vd->block_size; 1546 delta = (vblk * vd->vdisk_block_size) - (pblk * vd->block_size); 1547 pnblk = ((delta + vlen - 1) / vd->block_size) + 1; 1548 plen = pnblk * vd->block_size; 1549 1550 PR2("vblk %lx:pblk %lx: vlen %ld:plen %ld", vblk, pblk, vlen, plen); 1551 1552 buf = kmem_zalloc(sizeof (caddr_t) * plen, KM_SLEEP); 1553 rv = vd_do_scsi_rdwr(vd, operation, (caddr_t)buf, pblk, plen); 1554 bcopy(buf + delta, data, vlen); 1555 1556 kmem_free(buf, sizeof (caddr_t) * plen); 1557 1558 return (rv); 1559 } 1560 1561 /* 1562 * Function: 1563 * vd_slice_flabel_read 1564 * 1565 * Description: 1566 * This function simulates a read operation from the fake label of 1567 * a single-slice disk. 
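 *      Offsets below flabel_size return data from the fake label itself,
 *      offsets between flabel_size and flabel_limit * DEV_BSIZE read back
 *      as zeros, and reads starting at or beyond that limit return no data.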
1568 * 1569 * Parameters: 1570 * vd - single-slice disk to read from 1571 * data - buffer where data should be read to 1572 * offset - offset in byte where the read should start 1573 * length - number of bytes to read 1574 * 1575 * Return Code: 1576 * n >= 0 - success, n indicates the number of bytes read 1577 * -1 - error 1578 */ 1579 static ssize_t 1580 vd_slice_flabel_read(vd_t *vd, caddr_t data, size_t offset, size_t length) 1581 { 1582 size_t n = 0; 1583 uint_t limit = vd->flabel_limit * DEV_BSIZE; 1584 1585 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE); 1586 ASSERT(vd->flabel != NULL); 1587 1588 /* if offset is past the fake label limit there's nothing to read */ 1589 if (offset >= limit) 1590 return (0); 1591 1592 /* data with offset 0 to flabel_size are read from flabel */ 1593 if (offset < vd->flabel_size) { 1594 1595 if (offset + length <= vd->flabel_size) { 1596 bcopy(vd->flabel + offset, data, length); 1597 return (length); 1598 } 1599 1600 n = vd->flabel_size - offset; 1601 bcopy(vd->flabel + offset, data, n); 1602 data += n; 1603 } 1604 1605 /* data with offset from flabel_size to flabel_limit are all zeros */ 1606 if (offset + length <= limit) { 1607 bzero(data, length - n); 1608 return (length); 1609 } 1610 1611 bzero(data, limit - offset - n); 1612 return (limit - offset); 1613 } 1614 1615 /* 1616 * Function: 1617 * vd_slice_flabel_write 1618 * 1619 * Description: 1620 * This function simulates a write operation to the fake label of 1621 * a single-slice disk. Write operations are actually faked and return 1622 * success although the label is never changed. This is mostly to 1623 * simulate a successful label update. 1624 * 1625 * Parameters: 1626 * vd - single-slice disk to write to 1627 * data - buffer where data should be written from 1628 * offset - offset in byte where the write should start 1629 * length - number of bytes to written 1630 * 1631 * Return Code: 1632 * n >= 0 - success, n indicates the number of bytes written 1633 * -1 - error 1634 */ 1635 static ssize_t 1636 vd_slice_flabel_write(vd_t *vd, caddr_t data, size_t offset, size_t length) 1637 { 1638 uint_t limit = vd->flabel_limit * DEV_BSIZE; 1639 struct dk_label *label; 1640 struct dk_geom geom; 1641 struct extvtoc vtoc; 1642 1643 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE); 1644 ASSERT(vd->flabel != NULL); 1645 1646 if (offset >= limit) 1647 return (0); 1648 1649 /* 1650 * If this is a request to overwrite the VTOC disk label, check that 1651 * the new label is similar to the previous one and return that the 1652 * write was successful, but note that nothing is actually overwritten. 1653 */ 1654 if (vd->vdisk_label == VD_DISK_LABEL_VTOC && 1655 offset == 0 && length == DEV_BSIZE) { 1656 label = (void *)data; 1657 1658 /* check that this is a valid label */ 1659 if (label->dkl_magic != DKL_MAGIC || 1660 label->dkl_cksum != vd_lbl2cksum(label)) 1661 return (-1); 1662 1663 /* check the vtoc and geometry */ 1664 vd_label_to_vtocgeom(label, &vtoc, &geom); 1665 if (vd_slice_geom_isvalid(vd, &geom) && 1666 vd_slice_vtoc_isvalid(vd, &vtoc)) 1667 return (length); 1668 } 1669 1670 /* fail any other write */ 1671 return (-1); 1672 } 1673 1674 /* 1675 * Function: 1676 * vd_slice_fake_rdwr 1677 * 1678 * Description: 1679 * This function simulates a raw read or write operation to a single-slice 1680 * disk. It only handles the faked part of the operation i.e. I/Os to 1681 * blocks which have no mapping with the vdisk backend (I/Os to the 1682 * beginning and to the end of the vdisk). 
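 *
 *      In other words, the exported single-slice vdisk is laid out as a fake
 *      label area at the front (see the flabel macros above), then the
 *      backend blocks, then a faked area at the end of the disk (the backup
 *      VTOC labels or the EFI reserved/backup GPT area).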
 *
 *      The function returns 0 if the operation is completed and it has been
 *      entirely handled as a fake read or write. In that case, lengthp points
 *      to the number of bytes not read or written. Values returned by datap
 *      and blkp are undefined.
 *
 *      If the fake operation has succeeded but the read or write is not
 *      complete (i.e. the read/write operation extends beyond the blocks
 *      we fake) then the function returns EAGAIN and the datap, blkp and
 *      lengthp pointers point to the parameters for completing the operation.
 *
 *      In case of an error, for example if the slice is empty or parameters
 *      are invalid, then the function returns a non-zero value different
 *      from EAGAIN. In that case, the returned values of datap, blkp and
 *      lengthp are undefined.
 *
 * Parameters:
 *      vd              - single-slice disk on which the operation is performed
 *      slice           - slice on which the operation is performed,
 *                        VD_SLICE_NONE indicates that the operation
 *                        is done using an absolute disk offset.
 *      operation       - operation to execute: read (VD_OP_BREAD) or
 *                        write (VD_OP_BWRITE).
 *      datap           - pointer to the buffer where data are read to
 *                        or written from. Return the pointer where remaining
 *                        data have to be read to or written from.
 *      blkp            - pointer to the starting block for the operation.
 *                        Return the starting block relative to the vdisk
 *                        backend for the remaining operation.
 *      lengthp         - pointer to the number of bytes to read or write.
 *                        This should be a multiple of DEV_BSIZE. Return the
 *                        remaining number of bytes to read or write.
 *
 * Return Code:
 *      0               - read/write operation is completed
 *      EAGAIN          - read/write operation is not completed
 *      other values    - error
 */
static int
vd_slice_fake_rdwr(vd_t *vd, int slice, int operation, caddr_t *datap,
    size_t *blkp, size_t *lengthp)
{
        struct dk_label *label;
        caddr_t data;
        size_t blk, length, csize;
        size_t ablk, asize, aoff, alen;
        ssize_t n;
        int sec, status;

        ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE);
        ASSERT(slice != 0);

        data = *datap;
        blk = *blkp;
        length = *lengthp;

        /*
         * If this is not a raw I/O or an I/O from a full disk slice then
         * this is an I/O to/from an empty slice.
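         * (Only raw accesses (VD_SLICE_NONE) and the entire-disk slice, i.e.
         * s2 for a VTOC label or VD_EFI_WD_SLICE for an EFI label, can reach
         * blocks backed by real storage; slice-0 I/O maps straight to the
         * backend and is handled by the caller without coming through here.)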
1742 */ 1743 if (slice != VD_SLICE_NONE && 1744 (slice != VD_ENTIRE_DISK_SLICE || 1745 vd->vdisk_label != VD_DISK_LABEL_VTOC) && 1746 (slice != VD_EFI_WD_SLICE || 1747 vd->vdisk_label != VD_DISK_LABEL_EFI)) { 1748 return (EIO); 1749 } 1750 1751 if (length % DEV_BSIZE != 0) 1752 return (EINVAL); 1753 1754 /* handle any I/O with the fake label */ 1755 if (operation == VD_OP_BWRITE) 1756 n = vd_slice_flabel_write(vd, data, blk * DEV_BSIZE, length); 1757 else 1758 n = vd_slice_flabel_read(vd, data, blk * DEV_BSIZE, length); 1759 1760 if (n == -1) 1761 return (EINVAL); 1762 1763 ASSERT(n % DEV_BSIZE == 0); 1764 1765 /* adjust I/O arguments */ 1766 data += n; 1767 blk += n / DEV_BSIZE; 1768 length -= n; 1769 1770 /* check if there's something else to process */ 1771 if (length == 0) { 1772 status = 0; 1773 goto done; 1774 } 1775 1776 if (vd->vdisk_label == VD_DISK_LABEL_VTOC && 1777 slice == VD_ENTIRE_DISK_SLICE) { 1778 status = EAGAIN; 1779 goto done; 1780 } 1781 1782 if (vd->vdisk_label == VD_DISK_LABEL_EFI) { 1783 asize = EFI_MIN_RESV_SIZE + 33; 1784 ablk = vd->vdisk_size - asize; 1785 } else { 1786 ASSERT(vd->vdisk_label == VD_DISK_LABEL_VTOC); 1787 ASSERT(vd->dk_geom.dkg_apc == 0); 1788 1789 csize = vd->dk_geom.dkg_nhead * vd->dk_geom.dkg_nsect; 1790 ablk = vd->dk_geom.dkg_ncyl * csize; 1791 asize = vd->dk_geom.dkg_acyl * csize; 1792 } 1793 1794 alen = length / DEV_BSIZE; 1795 aoff = blk; 1796 1797 /* if we have reached the last block then the I/O is completed */ 1798 if (aoff == ablk + asize) { 1799 status = 0; 1800 goto done; 1801 } 1802 1803 /* if we are past the last block then return an error */ 1804 if (aoff > ablk + asize) 1805 return (EIO); 1806 1807 /* check if there is any I/O to end of the disk */ 1808 if (aoff + alen < ablk) { 1809 status = EAGAIN; 1810 goto done; 1811 } 1812 1813 /* we don't allow any write to the end of the disk */ 1814 if (operation == VD_OP_BWRITE) 1815 return (EIO); 1816 1817 if (aoff < ablk) { 1818 alen -= (ablk - aoff); 1819 aoff = ablk; 1820 } 1821 1822 if (aoff + alen > ablk + asize) { 1823 alen = ablk + asize - aoff; 1824 } 1825 1826 alen *= DEV_BSIZE; 1827 1828 if (operation == VD_OP_BREAD) { 1829 bzero(data + (aoff - blk) * DEV_BSIZE, alen); 1830 1831 if (vd->vdisk_label == VD_DISK_LABEL_VTOC) { 1832 /* check if we read backup labels */ 1833 label = VD_LABEL_VTOC(vd); 1834 ablk += (label->dkl_acyl - 1) * csize + 1835 (label->dkl_nhead - 1) * label->dkl_nsect; 1836 1837 for (sec = 1; (sec < 5 * 2 + 1); sec += 2) { 1838 1839 if (ablk + sec >= blk && 1840 ablk + sec < blk + (length / DEV_BSIZE)) { 1841 bcopy(label, data + 1842 (ablk + sec - blk) * DEV_BSIZE, 1843 sizeof (struct dk_label)); 1844 } 1845 } 1846 } 1847 } 1848 1849 length -= alen; 1850 1851 status = (length == 0)? 0: EAGAIN; 1852 1853 done: 1854 ASSERT(length == 0 || blk >= vd->flabel_limit); 1855 1856 /* 1857 * Return the parameters for the remaining I/O. The starting block is 1858 * adjusted so that it is relative to the vdisk backend. 1859 */ 1860 *datap = data; 1861 *blkp = blk - vd->flabel_limit; 1862 *lengthp = length; 1863 1864 return (status); 1865 } 1866 1867 /* 1868 * We define our own biodone function so that buffers used for 1869 * asynchronous writes are not released when biodone() is called. 
1870 */ 1871 static int 1872 vd_biodone(struct buf *bp) 1873 { 1874 ASSERT((bp->b_flags & B_DONE) == 0); 1875 ASSERT(SEMA_HELD(&bp->b_sem)); 1876 1877 bp->b_flags |= B_DONE; 1878 if (!(bp->b_flags & B_ASYNC)) 1879 sema_v(&bp->b_io); 1880 1881 return (0); 1882 } 1883 1884 /* 1885 * Return Values 1886 * EINPROGRESS - operation was successfully started 1887 * EIO - encountered LDC (aka. task error) 1888 * 0 - operation completed successfully 1889 * 1890 * Side Effect 1891 * sets request->status = <disk operation status> 1892 */ 1893 static int 1894 vd_start_bio(vd_task_t *task) 1895 { 1896 int rv, status = 0; 1897 vd_t *vd = task->vd; 1898 vd_dring_payload_t *request = task->request; 1899 struct buf *buf = &task->buf; 1900 uint8_t mtype; 1901 int slice; 1902 char *bufaddr = 0; 1903 size_t buflen; 1904 size_t offset, length, nbytes; 1905 1906 ASSERT(vd != NULL); 1907 ASSERT(request != NULL); 1908 1909 slice = request->slice; 1910 1911 ASSERT(slice == VD_SLICE_NONE || slice < vd->nslices); 1912 ASSERT((request->operation == VD_OP_BREAD) || 1913 (request->operation == VD_OP_BWRITE)); 1914 1915 if (request->nbytes == 0) { 1916 /* no service for trivial requests */ 1917 request->status = EINVAL; 1918 return (0); 1919 } 1920 1921 PR1("%s %lu bytes at block %lu", 1922 (request->operation == VD_OP_BREAD) ? "Read" : "Write", 1923 request->nbytes, request->addr); 1924 1925 /* 1926 * We have to check the open flags because the functions processing 1927 * the read/write request will not do it. 1928 */ 1929 if (request->operation == VD_OP_BWRITE && !(vd->open_flags & FWRITE)) { 1930 PR0("write fails because backend is opened read-only"); 1931 request->nbytes = 0; 1932 request->status = EROFS; 1933 return (0); 1934 } 1935 1936 mtype = (&vd->inband_task == task) ? LDC_SHADOW_MAP : LDC_DIRECT_MAP; 1937 1938 /* Map memory exported by client */ 1939 status = ldc_mem_map(task->mhdl, request->cookie, request->ncookies, 1940 mtype, (request->operation == VD_OP_BREAD) ? LDC_MEM_W : LDC_MEM_R, 1941 &bufaddr, NULL); 1942 if (status != 0) { 1943 PR0("ldc_mem_map() returned err %d ", status); 1944 return (EIO); 1945 } 1946 1947 /* 1948 * The buffer size has to be 8-byte aligned, so the client should have 1949 * sent a buffer which size is roundup to the next 8-byte aligned value. 1950 */ 1951 buflen = P2ROUNDUP(request->nbytes, 8); 1952 1953 status = ldc_mem_acquire(task->mhdl, 0, buflen); 1954 if (status != 0) { 1955 (void) ldc_mem_unmap(task->mhdl); 1956 PR0("ldc_mem_acquire() returned err %d ", status); 1957 return (EIO); 1958 } 1959 1960 offset = request->addr; 1961 nbytes = request->nbytes; 1962 length = nbytes; 1963 1964 /* default number of byte returned by the I/O */ 1965 request->nbytes = 0; 1966 1967 if (vd->vdisk_type == VD_DISK_TYPE_SLICE) { 1968 1969 if (slice != 0) { 1970 /* handle any fake I/O */ 1971 rv = vd_slice_fake_rdwr(vd, slice, request->operation, 1972 &bufaddr, &offset, &length); 1973 1974 /* record the number of bytes from the fake I/O */ 1975 request->nbytes = nbytes - length; 1976 1977 if (rv == 0) { 1978 request->status = 0; 1979 goto io_done; 1980 } 1981 1982 if (rv != EAGAIN) { 1983 request->nbytes = 0; 1984 request->status = EIO; 1985 goto io_done; 1986 } 1987 1988 /* 1989 * If we return with EAGAIN then this means that there 1990 * are still data to read or write. 1991 */ 1992 ASSERT(length != 0); 1993 1994 /* 1995 * We need to continue the I/O from the slice backend to 1996 * complete the request. 
The variables bufaddr, offset 1997 * and length have been adjusted to have the right 1998 * information to do the remaining I/O from the backend. 1999 * The backend is entirely mapped to slice 0 so we just 2000 * have to complete the I/O from that slice. 2001 */ 2002 slice = 0; 2003 } 2004 2005 } else if (vd->volume) { 2006 2007 rv = vd_dskimg_io_params(vd, slice, &offset, &length); 2008 if (rv != 0) { 2009 request->status = (rv == ENODATA)? 0: EIO; 2010 goto io_done; 2011 } 2012 slice = 0; 2013 2014 } else if ((slice == VD_SLICE_NONE) && !vd->file) { 2015 2016 /* 2017 * This is not a disk image so it is a real disk. We 2018 * assume that the underlying device driver supports 2019 * USCSICMD ioctls. This is the case of all SCSI devices 2020 * (sd, ssd...). 2021 * 2022 * In the future if we have non-SCSI disks we would need 2023 * to invoke the appropriate function to do I/O using an 2024 * absolute disk offset (for example using DIOCTL_RWCMD 2025 * for IDE disks). 2026 */ 2027 rv = vd_scsi_rdwr(vd, request->operation, bufaddr, offset, 2028 length); 2029 if (rv != 0) { 2030 request->status = EIO; 2031 } else { 2032 request->nbytes = length; 2033 request->status = 0; 2034 } 2035 goto io_done; 2036 } 2037 2038 /* Start the block I/O */ 2039 if (vd->file) { 2040 rv = vd_dskimg_rw(vd, slice, request->operation, bufaddr, 2041 offset, length); 2042 if (rv < 0) { 2043 request->nbytes = 0; 2044 request->status = EIO; 2045 } else { 2046 request->nbytes += rv; 2047 request->status = 0; 2048 } 2049 } else { 2050 bioinit(buf); 2051 buf->b_flags = B_BUSY; 2052 buf->b_bcount = length; 2053 buf->b_lblkno = offset; 2054 buf->b_bufsize = buflen; 2055 buf->b_edev = vd->dev[slice]; 2056 buf->b_un.b_addr = bufaddr; 2057 buf->b_iodone = vd_biodone; 2058 2059 if (request->operation == VD_OP_BREAD) { 2060 buf->b_flags |= B_READ; 2061 } else { 2062 /* 2063 * If we have a ZFS volume then we do an 2064 * asynchronous write and we will wait for the 2065 * completion of the write in vd_complete_bio() 2066 * using the DKIOCFLUSHWRITECACHE ioctl. We 2067 * do so for performance reason because, for a 2068 * synchronous write, the ZFS volume strategy() 2069 * function would only return after the write is 2070 * commited and this prevents starting multiple 2071 * writes in parallel. 2072 */ 2073 if (vd->zvol) 2074 buf->b_flags |= B_WRITE | B_ASYNC; 2075 else 2076 buf->b_flags |= B_WRITE; 2077 } 2078 2079 request->status = ldi_strategy(vd->ldi_handle[slice], buf); 2080 2081 /* 2082 * This is to indicate to the caller that the request 2083 * needs to be finished by vd_complete_bio() by calling 2084 * biowait() there and waiting for that to return before 2085 * triggering the notification of the vDisk client. 2086 * 2087 * This is necessary when writing to real disks as 2088 * otherwise calls to ldi_strategy() would be serialized 2089 * behind the calls to biowait() and performance would 2090 * suffer. 2091 */ 2092 if (request->status == 0) 2093 return (EINPROGRESS); 2094 2095 biofini(buf); 2096 } 2097 2098 io_done: 2099 /* Clean up after error or completion */ 2100 rv = ldc_mem_release(task->mhdl, 0, buflen); 2101 if (rv) { 2102 PR0("ldc_mem_release() returned err %d ", rv); 2103 status = EIO; 2104 } 2105 rv = ldc_mem_unmap(task->mhdl); 2106 if (rv) { 2107 PR0("ldc_mem_unmap() returned err %d ", rv); 2108 status = EIO; 2109 } 2110 2111 return (status); 2112 } 2113 2114 /* 2115 * This function should only be called from vd_notify to ensure that requests 2116 * are responded to in the order that they are received. 
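 *
 * If ldc_write() cannot send the message it returns EWOULDBLOCK; the
 * write is then retried after a short vds_ldc_delay pause. A partial
 * write is reported to the caller as EIO.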
2117 */ 2118 static int 2119 send_msg(ldc_handle_t ldc_handle, void *msg, size_t msglen) 2120 { 2121 int status; 2122 size_t nbytes; 2123 2124 do { 2125 nbytes = msglen; 2126 status = ldc_write(ldc_handle, msg, &nbytes); 2127 if (status != EWOULDBLOCK) 2128 break; 2129 drv_usecwait(vds_ldc_delay); 2130 } while (status == EWOULDBLOCK); 2131 2132 if (status != 0) { 2133 if (status != ECONNRESET) 2134 PR0("ldc_write() returned errno %d", status); 2135 return (status); 2136 } else if (nbytes != msglen) { 2137 PR0("ldc_write() performed only partial write"); 2138 return (EIO); 2139 } 2140 2141 PR1("SENT %lu bytes", msglen); 2142 return (0); 2143 } 2144 2145 static void 2146 vd_need_reset(vd_t *vd, boolean_t reset_ldc) 2147 { 2148 mutex_enter(&vd->lock); 2149 vd->reset_state = B_TRUE; 2150 vd->reset_ldc = reset_ldc; 2151 mutex_exit(&vd->lock); 2152 } 2153 2154 /* 2155 * Reset the state of the connection with a client, if needed; reset the LDC 2156 * transport as well, if needed. This function should only be called from the 2157 * "vd_recv_msg", as it waits for tasks - otherwise a deadlock can occur. 2158 */ 2159 static void 2160 vd_reset_if_needed(vd_t *vd) 2161 { 2162 int status = 0; 2163 2164 mutex_enter(&vd->lock); 2165 if (!vd->reset_state) { 2166 ASSERT(!vd->reset_ldc); 2167 mutex_exit(&vd->lock); 2168 return; 2169 } 2170 mutex_exit(&vd->lock); 2171 2172 PR0("Resetting connection state with %s", VD_CLIENT(vd)); 2173 2174 /* 2175 * Let any asynchronous I/O complete before possibly pulling the rug 2176 * out from under it; defer checking vd->reset_ldc, as one of the 2177 * asynchronous tasks might set it 2178 */ 2179 ddi_taskq_wait(vd->completionq); 2180 2181 if (vd->file) { 2182 status = VOP_FSYNC(vd->file_vnode, FSYNC, kcred, NULL); 2183 if (status) { 2184 PR0("VOP_FSYNC returned errno %d", status); 2185 } 2186 } 2187 2188 if ((vd->initialized & VD_DRING) && 2189 ((status = ldc_mem_dring_unmap(vd->dring_handle)) != 0)) 2190 PR0("ldc_mem_dring_unmap() returned errno %d", status); 2191 2192 vd_free_dring_task(vd); 2193 2194 /* Free the staging buffer for msgs */ 2195 if (vd->vio_msgp != NULL) { 2196 kmem_free(vd->vio_msgp, vd->max_msglen); 2197 vd->vio_msgp = NULL; 2198 } 2199 2200 /* Free the inband message buffer */ 2201 if (vd->inband_task.msg != NULL) { 2202 kmem_free(vd->inband_task.msg, vd->max_msglen); 2203 vd->inband_task.msg = NULL; 2204 } 2205 2206 mutex_enter(&vd->lock); 2207 2208 if (vd->reset_ldc) 2209 PR0("taking down LDC channel"); 2210 if (vd->reset_ldc && ((status = ldc_down(vd->ldc_handle)) != 0)) 2211 PR0("ldc_down() returned errno %d", status); 2212 2213 /* Reset exclusive access rights */ 2214 vd_reset_access(vd); 2215 2216 vd->initialized &= ~(VD_SID | VD_SEQ_NUM | VD_DRING); 2217 vd->state = VD_STATE_INIT; 2218 vd->max_msglen = sizeof (vio_msg_t); /* baseline vio message size */ 2219 2220 /* Allocate the staging buffer */ 2221 vd->vio_msgp = kmem_alloc(vd->max_msglen, KM_SLEEP); 2222 2223 PR0("calling ldc_up\n"); 2224 (void) ldc_up(vd->ldc_handle); 2225 2226 vd->reset_state = B_FALSE; 2227 vd->reset_ldc = B_FALSE; 2228 2229 mutex_exit(&vd->lock); 2230 } 2231 2232 static void vd_recv_msg(void *arg); 2233 2234 static void 2235 vd_mark_in_reset(vd_t *vd) 2236 { 2237 int status; 2238 2239 PR0("vd_mark_in_reset: marking vd in reset\n"); 2240 2241 vd_need_reset(vd, B_FALSE); 2242 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, vd, DDI_SLEEP); 2243 if (status == DDI_FAILURE) { 2244 PR0("cannot schedule task to recv msg\n"); 2245 vd_need_reset(vd, B_TRUE); 2246 return; 
2247 } 2248 } 2249 2250 static int 2251 vd_mark_elem_done(vd_t *vd, int idx, int elem_status, int elem_nbytes) 2252 { 2253 boolean_t accepted; 2254 int status; 2255 on_trap_data_t otd; 2256 vd_dring_entry_t *elem = VD_DRING_ELEM(idx); 2257 2258 if (vd->reset_state) 2259 return (0); 2260 2261 /* Acquire the element */ 2262 if ((status = VIO_DRING_ACQUIRE(&otd, vd->dring_mtype, 2263 vd->dring_handle, idx, idx)) != 0) { 2264 if (status == ECONNRESET) { 2265 vd_mark_in_reset(vd); 2266 return (0); 2267 } else { 2268 return (status); 2269 } 2270 } 2271 2272 /* Set the element's status and mark it done */ 2273 accepted = (elem->hdr.dstate == VIO_DESC_ACCEPTED); 2274 if (accepted) { 2275 elem->payload.nbytes = elem_nbytes; 2276 elem->payload.status = elem_status; 2277 elem->hdr.dstate = VIO_DESC_DONE; 2278 } else { 2279 /* Perhaps client timed out waiting for I/O... */ 2280 PR0("element %u no longer \"accepted\"", idx); 2281 VD_DUMP_DRING_ELEM(elem); 2282 } 2283 /* Release the element */ 2284 if ((status = VIO_DRING_RELEASE(vd->dring_mtype, 2285 vd->dring_handle, idx, idx)) != 0) { 2286 if (status == ECONNRESET) { 2287 vd_mark_in_reset(vd); 2288 return (0); 2289 } else { 2290 PR0("VIO_DRING_RELEASE() returned errno %d", 2291 status); 2292 return (status); 2293 } 2294 } 2295 2296 return (accepted ? 0 : EINVAL); 2297 } 2298 2299 /* 2300 * Return Values 2301 * 0 - operation completed successfully 2302 * EIO - encountered LDC / task error 2303 * 2304 * Side Effect 2305 * sets request->status = <disk operation status> 2306 */ 2307 static int 2308 vd_complete_bio(vd_task_t *task) 2309 { 2310 int status = 0; 2311 int rv = 0; 2312 vd_t *vd = task->vd; 2313 vd_dring_payload_t *request = task->request; 2314 struct buf *buf = &task->buf; 2315 int rval; 2316 2317 2318 ASSERT(vd != NULL); 2319 ASSERT(request != NULL); 2320 ASSERT(task->msg != NULL); 2321 ASSERT(task->msglen >= sizeof (*task->msg)); 2322 ASSERT(!vd->file); 2323 ASSERT(request->slice != VD_SLICE_NONE || (!vd_slice_single_slice && 2324 vd->vdisk_type == VD_DISK_TYPE_SLICE) || vd->volume); 2325 2326 if (vd->zvol && request->operation == VD_OP_BWRITE) { 2327 /* 2328 * For a ZFS volume, we use asynchronous writes so we have to 2329 * ensure that writes have been commited before marking the 2330 * I/O as completed. 2331 */ 2332 request->status = ldi_ioctl(vd->ldi_handle[0], 2333 DKIOCFLUSHWRITECACHE, NULL, vd->open_flags | FKIOCTL, 2334 kcred, &rval); 2335 } else { 2336 /* Wait for the I/O to complete [ call to ldi_strategy(9f) ] */ 2337 request->status = biowait(buf); 2338 } 2339 2340 /* Update the number of bytes read/written */ 2341 request->nbytes += buf->b_bcount - buf->b_resid; 2342 2343 /* Release the buffer */ 2344 if (!vd->reset_state) 2345 status = ldc_mem_release(task->mhdl, 0, buf->b_bufsize); 2346 if (status) { 2347 PR0("ldc_mem_release() returned errno %d copying to " 2348 "client", status); 2349 if (status == ECONNRESET) { 2350 vd_mark_in_reset(vd); 2351 } 2352 rv = EIO; 2353 } 2354 2355 /* Unmap the memory, even if in reset */ 2356 status = ldc_mem_unmap(task->mhdl); 2357 if (status) { 2358 PR0("ldc_mem_unmap() returned errno %d copying to client", 2359 status); 2360 if (status == ECONNRESET) { 2361 vd_mark_in_reset(vd); 2362 } 2363 rv = EIO; 2364 } 2365 2366 biofini(buf); 2367 2368 return (rv); 2369 } 2370 2371 /* 2372 * Description: 2373 * This function is called by the two functions called by a taskq 2374 * [ vd_complete_notify() and vd_serial_notify()) ] to send the 2375 * message to the client. 
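 *
 * An ECONNRESET error from send_msg() only marks the connection as being
 * in reset, while any other send failure requests a full reset that also
 * takes down the LDC channel (see vd_reset_if_needed()).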
2376 * 2377 * Parameters: 2378 * arg - opaque pointer to structure containing task to be completed 2379 * 2380 * Return Values 2381 * None 2382 */ 2383 static void 2384 vd_notify(vd_task_t *task) 2385 { 2386 int status; 2387 2388 ASSERT(task != NULL); 2389 ASSERT(task->vd != NULL); 2390 2391 /* 2392 * Send the "ack" or "nack" back to the client; if sending the message 2393 * via LDC fails, arrange to reset both the connection state and LDC 2394 * itself 2395 */ 2396 PR2("Sending %s", 2397 (task->msg->tag.vio_subtype == VIO_SUBTYPE_ACK) ? "ACK" : "NACK"); 2398 2399 status = send_msg(task->vd->ldc_handle, task->msg, task->msglen); 2400 switch (status) { 2401 case 0: 2402 break; 2403 case ECONNRESET: 2404 vd_mark_in_reset(task->vd); 2405 break; 2406 default: 2407 PR0("initiating full reset"); 2408 vd_need_reset(task->vd, B_TRUE); 2409 break; 2410 } 2411 2412 DTRACE_PROBE1(task__end, vd_task_t *, task); 2413 } 2414 2415 /* 2416 * Description: 2417 * Mark the Dring entry as Done and (if necessary) send an ACK/NACK to 2418 * the vDisk client 2419 * 2420 * Parameters: 2421 * task - structure containing the request sent from client 2422 * 2423 * Return Values 2424 * None 2425 */ 2426 static void 2427 vd_complete_notify(vd_task_t *task) 2428 { 2429 int status = 0; 2430 vd_t *vd = task->vd; 2431 vd_dring_payload_t *request = task->request; 2432 2433 /* Update the dring element for a dring client */ 2434 if (!vd->reset_state && (vd->xfer_mode == VIO_DRING_MODE_V1_0)) { 2435 status = vd_mark_elem_done(vd, task->index, 2436 request->status, request->nbytes); 2437 if (status == ECONNRESET) 2438 vd_mark_in_reset(vd); 2439 else if (status == EACCES) 2440 vd_need_reset(vd, B_TRUE); 2441 } 2442 2443 /* 2444 * If a transport error occurred while marking the element done or 2445 * previously while executing the task, arrange to "nack" the message 2446 * when the final task in the descriptor element range completes 2447 */ 2448 if ((status != 0) || (task->status != 0)) 2449 task->msg->tag.vio_subtype = VIO_SUBTYPE_NACK; 2450 2451 /* 2452 * Only the final task for a range of elements will respond to and 2453 * free the message 2454 */ 2455 if (task->type == VD_NONFINAL_RANGE_TASK) { 2456 return; 2457 } 2458 2459 /* 2460 * We should only send an ACK/NACK here if we are not currently in 2461 * reset as, depending on how we reset, the dring may have been 2462 * blown away and we don't want to ACK/NACK a message that isn't 2463 * there. 2464 */ 2465 if (!vd->reset_state) 2466 vd_notify(task); 2467 } 2468 2469 /* 2470 * Description: 2471 * This is the basic completion function called to handle inband data 2472 * requests and handshake messages. All it needs to do is trigger a 2473 * message to the client that the request is completed. 
2474 * 2475 * Parameters: 2476 * arg - opaque pointer to structure containing task to be completed 2477 * 2478 * Return Values 2479 * None 2480 */ 2481 static void 2482 vd_serial_notify(void *arg) 2483 { 2484 vd_task_t *task = (vd_task_t *)arg; 2485 2486 ASSERT(task != NULL); 2487 vd_notify(task); 2488 } 2489 2490 /* ARGSUSED */ 2491 static int 2492 vd_geom2dk_geom(void *vd_buf, size_t vd_buf_len, void *ioctl_arg) 2493 { 2494 VD_GEOM2DK_GEOM((vd_geom_t *)vd_buf, (struct dk_geom *)ioctl_arg); 2495 return (0); 2496 } 2497 2498 /* ARGSUSED */ 2499 static int 2500 vd_vtoc2vtoc(void *vd_buf, size_t vd_buf_len, void *ioctl_arg) 2501 { 2502 VD_VTOC2VTOC((vd_vtoc_t *)vd_buf, (struct extvtoc *)ioctl_arg); 2503 return (0); 2504 } 2505 2506 static void 2507 dk_geom2vd_geom(void *ioctl_arg, void *vd_buf) 2508 { 2509 DK_GEOM2VD_GEOM((struct dk_geom *)ioctl_arg, (vd_geom_t *)vd_buf); 2510 } 2511 2512 static void 2513 vtoc2vd_vtoc(void *ioctl_arg, void *vd_buf) 2514 { 2515 VTOC2VD_VTOC((struct extvtoc *)ioctl_arg, (vd_vtoc_t *)vd_buf); 2516 } 2517 2518 static int 2519 vd_get_efi_in(void *vd_buf, size_t vd_buf_len, void *ioctl_arg) 2520 { 2521 vd_efi_t *vd_efi = (vd_efi_t *)vd_buf; 2522 dk_efi_t *dk_efi = (dk_efi_t *)ioctl_arg; 2523 size_t data_len; 2524 2525 data_len = vd_buf_len - (sizeof (vd_efi_t) - sizeof (uint64_t)); 2526 if (vd_efi->length > data_len) 2527 return (EINVAL); 2528 2529 dk_efi->dki_lba = vd_efi->lba; 2530 dk_efi->dki_length = vd_efi->length; 2531 dk_efi->dki_data = kmem_zalloc(vd_efi->length, KM_SLEEP); 2532 return (0); 2533 } 2534 2535 static void 2536 vd_get_efi_out(void *ioctl_arg, void *vd_buf) 2537 { 2538 int len; 2539 vd_efi_t *vd_efi = (vd_efi_t *)vd_buf; 2540 dk_efi_t *dk_efi = (dk_efi_t *)ioctl_arg; 2541 2542 len = vd_efi->length; 2543 DK_EFI2VD_EFI(dk_efi, vd_efi); 2544 kmem_free(dk_efi->dki_data, len); 2545 } 2546 2547 static int 2548 vd_set_efi_in(void *vd_buf, size_t vd_buf_len, void *ioctl_arg) 2549 { 2550 vd_efi_t *vd_efi = (vd_efi_t *)vd_buf; 2551 dk_efi_t *dk_efi = (dk_efi_t *)ioctl_arg; 2552 size_t data_len; 2553 2554 data_len = vd_buf_len - (sizeof (vd_efi_t) - sizeof (uint64_t)); 2555 if (vd_efi->length > data_len) 2556 return (EINVAL); 2557 2558 dk_efi->dki_data = kmem_alloc(vd_efi->length, KM_SLEEP); 2559 VD_EFI2DK_EFI(vd_efi, dk_efi); 2560 return (0); 2561 } 2562 2563 static void 2564 vd_set_efi_out(void *ioctl_arg, void *vd_buf) 2565 { 2566 vd_efi_t *vd_efi = (vd_efi_t *)vd_buf; 2567 dk_efi_t *dk_efi = (dk_efi_t *)ioctl_arg; 2568 2569 kmem_free(dk_efi->dki_data, vd_efi->length); 2570 } 2571 2572 static int 2573 vd_scsicmd_in(void *vd_buf, size_t vd_buf_len, void *ioctl_arg) 2574 { 2575 size_t vd_scsi_len; 2576 vd_scsi_t *vd_scsi = (vd_scsi_t *)vd_buf; 2577 struct uscsi_cmd *uscsi = (struct uscsi_cmd *)ioctl_arg; 2578 2579 /* check buffer size */ 2580 vd_scsi_len = VD_SCSI_SIZE; 2581 vd_scsi_len += P2ROUNDUP(vd_scsi->cdb_len, sizeof (uint64_t)); 2582 vd_scsi_len += P2ROUNDUP(vd_scsi->sense_len, sizeof (uint64_t)); 2583 vd_scsi_len += P2ROUNDUP(vd_scsi->datain_len, sizeof (uint64_t)); 2584 vd_scsi_len += P2ROUNDUP(vd_scsi->dataout_len, sizeof (uint64_t)); 2585 2586 ASSERT(vd_scsi_len % sizeof (uint64_t) == 0); 2587 2588 if (vd_buf_len < vd_scsi_len) 2589 return (EINVAL); 2590 2591 /* set flags */ 2592 uscsi->uscsi_flags = vd_scsi_debug; 2593 2594 if (vd_scsi->options & VD_SCSI_OPT_NORETRY) { 2595 uscsi->uscsi_flags |= USCSI_ISOLATE; 2596 uscsi->uscsi_flags |= USCSI_DIAGNOSE; 2597 } 2598 2599 /* task attribute */ 2600 switch (vd_scsi->task_attribute) { 2601 case 
VD_SCSI_TASK_ACA: 2602 uscsi->uscsi_flags |= USCSI_HEAD; 2603 break; 2604 case VD_SCSI_TASK_HQUEUE: 2605 uscsi->uscsi_flags |= USCSI_HTAG; 2606 break; 2607 case VD_SCSI_TASK_ORDERED: 2608 uscsi->uscsi_flags |= USCSI_OTAG; 2609 break; 2610 default: 2611 uscsi->uscsi_flags |= USCSI_NOTAG; 2612 break; 2613 } 2614 2615 /* timeout */ 2616 uscsi->uscsi_timeout = vd_scsi->timeout; 2617 2618 /* cdb data */ 2619 uscsi->uscsi_cdb = (caddr_t)VD_SCSI_DATA_CDB(vd_scsi); 2620 uscsi->uscsi_cdblen = vd_scsi->cdb_len; 2621 2622 /* sense buffer */ 2623 if (vd_scsi->sense_len != 0) { 2624 uscsi->uscsi_flags |= USCSI_RQENABLE; 2625 uscsi->uscsi_rqbuf = (caddr_t)VD_SCSI_DATA_SENSE(vd_scsi); 2626 uscsi->uscsi_rqlen = vd_scsi->sense_len; 2627 } 2628 2629 if (vd_scsi->datain_len != 0 && vd_scsi->dataout_len != 0) { 2630 /* uscsi does not support read/write request */ 2631 return (EINVAL); 2632 } 2633 2634 /* request data-in */ 2635 if (vd_scsi->datain_len != 0) { 2636 uscsi->uscsi_flags |= USCSI_READ; 2637 uscsi->uscsi_buflen = vd_scsi->datain_len; 2638 uscsi->uscsi_bufaddr = (char *)VD_SCSI_DATA_IN(vd_scsi); 2639 } 2640 2641 /* request data-out */ 2642 if (vd_scsi->dataout_len != 0) { 2643 uscsi->uscsi_buflen = vd_scsi->dataout_len; 2644 uscsi->uscsi_bufaddr = (char *)VD_SCSI_DATA_OUT(vd_scsi); 2645 } 2646 2647 return (0); 2648 } 2649 2650 static void 2651 vd_scsicmd_out(void *ioctl_arg, void *vd_buf) 2652 { 2653 vd_scsi_t *vd_scsi = (vd_scsi_t *)vd_buf; 2654 struct uscsi_cmd *uscsi = (struct uscsi_cmd *)ioctl_arg; 2655 2656 /* output fields */ 2657 vd_scsi->cmd_status = uscsi->uscsi_status; 2658 2659 /* sense data */ 2660 if ((uscsi->uscsi_flags & USCSI_RQENABLE) && 2661 (uscsi->uscsi_status == STATUS_CHECK || 2662 uscsi->uscsi_status == STATUS_TERMINATED)) { 2663 vd_scsi->sense_status = uscsi->uscsi_rqstatus; 2664 if (uscsi->uscsi_rqstatus == STATUS_GOOD) 2665 vd_scsi->sense_len -= uscsi->uscsi_rqresid; 2666 else 2667 vd_scsi->sense_len = 0; 2668 } else { 2669 vd_scsi->sense_len = 0; 2670 } 2671 2672 if (uscsi->uscsi_status != STATUS_GOOD) { 2673 vd_scsi->dataout_len = 0; 2674 vd_scsi->datain_len = 0; 2675 return; 2676 } 2677 2678 if (uscsi->uscsi_flags & USCSI_READ) { 2679 /* request data (read) */ 2680 vd_scsi->datain_len -= uscsi->uscsi_resid; 2681 vd_scsi->dataout_len = 0; 2682 } else { 2683 /* request data (write) */ 2684 vd_scsi->datain_len = 0; 2685 vd_scsi->dataout_len -= uscsi->uscsi_resid; 2686 } 2687 } 2688 2689 static ushort_t 2690 vd_lbl2cksum(struct dk_label *label) 2691 { 2692 int count; 2693 ushort_t sum, *sp; 2694 2695 count = (sizeof (struct dk_label)) / (sizeof (short)) - 1; 2696 sp = (ushort_t *)label; 2697 sum = 0; 2698 while (count--) { 2699 sum ^= *sp++; 2700 } 2701 2702 return (sum); 2703 } 2704 2705 /* 2706 * Copy information from a vtoc and dk_geom structures to a dk_label structure. 
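 *
 * Partition starts are converted from absolute blocks to cylinder numbers
 * using the geometry; for example, with dkg_nhead = 16 and dkg_nsect = 63,
 * a partition starting at block 16128 is recorded as
 * dkl_cylno = 16128 / (16 * 63) = 16. The label checksum is the XOR of all
 * 16-bit words of the label except the checksum word itself (see
 * vd_lbl2cksum() above), so dkl_cksum is recomputed last, after every
 * other field has been filled in.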
2707 */ 2708 static void 2709 vd_vtocgeom_to_label(struct extvtoc *vtoc, struct dk_geom *geom, 2710 struct dk_label *label) 2711 { 2712 int i; 2713 2714 ASSERT(vtoc->v_nparts == V_NUMPAR); 2715 ASSERT(vtoc->v_sanity == VTOC_SANE); 2716 2717 bzero(label, sizeof (struct dk_label)); 2718 2719 label->dkl_ncyl = geom->dkg_ncyl; 2720 label->dkl_acyl = geom->dkg_acyl; 2721 label->dkl_pcyl = geom->dkg_pcyl; 2722 label->dkl_nhead = geom->dkg_nhead; 2723 label->dkl_nsect = geom->dkg_nsect; 2724 label->dkl_intrlv = geom->dkg_intrlv; 2725 label->dkl_apc = geom->dkg_apc; 2726 label->dkl_rpm = geom->dkg_rpm; 2727 label->dkl_write_reinstruct = geom->dkg_write_reinstruct; 2728 label->dkl_read_reinstruct = geom->dkg_read_reinstruct; 2729 2730 label->dkl_vtoc.v_nparts = V_NUMPAR; 2731 label->dkl_vtoc.v_sanity = VTOC_SANE; 2732 label->dkl_vtoc.v_version = vtoc->v_version; 2733 for (i = 0; i < V_NUMPAR; i++) { 2734 label->dkl_vtoc.v_timestamp[i] = vtoc->timestamp[i]; 2735 label->dkl_vtoc.v_part[i].p_tag = vtoc->v_part[i].p_tag; 2736 label->dkl_vtoc.v_part[i].p_flag = vtoc->v_part[i].p_flag; 2737 label->dkl_map[i].dkl_cylno = vtoc->v_part[i].p_start / 2738 (label->dkl_nhead * label->dkl_nsect); 2739 label->dkl_map[i].dkl_nblk = vtoc->v_part[i].p_size; 2740 } 2741 2742 /* 2743 * The bootinfo array can not be copied with bcopy() because 2744 * elements are of type long in vtoc (so 64-bit) and of type 2745 * int in dk_vtoc (so 32-bit). 2746 */ 2747 label->dkl_vtoc.v_bootinfo[0] = vtoc->v_bootinfo[0]; 2748 label->dkl_vtoc.v_bootinfo[1] = vtoc->v_bootinfo[1]; 2749 label->dkl_vtoc.v_bootinfo[2] = vtoc->v_bootinfo[2]; 2750 bcopy(vtoc->v_asciilabel, label->dkl_asciilabel, LEN_DKL_ASCII); 2751 bcopy(vtoc->v_volume, label->dkl_vtoc.v_volume, LEN_DKL_VVOL); 2752 2753 /* re-compute checksum */ 2754 label->dkl_magic = DKL_MAGIC; 2755 label->dkl_cksum = vd_lbl2cksum(label); 2756 } 2757 2758 /* 2759 * Copy information from a dk_label structure to a vtoc and dk_geom structures. 2760 */ 2761 static void 2762 vd_label_to_vtocgeom(struct dk_label *label, struct extvtoc *vtoc, 2763 struct dk_geom *geom) 2764 { 2765 int i; 2766 2767 bzero(vtoc, sizeof (struct vtoc)); 2768 bzero(geom, sizeof (struct dk_geom)); 2769 2770 geom->dkg_ncyl = label->dkl_ncyl; 2771 geom->dkg_acyl = label->dkl_acyl; 2772 geom->dkg_nhead = label->dkl_nhead; 2773 geom->dkg_nsect = label->dkl_nsect; 2774 geom->dkg_intrlv = label->dkl_intrlv; 2775 geom->dkg_apc = label->dkl_apc; 2776 geom->dkg_rpm = label->dkl_rpm; 2777 geom->dkg_pcyl = label->dkl_pcyl; 2778 geom->dkg_write_reinstruct = label->dkl_write_reinstruct; 2779 geom->dkg_read_reinstruct = label->dkl_read_reinstruct; 2780 2781 vtoc->v_sanity = label->dkl_vtoc.v_sanity; 2782 vtoc->v_version = label->dkl_vtoc.v_version; 2783 vtoc->v_sectorsz = DEV_BSIZE; 2784 vtoc->v_nparts = label->dkl_vtoc.v_nparts; 2785 2786 for (i = 0; i < vtoc->v_nparts; i++) { 2787 vtoc->v_part[i].p_tag = label->dkl_vtoc.v_part[i].p_tag; 2788 vtoc->v_part[i].p_flag = label->dkl_vtoc.v_part[i].p_flag; 2789 vtoc->v_part[i].p_start = label->dkl_map[i].dkl_cylno * 2790 (label->dkl_nhead * label->dkl_nsect); 2791 vtoc->v_part[i].p_size = label->dkl_map[i].dkl_nblk; 2792 vtoc->timestamp[i] = label->dkl_vtoc.v_timestamp[i]; 2793 } 2794 2795 /* 2796 * The bootinfo array can not be copied with bcopy() because 2797 * elements are of type long in vtoc (so 64-bit) and of type 2798 * int in dk_vtoc (so 32-bit). 
2799 */ 2800 vtoc->v_bootinfo[0] = label->dkl_vtoc.v_bootinfo[0]; 2801 vtoc->v_bootinfo[1] = label->dkl_vtoc.v_bootinfo[1]; 2802 vtoc->v_bootinfo[2] = label->dkl_vtoc.v_bootinfo[2]; 2803 bcopy(label->dkl_asciilabel, vtoc->v_asciilabel, LEN_DKL_ASCII); 2804 bcopy(label->dkl_vtoc.v_volume, vtoc->v_volume, LEN_DKL_VVOL); 2805 } 2806 2807 /* 2808 * Check if a geometry is valid for a single-slice disk. A geometry is 2809 * considered valid if the main attributes of the geometry match with the 2810 * attributes of the fake geometry we have created. 2811 */ 2812 static boolean_t 2813 vd_slice_geom_isvalid(vd_t *vd, struct dk_geom *geom) 2814 { 2815 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE); 2816 ASSERT(vd->vdisk_label == VD_DISK_LABEL_VTOC); 2817 2818 if (geom->dkg_ncyl != vd->dk_geom.dkg_ncyl || 2819 geom->dkg_acyl != vd->dk_geom.dkg_acyl || 2820 geom->dkg_nsect != vd->dk_geom.dkg_nsect || 2821 geom->dkg_pcyl != vd->dk_geom.dkg_pcyl) 2822 return (B_FALSE); 2823 2824 return (B_TRUE); 2825 } 2826 2827 /* 2828 * Check if a vtoc is valid for a single-slice disk. A vtoc is considered 2829 * valid if the main attributes of the vtoc match with the attributes of the 2830 * fake vtoc we have created. 2831 */ 2832 static boolean_t 2833 vd_slice_vtoc_isvalid(vd_t *vd, struct extvtoc *vtoc) 2834 { 2835 size_t csize; 2836 int i; 2837 2838 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE); 2839 ASSERT(vd->vdisk_label == VD_DISK_LABEL_VTOC); 2840 2841 if (vtoc->v_sanity != vd->vtoc.v_sanity || 2842 vtoc->v_version != vd->vtoc.v_version || 2843 vtoc->v_nparts != vd->vtoc.v_nparts || 2844 strcmp(vtoc->v_volume, vd->vtoc.v_volume) != 0 || 2845 strcmp(vtoc->v_asciilabel, vd->vtoc.v_asciilabel) != 0) 2846 return (B_FALSE); 2847 2848 /* slice 2 should be unchanged */ 2849 if (vtoc->v_part[VD_ENTIRE_DISK_SLICE].p_start != 2850 vd->vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_start || 2851 vtoc->v_part[VD_ENTIRE_DISK_SLICE].p_size != 2852 vd->vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_size) 2853 return (B_FALSE); 2854 2855 /* 2856 * Slice 0 should be mostly unchanged and cover most of the disk. 2857 * However we allow some flexibility wrt to the start and the size 2858 * of this slice mainly because we can't exactly know how it will 2859 * be defined by the OS installer. 2860 * 2861 * We allow slice 0 to be defined as starting on any of the first 2862 * 4 cylinders. 2863 */ 2864 csize = vd->dk_geom.dkg_nhead * vd->dk_geom.dkg_nsect; 2865 2866 if (vtoc->v_part[0].p_start > 4 * csize || 2867 vtoc->v_part[0].p_size > vtoc->v_part[VD_ENTIRE_DISK_SLICE].p_size) 2868 return (B_FALSE); 2869 2870 if (vd->vtoc.v_part[0].p_size >= 4 * csize && 2871 vtoc->v_part[0].p_size < vd->vtoc.v_part[0].p_size - 4 *csize) 2872 return (B_FALSE); 2873 2874 /* any other slice should have a size of 0 */ 2875 for (i = 1; i < vtoc->v_nparts; i++) { 2876 if (i != VD_ENTIRE_DISK_SLICE && 2877 vtoc->v_part[i].p_size != 0) 2878 return (B_FALSE); 2879 } 2880 2881 return (B_TRUE); 2882 } 2883 2884 /* 2885 * Handle ioctls to a disk slice. 
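 *
 * DKIOCFLUSHWRITECACHE is passed through to the backend (VOP_FSYNC() for
 * a file, ldi_ioctl() otherwise). For a VTOC label, DKIOCGGEOM and
 * DKIOCGEXTVTOC return the fake geometry and vtoc, while DKIOCSGEOM and
 * DKIOCSEXTVTOC fake success only when the new values match the fake
 * label (and return ENOTSUP when vd_slice_single_slice is set). For an
 * EFI label, only DKIOCGETEFI and DKIOCSETEFI are handled, and writing
 * the EFI label is not supported.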
2886 * 2887 * Return Values 2888 * 0 - Indicates that there are no errors in disk operations 2889 * ENOTSUP - Unknown disk label type or unsupported DKIO ioctl 2890 * EINVAL - Not enough room to copy the EFI label 2891 * 2892 */ 2893 static int 2894 vd_do_slice_ioctl(vd_t *vd, int cmd, void *ioctl_arg) 2895 { 2896 dk_efi_t *dk_ioc; 2897 struct extvtoc *vtoc; 2898 struct dk_geom *geom; 2899 size_t len, lba; 2900 int rval; 2901 2902 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE); 2903 2904 if (cmd == DKIOCFLUSHWRITECACHE) { 2905 if (vd->file) { 2906 return (VOP_FSYNC(vd->file_vnode, FSYNC, kcred, NULL)); 2907 } else { 2908 return (ldi_ioctl(vd->ldi_handle[0], cmd, 2909 (intptr_t)ioctl_arg, vd->open_flags | FKIOCTL, 2910 kcred, &rval)); 2911 } 2912 } 2913 2914 switch (vd->vdisk_label) { 2915 2916 /* ioctls for a single slice disk with a VTOC label */ 2917 case VD_DISK_LABEL_VTOC: 2918 2919 switch (cmd) { 2920 2921 case DKIOCGGEOM: 2922 ASSERT(ioctl_arg != NULL); 2923 bcopy(&vd->dk_geom, ioctl_arg, sizeof (vd->dk_geom)); 2924 return (0); 2925 2926 case DKIOCGEXTVTOC: 2927 ASSERT(ioctl_arg != NULL); 2928 bcopy(&vd->vtoc, ioctl_arg, sizeof (vd->vtoc)); 2929 return (0); 2930 2931 case DKIOCSGEOM: 2932 ASSERT(ioctl_arg != NULL); 2933 if (vd_slice_single_slice) 2934 return (ENOTSUP); 2935 2936 /* fake success only if new geometry is valid */ 2937 geom = (struct dk_geom *)ioctl_arg; 2938 if (!vd_slice_geom_isvalid(vd, geom)) 2939 return (EINVAL); 2940 2941 return (0); 2942 2943 case DKIOCSEXTVTOC: 2944 ASSERT(ioctl_arg != NULL); 2945 if (vd_slice_single_slice) 2946 return (ENOTSUP); 2947 2948 /* fake sucess only if the new vtoc is valid */ 2949 vtoc = (struct extvtoc *)ioctl_arg; 2950 if (!vd_slice_vtoc_isvalid(vd, vtoc)) 2951 return (EINVAL); 2952 2953 return (0); 2954 2955 default: 2956 return (ENOTSUP); 2957 } 2958 2959 /* ioctls for a single slice disk with an EFI label */ 2960 case VD_DISK_LABEL_EFI: 2961 2962 if (cmd != DKIOCGETEFI && cmd != DKIOCSETEFI) 2963 return (ENOTSUP); 2964 2965 ASSERT(ioctl_arg != NULL); 2966 dk_ioc = (dk_efi_t *)ioctl_arg; 2967 2968 len = dk_ioc->dki_length; 2969 lba = dk_ioc->dki_lba; 2970 2971 if ((lba != VD_EFI_LBA_GPT && lba != VD_EFI_LBA_GPE) || 2972 (lba == VD_EFI_LBA_GPT && len < sizeof (efi_gpt_t)) || 2973 (lba == VD_EFI_LBA_GPE && len < sizeof (efi_gpe_t))) 2974 return (EINVAL); 2975 2976 switch (cmd) { 2977 case DKIOCGETEFI: 2978 len = vd_slice_flabel_read(vd, 2979 (caddr_t)dk_ioc->dki_data, lba * DEV_BSIZE, len); 2980 2981 ASSERT(len > 0); 2982 2983 return (0); 2984 2985 case DKIOCSETEFI: 2986 if (vd_slice_single_slice) 2987 return (ENOTSUP); 2988 2989 /* we currently don't support writing EFI */ 2990 return (EIO); 2991 } 2992 2993 default: 2994 /* Unknown disk label type */ 2995 return (ENOTSUP); 2996 } 2997 } 2998 2999 static int 3000 vds_efi_alloc_and_read(vd_t *vd, efi_gpt_t **gpt, efi_gpe_t **gpe) 3001 { 3002 vd_efi_dev_t edev; 3003 int status; 3004 3005 VD_EFI_DEV_SET(edev, vd, (vd_efi_ioctl_func)vd_backend_ioctl); 3006 3007 status = vd_efi_alloc_and_read(&edev, gpt, gpe); 3008 3009 return (status); 3010 } 3011 3012 static void 3013 vds_efi_free(vd_t *vd, efi_gpt_t *gpt, efi_gpe_t *gpe) 3014 { 3015 vd_efi_dev_t edev; 3016 3017 VD_EFI_DEV_SET(edev, vd, (vd_efi_ioctl_func)vd_backend_ioctl); 3018 3019 vd_efi_free(&edev, gpt, gpe); 3020 } 3021 3022 static int 3023 vd_dskimg_validate_efi(vd_t *vd) 3024 { 3025 efi_gpt_t *gpt; 3026 efi_gpe_t *gpe; 3027 int i, nparts, status; 3028 struct uuid efi_reserved = EFI_RESERVED; 3029 3030 if ((status = 
vds_efi_alloc_and_read(vd, &gpt, &gpe)) != 0) 3031 return (status); 3032 3033 bzero(&vd->vtoc, sizeof (struct extvtoc)); 3034 bzero(&vd->dk_geom, sizeof (struct dk_geom)); 3035 bzero(vd->slices, sizeof (vd_slice_t) * VD_MAXPART); 3036 3037 vd->efi_reserved = -1; 3038 3039 nparts = gpt->efi_gpt_NumberOfPartitionEntries; 3040 3041 for (i = 0; i < nparts && i < VD_MAXPART; i++) { 3042 3043 if (gpe[i].efi_gpe_StartingLBA == 0 || 3044 gpe[i].efi_gpe_EndingLBA == 0) { 3045 continue; 3046 } 3047 3048 vd->slices[i].start = gpe[i].efi_gpe_StartingLBA; 3049 vd->slices[i].nblocks = gpe[i].efi_gpe_EndingLBA - 3050 gpe[i].efi_gpe_StartingLBA + 1; 3051 3052 if (bcmp(&gpe[i].efi_gpe_PartitionTypeGUID, &efi_reserved, 3053 sizeof (struct uuid)) == 0) 3054 vd->efi_reserved = i; 3055 3056 } 3057 3058 ASSERT(vd->vdisk_size != 0); 3059 vd->slices[VD_EFI_WD_SLICE].start = 0; 3060 vd->slices[VD_EFI_WD_SLICE].nblocks = vd->vdisk_size; 3061 3062 vds_efi_free(vd, gpt, gpe); 3063 3064 return (status); 3065 } 3066 3067 /* 3068 * Function: 3069 * vd_dskimg_validate_geometry 3070 * 3071 * Description: 3072 * Read the label and validate the geometry of a disk image. The driver 3073 * label, vtoc and geometry information are updated according to the 3074 * label read from the disk image. 3075 * 3076 * If no valid label is found, the label is set to unknown and the 3077 * function returns EINVAL, but a default vtoc and geometry are provided 3078 * to the driver. If an EFI label is found, ENOTSUP is returned. 3079 * 3080 * Parameters: 3081 * vd - disk on which the operation is performed. 3082 * 3083 * Return Code: 3084 * 0 - success. 3085 * EIO - error reading the label from the disk image. 3086 * EINVAL - unknown disk label. 3087 * ENOTSUP - geometry not applicable (EFI label). 3088 */ 3089 static int 3090 vd_dskimg_validate_geometry(vd_t *vd) 3091 { 3092 struct dk_label label; 3093 struct dk_geom *geom = &vd->dk_geom; 3094 struct extvtoc *vtoc = &vd->vtoc; 3095 int i; 3096 int status = 0; 3097 3098 ASSERT(VD_DSKIMG(vd)); 3099 3100 if (VD_DSKIMG_LABEL_READ(vd, &label) < 0) 3101 return (EIO); 3102 3103 if (label.dkl_magic != DKL_MAGIC || 3104 label.dkl_cksum != vd_lbl2cksum(&label) || 3105 (vd_dskimg_validate_sanity && 3106 label.dkl_vtoc.v_sanity != VTOC_SANE) || 3107 label.dkl_vtoc.v_nparts != V_NUMPAR) { 3108 3109 if (vd_dskimg_validate_efi(vd) == 0) { 3110 vd->vdisk_label = VD_DISK_LABEL_EFI; 3111 return (ENOTSUP); 3112 } 3113 3114 vd->vdisk_label = VD_DISK_LABEL_UNK; 3115 vd_build_default_label(vd->dskimg_size, &label); 3116 status = EINVAL; 3117 } else { 3118 vd->vdisk_label = VD_DISK_LABEL_VTOC; 3119 } 3120 3121 /* Update the driver geometry and vtoc */ 3122 vd_label_to_vtocgeom(&label, vtoc, geom); 3123 3124 /* Update logical partitions */ 3125 bzero(vd->slices, sizeof (vd_slice_t) * VD_MAXPART); 3126 if (vd->vdisk_label != VD_DISK_LABEL_UNK) { 3127 for (i = 0; i < vtoc->v_nparts; i++) { 3128 vd->slices[i].start = vtoc->v_part[i].p_start; 3129 vd->slices[i].nblocks = vtoc->v_part[i].p_size; 3130 } 3131 } 3132 3133 return (status); 3134 } 3135 3136 /* 3137 * Handle ioctls to a disk image. 
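 *
 * Geometry and VTOC ioctls are handled using the geometry and label of
 * the disk image. DKIOCSEXTVTOC and DKIOCSETEFI update the label stored
 * in the image; the geometry is then revalidated and the devid, if any,
 * is rewritten so that it remains at the right location.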
3138 * 3139 * Return Values 3140 * 0 - Indicates that there are no errors 3141 * != 0 - Disk operation returned an error 3142 */ 3143 static int 3144 vd_do_dskimg_ioctl(vd_t *vd, int cmd, void *ioctl_arg) 3145 { 3146 struct dk_label label; 3147 struct dk_geom *geom; 3148 struct extvtoc *vtoc; 3149 dk_efi_t *efi; 3150 int rc, rval; 3151 3152 ASSERT(VD_DSKIMG(vd)); 3153 3154 switch (cmd) { 3155 3156 case DKIOCGGEOM: 3157 ASSERT(ioctl_arg != NULL); 3158 geom = (struct dk_geom *)ioctl_arg; 3159 3160 rc = vd_dskimg_validate_geometry(vd); 3161 if (rc != 0 && rc != EINVAL) 3162 return (rc); 3163 bcopy(&vd->dk_geom, geom, sizeof (struct dk_geom)); 3164 return (0); 3165 3166 case DKIOCGEXTVTOC: 3167 ASSERT(ioctl_arg != NULL); 3168 vtoc = (struct extvtoc *)ioctl_arg; 3169 3170 rc = vd_dskimg_validate_geometry(vd); 3171 if (rc != 0 && rc != EINVAL) 3172 return (rc); 3173 bcopy(&vd->vtoc, vtoc, sizeof (struct extvtoc)); 3174 return (0); 3175 3176 case DKIOCSGEOM: 3177 ASSERT(ioctl_arg != NULL); 3178 geom = (struct dk_geom *)ioctl_arg; 3179 3180 if (geom->dkg_nhead == 0 || geom->dkg_nsect == 0) 3181 return (EINVAL); 3182 3183 /* 3184 * The current device geometry is not updated, just the driver 3185 * "notion" of it. The device geometry will be effectively 3186 * updated when a label is written to the device during a next 3187 * DKIOCSEXTVTOC. 3188 */ 3189 bcopy(ioctl_arg, &vd->dk_geom, sizeof (vd->dk_geom)); 3190 return (0); 3191 3192 case DKIOCSEXTVTOC: 3193 ASSERT(ioctl_arg != NULL); 3194 ASSERT(vd->dk_geom.dkg_nhead != 0 && 3195 vd->dk_geom.dkg_nsect != 0); 3196 vtoc = (struct extvtoc *)ioctl_arg; 3197 3198 if (vtoc->v_sanity != VTOC_SANE || 3199 vtoc->v_sectorsz != DEV_BSIZE || 3200 vtoc->v_nparts != V_NUMPAR) 3201 return (EINVAL); 3202 3203 vd_vtocgeom_to_label(vtoc, &vd->dk_geom, &label); 3204 3205 /* write label to the disk image */ 3206 if ((rc = vd_dskimg_set_vtoc(vd, &label)) != 0) 3207 return (rc); 3208 3209 break; 3210 3211 case DKIOCFLUSHWRITECACHE: 3212 if (vd->file) 3213 return (VOP_FSYNC(vd->file_vnode, FSYNC, kcred, NULL)); 3214 else 3215 return (ldi_ioctl(vd->ldi_handle[0], cmd, 3216 (intptr_t)ioctl_arg, vd->open_flags | FKIOCTL, 3217 kcred, &rval)); 3218 3219 case DKIOCGETEFI: 3220 ASSERT(ioctl_arg != NULL); 3221 efi = (dk_efi_t *)ioctl_arg; 3222 3223 if (vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BREAD, 3224 (caddr_t)efi->dki_data, efi->dki_lba, efi->dki_length) < 0) 3225 return (EIO); 3226 3227 return (0); 3228 3229 case DKIOCSETEFI: 3230 ASSERT(ioctl_arg != NULL); 3231 efi = (dk_efi_t *)ioctl_arg; 3232 3233 if (vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BWRITE, 3234 (caddr_t)efi->dki_data, efi->dki_lba, efi->dki_length) < 0) 3235 return (EIO); 3236 3237 break; 3238 3239 3240 default: 3241 return (ENOTSUP); 3242 } 3243 3244 ASSERT(cmd == DKIOCSEXTVTOC || cmd == DKIOCSETEFI); 3245 3246 /* label has changed, revalidate the geometry */ 3247 (void) vd_dskimg_validate_geometry(vd); 3248 3249 /* 3250 * The disk geometry may have changed, so we need to write 3251 * the devid (if there is one) so that it is stored at the 3252 * right location. 3253 */ 3254 if (vd_dskimg_write_devid(vd, vd->dskimg_devid) != 0) { 3255 PR0("Fail to write devid"); 3256 } 3257 3258 return (0); 3259 } 3260 3261 static int 3262 vd_backend_ioctl(vd_t *vd, int cmd, caddr_t arg) 3263 { 3264 int rval = 0, status; 3265 struct vtoc vtoc; 3266 3267 /* 3268 * Call the appropriate function to execute the ioctl depending 3269 * on the type of vdisk. 
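 *
 * Single-slice disks go to vd_do_slice_ioctl(), disk images (files or
 * volumes exported as full disks) go to vd_do_dskimg_ioctl(), and disk
 * devices exported as full disks go straight to ldi_ioctl(), falling
 * back from the extended VTOC ioctls to the regular VTOC ioctls when
 * the underlying driver returns ENOTTY.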
3270 */ 3271 if (vd->vdisk_type == VD_DISK_TYPE_SLICE) { 3272 3273 /* slice, file or volume exported as a single slice disk */ 3274 status = vd_do_slice_ioctl(vd, cmd, arg); 3275 3276 } else if (VD_DSKIMG(vd)) { 3277 3278 /* file or volume exported as a full disk */ 3279 status = vd_do_dskimg_ioctl(vd, cmd, arg); 3280 3281 } else { 3282 3283 /* disk device exported as a full disk */ 3284 status = ldi_ioctl(vd->ldi_handle[0], cmd, (intptr_t)arg, 3285 vd->open_flags | FKIOCTL, kcred, &rval); 3286 3287 /* 3288 * By default VTOC ioctls are done using ioctls for the 3289 * extended VTOC. Some drivers (in particular non-Sun drivers) 3290 * may not support these ioctls. In that case, we fallback to 3291 * the regular VTOC ioctls. 3292 */ 3293 if (status == ENOTTY) { 3294 switch (cmd) { 3295 3296 case DKIOCGEXTVTOC: 3297 cmd = DKIOCGVTOC; 3298 status = ldi_ioctl(vd->ldi_handle[0], cmd, 3299 (intptr_t)&vtoc, vd->open_flags | FKIOCTL, 3300 kcred, &rval); 3301 vtoctoextvtoc(vtoc, 3302 (*(struct extvtoc *)(void *)arg)); 3303 break; 3304 3305 case DKIOCSEXTVTOC: 3306 cmd = DKIOCSVTOC; 3307 extvtoctovtoc((*(struct extvtoc *)(void *)arg), 3308 vtoc); 3309 status = ldi_ioctl(vd->ldi_handle[0], cmd, 3310 (intptr_t)&vtoc, vd->open_flags | FKIOCTL, 3311 kcred, &rval); 3312 break; 3313 } 3314 } 3315 } 3316 3317 #ifdef DEBUG 3318 if (rval != 0) { 3319 PR0("ioctl %x set rval = %d, which is not being returned" 3320 " to caller", cmd, rval); 3321 } 3322 #endif /* DEBUG */ 3323 3324 return (status); 3325 } 3326 3327 /* 3328 * Description: 3329 * This is the function that processes the ioctl requests (farming it 3330 * out to functions that handle slices, files or whole disks) 3331 * 3332 * Return Values 3333 * 0 - ioctl operation completed successfully 3334 * != 0 - The LDC error value encountered 3335 * (propagated back up the call stack as a task error) 3336 * 3337 * Side Effect 3338 * sets request->status to the return value of the ioctl function. 3339 */ 3340 static int 3341 vd_do_ioctl(vd_t *vd, vd_dring_payload_t *request, void* buf, vd_ioctl_t *ioctl) 3342 { 3343 int status = 0; 3344 size_t nbytes = request->nbytes; /* modifiable copy */ 3345 3346 3347 ASSERT(request->slice < vd->nslices); 3348 PR0("Performing %s", ioctl->operation_name); 3349 3350 /* Get data from client and convert, if necessary */ 3351 if (ioctl->copyin != NULL) { 3352 ASSERT(nbytes != 0 && buf != NULL); 3353 PR1("Getting \"arg\" data from client"); 3354 if ((status = ldc_mem_copy(vd->ldc_handle, buf, 0, &nbytes, 3355 request->cookie, request->ncookies, 3356 LDC_COPY_IN)) != 0) { 3357 PR0("ldc_mem_copy() returned errno %d " 3358 "copying from client", status); 3359 return (status); 3360 } 3361 3362 /* Convert client's data, if necessary */ 3363 if (ioctl->copyin == VD_IDENTITY_IN) { 3364 /* use client buffer */ 3365 ioctl->arg = buf; 3366 } else { 3367 /* convert client vdisk operation data to ioctl data */ 3368 status = (ioctl->copyin)(buf, nbytes, 3369 (void *)ioctl->arg); 3370 if (status != 0) { 3371 request->status = status; 3372 return (0); 3373 } 3374 } 3375 } 3376 3377 if (ioctl->operation == VD_OP_SCSICMD) { 3378 struct uscsi_cmd *uscsi = (struct uscsi_cmd *)ioctl->arg; 3379 3380 /* check write permission */ 3381 if (!(vd->open_flags & FWRITE) && 3382 !(uscsi->uscsi_flags & USCSI_READ)) { 3383 PR0("uscsi fails because backend is opened read-only"); 3384 request->status = EROFS; 3385 return (0); 3386 } 3387 } 3388 3389 /* 3390 * Send the ioctl to the disk backend. 
3391 */ 3392 request->status = vd_backend_ioctl(vd, ioctl->cmd, ioctl->arg); 3393 3394 if (request->status != 0) { 3395 PR0("ioctl(%s) = errno %d", ioctl->cmd_name, request->status); 3396 if (ioctl->operation == VD_OP_SCSICMD && 3397 ((struct uscsi_cmd *)ioctl->arg)->uscsi_status != 0) 3398 /* 3399 * USCSICMD has reported an error and the uscsi_status 3400 * field is not zero. This means that the SCSI command 3401 * has completed but it has an error. So we should 3402 * mark the VD operation has succesfully completed 3403 * and clients can check the SCSI status field for 3404 * SCSI errors. 3405 */ 3406 request->status = 0; 3407 else 3408 return (0); 3409 } 3410 3411 /* Convert data and send to client, if necessary */ 3412 if (ioctl->copyout != NULL) { 3413 ASSERT(nbytes != 0 && buf != NULL); 3414 PR1("Sending \"arg\" data to client"); 3415 3416 /* Convert ioctl data to vdisk operation data, if necessary */ 3417 if (ioctl->copyout != VD_IDENTITY_OUT) 3418 (ioctl->copyout)((void *)ioctl->arg, buf); 3419 3420 if ((status = ldc_mem_copy(vd->ldc_handle, buf, 0, &nbytes, 3421 request->cookie, request->ncookies, 3422 LDC_COPY_OUT)) != 0) { 3423 PR0("ldc_mem_copy() returned errno %d " 3424 "copying to client", status); 3425 return (status); 3426 } 3427 } 3428 3429 return (status); 3430 } 3431 3432 #define RNDSIZE(expr) P2ROUNDUP(sizeof (expr), sizeof (uint64_t)) 3433 3434 /* 3435 * Description: 3436 * This generic function is called by the task queue to complete 3437 * the processing of the tasks. The specific completion function 3438 * is passed in as a field in the task pointer. 3439 * 3440 * Parameters: 3441 * arg - opaque pointer to structure containing task to be completed 3442 * 3443 * Return Values 3444 * None 3445 */ 3446 static void 3447 vd_complete(void *arg) 3448 { 3449 vd_task_t *task = (vd_task_t *)arg; 3450 3451 ASSERT(task != NULL); 3452 ASSERT(task->status == EINPROGRESS); 3453 ASSERT(task->completef != NULL); 3454 3455 task->status = task->completef(task); 3456 if (task->status) 3457 PR0("%s: Error %d completing task", __func__, task->status); 3458 3459 /* Now notify the vDisk client */ 3460 vd_complete_notify(task); 3461 } 3462 3463 static int 3464 vd_ioctl(vd_task_t *task) 3465 { 3466 int i, status; 3467 void *buf = NULL; 3468 struct dk_geom dk_geom = {0}; 3469 struct extvtoc vtoc = {0}; 3470 struct dk_efi dk_efi = {0}; 3471 struct uscsi_cmd uscsi = {0}; 3472 vd_t *vd = task->vd; 3473 vd_dring_payload_t *request = task->request; 3474 vd_ioctl_t ioctl[] = { 3475 /* Command (no-copy) operations */ 3476 {VD_OP_FLUSH, STRINGIZE(VD_OP_FLUSH), 0, 3477 DKIOCFLUSHWRITECACHE, STRINGIZE(DKIOCFLUSHWRITECACHE), 3478 NULL, NULL, NULL, B_TRUE}, 3479 3480 /* "Get" (copy-out) operations */ 3481 {VD_OP_GET_WCE, STRINGIZE(VD_OP_GET_WCE), RNDSIZE(int), 3482 DKIOCGETWCE, STRINGIZE(DKIOCGETWCE), 3483 NULL, VD_IDENTITY_IN, VD_IDENTITY_OUT, B_FALSE}, 3484 {VD_OP_GET_DISKGEOM, STRINGIZE(VD_OP_GET_DISKGEOM), 3485 RNDSIZE(vd_geom_t), 3486 DKIOCGGEOM, STRINGIZE(DKIOCGGEOM), 3487 &dk_geom, NULL, dk_geom2vd_geom, B_FALSE}, 3488 {VD_OP_GET_VTOC, STRINGIZE(VD_OP_GET_VTOC), RNDSIZE(vd_vtoc_t), 3489 DKIOCGEXTVTOC, STRINGIZE(DKIOCGEXTVTOC), 3490 &vtoc, NULL, vtoc2vd_vtoc, B_FALSE}, 3491 {VD_OP_GET_EFI, STRINGIZE(VD_OP_GET_EFI), RNDSIZE(vd_efi_t), 3492 DKIOCGETEFI, STRINGIZE(DKIOCGETEFI), 3493 &dk_efi, vd_get_efi_in, vd_get_efi_out, B_FALSE}, 3494 3495 /* "Set" (copy-in) operations */ 3496 {VD_OP_SET_WCE, STRINGIZE(VD_OP_SET_WCE), RNDSIZE(int), 3497 DKIOCSETWCE, STRINGIZE(DKIOCSETWCE), 3498 NULL, VD_IDENTITY_IN, 
VD_IDENTITY_OUT, B_TRUE}, 3499 {VD_OP_SET_DISKGEOM, STRINGIZE(VD_OP_SET_DISKGEOM), 3500 RNDSIZE(vd_geom_t), 3501 DKIOCSGEOM, STRINGIZE(DKIOCSGEOM), 3502 &dk_geom, vd_geom2dk_geom, NULL, B_TRUE}, 3503 {VD_OP_SET_VTOC, STRINGIZE(VD_OP_SET_VTOC), RNDSIZE(vd_vtoc_t), 3504 DKIOCSEXTVTOC, STRINGIZE(DKIOCSEXTVTOC), 3505 &vtoc, vd_vtoc2vtoc, NULL, B_TRUE}, 3506 {VD_OP_SET_EFI, STRINGIZE(VD_OP_SET_EFI), RNDSIZE(vd_efi_t), 3507 DKIOCSETEFI, STRINGIZE(DKIOCSETEFI), 3508 &dk_efi, vd_set_efi_in, vd_set_efi_out, B_TRUE}, 3509 3510 {VD_OP_SCSICMD, STRINGIZE(VD_OP_SCSICMD), RNDSIZE(vd_scsi_t), 3511 USCSICMD, STRINGIZE(USCSICMD), 3512 &uscsi, vd_scsicmd_in, vd_scsicmd_out, B_FALSE}, 3513 }; 3514 size_t nioctls = (sizeof (ioctl))/(sizeof (ioctl[0])); 3515 3516 3517 ASSERT(vd != NULL); 3518 ASSERT(request != NULL); 3519 ASSERT(request->slice < vd->nslices); 3520 3521 /* 3522 * Determine ioctl corresponding to caller's "operation" and 3523 * validate caller's "nbytes" 3524 */ 3525 for (i = 0; i < nioctls; i++) { 3526 if (request->operation == ioctl[i].operation) { 3527 /* LDC memory operations require 8-byte multiples */ 3528 ASSERT(ioctl[i].nbytes % sizeof (uint64_t) == 0); 3529 3530 if (request->operation == VD_OP_GET_EFI || 3531 request->operation == VD_OP_SET_EFI || 3532 request->operation == VD_OP_SCSICMD) { 3533 if (request->nbytes >= ioctl[i].nbytes) 3534 break; 3535 PR0("%s: Expected at least nbytes = %lu, " 3536 "got %lu", ioctl[i].operation_name, 3537 ioctl[i].nbytes, request->nbytes); 3538 return (EINVAL); 3539 } 3540 3541 if (request->nbytes != ioctl[i].nbytes) { 3542 PR0("%s: Expected nbytes = %lu, got %lu", 3543 ioctl[i].operation_name, ioctl[i].nbytes, 3544 request->nbytes); 3545 return (EINVAL); 3546 } 3547 3548 break; 3549 } 3550 } 3551 ASSERT(i < nioctls); /* because "operation" already validated */ 3552 3553 if (!(vd->open_flags & FWRITE) && ioctl[i].write) { 3554 PR0("%s fails because backend is opened read-only", 3555 ioctl[i].operation_name); 3556 request->status = EROFS; 3557 return (0); 3558 } 3559 3560 if (request->nbytes) 3561 buf = kmem_zalloc(request->nbytes, KM_SLEEP); 3562 status = vd_do_ioctl(vd, request, buf, &ioctl[i]); 3563 if (request->nbytes) 3564 kmem_free(buf, request->nbytes); 3565 3566 return (status); 3567 } 3568 3569 static int 3570 vd_get_devid(vd_task_t *task) 3571 { 3572 vd_t *vd = task->vd; 3573 vd_dring_payload_t *request = task->request; 3574 vd_devid_t *vd_devid; 3575 impl_devid_t *devid; 3576 int status, bufid_len, devid_len, len, sz; 3577 int bufbytes; 3578 3579 PR1("Get Device ID, nbytes=%ld", request->nbytes); 3580 3581 if (vd->vdisk_type == VD_DISK_TYPE_SLICE) { 3582 /* 3583 * We don't support devid for single-slice disks because we 3584 * have no space to store a fabricated devid and for physical 3585 * disk slices, we can't use the devid of the disk otherwise 3586 * exporting multiple slices from the same disk will produce 3587 * the same devids. 
3588 */ 3589 PR2("No Device ID for slices"); 3590 request->status = ENOTSUP; 3591 return (0); 3592 } 3593 3594 if (VD_DSKIMG(vd)) { 3595 if (vd->dskimg_devid == NULL) { 3596 PR2("No Device ID"); 3597 request->status = ENOENT; 3598 return (0); 3599 } else { 3600 sz = ddi_devid_sizeof(vd->dskimg_devid); 3601 devid = kmem_alloc(sz, KM_SLEEP); 3602 bcopy(vd->dskimg_devid, devid, sz); 3603 } 3604 } else { 3605 if (ddi_lyr_get_devid(vd->dev[request->slice], 3606 (ddi_devid_t *)&devid) != DDI_SUCCESS) { 3607 PR2("No Device ID"); 3608 request->status = ENOENT; 3609 return (0); 3610 } 3611 } 3612 3613 bufid_len = request->nbytes - sizeof (vd_devid_t) + 1; 3614 devid_len = DEVID_GETLEN(devid); 3615 3616 /* 3617 * Save the buffer size here for use in deallocation. 3618 * The actual number of bytes copied is returned in 3619 * the 'nbytes' field of the request structure. 3620 */ 3621 bufbytes = request->nbytes; 3622 3623 vd_devid = kmem_zalloc(bufbytes, KM_SLEEP); 3624 vd_devid->length = devid_len; 3625 vd_devid->type = DEVID_GETTYPE(devid); 3626 3627 len = (devid_len > bufid_len)? bufid_len : devid_len; 3628 3629 bcopy(devid->did_id, vd_devid->id, len); 3630 3631 request->status = 0; 3632 3633 /* LDC memory operations require 8-byte multiples */ 3634 ASSERT(request->nbytes % sizeof (uint64_t) == 0); 3635 3636 if ((status = ldc_mem_copy(vd->ldc_handle, (caddr_t)vd_devid, 0, 3637 &request->nbytes, request->cookie, request->ncookies, 3638 LDC_COPY_OUT)) != 0) { 3639 PR0("ldc_mem_copy() returned errno %d copying to client", 3640 status); 3641 } 3642 PR1("post mem_copy: nbytes=%ld", request->nbytes); 3643 3644 kmem_free(vd_devid, bufbytes); 3645 ddi_devid_free((ddi_devid_t)devid); 3646 3647 return (status); 3648 } 3649 3650 static int 3651 vd_scsi_reset(vd_t *vd) 3652 { 3653 int rval, status; 3654 struct uscsi_cmd uscsi = { 0 }; 3655 3656 uscsi.uscsi_flags = vd_scsi_debug | USCSI_RESET; 3657 uscsi.uscsi_timeout = vd_scsi_rdwr_timeout; 3658 3659 status = ldi_ioctl(vd->ldi_handle[0], USCSICMD, (intptr_t)&uscsi, 3660 (vd->open_flags | FKIOCTL), kcred, &rval); 3661 3662 return (status); 3663 } 3664 3665 static int 3666 vd_reset(vd_task_t *task) 3667 { 3668 vd_t *vd = task->vd; 3669 vd_dring_payload_t *request = task->request; 3670 3671 ASSERT(request->operation == VD_OP_RESET); 3672 ASSERT(vd->scsi); 3673 3674 PR0("Performing VD_OP_RESET"); 3675 3676 if (request->nbytes != 0) { 3677 PR0("VD_OP_RESET: Expected nbytes = 0, got %lu", 3678 request->nbytes); 3679 return (EINVAL); 3680 } 3681 3682 request->status = vd_scsi_reset(vd); 3683 3684 return (0); 3685 } 3686 3687 static int 3688 vd_get_capacity(vd_task_t *task) 3689 { 3690 int rv; 3691 size_t nbytes; 3692 vd_t *vd = task->vd; 3693 vd_dring_payload_t *request = task->request; 3694 vd_capacity_t vd_cap = { 0 }; 3695 3696 ASSERT(request->operation == VD_OP_GET_CAPACITY); 3697 3698 PR0("Performing VD_OP_GET_CAPACITY"); 3699 3700 nbytes = request->nbytes; 3701 3702 if (nbytes != RNDSIZE(vd_capacity_t)) { 3703 PR0("VD_OP_GET_CAPACITY: Expected nbytes = %lu, got %lu", 3704 RNDSIZE(vd_capacity_t), nbytes); 3705 return (EINVAL); 3706 } 3707 3708 /* 3709 * Check the backend size in case it has changed. If the check fails 3710 * then we will return the last known size. 
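 *
 * The reply is a vd_capacity_t carrying vdisk_block_size and vdisk_size,
 * copied out to the client with ldc_mem_copy().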
3711 */ 3712 3713 (void) vd_backend_check_size(vd); 3714 ASSERT(vd->vdisk_size != 0); 3715 3716 request->status = 0; 3717 3718 vd_cap.vdisk_block_size = vd->vdisk_block_size; 3719 vd_cap.vdisk_size = vd->vdisk_size; 3720 3721 if ((rv = ldc_mem_copy(vd->ldc_handle, (char *)&vd_cap, 0, &nbytes, 3722 request->cookie, request->ncookies, LDC_COPY_OUT)) != 0) { 3723 PR0("ldc_mem_copy() returned errno %d copying to client", rv); 3724 return (rv); 3725 } 3726 3727 return (0); 3728 } 3729 3730 static int 3731 vd_get_access(vd_task_t *task) 3732 { 3733 uint64_t access; 3734 int rv, rval = 0; 3735 size_t nbytes; 3736 vd_t *vd = task->vd; 3737 vd_dring_payload_t *request = task->request; 3738 3739 ASSERT(request->operation == VD_OP_GET_ACCESS); 3740 ASSERT(vd->scsi); 3741 3742 PR0("Performing VD_OP_GET_ACCESS"); 3743 3744 nbytes = request->nbytes; 3745 3746 if (nbytes != sizeof (uint64_t)) { 3747 PR0("VD_OP_GET_ACCESS: Expected nbytes = %lu, got %lu", 3748 sizeof (uint64_t), nbytes); 3749 return (EINVAL); 3750 } 3751 3752 request->status = ldi_ioctl(vd->ldi_handle[request->slice], MHIOCSTATUS, 3753 NULL, (vd->open_flags | FKIOCTL), kcred, &rval); 3754 3755 if (request->status != 0) 3756 return (0); 3757 3758 access = (rval == 0)? VD_ACCESS_ALLOWED : VD_ACCESS_DENIED; 3759 3760 if ((rv = ldc_mem_copy(vd->ldc_handle, (char *)&access, 0, &nbytes, 3761 request->cookie, request->ncookies, LDC_COPY_OUT)) != 0) { 3762 PR0("ldc_mem_copy() returned errno %d copying to client", rv); 3763 return (rv); 3764 } 3765 3766 return (0); 3767 } 3768 3769 static int 3770 vd_set_access(vd_task_t *task) 3771 { 3772 uint64_t flags; 3773 int rv, rval; 3774 size_t nbytes; 3775 vd_t *vd = task->vd; 3776 vd_dring_payload_t *request = task->request; 3777 3778 ASSERT(request->operation == VD_OP_SET_ACCESS); 3779 ASSERT(vd->scsi); 3780 3781 nbytes = request->nbytes; 3782 3783 if (nbytes != sizeof (uint64_t)) { 3784 PR0("VD_OP_SET_ACCESS: Expected nbytes = %lu, got %lu", 3785 sizeof (uint64_t), nbytes); 3786 return (EINVAL); 3787 } 3788 3789 if ((rv = ldc_mem_copy(vd->ldc_handle, (char *)&flags, 0, &nbytes, 3790 request->cookie, request->ncookies, LDC_COPY_IN)) != 0) { 3791 PR0("ldc_mem_copy() returned errno %d copying from client", rv); 3792 return (rv); 3793 } 3794 3795 if (flags == VD_ACCESS_SET_CLEAR) { 3796 PR0("Performing VD_OP_SET_ACCESS (CLEAR)"); 3797 request->status = ldi_ioctl(vd->ldi_handle[request->slice], 3798 MHIOCRELEASE, NULL, (vd->open_flags | FKIOCTL), kcred, 3799 &rval); 3800 if (request->status == 0) 3801 vd->ownership = B_FALSE; 3802 return (0); 3803 } 3804 3805 /* 3806 * As per the VIO spec, the PREEMPT and PRESERVE flags are only valid 3807 * when the EXCLUSIVE flag is set. 3808 */ 3809 if (!(flags & VD_ACCESS_SET_EXCLUSIVE)) { 3810 PR0("Invalid VD_OP_SET_ACCESS flags: 0x%lx", flags); 3811 request->status = EINVAL; 3812 return (0); 3813 } 3814 3815 switch (flags & (VD_ACCESS_SET_PREEMPT | VD_ACCESS_SET_PRESERVE)) { 3816 3817 case VD_ACCESS_SET_PREEMPT | VD_ACCESS_SET_PRESERVE: 3818 /* 3819 * Flags EXCLUSIVE and PREEMPT and PRESERVE. We have to 3820 * acquire exclusive access rights, preserve them and we 3821 * can use preemption. So we can use the MHIOCTKNOWN ioctl. 3822 */ 3823 PR0("Performing VD_OP_SET_ACCESS (EXCLUSIVE|PREEMPT|PRESERVE)"); 3824 request->status = ldi_ioctl(vd->ldi_handle[request->slice], 3825 MHIOCTKOWN, NULL, (vd->open_flags | FKIOCTL), kcred, &rval); 3826 break; 3827 3828 case VD_ACCESS_SET_PRESERVE: 3829 /* 3830 * Flags EXCLUSIVE and PRESERVE. 
We have to acquire exclusive 3831 * access rights and preserve them, but not preempt any other 3832 * host. So we need to use the MHIOCTKOWN ioctl to enable the 3833 * "preserve" feature but we can not called it directly 3834 * because it uses preemption. So before that, we use the 3835 * MHIOCQRESERVE ioctl to ensure we can get exclusive rights 3836 * without preempting anyone. 3837 */ 3838 PR0("Performing VD_OP_SET_ACCESS (EXCLUSIVE|PRESERVE)"); 3839 request->status = ldi_ioctl(vd->ldi_handle[request->slice], 3840 MHIOCQRESERVE, NULL, (vd->open_flags | FKIOCTL), kcred, 3841 &rval); 3842 if (request->status != 0) 3843 break; 3844 request->status = ldi_ioctl(vd->ldi_handle[request->slice], 3845 MHIOCTKOWN, NULL, (vd->open_flags | FKIOCTL), kcred, &rval); 3846 break; 3847 3848 case VD_ACCESS_SET_PREEMPT: 3849 /* 3850 * Flags EXCLUSIVE and PREEMPT. We have to acquire exclusive 3851 * access rights and we can use preemption. So we try to do 3852 * a SCSI reservation, if it fails we reset the disk to clear 3853 * any reservation and we try to reserve again. 3854 */ 3855 PR0("Performing VD_OP_SET_ACCESS (EXCLUSIVE|PREEMPT)"); 3856 request->status = ldi_ioctl(vd->ldi_handle[request->slice], 3857 MHIOCQRESERVE, NULL, (vd->open_flags | FKIOCTL), kcred, 3858 &rval); 3859 if (request->status == 0) 3860 break; 3861 3862 /* reset the disk */ 3863 (void) vd_scsi_reset(vd); 3864 3865 /* try again even if the reset has failed */ 3866 request->status = ldi_ioctl(vd->ldi_handle[request->slice], 3867 MHIOCQRESERVE, NULL, (vd->open_flags | FKIOCTL), kcred, 3868 &rval); 3869 break; 3870 3871 case 0: 3872 /* Flag EXCLUSIVE only. Just issue a SCSI reservation */ 3873 PR0("Performing VD_OP_SET_ACCESS (EXCLUSIVE)"); 3874 request->status = ldi_ioctl(vd->ldi_handle[request->slice], 3875 MHIOCQRESERVE, NULL, (vd->open_flags | FKIOCTL), kcred, 3876 &rval); 3877 break; 3878 } 3879 3880 if (request->status == 0) 3881 vd->ownership = B_TRUE; 3882 else 3883 PR0("VD_OP_SET_ACCESS: error %d", request->status); 3884 3885 return (0); 3886 } 3887 3888 static void 3889 vd_reset_access(vd_t *vd) 3890 { 3891 int status, rval; 3892 3893 if (vd->file || vd->volume || !vd->ownership) 3894 return; 3895 3896 PR0("Releasing disk ownership"); 3897 status = ldi_ioctl(vd->ldi_handle[0], MHIOCRELEASE, NULL, 3898 (vd->open_flags | FKIOCTL), kcred, &rval); 3899 3900 /* 3901 * An EACCES failure means that there is a reservation conflict, 3902 * so we are not the owner of the disk anymore. 3903 */ 3904 if (status == 0 || status == EACCES) { 3905 vd->ownership = B_FALSE; 3906 return; 3907 } 3908 3909 PR0("Fail to release ownership, error %d", status); 3910 3911 /* 3912 * We have failed to release the ownership, try to reset the disk 3913 * to release reservations. 3914 */ 3915 PR0("Resetting disk"); 3916 status = vd_scsi_reset(vd); 3917 3918 if (status != 0) 3919 PR0("Fail to reset disk, error %d", status); 3920 3921 /* whatever the result of the reset is, we try the release again */ 3922 status = ldi_ioctl(vd->ldi_handle[0], MHIOCRELEASE, NULL, 3923 (vd->open_flags | FKIOCTL), kcred, &rval); 3924 3925 if (status == 0 || status == EACCES) { 3926 vd->ownership = B_FALSE; 3927 return; 3928 } 3929 3930 PR0("Fail to release ownership, error %d", status); 3931 3932 /* 3933 * At this point we have done our best to try to reset the 3934 * access rights to the disk and we don't know if we still 3935 * own a reservation and if any mechanism to preserve the 3936 * ownership is still in place. 
The ultimate solution would 3937 * be to reset the system but this is usually not what we 3938 * want to happen. 3939 */ 3940 3941 if (vd_reset_access_failure == A_REBOOT) { 3942 cmn_err(CE_WARN, VD_RESET_ACCESS_FAILURE_MSG 3943 ", rebooting the system", vd->device_path); 3944 (void) uadmin(A_SHUTDOWN, AD_BOOT, NULL); 3945 } else if (vd_reset_access_failure == A_DUMP) { 3946 panic(VD_RESET_ACCESS_FAILURE_MSG, vd->device_path); 3947 } 3948 3949 cmn_err(CE_WARN, VD_RESET_ACCESS_FAILURE_MSG, vd->device_path); 3950 } 3951 3952 /* 3953 * Define the supported operations once the functions for performing them have 3954 * been defined 3955 */ 3956 static const vds_operation_t vds_operation[] = { 3957 #define X(_s) #_s, _s 3958 {X(VD_OP_BREAD), vd_start_bio, vd_complete_bio}, 3959 {X(VD_OP_BWRITE), vd_start_bio, vd_complete_bio}, 3960 {X(VD_OP_FLUSH), vd_ioctl, NULL}, 3961 {X(VD_OP_GET_WCE), vd_ioctl, NULL}, 3962 {X(VD_OP_SET_WCE), vd_ioctl, NULL}, 3963 {X(VD_OP_GET_VTOC), vd_ioctl, NULL}, 3964 {X(VD_OP_SET_VTOC), vd_ioctl, NULL}, 3965 {X(VD_OP_GET_DISKGEOM), vd_ioctl, NULL}, 3966 {X(VD_OP_SET_DISKGEOM), vd_ioctl, NULL}, 3967 {X(VD_OP_GET_EFI), vd_ioctl, NULL}, 3968 {X(VD_OP_SET_EFI), vd_ioctl, NULL}, 3969 {X(VD_OP_GET_DEVID), vd_get_devid, NULL}, 3970 {X(VD_OP_SCSICMD), vd_ioctl, NULL}, 3971 {X(VD_OP_RESET), vd_reset, NULL}, 3972 {X(VD_OP_GET_CAPACITY), vd_get_capacity, NULL}, 3973 {X(VD_OP_SET_ACCESS), vd_set_access, NULL}, 3974 {X(VD_OP_GET_ACCESS), vd_get_access, NULL}, 3975 #undef X 3976 }; 3977 3978 static const size_t vds_noperations = 3979 (sizeof (vds_operation))/(sizeof (vds_operation[0])); 3980 3981 /* 3982 * Process a task specifying a client I/O request 3983 * 3984 * Parameters: 3985 * task - structure containing the request sent from client 3986 * 3987 * Return Value 3988 * 0 - success 3989 * ENOTSUP - Unknown/Unsupported VD_OP_XXX operation 3990 * EINVAL - Invalid disk slice 3991 * != 0 - some other non-zero return value from start function 3992 */ 3993 static int 3994 vd_do_process_task(vd_task_t *task) 3995 { 3996 int i; 3997 vd_t *vd = task->vd; 3998 vd_dring_payload_t *request = task->request; 3999 4000 ASSERT(vd != NULL); 4001 ASSERT(request != NULL); 4002 4003 /* Find the requested operation */ 4004 for (i = 0; i < vds_noperations; i++) { 4005 if (request->operation == vds_operation[i].operation) { 4006 /* all operations should have a start func */ 4007 ASSERT(vds_operation[i].start != NULL); 4008 4009 task->completef = vds_operation[i].complete; 4010 break; 4011 } 4012 } 4013 4014 /* 4015 * We need to check that the requested operation is permitted 4016 * for the particular client that sent it or that the loop above 4017 * did not complete without finding the operation type (indicating 4018 * that the requested operation is unknown/unimplemented) 4019 */ 4020 if ((VD_OP_SUPPORTED(vd->operations, request->operation) == B_FALSE) || 4021 (i == vds_noperations)) { 4022 PR0("Unsupported operation %u", request->operation); 4023 request->status = ENOTSUP; 4024 return (0); 4025 } 4026 4027 /* Range-check slice */ 4028 if (request->slice >= vd->nslices && 4029 ((vd->vdisk_type != VD_DISK_TYPE_DISK && vd_slice_single_slice) || 4030 request->slice != VD_SLICE_NONE)) { 4031 PR0("Invalid \"slice\" %u (max %u) for virtual disk", 4032 request->slice, (vd->nslices - 1)); 4033 request->status = EINVAL; 4034 return (0); 4035 } 4036 4037 /* 4038 * Call the function pointer that starts the operation. 
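 *
 * For reference, the X() macro used to build vds_operation[] above
 * stringizes each opcode, so an entry written as
 * {X(VD_OP_BREAD), vd_start_bio, vd_complete_bio} expands to
 * (a sketch; the exact field names of vds_operation_t are defined
 * elsewhere in this file):
 *
 *	{ "VD_OP_BREAD", VD_OP_BREAD, vd_start_bio, vd_complete_bio }
 *
 * which is what lets the lookup loop above match request->operation
 * against the opcode and record the completion handler before the
 * start function is dispatched here.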
4039 */ 4040 return (vds_operation[i].start(task)); 4041 } 4042 4043 /* 4044 * Description: 4045 * This function is called by both the in-band and descriptor ring 4046 * message processing functions paths to actually execute the task 4047 * requested by the vDisk client. It in turn calls its worker 4048 * function, vd_do_process_task(), to carry our the request. 4049 * 4050 * Any transport errors (e.g. LDC errors, vDisk protocol errors) are 4051 * saved in the 'status' field of the task and are propagated back 4052 * up the call stack to trigger a NACK 4053 * 4054 * Any request errors (e.g. ENOTTY from an ioctl) are saved in 4055 * the 'status' field of the request and result in an ACK being sent 4056 * by the completion handler. 4057 * 4058 * Parameters: 4059 * task - structure containing the request sent from client 4060 * 4061 * Return Value 4062 * 0 - successful synchronous request. 4063 * != 0 - transport error (e.g. LDC errors, vDisk protocol) 4064 * EINPROGRESS - task will be finished in a completion handler 4065 */ 4066 static int 4067 vd_process_task(vd_task_t *task) 4068 { 4069 vd_t *vd = task->vd; 4070 int status; 4071 4072 DTRACE_PROBE1(task__start, vd_task_t *, task); 4073 4074 task->status = vd_do_process_task(task); 4075 4076 /* 4077 * If the task processing function returned EINPROGRESS indicating 4078 * that the task needs completing then schedule a taskq entry to 4079 * finish it now. 4080 * 4081 * Otherwise the task processing function returned either zero 4082 * indicating that the task was finished in the start function (and we 4083 * don't need to wait in a completion function) or the start function 4084 * returned an error - in both cases all that needs to happen is the 4085 * notification to the vDisk client higher up the call stack. 4086 * If the task was using a Descriptor Ring, we need to mark it as done 4087 * at this stage. 4088 */ 4089 if (task->status == EINPROGRESS) { 4090 /* Queue a task to complete the operation */ 4091 (void) ddi_taskq_dispatch(vd->completionq, vd_complete, 4092 task, DDI_SLEEP); 4093 4094 } else if (!vd->reset_state && (vd->xfer_mode == VIO_DRING_MODE_V1_0)) { 4095 /* Update the dring element if it's a dring client */ 4096 status = vd_mark_elem_done(vd, task->index, 4097 task->request->status, task->request->nbytes); 4098 if (status == ECONNRESET) 4099 vd_mark_in_reset(vd); 4100 else if (status == EACCES) 4101 vd_need_reset(vd, B_TRUE); 4102 } 4103 4104 return (task->status); 4105 } 4106 4107 /* 4108 * Return true if the "type", "subtype", and "env" fields of the "tag" first 4109 * argument match the corresponding remaining arguments; otherwise, return false 4110 */ 4111 boolean_t 4112 vd_msgtype(vio_msg_tag_t *tag, int type, int subtype, int env) 4113 { 4114 return ((tag->vio_msgtype == type) && 4115 (tag->vio_subtype == subtype) && 4116 (tag->vio_subtype_env == env)) ? B_TRUE : B_FALSE; 4117 } 4118 4119 /* 4120 * Check whether the major/minor version specified in "ver_msg" is supported 4121 * by this server. 
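 *
 * To illustrate the negotiation below, assume for the example only that
 * the highest entry in vds_version[] is major 1, minor 1 (the actual
 * table is defined elsewhere in this file):
 *
 *	client sends 1.3 -> minor clamped to 1, message ack'ed as 1.1
 *	client sends 2.0 -> message rewritten to 1.1 and nack'ed,
 *	                    inviting the client to retry with that version
 *	client sends 0.9 -> lower than any supported major, so the version
 *	                    is grounded to 0.0 and nack'ed, ending the
 *	                    negotiation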
4122 */ 4123 static boolean_t 4124 vds_supported_version(vio_ver_msg_t *ver_msg) 4125 { 4126 for (int i = 0; i < vds_num_versions; i++) { 4127 ASSERT(vds_version[i].major > 0); 4128 ASSERT((i == 0) || 4129 (vds_version[i].major < vds_version[i-1].major)); 4130 4131 /* 4132 * If the major versions match, adjust the minor version, if 4133 * necessary, down to the highest value supported by this 4134 * server and return true so this message will get "ack"ed; 4135 * the client should also support all minor versions lower 4136 * than the value it sent 4137 */ 4138 if (ver_msg->ver_major == vds_version[i].major) { 4139 if (ver_msg->ver_minor > vds_version[i].minor) { 4140 PR0("Adjusting minor version from %u to %u", 4141 ver_msg->ver_minor, vds_version[i].minor); 4142 ver_msg->ver_minor = vds_version[i].minor; 4143 } 4144 return (B_TRUE); 4145 } 4146 4147 /* 4148 * If the message contains a higher major version number, set 4149 * the message's major/minor versions to the current values 4150 * and return false, so this message will get "nack"ed with 4151 * these values, and the client will potentially try again 4152 * with the same or a lower version 4153 */ 4154 if (ver_msg->ver_major > vds_version[i].major) { 4155 ver_msg->ver_major = vds_version[i].major; 4156 ver_msg->ver_minor = vds_version[i].minor; 4157 return (B_FALSE); 4158 } 4159 4160 /* 4161 * Otherwise, the message's major version is less than the 4162 * current major version, so continue the loop to the next 4163 * (lower) supported version 4164 */ 4165 } 4166 4167 /* 4168 * No common version was found; "ground" the version pair in the 4169 * message to terminate negotiation 4170 */ 4171 ver_msg->ver_major = 0; 4172 ver_msg->ver_minor = 0; 4173 return (B_FALSE); 4174 } 4175 4176 /* 4177 * Process a version message from a client. vds expects to receive version 4178 * messages from clients seeking service, but never issues version messages 4179 * itself; therefore, vds can ACK or NACK client version messages, but does 4180 * not expect to receive version-message ACKs or NACKs (and will treat such 4181 * messages as invalid). 4182 */ 4183 static int 4184 vd_process_ver_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 4185 { 4186 vio_ver_msg_t *ver_msg = (vio_ver_msg_t *)msg; 4187 4188 4189 ASSERT(msglen >= sizeof (msg->tag)); 4190 4191 if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, 4192 VIO_VER_INFO)) { 4193 return (ENOMSG); /* not a version message */ 4194 } 4195 4196 if (msglen != sizeof (*ver_msg)) { 4197 PR0("Expected %lu-byte version message; " 4198 "received %lu bytes", sizeof (*ver_msg), msglen); 4199 return (EBADMSG); 4200 } 4201 4202 if (ver_msg->dev_class != VDEV_DISK) { 4203 PR0("Expected device class %u (disk); received %u", 4204 VDEV_DISK, ver_msg->dev_class); 4205 return (EBADMSG); 4206 } 4207 4208 /* 4209 * We're talking to the expected kind of client; set our device class 4210 * for "ack/nack" back to the client 4211 */ 4212 ver_msg->dev_class = VDEV_DISK_SERVER; 4213 4214 /* 4215 * Check whether the (valid) version message specifies a version 4216 * supported by this server. 
If the version is not supported, return 4217 * EBADMSG so the message will get "nack"ed; vds_supported_version() 4218 * will have updated the message with a supported version for the 4219 * client to consider 4220 */ 4221 if (!vds_supported_version(ver_msg)) 4222 return (EBADMSG); 4223 4224 4225 /* 4226 * A version has been agreed upon; use the client's SID for 4227 * communication on this channel now 4228 */ 4229 ASSERT(!(vd->initialized & VD_SID)); 4230 vd->sid = ver_msg->tag.vio_sid; 4231 vd->initialized |= VD_SID; 4232 4233 /* 4234 * Store the negotiated major and minor version values in the "vd" data 4235 * structure so that we can check if certain operations are supported 4236 * by the client. 4237 */ 4238 vd->version.major = ver_msg->ver_major; 4239 vd->version.minor = ver_msg->ver_minor; 4240 4241 PR0("Using major version %u, minor version %u", 4242 ver_msg->ver_major, ver_msg->ver_minor); 4243 return (0); 4244 } 4245 4246 static void 4247 vd_set_exported_operations(vd_t *vd) 4248 { 4249 vd->operations = 0; /* clear field */ 4250 4251 /* 4252 * We need to check from the highest version supported to the 4253 * lowest because versions with a higher minor number implicitly 4254 * support versions with a lower minor number. 4255 */ 4256 if (vio_ver_is_supported(vd->version, 1, 1)) { 4257 ASSERT(vd->open_flags & FREAD); 4258 vd->operations |= VD_OP_MASK_READ | (1 << VD_OP_GET_CAPACITY); 4259 4260 if (vd->open_flags & FWRITE) 4261 vd->operations |= VD_OP_MASK_WRITE; 4262 4263 if (vd->scsi) 4264 vd->operations |= VD_OP_MASK_SCSI; 4265 4266 if (VD_DSKIMG(vd) && vd_dskimg_is_iso_image(vd)) { 4267 /* 4268 * can't write to ISO images, make sure that write 4269 * support is not set in case administrator did not 4270 * use "options=ro" when doing an ldm add-vdsdev 4271 */ 4272 vd->operations &= ~VD_OP_MASK_WRITE; 4273 } 4274 } else if (vio_ver_is_supported(vd->version, 1, 0)) { 4275 vd->operations = VD_OP_MASK_READ | VD_OP_MASK_WRITE; 4276 } 4277 4278 /* we should have already agreed on a version */ 4279 ASSERT(vd->operations != 0); 4280 } 4281 4282 static int 4283 vd_process_attr_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 4284 { 4285 vd_attr_msg_t *attr_msg = (vd_attr_msg_t *)msg; 4286 int status, retry = 0; 4287 4288 4289 ASSERT(msglen >= sizeof (msg->tag)); 4290 4291 if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, 4292 VIO_ATTR_INFO)) { 4293 PR0("Message is not an attribute message"); 4294 return (ENOMSG); 4295 } 4296 4297 if (msglen != sizeof (*attr_msg)) { 4298 PR0("Expected %lu-byte attribute message; " 4299 "received %lu bytes", sizeof (*attr_msg), msglen); 4300 return (EBADMSG); 4301 } 4302 4303 if (attr_msg->max_xfer_sz == 0) { 4304 PR0("Received maximum transfer size of 0 from client"); 4305 return (EBADMSG); 4306 } 4307 4308 if ((attr_msg->xfer_mode != VIO_DESC_MODE) && 4309 (attr_msg->xfer_mode != VIO_DRING_MODE_V1_0)) { 4310 PR0("Client requested unsupported transfer mode"); 4311 return (EBADMSG); 4312 } 4313 4314 /* 4315 * check if the underlying disk is ready, if not try accessing 4316 * the device again. 
Open the vdisk device and extract info 4317 * about it, as this is needed to respond to the attr info msg 4318 */ 4319 if ((vd->initialized & VD_DISK_READY) == 0) { 4320 PR0("Retry setting up disk (%s)", vd->device_path); 4321 do { 4322 status = vd_setup_vd(vd); 4323 if (status != EAGAIN || ++retry > vds_dev_retries) 4324 break; 4325 4326 /* incremental delay */ 4327 delay(drv_usectohz(vds_dev_delay)); 4328 4329 /* if vdisk is no longer enabled - return error */ 4330 if (!vd_enabled(vd)) 4331 return (ENXIO); 4332 4333 } while (status == EAGAIN); 4334 4335 if (status) 4336 return (ENXIO); 4337 4338 vd->initialized |= VD_DISK_READY; 4339 ASSERT(vd->nslices > 0 && vd->nslices <= V_NUMPAR); 4340 PR0("vdisk_type = %s, volume = %s, file = %s, nslices = %u", 4341 ((vd->vdisk_type == VD_DISK_TYPE_DISK) ? "disk" : "slice"), 4342 (vd->volume ? "yes" : "no"), 4343 (vd->file ? "yes" : "no"), 4344 vd->nslices); 4345 } 4346 4347 /* Success: valid message and transfer mode */ 4348 vd->xfer_mode = attr_msg->xfer_mode; 4349 4350 if (vd->xfer_mode == VIO_DESC_MODE) { 4351 4352 /* 4353 * The vd_dring_inband_msg_t contains one cookie; need room 4354 * for up to n-1 more cookies, where "n" is the number of full 4355 * pages plus possibly one partial page required to cover 4356 * "max_xfer_sz". Add room for one more cookie if 4357 * "max_xfer_sz" isn't an integral multiple of the page size. 4358 * Must first get the maximum transfer size in bytes. 4359 */ 4360 size_t max_xfer_bytes = attr_msg->vdisk_block_size ? 4361 attr_msg->vdisk_block_size*attr_msg->max_xfer_sz : 4362 attr_msg->max_xfer_sz; 4363 size_t max_inband_msglen = 4364 sizeof (vd_dring_inband_msg_t) + 4365 ((max_xfer_bytes/PAGESIZE + 4366 ((max_xfer_bytes % PAGESIZE) ? 1 : 0))* 4367 (sizeof (ldc_mem_cookie_t))); 4368 4369 /* 4370 * Set the maximum expected message length to 4371 * accommodate in-band-descriptor messages with all 4372 * their cookies 4373 */ 4374 vd->max_msglen = MAX(vd->max_msglen, max_inband_msglen); 4375 4376 /* 4377 * Initialize the data structure for processing in-band I/O 4378 * request descriptors 4379 */ 4380 vd->inband_task.vd = vd; 4381 vd->inband_task.msg = kmem_alloc(vd->max_msglen, KM_SLEEP); 4382 vd->inband_task.index = 0; 4383 vd->inband_task.type = VD_FINAL_RANGE_TASK; /* range == 1 */ 4384 } 4385 4386 /* Return the device's block size and max transfer size to the client */ 4387 attr_msg->vdisk_block_size = vd->vdisk_block_size; 4388 attr_msg->max_xfer_sz = vd->max_xfer_sz; 4389 4390 attr_msg->vdisk_size = vd->vdisk_size; 4391 attr_msg->vdisk_type = (vd_slice_single_slice)? 
vd->vdisk_type : 4392 VD_DISK_TYPE_DISK; 4393 attr_msg->vdisk_media = vd->vdisk_media; 4394 4395 /* Discover and save the list of supported VD_OP_XXX operations */ 4396 vd_set_exported_operations(vd); 4397 attr_msg->operations = vd->operations; 4398 4399 PR0("%s", VD_CLIENT(vd)); 4400 4401 ASSERT(vd->dring_task == NULL); 4402 4403 return (0); 4404 } 4405 4406 static int 4407 vd_process_dring_reg_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 4408 { 4409 int status; 4410 size_t expected; 4411 ldc_mem_info_t dring_minfo; 4412 uint8_t mtype; 4413 vio_dring_reg_msg_t *reg_msg = (vio_dring_reg_msg_t *)msg; 4414 4415 4416 ASSERT(msglen >= sizeof (msg->tag)); 4417 4418 if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, 4419 VIO_DRING_REG)) { 4420 PR0("Message is not a register-dring message"); 4421 return (ENOMSG); 4422 } 4423 4424 if (msglen < sizeof (*reg_msg)) { 4425 PR0("Expected at least %lu-byte register-dring message; " 4426 "received %lu bytes", sizeof (*reg_msg), msglen); 4427 return (EBADMSG); 4428 } 4429 4430 expected = sizeof (*reg_msg) + 4431 (reg_msg->ncookies - 1)*(sizeof (reg_msg->cookie[0])); 4432 if (msglen != expected) { 4433 PR0("Expected %lu-byte register-dring message; " 4434 "received %lu bytes", expected, msglen); 4435 return (EBADMSG); 4436 } 4437 4438 if (vd->initialized & VD_DRING) { 4439 PR0("A dring was previously registered; only support one"); 4440 return (EBADMSG); 4441 } 4442 4443 if (reg_msg->num_descriptors > INT32_MAX) { 4444 PR0("reg_msg->num_descriptors = %u; must be <= %u (%s)", 4445 reg_msg->ncookies, INT32_MAX, STRINGIZE(INT32_MAX)); 4446 return (EBADMSG); 4447 } 4448 4449 if (reg_msg->ncookies != 1) { 4450 /* 4451 * In addition to fixing the assertion in the success case 4452 * below, supporting drings which require more than one 4453 * "cookie" requires increasing the value of vd->max_msglen 4454 * somewhere in the code path prior to receiving the message 4455 * which results in calling this function. Note that without 4456 * making this change, the larger message size required to 4457 * accommodate multiple cookies cannot be successfully 4458 * received, so this function will not even get called. 
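 *
 * As a rough sketch only (this path is deliberately left unsupported),
 * the size adjustment would resemble the expected-length computation
 * above, assuming the cookie array elements are ldc_mem_cookie_t:
 *
 *	vd->max_msglen = MAX(vd->max_msglen, sizeof (vio_dring_reg_msg_t) +
 *	    (ncookies - 1) * sizeof (ldc_mem_cookie_t));
 *
 * and would have to run before the registration message is read from
 * the channel.
 *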
4459 * Gracefully accommodating more dring cookies might 4460 * reasonably demand exchanging an additional attribute or 4461 * making a minor protocol adjustment 4462 */ 4463 PR0("reg_msg->ncookies = %u != 1", reg_msg->ncookies); 4464 return (EBADMSG); 4465 } 4466 4467 if (vd_direct_mapped_drings) 4468 mtype = LDC_DIRECT_MAP; 4469 else 4470 mtype = LDC_SHADOW_MAP; 4471 4472 status = ldc_mem_dring_map(vd->ldc_handle, reg_msg->cookie, 4473 reg_msg->ncookies, reg_msg->num_descriptors, 4474 reg_msg->descriptor_size, mtype, &vd->dring_handle); 4475 if (status != 0) { 4476 PR0("ldc_mem_dring_map() returned errno %d", status); 4477 return (status); 4478 } 4479 4480 /* 4481 * To remove the need for this assertion, must call 4482 * ldc_mem_dring_nextcookie() successfully ncookies-1 times after a 4483 * successful call to ldc_mem_dring_map() 4484 */ 4485 ASSERT(reg_msg->ncookies == 1); 4486 4487 if ((status = 4488 ldc_mem_dring_info(vd->dring_handle, &dring_minfo)) != 0) { 4489 PR0("ldc_mem_dring_info() returned errno %d", status); 4490 if ((status = ldc_mem_dring_unmap(vd->dring_handle)) != 0) 4491 PR0("ldc_mem_dring_unmap() returned errno %d", status); 4492 return (status); 4493 } 4494 4495 if (dring_minfo.vaddr == NULL) { 4496 PR0("Descriptor ring virtual address is NULL"); 4497 return (ENXIO); 4498 } 4499 4500 4501 /* Initialize for valid message and mapped dring */ 4502 vd->initialized |= VD_DRING; 4503 vd->dring_ident = 1; /* "There Can Be Only One" */ 4504 vd->dring = dring_minfo.vaddr; 4505 vd->descriptor_size = reg_msg->descriptor_size; 4506 vd->dring_len = reg_msg->num_descriptors; 4507 vd->dring_mtype = dring_minfo.mtype; 4508 reg_msg->dring_ident = vd->dring_ident; 4509 PR1("descriptor size = %u, dring length = %u", 4510 vd->descriptor_size, vd->dring_len); 4511 4512 /* 4513 * Allocate and initialize a "shadow" array of data structures for 4514 * tasks to process I/O requests in dring elements 4515 */ 4516 vd->dring_task = 4517 kmem_zalloc((sizeof (*vd->dring_task)) * vd->dring_len, KM_SLEEP); 4518 for (int i = 0; i < vd->dring_len; i++) { 4519 vd->dring_task[i].vd = vd; 4520 vd->dring_task[i].index = i; 4521 4522 status = ldc_mem_alloc_handle(vd->ldc_handle, 4523 &(vd->dring_task[i].mhdl)); 4524 if (status) { 4525 PR0("ldc_mem_alloc_handle() returned err %d ", status); 4526 return (ENXIO); 4527 } 4528 4529 /* 4530 * The descriptor payload varies in length. Calculate its 4531 * size by subtracting the header size from the total 4532 * descriptor size. 
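 *
 * Conceptually each dring element laid out by the client looks like
 * this (a sketch; vd_dring_entry_t is the authoritative definition):
 *
 *	+--------------------------+ <- vd->dring + i * descriptor_size
 *	| vio_dring_entry_hdr_t    |   descriptor state (dstate), etc.
 *	+--------------------------+
 *	| vd_dring_payload_t       |   the request itself plus its
 *	| (variable length)        |   trailing LDC memory cookies
 *	+--------------------------+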
4533 */ 4534 vd->dring_task[i].request = kmem_zalloc((vd->descriptor_size - 4535 sizeof (vio_dring_entry_hdr_t)), KM_SLEEP); 4536 vd->dring_task[i].msg = kmem_alloc(vd->max_msglen, KM_SLEEP); 4537 } 4538 4539 return (0); 4540 } 4541 4542 static int 4543 vd_process_dring_unreg_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 4544 { 4545 vio_dring_unreg_msg_t *unreg_msg = (vio_dring_unreg_msg_t *)msg; 4546 4547 4548 ASSERT(msglen >= sizeof (msg->tag)); 4549 4550 if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, 4551 VIO_DRING_UNREG)) { 4552 PR0("Message is not an unregister-dring message"); 4553 return (ENOMSG); 4554 } 4555 4556 if (msglen != sizeof (*unreg_msg)) { 4557 PR0("Expected %lu-byte unregister-dring message; " 4558 "received %lu bytes", sizeof (*unreg_msg), msglen); 4559 return (EBADMSG); 4560 } 4561 4562 if (unreg_msg->dring_ident != vd->dring_ident) { 4563 PR0("Expected dring ident %lu; received %lu", 4564 vd->dring_ident, unreg_msg->dring_ident); 4565 return (EBADMSG); 4566 } 4567 4568 return (0); 4569 } 4570 4571 static int 4572 process_rdx_msg(vio_msg_t *msg, size_t msglen) 4573 { 4574 ASSERT(msglen >= sizeof (msg->tag)); 4575 4576 if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_RDX)) { 4577 PR0("Message is not an RDX message"); 4578 return (ENOMSG); 4579 } 4580 4581 if (msglen != sizeof (vio_rdx_msg_t)) { 4582 PR0("Expected %lu-byte RDX message; received %lu bytes", 4583 sizeof (vio_rdx_msg_t), msglen); 4584 return (EBADMSG); 4585 } 4586 4587 PR0("Valid RDX message"); 4588 return (0); 4589 } 4590 4591 static int 4592 vd_check_seq_num(vd_t *vd, uint64_t seq_num) 4593 { 4594 if ((vd->initialized & VD_SEQ_NUM) && (seq_num != vd->seq_num + 1)) { 4595 PR0("Received seq_num %lu; expected %lu", 4596 seq_num, (vd->seq_num + 1)); 4597 PR0("initiating soft reset"); 4598 vd_need_reset(vd, B_FALSE); 4599 return (1); 4600 } 4601 4602 vd->seq_num = seq_num; 4603 vd->initialized |= VD_SEQ_NUM; /* superfluous after first time... */ 4604 return (0); 4605 } 4606 4607 /* 4608 * Return the expected size of an inband-descriptor message with all the 4609 * cookies it claims to include 4610 */ 4611 static size_t 4612 expected_inband_size(vd_dring_inband_msg_t *msg) 4613 { 4614 return ((sizeof (*msg)) + 4615 (msg->payload.ncookies - 1)*(sizeof (msg->payload.cookie[0]))); 4616 } 4617 4618 /* 4619 * Process an in-band descriptor message: used with clients like OBP, with 4620 * which vds exchanges descriptors within VIO message payloads, rather than 4621 * operating on them within a descriptor ring 4622 */ 4623 static int 4624 vd_process_desc_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 4625 { 4626 size_t expected; 4627 vd_dring_inband_msg_t *desc_msg = (vd_dring_inband_msg_t *)msg; 4628 4629 4630 ASSERT(msglen >= sizeof (msg->tag)); 4631 4632 if (!vd_msgtype(&msg->tag, VIO_TYPE_DATA, VIO_SUBTYPE_INFO, 4633 VIO_DESC_DATA)) { 4634 PR1("Message is not an in-band-descriptor message"); 4635 return (ENOMSG); 4636 } 4637 4638 if (msglen < sizeof (*desc_msg)) { 4639 PR0("Expected at least %lu-byte descriptor message; " 4640 "received %lu bytes", sizeof (*desc_msg), msglen); 4641 return (EBADMSG); 4642 } 4643 4644 if (msglen != (expected = expected_inband_size(desc_msg))) { 4645 PR0("Expected %lu-byte descriptor message; " 4646 "received %lu bytes", expected, msglen); 4647 return (EBADMSG); 4648 } 4649 4650 if (vd_check_seq_num(vd, desc_msg->hdr.seq_num) != 0) 4651 return (EBADMSG); 4652 4653 /* 4654 * Valid message: Set up the in-band descriptor task and process the 4655 * request. 
Arrange to acknowledge the client's message, unless an 4656 * error processing the descriptor task results in setting 4657 * VIO_SUBTYPE_NACK 4658 */ 4659 PR1("Valid in-band-descriptor message"); 4660 msg->tag.vio_subtype = VIO_SUBTYPE_ACK; 4661 4662 ASSERT(vd->inband_task.msg != NULL); 4663 4664 bcopy(msg, vd->inband_task.msg, msglen); 4665 vd->inband_task.msglen = msglen; 4666 4667 /* 4668 * The task request is now the payload of the message 4669 * that was just copied into the body of the task. 4670 */ 4671 desc_msg = (vd_dring_inband_msg_t *)vd->inband_task.msg; 4672 vd->inband_task.request = &desc_msg->payload; 4673 4674 return (vd_process_task(&vd->inband_task)); 4675 } 4676 4677 static int 4678 vd_process_element(vd_t *vd, vd_task_type_t type, uint32_t idx, 4679 vio_msg_t *msg, size_t msglen) 4680 { 4681 int status; 4682 boolean_t ready; 4683 on_trap_data_t otd; 4684 vd_dring_entry_t *elem = VD_DRING_ELEM(idx); 4685 4686 /* Accept the updated dring element */ 4687 if ((status = VIO_DRING_ACQUIRE(&otd, vd->dring_mtype, 4688 vd->dring_handle, idx, idx)) != 0) { 4689 return (status); 4690 } 4691 ready = (elem->hdr.dstate == VIO_DESC_READY); 4692 if (ready) { 4693 elem->hdr.dstate = VIO_DESC_ACCEPTED; 4694 bcopy(&elem->payload, vd->dring_task[idx].request, 4695 (vd->descriptor_size - sizeof (vio_dring_entry_hdr_t))); 4696 } else { 4697 PR0("descriptor %u not ready", idx); 4698 VD_DUMP_DRING_ELEM(elem); 4699 } 4700 if ((status = VIO_DRING_RELEASE(vd->dring_mtype, 4701 vd->dring_handle, idx, idx)) != 0) { 4702 PR0("VIO_DRING_RELEASE() returned errno %d", status); 4703 return (status); 4704 } 4705 if (!ready) 4706 return (EBUSY); 4707 4708 4709 /* Initialize a task and process the accepted element */ 4710 PR1("Processing dring element %u", idx); 4711 vd->dring_task[idx].type = type; 4712 4713 /* duplicate msg buf for cookies etc. */ 4714 bcopy(msg, vd->dring_task[idx].msg, msglen); 4715 4716 vd->dring_task[idx].msglen = msglen; 4717 return (vd_process_task(&vd->dring_task[idx])); 4718 } 4719 4720 static int 4721 vd_process_element_range(vd_t *vd, int start, int end, 4722 vio_msg_t *msg, size_t msglen) 4723 { 4724 int i, n, nelem, status = 0; 4725 boolean_t inprogress = B_FALSE; 4726 vd_task_type_t type; 4727 4728 4729 ASSERT(start >= 0); 4730 ASSERT(end >= 0); 4731 4732 /* 4733 * Arrange to acknowledge the client's message, unless an error 4734 * processing one of the dring elements results in setting 4735 * VIO_SUBTYPE_NACK 4736 */ 4737 msg->tag.vio_subtype = VIO_SUBTYPE_ACK; 4738 4739 /* 4740 * Process the dring elements in the range 4741 */ 4742 nelem = ((end < start) ? end + vd->dring_len : end) - start + 1; 4743 for (i = start, n = nelem; n > 0; i = (i + 1) % vd->dring_len, n--) { 4744 ((vio_dring_msg_t *)msg)->end_idx = i; 4745 type = (n == 1) ? VD_FINAL_RANGE_TASK : VD_NONFINAL_RANGE_TASK; 4746 status = vd_process_element(vd, type, i, msg, msglen); 4747 if (status == EINPROGRESS) 4748 inprogress = B_TRUE; 4749 else if (status != 0) 4750 break; 4751 } 4752 4753 /* 4754 * If some, but not all, operations of a multi-element range are in 4755 * progress, wait for other operations to complete before returning 4756 * (which will result in "ack" or "nack" of the message). Note that 4757 * all outstanding operations will need to complete, not just the ones 4758 * corresponding to the current range of dring elements; howevever, as 4759 * this situation is an error case, performance is less critical. 
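 *
 * For reference, the range walk above wraps around the ring: with,
 * say, dring_len = 32, start = 30 and end = 1, then
 * nelem = (1 + 32) - 30 + 1 = 4 and elements 30, 31, 0 and 1 are
 * processed in that order, with only the last one tagged as
 * VD_FINAL_RANGE_TASK.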
4760 */ 4761 if ((nelem > 1) && (status != EINPROGRESS) && inprogress) 4762 ddi_taskq_wait(vd->completionq); 4763 4764 return (status); 4765 } 4766 4767 static int 4768 vd_process_dring_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 4769 { 4770 vio_dring_msg_t *dring_msg = (vio_dring_msg_t *)msg; 4771 4772 4773 ASSERT(msglen >= sizeof (msg->tag)); 4774 4775 if (!vd_msgtype(&msg->tag, VIO_TYPE_DATA, VIO_SUBTYPE_INFO, 4776 VIO_DRING_DATA)) { 4777 PR1("Message is not a dring-data message"); 4778 return (ENOMSG); 4779 } 4780 4781 if (msglen != sizeof (*dring_msg)) { 4782 PR0("Expected %lu-byte dring message; received %lu bytes", 4783 sizeof (*dring_msg), msglen); 4784 return (EBADMSG); 4785 } 4786 4787 if (vd_check_seq_num(vd, dring_msg->seq_num) != 0) 4788 return (EBADMSG); 4789 4790 if (dring_msg->dring_ident != vd->dring_ident) { 4791 PR0("Expected dring ident %lu; received ident %lu", 4792 vd->dring_ident, dring_msg->dring_ident); 4793 return (EBADMSG); 4794 } 4795 4796 if (dring_msg->start_idx >= vd->dring_len) { 4797 PR0("\"start_idx\" = %u; must be less than %u", 4798 dring_msg->start_idx, vd->dring_len); 4799 return (EBADMSG); 4800 } 4801 4802 if ((dring_msg->end_idx < 0) || 4803 (dring_msg->end_idx >= vd->dring_len)) { 4804 PR0("\"end_idx\" = %u; must be >= 0 and less than %u", 4805 dring_msg->end_idx, vd->dring_len); 4806 return (EBADMSG); 4807 } 4808 4809 /* Valid message; process range of updated dring elements */ 4810 PR1("Processing descriptor range, start = %u, end = %u", 4811 dring_msg->start_idx, dring_msg->end_idx); 4812 return (vd_process_element_range(vd, dring_msg->start_idx, 4813 dring_msg->end_idx, msg, msglen)); 4814 } 4815 4816 static int 4817 recv_msg(ldc_handle_t ldc_handle, void *msg, size_t *nbytes) 4818 { 4819 int retry, status; 4820 size_t size = *nbytes; 4821 4822 4823 for (retry = 0, status = ETIMEDOUT; 4824 retry < vds_ldc_retries && status == ETIMEDOUT; 4825 retry++) { 4826 PR1("ldc_read() attempt %d", (retry + 1)); 4827 *nbytes = size; 4828 status = ldc_read(ldc_handle, msg, nbytes); 4829 } 4830 4831 if (status) { 4832 PR0("ldc_read() returned errno %d", status); 4833 if (status != ECONNRESET) 4834 return (ENOMSG); 4835 return (status); 4836 } else if (*nbytes == 0) { 4837 PR1("ldc_read() returned 0 and no message read"); 4838 return (ENOMSG); 4839 } 4840 4841 PR1("RCVD %lu-byte message", *nbytes); 4842 return (0); 4843 } 4844 4845 static int 4846 vd_do_process_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 4847 { 4848 int status; 4849 4850 4851 PR1("Processing (%x/%x/%x) message", msg->tag.vio_msgtype, 4852 msg->tag.vio_subtype, msg->tag.vio_subtype_env); 4853 #ifdef DEBUG 4854 vd_decode_tag(msg); 4855 #endif 4856 4857 /* 4858 * Validate session ID up front, since it applies to all messages 4859 * once set 4860 */ 4861 if ((msg->tag.vio_sid != vd->sid) && (vd->initialized & VD_SID)) { 4862 PR0("Expected SID %u, received %u", vd->sid, 4863 msg->tag.vio_sid); 4864 return (EBADMSG); 4865 } 4866 4867 PR1("\tWhile in state %d (%s)", vd->state, vd_decode_state(vd->state)); 4868 4869 /* 4870 * Process the received message based on connection state 4871 */ 4872 switch (vd->state) { 4873 case VD_STATE_INIT: /* expect version message */ 4874 if ((status = vd_process_ver_msg(vd, msg, msglen)) != 0) 4875 return (status); 4876 4877 /* Version negotiated, move to that state */ 4878 vd->state = VD_STATE_VER; 4879 return (0); 4880 4881 case VD_STATE_VER: /* expect attribute message */ 4882 if ((status = vd_process_attr_msg(vd, msg, msglen)) != 0) 4883 return (status); 4884 
4885 /* Attributes exchanged, move to that state */ 4886 vd->state = VD_STATE_ATTR; 4887 return (0); 4888 4889 case VD_STATE_ATTR: 4890 switch (vd->xfer_mode) { 4891 case VIO_DESC_MODE: /* expect RDX message */ 4892 if ((status = process_rdx_msg(msg, msglen)) != 0) 4893 return (status); 4894 4895 /* Ready to receive in-band descriptors */ 4896 vd->state = VD_STATE_DATA; 4897 return (0); 4898 4899 case VIO_DRING_MODE_V1_0: /* expect register-dring message */ 4900 if ((status = 4901 vd_process_dring_reg_msg(vd, msg, msglen)) != 0) 4902 return (status); 4903 4904 /* One dring negotiated, move to that state */ 4905 vd->state = VD_STATE_DRING; 4906 return (0); 4907 4908 default: 4909 ASSERT("Unsupported transfer mode"); 4910 PR0("Unsupported transfer mode"); 4911 return (ENOTSUP); 4912 } 4913 4914 case VD_STATE_DRING: /* expect RDX, register-dring, or unreg-dring */ 4915 if ((status = process_rdx_msg(msg, msglen)) == 0) { 4916 /* Ready to receive data */ 4917 vd->state = VD_STATE_DATA; 4918 return (0); 4919 } else if (status != ENOMSG) { 4920 return (status); 4921 } 4922 4923 4924 /* 4925 * If another register-dring message is received, stay in 4926 * dring state in case the client sends RDX; although the 4927 * protocol allows multiple drings, this server does not 4928 * support using more than one 4929 */ 4930 if ((status = 4931 vd_process_dring_reg_msg(vd, msg, msglen)) != ENOMSG) 4932 return (status); 4933 4934 /* 4935 * Acknowledge an unregister-dring message, but reset the 4936 * connection anyway: Although the protocol allows 4937 * unregistering drings, this server cannot serve a vdisk 4938 * without its only dring 4939 */ 4940 status = vd_process_dring_unreg_msg(vd, msg, msglen); 4941 return ((status == 0) ? ENOTSUP : status); 4942 4943 case VD_STATE_DATA: 4944 switch (vd->xfer_mode) { 4945 case VIO_DESC_MODE: /* expect in-band-descriptor message */ 4946 return (vd_process_desc_msg(vd, msg, msglen)); 4947 4948 case VIO_DRING_MODE_V1_0: /* expect dring-data or unreg-dring */ 4949 /* 4950 * Typically expect dring-data messages, so handle 4951 * them first 4952 */ 4953 if ((status = vd_process_dring_msg(vd, msg, 4954 msglen)) != ENOMSG) 4955 return (status); 4956 4957 /* 4958 * Acknowledge an unregister-dring message, but reset 4959 * the connection anyway: Although the protocol 4960 * allows unregistering drings, this server cannot 4961 * serve a vdisk without its only dring 4962 */ 4963 status = vd_process_dring_unreg_msg(vd, msg, msglen); 4964 return ((status == 0) ? 
ENOTSUP : status); 4965 4966 default: 4967 ASSERT("Unsupported transfer mode"); 4968 PR0("Unsupported transfer mode"); 4969 return (ENOTSUP); 4970 } 4971 4972 default: 4973 ASSERT("Invalid client connection state"); 4974 PR0("Invalid client connection state"); 4975 return (ENOTSUP); 4976 } 4977 } 4978 4979 static int 4980 vd_process_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 4981 { 4982 int status; 4983 boolean_t reset_ldc = B_FALSE; 4984 vd_task_t task; 4985 4986 /* 4987 * Check that the message is at least big enough for a "tag", so that 4988 * message processing can proceed based on tag-specified message type 4989 */ 4990 if (msglen < sizeof (vio_msg_tag_t)) { 4991 PR0("Received short (%lu-byte) message", msglen); 4992 /* Can't "nack" short message, so drop the big hammer */ 4993 PR0("initiating full reset"); 4994 vd_need_reset(vd, B_TRUE); 4995 return (EBADMSG); 4996 } 4997 4998 /* 4999 * Process the message 5000 */ 5001 switch (status = vd_do_process_msg(vd, msg, msglen)) { 5002 case 0: 5003 /* "ack" valid, successfully-processed messages */ 5004 msg->tag.vio_subtype = VIO_SUBTYPE_ACK; 5005 break; 5006 5007 case EINPROGRESS: 5008 /* The completion handler will "ack" or "nack" the message */ 5009 return (EINPROGRESS); 5010 case ENOMSG: 5011 PR0("Received unexpected message"); 5012 _NOTE(FALLTHROUGH); 5013 case EBADMSG: 5014 case ENOTSUP: 5015 /* "transport" error will cause NACK of invalid messages */ 5016 msg->tag.vio_subtype = VIO_SUBTYPE_NACK; 5017 break; 5018 5019 default: 5020 /* "transport" error will cause NACK of invalid messages */ 5021 msg->tag.vio_subtype = VIO_SUBTYPE_NACK; 5022 /* An LDC error probably occurred, so try resetting it */ 5023 reset_ldc = B_TRUE; 5024 break; 5025 } 5026 5027 PR1("\tResulting in state %d (%s)", vd->state, 5028 vd_decode_state(vd->state)); 5029 5030 /* populate the task so we can dispatch it on the taskq */ 5031 task.vd = vd; 5032 task.msg = msg; 5033 task.msglen = msglen; 5034 5035 /* 5036 * Queue a task to send the notification that the operation completed. 5037 * We need to ensure that requests are responded to in the correct 5038 * order and since the taskq is processed serially this ordering 5039 * is maintained. 5040 */ 5041 (void) ddi_taskq_dispatch(vd->completionq, vd_serial_notify, 5042 &task, DDI_SLEEP); 5043 5044 /* 5045 * To ensure handshake negotiations do not happen out of order, such 5046 * requests that come through this path should not be done in parallel 5047 * so we need to wait here until the response is sent to the client. 5048 */ 5049 ddi_taskq_wait(vd->completionq); 5050 5051 /* Arrange to reset the connection for nack'ed or failed messages */ 5052 if ((status != 0) || reset_ldc) { 5053 PR0("initiating %s reset", 5054 (reset_ldc) ? 
"full" : "soft"); 5055 vd_need_reset(vd, reset_ldc); 5056 } 5057 5058 return (status); 5059 } 5060 5061 static boolean_t 5062 vd_enabled(vd_t *vd) 5063 { 5064 boolean_t enabled; 5065 5066 mutex_enter(&vd->lock); 5067 enabled = vd->enabled; 5068 mutex_exit(&vd->lock); 5069 return (enabled); 5070 } 5071 5072 static void 5073 vd_recv_msg(void *arg) 5074 { 5075 vd_t *vd = (vd_t *)arg; 5076 int rv = 0, status = 0; 5077 5078 ASSERT(vd != NULL); 5079 5080 PR2("New task to receive incoming message(s)"); 5081 5082 5083 while (vd_enabled(vd) && status == 0) { 5084 size_t msglen, msgsize; 5085 ldc_status_t lstatus; 5086 5087 /* 5088 * Receive and process a message 5089 */ 5090 vd_reset_if_needed(vd); /* can change vd->max_msglen */ 5091 5092 /* 5093 * check if channel is UP - else break out of loop 5094 */ 5095 status = ldc_status(vd->ldc_handle, &lstatus); 5096 if (lstatus != LDC_UP) { 5097 PR0("channel not up (status=%d), exiting recv loop\n", 5098 lstatus); 5099 break; 5100 } 5101 5102 ASSERT(vd->max_msglen != 0); 5103 5104 msgsize = vd->max_msglen; /* stable copy for alloc/free */ 5105 msglen = msgsize; /* actual len after recv_msg() */ 5106 5107 status = recv_msg(vd->ldc_handle, vd->vio_msgp, &msglen); 5108 switch (status) { 5109 case 0: 5110 rv = vd_process_msg(vd, (void *)vd->vio_msgp, msglen); 5111 /* check if max_msglen changed */ 5112 if (msgsize != vd->max_msglen) { 5113 PR0("max_msglen changed 0x%lx to 0x%lx bytes\n", 5114 msgsize, vd->max_msglen); 5115 kmem_free(vd->vio_msgp, msgsize); 5116 vd->vio_msgp = 5117 kmem_alloc(vd->max_msglen, KM_SLEEP); 5118 } 5119 if (rv == EINPROGRESS) 5120 continue; 5121 break; 5122 5123 case ENOMSG: 5124 break; 5125 5126 case ECONNRESET: 5127 PR0("initiating soft reset (ECONNRESET)\n"); 5128 vd_need_reset(vd, B_FALSE); 5129 status = 0; 5130 break; 5131 5132 default: 5133 /* Probably an LDC failure; arrange to reset it */ 5134 PR0("initiating full reset (status=0x%x)", status); 5135 vd_need_reset(vd, B_TRUE); 5136 break; 5137 } 5138 } 5139 5140 PR2("Task finished"); 5141 } 5142 5143 static uint_t 5144 vd_handle_ldc_events(uint64_t event, caddr_t arg) 5145 { 5146 vd_t *vd = (vd_t *)(void *)arg; 5147 int status; 5148 5149 ASSERT(vd != NULL); 5150 5151 if (!vd_enabled(vd)) 5152 return (LDC_SUCCESS); 5153 5154 if (event & LDC_EVT_DOWN) { 5155 PR0("LDC_EVT_DOWN: LDC channel went down"); 5156 5157 vd_need_reset(vd, B_TRUE); 5158 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, vd, 5159 DDI_SLEEP); 5160 if (status == DDI_FAILURE) { 5161 PR0("cannot schedule task to recv msg\n"); 5162 vd_need_reset(vd, B_TRUE); 5163 } 5164 } 5165 5166 if (event & LDC_EVT_RESET) { 5167 PR0("LDC_EVT_RESET: LDC channel was reset"); 5168 5169 if (vd->state != VD_STATE_INIT) { 5170 PR0("scheduling full reset"); 5171 vd_need_reset(vd, B_FALSE); 5172 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, 5173 vd, DDI_SLEEP); 5174 if (status == DDI_FAILURE) { 5175 PR0("cannot schedule task to recv msg\n"); 5176 vd_need_reset(vd, B_TRUE); 5177 } 5178 5179 } else { 5180 PR0("channel already reset, ignoring...\n"); 5181 PR0("doing ldc up...\n"); 5182 (void) ldc_up(vd->ldc_handle); 5183 } 5184 5185 return (LDC_SUCCESS); 5186 } 5187 5188 if (event & LDC_EVT_UP) { 5189 PR0("EVT_UP: LDC is up\nResetting client connection state"); 5190 PR0("initiating soft reset"); 5191 vd_need_reset(vd, B_FALSE); 5192 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, 5193 vd, DDI_SLEEP); 5194 if (status == DDI_FAILURE) { 5195 PR0("cannot schedule task to recv msg\n"); 5196 vd_need_reset(vd, B_TRUE); 5197 
return (LDC_SUCCESS); 5198 } 5199 } 5200 5201 if (event & LDC_EVT_READ) { 5202 int status; 5203 5204 PR1("New data available"); 5205 /* Queue a task to receive the new data */ 5206 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, vd, 5207 DDI_SLEEP); 5208 5209 if (status == DDI_FAILURE) { 5210 PR0("cannot schedule task to recv msg\n"); 5211 vd_need_reset(vd, B_TRUE); 5212 } 5213 } 5214 5215 return (LDC_SUCCESS); 5216 } 5217 5218 static uint_t 5219 vds_check_for_vd(mod_hash_key_t key, mod_hash_val_t *val, void *arg) 5220 { 5221 _NOTE(ARGUNUSED(key, val)) 5222 (*((uint_t *)arg))++; 5223 return (MH_WALK_TERMINATE); 5224 } 5225 5226 5227 static int 5228 vds_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 5229 { 5230 uint_t vd_present = 0; 5231 minor_t instance; 5232 vds_t *vds; 5233 5234 5235 switch (cmd) { 5236 case DDI_DETACH: 5237 /* the real work happens below */ 5238 break; 5239 case DDI_SUSPEND: 5240 PR0("No action required for DDI_SUSPEND"); 5241 return (DDI_SUCCESS); 5242 default: 5243 PR0("Unrecognized \"cmd\""); 5244 return (DDI_FAILURE); 5245 } 5246 5247 ASSERT(cmd == DDI_DETACH); 5248 instance = ddi_get_instance(dip); 5249 if ((vds = ddi_get_soft_state(vds_state, instance)) == NULL) { 5250 PR0("Could not get state for instance %u", instance); 5251 ddi_soft_state_free(vds_state, instance); 5252 return (DDI_FAILURE); 5253 } 5254 5255 /* Do no detach when serving any vdisks */ 5256 mod_hash_walk(vds->vd_table, vds_check_for_vd, &vd_present); 5257 if (vd_present) { 5258 PR0("Not detaching because serving vdisks"); 5259 return (DDI_FAILURE); 5260 } 5261 5262 PR0("Detaching"); 5263 if (vds->initialized & VDS_MDEG) { 5264 (void) mdeg_unregister(vds->mdeg); 5265 kmem_free(vds->ispecp->specp, sizeof (vds_prop_template)); 5266 kmem_free(vds->ispecp, sizeof (mdeg_node_spec_t)); 5267 vds->ispecp = NULL; 5268 vds->mdeg = NULL; 5269 } 5270 5271 vds_driver_types_free(vds); 5272 5273 if (vds->initialized & VDS_LDI) 5274 (void) ldi_ident_release(vds->ldi_ident); 5275 mod_hash_destroy_hash(vds->vd_table); 5276 ddi_soft_state_free(vds_state, instance); 5277 return (DDI_SUCCESS); 5278 } 5279 5280 /* 5281 * Description: 5282 * This function checks to see if the disk image being used as a 5283 * virtual disk is an ISO image. An ISO image is a special case 5284 * which can be booted/installed from like a CD/DVD. 5285 * 5286 * Parameters: 5287 * vd - disk on which the operation is performed. 5288 * 5289 * Return Code: 5290 * B_TRUE - The disk image is an ISO 9660 compliant image 5291 * B_FALSE - just a regular disk image 5292 */ 5293 static boolean_t 5294 vd_dskimg_is_iso_image(vd_t *vd) 5295 { 5296 char iso_buf[ISO_SECTOR_SIZE]; 5297 int i, rv; 5298 uint_t sec; 5299 5300 ASSERT(VD_DSKIMG(vd)); 5301 5302 /* 5303 * If we have already discovered and saved this info we can 5304 * short-circuit the check and avoid reading the disk image. 5305 */ 5306 if (vd->vdisk_media == VD_MEDIA_DVD || vd->vdisk_media == VD_MEDIA_CD) 5307 return (B_TRUE); 5308 5309 /* 5310 * We wish to read the sector that should contain the 2nd ISO volume 5311 * descriptor. The second field in this descriptor is called the 5312 * Standard Identifier and is set to CD001 for a CD-ROM compliant 5313 * to the ISO 9660 standard. 
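 *
 * As a worked example, assuming the usual hsfs values of
 * ISO_VOLDESC_SEC = 16 and ISO_SECTOR_SIZE = 2048 (the real values
 * come from <sys/fs/hsfs_isospec.h>) and a vdisk_block_size of 512:
 *
 *	sec = (16 * 2048) / 512 = 64
 *
 * so the descriptor is read starting at virtual disk block 64 and the
 * image is treated as ISO 9660 only if the Standard Identifier bytes
 * spell "CD001".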
5314 */ 5315 sec = (ISO_VOLDESC_SEC * ISO_SECTOR_SIZE) / vd->vdisk_block_size; 5316 rv = vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BREAD, (caddr_t)iso_buf, 5317 sec, ISO_SECTOR_SIZE); 5318 5319 if (rv < 0) 5320 return (B_FALSE); 5321 5322 for (i = 0; i < ISO_ID_STRLEN; i++) { 5323 if (ISO_STD_ID(iso_buf)[i] != ISO_ID_STRING[i]) 5324 return (B_FALSE); 5325 } 5326 5327 return (B_TRUE); 5328 } 5329 5330 /* 5331 * Description: 5332 * This function checks to see if the virtual device is an ATAPI 5333 * device. ATAPI devices use Group 1 Read/Write commands, so 5334 * any USCSI calls vds makes need to take this into account. 5335 * 5336 * Parameters: 5337 * vd - disk on which the operation is performed. 5338 * 5339 * Return Code: 5340 * B_TRUE - The virtual disk is backed by an ATAPI device 5341 * B_FALSE - not an ATAPI device (presumably SCSI) 5342 */ 5343 static boolean_t 5344 vd_is_atapi_device(vd_t *vd) 5345 { 5346 boolean_t is_atapi = B_FALSE; 5347 char *variantp; 5348 int rv; 5349 5350 ASSERT(vd->ldi_handle[0] != NULL); 5351 ASSERT(!vd->file); 5352 5353 rv = ldi_prop_lookup_string(vd->ldi_handle[0], 5354 (LDI_DEV_T_ANY | DDI_PROP_DONTPASS), "variant", &variantp); 5355 if (rv == DDI_PROP_SUCCESS) { 5356 PR0("'variant' property exists for %s", vd->device_path); 5357 if (strcmp(variantp, "atapi") == 0) 5358 is_atapi = B_TRUE; 5359 ddi_prop_free(variantp); 5360 } 5361 5362 rv = ldi_prop_exists(vd->ldi_handle[0], LDI_DEV_T_ANY, "atapi"); 5363 if (rv) { 5364 PR0("'atapi' property exists for %s", vd->device_path); 5365 is_atapi = B_TRUE; 5366 } 5367 5368 return (is_atapi); 5369 } 5370 5371 static int 5372 vd_setup_full_disk(vd_t *vd) 5373 { 5374 int status; 5375 major_t major = getmajor(vd->dev[0]); 5376 minor_t minor = getminor(vd->dev[0]) - VD_ENTIRE_DISK_SLICE; 5377 5378 ASSERT(vd->vdisk_type == VD_DISK_TYPE_DISK); 5379 5380 vd->vdisk_block_size = DEV_BSIZE; 5381 5382 /* set the disk size, block size and the media type of the disk */ 5383 status = vd_backend_check_size(vd); 5384 5385 if (status != 0) { 5386 if (!vd->scsi) { 5387 /* unexpected failure */ 5388 PRN("ldi_ioctl(DKIOCGMEDIAINFO) returned errno %d", 5389 status); 5390 return (status); 5391 } 5392 5393 /* 5394 * The function can fail for SCSI disks which are present but 5395 * reserved by another system. In that case, we don't know the 5396 * size of the disk and the block size. 5397 */ 5398 vd->vdisk_size = VD_SIZE_UNKNOWN; 5399 vd->block_size = 0; 5400 vd->vdisk_media = VD_MEDIA_FIXED; 5401 } 5402 5403 /* Move dev number and LDI handle to entire-disk-slice array elements */ 5404 vd->dev[VD_ENTIRE_DISK_SLICE] = vd->dev[0]; 5405 vd->dev[0] = 0; 5406 vd->ldi_handle[VD_ENTIRE_DISK_SLICE] = vd->ldi_handle[0]; 5407 vd->ldi_handle[0] = NULL; 5408 5409 /* Initialize device numbers for remaining slices and open them */ 5410 for (int slice = 0; slice < vd->nslices; slice++) { 5411 /* 5412 * Skip the entire-disk slice, as it's already open and its 5413 * device known 5414 */ 5415 if (slice == VD_ENTIRE_DISK_SLICE) 5416 continue; 5417 ASSERT(vd->dev[slice] == 0); 5418 ASSERT(vd->ldi_handle[slice] == NULL); 5419 5420 /* 5421 * Construct the device number for the current slice 5422 */ 5423 vd->dev[slice] = makedevice(major, (minor + slice)); 5424 5425 /* 5426 * Open all slices of the disk to serve them to the client. 5427 * Slices are opened exclusively to prevent other threads or 5428 * processes in the service domain from performing I/O to 5429 * slices being accessed by a client. 
Failure to open a slice 5430 * results in vds not serving this disk, as the client could 5431 * attempt (and should be able) to access any slice immediately. 5432 * Any slices successfully opened before a failure will get 5433 * closed by vds_destroy_vd() as a result of the error returned 5434 * by this function. 5435 * 5436 * We need to do the open with FNDELAY so that opening an empty 5437 * slice does not fail. 5438 */ 5439 PR0("Opening device major %u, minor %u = slice %u", 5440 major, minor, slice); 5441 5442 /* 5443 * Try to open the device. This can fail for example if we are 5444 * opening an empty slice. So in case of a failure, we try the 5445 * open again but this time with the FNDELAY flag. 5446 */ 5447 status = ldi_open_by_dev(&vd->dev[slice], OTYP_BLK, 5448 vd->open_flags, kcred, &vd->ldi_handle[slice], 5449 vd->vds->ldi_ident); 5450 5451 if (status != 0) { 5452 status = ldi_open_by_dev(&vd->dev[slice], OTYP_BLK, 5453 vd->open_flags | FNDELAY, kcred, 5454 &vd->ldi_handle[slice], vd->vds->ldi_ident); 5455 } 5456 5457 if (status != 0) { 5458 PRN("ldi_open_by_dev() returned errno %d " 5459 "for slice %u", status, slice); 5460 /* vds_destroy_vd() will close any open slices */ 5461 vd->ldi_handle[slice] = NULL; 5462 return (status); 5463 } 5464 } 5465 5466 return (0); 5467 } 5468 5469 /* 5470 * When a slice or a volume is exported as a single-slice disk, we want 5471 * the disk backend (i.e. the slice or volume) to be entirely mapped as 5472 * a slice without the addition of any metadata. 5473 * 5474 * So when exporting the disk as a VTOC disk, we fake a disk with the following 5475 * layout: 5476 * flabel +--- flabel_limit 5477 * <-> V 5478 * 0 1 C D E 5479 * +-+---+--------------------------+--+ 5480 * virtual disk: |L|XXX| slice 0 |AA| 5481 * +-+---+--------------------------+--+ 5482 * ^ : : 5483 * | : : 5484 * VTOC LABEL--+ : : 5485 * +--------------------------+ 5486 * disk backend: | slice/volume/file | 5487 * +--------------------------+ 5488 * 0 N 5489 * 5490 * N is the number of blocks in the slice/volume/file. 5491 * 5492 * We simulate a disk with N+M blocks, where M is the number of blocks 5493 * simluated at the beginning and at the end of the disk (blocks 0-C 5494 * and D-E). 5495 * 5496 * The first blocks (0 to C-1) are emulated and can not be changed. Blocks C 5497 * to D defines slice 0 and are mapped to the backend. Finally we emulate 2 5498 * alternate cylinders at the end of the disk (blocks D-E). In summary we have: 5499 * 5500 * - block 0 (L) returns a fake VTOC label 5501 * - blocks 1 to C-1 (X) are unused and return 0 5502 * - blocks C to D-1 are mapped to the exported slice or volume 5503 * - blocks D and E (A) are blocks defining alternate cylinders (2 cylinders) 5504 * 5505 * Note: because we define a fake disk geometry, it is possible that the length 5506 * of the backend is not a multiple of the size of cylinder, in that case the 5507 * very end of the backend will not map to any block of the virtual disk. 
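 *
 * As a worked illustration (numbers chosen for the example only): a
 * backend of N = 204800 blocks exported with a geometry of nhead = 64
 * and nsect = 32 gives a cylinder size of csize = 64 * 32 = 2048
 * blocks, hence ncyl = 204800 / 2048 + 1 = 101 (cylinder 0 holds the
 * fake label), acyl = 2, slice 0 starts at block 2048 and spans 204800
 * blocks, and the emulated disk ends up with
 * 204800 + 3 * 2048 = 210944 blocks, i.e. pcyl = 103 full cylinders.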
5508 */ 5509 static int 5510 vd_setup_partition_vtoc(vd_t *vd) 5511 { 5512 char *device_path = vd->device_path; 5513 char unit; 5514 size_t size, csize; 5515 5516 /* Initialize dk_geom structure for single-slice device */ 5517 if (vd->dk_geom.dkg_nsect == 0) { 5518 PRN("%s geometry claims 0 sectors per track", device_path); 5519 return (EIO); 5520 } 5521 if (vd->dk_geom.dkg_nhead == 0) { 5522 PRN("%s geometry claims 0 heads", device_path); 5523 return (EIO); 5524 } 5525 5526 /* size of a cylinder in block */ 5527 csize = vd->dk_geom.dkg_nhead * vd->dk_geom.dkg_nsect; 5528 5529 /* 5530 * Add extra cylinders: we emulate the first cylinder (which contains 5531 * the disk label). 5532 */ 5533 vd->dk_geom.dkg_ncyl = vd->vdisk_size / csize + 1; 5534 5535 /* we emulate 2 alternate cylinders */ 5536 vd->dk_geom.dkg_acyl = 2; 5537 vd->dk_geom.dkg_pcyl = vd->dk_geom.dkg_ncyl + vd->dk_geom.dkg_acyl; 5538 5539 5540 /* Initialize vtoc structure for single-slice device */ 5541 bzero(vd->vtoc.v_part, sizeof (vd->vtoc.v_part)); 5542 vd->vtoc.v_part[0].p_tag = V_UNASSIGNED; 5543 vd->vtoc.v_part[0].p_flag = 0; 5544 /* 5545 * Partition 0 starts on cylinder 1 and its size has to be 5546 * a multiple of a number of cylinder. 5547 */ 5548 vd->vtoc.v_part[0].p_start = csize; /* start on cylinder 1 */ 5549 vd->vtoc.v_part[0].p_size = (vd->vdisk_size / csize) * csize; 5550 5551 if (vd_slice_single_slice) { 5552 vd->vtoc.v_nparts = 1; 5553 bcopy(VD_ASCIILABEL, vd->vtoc.v_asciilabel, 5554 MIN(sizeof (VD_ASCIILABEL), 5555 sizeof (vd->vtoc.v_asciilabel))); 5556 bcopy(VD_VOLUME_NAME, vd->vtoc.v_volume, 5557 MIN(sizeof (VD_VOLUME_NAME), sizeof (vd->vtoc.v_volume))); 5558 } else { 5559 /* adjust the number of slices */ 5560 vd->nslices = V_NUMPAR; 5561 vd->vtoc.v_nparts = V_NUMPAR; 5562 5563 /* define slice 2 representing the entire disk */ 5564 vd->vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_tag = V_BACKUP; 5565 vd->vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_flag = 0; 5566 vd->vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_start = 0; 5567 vd->vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_size = 5568 vd->dk_geom.dkg_ncyl * csize; 5569 5570 vd_get_readable_size(vd->vdisk_size * vd->vdisk_block_size, 5571 &size, &unit); 5572 5573 /* 5574 * Set some attributes of the geometry to what format(1m) uses 5575 * so that writing a default label using format(1m) does not 5576 * produce any error. 5577 */ 5578 vd->dk_geom.dkg_bcyl = 0; 5579 vd->dk_geom.dkg_intrlv = 1; 5580 vd->dk_geom.dkg_write_reinstruct = 0; 5581 vd->dk_geom.dkg_read_reinstruct = 0; 5582 5583 /* 5584 * We must have a correct label name otherwise format(1m) will 5585 * not recognized the disk as labeled. 5586 */ 5587 (void) snprintf(vd->vtoc.v_asciilabel, LEN_DKL_ASCII, 5588 "SUN-DiskSlice-%ld%cB cyl %d alt %d hd %d sec %d", 5589 size, unit, 5590 vd->dk_geom.dkg_ncyl, vd->dk_geom.dkg_acyl, 5591 vd->dk_geom.dkg_nhead, vd->dk_geom.dkg_nsect); 5592 bzero(vd->vtoc.v_volume, sizeof (vd->vtoc.v_volume)); 5593 5594 /* create a fake label from the vtoc and geometry */ 5595 vd->flabel_limit = (uint_t)csize; 5596 vd->flabel_size = VD_LABEL_VTOC_SIZE; 5597 vd->flabel = kmem_zalloc(vd->flabel_size, KM_SLEEP); 5598 vd_vtocgeom_to_label(&vd->vtoc, &vd->dk_geom, 5599 VD_LABEL_VTOC(vd)); 5600 } 5601 5602 /* adjust the vdisk_size, we emulate 3 cylinders */ 5603 vd->vdisk_size += csize * 3; 5604 5605 return (0); 5606 } 5607 5608 /* 5609 * When a slice, volume or file is exported as a single-slice disk, we want 5610 * the disk backend (i.e. 
the slice, volume or file) to be entirely mapped 5611 * as a slice without the addition of any metadata. 5612 * 5613 * So when exporting the disk as an EFI disk, we fake a disk with the following 5614 * layout: 5615 * 5616 * flabel +--- flabel_limit 5617 * <------> v 5618 * 0 1 2 L 34 34+N P 5619 * +-+-+--+-------+--------------------------+-------+ 5620 * virtual disk: |X|T|EE|XXXXXXX| slice 0 |RRRRRRR| 5621 * +-+-+--+-------+--------------------------+-------+ 5622 * ^ ^ : : 5623 * | | : : 5624 * GPT-+ +-GPE : : 5625 * +--------------------------+ 5626 * disk backend: | slice/volume/file | 5627 * +--------------------------+ 5628 * 0 N 5629 * 5630 * N is the number of blocks in the slice/volume/file. 5631 * 5632 * We simulate a disk with N+M blocks, where M is the number of blocks 5633 * simluated at the beginning and at the end of the disk (blocks 0-34 5634 * and 34+N-P). 5635 * 5636 * The first 34 blocks (0 to 33) are emulated and can not be changed. Blocks 34 5637 * to 34+N defines slice 0 and are mapped to the exported backend, and we 5638 * emulate some blocks at the end of the disk (blocks 34+N to P) as a the EFI 5639 * reserved partition. 5640 * 5641 * - block 0 (X) is unused and return 0 5642 * - block 1 (T) returns a fake EFI GPT (via DKIOCGETEFI) 5643 * - blocks 2 to L-1 (E) defines a fake EFI GPE (via DKIOCGETEFI) 5644 * - blocks L to 33 (X) are unused and return 0 5645 * - blocks 34 to 34+N are mapped to the exported slice, volume or file 5646 * - blocks 34+N+1 to P define a fake reserved partition and backup label, it 5647 * returns 0 5648 * 5649 * Note: if the backend size is not a multiple of the vdisk block size 5650 * (DEV_BSIZE = 512 byte) then the very end of the backend will not map to 5651 * any block of the virtual disk. 5652 */ 5653 static int 5654 vd_setup_partition_efi(vd_t *vd) 5655 { 5656 efi_gpt_t *gpt; 5657 efi_gpe_t *gpe; 5658 struct uuid uuid = EFI_USR; 5659 struct uuid efi_reserved = EFI_RESERVED; 5660 uint32_t crc; 5661 uint64_t s0_start, s0_end; 5662 5663 vd->flabel_limit = 34; 5664 vd->flabel_size = VD_LABEL_EFI_SIZE; 5665 vd->flabel = kmem_zalloc(vd->flabel_size, KM_SLEEP); 5666 gpt = VD_LABEL_EFI_GPT(vd); 5667 gpe = VD_LABEL_EFI_GPE(vd); 5668 5669 /* adjust the vdisk_size, we emulate the first 34 blocks */ 5670 vd->vdisk_size += 34; 5671 s0_start = 34; 5672 s0_end = vd->vdisk_size - 1; 5673 5674 gpt->efi_gpt_Signature = LE_64(EFI_SIGNATURE); 5675 gpt->efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT); 5676 gpt->efi_gpt_HeaderSize = LE_32(sizeof (efi_gpt_t)); 5677 gpt->efi_gpt_FirstUsableLBA = LE_64(34ULL); 5678 gpt->efi_gpt_PartitionEntryLBA = LE_64(2ULL); 5679 gpt->efi_gpt_SizeOfPartitionEntry = LE_32(sizeof (efi_gpe_t)); 5680 5681 UUID_LE_CONVERT(gpe[0].efi_gpe_PartitionTypeGUID, uuid); 5682 gpe[0].efi_gpe_StartingLBA = LE_64(s0_start); 5683 gpe[0].efi_gpe_EndingLBA = LE_64(s0_end); 5684 5685 if (vd_slice_single_slice) { 5686 gpt->efi_gpt_NumberOfPartitionEntries = LE_32(1); 5687 } else { 5688 /* adjust the number of slices */ 5689 gpt->efi_gpt_NumberOfPartitionEntries = LE_32(VD_MAXPART); 5690 vd->nslices = V_NUMPAR; 5691 5692 /* define a fake reserved partition */ 5693 UUID_LE_CONVERT(gpe[VD_MAXPART - 1].efi_gpe_PartitionTypeGUID, 5694 efi_reserved); 5695 gpe[VD_MAXPART - 1].efi_gpe_StartingLBA = 5696 LE_64(s0_end + 1); 5697 gpe[VD_MAXPART - 1].efi_gpe_EndingLBA = 5698 LE_64(s0_end + EFI_MIN_RESV_SIZE); 5699 5700 /* adjust the vdisk_size to include the reserved slice */ 5701 vd->vdisk_size += EFI_MIN_RESV_SIZE; 5702 } 5703 5704 
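	/*
	 * Size accounting so far: starting from a backend of N blocks,
	 * vdisk_size is now 34 + N for a single-slice export, or
	 * 34 + N + EFI_MIN_RESV_SIZE when the fake reserved partition is
	 * added, so the last usable LBA is vdisk_size - 1. The 33 blocks
	 * added just below account for the backup GPT/GPE at the end of
	 * the emulated disk.
	 */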
gpt->efi_gpt_LastUsableLBA = LE_64(vd->vdisk_size - 1); 5705 5706 /* adjust the vdisk size for the backup GPT and GPE */ 5707 vd->vdisk_size += 33; 5708 5709 CRC32(crc, gpe, sizeof (efi_gpe_t) * VD_MAXPART, -1U, crc32_table); 5710 gpt->efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc); 5711 5712 CRC32(crc, gpt, sizeof (efi_gpt_t), -1U, crc32_table); 5713 gpt->efi_gpt_HeaderCRC32 = LE_32(~crc); 5714 5715 return (0); 5716 } 5717 5718 /* 5719 * Setup for a virtual disk whose backend is a file (exported as a single slice 5720 * or as a full disk). In that case, the backend is accessed using the vnode 5721 * interface. 5722 */ 5723 static int 5724 vd_setup_backend_vnode(vd_t *vd) 5725 { 5726 int rval, status; 5727 vattr_t vattr; 5728 dev_t dev; 5729 char *file_path = vd->device_path; 5730 ldi_handle_t lhandle; 5731 struct dk_cinfo dk_cinfo; 5732 5733 ASSERT(!vd->volume); 5734 5735 if ((status = vn_open(file_path, UIO_SYSSPACE, vd->open_flags | FOFFMAX, 5736 0, &vd->file_vnode, 0, 0)) != 0) { 5737 PRN("vn_open(%s) = errno %d", file_path, status); 5738 return (status); 5739 } 5740 5741 /* 5742 * We set vd->file now so that vds_destroy_vd will take care of 5743 * closing the file and releasing the vnode in case of an error. 5744 */ 5745 vd->file = B_TRUE; 5746 5747 vattr.va_mask = AT_SIZE; 5748 if ((status = VOP_GETATTR(vd->file_vnode, &vattr, 0, kcred, NULL)) 5749 != 0) { 5750 PRN("VOP_GETATTR(%s) = errno %d", file_path, status); 5751 return (EIO); 5752 } 5753 5754 vd->dskimg_size = vattr.va_size; 5755 5756 if (vd->file_vnode->v_flag & VNOMAP) { 5757 PRN("File %s cannot be mapped", file_path); 5758 return (EIO); 5759 } 5760 5761 vd->max_xfer_sz = maxphys / DEV_BSIZE; /* default transfer size */ 5762 5763 /* 5764 * Get max_xfer_sz from the device where the file is. 5765 */ 5766 dev = vd->file_vnode->v_vfsp->vfs_dev; 5767 PR0("underlying device of %s = (%d, %d)\n", file_path, 5768 getmajor(dev), getminor(dev)); 5769 5770 status = ldi_open_by_dev(&dev, OTYP_BLK, FREAD, kcred, &lhandle, 5771 vd->vds->ldi_ident); 5772 5773 if (status != 0) { 5774 PR0("ldi_open() returned errno %d for underlying device", 5775 status); 5776 } else { 5777 if ((status = ldi_ioctl(lhandle, DKIOCINFO, 5778 (intptr_t)&dk_cinfo, (vd->open_flags | FKIOCTL), kcred, 5779 &rval)) != 0) { 5780 PR0("ldi_ioctl(DKIOCINFO) returned errno %d for " 5781 "underlying device", status); 5782 } else { 5783 /* 5784 * Store the device's max transfer size for 5785 * return to the client 5786 */ 5787 vd->max_xfer_sz = dk_cinfo.dki_maxtransfer; 5788 } 5789 5790 PR0("close the underlying device"); 5791 (void) ldi_close(lhandle, FREAD, kcred); 5792 } 5793 5794 PR0("using file %s on device (%d, %d), max_xfer = %u blks", 5795 file_path, getmajor(dev), getminor(dev), vd->max_xfer_sz); 5796 5797 if (vd->vdisk_type == VD_DISK_TYPE_SLICE) 5798 status = vd_setup_slice_image(vd); 5799 else 5800 status = vd_setup_disk_image(vd); 5801 5802 return (status); 5803 } 5804 5805 static int 5806 vd_setup_slice_image(vd_t *vd) 5807 { 5808 struct dk_label label; 5809 int status; 5810 5811 /* sector size = block size = DEV_BSIZE */ 5812 vd->block_size = DEV_BSIZE; 5813 vd->vdisk_block_size = DEV_BSIZE; 5814 vd->vdisk_size = vd->dskimg_size / DEV_BSIZE; 5815 vd->vdisk_media = VD_MEDIA_FIXED; 5816 vd->vdisk_label = (vd_slice_label == VD_DISK_LABEL_UNK)? 
5817 vd_file_slice_label : vd_slice_label; 5818 5819 if (vd->vdisk_label == VD_DISK_LABEL_EFI || 5820 vd->dskimg_size >= 2 * ONE_TERABYTE) { 5821 status = vd_setup_partition_efi(vd); 5822 } else { 5823 /* 5824 * We build a default label to get a geometry for 5825 * the vdisk. Then the partition setup function will 5826 * adjust the vtoc so that it defines a single-slice 5827 * disk. 5828 */ 5829 vd_build_default_label(vd->dskimg_size, &label); 5830 vd_label_to_vtocgeom(&label, &vd->vtoc, &vd->dk_geom); 5831 status = vd_setup_partition_vtoc(vd); 5832 } 5833 5834 return (status); 5835 } 5836 5837 static int 5838 vd_setup_disk_image(vd_t *vd) 5839 { 5840 int status; 5841 char *backend_path = vd->device_path; 5842 5843 /* size should be at least sizeof(dk_label) */ 5844 if (vd->dskimg_size < sizeof (struct dk_label)) { 5845 PRN("Size of file has to be at least %ld bytes", 5846 sizeof (struct dk_label)); 5847 return (EIO); 5848 } 5849 5850 /* sector size = block size = DEV_BSIZE */ 5851 vd->block_size = DEV_BSIZE; 5852 vd->vdisk_block_size = DEV_BSIZE; 5853 vd->vdisk_size = vd->dskimg_size / DEV_BSIZE; 5854 5855 /* 5856 * Find and validate the geometry of a disk image. 5857 */ 5858 status = vd_dskimg_validate_geometry(vd); 5859 if (status != 0 && status != EINVAL && status != ENOTSUP) { 5860 PRN("Failed to read label from %s", backend_path); 5861 return (EIO); 5862 } 5863 5864 if (vd_dskimg_is_iso_image(vd)) { 5865 /* 5866 * Indicate whether to call this a CD or DVD from the size 5867 * of the ISO image (images for both drive types are stored 5868 * in the ISO 9660 format). CDs can store up to just under 1 GB. 5869 */ 5870 if ((vd->vdisk_size * vd->vdisk_block_size) > ONE_GIGABYTE) 5871 vd->vdisk_media = VD_MEDIA_DVD; 5872 else 5873 vd->vdisk_media = VD_MEDIA_CD; 5874 } else { 5875 vd->vdisk_media = VD_MEDIA_FIXED; 5876 } 5877 5878 /* Setup devid for the disk image */ 5879 5880 if (vd->vdisk_label != VD_DISK_LABEL_UNK) { 5881 5882 status = vd_dskimg_read_devid(vd, &vd->dskimg_devid); 5883 5884 if (status == 0) { 5885 /* a valid devid was found */ 5886 return (0); 5887 } 5888 5889 if (status != EINVAL) { 5890 /* 5891 * There was an error while trying to read the devid. 5892 * So this disk image may have a devid but we are 5893 * unable to read it. 5894 */ 5895 PR0("cannot read devid for %s", backend_path); 5896 vd->dskimg_devid = NULL; 5897 return (0); 5898 } 5899 } 5900 5901 /* 5902 * No valid device id was found so we create one. Note that a failure 5903 * to create a device id is not fatal and does not prevent the disk 5904 * image from being attached. 5905 */ 5906 PR1("creating devid for %s", backend_path); 5907 5908 if (ddi_devid_init(vd->vds->dip, DEVID_FAB, NULL, 0, 5909 &vd->dskimg_devid) != DDI_SUCCESS) { 5910 PR0("failed to create devid for %s", backend_path); 5911 vd->dskimg_devid = NULL; 5912 return (0); 5913 } 5914 5915 /* 5916 * Write devid to the disk image. The devid is stored in the disk 5917 * image if we have a valid label; otherwise the devid will be stored 5918 * when the user writes a valid label.
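 * A failure to write the devid out is not fatal either: as below, the
 * fabricated devid is simply dropped and the disk image is still
 * exported, just without a persistent device id.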
5919 */ 5920 if (vd->vdisk_label != VD_DISK_LABEL_UNK) { 5921 if (vd_dskimg_write_devid(vd, vd->dskimg_devid) != 0) { 5922 PR0("fail to write devid for %s", backend_path); 5923 ddi_devid_free(vd->dskimg_devid); 5924 vd->dskimg_devid = NULL; 5925 } 5926 } 5927 5928 return (0); 5929 } 5930 5931 5932 /* 5933 * Description: 5934 * Open a device using its device path (supplied by ldm(1m)) 5935 * 5936 * Parameters: 5937 * vd - pointer to structure containing the vDisk info 5938 * flags - open flags 5939 * 5940 * Return Value 5941 * 0 - success 5942 * != 0 - some other non-zero return value from ldi(9F) functions 5943 */ 5944 static int 5945 vd_open_using_ldi_by_name(vd_t *vd, int flags) 5946 { 5947 int status; 5948 char *device_path = vd->device_path; 5949 5950 /* Attempt to open device */ 5951 status = ldi_open_by_name(device_path, flags, kcred, 5952 &vd->ldi_handle[0], vd->vds->ldi_ident); 5953 5954 /* 5955 * The open can fail for example if we are opening an empty slice. 5956 * In case of a failure, we try the open again but this time with 5957 * the FNDELAY flag. 5958 */ 5959 if (status != 0) 5960 status = ldi_open_by_name(device_path, flags | FNDELAY, 5961 kcred, &vd->ldi_handle[0], vd->vds->ldi_ident); 5962 5963 if (status != 0) { 5964 PR0("ldi_open_by_name(%s) = errno %d", device_path, status); 5965 vd->ldi_handle[0] = NULL; 5966 return (status); 5967 } 5968 5969 return (0); 5970 } 5971 5972 /* 5973 * Setup for a virtual disk which backend is a device (a physical disk, 5974 * slice or volume device) exported as a full disk or as a slice. In these 5975 * cases, the backend is accessed using the LDI interface. 5976 */ 5977 static int 5978 vd_setup_backend_ldi(vd_t *vd) 5979 { 5980 int rval, status; 5981 struct dk_cinfo dk_cinfo; 5982 char *device_path = vd->device_path; 5983 5984 /* device has been opened by vd_identify_dev() */ 5985 ASSERT(vd->ldi_handle[0] != NULL); 5986 ASSERT(vd->dev[0] != NULL); 5987 5988 vd->file = B_FALSE; 5989 5990 /* Verify backing device supports dk_cinfo */ 5991 if ((status = ldi_ioctl(vd->ldi_handle[0], DKIOCINFO, 5992 (intptr_t)&dk_cinfo, (vd->open_flags | FKIOCTL), kcred, 5993 &rval)) != 0) { 5994 PRN("ldi_ioctl(DKIOCINFO) returned errno %d for %s", 5995 status, device_path); 5996 return (status); 5997 } 5998 if (dk_cinfo.dki_partition >= V_NUMPAR) { 5999 PRN("slice %u >= maximum slice %u for %s", 6000 dk_cinfo.dki_partition, V_NUMPAR, device_path); 6001 return (EIO); 6002 } 6003 6004 /* 6005 * The device has been opened read-only by vd_identify_dev(), re-open 6006 * it read-write if the write flag is set and we don't have an optical 6007 * device such as a CD-ROM, which, for now, we do not permit writes to 6008 * and thus should not export write operations to the client. 6009 * 6010 * Future: if/when we implement support for guest domains writing to 6011 * optical devices we will need to do further checking of the media type 6012 * to distinguish between read-only and writable discs. 
6013 */ 6014 if (dk_cinfo.dki_ctype == DKC_CDROM) { 6015 6016 vd->open_flags &= ~FWRITE; 6017 6018 } else if (vd->open_flags & FWRITE) { 6019 6020 (void) ldi_close(vd->ldi_handle[0], vd->open_flags & ~FWRITE, 6021 kcred); 6022 status = vd_open_using_ldi_by_name(vd, vd->open_flags); 6023 if (status != 0) { 6024 PR0("Failed to open (%s) = errno %d", 6025 device_path, status); 6026 return (status); 6027 } 6028 } 6029 6030 /* Store the device's max transfer size for return to the client */ 6031 vd->max_xfer_sz = dk_cinfo.dki_maxtransfer; 6032 6033 /* 6034 * We need to work out if it's an ATAPI (IDE CD-ROM) or SCSI device so 6035 * that we can use the correct CDB group when sending USCSI commands. 6036 */ 6037 vd->is_atapi_dev = vd_is_atapi_device(vd); 6038 6039 /* 6040 * Export a full disk. 6041 * 6042 * The exported device can be either a volume, a disk or a CD/DVD 6043 * device. We export a device as a full disk if we have an entire 6044 * disk slice (slice 2) and if this slice is exported as a full disk 6045 * and not as a single slice disk. A CD or DVD device is exported 6046 * as a full disk (even if it isn't s2). A volume is exported as a 6047 * full disk as long as the "slice" option is not specified. 6048 */ 6049 if (vd->vdisk_type == VD_DISK_TYPE_DISK) { 6050 6051 if (vd->volume) { 6052 /* get size of backing device */ 6053 if (ldi_get_size(vd->ldi_handle[0], &vd->dskimg_size) != 6054 DDI_SUCCESS) { 6055 PRN("ldi_get_size() failed for %s", 6056 device_path); 6057 return (EIO); 6058 } 6059 6060 /* setup disk image */ 6061 return (vd_setup_disk_image(vd)); 6062 } 6063 6064 if (dk_cinfo.dki_partition == VD_ENTIRE_DISK_SLICE || 6065 dk_cinfo.dki_ctype == DKC_CDROM) { 6066 ASSERT(!vd->volume); 6067 if (dk_cinfo.dki_ctype == DKC_SCSI_CCS) 6068 vd->scsi = B_TRUE; 6069 return (vd_setup_full_disk(vd)); 6070 } 6071 } 6072 6073 /* 6074 * Export a single slice disk. 6075 * 6076 * The exported device can be either a volume device or a disk slice. If 6077 * it is a disk slice different from slice 2 then it is always exported 6078 * as a single slice disk even if the "slice" option is not specified. 6079 * If it is disk slice 2 or a volume device then it is exported as a 6080 * single slice disk only if the "slice" option is specified. 6081 */ 6082 return (vd_setup_single_slice_disk(vd)); 6083 } 6084 6085 static int 6086 vd_setup_single_slice_disk(vd_t *vd) 6087 { 6088 int status, rval; 6089 struct dk_label label; 6090 char *device_path = vd->device_path; 6091 struct vtoc vtoc; 6092 6093 /* Get size of backing device */ 6094 if (ldi_get_size(vd->ldi_handle[0], &vd->vdisk_size) != DDI_SUCCESS) { 6095 PRN("ldi_get_size() failed for %s", device_path); 6096 return (EIO); 6097 } 6098 vd->vdisk_size = lbtodb(vd->vdisk_size); /* convert to blocks */ 6099 vd->block_size = DEV_BSIZE; 6100 vd->vdisk_block_size = DEV_BSIZE; 6101 vd->vdisk_media = VD_MEDIA_FIXED; 6102 6103 if (vd->volume) { 6104 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE); 6105 } 6106 6107 /* 6108 * We export the slice as a single slice disk even if the "slice" 6109 * option was not specified. 6110 */ 6111 vd->vdisk_type = VD_DISK_TYPE_SLICE; 6112 vd->nslices = 1; 6113 6114 /* 6115 * When exporting a slice or a device as a single slice disk, we don't 6116 * care about any partitioning exposed by the backend. The goal is just 6117 * to export the backend as a flat storage. We provide a fake partition 6118 * table (either a VTOC or EFI), which presents only one slice, to 6119 * accommodate tools expecting a disk label. 
The selection of the label 6120 * type (VTOC or EFI) depends on the value of the vd_slice_label 6121 * variable. 6122 */ 6123 if (vd_slice_label == VD_DISK_LABEL_EFI || 6124 vd->vdisk_size >= ONE_TERABYTE / DEV_BSIZE) { 6125 vd->vdisk_label = VD_DISK_LABEL_EFI; 6126 } else { 6127 status = ldi_ioctl(vd->ldi_handle[0], DKIOCGEXTVTOC, 6128 (intptr_t)&vd->vtoc, (vd->open_flags | FKIOCTL), 6129 kcred, &rval); 6130 6131 if (status == ENOTTY) { 6132 /* try with the non-extended vtoc ioctl */ 6133 status = ldi_ioctl(vd->ldi_handle[0], DKIOCGVTOC, 6134 (intptr_t)&vtoc, (vd->open_flags | FKIOCTL), 6135 kcred, &rval); 6136 vtoctoextvtoc(vtoc, vd->vtoc); 6137 } 6138 6139 if (status == 0) { 6140 status = ldi_ioctl(vd->ldi_handle[0], DKIOCGGEOM, 6141 (intptr_t)&vd->dk_geom, (vd->open_flags | FKIOCTL), 6142 kcred, &rval); 6143 6144 if (status != 0) { 6145 PRN("ldi_ioctl(DKIOCGEOM) returned errno %d " 6146 "for %s", status, device_path); 6147 return (status); 6148 } 6149 vd->vdisk_label = VD_DISK_LABEL_VTOC; 6150 6151 } else if (vd_slice_label == VD_DISK_LABEL_VTOC) { 6152 6153 vd->vdisk_label = VD_DISK_LABEL_VTOC; 6154 vd_build_default_label(vd->vdisk_size * DEV_BSIZE, 6155 &label); 6156 vd_label_to_vtocgeom(&label, &vd->vtoc, &vd->dk_geom); 6157 6158 } else { 6159 vd->vdisk_label = VD_DISK_LABEL_EFI; 6160 } 6161 } 6162 6163 if (vd->vdisk_label == VD_DISK_LABEL_VTOC) { 6164 /* export with a fake VTOC label */ 6165 status = vd_setup_partition_vtoc(vd); 6166 6167 } else { 6168 /* export with a fake EFI label */ 6169 status = vd_setup_partition_efi(vd); 6170 } 6171 6172 return (status); 6173 } 6174 6175 static int 6176 vd_backend_check_size(vd_t *vd) 6177 { 6178 size_t backend_size, old_size, new_size; 6179 struct dk_minfo minfo; 6180 vattr_t vattr; 6181 int rval, rv; 6182 6183 if (vd->file) { 6184 6185 /* file (slice or full disk) */ 6186 vattr.va_mask = AT_SIZE; 6187 rv = VOP_GETATTR(vd->file_vnode, &vattr, 0, kcred, NULL); 6188 if (rv != 0) { 6189 PR0("VOP_GETATTR(%s) = errno %d", vd->device_path, rv); 6190 return (rv); 6191 } 6192 backend_size = vattr.va_size; 6193 6194 } else if (vd->volume || vd->vdisk_type == VD_DISK_TYPE_SLICE) { 6195 6196 /* physical slice or volume (slice or full disk) */ 6197 rv = ldi_get_size(vd->ldi_handle[0], &backend_size); 6198 if (rv != DDI_SUCCESS) { 6199 PR0("ldi_get_size() failed for %s", vd->device_path); 6200 return (EIO); 6201 } 6202 6203 } else { 6204 6205 /* physical disk */ 6206 ASSERT(vd->vdisk_type == VD_DISK_TYPE_DISK); 6207 rv = ldi_ioctl(vd->ldi_handle[0], DKIOCGMEDIAINFO, 6208 (intptr_t)&minfo, (vd->open_flags | FKIOCTL), 6209 kcred, &rval); 6210 if (rv != 0) { 6211 PR0("DKIOCGMEDIAINFO failed for %s (err=%d)", 6212 vd->device_path, rv); 6213 return (rv); 6214 } 6215 backend_size = minfo.dki_capacity * minfo.dki_lbsize; 6216 } 6217 6218 old_size = vd->vdisk_size; 6219 new_size = backend_size / DEV_BSIZE; 6220 6221 /* check if size has changed */ 6222 if (old_size != VD_SIZE_UNKNOWN && old_size == new_size) 6223 return (0); 6224 6225 vd->vdisk_size = new_size; 6226 6227 if (vd->file || vd->volume) 6228 vd->dskimg_size = backend_size; 6229 6230 /* 6231 * If we are exporting a single-slice disk and the size of the backend 6232 * has changed then we regenerate the partition setup so that the 6233 * partitioning matches with the new disk backend size. 
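 * For example, if a backing volume such as a zvol is grown, the fake
 * VTOC or EFI label is rebuilt below so that slice 0 spans the new
 * number of blocks.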
6234 */ 6235 6236 if (vd->vdisk_type == VD_DISK_TYPE_SLICE) { 6237 /* slice or file or device exported as a slice */ 6238 if (vd->vdisk_label == VD_DISK_LABEL_VTOC) { 6239 rv = vd_setup_partition_vtoc(vd); 6240 if (rv != 0) { 6241 PR0("vd_setup_partition_vtoc() failed for %s " 6242 "(err = %d)", vd->device_path, rv); 6243 return (rv); 6244 } 6245 } else { 6246 rv = vd_setup_partition_efi(vd); 6247 if (rv != 0) { 6248 PR0("vd_setup_partition_efi() failed for %s " 6249 "(err = %d)", vd->device_path, rv); 6250 return (rv); 6251 } 6252 } 6253 6254 } else if (!vd->file && !vd->volume) { 6255 /* physical disk */ 6256 ASSERT(vd->vdisk_type == VD_DISK_TYPE_DISK); 6257 vd->block_size = minfo.dki_lbsize; 6258 vd->vdisk_media = 6259 DK_MEDIATYPE2VD_MEDIATYPE(minfo.dki_media_type); 6260 } 6261 6262 return (0); 6263 } 6264 6265 /* 6266 * Description: 6267 * Open a device using its device path and identify if this is 6268 * a disk device or a volume device. 6269 * 6270 * Parameters: 6271 * vd - pointer to structure containing the vDisk info 6272 * dtype - return the driver type of the device 6273 * 6274 * Return Value 6275 * 0 - success 6276 * != 0 - some other non-zero return value from ldi(9F) functions 6277 */ 6278 static int 6279 vd_identify_dev(vd_t *vd, int *dtype) 6280 { 6281 int status, i; 6282 char *device_path = vd->device_path; 6283 char *drv_name; 6284 int drv_type; 6285 vds_t *vds = vd->vds; 6286 6287 status = vd_open_using_ldi_by_name(vd, vd->open_flags & ~FWRITE); 6288 if (status != 0) { 6289 PR0("Failed to open (%s) = errno %d", device_path, status); 6290 return (status); 6291 } 6292 6293 /* Get device number of backing device */ 6294 if ((status = ldi_get_dev(vd->ldi_handle[0], &vd->dev[0])) != 0) { 6295 PRN("ldi_get_dev() returned errno %d for %s", 6296 status, device_path); 6297 return (status); 6298 } 6299 6300 /* 6301 * We start by looking if the driver is in the list from vds.conf 6302 * so that we can override the built-in list using vds.conf. 6303 */ 6304 drv_name = ddi_major_to_name(getmajor(vd->dev[0])); 6305 drv_type = VD_DRIVER_UNKNOWN; 6306 6307 /* check vds.conf list */ 6308 for (i = 0; i < vds->num_drivers; i++) { 6309 if (vds->driver_types[i].type == VD_DRIVER_UNKNOWN) { 6310 /* ignore invalid entries */ 6311 continue; 6312 } 6313 if (strcmp(drv_name, vds->driver_types[i].name) == 0) { 6314 drv_type = vds->driver_types[i].type; 6315 goto done; 6316 } 6317 } 6318 6319 /* check built-in list */ 6320 for (i = 0; i < VDS_NUM_DRIVERS; i++) { 6321 if (strcmp(drv_name, vds_driver_types[i].name) == 0) { 6322 drv_type = vds_driver_types[i].type; 6323 goto done; 6324 } 6325 } 6326 6327 done: 6328 PR0("driver %s identified as %s", drv_name, 6329 (drv_type == VD_DRIVER_DISK)? "DISK" : 6330 (drv_type == VD_DRIVER_VOLUME)? "VOLUME" : "UNKNOWN"); 6331 6332 if (strcmp(drv_name, "zfs") == 0) 6333 vd->zvol = B_TRUE; 6334 6335 *dtype = drv_type; 6336 6337 return (0); 6338 } 6339 6340 static int 6341 vd_setup_vd(vd_t *vd) 6342 { 6343 int status, drv_type, pseudo; 6344 dev_info_t *dip; 6345 vnode_t *vnp; 6346 char *path = vd->device_path; 6347 6348 /* make sure the vdisk backend is valid */ 6349 if ((status = lookupname(path, UIO_SYSSPACE, 6350 FOLLOW, NULLVPP, &vnp)) != 0) { 6351 PR0("Cannot lookup %s errno %d", path, status); 6352 goto done; 6353 } 6354 6355 switch (vnp->v_type) { 6356 case VREG: 6357 /* 6358 * Backend is a file so it is exported as a full disk or as a 6359 * single slice disk using the vnode interface. 
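 *
 * For illustration only, such a backend is typically a flat file that
 * was created and exported with something like:
 *
 *	# mkfile 10g /ldoms/disk0.img
 *	# ldm add-vdsdev /ldoms/disk0.img vol0@primary-vds0
 *
 * (the path and names above are just examples); whether the guest then
 * sees a full disk or a single slice disk depends on the options used
 * when the backend was exported.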
6360 */ 6361 VN_RELE(vnp); 6362 vd->volume = B_FALSE; 6363 status = vd_setup_backend_vnode(vd); 6364 break; 6365 6366 case VBLK: 6367 case VCHR: 6368 /* 6369 * Backend is a device. In that case, it is exported using the 6370 * LDI interface, and it is exported either as a single-slice 6371 * disk or as a full disk depending on the "slice" option and 6372 * on the type of device. 6373 * 6374 * - A volume device is exported as a single-slice disk if the 6375 * "slice" is specified, otherwise it is exported as a full 6376 * disk. 6377 * 6378 * - A disk slice (different from slice 2) is always exported 6379 * as a single slice disk using the LDI interface. 6380 * 6381 * - The slice 2 of a disk is exported as a single slice disk 6382 * if the "slice" option is specified, otherwise the entire 6383 * disk will be exported. 6384 * 6385 * - The slice of a CD or DVD is exported as single slice disk 6386 * if the "slice" option is specified, otherwise the entire 6387 * disk will be exported. 6388 */ 6389 6390 /* check if this is a pseudo device */ 6391 if ((dip = ddi_hold_devi_by_instance(getmajor(vnp->v_rdev), 6392 dev_to_instance(vnp->v_rdev), 0)) == NULL) { 6393 PRN("%s is no longer accessible", path); 6394 VN_RELE(vnp); 6395 status = EIO; 6396 break; 6397 } 6398 pseudo = is_pseudo_device(dip); 6399 ddi_release_devi(dip); 6400 VN_RELE(vnp); 6401 6402 if (vd_identify_dev(vd, &drv_type) != 0) { 6403 PRN("%s identification failed", path); 6404 status = EIO; 6405 break; 6406 } 6407 6408 /* 6409 * If the driver hasn't been identified then we consider that 6410 * pseudo devices are volumes and other devices are disks. 6411 */ 6412 if (drv_type == VD_DRIVER_VOLUME || 6413 (drv_type == VD_DRIVER_UNKNOWN && pseudo)) { 6414 vd->volume = B_TRUE; 6415 } 6416 6417 /* 6418 * If this is a volume device then its usage depends if the 6419 * "slice" option is set or not. If the "slice" option is set 6420 * then the volume device will be exported as a single slice, 6421 * otherwise it will be exported as a full disk. 6422 * 6423 * For backward compatibility, if vd_volume_force_slice is set 6424 * then we always export volume devices as slices. 6425 */ 6426 if (vd->volume && vd_volume_force_slice) { 6427 vd->vdisk_type = VD_DISK_TYPE_SLICE; 6428 vd->nslices = 1; 6429 } 6430 6431 status = vd_setup_backend_ldi(vd); 6432 break; 6433 6434 default: 6435 PRN("Unsupported vdisk backend %s", path); 6436 VN_RELE(vnp); 6437 status = EBADF; 6438 } 6439 6440 done: 6441 if (status != 0) { 6442 /* 6443 * If the error is retryable print an error message only 6444 * during the first try. 
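 * The errors treated as retryable below (ENXIO, ENODEV, ENOENT and
 * EROFS) are mapped to EAGAIN so that the caller can retry the setup
 * later, once the backend becomes accessible.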
6445 */ 6446 if (status == ENXIO || status == ENODEV || 6447 status == ENOENT || status == EROFS) { 6448 if (!(vd->initialized & VD_SETUP_ERROR)) { 6449 PRN("%s is currently inaccessible (error %d)", 6450 path, status); 6451 } 6452 status = EAGAIN; 6453 } else { 6454 PRN("%s can not be exported as a virtual disk " 6455 "(error %d)", path, status); 6456 } 6457 vd->initialized |= VD_SETUP_ERROR; 6458 6459 } else if (vd->initialized & VD_SETUP_ERROR) { 6460 /* print a message only if we previously had an error */ 6461 PRN("%s is now online", path); 6462 vd->initialized &= ~VD_SETUP_ERROR; 6463 } 6464 6465 return (status); 6466 } 6467 6468 static int 6469 vds_do_init_vd(vds_t *vds, uint64_t id, char *device_path, uint64_t options, 6470 uint64_t ldc_id, vd_t **vdp) 6471 { 6472 char tq_name[TASKQ_NAMELEN]; 6473 int status; 6474 ddi_iblock_cookie_t iblock = NULL; 6475 ldc_attr_t ldc_attr; 6476 vd_t *vd; 6477 6478 6479 ASSERT(vds != NULL); 6480 ASSERT(device_path != NULL); 6481 ASSERT(vdp != NULL); 6482 PR0("Adding vdisk for %s", device_path); 6483 6484 if ((vd = kmem_zalloc(sizeof (*vd), KM_NOSLEEP)) == NULL) { 6485 PRN("No memory for virtual disk"); 6486 return (EAGAIN); 6487 } 6488 *vdp = vd; /* assign here so vds_destroy_vd() can cleanup later */ 6489 vd->vds = vds; 6490 (void) strncpy(vd->device_path, device_path, MAXPATHLEN); 6491 6492 /* Setup open flags */ 6493 vd->open_flags = FREAD; 6494 6495 if (!(options & VD_OPT_RDONLY)) 6496 vd->open_flags |= FWRITE; 6497 6498 if (options & VD_OPT_EXCLUSIVE) 6499 vd->open_flags |= FEXCL; 6500 6501 /* Setup disk type */ 6502 if (options & VD_OPT_SLICE) { 6503 vd->vdisk_type = VD_DISK_TYPE_SLICE; 6504 vd->nslices = 1; 6505 } else { 6506 vd->vdisk_type = VD_DISK_TYPE_DISK; 6507 vd->nslices = V_NUMPAR; 6508 } 6509 6510 /* default disk label */ 6511 vd->vdisk_label = VD_DISK_LABEL_UNK; 6512 6513 /* Open vdisk and initialize parameters */ 6514 if ((status = vd_setup_vd(vd)) == 0) { 6515 vd->initialized |= VD_DISK_READY; 6516 6517 ASSERT(vd->nslices > 0 && vd->nslices <= V_NUMPAR); 6518 PR0("vdisk_type = %s, volume = %s, file = %s, nslices = %u", 6519 ((vd->vdisk_type == VD_DISK_TYPE_DISK) ? "disk" : "slice"), 6520 (vd->volume ? "yes" : "no"), (vd->file ? 
"yes" : "no"), 6521 vd->nslices); 6522 } else { 6523 if (status != EAGAIN) 6524 return (status); 6525 } 6526 6527 /* Initialize locking */ 6528 if (ddi_get_soft_iblock_cookie(vds->dip, DDI_SOFTINT_MED, 6529 &iblock) != DDI_SUCCESS) { 6530 PRN("Could not get iblock cookie."); 6531 return (EIO); 6532 } 6533 6534 mutex_init(&vd->lock, NULL, MUTEX_DRIVER, iblock); 6535 vd->initialized |= VD_LOCKING; 6536 6537 6538 /* Create start and completion task queues for the vdisk */ 6539 (void) snprintf(tq_name, sizeof (tq_name), "vd_startq%lu", id); 6540 PR1("tq_name = %s", tq_name); 6541 if ((vd->startq = ddi_taskq_create(vds->dip, tq_name, 1, 6542 TASKQ_DEFAULTPRI, 0)) == NULL) { 6543 PRN("Could not create task queue"); 6544 return (EIO); 6545 } 6546 (void) snprintf(tq_name, sizeof (tq_name), "vd_completionq%lu", id); 6547 PR1("tq_name = %s", tq_name); 6548 if ((vd->completionq = ddi_taskq_create(vds->dip, tq_name, 1, 6549 TASKQ_DEFAULTPRI, 0)) == NULL) { 6550 PRN("Could not create task queue"); 6551 return (EIO); 6552 } 6553 6554 /* Allocate the staging buffer */ 6555 vd->max_msglen = sizeof (vio_msg_t); /* baseline vio message size */ 6556 vd->vio_msgp = kmem_alloc(vd->max_msglen, KM_SLEEP); 6557 6558 vd->enabled = 1; /* before callback can dispatch to startq */ 6559 6560 6561 /* Bring up LDC */ 6562 ldc_attr.devclass = LDC_DEV_BLK_SVC; 6563 ldc_attr.instance = ddi_get_instance(vds->dip); 6564 ldc_attr.mode = LDC_MODE_UNRELIABLE; 6565 ldc_attr.mtu = VD_LDC_MTU; 6566 if ((status = ldc_init(ldc_id, &ldc_attr, &vd->ldc_handle)) != 0) { 6567 PRN("Could not initialize LDC channel %lx, " 6568 "init failed with error %d", ldc_id, status); 6569 return (status); 6570 } 6571 vd->initialized |= VD_LDC; 6572 6573 if ((status = ldc_reg_callback(vd->ldc_handle, vd_handle_ldc_events, 6574 (caddr_t)vd)) != 0) { 6575 PRN("Could not initialize LDC channel %lu," 6576 "reg_callback failed with error %d", ldc_id, status); 6577 return (status); 6578 } 6579 6580 if ((status = ldc_open(vd->ldc_handle)) != 0) { 6581 PRN("Could not initialize LDC channel %lu," 6582 "open failed with error %d", ldc_id, status); 6583 return (status); 6584 } 6585 6586 if ((status = ldc_up(vd->ldc_handle)) != 0) { 6587 PR0("ldc_up() returned errno %d", status); 6588 } 6589 6590 /* Allocate the inband task memory handle */ 6591 status = ldc_mem_alloc_handle(vd->ldc_handle, &(vd->inband_task.mhdl)); 6592 if (status) { 6593 PRN("Could not initialize LDC channel %lu," 6594 "alloc_handle failed with error %d", ldc_id, status); 6595 return (ENXIO); 6596 } 6597 6598 /* Add the successfully-initialized vdisk to the server's table */ 6599 if (mod_hash_insert(vds->vd_table, (mod_hash_key_t)id, vd) != 0) { 6600 PRN("Error adding vdisk ID %lu to table", id); 6601 return (EIO); 6602 } 6603 6604 /* store initial state */ 6605 vd->state = VD_STATE_INIT; 6606 6607 return (0); 6608 } 6609 6610 static void 6611 vd_free_dring_task(vd_t *vdp) 6612 { 6613 if (vdp->dring_task != NULL) { 6614 ASSERT(vdp->dring_len != 0); 6615 /* Free all dring_task memory handles */ 6616 for (int i = 0; i < vdp->dring_len; i++) { 6617 (void) ldc_mem_free_handle(vdp->dring_task[i].mhdl); 6618 kmem_free(vdp->dring_task[i].request, 6619 (vdp->descriptor_size - 6620 sizeof (vio_dring_entry_hdr_t))); 6621 vdp->dring_task[i].request = NULL; 6622 kmem_free(vdp->dring_task[i].msg, vdp->max_msglen); 6623 vdp->dring_task[i].msg = NULL; 6624 } 6625 kmem_free(vdp->dring_task, 6626 (sizeof (*vdp->dring_task)) * vdp->dring_len); 6627 vdp->dring_task = NULL; 6628 } 6629 } 6630 6631 /* 6632 * 
Destroy the state associated with a virtual disk 6633 */ 6634 static void 6635 vds_destroy_vd(void *arg) 6636 { 6637 vd_t *vd = (vd_t *)arg; 6638 int retry = 0, rv; 6639 6640 if (vd == NULL) 6641 return; 6642 6643 PR0("Destroying vdisk state"); 6644 6645 /* Disable queuing requests for the vdisk */ 6646 if (vd->initialized & VD_LOCKING) { 6647 mutex_enter(&vd->lock); 6648 vd->enabled = 0; 6649 mutex_exit(&vd->lock); 6650 } 6651 6652 /* Drain and destroy start queue (*before* destroying completionq) */ 6653 if (vd->startq != NULL) 6654 ddi_taskq_destroy(vd->startq); /* waits for queued tasks */ 6655 6656 /* Drain and destroy completion queue (*before* shutting down LDC) */ 6657 if (vd->completionq != NULL) 6658 ddi_taskq_destroy(vd->completionq); /* waits for tasks */ 6659 6660 vd_free_dring_task(vd); 6661 6662 /* Free the inband task memory handle */ 6663 (void) ldc_mem_free_handle(vd->inband_task.mhdl); 6664 6665 /* Shut down LDC */ 6666 if (vd->initialized & VD_LDC) { 6667 /* unmap the dring */ 6668 if (vd->initialized & VD_DRING) 6669 (void) ldc_mem_dring_unmap(vd->dring_handle); 6670 6671 /* close LDC channel - retry on EAGAIN */ 6672 while ((rv = ldc_close(vd->ldc_handle)) == EAGAIN) { 6673 if (++retry > vds_ldc_retries) { 6674 PR0("Timed out closing channel"); 6675 break; 6676 } 6677 drv_usecwait(vds_ldc_delay); 6678 } 6679 if (rv == 0) { 6680 (void) ldc_unreg_callback(vd->ldc_handle); 6681 (void) ldc_fini(vd->ldc_handle); 6682 } else { 6683 /* 6684 * Closing the LDC channel has failed. Ideally we should 6685 * fail here but there is no Zeus level infrastructure 6686 * to handle this. The MD has already been changed and 6687 * we have to do the close. So we try to do as much 6688 * clean up as we can. 6689 */ 6690 (void) ldc_set_cb_mode(vd->ldc_handle, LDC_CB_DISABLE); 6691 while (ldc_unreg_callback(vd->ldc_handle) == EAGAIN) 6692 drv_usecwait(vds_ldc_delay); 6693 } 6694 } 6695 6696 /* Free the staging buffer for msgs */ 6697 if (vd->vio_msgp != NULL) { 6698 kmem_free(vd->vio_msgp, vd->max_msglen); 6699 vd->vio_msgp = NULL; 6700 } 6701 6702 /* Free the inband message buffer */ 6703 if (vd->inband_task.msg != NULL) { 6704 kmem_free(vd->inband_task.msg, vd->max_msglen); 6705 vd->inband_task.msg = NULL; 6706 } 6707 6708 if (vd->file) { 6709 /* Close file */ 6710 (void) VOP_CLOSE(vd->file_vnode, vd->open_flags, 1, 6711 0, kcred, NULL); 6712 VN_RELE(vd->file_vnode); 6713 } else { 6714 /* Close any open backing-device slices */ 6715 for (uint_t slice = 0; slice < V_NUMPAR; slice++) { 6716 if (vd->ldi_handle[slice] != NULL) { 6717 PR0("Closing slice %u", slice); 6718 (void) ldi_close(vd->ldi_handle[slice], 6719 vd->open_flags, kcred); 6720 } 6721 } 6722 } 6723 6724 /* Free disk image devid */ 6725 if (vd->dskimg_devid != NULL) 6726 ddi_devid_free(vd->dskimg_devid); 6727 6728 /* Free any fake label */ 6729 if (vd->flabel) { 6730 kmem_free(vd->flabel, vd->flabel_size); 6731 vd->flabel = NULL; 6732 vd->flabel_size = 0; 6733 } 6734 6735 /* Free lock */ 6736 if (vd->initialized & VD_LOCKING) 6737 mutex_destroy(&vd->lock); 6738 6739 /* Finally, free the vdisk structure itself */ 6740 kmem_free(vd, sizeof (*vd)); 6741 } 6742 6743 static int 6744 vds_init_vd(vds_t *vds, uint64_t id, char *device_path, uint64_t options, 6745 uint64_t ldc_id) 6746 { 6747 int status; 6748 vd_t *vd = NULL; 6749 6750 6751 if ((status = vds_do_init_vd(vds, id, device_path, options, 6752 ldc_id, &vd)) != 0) 6753 vds_destroy_vd(vd); 6754 6755 return (status); 6756 } 6757 6758 static int 6759 vds_do_get_ldc_id(md_t *md, 
mde_cookie_t vd_node, mde_cookie_t *channel, 6760 uint64_t *ldc_id) 6761 { 6762 int num_channels; 6763 6764 6765 /* Look for channel endpoint child(ren) of the vdisk MD node */ 6766 if ((num_channels = md_scan_dag(md, vd_node, 6767 md_find_name(md, VD_CHANNEL_ENDPOINT), 6768 md_find_name(md, "fwd"), channel)) <= 0) { 6769 PRN("No \"%s\" found for virtual disk", VD_CHANNEL_ENDPOINT); 6770 return (-1); 6771 } 6772 6773 /* Get the "id" value for the first channel endpoint node */ 6774 if (md_get_prop_val(md, channel[0], VD_ID_PROP, ldc_id) != 0) { 6775 PRN("No \"%s\" property found for \"%s\" of vdisk", 6776 VD_ID_PROP, VD_CHANNEL_ENDPOINT); 6777 return (-1); 6778 } 6779 6780 if (num_channels > 1) { 6781 PRN("Using ID of first of multiple channels for this vdisk"); 6782 } 6783 6784 return (0); 6785 } 6786 6787 static int 6788 vds_get_ldc_id(md_t *md, mde_cookie_t vd_node, uint64_t *ldc_id) 6789 { 6790 int num_nodes, status; 6791 size_t size; 6792 mde_cookie_t *channel; 6793 6794 6795 if ((num_nodes = md_node_count(md)) <= 0) { 6796 PRN("Invalid node count in Machine Description subtree"); 6797 return (-1); 6798 } 6799 size = num_nodes*(sizeof (*channel)); 6800 channel = kmem_zalloc(size, KM_SLEEP); 6801 status = vds_do_get_ldc_id(md, vd_node, channel, ldc_id); 6802 kmem_free(channel, size); 6803 6804 return (status); 6805 } 6806 6807 /* 6808 * Function: 6809 * vds_get_options 6810 * 6811 * Description: 6812 * Parse the options of a vds node. Options are defined as an array 6813 * of strings in the vds-block-device-opts property of the vds node 6814 * in the machine description. Options are returned as a bitmask. The 6815 * mapping between the bitmask options and the options strings from the 6816 * machine description is defined in the vd_bdev_options[] array. 6817 * 6818 * The vds-block-device-opts property is optional. If a vds has no such 6819 * property then no option is defined. 6820 * 6821 * Parameters: 6822 * md - machine description. 6823 * vd_node - vds node in the machine description for which 6824 * options have to be parsed. 6825 * options - the returned options. 6826 * 6827 * Return Code: 6828 * none. 6829 */ 6830 static void 6831 vds_get_options(md_t *md, mde_cookie_t vd_node, uint64_t *options) 6832 { 6833 char *optstr, *opt; 6834 int len, n, i; 6835 6836 *options = 0; 6837 6838 if (md_get_prop_data(md, vd_node, VD_BLOCK_DEVICE_OPTS, 6839 (uint8_t **)&optstr, &len) != 0) { 6840 PR0("No options found"); 6841 return; 6842 } 6843 6844 /* parse options */ 6845 opt = optstr; 6846 n = sizeof (vd_bdev_options) / sizeof (vd_option_t); 6847 6848 while (opt < optstr + len) { 6849 for (i = 0; i < n; i++) { 6850 if (strncmp(vd_bdev_options[i].vdo_name, 6851 opt, VD_OPTION_NLEN) == 0) { 6852 *options |= vd_bdev_options[i].vdo_value; 6853 break; 6854 } 6855 } 6856 6857 if (i < n) { 6858 PR0("option: %s", opt); 6859 } else { 6860 PRN("option %s is unknown or unsupported", opt); 6861 } 6862 6863 opt += strlen(opt) + 1; 6864 } 6865 } 6866 6867 static void 6868 vds_driver_types_free(vds_t *vds) 6869 { 6870 if (vds->driver_types != NULL) { 6871 kmem_free(vds->driver_types, sizeof (vd_driver_type_t) * 6872 vds->num_drivers); 6873 vds->driver_types = NULL; 6874 vds->num_drivers = 0; 6875 } 6876 } 6877 6878 /* 6879 * Update the driver type list with information from vds.conf. 
6880 */ 6881 static void 6882 vds_driver_types_update(vds_t *vds) 6883 { 6884 char **list, *s; 6885 uint_t i, num, count = 0, len; 6886 6887 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, vds->dip, 6888 DDI_PROP_DONTPASS, "driver-type-list", &list, &num) != 6889 DDI_PROP_SUCCESS) 6890 return; 6891 6892 /* 6893 * We create a driver_types list with as many as entries as there 6894 * is in the driver-type-list from vds.conf. However only valid 6895 * entries will be populated (i.e. entries from driver-type-list 6896 * with a valid syntax). Invalid entries will be left blank so 6897 * they will have no driver name and the driver type will be 6898 * VD_DRIVER_UNKNOWN (= 0). 6899 */ 6900 vds->num_drivers = num; 6901 vds->driver_types = kmem_zalloc(sizeof (vd_driver_type_t) * num, 6902 KM_SLEEP); 6903 6904 for (i = 0; i < num; i++) { 6905 6906 s = strchr(list[i], ':'); 6907 6908 if (s == NULL) { 6909 PRN("vds.conf: driver-type-list, entry %d (%s): " 6910 "a colon is expected in the entry", 6911 i, list[i]); 6912 continue; 6913 } 6914 6915 len = (uintptr_t)s - (uintptr_t)list[i]; 6916 6917 if (len == 0) { 6918 PRN("vds.conf: driver-type-list, entry %d (%s): " 6919 "the driver name is empty", 6920 i, list[i]); 6921 continue; 6922 } 6923 6924 if (len >= VD_DRIVER_NAME_LEN) { 6925 PRN("vds.conf: driver-type-list, entry %d (%s): " 6926 "the driver name is too long", 6927 i, list[i]); 6928 continue; 6929 } 6930 6931 if (strcmp(s + 1, "disk") == 0) { 6932 6933 vds->driver_types[i].type = VD_DRIVER_DISK; 6934 6935 } else if (strcmp(s + 1, "volume") == 0) { 6936 6937 vds->driver_types[i].type = VD_DRIVER_VOLUME; 6938 6939 } else { 6940 PRN("vds.conf: driver-type-list, entry %d (%s): " 6941 "the driver type is invalid", 6942 i, list[i]); 6943 continue; 6944 } 6945 6946 (void) strncpy(vds->driver_types[i].name, list[i], len); 6947 6948 PR0("driver-type-list, entry %d (%s) added", 6949 i, list[i]); 6950 6951 count++; 6952 } 6953 6954 ddi_prop_free(list); 6955 6956 if (count == 0) { 6957 /* nothing was added, clean up */ 6958 vds_driver_types_free(vds); 6959 } 6960 } 6961 6962 static void 6963 vds_add_vd(vds_t *vds, md_t *md, mde_cookie_t vd_node) 6964 { 6965 char *device_path = NULL; 6966 uint64_t id = 0, ldc_id = 0, options = 0; 6967 6968 if (md_get_prop_val(md, vd_node, VD_ID_PROP, &id) != 0) { 6969 PRN("Error getting vdisk \"%s\"", VD_ID_PROP); 6970 return; 6971 } 6972 PR0("Adding vdisk ID %lu", id); 6973 if (md_get_prop_str(md, vd_node, VD_BLOCK_DEVICE_PROP, 6974 &device_path) != 0) { 6975 PRN("Error getting vdisk \"%s\"", VD_BLOCK_DEVICE_PROP); 6976 return; 6977 } 6978 6979 vds_get_options(md, vd_node, &options); 6980 6981 if (vds_get_ldc_id(md, vd_node, &ldc_id) != 0) { 6982 PRN("Error getting LDC ID for vdisk %lu", id); 6983 return; 6984 } 6985 6986 if (vds_init_vd(vds, id, device_path, options, ldc_id) != 0) { 6987 PRN("Failed to add vdisk ID %lu", id); 6988 if (mod_hash_destroy(vds->vd_table, (mod_hash_key_t)id) != 0) 6989 PRN("No vDisk entry found for vdisk ID %lu", id); 6990 return; 6991 } 6992 } 6993 6994 static void 6995 vds_remove_vd(vds_t *vds, md_t *md, mde_cookie_t vd_node) 6996 { 6997 uint64_t id = 0; 6998 6999 7000 if (md_get_prop_val(md, vd_node, VD_ID_PROP, &id) != 0) { 7001 PRN("Unable to get \"%s\" property from vdisk's MD node", 7002 VD_ID_PROP); 7003 return; 7004 } 7005 PR0("Removing vdisk ID %lu", id); 7006 if (mod_hash_destroy(vds->vd_table, (mod_hash_key_t)id) != 0) 7007 PRN("No vdisk entry found for vdisk ID %lu", id); 7008 } 7009 7010 static void 7011 vds_change_vd(vds_t *vds, 
md_t *prev_md, mde_cookie_t prev_vd_node, 7012 md_t *curr_md, mde_cookie_t curr_vd_node) 7013 { 7014 char *curr_dev, *prev_dev; 7015 uint64_t curr_id = 0, curr_ldc_id = 0, curr_options = 0; 7016 uint64_t prev_id = 0, prev_ldc_id = 0, prev_options = 0; 7017 size_t len; 7018 7019 7020 /* Validate that vdisk ID has not changed */ 7021 if (md_get_prop_val(prev_md, prev_vd_node, VD_ID_PROP, &prev_id) != 0) { 7022 PRN("Error getting previous vdisk \"%s\" property", 7023 VD_ID_PROP); 7024 return; 7025 } 7026 if (md_get_prop_val(curr_md, curr_vd_node, VD_ID_PROP, &curr_id) != 0) { 7027 PRN("Error getting current vdisk \"%s\" property", VD_ID_PROP); 7028 return; 7029 } 7030 if (curr_id != prev_id) { 7031 PRN("Not changing vdisk: ID changed from %lu to %lu", 7032 prev_id, curr_id); 7033 return; 7034 } 7035 7036 /* Validate that LDC ID has not changed */ 7037 if (vds_get_ldc_id(prev_md, prev_vd_node, &prev_ldc_id) != 0) { 7038 PRN("Error getting LDC ID for vdisk %lu", prev_id); 7039 return; 7040 } 7041 7042 if (vds_get_ldc_id(curr_md, curr_vd_node, &curr_ldc_id) != 0) { 7043 PRN("Error getting LDC ID for vdisk %lu", curr_id); 7044 return; 7045 } 7046 if (curr_ldc_id != prev_ldc_id) { 7047 _NOTE(NOTREACHED); /* lint is confused */ 7048 PRN("Not changing vdisk: " 7049 "LDC ID changed from %lu to %lu", prev_ldc_id, curr_ldc_id); 7050 return; 7051 } 7052 7053 /* Determine whether device path has changed */ 7054 if (md_get_prop_str(prev_md, prev_vd_node, VD_BLOCK_DEVICE_PROP, 7055 &prev_dev) != 0) { 7056 PRN("Error getting previous vdisk \"%s\"", 7057 VD_BLOCK_DEVICE_PROP); 7058 return; 7059 } 7060 if (md_get_prop_str(curr_md, curr_vd_node, VD_BLOCK_DEVICE_PROP, 7061 &curr_dev) != 0) { 7062 PRN("Error getting current vdisk \"%s\"", VD_BLOCK_DEVICE_PROP); 7063 return; 7064 } 7065 if (((len = strlen(curr_dev)) == strlen(prev_dev)) && 7066 (strncmp(curr_dev, prev_dev, len) == 0)) 7067 return; /* no relevant (supported) change */ 7068 7069 /* Validate that options have not changed */ 7070 vds_get_options(prev_md, prev_vd_node, &prev_options); 7071 vds_get_options(curr_md, curr_vd_node, &curr_options); 7072 if (prev_options != curr_options) { 7073 PRN("Not changing vdisk: options changed from %lx to %lx", 7074 prev_options, curr_options); 7075 return; 7076 } 7077 7078 PR0("Changing vdisk ID %lu", prev_id); 7079 7080 /* Remove old state, which will close vdisk and reset */ 7081 if (mod_hash_destroy(vds->vd_table, (mod_hash_key_t)prev_id) != 0) 7082 PRN("No entry found for vdisk ID %lu", prev_id); 7083 7084 /* Re-initialize vdisk with new state */ 7085 if (vds_init_vd(vds, curr_id, curr_dev, curr_options, 7086 curr_ldc_id) != 0) { 7087 PRN("Failed to change vdisk ID %lu", curr_id); 7088 return; 7089 } 7090 } 7091 7092 static int 7093 vds_process_md(void *arg, mdeg_result_t *md) 7094 { 7095 int i; 7096 vds_t *vds = arg; 7097 7098 7099 if (md == NULL) 7100 return (MDEG_FAILURE); 7101 ASSERT(vds != NULL); 7102 7103 for (i = 0; i < md->removed.nelem; i++) 7104 vds_remove_vd(vds, md->removed.mdp, md->removed.mdep[i]); 7105 for (i = 0; i < md->match_curr.nelem; i++) 7106 vds_change_vd(vds, md->match_prev.mdp, md->match_prev.mdep[i], 7107 md->match_curr.mdp, md->match_curr.mdep[i]); 7108 for (i = 0; i < md->added.nelem; i++) 7109 vds_add_vd(vds, md->added.mdp, md->added.mdep[i]); 7110 7111 return (MDEG_SUCCESS); 7112 } 7113 7114 7115 static int 7116 vds_do_attach(dev_info_t *dip) 7117 { 7118 int status, sz; 7119 int cfg_handle; 7120 minor_t instance = ddi_get_instance(dip); 7121 vds_t *vds; 7122 mdeg_prop_spec_t 
*pspecp; 7123 mdeg_node_spec_t *ispecp; 7124 7125 /* 7126 * The "cfg-handle" property of a vds node in an MD contains the MD's 7127 * notion of "instance", or unique identifier, for that node; OBP 7128 * stores the value of the "cfg-handle" MD property as the value of 7129 * the "reg" property on the node in the device tree it builds from 7130 * the MD and passes to Solaris. Thus, we look up the devinfo node's 7131 * "reg" property value to uniquely identify this device instance when 7132 * registering with the MD event-generation framework. If the "reg" 7133 * property cannot be found, the device tree state is presumably so 7134 * broken that there is no point in continuing. 7135 */ 7136 if (!ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 7137 VD_REG_PROP)) { 7138 PRN("vds \"%s\" property does not exist", VD_REG_PROP); 7139 return (DDI_FAILURE); 7140 } 7141 7142 /* Get the MD instance for later MDEG registration */ 7143 cfg_handle = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 7144 VD_REG_PROP, -1); 7145 7146 if (ddi_soft_state_zalloc(vds_state, instance) != DDI_SUCCESS) { 7147 PRN("Could not allocate state for instance %u", instance); 7148 return (DDI_FAILURE); 7149 } 7150 7151 if ((vds = ddi_get_soft_state(vds_state, instance)) == NULL) { 7152 PRN("Could not get state for instance %u", instance); 7153 ddi_soft_state_free(vds_state, instance); 7154 return (DDI_FAILURE); 7155 } 7156 7157 vds->dip = dip; 7158 vds->vd_table = mod_hash_create_ptrhash("vds_vd_table", VDS_NCHAINS, 7159 vds_destroy_vd, sizeof (void *)); 7160 7161 ASSERT(vds->vd_table != NULL); 7162 7163 if ((status = ldi_ident_from_dip(dip, &vds->ldi_ident)) != 0) { 7164 PRN("ldi_ident_from_dip() returned errno %d", status); 7165 return (DDI_FAILURE); 7166 } 7167 vds->initialized |= VDS_LDI; 7168 7169 /* Register for MD updates */ 7170 sz = sizeof (vds_prop_template); 7171 pspecp = kmem_alloc(sz, KM_SLEEP); 7172 bcopy(vds_prop_template, pspecp, sz); 7173 7174 VDS_SET_MDEG_PROP_INST(pspecp, cfg_handle); 7175 7176 /* initialize the complete prop spec structure */ 7177 ispecp = kmem_zalloc(sizeof (mdeg_node_spec_t), KM_SLEEP); 7178 ispecp->namep = "virtual-device"; 7179 ispecp->specp = pspecp; 7180 7181 if (mdeg_register(ispecp, &vd_match, vds_process_md, vds, 7182 &vds->mdeg) != MDEG_SUCCESS) { 7183 PRN("Unable to register for MD updates"); 7184 kmem_free(ispecp, sizeof (mdeg_node_spec_t)); 7185 kmem_free(pspecp, sz); 7186 return (DDI_FAILURE); 7187 } 7188 7189 vds->ispecp = ispecp; 7190 vds->initialized |= VDS_MDEG; 7191 7192 /* Prevent auto-detaching so driver is available whenever MD changes */ 7193 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip, DDI_NO_AUTODETACH, 1) != 7194 DDI_PROP_SUCCESS) { 7195 PRN("failed to set \"%s\" property for instance %u", 7196 DDI_NO_AUTODETACH, instance); 7197 } 7198 7199 /* read any user defined driver types from conf file and update list */ 7200 vds_driver_types_update(vds); 7201 7202 ddi_report_dev(dip); 7203 return (DDI_SUCCESS); 7204 } 7205 7206 static int 7207 vds_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 7208 { 7209 int status; 7210 7211 switch (cmd) { 7212 case DDI_ATTACH: 7213 PR0("Attaching"); 7214 if ((status = vds_do_attach(dip)) != DDI_SUCCESS) 7215 (void) vds_detach(dip, DDI_DETACH); 7216 return (status); 7217 case DDI_RESUME: 7218 PR0("No action required for DDI_RESUME"); 7219 return (DDI_SUCCESS); 7220 default: 7221 return (DDI_FAILURE); 7222 } 7223 } 7224 7225 static struct dev_ops vds_ops = { 7226 DEVO_REV, /* devo_rev */ 7227 0, /* devo_refcnt */ 7228 
ddi_no_info, /* devo_getinfo */ 7229 nulldev, /* devo_identify */ 7230 nulldev, /* devo_probe */ 7231 vds_attach, /* devo_attach */ 7232 vds_detach, /* devo_detach */ 7233 nodev, /* devo_reset */ 7234 NULL, /* devo_cb_ops */ 7235 NULL, /* devo_bus_ops */ 7236 nulldev, /* devo_power */ 7237 ddi_quiesce_not_needed, /* devo_quiesce */ 7238 }; 7239 7240 static struct modldrv modldrv = { 7241 &mod_driverops, 7242 "virtual disk server", 7243 &vds_ops, 7244 }; 7245 7246 static struct modlinkage modlinkage = { 7247 MODREV_1, 7248 &modldrv, 7249 NULL 7250 }; 7251 7252 7253 int 7254 _init(void) 7255 { 7256 int status; 7257 7258 if ((status = ddi_soft_state_init(&vds_state, sizeof (vds_t), 1)) != 0) 7259 return (status); 7260 7261 if ((status = mod_install(&modlinkage)) != 0) { 7262 ddi_soft_state_fini(&vds_state); 7263 return (status); 7264 } 7265 7266 return (0); 7267 } 7268 7269 int 7270 _info(struct modinfo *modinfop) 7271 { 7272 return (mod_info(&modlinkage, modinfop)); 7273 } 7274 7275 int 7276 _fini(void) 7277 { 7278 int status; 7279 7280 if ((status = mod_remove(&modlinkage)) != 0) 7281 return (status); 7282 ddi_soft_state_fini(&vds_state); 7283 return (0); 7284 } 7285