1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 /* 30 * Virtual disk server 31 */ 32 33 34 #include <sys/types.h> 35 #include <sys/conf.h> 36 #include <sys/crc32.h> 37 #include <sys/ddi.h> 38 #include <sys/dkio.h> 39 #include <sys/file.h> 40 #include <sys/fs/hsfs_isospec.h> 41 #include <sys/mdeg.h> 42 #include <sys/mhd.h> 43 #include <sys/modhash.h> 44 #include <sys/note.h> 45 #include <sys/pathname.h> 46 #include <sys/sdt.h> 47 #include <sys/sunddi.h> 48 #include <sys/sunldi.h> 49 #include <sys/sysmacros.h> 50 #include <sys/vio_common.h> 51 #include <sys/vio_util.h> 52 #include <sys/vdsk_mailbox.h> 53 #include <sys/vdsk_common.h> 54 #include <sys/vtoc.h> 55 #include <sys/vfs.h> 56 #include <sys/stat.h> 57 #include <sys/scsi/impl/uscsi.h> 58 #include <vm/seg_map.h> 59 60 /* Virtual disk server initialization flags */ 61 #define VDS_LDI 0x01 62 #define VDS_MDEG 0x02 63 64 /* Virtual disk server tunable parameters */ 65 #define VDS_RETRIES 5 66 #define VDS_LDC_DELAY 1000 /* 1 msecs */ 67 #define VDS_DEV_DELAY 10000000 /* 10 secs */ 68 #define VDS_NCHAINS 32 69 70 /* Identification parameters for MD, synthetic dkio(7i) structures, etc. */ 71 #define VDS_NAME "virtual-disk-server" 72 73 #define VD_NAME "vd" 74 #define VD_VOLUME_NAME "vdisk" 75 #define VD_ASCIILABEL "Virtual Disk" 76 77 #define VD_CHANNEL_ENDPOINT "channel-endpoint" 78 #define VD_ID_PROP "id" 79 #define VD_BLOCK_DEVICE_PROP "vds-block-device" 80 #define VD_BLOCK_DEVICE_OPTS "vds-block-device-opts" 81 #define VD_REG_PROP "reg" 82 83 /* Virtual disk initialization flags */ 84 #define VD_DISK_READY 0x01 85 #define VD_LOCKING 0x02 86 #define VD_LDC 0x04 87 #define VD_DRING 0x08 88 #define VD_SID 0x10 89 #define VD_SEQ_NUM 0x20 90 #define VD_SETUP_ERROR 0x40 91 92 /* Flags for writing to a vdisk which is a file */ 93 #define VD_FILE_WRITE_FLAGS SM_ASYNC 94 95 /* Number of backup labels */ 96 #define VD_FILE_NUM_BACKUP 5 97 98 /* Timeout for SCSI I/O */ 99 #define VD_SCSI_RDWR_TIMEOUT 30 /* 30 secs */ 100 101 /* Maximum number of logical partitions */ 102 #define VD_MAXPART (NDKMAP + 1) 103 104 /* 105 * By Solaris convention, slice/partition 2 represents the entire disk; 106 * unfortunately, this convention does not appear to be codified. 
107 */ 108 #define VD_ENTIRE_DISK_SLICE 2 109 110 /* Driver types */ 111 typedef enum vd_driver { 112 VD_DRIVER_UNKNOWN = 0, /* driver type unknown */ 113 VD_DRIVER_DISK, /* disk driver */ 114 VD_DRIVER_VOLUME /* volume driver */ 115 } vd_driver_t; 116 117 #define VD_DRIVER_NAME_LEN 64 118 119 #define VDS_NUM_DRIVERS (sizeof (vds_driver_types) / sizeof (vd_driver_type_t)) 120 121 typedef struct vd_driver_type { 122 char name[VD_DRIVER_NAME_LEN]; /* driver name */ 123 vd_driver_t type; /* driver type (disk or volume) */ 124 } vd_driver_type_t; 125 126 /* 127 * There is no reliable way to determine if a device is representing a disk 128 * or a volume, especially with pseudo devices. So we maintain a list of well 129 * known drivers and the type of device they represent (either a disk or a 130 * volume). 131 * 132 * The list can be extended by adding a "driver-type-list" entry in vds.conf 133 * with the following syntax: 134 * 135 * driver-type-list="<driver>:<type>", ... ,"<driver>:<type>"; 136 * 137 * Where: 138 * <driver> is the name of a driver (limited to 64 characters) 139 * <type> is either the string "disk" or "volume" 140 * 141 * Invalid entries in "driver-type-list" will be ignored. 142 * 143 * For example, the following line in vds.conf: 144 * 145 * driver-type-list="foo:disk","bar:volume"; 146 * 147 * defines that "foo" is a disk driver, and driver "bar" is a volume driver. 148 * 149 * When a list is defined in vds.conf, it is checked before the built-in list 150 * (vds_driver_types[]) so that any definition from this list can be overriden 151 * using vds.conf. 152 */ 153 vd_driver_type_t vds_driver_types[] = { 154 { "dad", VD_DRIVER_DISK }, /* Solaris */ 155 { "did", VD_DRIVER_DISK }, /* Sun Cluster */ 156 { "emcp", VD_DRIVER_DISK }, /* EMC Powerpath */ 157 { "lofi", VD_DRIVER_VOLUME }, /* Solaris */ 158 { "md", VD_DRIVER_VOLUME }, /* Solaris - SVM */ 159 { "sd", VD_DRIVER_DISK }, /* Solaris */ 160 { "ssd", VD_DRIVER_DISK }, /* Solaris */ 161 { "vdc", VD_DRIVER_DISK }, /* Solaris */ 162 { "vxdmp", VD_DRIVER_DISK }, /* Veritas */ 163 { "vxio", VD_DRIVER_VOLUME }, /* Veritas - VxVM */ 164 { "zfs", VD_DRIVER_VOLUME } /* Solaris */ 165 }; 166 167 /* Return a cpp token as a string */ 168 #define STRINGIZE(token) #token 169 170 /* 171 * Print a message prefixed with the current function name to the message log 172 * (and optionally to the console for verbose boots); these macros use cpp's 173 * concatenation of string literals and C99 variable-length-argument-list 174 * macros 175 */ 176 #define PRN(...) _PRN("?%s(): "__VA_ARGS__, "") 177 #define _PRN(format, ...) \ 178 cmn_err(CE_CONT, format"%s", __func__, __VA_ARGS__) 179 180 /* Return a pointer to the "i"th vdisk dring element */ 181 #define VD_DRING_ELEM(i) ((vd_dring_entry_t *)(void *) \ 182 (vd->dring + (i)*vd->descriptor_size)) 183 184 /* Return the virtual disk client's type as a string (for use in messages) */ 185 #define VD_CLIENT(vd) \ 186 (((vd)->xfer_mode == VIO_DESC_MODE) ? "in-band client" : \ 187 (((vd)->xfer_mode == VIO_DRING_MODE_V1_0) ? "dring client" : \ 188 (((vd)->xfer_mode == 0) ? 
"null client" : \ 189 "unsupported client"))) 190 191 /* Read disk label from a disk on file */ 192 #define VD_FILE_LABEL_READ(vd, labelp) \ 193 vd_file_rw(vd, VD_SLICE_NONE, VD_OP_BREAD, (caddr_t)labelp, \ 194 0, sizeof (struct dk_label)) 195 196 /* Write disk label to a disk on file */ 197 #define VD_FILE_LABEL_WRITE(vd, labelp) \ 198 vd_file_rw(vd, VD_SLICE_NONE, VD_OP_BWRITE, (caddr_t)labelp, \ 199 0, sizeof (struct dk_label)) 200 201 /* Message for disk access rights reset failure */ 202 #define VD_RESET_ACCESS_FAILURE_MSG \ 203 "Fail to reset disk access rights for disk %s" 204 205 /* 206 * Specification of an MD node passed to the MDEG to filter any 207 * 'vport' nodes that do not belong to the specified node. This 208 * template is copied for each vds instance and filled in with 209 * the appropriate 'cfg-handle' value before being passed to the MDEG. 210 */ 211 static mdeg_prop_spec_t vds_prop_template[] = { 212 { MDET_PROP_STR, "name", VDS_NAME }, 213 { MDET_PROP_VAL, "cfg-handle", NULL }, 214 { MDET_LIST_END, NULL, NULL } 215 }; 216 217 #define VDS_SET_MDEG_PROP_INST(specp, val) (specp)[1].ps_val = (val); 218 219 /* 220 * Matching criteria passed to the MDEG to register interest 221 * in changes to 'virtual-device-port' nodes identified by their 222 * 'id' property. 223 */ 224 static md_prop_match_t vd_prop_match[] = { 225 { MDET_PROP_VAL, VD_ID_PROP }, 226 { MDET_LIST_END, NULL } 227 }; 228 229 static mdeg_node_match_t vd_match = {"virtual-device-port", 230 vd_prop_match}; 231 232 /* 233 * Options for the VD_BLOCK_DEVICE_OPTS property. 234 */ 235 #define VD_OPT_RDONLY 0x1 /* read-only */ 236 #define VD_OPT_SLICE 0x2 /* single slice */ 237 #define VD_OPT_EXCLUSIVE 0x4 /* exclusive access */ 238 239 #define VD_OPTION_NLEN 128 240 241 typedef struct vd_option { 242 char vdo_name[VD_OPTION_NLEN]; 243 uint64_t vdo_value; 244 } vd_option_t; 245 246 vd_option_t vd_bdev_options[] = { 247 { "ro", VD_OPT_RDONLY }, 248 { "slice", VD_OPT_SLICE }, 249 { "excl", VD_OPT_EXCLUSIVE } 250 }; 251 252 /* Debugging macros */ 253 #ifdef DEBUG 254 255 static int vd_msglevel = 0; 256 257 #define PR0 if (vd_msglevel > 0) PRN 258 #define PR1 if (vd_msglevel > 1) PRN 259 #define PR2 if (vd_msglevel > 2) PRN 260 261 #define VD_DUMP_DRING_ELEM(elem) \ 262 PR0("dst:%x op:%x st:%u nb:%lx addr:%lx ncook:%u\n", \ 263 elem->hdr.dstate, \ 264 elem->payload.operation, \ 265 elem->payload.status, \ 266 elem->payload.nbytes, \ 267 elem->payload.addr, \ 268 elem->payload.ncookies); 269 270 char * 271 vd_decode_state(int state) 272 { 273 char *str; 274 275 #define CASE_STATE(_s) case _s: str = #_s; break; 276 277 switch (state) { 278 CASE_STATE(VD_STATE_INIT) 279 CASE_STATE(VD_STATE_VER) 280 CASE_STATE(VD_STATE_ATTR) 281 CASE_STATE(VD_STATE_DRING) 282 CASE_STATE(VD_STATE_RDX) 283 CASE_STATE(VD_STATE_DATA) 284 default: str = "unknown"; break; 285 } 286 287 #undef CASE_STATE 288 289 return (str); 290 } 291 292 void 293 vd_decode_tag(vio_msg_t *msg) 294 { 295 char *tstr, *sstr, *estr; 296 297 #define CASE_TYPE(_s) case _s: tstr = #_s; break; 298 299 switch (msg->tag.vio_msgtype) { 300 CASE_TYPE(VIO_TYPE_CTRL) 301 CASE_TYPE(VIO_TYPE_DATA) 302 CASE_TYPE(VIO_TYPE_ERR) 303 default: tstr = "unknown"; break; 304 } 305 306 #undef CASE_TYPE 307 308 #define CASE_SUBTYPE(_s) case _s: sstr = #_s; break; 309 310 switch (msg->tag.vio_subtype) { 311 CASE_SUBTYPE(VIO_SUBTYPE_INFO) 312 CASE_SUBTYPE(VIO_SUBTYPE_ACK) 313 CASE_SUBTYPE(VIO_SUBTYPE_NACK) 314 default: sstr = "unknown"; break; 315 } 316 317 #undef CASE_SUBTYPE 318 319 #define 
CASE_ENV(_s) case _s: estr = #_s; break; 320 321 switch (msg->tag.vio_subtype_env) { 322 CASE_ENV(VIO_VER_INFO) 323 CASE_ENV(VIO_ATTR_INFO) 324 CASE_ENV(VIO_DRING_REG) 325 CASE_ENV(VIO_DRING_UNREG) 326 CASE_ENV(VIO_RDX) 327 CASE_ENV(VIO_PKT_DATA) 328 CASE_ENV(VIO_DESC_DATA) 329 CASE_ENV(VIO_DRING_DATA) 330 default: estr = "unknown"; break; 331 } 332 333 #undef CASE_ENV 334 335 PR1("(%x/%x/%x) message : (%s/%s/%s)", 336 msg->tag.vio_msgtype, msg->tag.vio_subtype, 337 msg->tag.vio_subtype_env, tstr, sstr, estr); 338 } 339 340 #else /* !DEBUG */ 341 342 #define PR0(...) 343 #define PR1(...) 344 #define PR2(...) 345 346 #define VD_DUMP_DRING_ELEM(elem) 347 348 #define vd_decode_state(_s) (NULL) 349 #define vd_decode_tag(_s) (NULL) 350 351 #endif /* DEBUG */ 352 353 354 /* 355 * Soft state structure for a vds instance 356 */ 357 typedef struct vds { 358 uint_t initialized; /* driver inst initialization flags */ 359 dev_info_t *dip; /* driver inst devinfo pointer */ 360 ldi_ident_t ldi_ident; /* driver's identifier for LDI */ 361 mod_hash_t *vd_table; /* table of virtual disks served */ 362 mdeg_node_spec_t *ispecp; /* mdeg node specification */ 363 mdeg_handle_t mdeg; /* handle for MDEG operations */ 364 vd_driver_type_t *driver_types; /* extra driver types (from vds.conf) */ 365 int num_drivers; /* num of extra driver types */ 366 } vds_t; 367 368 /* 369 * Types of descriptor-processing tasks 370 */ 371 typedef enum vd_task_type { 372 VD_NONFINAL_RANGE_TASK, /* task for intermediate descriptor in range */ 373 VD_FINAL_RANGE_TASK, /* task for last in a range of descriptors */ 374 } vd_task_type_t; 375 376 /* 377 * Structure describing the task for processing a descriptor 378 */ 379 typedef struct vd_task { 380 struct vd *vd; /* vd instance task is for */ 381 vd_task_type_t type; /* type of descriptor task */ 382 int index; /* dring elem index for task */ 383 vio_msg_t *msg; /* VIO message task is for */ 384 size_t msglen; /* length of message content */ 385 vd_dring_payload_t *request; /* request task will perform */ 386 struct buf buf; /* buf(9s) for I/O request */ 387 ldc_mem_handle_t mhdl; /* task memory handle */ 388 int status; /* status of processing task */ 389 int (*completef)(struct vd_task *task); /* completion func ptr */ 390 } vd_task_t; 391 392 /* 393 * Soft state structure for a virtual disk instance 394 */ 395 typedef struct vd { 396 uint_t initialized; /* vdisk initialization flags */ 397 uint64_t operations; /* bitmask of VD_OPs exported */ 398 vio_ver_t version; /* ver negotiated with client */ 399 vds_t *vds; /* server for this vdisk */ 400 ddi_taskq_t *startq; /* queue for I/O start tasks */ 401 ddi_taskq_t *completionq; /* queue for completion tasks */ 402 ldi_handle_t ldi_handle[V_NUMPAR]; /* LDI slice handles */ 403 char device_path[MAXPATHLEN + 1]; /* vdisk device */ 404 dev_t dev[V_NUMPAR]; /* dev numbers for slices */ 405 int open_flags; /* open flags */ 406 uint_t nslices; /* number of slices */ 407 size_t vdisk_size; /* number of blocks in vdisk */ 408 size_t vdisk_block_size; /* size of each vdisk block */ 409 vd_disk_type_t vdisk_type; /* slice or entire disk */ 410 vd_disk_label_t vdisk_label; /* EFI or VTOC label */ 411 vd_media_t vdisk_media; /* media type of backing dev. */ 412 boolean_t is_atapi_dev; /* Is this an IDE CD-ROM dev? */ 413 ushort_t max_xfer_sz; /* max xfer size in DEV_BSIZE */ 414 size_t block_size; /* blk size of actual device */ 415 boolean_t volume; /* is vDisk backed by volume */ 416 boolean_t file; /* is vDisk backed by a file? 
*/ 417 boolean_t scsi; /* is vDisk backed by scsi? */ 418 vnode_t *file_vnode; /* file vnode */ 419 size_t file_size; /* file size */ 420 ddi_devid_t file_devid; /* devid for disk image */ 421 efi_gpt_t efi_gpt; /* EFI GPT for slice type */ 422 efi_gpe_t efi_gpe; /* EFI GPE for slice type */ 423 int efi_reserved; /* EFI reserved slice */ 424 struct dk_geom dk_geom; /* synthetic for slice type */ 425 struct vtoc vtoc; /* synthetic for slice type */ 426 vd_slice_t slices[VD_MAXPART]; /* logical partitions */ 427 boolean_t ownership; /* disk ownership status */ 428 ldc_status_t ldc_state; /* LDC connection state */ 429 ldc_handle_t ldc_handle; /* handle for LDC comm */ 430 size_t max_msglen; /* largest LDC message len */ 431 vd_state_t state; /* client handshake state */ 432 uint8_t xfer_mode; /* transfer mode with client */ 433 uint32_t sid; /* client's session ID */ 434 uint64_t seq_num; /* message sequence number */ 435 uint64_t dring_ident; /* identifier of dring */ 436 ldc_dring_handle_t dring_handle; /* handle for dring ops */ 437 uint32_t descriptor_size; /* num bytes in desc */ 438 uint32_t dring_len; /* number of dring elements */ 439 caddr_t dring; /* address of dring */ 440 caddr_t vio_msgp; /* vio msg staging buffer */ 441 vd_task_t inband_task; /* task for inband descriptor */ 442 vd_task_t *dring_task; /* tasks dring elements */ 443 444 kmutex_t lock; /* protects variables below */ 445 boolean_t enabled; /* is vdisk enabled? */ 446 boolean_t reset_state; /* reset connection state? */ 447 boolean_t reset_ldc; /* reset LDC channel? */ 448 } vd_t; 449 450 typedef struct vds_operation { 451 char *namep; 452 uint8_t operation; 453 int (*start)(vd_task_t *task); 454 int (*complete)(vd_task_t *task); 455 } vds_operation_t; 456 457 typedef struct vd_ioctl { 458 uint8_t operation; /* vdisk operation */ 459 const char *operation_name; /* vdisk operation name */ 460 size_t nbytes; /* size of operation buffer */ 461 int cmd; /* corresponding ioctl cmd */ 462 const char *cmd_name; /* ioctl cmd name */ 463 void *arg; /* ioctl cmd argument */ 464 /* convert input vd_buf to output ioctl_arg */ 465 int (*copyin)(void *vd_buf, size_t, void *ioctl_arg); 466 /* convert input ioctl_arg to output vd_buf */ 467 void (*copyout)(void *ioctl_arg, void *vd_buf); 468 /* write is true if the operation writes any data to the backend */ 469 boolean_t write; 470 } vd_ioctl_t; 471 472 /* Define trivial copyin/copyout conversion function flag */ 473 #define VD_IDENTITY_IN ((int (*)(void *, size_t, void *))-1) 474 #define VD_IDENTITY_OUT ((void (*)(void *, void *))-1) 475 476 477 static int vds_ldc_retries = VDS_RETRIES; 478 static int vds_ldc_delay = VDS_LDC_DELAY; 479 static int vds_dev_retries = VDS_RETRIES; 480 static int vds_dev_delay = VDS_DEV_DELAY; 481 static void *vds_state; 482 483 static uint_t vd_file_write_flags = VD_FILE_WRITE_FLAGS; 484 485 static short vd_scsi_rdwr_timeout = VD_SCSI_RDWR_TIMEOUT; 486 static int vd_scsi_debug = USCSI_SILENT; 487 488 /* 489 * Tunable to define the behavior of the service domain if the vdisk server 490 * fails to reset disk exclusive access when a LDC channel is reset. When a 491 * LDC channel is reset the vdisk server will try to reset disk exclusive 492 * access by releasing any SCSI-2 reservation or resetting the disk. If these 493 * actions fail then the default behavior (vd_reset_access_failure = 0) is to 494 * print a warning message. 
This default behavior can be changed by setting 495 * the vd_reset_access_failure variable to A_REBOOT (= 0x1) and that will 496 * cause the service domain to reboot, or A_DUMP (= 0x5) and that will cause 497 * the service domain to panic. In both cases, the reset of the service domain 498 * should trigger a reset SCSI buses and hopefully clear any SCSI-2 reservation. 499 */ 500 static int vd_reset_access_failure = 0; 501 502 /* 503 * Tunable for backward compatibility. When this variable is set to B_TRUE, 504 * all disk volumes (ZFS, SVM, VxvM volumes) will be exported as single 505 * slice disks whether or not they have the "slice" option set. This is 506 * to provide a simple backward compatibility mechanism when upgrading 507 * the vds driver and using a domain configuration created before the 508 * "slice" option was available. 509 */ 510 static boolean_t vd_volume_force_slice = B_FALSE; 511 512 /* 513 * The label of disk images created with some earlier versions of the virtual 514 * disk software is not entirely correct and have an incorrect v_sanity field 515 * (usually 0) instead of VTOC_SANE. This creates a compatibility problem with 516 * these images because we are now validating that the disk label (and the 517 * sanity) is correct when a disk image is opened. 518 * 519 * This tunable is set to false to not validate the sanity field and ensure 520 * compatibility. If the tunable is set to true, we will do a strict checking 521 * of the sanity but this can create compatibility problems with old disk 522 * images. 523 */ 524 static boolean_t vd_file_validate_sanity = B_FALSE; 525 526 /* 527 * Supported protocol version pairs, from highest (newest) to lowest (oldest) 528 * 529 * Each supported major version should appear only once, paired with (and only 530 * with) its highest supported minor version number (as the protocol requires 531 * supporting all lower minor version numbers as well) 532 */ 533 static const vio_ver_t vds_version[] = {{1, 1}}; 534 static const size_t vds_num_versions = 535 sizeof (vds_version)/sizeof (vds_version[0]); 536 537 static void vd_free_dring_task(vd_t *vdp); 538 static int vd_setup_vd(vd_t *vd); 539 static int vd_setup_single_slice_disk(vd_t *vd); 540 static int vd_setup_mediainfo(vd_t *vd); 541 static boolean_t vd_enabled(vd_t *vd); 542 static ushort_t vd_lbl2cksum(struct dk_label *label); 543 static int vd_file_validate_geometry(vd_t *vd); 544 static boolean_t vd_file_is_iso_image(vd_t *vd); 545 static void vd_set_exported_operations(vd_t *vd); 546 static void vd_reset_access(vd_t *vd); 547 static int vd_backend_ioctl(vd_t *vd, int cmd, caddr_t arg); 548 static int vds_efi_alloc_and_read(vd_t *, efi_gpt_t **, efi_gpe_t **); 549 static void vds_efi_free(vd_t *, efi_gpt_t *, efi_gpe_t *); 550 static void vds_driver_types_free(vds_t *vds); 551 552 /* 553 * Function: 554 * vd_file_rw 555 * 556 * Description: 557 * Read or write to a disk on file. 558 * 559 * Parameters: 560 * vd - disk on which the operation is performed. 561 * slice - slice on which the operation is performed, 562 * VD_SLICE_NONE indicates that the operation 563 * is done using an absolute disk offset. 564 * operation - operation to execute: read (VD_OP_BREAD) or 565 * write (VD_OP_BWRITE). 566 * data - buffer where data are read to or written from. 567 * blk - starting block for the operation. 568 * len - number of bytes to read or write. 569 * 570 * Return Code: 571 * n >= 0 - success, n indicates the number of bytes read 572 * or written. 573 * -1 - error. 
574 */ 575 static ssize_t 576 vd_file_rw(vd_t *vd, int slice, int operation, caddr_t data, size_t blk, 577 size_t len) 578 { 579 caddr_t maddr; 580 size_t offset, maxlen, moffset, mlen, n; 581 uint_t smflags; 582 enum seg_rw srw; 583 584 ASSERT(vd->file); 585 ASSERT(len > 0); 586 587 /* 588 * If a file is exported as a slice then we don't care about the vtoc. 589 * In that case, the vtoc is a fake mainly to make newfs happy and we 590 * handle any I/O as a raw disk access so that we can have access to the 591 * entire backend. 592 */ 593 if (vd->vdisk_type == VD_DISK_TYPE_SLICE || slice == VD_SLICE_NONE) { 594 /* raw disk access */ 595 offset = blk * DEV_BSIZE; 596 } else { 597 ASSERT(slice >= 0 && slice < V_NUMPAR); 598 599 /* 600 * v1.0 vDisk clients depended on the server not verifying 601 * the label of a unformatted disk. This "feature" is 602 * maintained for backward compatibility but all versions 603 * from v1.1 onwards must do the right thing. 604 */ 605 if (vd->vdisk_label == VD_DISK_LABEL_UNK && 606 vio_ver_is_supported(vd->version, 1, 1)) { 607 (void) vd_file_validate_geometry(vd); 608 if (vd->vdisk_label == VD_DISK_LABEL_UNK) { 609 PR0("Unknown disk label, can't do I/O " 610 "from slice %d", slice); 611 return (-1); 612 } 613 } 614 615 if (vd->vdisk_label == VD_DISK_LABEL_VTOC) { 616 ASSERT(vd->vtoc.v_sectorsz == DEV_BSIZE); 617 } else { 618 ASSERT(vd->vdisk_label == VD_DISK_LABEL_EFI); 619 ASSERT(vd->vdisk_block_size == DEV_BSIZE); 620 } 621 622 if (blk >= vd->slices[slice].nblocks) { 623 /* address past the end of the slice */ 624 PR0("req_addr (0x%lx) > psize (0x%lx)", 625 blk, vd->slices[slice].nblocks); 626 return (0); 627 } 628 629 offset = (vd->slices[slice].start + blk) * DEV_BSIZE; 630 631 /* 632 * If the requested size is greater than the size 633 * of the partition, truncate the read/write. 634 */ 635 maxlen = (vd->slices[slice].nblocks - blk) * DEV_BSIZE; 636 637 if (len > maxlen) { 638 PR0("I/O size truncated to %lu bytes from %lu bytes", 639 maxlen, len); 640 len = maxlen; 641 } 642 } 643 644 /* 645 * We have to ensure that we are reading/writing into the mmap 646 * range. If we have a partial disk image (e.g. an image of 647 * s0 instead s2) the system can try to access slices that 648 * are not included into the disk image. 649 */ 650 if ((offset + len) > vd->file_size) { 651 PR0("offset + nbytes (0x%lx + 0x%lx) > " 652 "file_size (0x%lx)", offset, len, vd->file_size); 653 return (-1); 654 } 655 656 srw = (operation == VD_OP_BREAD)? S_READ : S_WRITE; 657 smflags = (operation == VD_OP_BREAD)? 0 : 658 (SM_WRITE | vd_file_write_flags); 659 n = len; 660 661 do { 662 /* 663 * segmap_getmapflt() returns a MAXBSIZE chunk which is 664 * MAXBSIZE aligned. 665 */ 666 moffset = offset & MAXBOFFSET; 667 mlen = MIN(MAXBSIZE - moffset, n); 668 maddr = segmap_getmapflt(segkmap, vd->file_vnode, offset, 669 mlen, 1, srw); 670 /* 671 * Fault in the pages so we can check for error and ensure 672 * that we can safely used the mapped address. 
673 */ 674 if (segmap_fault(kas.a_hat, segkmap, maddr, mlen, 675 F_SOFTLOCK, srw) != 0) { 676 (void) segmap_release(segkmap, maddr, 0); 677 return (-1); 678 } 679 680 if (operation == VD_OP_BREAD) 681 bcopy(maddr + moffset, data, mlen); 682 else 683 bcopy(data, maddr + moffset, mlen); 684 685 if (segmap_fault(kas.a_hat, segkmap, maddr, mlen, 686 F_SOFTUNLOCK, srw) != 0) { 687 (void) segmap_release(segkmap, maddr, 0); 688 return (-1); 689 } 690 if (segmap_release(segkmap, maddr, smflags) != 0) 691 return (-1); 692 n -= mlen; 693 offset += mlen; 694 data += mlen; 695 696 } while (n > 0); 697 698 return (len); 699 } 700 701 /* 702 * Function: 703 * vd_file_build_default_label 704 * 705 * Description: 706 * Return a default label for the given disk. This is used when the disk 707 * does not have a valid VTOC so that the user can get a valid default 708 * configuration. The default label has all slice sizes set to 0 (except 709 * slice 2 which is the entire disk) to force the user to write a valid 710 * label onto the disk image. 711 * 712 * Parameters: 713 * vd - disk on which the operation is performed. 714 * label - the returned default label. 715 * 716 * Return Code: 717 * none. 718 */ 719 static void 720 vd_file_build_default_label(vd_t *vd, struct dk_label *label) 721 { 722 size_t size; 723 char prefix; 724 725 ASSERT(vd->file); 726 ASSERT(vd->vdisk_type == VD_DISK_TYPE_DISK); 727 728 bzero(label, sizeof (struct dk_label)); 729 730 /* 731 * We must have a resonable number of cylinders and sectors so 732 * that newfs can run using default values. 733 * 734 * if (disk_size < 2MB) 735 * phys_cylinders = disk_size / 100K 736 * else 737 * phys_cylinders = disk_size / 300K 738 * 739 * phys_cylinders = (phys_cylinders == 0) ? 1 : phys_cylinders 740 * alt_cylinders = (phys_cylinders > 2) ? 2 : 0; 741 * data_cylinders = phys_cylinders - alt_cylinders 742 * 743 * sectors = disk_size / (phys_cylinders * blk_size) 744 * 745 * The file size test is an attempt to not have too few cylinders 746 * for a small file, or so many on a big file that you waste space 747 * for backup superblocks or cylinder group structures. 
748 */ 749 if (vd->file_size < (2 * 1024 * 1024)) 750 label->dkl_pcyl = vd->file_size / (100 * 1024); 751 else 752 label->dkl_pcyl = vd->file_size / (300 * 1024); 753 754 if (label->dkl_pcyl == 0) 755 label->dkl_pcyl = 1; 756 757 label->dkl_acyl = 0; 758 759 if (label->dkl_pcyl > 2) 760 label->dkl_acyl = 2; 761 762 label->dkl_nsect = vd->file_size / 763 (DEV_BSIZE * label->dkl_pcyl); 764 label->dkl_ncyl = label->dkl_pcyl - label->dkl_acyl; 765 label->dkl_nhead = 1; 766 label->dkl_write_reinstruct = 0; 767 label->dkl_read_reinstruct = 0; 768 label->dkl_rpm = 7200; 769 label->dkl_apc = 0; 770 label->dkl_intrlv = 0; 771 772 PR0("requested disk size: %ld bytes\n", vd->file_size); 773 PR0("setup: ncyl=%d nhead=%d nsec=%d\n", label->dkl_pcyl, 774 label->dkl_nhead, label->dkl_nsect); 775 PR0("provided disk size: %ld bytes\n", (uint64_t) 776 (label->dkl_pcyl * label->dkl_nhead * 777 label->dkl_nsect * DEV_BSIZE)); 778 779 if (vd->file_size < (1ULL << 20)) { 780 size = vd->file_size >> 10; 781 prefix = 'K'; /* Kilobyte */ 782 } else if (vd->file_size < (1ULL << 30)) { 783 size = vd->file_size >> 20; 784 prefix = 'M'; /* Megabyte */ 785 } else if (vd->file_size < (1ULL << 40)) { 786 size = vd->file_size >> 30; 787 prefix = 'G'; /* Gigabyte */ 788 } else { 789 size = vd->file_size >> 40; 790 prefix = 'T'; /* Terabyte */ 791 } 792 793 /* 794 * We must have a correct label name otherwise format(1m) will 795 * not recognized the disk as labeled. 796 */ 797 (void) snprintf(label->dkl_asciilabel, LEN_DKL_ASCII, 798 "SUN-DiskImage-%ld%cB cyl %d alt %d hd %d sec %d", 799 size, prefix, 800 label->dkl_ncyl, label->dkl_acyl, label->dkl_nhead, 801 label->dkl_nsect); 802 803 /* default VTOC */ 804 label->dkl_vtoc.v_version = V_VERSION; 805 label->dkl_vtoc.v_nparts = V_NUMPAR; 806 label->dkl_vtoc.v_sanity = VTOC_SANE; 807 label->dkl_vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_tag = V_BACKUP; 808 label->dkl_map[VD_ENTIRE_DISK_SLICE].dkl_cylno = 0; 809 label->dkl_map[VD_ENTIRE_DISK_SLICE].dkl_nblk = label->dkl_ncyl * 810 label->dkl_nhead * label->dkl_nsect; 811 label->dkl_magic = DKL_MAGIC; 812 label->dkl_cksum = vd_lbl2cksum(label); 813 } 814 815 /* 816 * Function: 817 * vd_file_set_vtoc 818 * 819 * Description: 820 * Set the vtoc of a disk image by writing the label and backup 821 * labels into the disk image backend. 822 * 823 * Parameters: 824 * vd - disk on which the operation is performed. 825 * label - the data to be written. 826 * 827 * Return Code: 828 * 0 - success. 829 * n > 0 - error, n indicates the errno code. 830 */ 831 static int 832 vd_file_set_vtoc(vd_t *vd, struct dk_label *label) 833 { 834 int blk, sec, cyl, head, cnt; 835 836 ASSERT(vd->file); 837 838 if (VD_FILE_LABEL_WRITE(vd, label) < 0) { 839 PR0("fail to write disk label"); 840 return (EIO); 841 } 842 843 /* 844 * Backup labels are on the last alternate cylinder's 845 * first five odd sectors. 846 */ 847 if (label->dkl_acyl == 0) { 848 PR0("no alternate cylinder, can not store backup labels"); 849 return (0); 850 } 851 852 cyl = label->dkl_ncyl + label->dkl_acyl - 1; 853 head = label->dkl_nhead - 1; 854 855 blk = (cyl * ((label->dkl_nhead * label->dkl_nsect) - label->dkl_apc)) + 856 (head * label->dkl_nsect); 857 858 /* 859 * Write the backup labels. Make sure we don't try to write past 860 * the last cylinder. 
861 */ 862 sec = 1; 863 864 for (cnt = 0; cnt < VD_FILE_NUM_BACKUP; cnt++) { 865 866 if (sec >= label->dkl_nsect) { 867 PR0("not enough sector to store all backup labels"); 868 return (0); 869 } 870 871 if (vd_file_rw(vd, VD_SLICE_NONE, VD_OP_BWRITE, (caddr_t)label, 872 blk + sec, sizeof (struct dk_label)) < 0) { 873 PR0("error writing backup label at block %d\n", 874 blk + sec); 875 return (EIO); 876 } 877 878 PR1("wrote backup label at block %d\n", blk + sec); 879 880 sec += 2; 881 } 882 883 return (0); 884 } 885 886 /* 887 * Function: 888 * vd_file_get_devid_block 889 * 890 * Description: 891 * Return the block number where the device id is stored. 892 * 893 * Parameters: 894 * vd - disk on which the operation is performed. 895 * blkp - pointer to the block number 896 * 897 * Return Code: 898 * 0 - success 899 * ENOSPC - disk has no space to store a device id 900 */ 901 static int 902 vd_file_get_devid_block(vd_t *vd, size_t *blkp) 903 { 904 diskaddr_t spc, head, cyl; 905 906 ASSERT(vd->file); 907 908 if (vd->vdisk_label == VD_DISK_LABEL_UNK) { 909 /* 910 * If no label is defined we don't know where to find 911 * a device id. 912 */ 913 return (ENOSPC); 914 } 915 916 if (vd->vdisk_label == VD_DISK_LABEL_EFI) { 917 /* 918 * For an EFI disk, the devid is at the beginning of 919 * the reserved slice 920 */ 921 if (vd->efi_reserved == -1) { 922 PR0("EFI disk has no reserved slice"); 923 return (ENOSPC); 924 } 925 926 *blkp = vd->slices[vd->efi_reserved].start; 927 return (0); 928 } 929 930 ASSERT(vd->vdisk_label == VD_DISK_LABEL_VTOC); 931 932 /* this geometry doesn't allow us to have a devid */ 933 if (vd->dk_geom.dkg_acyl < 2) { 934 PR0("not enough alternate cylinder available for devid " 935 "(acyl=%u)", vd->dk_geom.dkg_acyl); 936 return (ENOSPC); 937 } 938 939 /* the devid is in on the track next to the last cylinder */ 940 cyl = vd->dk_geom.dkg_ncyl + vd->dk_geom.dkg_acyl - 2; 941 spc = vd->dk_geom.dkg_nhead * vd->dk_geom.dkg_nsect; 942 head = vd->dk_geom.dkg_nhead - 1; 943 944 *blkp = (cyl * (spc - vd->dk_geom.dkg_apc)) + 945 (head * vd->dk_geom.dkg_nsect) + 1; 946 947 return (0); 948 } 949 950 /* 951 * Return the checksum of a disk block containing an on-disk devid. 952 */ 953 static uint_t 954 vd_dkdevid2cksum(struct dk_devid *dkdevid) 955 { 956 uint_t chksum, *ip; 957 int i; 958 959 chksum = 0; 960 ip = (uint_t *)dkdevid; 961 for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int)); i++) 962 chksum ^= ip[i]; 963 964 return (chksum); 965 } 966 967 /* 968 * Function: 969 * vd_file_read_devid 970 * 971 * Description: 972 * Read the device id stored on a disk image. 973 * 974 * Parameters: 975 * vd - disk on which the operation is performed. 976 * devid - the return address of the device ID. 
977 * 978 * Return Code: 979 * 0 - success 980 * EIO - I/O error while trying to access the disk image 981 * EINVAL - no valid device id was found 982 * ENOSPC - disk has no space to store a device id 983 */ 984 static int 985 vd_file_read_devid(vd_t *vd, ddi_devid_t *devid) 986 { 987 struct dk_devid *dkdevid; 988 size_t blk; 989 uint_t chksum; 990 int status, sz; 991 992 if ((status = vd_file_get_devid_block(vd, &blk)) != 0) 993 return (status); 994 995 dkdevid = kmem_zalloc(DEV_BSIZE, KM_SLEEP); 996 997 /* get the devid */ 998 if ((vd_file_rw(vd, VD_SLICE_NONE, VD_OP_BREAD, (caddr_t)dkdevid, blk, 999 DEV_BSIZE)) < 0) { 1000 PR0("error reading devid block at %lu", blk); 1001 status = EIO; 1002 goto done; 1003 } 1004 1005 /* validate the revision */ 1006 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 1007 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 1008 PR0("invalid devid found at block %lu (bad revision)", blk); 1009 status = EINVAL; 1010 goto done; 1011 } 1012 1013 /* compute checksum */ 1014 chksum = vd_dkdevid2cksum(dkdevid); 1015 1016 /* compare the checksums */ 1017 if (DKD_GETCHKSUM(dkdevid) != chksum) { 1018 PR0("invalid devid found at block %lu (bad checksum)", blk); 1019 status = EINVAL; 1020 goto done; 1021 } 1022 1023 /* validate the device id */ 1024 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 1025 PR0("invalid devid found at block %lu", blk); 1026 status = EINVAL; 1027 goto done; 1028 } 1029 1030 PR1("devid read at block %lu", blk); 1031 1032 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 1033 *devid = kmem_alloc(sz, KM_SLEEP); 1034 bcopy(&dkdevid->dkd_devid, *devid, sz); 1035 1036 done: 1037 kmem_free(dkdevid, DEV_BSIZE); 1038 return (status); 1039 1040 } 1041 1042 /* 1043 * Function: 1044 * vd_file_write_devid 1045 * 1046 * Description: 1047 * Write a device id into disk image. 1048 * 1049 * Parameters: 1050 * vd - disk on which the operation is performed. 1051 * devid - the device ID to store. 1052 * 1053 * Return Code: 1054 * 0 - success 1055 * EIO - I/O error while trying to access the disk image 1056 * ENOSPC - disk has no space to store a device id 1057 */ 1058 static int 1059 vd_file_write_devid(vd_t *vd, ddi_devid_t devid) 1060 { 1061 struct dk_devid *dkdevid; 1062 uint_t chksum; 1063 size_t blk; 1064 int status; 1065 1066 if (devid == NULL) { 1067 /* nothing to write */ 1068 return (0); 1069 } 1070 1071 if ((status = vd_file_get_devid_block(vd, &blk)) != 0) 1072 return (status); 1073 1074 dkdevid = kmem_zalloc(DEV_BSIZE, KM_SLEEP); 1075 1076 /* set revision */ 1077 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB; 1078 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB; 1079 1080 /* copy devid */ 1081 bcopy(devid, &dkdevid->dkd_devid, ddi_devid_sizeof(devid)); 1082 1083 /* compute checksum */ 1084 chksum = vd_dkdevid2cksum(dkdevid); 1085 1086 /* set checksum */ 1087 DKD_FORMCHKSUM(chksum, dkdevid); 1088 1089 /* store the devid */ 1090 if ((status = vd_file_rw(vd, VD_SLICE_NONE, VD_OP_BWRITE, 1091 (caddr_t)dkdevid, blk, DEV_BSIZE)) < 0) { 1092 PR0("Error writing devid block at %lu", blk); 1093 status = EIO; 1094 } else { 1095 PR1("devid written at block %lu", blk); 1096 status = 0; 1097 } 1098 1099 kmem_free(dkdevid, DEV_BSIZE); 1100 return (status); 1101 } 1102 1103 /* 1104 * Function: 1105 * vd_do_scsi_rdwr 1106 * 1107 * Description: 1108 * Read or write to a SCSI disk using an absolute disk offset. 1109 * 1110 * Parameters: 1111 * vd - disk on which the operation is performed. 
1112 * operation - operation to execute: read (VD_OP_BREAD) or 1113 * write (VD_OP_BWRITE). 1114 * data - buffer where data are read to or written from. 1115 * blk - starting block for the operation. 1116 * len - number of bytes to read or write. 1117 * 1118 * Return Code: 1119 * 0 - success 1120 * n != 0 - error. 1121 */ 1122 static int 1123 vd_do_scsi_rdwr(vd_t *vd, int operation, caddr_t data, size_t blk, size_t len) 1124 { 1125 struct uscsi_cmd ucmd; 1126 union scsi_cdb cdb; 1127 int nsectors, nblk; 1128 int max_sectors; 1129 int status, rval; 1130 1131 ASSERT(!vd->file); 1132 ASSERT(vd->vdisk_block_size > 0); 1133 1134 max_sectors = vd->max_xfer_sz; 1135 nblk = (len / vd->vdisk_block_size); 1136 1137 if (len % vd->vdisk_block_size != 0) 1138 return (EINVAL); 1139 1140 /* 1141 * Build and execute the uscsi ioctl. We build a group0, group1 1142 * or group4 command as necessary, since some targets 1143 * do not support group1 commands. 1144 */ 1145 while (nblk) { 1146 1147 bzero(&ucmd, sizeof (ucmd)); 1148 bzero(&cdb, sizeof (cdb)); 1149 1150 nsectors = (max_sectors < nblk) ? max_sectors : nblk; 1151 1152 /* 1153 * Some of the optical drives on sun4v machines are ATAPI 1154 * devices which use Group 1 Read/Write commands so we need 1155 * to explicitly check a flag which is set when a domain 1156 * is bound. 1157 */ 1158 if (blk < (2 << 20) && nsectors <= 0xff && !vd->is_atapi_dev) { 1159 FORMG0ADDR(&cdb, blk); 1160 FORMG0COUNT(&cdb, nsectors); 1161 ucmd.uscsi_cdblen = CDB_GROUP0; 1162 } else if (blk > 0xffffffff) { 1163 FORMG4LONGADDR(&cdb, blk); 1164 FORMG4COUNT(&cdb, nsectors); 1165 ucmd.uscsi_cdblen = CDB_GROUP4; 1166 cdb.scc_cmd |= SCMD_GROUP4; 1167 } else { 1168 FORMG1ADDR(&cdb, blk); 1169 FORMG1COUNT(&cdb, nsectors); 1170 ucmd.uscsi_cdblen = CDB_GROUP1; 1171 cdb.scc_cmd |= SCMD_GROUP1; 1172 } 1173 ucmd.uscsi_cdb = (caddr_t)&cdb; 1174 ucmd.uscsi_bufaddr = data; 1175 ucmd.uscsi_buflen = nsectors * vd->block_size; 1176 ucmd.uscsi_timeout = vd_scsi_rdwr_timeout; 1177 /* 1178 * Set flags so that the command is isolated from normal 1179 * commands and no error message is printed. 1180 */ 1181 ucmd.uscsi_flags = USCSI_ISOLATE | USCSI_SILENT; 1182 1183 if (operation == VD_OP_BREAD) { 1184 cdb.scc_cmd |= SCMD_READ; 1185 ucmd.uscsi_flags |= USCSI_READ; 1186 } else { 1187 cdb.scc_cmd |= SCMD_WRITE; 1188 } 1189 1190 status = ldi_ioctl(vd->ldi_handle[VD_ENTIRE_DISK_SLICE], 1191 USCSICMD, (intptr_t)&ucmd, (vd->open_flags | FKIOCTL), 1192 kcred, &rval); 1193 1194 if (status == 0) 1195 status = ucmd.uscsi_status; 1196 1197 if (status != 0) 1198 break; 1199 1200 /* 1201 * Check if partial DMA breakup is required. If so, reduce 1202 * the request size by half and retry the last request. 1203 */ 1204 if (ucmd.uscsi_resid == ucmd.uscsi_buflen) { 1205 max_sectors >>= 1; 1206 if (max_sectors <= 0) { 1207 status = EIO; 1208 break; 1209 } 1210 continue; 1211 } 1212 1213 if (ucmd.uscsi_resid != 0) { 1214 status = EIO; 1215 break; 1216 } 1217 1218 blk += nsectors; 1219 nblk -= nsectors; 1220 data += nsectors * vd->vdisk_block_size; /* SECSIZE */ 1221 } 1222 1223 return (status); 1224 } 1225 1226 /* 1227 * Function: 1228 * vd_scsi_rdwr 1229 * 1230 * Description: 1231 * Wrapper function to read or write to a SCSI disk using an absolute 1232 * disk offset. It checks the blocksize of the underlying device and, 1233 * if necessary, adjusts the buffers accordingly before calling 1234 * vd_do_scsi_rdwr() to do the actual read or write. 
1235 * 1236 * Parameters: 1237 * vd - disk on which the operation is performed. 1238 * operation - operation to execute: read (VD_OP_BREAD) or 1239 * write (VD_OP_BWRITE). 1240 * data - buffer where data are read to or written from. 1241 * blk - starting block for the operation. 1242 * len - number of bytes to read or write. 1243 * 1244 * Return Code: 1245 * 0 - success 1246 * n != 0 - error. 1247 */ 1248 static int 1249 vd_scsi_rdwr(vd_t *vd, int operation, caddr_t data, size_t vblk, size_t vlen) 1250 { 1251 int rv; 1252 1253 size_t pblk; /* physical device block number of data on device */ 1254 size_t delta; /* relative offset between pblk and vblk */ 1255 size_t pnblk; /* number of physical blocks to be read from device */ 1256 size_t plen; /* length of data to be read from physical device */ 1257 char *buf; /* buffer area to fit physical device's block size */ 1258 1259 if (vd->block_size == 0) { 1260 /* 1261 * The block size was not available during the attach, 1262 * try to update it now. 1263 */ 1264 if (vd_setup_mediainfo(vd) != 0) 1265 return (EIO); 1266 } 1267 1268 /* 1269 * If the vdisk block size and the block size of the underlying device 1270 * match we can skip straight to vd_do_scsi_rdwr(), otherwise we need 1271 * to create a buffer large enough to handle the device's block size 1272 * and adjust the block to be read from and the amount of data to 1273 * read to correspond with the device's block size. 1274 */ 1275 if (vd->vdisk_block_size == vd->block_size) 1276 return (vd_do_scsi_rdwr(vd, operation, data, vblk, vlen)); 1277 1278 if (vd->vdisk_block_size > vd->block_size) 1279 return (EINVAL); 1280 1281 /* 1282 * Writing of physical block sizes larger than the virtual block size 1283 * is not supported. This would be added if/when support for guests 1284 * writing to DVDs is implemented. 1285 */ 1286 if (operation == VD_OP_BWRITE) 1287 return (ENOTSUP); 1288 1289 /* BEGIN CSTYLED */ 1290 /* 1291 * Below is a diagram showing the relationship between the physical 1292 * and virtual blocks. If the virtual blocks marked by 'X' below are 1293 * requested, then the physical blocks denoted by 'Y' are read. 1294 * 1295 * vblk 1296 * | vlen 1297 * |<--------------->| 1298 * v v 1299 * --+--+--+--+--+--+--+--+--+--+--+--+--+--+--+- virtual disk: 1300 * | | | |XX|XX|XX|XX|XX|XX| | | | | | } block size is 1301 * --+--+--+--+--+--+--+--+--+--+--+--+--+--+--+- vd->vdisk_block_size 1302 * : : : : 1303 * >:==:< delta : : 1304 * : : : : 1305 * --+-----+-----+-----+-----+-----+-----+-----+-- physical disk: 1306 * | |YY:YY|YYYYY|YYYYY|YY:YY| | | } block size is 1307 * --+-----+-----+-----+-----+-----+-----+-----+-- vd->block_size 1308 * ^ ^ 1309 * |<--------------------->| 1310 * | plen 1311 * pblk 1312 */ 1313 /* END CSTYLED */ 1314 pblk = (vblk * vd->vdisk_block_size) / vd->block_size; 1315 delta = (vblk * vd->vdisk_block_size) - (pblk * vd->block_size); 1316 pnblk = ((delta + vlen - 1) / vd->block_size) + 1; 1317 plen = pnblk * vd->block_size; 1318 1319 PR2("vblk %lx:pblk %lx: vlen %ld:plen %ld", vblk, pblk, vlen, plen); 1320 1321 buf = kmem_zalloc(sizeof (caddr_t) * plen, KM_SLEEP); 1322 rv = vd_do_scsi_rdwr(vd, operation, (caddr_t)buf, pblk, plen); 1323 bcopy(buf + delta, data, vlen); 1324 1325 kmem_free(buf, sizeof (caddr_t) * plen); 1326 1327 return (rv); 1328 } 1329 1330 /* 1331 * Return Values 1332 * EINPROGRESS - operation was successfully started 1333 * EIO - encountered LDC (aka. 
task error) 1334 * 0 - operation completed successfully 1335 * 1336 * Side Effect 1337 * sets request->status = <disk operation status> 1338 */ 1339 static int 1340 vd_start_bio(vd_task_t *task) 1341 { 1342 int rv, status = 0; 1343 vd_t *vd = task->vd; 1344 vd_dring_payload_t *request = task->request; 1345 struct buf *buf = &task->buf; 1346 uint8_t mtype; 1347 int slice; 1348 char *bufaddr = 0; 1349 size_t buflen; 1350 1351 ASSERT(vd != NULL); 1352 ASSERT(request != NULL); 1353 1354 slice = request->slice; 1355 1356 ASSERT(slice == VD_SLICE_NONE || slice < vd->nslices); 1357 ASSERT((request->operation == VD_OP_BREAD) || 1358 (request->operation == VD_OP_BWRITE)); 1359 1360 if (request->nbytes == 0) { 1361 /* no service for trivial requests */ 1362 request->status = EINVAL; 1363 return (0); 1364 } 1365 1366 PR1("%s %lu bytes at block %lu", 1367 (request->operation == VD_OP_BREAD) ? "Read" : "Write", 1368 request->nbytes, request->addr); 1369 1370 /* 1371 * We have to check the open flags because the functions processing 1372 * the read/write request will not do it. 1373 */ 1374 if (request->operation == VD_OP_BWRITE && !(vd->open_flags & FWRITE)) { 1375 PR0("write fails because backend is opened read-only"); 1376 request->nbytes = 0; 1377 request->status = EROFS; 1378 return (0); 1379 } 1380 1381 mtype = (&vd->inband_task == task) ? LDC_SHADOW_MAP : LDC_DIRECT_MAP; 1382 1383 /* Map memory exported by client */ 1384 status = ldc_mem_map(task->mhdl, request->cookie, request->ncookies, 1385 mtype, (request->operation == VD_OP_BREAD) ? LDC_MEM_W : LDC_MEM_R, 1386 &bufaddr, NULL); 1387 if (status != 0) { 1388 PR0("ldc_mem_map() returned err %d ", status); 1389 return (EIO); 1390 } 1391 1392 buflen = request->nbytes; 1393 1394 status = ldc_mem_acquire(task->mhdl, 0, buflen); 1395 if (status != 0) { 1396 (void) ldc_mem_unmap(task->mhdl); 1397 PR0("ldc_mem_acquire() returned err %d ", status); 1398 return (EIO); 1399 } 1400 1401 /* Start the block I/O */ 1402 if (vd->file) { 1403 rv = vd_file_rw(vd, slice, request->operation, bufaddr, 1404 request->addr, request->nbytes); 1405 if (rv < 0) { 1406 request->nbytes = 0; 1407 request->status = EIO; 1408 } else { 1409 request->nbytes = rv; 1410 request->status = 0; 1411 } 1412 } else { 1413 if (slice == VD_SLICE_NONE) { 1414 /* 1415 * This is not a disk image so it is a real disk. We 1416 * assume that the underlying device driver supports 1417 * USCSICMD ioctls. This is the case of all SCSI devices 1418 * (sd, ssd...). 1419 * 1420 * In the future if we have non-SCSI disks we would need 1421 * to invoke the appropriate function to do I/O using an 1422 * absolute disk offset (for example using DIOCTL_RWCMD 1423 * for IDE disks). 1424 */ 1425 rv = vd_scsi_rdwr(vd, request->operation, bufaddr, 1426 request->addr, request->nbytes); 1427 if (rv != 0) { 1428 request->nbytes = 0; 1429 request->status = EIO; 1430 } else { 1431 request->status = 0; 1432 } 1433 } else { 1434 bioinit(buf); 1435 buf->b_flags = B_BUSY; 1436 buf->b_bcount = request->nbytes; 1437 buf->b_lblkno = request->addr; 1438 buf->b_edev = vd->dev[slice]; 1439 buf->b_un.b_addr = bufaddr; 1440 buf->b_flags |= (request->operation == VD_OP_BREAD)? 1441 B_READ : B_WRITE; 1442 1443 request->status = 1444 ldi_strategy(vd->ldi_handle[slice], buf); 1445 1446 /* 1447 * This is to indicate to the caller that the request 1448 * needs to be finished by vd_complete_bio() by calling 1449 * biowait() there and waiting for that to return before 1450 * triggering the notification of the vDisk client. 
1451 * 1452 * This is necessary when writing to real disks as 1453 * otherwise calls to ldi_strategy() would be serialized 1454 * behind the calls to biowait() and performance would 1455 * suffer. 1456 */ 1457 if (request->status == 0) 1458 return (EINPROGRESS); 1459 1460 biofini(buf); 1461 } 1462 } 1463 1464 /* Clean up after error */ 1465 rv = ldc_mem_release(task->mhdl, 0, buflen); 1466 if (rv) { 1467 PR0("ldc_mem_release() returned err %d ", rv); 1468 status = EIO; 1469 } 1470 rv = ldc_mem_unmap(task->mhdl); 1471 if (rv) { 1472 PR0("ldc_mem_unmap() returned err %d ", rv); 1473 status = EIO; 1474 } 1475 1476 return (status); 1477 } 1478 1479 /* 1480 * This function should only be called from vd_notify to ensure that requests 1481 * are responded to in the order that they are received. 1482 */ 1483 static int 1484 send_msg(ldc_handle_t ldc_handle, void *msg, size_t msglen) 1485 { 1486 int status; 1487 size_t nbytes; 1488 1489 do { 1490 nbytes = msglen; 1491 status = ldc_write(ldc_handle, msg, &nbytes); 1492 if (status != EWOULDBLOCK) 1493 break; 1494 drv_usecwait(vds_ldc_delay); 1495 } while (status == EWOULDBLOCK); 1496 1497 if (status != 0) { 1498 if (status != ECONNRESET) 1499 PR0("ldc_write() returned errno %d", status); 1500 return (status); 1501 } else if (nbytes != msglen) { 1502 PR0("ldc_write() performed only partial write"); 1503 return (EIO); 1504 } 1505 1506 PR1("SENT %lu bytes", msglen); 1507 return (0); 1508 } 1509 1510 static void 1511 vd_need_reset(vd_t *vd, boolean_t reset_ldc) 1512 { 1513 mutex_enter(&vd->lock); 1514 vd->reset_state = B_TRUE; 1515 vd->reset_ldc = reset_ldc; 1516 mutex_exit(&vd->lock); 1517 } 1518 1519 /* 1520 * Reset the state of the connection with a client, if needed; reset the LDC 1521 * transport as well, if needed. This function should only be called from the 1522 * "vd_recv_msg", as it waits for tasks - otherwise a deadlock can occur. 
1523 */ 1524 static void 1525 vd_reset_if_needed(vd_t *vd) 1526 { 1527 int status = 0; 1528 1529 mutex_enter(&vd->lock); 1530 if (!vd->reset_state) { 1531 ASSERT(!vd->reset_ldc); 1532 mutex_exit(&vd->lock); 1533 return; 1534 } 1535 mutex_exit(&vd->lock); 1536 1537 PR0("Resetting connection state with %s", VD_CLIENT(vd)); 1538 1539 /* 1540 * Let any asynchronous I/O complete before possibly pulling the rug 1541 * out from under it; defer checking vd->reset_ldc, as one of the 1542 * asynchronous tasks might set it 1543 */ 1544 ddi_taskq_wait(vd->completionq); 1545 1546 if (vd->file) { 1547 status = VOP_FSYNC(vd->file_vnode, FSYNC, kcred, NULL); 1548 if (status) { 1549 PR0("VOP_FSYNC returned errno %d", status); 1550 } 1551 } 1552 1553 if ((vd->initialized & VD_DRING) && 1554 ((status = ldc_mem_dring_unmap(vd->dring_handle)) != 0)) 1555 PR0("ldc_mem_dring_unmap() returned errno %d", status); 1556 1557 vd_free_dring_task(vd); 1558 1559 /* Free the staging buffer for msgs */ 1560 if (vd->vio_msgp != NULL) { 1561 kmem_free(vd->vio_msgp, vd->max_msglen); 1562 vd->vio_msgp = NULL; 1563 } 1564 1565 /* Free the inband message buffer */ 1566 if (vd->inband_task.msg != NULL) { 1567 kmem_free(vd->inband_task.msg, vd->max_msglen); 1568 vd->inband_task.msg = NULL; 1569 } 1570 1571 mutex_enter(&vd->lock); 1572 1573 if (vd->reset_ldc) 1574 PR0("taking down LDC channel"); 1575 if (vd->reset_ldc && ((status = ldc_down(vd->ldc_handle)) != 0)) 1576 PR0("ldc_down() returned errno %d", status); 1577 1578 /* Reset exclusive access rights */ 1579 vd_reset_access(vd); 1580 1581 vd->initialized &= ~(VD_SID | VD_SEQ_NUM | VD_DRING); 1582 vd->state = VD_STATE_INIT; 1583 vd->max_msglen = sizeof (vio_msg_t); /* baseline vio message size */ 1584 1585 /* Allocate the staging buffer */ 1586 vd->vio_msgp = kmem_alloc(vd->max_msglen, KM_SLEEP); 1587 1588 PR0("calling ldc_up\n"); 1589 (void) ldc_up(vd->ldc_handle); 1590 1591 vd->reset_state = B_FALSE; 1592 vd->reset_ldc = B_FALSE; 1593 1594 mutex_exit(&vd->lock); 1595 } 1596 1597 static void vd_recv_msg(void *arg); 1598 1599 static void 1600 vd_mark_in_reset(vd_t *vd) 1601 { 1602 int status; 1603 1604 PR0("vd_mark_in_reset: marking vd in reset\n"); 1605 1606 vd_need_reset(vd, B_FALSE); 1607 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, vd, DDI_SLEEP); 1608 if (status == DDI_FAILURE) { 1609 PR0("cannot schedule task to recv msg\n"); 1610 vd_need_reset(vd, B_TRUE); 1611 return; 1612 } 1613 } 1614 1615 static int 1616 vd_mark_elem_done(vd_t *vd, int idx, int elem_status, int elem_nbytes) 1617 { 1618 boolean_t accepted; 1619 int status; 1620 vd_dring_entry_t *elem = VD_DRING_ELEM(idx); 1621 1622 if (vd->reset_state) 1623 return (0); 1624 1625 /* Acquire the element */ 1626 if (!vd->reset_state && 1627 (status = ldc_mem_dring_acquire(vd->dring_handle, idx, idx)) != 0) { 1628 if (status == ECONNRESET) { 1629 vd_mark_in_reset(vd); 1630 return (0); 1631 } else { 1632 PR0("ldc_mem_dring_acquire() returned errno %d", 1633 status); 1634 return (status); 1635 } 1636 } 1637 1638 /* Set the element's status and mark it done */ 1639 accepted = (elem->hdr.dstate == VIO_DESC_ACCEPTED); 1640 if (accepted) { 1641 elem->payload.nbytes = elem_nbytes; 1642 elem->payload.status = elem_status; 1643 elem->hdr.dstate = VIO_DESC_DONE; 1644 } else { 1645 /* Perhaps client timed out waiting for I/O... 
*/ 1646 PR0("element %u no longer \"accepted\"", idx); 1647 VD_DUMP_DRING_ELEM(elem); 1648 } 1649 /* Release the element */ 1650 if (!vd->reset_state && 1651 (status = ldc_mem_dring_release(vd->dring_handle, idx, idx)) != 0) { 1652 if (status == ECONNRESET) { 1653 vd_mark_in_reset(vd); 1654 return (0); 1655 } else { 1656 PR0("ldc_mem_dring_release() returned errno %d", 1657 status); 1658 return (status); 1659 } 1660 } 1661 1662 return (accepted ? 0 : EINVAL); 1663 } 1664 1665 /* 1666 * Return Values 1667 * 0 - operation completed successfully 1668 * EIO - encountered LDC / task error 1669 * 1670 * Side Effect 1671 * sets request->status = <disk operation status> 1672 */ 1673 static int 1674 vd_complete_bio(vd_task_t *task) 1675 { 1676 int status = 0; 1677 int rv = 0; 1678 vd_t *vd = task->vd; 1679 vd_dring_payload_t *request = task->request; 1680 struct buf *buf = &task->buf; 1681 1682 1683 ASSERT(vd != NULL); 1684 ASSERT(request != NULL); 1685 ASSERT(task->msg != NULL); 1686 ASSERT(task->msglen >= sizeof (*task->msg)); 1687 ASSERT(!vd->file); 1688 ASSERT(request->slice != VD_SLICE_NONE); 1689 1690 /* Wait for the I/O to complete [ call to ldi_strategy(9f) ] */ 1691 request->status = biowait(buf); 1692 1693 /* return back the number of bytes read/written */ 1694 request->nbytes = buf->b_bcount - buf->b_resid; 1695 1696 /* Release the buffer */ 1697 if (!vd->reset_state) 1698 status = ldc_mem_release(task->mhdl, 0, buf->b_bcount); 1699 if (status) { 1700 PR0("ldc_mem_release() returned errno %d copying to " 1701 "client", status); 1702 if (status == ECONNRESET) { 1703 vd_mark_in_reset(vd); 1704 } 1705 rv = EIO; 1706 } 1707 1708 /* Unmap the memory, even if in reset */ 1709 status = ldc_mem_unmap(task->mhdl); 1710 if (status) { 1711 PR0("ldc_mem_unmap() returned errno %d copying to client", 1712 status); 1713 if (status == ECONNRESET) { 1714 vd_mark_in_reset(vd); 1715 } 1716 rv = EIO; 1717 } 1718 1719 biofini(buf); 1720 1721 return (rv); 1722 } 1723 1724 /* 1725 * Description: 1726 * This function is called by the two functions called by a taskq 1727 * [ vd_complete_notify() and vd_serial_notify()) ] to send the 1728 * message to the client. 1729 * 1730 * Parameters: 1731 * arg - opaque pointer to structure containing task to be completed 1732 * 1733 * Return Values 1734 * None 1735 */ 1736 static void 1737 vd_notify(vd_task_t *task) 1738 { 1739 int status; 1740 1741 ASSERT(task != NULL); 1742 ASSERT(task->vd != NULL); 1743 1744 if (task->vd->reset_state) 1745 return; 1746 1747 /* 1748 * Send the "ack" or "nack" back to the client; if sending the message 1749 * via LDC fails, arrange to reset both the connection state and LDC 1750 * itself 1751 */ 1752 PR2("Sending %s", 1753 (task->msg->tag.vio_subtype == VIO_SUBTYPE_ACK) ? 
"ACK" : "NACK"); 1754 1755 status = send_msg(task->vd->ldc_handle, task->msg, task->msglen); 1756 switch (status) { 1757 case 0: 1758 break; 1759 case ECONNRESET: 1760 vd_mark_in_reset(task->vd); 1761 break; 1762 default: 1763 PR0("initiating full reset"); 1764 vd_need_reset(task->vd, B_TRUE); 1765 break; 1766 } 1767 1768 DTRACE_PROBE1(task__end, vd_task_t *, task); 1769 } 1770 1771 /* 1772 * Description: 1773 * Mark the Dring entry as Done and (if necessary) send an ACK/NACK to 1774 * the vDisk client 1775 * 1776 * Parameters: 1777 * task - structure containing the request sent from client 1778 * 1779 * Return Values 1780 * None 1781 */ 1782 static void 1783 vd_complete_notify(vd_task_t *task) 1784 { 1785 int status = 0; 1786 vd_t *vd = task->vd; 1787 vd_dring_payload_t *request = task->request; 1788 1789 /* Update the dring element for a dring client */ 1790 if (!vd->reset_state && (vd->xfer_mode == VIO_DRING_MODE_V1_0)) { 1791 status = vd_mark_elem_done(vd, task->index, 1792 request->status, request->nbytes); 1793 if (status == ECONNRESET) 1794 vd_mark_in_reset(vd); 1795 } 1796 1797 /* 1798 * If a transport error occurred while marking the element done or 1799 * previously while executing the task, arrange to "nack" the message 1800 * when the final task in the descriptor element range completes 1801 */ 1802 if ((status != 0) || (task->status != 0)) 1803 task->msg->tag.vio_subtype = VIO_SUBTYPE_NACK; 1804 1805 /* 1806 * Only the final task for a range of elements will respond to and 1807 * free the message 1808 */ 1809 if (task->type == VD_NONFINAL_RANGE_TASK) { 1810 return; 1811 } 1812 1813 vd_notify(task); 1814 } 1815 1816 /* 1817 * Description: 1818 * This is the basic completion function called to handle inband data 1819 * requests and handshake messages. All it needs to do is trigger a 1820 * message to the client that the request is completed. 
1821 * 1822 * Parameters: 1823 * arg - opaque pointer to structure containing task to be completed 1824 * 1825 * Return Values 1826 * None 1827 */ 1828 static void 1829 vd_serial_notify(void *arg) 1830 { 1831 vd_task_t *task = (vd_task_t *)arg; 1832 1833 ASSERT(task != NULL); 1834 vd_notify(task); 1835 } 1836 1837 /* ARGSUSED */ 1838 static int 1839 vd_geom2dk_geom(void *vd_buf, size_t vd_buf_len, void *ioctl_arg) 1840 { 1841 VD_GEOM2DK_GEOM((vd_geom_t *)vd_buf, (struct dk_geom *)ioctl_arg); 1842 return (0); 1843 } 1844 1845 /* ARGSUSED */ 1846 static int 1847 vd_vtoc2vtoc(void *vd_buf, size_t vd_buf_len, void *ioctl_arg) 1848 { 1849 VD_VTOC2VTOC((vd_vtoc_t *)vd_buf, (struct vtoc *)ioctl_arg); 1850 return (0); 1851 } 1852 1853 static void 1854 dk_geom2vd_geom(void *ioctl_arg, void *vd_buf) 1855 { 1856 DK_GEOM2VD_GEOM((struct dk_geom *)ioctl_arg, (vd_geom_t *)vd_buf); 1857 } 1858 1859 static void 1860 vtoc2vd_vtoc(void *ioctl_arg, void *vd_buf) 1861 { 1862 VTOC2VD_VTOC((struct vtoc *)ioctl_arg, (vd_vtoc_t *)vd_buf); 1863 } 1864 1865 static int 1866 vd_get_efi_in(void *vd_buf, size_t vd_buf_len, void *ioctl_arg) 1867 { 1868 vd_efi_t *vd_efi = (vd_efi_t *)vd_buf; 1869 dk_efi_t *dk_efi = (dk_efi_t *)ioctl_arg; 1870 size_t data_len; 1871 1872 data_len = vd_buf_len - (sizeof (vd_efi_t) - sizeof (uint64_t)); 1873 if (vd_efi->length > data_len) 1874 return (EINVAL); 1875 1876 dk_efi->dki_lba = vd_efi->lba; 1877 dk_efi->dki_length = vd_efi->length; 1878 dk_efi->dki_data = kmem_zalloc(vd_efi->length, KM_SLEEP); 1879 return (0); 1880 } 1881 1882 static void 1883 vd_get_efi_out(void *ioctl_arg, void *vd_buf) 1884 { 1885 int len; 1886 vd_efi_t *vd_efi = (vd_efi_t *)vd_buf; 1887 dk_efi_t *dk_efi = (dk_efi_t *)ioctl_arg; 1888 1889 len = vd_efi->length; 1890 DK_EFI2VD_EFI(dk_efi, vd_efi); 1891 kmem_free(dk_efi->dki_data, len); 1892 } 1893 1894 static int 1895 vd_set_efi_in(void *vd_buf, size_t vd_buf_len, void *ioctl_arg) 1896 { 1897 vd_efi_t *vd_efi = (vd_efi_t *)vd_buf; 1898 dk_efi_t *dk_efi = (dk_efi_t *)ioctl_arg; 1899 size_t data_len; 1900 1901 data_len = vd_buf_len - (sizeof (vd_efi_t) - sizeof (uint64_t)); 1902 if (vd_efi->length > data_len) 1903 return (EINVAL); 1904 1905 dk_efi->dki_data = kmem_alloc(vd_efi->length, KM_SLEEP); 1906 VD_EFI2DK_EFI(vd_efi, dk_efi); 1907 return (0); 1908 } 1909 1910 static void 1911 vd_set_efi_out(void *ioctl_arg, void *vd_buf) 1912 { 1913 vd_efi_t *vd_efi = (vd_efi_t *)vd_buf; 1914 dk_efi_t *dk_efi = (dk_efi_t *)ioctl_arg; 1915 1916 kmem_free(dk_efi->dki_data, vd_efi->length); 1917 } 1918 1919 static int 1920 vd_scsicmd_in(void *vd_buf, size_t vd_buf_len, void *ioctl_arg) 1921 { 1922 size_t vd_scsi_len; 1923 vd_scsi_t *vd_scsi = (vd_scsi_t *)vd_buf; 1924 struct uscsi_cmd *uscsi = (struct uscsi_cmd *)ioctl_arg; 1925 1926 /* check buffer size */ 1927 vd_scsi_len = VD_SCSI_SIZE; 1928 vd_scsi_len += P2ROUNDUP(vd_scsi->cdb_len, sizeof (uint64_t)); 1929 vd_scsi_len += P2ROUNDUP(vd_scsi->sense_len, sizeof (uint64_t)); 1930 vd_scsi_len += P2ROUNDUP(vd_scsi->datain_len, sizeof (uint64_t)); 1931 vd_scsi_len += P2ROUNDUP(vd_scsi->dataout_len, sizeof (uint64_t)); 1932 1933 ASSERT(vd_scsi_len % sizeof (uint64_t) == 0); 1934 1935 if (vd_buf_len < vd_scsi_len) 1936 return (EINVAL); 1937 1938 /* set flags */ 1939 uscsi->uscsi_flags = vd_scsi_debug; 1940 1941 if (vd_scsi->options & VD_SCSI_OPT_NORETRY) { 1942 uscsi->uscsi_flags |= USCSI_ISOLATE; 1943 uscsi->uscsi_flags |= USCSI_DIAGNOSE; 1944 } 1945 1946 /* task attribute */ 1947 switch (vd_scsi->task_attribute) { 1948 case 
VD_SCSI_TASK_ACA: 1949 uscsi->uscsi_flags |= USCSI_HEAD; 1950 break; 1951 case VD_SCSI_TASK_HQUEUE: 1952 uscsi->uscsi_flags |= USCSI_HTAG; 1953 break; 1954 case VD_SCSI_TASK_ORDERED: 1955 uscsi->uscsi_flags |= USCSI_OTAG; 1956 break; 1957 default: 1958 uscsi->uscsi_flags |= USCSI_NOTAG; 1959 break; 1960 } 1961 1962 /* timeout */ 1963 uscsi->uscsi_timeout = vd_scsi->timeout; 1964 1965 /* cdb data */ 1966 uscsi->uscsi_cdb = (caddr_t)VD_SCSI_DATA_CDB(vd_scsi); 1967 uscsi->uscsi_cdblen = vd_scsi->cdb_len; 1968 1969 /* sense buffer */ 1970 if (vd_scsi->sense_len != 0) { 1971 uscsi->uscsi_flags |= USCSI_RQENABLE; 1972 uscsi->uscsi_rqbuf = (caddr_t)VD_SCSI_DATA_SENSE(vd_scsi); 1973 uscsi->uscsi_rqlen = vd_scsi->sense_len; 1974 } 1975 1976 if (vd_scsi->datain_len != 0 && vd_scsi->dataout_len != 0) { 1977 /* uscsi does not support read/write request */ 1978 return (EINVAL); 1979 } 1980 1981 /* request data-in */ 1982 if (vd_scsi->datain_len != 0) { 1983 uscsi->uscsi_flags |= USCSI_READ; 1984 uscsi->uscsi_buflen = vd_scsi->datain_len; 1985 uscsi->uscsi_bufaddr = (char *)VD_SCSI_DATA_IN(vd_scsi); 1986 } 1987 1988 /* request data-out */ 1989 if (vd_scsi->dataout_len != 0) { 1990 uscsi->uscsi_buflen = vd_scsi->dataout_len; 1991 uscsi->uscsi_bufaddr = (char *)VD_SCSI_DATA_OUT(vd_scsi); 1992 } 1993 1994 return (0); 1995 } 1996 1997 static void 1998 vd_scsicmd_out(void *ioctl_arg, void *vd_buf) 1999 { 2000 vd_scsi_t *vd_scsi = (vd_scsi_t *)vd_buf; 2001 struct uscsi_cmd *uscsi = (struct uscsi_cmd *)ioctl_arg; 2002 2003 /* output fields */ 2004 vd_scsi->cmd_status = uscsi->uscsi_status; 2005 2006 /* sense data */ 2007 if ((uscsi->uscsi_flags & USCSI_RQENABLE) && 2008 (uscsi->uscsi_status == STATUS_CHECK || 2009 uscsi->uscsi_status == STATUS_TERMINATED)) { 2010 vd_scsi->sense_status = uscsi->uscsi_rqstatus; 2011 if (uscsi->uscsi_rqstatus == STATUS_GOOD) 2012 vd_scsi->sense_len -= uscsi->uscsi_resid; 2013 else 2014 vd_scsi->sense_len = 0; 2015 } else { 2016 vd_scsi->sense_len = 0; 2017 } 2018 2019 if (uscsi->uscsi_status != STATUS_GOOD) { 2020 vd_scsi->dataout_len = 0; 2021 vd_scsi->datain_len = 0; 2022 return; 2023 } 2024 2025 if (uscsi->uscsi_flags & USCSI_READ) { 2026 /* request data (read) */ 2027 vd_scsi->datain_len -= uscsi->uscsi_resid; 2028 vd_scsi->dataout_len = 0; 2029 } else { 2030 /* request data (write) */ 2031 vd_scsi->datain_len = 0; 2032 vd_scsi->dataout_len -= uscsi->uscsi_resid; 2033 } 2034 } 2035 2036 static ushort_t 2037 vd_lbl2cksum(struct dk_label *label) 2038 { 2039 int count; 2040 ushort_t sum, *sp; 2041 2042 count = (sizeof (struct dk_label)) / (sizeof (short)) - 1; 2043 sp = (ushort_t *)label; 2044 sum = 0; 2045 while (count--) { 2046 sum ^= *sp++; 2047 } 2048 2049 return (sum); 2050 } 2051 2052 /* 2053 * Handle ioctls to a disk slice. 
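 *
 * As an illustrative summary of the emulation below (assuming a client
 * such as format(1M) probing the exported single-slice disk):
 *
 *	DKIOCFLUSHWRITECACHE	VOP_FSYNC() on a file backend, otherwise
 *				forwarded with ldi_ioctl()
 *	DKIOCGGEOM		copy of the cached vd->dk_geom (VTOC label)
 *	DKIOCGVTOC		copy of the cached vd->vtoc (VTOC label)
 *	DKIOCGETEFI		synthetic GPT at LBA 1, GPE at LBA 2 (EFI label)
 *	anything else		ENOTSUP
 *
 * For example, a DKIOCGETEFI request with dki_lba == 1 and dki_length >=
 * sizeof (efi_gpt_t) + sizeof (efi_gpe_t) gets both the fake GPT and its
 * single GPE copied back in one call.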
2054 * 2055 * Return Values 2056 * 0 - Indicates that there are no errors in disk operations 2057 * ENOTSUP - Unknown disk label type or unsupported DKIO ioctl 2058 * EINVAL - Not enough room to copy the EFI label 2059 * 2060 */ 2061 static int 2062 vd_do_slice_ioctl(vd_t *vd, int cmd, void *ioctl_arg) 2063 { 2064 dk_efi_t *dk_ioc; 2065 int rval; 2066 2067 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE); 2068 2069 if (cmd == DKIOCFLUSHWRITECACHE) { 2070 if (vd->file) { 2071 return (VOP_FSYNC(vd->file_vnode, FSYNC, kcred, NULL)); 2072 } else { 2073 return (ldi_ioctl(vd->ldi_handle[0], cmd, 2074 (intptr_t)ioctl_arg, vd->open_flags | FKIOCTL, 2075 kcred, &rval)); 2076 } 2077 } 2078 2079 switch (vd->vdisk_label) { 2080 2081 /* ioctls for a single slice disk with a VTOC label */ 2082 case VD_DISK_LABEL_VTOC: 2083 2084 switch (cmd) { 2085 case DKIOCGGEOM: 2086 ASSERT(ioctl_arg != NULL); 2087 bcopy(&vd->dk_geom, ioctl_arg, sizeof (vd->dk_geom)); 2088 return (0); 2089 case DKIOCGVTOC: 2090 ASSERT(ioctl_arg != NULL); 2091 bcopy(&vd->vtoc, ioctl_arg, sizeof (vd->vtoc)); 2092 return (0); 2093 default: 2094 return (ENOTSUP); 2095 } 2096 2097 /* ioctls for a single slice disk with an EFI label */ 2098 case VD_DISK_LABEL_EFI: 2099 2100 switch (cmd) { 2101 case DKIOCGETEFI: 2102 ASSERT(ioctl_arg != NULL); 2103 dk_ioc = (dk_efi_t *)ioctl_arg; 2104 2105 /* 2106 * For a single slice disk with an EFI label, we define 2107 * a fake EFI label with the GPT at LBA 1 and one GPE 2108 * at LBA 2. So we return the GPT or the GPE depending 2109 * on which LBA is requested. 2110 */ 2111 if (dk_ioc->dki_lba == 1) { 2112 2113 /* return the EFI GPT */ 2114 if (dk_ioc->dki_length < sizeof (efi_gpt_t)) 2115 return (EINVAL); 2116 2117 bcopy(&vd->efi_gpt, dk_ioc->dki_data, 2118 sizeof (efi_gpt_t)); 2119 2120 /* also return the GPE if possible */ 2121 if (dk_ioc->dki_length >= sizeof (efi_gpt_t) + 2122 sizeof (efi_gpe_t)) { 2123 bcopy(&vd->efi_gpe, dk_ioc->dki_data + 2124 1, sizeof (efi_gpe_t)); 2125 } 2126 2127 } else if (dk_ioc->dki_lba == 2) { 2128 2129 /* return the EFI GPE */ 2130 if (dk_ioc->dki_length < sizeof (efi_gpe_t)) 2131 return (EINVAL); 2132 2133 bcopy(&vd->efi_gpe, dk_ioc->dki_data, 2134 sizeof (efi_gpe_t)); 2135 2136 } else { 2137 return (EINVAL); 2138 } 2139 2140 return (0); 2141 default: 2142 return (ENOTSUP); 2143 } 2144 2145 default: 2146 /* Unknown disk label type */ 2147 return (ENOTSUP); 2148 } 2149 } 2150 2151 static int 2152 vds_efi_alloc_and_read(vd_t *vd, efi_gpt_t **gpt, efi_gpe_t **gpe) 2153 { 2154 vd_efi_dev_t edev; 2155 int status; 2156 2157 VD_EFI_DEV_SET(edev, vd, (vd_efi_ioctl_func)vd_backend_ioctl); 2158 2159 status = vd_efi_alloc_and_read(&edev, gpt, gpe); 2160 2161 return (status); 2162 } 2163 2164 static void 2165 vds_efi_free(vd_t *vd, efi_gpt_t *gpt, efi_gpe_t *gpe) 2166 { 2167 vd_efi_dev_t edev; 2168 2169 VD_EFI_DEV_SET(edev, vd, (vd_efi_ioctl_func)vd_backend_ioctl); 2170 2171 vd_efi_free(&edev, gpt, gpe); 2172 } 2173 2174 static int 2175 vd_file_validate_efi(vd_t *vd) 2176 { 2177 efi_gpt_t *gpt; 2178 efi_gpe_t *gpe; 2179 int i, nparts, status; 2180 struct uuid efi_reserved = EFI_RESERVED; 2181 2182 if ((status = vds_efi_alloc_and_read(vd, &gpt, &gpe)) != 0) 2183 return (status); 2184 2185 bzero(&vd->vtoc, sizeof (struct vtoc)); 2186 bzero(&vd->dk_geom, sizeof (struct dk_geom)); 2187 bzero(vd->slices, sizeof (vd_slice_t) * VD_MAXPART); 2188 2189 vd->efi_reserved = -1; 2190 2191 nparts = gpt->efi_gpt_NumberOfPartitionEntries; 2192 2193 for (i = 0; i < nparts && i < VD_MAXPART; i++) { 
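		/*
		 * Illustrative sketch of the arithmetic below (values made
		 * up): a GPE with efi_gpe_StartingLBA = 34 and
		 * efi_gpe_EndingLBA = 1048609 produces
		 *
		 *	slices[i].start   = 34
		 *	slices[i].nblocks = 1048609 - 34 + 1 = 1048576
		 *
		 * Entries whose starting or ending LBA is zero are unused
		 * and are skipped.
		 */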
2194 2195 if (gpe[i].efi_gpe_StartingLBA == 0 || 2196 gpe[i].efi_gpe_EndingLBA == 0) { 2197 continue; 2198 } 2199 2200 vd->slices[i].start = gpe[i].efi_gpe_StartingLBA; 2201 vd->slices[i].nblocks = gpe[i].efi_gpe_EndingLBA - 2202 gpe[i].efi_gpe_StartingLBA + 1; 2203 2204 if (bcmp(&gpe[i].efi_gpe_PartitionTypeGUID, &efi_reserved, 2205 sizeof (struct uuid)) == 0) 2206 vd->efi_reserved = i; 2207 2208 } 2209 2210 ASSERT(vd->vdisk_size != 0); 2211 vd->slices[VD_EFI_WD_SLICE].start = 0; 2212 vd->slices[VD_EFI_WD_SLICE].nblocks = vd->vdisk_size; 2213 2214 vds_efi_free(vd, gpt, gpe); 2215 2216 return (status); 2217 } 2218 2219 /* 2220 * Function: 2221 * vd_file_validate_geometry 2222 * 2223 * Description: 2224 * Read the label and validate the geometry of a disk image. The driver 2225 * label, vtoc and geometry information are updated according to the 2226 * label read from the disk image. 2227 * 2228 * If no valid label is found, the label is set to unknown and the 2229 * function returns EINVAL, but a default vtoc and geometry are provided 2230 * to the driver. If an EFI label is found, ENOTSUP is returned. 2231 * 2232 * Parameters: 2233 * vd - disk on which the operation is performed. 2234 * 2235 * Return Code: 2236 * 0 - success. 2237 * EIO - error reading the label from the disk image. 2238 * EINVAL - unknown disk label. 2239 * ENOTSUP - geometry not applicable (EFI label). 2240 */ 2241 static int 2242 vd_file_validate_geometry(vd_t *vd) 2243 { 2244 struct dk_label label; 2245 struct dk_geom *geom = &vd->dk_geom; 2246 struct vtoc *vtoc = &vd->vtoc; 2247 int i; 2248 int status = 0; 2249 2250 ASSERT(vd->file); 2251 ASSERT(vd->vdisk_type == VD_DISK_TYPE_DISK); 2252 2253 if (VD_FILE_LABEL_READ(vd, &label) < 0) 2254 return (EIO); 2255 2256 if (label.dkl_magic != DKL_MAGIC || 2257 label.dkl_cksum != vd_lbl2cksum(&label) || 2258 (vd_file_validate_sanity && label.dkl_vtoc.v_sanity != VTOC_SANE) || 2259 label.dkl_vtoc.v_nparts != V_NUMPAR) { 2260 2261 if (vd_file_validate_efi(vd) == 0) { 2262 vd->vdisk_label = VD_DISK_LABEL_EFI; 2263 return (ENOTSUP); 2264 } 2265 2266 vd->vdisk_label = VD_DISK_LABEL_UNK; 2267 vd_file_build_default_label(vd, &label); 2268 status = EINVAL; 2269 } else { 2270 vd->vdisk_label = VD_DISK_LABEL_VTOC; 2271 } 2272 2273 /* Update the driver geometry */ 2274 bzero(geom, sizeof (struct dk_geom)); 2275 2276 geom->dkg_ncyl = label.dkl_ncyl; 2277 geom->dkg_acyl = label.dkl_acyl; 2278 geom->dkg_nhead = label.dkl_nhead; 2279 geom->dkg_nsect = label.dkl_nsect; 2280 geom->dkg_intrlv = label.dkl_intrlv; 2281 geom->dkg_apc = label.dkl_apc; 2282 geom->dkg_rpm = label.dkl_rpm; 2283 geom->dkg_pcyl = label.dkl_pcyl; 2284 geom->dkg_write_reinstruct = label.dkl_write_reinstruct; 2285 geom->dkg_read_reinstruct = label.dkl_read_reinstruct; 2286 2287 /* Update the driver vtoc */ 2288 bzero(vtoc, sizeof (struct vtoc)); 2289 2290 vtoc->v_sanity = label.dkl_vtoc.v_sanity; 2291 vtoc->v_version = label.dkl_vtoc.v_version; 2292 vtoc->v_sectorsz = DEV_BSIZE; 2293 vtoc->v_nparts = label.dkl_vtoc.v_nparts; 2294 2295 for (i = 0; i < vtoc->v_nparts; i++) { 2296 vtoc->v_part[i].p_tag = 2297 label.dkl_vtoc.v_part[i].p_tag; 2298 vtoc->v_part[i].p_flag = 2299 label.dkl_vtoc.v_part[i].p_flag; 2300 vtoc->v_part[i].p_start = 2301 label.dkl_map[i].dkl_cylno * 2302 (label.dkl_nhead * label.dkl_nsect); 2303 vtoc->v_part[i].p_size = label.dkl_map[i].dkl_nblk; 2304 vtoc->timestamp[i] = 2305 label.dkl_vtoc.v_timestamp[i]; 2306 } 2307 /* 2308 * The bootinfo array can not be copied with bcopy() because 2309 * elements are 
of type long in vtoc (so 64-bit) and of type 2310 * int in dk_vtoc (so 32-bit). 2311 */ 2312 vtoc->v_bootinfo[0] = label.dkl_vtoc.v_bootinfo[0]; 2313 vtoc->v_bootinfo[1] = label.dkl_vtoc.v_bootinfo[1]; 2314 vtoc->v_bootinfo[2] = label.dkl_vtoc.v_bootinfo[2]; 2315 bcopy(label.dkl_asciilabel, vtoc->v_asciilabel, 2316 LEN_DKL_ASCII); 2317 bcopy(label.dkl_vtoc.v_volume, vtoc->v_volume, 2318 LEN_DKL_VVOL); 2319 2320 /* Update logical partitions */ 2321 bzero(vd->slices, sizeof (vd_slice_t) * VD_MAXPART); 2322 if (vd->vdisk_label != VD_DISK_LABEL_UNK) { 2323 for (i = 0; i < vtoc->v_nparts; i++) { 2324 vd->slices[i].start = vtoc->v_part[i].p_start; 2325 vd->slices[i].nblocks = vtoc->v_part[i].p_size; 2326 } 2327 } 2328 2329 return (status); 2330 } 2331 2332 /* 2333 * Handle ioctls to a disk image (file-based). 2334 * 2335 * Return Values 2336 * 0 - Indicates that there are no errors 2337 * != 0 - Disk operation returned an error 2338 */ 2339 static int 2340 vd_do_file_ioctl(vd_t *vd, int cmd, void *ioctl_arg) 2341 { 2342 struct dk_label label; 2343 struct dk_geom *geom; 2344 struct vtoc *vtoc; 2345 dk_efi_t *efi; 2346 int i, rc; 2347 2348 ASSERT(vd->file); 2349 ASSERT(vd->vdisk_type == VD_DISK_TYPE_DISK); 2350 2351 switch (cmd) { 2352 2353 case DKIOCGGEOM: 2354 ASSERT(ioctl_arg != NULL); 2355 geom = (struct dk_geom *)ioctl_arg; 2356 2357 rc = vd_file_validate_geometry(vd); 2358 if (rc != 0 && rc != EINVAL) 2359 return (rc); 2360 bcopy(&vd->dk_geom, geom, sizeof (struct dk_geom)); 2361 return (0); 2362 2363 case DKIOCGVTOC: 2364 ASSERT(ioctl_arg != NULL); 2365 vtoc = (struct vtoc *)ioctl_arg; 2366 2367 rc = vd_file_validate_geometry(vd); 2368 if (rc != 0 && rc != EINVAL) 2369 return (rc); 2370 bcopy(&vd->vtoc, vtoc, sizeof (struct vtoc)); 2371 return (0); 2372 2373 case DKIOCSGEOM: 2374 ASSERT(ioctl_arg != NULL); 2375 geom = (struct dk_geom *)ioctl_arg; 2376 2377 if (geom->dkg_nhead == 0 || geom->dkg_nsect == 0) 2378 return (EINVAL); 2379 2380 /* 2381 * The current device geometry is not updated, just the driver 2382 * "notion" of it. The device geometry will be effectively 2383 * updated when a label is written to the device during a next 2384 * DKIOCSVTOC. 
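	 * As a worked example of why the cached geometry matters (numbers
	 * are illustrative only): with dkg_nhead = 16 and dkg_nsect = 63,
	 * the DKIOCSVTOC code below records a partition starting at
	 * p_start = 10080 as
	 *
	 *	dkl_cylno = 10080 / (16 * 63) = 10
	 *
	 * so a bogus geometry set here would later produce a bogus label.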
2385 */ 2386 bcopy(ioctl_arg, &vd->dk_geom, sizeof (vd->dk_geom)); 2387 return (0); 2388 2389 case DKIOCSVTOC: 2390 ASSERT(ioctl_arg != NULL); 2391 ASSERT(vd->dk_geom.dkg_nhead != 0 && 2392 vd->dk_geom.dkg_nsect != 0); 2393 vtoc = (struct vtoc *)ioctl_arg; 2394 2395 if (vtoc->v_sanity != VTOC_SANE || 2396 vtoc->v_sectorsz != DEV_BSIZE || 2397 vtoc->v_nparts != V_NUMPAR) 2398 return (EINVAL); 2399 2400 bzero(&label, sizeof (label)); 2401 label.dkl_ncyl = vd->dk_geom.dkg_ncyl; 2402 label.dkl_acyl = vd->dk_geom.dkg_acyl; 2403 label.dkl_pcyl = vd->dk_geom.dkg_pcyl; 2404 label.dkl_nhead = vd->dk_geom.dkg_nhead; 2405 label.dkl_nsect = vd->dk_geom.dkg_nsect; 2406 label.dkl_intrlv = vd->dk_geom.dkg_intrlv; 2407 label.dkl_apc = vd->dk_geom.dkg_apc; 2408 label.dkl_rpm = vd->dk_geom.dkg_rpm; 2409 label.dkl_write_reinstruct = vd->dk_geom.dkg_write_reinstruct; 2410 label.dkl_read_reinstruct = vd->dk_geom.dkg_read_reinstruct; 2411 2412 label.dkl_vtoc.v_nparts = V_NUMPAR; 2413 label.dkl_vtoc.v_sanity = VTOC_SANE; 2414 label.dkl_vtoc.v_version = vtoc->v_version; 2415 for (i = 0; i < V_NUMPAR; i++) { 2416 label.dkl_vtoc.v_timestamp[i] = 2417 vtoc->timestamp[i]; 2418 label.dkl_vtoc.v_part[i].p_tag = 2419 vtoc->v_part[i].p_tag; 2420 label.dkl_vtoc.v_part[i].p_flag = 2421 vtoc->v_part[i].p_flag; 2422 label.dkl_map[i].dkl_cylno = 2423 vtoc->v_part[i].p_start / 2424 (label.dkl_nhead * label.dkl_nsect); 2425 label.dkl_map[i].dkl_nblk = 2426 vtoc->v_part[i].p_size; 2427 } 2428 /* 2429 * The bootinfo array can not be copied with bcopy() because 2430 * elements are of type long in vtoc (so 64-bit) and of type 2431 * int in dk_vtoc (so 32-bit). 2432 */ 2433 label.dkl_vtoc.v_bootinfo[0] = vtoc->v_bootinfo[0]; 2434 label.dkl_vtoc.v_bootinfo[1] = vtoc->v_bootinfo[1]; 2435 label.dkl_vtoc.v_bootinfo[2] = vtoc->v_bootinfo[2]; 2436 bcopy(vtoc->v_asciilabel, label.dkl_asciilabel, 2437 LEN_DKL_ASCII); 2438 bcopy(vtoc->v_volume, label.dkl_vtoc.v_volume, 2439 LEN_DKL_VVOL); 2440 2441 /* re-compute checksum */ 2442 label.dkl_magic = DKL_MAGIC; 2443 label.dkl_cksum = vd_lbl2cksum(&label); 2444 2445 /* write label to the disk image */ 2446 if ((rc = vd_file_set_vtoc(vd, &label)) != 0) 2447 return (rc); 2448 2449 break; 2450 2451 case DKIOCFLUSHWRITECACHE: 2452 return (VOP_FSYNC(vd->file_vnode, FSYNC, kcred, NULL)); 2453 2454 case DKIOCGETEFI: 2455 ASSERT(ioctl_arg != NULL); 2456 efi = (dk_efi_t *)ioctl_arg; 2457 2458 if (vd_file_rw(vd, VD_SLICE_NONE, VD_OP_BREAD, 2459 (caddr_t)efi->dki_data, efi->dki_lba, efi->dki_length) < 0) 2460 return (EIO); 2461 2462 return (0); 2463 2464 case DKIOCSETEFI: 2465 ASSERT(ioctl_arg != NULL); 2466 efi = (dk_efi_t *)ioctl_arg; 2467 2468 if (vd_file_rw(vd, VD_SLICE_NONE, VD_OP_BWRITE, 2469 (caddr_t)efi->dki_data, efi->dki_lba, efi->dki_length) < 0) 2470 return (EIO); 2471 2472 break; 2473 2474 2475 default: 2476 return (ENOTSUP); 2477 } 2478 2479 ASSERT(cmd == DKIOCSVTOC || cmd == DKIOCSETEFI); 2480 2481 /* label has changed, revalidate the geometry */ 2482 (void) vd_file_validate_geometry(vd); 2483 2484 /* 2485 * The disk geometry may have changed, so we need to write 2486 * the devid (if there is one) so that it is stored at the 2487 * right location. 
2488 */ 2489 if (vd_file_write_devid(vd, vd->file_devid) != 0) { 2490 PR0("Fail to write devid"); 2491 } 2492 2493 return (0); 2494 } 2495 2496 static int 2497 vd_backend_ioctl(vd_t *vd, int cmd, caddr_t arg) 2498 { 2499 int rval = 0, status; 2500 2501 /* 2502 * Call the appropriate function to execute the ioctl depending 2503 * on the type of vdisk. 2504 */ 2505 if (vd->vdisk_type == VD_DISK_TYPE_SLICE) { 2506 2507 /* slice, file or volume exported as a single slice disk */ 2508 status = vd_do_slice_ioctl(vd, cmd, arg); 2509 2510 } else if (vd->file) { 2511 2512 /* file or volume exported as a full disk */ 2513 status = vd_do_file_ioctl(vd, cmd, arg); 2514 2515 } else { 2516 2517 /* disk device exported as a full disk */ 2518 status = ldi_ioctl(vd->ldi_handle[0], cmd, (intptr_t)arg, 2519 vd->open_flags | FKIOCTL, kcred, &rval); 2520 } 2521 2522 #ifdef DEBUG 2523 if (rval != 0) { 2524 PR0("ioctl %x set rval = %d, which is not being returned" 2525 " to caller", cmd, rval); 2526 } 2527 #endif /* DEBUG */ 2528 2529 return (status); 2530 } 2531 2532 /* 2533 * Description: 2534 * This is the function that processes the ioctl requests (farming it 2535 * out to functions that handle slices, files or whole disks) 2536 * 2537 * Return Values 2538 * 0 - ioctl operation completed successfully 2539 * != 0 - The LDC error value encountered 2540 * (propagated back up the call stack as a task error) 2541 * 2542 * Side Effect 2543 * sets request->status to the return value of the ioctl function. 2544 */ 2545 static int 2546 vd_do_ioctl(vd_t *vd, vd_dring_payload_t *request, void* buf, vd_ioctl_t *ioctl) 2547 { 2548 int status = 0; 2549 size_t nbytes = request->nbytes; /* modifiable copy */ 2550 2551 2552 ASSERT(request->slice < vd->nslices); 2553 PR0("Performing %s", ioctl->operation_name); 2554 2555 /* Get data from client and convert, if necessary */ 2556 if (ioctl->copyin != NULL) { 2557 ASSERT(nbytes != 0 && buf != NULL); 2558 PR1("Getting \"arg\" data from client"); 2559 if ((status = ldc_mem_copy(vd->ldc_handle, buf, 0, &nbytes, 2560 request->cookie, request->ncookies, 2561 LDC_COPY_IN)) != 0) { 2562 PR0("ldc_mem_copy() returned errno %d " 2563 "copying from client", status); 2564 return (status); 2565 } 2566 2567 /* Convert client's data, if necessary */ 2568 if (ioctl->copyin == VD_IDENTITY_IN) { 2569 /* use client buffer */ 2570 ioctl->arg = buf; 2571 } else { 2572 /* convert client vdisk operation data to ioctl data */ 2573 status = (ioctl->copyin)(buf, nbytes, 2574 (void *)ioctl->arg); 2575 if (status != 0) { 2576 request->status = status; 2577 return (0); 2578 } 2579 } 2580 } 2581 2582 if (ioctl->operation == VD_OP_SCSICMD) { 2583 struct uscsi_cmd *uscsi = (struct uscsi_cmd *)ioctl->arg; 2584 2585 /* check write permission */ 2586 if (!(vd->open_flags & FWRITE) && 2587 !(uscsi->uscsi_flags & USCSI_READ)) { 2588 PR0("uscsi fails because backend is opened read-only"); 2589 request->status = EROFS; 2590 return (0); 2591 } 2592 } 2593 2594 /* 2595 * Send the ioctl to the disk backend. 2596 */ 2597 request->status = vd_backend_ioctl(vd, ioctl->cmd, ioctl->arg); 2598 2599 if (request->status != 0) { 2600 PR0("ioctl(%s) = errno %d", ioctl->cmd_name, request->status); 2601 if (ioctl->operation == VD_OP_SCSICMD && 2602 ((struct uscsi_cmd *)ioctl->arg)->uscsi_status != 0) 2603 /* 2604 * USCSICMD has reported an error and the uscsi_status 2605 * field is not zero. This means that the SCSI command 2606 * has completed but it has an error. 
So we should 2607 * mark the VD operation as successfully completed 2608 * and clients can check the SCSI status field for 2609 * SCSI errors. 2610 */ 2611 request->status = 0; 2612 else 2613 return (0); 2614 } 2615 2616 /* Convert data and send to client, if necessary */ 2617 if (ioctl->copyout != NULL) { 2618 ASSERT(nbytes != 0 && buf != NULL); 2619 PR1("Sending \"arg\" data to client"); 2620 2621 /* Convert ioctl data to vdisk operation data, if necessary */ 2622 if (ioctl->copyout != VD_IDENTITY_OUT) 2623 (ioctl->copyout)((void *)ioctl->arg, buf); 2624 2625 if ((status = ldc_mem_copy(vd->ldc_handle, buf, 0, &nbytes, 2626 request->cookie, request->ncookies, 2627 LDC_COPY_OUT)) != 0) { 2628 PR0("ldc_mem_copy() returned errno %d " 2629 "copying to client", status); 2630 return (status); 2631 } 2632 } 2633 2634 return (status); 2635 } 2636 2637 #define RNDSIZE(expr) P2ROUNDUP(sizeof (expr), sizeof (uint64_t)) 2638 2639 /* 2640 * Description: 2641 * This generic function is called by the task queue to complete 2642 * the processing of the tasks. The specific completion function 2643 * is passed in as a field in the task pointer. 2644 * 2645 * Parameters: 2646 * arg - opaque pointer to structure containing task to be completed 2647 * 2648 * Return Values 2649 * None 2650 */ 2651 static void 2652 vd_complete(void *arg) 2653 { 2654 vd_task_t *task = (vd_task_t *)arg; 2655 2656 ASSERT(task != NULL); 2657 ASSERT(task->status == EINPROGRESS); 2658 ASSERT(task->completef != NULL); 2659 2660 task->status = task->completef(task); 2661 if (task->status) 2662 PR0("%s: Error %d completing task", __func__, task->status); 2663 2664 /* Now notify the vDisk client */ 2665 vd_complete_notify(task); 2666 } 2667 2668 static int 2669 vd_ioctl(vd_task_t *task) 2670 { 2671 int i, status; 2672 void *buf = NULL; 2673 struct dk_geom dk_geom = {0}; 2674 struct vtoc vtoc = {0}; 2675 struct dk_efi dk_efi = {0}; 2676 struct uscsi_cmd uscsi = {0}; 2677 vd_t *vd = task->vd; 2678 vd_dring_payload_t *request = task->request; 2679 vd_ioctl_t ioctl[] = { 2680 /* Command (no-copy) operations */ 2681 {VD_OP_FLUSH, STRINGIZE(VD_OP_FLUSH), 0, 2682 DKIOCFLUSHWRITECACHE, STRINGIZE(DKIOCFLUSHWRITECACHE), 2683 NULL, NULL, NULL, B_TRUE}, 2684 2685 /* "Get" (copy-out) operations */ 2686 {VD_OP_GET_WCE, STRINGIZE(VD_OP_GET_WCE), RNDSIZE(int), 2687 DKIOCGETWCE, STRINGIZE(DKIOCGETWCE), 2688 NULL, VD_IDENTITY_IN, VD_IDENTITY_OUT, B_FALSE}, 2689 {VD_OP_GET_DISKGEOM, STRINGIZE(VD_OP_GET_DISKGEOM), 2690 RNDSIZE(vd_geom_t), 2691 DKIOCGGEOM, STRINGIZE(DKIOCGGEOM), 2692 &dk_geom, NULL, dk_geom2vd_geom, B_FALSE}, 2693 {VD_OP_GET_VTOC, STRINGIZE(VD_OP_GET_VTOC), RNDSIZE(vd_vtoc_t), 2694 DKIOCGVTOC, STRINGIZE(DKIOCGVTOC), 2695 &vtoc, NULL, vtoc2vd_vtoc, B_FALSE}, 2696 {VD_OP_GET_EFI, STRINGIZE(VD_OP_GET_EFI), RNDSIZE(vd_efi_t), 2697 DKIOCGETEFI, STRINGIZE(DKIOCGETEFI), 2698 &dk_efi, vd_get_efi_in, vd_get_efi_out, B_FALSE}, 2699 2700 /* "Set" (copy-in) operations */ 2701 {VD_OP_SET_WCE, STRINGIZE(VD_OP_SET_WCE), RNDSIZE(int), 2702 DKIOCSETWCE, STRINGIZE(DKIOCSETWCE), 2703 NULL, VD_IDENTITY_IN, VD_IDENTITY_OUT, B_TRUE}, 2704 {VD_OP_SET_DISKGEOM, STRINGIZE(VD_OP_SET_DISKGEOM), 2705 RNDSIZE(vd_geom_t), 2706 DKIOCSGEOM, STRINGIZE(DKIOCSGEOM), 2707 &dk_geom, vd_geom2dk_geom, NULL, B_TRUE}, 2708 {VD_OP_SET_VTOC, STRINGIZE(VD_OP_SET_VTOC), RNDSIZE(vd_vtoc_t), 2709 DKIOCSVTOC, STRINGIZE(DKIOCSVTOC), 2710 &vtoc, vd_vtoc2vtoc, NULL, B_TRUE}, 2711 {VD_OP_SET_EFI, STRINGIZE(VD_OP_SET_EFI), RNDSIZE(vd_efi_t), 2712 DKIOCSETEFI, STRINGIZE(DKIOCSETEFI), 2713
&dk_efi, vd_set_efi_in, vd_set_efi_out, B_TRUE}, 2714 2715 {VD_OP_SCSICMD, STRINGIZE(VD_OP_SCSICMD), RNDSIZE(vd_scsi_t), 2716 USCSICMD, STRINGIZE(USCSICMD), 2717 &uscsi, vd_scsicmd_in, vd_scsicmd_out, B_FALSE}, 2718 }; 2719 size_t nioctls = (sizeof (ioctl))/(sizeof (ioctl[0])); 2720 2721 2722 ASSERT(vd != NULL); 2723 ASSERT(request != NULL); 2724 ASSERT(request->slice < vd->nslices); 2725 2726 /* 2727 * Determine ioctl corresponding to caller's "operation" and 2728 * validate caller's "nbytes" 2729 */ 2730 for (i = 0; i < nioctls; i++) { 2731 if (request->operation == ioctl[i].operation) { 2732 /* LDC memory operations require 8-byte multiples */ 2733 ASSERT(ioctl[i].nbytes % sizeof (uint64_t) == 0); 2734 2735 if (request->operation == VD_OP_GET_EFI || 2736 request->operation == VD_OP_SET_EFI || 2737 request->operation == VD_OP_SCSICMD) { 2738 if (request->nbytes >= ioctl[i].nbytes) 2739 break; 2740 PR0("%s: Expected at least nbytes = %lu, " 2741 "got %lu", ioctl[i].operation_name, 2742 ioctl[i].nbytes, request->nbytes); 2743 return (EINVAL); 2744 } 2745 2746 if (request->nbytes != ioctl[i].nbytes) { 2747 PR0("%s: Expected nbytes = %lu, got %lu", 2748 ioctl[i].operation_name, ioctl[i].nbytes, 2749 request->nbytes); 2750 return (EINVAL); 2751 } 2752 2753 break; 2754 } 2755 } 2756 ASSERT(i < nioctls); /* because "operation" already validated */ 2757 2758 if (!(vd->open_flags & FWRITE) && ioctl[i].write) { 2759 PR0("%s fails because backend is opened read-only", 2760 ioctl[i].operation_name); 2761 request->status = EROFS; 2762 return (0); 2763 } 2764 2765 if (request->nbytes) 2766 buf = kmem_zalloc(request->nbytes, KM_SLEEP); 2767 status = vd_do_ioctl(vd, request, buf, &ioctl[i]); 2768 if (request->nbytes) 2769 kmem_free(buf, request->nbytes); 2770 2771 return (status); 2772 } 2773 2774 static int 2775 vd_get_devid(vd_task_t *task) 2776 { 2777 vd_t *vd = task->vd; 2778 vd_dring_payload_t *request = task->request; 2779 vd_devid_t *vd_devid; 2780 impl_devid_t *devid; 2781 int status, bufid_len, devid_len, len, sz; 2782 int bufbytes; 2783 2784 PR1("Get Device ID, nbytes=%ld", request->nbytes); 2785 2786 if (vd->file) { 2787 if (vd->file_devid == NULL) { 2788 PR2("No Device ID"); 2789 request->status = ENOENT; 2790 return (0); 2791 } else { 2792 sz = ddi_devid_sizeof(vd->file_devid); 2793 devid = kmem_alloc(sz, KM_SLEEP); 2794 bcopy(vd->file_devid, devid, sz); 2795 } 2796 } else { 2797 if (ddi_lyr_get_devid(vd->dev[request->slice], 2798 (ddi_devid_t *)&devid) != DDI_SUCCESS) { 2799 PR2("No Device ID"); 2800 request->status = ENOENT; 2801 return (0); 2802 } 2803 } 2804 2805 bufid_len = request->nbytes - sizeof (vd_devid_t) + 1; 2806 devid_len = DEVID_GETLEN(devid); 2807 2808 /* 2809 * Save the buffer size here for use in deallocation. 2810 * The actual number of bytes copied is returned in 2811 * the 'nbytes' field of the request structure. 2812 */ 2813 bufbytes = request->nbytes; 2814 2815 vd_devid = kmem_zalloc(bufbytes, KM_SLEEP); 2816 vd_devid->length = devid_len; 2817 vd_devid->type = DEVID_GETTYPE(devid); 2818 2819 len = (devid_len > bufid_len)? 
bufid_len : devid_len; 2820 2821 bcopy(devid->did_id, vd_devid->id, len); 2822 2823 request->status = 0; 2824 2825 /* LDC memory operations require 8-byte multiples */ 2826 ASSERT(request->nbytes % sizeof (uint64_t) == 0); 2827 2828 if ((status = ldc_mem_copy(vd->ldc_handle, (caddr_t)vd_devid, 0, 2829 &request->nbytes, request->cookie, request->ncookies, 2830 LDC_COPY_OUT)) != 0) { 2831 PR0("ldc_mem_copy() returned errno %d copying to client", 2832 status); 2833 } 2834 PR1("post mem_copy: nbytes=%ld", request->nbytes); 2835 2836 kmem_free(vd_devid, bufbytes); 2837 ddi_devid_free((ddi_devid_t)devid); 2838 2839 return (status); 2840 } 2841 2842 static int 2843 vd_scsi_reset(vd_t *vd) 2844 { 2845 int rval, status; 2846 struct uscsi_cmd uscsi = { 0 }; 2847 2848 uscsi.uscsi_flags = vd_scsi_debug | USCSI_RESET; 2849 uscsi.uscsi_timeout = vd_scsi_rdwr_timeout; 2850 2851 status = ldi_ioctl(vd->ldi_handle[0], USCSICMD, (intptr_t)&uscsi, 2852 (vd->open_flags | FKIOCTL), kcred, &rval); 2853 2854 return (status); 2855 } 2856 2857 static int 2858 vd_reset(vd_task_t *task) 2859 { 2860 vd_t *vd = task->vd; 2861 vd_dring_payload_t *request = task->request; 2862 2863 ASSERT(request->operation == VD_OP_RESET); 2864 ASSERT(vd->scsi); 2865 2866 PR0("Performing VD_OP_RESET"); 2867 2868 if (request->nbytes != 0) { 2869 PR0("VD_OP_RESET: Expected nbytes = 0, got %lu", 2870 request->nbytes); 2871 return (EINVAL); 2872 } 2873 2874 request->status = vd_scsi_reset(vd); 2875 2876 return (0); 2877 } 2878 2879 static int 2880 vd_get_capacity(vd_task_t *task) 2881 { 2882 int rv; 2883 size_t nbytes; 2884 vd_t *vd = task->vd; 2885 vd_dring_payload_t *request = task->request; 2886 vd_capacity_t vd_cap = { 0 }; 2887 2888 ASSERT(request->operation == VD_OP_GET_CAPACITY); 2889 ASSERT(vd->scsi); 2890 2891 PR0("Performing VD_OP_GET_CAPACITY"); 2892 2893 nbytes = request->nbytes; 2894 2895 if (nbytes != RNDSIZE(vd_capacity_t)) { 2896 PR0("VD_OP_GET_CAPACITY: Expected nbytes = %lu, got %lu", 2897 RNDSIZE(vd_capacity_t), nbytes); 2898 return (EINVAL); 2899 } 2900 2901 if (vd->vdisk_size == VD_SIZE_UNKNOWN) { 2902 if (vd_setup_mediainfo(vd) != 0) 2903 ASSERT(vd->vdisk_size == VD_SIZE_UNKNOWN); 2904 } 2905 2906 ASSERT(vd->vdisk_size != 0); 2907 2908 request->status = 0; 2909 2910 vd_cap.vdisk_block_size = vd->vdisk_block_size; 2911 vd_cap.vdisk_size = vd->vdisk_size; 2912 2913 if ((rv = ldc_mem_copy(vd->ldc_handle, (char *)&vd_cap, 0, &nbytes, 2914 request->cookie, request->ncookies, LDC_COPY_OUT)) != 0) { 2915 PR0("ldc_mem_copy() returned errno %d copying to client", rv); 2916 return (rv); 2917 } 2918 2919 return (0); 2920 } 2921 2922 static int 2923 vd_get_access(vd_task_t *task) 2924 { 2925 uint64_t access; 2926 int rv, rval = 0; 2927 size_t nbytes; 2928 vd_t *vd = task->vd; 2929 vd_dring_payload_t *request = task->request; 2930 2931 ASSERT(request->operation == VD_OP_GET_ACCESS); 2932 ASSERT(vd->scsi); 2933 2934 PR0("Performing VD_OP_GET_ACCESS"); 2935 2936 nbytes = request->nbytes; 2937 2938 if (nbytes != sizeof (uint64_t)) { 2939 PR0("VD_OP_GET_ACCESS: Expected nbytes = %lu, got %lu", 2940 sizeof (uint64_t), nbytes); 2941 return (EINVAL); 2942 } 2943 2944 request->status = ldi_ioctl(vd->ldi_handle[request->slice], MHIOCSTATUS, 2945 NULL, (vd->open_flags | FKIOCTL), kcred, &rval); 2946 2947 if (request->status != 0) 2948 return (0); 2949 2950 access = (rval == 0)? 
VD_ACCESS_ALLOWED : VD_ACCESS_DENIED; 2951 2952 if ((rv = ldc_mem_copy(vd->ldc_handle, (char *)&access, 0, &nbytes, 2953 request->cookie, request->ncookies, LDC_COPY_OUT)) != 0) { 2954 PR0("ldc_mem_copy() returned errno %d copying to client", rv); 2955 return (rv); 2956 } 2957 2958 return (0); 2959 } 2960 2961 static int 2962 vd_set_access(vd_task_t *task) 2963 { 2964 uint64_t flags; 2965 int rv, rval; 2966 size_t nbytes; 2967 vd_t *vd = task->vd; 2968 vd_dring_payload_t *request = task->request; 2969 2970 ASSERT(request->operation == VD_OP_SET_ACCESS); 2971 ASSERT(vd->scsi); 2972 2973 nbytes = request->nbytes; 2974 2975 if (nbytes != sizeof (uint64_t)) { 2976 PR0("VD_OP_SET_ACCESS: Expected nbytes = %lu, got %lu", 2977 sizeof (uint64_t), nbytes); 2978 return (EINVAL); 2979 } 2980 2981 if ((rv = ldc_mem_copy(vd->ldc_handle, (char *)&flags, 0, &nbytes, 2982 request->cookie, request->ncookies, LDC_COPY_IN)) != 0) { 2983 PR0("ldc_mem_copy() returned errno %d copying from client", rv); 2984 return (rv); 2985 } 2986 2987 if (flags == VD_ACCESS_SET_CLEAR) { 2988 PR0("Performing VD_OP_SET_ACCESS (CLEAR)"); 2989 request->status = ldi_ioctl(vd->ldi_handle[request->slice], 2990 MHIOCRELEASE, NULL, (vd->open_flags | FKIOCTL), kcred, 2991 &rval); 2992 if (request->status == 0) 2993 vd->ownership = B_FALSE; 2994 return (0); 2995 } 2996 2997 /* 2998 * As per the VIO spec, the PREEMPT and PRESERVE flags are only valid 2999 * when the EXCLUSIVE flag is set. 3000 */ 3001 if (!(flags & VD_ACCESS_SET_EXCLUSIVE)) { 3002 PR0("Invalid VD_OP_SET_ACCESS flags: 0x%lx", flags); 3003 request->status = EINVAL; 3004 return (0); 3005 } 3006 3007 switch (flags & (VD_ACCESS_SET_PREEMPT | VD_ACCESS_SET_PRESERVE)) { 3008 3009 case VD_ACCESS_SET_PREEMPT | VD_ACCESS_SET_PRESERVE: 3010 /* 3011 * Flags EXCLUSIVE and PREEMPT and PRESERVE. We have to 3012 * acquire exclusive access rights, preserve them and we 3013 * can use preemption. So we can use the MHIOCTKOWN ioctl. 3014 */ 3015 PR0("Performing VD_OP_SET_ACCESS (EXCLUSIVE|PREEMPT|PRESERVE)"); 3016 request->status = ldi_ioctl(vd->ldi_handle[request->slice], 3017 MHIOCTKOWN, NULL, (vd->open_flags | FKIOCTL), kcred, &rval); 3018 break; 3019 3020 case VD_ACCESS_SET_PRESERVE: 3021 /* 3022 * Flags EXCLUSIVE and PRESERVE. We have to acquire exclusive 3023 * access rights and preserve them, but not preempt any other 3024 * host. So we need to use the MHIOCTKOWN ioctl to enable the 3025 * "preserve" feature but we cannot call it directly 3026 * because it uses preemption. So before that, we use the 3027 * MHIOCQRESERVE ioctl to ensure we can get exclusive rights 3028 * without preempting anyone. 3029 */ 3030 PR0("Performing VD_OP_SET_ACCESS (EXCLUSIVE|PRESERVE)"); 3031 request->status = ldi_ioctl(vd->ldi_handle[request->slice], 3032 MHIOCQRESERVE, NULL, (vd->open_flags | FKIOCTL), kcred, 3033 &rval); 3034 if (request->status != 0) 3035 break; 3036 request->status = ldi_ioctl(vd->ldi_handle[request->slice], 3037 MHIOCTKOWN, NULL, (vd->open_flags | FKIOCTL), kcred, &rval); 3038 break; 3039 3040 case VD_ACCESS_SET_PREEMPT: 3041 /* 3042 * Flags EXCLUSIVE and PREEMPT. We have to acquire exclusive 3043 * access rights and we can use preemption. So we try to do 3044 * a SCSI reservation; if it fails, we reset the disk to clear 3045 * any reservation and we try to reserve again.
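	 *
	 * To summarize the flag handling in this switch (every case
	 * implies VD_ACCESS_SET_EXCLUSIVE):
	 *
	 *	PREEMPT | PRESERVE	MHIOCTKOWN
	 *	PRESERVE		MHIOCQRESERVE, then MHIOCTKOWN
	 *	PREEMPT			MHIOCQRESERVE, retried after a SCSI
	 *				reset if the first attempt fails
	 *	(neither)		MHIOCQRESERVE only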
3046 */ 3047 PR0("Performing VD_OP_SET_ACCESS (EXCLUSIVE|PREEMPT)"); 3048 request->status = ldi_ioctl(vd->ldi_handle[request->slice], 3049 MHIOCQRESERVE, NULL, (vd->open_flags | FKIOCTL), kcred, 3050 &rval); 3051 if (request->status == 0) 3052 break; 3053 3054 /* reset the disk */ 3055 (void) vd_scsi_reset(vd); 3056 3057 /* try again even if the reset has failed */ 3058 request->status = ldi_ioctl(vd->ldi_handle[request->slice], 3059 MHIOCQRESERVE, NULL, (vd->open_flags | FKIOCTL), kcred, 3060 &rval); 3061 break; 3062 3063 case 0: 3064 /* Flag EXCLUSIVE only. Just issue a SCSI reservation */ 3065 PR0("Performing VD_OP_SET_ACCESS (EXCLUSIVE)"); 3066 request->status = ldi_ioctl(vd->ldi_handle[request->slice], 3067 MHIOCQRESERVE, NULL, (vd->open_flags | FKIOCTL), kcred, 3068 &rval); 3069 break; 3070 } 3071 3072 if (request->status == 0) 3073 vd->ownership = B_TRUE; 3074 else 3075 PR0("VD_OP_SET_ACCESS: error %d", request->status); 3076 3077 return (0); 3078 } 3079 3080 static void 3081 vd_reset_access(vd_t *vd) 3082 { 3083 int status, rval; 3084 3085 if (vd->file || !vd->ownership) 3086 return; 3087 3088 PR0("Releasing disk ownership"); 3089 status = ldi_ioctl(vd->ldi_handle[0], MHIOCRELEASE, NULL, 3090 (vd->open_flags | FKIOCTL), kcred, &rval); 3091 3092 /* 3093 * An EACCES failure means that there is a reservation conflict, 3094 * so we are not the owner of the disk anymore. 3095 */ 3096 if (status == 0 || status == EACCES) { 3097 vd->ownership = B_FALSE; 3098 return; 3099 } 3100 3101 PR0("Fail to release ownership, error %d", status); 3102 3103 /* 3104 * We have failed to release the ownership, try to reset the disk 3105 * to release reservations. 3106 */ 3107 PR0("Resetting disk"); 3108 status = vd_scsi_reset(vd); 3109 3110 if (status != 0) 3111 PR0("Fail to reset disk, error %d", status); 3112 3113 /* whatever the result of the reset is, we try the release again */ 3114 status = ldi_ioctl(vd->ldi_handle[0], MHIOCRELEASE, NULL, 3115 (vd->open_flags | FKIOCTL), kcred, &rval); 3116 3117 if (status == 0 || status == EACCES) { 3118 vd->ownership = B_FALSE; 3119 return; 3120 } 3121 3122 PR0("Fail to release ownership, error %d", status); 3123 3124 /* 3125 * At this point we have done our best to try to reset the 3126 * access rights to the disk and we don't know if we still 3127 * own a reservation and if any mechanism to preserve the 3128 * ownership is still in place. The ultimate solution would 3129 * be to reset the system but this is usually not what we 3130 * want to happen. 
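	 *
	 * What happens next is controlled by the vd_reset_access_failure
	 * tunable tested below: A_REBOOT reboots the domain, A_DUMP panics
	 * it, and anything else only logs a warning. As a hedged sketch
	 * (assuming the usual /etc/system mechanism for setting vds
	 * tunables), an administrator could force a reboot on this failure
	 * with a line such as:
	 *
	 *	set vds:vd_reset_access_failure = <value of A_REBOOT>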
3131 */ 3132 3133 if (vd_reset_access_failure == A_REBOOT) { 3134 cmn_err(CE_WARN, VD_RESET_ACCESS_FAILURE_MSG 3135 ", rebooting the system", vd->device_path); 3136 (void) uadmin(A_SHUTDOWN, AD_BOOT, NULL); 3137 } else if (vd_reset_access_failure == A_DUMP) { 3138 panic(VD_RESET_ACCESS_FAILURE_MSG, vd->device_path); 3139 } 3140 3141 cmn_err(CE_WARN, VD_RESET_ACCESS_FAILURE_MSG, vd->device_path); 3142 } 3143 3144 /* 3145 * Define the supported operations once the functions for performing them have 3146 * been defined 3147 */ 3148 static const vds_operation_t vds_operation[] = { 3149 #define X(_s) #_s, _s 3150 {X(VD_OP_BREAD), vd_start_bio, vd_complete_bio}, 3151 {X(VD_OP_BWRITE), vd_start_bio, vd_complete_bio}, 3152 {X(VD_OP_FLUSH), vd_ioctl, NULL}, 3153 {X(VD_OP_GET_WCE), vd_ioctl, NULL}, 3154 {X(VD_OP_SET_WCE), vd_ioctl, NULL}, 3155 {X(VD_OP_GET_VTOC), vd_ioctl, NULL}, 3156 {X(VD_OP_SET_VTOC), vd_ioctl, NULL}, 3157 {X(VD_OP_GET_DISKGEOM), vd_ioctl, NULL}, 3158 {X(VD_OP_SET_DISKGEOM), vd_ioctl, NULL}, 3159 {X(VD_OP_GET_EFI), vd_ioctl, NULL}, 3160 {X(VD_OP_SET_EFI), vd_ioctl, NULL}, 3161 {X(VD_OP_GET_DEVID), vd_get_devid, NULL}, 3162 {X(VD_OP_SCSICMD), vd_ioctl, NULL}, 3163 {X(VD_OP_RESET), vd_reset, NULL}, 3164 {X(VD_OP_GET_CAPACITY), vd_get_capacity, NULL}, 3165 {X(VD_OP_SET_ACCESS), vd_set_access, NULL}, 3166 {X(VD_OP_GET_ACCESS), vd_get_access, NULL}, 3167 #undef X 3168 }; 3169 3170 static const size_t vds_noperations = 3171 (sizeof (vds_operation))/(sizeof (vds_operation[0])); 3172 3173 /* 3174 * Process a task specifying a client I/O request 3175 * 3176 * Parameters: 3177 * task - structure containing the request sent from client 3178 * 3179 * Return Value 3180 * 0 - success 3181 * ENOTSUP - Unknown/Unsupported VD_OP_XXX operation 3182 * EINVAL - Invalid disk slice 3183 * != 0 - some other non-zero return value from start function 3184 */ 3185 static int 3186 vd_do_process_task(vd_task_t *task) 3187 { 3188 int i; 3189 vd_t *vd = task->vd; 3190 vd_dring_payload_t *request = task->request; 3191 3192 ASSERT(vd != NULL); 3193 ASSERT(request != NULL); 3194 3195 /* Find the requested operation */ 3196 for (i = 0; i < vds_noperations; i++) { 3197 if (request->operation == vds_operation[i].operation) { 3198 /* all operations should have a start func */ 3199 ASSERT(vds_operation[i].start != NULL); 3200 3201 task->completef = vds_operation[i].complete; 3202 break; 3203 } 3204 } 3205 3206 /* 3207 * We need to check that the requested operation is permitted 3208 * for the particular client that sent it or that the loop above 3209 * did not complete without finding the operation type (indicating 3210 * that the requested operation is unknown/unimplemented) 3211 */ 3212 if ((VD_OP_SUPPORTED(vd->operations, request->operation) == B_FALSE) || 3213 (i == vds_noperations)) { 3214 PR0("Unsupported operation %u", request->operation); 3215 request->status = ENOTSUP; 3216 return (0); 3217 } 3218 3219 /* Range-check slice */ 3220 if (request->slice >= vd->nslices && 3221 (vd->vdisk_type != VD_DISK_TYPE_DISK || 3222 request->slice != VD_SLICE_NONE)) { 3223 PR0("Invalid \"slice\" %u (max %u) for virtual disk", 3224 request->slice, (vd->nslices - 1)); 3225 return (EINVAL); 3226 } 3227 3228 /* 3229 * Call the function pointer that starts the operation. 
3230 */ 3231 return (vds_operation[i].start(task)); 3232 } 3233 3234 /* 3235 * Description: 3236 * This function is called by both the in-band and descriptor ring 3237 * message processing functions paths to actually execute the task 3238 * requested by the vDisk client. It in turn calls its worker 3239 * function, vd_do_process_task(), to carry our the request. 3240 * 3241 * Any transport errors (e.g. LDC errors, vDisk protocol errors) are 3242 * saved in the 'status' field of the task and are propagated back 3243 * up the call stack to trigger a NACK 3244 * 3245 * Any request errors (e.g. ENOTTY from an ioctl) are saved in 3246 * the 'status' field of the request and result in an ACK being sent 3247 * by the completion handler. 3248 * 3249 * Parameters: 3250 * task - structure containing the request sent from client 3251 * 3252 * Return Value 3253 * 0 - successful synchronous request. 3254 * != 0 - transport error (e.g. LDC errors, vDisk protocol) 3255 * EINPROGRESS - task will be finished in a completion handler 3256 */ 3257 static int 3258 vd_process_task(vd_task_t *task) 3259 { 3260 vd_t *vd = task->vd; 3261 int status; 3262 3263 DTRACE_PROBE1(task__start, vd_task_t *, task); 3264 3265 task->status = vd_do_process_task(task); 3266 3267 /* 3268 * If the task processing function returned EINPROGRESS indicating 3269 * that the task needs completing then schedule a taskq entry to 3270 * finish it now. 3271 * 3272 * Otherwise the task processing function returned either zero 3273 * indicating that the task was finished in the start function (and we 3274 * don't need to wait in a completion function) or the start function 3275 * returned an error - in both cases all that needs to happen is the 3276 * notification to the vDisk client higher up the call stack. 3277 * If the task was using a Descriptor Ring, we need to mark it as done 3278 * at this stage. 3279 */ 3280 if (task->status == EINPROGRESS) { 3281 /* Queue a task to complete the operation */ 3282 (void) ddi_taskq_dispatch(vd->completionq, vd_complete, 3283 task, DDI_SLEEP); 3284 3285 } else if (!vd->reset_state && (vd->xfer_mode == VIO_DRING_MODE_V1_0)) { 3286 /* Update the dring element if it's a dring client */ 3287 status = vd_mark_elem_done(vd, task->index, 3288 task->request->status, task->request->nbytes); 3289 if (status == ECONNRESET) 3290 vd_mark_in_reset(vd); 3291 } 3292 3293 return (task->status); 3294 } 3295 3296 /* 3297 * Return true if the "type", "subtype", and "env" fields of the "tag" first 3298 * argument match the corresponding remaining arguments; otherwise, return false 3299 */ 3300 boolean_t 3301 vd_msgtype(vio_msg_tag_t *tag, int type, int subtype, int env) 3302 { 3303 return ((tag->vio_msgtype == type) && 3304 (tag->vio_subtype == subtype) && 3305 (tag->vio_subtype_env == env)) ? B_TRUE : B_FALSE; 3306 } 3307 3308 /* 3309 * Check whether the major/minor version specified in "ver_msg" is supported 3310 * by this server. 
3311 */ 3312 static boolean_t 3313 vds_supported_version(vio_ver_msg_t *ver_msg) 3314 { 3315 for (int i = 0; i < vds_num_versions; i++) { 3316 ASSERT(vds_version[i].major > 0); 3317 ASSERT((i == 0) || 3318 (vds_version[i].major < vds_version[i-1].major)); 3319 3320 /* 3321 * If the major versions match, adjust the minor version, if 3322 * necessary, down to the highest value supported by this 3323 * server and return true so this message will get "ack"ed; 3324 * the client should also support all minor versions lower 3325 * than the value it sent 3326 */ 3327 if (ver_msg->ver_major == vds_version[i].major) { 3328 if (ver_msg->ver_minor > vds_version[i].minor) { 3329 PR0("Adjusting minor version from %u to %u", 3330 ver_msg->ver_minor, vds_version[i].minor); 3331 ver_msg->ver_minor = vds_version[i].minor; 3332 } 3333 return (B_TRUE); 3334 } 3335 3336 /* 3337 * If the message contains a higher major version number, set 3338 * the message's major/minor versions to the current values 3339 * and return false, so this message will get "nack"ed with 3340 * these values, and the client will potentially try again 3341 * with the same or a lower version 3342 */ 3343 if (ver_msg->ver_major > vds_version[i].major) { 3344 ver_msg->ver_major = vds_version[i].major; 3345 ver_msg->ver_minor = vds_version[i].minor; 3346 return (B_FALSE); 3347 } 3348 3349 /* 3350 * Otherwise, the message's major version is less than the 3351 * current major version, so continue the loop to the next 3352 * (lower) supported version 3353 */ 3354 } 3355 3356 /* 3357 * No common version was found; "ground" the version pair in the 3358 * message to terminate negotiation 3359 */ 3360 ver_msg->ver_major = 0; 3361 ver_msg->ver_minor = 0; 3362 return (B_FALSE); 3363 } 3364 3365 /* 3366 * Process a version message from a client. vds expects to receive version 3367 * messages from clients seeking service, but never issues version messages 3368 * itself; therefore, vds can ACK or NACK client version messages, but does 3369 * not expect to receive version-message ACKs or NACKs (and will treat such 3370 * messages as invalid). 3371 */ 3372 static int 3373 vd_process_ver_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 3374 { 3375 vio_ver_msg_t *ver_msg = (vio_ver_msg_t *)msg; 3376 3377 3378 ASSERT(msglen >= sizeof (msg->tag)); 3379 3380 if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, 3381 VIO_VER_INFO)) { 3382 return (ENOMSG); /* not a version message */ 3383 } 3384 3385 if (msglen != sizeof (*ver_msg)) { 3386 PR0("Expected %lu-byte version message; " 3387 "received %lu bytes", sizeof (*ver_msg), msglen); 3388 return (EBADMSG); 3389 } 3390 3391 if (ver_msg->dev_class != VDEV_DISK) { 3392 PR0("Expected device class %u (disk); received %u", 3393 VDEV_DISK, ver_msg->dev_class); 3394 return (EBADMSG); 3395 } 3396 3397 /* 3398 * We're talking to the expected kind of client; set our device class 3399 * for "ack/nack" back to the client 3400 */ 3401 ver_msg->dev_class = VDEV_DISK_SERVER; 3402 3403 /* 3404 * Check whether the (valid) version message specifies a version 3405 * supported by this server. 
If the version is not supported, return 3406 * EBADMSG so the message will get "nack"ed; vds_supported_version() 3407 * will have updated the message with a supported version for the 3408 * client to consider 3409 */ 3410 if (!vds_supported_version(ver_msg)) 3411 return (EBADMSG); 3412 3413 3414 /* 3415 * A version has been agreed upon; use the client's SID for 3416 * communication on this channel now 3417 */ 3418 ASSERT(!(vd->initialized & VD_SID)); 3419 vd->sid = ver_msg->tag.vio_sid; 3420 vd->initialized |= VD_SID; 3421 3422 /* 3423 * Store the negotiated major and minor version values in the "vd" data 3424 * structure so that we can check if certain operations are supported 3425 * by the client. 3426 */ 3427 vd->version.major = ver_msg->ver_major; 3428 vd->version.minor = ver_msg->ver_minor; 3429 3430 PR0("Using major version %u, minor version %u", 3431 ver_msg->ver_major, ver_msg->ver_minor); 3432 return (0); 3433 } 3434 3435 static void 3436 vd_set_exported_operations(vd_t *vd) 3437 { 3438 vd->operations = 0; /* clear field */ 3439 3440 /* 3441 * We need to check from the highest version supported to the 3442 * lowest because versions with a higher minor number implicitly 3443 * support versions with a lower minor number. 3444 */ 3445 if (vio_ver_is_supported(vd->version, 1, 1)) { 3446 ASSERT(vd->open_flags & FREAD); 3447 vd->operations |= VD_OP_MASK_READ; 3448 3449 if (vd->open_flags & FWRITE) 3450 vd->operations |= VD_OP_MASK_WRITE; 3451 3452 if (vd->scsi) 3453 vd->operations |= VD_OP_MASK_SCSI; 3454 3455 if (vd->file && vd_file_is_iso_image(vd)) { 3456 /* 3457 * can't write to ISO images, make sure that write 3458 * support is not set in case administrator did not 3459 * use "options=ro" when doing an ldm add-vdsdev 3460 */ 3461 vd->operations &= ~VD_OP_MASK_WRITE; 3462 } 3463 } else if (vio_ver_is_supported(vd->version, 1, 0)) { 3464 vd->operations = VD_OP_MASK_READ | VD_OP_MASK_WRITE; 3465 } 3466 3467 /* we should have already agreed on a version */ 3468 ASSERT(vd->operations != 0); 3469 } 3470 3471 static int 3472 vd_process_attr_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 3473 { 3474 vd_attr_msg_t *attr_msg = (vd_attr_msg_t *)msg; 3475 int status, retry = 0; 3476 3477 3478 ASSERT(msglen >= sizeof (msg->tag)); 3479 3480 if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, 3481 VIO_ATTR_INFO)) { 3482 PR0("Message is not an attribute message"); 3483 return (ENOMSG); 3484 } 3485 3486 if (msglen != sizeof (*attr_msg)) { 3487 PR0("Expected %lu-byte attribute message; " 3488 "received %lu bytes", sizeof (*attr_msg), msglen); 3489 return (EBADMSG); 3490 } 3491 3492 if (attr_msg->max_xfer_sz == 0) { 3493 PR0("Received maximum transfer size of 0 from client"); 3494 return (EBADMSG); 3495 } 3496 3497 if ((attr_msg->xfer_mode != VIO_DESC_MODE) && 3498 (attr_msg->xfer_mode != VIO_DRING_MODE_V1_0)) { 3499 PR0("Client requested unsupported transfer mode"); 3500 return (EBADMSG); 3501 } 3502 3503 /* 3504 * check if the underlying disk is ready, if not try accessing 3505 * the device again. 
Open the vdisk device and extract info 3506 * about it, as this is needed to respond to the attr info msg 3507 */ 3508 if ((vd->initialized & VD_DISK_READY) == 0) { 3509 PR0("Retry setting up disk (%s)", vd->device_path); 3510 do { 3511 status = vd_setup_vd(vd); 3512 if (status != EAGAIN || ++retry > vds_dev_retries) 3513 break; 3514 3515 /* incremental delay */ 3516 delay(drv_usectohz(vds_dev_delay)); 3517 3518 /* if vdisk is no longer enabled - return error */ 3519 if (!vd_enabled(vd)) 3520 return (ENXIO); 3521 3522 } while (status == EAGAIN); 3523 3524 if (status) 3525 return (ENXIO); 3526 3527 vd->initialized |= VD_DISK_READY; 3528 ASSERT(vd->nslices > 0 && vd->nslices <= V_NUMPAR); 3529 PR0("vdisk_type = %s, volume = %s, file = %s, nslices = %u", 3530 ((vd->vdisk_type == VD_DISK_TYPE_DISK) ? "disk" : "slice"), 3531 (vd->volume ? "yes" : "no"), 3532 (vd->file ? "yes" : "no"), 3533 vd->nslices); 3534 } 3535 3536 /* Success: valid message and transfer mode */ 3537 vd->xfer_mode = attr_msg->xfer_mode; 3538 3539 if (vd->xfer_mode == VIO_DESC_MODE) { 3540 3541 /* 3542 * The vd_dring_inband_msg_t contains one cookie; need room 3543 * for up to n-1 more cookies, where "n" is the number of full 3544 * pages plus possibly one partial page required to cover 3545 * "max_xfer_sz". Add room for one more cookie if 3546 * "max_xfer_sz" isn't an integral multiple of the page size. 3547 * Must first get the maximum transfer size in bytes. 3548 */ 3549 size_t max_xfer_bytes = attr_msg->vdisk_block_size ? 3550 attr_msg->vdisk_block_size*attr_msg->max_xfer_sz : 3551 attr_msg->max_xfer_sz; 3552 size_t max_inband_msglen = 3553 sizeof (vd_dring_inband_msg_t) + 3554 ((max_xfer_bytes/PAGESIZE + 3555 ((max_xfer_bytes % PAGESIZE) ? 1 : 0))* 3556 (sizeof (ldc_mem_cookie_t))); 3557 3558 /* 3559 * Set the maximum expected message length to 3560 * accommodate in-band-descriptor messages with all 3561 * their cookies 3562 */ 3563 vd->max_msglen = MAX(vd->max_msglen, max_inband_msglen); 3564 3565 /* 3566 * Initialize the data structure for processing in-band I/O 3567 * request descriptors 3568 */ 3569 vd->inband_task.vd = vd; 3570 vd->inband_task.msg = kmem_alloc(vd->max_msglen, KM_SLEEP); 3571 vd->inband_task.index = 0; 3572 vd->inband_task.type = VD_FINAL_RANGE_TASK; /* range == 1 */ 3573 } 3574 3575 /* Return the device's block size and max transfer size to the client */ 3576 attr_msg->vdisk_block_size = vd->vdisk_block_size; 3577 attr_msg->max_xfer_sz = vd->max_xfer_sz; 3578 3579 attr_msg->vdisk_size = vd->vdisk_size; 3580 attr_msg->vdisk_type = vd->vdisk_type; 3581 attr_msg->vdisk_media = vd->vdisk_media; 3582 3583 /* Discover and save the list of supported VD_OP_XXX operations */ 3584 vd_set_exported_operations(vd); 3585 attr_msg->operations = vd->operations; 3586 3587 PR0("%s", VD_CLIENT(vd)); 3588 3589 ASSERT(vd->dring_task == NULL); 3590 3591 return (0); 3592 } 3593 3594 static int 3595 vd_process_dring_reg_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 3596 { 3597 int status; 3598 size_t expected; 3599 ldc_mem_info_t dring_minfo; 3600 vio_dring_reg_msg_t *reg_msg = (vio_dring_reg_msg_t *)msg; 3601 3602 3603 ASSERT(msglen >= sizeof (msg->tag)); 3604 3605 if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, 3606 VIO_DRING_REG)) { 3607 PR0("Message is not a register-dring message"); 3608 return (ENOMSG); 3609 } 3610 3611 if (msglen < sizeof (*reg_msg)) { 3612 PR0("Expected at least %lu-byte register-dring message; " 3613 "received %lu bytes", sizeof (*reg_msg), msglen); 3614 return (EBADMSG); 3615 } 3616 
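	/*
	 * The register-dring message embeds one LDC cookie and grows by one
	 * cookie per additional "ncookies" entry, so the expected length is
	 * computed rather than compared against a fixed size. As a worked
	 * example (sizes are illustrative, not taken from any header): with
	 * sizeof (*reg_msg) == 64, sizeof (reg_msg->cookie[0]) == 16 and
	 * ncookies == 3, the check below requires msglen to be
	 * 64 + (3 - 1) * 16 == 96 bytes.
	 */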
3617 expected = sizeof (*reg_msg) + 3618 (reg_msg->ncookies - 1)*(sizeof (reg_msg->cookie[0])); 3619 if (msglen != expected) { 3620 PR0("Expected %lu-byte register-dring message; " 3621 "received %lu bytes", expected, msglen); 3622 return (EBADMSG); 3623 } 3624 3625 if (vd->initialized & VD_DRING) { 3626 PR0("A dring was previously registered; only support one"); 3627 return (EBADMSG); 3628 } 3629 3630 if (reg_msg->num_descriptors > INT32_MAX) { 3631 PR0("reg_msg->num_descriptors = %u; must be <= %u (%s)", 3632 reg_msg->ncookies, INT32_MAX, STRINGIZE(INT32_MAX)); 3633 return (EBADMSG); 3634 } 3635 3636 if (reg_msg->ncookies != 1) { 3637 /* 3638 * In addition to fixing the assertion in the success case 3639 * below, supporting drings which require more than one 3640 * "cookie" requires increasing the value of vd->max_msglen 3641 * somewhere in the code path prior to receiving the message 3642 * which results in calling this function. Note that without 3643 * making this change, the larger message size required to 3644 * accommodate multiple cookies cannot be successfully 3645 * received, so this function will not even get called. 3646 * Gracefully accommodating more dring cookies might 3647 * reasonably demand exchanging an additional attribute or 3648 * making a minor protocol adjustment 3649 */ 3650 PR0("reg_msg->ncookies = %u != 1", reg_msg->ncookies); 3651 return (EBADMSG); 3652 } 3653 3654 status = ldc_mem_dring_map(vd->ldc_handle, reg_msg->cookie, 3655 reg_msg->ncookies, reg_msg->num_descriptors, 3656 reg_msg->descriptor_size, LDC_DIRECT_MAP, &vd->dring_handle); 3657 if (status != 0) { 3658 PR0("ldc_mem_dring_map() returned errno %d", status); 3659 return (status); 3660 } 3661 3662 /* 3663 * To remove the need for this assertion, must call 3664 * ldc_mem_dring_nextcookie() successfully ncookies-1 times after a 3665 * successful call to ldc_mem_dring_map() 3666 */ 3667 ASSERT(reg_msg->ncookies == 1); 3668 3669 if ((status = 3670 ldc_mem_dring_info(vd->dring_handle, &dring_minfo)) != 0) { 3671 PR0("ldc_mem_dring_info() returned errno %d", status); 3672 if ((status = ldc_mem_dring_unmap(vd->dring_handle)) != 0) 3673 PR0("ldc_mem_dring_unmap() returned errno %d", status); 3674 return (status); 3675 } 3676 3677 if (dring_minfo.vaddr == NULL) { 3678 PR0("Descriptor ring virtual address is NULL"); 3679 return (ENXIO); 3680 } 3681 3682 3683 /* Initialize for valid message and mapped dring */ 3684 PR1("descriptor size = %u, dring length = %u", 3685 vd->descriptor_size, vd->dring_len); 3686 vd->initialized |= VD_DRING; 3687 vd->dring_ident = 1; /* "There Can Be Only One" */ 3688 vd->dring = dring_minfo.vaddr; 3689 vd->descriptor_size = reg_msg->descriptor_size; 3690 vd->dring_len = reg_msg->num_descriptors; 3691 reg_msg->dring_ident = vd->dring_ident; 3692 3693 /* 3694 * Allocate and initialize a "shadow" array of data structures for 3695 * tasks to process I/O requests in dring elements 3696 */ 3697 vd->dring_task = 3698 kmem_zalloc((sizeof (*vd->dring_task)) * vd->dring_len, KM_SLEEP); 3699 for (int i = 0; i < vd->dring_len; i++) { 3700 vd->dring_task[i].vd = vd; 3701 vd->dring_task[i].index = i; 3702 vd->dring_task[i].request = &VD_DRING_ELEM(i)->payload; 3703 3704 status = ldc_mem_alloc_handle(vd->ldc_handle, 3705 &(vd->dring_task[i].mhdl)); 3706 if (status) { 3707 PR0("ldc_mem_alloc_handle() returned err %d ", status); 3708 return (ENXIO); 3709 } 3710 3711 vd->dring_task[i].msg = kmem_alloc(vd->max_msglen, KM_SLEEP); 3712 } 3713 3714 return (0); 3715 } 3716 3717 static int 3718 
vd_process_dring_unreg_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 3719 { 3720 vio_dring_unreg_msg_t *unreg_msg = (vio_dring_unreg_msg_t *)msg; 3721 3722 3723 ASSERT(msglen >= sizeof (msg->tag)); 3724 3725 if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, 3726 VIO_DRING_UNREG)) { 3727 PR0("Message is not an unregister-dring message"); 3728 return (ENOMSG); 3729 } 3730 3731 if (msglen != sizeof (*unreg_msg)) { 3732 PR0("Expected %lu-byte unregister-dring message; " 3733 "received %lu bytes", sizeof (*unreg_msg), msglen); 3734 return (EBADMSG); 3735 } 3736 3737 if (unreg_msg->dring_ident != vd->dring_ident) { 3738 PR0("Expected dring ident %lu; received %lu", 3739 vd->dring_ident, unreg_msg->dring_ident); 3740 return (EBADMSG); 3741 } 3742 3743 return (0); 3744 } 3745 3746 static int 3747 process_rdx_msg(vio_msg_t *msg, size_t msglen) 3748 { 3749 ASSERT(msglen >= sizeof (msg->tag)); 3750 3751 if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_RDX)) { 3752 PR0("Message is not an RDX message"); 3753 return (ENOMSG); 3754 } 3755 3756 if (msglen != sizeof (vio_rdx_msg_t)) { 3757 PR0("Expected %lu-byte RDX message; received %lu bytes", 3758 sizeof (vio_rdx_msg_t), msglen); 3759 return (EBADMSG); 3760 } 3761 3762 PR0("Valid RDX message"); 3763 return (0); 3764 } 3765 3766 static int 3767 vd_check_seq_num(vd_t *vd, uint64_t seq_num) 3768 { 3769 if ((vd->initialized & VD_SEQ_NUM) && (seq_num != vd->seq_num + 1)) { 3770 PR0("Received seq_num %lu; expected %lu", 3771 seq_num, (vd->seq_num + 1)); 3772 PR0("initiating soft reset"); 3773 vd_need_reset(vd, B_FALSE); 3774 return (1); 3775 } 3776 3777 vd->seq_num = seq_num; 3778 vd->initialized |= VD_SEQ_NUM; /* superfluous after first time... */ 3779 return (0); 3780 } 3781 3782 /* 3783 * Return the expected size of an inband-descriptor message with all the 3784 * cookies it claims to include 3785 */ 3786 static size_t 3787 expected_inband_size(vd_dring_inband_msg_t *msg) 3788 { 3789 return ((sizeof (*msg)) + 3790 (msg->payload.ncookies - 1)*(sizeof (msg->payload.cookie[0]))); 3791 } 3792 3793 /* 3794 * Process an in-band descriptor message: used with clients like OBP, with 3795 * which vds exchanges descriptors within VIO message payloads, rather than 3796 * operating on them within a descriptor ring 3797 */ 3798 static int 3799 vd_process_desc_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 3800 { 3801 size_t expected; 3802 vd_dring_inband_msg_t *desc_msg = (vd_dring_inband_msg_t *)msg; 3803 3804 3805 ASSERT(msglen >= sizeof (msg->tag)); 3806 3807 if (!vd_msgtype(&msg->tag, VIO_TYPE_DATA, VIO_SUBTYPE_INFO, 3808 VIO_DESC_DATA)) { 3809 PR1("Message is not an in-band-descriptor message"); 3810 return (ENOMSG); 3811 } 3812 3813 if (msglen < sizeof (*desc_msg)) { 3814 PR0("Expected at least %lu-byte descriptor message; " 3815 "received %lu bytes", sizeof (*desc_msg), msglen); 3816 return (EBADMSG); 3817 } 3818 3819 if (msglen != (expected = expected_inband_size(desc_msg))) { 3820 PR0("Expected %lu-byte descriptor message; " 3821 "received %lu bytes", expected, msglen); 3822 return (EBADMSG); 3823 } 3824 3825 if (vd_check_seq_num(vd, desc_msg->hdr.seq_num) != 0) 3826 return (EBADMSG); 3827 3828 /* 3829 * Valid message: Set up the in-band descriptor task and process the 3830 * request. 
Arrange to acknowledge the client's message, unless an 3831 * error processing the descriptor task results in setting 3832 * VIO_SUBTYPE_NACK 3833 */ 3834 PR1("Valid in-band-descriptor message"); 3835 msg->tag.vio_subtype = VIO_SUBTYPE_ACK; 3836 3837 ASSERT(vd->inband_task.msg != NULL); 3838 3839 bcopy(msg, vd->inband_task.msg, msglen); 3840 vd->inband_task.msglen = msglen; 3841 3842 /* 3843 * The task request is now the payload of the message 3844 * that was just copied into the body of the task. 3845 */ 3846 desc_msg = (vd_dring_inband_msg_t *)vd->inband_task.msg; 3847 vd->inband_task.request = &desc_msg->payload; 3848 3849 return (vd_process_task(&vd->inband_task)); 3850 } 3851 3852 static int 3853 vd_process_element(vd_t *vd, vd_task_type_t type, uint32_t idx, 3854 vio_msg_t *msg, size_t msglen) 3855 { 3856 int status; 3857 boolean_t ready; 3858 vd_dring_entry_t *elem = VD_DRING_ELEM(idx); 3859 3860 3861 /* Accept the updated dring element */ 3862 if ((status = ldc_mem_dring_acquire(vd->dring_handle, idx, idx)) != 0) { 3863 PR0("ldc_mem_dring_acquire() returned errno %d", status); 3864 return (status); 3865 } 3866 ready = (elem->hdr.dstate == VIO_DESC_READY); 3867 if (ready) { 3868 elem->hdr.dstate = VIO_DESC_ACCEPTED; 3869 } else { 3870 PR0("descriptor %u not ready", idx); 3871 VD_DUMP_DRING_ELEM(elem); 3872 } 3873 if ((status = ldc_mem_dring_release(vd->dring_handle, idx, idx)) != 0) { 3874 PR0("ldc_mem_dring_release() returned errno %d", status); 3875 return (status); 3876 } 3877 if (!ready) 3878 return (EBUSY); 3879 3880 3881 /* Initialize a task and process the accepted element */ 3882 PR1("Processing dring element %u", idx); 3883 vd->dring_task[idx].type = type; 3884 3885 /* duplicate msg buf for cookies etc. */ 3886 bcopy(msg, vd->dring_task[idx].msg, msglen); 3887 3888 vd->dring_task[idx].msglen = msglen; 3889 return (vd_process_task(&vd->dring_task[idx])); 3890 } 3891 3892 static int 3893 vd_process_element_range(vd_t *vd, int start, int end, 3894 vio_msg_t *msg, size_t msglen) 3895 { 3896 int i, n, nelem, status = 0; 3897 boolean_t inprogress = B_FALSE; 3898 vd_task_type_t type; 3899 3900 3901 ASSERT(start >= 0); 3902 ASSERT(end >= 0); 3903 3904 /* 3905 * Arrange to acknowledge the client's message, unless an error 3906 * processing one of the dring elements results in setting 3907 * VIO_SUBTYPE_NACK 3908 */ 3909 msg->tag.vio_subtype = VIO_SUBTYPE_ACK; 3910 3911 /* 3912 * Process the dring elements in the range 3913 */ 3914 nelem = ((end < start) ? end + vd->dring_len : end) - start + 1; 3915 for (i = start, n = nelem; n > 0; i = (i + 1) % vd->dring_len, n--) { 3916 ((vio_dring_msg_t *)msg)->end_idx = i; 3917 type = (n == 1) ? VD_FINAL_RANGE_TASK : VD_NONFINAL_RANGE_TASK; 3918 status = vd_process_element(vd, type, i, msg, msglen); 3919 if (status == EINPROGRESS) 3920 inprogress = B_TRUE; 3921 else if (status != 0) 3922 break; 3923 } 3924 3925 /* 3926 * If some, but not all, operations of a multi-element range are in 3927 * progress, wait for other operations to complete before returning 3928 * (which will result in "ack" or "nack" of the message). Note that 3929 * all outstanding operations will need to complete, not just the ones 3930 * corresponding to the current range of dring elements; howevever, as 3931 * this situation is an error case, performance is less critical. 
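 *
 * (ddi_taskq_wait(9F) does not return until every task dispatched to the
 * queue so far has finished, which is what provides that barrier.  As an
 * aside, the range arithmetic above wraps around the end of the ring:
 * with, say, a 32-entry dring, start = 30 and end = 1 give
 * nelem = (1 + 32) - 30 + 1 = 4, i.e. elements 30, 31, 0 and 1.)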
3932 */ 3933 if ((nelem > 1) && (status != EINPROGRESS) && inprogress) 3934 ddi_taskq_wait(vd->completionq); 3935 3936 return (status); 3937 } 3938 3939 static int 3940 vd_process_dring_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 3941 { 3942 vio_dring_msg_t *dring_msg = (vio_dring_msg_t *)msg; 3943 3944 3945 ASSERT(msglen >= sizeof (msg->tag)); 3946 3947 if (!vd_msgtype(&msg->tag, VIO_TYPE_DATA, VIO_SUBTYPE_INFO, 3948 VIO_DRING_DATA)) { 3949 PR1("Message is not a dring-data message"); 3950 return (ENOMSG); 3951 } 3952 3953 if (msglen != sizeof (*dring_msg)) { 3954 PR0("Expected %lu-byte dring message; received %lu bytes", 3955 sizeof (*dring_msg), msglen); 3956 return (EBADMSG); 3957 } 3958 3959 if (vd_check_seq_num(vd, dring_msg->seq_num) != 0) 3960 return (EBADMSG); 3961 3962 if (dring_msg->dring_ident != vd->dring_ident) { 3963 PR0("Expected dring ident %lu; received ident %lu", 3964 vd->dring_ident, dring_msg->dring_ident); 3965 return (EBADMSG); 3966 } 3967 3968 if (dring_msg->start_idx >= vd->dring_len) { 3969 PR0("\"start_idx\" = %u; must be less than %u", 3970 dring_msg->start_idx, vd->dring_len); 3971 return (EBADMSG); 3972 } 3973 3974 if ((dring_msg->end_idx < 0) || 3975 (dring_msg->end_idx >= vd->dring_len)) { 3976 PR0("\"end_idx\" = %u; must be >= 0 and less than %u", 3977 dring_msg->end_idx, vd->dring_len); 3978 return (EBADMSG); 3979 } 3980 3981 /* Valid message; process range of updated dring elements */ 3982 PR1("Processing descriptor range, start = %u, end = %u", 3983 dring_msg->start_idx, dring_msg->end_idx); 3984 return (vd_process_element_range(vd, dring_msg->start_idx, 3985 dring_msg->end_idx, msg, msglen)); 3986 } 3987 3988 static int 3989 recv_msg(ldc_handle_t ldc_handle, void *msg, size_t *nbytes) 3990 { 3991 int retry, status; 3992 size_t size = *nbytes; 3993 3994 3995 for (retry = 0, status = ETIMEDOUT; 3996 retry < vds_ldc_retries && status == ETIMEDOUT; 3997 retry++) { 3998 PR1("ldc_read() attempt %d", (retry + 1)); 3999 *nbytes = size; 4000 status = ldc_read(ldc_handle, msg, nbytes); 4001 } 4002 4003 if (status) { 4004 PR0("ldc_read() returned errno %d", status); 4005 if (status != ECONNRESET) 4006 return (ENOMSG); 4007 return (status); 4008 } else if (*nbytes == 0) { 4009 PR1("ldc_read() returned 0 and no message read"); 4010 return (ENOMSG); 4011 } 4012 4013 PR1("RCVD %lu-byte message", *nbytes); 4014 return (0); 4015 } 4016 4017 static int 4018 vd_do_process_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 4019 { 4020 int status; 4021 4022 4023 PR1("Processing (%x/%x/%x) message", msg->tag.vio_msgtype, 4024 msg->tag.vio_subtype, msg->tag.vio_subtype_env); 4025 #ifdef DEBUG 4026 vd_decode_tag(msg); 4027 #endif 4028 4029 /* 4030 * Validate session ID up front, since it applies to all messages 4031 * once set 4032 */ 4033 if ((msg->tag.vio_sid != vd->sid) && (vd->initialized & VD_SID)) { 4034 PR0("Expected SID %u, received %u", vd->sid, 4035 msg->tag.vio_sid); 4036 return (EBADMSG); 4037 } 4038 4039 PR1("\tWhile in state %d (%s)", vd->state, vd_decode_state(vd->state)); 4040 4041 /* 4042 * Process the received message based on connection state 4043 */ 4044 switch (vd->state) { 4045 case VD_STATE_INIT: /* expect version message */ 4046 if ((status = vd_process_ver_msg(vd, msg, msglen)) != 0) 4047 return (status); 4048 4049 /* Version negotiated, move to that state */ 4050 vd->state = VD_STATE_VER; 4051 return (0); 4052 4053 case VD_STATE_VER: /* expect attribute message */ 4054 if ((status = vd_process_attr_msg(vd, msg, msglen)) != 0) 4055 return (status); 4056 
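/*
 * Note: a complete handshake moves the connection through
 * VD_STATE_INIT -> VD_STATE_VER -> VD_STATE_ATTR, and then either
 * straight to VD_STATE_DATA (in-band descriptor mode, after RDX) or
 * through VD_STATE_DRING to VD_STATE_DATA (dring mode, after
 * register-dring and RDX), as the remaining cases below show.
 */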
4057 /* Attributes exchanged, move to that state */ 4058 vd->state = VD_STATE_ATTR; 4059 return (0); 4060 4061 case VD_STATE_ATTR: 4062 switch (vd->xfer_mode) { 4063 case VIO_DESC_MODE: /* expect RDX message */ 4064 if ((status = process_rdx_msg(msg, msglen)) != 0) 4065 return (status); 4066 4067 /* Ready to receive in-band descriptors */ 4068 vd->state = VD_STATE_DATA; 4069 return (0); 4070 4071 case VIO_DRING_MODE_V1_0: /* expect register-dring message */ 4072 if ((status = 4073 vd_process_dring_reg_msg(vd, msg, msglen)) != 0) 4074 return (status); 4075 4076 /* One dring negotiated, move to that state */ 4077 vd->state = VD_STATE_DRING; 4078 return (0); 4079 4080 default: 4081 ASSERT("Unsupported transfer mode"); 4082 PR0("Unsupported transfer mode"); 4083 return (ENOTSUP); 4084 } 4085 4086 case VD_STATE_DRING: /* expect RDX, register-dring, or unreg-dring */ 4087 if ((status = process_rdx_msg(msg, msglen)) == 0) { 4088 /* Ready to receive data */ 4089 vd->state = VD_STATE_DATA; 4090 return (0); 4091 } else if (status != ENOMSG) { 4092 return (status); 4093 } 4094 4095 4096 /* 4097 * If another register-dring message is received, stay in 4098 * dring state in case the client sends RDX; although the 4099 * protocol allows multiple drings, this server does not 4100 * support using more than one 4101 */ 4102 if ((status = 4103 vd_process_dring_reg_msg(vd, msg, msglen)) != ENOMSG) 4104 return (status); 4105 4106 /* 4107 * Acknowledge an unregister-dring message, but reset the 4108 * connection anyway: Although the protocol allows 4109 * unregistering drings, this server cannot serve a vdisk 4110 * without its only dring 4111 */ 4112 status = vd_process_dring_unreg_msg(vd, msg, msglen); 4113 return ((status == 0) ? ENOTSUP : status); 4114 4115 case VD_STATE_DATA: 4116 switch (vd->xfer_mode) { 4117 case VIO_DESC_MODE: /* expect in-band-descriptor message */ 4118 return (vd_process_desc_msg(vd, msg, msglen)); 4119 4120 case VIO_DRING_MODE_V1_0: /* expect dring-data or unreg-dring */ 4121 /* 4122 * Typically expect dring-data messages, so handle 4123 * them first 4124 */ 4125 if ((status = vd_process_dring_msg(vd, msg, 4126 msglen)) != ENOMSG) 4127 return (status); 4128 4129 /* 4130 * Acknowledge an unregister-dring message, but reset 4131 * the connection anyway: Although the protocol 4132 * allows unregistering drings, this server cannot 4133 * serve a vdisk without its only dring 4134 */ 4135 status = vd_process_dring_unreg_msg(vd, msg, msglen); 4136 return ((status == 0) ? 
ENOTSUP : status); 4137 4138 default: 4139 ASSERT("Unsupported transfer mode"); 4140 PR0("Unsupported transfer mode"); 4141 return (ENOTSUP); 4142 } 4143 4144 default: 4145 ASSERT("Invalid client connection state"); 4146 PR0("Invalid client connection state"); 4147 return (ENOTSUP); 4148 } 4149 } 4150 4151 static int 4152 vd_process_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 4153 { 4154 int status; 4155 boolean_t reset_ldc = B_FALSE; 4156 vd_task_t task; 4157 4158 /* 4159 * Check that the message is at least big enough for a "tag", so that 4160 * message processing can proceed based on tag-specified message type 4161 */ 4162 if (msglen < sizeof (vio_msg_tag_t)) { 4163 PR0("Received short (%lu-byte) message", msglen); 4164 /* Can't "nack" short message, so drop the big hammer */ 4165 PR0("initiating full reset"); 4166 vd_need_reset(vd, B_TRUE); 4167 return (EBADMSG); 4168 } 4169 4170 /* 4171 * Process the message 4172 */ 4173 switch (status = vd_do_process_msg(vd, msg, msglen)) { 4174 case 0: 4175 /* "ack" valid, successfully-processed messages */ 4176 msg->tag.vio_subtype = VIO_SUBTYPE_ACK; 4177 break; 4178 4179 case EINPROGRESS: 4180 /* The completion handler will "ack" or "nack" the message */ 4181 return (EINPROGRESS); 4182 case ENOMSG: 4183 PR0("Received unexpected message"); 4184 _NOTE(FALLTHROUGH); 4185 case EBADMSG: 4186 case ENOTSUP: 4187 /* "transport" error will cause NACK of invalid messages */ 4188 msg->tag.vio_subtype = VIO_SUBTYPE_NACK; 4189 break; 4190 4191 default: 4192 /* "transport" error will cause NACK of invalid messages */ 4193 msg->tag.vio_subtype = VIO_SUBTYPE_NACK; 4194 /* An LDC error probably occurred, so try resetting it */ 4195 reset_ldc = B_TRUE; 4196 break; 4197 } 4198 4199 PR1("\tResulting in state %d (%s)", vd->state, 4200 vd_decode_state(vd->state)); 4201 4202 /* populate the task so we can dispatch it on the taskq */ 4203 task.vd = vd; 4204 task.msg = msg; 4205 task.msglen = msglen; 4206 4207 /* 4208 * Queue a task to send the notification that the operation completed. 4209 * We need to ensure that requests are responded to in the correct 4210 * order and since the taskq is processed serially this ordering 4211 * is maintained. 4212 */ 4213 (void) ddi_taskq_dispatch(vd->completionq, vd_serial_notify, 4214 &task, DDI_SLEEP); 4215 4216 /* 4217 * To ensure handshake negotiations do not happen out of order, such 4218 * requests that come through this path should not be done in parallel 4219 * so we need to wait here until the response is sent to the client. 4220 */ 4221 ddi_taskq_wait(vd->completionq); 4222 4223 /* Arrange to reset the connection for nack'ed or failed messages */ 4224 if ((status != 0) || reset_ldc) { 4225 PR0("initiating %s reset", 4226 (reset_ldc) ? 
"full" : "soft"); 4227 vd_need_reset(vd, reset_ldc); 4228 } 4229 4230 return (status); 4231 } 4232 4233 static boolean_t 4234 vd_enabled(vd_t *vd) 4235 { 4236 boolean_t enabled; 4237 4238 mutex_enter(&vd->lock); 4239 enabled = vd->enabled; 4240 mutex_exit(&vd->lock); 4241 return (enabled); 4242 } 4243 4244 static void 4245 vd_recv_msg(void *arg) 4246 { 4247 vd_t *vd = (vd_t *)arg; 4248 int rv = 0, status = 0; 4249 4250 ASSERT(vd != NULL); 4251 4252 PR2("New task to receive incoming message(s)"); 4253 4254 4255 while (vd_enabled(vd) && status == 0) { 4256 size_t msglen, msgsize; 4257 ldc_status_t lstatus; 4258 4259 /* 4260 * Receive and process a message 4261 */ 4262 vd_reset_if_needed(vd); /* can change vd->max_msglen */ 4263 4264 /* 4265 * check if channel is UP - else break out of loop 4266 */ 4267 status = ldc_status(vd->ldc_handle, &lstatus); 4268 if (lstatus != LDC_UP) { 4269 PR0("channel not up (status=%d), exiting recv loop\n", 4270 lstatus); 4271 break; 4272 } 4273 4274 ASSERT(vd->max_msglen != 0); 4275 4276 msgsize = vd->max_msglen; /* stable copy for alloc/free */ 4277 msglen = msgsize; /* actual len after recv_msg() */ 4278 4279 status = recv_msg(vd->ldc_handle, vd->vio_msgp, &msglen); 4280 switch (status) { 4281 case 0: 4282 rv = vd_process_msg(vd, (vio_msg_t *)vd->vio_msgp, 4283 msglen); 4284 /* check if max_msglen changed */ 4285 if (msgsize != vd->max_msglen) { 4286 PR0("max_msglen changed 0x%lx to 0x%lx bytes\n", 4287 msgsize, vd->max_msglen); 4288 kmem_free(vd->vio_msgp, msgsize); 4289 vd->vio_msgp = 4290 kmem_alloc(vd->max_msglen, KM_SLEEP); 4291 } 4292 if (rv == EINPROGRESS) 4293 continue; 4294 break; 4295 4296 case ENOMSG: 4297 break; 4298 4299 case ECONNRESET: 4300 PR0("initiating soft reset (ECONNRESET)\n"); 4301 vd_need_reset(vd, B_FALSE); 4302 status = 0; 4303 break; 4304 4305 default: 4306 /* Probably an LDC failure; arrange to reset it */ 4307 PR0("initiating full reset (status=0x%x)", status); 4308 vd_need_reset(vd, B_TRUE); 4309 break; 4310 } 4311 } 4312 4313 PR2("Task finished"); 4314 } 4315 4316 static uint_t 4317 vd_handle_ldc_events(uint64_t event, caddr_t arg) 4318 { 4319 vd_t *vd = (vd_t *)(void *)arg; 4320 int status; 4321 4322 ASSERT(vd != NULL); 4323 4324 if (!vd_enabled(vd)) 4325 return (LDC_SUCCESS); 4326 4327 if (event & LDC_EVT_DOWN) { 4328 PR0("LDC_EVT_DOWN: LDC channel went down"); 4329 4330 vd_need_reset(vd, B_TRUE); 4331 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, vd, 4332 DDI_SLEEP); 4333 if (status == DDI_FAILURE) { 4334 PR0("cannot schedule task to recv msg\n"); 4335 vd_need_reset(vd, B_TRUE); 4336 } 4337 } 4338 4339 if (event & LDC_EVT_RESET) { 4340 PR0("LDC_EVT_RESET: LDC channel was reset"); 4341 4342 if (vd->state != VD_STATE_INIT) { 4343 PR0("scheduling full reset"); 4344 vd_need_reset(vd, B_FALSE); 4345 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, 4346 vd, DDI_SLEEP); 4347 if (status == DDI_FAILURE) { 4348 PR0("cannot schedule task to recv msg\n"); 4349 vd_need_reset(vd, B_TRUE); 4350 } 4351 4352 } else { 4353 PR0("channel already reset, ignoring...\n"); 4354 PR0("doing ldc up...\n"); 4355 (void) ldc_up(vd->ldc_handle); 4356 } 4357 4358 return (LDC_SUCCESS); 4359 } 4360 4361 if (event & LDC_EVT_UP) { 4362 PR0("EVT_UP: LDC is up\nResetting client connection state"); 4363 PR0("initiating soft reset"); 4364 vd_need_reset(vd, B_FALSE); 4365 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, 4366 vd, DDI_SLEEP); 4367 if (status == DDI_FAILURE) { 4368 PR0("cannot schedule task to recv msg\n"); 4369 vd_need_reset(vd, 
B_TRUE); 4370 return (LDC_SUCCESS); 4371 } 4372 } 4373 4374 if (event & LDC_EVT_READ) { 4375 int status; 4376 4377 PR1("New data available"); 4378 /* Queue a task to receive the new data */ 4379 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, vd, 4380 DDI_SLEEP); 4381 4382 if (status == DDI_FAILURE) { 4383 PR0("cannot schedule task to recv msg\n"); 4384 vd_need_reset(vd, B_TRUE); 4385 } 4386 } 4387 4388 return (LDC_SUCCESS); 4389 } 4390 4391 static uint_t 4392 vds_check_for_vd(mod_hash_key_t key, mod_hash_val_t *val, void *arg) 4393 { 4394 _NOTE(ARGUNUSED(key, val)) 4395 (*((uint_t *)arg))++; 4396 return (MH_WALK_TERMINATE); 4397 } 4398 4399 4400 static int 4401 vds_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 4402 { 4403 uint_t vd_present = 0; 4404 minor_t instance; 4405 vds_t *vds; 4406 4407 4408 switch (cmd) { 4409 case DDI_DETACH: 4410 /* the real work happens below */ 4411 break; 4412 case DDI_SUSPEND: 4413 PR0("No action required for DDI_SUSPEND"); 4414 return (DDI_SUCCESS); 4415 default: 4416 PR0("Unrecognized \"cmd\""); 4417 return (DDI_FAILURE); 4418 } 4419 4420 ASSERT(cmd == DDI_DETACH); 4421 instance = ddi_get_instance(dip); 4422 if ((vds = ddi_get_soft_state(vds_state, instance)) == NULL) { 4423 PR0("Could not get state for instance %u", instance); 4424 ddi_soft_state_free(vds_state, instance); 4425 return (DDI_FAILURE); 4426 } 4427 4428 /* Do no detach when serving any vdisks */ 4429 mod_hash_walk(vds->vd_table, vds_check_for_vd, &vd_present); 4430 if (vd_present) { 4431 PR0("Not detaching because serving vdisks"); 4432 return (DDI_FAILURE); 4433 } 4434 4435 PR0("Detaching"); 4436 if (vds->initialized & VDS_MDEG) { 4437 (void) mdeg_unregister(vds->mdeg); 4438 kmem_free(vds->ispecp->specp, sizeof (vds_prop_template)); 4439 kmem_free(vds->ispecp, sizeof (mdeg_node_spec_t)); 4440 vds->ispecp = NULL; 4441 vds->mdeg = NULL; 4442 } 4443 4444 vds_driver_types_free(vds); 4445 4446 if (vds->initialized & VDS_LDI) 4447 (void) ldi_ident_release(vds->ldi_ident); 4448 mod_hash_destroy_hash(vds->vd_table); 4449 ddi_soft_state_free(vds_state, instance); 4450 return (DDI_SUCCESS); 4451 } 4452 4453 static boolean_t 4454 is_pseudo_device(dev_info_t *dip) 4455 { 4456 dev_info_t *parent, *root = ddi_root_node(); 4457 4458 4459 for (parent = ddi_get_parent(dip); (parent != NULL) && (parent != root); 4460 parent = ddi_get_parent(parent)) { 4461 if (strcmp(ddi_get_name(parent), DEVI_PSEUDO_NEXNAME) == 0) 4462 return (B_TRUE); 4463 } 4464 4465 return (B_FALSE); 4466 } 4467 4468 /* 4469 * Description: 4470 * This function checks to see if the file being used as a 4471 * virtual disk is an ISO image. An ISO image is a special 4472 * case which can be booted/installed from like a CD/DVD 4473 * 4474 * Parameters: 4475 * vd - disk on which the operation is performed. 4476 * 4477 * Return Code: 4478 * B_TRUE - The file is an ISO 9660 compliant image 4479 * B_FALSE - just a regular disk image file 4480 */ 4481 static boolean_t 4482 vd_file_is_iso_image(vd_t *vd) 4483 { 4484 char iso_buf[ISO_SECTOR_SIZE]; 4485 int i, rv; 4486 uint_t sec; 4487 4488 ASSERT(vd->file); 4489 4490 /* 4491 * If we have already discovered and saved this info we can 4492 * short-circuit the check and avoid reading the file. 4493 */ 4494 if (vd->vdisk_media == VD_MEDIA_DVD || vd->vdisk_media == VD_MEDIA_CD) 4495 return (B_TRUE); 4496 4497 /* 4498 * We wish to read the sector that should contain the 2nd ISO volume 4499 * descriptor. 
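(An ISO sector is ISO_SECTOR_SIZE bytes, i.e. 2048 bytes, so the
 * computation below converts that ISO sector number into an offset
 * expressed in vdisk blocks before issuing the read.)
 *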
The second field in this descriptor is called the
4500 * Standard Identifier and is set to CD001 for a CD-ROM compliant
4501 * to the ISO 9660 standard.
4502 */
4503 sec = (ISO_VOLDESC_SEC * ISO_SECTOR_SIZE) / vd->vdisk_block_size;
4504 rv = vd_file_rw(vd, VD_SLICE_NONE, VD_OP_BREAD, (caddr_t)iso_buf,
4505 sec, ISO_SECTOR_SIZE);
4506
4507 if (rv < 0)
4508 return (B_FALSE);
4509
4510 for (i = 0; i < ISO_ID_STRLEN; i++) {
4511 if (ISO_STD_ID(iso_buf)[i] != ISO_ID_STRING[i])
4512 return (B_FALSE);
4513 }
4514
4515 return (B_TRUE);
4516 }
4517
4518 /*
4519 * Description:
4520 * This function checks to see if the virtual device is an ATAPI
4521 * device. ATAPI devices use Group 1 Read/Write commands, so
4522 * any USCSI calls vds makes need to take this into account.
4523 *
4524 * Parameters:
4525 * vd - disk on which the operation is performed.
4526 *
4527 * Return Code:
4528 * B_TRUE - The virtual disk is backed by an ATAPI device
4529 * B_FALSE - not an ATAPI device (presumably SCSI)
4530 */
4531 static boolean_t
4532 vd_is_atapi_device(vd_t *vd)
4533 {
4534 boolean_t is_atapi = B_FALSE;
4535 char *variantp;
4536 int rv;
4537
4538 ASSERT(vd->ldi_handle[0] != NULL);
4539 ASSERT(!vd->file);
4540
4541 rv = ldi_prop_lookup_string(vd->ldi_handle[0],
4542 (LDI_DEV_T_ANY | DDI_PROP_DONTPASS), "variant", &variantp);
4543 if (rv == DDI_PROP_SUCCESS) {
4544 PR0("'variant' property exists for %s", vd->device_path);
4545 if (strcmp(variantp, "atapi") == 0)
4546 is_atapi = B_TRUE;
4547 ddi_prop_free(variantp);
4548 }
4549
4550 rv = ldi_prop_exists(vd->ldi_handle[0], LDI_DEV_T_ANY, "atapi");
4551 if (rv) {
4552 PR0("'atapi' property exists for %s", vd->device_path);
4553 is_atapi = B_TRUE;
4554 }
4555
4556 return (is_atapi);
4557 }
4558
4559 static int
4560 vd_setup_mediainfo(vd_t *vd)
4561 {
4562 int status, rval;
4563 struct dk_minfo dk_minfo;
4564
4565 ASSERT(vd->ldi_handle[0] != NULL);
4566 ASSERT(vd->vdisk_block_size != 0);
4567
4568 if ((status = ldi_ioctl(vd->ldi_handle[0], DKIOCGMEDIAINFO,
4569 (intptr_t)&dk_minfo, (vd->open_flags | FKIOCTL),
4570 kcred, &rval)) != 0)
4571 return (status);
4572
4573 ASSERT(dk_minfo.dki_lbsize % vd->vdisk_block_size == 0);
4574
4575 vd->block_size = dk_minfo.dki_lbsize;
4576 vd->vdisk_size = (dk_minfo.dki_capacity * dk_minfo.dki_lbsize) /
4577 vd->vdisk_block_size;
4578 vd->vdisk_media = DK_MEDIATYPE2VD_MEDIATYPE(dk_minfo.dki_media_type);
4579 return (0);
4580 }
4581
4582 static int
4583 vd_setup_full_disk(vd_t *vd)
4584 {
4585 int status;
4586 major_t major = getmajor(vd->dev[0]);
4587 minor_t minor = getminor(vd->dev[0]) - VD_ENTIRE_DISK_SLICE;
4588
4589 ASSERT(vd->vdisk_type == VD_DISK_TYPE_DISK);
4590
4591 vd->vdisk_block_size = DEV_BSIZE;
4592
4593 /*
4594 * At this point, vdisk_size is set to the size of partition 2 but
4595 * this does not represent the size of the disk because partition 2
4596 * may not cover the entire disk and its size does not include reserved
4597 * blocks. So we call vd_setup_mediainfo() to update this information and
4598 * set the block size and the media type of the disk.
4599 */
4600 status = vd_setup_mediainfo(vd);
4601
4602 if (status != 0) {
4603 if (!vd->scsi) {
4604 /* unexpected failure */
4605 PRN("ldi_ioctl(DKIOCGMEDIAINFO) returned errno %d",
4606 status);
4607 return (status);
4608 }
4609
4610 /*
4611 * The function can fail for SCSI disks which are present but
4612 * reserved by another system. In that case, we don't know the
4613 * size of the disk or its block size.
4614 */ 4615 vd->vdisk_size = VD_SIZE_UNKNOWN; 4616 vd->block_size = 0; 4617 vd->vdisk_media = VD_MEDIA_FIXED; 4618 } 4619 4620 /* Move dev number and LDI handle to entire-disk-slice array elements */ 4621 vd->dev[VD_ENTIRE_DISK_SLICE] = vd->dev[0]; 4622 vd->dev[0] = 0; 4623 vd->ldi_handle[VD_ENTIRE_DISK_SLICE] = vd->ldi_handle[0]; 4624 vd->ldi_handle[0] = NULL; 4625 4626 /* Initialize device numbers for remaining slices and open them */ 4627 for (int slice = 0; slice < vd->nslices; slice++) { 4628 /* 4629 * Skip the entire-disk slice, as it's already open and its 4630 * device known 4631 */ 4632 if (slice == VD_ENTIRE_DISK_SLICE) 4633 continue; 4634 ASSERT(vd->dev[slice] == 0); 4635 ASSERT(vd->ldi_handle[slice] == NULL); 4636 4637 /* 4638 * Construct the device number for the current slice 4639 */ 4640 vd->dev[slice] = makedevice(major, (minor + slice)); 4641 4642 /* 4643 * Open all slices of the disk to serve them to the client. 4644 * Slices are opened exclusively to prevent other threads or 4645 * processes in the service domain from performing I/O to 4646 * slices being accessed by a client. Failure to open a slice 4647 * results in vds not serving this disk, as the client could 4648 * attempt (and should be able) to access any slice immediately. 4649 * Any slices successfully opened before a failure will get 4650 * closed by vds_destroy_vd() as a result of the error returned 4651 * by this function. 4652 * 4653 * We need to do the open with FNDELAY so that opening an empty 4654 * slice does not fail. 4655 */ 4656 PR0("Opening device major %u, minor %u = slice %u", 4657 major, minor, slice); 4658 4659 /* 4660 * Try to open the device. This can fail for example if we are 4661 * opening an empty slice. So in case of a failure, we try the 4662 * open again but this time with the FNDELAY flag. 4663 */ 4664 status = ldi_open_by_dev(&vd->dev[slice], OTYP_BLK, 4665 vd->open_flags, kcred, &vd->ldi_handle[slice], 4666 vd->vds->ldi_ident); 4667 4668 if (status != 0) { 4669 status = ldi_open_by_dev(&vd->dev[slice], OTYP_BLK, 4670 vd->open_flags | FNDELAY, kcred, 4671 &vd->ldi_handle[slice], vd->vds->ldi_ident); 4672 } 4673 4674 if (status != 0) { 4675 PRN("ldi_open_by_dev() returned errno %d " 4676 "for slice %u", status, slice); 4677 /* vds_destroy_vd() will close any open slices */ 4678 vd->ldi_handle[slice] = NULL; 4679 return (status); 4680 } 4681 } 4682 4683 return (0); 4684 } 4685 4686 /* 4687 * When a slice or a volume is exported as a single-slice disk, we want 4688 * the disk backend (i.e. the slice or volume) to be entirely mapped as 4689 * a slice without the addition of any metadata. 4690 * 4691 * So when exporting the disk as a VTOC disk, we fake a disk with the following 4692 * layout: 4693 * 4694 * 0 1 N+1 4695 * +-+--------------------------+ 4696 * virtual disk: |L| slice 0 | 4697 * +-+--------------------------+ 4698 * ^: : 4699 * |: : 4700 * VTOC LABEL--+: : 4701 * +--------------------------+ 4702 * disk backend: | slice/volume | 4703 * +--------------------------+ 4704 * 0 N 4705 * 4706 * N is the number of blocks in the slice/volume. 4707 * 4708 * We simulate a disk with N+1 blocks. The first block (block 0) is faked and 4709 * can not be changed. The remaining blocks (1 to N+1) defines slice 0 and are 4710 * mapped to the exported slice or volume: 4711 * 4712 * - block 0 (L) can return a fake VTOC label if raw read was implemented. 4713 * - block 1 to N+1 is mapped to the exported slice or volume. 
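 *
 * In other words, a request for virtual block b of slice 0 (b >= 1) is
 * expected to be shifted down by one block onto block b - 1 of the
 * backend; only the fake label block 0 is synthesized by vds.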
4714 * 4715 */ 4716 static int 4717 vd_setup_partition_vtoc(vd_t *vd) 4718 { 4719 int rval, status; 4720 char *device_path = vd->device_path; 4721 4722 status = ldi_ioctl(vd->ldi_handle[0], DKIOCGGEOM, 4723 (intptr_t)&vd->dk_geom, (vd->open_flags | FKIOCTL), kcred, &rval); 4724 4725 if (status != 0) { 4726 PRN("ldi_ioctl(DKIOCGEOM) returned errno %d for %s", 4727 status, device_path); 4728 return (status); 4729 } 4730 4731 /* Initialize dk_geom structure for single-slice device */ 4732 if (vd->dk_geom.dkg_nsect == 0) { 4733 PRN("%s geometry claims 0 sectors per track", device_path); 4734 return (EIO); 4735 } 4736 if (vd->dk_geom.dkg_nhead == 0) { 4737 PRN("%s geometry claims 0 heads", device_path); 4738 return (EIO); 4739 } 4740 vd->dk_geom.dkg_ncyl = (vd->vdisk_size + 1) / vd->dk_geom.dkg_nsect / 4741 vd->dk_geom.dkg_nhead; 4742 vd->dk_geom.dkg_acyl = 0; 4743 vd->dk_geom.dkg_pcyl = vd->dk_geom.dkg_ncyl + vd->dk_geom.dkg_acyl; 4744 4745 4746 /* Initialize vtoc structure for single-slice device */ 4747 bcopy(VD_VOLUME_NAME, vd->vtoc.v_volume, 4748 MIN(sizeof (VD_VOLUME_NAME), sizeof (vd->vtoc.v_volume))); 4749 bzero(vd->vtoc.v_part, sizeof (vd->vtoc.v_part)); 4750 vd->vtoc.v_nparts = 1; 4751 vd->vtoc.v_part[0].p_tag = V_UNASSIGNED; 4752 vd->vtoc.v_part[0].p_flag = 0; 4753 vd->vtoc.v_part[0].p_start = 1; 4754 vd->vtoc.v_part[0].p_size = vd->vdisk_size; 4755 bcopy(VD_ASCIILABEL, vd->vtoc.v_asciilabel, 4756 MIN(sizeof (VD_ASCIILABEL), sizeof (vd->vtoc.v_asciilabel))); 4757 4758 /* adjust the vdisk_size, we emulate the first block */ 4759 vd->vdisk_size += 1; 4760 4761 return (0); 4762 } 4763 4764 /* 4765 * When a slice, volume or file is exported as a single-slice disk, we want 4766 * the disk backend (i.e. the slice, volume or file) to be entirely mapped 4767 * as a slice without the addition of any metadata. 4768 * 4769 * So when exporting the disk as an EFI disk, we fake a disk with the following 4770 * layout: 4771 * 4772 * 0 1 2 3 34 34+N 4773 * +-+-+-+-------+--------------------------+ 4774 * virtual disk: |X|T|E|XXXXXXX| slice 0 | 4775 * +-+-+-+-------+--------------------------+ 4776 * ^ ^ : : 4777 * | | : : 4778 * GPT-+ +-GPE : : 4779 * +--------------------------+ 4780 * disk backend: | slice/volume/file | 4781 * +--------------------------+ 4782 * 0 N 4783 * 4784 * N is the number of blocks in the slice/volume/file. 4785 * 4786 * We simulate a disk with 34+N blocks. The first 34 blocks (0 to 33) are 4787 * emulated and can not be changed. The remaining blocks (34 to 34+N) defines 4788 * slice 0 and are mapped to the exported slice, volume or file: 4789 * 4790 * - block 0 (X) is unused and can return 0 if raw read was implemented. 4791 * - block 1 (T) returns a fake EFI GPT (via DKIOCGETEFI) 4792 * - block 2 (E) returns a fake EFI GPE (via DKIOCGETEFI) 4793 * - block 3 to 33 (X) are unused and return 0 if raw read is implemented. 4794 * - block 34 to 34+N is mapped to the exported slice, volume or file. 
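 *
 * For example, exporting a 1 GB volume (2097152 512-byte blocks) this
 * way yields a 2097186-block virtual disk whose only partition covers
 * LBA 34 through 2097185, matching the GPT built below in
 * vd_setup_partition_efi().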
4795 * 4796 */ 4797 static int 4798 vd_setup_partition_efi(vd_t *vd) 4799 { 4800 efi_gpt_t *gpt; 4801 efi_gpe_t *gpe; 4802 struct uuid uuid = EFI_USR; 4803 uint32_t crc; 4804 4805 gpt = &vd->efi_gpt; 4806 gpe = &vd->efi_gpe; 4807 4808 bzero(gpt, sizeof (efi_gpt_t)); 4809 bzero(gpe, sizeof (efi_gpe_t)); 4810 4811 /* adjust the vdisk_size, we emulate the first 34 blocks */ 4812 vd->vdisk_size += 34; 4813 4814 gpt->efi_gpt_Signature = LE_64(EFI_SIGNATURE); 4815 gpt->efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT); 4816 gpt->efi_gpt_HeaderSize = LE_32(sizeof (efi_gpt_t)); 4817 gpt->efi_gpt_FirstUsableLBA = LE_64(34ULL); 4818 gpt->efi_gpt_LastUsableLBA = LE_64(vd->vdisk_size - 1); 4819 gpt->efi_gpt_NumberOfPartitionEntries = LE_32(1); 4820 gpt->efi_gpt_PartitionEntryLBA = LE_64(2ULL); 4821 gpt->efi_gpt_SizeOfPartitionEntry = LE_32(sizeof (efi_gpe_t)); 4822 4823 UUID_LE_CONVERT(gpe->efi_gpe_PartitionTypeGUID, uuid); 4824 gpe->efi_gpe_StartingLBA = gpt->efi_gpt_FirstUsableLBA; 4825 gpe->efi_gpe_EndingLBA = gpt->efi_gpt_LastUsableLBA; 4826 4827 CRC32(crc, gpe, sizeof (efi_gpe_t), -1U, crc32_table); 4828 gpt->efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc); 4829 4830 CRC32(crc, gpt, sizeof (efi_gpt_t), -1U, crc32_table); 4831 gpt->efi_gpt_HeaderCRC32 = LE_32(~crc); 4832 4833 return (0); 4834 } 4835 4836 /* 4837 * Setup for a virtual disk whose backend is a file (exported as a single slice 4838 * or as a full disk) or a volume device (for example a ZFS, SVM or VxVM volume) 4839 * exported as a full disk. In these cases, the backend is accessed using the 4840 * vnode interface. 4841 */ 4842 static int 4843 vd_setup_backend_vnode(vd_t *vd) 4844 { 4845 int rval, status; 4846 vattr_t vattr; 4847 dev_t dev; 4848 char *file_path = vd->device_path; 4849 char dev_path[MAXPATHLEN + 1]; 4850 ldi_handle_t lhandle; 4851 struct dk_cinfo dk_cinfo; 4852 4853 if ((status = vn_open(file_path, UIO_SYSSPACE, vd->open_flags | FOFFMAX, 4854 0, &vd->file_vnode, 0, 0)) != 0) { 4855 PRN("vn_open(%s) = errno %d", file_path, status); 4856 return (status); 4857 } 4858 4859 /* 4860 * We set vd->file now so that vds_destroy_vd will take care of 4861 * closing the file and releasing the vnode in case of an error. 4862 */ 4863 vd->file = B_TRUE; 4864 4865 vattr.va_mask = AT_SIZE; 4866 if ((status = VOP_GETATTR(vd->file_vnode, &vattr, 0, kcred, NULL)) 4867 != 0) { 4868 PRN("VOP_GETATTR(%s) = errno %d", file_path, status); 4869 return (EIO); 4870 } 4871 4872 vd->file_size = vattr.va_size; 4873 /* size should be at least sizeof(dk_label) */ 4874 if (vd->file_size < sizeof (struct dk_label)) { 4875 PRN("Size of file has to be at least %ld bytes", 4876 sizeof (struct dk_label)); 4877 return (EIO); 4878 } 4879 4880 if (vd->file_vnode->v_flag & VNOMAP) { 4881 PRN("File %s cannot be mapped", file_path); 4882 return (EIO); 4883 } 4884 4885 /* sector size = block size = DEV_BSIZE */ 4886 vd->block_size = DEV_BSIZE; 4887 vd->vdisk_block_size = DEV_BSIZE; 4888 vd->vdisk_size = vd->file_size / DEV_BSIZE; 4889 vd->max_xfer_sz = maxphys / DEV_BSIZE; /* default transfer size */ 4890 4891 /* 4892 * Get max_xfer_sz from the device where the file is or from the device 4893 * itself if we have a volume device. 
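The value is
 * eventually returned to the client, so using the backing device's
 * dki_maxtransfer (see the DKIOCINFO call below) keeps guest transfers
 * within what the backend can handle in one request; if that lookup
 * fails we simply keep the maxphys-based default set above.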
4894 */ 4895 dev_path[0] = '\0'; 4896 4897 if (vd->volume) { 4898 status = ldi_open_by_name(file_path, FREAD, kcred, &lhandle, 4899 vd->vds->ldi_ident); 4900 } else { 4901 dev = vd->file_vnode->v_vfsp->vfs_dev; 4902 if (ddi_dev_pathname(dev, S_IFBLK, dev_path) == DDI_SUCCESS) { 4903 PR0("underlying device = %s\n", dev_path); 4904 } 4905 4906 status = ldi_open_by_dev(&dev, OTYP_BLK, FREAD, kcred, &lhandle, 4907 vd->vds->ldi_ident); 4908 } 4909 4910 if (status != 0) { 4911 PR0("ldi_open() returned errno %d for device %s", 4912 status, (dev_path[0] == '\0')? file_path : dev_path); 4913 } else { 4914 if ((status = ldi_ioctl(lhandle, DKIOCINFO, 4915 (intptr_t)&dk_cinfo, (vd->open_flags | FKIOCTL), kcred, 4916 &rval)) != 0) { 4917 PR0("ldi_ioctl(DKIOCINFO) returned errno %d for %s", 4918 status, dev_path); 4919 } else { 4920 /* 4921 * Store the device's max transfer size for 4922 * return to the client 4923 */ 4924 vd->max_xfer_sz = dk_cinfo.dki_maxtransfer; 4925 } 4926 4927 PR0("close the device %s", dev_path); 4928 (void) ldi_close(lhandle, FREAD, kcred); 4929 } 4930 4931 PR0("using file %s, dev %s, max_xfer = %u blks", 4932 file_path, dev_path, vd->max_xfer_sz); 4933 4934 if (vd->vdisk_type == VD_DISK_TYPE_SLICE) { 4935 ASSERT(!vd->volume); 4936 vd->vdisk_label = VD_DISK_LABEL_EFI; 4937 status = vd_setup_partition_efi(vd); 4938 return (0); 4939 } 4940 4941 /* 4942 * Find and validate the geometry of a disk image. 4943 */ 4944 status = vd_file_validate_geometry(vd); 4945 if (status != 0 && status != EINVAL && status != ENOTSUP) { 4946 PRN("Failed to read label from %s", file_path); 4947 return (EIO); 4948 } 4949 4950 if (vd_file_is_iso_image(vd)) { 4951 /* 4952 * Indicate whether to call this a CD or DVD from the size 4953 * of the ISO image (images for both drive types are stored 4954 * in the ISO-9600 format). CDs can store up to just under 1Gb 4955 */ 4956 if ((vd->vdisk_size * vd->vdisk_block_size) > 4957 (1024 * 1024 * 1024)) 4958 vd->vdisk_media = VD_MEDIA_DVD; 4959 else 4960 vd->vdisk_media = VD_MEDIA_CD; 4961 } else { 4962 vd->vdisk_media = VD_MEDIA_FIXED; 4963 } 4964 4965 /* Setup devid for the disk image */ 4966 4967 if (vd->vdisk_label != VD_DISK_LABEL_UNK) { 4968 4969 status = vd_file_read_devid(vd, &vd->file_devid); 4970 4971 if (status == 0) { 4972 /* a valid devid was found */ 4973 return (0); 4974 } 4975 4976 if (status != EINVAL) { 4977 /* 4978 * There was an error while trying to read the devid. 4979 * So this disk image may have a devid but we are 4980 * unable to read it. 4981 */ 4982 PR0("can not read devid for %s", file_path); 4983 vd->file_devid = NULL; 4984 return (0); 4985 } 4986 } 4987 4988 /* 4989 * No valid device id was found so we create one. Note that a failure 4990 * to create a device id is not fatal and does not prevent the disk 4991 * image from being attached. 4992 */ 4993 PR1("creating devid for %s", file_path); 4994 4995 if (ddi_devid_init(vd->vds->dip, DEVID_FAB, NULL, 0, 4996 &vd->file_devid) != DDI_SUCCESS) { 4997 PR0("fail to create devid for %s", file_path); 4998 vd->file_devid = NULL; 4999 return (0); 5000 } 5001 5002 /* 5003 * Write devid to the disk image. The devid is stored into the disk 5004 * image if we have a valid label; otherwise the devid will be stored 5005 * when the user writes a valid label. 
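Keeping the devid inside
 * the image presumably gives the guest a stable disk identity even if
 * the image file is later renamed or served from a different path.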
5006 */ 5007 if (vd->vdisk_label != VD_DISK_LABEL_UNK) { 5008 if (vd_file_write_devid(vd, vd->file_devid) != 0) { 5009 PR0("fail to write devid for %s", file_path); 5010 ddi_devid_free(vd->file_devid); 5011 vd->file_devid = NULL; 5012 } 5013 } 5014 5015 return (0); 5016 } 5017 5018 5019 /* 5020 * Description: 5021 * Open a device using its device path (supplied by ldm(1m)) 5022 * 5023 * Parameters: 5024 * vd - pointer to structure containing the vDisk info 5025 * flags - open flags 5026 * 5027 * Return Value 5028 * 0 - success 5029 * != 0 - some other non-zero return value from ldi(9F) functions 5030 */ 5031 static int 5032 vd_open_using_ldi_by_name(vd_t *vd, int flags) 5033 { 5034 int status; 5035 char *device_path = vd->device_path; 5036 5037 /* Attempt to open device */ 5038 status = ldi_open_by_name(device_path, flags, kcred, 5039 &vd->ldi_handle[0], vd->vds->ldi_ident); 5040 5041 /* 5042 * The open can fail for example if we are opening an empty slice. 5043 * In case of a failure, we try the open again but this time with 5044 * the FNDELAY flag. 5045 */ 5046 if (status != 0) 5047 status = ldi_open_by_name(device_path, flags | FNDELAY, 5048 kcred, &vd->ldi_handle[0], vd->vds->ldi_ident); 5049 5050 if (status != 0) { 5051 PR0("ldi_open_by_name(%s) = errno %d", device_path, status); 5052 vd->ldi_handle[0] = NULL; 5053 return (status); 5054 } 5055 5056 return (0); 5057 } 5058 5059 /* 5060 * Setup for a virtual disk which backend is a device (a physical disk, 5061 * slice or volume device) that is directly exported either as a full disk 5062 * for a physical disk or as a slice for a volume device or a disk slice. 5063 * In these cases, the backend is accessed using the LDI interface. 5064 */ 5065 static int 5066 vd_setup_backend_ldi(vd_t *vd) 5067 { 5068 int rval, status; 5069 struct dk_cinfo dk_cinfo; 5070 char *device_path = vd->device_path; 5071 5072 /* device has been opened by vd_identify_dev() */ 5073 ASSERT(vd->ldi_handle[0] != NULL); 5074 ASSERT(vd->dev[0] != NULL); 5075 5076 vd->file = B_FALSE; 5077 5078 /* Verify backing device supports dk_cinfo */ 5079 if ((status = ldi_ioctl(vd->ldi_handle[0], DKIOCINFO, 5080 (intptr_t)&dk_cinfo, (vd->open_flags | FKIOCTL), kcred, 5081 &rval)) != 0) { 5082 PRN("ldi_ioctl(DKIOCINFO) returned errno %d for %s", 5083 status, device_path); 5084 return (status); 5085 } 5086 if (dk_cinfo.dki_partition >= V_NUMPAR) { 5087 PRN("slice %u >= maximum slice %u for %s", 5088 dk_cinfo.dki_partition, V_NUMPAR, device_path); 5089 return (EIO); 5090 } 5091 5092 /* 5093 * The device has been opened read-only by vd_identify_dev(), re-open 5094 * it read-write if the write flag is set and we don't have an optical 5095 * device such as a CD-ROM, which, for now, we do not permit writes to 5096 * and thus should not export write operations to the client. 5097 * 5098 * Future: if/when we implement support for guest domains writing to 5099 * optical devices we will need to do further checking of the media type 5100 * to distinguish between read-only and writable discs. 
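The dki_media_type
 * returned by DKIOCGMEDIAINFO could presumably be used for that, since
 * it distinguishes DK_CDROM from writable types such as DK_CDR and
 * DK_CDRW.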
5101 */ 5102 if (dk_cinfo.dki_ctype == DKC_CDROM) { 5103 5104 vd->open_flags &= ~FWRITE; 5105 5106 } else if (vd->open_flags & FWRITE) { 5107 5108 (void) ldi_close(vd->ldi_handle[0], vd->open_flags & ~FWRITE, 5109 kcred); 5110 status = vd_open_using_ldi_by_name(vd, vd->open_flags); 5111 if (status != 0) { 5112 PR0("Failed to open (%s) = errno %d", 5113 device_path, status); 5114 return (status); 5115 } 5116 } 5117 5118 /* Store the device's max transfer size for return to the client */ 5119 vd->max_xfer_sz = dk_cinfo.dki_maxtransfer; 5120 5121 /* 5122 * We need to work out if it's an ATAPI (IDE CD-ROM) or SCSI device so 5123 * that we can use the correct CDB group when sending USCSI commands. 5124 */ 5125 vd->is_atapi_dev = vd_is_atapi_device(vd); 5126 5127 /* 5128 * Export a full disk. 5129 * 5130 * When we use the LDI interface, we export a device as a full disk 5131 * if we have an entire disk slice (slice 2) and if this slice is 5132 * exported as a full disk and not as a single slice disk. 5133 * Similarly, we want to use LDI if we are accessing a CD or DVD 5134 * device (even if it isn't s2) 5135 * 5136 * Note that volume devices are exported as full disks using the vnode 5137 * interface, not the LDI interface. 5138 */ 5139 if ((dk_cinfo.dki_partition == VD_ENTIRE_DISK_SLICE && 5140 vd->vdisk_type == VD_DISK_TYPE_DISK) || 5141 dk_cinfo.dki_ctype == DKC_CDROM) { 5142 ASSERT(!vd->volume); 5143 if (dk_cinfo.dki_ctype == DKC_SCSI_CCS) 5144 vd->scsi = B_TRUE; 5145 return (vd_setup_full_disk(vd)); 5146 } 5147 5148 /* 5149 * Export a single slice disk. 5150 * 5151 * The exported device can be either a volume device or a disk slice. If 5152 * it is a disk slice different from slice 2 then it is always exported 5153 * as a single slice disk even if the "slice" option is not specified. 5154 * If it is disk slice 2 or a volume device then it is exported as a 5155 * single slice disk only if the "slice" option is specified. 5156 */ 5157 return (vd_setup_single_slice_disk(vd)); 5158 } 5159 5160 static int 5161 vd_setup_single_slice_disk(vd_t *vd) 5162 { 5163 int status, rval; 5164 char *device_path = vd->device_path; 5165 5166 /* Get size of backing device */ 5167 if (ldi_get_size(vd->ldi_handle[0], &vd->vdisk_size) != DDI_SUCCESS) { 5168 PRN("ldi_get_size() failed for %s", device_path); 5169 return (EIO); 5170 } 5171 vd->vdisk_size = lbtodb(vd->vdisk_size); /* convert to blocks */ 5172 vd->block_size = DEV_BSIZE; 5173 vd->vdisk_block_size = DEV_BSIZE; 5174 vd->vdisk_media = VD_MEDIA_FIXED; 5175 5176 if (vd->volume) { 5177 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE); 5178 } 5179 5180 /* 5181 * We export the slice as a single slice disk even if the "slice" 5182 * option was not specified. 5183 */ 5184 vd->vdisk_type = VD_DISK_TYPE_SLICE; 5185 vd->nslices = 1; 5186 5187 /* 5188 * When exporting a slice or a device as a single slice disk, we don't 5189 * care about any partitioning exposed by the backend. The goal is just 5190 * to export the backend as a flat storage. We provide a fake partition 5191 * table (either a VTOC or EFI), which presents only one slice, to 5192 * accommodate tools expecting a disk label. 5193 * 5194 * We check the label of the backend to export the device as a slice 5195 * using the same type of label (VTOC or EFI). If there is no label 5196 * then we create a fake EFI label. 5197 * 5198 * Note that the partition table we are creating could also be faked 5199 * by the client based on the size of the backend device. 
5200 */ 5201 status = ldi_ioctl(vd->ldi_handle[0], DKIOCGVTOC, (intptr_t)&vd->vtoc, 5202 (vd->open_flags | FKIOCTL), kcred, &rval); 5203 5204 if (status == 0) { 5205 /* export with a fake VTOC label */ 5206 vd->vdisk_label = VD_DISK_LABEL_VTOC; 5207 status = vd_setup_partition_vtoc(vd); 5208 } else { 5209 /* export with a fake EFI label */ 5210 vd->vdisk_label = VD_DISK_LABEL_EFI; 5211 status = vd_setup_partition_efi(vd); 5212 } 5213 5214 return (status); 5215 } 5216 5217 /* 5218 * Description: 5219 * Open a device using its device path and identify if this is 5220 * a disk device or a volume device. 5221 * 5222 * Parameters: 5223 * vd - pointer to structure containing the vDisk info 5224 * dtype - return the driver type of the device 5225 * 5226 * Return Value 5227 * 0 - success 5228 * != 0 - some other non-zero return value from ldi(9F) functions 5229 */ 5230 static int 5231 vd_identify_dev(vd_t *vd, int *dtype) 5232 { 5233 int status, i; 5234 char *device_path = vd->device_path; 5235 char *drv_name; 5236 int drv_type; 5237 vds_t *vds = vd->vds; 5238 5239 status = vd_open_using_ldi_by_name(vd, vd->open_flags & ~FWRITE); 5240 if (status != 0) { 5241 PR0("Failed to open (%s) = errno %d", device_path, status); 5242 return (status); 5243 } 5244 5245 /* Get device number of backing device */ 5246 if ((status = ldi_get_dev(vd->ldi_handle[0], &vd->dev[0])) != 0) { 5247 PRN("ldi_get_dev() returned errno %d for %s", 5248 status, device_path); 5249 return (status); 5250 } 5251 5252 /* 5253 * We start by looking if the driver is in the list from vds.conf 5254 * so that we can override the built-in list using vds.conf. 5255 */ 5256 drv_name = ddi_major_to_name(getmajor(vd->dev[0])); 5257 drv_type = VD_DRIVER_UNKNOWN; 5258 5259 /* check vds.conf list */ 5260 for (i = 0; i < vds->num_drivers; i++) { 5261 if (vds->driver_types[i].type == VD_DRIVER_UNKNOWN) { 5262 /* ignore invalid entries */ 5263 continue; 5264 } 5265 if (strcmp(drv_name, vds->driver_types[i].name) == 0) { 5266 drv_type = vds->driver_types[i].type; 5267 goto done; 5268 } 5269 } 5270 5271 /* check built-in list */ 5272 for (i = 0; i < VDS_NUM_DRIVERS; i++) { 5273 if (strcmp(drv_name, vds_driver_types[i].name) == 0) { 5274 drv_type = vds_driver_types[i].type; 5275 goto done; 5276 } 5277 } 5278 5279 done: 5280 PR0("driver %s identified as %s", drv_name, 5281 (drv_type == VD_DRIVER_DISK)? "DISK" : 5282 (drv_type == VD_DRIVER_VOLUME)? "VOLUME" : "UNKNOWN"); 5283 5284 *dtype = drv_type; 5285 5286 return (0); 5287 } 5288 5289 static int 5290 vd_setup_vd(vd_t *vd) 5291 { 5292 int status, drv_type, pseudo; 5293 dev_info_t *dip; 5294 vnode_t *vnp; 5295 char *path = vd->device_path; 5296 5297 /* make sure the vdisk backend is valid */ 5298 if ((status = lookupname(path, UIO_SYSSPACE, 5299 FOLLOW, NULLVPP, &vnp)) != 0) { 5300 PR0("Cannot lookup %s errno %d", path, status); 5301 goto done; 5302 } 5303 5304 switch (vnp->v_type) { 5305 case VREG: 5306 /* 5307 * Backend is a file so it is exported as a full disk or as a 5308 * single slice disk using the vnode interface. 5309 */ 5310 VN_RELE(vnp); 5311 vd->volume = B_FALSE; 5312 status = vd_setup_backend_vnode(vd); 5313 break; 5314 5315 case VBLK: 5316 case VCHR: 5317 /* 5318 * Backend is a device. The way it is exported depends on the 5319 * type of the device. 5320 * 5321 * - A volume device is exported as a full disk using the vnode 5322 * interface or as a single slice disk using the LDI 5323 * interface. 
5324 * 5325 * - A disk (represented by the slice 2 of that disk) is 5326 * exported as a full disk using the LDI interface. 5327 * 5328 * - A disk slice (different from slice 2) is always exported 5329 * as a single slice disk using the LDI interface. 5330 * 5331 * - The slice 2 of a disk is exported as a single slice disk 5332 * if the "slice" option is specified, otherwise the entire 5333 * disk will be exported. In any case, the LDI interface is 5334 * used. 5335 */ 5336 5337 /* check if this is a pseudo device */ 5338 if ((dip = ddi_hold_devi_by_instance(getmajor(vnp->v_rdev), 5339 dev_to_instance(vnp->v_rdev), 0)) == NULL) { 5340 PRN("%s is no longer accessible", path); 5341 VN_RELE(vnp); 5342 status = EIO; 5343 break; 5344 } 5345 pseudo = is_pseudo_device(dip); 5346 ddi_release_devi(dip); 5347 VN_RELE(vnp); 5348 5349 if (vd_identify_dev(vd, &drv_type) != 0) { 5350 PRN("%s identification failed", path); 5351 status = EIO; 5352 break; 5353 } 5354 5355 /* 5356 * If the driver hasn't been identified then we consider that 5357 * pseudo devices are volumes and other devices are disks. 5358 */ 5359 if (drv_type == VD_DRIVER_VOLUME || 5360 (drv_type == VD_DRIVER_UNKNOWN && pseudo)) { 5361 vd->volume = B_TRUE; 5362 } else { 5363 status = vd_setup_backend_ldi(vd); 5364 break; 5365 } 5366 5367 /* 5368 * If this is a volume device then its usage depends if the 5369 * "slice" option is set or not. If the "slice" option is set 5370 * then the volume device will be exported as a single slice, 5371 * otherwise it will be exported as a full disk. 5372 * 5373 * For backward compatibility, if vd_volume_force_slice is set 5374 * then we always export volume devices as slices. 5375 */ 5376 if (vd_volume_force_slice) { 5377 vd->vdisk_type = VD_DISK_TYPE_SLICE; 5378 vd->nslices = 1; 5379 } 5380 5381 if (vd->vdisk_type == VD_DISK_TYPE_DISK) { 5382 /* close device opened during identification */ 5383 (void) ldi_close(vd->ldi_handle[0], 5384 vd->open_flags & ~FWRITE, kcred); 5385 vd->ldi_handle[0] = NULL; 5386 vd->dev[0] = 0; 5387 status = vd_setup_backend_vnode(vd); 5388 } else { 5389 status = vd_setup_backend_ldi(vd); 5390 } 5391 break; 5392 5393 default: 5394 PRN("Unsupported vdisk backend %s", path); 5395 VN_RELE(vnp); 5396 status = EBADF; 5397 } 5398 5399 done: 5400 if (status != 0) { 5401 /* 5402 * If the error is retryable print an error message only 5403 * during the first try. 
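Such errors (ENXIO, ENODEV, ENOENT and
 * EROFS, mapped to EAGAIN below) typically mean the backend is not
 * available yet, the assumption being that setup will be retried later
 * once it shows up.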
5404 */ 5405 if (status == ENXIO || status == ENODEV || 5406 status == ENOENT || status == EROFS) { 5407 if (!(vd->initialized & VD_SETUP_ERROR)) { 5408 PRN("%s is currently inaccessible (error %d)", 5409 path, status); 5410 } 5411 status = EAGAIN; 5412 } else { 5413 PRN("%s can not be exported as a virtual disk " 5414 "(error %d)", path, status); 5415 } 5416 vd->initialized |= VD_SETUP_ERROR; 5417 5418 } else if (vd->initialized & VD_SETUP_ERROR) { 5419 /* print a message only if we previously had an error */ 5420 PRN("%s is now online", path); 5421 vd->initialized &= ~VD_SETUP_ERROR; 5422 } 5423 5424 return (status); 5425 } 5426 5427 static int 5428 vds_do_init_vd(vds_t *vds, uint64_t id, char *device_path, uint64_t options, 5429 uint64_t ldc_id, vd_t **vdp) 5430 { 5431 char tq_name[TASKQ_NAMELEN]; 5432 int status; 5433 ddi_iblock_cookie_t iblock = NULL; 5434 ldc_attr_t ldc_attr; 5435 vd_t *vd; 5436 5437 5438 ASSERT(vds != NULL); 5439 ASSERT(device_path != NULL); 5440 ASSERT(vdp != NULL); 5441 PR0("Adding vdisk for %s", device_path); 5442 5443 if ((vd = kmem_zalloc(sizeof (*vd), KM_NOSLEEP)) == NULL) { 5444 PRN("No memory for virtual disk"); 5445 return (EAGAIN); 5446 } 5447 *vdp = vd; /* assign here so vds_destroy_vd() can cleanup later */ 5448 vd->vds = vds; 5449 (void) strncpy(vd->device_path, device_path, MAXPATHLEN); 5450 5451 /* Setup open flags */ 5452 vd->open_flags = FREAD; 5453 5454 if (!(options & VD_OPT_RDONLY)) 5455 vd->open_flags |= FWRITE; 5456 5457 if (options & VD_OPT_EXCLUSIVE) 5458 vd->open_flags |= FEXCL; 5459 5460 /* Setup disk type */ 5461 if (options & VD_OPT_SLICE) { 5462 vd->vdisk_type = VD_DISK_TYPE_SLICE; 5463 vd->nslices = 1; 5464 } else { 5465 vd->vdisk_type = VD_DISK_TYPE_DISK; 5466 vd->nslices = V_NUMPAR; 5467 } 5468 5469 /* default disk label */ 5470 vd->vdisk_label = VD_DISK_LABEL_UNK; 5471 5472 /* Open vdisk and initialize parameters */ 5473 if ((status = vd_setup_vd(vd)) == 0) { 5474 vd->initialized |= VD_DISK_READY; 5475 5476 ASSERT(vd->nslices > 0 && vd->nslices <= V_NUMPAR); 5477 PR0("vdisk_type = %s, volume = %s, file = %s, nslices = %u", 5478 ((vd->vdisk_type == VD_DISK_TYPE_DISK) ? "disk" : "slice"), 5479 (vd->volume ? "yes" : "no"), (vd->file ? 
"yes" : "no"), 5480 vd->nslices); 5481 } else { 5482 if (status != EAGAIN) 5483 return (status); 5484 } 5485 5486 /* Initialize locking */ 5487 if (ddi_get_soft_iblock_cookie(vds->dip, DDI_SOFTINT_MED, 5488 &iblock) != DDI_SUCCESS) { 5489 PRN("Could not get iblock cookie."); 5490 return (EIO); 5491 } 5492 5493 mutex_init(&vd->lock, NULL, MUTEX_DRIVER, iblock); 5494 vd->initialized |= VD_LOCKING; 5495 5496 5497 /* Create start and completion task queues for the vdisk */ 5498 (void) snprintf(tq_name, sizeof (tq_name), "vd_startq%lu", id); 5499 PR1("tq_name = %s", tq_name); 5500 if ((vd->startq = ddi_taskq_create(vds->dip, tq_name, 1, 5501 TASKQ_DEFAULTPRI, 0)) == NULL) { 5502 PRN("Could not create task queue"); 5503 return (EIO); 5504 } 5505 (void) snprintf(tq_name, sizeof (tq_name), "vd_completionq%lu", id); 5506 PR1("tq_name = %s", tq_name); 5507 if ((vd->completionq = ddi_taskq_create(vds->dip, tq_name, 1, 5508 TASKQ_DEFAULTPRI, 0)) == NULL) { 5509 PRN("Could not create task queue"); 5510 return (EIO); 5511 } 5512 5513 /* Allocate the staging buffer */ 5514 vd->max_msglen = sizeof (vio_msg_t); /* baseline vio message size */ 5515 vd->vio_msgp = kmem_alloc(vd->max_msglen, KM_SLEEP); 5516 5517 vd->enabled = 1; /* before callback can dispatch to startq */ 5518 5519 5520 /* Bring up LDC */ 5521 ldc_attr.devclass = LDC_DEV_BLK_SVC; 5522 ldc_attr.instance = ddi_get_instance(vds->dip); 5523 ldc_attr.mode = LDC_MODE_UNRELIABLE; 5524 ldc_attr.mtu = VD_LDC_MTU; 5525 if ((status = ldc_init(ldc_id, &ldc_attr, &vd->ldc_handle)) != 0) { 5526 PRN("Could not initialize LDC channel %lx, " 5527 "init failed with error %d", ldc_id, status); 5528 return (status); 5529 } 5530 vd->initialized |= VD_LDC; 5531 5532 if ((status = ldc_reg_callback(vd->ldc_handle, vd_handle_ldc_events, 5533 (caddr_t)vd)) != 0) { 5534 PRN("Could not initialize LDC channel %lu," 5535 "reg_callback failed with error %d", ldc_id, status); 5536 return (status); 5537 } 5538 5539 if ((status = ldc_open(vd->ldc_handle)) != 0) { 5540 PRN("Could not initialize LDC channel %lu," 5541 "open failed with error %d", ldc_id, status); 5542 return (status); 5543 } 5544 5545 if ((status = ldc_up(vd->ldc_handle)) != 0) { 5546 PR0("ldc_up() returned errno %d", status); 5547 } 5548 5549 /* Allocate the inband task memory handle */ 5550 status = ldc_mem_alloc_handle(vd->ldc_handle, &(vd->inband_task.mhdl)); 5551 if (status) { 5552 PRN("Could not initialize LDC channel %lu," 5553 "alloc_handle failed with error %d", ldc_id, status); 5554 return (ENXIO); 5555 } 5556 5557 /* Add the successfully-initialized vdisk to the server's table */ 5558 if (mod_hash_insert(vds->vd_table, (mod_hash_key_t)id, vd) != 0) { 5559 PRN("Error adding vdisk ID %lu to table", id); 5560 return (EIO); 5561 } 5562 5563 /* store initial state */ 5564 vd->state = VD_STATE_INIT; 5565 5566 return (0); 5567 } 5568 5569 static void 5570 vd_free_dring_task(vd_t *vdp) 5571 { 5572 if (vdp->dring_task != NULL) { 5573 ASSERT(vdp->dring_len != 0); 5574 /* Free all dring_task memory handles */ 5575 for (int i = 0; i < vdp->dring_len; i++) { 5576 (void) ldc_mem_free_handle(vdp->dring_task[i].mhdl); 5577 kmem_free(vdp->dring_task[i].msg, vdp->max_msglen); 5578 vdp->dring_task[i].msg = NULL; 5579 } 5580 kmem_free(vdp->dring_task, 5581 (sizeof (*vdp->dring_task)) * vdp->dring_len); 5582 vdp->dring_task = NULL; 5583 } 5584 } 5585 5586 /* 5587 * Destroy the state associated with a virtual disk 5588 */ 5589 static void 5590 vds_destroy_vd(void *arg) 5591 { 5592 vd_t *vd = (vd_t *)arg; 5593 int retry = 
0, rv; 5594 5595 if (vd == NULL) 5596 return; 5597 5598 PR0("Destroying vdisk state"); 5599 5600 /* Disable queuing requests for the vdisk */ 5601 if (vd->initialized & VD_LOCKING) { 5602 mutex_enter(&vd->lock); 5603 vd->enabled = 0; 5604 mutex_exit(&vd->lock); 5605 } 5606 5607 /* Drain and destroy start queue (*before* destroying completionq) */ 5608 if (vd->startq != NULL) 5609 ddi_taskq_destroy(vd->startq); /* waits for queued tasks */ 5610 5611 /* Drain and destroy completion queue (*before* shutting down LDC) */ 5612 if (vd->completionq != NULL) 5613 ddi_taskq_destroy(vd->completionq); /* waits for tasks */ 5614 5615 vd_free_dring_task(vd); 5616 5617 /* Free the inband task memory handle */ 5618 (void) ldc_mem_free_handle(vd->inband_task.mhdl); 5619 5620 /* Shut down LDC */ 5621 if (vd->initialized & VD_LDC) { 5622 /* unmap the dring */ 5623 if (vd->initialized & VD_DRING) 5624 (void) ldc_mem_dring_unmap(vd->dring_handle); 5625 5626 /* close LDC channel - retry on EAGAIN */ 5627 while ((rv = ldc_close(vd->ldc_handle)) == EAGAIN) { 5628 if (++retry > vds_ldc_retries) { 5629 PR0("Timed out closing channel"); 5630 break; 5631 } 5632 drv_usecwait(vds_ldc_delay); 5633 } 5634 if (rv == 0) { 5635 (void) ldc_unreg_callback(vd->ldc_handle); 5636 (void) ldc_fini(vd->ldc_handle); 5637 } else { 5638 /* 5639 * Closing the LDC channel has failed. Ideally we should 5640 * fail here but there is no Zeus level infrastructure 5641 * to handle this. The MD has already been changed and 5642 * we have to do the close. So we try to do as much 5643 * clean up as we can. 5644 */ 5645 (void) ldc_set_cb_mode(vd->ldc_handle, LDC_CB_DISABLE); 5646 while (ldc_unreg_callback(vd->ldc_handle) == EAGAIN) 5647 drv_usecwait(vds_ldc_delay); 5648 } 5649 } 5650 5651 /* Free the staging buffer for msgs */ 5652 if (vd->vio_msgp != NULL) { 5653 kmem_free(vd->vio_msgp, vd->max_msglen); 5654 vd->vio_msgp = NULL; 5655 } 5656 5657 /* Free the inband message buffer */ 5658 if (vd->inband_task.msg != NULL) { 5659 kmem_free(vd->inband_task.msg, vd->max_msglen); 5660 vd->inband_task.msg = NULL; 5661 } 5662 5663 if (vd->file) { 5664 /* Close file */ 5665 (void) VOP_CLOSE(vd->file_vnode, vd->open_flags, 1, 5666 0, kcred, NULL); 5667 VN_RELE(vd->file_vnode); 5668 if (vd->file_devid != NULL) 5669 ddi_devid_free(vd->file_devid); 5670 } else { 5671 /* Close any open backing-device slices */ 5672 for (uint_t slice = 0; slice < vd->nslices; slice++) { 5673 if (vd->ldi_handle[slice] != NULL) { 5674 PR0("Closing slice %u", slice); 5675 (void) ldi_close(vd->ldi_handle[slice], 5676 vd->open_flags, kcred); 5677 } 5678 } 5679 } 5680 5681 /* Free lock */ 5682 if (vd->initialized & VD_LOCKING) 5683 mutex_destroy(&vd->lock); 5684 5685 /* Finally, free the vdisk structure itself */ 5686 kmem_free(vd, sizeof (*vd)); 5687 } 5688 5689 static int 5690 vds_init_vd(vds_t *vds, uint64_t id, char *device_path, uint64_t options, 5691 uint64_t ldc_id) 5692 { 5693 int status; 5694 vd_t *vd = NULL; 5695 5696 5697 if ((status = vds_do_init_vd(vds, id, device_path, options, 5698 ldc_id, &vd)) != 0) 5699 vds_destroy_vd(vd); 5700 5701 return (status); 5702 } 5703 5704 static int 5705 vds_do_get_ldc_id(md_t *md, mde_cookie_t vd_node, mde_cookie_t *channel, 5706 uint64_t *ldc_id) 5707 { 5708 int num_channels; 5709 5710 5711 /* Look for channel endpoint child(ren) of the vdisk MD node */ 5712 if ((num_channels = md_scan_dag(md, vd_node, 5713 md_find_name(md, VD_CHANNEL_ENDPOINT), 5714 md_find_name(md, "fwd"), channel)) <= 0) { 5715 PRN("No \"%s\" found for virtual 
static int
vds_do_get_ldc_id(md_t *md, mde_cookie_t vd_node, mde_cookie_t *channel,
    uint64_t *ldc_id)
{
        int     num_channels;

        /* Look for channel endpoint child(ren) of the vdisk MD node */
        if ((num_channels = md_scan_dag(md, vd_node,
            md_find_name(md, VD_CHANNEL_ENDPOINT),
            md_find_name(md, "fwd"), channel)) <= 0) {
                PRN("No \"%s\" found for virtual disk", VD_CHANNEL_ENDPOINT);
                return (-1);
        }

        /* Get the "id" value for the first channel endpoint node */
        if (md_get_prop_val(md, channel[0], VD_ID_PROP, ldc_id) != 0) {
                PRN("No \"%s\" property found for \"%s\" of vdisk",
                    VD_ID_PROP, VD_CHANNEL_ENDPOINT);
                return (-1);
        }

        if (num_channels > 1) {
                PRN("Using ID of first of multiple channels for this vdisk");
        }

        return (0);
}

static int
vds_get_ldc_id(md_t *md, mde_cookie_t vd_node, uint64_t *ldc_id)
{
        int             num_nodes, status;
        size_t          size;
        mde_cookie_t    *channel;

        if ((num_nodes = md_node_count(md)) <= 0) {
                PRN("Invalid node count in Machine Description subtree");
                return (-1);
        }
        size = num_nodes * (sizeof (*channel));
        channel = kmem_zalloc(size, KM_SLEEP);
        status = vds_do_get_ldc_id(md, vd_node, channel, ldc_id);
        kmem_free(channel, size);

        return (status);
}

/*
 * Function:
 *      vds_get_options
 *
 * Description:
 *      Parse the options of a vds node. Options are defined as an array
 *      of strings in the vds-block-device-opts property of the vds node
 *      in the machine description. Options are returned as a bitmask. The
 *      mapping between the bitmask options and the option strings from the
 *      machine description is defined in the vd_bdev_options[] array.
 *
 *      The vds-block-device-opts property is optional. If a vds node has
 *      no such property then no options are defined.
 *
 * Parameters:
 *      md              - machine description.
 *      vd_node         - vds node in the machine description for which
 *                        options have to be parsed.
 *      options         - the returned options.
 *
 * Return Code:
 *      none.
 */
static void
vds_get_options(md_t *md, mde_cookie_t vd_node, uint64_t *options)
{
        char    *optstr, *opt;
        int     len, n, i;

        *options = 0;

        if (md_get_prop_data(md, vd_node, VD_BLOCK_DEVICE_OPTS,
            (uint8_t **)&optstr, &len) != 0) {
                PR0("No options found");
                return;
        }

        /* parse options */
        opt = optstr;
        n = sizeof (vd_bdev_options) / sizeof (vd_option_t);

        while (opt < optstr + len) {
                for (i = 0; i < n; i++) {
                        if (strncmp(vd_bdev_options[i].vdo_name,
                            opt, VD_OPTION_NLEN) == 0) {
                                *options |= vd_bdev_options[i].vdo_value;
                                break;
                        }
                }

                if (i < n) {
                        PR0("option: %s", opt);
                } else {
                        PRN("option %s is unknown or unsupported", opt);
                }

                opt += strlen(opt) + 1;
        }
}
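/*
 * Illustration of the option parsing above, using hypothetical option
 * names (the real strings and bit values are defined by vd_bdev_options[]):
 * a vds-block-device-opts property carrying the packed strings
 * "opt1\0opt2\0" is walked one NUL-terminated string at a time; the
 * vdo_value bit of each recognized option is OR-ed into *options, and any
 * unrecognized string is reported with a warning and skipped.
 */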
static void
vds_driver_types_free(vds_t *vds)
{
        if (vds->driver_types != NULL) {
                kmem_free(vds->driver_types, sizeof (vd_driver_type_t) *
                    vds->num_drivers);
                vds->driver_types = NULL;
                vds->num_drivers = 0;
        }
}

/*
 * Update the driver type list with information from vds.conf.
 */
static void
vds_driver_types_update(vds_t *vds)
{
        char    **list, *s;
        uint_t  i, num, count = 0, len;

        if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, vds->dip,
            DDI_PROP_DONTPASS, "driver-type-list", &list, &num) !=
            DDI_PROP_SUCCESS)
                return;

        /*
         * We create a driver_types list with as many entries as there are
         * in the driver-type-list from vds.conf. However, only valid entries
         * (i.e. entries from driver-type-list with a valid syntax) will be
         * populated. Invalid entries will be left blank, so they will have
         * no driver name and the driver type will be VD_DRIVER_UNKNOWN (= 0).
         */
        vds->num_drivers = num;
        vds->driver_types = kmem_zalloc(sizeof (vd_driver_type_t) * num,
            KM_SLEEP);

        for (i = 0; i < num; i++) {

                s = strchr(list[i], ':');

                if (s == NULL) {
                        PRN("vds.conf: driver-type-list, entry %d (%s): "
                            "a colon is expected in the entry",
                            i, list[i]);
                        continue;
                }

                len = (uintptr_t)s - (uintptr_t)list[i];

                if (len == 0) {
                        PRN("vds.conf: driver-type-list, entry %d (%s): "
                            "the driver name is empty",
                            i, list[i]);
                        continue;
                }

                if (len >= VD_DRIVER_NAME_LEN) {
                        PRN("vds.conf: driver-type-list, entry %d (%s): "
                            "the driver name is too long",
                            i, list[i]);
                        continue;
                }

                if (strcmp(s + 1, "disk") == 0) {
                        vds->driver_types[i].type = VD_DRIVER_DISK;
                } else if (strcmp(s + 1, "volume") == 0) {
                        vds->driver_types[i].type = VD_DRIVER_VOLUME;
                } else {
                        PRN("vds.conf: driver-type-list, entry %d (%s): "
                            "the driver type is invalid",
                            i, list[i]);
                        continue;
                }

                (void) strncpy(vds->driver_types[i].name, list[i], len);

                PR0("driver-type-list, entry %d (%s) added",
                    i, list[i]);

                count++;
        }

        ddi_prop_free(list);

        if (count == 0) {
                /* nothing was added, clean up */
                vds_driver_types_free(vds);
        }
}
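/*
 * Illustration of the vds.conf parsing above, using a hypothetical driver
 * name: an entry such as "mydrv:disk" in driver-type-list produces a
 * driver_types[] element whose name is "mydrv" and whose type is
 * VD_DRIVER_DISK; a malformed entry (no colon, an empty or over-long
 * driver name, or a type other than "disk" or "volume") is reported and
 * left as VD_DRIVER_UNKNOWN.
 */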
static void
vds_add_vd(vds_t *vds, md_t *md, mde_cookie_t vd_node)
{
        char            *device_path = NULL;
        uint64_t        id = 0, ldc_id = 0, options = 0;

        if (md_get_prop_val(md, vd_node, VD_ID_PROP, &id) != 0) {
                PRN("Error getting vdisk \"%s\"", VD_ID_PROP);
                return;
        }
        PR0("Adding vdisk ID %lu", id);
        if (md_get_prop_str(md, vd_node, VD_BLOCK_DEVICE_PROP,
            &device_path) != 0) {
                PRN("Error getting vdisk \"%s\"", VD_BLOCK_DEVICE_PROP);
                return;
        }

        vds_get_options(md, vd_node, &options);

        if (vds_get_ldc_id(md, vd_node, &ldc_id) != 0) {
                PRN("Error getting LDC ID for vdisk %lu", id);
                return;
        }

        if (vds_init_vd(vds, id, device_path, options, ldc_id) != 0) {
                PRN("Failed to add vdisk ID %lu", id);
                if (mod_hash_destroy(vds->vd_table, (mod_hash_key_t)id) != 0)
                        PRN("No vdisk entry found for vdisk ID %lu", id);
                return;
        }
}

static void
vds_remove_vd(vds_t *vds, md_t *md, mde_cookie_t vd_node)
{
        uint64_t        id = 0;

        if (md_get_prop_val(md, vd_node, VD_ID_PROP, &id) != 0) {
                PRN("Unable to get \"%s\" property from vdisk's MD node",
                    VD_ID_PROP);
                return;
        }
        PR0("Removing vdisk ID %lu", id);
        if (mod_hash_destroy(vds->vd_table, (mod_hash_key_t)id) != 0)
                PRN("No vdisk entry found for vdisk ID %lu", id);
}

static void
vds_change_vd(vds_t *vds, md_t *prev_md, mde_cookie_t prev_vd_node,
    md_t *curr_md, mde_cookie_t curr_vd_node)
{
        char            *curr_dev, *prev_dev;
        uint64_t        curr_id = 0, curr_ldc_id = 0, curr_options = 0;
        uint64_t        prev_id = 0, prev_ldc_id = 0, prev_options = 0;
        size_t          len;

        /* Validate that vdisk ID has not changed */
        if (md_get_prop_val(prev_md, prev_vd_node, VD_ID_PROP,
            &prev_id) != 0) {
                PRN("Error getting previous vdisk \"%s\" property",
                    VD_ID_PROP);
                return;
        }
        if (md_get_prop_val(curr_md, curr_vd_node, VD_ID_PROP,
            &curr_id) != 0) {
                PRN("Error getting current vdisk \"%s\" property", VD_ID_PROP);
                return;
        }
        if (curr_id != prev_id) {
                PRN("Not changing vdisk: ID changed from %lu to %lu",
                    prev_id, curr_id);
                return;
        }

        /* Validate that LDC ID has not changed */
        if (vds_get_ldc_id(prev_md, prev_vd_node, &prev_ldc_id) != 0) {
                PRN("Error getting LDC ID for vdisk %lu", prev_id);
                return;
        }

        if (vds_get_ldc_id(curr_md, curr_vd_node, &curr_ldc_id) != 0) {
                PRN("Error getting LDC ID for vdisk %lu", curr_id);
                return;
        }
        if (curr_ldc_id != prev_ldc_id) {
                _NOTE(NOTREACHED);      /* lint is confused */
                PRN("Not changing vdisk: "
                    "LDC ID changed from %lu to %lu", prev_ldc_id, curr_ldc_id);
                return;
        }

        /* Determine whether device path has changed */
        if (md_get_prop_str(prev_md, prev_vd_node, VD_BLOCK_DEVICE_PROP,
            &prev_dev) != 0) {
                PRN("Error getting previous vdisk \"%s\"",
                    VD_BLOCK_DEVICE_PROP);
                return;
        }
        if (md_get_prop_str(curr_md, curr_vd_node, VD_BLOCK_DEVICE_PROP,
            &curr_dev) != 0) {
                PRN("Error getting current vdisk \"%s\"",
                    VD_BLOCK_DEVICE_PROP);
                return;
        }
        if (((len = strlen(curr_dev)) == strlen(prev_dev)) &&
            (strncmp(curr_dev, prev_dev, len) == 0))
                return; /* no relevant (supported) change */

        /* Validate that options have not changed */
        vds_get_options(prev_md, prev_vd_node, &prev_options);
        vds_get_options(curr_md, curr_vd_node, &curr_options);
        if (prev_options != curr_options) {
                PRN("Not changing vdisk: options changed from %lx to %lx",
                    prev_options, curr_options);
                return;
        }

        PR0("Changing vdisk ID %lu", prev_id);

        /* Remove old state, which will close vdisk and reset */
        if (mod_hash_destroy(vds->vd_table, (mod_hash_key_t)prev_id) != 0)
                PRN("No entry found for vdisk ID %lu", prev_id);

        /* Re-initialize vdisk with new state */
        if (vds_init_vd(vds, curr_id, curr_dev, curr_options,
            curr_ldc_id) != 0) {
                PRN("Failed to change vdisk ID %lu", curr_id);
                return;
        }
}
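/*
 * MDEG callback invoked when the set of vdisk nodes matching this vds
 * instance changes in the machine description: removed vdisks are torn
 * down first, then modified vdisks are reconfigured, and finally new
 * vdisks are added.
 */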
static int
vds_process_md(void *arg, mdeg_result_t *md)
{
        int     i;
        vds_t   *vds = arg;

        if (md == NULL)
                return (MDEG_FAILURE);
        ASSERT(vds != NULL);

        for (i = 0; i < md->removed.nelem; i++)
                vds_remove_vd(vds, md->removed.mdp, md->removed.mdep[i]);
        for (i = 0; i < md->match_curr.nelem; i++)
                vds_change_vd(vds, md->match_prev.mdp, md->match_prev.mdep[i],
                    md->match_curr.mdp, md->match_curr.mdep[i]);
        for (i = 0; i < md->added.nelem; i++)
                vds_add_vd(vds, md->added.mdp, md->added.mdep[i]);

        return (MDEG_SUCCESS);
}

static int
vds_do_attach(dev_info_t *dip)
{
        int     status, sz;
        int     cfg_handle;
        minor_t instance = ddi_get_instance(dip);
        vds_t   *vds;
        mdeg_prop_spec_t *pspecp;
        mdeg_node_spec_t *ispecp;

        /*
         * The "cfg-handle" property of a vds node in an MD contains the MD's
         * notion of "instance", or unique identifier, for that node; OBP
         * stores the value of the "cfg-handle" MD property as the value of
         * the "reg" property on the node in the device tree it builds from
         * the MD and passes to Solaris. Thus, we look up the devinfo node's
         * "reg" property value to uniquely identify this device instance
         * when registering with the MD event-generation framework. If the
         * "reg" property cannot be found, the device tree state is
         * presumably so broken that there is no point in continuing.
         */
        if (!ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
            VD_REG_PROP)) {
                PRN("vds \"%s\" property does not exist", VD_REG_PROP);
                return (DDI_FAILURE);
        }

        /* Get the MD instance for later MDEG registration */
        cfg_handle = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
            VD_REG_PROP, -1);

        if (ddi_soft_state_zalloc(vds_state, instance) != DDI_SUCCESS) {
                PRN("Could not allocate state for instance %u", instance);
                return (DDI_FAILURE);
        }

        if ((vds = ddi_get_soft_state(vds_state, instance)) == NULL) {
                PRN("Could not get state for instance %u", instance);
                ddi_soft_state_free(vds_state, instance);
                return (DDI_FAILURE);
        }

        vds->dip = dip;
        vds->vd_table = mod_hash_create_ptrhash("vds_vd_table", VDS_NCHAINS,
            vds_destroy_vd, sizeof (void *));

        ASSERT(vds->vd_table != NULL);

        if ((status = ldi_ident_from_dip(dip, &vds->ldi_ident)) != 0) {
                PRN("ldi_ident_from_dip() returned errno %d", status);
                return (DDI_FAILURE);
        }
        vds->initialized |= VDS_LDI;

        /* Register for MD updates */
        sz = sizeof (vds_prop_template);
        pspecp = kmem_alloc(sz, KM_SLEEP);
        bcopy(vds_prop_template, pspecp, sz);

        VDS_SET_MDEG_PROP_INST(pspecp, cfg_handle);

        /* initialize the complete prop spec structure */
        ispecp = kmem_zalloc(sizeof (mdeg_node_spec_t), KM_SLEEP);
        ispecp->namep = "virtual-device";
        ispecp->specp = pspecp;

        if (mdeg_register(ispecp, &vd_match, vds_process_md, vds,
            &vds->mdeg) != MDEG_SUCCESS) {
                PRN("Unable to register for MD updates");
                kmem_free(ispecp, sizeof (mdeg_node_spec_t));
                kmem_free(pspecp, sz);
                return (DDI_FAILURE);
        }

        vds->ispecp = ispecp;
        vds->initialized |= VDS_MDEG;

        /* Prevent auto-detaching so driver is available whenever MD changes */
        if (ddi_prop_update_int(DDI_DEV_T_NONE, dip, DDI_NO_AUTODETACH, 1) !=
            DDI_PROP_SUCCESS) {
                PRN("failed to set \"%s\" property for instance %u",
                    DDI_NO_AUTODETACH, instance);
        }

        /* read any user defined driver types from conf file and update list */
        vds_driver_types_update(vds);

        ddi_report_dev(dip);
        return (DDI_SUCCESS);
}

static int
vds_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
        int     status;

        switch (cmd) {
        case DDI_ATTACH:
                PR0("Attaching");
                if ((status = vds_do_attach(dip)) != DDI_SUCCESS)
                        (void) vds_detach(dip, DDI_DETACH);
                return (status);
        case DDI_RESUME:
                PR0("No action required for DDI_RESUME");
                return (DDI_SUCCESS);
        default:
                return (DDI_FAILURE);
        }
}

static struct dev_ops vds_ops = {
        DEVO_REV,       /* devo_rev */
        0,              /* devo_refcnt */
        ddi_no_info,    /* devo_getinfo */
        nulldev,        /* devo_identify */
        nulldev,        /* devo_probe */
        vds_attach,     /* devo_attach */
        vds_detach,     /* devo_detach */
        nodev,          /* devo_reset */
        NULL,           /* devo_cb_ops */
        NULL,           /* devo_bus_ops */
        nulldev         /* devo_power */
};

static struct modldrv modldrv = {
        &mod_driverops,
        "virtual disk server",
        &vds_ops,
};

static struct modlinkage modlinkage = {
        MODREV_1,
        &modldrv,
        NULL
};

int
_init(void)
{
        int     status;

        if ((status = ddi_soft_state_init(&vds_state, sizeof (vds_t), 1)) != 0)
                return (status);

        if ((status = mod_install(&modlinkage)) != 0) {
                ddi_soft_state_fini(&vds_state);
                return (status);
        }

        return (0);
}

int
_info(struct modinfo *modinfop)
{
        return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
        int     status;

        if ((status = mod_remove(&modlinkage)) != 0)
                return (status);
        ddi_soft_state_fini(&vds_state);
        return (0);
}