/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Virtual disk server
 */


#include <sys/types.h>
#include <sys/conf.h>
#include <sys/crc32.h>
#include <sys/ddi.h>
#include <sys/dkio.h>
#include <sys/file.h>
#include <sys/fs/hsfs_isospec.h>
#include <sys/mdeg.h>
#include <sys/mhd.h>
#include <sys/modhash.h>
#include <sys/note.h>
#include <sys/pathname.h>
#include <sys/sdt.h>
#include <sys/sunddi.h>
#include <sys/sunldi.h>
#include <sys/sysmacros.h>
#include <sys/vio_common.h>
#include <sys/vio_util.h>
#include <sys/vdsk_mailbox.h>
#include <sys/vdsk_common.h>
#include <sys/vtoc.h>
#include <sys/vfs.h>
#include <sys/stat.h>
#include <sys/scsi/impl/uscsi.h>
#include <sys/ontrap.h>
#include <vm/seg_map.h>

#define	ONE_MEGABYTE	(1ULL << 20)
#define	ONE_GIGABYTE	(1ULL << 30)
#define	ONE_TERABYTE	(1ULL << 40)

/* Virtual disk server initialization flags */
#define	VDS_LDI			0x01
#define	VDS_MDEG		0x02

/* Virtual disk server tunable parameters */
#define	VDS_RETRIES		5
#define	VDS_LDC_DELAY		1000		/* 1 msec */
#define	VDS_DEV_DELAY		10000000	/* 10 secs */
#define	VDS_NCHAINS		32

/* Identification parameters for MD, synthetic dkio(7i) structures, etc. */
#define	VDS_NAME		"virtual-disk-server"

#define	VD_NAME			"vd"
#define	VD_VOLUME_NAME		"vdisk"
#define	VD_ASCIILABEL		"Virtual Disk"

#define	VD_CHANNEL_ENDPOINT	"channel-endpoint"
#define	VD_ID_PROP		"id"
#define	VD_BLOCK_DEVICE_PROP	"vds-block-device"
#define	VD_BLOCK_DEVICE_OPTS	"vds-block-device-opts"
#define	VD_REG_PROP		"reg"

/* Virtual disk initialization flags */
#define	VD_DISK_READY		0x01
#define	VD_LOCKING		0x02
#define	VD_LDC			0x04
#define	VD_DRING		0x08
#define	VD_SID			0x10
#define	VD_SEQ_NUM		0x20
#define	VD_SETUP_ERROR		0x40

/* Number of backup labels */
#define	VD_DSKIMG_NUM_BACKUP	5

/* Timeout for SCSI I/O */
#define	VD_SCSI_RDWR_TIMEOUT	30	/* 30 secs */

/*
 * Default number of threads for the I/O queue. In many cases, we will not
 * receive more than 8 I/O requests at the same time. However there are
 * cases (for example during an OS installation) where we can have a lot
 * more (up to the limit of the DRing size).
 */
#define	VD_IOQ_NTHREADS		8

/* Maximum number of logical partitions */
#define	VD_MAXPART	(NDKMAP + 1)

/*
 * By Solaris convention, slice/partition 2 represents the entire disk;
 * unfortunately, this convention does not appear to be codified.
 */
#define	VD_ENTIRE_DISK_SLICE	2

/* Logical block address for EFI */
#define	VD_EFI_LBA_GPT		1	/* LBA of the GPT */
#define	VD_EFI_LBA_GPE		2	/* LBA of the GPE */

#define	VD_EFI_DEV_SET(dev, vdsk, ioctl)	\
	VDSK_EFI_DEV_SET(dev, vdsk, ioctl,	\
	    (vdsk)->vdisk_bsize, (vdsk)->vdisk_size)

/*
 * Flags defining the behavior for flushing asynchronous writes used to
 * perform some write I/O requests.
 *
 * The VD_AWFLUSH_IMMEDIATE flag enables immediate flushing of asynchronous
 * writes. This ensures that data are committed to the backend when the I/O
 * request reply is sent to the guest domain, and so prevents any data from
 * being lost if a service domain unexpectedly crashes.
 *
 * The VD_AWFLUSH_DEFER flag indicates that flushing is deferred to another
 * thread while the request is immediately marked as completed. In that case,
 * a guest domain can receive a reply that its write request is completed
 * while the data have not been flushed to disk yet.
 *
 * Flags VD_AWFLUSH_IMMEDIATE and VD_AWFLUSH_DEFER are mutually exclusive.
 */
#define	VD_AWFLUSH_IMMEDIATE	0x01	/* immediate flushing */
#define	VD_AWFLUSH_DEFER	0x02	/* defer flushing */
#define	VD_AWFLUSH_GROUP	0x04	/* group requests before flushing */
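/*
 * Illustrative sketch (not compiled, and not the driver's actual code
 * path): how a write completion routine could act on the VD_AWFLUSH_*
 * flags above. The helper names vd_example_awflush() and
 * vd_example_flush_task() are hypothetical; the sketch only assumes a
 * vd_flush_write()-style routine that commits pending writes.
 */
#if 0
static int
vd_example_awflush(vd_t *vd, int awflush)
{
	if (awflush & VD_AWFLUSH_IMMEDIATE) {
		/* commit data to the backend before replying to the guest */
		return (vd_flush_write(vd));
	}

	if (awflush & VD_AWFLUSH_DEFER) {
		/* reply immediately; flushing happens in another thread */
		(void) ddi_taskq_dispatch(vd->completionq,
		    vd_example_flush_task, vd, DDI_SLEEP);
	}

	return (0);
}
#endif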
/* Driver types */
typedef enum vd_driver {
	VD_DRIVER_UNKNOWN = 0,	/* driver type unknown */
	VD_DRIVER_DISK,		/* disk driver */
	VD_DRIVER_VOLUME	/* volume driver */
} vd_driver_t;

#define	VD_DRIVER_NAME_LEN	64

#define	VDS_NUM_DRIVERS	(sizeof (vds_driver_types) / sizeof (vd_driver_type_t))

typedef struct vd_driver_type {
	char name[VD_DRIVER_NAME_LEN];	/* driver name */
	vd_driver_t type;		/* driver type (disk or volume) */
} vd_driver_type_t;

/*
 * There is no reliable way to determine if a device represents a disk
 * or a volume, especially with pseudo devices. So we maintain a list of
 * well-known drivers and the type of device they represent (either a disk
 * or a volume).
 *
 * The list can be extended by adding a "driver-type-list" entry in vds.conf
 * with the following syntax:
 *
 *	driver-type-list="<driver>:<type>", ... ,"<driver>:<type>";
 *
 * Where:
 *	<driver> is the name of a driver (limited to 64 characters)
 *	<type> is either the string "disk" or "volume"
 *
 * Invalid entries in "driver-type-list" will be ignored.
 *
 * For example, the following line in vds.conf:
 *
 *	driver-type-list="foo:disk","bar:volume";
 *
 * defines that "foo" is a disk driver, and driver "bar" is a volume driver.
 *
 * When a list is defined in vds.conf, it is checked before the built-in list
 * (vds_driver_types[]) so that any definition from the built-in list can be
 * overridden using vds.conf.
 */
vd_driver_type_t vds_driver_types[] = {
	{ "dad",	VD_DRIVER_DISK },	/* Solaris */
	{ "did",	VD_DRIVER_DISK },	/* Sun Cluster */
	{ "dlmfdrv",	VD_DRIVER_DISK },	/* Hitachi HDLM */
	{ "emcp",	VD_DRIVER_DISK },	/* EMC Powerpath */
	{ "lofi",	VD_DRIVER_VOLUME },	/* Solaris */
	{ "md",		VD_DRIVER_VOLUME },	/* Solaris - SVM */
	{ "sd",		VD_DRIVER_DISK },	/* Solaris */
	{ "ssd",	VD_DRIVER_DISK },	/* Solaris */
	{ "vdc",	VD_DRIVER_DISK },	/* Solaris */
	{ "vxdmp",	VD_DRIVER_DISK },	/* Veritas */
	{ "vxio",	VD_DRIVER_VOLUME },	/* Veritas - VxVM */
	{ "zfs",	VD_DRIVER_VOLUME }	/* Solaris */
};

/* Return a cpp token as a string */
#define	STRINGIZE(token)	#token

/*
 * Print a message prefixed with the current function name to the message log
 * (and optionally to the console for verbose boots); these macros use cpp's
 * concatenation of string literals and C99 variable-length-argument-list
 * macros.
 */
#define	PRN(...)	_PRN("?%s(): "__VA_ARGS__, "")
#define	_PRN(format, ...)					\
	cmn_err(CE_CONT, format"%s", __func__, __VA_ARGS__)
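/*
 * For example (illustrative expansion):
 *
 *	PRN("failed with status %d", status);
 *
 * expands to
 *
 *	cmn_err(CE_CONT, "?%s(): failed with status %d%s",
 *	    __func__, status, "");
 *
 * The leading '?' makes cmn_err(9F) print to the console only on verbose
 * boots. The trailing empty string keeps __VA_ARGS__ non-empty even when
 * PRN() is called with only a format string, and is consumed by the "%s"
 * that _PRN appends to the format.
 */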
/* Return a pointer to the "i"th vdisk dring element */
#define	VD_DRING_ELEM(i)	((vd_dring_entry_t *)(void *)	\
	    (vd->dring + (i)*vd->descriptor_size))

/* Return the virtual disk client's type as a string (for use in messages) */
#define	VD_CLIENT(vd)							\
	(((vd)->xfer_mode == VIO_DESC_MODE) ? "in-band client" :	\
	    (((vd)->xfer_mode == VIO_DRING_MODE_V1_0) ? "dring client" : \
		(((vd)->xfer_mode == 0) ? "null client" :		\
		    "unsupported client")))

/* Read disk label from a disk image */
#define	VD_DSKIMG_LABEL_READ(vd, labelp) \
	vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BREAD, (caddr_t)labelp, \
	    0, sizeof (struct dk_label))

/* Write disk label to a disk image */
#define	VD_DSKIMG_LABEL_WRITE(vd, labelp)	\
	vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BWRITE, (caddr_t)labelp, \
	    0, sizeof (struct dk_label))

/* Identify if a backend is a disk image */
#define	VD_DSKIMG(vd)	((vd)->vdisk_type == VD_DISK_TYPE_DISK &&	\
	((vd)->file || (vd)->volume))

/* Next index in a write queue */
#define	VD_WRITE_INDEX_NEXT(vd, id)		\
	((((id) + 1) >= vd->dring_len)? 0 : (id) + 1)

/* Message for disk access rights reset failure */
#define	VD_RESET_ACCESS_FAILURE_MSG \
	"Failed to reset disk access rights for disk %s"

/*
 * Specification of an MD node passed to the MDEG to filter any
 * 'vport' nodes that do not belong to the specified node. This
 * template is copied for each vds instance and filled in with
 * the appropriate 'cfg-handle' value before being passed to the MDEG.
 */
static mdeg_prop_spec_t	vds_prop_template[] = {
	{ MDET_PROP_STR,	"name",		VDS_NAME },
	{ MDET_PROP_VAL,	"cfg-handle",	NULL },
	{ MDET_LIST_END,	NULL,		NULL }
};

#define	VDS_SET_MDEG_PROP_INST(specp, val) (specp)[1].ps_val = (val);

/*
 * Matching criteria passed to the MDEG to register interest
 * in changes to 'virtual-device-port' nodes identified by their
 * 'id' property.
 */
static md_prop_match_t	vd_prop_match[] = {
	{ MDET_PROP_VAL, VD_ID_PROP },
	{ MDET_LIST_END, NULL }
};

static mdeg_node_match_t vd_match = {"virtual-device-port",
				    vd_prop_match};

/*
 * Options for the VD_BLOCK_DEVICE_OPTS property.
 */
#define	VD_OPT_RDONLY		0x1	/* read-only */
#define	VD_OPT_SLICE		0x2	/* single slice */
#define	VD_OPT_EXCLUSIVE	0x4	/* exclusive access */

#define	VD_OPTION_NLEN	128

typedef struct vd_option {
	char vdo_name[VD_OPTION_NLEN];
	uint64_t vdo_value;
} vd_option_t;

vd_option_t vd_bdev_options[] = {
	{ "ro",		VD_OPT_RDONLY },
	{ "slice",	VD_OPT_SLICE },
	{ "excl",	VD_OPT_EXCLUSIVE }
};
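/*
 * Illustrative sketch (not compiled; not the driver's actual parser):
 * mapping a VD_BLOCK_DEVICE_OPTS string such as "ro,excl" onto a bitmask
 * using vd_bdev_options[]. The helper name is hypothetical, and strtok()
 * is used only for brevity; real kernel code would scan the string by
 * hand.
 */
#if 0
static uint64_t
vd_example_parse_bdev_opts(char *opts)
{
	uint64_t flags = 0;
	char *opt;
	int i;
	int nopts = sizeof (vd_bdev_options) / sizeof (vd_option_t);

	for (opt = strtok(opts, ","); opt != NULL;
	    opt = strtok(NULL, ",")) {
		for (i = 0; i < nopts; i++) {
			if (strcmp(opt, vd_bdev_options[i].vdo_name) == 0)
				flags |= vd_bdev_options[i].vdo_value;
		}
	}

	/* "ro,excl" yields VD_OPT_RDONLY | VD_OPT_EXCLUSIVE (0x5) */
	return (flags);
}
#endif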
/* Debugging macros */
#ifdef DEBUG

static int	vd_msglevel = 0;

#define	PR0 if (vd_msglevel > 0)	PRN
#define	PR1 if (vd_msglevel > 1)	PRN
#define	PR2 if (vd_msglevel > 2)	PRN

#define	VD_DUMP_DRING_ELEM(elem)					\
	PR0("dst:%x op:%x st:%u nb:%lx addr:%lx ncook:%u\n",		\
	    elem->hdr.dstate,						\
	    elem->payload.operation,					\
	    elem->payload.status,					\
	    elem->payload.nbytes,					\
	    elem->payload.addr,						\
	    elem->payload.ncookies);

char *
vd_decode_state(int state)
{
	char *str;

#define	CASE_STATE(_s)	case _s: str = #_s; break;

	switch (state) {
	CASE_STATE(VD_STATE_INIT)
	CASE_STATE(VD_STATE_VER)
	CASE_STATE(VD_STATE_ATTR)
	CASE_STATE(VD_STATE_DRING)
	CASE_STATE(VD_STATE_RDX)
	CASE_STATE(VD_STATE_DATA)
	default: str = "unknown"; break;
	}

#undef CASE_STATE

	return (str);
}

void
vd_decode_tag(vio_msg_t *msg)
{
	char *tstr, *sstr, *estr;

#define	CASE_TYPE(_s)	case _s: tstr = #_s; break;

	switch (msg->tag.vio_msgtype) {
	CASE_TYPE(VIO_TYPE_CTRL)
	CASE_TYPE(VIO_TYPE_DATA)
	CASE_TYPE(VIO_TYPE_ERR)
	default: tstr = "unknown"; break;
	}

#undef CASE_TYPE

#define	CASE_SUBTYPE(_s) case _s: sstr = #_s; break;

	switch (msg->tag.vio_subtype) {
	CASE_SUBTYPE(VIO_SUBTYPE_INFO)
	CASE_SUBTYPE(VIO_SUBTYPE_ACK)
	CASE_SUBTYPE(VIO_SUBTYPE_NACK)
	default: sstr = "unknown"; break;
	}

#undef CASE_SUBTYPE

#define	CASE_ENV(_s)	case _s: estr = #_s; break;

	switch (msg->tag.vio_subtype_env) {
	CASE_ENV(VIO_VER_INFO)
	CASE_ENV(VIO_ATTR_INFO)
	CASE_ENV(VIO_DRING_REG)
	CASE_ENV(VIO_DRING_UNREG)
	CASE_ENV(VIO_RDX)
	CASE_ENV(VIO_PKT_DATA)
	CASE_ENV(VIO_DESC_DATA)
	CASE_ENV(VIO_DRING_DATA)
	default: estr = "unknown"; break;
	}

#undef CASE_ENV

	PR1("(%x/%x/%x) message : (%s/%s/%s)",
	    msg->tag.vio_msgtype, msg->tag.vio_subtype,
	    msg->tag.vio_subtype_env, tstr, sstr, estr);
}

#else	/* !DEBUG */

#define	PR0(...)
#define	PR1(...)
#define	PR2(...)

#define	VD_DUMP_DRING_ELEM(elem)

#define	vd_decode_state(_s)	(NULL)
#define	vd_decode_tag(_s)	(NULL)

#endif	/* DEBUG */
/*
 * Soft state structure for a vds instance
 */
typedef struct vds {
	uint_t		initialized;	/* driver inst initialization flags */
	dev_info_t	*dip;		/* driver inst devinfo pointer */
	ldi_ident_t	ldi_ident;	/* driver's identifier for LDI */
	mod_hash_t	*vd_table;	/* table of virtual disks served */
	mdeg_node_spec_t *ispecp;	/* mdeg node specification */
	mdeg_handle_t	mdeg;		/* handle for MDEG operations */
	vd_driver_type_t *driver_types;	/* extra driver types (from vds.conf) */
	int		num_drivers;	/* num of extra driver types */
} vds_t;

/*
 * Types of descriptor-processing tasks
 */
typedef enum vd_task_type {
	VD_NONFINAL_RANGE_TASK,	/* task for intermediate descriptor in range */
	VD_FINAL_RANGE_TASK,	/* task for last in a range of descriptors */
} vd_task_type_t;

/*
 * Structure describing the task for processing a descriptor
 */
typedef struct vd_task {
	struct vd		*vd;		/* vd instance task is for */
	vd_task_type_t		type;		/* type of descriptor task */
	int			index;		/* dring elem index for task */
	vio_msg_t		*msg;		/* VIO message task is for */
	size_t			msglen;		/* length of message content */
	vd_dring_payload_t	*request;	/* request task will perform */
	struct buf		buf;		/* buf(9s) for I/O request */
	ldc_mem_handle_t	mhdl;		/* task memory handle */
	int			status;		/* status of processing task */
	int	(*completef)(struct vd_task *task); /* completion func ptr */
	uint32_t		write_index;	/* index in the write_queue */
} vd_task_t;

/*
 * Soft state structure for a virtual disk instance
 */
typedef struct vd {
	uint64_t		id;		/* vdisk id */
	uint_t			initialized;	/* vdisk initialization flags */
	uint64_t		operations;	/* bitmask of VD_OPs exported */
	vio_ver_t		version;	/* ver negotiated with client */
	vds_t			*vds;		/* server for this vdisk */
	ddi_taskq_t		*startq;	/* queue for I/O start tasks */
	ddi_taskq_t		*completionq;	/* queue for completion tasks */
	ddi_taskq_t		*ioq;		/* queue for I/O */
	uint32_t		write_index;	/* next write index */
	buf_t			**write_queue;	/* queue for async writes */
	ldi_handle_t		ldi_handle[V_NUMPAR];	/* LDI slice handles */
	char			device_path[MAXPATHLEN + 1]; /* vdisk device */
	dev_t			dev[V_NUMPAR];	/* dev numbers for slices */
	int			open_flags;	/* open flags */
	uint_t			nslices;	/* number of slices we export */
	size_t			vdisk_size;	/* number of blocks in vdisk */
	size_t			vdisk_bsize;	/* blk size of the vdisk */
	vd_disk_type_t		vdisk_type;	/* slice or entire disk */
	vd_disk_label_t		vdisk_label;	/* EFI or VTOC label */
	vd_media_t		vdisk_media;	/* media type of backing dev. */
	boolean_t		is_atapi_dev;	/* Is this an IDE CD-ROM dev? */
	ushort_t		max_xfer_sz;	/* max xfer size in DEV_BSIZE */
	size_t			backend_bsize;	/* blk size of backend device */
	int			vio_bshift;	/* shift for blk conversion */
	boolean_t		volume;		/* is vDisk backed by volume */
	boolean_t		zvol;		/* is vDisk backed by a zvol */
	boolean_t		file;		/* is vDisk backed by a file? */
	boolean_t		scsi;		/* is vDisk backed by scsi? */
	vnode_t			*file_vnode;	/* file vnode */
	size_t			dskimg_size;	/* size of disk image */
	ddi_devid_t		dskimg_devid;	/* devid for disk image */
	int			efi_reserved;	/* EFI reserved slice */
	caddr_t			flabel;		/* fake label for slice type */
	uint_t			flabel_size;	/* fake label size */
	uint_t			flabel_limit;	/* limit of the fake label */
	struct dk_geom		dk_geom;	/* synthetic for slice type */
	struct extvtoc		vtoc;		/* synthetic for slice type */
	vd_slice_t		slices[VD_MAXPART]; /* logical partitions */
	boolean_t		ownership;	/* disk ownership status */
	ldc_status_t		ldc_state;	/* LDC connection state */
	ldc_handle_t		ldc_handle;	/* handle for LDC comm */
	size_t			max_msglen;	/* largest LDC message len */
	vd_state_t		state;		/* client handshake state */
	uint8_t			xfer_mode;	/* transfer mode with client */
	uint32_t		sid;		/* client's session ID */
	uint64_t		seq_num;	/* message sequence number */
	uint64_t		dring_ident;	/* identifier of dring */
	ldc_dring_handle_t	dring_handle;	/* handle for dring ops */
	uint32_t		descriptor_size;	/* num bytes in desc */
	uint32_t		dring_len;	/* number of dring elements */
	uint8_t			dring_mtype;	/* dring mem map type */
	caddr_t			dring;		/* address of dring */
	caddr_t			vio_msgp;	/* vio msg staging buffer */
	vd_task_t		inband_task;	/* task for inband descriptor */
	vd_task_t		*dring_task;	/* tasks for dring elements */

	kmutex_t		lock;		/* protects variables below */
	boolean_t		enabled;	/* is vdisk enabled? */
	boolean_t		reset_state;	/* reset connection state? */
	boolean_t		reset_ldc;	/* reset LDC channel? */
} vd_t;
/*
 * Macros to manipulate the fake label (flabel) for single slice disks.
 *
 * If we fake a VTOC label then the fake label consists of only one block
 * containing the VTOC label (struct dk_label).
 *
 * If we fake an EFI label then the fake label consists of a blank block
 * followed by a GPT (efi_gpt_t) and a GPE (efi_gpe_t).
 */
#define	VD_LABEL_VTOC_SIZE(lba)	\
	P2ROUNDUP(sizeof (struct dk_label), (lba))

#define	VD_LABEL_EFI_SIZE(lba)					\
	P2ROUNDUP(2 * (lba) + sizeof (efi_gpe_t) * VD_MAXPART,	\
	    (lba))

#define	VD_LABEL_VTOC(vd)	\
	((struct dk_label *)(void *)((vd)->flabel))

#define	VD_LABEL_EFI_GPT(vd, lba)	\
	((efi_gpt_t *)(void *)((vd)->flabel + (lba)))
#define	VD_LABEL_EFI_GPE(vd, lba)	\
	((efi_gpe_t *)(void *)((vd)->flabel + 2 * (lba)))
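/*
 * For example (illustrative arithmetic): with a 512-byte block size,
 * VD_LABEL_VTOC_SIZE(512) is P2ROUNDUP(sizeof (struct dk_label), 512),
 * which is 512, since a dk_label is defined to occupy exactly one
 * 512-byte sector. The fake VTOC label therefore fits in a single
 * block, as described above.
 */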
typedef struct vds_operation {
	char	*namep;
	uint8_t	operation;
	int	(*start)(vd_task_t *task);
	int	(*complete)(vd_task_t *task);
} vds_operation_t;

typedef struct vd_ioctl {
	uint8_t		operation;		/* vdisk operation */
	const char	*operation_name;	/* vdisk operation name */
	size_t		nbytes;			/* size of operation buffer */
	int		cmd;			/* corresponding ioctl cmd */
	const char	*cmd_name;		/* ioctl cmd name */
	void		*arg;			/* ioctl cmd argument */
	/* convert input vd_buf to output ioctl_arg */
	int		(*copyin)(void *vd_buf, size_t, void *ioctl_arg);
	/* convert input ioctl_arg to output vd_buf */
	void		(*copyout)(void *ioctl_arg, void *vd_buf);
	/* write is true if the operation writes any data to the backend */
	boolean_t	write;
} vd_ioctl_t;

/* Define trivial copyin/copyout conversion function flag */
#define	VD_IDENTITY_IN	((int (*)(void *, size_t, void *))-1)
#define	VD_IDENTITY_OUT	((void (*)(void *, void *))-1)


static int	vds_ldc_retries = VDS_RETRIES;
static int	vds_ldc_delay = VDS_LDC_DELAY;
static int	vds_dev_retries = VDS_RETRIES;
static int	vds_dev_delay = VDS_DEV_DELAY;
static void	*vds_state;

static short	vd_scsi_rdwr_timeout = VD_SCSI_RDWR_TIMEOUT;
static int	vd_scsi_debug = USCSI_SILENT;

/*
 * Number of threads in the taskq handling vdisk I/O. This can be set up to
 * the size of the DRing, which is the maximum number of I/O requests we can
 * receive in parallel. Note that using a high number of threads can improve
 * performance but will consume a lot of resources if there are many vdisks.
 */
static int	vd_ioq_nthreads = VD_IOQ_NTHREADS;

/*
 * Tunable to define the behavior for flushing asynchronous writes used to
 * perform some write I/O requests. The default behavior is to group as
 * many asynchronous writes as possible and to flush them immediately.
 *
 * If the tunable is set to 0 then explicit flushing is disabled. In that
 * case, data will be flushed by traditional mechanisms (like fsflush) but
 * this might not happen immediately.
 */
static int	vd_awflush = VD_AWFLUSH_IMMEDIATE | VD_AWFLUSH_GROUP;

/*
 * Tunable to define the behavior of the service domain if the vdisk server
 * fails to reset disk exclusive access when an LDC channel is reset. When an
 * LDC channel is reset the vdisk server will try to reset disk exclusive
 * access by releasing any SCSI-2 reservation or resetting the disk. If these
 * actions fail then the default behavior (vd_reset_access_failure = 0) is to
 * print a warning message. This default behavior can be changed by setting
 * the vd_reset_access_failure variable to A_REBOOT (= 0x1), which will cause
 * the service domain to reboot, or to A_DUMP (= 0x5), which will cause the
 * service domain to panic. In both cases, the reset of the service domain
 * should trigger a reset of the SCSI buses and hopefully clear any SCSI-2
 * reservation.
 */
static int	vd_reset_access_failure = 0;

/*
 * Tunable for backward compatibility. When this variable is set to B_TRUE,
 * all disk volumes (ZFS, SVM, VxVM volumes) will be exported as single
 * slice disks whether or not they have the "slice" option set. This is
 * to provide a simple backward compatibility mechanism when upgrading
 * the vds driver and using a domain configuration created before the
 * "slice" option was available.
 */
static boolean_t vd_volume_force_slice = B_FALSE;

/*
 * The label of disk images created with some earlier versions of the virtual
 * disk software is not entirely correct and has an incorrect v_sanity field
 * (usually 0) instead of VTOC_SANE. This creates a compatibility problem with
 * these images because we are now validating that the disk label (and the
 * sanity) is correct when a disk image is opened.
 *
 * This tunable is set to false to not validate the sanity field and ensure
 * compatibility. If the tunable is set to true, we will do strict checking
 * of the sanity but this can create compatibility problems with old disk
 * images.
 */
static boolean_t vd_dskimg_validate_sanity = B_FALSE;

/*
 * Enables the use of LDC_DIRECT_MAP when mapping in imported descriptor rings.
 */
static boolean_t vd_direct_mapped_drings = B_TRUE;

/*
 * When a backend is exported as a single-slice disk then we entirely fake
 * its disk label. So it can be exported either with a VTOC label or with
 * an EFI label. If vd_slice_label is set to VD_DISK_LABEL_VTOC then all
 * single-slice disks will be exported with a VTOC label; and if it is set
 * to VD_DISK_LABEL_EFI then all single-slice disks will be exported with
 * an EFI label.
 *
 * If vd_slice_label is set to VD_DISK_LABEL_UNK and the backend is a disk
 * or volume device then it will be exported with the same type of label as
 * defined on the device. Otherwise if the backend is a file then it will be
 * exported with the disk label type set in the vd_file_slice_label variable.
 *
 * Note that if the backend size is greater than 1TB then it will always be
 * exported with an EFI label no matter what the setting is.
 */
static vd_disk_label_t vd_slice_label = VD_DISK_LABEL_UNK;

static vd_disk_label_t vd_file_slice_label = VD_DISK_LABEL_VTOC;

/*
 * Tunable for backward compatibility. If this variable is set to B_TRUE then
 * single-slice disks are exported as disks with only one slice instead of
 * faking a complete disk partitioning.
 */
static boolean_t vd_slice_single_slice = B_FALSE;
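/*
 * The tunables above are ordinary module variables, so they can be set
 * from /etc/system. For example (illustrative settings), to make the
 * service domain reboot when it fails to reset disk access rights, and
 * to disable explicit flushing of asynchronous writes:
 *
 *	set vds:vd_reset_access_failure = 0x1
 *	set vds:vd_awflush = 0
 */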
/*
 * Supported protocol version pairs, from highest (newest) to lowest (oldest)
 *
 * Each supported major version should appear only once, paired with (and only
 * with) its highest supported minor version number (as the protocol requires
 * supporting all lower minor version numbers as well)
 */
static const vio_ver_t	vds_version[] = {{1, 1}};
static const size_t	vds_num_versions =
    sizeof (vds_version)/sizeof (vds_version[0]);

static void vd_free_dring_task(vd_t *vdp);
static int vd_setup_vd(vd_t *vd);
static int vd_setup_single_slice_disk(vd_t *vd);
static int vd_setup_slice_image(vd_t *vd);
static int vd_setup_disk_image(vd_t *vd);
static int vd_backend_check_size(vd_t *vd);
static boolean_t vd_enabled(vd_t *vd);
static ushort_t vd_lbl2cksum(struct dk_label *label);
static int vd_dskimg_validate_geometry(vd_t *vd);
static boolean_t vd_dskimg_is_iso_image(vd_t *vd);
static void vd_set_exported_operations(vd_t *vd);
static void vd_reset_access(vd_t *vd);
static int vd_backend_ioctl(vd_t *vd, int cmd, caddr_t arg);
static int vds_efi_alloc_and_read(vd_t *, efi_gpt_t **, efi_gpe_t **);
static void vds_efi_free(vd_t *, efi_gpt_t *, efi_gpe_t *);
static void vds_driver_types_free(vds_t *vds);
static void vd_vtocgeom_to_label(struct extvtoc *vtoc, struct dk_geom *geom,
    struct dk_label *label);
static void vd_label_to_vtocgeom(struct dk_label *label, struct extvtoc *vtoc,
    struct dk_geom *geom);
static boolean_t vd_slice_geom_isvalid(vd_t *vd, struct dk_geom *geom);
static boolean_t vd_slice_vtoc_isvalid(vd_t *vd, struct extvtoc *vtoc);

extern int is_pseudo_device(dev_info_t *);

/*
 * Function:
 *	vd_get_readable_size
 *
 * Description:
 *	Convert a given size in bytes to a human readable format in
 *	kilobytes, megabytes, gigabytes or terabytes.
 *
 * Parameters:
 *	full_size	- the size to convert in bytes.
 *	size		- the converted size.
 *	unit		- the unit of the converted size: 'K' (kilobyte),
 *			  'M' (megabyte), 'G' (gigabyte), 'T' (terabyte).
 *
 * Return Code:
 *	none
 */
static void
vd_get_readable_size(size_t full_size, size_t *size, char *unit)
{
	if (full_size < (1ULL << 20)) {
		*size = full_size >> 10;
		*unit = 'K'; /* Kilobyte */
	} else if (full_size < (1ULL << 30)) {
		*size = full_size >> 20;
		*unit = 'M'; /* Megabyte */
	} else if (full_size < (1ULL << 40)) {
		*size = full_size >> 30;
		*unit = 'G'; /* Gigabyte */
	} else {
		*size = full_size >> 40;
		*unit = 'T'; /* Terabyte */
	}
}
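/*
 * For example (illustrative call): vd_get_readable_size(5368709120,
 * &size, &unit) returns size = 5 and unit = 'G', since 5368709120
 * bytes is 5 x 2^30.
 */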
/*
 * Function:
 *	vd_dskimg_io_params
 *
 * Description:
 *	Convert virtual disk I/O parameters (slice, block, length) to
 *	(offset, length) relative to the disk image and according to
 *	the virtual disk partitioning.
 *
 * Parameters:
 *	vd		- disk on which the operation is performed.
 *	slice		- slice to which the I/O parameters apply.
 *			  VD_SLICE_NONE indicates that parameters are
 *			  relative to the entire virtual disk.
 *	blkp		- pointer to the starting block relative to the
 *			  slice; return the starting block relative to
 *			  the disk image.
 *	lenp		- pointer to the number of bytes requested; return
 *			  the number of bytes that can effectively be used.
 *
 * Return Code:
 *	0		- I/O parameters have been successfully converted;
 *			  blkp and lenp point to the converted values.
 *	ENODATA		- no data are available for the given I/O parameters;
 *			  this occurs if the starting block is past the limit
 *			  of the slice.
 *	EINVAL		- I/O parameters are invalid.
 */
static int
vd_dskimg_io_params(vd_t *vd, int slice, size_t *blkp, size_t *lenp)
{
	size_t blk = *blkp;
	size_t len = *lenp;
	size_t offset, maxlen;

	ASSERT(vd->file || VD_DSKIMG(vd));
	ASSERT(len > 0);
	ASSERT(vd->vdisk_bsize == DEV_BSIZE);

	/*
	 * If a file is exported as a slice then we don't care about the vtoc.
	 * In that case, the vtoc is a fake mainly to make newfs happy and we
	 * handle any I/O as a raw disk access so that we can have access to
	 * the entire backend.
	 */
	if (vd->vdisk_type == VD_DISK_TYPE_SLICE || slice == VD_SLICE_NONE) {
		/* raw disk access */
		offset = blk * DEV_BSIZE;
		if (offset >= vd->dskimg_size) {
			/* offset past the end of the disk */
			PR0("offset (0x%lx) >= size (0x%lx)",
			    offset, vd->dskimg_size);
			return (ENODATA);
		}
		maxlen = vd->dskimg_size - offset;
	} else {
		ASSERT(slice >= 0 && slice < V_NUMPAR);

		/*
		 * v1.0 vDisk clients depended on the server not verifying
		 * the label of an unformatted disk. This "feature" is
		 * maintained for backward compatibility but all versions
		 * from v1.1 onwards must do the right thing.
		 */
		if (vd->vdisk_label == VD_DISK_LABEL_UNK &&
		    vio_ver_is_supported(vd->version, 1, 1)) {
			(void) vd_dskimg_validate_geometry(vd);
			if (vd->vdisk_label == VD_DISK_LABEL_UNK) {
				PR0("Unknown disk label, can't do I/O "
				    "from slice %d", slice);
				return (EINVAL);
			}
		}

		if (vd->vdisk_label == VD_DISK_LABEL_VTOC) {
			ASSERT(vd->vtoc.v_sectorsz == DEV_BSIZE);
		} else {
			ASSERT(vd->vdisk_label == VD_DISK_LABEL_EFI);
		}

		if (blk >= vd->slices[slice].nblocks) {
			/* address past the end of the slice */
			PR0("req_addr (0x%lx) >= psize (0x%lx)",
			    blk, vd->slices[slice].nblocks);
			return (ENODATA);
		}

		offset = (vd->slices[slice].start + blk) * DEV_BSIZE;
		maxlen = (vd->slices[slice].nblocks - blk) * DEV_BSIZE;
	}

	/*
	 * If the requested size is greater than the size
	 * of the partition, truncate the read/write.
	 */
	if (len > maxlen) {
		PR0("I/O size truncated to %lu bytes from %lu bytes",
		    maxlen, len);
		len = maxlen;
	}

	/*
	 * We have to ensure that we are reading/writing into the mmap
	 * range. If we have a partial disk image (e.g. an image of
	 * s0 instead of s2) the system can try to access slices that
	 * are not included in the disk image.
	 */
	if ((offset + len) > vd->dskimg_size) {
		PR0("offset + nbytes (0x%lx + 0x%lx) > "
		    "dskimg_size (0x%lx)", offset, len, vd->dskimg_size);
		return (EINVAL);
	}

	*blkp = offset / DEV_BSIZE;
	*lenp = len;

	return (0);
}
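/*
 * For example (illustrative numbers): for a VTOC-labeled disk image
 * where slice 0 starts at block 2048 and spans 1000000 blocks, a
 * request on slice 0 with *blkp = 10 and *lenp = 4096 is converted to
 * offset = (2048 + 10) * 512 = 1053696, so *blkp becomes 2058 while
 * *lenp stays 4096 (no truncation, since the request fits within the
 * slice and the image).
 */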
/*
 * Function:
 *	vd_dskimg_rw
 *
 * Description:
 *	Read or write to a disk image. It handles the case where the disk
 *	image is a file or a volume exported as a full disk, or a file
 *	exported as a single-slice disk. Reads or writes to volumes exported
 *	as single-slice disks are done by directly using the ldi interface.
 *
 * Parameters:
 *	vd		- disk on which the operation is performed.
 *	slice		- slice on which the operation is performed,
 *			  VD_SLICE_NONE indicates that the operation
 *			  is done using an absolute disk offset.
 *	operation	- operation to execute: read (VD_OP_BREAD) or
 *			  write (VD_OP_BWRITE).
 *	data		- buffer where data are read to or written from.
 *	offset		- starting block for the operation.
 *	len		- number of bytes to read or write.
 *
 * Return Code:
 *	n >= 0		- success, n indicates the number of bytes read
 *			  or written.
 *	-1		- error.
 */
static ssize_t
vd_dskimg_rw(vd_t *vd, int slice, int operation, caddr_t data, size_t offset,
    size_t len)
{
	ssize_t resid;
	struct buf buf;
	int status;

	ASSERT(vd->file || VD_DSKIMG(vd));
	ASSERT(len > 0);
	ASSERT(vd->vdisk_bsize == DEV_BSIZE);

	if ((status = vd_dskimg_io_params(vd, slice, &offset, &len)) != 0)
		return ((status == ENODATA)? 0: -1);

	if (vd->volume) {

		bioinit(&buf);
		buf.b_flags = B_BUSY |
		    ((operation == VD_OP_BREAD)? B_READ : B_WRITE);
		buf.b_bcount = len;
		buf.b_lblkno = offset;
		buf.b_edev = vd->dev[0];
		buf.b_un.b_addr = data;

		/*
		 * We use ldi_strategy() and not ldi_read()/ldi_write() because
		 * the read/write functions of the underlying driver may try to
		 * lock pages of the data buffer, and this requires the data
		 * buffer to be kmem_alloc'ed (and not allocated on the stack).
		 *
		 * Also using ldi_strategy() ensures that writes are immediately
		 * committed and not cached, as may be the case with ldi_write()
		 * (for example with a ZFS volume).
		 */
		if (ldi_strategy(vd->ldi_handle[0], &buf) != 0) {
			biofini(&buf);
			return (-1);
		}

		if (biowait(&buf) != 0) {
			biofini(&buf);
			return (-1);
		}

		resid = buf.b_resid;
		biofini(&buf);

		ASSERT(resid <= len);
		return (len - resid);
	}

	ASSERT(vd->file);

	status = vn_rdwr((operation == VD_OP_BREAD)? UIO_READ : UIO_WRITE,
	    vd->file_vnode, data, len, offset * DEV_BSIZE, UIO_SYSSPACE, FSYNC,
	    RLIM64_INFINITY, kcred, &resid);

	if (status != 0)
		return (-1);

	return (len);
}
/*
 * Function:
 *	vd_build_default_label
 *
 * Description:
 *	Return a default label for a given disk size. This is used when the
 *	disk does not have a valid VTOC so that the user can get a valid
 *	default configuration. The default label has all slice sizes set to
 *	0 (except slice 2 which is the entire disk) to force the user to
 *	write a valid label onto the disk image.
 *
 * Parameters:
 *	disk_size	- the disk size in bytes
 *	bsize		- the disk block size in bytes
 *	label		- the returned default label.
 *
 * Return Code:
 *	none.
 */
static void
vd_build_default_label(size_t disk_size, size_t bsize, struct dk_label *label)
{
	size_t size;
	char unit;

	bzero(label, sizeof (struct dk_label));

	/*
	 * Ideally we would like the cylinder size (nsect * nhead) to be the
	 * same whatever the disk size is. That way the VTOC label could be
	 * easily updated in case the disk size is increased (keeping the
	 * same cylinder size allows preserving the existing partitioning
	 * when updating the VTOC label). But it is not possible to have
	 * a fixed cylinder size and to cover all disk sizes.
	 *
	 * So we define different cylinder sizes depending on the disk size.
	 * The cylinder size is chosen so that we don't have too few cylinders
	 * for a small disk image, or so many on a big disk image that you
	 * waste space for backup superblocks or cylinder group structures.
	 * Also we must have a reasonable number of cylinders and sectors so
	 * that newfs can run using default values.
	 *
	 *	+-----------+--------+----------+--------+
	 *	| disk_size | < 2MB  | 2MB-8GB  | >= 8GB |
	 *	+-----------+--------+----------+--------+
	 *	|   nhead   |    1   |     1    |   96   |
	 *	|   nsect   |   200  |    600   |  768   |
	 *	+-----------+--------+----------+--------+
	 *
	 * Other parameters are computed from these values:
	 *
	 *	pcyl = disk_size / (nhead * nsect * 512)
	 *	acyl = (pcyl > 2)? 2 : 0
	 *	ncyl = pcyl - acyl
	 *
	 * The maximum number of cylinders is 65535, so this allows defining
	 * a geometry for a disk size up to 65535 * 96 * 768 * 512 = 2.24 TB
	 * which is more than enough to cover the maximum size allowed by the
	 * extended VTOC format (2TB).
	 */

	if (disk_size >= 8 * ONE_GIGABYTE) {

		label->dkl_nhead = 96;
		label->dkl_nsect = 768;

	} else if (disk_size >= 2 * ONE_MEGABYTE) {

		label->dkl_nhead = 1;
		label->dkl_nsect = 600;

	} else {

		label->dkl_nhead = 1;
		label->dkl_nsect = 200;
	}

	label->dkl_pcyl = disk_size /
	    (label->dkl_nsect * label->dkl_nhead * bsize);

	if (label->dkl_pcyl == 0)
		label->dkl_pcyl = 1;

	label->dkl_acyl = 0;

	if (label->dkl_pcyl > 2)
		label->dkl_acyl = 2;

	label->dkl_ncyl = label->dkl_pcyl - label->dkl_acyl;
	label->dkl_write_reinstruct = 0;
	label->dkl_read_reinstruct = 0;
	label->dkl_rpm = 7200;
	label->dkl_apc = 0;
	label->dkl_intrlv = 0;

	PR0("requested disk size: %ld bytes\n", disk_size);
	PR0("setup: ncyl=%d nhead=%d nsec=%d\n", label->dkl_pcyl,
	    label->dkl_nhead, label->dkl_nsect);
	PR0("provided disk size: %ld bytes\n", (uint64_t)
	    (label->dkl_pcyl * label->dkl_nhead *
	    label->dkl_nsect * bsize));

	vd_get_readable_size(disk_size, &size, &unit);

	/*
	 * We must have a correct label name otherwise format(1m) will
	 * not recognize the disk as labeled.
	 */
	(void) snprintf(label->dkl_asciilabel, LEN_DKL_ASCII,
	    "SUN-DiskImage-%ld%cB cyl %d alt %d hd %d sec %d",
	    size, unit,
	    label->dkl_ncyl, label->dkl_acyl, label->dkl_nhead,
	    label->dkl_nsect);

	/* default VTOC */
	label->dkl_vtoc.v_version = V_EXTVERSION;
	label->dkl_vtoc.v_nparts = V_NUMPAR;
	label->dkl_vtoc.v_sanity = VTOC_SANE;
	label->dkl_vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_tag = V_BACKUP;
	label->dkl_map[VD_ENTIRE_DISK_SLICE].dkl_cylno = 0;
	label->dkl_map[VD_ENTIRE_DISK_SLICE].dkl_nblk = label->dkl_ncyl *
	    label->dkl_nhead * label->dkl_nsect;
	label->dkl_magic = DKL_MAGIC;
	label->dkl_cksum = vd_lbl2cksum(label);
}
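/*
 * For example (illustrative arithmetic): for an 8 GB (8 x 2^30 bytes)
 * disk image with bsize = 512, the code above picks nhead = 96 and
 * nsect = 768, so dkl_pcyl = 8589934592 / (768 * 96 * 512) = 227
 * (integer division, remainder discarded), dkl_acyl = 2 and
 * dkl_ncyl = 225.
 */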
/*
 * Function:
 *	vd_dskimg_set_vtoc
 *
 * Description:
 *	Set the vtoc of a disk image by writing the label and backup
 *	labels into the disk image backend.
 *
 * Parameters:
 *	vd		- disk on which the operation is performed.
 *	label		- the data to be written.
 *
 * Return Code:
 *	0		- success.
 *	n > 0		- error, n indicates the errno code.
 */
static int
vd_dskimg_set_vtoc(vd_t *vd, struct dk_label *label)
{
	size_t blk, sec, cyl, head, cnt;

	ASSERT(VD_DSKIMG(vd));

	if (VD_DSKIMG_LABEL_WRITE(vd, label) < 0) {
		PR0("fail to write disk label");
		return (EIO);
	}

	/*
	 * Backup labels are on the last alternate cylinder's
	 * first five odd sectors.
	 */
	if (label->dkl_acyl == 0) {
		PR0("no alternate cylinder, cannot store backup labels");
		return (0);
	}

	cyl = label->dkl_ncyl + label->dkl_acyl - 1;
	head = label->dkl_nhead - 1;

	blk = (cyl * ((label->dkl_nhead * label->dkl_nsect) -
	    label->dkl_apc)) + (head * label->dkl_nsect);

	/*
	 * Write the backup labels. Make sure we don't try to write past
	 * the last cylinder.
	 */
	sec = 1;

	for (cnt = 0; cnt < VD_DSKIMG_NUM_BACKUP; cnt++) {

		if (sec >= label->dkl_nsect) {
			PR0("not enough sectors to store all backup labels");
			return (0);
		}

		if (vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BWRITE,
		    (caddr_t)label, blk + sec, sizeof (struct dk_label)) < 0) {
			PR0("error writing backup label at block %lu\n",
			    blk + sec);
			return (EIO);
		}

		PR1("wrote backup label at block %lu\n", blk + sec);

		sec += 2;
	}

	return (0);
}

/*
 * Function:
 *	vd_dskimg_get_devid_block
 *
 * Description:
 *	Return the block number where the device id is stored.
 *
 * Parameters:
 *	vd		- disk on which the operation is performed.
 *	blkp		- pointer to the block number
 *
 * Return Code:
 *	0		- success
 *	ENOSPC		- disk has no space to store a device id
 */
static int
vd_dskimg_get_devid_block(vd_t *vd, size_t *blkp)
{
	diskaddr_t spc, head, cyl;

	ASSERT(VD_DSKIMG(vd));

	if (vd->vdisk_label == VD_DISK_LABEL_UNK) {
		/*
		 * If no label is defined we don't know where to find
		 * a device id.
		 */
		return (ENOSPC);
	}

	if (vd->vdisk_label == VD_DISK_LABEL_EFI) {
		/*
		 * For an EFI disk, the devid is at the beginning of
		 * the reserved slice
		 */
		if (vd->efi_reserved == -1) {
			PR0("EFI disk has no reserved slice");
			return (ENOSPC);
		}

		*blkp = vd->slices[vd->efi_reserved].start;
		return (0);
	}

	ASSERT(vd->vdisk_label == VD_DISK_LABEL_VTOC);

	/* this geometry doesn't allow us to have a devid */
	if (vd->dk_geom.dkg_acyl < 2) {
		PR0("not enough alternate cylinders available for devid "
		    "(acyl=%u)", vd->dk_geom.dkg_acyl);
		return (ENOSPC);
	}

	/* the devid is on the track next to the last cylinder */
	cyl = vd->dk_geom.dkg_ncyl + vd->dk_geom.dkg_acyl - 2;
	spc = vd->dk_geom.dkg_nhead * vd->dk_geom.dkg_nsect;
	head = vd->dk_geom.dkg_nhead - 1;

	*blkp = (cyl * (spc - vd->dk_geom.dkg_apc)) +
	    (head * vd->dk_geom.dkg_nsect) + 1;

	return (0);
}
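/*
 * For example (illustrative, continuing the 8 GB default geometry
 * above with ncyl = 225, acyl = 2, nhead = 96, nsect = 768 and
 * apc = 0): cyl = 225, spc = 96 * 768 = 73728 and head = 95, so the
 * devid block is 225 * 73728 + 95 * 768 + 1 = 16661761.
 */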
/*
 * Return the checksum of a disk block containing an on-disk devid.
 */
static uint_t
vd_dkdevid2cksum(struct dk_devid *dkdevid)
{
	uint_t chksum, *ip;
	int i;

	chksum = 0;
	ip = (void *)dkdevid;
	for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int)); i++)
		chksum ^= ip[i];

	return (chksum);
}

/*
 * Function:
 *	vd_dskimg_read_devid
 *
 * Description:
 *	Read the device id stored on a disk image.
 *
 * Parameters:
 *	vd		- disk on which the operation is performed.
 *	devid		- the return address of the device ID.
 *
 * Return Code:
 *	0		- success
 *	EIO		- I/O error while trying to access the disk image
 *	EINVAL		- no valid device id was found
 *	ENOSPC		- disk has no space to store a device id
 */
static int
vd_dskimg_read_devid(vd_t *vd, ddi_devid_t *devid)
{
	struct dk_devid *dkdevid;
	size_t blk;
	uint_t chksum;
	int status, sz;

	ASSERT(vd->vdisk_bsize == DEV_BSIZE);

	if ((status = vd_dskimg_get_devid_block(vd, &blk)) != 0)
		return (status);

	dkdevid = kmem_zalloc(DEV_BSIZE, KM_SLEEP);

	/* get the devid */
	if ((vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BREAD, (caddr_t)dkdevid,
	    blk, DEV_BSIZE)) < 0) {
		PR0("error reading devid block at %lu", blk);
		status = EIO;
		goto done;
	}

	/* validate the revision */
	if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) ||
	    (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) {
		PR0("invalid devid found at block %lu (bad revision)", blk);
		status = EINVAL;
		goto done;
	}

	/* compute checksum */
	chksum = vd_dkdevid2cksum(dkdevid);

	/* compare the checksums */
	if (DKD_GETCHKSUM(dkdevid) != chksum) {
		PR0("invalid devid found at block %lu (bad checksum)", blk);
		status = EINVAL;
		goto done;
	}

	/* validate the device id */
	if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) {
		PR0("invalid devid found at block %lu", blk);
		status = EINVAL;
		goto done;
	}

	PR1("devid read at block %lu", blk);

	sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid);
	*devid = kmem_alloc(sz, KM_SLEEP);
	bcopy(&dkdevid->dkd_devid, *devid, sz);

done:
	kmem_free(dkdevid, DEV_BSIZE);
	return (status);
}
/*
 * Function:
 *	vd_dskimg_write_devid
 *
 * Description:
 *	Write a device id into a disk image.
 *
 * Parameters:
 *	vd		- disk on which the operation is performed.
 *	devid		- the device ID to store.
 *
 * Return Code:
 *	0		- success
 *	EIO		- I/O error while trying to access the disk image
 *	ENOSPC		- disk has no space to store a device id
 */
static int
vd_dskimg_write_devid(vd_t *vd, ddi_devid_t devid)
{
	struct dk_devid *dkdevid;
	uint_t chksum;
	size_t blk;
	int status;

	ASSERT(vd->vdisk_bsize == DEV_BSIZE);

	if (devid == NULL) {
		/* nothing to write */
		return (0);
	}

	if ((status = vd_dskimg_get_devid_block(vd, &blk)) != 0)
		return (status);

	dkdevid = kmem_zalloc(DEV_BSIZE, KM_SLEEP);

	/* set revision */
	dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB;
	dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB;

	/* copy devid */
	bcopy(devid, &dkdevid->dkd_devid, ddi_devid_sizeof(devid));

	/* compute checksum */
	chksum = vd_dkdevid2cksum(dkdevid);

	/* set checksum */
	DKD_FORMCHKSUM(chksum, dkdevid);

	/* store the devid */
	if ((status = vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BWRITE,
	    (caddr_t)dkdevid, blk, DEV_BSIZE)) < 0) {
		PR0("Error writing devid block at %lu", blk);
		status = EIO;
	} else {
		PR1("devid written at block %lu", blk);
		status = 0;
	}

	kmem_free(dkdevid, DEV_BSIZE);
	return (status);
}
/*
 * Function:
 *	vd_do_scsi_rdwr
 *
 * Description:
 *	Read or write to a SCSI disk using an absolute disk offset.
 *
 * Parameters:
 *	vd		- disk on which the operation is performed.
 *	operation	- operation to execute: read (VD_OP_BREAD) or
 *			  write (VD_OP_BWRITE).
 *	data		- buffer where data are read to or written from.
 *	blk		- starting block for the operation.
 *	len		- number of bytes to read or write.
 *
 * Return Code:
 *	0		- success
 *	n != 0		- error.
 */
static int
vd_do_scsi_rdwr(vd_t *vd, int operation, caddr_t data, size_t blk, size_t len)
{
	struct uscsi_cmd ucmd;
	union scsi_cdb cdb;
	int nsectors, nblk;
	int max_sectors;
	int status, rval;

	ASSERT(!vd->file);
	ASSERT(!vd->volume);
	ASSERT(vd->vdisk_bsize > 0);

	max_sectors = vd->max_xfer_sz;
	nblk = (len / vd->vdisk_bsize);

	if (len % vd->vdisk_bsize != 0)
		return (EINVAL);

	/*
	 * Build and execute the uscsi ioctl. We build a group0, group1
	 * or group4 command as necessary, since some targets
	 * do not support group1 commands.
	 */
	while (nblk) {

		bzero(&ucmd, sizeof (ucmd));
		bzero(&cdb, sizeof (cdb));

		nsectors = (max_sectors < nblk) ? max_sectors : nblk;

		/*
		 * Some of the optical drives on sun4v machines are ATAPI
		 * devices which use Group 1 Read/Write commands so we need
		 * to explicitly check a flag which is set when a domain
		 * is bound.
		 */
		if (blk < (2 << 20) && nsectors <= 0xff && !vd->is_atapi_dev) {
			FORMG0ADDR(&cdb, blk);
			FORMG0COUNT(&cdb, (uchar_t)nsectors);
			ucmd.uscsi_cdblen = CDB_GROUP0;
		} else if (blk > 0xffffffff) {
			FORMG4LONGADDR(&cdb, blk);
			FORMG4COUNT(&cdb, nsectors);
			ucmd.uscsi_cdblen = CDB_GROUP4;
			cdb.scc_cmd |= SCMD_GROUP4;
		} else {
			FORMG1ADDR(&cdb, blk);
			FORMG1COUNT(&cdb, nsectors);
			ucmd.uscsi_cdblen = CDB_GROUP1;
			cdb.scc_cmd |= SCMD_GROUP1;
		}
		ucmd.uscsi_cdb = (caddr_t)&cdb;
		ucmd.uscsi_bufaddr = data;
		ucmd.uscsi_buflen = nsectors * vd->backend_bsize;
		ucmd.uscsi_timeout = vd_scsi_rdwr_timeout;
		/*
		 * Set flags so that the command is isolated from normal
		 * commands and no error message is printed.
		 */
		ucmd.uscsi_flags = USCSI_ISOLATE | USCSI_SILENT;

		if (operation == VD_OP_BREAD) {
			cdb.scc_cmd |= SCMD_READ;
			ucmd.uscsi_flags |= USCSI_READ;
		} else {
			cdb.scc_cmd |= SCMD_WRITE;
		}

		status = ldi_ioctl(vd->ldi_handle[VD_ENTIRE_DISK_SLICE],
		    USCSICMD, (intptr_t)&ucmd, (vd->open_flags | FKIOCTL),
		    kcred, &rval);

		if (status == 0)
			status = ucmd.uscsi_status;

		if (status != 0)
			break;

		/*
		 * Check if partial DMA breakup is required. If so, reduce
		 * the request size by half and retry the last request.
		 */
		if (ucmd.uscsi_resid == ucmd.uscsi_buflen) {
			max_sectors >>= 1;
			if (max_sectors <= 0) {
				status = EIO;
				break;
			}
			continue;
		}

		if (ucmd.uscsi_resid != 0) {
			status = EIO;
			break;
		}

		blk += nsectors;
		nblk -= nsectors;
		data += nsectors * vd->vdisk_bsize;
	}

	return (status);
}
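/*
 * To summarize the CDB selection above (descriptive note): Group 0
 * (6-byte) commands hold a 21-bit LBA and an 8-bit count, so they are
 * only used when blk < 2^21 and nsectors <= 0xff (and not for ATAPI
 * devices); Group 4 (16-byte) commands are used when the LBA does not
 * fit in 32 bits; everything else uses Group 1 (10-byte) commands.
 */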
/*
 * Function:
 *	vd_scsi_rdwr
 *
 * Description:
 *	Wrapper function to read or write to a SCSI disk using an absolute
 *	disk offset. It checks the blocksize of the underlying device and,
 *	if necessary, adjusts the buffers accordingly before calling
 *	vd_do_scsi_rdwr() to do the actual read or write.
 *
 * Parameters:
 *	vd		- disk on which the operation is performed.
 *	operation	- operation to execute: read (VD_OP_BREAD) or
 *			  write (VD_OP_BWRITE).
 *	data		- buffer where data are read to or written from.
 *	vblk		- starting block for the operation.
 *	vlen		- number of bytes to read or write.
 *
 * Return Code:
 *	0		- success
 *	n != 0		- error.
 */
static int
vd_scsi_rdwr(vd_t *vd, int operation, caddr_t data, size_t vblk, size_t vlen)
{
	int rv;

	size_t pblk;	/* physical device block number of data on device */
	size_t delta;	/* relative offset between pblk and vblk */
	size_t pnblk;	/* number of physical blocks to be read from device */
	size_t plen;	/* length of data to be read from physical device */
	char *buf;	/* buffer area to fit physical device's block size */

	if (vd->backend_bsize == 0) {
		/*
		 * The block size was not available during the attach,
		 * try to update it now.
		 */
		if (vd_backend_check_size(vd) != 0)
			return (EIO);
	}

	/*
	 * If the vdisk block size and the block size of the underlying device
	 * match we can skip straight to vd_do_scsi_rdwr(), otherwise we need
	 * to create a buffer large enough to handle the device's block size
	 * and adjust the block to be read from and the amount of data to
	 * read to correspond with the device's block size.
	 */
	if (vd->vdisk_bsize == vd->backend_bsize)
		return (vd_do_scsi_rdwr(vd, operation, data, vblk, vlen));

	if (vd->vdisk_bsize > vd->backend_bsize)
		return (EINVAL);

	/*
	 * Writing of physical block sizes larger than the virtual block size
	 * is not supported. This would be added if/when support for guests
	 * writing to DVDs is implemented.
	 */
	if (operation == VD_OP_BWRITE)
		return (ENOTSUP);

	/* BEGIN CSTYLED */
	/*
	 * Below is a diagram showing the relationship between the physical
	 * and virtual blocks. If the virtual blocks marked by 'X' below are
	 * requested, then the physical blocks denoted by 'Y' are read.
	 *
	 *           vblk
	 *             |      vlen
	 *             |<--------------->|
	 *             v                 v
	 *  --+--+--+--+--+--+--+--+--+--+--+--+--+--+--+-   virtual disk:
	 *    |  |  |  |XX|XX|XX|XX|XX|XX|  |  |  |  |  |  } block size is
	 *  --+--+--+--+--+--+--+--+--+--+--+--+--+--+--+-   vd->vdisk_bsize
	 *          :  :                 :  :
	 *         >:==:< delta          :  :
	 *          :  :                 :  :
	 *  --+-----+-----+-----+-----+-----+-----+-----+--  physical disk:
	 *    |     |YY:YY|YYYYY|YYYYY|YY:YY|     |     |  } block size is
	 *  --+-----+-----+-----+-----+-----+-----+-----+--  vd->backend_bsize
	 *          ^                       ^
	 *          |<--------------------->|
	 *          |          plen
	 *         pblk
	 */
	/* END CSTYLED */
	pblk = (vblk * vd->vdisk_bsize) / vd->backend_bsize;
	delta = (vblk * vd->vdisk_bsize) - (pblk * vd->backend_bsize);
	pnblk = ((delta + vlen - 1) / vd->backend_bsize) + 1;
	plen = pnblk * vd->backend_bsize;

	PR2("vblk %lx:pblk %lx: vlen %ld:plen %ld", vblk, pblk, vlen, plen);

	buf = kmem_zalloc(plen, KM_SLEEP);
	rv = vd_do_scsi_rdwr(vd, operation, (caddr_t)buf, pblk, plen);
	bcopy(buf + delta, data, vlen);

	kmem_free(buf, plen);

	return (rv);
}
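/*
 * For example (illustrative numbers): reading vlen = 1024 bytes at
 * vblk = 5 from a vdisk with vdisk_bsize = 512 on top of a CD/DVD
 * backend with backend_bsize = 2048 gives pblk = (5 * 512) / 2048 = 1,
 * delta = 2560 - 2048 = 512, pnblk = ((512 + 1024 - 1) / 2048) + 1 = 1
 * and plen = 2048; the 1024 requested bytes are then copied out of the
 * physical block starting at offset delta.
 */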
/*
 * Function:
 *	vd_slice_flabel_read
 *
 * Description:
 *	This function simulates a read operation from the fake label of
 *	a single-slice disk.
 *
 * Parameters:
 *	vd		- single-slice disk to read from
 *	data		- buffer where data should be read to
 *	offset		- offset in bytes where the read should start
 *	length		- number of bytes to read
 *
 * Return Code:
 *	n >= 0		- success, n indicates the number of bytes read
 *	-1		- error
 */
static ssize_t
vd_slice_flabel_read(vd_t *vd, caddr_t data, size_t offset, size_t length)
{
	size_t n = 0;
	uint_t limit = vd->flabel_limit * vd->vdisk_bsize;

	ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE);
	ASSERT(vd->flabel != NULL);

	/* if offset is past the fake label limit there's nothing to read */
	if (offset >= limit)
		return (0);

	/* data with offset 0 to flabel_size are read from flabel */
	if (offset < vd->flabel_size) {

		if (offset + length <= vd->flabel_size) {
			bcopy(vd->flabel + offset, data, length);
			return (length);
		}

		n = vd->flabel_size - offset;
		bcopy(vd->flabel + offset, data, n);
		data += n;
	}

	/* data with offset from flabel_size to flabel_limit are all zeros */
	if (offset + length <= limit) {
		bzero(data, length - n);
		return (length);
	}

	bzero(data, limit - offset - n);
	return (limit - offset);
}

/*
 * Function:
 *	vd_slice_flabel_write
 *
 * Description:
 *	This function simulates a write operation to the fake label of
 *	a single-slice disk. Write operations are actually faked and return
 *	success although the label is never changed. This is mostly to
 *	simulate a successful label update.
 *
 * Parameters:
 *	vd		- single-slice disk to write to
 *	data		- buffer where data should be written from
 *	offset		- offset in bytes where the write should start
 *	length		- number of bytes to write
 *
 * Return Code:
 *	n >= 0		- success, n indicates the number of bytes written
 *	-1		- error
 */
static ssize_t
vd_slice_flabel_write(vd_t *vd, caddr_t data, size_t offset, size_t length)
{
	uint_t limit = vd->flabel_limit * vd->vdisk_bsize;
	struct dk_label *label;
	struct dk_geom geom;
	struct extvtoc vtoc;

	ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE);
	ASSERT(vd->flabel != NULL);

	if (offset >= limit)
		return (0);

	/*
	 * If this is a request to overwrite the VTOC disk label, check that
	 * the new label is similar to the previous one and return that the
	 * write was successful, but note that nothing is actually overwritten.
	 */
	if (vd->vdisk_label == VD_DISK_LABEL_VTOC &&
	    offset == 0 && length == vd->vdisk_bsize) {
		label = (void *)data;

		/* check that this is a valid label */
		if (label->dkl_magic != DKL_MAGIC ||
		    label->dkl_cksum != vd_lbl2cksum(label))
			return (-1);

		/* check the vtoc and geometry */
		vd_label_to_vtocgeom(label, &vtoc, &geom);
		if (vd_slice_geom_isvalid(vd, &geom) &&
		    vd_slice_vtoc_isvalid(vd, &vtoc))
			return (length);
	}

	/* fail any other write */
	return (-1);
}
/*
 * Function:
 *	vd_slice_fake_rdwr
 *
 * Description:
 *	This function simulates a raw read or write operation to a
 *	single-slice disk. It only handles the faked part of the operation,
 *	i.e. I/Os to blocks which have no mapping with the vdisk backend
 *	(I/Os to the beginning and to the end of the vdisk).
 *
 *	The function returns 0 if the operation is completed and has been
 *	entirely handled as a fake read or write. In that case, lengthp
 *	points to the number of bytes not read or written. Values returned
 *	by datap and blkp are undefined.
 *
 *	If the fake operation has succeeded but the read or write is not
 *	complete (i.e. the read/write operation extends beyond the blocks
 *	we fake) then the function returns EAGAIN and the datap, blkp and
 *	lengthp pointers point to the parameters for completing the
 *	operation.
 *
 *	In case of an error, for example if the slice is empty or parameters
 *	are invalid, then the function returns a non-zero value different
 *	from EAGAIN. In that case, the returned values of datap, blkp and
 *	lengthp are undefined.
 *
 * Parameters:
 *	vd		- single-slice disk on which the operation
 *			  is performed
 *	slice		- slice on which the operation is performed,
 *			  VD_SLICE_NONE indicates that the operation
 *			  is done using an absolute disk offset.
 *	operation	- operation to execute: read (VD_OP_BREAD) or
 *			  write (VD_OP_BWRITE).
 *	datap		- pointer to the buffer where data are read to
 *			  or written from. Return the pointer where remaining
 *			  data have to be read to or written from.
 *	blkp		- pointer to the starting block for the operation.
 *			  Return the starting block relative to the vdisk
 *			  backend for the remaining operation.
 *	lengthp		- pointer to the number of bytes to read or write.
 *			  This should be a multiple of vdisk_bsize. Return the
 *			  remaining number of bytes to read or write.
 *
 * Return Code:
 *	0		- read/write operation is completed
 *	EAGAIN		- read/write operation is not completed
 *	other values	- error
 */
static int
vd_slice_fake_rdwr(vd_t *vd, int slice, int operation, caddr_t *datap,
    size_t *blkp, size_t *lengthp)
{
	struct dk_label *label;
	caddr_t data;
	size_t blk, length, csize;
	size_t ablk, asize, aoff, alen;
	ssize_t n;
	int sec, status;
	size_t bsize = vd->vdisk_bsize;

	ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE);
	ASSERT(slice != 0);

	data = *datap;
	blk = *blkp;
	length = *lengthp;

	/*
	 * If this is not a raw I/O or an I/O from a full disk slice then
	 * this is an I/O to/from an empty slice.
	 */
1764 */ 1765 if (slice != VD_SLICE_NONE && 1766 (slice != VD_ENTIRE_DISK_SLICE || 1767 vd->vdisk_label != VD_DISK_LABEL_VTOC) && 1768 (slice != VD_EFI_WD_SLICE || 1769 vd->vdisk_label != VD_DISK_LABEL_EFI)) { 1770 return (EIO); 1771 } 1772 1773 if (length % bsize != 0) 1774 return (EINVAL); 1775 1776 /* handle any I/O with the fake label */ 1777 if (operation == VD_OP_BWRITE) 1778 n = vd_slice_flabel_write(vd, data, blk * bsize, length); 1779 else 1780 n = vd_slice_flabel_read(vd, data, blk * bsize, length); 1781 1782 if (n == -1) 1783 return (EINVAL); 1784 1785 ASSERT(n % bsize == 0); 1786 1787 /* adjust I/O arguments */ 1788 data += n; 1789 blk += n / bsize; 1790 length -= n; 1791 1792 /* check if there's something else to process */ 1793 if (length == 0) { 1794 status = 0; 1795 goto done; 1796 } 1797 1798 if (vd->vdisk_label == VD_DISK_LABEL_VTOC && 1799 slice == VD_ENTIRE_DISK_SLICE) { 1800 status = EAGAIN; 1801 goto done; 1802 } 1803 1804 if (vd->vdisk_label == VD_DISK_LABEL_EFI) { 1805 asize = EFI_MIN_RESV_SIZE + (EFI_MIN_ARRAY_SIZE / bsize) + 1; 1806 ablk = vd->vdisk_size - asize; 1807 } else { 1808 ASSERT(vd->vdisk_label == VD_DISK_LABEL_VTOC); 1809 ASSERT(vd->dk_geom.dkg_apc == 0); 1810 1811 csize = vd->dk_geom.dkg_nhead * vd->dk_geom.dkg_nsect; 1812 ablk = vd->dk_geom.dkg_ncyl * csize; 1813 asize = vd->dk_geom.dkg_acyl * csize; 1814 } 1815 1816 alen = length / bsize; 1817 aoff = blk; 1818 1819 /* if we have reached the last block then the I/O is completed */ 1820 if (aoff == ablk + asize) { 1821 status = 0; 1822 goto done; 1823 } 1824 1825 /* if we are past the last block then return an error */ 1826 if (aoff > ablk + asize) 1827 return (EIO); 1828 1829 /* check if there is any I/O to end of the disk */ 1830 if (aoff + alen < ablk) { 1831 status = EAGAIN; 1832 goto done; 1833 } 1834 1835 /* we don't allow any write to the end of the disk */ 1836 if (operation == VD_OP_BWRITE) 1837 return (EIO); 1838 1839 if (aoff < ablk) { 1840 alen -= (ablk - aoff); 1841 aoff = ablk; 1842 } 1843 1844 if (aoff + alen > ablk + asize) { 1845 alen = ablk + asize - aoff; 1846 } 1847 1848 alen *= bsize; 1849 1850 if (operation == VD_OP_BREAD) { 1851 bzero(data + (aoff - blk) * bsize, alen); 1852 1853 if (vd->vdisk_label == VD_DISK_LABEL_VTOC) { 1854 /* check if we read backup labels */ 1855 label = VD_LABEL_VTOC(vd); 1856 ablk += (label->dkl_acyl - 1) * csize + 1857 (label->dkl_nhead - 1) * label->dkl_nsect; 1858 1859 for (sec = 1; (sec < 5 * 2 + 1); sec += 2) { 1860 1861 if (ablk + sec >= blk && 1862 ablk + sec < blk + (length / bsize)) { 1863 bcopy(label, data + 1864 (ablk + sec - blk) * bsize, 1865 sizeof (struct dk_label)); 1866 } 1867 } 1868 } 1869 } 1870 1871 length -= alen; 1872 1873 status = (length == 0)? 0: EAGAIN; 1874 1875 done: 1876 ASSERT(length == 0 || blk >= vd->flabel_limit); 1877 1878 /* 1879 * Return the parameters for the remaining I/O. The starting block is 1880 * adjusted so that it is relative to the vdisk backend. 
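 * For example (illustrative numbers only): with flabel_limit = 34, a
 * remaining I/O starting at virtual block 34 is returned to the caller
 * with *blkp = 0, i.e. the first block of the backend.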
1881 */ 1882 *datap = data; 1883 *blkp = blk - vd->flabel_limit; 1884 *lengthp = length; 1885 1886 return (status); 1887 } 1888 1889 static int 1890 vd_flush_write(vd_t *vd) 1891 { 1892 int status, rval; 1893 1894 if (vd->file) { 1895 status = VOP_FSYNC(vd->file_vnode, FSYNC, kcred, NULL); 1896 } else { 1897 status = ldi_ioctl(vd->ldi_handle[0], DKIOCFLUSHWRITECACHE, 1898 NULL, vd->open_flags | FKIOCTL, kcred, &rval); 1899 } 1900 1901 return (status); 1902 } 1903 1904 static void 1905 vd_bio_task(void *arg) 1906 { 1907 struct buf *buf = (struct buf *)arg; 1908 vd_task_t *task = (vd_task_t *)buf->b_private; 1909 vd_t *vd = task->vd; 1910 ssize_t resid; 1911 int status; 1912 1913 ASSERT(vd->vdisk_bsize == DEV_BSIZE); 1914 1915 if (vd->zvol) { 1916 1917 status = ldi_strategy(vd->ldi_handle[0], buf); 1918 1919 } else { 1920 1921 ASSERT(vd->file); 1922 1923 status = vn_rdwr((buf->b_flags & B_READ)? UIO_READ : UIO_WRITE, 1924 vd->file_vnode, buf->b_un.b_addr, buf->b_bcount, 1925 buf->b_lblkno * DEV_BSIZE, UIO_SYSSPACE, 0, 1926 RLIM64_INFINITY, kcred, &resid); 1927 1928 if (status == 0) { 1929 buf->b_resid = resid; 1930 biodone(buf); 1931 return; 1932 } 1933 } 1934 1935 if (status != 0) { 1936 bioerror(buf, status); 1937 biodone(buf); 1938 } 1939 } 1940 1941 /* 1942 * We define our own biodone function so that buffers used for 1943 * asynchronous writes are not released when biodone() is called. 1944 */ 1945 static int 1946 vd_biodone(struct buf *bp) 1947 { 1948 ASSERT((bp->b_flags & B_DONE) == 0); 1949 ASSERT(SEMA_HELD(&bp->b_sem)); 1950 1951 bp->b_flags |= B_DONE; 1952 sema_v(&bp->b_io); 1953 1954 return (0); 1955 } 1956 1957 /* 1958 * Return Values 1959 * EINPROGRESS - operation was successfully started 1960 * EIO - encountered LDC (aka. task error) 1961 * 0 - operation completed successfully 1962 * 1963 * Side Effect 1964 * sets request->status = <disk operation status> 1965 */ 1966 static int 1967 vd_start_bio(vd_task_t *task) 1968 { 1969 int rv, status = 0; 1970 vd_t *vd = task->vd; 1971 vd_dring_payload_t *request = task->request; 1972 struct buf *buf = &task->buf; 1973 uint8_t mtype; 1974 int slice; 1975 char *bufaddr = 0; 1976 size_t buflen; 1977 size_t offset, length, nbytes; 1978 1979 ASSERT(vd != NULL); 1980 ASSERT(request != NULL); 1981 1982 slice = request->slice; 1983 1984 ASSERT(slice == VD_SLICE_NONE || slice < vd->nslices); 1985 ASSERT((request->operation == VD_OP_BREAD) || 1986 (request->operation == VD_OP_BWRITE)); 1987 1988 if (request->nbytes == 0) { 1989 /* no service for trivial requests */ 1990 request->status = EINVAL; 1991 return (0); 1992 } 1993 1994 PR1("%s %lu bytes at block %lu", 1995 (request->operation == VD_OP_BREAD) ? "Read" : "Write", 1996 request->nbytes, request->addr); 1997 1998 /* 1999 * We have to check the open flags because the functions processing 2000 * the read/write request will not do it. 2001 */ 2002 if (request->operation == VD_OP_BWRITE && !(vd->open_flags & FWRITE)) { 2003 PR0("write fails because backend is opened read-only"); 2004 request->nbytes = 0; 2005 request->status = EROFS; 2006 return (0); 2007 } 2008 2009 mtype = (&vd->inband_task == task) ? LDC_SHADOW_MAP : LDC_DIRECT_MAP; 2010 2011 /* Map memory exported by client */ 2012 status = ldc_mem_map(task->mhdl, request->cookie, request->ncookies, 2013 mtype, (request->operation == VD_OP_BREAD) ? 
LDC_MEM_W : LDC_MEM_R,
2014 &bufaddr, NULL);
2015 if (status != 0) {
2016 PR0("ldc_mem_map() returned err %d ", status);
2017 return (EIO);
2018 }
2019
2020 /*
2021 * The buffer size has to be 8-byte aligned, so the client should have
2022 * sent a buffer whose size is rounded up to the next 8-byte multiple.
2023 */
2024 buflen = P2ROUNDUP(request->nbytes, 8);
2025
2026 status = ldc_mem_acquire(task->mhdl, 0, buflen);
2027 if (status != 0) {
2028 (void) ldc_mem_unmap(task->mhdl);
2029 PR0("ldc_mem_acquire() returned err %d ", status);
2030 return (EIO);
2031 }
2032
2033 offset = request->addr;
2034 nbytes = request->nbytes;
2035 length = nbytes;
2036
2037 /* default number of bytes returned by the I/O */
2038 request->nbytes = 0;
2039
2040 if (vd->vdisk_type == VD_DISK_TYPE_SLICE) {
2041
2042 if (slice != 0) {
2043 /* handle any fake I/O */
2044 rv = vd_slice_fake_rdwr(vd, slice, request->operation,
2045 &bufaddr, &offset, &length);
2046
2047 /* record the number of bytes from the fake I/O */
2048 request->nbytes = nbytes - length;
2049
2050 if (rv == 0) {
2051 request->status = 0;
2052 goto io_done;
2053 }
2054
2055 if (rv != EAGAIN) {
2056 request->nbytes = 0;
2057 request->status = EIO;
2058 goto io_done;
2059 }
2060
2061 /*
2062 * A return of EAGAIN means that there are still
2063 * data to read or write.
2064 */
2065 ASSERT(length != 0);
2066
2067 /*
2068 * We need to continue the I/O from the slice backend to
2069 * complete the request. The variables bufaddr, offset
2070 * and length have been adjusted to have the right
2071 * information to do the remaining I/O from the backend.
2072 * The backend is entirely mapped to slice 0 so we just
2073 * have to complete the I/O from that slice.
2074 */
2075 slice = 0;
2076 }
2077
2078 } else if (vd->volume || vd->file) {
2079
2080 rv = vd_dskimg_io_params(vd, slice, &offset, &length);
2081 if (rv != 0) {
2082 request->status = (rv == ENODATA)? 0: EIO;
2083 goto io_done;
2084 }
2085 slice = 0;
2086
2087 } else if (slice == VD_SLICE_NONE) {
2088
2089 /*
2090 * This is not a disk image so it is a real disk. We
2091 * assume that the underlying device driver supports
2092 * USCSICMD ioctls. This is the case for all SCSI devices
2093 * (sd, ssd...).
2094 *
2095 * In the future if we have non-SCSI disks we would need
2096 * to invoke the appropriate function to do I/O using an
2097 * absolute disk offset (for example using DIOCTL_RWCMD
2098 * for IDE disks).
2099 */
2100 rv = vd_scsi_rdwr(vd, request->operation, bufaddr, offset,
2101 length);
2102 if (rv != 0) {
2103 request->status = EIO;
2104 } else {
2105 request->nbytes = length;
2106 request->status = 0;
2107 }
2108 goto io_done;
2109 }
2110
2111 /* Start the block I/O */
2112 bioinit(buf);
2113 buf->b_flags = B_BUSY;
2114 buf->b_bcount = length;
2115 buf->b_lblkno = offset;
2116 buf->b_bufsize = buflen;
2117 buf->b_edev = vd->dev[slice];
2118 buf->b_un.b_addr = bufaddr;
2119 buf->b_iodone = vd_biodone;
2120
2121 if (vd->file || vd->zvol) {
2122 /*
2123 * I/Os to a file are dispatched to an I/O queue, so that several
2124 * I/Os can be processed in parallel. We also do that for ZFS
2125 * volumes because the ZFS volume strategy() function will only
2126 * return after the I/O is completed (instead of just starting
2127 * the I/O).
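 *
 * As a rough sketch of the dispatch model (vd_bio_task() is the queue
 * worker defined above, which issues the actual ldi_strategy() or
 * vn_rdwr() call):
 *
 *   vd_start_bio() --ddi_taskq_dispatch(vd->ioq)--> vd_bio_task()
 *
 * so several I/Os can be in flight on the I/O queue threads at once.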
2128 */
2129
2130 if (request->operation == VD_OP_BREAD) {
2131 buf->b_flags |= B_READ;
2132 } else {
2133 /*
2134 * For ZFS volumes and files, we do an asynchronous
2135 * write and we will wait for the completion of the
2136 * write in vd_complete_bio() by flushing the volume
2137 * or file.
2138 *
2139 * This is done for performance reasons, so that we can
2140 * group together several write requests into a single
2141 * flush operation.
2142 */
2143 buf->b_flags |= B_WRITE | B_ASYNC;
2144
2145 /*
2146 * We keep track of the write so that we can group
2147 * requests when flushing. The write queue has the
2148 * same number of slots as the dring so this prevents
2149 * the write queue from wrapping and overwriting
2150 * existing entries: if the write queue gets full
2151 * then that means that the dring is full so we stop
2152 * receiving new requests until an existing request
2153 * is processed, removed from the write queue and
2154 * then from the dring.
2155 */
2156 task->write_index = vd->write_index;
2157 vd->write_queue[task->write_index] = buf;
2158 vd->write_index =
2159 VD_WRITE_INDEX_NEXT(vd, vd->write_index);
2160 }
2161
2162 buf->b_private = task;
2163
2164 ASSERT(vd->ioq != NULL);
2165
2166 request->status = 0;
2167 (void) ddi_taskq_dispatch(task->vd->ioq, vd_bio_task, buf,
2168 DDI_SLEEP);
2169
2170 } else {
2171
2172 if (request->operation == VD_OP_BREAD) {
2173 buf->b_flags |= B_READ;
2174 } else {
2175 buf->b_flags |= B_WRITE;
2176 }
2177
2178 /* convert VIO block number to buf block number */
2179 buf->b_lblkno = offset << vd->vio_bshift;
2180
2181 request->status = ldi_strategy(vd->ldi_handle[slice], buf);
2182 }
2183
2184 /*
2185 * This is to indicate to the caller that the request
2186 * needs to be finished by vd_complete_bio() by calling
2187 * biowait() there and waiting for that to return before
2188 * triggering the notification of the vDisk client.
2189 *
2190 * This is necessary when writing to real disks as
2191 * otherwise calls to ldi_strategy() would be serialized
2192 * behind the calls to biowait() and performance would
2193 * suffer.
2194 */
2195 if (request->status == 0)
2196 return (EINPROGRESS);
2197
2198 biofini(buf);
2199
2200 io_done:
2201 /* Clean up after error or completion */
2202 rv = ldc_mem_release(task->mhdl, 0, buflen);
2203 if (rv) {
2204 PR0("ldc_mem_release() returned err %d ", rv);
2205 status = EIO;
2206 }
2207 rv = ldc_mem_unmap(task->mhdl);
2208 if (rv) {
2209 PR0("ldc_mem_unmap() returned err %d ", rv);
2210 status = EIO;
2211 }
2212
2213 return (status);
2214 }
2215
2216 /*
2217 * This function should only be called from vd_notify to ensure that requests
2218 * are responded to in the order that they are received.
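 *
 * A minimal usage sketch (error handling elided; msg is assumed to be
 * a fully built vio_msg_t):
 *
 *   if (send_msg(vd->ldc_handle, msg, msglen) != 0)
 *           vd_need_reset(vd, B_TRUE);
 *
 * ldc_write() can return EWOULDBLOCK while the channel is congested,
 * so the loop below waits vds_ldc_delay microseconds between attempts.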
2219 */ 2220 static int 2221 send_msg(ldc_handle_t ldc_handle, void *msg, size_t msglen) 2222 { 2223 int status; 2224 size_t nbytes; 2225 2226 do { 2227 nbytes = msglen; 2228 status = ldc_write(ldc_handle, msg, &nbytes); 2229 if (status != EWOULDBLOCK) 2230 break; 2231 drv_usecwait(vds_ldc_delay); 2232 } while (status == EWOULDBLOCK); 2233 2234 if (status != 0) { 2235 if (status != ECONNRESET) 2236 PR0("ldc_write() returned errno %d", status); 2237 return (status); 2238 } else if (nbytes != msglen) { 2239 PR0("ldc_write() performed only partial write"); 2240 return (EIO); 2241 } 2242 2243 PR1("SENT %lu bytes", msglen); 2244 return (0); 2245 } 2246 2247 static void 2248 vd_need_reset(vd_t *vd, boolean_t reset_ldc) 2249 { 2250 mutex_enter(&vd->lock); 2251 vd->reset_state = B_TRUE; 2252 vd->reset_ldc = reset_ldc; 2253 mutex_exit(&vd->lock); 2254 } 2255 2256 /* 2257 * Reset the state of the connection with a client, if needed; reset the LDC 2258 * transport as well, if needed. This function should only be called from the 2259 * "vd_recv_msg", as it waits for tasks - otherwise a deadlock can occur. 2260 */ 2261 static void 2262 vd_reset_if_needed(vd_t *vd) 2263 { 2264 int status = 0; 2265 2266 mutex_enter(&vd->lock); 2267 if (!vd->reset_state) { 2268 ASSERT(!vd->reset_ldc); 2269 mutex_exit(&vd->lock); 2270 return; 2271 } 2272 mutex_exit(&vd->lock); 2273 2274 PR0("Resetting connection state with %s", VD_CLIENT(vd)); 2275 2276 /* 2277 * Let any asynchronous I/O complete before possibly pulling the rug 2278 * out from under it; defer checking vd->reset_ldc, as one of the 2279 * asynchronous tasks might set it 2280 */ 2281 if (vd->ioq != NULL) 2282 ddi_taskq_wait(vd->ioq); 2283 ddi_taskq_wait(vd->completionq); 2284 2285 status = vd_flush_write(vd); 2286 if (status) { 2287 PR0("flushwrite returned error %d", status); 2288 } 2289 2290 if ((vd->initialized & VD_DRING) && 2291 ((status = ldc_mem_dring_unmap(vd->dring_handle)) != 0)) 2292 PR0("ldc_mem_dring_unmap() returned errno %d", status); 2293 2294 vd_free_dring_task(vd); 2295 2296 /* Free the staging buffer for msgs */ 2297 if (vd->vio_msgp != NULL) { 2298 kmem_free(vd->vio_msgp, vd->max_msglen); 2299 vd->vio_msgp = NULL; 2300 } 2301 2302 /* Free the inband message buffer */ 2303 if (vd->inband_task.msg != NULL) { 2304 kmem_free(vd->inband_task.msg, vd->max_msglen); 2305 vd->inband_task.msg = NULL; 2306 } 2307 2308 mutex_enter(&vd->lock); 2309 2310 if (vd->reset_ldc) 2311 PR0("taking down LDC channel"); 2312 if (vd->reset_ldc && ((status = ldc_down(vd->ldc_handle)) != 0)) 2313 PR0("ldc_down() returned errno %d", status); 2314 2315 /* Reset exclusive access rights */ 2316 vd_reset_access(vd); 2317 2318 vd->initialized &= ~(VD_SID | VD_SEQ_NUM | VD_DRING); 2319 vd->state = VD_STATE_INIT; 2320 vd->max_msglen = sizeof (vio_msg_t); /* baseline vio message size */ 2321 2322 /* Allocate the staging buffer */ 2323 vd->vio_msgp = kmem_alloc(vd->max_msglen, KM_SLEEP); 2324 2325 PR0("calling ldc_up\n"); 2326 (void) ldc_up(vd->ldc_handle); 2327 2328 vd->reset_state = B_FALSE; 2329 vd->reset_ldc = B_FALSE; 2330 2331 mutex_exit(&vd->lock); 2332 } 2333 2334 static void vd_recv_msg(void *arg); 2335 2336 static void 2337 vd_mark_in_reset(vd_t *vd) 2338 { 2339 int status; 2340 2341 PR0("vd_mark_in_reset: marking vd in reset\n"); 2342 2343 vd_need_reset(vd, B_FALSE); 2344 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, vd, DDI_SLEEP); 2345 if (status == DDI_FAILURE) { 2346 PR0("cannot schedule task to recv msg\n"); 2347 vd_need_reset(vd, B_TRUE); 2348 return; 
2349 }
2350 }
2351
2352 static int
2353 vd_mark_elem_done(vd_t *vd, int idx, int elem_status, int elem_nbytes)
2354 {
2355 boolean_t accepted;
2356 int status;
2357 on_trap_data_t otd;
2358 vd_dring_entry_t *elem = VD_DRING_ELEM(idx);
2359
2360 if (vd->reset_state)
2361 return (0);
2362
2363 /* Acquire the element */
2364 if ((status = VIO_DRING_ACQUIRE(&otd, vd->dring_mtype,
2365 vd->dring_handle, idx, idx)) != 0) {
2366 if (status == ECONNRESET) {
2367 vd_mark_in_reset(vd);
2368 return (0);
2369 } else {
2370 return (status);
2371 }
2372 }
2373
2374 /* Set the element's status and mark it done */
2375 accepted = (elem->hdr.dstate == VIO_DESC_ACCEPTED);
2376 if (accepted) {
2377 elem->payload.nbytes = elem_nbytes;
2378 elem->payload.status = elem_status;
2379 elem->hdr.dstate = VIO_DESC_DONE;
2380 } else {
2381 /* Perhaps client timed out waiting for I/O... */
2382 PR0("element %u no longer \"accepted\"", idx);
2383 VD_DUMP_DRING_ELEM(elem);
2384 }
2385 /* Release the element */
2386 if ((status = VIO_DRING_RELEASE(vd->dring_mtype,
2387 vd->dring_handle, idx, idx)) != 0) {
2388 if (status == ECONNRESET) {
2389 vd_mark_in_reset(vd);
2390 return (0);
2391 } else {
2392 PR0("VIO_DRING_RELEASE() returned errno %d",
2393 status);
2394 return (status);
2395 }
2396 }
2397
2398 return (accepted ? 0 : EINVAL);
2399 }
2400
2401 /*
2402 * Return Values
2403 * 0 - operation completed successfully
2404 * EIO - encountered LDC / task error
2405 *
2406 * Side Effect
2407 * sets request->status = <disk operation status>
2408 */
2409 static int
2410 vd_complete_bio(vd_task_t *task)
2411 {
2412 int status = 0;
2413 int rv = 0;
2414 vd_t *vd = task->vd;
2415 vd_dring_payload_t *request = task->request;
2416 struct buf *buf = &task->buf;
2417 int wid, nwrites;
2418
2419
2420 ASSERT(vd != NULL);
2421 ASSERT(request != NULL);
2422 ASSERT(task->msg != NULL);
2423 ASSERT(task->msglen >= sizeof (*task->msg));
2424
2425 if (buf->b_flags & B_DONE) {
2426 /*
2427 * If the I/O is already done then we don't call biowait()
2428 * because biowait() might already have been called when
2429 * flushing a previous asynchronous write. So we just
2430 * retrieve the status of the request.
2431 */
2432 request->status = geterror(buf);
2433 } else {
2434 /*
2435 * Wait for the I/O. For synchronous I/O, biowait() will return
2436 * when the I/O has completed. For an asynchronous write, it
2437 * will return when the write has been submitted to the backend,
2438 * but it may not have been committed.
2439 */
2440 request->status = biowait(buf);
2441 }
2442
2443 if (buf->b_flags & B_ASYNC) {
2444 /*
2445 * Asynchronous writes are used when writing to a file or a
2446 * ZFS volume. In that case the bio notification indicates
2447 * that the write has started. We have to flush the backend
2448 * to ensure that the write has been committed before marking
2449 * the request as completed.
2450 */
2451 ASSERT(task->request->operation == VD_OP_BWRITE);
2452
2453 wid = task->write_index;
2454
2455 /* check if the write has already been flushed */
2456 if (vd->write_queue[wid] != NULL) {
2457
2458 vd->write_queue[wid] = NULL;
2459 wid = VD_WRITE_INDEX_NEXT(vd, wid);
2460
2461 /*
2462 * Because flushing is time consuming, it is worth
2463 * waiting for any other writes so that they can be
2464 * included in this single flush request.
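 *
 * For example (hypothetical timing): if writes W1, W2 and W3 have all
 * been queued by the time W1 is waited on, a single vd_flush_write()
 * call commits all three writes instead of issuing three separate
 * cache flushes.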
2465 */
2466 if (vd_awflush & VD_AWFLUSH_GROUP) {
2467 nwrites = 1;
2468 while (vd->write_queue[wid] != NULL) {
2469 (void) biowait(vd->write_queue[wid]);
2470 vd->write_queue[wid] = NULL;
2471 wid = VD_WRITE_INDEX_NEXT(vd, wid);
2472 nwrites++;
2473 }
2474 DTRACE_PROBE2(flushgrp, vd_task_t *, task,
2475 int, nwrites);
2476 }
2477
2478 if (vd_awflush & VD_AWFLUSH_IMMEDIATE) {
2479 request->status = vd_flush_write(vd);
2480 } else if (vd_awflush & VD_AWFLUSH_DEFER) {
2481 (void) taskq_dispatch(system_taskq,
2482 (void (*)(void *))vd_flush_write, vd,
2483 DDI_SLEEP);
2484 request->status = 0;
2485 }
2486 }
2487 }
2488
2489 /* Update the number of bytes read/written */
2490 request->nbytes += buf->b_bcount - buf->b_resid;
2491
2492 /* Release the buffer */
2493 if (!vd->reset_state)
2494 status = ldc_mem_release(task->mhdl, 0, buf->b_bufsize);
2495 if (status) {
2496 PR0("ldc_mem_release() returned errno %d copying to "
2497 "client", status);
2498 if (status == ECONNRESET) {
2499 vd_mark_in_reset(vd);
2500 }
2501 rv = EIO;
2502 }
2503
2504 /* Unmap the memory, even if in reset */
2505 status = ldc_mem_unmap(task->mhdl);
2506 if (status) {
2507 PR0("ldc_mem_unmap() returned errno %d copying to client",
2508 status);
2509 if (status == ECONNRESET) {
2510 vd_mark_in_reset(vd);
2511 }
2512 rv = EIO;
2513 }
2514
2515 biofini(buf);
2516
2517 return (rv);
2518 }
2519
2520 /*
2521 * Description:
2522 * This function is called by the two completion functions run from a
2523 * taskq [ vd_complete_notify() and vd_serial_notify() ] to send the
2524 * message to the client.
2525 *
2526 * Parameters:
2527 * arg - opaque pointer to structure containing task to be completed
2528 *
2529 * Return Values
2530 * None
2531 */
2532 static void
2533 vd_notify(vd_task_t *task)
2534 {
2535 int status;
2536
2537 ASSERT(task != NULL);
2538 ASSERT(task->vd != NULL);
2539
2540 /*
2541 * Send the "ack" or "nack" back to the client; if sending the message
2542 * via LDC fails, arrange to reset both the connection state and LDC
2543 * itself
2544 */
2545 PR2("Sending %s",
2546 (task->msg->tag.vio_subtype == VIO_SUBTYPE_ACK) ?
"ACK" : "NACK"); 2547 2548 status = send_msg(task->vd->ldc_handle, task->msg, task->msglen); 2549 switch (status) { 2550 case 0: 2551 break; 2552 case ECONNRESET: 2553 vd_mark_in_reset(task->vd); 2554 break; 2555 default: 2556 PR0("initiating full reset"); 2557 vd_need_reset(task->vd, B_TRUE); 2558 break; 2559 } 2560 2561 DTRACE_PROBE1(task__end, vd_task_t *, task); 2562 } 2563 2564 /* 2565 * Description: 2566 * Mark the Dring entry as Done and (if necessary) send an ACK/NACK to 2567 * the vDisk client 2568 * 2569 * Parameters: 2570 * task - structure containing the request sent from client 2571 * 2572 * Return Values 2573 * None 2574 */ 2575 static void 2576 vd_complete_notify(vd_task_t *task) 2577 { 2578 int status = 0; 2579 vd_t *vd = task->vd; 2580 vd_dring_payload_t *request = task->request; 2581 2582 /* Update the dring element for a dring client */ 2583 if (!vd->reset_state && (vd->xfer_mode == VIO_DRING_MODE_V1_0)) { 2584 status = vd_mark_elem_done(vd, task->index, 2585 request->status, request->nbytes); 2586 if (status == ECONNRESET) 2587 vd_mark_in_reset(vd); 2588 else if (status == EACCES) 2589 vd_need_reset(vd, B_TRUE); 2590 } 2591 2592 /* 2593 * If a transport error occurred while marking the element done or 2594 * previously while executing the task, arrange to "nack" the message 2595 * when the final task in the descriptor element range completes 2596 */ 2597 if ((status != 0) || (task->status != 0)) 2598 task->msg->tag.vio_subtype = VIO_SUBTYPE_NACK; 2599 2600 /* 2601 * Only the final task for a range of elements will respond to and 2602 * free the message 2603 */ 2604 if (task->type == VD_NONFINAL_RANGE_TASK) { 2605 return; 2606 } 2607 2608 /* 2609 * We should only send an ACK/NACK here if we are not currently in 2610 * reset as, depending on how we reset, the dring may have been 2611 * blown away and we don't want to ACK/NACK a message that isn't 2612 * there. 2613 */ 2614 if (!vd->reset_state) 2615 vd_notify(task); 2616 } 2617 2618 /* 2619 * Description: 2620 * This is the basic completion function called to handle inband data 2621 * requests and handshake messages. All it needs to do is trigger a 2622 * message to the client that the request is completed. 
2623 * 2624 * Parameters: 2625 * arg - opaque pointer to structure containing task to be completed 2626 * 2627 * Return Values 2628 * None 2629 */ 2630 static void 2631 vd_serial_notify(void *arg) 2632 { 2633 vd_task_t *task = (vd_task_t *)arg; 2634 2635 ASSERT(task != NULL); 2636 vd_notify(task); 2637 } 2638 2639 /* ARGSUSED */ 2640 static int 2641 vd_geom2dk_geom(void *vd_buf, size_t vd_buf_len, void *ioctl_arg) 2642 { 2643 VD_GEOM2DK_GEOM((vd_geom_t *)vd_buf, (struct dk_geom *)ioctl_arg); 2644 return (0); 2645 } 2646 2647 /* ARGSUSED */ 2648 static int 2649 vd_vtoc2vtoc(void *vd_buf, size_t vd_buf_len, void *ioctl_arg) 2650 { 2651 VD_VTOC2VTOC((vd_vtoc_t *)vd_buf, (struct extvtoc *)ioctl_arg); 2652 return (0); 2653 } 2654 2655 static void 2656 dk_geom2vd_geom(void *ioctl_arg, void *vd_buf) 2657 { 2658 DK_GEOM2VD_GEOM((struct dk_geom *)ioctl_arg, (vd_geom_t *)vd_buf); 2659 } 2660 2661 static void 2662 vtoc2vd_vtoc(void *ioctl_arg, void *vd_buf) 2663 { 2664 VTOC2VD_VTOC((struct extvtoc *)ioctl_arg, (vd_vtoc_t *)vd_buf); 2665 } 2666 2667 static int 2668 vd_get_efi_in(void *vd_buf, size_t vd_buf_len, void *ioctl_arg) 2669 { 2670 vd_efi_t *vd_efi = (vd_efi_t *)vd_buf; 2671 dk_efi_t *dk_efi = (dk_efi_t *)ioctl_arg; 2672 size_t data_len; 2673 2674 data_len = vd_buf_len - (sizeof (vd_efi_t) - sizeof (uint64_t)); 2675 if (vd_efi->length > data_len) 2676 return (EINVAL); 2677 2678 dk_efi->dki_lba = vd_efi->lba; 2679 dk_efi->dki_length = vd_efi->length; 2680 dk_efi->dki_data = kmem_zalloc(vd_efi->length, KM_SLEEP); 2681 return (0); 2682 } 2683 2684 static void 2685 vd_get_efi_out(void *ioctl_arg, void *vd_buf) 2686 { 2687 int len; 2688 vd_efi_t *vd_efi = (vd_efi_t *)vd_buf; 2689 dk_efi_t *dk_efi = (dk_efi_t *)ioctl_arg; 2690 2691 len = vd_efi->length; 2692 DK_EFI2VD_EFI(dk_efi, vd_efi); 2693 kmem_free(dk_efi->dki_data, len); 2694 } 2695 2696 static int 2697 vd_set_efi_in(void *vd_buf, size_t vd_buf_len, void *ioctl_arg) 2698 { 2699 vd_efi_t *vd_efi = (vd_efi_t *)vd_buf; 2700 dk_efi_t *dk_efi = (dk_efi_t *)ioctl_arg; 2701 size_t data_len; 2702 2703 data_len = vd_buf_len - (sizeof (vd_efi_t) - sizeof (uint64_t)); 2704 if (vd_efi->length > data_len) 2705 return (EINVAL); 2706 2707 dk_efi->dki_data = kmem_alloc(vd_efi->length, KM_SLEEP); 2708 VD_EFI2DK_EFI(vd_efi, dk_efi); 2709 return (0); 2710 } 2711 2712 static void 2713 vd_set_efi_out(void *ioctl_arg, void *vd_buf) 2714 { 2715 vd_efi_t *vd_efi = (vd_efi_t *)vd_buf; 2716 dk_efi_t *dk_efi = (dk_efi_t *)ioctl_arg; 2717 2718 kmem_free(dk_efi->dki_data, vd_efi->length); 2719 } 2720 2721 static int 2722 vd_scsicmd_in(void *vd_buf, size_t vd_buf_len, void *ioctl_arg) 2723 { 2724 size_t vd_scsi_len; 2725 vd_scsi_t *vd_scsi = (vd_scsi_t *)vd_buf; 2726 struct uscsi_cmd *uscsi = (struct uscsi_cmd *)ioctl_arg; 2727 2728 /* check buffer size */ 2729 vd_scsi_len = VD_SCSI_SIZE; 2730 vd_scsi_len += P2ROUNDUP(vd_scsi->cdb_len, sizeof (uint64_t)); 2731 vd_scsi_len += P2ROUNDUP(vd_scsi->sense_len, sizeof (uint64_t)); 2732 vd_scsi_len += P2ROUNDUP(vd_scsi->datain_len, sizeof (uint64_t)); 2733 vd_scsi_len += P2ROUNDUP(vd_scsi->dataout_len, sizeof (uint64_t)); 2734 2735 ASSERT(vd_scsi_len % sizeof (uint64_t) == 0); 2736 2737 if (vd_buf_len < vd_scsi_len) 2738 return (EINVAL); 2739 2740 /* set flags */ 2741 uscsi->uscsi_flags = vd_scsi_debug; 2742 2743 if (vd_scsi->options & VD_SCSI_OPT_NORETRY) { 2744 uscsi->uscsi_flags |= USCSI_ISOLATE; 2745 uscsi->uscsi_flags |= USCSI_DIAGNOSE; 2746 } 2747 2748 /* task attribute */ 2749 switch (vd_scsi->task_attribute) { 2750 case 
VD_SCSI_TASK_ACA: 2751 uscsi->uscsi_flags |= USCSI_HEAD; 2752 break; 2753 case VD_SCSI_TASK_HQUEUE: 2754 uscsi->uscsi_flags |= USCSI_HTAG; 2755 break; 2756 case VD_SCSI_TASK_ORDERED: 2757 uscsi->uscsi_flags |= USCSI_OTAG; 2758 break; 2759 default: 2760 uscsi->uscsi_flags |= USCSI_NOTAG; 2761 break; 2762 } 2763 2764 /* timeout */ 2765 uscsi->uscsi_timeout = vd_scsi->timeout; 2766 2767 /* cdb data */ 2768 uscsi->uscsi_cdb = (caddr_t)VD_SCSI_DATA_CDB(vd_scsi); 2769 uscsi->uscsi_cdblen = vd_scsi->cdb_len; 2770 2771 /* sense buffer */ 2772 if (vd_scsi->sense_len != 0) { 2773 uscsi->uscsi_flags |= USCSI_RQENABLE; 2774 uscsi->uscsi_rqbuf = (caddr_t)VD_SCSI_DATA_SENSE(vd_scsi); 2775 uscsi->uscsi_rqlen = vd_scsi->sense_len; 2776 } 2777 2778 if (vd_scsi->datain_len != 0 && vd_scsi->dataout_len != 0) { 2779 /* uscsi does not support read/write request */ 2780 return (EINVAL); 2781 } 2782 2783 /* request data-in */ 2784 if (vd_scsi->datain_len != 0) { 2785 uscsi->uscsi_flags |= USCSI_READ; 2786 uscsi->uscsi_buflen = vd_scsi->datain_len; 2787 uscsi->uscsi_bufaddr = (char *)VD_SCSI_DATA_IN(vd_scsi); 2788 } 2789 2790 /* request data-out */ 2791 if (vd_scsi->dataout_len != 0) { 2792 uscsi->uscsi_buflen = vd_scsi->dataout_len; 2793 uscsi->uscsi_bufaddr = (char *)VD_SCSI_DATA_OUT(vd_scsi); 2794 } 2795 2796 return (0); 2797 } 2798 2799 static void 2800 vd_scsicmd_out(void *ioctl_arg, void *vd_buf) 2801 { 2802 vd_scsi_t *vd_scsi = (vd_scsi_t *)vd_buf; 2803 struct uscsi_cmd *uscsi = (struct uscsi_cmd *)ioctl_arg; 2804 2805 /* output fields */ 2806 vd_scsi->cmd_status = uscsi->uscsi_status; 2807 2808 /* sense data */ 2809 if ((uscsi->uscsi_flags & USCSI_RQENABLE) && 2810 (uscsi->uscsi_status == STATUS_CHECK || 2811 uscsi->uscsi_status == STATUS_TERMINATED)) { 2812 vd_scsi->sense_status = uscsi->uscsi_rqstatus; 2813 if (uscsi->uscsi_rqstatus == STATUS_GOOD) 2814 vd_scsi->sense_len -= uscsi->uscsi_rqresid; 2815 else 2816 vd_scsi->sense_len = 0; 2817 } else { 2818 vd_scsi->sense_len = 0; 2819 } 2820 2821 if (uscsi->uscsi_status != STATUS_GOOD) { 2822 vd_scsi->dataout_len = 0; 2823 vd_scsi->datain_len = 0; 2824 return; 2825 } 2826 2827 if (uscsi->uscsi_flags & USCSI_READ) { 2828 /* request data (read) */ 2829 vd_scsi->datain_len -= uscsi->uscsi_resid; 2830 vd_scsi->dataout_len = 0; 2831 } else { 2832 /* request data (write) */ 2833 vd_scsi->datain_len = 0; 2834 vd_scsi->dataout_len -= uscsi->uscsi_resid; 2835 } 2836 } 2837 2838 static ushort_t 2839 vd_lbl2cksum(struct dk_label *label) 2840 { 2841 int count; 2842 ushort_t sum, *sp; 2843 2844 count = (sizeof (struct dk_label)) / (sizeof (short)) - 1; 2845 sp = (ushort_t *)label; 2846 sum = 0; 2847 while (count--) { 2848 sum ^= *sp++; 2849 } 2850 2851 return (sum); 2852 } 2853 2854 /* 2855 * Copy information from a vtoc and dk_geom structures to a dk_label structure. 
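 *
 * Note that p_start is expressed in sectors while dkl_cylno is expressed
 * in cylinders, so the conversion divides by the cylinder size. For
 * example (illustrative geometry): with nhead = 16 and nsect = 63, a
 * partition starting at sector 2016 yields dkl_cylno = 2016 / (16 * 63),
 * i.e. cylinder 2.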
2856 */
2857 static void
2858 vd_vtocgeom_to_label(struct extvtoc *vtoc, struct dk_geom *geom,
2859 struct dk_label *label)
2860 {
2861 int i;
2862
2863 ASSERT(vtoc->v_nparts == V_NUMPAR);
2864 ASSERT(vtoc->v_sanity == VTOC_SANE);
2865
2866 bzero(label, sizeof (struct dk_label));
2867
2868 label->dkl_ncyl = geom->dkg_ncyl;
2869 label->dkl_acyl = geom->dkg_acyl;
2870 label->dkl_pcyl = geom->dkg_pcyl;
2871 label->dkl_nhead = geom->dkg_nhead;
2872 label->dkl_nsect = geom->dkg_nsect;
2873 label->dkl_intrlv = geom->dkg_intrlv;
2874 label->dkl_apc = geom->dkg_apc;
2875 label->dkl_rpm = geom->dkg_rpm;
2876 label->dkl_write_reinstruct = geom->dkg_write_reinstruct;
2877 label->dkl_read_reinstruct = geom->dkg_read_reinstruct;
2878
2879 label->dkl_vtoc.v_nparts = V_NUMPAR;
2880 label->dkl_vtoc.v_sanity = VTOC_SANE;
2881 label->dkl_vtoc.v_version = vtoc->v_version;
2882 for (i = 0; i < V_NUMPAR; i++) {
2883 label->dkl_vtoc.v_timestamp[i] = vtoc->timestamp[i];
2884 label->dkl_vtoc.v_part[i].p_tag = vtoc->v_part[i].p_tag;
2885 label->dkl_vtoc.v_part[i].p_flag = vtoc->v_part[i].p_flag;
2886 label->dkl_map[i].dkl_cylno = vtoc->v_part[i].p_start /
2887 (label->dkl_nhead * label->dkl_nsect);
2888 label->dkl_map[i].dkl_nblk = vtoc->v_part[i].p_size;
2889 }
2890
2891 /*
2892 * The bootinfo array cannot be copied with bcopy() because
2893 * elements are of type long in vtoc (so 64-bit) and of type
2894 * int in dk_vtoc (so 32-bit).
2895 */
2896 label->dkl_vtoc.v_bootinfo[0] = vtoc->v_bootinfo[0];
2897 label->dkl_vtoc.v_bootinfo[1] = vtoc->v_bootinfo[1];
2898 label->dkl_vtoc.v_bootinfo[2] = vtoc->v_bootinfo[2];
2899 bcopy(vtoc->v_asciilabel, label->dkl_asciilabel, LEN_DKL_ASCII);
2900 bcopy(vtoc->v_volume, label->dkl_vtoc.v_volume, LEN_DKL_VVOL);
2901
2902 /* re-compute checksum */
2903 label->dkl_magic = DKL_MAGIC;
2904 label->dkl_cksum = vd_lbl2cksum(label);
2905 }
2906
2907 /*
2908 * Copy information from a dk_label structure to vtoc and dk_geom structures.
2909 */
2910 static void
2911 vd_label_to_vtocgeom(struct dk_label *label, struct extvtoc *vtoc,
2912 struct dk_geom *geom)
2913 {
2914 int i;
2915
2916 bzero(vtoc, sizeof (struct extvtoc));
2917 bzero(geom, sizeof (struct dk_geom));
2918
2919 geom->dkg_ncyl = label->dkl_ncyl;
2920 geom->dkg_acyl = label->dkl_acyl;
2921 geom->dkg_nhead = label->dkl_nhead;
2922 geom->dkg_nsect = label->dkl_nsect;
2923 geom->dkg_intrlv = label->dkl_intrlv;
2924 geom->dkg_apc = label->dkl_apc;
2925 geom->dkg_rpm = label->dkl_rpm;
2926 geom->dkg_pcyl = label->dkl_pcyl;
2927 geom->dkg_write_reinstruct = label->dkl_write_reinstruct;
2928 geom->dkg_read_reinstruct = label->dkl_read_reinstruct;
2929
2930 vtoc->v_sanity = label->dkl_vtoc.v_sanity;
2931 vtoc->v_version = label->dkl_vtoc.v_version;
2932 vtoc->v_sectorsz = DEV_BSIZE;
2933 vtoc->v_nparts = label->dkl_vtoc.v_nparts;
2934
2935 for (i = 0; i < vtoc->v_nparts; i++) {
2936 vtoc->v_part[i].p_tag = label->dkl_vtoc.v_part[i].p_tag;
2937 vtoc->v_part[i].p_flag = label->dkl_vtoc.v_part[i].p_flag;
2938 vtoc->v_part[i].p_start = label->dkl_map[i].dkl_cylno *
2939 (label->dkl_nhead * label->dkl_nsect);
2940 vtoc->v_part[i].p_size = label->dkl_map[i].dkl_nblk;
2941 vtoc->timestamp[i] = label->dkl_vtoc.v_timestamp[i];
2942 }
2943
2944 /*
2945 * The bootinfo array cannot be copied with bcopy() because
2946 * elements are of type long in vtoc (so 64-bit) and of type
2947 * int in dk_vtoc (so 32-bit).
2948 */
2949 vtoc->v_bootinfo[0] = label->dkl_vtoc.v_bootinfo[0];
2950 vtoc->v_bootinfo[1] = label->dkl_vtoc.v_bootinfo[1];
2951 vtoc->v_bootinfo[2] = label->dkl_vtoc.v_bootinfo[2];
2952 bcopy(label->dkl_asciilabel, vtoc->v_asciilabel, LEN_DKL_ASCII);
2953 bcopy(label->dkl_vtoc.v_volume, vtoc->v_volume, LEN_DKL_VVOL);
2954 }
2955
2956 /*
2957 * Check if a geometry is valid for a single-slice disk. A geometry is
2958 * considered valid if the main attributes of the geometry match with the
2959 * attributes of the fake geometry we have created.
2960 */
2961 static boolean_t
2962 vd_slice_geom_isvalid(vd_t *vd, struct dk_geom *geom)
2963 {
2964 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE);
2965 ASSERT(vd->vdisk_label == VD_DISK_LABEL_VTOC);
2966
2967 if (geom->dkg_ncyl != vd->dk_geom.dkg_ncyl ||
2968 geom->dkg_acyl != vd->dk_geom.dkg_acyl ||
2969 geom->dkg_nsect != vd->dk_geom.dkg_nsect ||
2970 geom->dkg_pcyl != vd->dk_geom.dkg_pcyl)
2971 return (B_FALSE);
2972
2973 return (B_TRUE);
2974 }
2975
2976 /*
2977 * Check if a vtoc is valid for a single-slice disk. A vtoc is considered
2978 * valid if the main attributes of the vtoc match with the attributes of the
2979 * fake vtoc we have created.
2980 */
2981 static boolean_t
2982 vd_slice_vtoc_isvalid(vd_t *vd, struct extvtoc *vtoc)
2983 {
2984 size_t csize;
2985 int i;
2986
2987 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE);
2988 ASSERT(vd->vdisk_label == VD_DISK_LABEL_VTOC);
2989
2990 if (vtoc->v_sanity != vd->vtoc.v_sanity ||
2991 vtoc->v_version != vd->vtoc.v_version ||
2992 vtoc->v_nparts != vd->vtoc.v_nparts ||
2993 strcmp(vtoc->v_volume, vd->vtoc.v_volume) != 0 ||
2994 strcmp(vtoc->v_asciilabel, vd->vtoc.v_asciilabel) != 0)
2995 return (B_FALSE);
2996
2997 /* slice 2 should be unchanged */
2998 if (vtoc->v_part[VD_ENTIRE_DISK_SLICE].p_start !=
2999 vd->vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_start ||
3000 vtoc->v_part[VD_ENTIRE_DISK_SLICE].p_size !=
3001 vd->vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_size)
3002 return (B_FALSE);
3003
3004 /*
3005 * Slice 0 should be mostly unchanged and cover most of the disk.
3006 * However we allow some flexibility with respect to the start and
3007 * the size of this slice, mainly because we can't exactly know how
3008 * it will be defined by the OS installer.
3009 *
3010 * We allow slice 0 to be defined as starting on any of the first
3011 * 4 cylinders.
3012 */
3013 csize = vd->dk_geom.dkg_nhead * vd->dk_geom.dkg_nsect;
3014
3015 if (vtoc->v_part[0].p_start > 4 * csize ||
3016 vtoc->v_part[0].p_size > vtoc->v_part[VD_ENTIRE_DISK_SLICE].p_size)
3017 return (B_FALSE);
3018
3019 if (vd->vtoc.v_part[0].p_size >= 4 * csize &&
3020 vtoc->v_part[0].p_size < vd->vtoc.v_part[0].p_size - 4 * csize)
3021 return (B_FALSE);
3022
3023 /* any other slice should have a size of 0 */
3024 for (i = 1; i < vtoc->v_nparts; i++) {
3025 if (i != VD_ENTIRE_DISK_SLICE &&
3026 vtoc->v_part[i].p_size != 0)
3027 return (B_FALSE);
3028 }
3029
3030 return (B_TRUE);
3031 }
3032
3033 /*
3034 * Handle ioctls to a disk slice.
3035 *
3036 * Return Values
3037 * 0 - Indicates that there are no errors in disk operations
3038 * ENOTSUP - Unknown disk label type or unsupported DKIO ioctl
3039 * EINVAL - Not enough room to copy the EFI label
3040 *
3041 */
3042 static int
3043 vd_do_slice_ioctl(vd_t *vd, int cmd, void *ioctl_arg)
3044 {
3045 dk_efi_t *dk_ioc;
3046 struct extvtoc *vtoc;
3047 struct dk_geom *geom;
3048 size_t len, lba;
3049
3050 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE);
3051
3052 if (cmd == DKIOCFLUSHWRITECACHE)
3053 return (vd_flush_write(vd));
3054
3055 switch (vd->vdisk_label) {
3056
3057 /* ioctls for a single slice disk with a VTOC label */
3058 case VD_DISK_LABEL_VTOC:
3059
3060 switch (cmd) {
3061
3062 case DKIOCGGEOM:
3063 ASSERT(ioctl_arg != NULL);
3064 bcopy(&vd->dk_geom, ioctl_arg, sizeof (vd->dk_geom));
3065 return (0);
3066
3067 case DKIOCGEXTVTOC:
3068 ASSERT(ioctl_arg != NULL);
3069 bcopy(&vd->vtoc, ioctl_arg, sizeof (vd->vtoc));
3070 return (0);
3071
3072 case DKIOCSGEOM:
3073 ASSERT(ioctl_arg != NULL);
3074 if (vd_slice_single_slice)
3075 return (ENOTSUP);
3076
3077 /* fake success only if new geometry is valid */
3078 geom = (struct dk_geom *)ioctl_arg;
3079 if (!vd_slice_geom_isvalid(vd, geom))
3080 return (EINVAL);
3081
3082 return (0);
3083
3084 case DKIOCSEXTVTOC:
3085 ASSERT(ioctl_arg != NULL);
3086 if (vd_slice_single_slice)
3087 return (ENOTSUP);
3088
3089 /* fake success only if the new vtoc is valid */
3090 vtoc = (struct extvtoc *)ioctl_arg;
3091 if (!vd_slice_vtoc_isvalid(vd, vtoc))
3092 return (EINVAL);
3093
3094 return (0);
3095
3096 default:
3097 return (ENOTSUP);
3098 }
3099
3100 /* ioctls for a single slice disk with an EFI label */
3101 case VD_DISK_LABEL_EFI:
3102
3103 if (cmd != DKIOCGETEFI && cmd != DKIOCSETEFI)
3104 return (ENOTSUP);
3105
3106 ASSERT(ioctl_arg != NULL);
3107 dk_ioc = (dk_efi_t *)ioctl_arg;
3108
3109 len = dk_ioc->dki_length;
3110 lba = dk_ioc->dki_lba;
3111
3112 if ((lba != VD_EFI_LBA_GPT && lba != VD_EFI_LBA_GPE) ||
3113 (lba == VD_EFI_LBA_GPT && len < sizeof (efi_gpt_t)) ||
3114 (lba == VD_EFI_LBA_GPE && len < sizeof (efi_gpe_t)))
3115 return (EINVAL);
3116
3117 switch (cmd) {
3118 case DKIOCGETEFI:
3119 len = vd_slice_flabel_read(vd,
3120 (caddr_t)dk_ioc->dki_data,
3121 lba * vd->vdisk_bsize, len);
3122
3123 ASSERT(len > 0);
3124
3125 return (0);
3126
3127 case DKIOCSETEFI:
3128 if (vd_slice_single_slice)
3129 return (ENOTSUP);
3130
3131 /* we currently don't support writing EFI */
3132 return (EIO);
3133 }
3134
3135 default:
3136 /* Unknown disk label type */
3137 return (ENOTSUP);
3138 }
3139 }
3140
3141 static int
3142 vds_efi_alloc_and_read(vd_t *vd, efi_gpt_t **gpt, efi_gpe_t **gpe)
3143 {
3144 vd_efi_dev_t edev;
3145 int status;
3146
3147 VD_EFI_DEV_SET(edev, vd, (vd_efi_ioctl_func)vd_backend_ioctl);
3148
3149 status = vd_efi_alloc_and_read(&edev, gpt, gpe);
3150
3151 return (status);
3152 }
3153
3154 static void
3155 vds_efi_free(vd_t *vd, efi_gpt_t *gpt, efi_gpe_t *gpe)
3156 {
3157 vd_efi_dev_t edev;
3158
3159 VD_EFI_DEV_SET(edev, vd, (vd_efi_ioctl_func)vd_backend_ioctl);
3160
3161 vd_efi_free(&edev, gpt, gpe);
3162 }
3163
3164 static int
3165 vd_dskimg_validate_efi(vd_t *vd)
3166 {
3167 efi_gpt_t *gpt;
3168 efi_gpe_t *gpe;
3169 int i, nparts, status;
3170 struct uuid efi_reserved = EFI_RESERVED;
3171
3172 if ((status = vds_efi_alloc_and_read(vd, &gpt, &gpe)) != 0)
3173 return (status);
3174
3175 bzero(&vd->vtoc, sizeof (struct extvtoc));
3176 bzero(&vd->dk_geom, sizeof (struct dk_geom));
3177 bzero(vd->slices, sizeof (vd_slice_t) *
VD_MAXPART); 3178 3179 vd->efi_reserved = -1; 3180 3181 nparts = gpt->efi_gpt_NumberOfPartitionEntries; 3182 3183 for (i = 0; i < nparts && i < VD_MAXPART; i++) { 3184 3185 if (gpe[i].efi_gpe_StartingLBA == 0 && 3186 gpe[i].efi_gpe_EndingLBA == 0) { 3187 continue; 3188 } 3189 3190 vd->slices[i].start = gpe[i].efi_gpe_StartingLBA; 3191 vd->slices[i].nblocks = gpe[i].efi_gpe_EndingLBA - 3192 gpe[i].efi_gpe_StartingLBA + 1; 3193 3194 if (bcmp(&gpe[i].efi_gpe_PartitionTypeGUID, &efi_reserved, 3195 sizeof (struct uuid)) == 0) 3196 vd->efi_reserved = i; 3197 3198 } 3199 3200 ASSERT(vd->vdisk_size != 0); 3201 vd->slices[VD_EFI_WD_SLICE].start = 0; 3202 vd->slices[VD_EFI_WD_SLICE].nblocks = vd->vdisk_size; 3203 3204 vds_efi_free(vd, gpt, gpe); 3205 3206 return (status); 3207 } 3208 3209 /* 3210 * Function: 3211 * vd_dskimg_validate_geometry 3212 * 3213 * Description: 3214 * Read the label and validate the geometry of a disk image. The driver 3215 * label, vtoc and geometry information are updated according to the 3216 * label read from the disk image. 3217 * 3218 * If no valid label is found, the label is set to unknown and the 3219 * function returns EINVAL, but a default vtoc and geometry are provided 3220 * to the driver. If an EFI label is found, ENOTSUP is returned. 3221 * 3222 * Parameters: 3223 * vd - disk on which the operation is performed. 3224 * 3225 * Return Code: 3226 * 0 - success. 3227 * EIO - error reading the label from the disk image. 3228 * EINVAL - unknown disk label. 3229 * ENOTSUP - geometry not applicable (EFI label). 3230 */ 3231 static int 3232 vd_dskimg_validate_geometry(vd_t *vd) 3233 { 3234 struct dk_label label; 3235 struct dk_geom *geom = &vd->dk_geom; 3236 struct extvtoc *vtoc = &vd->vtoc; 3237 int i; 3238 int status = 0; 3239 3240 ASSERT(VD_DSKIMG(vd)); 3241 3242 if (VD_DSKIMG_LABEL_READ(vd, &label) < 0) 3243 return (EIO); 3244 3245 if (label.dkl_magic != DKL_MAGIC || 3246 label.dkl_cksum != vd_lbl2cksum(&label) || 3247 (vd_dskimg_validate_sanity && 3248 label.dkl_vtoc.v_sanity != VTOC_SANE) || 3249 label.dkl_vtoc.v_nparts != V_NUMPAR) { 3250 3251 if (vd_dskimg_validate_efi(vd) == 0) { 3252 vd->vdisk_label = VD_DISK_LABEL_EFI; 3253 return (ENOTSUP); 3254 } 3255 3256 vd->vdisk_label = VD_DISK_LABEL_UNK; 3257 vd_build_default_label(vd->dskimg_size, vd->vdisk_bsize, 3258 &label); 3259 status = EINVAL; 3260 } else { 3261 vd->vdisk_label = VD_DISK_LABEL_VTOC; 3262 } 3263 3264 /* Update the driver geometry and vtoc */ 3265 vd_label_to_vtocgeom(&label, vtoc, geom); 3266 3267 /* Update logical partitions */ 3268 bzero(vd->slices, sizeof (vd_slice_t) * VD_MAXPART); 3269 if (vd->vdisk_label != VD_DISK_LABEL_UNK) { 3270 for (i = 0; i < vtoc->v_nparts; i++) { 3271 vd->slices[i].start = vtoc->v_part[i].p_start; 3272 vd->slices[i].nblocks = vtoc->v_part[i].p_size; 3273 } 3274 } 3275 3276 return (status); 3277 } 3278 3279 /* 3280 * Handle ioctls to a disk image. 
3281 * 3282 * Return Values 3283 * 0 - Indicates that there are no errors 3284 * != 0 - Disk operation returned an error 3285 */ 3286 static int 3287 vd_do_dskimg_ioctl(vd_t *vd, int cmd, void *ioctl_arg) 3288 { 3289 struct dk_label label; 3290 struct dk_geom *geom; 3291 struct extvtoc *vtoc; 3292 dk_efi_t *efi; 3293 int rc; 3294 3295 ASSERT(VD_DSKIMG(vd)); 3296 3297 switch (cmd) { 3298 3299 case DKIOCGGEOM: 3300 ASSERT(ioctl_arg != NULL); 3301 geom = (struct dk_geom *)ioctl_arg; 3302 3303 rc = vd_dskimg_validate_geometry(vd); 3304 if (rc != 0 && rc != EINVAL) 3305 return (rc); 3306 bcopy(&vd->dk_geom, geom, sizeof (struct dk_geom)); 3307 return (0); 3308 3309 case DKIOCGEXTVTOC: 3310 ASSERT(ioctl_arg != NULL); 3311 vtoc = (struct extvtoc *)ioctl_arg; 3312 3313 rc = vd_dskimg_validate_geometry(vd); 3314 if (rc != 0 && rc != EINVAL) 3315 return (rc); 3316 bcopy(&vd->vtoc, vtoc, sizeof (struct extvtoc)); 3317 return (0); 3318 3319 case DKIOCSGEOM: 3320 ASSERT(ioctl_arg != NULL); 3321 geom = (struct dk_geom *)ioctl_arg; 3322 3323 if (geom->dkg_nhead == 0 || geom->dkg_nsect == 0) 3324 return (EINVAL); 3325 3326 /* 3327 * The current device geometry is not updated, just the driver 3328 * "notion" of it. The device geometry will be effectively 3329 * updated when a label is written to the device during a next 3330 * DKIOCSEXTVTOC. 3331 */ 3332 bcopy(ioctl_arg, &vd->dk_geom, sizeof (vd->dk_geom)); 3333 return (0); 3334 3335 case DKIOCSEXTVTOC: 3336 ASSERT(ioctl_arg != NULL); 3337 ASSERT(vd->dk_geom.dkg_nhead != 0 && 3338 vd->dk_geom.dkg_nsect != 0); 3339 vtoc = (struct extvtoc *)ioctl_arg; 3340 3341 if (vtoc->v_sanity != VTOC_SANE || 3342 vtoc->v_sectorsz != DEV_BSIZE || 3343 vtoc->v_nparts != V_NUMPAR) 3344 return (EINVAL); 3345 3346 vd_vtocgeom_to_label(vtoc, &vd->dk_geom, &label); 3347 3348 /* write label to the disk image */ 3349 if ((rc = vd_dskimg_set_vtoc(vd, &label)) != 0) 3350 return (rc); 3351 3352 break; 3353 3354 case DKIOCFLUSHWRITECACHE: 3355 return (vd_flush_write(vd)); 3356 3357 case DKIOCGETEFI: 3358 ASSERT(ioctl_arg != NULL); 3359 efi = (dk_efi_t *)ioctl_arg; 3360 3361 if (vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BREAD, 3362 (caddr_t)efi->dki_data, efi->dki_lba, efi->dki_length) < 0) 3363 return (EIO); 3364 3365 return (0); 3366 3367 case DKIOCSETEFI: 3368 ASSERT(ioctl_arg != NULL); 3369 efi = (dk_efi_t *)ioctl_arg; 3370 3371 if (vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BWRITE, 3372 (caddr_t)efi->dki_data, efi->dki_lba, efi->dki_length) < 0) 3373 return (EIO); 3374 3375 break; 3376 3377 3378 default: 3379 return (ENOTSUP); 3380 } 3381 3382 ASSERT(cmd == DKIOCSEXTVTOC || cmd == DKIOCSETEFI); 3383 3384 /* label has changed, revalidate the geometry */ 3385 (void) vd_dskimg_validate_geometry(vd); 3386 3387 /* 3388 * The disk geometry may have changed, so we need to write 3389 * the devid (if there is one) so that it is stored at the 3390 * right location. 3391 */ 3392 if (vd_dskimg_write_devid(vd, vd->dskimg_devid) != 0) { 3393 PR0("Fail to write devid"); 3394 } 3395 3396 return (0); 3397 } 3398 3399 static int 3400 vd_backend_ioctl(vd_t *vd, int cmd, caddr_t arg) 3401 { 3402 int rval = 0, status; 3403 struct vtoc vtoc; 3404 3405 /* 3406 * Call the appropriate function to execute the ioctl depending 3407 * on the type of vdisk. 
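 *
 * In summary, the dispatch below is:
 *
 *   slice, file or volume exported as a single slice disk
 *                                   -> vd_do_slice_ioctl()
 *   file or volume exported as a full disk
 *                                   -> vd_do_dskimg_ioctl()
 *   disk device exported as a full disk
 *                                   -> ldi_ioctl() on the device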
3408 */
3409 if (vd->vdisk_type == VD_DISK_TYPE_SLICE) {
3410
3411 /* slice, file or volume exported as a single slice disk */
3412 status = vd_do_slice_ioctl(vd, cmd, arg);
3413
3414 } else if (VD_DSKIMG(vd)) {
3415
3416 /* file or volume exported as a full disk */
3417 status = vd_do_dskimg_ioctl(vd, cmd, arg);
3418
3419 } else {
3420
3421 /* disk device exported as a full disk */
3422 status = ldi_ioctl(vd->ldi_handle[0], cmd, (intptr_t)arg,
3423 vd->open_flags | FKIOCTL, kcred, &rval);
3424
3425 /*
3426 * By default VTOC ioctls are done using ioctls for the
3427 * extended VTOC. Some drivers (in particular non-Sun drivers)
3428 * may not support these ioctls. In that case, we fall back to
3429 * the regular VTOC ioctls.
3430 */
3431 if (status == ENOTTY) {
3432 switch (cmd) {
3433
3434 case DKIOCGEXTVTOC:
3435 cmd = DKIOCGVTOC;
3436 status = ldi_ioctl(vd->ldi_handle[0], cmd,
3437 (intptr_t)&vtoc, vd->open_flags | FKIOCTL,
3438 kcred, &rval);
3439 vtoctoextvtoc(vtoc,
3440 (*(struct extvtoc *)(void *)arg));
3441 break;
3442
3443 case DKIOCSEXTVTOC:
3444 cmd = DKIOCSVTOC;
3445 extvtoctovtoc((*(struct extvtoc *)(void *)arg),
3446 vtoc);
3447 status = ldi_ioctl(vd->ldi_handle[0], cmd,
3448 (intptr_t)&vtoc, vd->open_flags | FKIOCTL,
3449 kcred, &rval);
3450 break;
3451 }
3452 }
3453 }
3454
3455 #ifdef DEBUG
3456 if (rval != 0) {
3457 PR0("ioctl %x set rval = %d, which is not being returned"
3458 " to caller", cmd, rval);
3459 }
3460 #endif /* DEBUG */
3461
3462 return (status);
3463 }
3464
3465 /*
3466 * Description:
3467 * This is the function that processes the ioctl requests (farming it
3468 * out to functions that handle slices, files or whole disks)
3469 *
3470 * Return Values
3471 * 0 - ioctl operation completed successfully
3472 * != 0 - The LDC error value encountered
3473 * (propagated back up the call stack as a task error)
3474 *
3475 * Side Effect
3476 * sets request->status to the return value of the ioctl function.
3477 */
3478 static int
3479 vd_do_ioctl(vd_t *vd, vd_dring_payload_t *request, void* buf, vd_ioctl_t *ioctl)
3480 {
3481 int status = 0;
3482 size_t nbytes = request->nbytes; /* modifiable copy */
3483
3484
3485 ASSERT(request->slice < vd->nslices);
3486 PR0("Performing %s", ioctl->operation_name);
3487
3488 /* Get data from client and convert, if necessary */
3489 if (ioctl->copyin != NULL) {
3490 ASSERT(nbytes != 0 && buf != NULL);
3491 PR1("Getting \"arg\" data from client");
3492 if ((status = ldc_mem_copy(vd->ldc_handle, buf, 0, &nbytes,
3493 request->cookie, request->ncookies,
3494 LDC_COPY_IN)) != 0) {
3495 PR0("ldc_mem_copy() returned errno %d "
3496 "copying from client", status);
3497 return (status);
3498 }
3499
3500 /* Convert client's data, if necessary */
3501 if (ioctl->copyin == VD_IDENTITY_IN) {
3502 /* use client buffer */
3503 ioctl->arg = buf;
3504 } else {
3505 /* convert client vdisk operation data to ioctl data */
3506 status = (ioctl->copyin)(buf, nbytes,
3507 (void *)ioctl->arg);
3508 if (status != 0) {
3509 request->status = status;
3510 return (0);
3511 }
3512 }
3513 }
3514
3515 if (ioctl->operation == VD_OP_SCSICMD) {
3516 struct uscsi_cmd *uscsi = (struct uscsi_cmd *)ioctl->arg;
3517
3518 /* check write permission */
3519 if (!(vd->open_flags & FWRITE) &&
3520 !(uscsi->uscsi_flags & USCSI_READ)) {
3521 PR0("uscsi fails because backend is opened read-only");
3522 request->status = EROFS;
3523 return (0);
3524 }
3525 }
3526
3527 /*
3528 * Send the ioctl to the disk backend.
3529 */
3530 request->status = vd_backend_ioctl(vd, ioctl->cmd, ioctl->arg);
3531
3532 if (request->status != 0) {
3533 PR0("ioctl(%s) = errno %d", ioctl->cmd_name, request->status);
3534 if (ioctl->operation == VD_OP_SCSICMD &&
3535 ((struct uscsi_cmd *)ioctl->arg)->uscsi_status != 0)
3536 /*
3537 * USCSICMD has reported an error and the uscsi_status
3538 * field is not zero. This means that the SCSI command
3539 * has completed but it has an error. So we should
3540 * mark the VD operation as successfully completed
3541 * and clients can check the SCSI status field for
3542 * SCSI errors.
3543 */
3544 request->status = 0;
3545 else
3546 return (0);
3547 }
3548
3549 /* Convert data and send to client, if necessary */
3550 if (ioctl->copyout != NULL) {
3551 ASSERT(nbytes != 0 && buf != NULL);
3552 PR1("Sending \"arg\" data to client");
3553
3554 /* Convert ioctl data to vdisk operation data, if necessary */
3555 if (ioctl->copyout != VD_IDENTITY_OUT)
3556 (ioctl->copyout)((void *)ioctl->arg, buf);
3557
3558 if ((status = ldc_mem_copy(vd->ldc_handle, buf, 0, &nbytes,
3559 request->cookie, request->ncookies,
3560 LDC_COPY_OUT)) != 0) {
3561 PR0("ldc_mem_copy() returned errno %d "
3562 "copying to client", status);
3563 return (status);
3564 }
3565 }
3566
3567 return (status);
3568 }
3569
3570 #define RNDSIZE(expr) P2ROUNDUP(sizeof (expr), sizeof (uint64_t))
3571
3572 /*
3573 * Description:
3574 * This generic function is called by the task queue to complete
3575 * the processing of the tasks. The specific completion function
3576 * is passed in as a field in the task pointer.
3577 *
3578 * Parameters:
3579 * arg - opaque pointer to structure containing task to be completed
3580 *
3581 * Return Values
3582 * None
3583 */
3584 static void
3585 vd_complete(void *arg)
3586 {
3587 vd_task_t *task = (vd_task_t *)arg;
3588
3589 ASSERT(task != NULL);
3590 ASSERT(task->status == EINPROGRESS);
3591 ASSERT(task->completef != NULL);
3592
3593 task->status = task->completef(task);
3594 if (task->status)
3595 PR0("%s: Error %d completing task", __func__, task->status);
3596
3597 /* Now notify the vDisk client */
3598 vd_complete_notify(task);
3599 }
3600
3601 static int
3602 vd_ioctl(vd_task_t *task)
3603 {
3604 int i, status;
3605 void *buf = NULL;
3606 struct dk_geom dk_geom = {0};
3607 struct extvtoc vtoc = {0};
3608 struct dk_efi dk_efi = {0};
3609 struct uscsi_cmd uscsi = {0};
3610 vd_t *vd = task->vd;
3611 vd_dring_payload_t *request = task->request;
3612 vd_ioctl_t ioctl[] = {
3613 /* Command (no-copy) operations */
3614 {VD_OP_FLUSH, STRINGIZE(VD_OP_FLUSH), 0,
3615 DKIOCFLUSHWRITECACHE, STRINGIZE(DKIOCFLUSHWRITECACHE),
3616 NULL, NULL, NULL, B_TRUE},
3617
3618 /* "Get" (copy-out) operations */
3619 {VD_OP_GET_WCE, STRINGIZE(VD_OP_GET_WCE), RNDSIZE(int),
3620 DKIOCGETWCE, STRINGIZE(DKIOCGETWCE),
3621 NULL, VD_IDENTITY_IN, VD_IDENTITY_OUT, B_FALSE},
3622 {VD_OP_GET_DISKGEOM, STRINGIZE(VD_OP_GET_DISKGEOM),
3623 RNDSIZE(vd_geom_t),
3624 DKIOCGGEOM, STRINGIZE(DKIOCGGEOM),
3625 &dk_geom, NULL, dk_geom2vd_geom, B_FALSE},
3626 {VD_OP_GET_VTOC, STRINGIZE(VD_OP_GET_VTOC), RNDSIZE(vd_vtoc_t),
3627 DKIOCGEXTVTOC, STRINGIZE(DKIOCGEXTVTOC),
3628 &vtoc, NULL, vtoc2vd_vtoc, B_FALSE},
3629 {VD_OP_GET_EFI, STRINGIZE(VD_OP_GET_EFI), RNDSIZE(vd_efi_t),
3630 DKIOCGETEFI, STRINGIZE(DKIOCGETEFI),
3631 &dk_efi, vd_get_efi_in, vd_get_efi_out, B_FALSE},
3632
3633 /* "Set" (copy-in) operations */
3634 {VD_OP_SET_WCE, STRINGIZE(VD_OP_SET_WCE), RNDSIZE(int),
3635 DKIOCSETWCE, STRINGIZE(DKIOCSETWCE),
3636 NULL, VD_IDENTITY_IN,
VD_IDENTITY_OUT, B_TRUE}, 3637 {VD_OP_SET_DISKGEOM, STRINGIZE(VD_OP_SET_DISKGEOM), 3638 RNDSIZE(vd_geom_t), 3639 DKIOCSGEOM, STRINGIZE(DKIOCSGEOM), 3640 &dk_geom, vd_geom2dk_geom, NULL, B_TRUE}, 3641 {VD_OP_SET_VTOC, STRINGIZE(VD_OP_SET_VTOC), RNDSIZE(vd_vtoc_t), 3642 DKIOCSEXTVTOC, STRINGIZE(DKIOCSEXTVTOC), 3643 &vtoc, vd_vtoc2vtoc, NULL, B_TRUE}, 3644 {VD_OP_SET_EFI, STRINGIZE(VD_OP_SET_EFI), RNDSIZE(vd_efi_t), 3645 DKIOCSETEFI, STRINGIZE(DKIOCSETEFI), 3646 &dk_efi, vd_set_efi_in, vd_set_efi_out, B_TRUE}, 3647 3648 {VD_OP_SCSICMD, STRINGIZE(VD_OP_SCSICMD), RNDSIZE(vd_scsi_t), 3649 USCSICMD, STRINGIZE(USCSICMD), 3650 &uscsi, vd_scsicmd_in, vd_scsicmd_out, B_FALSE}, 3651 }; 3652 size_t nioctls = (sizeof (ioctl))/(sizeof (ioctl[0])); 3653 3654 3655 ASSERT(vd != NULL); 3656 ASSERT(request != NULL); 3657 ASSERT(request->slice < vd->nslices); 3658 3659 /* 3660 * Determine ioctl corresponding to caller's "operation" and 3661 * validate caller's "nbytes" 3662 */ 3663 for (i = 0; i < nioctls; i++) { 3664 if (request->operation == ioctl[i].operation) { 3665 /* LDC memory operations require 8-byte multiples */ 3666 ASSERT(ioctl[i].nbytes % sizeof (uint64_t) == 0); 3667 3668 if (request->operation == VD_OP_GET_EFI || 3669 request->operation == VD_OP_SET_EFI || 3670 request->operation == VD_OP_SCSICMD) { 3671 if (request->nbytes >= ioctl[i].nbytes) 3672 break; 3673 PR0("%s: Expected at least nbytes = %lu, " 3674 "got %lu", ioctl[i].operation_name, 3675 ioctl[i].nbytes, request->nbytes); 3676 return (EINVAL); 3677 } 3678 3679 if (request->nbytes != ioctl[i].nbytes) { 3680 PR0("%s: Expected nbytes = %lu, got %lu", 3681 ioctl[i].operation_name, ioctl[i].nbytes, 3682 request->nbytes); 3683 return (EINVAL); 3684 } 3685 3686 break; 3687 } 3688 } 3689 ASSERT(i < nioctls); /* because "operation" already validated */ 3690 3691 if (!(vd->open_flags & FWRITE) && ioctl[i].write) { 3692 PR0("%s fails because backend is opened read-only", 3693 ioctl[i].operation_name); 3694 request->status = EROFS; 3695 return (0); 3696 } 3697 3698 if (request->nbytes) 3699 buf = kmem_zalloc(request->nbytes, KM_SLEEP); 3700 status = vd_do_ioctl(vd, request, buf, &ioctl[i]); 3701 if (request->nbytes) 3702 kmem_free(buf, request->nbytes); 3703 3704 return (status); 3705 } 3706 3707 static int 3708 vd_get_devid(vd_task_t *task) 3709 { 3710 vd_t *vd = task->vd; 3711 vd_dring_payload_t *request = task->request; 3712 vd_devid_t *vd_devid; 3713 impl_devid_t *devid; 3714 int status, bufid_len, devid_len, len, sz; 3715 int bufbytes; 3716 3717 PR1("Get Device ID, nbytes=%ld", request->nbytes); 3718 3719 if (vd->vdisk_type == VD_DISK_TYPE_SLICE) { 3720 /* 3721 * We don't support devid for single-slice disks because we 3722 * have no space to store a fabricated devid and for physical 3723 * disk slices, we can't use the devid of the disk otherwise 3724 * exporting multiple slices from the same disk will produce 3725 * the same devids. 
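 *
 * For example (hypothetical configuration): exporting slice 0 and
 * slice 1 of the same physical disk as two vdisks would give both
 * vdisks the devid of that one disk, and a guest could no longer
 * tell the two virtual disks apart.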
3726 */ 3727 PR2("No Device ID for slices"); 3728 request->status = ENOTSUP; 3729 return (0); 3730 } 3731 3732 if (VD_DSKIMG(vd)) { 3733 if (vd->dskimg_devid == NULL) { 3734 PR2("No Device ID"); 3735 request->status = ENOENT; 3736 return (0); 3737 } else { 3738 sz = ddi_devid_sizeof(vd->dskimg_devid); 3739 devid = kmem_alloc(sz, KM_SLEEP); 3740 bcopy(vd->dskimg_devid, devid, sz); 3741 } 3742 } else { 3743 if (ddi_lyr_get_devid(vd->dev[request->slice], 3744 (ddi_devid_t *)&devid) != DDI_SUCCESS) { 3745 PR2("No Device ID"); 3746 request->status = ENOENT; 3747 return (0); 3748 } 3749 } 3750 3751 bufid_len = request->nbytes - sizeof (vd_devid_t) + 1; 3752 devid_len = DEVID_GETLEN(devid); 3753 3754 /* 3755 * Save the buffer size here for use in deallocation. 3756 * The actual number of bytes copied is returned in 3757 * the 'nbytes' field of the request structure. 3758 */ 3759 bufbytes = request->nbytes; 3760 3761 vd_devid = kmem_zalloc(bufbytes, KM_SLEEP); 3762 vd_devid->length = devid_len; 3763 vd_devid->type = DEVID_GETTYPE(devid); 3764 3765 len = (devid_len > bufid_len)? bufid_len : devid_len; 3766 3767 bcopy(devid->did_id, vd_devid->id, len); 3768 3769 request->status = 0; 3770 3771 /* LDC memory operations require 8-byte multiples */ 3772 ASSERT(request->nbytes % sizeof (uint64_t) == 0); 3773 3774 if ((status = ldc_mem_copy(vd->ldc_handle, (caddr_t)vd_devid, 0, 3775 &request->nbytes, request->cookie, request->ncookies, 3776 LDC_COPY_OUT)) != 0) { 3777 PR0("ldc_mem_copy() returned errno %d copying to client", 3778 status); 3779 } 3780 PR1("post mem_copy: nbytes=%ld", request->nbytes); 3781 3782 kmem_free(vd_devid, bufbytes); 3783 ddi_devid_free((ddi_devid_t)devid); 3784 3785 return (status); 3786 } 3787 3788 static int 3789 vd_scsi_reset(vd_t *vd) 3790 { 3791 int rval, status; 3792 struct uscsi_cmd uscsi = { 0 }; 3793 3794 uscsi.uscsi_flags = vd_scsi_debug | USCSI_RESET; 3795 uscsi.uscsi_timeout = vd_scsi_rdwr_timeout; 3796 3797 status = ldi_ioctl(vd->ldi_handle[0], USCSICMD, (intptr_t)&uscsi, 3798 (vd->open_flags | FKIOCTL), kcred, &rval); 3799 3800 return (status); 3801 } 3802 3803 static int 3804 vd_reset(vd_task_t *task) 3805 { 3806 vd_t *vd = task->vd; 3807 vd_dring_payload_t *request = task->request; 3808 3809 ASSERT(request->operation == VD_OP_RESET); 3810 ASSERT(vd->scsi); 3811 3812 PR0("Performing VD_OP_RESET"); 3813 3814 if (request->nbytes != 0) { 3815 PR0("VD_OP_RESET: Expected nbytes = 0, got %lu", 3816 request->nbytes); 3817 return (EINVAL); 3818 } 3819 3820 request->status = vd_scsi_reset(vd); 3821 3822 return (0); 3823 } 3824 3825 static int 3826 vd_get_capacity(vd_task_t *task) 3827 { 3828 int rv; 3829 size_t nbytes; 3830 vd_t *vd = task->vd; 3831 vd_dring_payload_t *request = task->request; 3832 vd_capacity_t vd_cap = { 0 }; 3833 3834 ASSERT(request->operation == VD_OP_GET_CAPACITY); 3835 3836 PR0("Performing VD_OP_GET_CAPACITY"); 3837 3838 nbytes = request->nbytes; 3839 3840 if (nbytes != RNDSIZE(vd_capacity_t)) { 3841 PR0("VD_OP_GET_CAPACITY: Expected nbytes = %lu, got %lu", 3842 RNDSIZE(vd_capacity_t), nbytes); 3843 return (EINVAL); 3844 } 3845 3846 /* 3847 * Check the backend size in case it has changed. If the check fails 3848 * then we will return the last known size. 
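 *
 * This matters for resizable backends: e.g. if a backing file or
 * volume has been grown since the vdisk was attached,
 * vd_backend_check_size() is expected to refresh vd->vdisk_size
 * before the size is reported below.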
3849 */ 3850 3851 (void) vd_backend_check_size(vd); 3852 ASSERT(vd->vdisk_size != 0); 3853 3854 request->status = 0; 3855 3856 vd_cap.vdisk_block_size = vd->vdisk_bsize; 3857 vd_cap.vdisk_size = vd->vdisk_size; 3858 3859 if ((rv = ldc_mem_copy(vd->ldc_handle, (char *)&vd_cap, 0, &nbytes, 3860 request->cookie, request->ncookies, LDC_COPY_OUT)) != 0) { 3861 PR0("ldc_mem_copy() returned errno %d copying to client", rv); 3862 return (rv); 3863 } 3864 3865 return (0); 3866 } 3867 3868 static int 3869 vd_get_access(vd_task_t *task) 3870 { 3871 uint64_t access; 3872 int rv, rval = 0; 3873 size_t nbytes; 3874 vd_t *vd = task->vd; 3875 vd_dring_payload_t *request = task->request; 3876 3877 ASSERT(request->operation == VD_OP_GET_ACCESS); 3878 ASSERT(vd->scsi); 3879 3880 PR0("Performing VD_OP_GET_ACCESS"); 3881 3882 nbytes = request->nbytes; 3883 3884 if (nbytes != sizeof (uint64_t)) { 3885 PR0("VD_OP_GET_ACCESS: Expected nbytes = %lu, got %lu", 3886 sizeof (uint64_t), nbytes); 3887 return (EINVAL); 3888 } 3889 3890 request->status = ldi_ioctl(vd->ldi_handle[request->slice], MHIOCSTATUS, 3891 NULL, (vd->open_flags | FKIOCTL), kcred, &rval); 3892 3893 if (request->status != 0) 3894 return (0); 3895 3896 access = (rval == 0)? VD_ACCESS_ALLOWED : VD_ACCESS_DENIED; 3897 3898 if ((rv = ldc_mem_copy(vd->ldc_handle, (char *)&access, 0, &nbytes, 3899 request->cookie, request->ncookies, LDC_COPY_OUT)) != 0) { 3900 PR0("ldc_mem_copy() returned errno %d copying to client", rv); 3901 return (rv); 3902 } 3903 3904 return (0); 3905 } 3906 3907 static int 3908 vd_set_access(vd_task_t *task) 3909 { 3910 uint64_t flags; 3911 int rv, rval; 3912 size_t nbytes; 3913 vd_t *vd = task->vd; 3914 vd_dring_payload_t *request = task->request; 3915 3916 ASSERT(request->operation == VD_OP_SET_ACCESS); 3917 ASSERT(vd->scsi); 3918 3919 nbytes = request->nbytes; 3920 3921 if (nbytes != sizeof (uint64_t)) { 3922 PR0("VD_OP_SET_ACCESS: Expected nbytes = %lu, got %lu", 3923 sizeof (uint64_t), nbytes); 3924 return (EINVAL); 3925 } 3926 3927 if ((rv = ldc_mem_copy(vd->ldc_handle, (char *)&flags, 0, &nbytes, 3928 request->cookie, request->ncookies, LDC_COPY_IN)) != 0) { 3929 PR0("ldc_mem_copy() returned errno %d copying from client", rv); 3930 return (rv); 3931 } 3932 3933 if (flags == VD_ACCESS_SET_CLEAR) { 3934 PR0("Performing VD_OP_SET_ACCESS (CLEAR)"); 3935 request->status = ldi_ioctl(vd->ldi_handle[request->slice], 3936 MHIOCRELEASE, NULL, (vd->open_flags | FKIOCTL), kcred, 3937 &rval); 3938 if (request->status == 0) 3939 vd->ownership = B_FALSE; 3940 return (0); 3941 } 3942 3943 /* 3944 * As per the VIO spec, the PREEMPT and PRESERVE flags are only valid 3945 * when the EXCLUSIVE flag is set. 3946 */ 3947 if (!(flags & VD_ACCESS_SET_EXCLUSIVE)) { 3948 PR0("Invalid VD_OP_SET_ACCESS flags: 0x%lx", flags); 3949 request->status = EINVAL; 3950 return (0); 3951 } 3952 3953 switch (flags & (VD_ACCESS_SET_PREEMPT | VD_ACCESS_SET_PRESERVE)) { 3954 3955 case VD_ACCESS_SET_PREEMPT | VD_ACCESS_SET_PRESERVE: 3956 /* 3957 * Flags EXCLUSIVE and PREEMPT and PRESERVE. We have to 3958 * acquire exclusive access rights, preserve them, and we 3959 * can use preemption. So we can use the MHIOCTKOWN ioctl. 3960 */ 3961 PR0("Performing VD_OP_SET_ACCESS (EXCLUSIVE|PREEMPT|PRESERVE)"); 3962 request->status = ldi_ioctl(vd->ldi_handle[request->slice], 3963 MHIOCTKOWN, NULL, (vd->open_flags | FKIOCTL), kcred, &rval); 3964 break; 3965 3966 case VD_ACCESS_SET_PRESERVE: 3967 /* 3968 * Flags EXCLUSIVE and PRESERVE. 
We have to acquire exclusive 3969 * access rights and preserve them, but not preempt any other 3970 * host. So we need to use the MHIOCTKOWN ioctl to enable the 3971 * "preserve" feature but we cannot call it directly 3972 * because it uses preemption. So before that, we use the 3973 * MHIOCQRESERVE ioctl to ensure we can get exclusive rights 3974 * without preempting anyone. 3975 */ 3976 PR0("Performing VD_OP_SET_ACCESS (EXCLUSIVE|PRESERVE)"); 3977 request->status = ldi_ioctl(vd->ldi_handle[request->slice], 3978 MHIOCQRESERVE, NULL, (vd->open_flags | FKIOCTL), kcred, 3979 &rval); 3980 if (request->status != 0) 3981 break; 3982 request->status = ldi_ioctl(vd->ldi_handle[request->slice], 3983 MHIOCTKOWN, NULL, (vd->open_flags | FKIOCTL), kcred, &rval); 3984 break; 3985 3986 case VD_ACCESS_SET_PREEMPT: 3987 /* 3988 * Flags EXCLUSIVE and PREEMPT. We have to acquire exclusive 3989 * access rights and we can use preemption. So we try to do 3990 * a SCSI reservation; if it fails, we reset the disk to clear 3991 * any reservation and try to reserve again. 3992 */ 3993 PR0("Performing VD_OP_SET_ACCESS (EXCLUSIVE|PREEMPT)"); 3994 request->status = ldi_ioctl(vd->ldi_handle[request->slice], 3995 MHIOCQRESERVE, NULL, (vd->open_flags | FKIOCTL), kcred, 3996 &rval); 3997 if (request->status == 0) 3998 break; 3999 4000 /* reset the disk */ 4001 (void) vd_scsi_reset(vd); 4002 4003 /* try again even if the reset has failed */ 4004 request->status = ldi_ioctl(vd->ldi_handle[request->slice], 4005 MHIOCQRESERVE, NULL, (vd->open_flags | FKIOCTL), kcred, 4006 &rval); 4007 break; 4008 4009 case 0: 4010 /* Flag EXCLUSIVE only. Just issue a SCSI reservation */ 4011 PR0("Performing VD_OP_SET_ACCESS (EXCLUSIVE)"); 4012 request->status = ldi_ioctl(vd->ldi_handle[request->slice], 4013 MHIOCQRESERVE, NULL, (vd->open_flags | FKIOCTL), kcred, 4014 &rval); 4015 break; 4016 } 4017 4018 if (request->status == 0) 4019 vd->ownership = B_TRUE; 4020 else 4021 PR0("VD_OP_SET_ACCESS: error %d", request->status); 4022 4023 return (0); 4024 } 4025 4026 static void 4027 vd_reset_access(vd_t *vd) 4028 { 4029 int status, rval; 4030 4031 if (vd->file || vd->volume || !vd->ownership) 4032 return; 4033 4034 PR0("Releasing disk ownership"); 4035 status = ldi_ioctl(vd->ldi_handle[0], MHIOCRELEASE, NULL, 4036 (vd->open_flags | FKIOCTL), kcred, &rval); 4037 4038 /* 4039 * An EACCES failure means that there is a reservation conflict, 4040 * so we are not the owner of the disk anymore. 4041 */ 4042 if (status == 0 || status == EACCES) { 4043 vd->ownership = B_FALSE; 4044 return; 4045 } 4046 4047 PR0("Failed to release ownership, error %d", status); 4048 4049 /* 4050 * We have failed to release the ownership; try to reset the disk 4051 * to release reservations. 4052 */ 4053 PR0("Resetting disk"); 4054 status = vd_scsi_reset(vd); 4055 4056 if (status != 0) 4057 PR0("Failed to reset disk, error %d", status); 4058 4059 /* whatever the result of the reset is, we try the release again */ 4060 status = ldi_ioctl(vd->ldi_handle[0], MHIOCRELEASE, NULL, 4061 (vd->open_flags | FKIOCTL), kcred, &rval); 4062 4063 if (status == 0 || status == EACCES) { 4064 vd->ownership = B_FALSE; 4065 return; 4066 } 4067 4068 PR0("Failed to release ownership, error %d", status); 4069 4070 /* 4071 * At this point we have done our best to try to reset the 4072 * access rights to the disk and we don't know if we still 4073 * own a reservation and if any mechanism to preserve the 4074 * ownership is still in place. 
The ultimate solution would 4075 * be to reset the system but this is usually not what we 4076 * want to happen. 4077 */ 4078 4079 if (vd_reset_access_failure == A_REBOOT) { 4080 cmn_err(CE_WARN, VD_RESET_ACCESS_FAILURE_MSG 4081 ", rebooting the system", vd->device_path); 4082 (void) uadmin(A_SHUTDOWN, AD_BOOT, NULL); 4083 } else if (vd_reset_access_failure == A_DUMP) { 4084 panic(VD_RESET_ACCESS_FAILURE_MSG, vd->device_path); 4085 } 4086 4087 cmn_err(CE_WARN, VD_RESET_ACCESS_FAILURE_MSG, vd->device_path); 4088 } 4089 4090 /* 4091 * Define the supported operations once the functions for performing them have 4092 * been defined 4093 */ 4094 static const vds_operation_t vds_operation[] = { 4095 #define X(_s) #_s, _s 4096 {X(VD_OP_BREAD), vd_start_bio, vd_complete_bio}, 4097 {X(VD_OP_BWRITE), vd_start_bio, vd_complete_bio}, 4098 {X(VD_OP_FLUSH), vd_ioctl, NULL}, 4099 {X(VD_OP_GET_WCE), vd_ioctl, NULL}, 4100 {X(VD_OP_SET_WCE), vd_ioctl, NULL}, 4101 {X(VD_OP_GET_VTOC), vd_ioctl, NULL}, 4102 {X(VD_OP_SET_VTOC), vd_ioctl, NULL}, 4103 {X(VD_OP_GET_DISKGEOM), vd_ioctl, NULL}, 4104 {X(VD_OP_SET_DISKGEOM), vd_ioctl, NULL}, 4105 {X(VD_OP_GET_EFI), vd_ioctl, NULL}, 4106 {X(VD_OP_SET_EFI), vd_ioctl, NULL}, 4107 {X(VD_OP_GET_DEVID), vd_get_devid, NULL}, 4108 {X(VD_OP_SCSICMD), vd_ioctl, NULL}, 4109 {X(VD_OP_RESET), vd_reset, NULL}, 4110 {X(VD_OP_GET_CAPACITY), vd_get_capacity, NULL}, 4111 {X(VD_OP_SET_ACCESS), vd_set_access, NULL}, 4112 {X(VD_OP_GET_ACCESS), vd_get_access, NULL}, 4113 #undef X 4114 }; 4115 4116 static const size_t vds_noperations = 4117 (sizeof (vds_operation))/(sizeof (vds_operation[0])); 4118 4119 /* 4120 * Process a task specifying a client I/O request 4121 * 4122 * Parameters: 4123 * task - structure containing the request sent from client 4124 * 4125 * Return Value 4126 * 0 - success 4127 * ENOTSUP - Unknown/Unsupported VD_OP_XXX operation 4128 * EINVAL - Invalid disk slice 4129 * != 0 - some other non-zero return value from start function 4130 */ 4131 static int 4132 vd_do_process_task(vd_task_t *task) 4133 { 4134 int i; 4135 vd_t *vd = task->vd; 4136 vd_dring_payload_t *request = task->request; 4137 4138 ASSERT(vd != NULL); 4139 ASSERT(request != NULL); 4140 4141 /* Find the requested operation */ 4142 for (i = 0; i < vds_noperations; i++) { 4143 if (request->operation == vds_operation[i].operation) { 4144 /* all operations should have a start func */ 4145 ASSERT(vds_operation[i].start != NULL); 4146 4147 task->completef = vds_operation[i].complete; 4148 break; 4149 } 4150 } 4151 4152 /* 4153 * We need to check that the requested operation is permitted 4154 * for the particular client that sent it or that the loop above 4155 * did not complete without finding the operation type (indicating 4156 * that the requested operation is unknown/unimplemented) 4157 */ 4158 if ((VD_OP_SUPPORTED(vd->operations, request->operation) == B_FALSE) || 4159 (i == vds_noperations)) { 4160 PR0("Unsupported operation %u", request->operation); 4161 request->status = ENOTSUP; 4162 return (0); 4163 } 4164 4165 /* Range-check slice */ 4166 if (request->slice >= vd->nslices && 4167 ((vd->vdisk_type != VD_DISK_TYPE_DISK && vd_slice_single_slice) || 4168 request->slice != VD_SLICE_NONE)) { 4169 PR0("Invalid \"slice\" %u (max %u) for virtual disk", 4170 request->slice, (vd->nslices - 1)); 4171 request->status = EINVAL; 4172 return (0); 4173 } 4174 4175 /* 4176 * Call the function pointer that starts the operation. 
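 *
 * At this point the dispatch is fully resolved. For example, a
 * VD_OP_BREAD request matched the vds_operation[] entry
 * {X(VD_OP_BREAD), vd_start_bio, vd_complete_bio} above, so the call
 * below amounts to vd_start_bio(task), with task->completef already
 * set to vd_complete_bio by the lookup loop.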
4177 */ 4178 return (vds_operation[i].start(task)); 4179 } 4180 4181 /* 4182 * Description: 4183 * This function is called by both the in-band and descriptor ring 4184 * message processing paths to actually execute the task 4185 * requested by the vDisk client. It in turn calls its worker 4186 * function, vd_do_process_task(), to carry out the request. 4187 * 4188 * Any transport errors (e.g. LDC errors, vDisk protocol errors) are 4189 * saved in the 'status' field of the task and are propagated back 4190 * up the call stack to trigger a NACK. 4191 * 4192 * Any request errors (e.g. ENOTTY from an ioctl) are saved in 4193 * the 'status' field of the request and result in an ACK being sent 4194 * by the completion handler. 4195 * 4196 * Parameters: 4197 * task - structure containing the request sent from client 4198 * 4199 * Return Value 4200 * 0 - successful synchronous request. 4201 * != 0 - transport error (e.g. LDC errors, vDisk protocol) 4202 * EINPROGRESS - task will be finished in a completion handler 4203 */ 4204 static int 4205 vd_process_task(vd_task_t *task) 4206 { 4207 vd_t *vd = task->vd; 4208 int status; 4209 4210 DTRACE_PROBE1(task__start, vd_task_t *, task); 4211 4212 task->status = vd_do_process_task(task); 4213 4214 /* 4215 * If the task processing function returned EINPROGRESS indicating 4216 * that the task needs completing then schedule a taskq entry to 4217 * finish it now. 4218 * 4219 * Otherwise the task processing function returned either zero 4220 * indicating that the task was finished in the start function (and we 4221 * don't need to wait in a completion function) or the start function 4222 * returned an error - in both cases all that needs to happen is the 4223 * notification to the vDisk client higher up the call stack. 4224 * If the task was using a Descriptor Ring, we need to mark it as done 4225 * at this stage. 4226 */ 4227 if (task->status == EINPROGRESS) { 4228 /* Queue a task to complete the operation */ 4229 (void) ddi_taskq_dispatch(vd->completionq, vd_complete, 4230 task, DDI_SLEEP); 4231 return (EINPROGRESS); 4232 } 4233 4234 if (!vd->reset_state && (vd->xfer_mode == VIO_DRING_MODE_V1_0)) { 4235 /* Update the dring element if it's a dring client */ 4236 status = vd_mark_elem_done(vd, task->index, 4237 task->request->status, task->request->nbytes); 4238 if (status == ECONNRESET) 4239 vd_mark_in_reset(vd); 4240 else if (status == EACCES) 4241 vd_need_reset(vd, B_TRUE); 4242 } 4243 4244 return (task->status); 4245 } 4246 4247 /* 4248 * Return true if the "type", "subtype", and "env" fields of the "tag" first 4249 * argument match the corresponding remaining arguments; otherwise, return false 4250 */ 4251 boolean_t 4252 vd_msgtype(vio_msg_tag_t *tag, int type, int subtype, int env) 4253 { 4254 return ((tag->vio_msgtype == type) && 4255 (tag->vio_subtype == subtype) && 4256 (tag->vio_subtype_env == env)) ? B_TRUE : B_FALSE; 4257 } 4258 4259 /* 4260 * Check whether the major/minor version specified in "ver_msg" is supported 4261 * by this server. 
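 *
 * A sketch of the negotiation, assuming for illustration that
 * vds_version[] held { {2, 0}, {1, 1} } (the actual table may differ):
 *
 *	client sends 2.1 -> minor lowered to 2.0, message gets "ack"ed
 *	client sends 3.0 -> message rewritten to 2.0 and "nack"ed; the
 *	                    client may retry with 2.0 or lower
 *	client sends 1.0 -> matches major 1, minor 0 <= 1, "ack"ed as is
 *	client sends 0.9 -> no match; grounded to 0.0 and "nack"ed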
4262 */ 4263 static boolean_t 4264 vds_supported_version(vio_ver_msg_t *ver_msg) 4265 { 4266 for (int i = 0; i < vds_num_versions; i++) { 4267 ASSERT(vds_version[i].major > 0); 4268 ASSERT((i == 0) || 4269 (vds_version[i].major < vds_version[i-1].major)); 4270 4271 /* 4272 * If the major versions match, adjust the minor version, if 4273 * necessary, down to the highest value supported by this 4274 * server and return true so this message will get "ack"ed; 4275 * the client should also support all minor versions lower 4276 * than the value it sent 4277 */ 4278 if (ver_msg->ver_major == vds_version[i].major) { 4279 if (ver_msg->ver_minor > vds_version[i].minor) { 4280 PR0("Adjusting minor version from %u to %u", 4281 ver_msg->ver_minor, vds_version[i].minor); 4282 ver_msg->ver_minor = vds_version[i].minor; 4283 } 4284 return (B_TRUE); 4285 } 4286 4287 /* 4288 * If the message contains a higher major version number, set 4289 * the message's major/minor versions to the current values 4290 * and return false, so this message will get "nack"ed with 4291 * these values, and the client will potentially try again 4292 * with the same or a lower version 4293 */ 4294 if (ver_msg->ver_major > vds_version[i].major) { 4295 ver_msg->ver_major = vds_version[i].major; 4296 ver_msg->ver_minor = vds_version[i].minor; 4297 return (B_FALSE); 4298 } 4299 4300 /* 4301 * Otherwise, the message's major version is less than the 4302 * current major version, so continue the loop to the next 4303 * (lower) supported version 4304 */ 4305 } 4306 4307 /* 4308 * No common version was found; "ground" the version pair in the 4309 * message to terminate negotiation 4310 */ 4311 ver_msg->ver_major = 0; 4312 ver_msg->ver_minor = 0; 4313 return (B_FALSE); 4314 } 4315 4316 /* 4317 * Process a version message from a client. vds expects to receive version 4318 * messages from clients seeking service, but never issues version messages 4319 * itself; therefore, vds can ACK or NACK client version messages, but does 4320 * not expect to receive version-message ACKs or NACKs (and will treat such 4321 * messages as invalid). 4322 */ 4323 static int 4324 vd_process_ver_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 4325 { 4326 vio_ver_msg_t *ver_msg = (vio_ver_msg_t *)msg; 4327 4328 4329 ASSERT(msglen >= sizeof (msg->tag)); 4330 4331 if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, 4332 VIO_VER_INFO)) { 4333 return (ENOMSG); /* not a version message */ 4334 } 4335 4336 if (msglen != sizeof (*ver_msg)) { 4337 PR0("Expected %lu-byte version message; " 4338 "received %lu bytes", sizeof (*ver_msg), msglen); 4339 return (EBADMSG); 4340 } 4341 4342 if (ver_msg->dev_class != VDEV_DISK) { 4343 PR0("Expected device class %u (disk); received %u", 4344 VDEV_DISK, ver_msg->dev_class); 4345 return (EBADMSG); 4346 } 4347 4348 /* 4349 * We're talking to the expected kind of client; set our device class 4350 * for "ack/nack" back to the client 4351 */ 4352 ver_msg->dev_class = VDEV_DISK_SERVER; 4353 4354 /* 4355 * Check whether the (valid) version message specifies a version 4356 * supported by this server. 
If the version is not supported, return 4357 * EBADMSG so the message will get "nack"ed; vds_supported_version() 4358 * will have updated the message with a supported version for the 4359 * client to consider 4360 */ 4361 if (!vds_supported_version(ver_msg)) 4362 return (EBADMSG); 4363 4364 4365 /* 4366 * A version has been agreed upon; use the client's SID for 4367 * communication on this channel now 4368 */ 4369 ASSERT(!(vd->initialized & VD_SID)); 4370 vd->sid = ver_msg->tag.vio_sid; 4371 vd->initialized |= VD_SID; 4372 4373 /* 4374 * Store the negotiated major and minor version values in the "vd" data 4375 * structure so that we can check if certain operations are supported 4376 * by the client. 4377 */ 4378 vd->version.major = ver_msg->ver_major; 4379 vd->version.minor = ver_msg->ver_minor; 4380 4381 PR0("Using major version %u, minor version %u", 4382 ver_msg->ver_major, ver_msg->ver_minor); 4383 return (0); 4384 } 4385 4386 static void 4387 vd_set_exported_operations(vd_t *vd) 4388 { 4389 vd->operations = 0; /* clear field */ 4390 4391 /* 4392 * We need to check from the highest version supported to the 4393 * lowest because versions with a higher minor number implicitly 4394 * support versions with a lower minor number. 4395 */ 4396 if (vio_ver_is_supported(vd->version, 1, 1)) { 4397 ASSERT(vd->open_flags & FREAD); 4398 vd->operations |= VD_OP_MASK_READ | (1 << VD_OP_GET_CAPACITY); 4399 4400 if (vd->open_flags & FWRITE) 4401 vd->operations |= VD_OP_MASK_WRITE; 4402 4403 if (vd->scsi) 4404 vd->operations |= VD_OP_MASK_SCSI; 4405 4406 if (VD_DSKIMG(vd) && vd_dskimg_is_iso_image(vd)) { 4407 /* 4408 * can't write to ISO images, make sure that write 4409 * support is not set in case administrator did not 4410 * use "options=ro" when doing an ldm add-vdsdev 4411 */ 4412 vd->operations &= ~VD_OP_MASK_WRITE; 4413 } 4414 } else if (vio_ver_is_supported(vd->version, 1, 0)) { 4415 vd->operations = VD_OP_MASK_READ | VD_OP_MASK_WRITE; 4416 } 4417 4418 /* we should have already agreed on a version */ 4419 ASSERT(vd->operations != 0); 4420 } 4421 4422 static int 4423 vd_process_attr_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 4424 { 4425 vd_attr_msg_t *attr_msg = (vd_attr_msg_t *)msg; 4426 int status, retry = 0; 4427 4428 4429 ASSERT(msglen >= sizeof (msg->tag)); 4430 4431 if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, 4432 VIO_ATTR_INFO)) { 4433 PR0("Message is not an attribute message"); 4434 return (ENOMSG); 4435 } 4436 4437 if (msglen != sizeof (*attr_msg)) { 4438 PR0("Expected %lu-byte attribute message; " 4439 "received %lu bytes", sizeof (*attr_msg), msglen); 4440 return (EBADMSG); 4441 } 4442 4443 if (attr_msg->max_xfer_sz == 0) { 4444 PR0("Received maximum transfer size of 0 from client"); 4445 return (EBADMSG); 4446 } 4447 4448 if ((attr_msg->xfer_mode != VIO_DESC_MODE) && 4449 (attr_msg->xfer_mode != VIO_DRING_MODE_V1_0)) { 4450 PR0("Client requested unsupported transfer mode"); 4451 return (EBADMSG); 4452 } 4453 4454 /* 4455 * check if the underlying disk is ready, if not try accessing 4456 * the device again. 
Open the vdisk device and extract info 4457 * about it, as this is needed to respond to the attr info msg 4458 */ 4459 if ((vd->initialized & VD_DISK_READY) == 0) { 4460 PR0("Retry setting up disk (%s)", vd->device_path); 4461 do { 4462 status = vd_setup_vd(vd); 4463 if (status != EAGAIN || ++retry > vds_dev_retries) 4464 break; 4465 4466 /* incremental delay */ 4467 delay(drv_usectohz(vds_dev_delay)); 4468 4469 /* if vdisk is no longer enabled - return error */ 4470 if (!vd_enabled(vd)) 4471 return (ENXIO); 4472 4473 } while (status == EAGAIN); 4474 4475 if (status) 4476 return (ENXIO); 4477 4478 vd->initialized |= VD_DISK_READY; 4479 ASSERT(vd->nslices > 0 && vd->nslices <= V_NUMPAR); 4480 PR0("vdisk_type = %s, volume = %s, file = %s, nslices = %u", 4481 ((vd->vdisk_type == VD_DISK_TYPE_DISK) ? "disk" : "slice"), 4482 (vd->volume ? "yes" : "no"), 4483 (vd->file ? "yes" : "no"), 4484 vd->nslices); 4485 } 4486 4487 /* Success: valid message and transfer mode */ 4488 vd->xfer_mode = attr_msg->xfer_mode; 4489 4490 if (vd->xfer_mode == VIO_DESC_MODE) { 4491 4492 /* 4493 * The vd_dring_inband_msg_t contains one cookie; need room 4494 * for up to n-1 more cookies, where "n" is the number of full 4495 * pages plus possibly one partial page required to cover 4496 * "max_xfer_sz". Add room for one more cookie if 4497 * "max_xfer_sz" isn't an integral multiple of the page size. 4498 * Must first get the maximum transfer size in bytes. 4499 */ 4500 size_t max_xfer_bytes = attr_msg->vdisk_block_size ? 4501 attr_msg->vdisk_block_size * attr_msg->max_xfer_sz : 4502 attr_msg->max_xfer_sz; 4503 size_t max_inband_msglen = 4504 sizeof (vd_dring_inband_msg_t) + 4505 ((max_xfer_bytes/PAGESIZE + 4506 ((max_xfer_bytes % PAGESIZE) ? 1 : 0))* 4507 (sizeof (ldc_mem_cookie_t))); 4508 4509 /* 4510 * Set the maximum expected message length to 4511 * accommodate in-band-descriptor messages with all 4512 * their cookies 4513 */ 4514 vd->max_msglen = MAX(vd->max_msglen, max_inband_msglen); 4515 4516 /* 4517 * Initialize the data structure for processing in-band I/O 4518 * request descriptors 4519 */ 4520 vd->inband_task.vd = vd; 4521 vd->inband_task.msg = kmem_alloc(vd->max_msglen, KM_SLEEP); 4522 vd->inband_task.index = 0; 4523 vd->inband_task.type = VD_FINAL_RANGE_TASK; /* range == 1 */ 4524 } 4525 4526 /* Return the device's block size and max transfer size to the client */ 4527 attr_msg->vdisk_block_size = vd->vdisk_bsize; 4528 attr_msg->max_xfer_sz = vd->max_xfer_sz; 4529 4530 attr_msg->vdisk_size = vd->vdisk_size; 4531 attr_msg->vdisk_type = (vd_slice_single_slice)? 
vd->vdisk_type : 4532 VD_DISK_TYPE_DISK; 4533 attr_msg->vdisk_media = vd->vdisk_media; 4534 4535 /* Discover and save the list of supported VD_OP_XXX operations */ 4536 vd_set_exported_operations(vd); 4537 attr_msg->operations = vd->operations; 4538 4539 PR0("%s", VD_CLIENT(vd)); 4540 4541 ASSERT(vd->dring_task == NULL); 4542 4543 return (0); 4544 } 4545 4546 static int 4547 vd_process_dring_reg_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 4548 { 4549 int status; 4550 size_t expected; 4551 ldc_mem_info_t dring_minfo; 4552 uint8_t mtype; 4553 vio_dring_reg_msg_t *reg_msg = (vio_dring_reg_msg_t *)msg; 4554 4555 4556 ASSERT(msglen >= sizeof (msg->tag)); 4557 4558 if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, 4559 VIO_DRING_REG)) { 4560 PR0("Message is not a register-dring message"); 4561 return (ENOMSG); 4562 } 4563 4564 if (msglen < sizeof (*reg_msg)) { 4565 PR0("Expected at least %lu-byte register-dring message; " 4566 "received %lu bytes", sizeof (*reg_msg), msglen); 4567 return (EBADMSG); 4568 } 4569 4570 expected = sizeof (*reg_msg) + 4571 (reg_msg->ncookies - 1)*(sizeof (reg_msg->cookie[0])); 4572 if (msglen != expected) { 4573 PR0("Expected %lu-byte register-dring message; " 4574 "received %lu bytes", expected, msglen); 4575 return (EBADMSG); 4576 } 4577 4578 if (vd->initialized & VD_DRING) { 4579 PR0("A dring was previously registered; only support one"); 4580 return (EBADMSG); 4581 } 4582 4583 if (reg_msg->num_descriptors > INT32_MAX) { 4584 PR0("reg_msg->num_descriptors = %u; must be <= %u (%s)", 4585 reg_msg->num_descriptors, INT32_MAX, STRINGIZE(INT32_MAX)); 4586 return (EBADMSG); 4587 } 4588 4589 if (reg_msg->ncookies != 1) { 4590 /* 4591 * In addition to fixing the assertion in the success case 4592 * below, supporting drings which require more than one 4593 * "cookie" requires increasing the value of vd->max_msglen 4594 * somewhere in the code path prior to receiving the message 4595 * which results in calling this function. Note that without 4596 * making this change, the larger message size required to 4597 * accommodate multiple cookies cannot be successfully 4598 * received, so this function will not even get called. 
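 * (For example, a hypothetical register-dring message carrying two
 * cookies would be sizeof (*reg_msg) + sizeof (reg_msg->cookie[0])
 * bytes long, which exceeds the vd->max_msglen in effect when the
 * message arrives.)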
4599 * Gracefully accommodating more dring cookies might 4600 * reasonably demand exchanging an additional attribute or 4601 * making a minor protocol adjustment 4602 */ 4603 PR0("reg_msg->ncookies = %u != 1", reg_msg->ncookies); 4604 return (EBADMSG); 4605 } 4606 4607 if (vd_direct_mapped_drings) 4608 mtype = LDC_DIRECT_MAP; 4609 else 4610 mtype = LDC_SHADOW_MAP; 4611 4612 status = ldc_mem_dring_map(vd->ldc_handle, reg_msg->cookie, 4613 reg_msg->ncookies, reg_msg->num_descriptors, 4614 reg_msg->descriptor_size, mtype, &vd->dring_handle); 4615 if (status != 0) { 4616 PR0("ldc_mem_dring_map() returned errno %d", status); 4617 return (status); 4618 } 4619 4620 /* 4621 * To remove the need for this assertion, must call 4622 * ldc_mem_dring_nextcookie() successfully ncookies-1 times after a 4623 * successful call to ldc_mem_dring_map() 4624 */ 4625 ASSERT(reg_msg->ncookies == 1); 4626 4627 if ((status = 4628 ldc_mem_dring_info(vd->dring_handle, &dring_minfo)) != 0) { 4629 PR0("ldc_mem_dring_info() returned errno %d", status); 4630 if ((status = ldc_mem_dring_unmap(vd->dring_handle)) != 0) 4631 PR0("ldc_mem_dring_unmap() returned errno %d", status); 4632 return (status); 4633 } 4634 4635 if (dring_minfo.vaddr == NULL) { 4636 PR0("Descriptor ring virtual address is NULL"); 4637 return (ENXIO); 4638 } 4639 4640 4641 /* Initialize for valid message and mapped dring */ 4642 vd->initialized |= VD_DRING; 4643 vd->dring_ident = 1; /* "There Can Be Only One" */ 4644 vd->dring = dring_minfo.vaddr; 4645 vd->descriptor_size = reg_msg->descriptor_size; 4646 vd->dring_len = reg_msg->num_descriptors; 4647 vd->dring_mtype = dring_minfo.mtype; 4648 reg_msg->dring_ident = vd->dring_ident; 4649 PR1("descriptor size = %u, dring length = %u", 4650 vd->descriptor_size, vd->dring_len); 4651 4652 /* 4653 * Allocate and initialize a "shadow" array of data structures for 4654 * tasks to process I/O requests in dring elements 4655 */ 4656 vd->dring_task = 4657 kmem_zalloc((sizeof (*vd->dring_task)) * vd->dring_len, KM_SLEEP); 4658 for (int i = 0; i < vd->dring_len; i++) { 4659 vd->dring_task[i].vd = vd; 4660 vd->dring_task[i].index = i; 4661 4662 status = ldc_mem_alloc_handle(vd->ldc_handle, 4663 &(vd->dring_task[i].mhdl)); 4664 if (status) { 4665 PR0("ldc_mem_alloc_handle() returned err %d ", status); 4666 return (ENXIO); 4667 } 4668 4669 /* 4670 * The descriptor payload varies in length. Calculate its 4671 * size by subtracting the header size from the total 4672 * descriptor size. 
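 *
 * As an example with hypothetical sizes: a descriptor_size of 424
 * bytes and a 24-byte vio_dring_entry_hdr_t would yield a
 * 424 - 24 = 400 byte request buffer for each shadow task below.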
4673 */ 4674 vd->dring_task[i].request = kmem_zalloc((vd->descriptor_size - 4675 sizeof (vio_dring_entry_hdr_t)), KM_SLEEP); 4676 vd->dring_task[i].msg = kmem_alloc(vd->max_msglen, KM_SLEEP); 4677 } 4678 4679 if (vd->file || vd->zvol) { 4680 vd->write_queue = 4681 kmem_zalloc(sizeof (buf_t *) * vd->dring_len, KM_SLEEP); 4682 } 4683 4684 return (0); 4685 } 4686 4687 static int 4688 vd_process_dring_unreg_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 4689 { 4690 vio_dring_unreg_msg_t *unreg_msg = (vio_dring_unreg_msg_t *)msg; 4691 4692 4693 ASSERT(msglen >= sizeof (msg->tag)); 4694 4695 if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, 4696 VIO_DRING_UNREG)) { 4697 PR0("Message is not an unregister-dring message"); 4698 return (ENOMSG); 4699 } 4700 4701 if (msglen != sizeof (*unreg_msg)) { 4702 PR0("Expected %lu-byte unregister-dring message; " 4703 "received %lu bytes", sizeof (*unreg_msg), msglen); 4704 return (EBADMSG); 4705 } 4706 4707 if (unreg_msg->dring_ident != vd->dring_ident) { 4708 PR0("Expected dring ident %lu; received %lu", 4709 vd->dring_ident, unreg_msg->dring_ident); 4710 return (EBADMSG); 4711 } 4712 4713 return (0); 4714 } 4715 4716 static int 4717 process_rdx_msg(vio_msg_t *msg, size_t msglen) 4718 { 4719 ASSERT(msglen >= sizeof (msg->tag)); 4720 4721 if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_RDX)) { 4722 PR0("Message is not an RDX message"); 4723 return (ENOMSG); 4724 } 4725 4726 if (msglen != sizeof (vio_rdx_msg_t)) { 4727 PR0("Expected %lu-byte RDX message; received %lu bytes", 4728 sizeof (vio_rdx_msg_t), msglen); 4729 return (EBADMSG); 4730 } 4731 4732 PR0("Valid RDX message"); 4733 return (0); 4734 } 4735 4736 static int 4737 vd_check_seq_num(vd_t *vd, uint64_t seq_num) 4738 { 4739 if ((vd->initialized & VD_SEQ_NUM) && (seq_num != vd->seq_num + 1)) { 4740 PR0("Received seq_num %lu; expected %lu", 4741 seq_num, (vd->seq_num + 1)); 4742 PR0("initiating soft reset"); 4743 vd_need_reset(vd, B_FALSE); 4744 return (1); 4745 } 4746 4747 vd->seq_num = seq_num; 4748 vd->initialized |= VD_SEQ_NUM; /* superfluous after first time... 
*/ 4749 return (0); 4750 } 4751 4752 /* 4753 * Return the expected size of an inband-descriptor message with all the 4754 * cookies it claims to include 4755 */ 4756 static size_t 4757 expected_inband_size(vd_dring_inband_msg_t *msg) 4758 { 4759 return ((sizeof (*msg)) + 4760 (msg->payload.ncookies - 1)*(sizeof (msg->payload.cookie[0]))); 4761 } 4762 4763 /* 4764 * Process an in-band descriptor message: used with clients like OBP, with 4765 * which vds exchanges descriptors within VIO message payloads, rather than 4766 * operating on them within a descriptor ring 4767 */ 4768 static int 4769 vd_process_desc_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 4770 { 4771 size_t expected; 4772 vd_dring_inband_msg_t *desc_msg = (vd_dring_inband_msg_t *)msg; 4773 4774 4775 ASSERT(msglen >= sizeof (msg->tag)); 4776 4777 if (!vd_msgtype(&msg->tag, VIO_TYPE_DATA, VIO_SUBTYPE_INFO, 4778 VIO_DESC_DATA)) { 4779 PR1("Message is not an in-band-descriptor message"); 4780 return (ENOMSG); 4781 } 4782 4783 if (msglen < sizeof (*desc_msg)) { 4784 PR0("Expected at least %lu-byte descriptor message; " 4785 "received %lu bytes", sizeof (*desc_msg), msglen); 4786 return (EBADMSG); 4787 } 4788 4789 if (msglen != (expected = expected_inband_size(desc_msg))) { 4790 PR0("Expected %lu-byte descriptor message; " 4791 "received %lu bytes", expected, msglen); 4792 return (EBADMSG); 4793 } 4794 4795 if (vd_check_seq_num(vd, desc_msg->hdr.seq_num) != 0) 4796 return (EBADMSG); 4797 4798 /* 4799 * Valid message: Set up the in-band descriptor task and process the 4800 * request. Arrange to acknowledge the client's message, unless an 4801 * error processing the descriptor task results in setting 4802 * VIO_SUBTYPE_NACK 4803 */ 4804 PR1("Valid in-band-descriptor message"); 4805 msg->tag.vio_subtype = VIO_SUBTYPE_ACK; 4806 4807 ASSERT(vd->inband_task.msg != NULL); 4808 4809 bcopy(msg, vd->inband_task.msg, msglen); 4810 vd->inband_task.msglen = msglen; 4811 4812 /* 4813 * The task request is now the payload of the message 4814 * that was just copied into the body of the task. 4815 */ 4816 desc_msg = (vd_dring_inband_msg_t *)vd->inband_task.msg; 4817 vd->inband_task.request = &desc_msg->payload; 4818 4819 return (vd_process_task(&vd->inband_task)); 4820 } 4821 4822 static int 4823 vd_process_element(vd_t *vd, vd_task_type_t type, uint32_t idx, 4824 vio_msg_t *msg, size_t msglen) 4825 { 4826 int status; 4827 boolean_t ready; 4828 on_trap_data_t otd; 4829 vd_dring_entry_t *elem = VD_DRING_ELEM(idx); 4830 4831 /* Accept the updated dring element */ 4832 if ((status = VIO_DRING_ACQUIRE(&otd, vd->dring_mtype, 4833 vd->dring_handle, idx, idx)) != 0) { 4834 return (status); 4835 } 4836 ready = (elem->hdr.dstate == VIO_DESC_READY); 4837 if (ready) { 4838 elem->hdr.dstate = VIO_DESC_ACCEPTED; 4839 bcopy(&elem->payload, vd->dring_task[idx].request, 4840 (vd->descriptor_size - sizeof (vio_dring_entry_hdr_t))); 4841 } else { 4842 PR0("descriptor %u not ready", idx); 4843 VD_DUMP_DRING_ELEM(elem); 4844 } 4845 if ((status = VIO_DRING_RELEASE(vd->dring_mtype, 4846 vd->dring_handle, idx, idx)) != 0) { 4847 PR0("VIO_DRING_RELEASE() returned errno %d", status); 4848 return (status); 4849 } 4850 if (!ready) 4851 return (EBUSY); 4852 4853 4854 /* Initialize a task and process the accepted element */ 4855 PR1("Processing dring element %u", idx); 4856 vd->dring_task[idx].type = type; 4857 4858 /* duplicate msg buf for cookies etc. 
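 * The copy must outlive this function: the task may complete
 * asynchronously, and the completion path still needs the message,
 * including the LDC cookies it carries, to transfer data and to send
 * the final ack or nack.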
*/ 4859 bcopy(msg, vd->dring_task[idx].msg, msglen); 4860 4861 vd->dring_task[idx].msglen = msglen; 4862 return (vd_process_task(&vd->dring_task[idx])); 4863 } 4864 4865 static int 4866 vd_process_element_range(vd_t *vd, int start, int end, 4867 vio_msg_t *msg, size_t msglen) 4868 { 4869 int i, n, nelem, status = 0; 4870 boolean_t inprogress = B_FALSE; 4871 vd_task_type_t type; 4872 4873 4874 ASSERT(start >= 0); 4875 ASSERT(end >= 0); 4876 4877 /* 4878 * Arrange to acknowledge the client's message, unless an error 4879 * processing one of the dring elements results in setting 4880 * VIO_SUBTYPE_NACK 4881 */ 4882 msg->tag.vio_subtype = VIO_SUBTYPE_ACK; 4883 4884 /* 4885 * Process the dring elements in the range 4886 */ 4887 nelem = ((end < start) ? end + vd->dring_len : end) - start + 1; 4888 for (i = start, n = nelem; n > 0; i = (i + 1) % vd->dring_len, n--) { 4889 ((vio_dring_msg_t *)msg)->end_idx = i; 4890 type = (n == 1) ? VD_FINAL_RANGE_TASK : VD_NONFINAL_RANGE_TASK; 4891 status = vd_process_element(vd, type, i, msg, msglen); 4892 if (status == EINPROGRESS) 4893 inprogress = B_TRUE; 4894 else if (status != 0) 4895 break; 4896 } 4897 4898 /* 4899 * If some, but not all, operations of a multi-element range are in 4900 * progress, wait for other operations to complete before returning 4901 * (which will result in "ack" or "nack" of the message). Note that 4902 * all outstanding operations will need to complete, not just the ones 4903 * corresponding to the current range of dring elements; however, as 4904 * this situation is an error case, performance is less critical. 4905 */ 4906 if ((nelem > 1) && (status != EINPROGRESS) && inprogress) { 4907 if (vd->ioq != NULL) 4908 ddi_taskq_wait(vd->ioq); 4909 ddi_taskq_wait(vd->completionq); 4910 } 4911 4912 return (status); 4913 } 4914 4915 static int 4916 vd_process_dring_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 4917 { 4918 vio_dring_msg_t *dring_msg = (vio_dring_msg_t *)msg; 4919 4920 4921 ASSERT(msglen >= sizeof (msg->tag)); 4922 4923 if (!vd_msgtype(&msg->tag, VIO_TYPE_DATA, VIO_SUBTYPE_INFO, 4924 VIO_DRING_DATA)) { 4925 PR1("Message is not a dring-data message"); 4926 return (ENOMSG); 4927 } 4928 4929 if (msglen != sizeof (*dring_msg)) { 4930 PR0("Expected %lu-byte dring message; received %lu bytes", 4931 sizeof (*dring_msg), msglen); 4932 return (EBADMSG); 4933 } 4934 4935 if (vd_check_seq_num(vd, dring_msg->seq_num) != 0) 4936 return (EBADMSG); 4937 4938 if (dring_msg->dring_ident != vd->dring_ident) { 4939 PR0("Expected dring ident %lu; received ident %lu", 4940 vd->dring_ident, dring_msg->dring_ident); 4941 return (EBADMSG); 4942 } 4943 4944 if (dring_msg->start_idx >= vd->dring_len) { 4945 PR0("\"start_idx\" = %u; must be less than %u", 4946 dring_msg->start_idx, vd->dring_len); 4947 return (EBADMSG); 4948 } 4949 4950 if ((dring_msg->end_idx < 0) || 4951 (dring_msg->end_idx >= vd->dring_len)) { 4952 PR0("\"end_idx\" = %u; must be >= 0 and less than %u", 4953 dring_msg->end_idx, vd->dring_len); 4954 return (EBADMSG); 4955 } 4956 4957 /* Valid message; process range of updated dring elements */ 4958 PR1("Processing descriptor range, start = %u, end = %u", 4959 dring_msg->start_idx, dring_msg->end_idx); 4960 return (vd_process_element_range(vd, dring_msg->start_idx, 4961 dring_msg->end_idx, msg, msglen)); 4962 } 4963 4964 static int 4965 recv_msg(ldc_handle_t ldc_handle, void *msg, size_t *nbytes) 4966 { 4967 int retry, status; 4968 size_t size = *nbytes; 4969 4970 4971 for (retry = 0, status = ETIMEDOUT; 4972 retry < vds_ldc_retries 
&& status == ETIMEDOUT; 4973 retry++) { 4974 PR1("ldc_read() attempt %d", (retry + 1)); 4975 *nbytes = size; 4976 status = ldc_read(ldc_handle, msg, nbytes); 4977 } 4978 4979 if (status) { 4980 PR0("ldc_read() returned errno %d", status); 4981 if (status != ECONNRESET) 4982 return (ENOMSG); 4983 return (status); 4984 } else if (*nbytes == 0) { 4985 PR1("ldc_read() returned 0 and no message read"); 4986 return (ENOMSG); 4987 } 4988 4989 PR1("RCVD %lu-byte message", *nbytes); 4990 return (0); 4991 } 4992 4993 static int 4994 vd_do_process_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 4995 { 4996 int status; 4997 4998 4999 PR1("Processing (%x/%x/%x) message", msg->tag.vio_msgtype, 5000 msg->tag.vio_subtype, msg->tag.vio_subtype_env); 5001 #ifdef DEBUG 5002 vd_decode_tag(msg); 5003 #endif 5004 5005 /* 5006 * Validate session ID up front, since it applies to all messages 5007 * once set 5008 */ 5009 if ((msg->tag.vio_sid != vd->sid) && (vd->initialized & VD_SID)) { 5010 PR0("Expected SID %u, received %u", vd->sid, 5011 msg->tag.vio_sid); 5012 return (EBADMSG); 5013 } 5014 5015 PR1("\tWhile in state %d (%s)", vd->state, vd_decode_state(vd->state)); 5016 5017 /* 5018 * Process the received message based on connection state 5019 */ 5020 switch (vd->state) { 5021 case VD_STATE_INIT: /* expect version message */ 5022 if ((status = vd_process_ver_msg(vd, msg, msglen)) != 0) 5023 return (status); 5024 5025 /* Version negotiated, move to that state */ 5026 vd->state = VD_STATE_VER; 5027 return (0); 5028 5029 case VD_STATE_VER: /* expect attribute message */ 5030 if ((status = vd_process_attr_msg(vd, msg, msglen)) != 0) 5031 return (status); 5032 5033 /* Attributes exchanged, move to that state */ 5034 vd->state = VD_STATE_ATTR; 5035 return (0); 5036 5037 case VD_STATE_ATTR: 5038 switch (vd->xfer_mode) { 5039 case VIO_DESC_MODE: /* expect RDX message */ 5040 if ((status = process_rdx_msg(msg, msglen)) != 0) 5041 return (status); 5042 5043 /* Ready to receive in-band descriptors */ 5044 vd->state = VD_STATE_DATA; 5045 return (0); 5046 5047 case VIO_DRING_MODE_V1_0: /* expect register-dring message */ 5048 if ((status = 5049 vd_process_dring_reg_msg(vd, msg, msglen)) != 0) 5050 return (status); 5051 5052 /* One dring negotiated, move to that state */ 5053 vd->state = VD_STATE_DRING; 5054 return (0); 5055 5056 default: 5057 ASSERT("Unsupported transfer mode"); 5058 PR0("Unsupported transfer mode"); 5059 return (ENOTSUP); 5060 } 5061 5062 case VD_STATE_DRING: /* expect RDX, register-dring, or unreg-dring */ 5063 if ((status = process_rdx_msg(msg, msglen)) == 0) { 5064 /* Ready to receive data */ 5065 vd->state = VD_STATE_DATA; 5066 return (0); 5067 } else if (status != ENOMSG) { 5068 return (status); 5069 } 5070 5071 5072 /* 5073 * If another register-dring message is received, stay in 5074 * dring state in case the client sends RDX; although the 5075 * protocol allows multiple drings, this server does not 5076 * support using more than one 5077 */ 5078 if ((status = 5079 vd_process_dring_reg_msg(vd, msg, msglen)) != ENOMSG) 5080 return (status); 5081 5082 /* 5083 * Acknowledge an unregister-dring message, but reset the 5084 * connection anyway: Although the protocol allows 5085 * unregistering drings, this server cannot serve a vdisk 5086 * without its only dring 5087 */ 5088 status = vd_process_dring_unreg_msg(vd, msg, msglen); 5089 return ((status == 0) ? 
ENOTSUP : status); 5090 5091 case VD_STATE_DATA: 5092 switch (vd->xfer_mode) { 5093 case VIO_DESC_MODE: /* expect in-band-descriptor message */ 5094 return (vd_process_desc_msg(vd, msg, msglen)); 5095 5096 case VIO_DRING_MODE_V1_0: /* expect dring-data or unreg-dring */ 5097 /* 5098 * Typically expect dring-data messages, so handle 5099 * them first 5100 */ 5101 if ((status = vd_process_dring_msg(vd, msg, 5102 msglen)) != ENOMSG) 5103 return (status); 5104 5105 /* 5106 * Acknowledge an unregister-dring message, but reset 5107 * the connection anyway: Although the protocol 5108 * allows unregistering drings, this server cannot 5109 * serve a vdisk without its only dring 5110 */ 5111 status = vd_process_dring_unreg_msg(vd, msg, msglen); 5112 return ((status == 0) ? ENOTSUP : status); 5113 5114 default: 5115 ASSERT("Unsupported transfer mode"); 5116 PR0("Unsupported transfer mode"); 5117 return (ENOTSUP); 5118 } 5119 5120 default: 5121 ASSERT("Invalid client connection state"); 5122 PR0("Invalid client connection state"); 5123 return (ENOTSUP); 5124 } 5125 } 5126 5127 static int 5128 vd_process_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 5129 { 5130 int status; 5131 boolean_t reset_ldc = B_FALSE; 5132 vd_task_t task; 5133 5134 /* 5135 * Check that the message is at least big enough for a "tag", so that 5136 * message processing can proceed based on tag-specified message type 5137 */ 5138 if (msglen < sizeof (vio_msg_tag_t)) { 5139 PR0("Received short (%lu-byte) message", msglen); 5140 /* Can't "nack" short message, so drop the big hammer */ 5141 PR0("initiating full reset"); 5142 vd_need_reset(vd, B_TRUE); 5143 return (EBADMSG); 5144 } 5145 5146 /* 5147 * Process the message 5148 */ 5149 switch (status = vd_do_process_msg(vd, msg, msglen)) { 5150 case 0: 5151 /* "ack" valid, successfully-processed messages */ 5152 msg->tag.vio_subtype = VIO_SUBTYPE_ACK; 5153 break; 5154 5155 case EINPROGRESS: 5156 /* The completion handler will "ack" or "nack" the message */ 5157 return (EINPROGRESS); 5158 case ENOMSG: 5159 PR0("Received unexpected message"); 5160 _NOTE(FALLTHROUGH); 5161 case EBADMSG: 5162 case ENOTSUP: 5163 /* "transport" error will cause NACK of invalid messages */ 5164 msg->tag.vio_subtype = VIO_SUBTYPE_NACK; 5165 break; 5166 5167 default: 5168 /* "transport" error will cause NACK of invalid messages */ 5169 msg->tag.vio_subtype = VIO_SUBTYPE_NACK; 5170 /* An LDC error probably occurred, so try resetting it */ 5171 reset_ldc = B_TRUE; 5172 break; 5173 } 5174 5175 PR1("\tResulting in state %d (%s)", vd->state, 5176 vd_decode_state(vd->state)); 5177 5178 /* populate the task so we can dispatch it on the taskq */ 5179 task.vd = vd; 5180 task.msg = msg; 5181 task.msglen = msglen; 5182 5183 /* 5184 * Queue a task to send the notification that the operation completed. 5185 * We need to ensure that requests are responded to in the correct 5186 * order and since the taskq is processed serially this ordering 5187 * is maintained. 5188 */ 5189 (void) ddi_taskq_dispatch(vd->completionq, vd_serial_notify, 5190 &task, DDI_SLEEP); 5191 5192 /* 5193 * To ensure handshake negotiations do not happen out of order, such 5194 * requests that come through this path should not be done in parallel 5195 * so we need to wait here until the response is sent to the client. 5196 */ 5197 ddi_taskq_wait(vd->completionq); 5198 5199 /* Arrange to reset the connection for nack'ed or failed messages */ 5200 if ((status != 0) || reset_ldc) { 5201 PR0("initiating %s reset", 5202 (reset_ldc) ? 
"full" : "soft"); 5203 vd_need_reset(vd, reset_ldc); 5204 } 5205 5206 return (status); 5207 } 5208 5209 static boolean_t 5210 vd_enabled(vd_t *vd) 5211 { 5212 boolean_t enabled; 5213 5214 mutex_enter(&vd->lock); 5215 enabled = vd->enabled; 5216 mutex_exit(&vd->lock); 5217 return (enabled); 5218 } 5219 5220 static void 5221 vd_recv_msg(void *arg) 5222 { 5223 vd_t *vd = (vd_t *)arg; 5224 int rv = 0, status = 0; 5225 5226 ASSERT(vd != NULL); 5227 5228 PR2("New task to receive incoming message(s)"); 5229 5230 5231 while (vd_enabled(vd) && status == 0) { 5232 size_t msglen, msgsize; 5233 ldc_status_t lstatus; 5234 5235 /* 5236 * Receive and process a message 5237 */ 5238 vd_reset_if_needed(vd); /* can change vd->max_msglen */ 5239 5240 /* 5241 * check if channel is UP - else break out of loop 5242 */ 5243 status = ldc_status(vd->ldc_handle, &lstatus); 5244 if (lstatus != LDC_UP) { 5245 PR0("channel not up (status=%d), exiting recv loop\n", 5246 lstatus); 5247 break; 5248 } 5249 5250 ASSERT(vd->max_msglen != 0); 5251 5252 msgsize = vd->max_msglen; /* stable copy for alloc/free */ 5253 msglen = msgsize; /* actual len after recv_msg() */ 5254 5255 status = recv_msg(vd->ldc_handle, vd->vio_msgp, &msglen); 5256 switch (status) { 5257 case 0: 5258 rv = vd_process_msg(vd, (void *)vd->vio_msgp, msglen); 5259 /* check if max_msglen changed */ 5260 if (msgsize != vd->max_msglen) { 5261 PR0("max_msglen changed 0x%lx to 0x%lx bytes\n", 5262 msgsize, vd->max_msglen); 5263 kmem_free(vd->vio_msgp, msgsize); 5264 vd->vio_msgp = 5265 kmem_alloc(vd->max_msglen, KM_SLEEP); 5266 } 5267 if (rv == EINPROGRESS) 5268 continue; 5269 break; 5270 5271 case ENOMSG: 5272 break; 5273 5274 case ECONNRESET: 5275 PR0("initiating soft reset (ECONNRESET)\n"); 5276 vd_need_reset(vd, B_FALSE); 5277 status = 0; 5278 break; 5279 5280 default: 5281 /* Probably an LDC failure; arrange to reset it */ 5282 PR0("initiating full reset (status=0x%x)", status); 5283 vd_need_reset(vd, B_TRUE); 5284 break; 5285 } 5286 } 5287 5288 PR2("Task finished"); 5289 } 5290 5291 static uint_t 5292 vd_handle_ldc_events(uint64_t event, caddr_t arg) 5293 { 5294 vd_t *vd = (vd_t *)(void *)arg; 5295 int status; 5296 5297 ASSERT(vd != NULL); 5298 5299 if (!vd_enabled(vd)) 5300 return (LDC_SUCCESS); 5301 5302 if (event & LDC_EVT_DOWN) { 5303 PR0("LDC_EVT_DOWN: LDC channel went down"); 5304 5305 vd_need_reset(vd, B_TRUE); 5306 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, vd, 5307 DDI_SLEEP); 5308 if (status == DDI_FAILURE) { 5309 PR0("cannot schedule task to recv msg\n"); 5310 vd_need_reset(vd, B_TRUE); 5311 } 5312 } 5313 5314 if (event & LDC_EVT_RESET) { 5315 PR0("LDC_EVT_RESET: LDC channel was reset"); 5316 5317 if (vd->state != VD_STATE_INIT) { 5318 PR0("scheduling full reset"); 5319 vd_need_reset(vd, B_FALSE); 5320 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, 5321 vd, DDI_SLEEP); 5322 if (status == DDI_FAILURE) { 5323 PR0("cannot schedule task to recv msg\n"); 5324 vd_need_reset(vd, B_TRUE); 5325 } 5326 5327 } else { 5328 PR0("channel already reset, ignoring...\n"); 5329 PR0("doing ldc up...\n"); 5330 (void) ldc_up(vd->ldc_handle); 5331 } 5332 5333 return (LDC_SUCCESS); 5334 } 5335 5336 if (event & LDC_EVT_UP) { 5337 PR0("EVT_UP: LDC is up\nResetting client connection state"); 5338 PR0("initiating soft reset"); 5339 vd_need_reset(vd, B_FALSE); 5340 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, 5341 vd, DDI_SLEEP); 5342 if (status == DDI_FAILURE) { 5343 PR0("cannot schedule task to recv msg\n"); 5344 vd_need_reset(vd, B_TRUE); 5345 
return (LDC_SUCCESS); 5346 } 5347 } 5348 5349 if (event & LDC_EVT_READ) { 5350 int status; 5351 5352 PR1("New data available"); 5353 /* Queue a task to receive the new data */ 5354 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, vd, 5355 DDI_SLEEP); 5356 5357 if (status == DDI_FAILURE) { 5358 PR0("cannot schedule task to recv msg\n"); 5359 vd_need_reset(vd, B_TRUE); 5360 } 5361 } 5362 5363 return (LDC_SUCCESS); 5364 } 5365 5366 static uint_t 5367 vds_check_for_vd(mod_hash_key_t key, mod_hash_val_t *val, void *arg) 5368 { 5369 _NOTE(ARGUNUSED(key, val)) 5370 (*((uint_t *)arg))++; 5371 return (MH_WALK_TERMINATE); 5372 } 5373 5374 5375 static int 5376 vds_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 5377 { 5378 uint_t vd_present = 0; 5379 minor_t instance; 5380 vds_t *vds; 5381 5382 5383 switch (cmd) { 5384 case DDI_DETACH: 5385 /* the real work happens below */ 5386 break; 5387 case DDI_SUSPEND: 5388 PR0("No action required for DDI_SUSPEND"); 5389 return (DDI_SUCCESS); 5390 default: 5391 PR0("Unrecognized \"cmd\""); 5392 return (DDI_FAILURE); 5393 } 5394 5395 ASSERT(cmd == DDI_DETACH); 5396 instance = ddi_get_instance(dip); 5397 if ((vds = ddi_get_soft_state(vds_state, instance)) == NULL) { 5398 PR0("Could not get state for instance %u", instance); 5399 ddi_soft_state_free(vds_state, instance); 5400 return (DDI_FAILURE); 5401 } 5402 5403 /* Do not detach while serving any vdisks */ 5404 mod_hash_walk(vds->vd_table, vds_check_for_vd, &vd_present); 5405 if (vd_present) { 5406 PR0("Not detaching because serving vdisks"); 5407 return (DDI_FAILURE); 5408 } 5409 5410 PR0("Detaching"); 5411 if (vds->initialized & VDS_MDEG) { 5412 (void) mdeg_unregister(vds->mdeg); 5413 kmem_free(vds->ispecp->specp, sizeof (vds_prop_template)); 5414 kmem_free(vds->ispecp, sizeof (mdeg_node_spec_t)); 5415 vds->ispecp = NULL; 5416 vds->mdeg = NULL; 5417 } 5418 5419 vds_driver_types_free(vds); 5420 5421 if (vds->initialized & VDS_LDI) 5422 (void) ldi_ident_release(vds->ldi_ident); 5423 mod_hash_destroy_hash(vds->vd_table); 5424 ddi_soft_state_free(vds_state, instance); 5425 return (DDI_SUCCESS); 5426 } 5427 5428 /* 5429 * Description: 5430 * This function checks to see if the disk image being used as a 5431 * virtual disk is an ISO image. An ISO image is a special case 5432 * which can be booted or installed from, like a CD/DVD. 5433 * 5434 * Parameters: 5435 * vd - disk on which the operation is performed. 5436 * 5437 * Return Code: 5438 * B_TRUE - The disk image is an ISO 9660 compliant image 5439 * B_FALSE - just a regular disk image 5440 */ 5441 static boolean_t 5442 vd_dskimg_is_iso_image(vd_t *vd) 5443 { 5444 char iso_buf[ISO_SECTOR_SIZE]; 5445 int i, rv; 5446 uint_t sec; 5447 5448 ASSERT(VD_DSKIMG(vd)); 5449 5450 /* 5451 * If we have already discovered and saved this info we can 5452 * short-circuit the check and avoid reading the disk image. 5453 */ 5454 if (vd->vdisk_media == VD_MEDIA_DVD || vd->vdisk_media == VD_MEDIA_CD) 5455 return (B_TRUE); 5456 5457 /* 5458 * We wish to read the sector that should contain the 2nd ISO volume 5459 * descriptor. The second field in this descriptor is called the 5460 * Standard Identifier and is set to CD001 for a CD-ROM compliant 5461 * with the ISO 9660 standard. 
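 *
 * As a worked example of the conversion below, assuming the usual
 * ISO 9660 constants (2048-byte ISO sectors, volume descriptor at ISO
 * sector ISO_VOLDESC_SEC = 16) and a 512-byte vdisk_bsize, the read
 * is issued at virtual block (16 * 2048) / 512 = 64.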
5462 */ 5463 sec = (ISO_VOLDESC_SEC * ISO_SECTOR_SIZE) / vd->vdisk_bsize; 5464 rv = vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BREAD, (caddr_t)iso_buf, 5465 sec, ISO_SECTOR_SIZE); 5466 5467 if (rv < 0) 5468 return (B_FALSE); 5469 5470 for (i = 0; i < ISO_ID_STRLEN; i++) { 5471 if (ISO_STD_ID(iso_buf)[i] != ISO_ID_STRING[i]) 5472 return (B_FALSE); 5473 } 5474 5475 return (B_TRUE); 5476 } 5477 5478 /* 5479 * Description: 5480 * This function checks to see if the virtual device is an ATAPI 5481 * device. ATAPI devices use Group 1 Read/Write commands, so 5482 * any USCSI calls vds makes need to take this into account. 5483 * 5484 * Parameters: 5485 * vd - disk on which the operation is performed. 5486 * 5487 * Return Code: 5488 * B_TRUE - The virtual disk is backed by an ATAPI device 5489 * B_FALSE - not an ATAPI device (presumably SCSI) 5490 */ 5491 static boolean_t 5492 vd_is_atapi_device(vd_t *vd) 5493 { 5494 boolean_t is_atapi = B_FALSE; 5495 char *variantp; 5496 int rv; 5497 5498 ASSERT(vd->ldi_handle[0] != NULL); 5499 ASSERT(!vd->file); 5500 5501 rv = ldi_prop_lookup_string(vd->ldi_handle[0], 5502 (LDI_DEV_T_ANY | DDI_PROP_DONTPASS), "variant", &variantp); 5503 if (rv == DDI_PROP_SUCCESS) { 5504 PR0("'variant' property exists for %s", vd->device_path); 5505 if (strcmp(variantp, "atapi") == 0) 5506 is_atapi = B_TRUE; 5507 ddi_prop_free(variantp); 5508 } 5509 5510 rv = ldi_prop_exists(vd->ldi_handle[0], LDI_DEV_T_ANY, "atapi"); 5511 if (rv) { 5512 PR0("'atapi' property exists for %s", vd->device_path); 5513 is_atapi = B_TRUE; 5514 } 5515 5516 return (is_atapi); 5517 } 5518 5519 static int 5520 vd_setup_full_disk(vd_t *vd) 5521 { 5522 int status; 5523 major_t major = getmajor(vd->dev[0]); 5524 minor_t minor = getminor(vd->dev[0]) - VD_ENTIRE_DISK_SLICE; 5525 5526 ASSERT(vd->vdisk_type == VD_DISK_TYPE_DISK); 5527 5528 /* set the disk size, block size and the media type of the disk */ 5529 status = vd_backend_check_size(vd); 5530 5531 if (status != 0) { 5532 if (!vd->scsi) { 5533 /* unexpected failure */ 5534 PRN("Failed to check backend size (errno %d)", status); 5535 return (status); 5536 } 5537 5538 /* 5539 * The function can fail for SCSI disks which are present but 5540 * reserved by another system. In that case, we don't know the 5541 * size of the disk and the block size. 5542 */ 5543 vd->vdisk_size = VD_SIZE_UNKNOWN; 5544 vd->vdisk_bsize = 0; 5545 vd->backend_bsize = 0; 5546 vd->vdisk_media = VD_MEDIA_FIXED; 5547 } 5548 5549 /* Move dev number and LDI handle to entire-disk-slice array elements */ 5550 vd->dev[VD_ENTIRE_DISK_SLICE] = vd->dev[0]; 5551 vd->dev[0] = 0; 5552 vd->ldi_handle[VD_ENTIRE_DISK_SLICE] = vd->ldi_handle[0]; 5553 vd->ldi_handle[0] = NULL; 5554 5555 /* Initialize device numbers for remaining slices and open them */ 5556 for (int slice = 0; slice < vd->nslices; slice++) { 5557 /* 5558 * Skip the entire-disk slice, as it's already open and its 5559 * device known 5560 */ 5561 if (slice == VD_ENTIRE_DISK_SLICE) 5562 continue; 5563 ASSERT(vd->dev[slice] == 0); 5564 ASSERT(vd->ldi_handle[slice] == NULL); 5565 5566 /* 5567 * Construct the device number for the current slice 5568 */ 5569 vd->dev[slice] = makedevice(major, (minor + slice)); 5570 5571 /* 5572 * Open all slices of the disk to serve them to the client. 5573 * Slices are opened exclusively to prevent other threads or 5574 * processes in the service domain from performing I/O to 5575 * slices being accessed by a client. 
Failure to open a slice 5576 * results in vds not serving this disk, as the client could 5577 * attempt (and should be able) to access any slice immediately. 5578 * Any slices successfully opened before a failure will get 5579 * closed by vds_destroy_vd() as a result of the error returned 5580 * by this function. 5581 * 5582 * We need to do the open with FNDELAY so that opening an empty 5583 * slice does not fail. 5584 */ 5585 PR0("Opening device major %u, minor %u = slice %u", 5586 major, minor, slice); 5587 5588 /* 5589 * Try to open the device. This can fail for example if we are 5590 * opening an empty slice. So in case of a failure, we try the 5591 * open again but this time with the FNDELAY flag. 5592 */ 5593 status = ldi_open_by_dev(&vd->dev[slice], OTYP_BLK, 5594 vd->open_flags, kcred, &vd->ldi_handle[slice], 5595 vd->vds->ldi_ident); 5596 5597 if (status != 0) { 5598 status = ldi_open_by_dev(&vd->dev[slice], OTYP_BLK, 5599 vd->open_flags | FNDELAY, kcred, 5600 &vd->ldi_handle[slice], vd->vds->ldi_ident); 5601 } 5602 5603 if (status != 0) { 5604 PRN("ldi_open_by_dev() returned errno %d " 5605 "for slice %u", status, slice); 5606 /* vds_destroy_vd() will close any open slices */ 5607 vd->ldi_handle[slice] = NULL; 5608 return (status); 5609 } 5610 } 5611 5612 return (0); 5613 } 5614 5615 /* 5616 * When a slice or a volume is exported as a single-slice disk, we want 5617 * the disk backend (i.e. the slice or volume) to be entirely mapped as 5618 * a slice without the addition of any metadata. 5619 * 5620 * So when exporting the disk as a VTOC disk, we fake a disk with the following 5621 * layout: 5622 * flabel +--- flabel_limit 5623 * <-> V 5624 * 0 1 C D E 5625 * +-+---+--------------------------+--+ 5626 * virtual disk: |L|XXX| slice 0 |AA| 5627 * +-+---+--------------------------+--+ 5628 * ^ : : 5629 * | : : 5630 * VTOC LABEL--+ : : 5631 * +--------------------------+ 5632 * disk backend: | slice/volume/file | 5633 * +--------------------------+ 5634 * 0 N 5635 * 5636 * N is the number of blocks in the slice/volume/file. 5637 * 5638 * We simulate a disk with N+M blocks, where M is the number of blocks 5639 * simulated at the beginning and at the end of the disk (blocks 0-C 5640 * and D-E). 5641 * 5642 * The first blocks (0 to C-1) are emulated and cannot be changed. Blocks C 5643 * to D-1 define slice 0 and are mapped to the backend. Finally we emulate 2 5644 * alternate cylinders at the end of the disk (blocks D-E). In summary we have: 5645 * 5646 * - block 0 (L) returns a fake VTOC label 5647 * - blocks 1 to C-1 (X) are unused and return 0 5648 * - blocks C to D-1 are mapped to the exported slice or volume 5649 * - blocks D and E (A) are blocks defining alternate cylinders (2 cylinders) 5650 * 5651 * Note: because we define a fake disk geometry, it is possible that the length 5652 * of the backend is not a multiple of the cylinder size; in that case, the 5653 * very end of the backend will not map to any block of the virtual disk. 
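 *
 * A worked example with a hypothetical geometry: if dkg_nhead = 16 and
 * dkg_nsect = 128, a cylinder is csize = 16 * 128 = 2048 blocks. For a
 * backend of N = 204800 blocks, the code below produces:
 *
 *	dkg_ncyl   = 204800 / 2048 + 1 = 101	(1 extra for the label)
 *	dkg_acyl   = 2				(alternate cylinders)
 *	slice 0    = blocks 2048 .. 206847	(p_start csize, p_size N)
 *	vdisk_size = 204800 + 3 * 2048 = 210944 blocks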
5654 */ 5655 static int 5656 vd_setup_partition_vtoc(vd_t *vd) 5657 { 5658 char *device_path = vd->device_path; 5659 char unit; 5660 size_t size, csize; 5661 5662 /* Initialize dk_geom structure for single-slice device */ 5663 if (vd->dk_geom.dkg_nsect == 0) { 5664 PRN("%s geometry claims 0 sectors per track", device_path); 5665 return (EIO); 5666 } 5667 if (vd->dk_geom.dkg_nhead == 0) { 5668 PRN("%s geometry claims 0 heads", device_path); 5669 return (EIO); 5670 } 5671 5672 /* size of a cylinder in blocks */ 5673 csize = vd->dk_geom.dkg_nhead * vd->dk_geom.dkg_nsect; 5674 5675 /* 5676 * Add extra cylinders: we emulate the first cylinder (which contains 5677 * the disk label). 5678 */ 5679 vd->dk_geom.dkg_ncyl = vd->vdisk_size / csize + 1; 5680 5681 /* we emulate 2 alternate cylinders */ 5682 vd->dk_geom.dkg_acyl = 2; 5683 vd->dk_geom.dkg_pcyl = vd->dk_geom.dkg_ncyl + vd->dk_geom.dkg_acyl; 5684 5685 5686 /* Initialize vtoc structure for single-slice device */ 5687 bzero(vd->vtoc.v_part, sizeof (vd->vtoc.v_part)); 5688 vd->vtoc.v_part[0].p_tag = V_UNASSIGNED; 5689 vd->vtoc.v_part[0].p_flag = 0; 5690 /* 5691 * Partition 0 starts on cylinder 1 and its size has to be 5692 * a whole number of cylinders. 5693 */ 5694 vd->vtoc.v_part[0].p_start = csize; /* start on cylinder 1 */ 5695 vd->vtoc.v_part[0].p_size = (vd->vdisk_size / csize) * csize; 5696 5697 if (vd_slice_single_slice) { 5698 vd->vtoc.v_nparts = 1; 5699 bcopy(VD_ASCIILABEL, vd->vtoc.v_asciilabel, 5700 MIN(sizeof (VD_ASCIILABEL), 5701 sizeof (vd->vtoc.v_asciilabel))); 5702 bcopy(VD_VOLUME_NAME, vd->vtoc.v_volume, 5703 MIN(sizeof (VD_VOLUME_NAME), sizeof (vd->vtoc.v_volume))); 5704 } else { 5705 /* adjust the number of slices */ 5706 vd->nslices = V_NUMPAR; 5707 vd->vtoc.v_nparts = V_NUMPAR; 5708 5709 /* define slice 2 representing the entire disk */ 5710 vd->vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_tag = V_BACKUP; 5711 vd->vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_flag = 0; 5712 vd->vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_start = 0; 5713 vd->vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_size = 5714 vd->dk_geom.dkg_ncyl * csize; 5715 5716 vd_get_readable_size(vd->vdisk_size * vd->vdisk_bsize, 5717 &size, &unit); 5718 5719 /* 5720 * Set some attributes of the geometry to what format(1m) uses 5721 * so that writing a default label using format(1m) does not 5722 * produce any error. 5723 */ 5724 vd->dk_geom.dkg_bcyl = 0; 5725 vd->dk_geom.dkg_intrlv = 1; 5726 vd->dk_geom.dkg_write_reinstruct = 0; 5727 vd->dk_geom.dkg_read_reinstruct = 0; 5728 5729 /* 5730 * We must have a correct label name, otherwise format(1m) will 5731 * not recognize the disk as labeled. 5732 */ 5733 (void) snprintf(vd->vtoc.v_asciilabel, LEN_DKL_ASCII, 5734 "SUN-DiskSlice-%ld%cB cyl %d alt %d hd %d sec %d", 5735 size, unit, 5736 vd->dk_geom.dkg_ncyl, vd->dk_geom.dkg_acyl, 5737 vd->dk_geom.dkg_nhead, vd->dk_geom.dkg_nsect); 5738 bzero(vd->vtoc.v_volume, sizeof (vd->vtoc.v_volume)); 5739 5740 /* create a fake label from the vtoc and geometry */ 5741 vd->flabel_limit = (uint_t)csize; 5742 vd->flabel_size = VD_LABEL_VTOC_SIZE(vd->vdisk_bsize); 5743 vd->flabel = kmem_zalloc(vd->flabel_size, KM_SLEEP); 5744 vd_vtocgeom_to_label(&vd->vtoc, &vd->dk_geom, 5745 VD_LABEL_VTOC(vd)); 5746 } 5747 5748 /* adjust the vdisk_size, we emulate 3 cylinders */ 5749 vd->vdisk_size += csize * 3; 5750 5751 return (0); 5752 } 5753 5754 /* 5755 * When a slice, volume or file is exported as a single-slice disk, we want 5756 * the disk backend (i.e. 
5757  * as a slice without the addition of any metadata.
5758  *
5759  * So when exporting the disk as an EFI disk, we fake a disk with the following
5760  * layout: (assuming the block size is 512 bytes)
5761  *
5762  *                flabel +--- flabel_limit
5763  *               <------>  v
5764  *               0 1 2   L 34                         34+N      P
5765  *              +-+-+--+-------+--------------------------+-------+
5766  * virtual disk:|X|T|EE|XXXXXXX|          slice 0         |RRRRRRR|
5767  *              +-+-+--+-------+--------------------------+-------+
5768  *                 ^ ^          :                          :
5769  *                 | |          :                          :
5770  *            GPT-+ +-GPE       :                          :
5771  *                              +--------------------------+
5772  * disk backend:                |       slice/volume/file  |
5773  *                              +--------------------------+
5774  *                              0                          N
5775  *
5776  * N is the number of blocks in the slice/volume/file.
5777  *
5778  * We simulate a disk with N+M blocks, where M is the number of blocks
5779  * simulated at the beginning and at the end of the disk (blocks 0-33
5780  * and 34+N to P).
5781  *
5782  * The first 34 blocks (0 to 33) are emulated and cannot be changed. Blocks 34
5783  * to 34+N-1 define slice 0 and are mapped to the exported backend, and we
5784  * emulate some blocks at the end of the disk (blocks 34+N to P) as the EFI
5785  * reserved partition.
5786  *
5787  * - block 0 (X) is unused and returns 0
5788  * - block 1 (T) returns a fake EFI GPT (via DKIOCGETEFI)
5789  * - blocks 2 to L-1 (E) define a fake EFI GPE (via DKIOCGETEFI)
5790  * - blocks L to 33 (X) are unused and return 0
5791  * - blocks 34 to 34+N-1 are mapped to the exported slice, volume or file
5792  * - blocks 34+N to P define a fake reserved partition and backup label; they
5793  *   return 0
5794  *
5795  * Note: if the backend size is not a multiple of the vdisk block size then
5796  * the very end of the backend will not map to any block of the virtual disk.
5797  */
5798 static int
5799 vd_setup_partition_efi(vd_t *vd)
5800 {
5801 	efi_gpt_t *gpt;
5802 	efi_gpe_t *gpe;
5803 	struct uuid uuid = EFI_USR;
5804 	struct uuid efi_reserved = EFI_RESERVED;
5805 	uint32_t crc;
5806 	uint64_t s0_start, s0_end, first_u_lba;
5807 	size_t bsize;
5808
5809 	ASSERT(vd->vdisk_bsize > 0);
5810
5811 	bsize = vd->vdisk_bsize;
5812 	/*
5813 	 * The minimum size for the label is 16K (EFI_MIN_ARRAY_SIZE)
5814 	 * for GPEs plus one block for the GPT and one for PMBR.
5815 	 */
5816 	first_u_lba = (EFI_MIN_ARRAY_SIZE / bsize) + 2;
5817 	vd->flabel_limit = (uint_t)first_u_lba;
5818 	vd->flabel_size = VD_LABEL_EFI_SIZE(bsize);
5819 	vd->flabel = kmem_zalloc(vd->flabel_size, KM_SLEEP);
5820 	gpt = VD_LABEL_EFI_GPT(vd, bsize);
5821 	gpe = VD_LABEL_EFI_GPE(vd, bsize);
5822
5823 	/*
5824 	 * Adjust the vdisk_size, we emulate the first few blocks
5825 	 * for the disk label.
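	 *
	 * As a worked example (illustrative numbers only): with a 512-byte
	 * block size, first_u_lba = (16384 / 512) + 2 = 34, which matches
	 * the 34 emulated blocks shown in the layout diagram above; with a
	 * hypothetical 4096-byte block size it would shrink to
	 * (16384 / 4096) + 2 = 6.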
5826 */ 5827 vd->vdisk_size += first_u_lba; 5828 s0_start = first_u_lba; 5829 s0_end = vd->vdisk_size - 1; 5830 5831 gpt->efi_gpt_Signature = LE_64(EFI_SIGNATURE); 5832 gpt->efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT); 5833 gpt->efi_gpt_HeaderSize = LE_32(sizeof (efi_gpt_t)); 5834 gpt->efi_gpt_FirstUsableLBA = LE_64(first_u_lba); 5835 gpt->efi_gpt_PartitionEntryLBA = LE_64(2ULL); 5836 gpt->efi_gpt_SizeOfPartitionEntry = LE_32(sizeof (efi_gpe_t)); 5837 5838 UUID_LE_CONVERT(gpe[0].efi_gpe_PartitionTypeGUID, uuid); 5839 gpe[0].efi_gpe_StartingLBA = LE_64(s0_start); 5840 gpe[0].efi_gpe_EndingLBA = LE_64(s0_end); 5841 5842 if (vd_slice_single_slice) { 5843 gpt->efi_gpt_NumberOfPartitionEntries = LE_32(1); 5844 } else { 5845 /* adjust the number of slices */ 5846 gpt->efi_gpt_NumberOfPartitionEntries = LE_32(VD_MAXPART); 5847 vd->nslices = V_NUMPAR; 5848 5849 /* define a fake reserved partition */ 5850 UUID_LE_CONVERT(gpe[VD_MAXPART - 1].efi_gpe_PartitionTypeGUID, 5851 efi_reserved); 5852 gpe[VD_MAXPART - 1].efi_gpe_StartingLBA = 5853 LE_64(s0_end + 1); 5854 gpe[VD_MAXPART - 1].efi_gpe_EndingLBA = 5855 LE_64(s0_end + EFI_MIN_RESV_SIZE); 5856 5857 /* adjust the vdisk_size to include the reserved slice */ 5858 vd->vdisk_size += EFI_MIN_RESV_SIZE; 5859 } 5860 5861 gpt->efi_gpt_LastUsableLBA = LE_64(vd->vdisk_size - 1); 5862 5863 /* adjust the vdisk size for the backup GPT and GPE */ 5864 vd->vdisk_size += (EFI_MIN_ARRAY_SIZE / bsize) + 1; 5865 gpt->efi_gpt_AlternateLBA = LE_64(vd->vdisk_size - 1); 5866 5867 CRC32(crc, gpe, sizeof (efi_gpe_t) * VD_MAXPART, -1U, crc32_table); 5868 gpt->efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc); 5869 5870 CRC32(crc, gpt, sizeof (efi_gpt_t), -1U, crc32_table); 5871 gpt->efi_gpt_HeaderCRC32 = LE_32(~crc); 5872 5873 return (0); 5874 } 5875 5876 /* 5877 * Setup for a virtual disk whose backend is a file (exported as a single slice 5878 * or as a full disk). In that case, the backend is accessed using the vnode 5879 * interface. 5880 */ 5881 static int 5882 vd_setup_backend_vnode(vd_t *vd) 5883 { 5884 int rval, status; 5885 dev_t dev; 5886 char *file_path = vd->device_path; 5887 ldi_handle_t lhandle; 5888 struct dk_cinfo dk_cinfo; 5889 5890 ASSERT(!vd->volume); 5891 5892 if ((status = vn_open(file_path, UIO_SYSSPACE, vd->open_flags | FOFFMAX, 5893 0, &vd->file_vnode, 0, 0)) != 0) { 5894 PRN("vn_open(%s) = errno %d", file_path, status); 5895 return (status); 5896 } 5897 5898 /* 5899 * We set vd->file now so that vds_destroy_vd will take care of 5900 * closing the file and releasing the vnode in case of an error. 5901 */ 5902 vd->file = B_TRUE; 5903 5904 vd->max_xfer_sz = maxphys / DEV_BSIZE; /* default transfer size */ 5905 5906 /* 5907 * Get max_xfer_sz from the device where the file is. 
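	 * If the underlying device cannot be queried, the maxphys-based
	 * default set above is kept. As an illustration (hypothetical
	 * numbers): a hosting disk reporting dki_maxtransfer = 2048 would
	 * let the client issue transfers of up to 2048 512-byte blocks,
	 * i.e. 1 MB per request.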
5908 	 */
5909 	dev = vd->file_vnode->v_vfsp->vfs_dev;
5910 	PR0("underlying device of %s = (%d, %d)\n", file_path,
5911 	    getmajor(dev), getminor(dev));
5912
5913 	status = ldi_open_by_dev(&dev, OTYP_BLK, FREAD, kcred, &lhandle,
5914 	    vd->vds->ldi_ident);
5915
5916 	if (status != 0) {
5917 		PR0("ldi_open() returned errno %d for underlying device",
5918 		    status);
5919 	} else {
5920 		if ((status = ldi_ioctl(lhandle, DKIOCINFO,
5921 		    (intptr_t)&dk_cinfo, (vd->open_flags | FKIOCTL), kcred,
5922 		    &rval)) != 0) {
5923 			PR0("ldi_ioctl(DKIOCINFO) returned errno %d for "
5924 			    "underlying device", status);
5925 		} else {
5926 			/*
5927 			 * Store the device's max transfer size for
5928 			 * return to the client
5929 			 */
5930 			vd->max_xfer_sz = dk_cinfo.dki_maxtransfer;
5931 		}
5932
5933 		PR0("close the underlying device");
5934 		(void) ldi_close(lhandle, FREAD, kcred);
5935 	}
5936
5937 	PR0("using file %s on device (%d, %d), max_xfer = %u blks",
5938 	    file_path, getmajor(dev), getminor(dev), vd->max_xfer_sz);
5939
5940 	if (vd->vdisk_type == VD_DISK_TYPE_SLICE)
5941 		status = vd_setup_slice_image(vd);
5942 	else
5943 		status = vd_setup_disk_image(vd);
5944
5945 	return (status);
5946 }
5947
5948 static int
5949 vd_setup_slice_image(vd_t *vd)
5950 {
5951 	struct dk_label label;
5952 	int status;
5953
5954 	vd->vdisk_media = VD_MEDIA_FIXED;
5955 	vd->vdisk_label = (vd_slice_label == VD_DISK_LABEL_UNK)?
5956 	    vd_file_slice_label : vd_slice_label;
5957
5958 	if (vd->vdisk_label == VD_DISK_LABEL_EFI ||
5959 	    vd->dskimg_size >= 2 * ONE_TERABYTE) {
5960 		status = vd_setup_partition_efi(vd);
5961 	} else {
5962 		/*
5963 		 * We build a default label to get a geometry for
5964 		 * the vdisk. Then the partition setup function will
5965 		 * adjust the vtoc so that it defines a single-slice
5966 		 * disk.
5967 		 */
5968 		vd_build_default_label(vd->dskimg_size, vd->vdisk_bsize,
5969 		    &label);
5970 		vd_label_to_vtocgeom(&label, &vd->vtoc, &vd->dk_geom);
5971 		status = vd_setup_partition_vtoc(vd);
5972 	}
5973
5974 	return (status);
5975 }
5976
5977 static int
5978 vd_setup_disk_image(vd_t *vd)
5979 {
5980 	int status;
5981 	char *backend_path = vd->device_path;
5982
5983 	if ((status = vd_backend_check_size(vd)) != 0) {
5984 		PRN("Failed to check size of %s (errno %d)",
5985 		    backend_path, status);
5986 		return (EIO);
5987 	}
5988
5989 	/* size should be at least sizeof(dk_label) */
5990 	if (vd->dskimg_size < sizeof (struct dk_label)) {
5991 		PRN("Size of file has to be at least %ld bytes",
5992 		    sizeof (struct dk_label));
5993 		return (EIO);
5994 	}
5995
5996 	/*
5997 	 * Find and validate the geometry of a disk image.
5998 	 */
5999 	status = vd_dskimg_validate_geometry(vd);
6000 	if (status != 0 && status != EINVAL && status != ENOTSUP) {
6001 		PRN("Failed to read label from %s", backend_path);
6002 		return (EIO);
6003 	}
6004
6005 	if (vd_dskimg_is_iso_image(vd)) {
6006 		/*
6007 		 * Indicate whether to call this a CD or DVD from the size
6008 		 * of the ISO image (images for both drive types are stored
6009 		 * in the ISO-9660 format). CDs can store up to just under 1 GB.
6010 		 */
6011 		if ((vd->vdisk_size * vd->vdisk_bsize) > ONE_GIGABYTE)
6012 			vd->vdisk_media = VD_MEDIA_DVD;
6013 		else
6014 			vd->vdisk_media = VD_MEDIA_CD;
6015 	} else {
6016 		vd->vdisk_media = VD_MEDIA_FIXED;
6017 	}
6018
6019 	/* Setup devid for the disk image */
6020
6021 	if (vd->vdisk_label != VD_DISK_LABEL_UNK) {
6022
6023 		status = vd_dskimg_read_devid(vd, &vd->dskimg_devid);
6024
6025 		if (status == 0) {
6026 			/* a valid devid was found */
6027 			return (0);
6028 		}
6029
6030 		if (status != EINVAL) {
6031 			/*
6032 			 * There was an error while trying to read the devid.
6033 			 * So this disk image may have a devid but we are
6034 			 * unable to read it.
6035 			 */
6036 			PR0("cannot read devid for %s", backend_path);
6037 			vd->dskimg_devid = NULL;
6038 			return (0);
6039 		}
6040 	}
6041
6042 	/*
6043 	 * No valid device id was found so we create one. Note that a failure
6044 	 * to create a device id is not fatal and does not prevent the disk
6045 	 * image from being attached.
6046 	 */
6047 	PR1("creating devid for %s", backend_path);
6048
6049 	if (ddi_devid_init(vd->vds->dip, DEVID_FAB, NULL, 0,
6050 	    &vd->dskimg_devid) != DDI_SUCCESS) {
6051 		PR0("failed to create devid for %s", backend_path);
6052 		vd->dskimg_devid = NULL;
6053 		return (0);
6054 	}
6055
6056 	/*
6057 	 * Write devid to the disk image. The devid is stored into the disk
6058 	 * image if we have a valid label; otherwise the devid will be stored
6059 	 * when the user writes a valid label.
6060 	 */
6061 	if (vd->vdisk_label != VD_DISK_LABEL_UNK) {
6062 		if (vd_dskimg_write_devid(vd, vd->dskimg_devid) != 0) {
6063 			PR0("failed to write devid for %s", backend_path);
6064 			ddi_devid_free(vd->dskimg_devid);
6065 			vd->dskimg_devid = NULL;
6066 		}
6067 	}
6068
6069 	return (0);
6070 }
6071
6072
6073 /*
6074  * Description:
6075  *	Open a device using its device path (supplied by ldm(1m))
6076  *
6077  * Parameters:
6078  *	vd	- pointer to structure containing the vDisk info
6079  *	flags	- open flags
6080  *
6081  * Return Value
6082  *	0	- success
6083  *	!= 0	- some other non-zero return value from ldi(9F) functions
6084  */
6085 static int
6086 vd_open_using_ldi_by_name(vd_t *vd, int flags)
6087 {
6088 	int status;
6089 	char *device_path = vd->device_path;
6090
6091 	/* Attempt to open device */
6092 	status = ldi_open_by_name(device_path, flags, kcred,
6093 	    &vd->ldi_handle[0], vd->vds->ldi_ident);
6094
6095 	/*
6096 	 * The open can fail for example if we are opening an empty slice.
6097 	 * In case of a failure, we try the open again but this time with
6098 	 * the FNDELAY flag.
6099 	 */
6100 	if (status != 0)
6101 		status = ldi_open_by_name(device_path, flags | FNDELAY,
6102 		    kcred, &vd->ldi_handle[0], vd->vds->ldi_ident);
6103
6104 	if (status != 0) {
6105 		PR0("ldi_open_by_name(%s) = errno %d", device_path, status);
6106 		vd->ldi_handle[0] = NULL;
6107 		return (status);
6108 	}
6109
6110 	return (0);
6111 }
6112
6113 /*
6114  * Setup for a virtual disk whose backend is a device (a physical disk,
6115  * slice or volume device) exported as a full disk or as a slice. In these
6116  * cases, the backend is accessed using the LDI interface.
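 *
 * As a simplified summary of the decisions made below (not exhaustive,
 * and assuming none of the error paths is taken):
 *
 *	backend			"slice" option		exported as
 *	-------			--------------		-----------
 *	volume device		unset			full disk (disk image)
 *	volume device		set			single-slice disk
 *	disk slice 2		unset			full disk
 *	disk slice 2		set			single-slice disk
 *	other disk slice	either			single-slice disk
 *	CD/DVD device		unset			full disk
 *	CD/DVD device		set			single-slice disk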
6117 */ 6118 static int 6119 vd_setup_backend_ldi(vd_t *vd) 6120 { 6121 int rval, status; 6122 struct dk_cinfo dk_cinfo; 6123 char *device_path = vd->device_path; 6124 6125 /* device has been opened by vd_identify_dev() */ 6126 ASSERT(vd->ldi_handle[0] != NULL); 6127 ASSERT(vd->dev[0] != NULL); 6128 6129 vd->file = B_FALSE; 6130 6131 /* Verify backing device supports dk_cinfo */ 6132 if ((status = ldi_ioctl(vd->ldi_handle[0], DKIOCINFO, 6133 (intptr_t)&dk_cinfo, (vd->open_flags | FKIOCTL), kcred, 6134 &rval)) != 0) { 6135 PRN("ldi_ioctl(DKIOCINFO) returned errno %d for %s", 6136 status, device_path); 6137 return (status); 6138 } 6139 if (dk_cinfo.dki_partition >= V_NUMPAR) { 6140 PRN("slice %u >= maximum slice %u for %s", 6141 dk_cinfo.dki_partition, V_NUMPAR, device_path); 6142 return (EIO); 6143 } 6144 6145 /* 6146 * The device has been opened read-only by vd_identify_dev(), re-open 6147 * it read-write if the write flag is set and we don't have an optical 6148 * device such as a CD-ROM, which, for now, we do not permit writes to 6149 * and thus should not export write operations to the client. 6150 * 6151 * Future: if/when we implement support for guest domains writing to 6152 * optical devices we will need to do further checking of the media type 6153 * to distinguish between read-only and writable discs. 6154 */ 6155 if (dk_cinfo.dki_ctype == DKC_CDROM) { 6156 6157 vd->open_flags &= ~FWRITE; 6158 6159 } else if (vd->open_flags & FWRITE) { 6160 6161 (void) ldi_close(vd->ldi_handle[0], vd->open_flags & ~FWRITE, 6162 kcred); 6163 status = vd_open_using_ldi_by_name(vd, vd->open_flags); 6164 if (status != 0) { 6165 PR0("Failed to open (%s) = errno %d", 6166 device_path, status); 6167 return (status); 6168 } 6169 } 6170 6171 /* Store the device's max transfer size for return to the client */ 6172 vd->max_xfer_sz = dk_cinfo.dki_maxtransfer; 6173 6174 /* 6175 * We need to work out if it's an ATAPI (IDE CD-ROM) or SCSI device so 6176 * that we can use the correct CDB group when sending USCSI commands. 6177 */ 6178 vd->is_atapi_dev = vd_is_atapi_device(vd); 6179 6180 /* 6181 * Export a full disk. 6182 * 6183 * The exported device can be either a volume, a disk or a CD/DVD 6184 * device. We export a device as a full disk if we have an entire 6185 * disk slice (slice 2) and if this slice is exported as a full disk 6186 * and not as a single slice disk. A CD or DVD device is exported 6187 * as a full disk (even if it isn't s2). A volume is exported as a 6188 * full disk as long as the "slice" option is not specified. 6189 */ 6190 if (vd->vdisk_type == VD_DISK_TYPE_DISK) { 6191 6192 if (vd->volume) { 6193 /* setup disk image */ 6194 return (vd_setup_disk_image(vd)); 6195 } 6196 6197 if (dk_cinfo.dki_partition == VD_ENTIRE_DISK_SLICE || 6198 dk_cinfo.dki_ctype == DKC_CDROM) { 6199 ASSERT(!vd->volume); 6200 if (dk_cinfo.dki_ctype == DKC_SCSI_CCS) 6201 vd->scsi = B_TRUE; 6202 return (vd_setup_full_disk(vd)); 6203 } 6204 } 6205 6206 /* 6207 * Export a single slice disk. 6208 * 6209 * The exported device can be either a volume device or a disk slice. If 6210 * it is a disk slice different from slice 2 then it is always exported 6211 * as a single slice disk even if the "slice" option is not specified. 6212 * If it is disk slice 2 or a volume device then it is exported as a 6213 * single slice disk only if the "slice" option is specified. 
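	 *
	 * For example (hypothetical sizes, assuming the default tunables):
	 * a 100 GB volume exported with the "slice" option is probed for an
	 * existing VTOC label and gets a fake VTOC label if one is found,
	 * while a 2 TB volume always gets a fake EFI label because
	 * vd_setup_single_slice_disk() switches to EFI for backends of
	 * 1 TB or more.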
6214 	 */
6215 	return (vd_setup_single_slice_disk(vd));
6216 }
6217
6218 static int
6219 vd_setup_single_slice_disk(vd_t *vd)
6220 {
6221 	int status, rval;
6222 	struct dk_label label;
6223 	char *device_path = vd->device_path;
6224 	struct vtoc vtoc;
6225
6226 	vd->vdisk_media = VD_MEDIA_FIXED;
6227
6228 	if (vd->volume) {
6229 		ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE);
6230 	}
6231
6232 	/*
6233 	 * We export the slice as a single slice disk even if the "slice"
6234 	 * option was not specified.
6235 	 */
6236 	vd->vdisk_type = VD_DISK_TYPE_SLICE;
6237 	vd->nslices = 1;
6238
6239 	/* Get size of backing device */
6240 	if ((status = vd_backend_check_size(vd)) != 0) {
6241 		PRN("Failed to check size of %s (errno %d)",
6242 		    device_path, status);
6243 		return (EIO);
6244 	}
6245
6246 	/*
6247 	 * When exporting a slice or a device as a single slice disk, we don't
6248 	 * care about any partitioning exposed by the backend. The goal is just
6249 	 * to export the backend as flat storage. We provide a fake partition
6250 	 * table (either a VTOC or EFI), which presents only one slice, to
6251 	 * accommodate tools expecting a disk label. The selection of the label
6252 	 * type (VTOC or EFI) depends on the value of the vd_slice_label
6253 	 * variable.
6254 	 */
6255 	if (vd_slice_label == VD_DISK_LABEL_EFI ||
6256 	    vd->vdisk_size >= ONE_TERABYTE / vd->vdisk_bsize) {
6257 		vd->vdisk_label = VD_DISK_LABEL_EFI;
6258 	} else {
6259 		status = ldi_ioctl(vd->ldi_handle[0], DKIOCGEXTVTOC,
6260 		    (intptr_t)&vd->vtoc, (vd->open_flags | FKIOCTL),
6261 		    kcred, &rval);
6262
6263 		if (status == ENOTTY) {
6264 			/* try with the non-extended vtoc ioctl */
6265 			status = ldi_ioctl(vd->ldi_handle[0], DKIOCGVTOC,
6266 			    (intptr_t)&vtoc, (vd->open_flags | FKIOCTL),
6267 			    kcred, &rval);
6268 			vtoctoextvtoc(vtoc, vd->vtoc);
6269 		}
6270
6271 		if (status == 0) {
6272 			status = ldi_ioctl(vd->ldi_handle[0], DKIOCGGEOM,
6273 			    (intptr_t)&vd->dk_geom, (vd->open_flags | FKIOCTL),
6274 			    kcred, &rval);
6275
6276 			if (status != 0) {
6277 				PRN("ldi_ioctl(DKIOCGEOM) returned errno %d "
6278 				    "for %s", status, device_path);
6279 				return (status);
6280 			}
6281 			vd->vdisk_label = VD_DISK_LABEL_VTOC;
6282
6283 		} else if (vd_slice_label == VD_DISK_LABEL_VTOC) {
6284
6285 			vd->vdisk_label = VD_DISK_LABEL_VTOC;
6286 			vd_build_default_label(vd->vdisk_size * vd->vdisk_bsize,
6287 			    vd->vdisk_bsize, &label);
6288 			vd_label_to_vtocgeom(&label, &vd->vtoc, &vd->dk_geom);
6289
6290 		} else {
6291 			vd->vdisk_label = VD_DISK_LABEL_EFI;
6292 		}
6293 	}
6294
6295 	if (vd->vdisk_label == VD_DISK_LABEL_VTOC) {
6296 		/* export with a fake VTOC label */
6297 		status = vd_setup_partition_vtoc(vd);
6298
6299 	} else {
6300 		/* export with a fake EFI label */
6301 		status = vd_setup_partition_efi(vd);
6302 	}
6303
6304 	return (status);
6305 }
6306
6307 /*
6308  * This function is invoked when setting up the vdisk backend and to process
6309  * the VD_OP_GET_CAPACITY operation. It checks the backend size and sets the
6310  * following attributes of the vd structure:
6311  *
6312  * - vdisk_bsize: block size for the virtual disk used by the VIO protocol. Its
6313  *   value is 512 bytes (DEV_BSIZE) when the backend is a file, a volume or a
6314  *   CD/DVD. When the backend is a disk or a disk slice then it has the value
6315  *   of the logical block size of that disk (as returned by the DKIOCGMEDIAINFO
6316  *   ioctl). This block size is expected to be a power of 2 and a multiple of
6317  *   512.
6318  *
6319  * - vdisk_size: size of the virtual disk expressed as a number of vdisk_bsize
6320  *   blocks.
 *
6321  * vdisk_size and vdisk_bsize are sent to the vdisk client during the connection
6322  * handshake and in the result of a VD_OP_GET_CAPACITY operation.
6323  *
6324  * - backend_bsize: block size of the backend device. backend_bsize has the same
6325  *   value as vdisk_bsize except when the backend is a CD/DVD. In that case,
6326  *   vdisk_bsize is set to 512 (DEV_BSIZE) while backend_bsize is set to the
6327  *   effective logical block size of the CD/DVD (usually 2048).
6328  *
6329  * - dskimg_size: size of the backend when the backend is a disk image. This
6330  *   attribute is set only when the backend is a file or a volume, otherwise it
6331  *   is unused.
6332  *
6333  * - vio_bshift: number of bits to shift to convert a VIO block number (which
6334  *   uses a block size of vdisk_bsize) to a buf(9S) block number (which uses a
6335  *   block size of 512 bytes), i.e. we have vdisk_bsize = 512 x 2 ^ vio_bshift
6336  *
6337  * - vdisk_media: media of the virtual disk. This function only sets this
6338  *   attribute for a physical disk and a CD/DVD. For other backend types, this
6339  *   attribute is set in the setup function of the backend.
6340  */
6341 static int
6342 vd_backend_check_size(vd_t *vd)
6343 {
6344 	size_t backend_size, backend_bsize, vdisk_bsize;
6345 	size_t old_size, new_size;
6346 	struct dk_minfo minfo;
6347 	vattr_t vattr;
6348 	int rval, rv, media, nshift = 0;
6349 	uint32_t n;
6350
6351 	if (vd->file) {
6352
6353 		/* file (slice or full disk) */
6354 		vattr.va_mask = AT_SIZE;
6355 		rv = VOP_GETATTR(vd->file_vnode, &vattr, 0, kcred, NULL);
6356 		if (rv != 0) {
6357 			PR0("VOP_GETATTR(%s) = errno %d", vd->device_path, rv);
6358 			return (rv);
6359 		}
6360 		backend_size = vattr.va_size;
6361 		backend_bsize = DEV_BSIZE;
6362 		vdisk_bsize = DEV_BSIZE;
6363
6364 	} else if (vd->volume) {
6365
6366 		/* volume (slice or full disk) */
6367 		rv = ldi_get_size(vd->ldi_handle[0], &backend_size);
6368 		if (rv != DDI_SUCCESS) {
6369 			PR0("ldi_get_size() failed for %s", vd->device_path);
6370 			return (EIO);
6371 		}
6372 		backend_bsize = DEV_BSIZE;
6373 		vdisk_bsize = DEV_BSIZE;
6374
6375 	} else {
6376
6377 		/* physical disk or slice */
6378 		rv = ldi_ioctl(vd->ldi_handle[0], DKIOCGMEDIAINFO,
6379 		    (intptr_t)&minfo, (vd->open_flags | FKIOCTL),
6380 		    kcred, &rval);
6381 		if (rv != 0) {
6382 			PR0("DKIOCGMEDIAINFO failed for %s (err=%d)",
6383 			    vd->device_path, rv);
6384 			return (rv);
6385 		}
6386
6387 		if (vd->vdisk_type == VD_DISK_TYPE_SLICE) {
6388 			rv = ldi_get_size(vd->ldi_handle[0], &backend_size);
6389 			if (rv != DDI_SUCCESS) {
6390 				PR0("ldi_get_size() failed for %s",
6391 				    vd->device_path);
6392 				return (EIO);
6393 			}
6394 		} else {
6395 			ASSERT(vd->vdisk_type == VD_DISK_TYPE_DISK);
6396 			backend_size = minfo.dki_capacity * minfo.dki_lbsize;
6397 		}
6398
6399 		backend_bsize = minfo.dki_lbsize;
6400 		media = DK_MEDIATYPE2VD_MEDIATYPE(minfo.dki_media_type);
6401
6402 		/*
6403 		 * If the device is a CD or a DVD then we force the vdisk block
6404 		 * size to 512 bytes (DEV_BSIZE). In that case, vdisk_bsize can
6405 		 * be different from backend_bsize.
6406 		 */
6407 		if (media == VD_MEDIA_CD || media == VD_MEDIA_DVD)
6408 			vdisk_bsize = DEV_BSIZE;
6409 		else
6410 			vdisk_bsize = backend_bsize;
6411 	}
6412
6413 	/* check vdisk block size */
6414 	if (vdisk_bsize == 0 || vdisk_bsize % DEV_BSIZE != 0)
6415 		return (EINVAL);
6416
6417 	old_size = vd->vdisk_size;
6418 	new_size = backend_size / vdisk_bsize;
6419
6420 	/* check if size has changed */
6421 	if (old_size != VD_SIZE_UNKNOWN && old_size == new_size &&
6422 	    vd->vdisk_bsize == vdisk_bsize)
6423 		return (0);
6424
6425 	/* cache info for blk conversion */
6426 	for (n = vdisk_bsize / DEV_BSIZE; n > 1; n >>= 1) {
6427 		if ((n & 0x1) != 0) {
6428 			/* vdisk_bsize is not a power of 2 */
6429 			return (EINVAL);
6430 		}
6431 		nshift++;
6432 	}
6433
6434 	vd->vio_bshift = nshift;
6435 	vd->vdisk_size = new_size;
6436 	vd->vdisk_bsize = vdisk_bsize;
6437 	vd->backend_bsize = backend_bsize;
6438
6439 	if (vd->file || vd->volume)
6440 		vd->dskimg_size = backend_size;
6441
6442 	/*
6443 	 * If we are exporting a single-slice disk and the size of the backend
6444 	 * has changed then we regenerate the partition setup so that the
6445 	 * partitioning matches with the new disk backend size.
6446 	 */
6447
6448 	if (vd->vdisk_type == VD_DISK_TYPE_SLICE) {
6449 		/* slice or file or device exported as a slice */
6450 		if (vd->vdisk_label == VD_DISK_LABEL_VTOC) {
6451 			rv = vd_setup_partition_vtoc(vd);
6452 			if (rv != 0) {
6453 				PR0("vd_setup_partition_vtoc() failed for %s "
6454 				    "(err = %d)", vd->device_path, rv);
6455 				return (rv);
6456 			}
6457 		} else {
6458 			rv = vd_setup_partition_efi(vd);
6459 			if (rv != 0) {
6460 				PR0("vd_setup_partition_efi() failed for %s "
6461 				    "(err = %d)", vd->device_path, rv);
6462 				return (rv);
6463 			}
6464 		}
6465
6466 	} else if (!vd->file && !vd->volume) {
6467 		/* physical disk */
6468 		ASSERT(vd->vdisk_type == VD_DISK_TYPE_DISK);
6469 		vd->vdisk_media = media;
6470 	}
6471
6472 	return (0);
6473 }
6474
6475 /*
6476  * Description:
6477  *	Open a device using its device path and identify if this is
6478  *	a disk device or a volume device.
6479  *
6480  * Parameters:
6481  *	vd	- pointer to structure containing the vDisk info
6482  *	dtype	- return the driver type of the device
6483  *
6484  * Return Value
6485  *	0	- success
6486  *	!= 0	- some other non-zero return value from ldi(9F) functions
6487  */
6488 static int
6489 vd_identify_dev(vd_t *vd, int *dtype)
6490 {
6491 	int status, i;
6492 	char *device_path = vd->device_path;
6493 	char *drv_name;
6494 	int drv_type;
6495 	vds_t *vds = vd->vds;
6496
6497 	status = vd_open_using_ldi_by_name(vd, vd->open_flags & ~FWRITE);
6498 	if (status != 0) {
6499 		PR0("Failed to open (%s) = errno %d", device_path, status);
6500 		return (status);
6501 	}
6502
6503 	/* Get device number of backing device */
6504 	if ((status = ldi_get_dev(vd->ldi_handle[0], &vd->dev[0])) != 0) {
6505 		PRN("ldi_get_dev() returned errno %d for %s",
6506 		    status, device_path);
6507 		return (status);
6508 	}
6509
6510 	/*
6511 	 * We start by checking whether the driver is in the list from
6512 	 * vds.conf so that we can override the built-in list using vds.conf.
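	 *
	 * For example, a hypothetical vds.conf entry of
	 *
	 *	driver-type-list="mydrv:volume";
	 *
	 * would cause any device whose major driver is "mydrv" to be
	 * identified as a volume here, before the built-in
	 * vds_driver_types[] list is even consulted.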
6513 	 */
6514 	drv_name = ddi_major_to_name(getmajor(vd->dev[0]));
6515 	drv_type = VD_DRIVER_UNKNOWN;
6516
6517 	/* check vds.conf list */
6518 	for (i = 0; i < vds->num_drivers; i++) {
6519 		if (vds->driver_types[i].type == VD_DRIVER_UNKNOWN) {
6520 			/* ignore invalid entries */
6521 			continue;
6522 		}
6523 		if (strcmp(drv_name, vds->driver_types[i].name) == 0) {
6524 			drv_type = vds->driver_types[i].type;
6525 			goto done;
6526 		}
6527 	}
6528
6529 	/* check built-in list */
6530 	for (i = 0; i < VDS_NUM_DRIVERS; i++) {
6531 		if (strcmp(drv_name, vds_driver_types[i].name) == 0) {
6532 			drv_type = vds_driver_types[i].type;
6533 			goto done;
6534 		}
6535 	}
6536
6537 done:
6538 	PR0("driver %s identified as %s", drv_name,
6539 	    (drv_type == VD_DRIVER_DISK)? "DISK" :
6540 	    (drv_type == VD_DRIVER_VOLUME)? "VOLUME" : "UNKNOWN");
6541
6542 	if (strcmp(drv_name, "zfs") == 0)
6543 		vd->zvol = B_TRUE;
6544
6545 	*dtype = drv_type;
6546
6547 	return (0);
6548 }
6549
6550 static int
6551 vd_setup_vd(vd_t *vd)
6552 {
6553 	int status, drv_type, pseudo;
6554 	dev_info_t *dip;
6555 	vnode_t *vnp;
6556 	char *path = vd->device_path;
6557 	char tq_name[TASKQ_NAMELEN];
6558
6559 	/* make sure the vdisk backend is valid */
6560 	if ((status = lookupname(path, UIO_SYSSPACE,
6561 	    FOLLOW, NULLVPP, &vnp)) != 0) {
6562 		PR0("Cannot lookup %s errno %d", path, status);
6563 		goto done;
6564 	}
6565
6566 	switch (vnp->v_type) {
6567 	case VREG:
6568 		/*
6569 		 * Backend is a file so it is exported as a full disk or as a
6570 		 * single slice disk using the vnode interface.
6571 		 */
6572 		VN_RELE(vnp);
6573 		vd->volume = B_FALSE;
6574 		status = vd_setup_backend_vnode(vd);
6575 		break;
6576
6577 	case VBLK:
6578 	case VCHR:
6579 		/*
6580 		 * Backend is a device. In that case, it is exported using the
6581 		 * LDI interface, and it is exported either as a single-slice
6582 		 * disk or as a full disk depending on the "slice" option and
6583 		 * on the type of device.
6584 		 *
6585 		 * - A volume device is exported as a single-slice disk if the
6586 		 *   "slice" option is specified, otherwise it is exported as
6587 		 *   a full disk.
6588 		 *
6589 		 * - A disk slice (different from slice 2) is always exported
6590 		 *   as a single slice disk using the LDI interface.
6591 		 *
6592 		 * - The slice 2 of a disk is exported as a single slice disk
6593 		 *   if the "slice" option is specified, otherwise the entire
6594 		 *   disk will be exported.
6595 		 *
6596 		 * - The slice of a CD or DVD is exported as a single slice
6597 		 *   disk if the "slice" option is specified, otherwise the
6598 		 *   entire disk will be exported.
6599 		 */
6600
6601 		/* check if this is a pseudo device */
6602 		if ((dip = ddi_hold_devi_by_instance(getmajor(vnp->v_rdev),
6603 		    dev_to_instance(vnp->v_rdev), 0)) == NULL) {
6604 			PRN("%s is no longer accessible", path);
6605 			VN_RELE(vnp);
6606 			status = EIO;
6607 			break;
6608 		}
6609 		pseudo = is_pseudo_device(dip);
6610 		ddi_release_devi(dip);
6611 		VN_RELE(vnp);
6612
6613 		if ((status = vd_identify_dev(vd, &drv_type)) != 0) {
6614 			if (status != ENODEV && status != ENXIO &&
6615 			    status != ENOENT && status != EROFS) {
6616 				PRN("%s identification failed with status %d",
6617 				    path, status);
6618 				status = EIO;
6619 			}
6620 			break;
6621 		}
6622
6623 		/*
6624 		 * If the driver hasn't been identified then we consider that
6625 		 * pseudo devices are volumes and other devices are disks.
6626 		 */
6627 		if (drv_type == VD_DRIVER_VOLUME ||
6628 		    (drv_type == VD_DRIVER_UNKNOWN && pseudo)) {
6629 			vd->volume = B_TRUE;
6630 		}
6631
6632 		/*
6633 		 * If this is a volume device then its usage depends on whether
6634 		 * the "slice" option is set. If the "slice" option is set then
6635 		 * the volume device will be exported as a single slice,
6636 		 * otherwise it will be exported as a full disk.
6637 		 *
6638 		 * For backward compatibility, if vd_volume_force_slice is set
6639 		 * then we always export volume devices as slices.
6640 		 */
6641 		if (vd->volume && vd_volume_force_slice) {
6642 			vd->vdisk_type = VD_DISK_TYPE_SLICE;
6643 			vd->nslices = 1;
6644 		}
6645
6646 		status = vd_setup_backend_ldi(vd);
6647 		break;
6648
6649 	default:
6650 		PRN("Unsupported vdisk backend %s", path);
6651 		VN_RELE(vnp);
6652 		status = EBADF;
6653 	}
6654
6655 done:
6656 	if (status != 0) {
6657 		/*
6658 		 * If the error is retryable, print an error message only
6659 		 * during the first try.
6660 		 */
6661 		if (status == ENXIO || status == ENODEV ||
6662 		    status == ENOENT || status == EROFS) {
6663 			if (!(vd->initialized & VD_SETUP_ERROR)) {
6664 				PRN("%s is currently inaccessible (error %d)",
6665 				    path, status);
6666 			}
6667 			status = EAGAIN;
6668 		} else {
6669 			PRN("%s cannot be exported as a virtual disk "
6670 			    "(error %d)", path, status);
6671 		}
6672 		vd->initialized |= VD_SETUP_ERROR;
6673
6674 	} else if (vd->initialized & VD_SETUP_ERROR) {
6675 		/* print a message only if we previously had an error */
6676 		PRN("%s is now online", path);
6677 		vd->initialized &= ~VD_SETUP_ERROR;
6678 	}
6679
6680 	/*
6681 	 * For a file or a ZFS volume we also need an I/O queue.
6682 	 *
6683 	 * The I/O task queue is initialized here and not in vds_do_init_vd()
6684 	 * (as the start and completion queues are) because vd_setup_vd() will
6685 	 * be called again if the backend is not available, and we need to know
6686 	 * if the backend is a ZFS volume or a file.
6687 	 */
6688 	if ((vd->file || vd->zvol) && vd->ioq == NULL) {
6689 		(void) snprintf(tq_name, sizeof (tq_name), "vd_ioq%lu", vd->id);
6690
6691 		if ((vd->ioq = ddi_taskq_create(vd->vds->dip, tq_name,
6692 		    vd_ioq_nthreads, TASKQ_DEFAULTPRI, 0)) == NULL) {
6693 			PRN("Could not create io task queue");
6694 			return (EIO);
6695 		}
6696 	}
6697
6698 	return (status);
6699 }
6700
6701 static int
6702 vds_do_init_vd(vds_t *vds, uint64_t id, char *device_path, uint64_t options,
6703     uint64_t ldc_id, vd_t **vdp)
6704 {
6705 	char tq_name[TASKQ_NAMELEN];
6706 	int status;
6707 	ddi_iblock_cookie_t iblock = NULL;
6708 	ldc_attr_t ldc_attr;
6709 	vd_t *vd;
6710
6711
6712 	ASSERT(vds != NULL);
6713 	ASSERT(device_path != NULL);
6714 	ASSERT(vdp != NULL);
6715 	PR0("Adding vdisk for %s", device_path);
6716
6717 	if ((vd = kmem_zalloc(sizeof (*vd), KM_NOSLEEP)) == NULL) {
6718 		PRN("No memory for virtual disk");
6719 		return (EAGAIN);
6720 	}
6721 	*vdp = vd;	/* assign here so vds_destroy_vd() can cleanup later */
6722 	vd->id = id;
6723 	vd->vds = vds;
6724 	(void) strncpy(vd->device_path, device_path, MAXPATHLEN);
6725
6726 	/* Setup open flags */
6727 	vd->open_flags = FREAD;
6728
6729 	if (!(options & VD_OPT_RDONLY))
6730 		vd->open_flags |= FWRITE;
6731
6732 	if (options & VD_OPT_EXCLUSIVE)
6733 		vd->open_flags |= FEXCL;
6734
6735 	/* Setup disk type */
6736 	if (options & VD_OPT_SLICE) {
6737 		vd->vdisk_type = VD_DISK_TYPE_SLICE;
6738 		vd->nslices = 1;
6739 	} else {
6740 		vd->vdisk_type = VD_DISK_TYPE_DISK;
6741 		vd->nslices = V_NUMPAR;
6742 	}
6743
6744 	/* default disk label */
6745 	vd->vdisk_label = VD_DISK_LABEL_UNK;
6746
6747 	/* Open vdisk and initialize parameters */
6748 	if ((status = vd_setup_vd(vd)) == 0) {
6749 		vd->initialized |= VD_DISK_READY;
6750
6751 		ASSERT(vd->nslices > 0 && vd->nslices <= V_NUMPAR);
6752 		PR0("vdisk_type = %s, volume = %s, file = %s, nslices = %u",
6753 		    ((vd->vdisk_type ==
VD_DISK_TYPE_DISK) ? "disk" : "slice"), 6754 (vd->volume ? "yes" : "no"), (vd->file ? "yes" : "no"), 6755 vd->nslices); 6756 } else { 6757 if (status != EAGAIN) 6758 return (status); 6759 } 6760 6761 /* Initialize locking */ 6762 if (ddi_get_soft_iblock_cookie(vds->dip, DDI_SOFTINT_MED, 6763 &iblock) != DDI_SUCCESS) { 6764 PRN("Could not get iblock cookie."); 6765 return (EIO); 6766 } 6767 6768 mutex_init(&vd->lock, NULL, MUTEX_DRIVER, iblock); 6769 vd->initialized |= VD_LOCKING; 6770 6771 6772 /* Create start and completion task queues for the vdisk */ 6773 (void) snprintf(tq_name, sizeof (tq_name), "vd_startq%lu", id); 6774 PR1("tq_name = %s", tq_name); 6775 if ((vd->startq = ddi_taskq_create(vds->dip, tq_name, 1, 6776 TASKQ_DEFAULTPRI, 0)) == NULL) { 6777 PRN("Could not create task queue"); 6778 return (EIO); 6779 } 6780 (void) snprintf(tq_name, sizeof (tq_name), "vd_completionq%lu", id); 6781 PR1("tq_name = %s", tq_name); 6782 if ((vd->completionq = ddi_taskq_create(vds->dip, tq_name, 1, 6783 TASKQ_DEFAULTPRI, 0)) == NULL) { 6784 PRN("Could not create task queue"); 6785 return (EIO); 6786 } 6787 6788 /* Allocate the staging buffer */ 6789 vd->max_msglen = sizeof (vio_msg_t); /* baseline vio message size */ 6790 vd->vio_msgp = kmem_alloc(vd->max_msglen, KM_SLEEP); 6791 6792 vd->enabled = 1; /* before callback can dispatch to startq */ 6793 6794 6795 /* Bring up LDC */ 6796 ldc_attr.devclass = LDC_DEV_BLK_SVC; 6797 ldc_attr.instance = ddi_get_instance(vds->dip); 6798 ldc_attr.mode = LDC_MODE_UNRELIABLE; 6799 ldc_attr.mtu = VD_LDC_MTU; 6800 if ((status = ldc_init(ldc_id, &ldc_attr, &vd->ldc_handle)) != 0) { 6801 PRN("Could not initialize LDC channel %lx, " 6802 "init failed with error %d", ldc_id, status); 6803 return (status); 6804 } 6805 vd->initialized |= VD_LDC; 6806 6807 if ((status = ldc_reg_callback(vd->ldc_handle, vd_handle_ldc_events, 6808 (caddr_t)vd)) != 0) { 6809 PRN("Could not initialize LDC channel %lu," 6810 "reg_callback failed with error %d", ldc_id, status); 6811 return (status); 6812 } 6813 6814 if ((status = ldc_open(vd->ldc_handle)) != 0) { 6815 PRN("Could not initialize LDC channel %lu," 6816 "open failed with error %d", ldc_id, status); 6817 return (status); 6818 } 6819 6820 if ((status = ldc_up(vd->ldc_handle)) != 0) { 6821 PR0("ldc_up() returned errno %d", status); 6822 } 6823 6824 /* Allocate the inband task memory handle */ 6825 status = ldc_mem_alloc_handle(vd->ldc_handle, &(vd->inband_task.mhdl)); 6826 if (status) { 6827 PRN("Could not initialize LDC channel %lu," 6828 "alloc_handle failed with error %d", ldc_id, status); 6829 return (ENXIO); 6830 } 6831 6832 /* Add the successfully-initialized vdisk to the server's table */ 6833 if (mod_hash_insert(vds->vd_table, (mod_hash_key_t)id, vd) != 0) { 6834 PRN("Error adding vdisk ID %lu to table", id); 6835 return (EIO); 6836 } 6837 6838 /* store initial state */ 6839 vd->state = VD_STATE_INIT; 6840 6841 return (0); 6842 } 6843 6844 static void 6845 vd_free_dring_task(vd_t *vdp) 6846 { 6847 if (vdp->dring_task != NULL) { 6848 ASSERT(vdp->dring_len != 0); 6849 /* Free all dring_task memory handles */ 6850 for (int i = 0; i < vdp->dring_len; i++) { 6851 (void) ldc_mem_free_handle(vdp->dring_task[i].mhdl); 6852 kmem_free(vdp->dring_task[i].request, 6853 (vdp->descriptor_size - 6854 sizeof (vio_dring_entry_hdr_t))); 6855 vdp->dring_task[i].request = NULL; 6856 kmem_free(vdp->dring_task[i].msg, vdp->max_msglen); 6857 vdp->dring_task[i].msg = NULL; 6858 } 6859 kmem_free(vdp->dring_task, 6860 (sizeof (*vdp->dring_task)) * 
vdp->dring_len); 6861 vdp->dring_task = NULL; 6862 } 6863 6864 if (vdp->write_queue != NULL) { 6865 kmem_free(vdp->write_queue, sizeof (buf_t *) * vdp->dring_len); 6866 vdp->write_queue = NULL; 6867 } 6868 } 6869 6870 /* 6871 * Destroy the state associated with a virtual disk 6872 */ 6873 static void 6874 vds_destroy_vd(void *arg) 6875 { 6876 vd_t *vd = (vd_t *)arg; 6877 int retry = 0, rv; 6878 6879 if (vd == NULL) 6880 return; 6881 6882 PR0("Destroying vdisk state"); 6883 6884 /* Disable queuing requests for the vdisk */ 6885 if (vd->initialized & VD_LOCKING) { 6886 mutex_enter(&vd->lock); 6887 vd->enabled = 0; 6888 mutex_exit(&vd->lock); 6889 } 6890 6891 /* Drain and destroy start queue (*before* destroying ioq) */ 6892 if (vd->startq != NULL) 6893 ddi_taskq_destroy(vd->startq); /* waits for queued tasks */ 6894 6895 /* Drain and destroy the I/O queue (*before* destroying completionq) */ 6896 if (vd->ioq != NULL) 6897 ddi_taskq_destroy(vd->ioq); 6898 6899 /* Drain and destroy completion queue (*before* shutting down LDC) */ 6900 if (vd->completionq != NULL) 6901 ddi_taskq_destroy(vd->completionq); /* waits for tasks */ 6902 6903 vd_free_dring_task(vd); 6904 6905 /* Free the inband task memory handle */ 6906 (void) ldc_mem_free_handle(vd->inband_task.mhdl); 6907 6908 /* Shut down LDC */ 6909 if (vd->initialized & VD_LDC) { 6910 /* unmap the dring */ 6911 if (vd->initialized & VD_DRING) 6912 (void) ldc_mem_dring_unmap(vd->dring_handle); 6913 6914 /* close LDC channel - retry on EAGAIN */ 6915 while ((rv = ldc_close(vd->ldc_handle)) == EAGAIN) { 6916 if (++retry > vds_ldc_retries) { 6917 PR0("Timed out closing channel"); 6918 break; 6919 } 6920 drv_usecwait(vds_ldc_delay); 6921 } 6922 if (rv == 0) { 6923 (void) ldc_unreg_callback(vd->ldc_handle); 6924 (void) ldc_fini(vd->ldc_handle); 6925 } else { 6926 /* 6927 * Closing the LDC channel has failed. Ideally we should 6928 * fail here but there is no Zeus level infrastructure 6929 * to handle this. The MD has already been changed and 6930 * we have to do the close. So we try to do as much 6931 * clean up as we can. 
6932 */ 6933 (void) ldc_set_cb_mode(vd->ldc_handle, LDC_CB_DISABLE); 6934 while (ldc_unreg_callback(vd->ldc_handle) == EAGAIN) 6935 drv_usecwait(vds_ldc_delay); 6936 } 6937 } 6938 6939 /* Free the staging buffer for msgs */ 6940 if (vd->vio_msgp != NULL) { 6941 kmem_free(vd->vio_msgp, vd->max_msglen); 6942 vd->vio_msgp = NULL; 6943 } 6944 6945 /* Free the inband message buffer */ 6946 if (vd->inband_task.msg != NULL) { 6947 kmem_free(vd->inband_task.msg, vd->max_msglen); 6948 vd->inband_task.msg = NULL; 6949 } 6950 6951 if (vd->file) { 6952 /* Close file */ 6953 (void) VOP_CLOSE(vd->file_vnode, vd->open_flags, 1, 6954 0, kcred, NULL); 6955 VN_RELE(vd->file_vnode); 6956 } else { 6957 /* Close any open backing-device slices */ 6958 for (uint_t slice = 0; slice < V_NUMPAR; slice++) { 6959 if (vd->ldi_handle[slice] != NULL) { 6960 PR0("Closing slice %u", slice); 6961 (void) ldi_close(vd->ldi_handle[slice], 6962 vd->open_flags, kcred); 6963 } 6964 } 6965 } 6966 6967 /* Free disk image devid */ 6968 if (vd->dskimg_devid != NULL) 6969 ddi_devid_free(vd->dskimg_devid); 6970 6971 /* Free any fake label */ 6972 if (vd->flabel) { 6973 kmem_free(vd->flabel, vd->flabel_size); 6974 vd->flabel = NULL; 6975 vd->flabel_size = 0; 6976 } 6977 6978 /* Free lock */ 6979 if (vd->initialized & VD_LOCKING) 6980 mutex_destroy(&vd->lock); 6981 6982 /* Finally, free the vdisk structure itself */ 6983 kmem_free(vd, sizeof (*vd)); 6984 } 6985 6986 static int 6987 vds_init_vd(vds_t *vds, uint64_t id, char *device_path, uint64_t options, 6988 uint64_t ldc_id) 6989 { 6990 int status; 6991 vd_t *vd = NULL; 6992 6993 6994 if ((status = vds_do_init_vd(vds, id, device_path, options, 6995 ldc_id, &vd)) != 0) 6996 vds_destroy_vd(vd); 6997 6998 return (status); 6999 } 7000 7001 static int 7002 vds_do_get_ldc_id(md_t *md, mde_cookie_t vd_node, mde_cookie_t *channel, 7003 uint64_t *ldc_id) 7004 { 7005 int num_channels; 7006 7007 7008 /* Look for channel endpoint child(ren) of the vdisk MD node */ 7009 if ((num_channels = md_scan_dag(md, vd_node, 7010 md_find_name(md, VD_CHANNEL_ENDPOINT), 7011 md_find_name(md, "fwd"), channel)) <= 0) { 7012 PRN("No \"%s\" found for virtual disk", VD_CHANNEL_ENDPOINT); 7013 return (-1); 7014 } 7015 7016 /* Get the "id" value for the first channel endpoint node */ 7017 if (md_get_prop_val(md, channel[0], VD_ID_PROP, ldc_id) != 0) { 7018 PRN("No \"%s\" property found for \"%s\" of vdisk", 7019 VD_ID_PROP, VD_CHANNEL_ENDPOINT); 7020 return (-1); 7021 } 7022 7023 if (num_channels > 1) { 7024 PRN("Using ID of first of multiple channels for this vdisk"); 7025 } 7026 7027 return (0); 7028 } 7029 7030 static int 7031 vds_get_ldc_id(md_t *md, mde_cookie_t vd_node, uint64_t *ldc_id) 7032 { 7033 int num_nodes, status; 7034 size_t size; 7035 mde_cookie_t *channel; 7036 7037 7038 if ((num_nodes = md_node_count(md)) <= 0) { 7039 PRN("Invalid node count in Machine Description subtree"); 7040 return (-1); 7041 } 7042 size = num_nodes*(sizeof (*channel)); 7043 channel = kmem_zalloc(size, KM_SLEEP); 7044 status = vds_do_get_ldc_id(md, vd_node, channel, ldc_id); 7045 kmem_free(channel, size); 7046 7047 return (status); 7048 } 7049 7050 /* 7051 * Function: 7052 * vds_get_options 7053 * 7054 * Description: 7055 * Parse the options of a vds node. Options are defined as an array 7056 * of strings in the vds-block-device-opts property of the vds node 7057 * in the machine description. Options are returned as a bitmask. 
The
7058  * mapping between the bitmask options and the option strings from the
7059  * machine description is defined in the vd_bdev_options[] array.
7060  *
7061  * The vds-block-device-opts property is optional. If a vds has no such
7062  * property then no option is defined.
7063  *
7064  * Parameters:
7065  *	md		- machine description.
7066  *	vd_node		- vds node in the machine description for which
7067  *			  options have to be parsed.
7068  *	options		- the returned options.
7069  *
7070  * Return Code:
7071  *	none.
7072  */
7073 static void
7074 vds_get_options(md_t *md, mde_cookie_t vd_node, uint64_t *options)
7075 {
7076 	char *optstr, *opt;
7077 	int len, n, i;
7078
7079 	*options = 0;
7080
7081 	if (md_get_prop_data(md, vd_node, VD_BLOCK_DEVICE_OPTS,
7082 	    (uint8_t **)&optstr, &len) != 0) {
7083 		PR0("No options found");
7084 		return;
7085 	}
7086
7087 	/* parse options */
7088 	opt = optstr;
7089 	n = sizeof (vd_bdev_options) / sizeof (vd_option_t);
7090
7091 	while (opt < optstr + len) {
7092 		for (i = 0; i < n; i++) {
7093 			if (strncmp(vd_bdev_options[i].vdo_name,
7094 			    opt, VD_OPTION_NLEN) == 0) {
7095 				*options |= vd_bdev_options[i].vdo_value;
7096 				break;
7097 			}
7098 		}
7099
7100 		if (i < n) {
7101 			PR0("option: %s", opt);
7102 		} else {
7103 			PRN("option %s is unknown or unsupported", opt);
7104 		}
7105
7106 		opt += strlen(opt) + 1;
7107 	}
7108 }
7109
7110 static void
7111 vds_driver_types_free(vds_t *vds)
7112 {
7113 	if (vds->driver_types != NULL) {
7114 		kmem_free(vds->driver_types, sizeof (vd_driver_type_t) *
7115 		    vds->num_drivers);
7116 		vds->driver_types = NULL;
7117 		vds->num_drivers = 0;
7118 	}
7119 }
7120
7121 /*
7122  * Update the driver type list with information from vds.conf.
7123  */
7124 static void
7125 vds_driver_types_update(vds_t *vds)
7126 {
7127 	char **list, *s;
7128 	uint_t i, num, count = 0, len;
7129
7130 	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, vds->dip,
7131 	    DDI_PROP_DONTPASS, "driver-type-list", &list, &num) !=
7132 	    DDI_PROP_SUCCESS)
7133 		return;
7134
7135 	/*
7136 	 * We create a driver_types list with as many entries as there
7137 	 * are in the driver-type-list from vds.conf. However only valid
7138 	 * entries will be populated (i.e. entries from driver-type-list
7139 	 * with a valid syntax). Invalid entries will be left blank so
7140 	 * they will have no driver name and the driver type will be
7141 	 * VD_DRIVER_UNKNOWN (= 0).
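	 *
	 * For example, a hypothetical vds.conf list of
	 *
	 *	driver-type-list="mydrv:volume","baddrv","otherdrv:disk";
	 *
	 * allocates three entries: entry 0 becomes { "mydrv",
	 * VD_DRIVER_VOLUME }, entry 1 is left blank because "baddrv" has
	 * no colon, and entry 2 becomes { "otherdrv", VD_DRIVER_DISK }.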
7142 */ 7143 vds->num_drivers = num; 7144 vds->driver_types = kmem_zalloc(sizeof (vd_driver_type_t) * num, 7145 KM_SLEEP); 7146 7147 for (i = 0; i < num; i++) { 7148 7149 s = strchr(list[i], ':'); 7150 7151 if (s == NULL) { 7152 PRN("vds.conf: driver-type-list, entry %d (%s): " 7153 "a colon is expected in the entry", 7154 i, list[i]); 7155 continue; 7156 } 7157 7158 len = (uintptr_t)s - (uintptr_t)list[i]; 7159 7160 if (len == 0) { 7161 PRN("vds.conf: driver-type-list, entry %d (%s): " 7162 "the driver name is empty", 7163 i, list[i]); 7164 continue; 7165 } 7166 7167 if (len >= VD_DRIVER_NAME_LEN) { 7168 PRN("vds.conf: driver-type-list, entry %d (%s): " 7169 "the driver name is too long", 7170 i, list[i]); 7171 continue; 7172 } 7173 7174 if (strcmp(s + 1, "disk") == 0) { 7175 7176 vds->driver_types[i].type = VD_DRIVER_DISK; 7177 7178 } else if (strcmp(s + 1, "volume") == 0) { 7179 7180 vds->driver_types[i].type = VD_DRIVER_VOLUME; 7181 7182 } else { 7183 PRN("vds.conf: driver-type-list, entry %d (%s): " 7184 "the driver type is invalid", 7185 i, list[i]); 7186 continue; 7187 } 7188 7189 (void) strncpy(vds->driver_types[i].name, list[i], len); 7190 7191 PR0("driver-type-list, entry %d (%s) added", 7192 i, list[i]); 7193 7194 count++; 7195 } 7196 7197 ddi_prop_free(list); 7198 7199 if (count == 0) { 7200 /* nothing was added, clean up */ 7201 vds_driver_types_free(vds); 7202 } 7203 } 7204 7205 static void 7206 vds_add_vd(vds_t *vds, md_t *md, mde_cookie_t vd_node) 7207 { 7208 char *device_path = NULL; 7209 uint64_t id = 0, ldc_id = 0, options = 0; 7210 7211 if (md_get_prop_val(md, vd_node, VD_ID_PROP, &id) != 0) { 7212 PRN("Error getting vdisk \"%s\"", VD_ID_PROP); 7213 return; 7214 } 7215 PR0("Adding vdisk ID %lu", id); 7216 if (md_get_prop_str(md, vd_node, VD_BLOCK_DEVICE_PROP, 7217 &device_path) != 0) { 7218 PRN("Error getting vdisk \"%s\"", VD_BLOCK_DEVICE_PROP); 7219 return; 7220 } 7221 7222 vds_get_options(md, vd_node, &options); 7223 7224 if (vds_get_ldc_id(md, vd_node, &ldc_id) != 0) { 7225 PRN("Error getting LDC ID for vdisk %lu", id); 7226 return; 7227 } 7228 7229 if (vds_init_vd(vds, id, device_path, options, ldc_id) != 0) { 7230 PRN("Failed to add vdisk ID %lu", id); 7231 if (mod_hash_destroy(vds->vd_table, (mod_hash_key_t)id) != 0) 7232 PRN("No vDisk entry found for vdisk ID %lu", id); 7233 return; 7234 } 7235 } 7236 7237 static void 7238 vds_remove_vd(vds_t *vds, md_t *md, mde_cookie_t vd_node) 7239 { 7240 uint64_t id = 0; 7241 7242 7243 if (md_get_prop_val(md, vd_node, VD_ID_PROP, &id) != 0) { 7244 PRN("Unable to get \"%s\" property from vdisk's MD node", 7245 VD_ID_PROP); 7246 return; 7247 } 7248 PR0("Removing vdisk ID %lu", id); 7249 if (mod_hash_destroy(vds->vd_table, (mod_hash_key_t)id) != 0) 7250 PRN("No vdisk entry found for vdisk ID %lu", id); 7251 } 7252 7253 static void 7254 vds_change_vd(vds_t *vds, md_t *prev_md, mde_cookie_t prev_vd_node, 7255 md_t *curr_md, mde_cookie_t curr_vd_node) 7256 { 7257 char *curr_dev, *prev_dev; 7258 uint64_t curr_id = 0, curr_ldc_id = 0, curr_options = 0; 7259 uint64_t prev_id = 0, prev_ldc_id = 0, prev_options = 0; 7260 size_t len; 7261 7262 7263 /* Validate that vdisk ID has not changed */ 7264 if (md_get_prop_val(prev_md, prev_vd_node, VD_ID_PROP, &prev_id) != 0) { 7265 PRN("Error getting previous vdisk \"%s\" property", 7266 VD_ID_PROP); 7267 return; 7268 } 7269 if (md_get_prop_val(curr_md, curr_vd_node, VD_ID_PROP, &curr_id) != 0) { 7270 PRN("Error getting current vdisk \"%s\" property", VD_ID_PROP); 7271 return; 7272 } 7273 if 
(curr_id != prev_id) { 7274 PRN("Not changing vdisk: ID changed from %lu to %lu", 7275 prev_id, curr_id); 7276 return; 7277 } 7278 7279 /* Validate that LDC ID has not changed */ 7280 if (vds_get_ldc_id(prev_md, prev_vd_node, &prev_ldc_id) != 0) { 7281 PRN("Error getting LDC ID for vdisk %lu", prev_id); 7282 return; 7283 } 7284 7285 if (vds_get_ldc_id(curr_md, curr_vd_node, &curr_ldc_id) != 0) { 7286 PRN("Error getting LDC ID for vdisk %lu", curr_id); 7287 return; 7288 } 7289 if (curr_ldc_id != prev_ldc_id) { 7290 _NOTE(NOTREACHED); /* lint is confused */ 7291 PRN("Not changing vdisk: " 7292 "LDC ID changed from %lu to %lu", prev_ldc_id, curr_ldc_id); 7293 return; 7294 } 7295 7296 /* Determine whether device path has changed */ 7297 if (md_get_prop_str(prev_md, prev_vd_node, VD_BLOCK_DEVICE_PROP, 7298 &prev_dev) != 0) { 7299 PRN("Error getting previous vdisk \"%s\"", 7300 VD_BLOCK_DEVICE_PROP); 7301 return; 7302 } 7303 if (md_get_prop_str(curr_md, curr_vd_node, VD_BLOCK_DEVICE_PROP, 7304 &curr_dev) != 0) { 7305 PRN("Error getting current vdisk \"%s\"", VD_BLOCK_DEVICE_PROP); 7306 return; 7307 } 7308 if (((len = strlen(curr_dev)) == strlen(prev_dev)) && 7309 (strncmp(curr_dev, prev_dev, len) == 0)) 7310 return; /* no relevant (supported) change */ 7311 7312 /* Validate that options have not changed */ 7313 vds_get_options(prev_md, prev_vd_node, &prev_options); 7314 vds_get_options(curr_md, curr_vd_node, &curr_options); 7315 if (prev_options != curr_options) { 7316 PRN("Not changing vdisk: options changed from %lx to %lx", 7317 prev_options, curr_options); 7318 return; 7319 } 7320 7321 PR0("Changing vdisk ID %lu", prev_id); 7322 7323 /* Remove old state, which will close vdisk and reset */ 7324 if (mod_hash_destroy(vds->vd_table, (mod_hash_key_t)prev_id) != 0) 7325 PRN("No entry found for vdisk ID %lu", prev_id); 7326 7327 /* Re-initialize vdisk with new state */ 7328 if (vds_init_vd(vds, curr_id, curr_dev, curr_options, 7329 curr_ldc_id) != 0) { 7330 PRN("Failed to change vdisk ID %lu", curr_id); 7331 return; 7332 } 7333 } 7334 7335 static int 7336 vds_process_md(void *arg, mdeg_result_t *md) 7337 { 7338 int i; 7339 vds_t *vds = arg; 7340 7341 7342 if (md == NULL) 7343 return (MDEG_FAILURE); 7344 ASSERT(vds != NULL); 7345 7346 for (i = 0; i < md->removed.nelem; i++) 7347 vds_remove_vd(vds, md->removed.mdp, md->removed.mdep[i]); 7348 for (i = 0; i < md->match_curr.nelem; i++) 7349 vds_change_vd(vds, md->match_prev.mdp, md->match_prev.mdep[i], 7350 md->match_curr.mdp, md->match_curr.mdep[i]); 7351 for (i = 0; i < md->added.nelem; i++) 7352 vds_add_vd(vds, md->added.mdp, md->added.mdep[i]); 7353 7354 return (MDEG_SUCCESS); 7355 } 7356 7357 7358 static int 7359 vds_do_attach(dev_info_t *dip) 7360 { 7361 int status, sz; 7362 int cfg_handle; 7363 minor_t instance = ddi_get_instance(dip); 7364 vds_t *vds; 7365 mdeg_prop_spec_t *pspecp; 7366 mdeg_node_spec_t *ispecp; 7367 7368 /* 7369 * The "cfg-handle" property of a vds node in an MD contains the MD's 7370 * notion of "instance", or unique identifier, for that node; OBP 7371 * stores the value of the "cfg-handle" MD property as the value of 7372 * the "reg" property on the node in the device tree it builds from 7373 * the MD and passes to Solaris. Thus, we look up the devinfo node's 7374 * "reg" property value to uniquely identify this device instance when 7375 * registering with the MD event-generation framework. 
If the "reg" 7376 * property cannot be found, the device tree state is presumably so 7377 * broken that there is no point in continuing. 7378 */ 7379 if (!ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 7380 VD_REG_PROP)) { 7381 PRN("vds \"%s\" property does not exist", VD_REG_PROP); 7382 return (DDI_FAILURE); 7383 } 7384 7385 /* Get the MD instance for later MDEG registration */ 7386 cfg_handle = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 7387 VD_REG_PROP, -1); 7388 7389 if (ddi_soft_state_zalloc(vds_state, instance) != DDI_SUCCESS) { 7390 PRN("Could not allocate state for instance %u", instance); 7391 return (DDI_FAILURE); 7392 } 7393 7394 if ((vds = ddi_get_soft_state(vds_state, instance)) == NULL) { 7395 PRN("Could not get state for instance %u", instance); 7396 ddi_soft_state_free(vds_state, instance); 7397 return (DDI_FAILURE); 7398 } 7399 7400 vds->dip = dip; 7401 vds->vd_table = mod_hash_create_ptrhash("vds_vd_table", VDS_NCHAINS, 7402 vds_destroy_vd, sizeof (void *)); 7403 7404 ASSERT(vds->vd_table != NULL); 7405 7406 if ((status = ldi_ident_from_dip(dip, &vds->ldi_ident)) != 0) { 7407 PRN("ldi_ident_from_dip() returned errno %d", status); 7408 return (DDI_FAILURE); 7409 } 7410 vds->initialized |= VDS_LDI; 7411 7412 /* Register for MD updates */ 7413 sz = sizeof (vds_prop_template); 7414 pspecp = kmem_alloc(sz, KM_SLEEP); 7415 bcopy(vds_prop_template, pspecp, sz); 7416 7417 VDS_SET_MDEG_PROP_INST(pspecp, cfg_handle); 7418 7419 /* initialize the complete prop spec structure */ 7420 ispecp = kmem_zalloc(sizeof (mdeg_node_spec_t), KM_SLEEP); 7421 ispecp->namep = "virtual-device"; 7422 ispecp->specp = pspecp; 7423 7424 if (mdeg_register(ispecp, &vd_match, vds_process_md, vds, 7425 &vds->mdeg) != MDEG_SUCCESS) { 7426 PRN("Unable to register for MD updates"); 7427 kmem_free(ispecp, sizeof (mdeg_node_spec_t)); 7428 kmem_free(pspecp, sz); 7429 return (DDI_FAILURE); 7430 } 7431 7432 vds->ispecp = ispecp; 7433 vds->initialized |= VDS_MDEG; 7434 7435 /* Prevent auto-detaching so driver is available whenever MD changes */ 7436 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip, DDI_NO_AUTODETACH, 1) != 7437 DDI_PROP_SUCCESS) { 7438 PRN("failed to set \"%s\" property for instance %u", 7439 DDI_NO_AUTODETACH, instance); 7440 } 7441 7442 /* read any user defined driver types from conf file and update list */ 7443 vds_driver_types_update(vds); 7444 7445 ddi_report_dev(dip); 7446 return (DDI_SUCCESS); 7447 } 7448 7449 static int 7450 vds_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 7451 { 7452 int status; 7453 7454 switch (cmd) { 7455 case DDI_ATTACH: 7456 PR0("Attaching"); 7457 if ((status = vds_do_attach(dip)) != DDI_SUCCESS) 7458 (void) vds_detach(dip, DDI_DETACH); 7459 return (status); 7460 case DDI_RESUME: 7461 PR0("No action required for DDI_RESUME"); 7462 return (DDI_SUCCESS); 7463 default: 7464 return (DDI_FAILURE); 7465 } 7466 } 7467 7468 static struct dev_ops vds_ops = { 7469 DEVO_REV, /* devo_rev */ 7470 0, /* devo_refcnt */ 7471 ddi_no_info, /* devo_getinfo */ 7472 nulldev, /* devo_identify */ 7473 nulldev, /* devo_probe */ 7474 vds_attach, /* devo_attach */ 7475 vds_detach, /* devo_detach */ 7476 nodev, /* devo_reset */ 7477 NULL, /* devo_cb_ops */ 7478 NULL, /* devo_bus_ops */ 7479 nulldev, /* devo_power */ 7480 ddi_quiesce_not_needed, /* devo_quiesce */ 7481 }; 7482 7483 static struct modldrv modldrv = { 7484 &mod_driverops, 7485 "virtual disk server", 7486 &vds_ops, 7487 }; 7488 7489 static struct modlinkage modlinkage = { 7490 MODREV_1, 7491 &modldrv, 7492 NULL 7493 
}; 7494 7495 7496 int 7497 _init(void) 7498 { 7499 int status; 7500 7501 if ((status = ddi_soft_state_init(&vds_state, sizeof (vds_t), 1)) != 0) 7502 return (status); 7503 7504 if ((status = mod_install(&modlinkage)) != 0) { 7505 ddi_soft_state_fini(&vds_state); 7506 return (status); 7507 } 7508 7509 return (0); 7510 } 7511 7512 int 7513 _info(struct modinfo *modinfop) 7514 { 7515 return (mod_info(&modlinkage, modinfop)); 7516 } 7517 7518 int 7519 _fini(void) 7520 { 7521 int status; 7522 7523 if ((status = mod_remove(&modlinkage)) != 0) 7524 return (status); 7525 ddi_soft_state_fini(&vds_state); 7526 return (0); 7527 } 7528