1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 /* 30 * Virtual disk server 31 */ 32 33 34 #include <sys/types.h> 35 #include <sys/conf.h> 36 #include <sys/crc32.h> 37 #include <sys/ddi.h> 38 #include <sys/dkio.h> 39 #include <sys/file.h> 40 #include <sys/mdeg.h> 41 #include <sys/modhash.h> 42 #include <sys/note.h> 43 #include <sys/pathname.h> 44 #include <sys/sunddi.h> 45 #include <sys/sunldi.h> 46 #include <sys/sysmacros.h> 47 #include <sys/vio_common.h> 48 #include <sys/vdsk_mailbox.h> 49 #include <sys/vdsk_common.h> 50 #include <sys/vtoc.h> 51 52 53 /* Virtual disk server initialization flags */ 54 #define VDS_LDI 0x01 55 #define VDS_MDEG 0x02 56 57 /* Virtual disk server tunable parameters */ 58 #define VDS_LDC_RETRIES 3 59 #define VDS_NCHAINS 32 60 61 /* Identification parameters for MD, synthetic dkio(7i) structures, etc. */ 62 #define VDS_NAME "virtual-disk-server" 63 64 #define VD_NAME "vd" 65 #define VD_VOLUME_NAME "vdisk" 66 #define VD_ASCIILABEL "Virtual Disk" 67 68 #define VD_CHANNEL_ENDPOINT "channel-endpoint" 69 #define VD_ID_PROP "id" 70 #define VD_BLOCK_DEVICE_PROP "vds-block-device" 71 72 /* Virtual disk initialization flags */ 73 #define VD_LOCKING 0x01 74 #define VD_LDC 0x02 75 #define VD_DRING 0x04 76 #define VD_SID 0x08 77 #define VD_SEQ_NUM 0x10 78 79 /* Flags for opening/closing backing devices via LDI */ 80 #define VD_OPEN_FLAGS (FEXCL | FREAD | FWRITE) 81 82 /* 83 * By Solaris convention, slice/partition 2 represents the entire disk; 84 * unfortunately, this convention does not appear to be codified. 85 */ 86 #define VD_ENTIRE_DISK_SLICE 2 87 88 /* Return a cpp token as a string */ 89 #define STRINGIZE(token) #token 90 91 /* 92 * Print a message prefixed with the current function name to the message log 93 * (and optionally to the console for verbose boots); these macros use cpp's 94 * concatenation of string literals and C99 variable-length-argument-list 95 * macros 96 */ 97 #define PRN(...) _PRN("?%s(): "__VA_ARGS__, "") 98 #define _PRN(format, ...) \ 99 cmn_err(CE_CONT, format"%s", __func__, __VA_ARGS__) 100 101 /* Return a pointer to the "i"th vdisk dring element */ 102 #define VD_DRING_ELEM(i) ((vd_dring_entry_t *)(void *) \ 103 (vd->dring + (i)*vd->descriptor_size)) 104 105 /* Return the virtual disk client's type as a string (for use in messages) */ 106 #define VD_CLIENT(vd) \ 107 (((vd)->xfer_mode == VIO_DESC_MODE) ? "in-band client" : \ 108 (((vd)->xfer_mode == VIO_DRING_MODE) ? "dring client" : \ 109 (((vd)->xfer_mode == 0) ? 
"null client" : \ 110 "unsupported client"))) 111 112 /* Debugging macros */ 113 #ifdef DEBUG 114 #define PR0 if (vd_msglevel > 0) PRN 115 #define PR1 if (vd_msglevel > 1) PRN 116 #define PR2 if (vd_msglevel > 2) PRN 117 118 #define VD_DUMP_DRING_ELEM(elem) \ 119 PRN("dst:%x op:%x st:%u nb:%lx addr:%lx ncook:%u\n", \ 120 elem->hdr.dstate, \ 121 elem->payload.operation, \ 122 elem->payload.status, \ 123 elem->payload.nbytes, \ 124 elem->payload.addr, \ 125 elem->payload.ncookies); 126 127 #else /* !DEBUG */ 128 #define PR0(...) 129 #define PR1(...) 130 #define PR2(...) 131 132 #define VD_DUMP_DRING_ELEM(elem) 133 134 #endif /* DEBUG */ 135 136 137 /* 138 * Soft state structure for a vds instance 139 */ 140 typedef struct vds { 141 uint_t initialized; /* driver inst initialization flags */ 142 dev_info_t *dip; /* driver inst devinfo pointer */ 143 ldi_ident_t ldi_ident; /* driver's identifier for LDI */ 144 mod_hash_t *vd_table; /* table of virtual disks served */ 145 mdeg_handle_t mdeg; /* handle for MDEG operations */ 146 } vds_t; 147 148 /* 149 * Types of descriptor-processing tasks 150 */ 151 typedef enum vd_task_type { 152 VD_NONFINAL_RANGE_TASK, /* task for intermediate descriptor in range */ 153 VD_FINAL_RANGE_TASK, /* task for last in a range of descriptors */ 154 } vd_task_type_t; 155 156 /* 157 * Structure describing the task for processing a descriptor 158 */ 159 typedef struct vd_task { 160 struct vd *vd; /* vd instance task is for */ 161 vd_task_type_t type; /* type of descriptor task */ 162 int index; /* dring elem index for task */ 163 vio_msg_t *msg; /* VIO message task is for */ 164 size_t msglen; /* length of message content */ 165 size_t msgsize; /* size of message buffer */ 166 vd_dring_payload_t *request; /* request task will perform */ 167 struct buf buf; /* buf(9s) for I/O request */ 168 ldc_mem_handle_t mhdl; /* task memory handle */ 169 } vd_task_t; 170 171 /* 172 * Soft state structure for a virtual disk instance 173 */ 174 typedef struct vd { 175 uint_t initialized; /* vdisk initialization flags */ 176 vds_t *vds; /* server for this vdisk */ 177 ddi_taskq_t *startq; /* queue for I/O start tasks */ 178 ddi_taskq_t *completionq; /* queue for completion tasks */ 179 ldi_handle_t ldi_handle[V_NUMPAR]; /* LDI slice handles */ 180 dev_t dev[V_NUMPAR]; /* dev numbers for slices */ 181 uint_t nslices; /* number of slices */ 182 size_t vdisk_size; /* number of blocks in vdisk */ 183 vd_disk_type_t vdisk_type; /* slice or entire disk */ 184 vd_disk_label_t vdisk_label; /* EFI or VTOC label */ 185 ushort_t max_xfer_sz; /* max xfer size in DEV_BSIZE */ 186 boolean_t pseudo; /* underlying pseudo dev */ 187 struct dk_efi dk_efi; /* synthetic for slice type */ 188 struct dk_geom dk_geom; /* synthetic for slice type */ 189 struct vtoc vtoc; /* synthetic for slice type */ 190 ldc_status_t ldc_state; /* LDC connection state */ 191 ldc_handle_t ldc_handle; /* handle for LDC comm */ 192 size_t max_msglen; /* largest LDC message len */ 193 vd_state_t state; /* client handshake state */ 194 uint8_t xfer_mode; /* transfer mode with client */ 195 uint32_t sid; /* client's session ID */ 196 uint64_t seq_num; /* message sequence number */ 197 uint64_t dring_ident; /* identifier of dring */ 198 ldc_dring_handle_t dring_handle; /* handle for dring ops */ 199 uint32_t descriptor_size; /* num bytes in desc */ 200 uint32_t dring_len; /* number of dring elements */ 201 caddr_t dring; /* address of dring */ 202 vd_task_t inband_task; /* task for inband descriptor */ 203 vd_task_t *dring_task; /* 
tasks for dring elements */

	kmutex_t		lock;		/* protects variables below */
	boolean_t		enabled;	/* is vdisk enabled? */
	boolean_t		reset_state;	/* reset connection state? */
	boolean_t		reset_ldc;	/* reset LDC channel? */
} vd_t;

typedef struct vds_operation {
	uint8_t	operation;
	int	(*start)(vd_task_t *task);
	void	(*complete)(void *arg);
} vds_operation_t;

typedef struct vd_ioctl {
	uint8_t		operation;	/* vdisk operation */
	const char	*operation_name;	/* vdisk operation name */
	size_t		nbytes;		/* size of operation buffer */
	int		cmd;		/* corresponding ioctl cmd */
	const char	*cmd_name;	/* ioctl cmd name */
	void		*arg;		/* ioctl cmd argument */
	/* convert input vd_buf to output ioctl_arg */
	void		(*copyin)(void *vd_buf, void *ioctl_arg);
	/* convert input ioctl_arg to output vd_buf */
	void		(*copyout)(void *ioctl_arg, void *vd_buf);
} vd_ioctl_t;

/* Define trivial copyin/copyout conversion function flag */
#define	VD_IDENTITY	((void (*)(void *, void *))-1)


static int	vds_ldc_retries = VDS_LDC_RETRIES;
static void	*vds_state;
static uint64_t	vds_operations;	/* see vds_operation[] definition below */

static int	vd_open_flags = VD_OPEN_FLAGS;

/*
 * Supported protocol version pairs, from highest (newest) to lowest (oldest)
 *
 * Each supported major version should appear only once, paired with (and only
 * with) its highest supported minor version number (as the protocol requires
 * supporting all lower minor version numbers as well)
 */
static const vio_ver_t	vds_version[] = {{1, 0}};
static const size_t	vds_num_versions =
    sizeof (vds_version)/sizeof (vds_version[0]);

#ifdef DEBUG
static int	vd_msglevel;
#endif /* DEBUG */


static int
vd_start_bio(vd_task_t *task)
{
	int			rv, status = 0;
	vd_t			*vd = task->vd;
	vd_dring_payload_t	*request = task->request;
	struct buf		*buf = &task->buf;
	uint8_t			mtype;


	ASSERT(vd != NULL);
	ASSERT(request != NULL);
	ASSERT(request->slice < vd->nslices);
	ASSERT((request->operation == VD_OP_BREAD) ||
	    (request->operation == VD_OP_BWRITE));

	if (request->nbytes == 0)
		return (EINVAL);	/* no service for trivial requests */

	PR1("%s %lu bytes at block %lu",
	    (request->operation == VD_OP_BREAD) ? "Read" : "Write",
	    request->nbytes, request->addr);

	bioinit(buf);
	buf->b_flags	= B_BUSY;
	buf->b_bcount	= request->nbytes;
	buf->b_lblkno	= request->addr;
	buf->b_edev	= vd->dev[request->slice];

	mtype = (&vd->inband_task == task) ? LDC_SHADOW_MAP : LDC_DIRECT_MAP;

	/* Map memory exported by client */
	status = ldc_mem_map(task->mhdl, request->cookie, request->ncookies,
	    mtype, (request->operation == VD_OP_BREAD) ? LDC_MEM_W : LDC_MEM_R,
	    &(buf->b_un.b_addr), NULL);
	if (status != 0) {
		PRN("ldc_mem_map() returned err %d ", status);
		biofini(buf);
		return (status);
	}

	status = ldc_mem_acquire(task->mhdl, 0, buf->b_bcount);
	if (status != 0) {
		(void) ldc_mem_unmap(task->mhdl);
		PRN("ldc_mem_acquire() returned err %d ", status);
		biofini(buf);
		return (status);
	}

	buf->b_flags |= (request->operation == VD_OP_BREAD) ? B_READ : B_WRITE;

	/* Start the block I/O */
	if ((status = ldi_strategy(vd->ldi_handle[request->slice], buf)) == 0)
		return (EINPROGRESS);	/* will complete on completionq */

	/* Clean up after error */
	rv = ldc_mem_release(task->mhdl, 0, buf->b_bcount);
	if (rv) {
		PRN("ldc_mem_release() returned err %d ", rv);
	}
	rv = ldc_mem_unmap(task->mhdl);
	if (rv) {
		PRN("ldc_mem_unmap() returned err %d ", rv);
	}

	biofini(buf);
	return (status);
}

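/*
 * Send a message to the client over LDC.  ldc_write() is retried a bounded
 * number of times (vds_ldc_retries) while it returns EWOULDBLOCK; any other
 * error, or a partial write, is reported to the caller.
 */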
static int
send_msg(ldc_handle_t ldc_handle, void *msg, size_t msglen)
{
	int	retry, status;
	size_t	nbytes;


	for (retry = 0, status = EWOULDBLOCK;
	    retry < vds_ldc_retries && status == EWOULDBLOCK;
	    retry++) {
		PR1("ldc_write() attempt %d", (retry + 1));
		nbytes = msglen;
		status = ldc_write(ldc_handle, msg, &nbytes);
	}

	if (status != 0) {
		PRN("ldc_write() returned errno %d", status);
		return (status);
	} else if (nbytes != msglen) {
		PRN("ldc_write() performed only partial write");
		return (EIO);
	}

	PR1("SENT %lu bytes", msglen);
	return (0);
}

static void
vd_need_reset(vd_t *vd, boolean_t reset_ldc)
{
	mutex_enter(&vd->lock);
	vd->reset_state	= B_TRUE;
	vd->reset_ldc	= reset_ldc;
	mutex_exit(&vd->lock);
}

/*
 * Reset the state of the connection with a client, if needed; reset the LDC
 * transport as well, if needed.  This function should only be called from the
 * "startq", as it waits for tasks on the "completionq" and will deadlock if
 * called from that queue.
 */
static void
vd_reset_if_needed(vd_t *vd)
{
	int	status = 0;


	mutex_enter(&vd->lock);
	if (!vd->reset_state) {
		ASSERT(!vd->reset_ldc);
		mutex_exit(&vd->lock);
		return;
	}
	mutex_exit(&vd->lock);


	PR0("Resetting connection state with %s", VD_CLIENT(vd));

	/*
	 * Let any asynchronous I/O complete before possibly pulling the rug
	 * out from under it; defer checking vd->reset_ldc, as one of the
	 * asynchronous tasks might set it
	 */
	ddi_taskq_wait(vd->completionq);

	if ((vd->initialized & VD_DRING) &&
	    ((status = ldc_mem_dring_unmap(vd->dring_handle)) != 0))
		PRN("ldc_mem_dring_unmap() returned errno %d", status);

	if (vd->dring_task != NULL) {
		ASSERT(vd->dring_len != 0);
		/* Free all dring_task memory handles */
		for (int i = 0; i < vd->dring_len; i++)
			(void) ldc_mem_free_handle(vd->dring_task[i].mhdl);
		kmem_free(vd->dring_task,
		    (sizeof (*vd->dring_task)) * vd->dring_len);
		vd->dring_task = NULL;
	}


	mutex_enter(&vd->lock);
	if (vd->reset_ldc && ((status = ldc_down(vd->ldc_handle)) != 0))
		PRN("ldc_down() returned errno %d", status);

	vd->initialized	&= ~(VD_SID | VD_SEQ_NUM | VD_DRING);
	vd->state	= VD_STATE_INIT;
	vd->max_msglen	= sizeof (vio_msg_t);	/* baseline vio message size */

	vd->reset_state	= B_FALSE;
	vd->reset_ldc	= B_FALSE;
	mutex_exit(&vd->lock);
}

static int
vd_mark_elem_done(vd_t *vd, int idx, int elem_status)
{
	boolean_t		accepted;
	int			status;
	vd_dring_entry_t	*elem = VD_DRING_ELEM(idx);


	/* Acquire the element */
	if ((status = ldc_mem_dring_acquire(vd->dring_handle, idx, idx)) != 0) {
		PRN("ldc_mem_dring_acquire() returned errno %d", status);
		return (status);
	}

	/* Set the element's status and mark it done */
	accepted =
(elem->hdr.dstate == VIO_DESC_ACCEPTED); 435 if (accepted) { 436 elem->payload.status = elem_status; 437 elem->hdr.dstate = VIO_DESC_DONE; 438 } else { 439 /* Perhaps client timed out waiting for I/O... */ 440 PRN("element %u no longer \"accepted\"", idx); 441 VD_DUMP_DRING_ELEM(elem); 442 } 443 /* Release the element */ 444 if ((status = ldc_mem_dring_release(vd->dring_handle, idx, idx)) != 0) { 445 PRN("ldc_mem_dring_release() returned errno %d", status); 446 return (status); 447 } 448 449 return (accepted ? 0 : EINVAL); 450 } 451 452 static void 453 vd_complete_bio(void *arg) 454 { 455 int status = 0; 456 vd_task_t *task = (vd_task_t *)arg; 457 vd_t *vd = task->vd; 458 vd_dring_payload_t *request = task->request; 459 struct buf *buf = &task->buf; 460 461 462 ASSERT(vd != NULL); 463 ASSERT(request != NULL); 464 ASSERT(task->msg != NULL); 465 ASSERT(task->msglen >= sizeof (*task->msg)); 466 ASSERT(task->msgsize >= task->msglen); 467 468 /* Wait for the I/O to complete */ 469 request->status = biowait(buf); 470 471 /* Release the buffer */ 472 status = ldc_mem_release(task->mhdl, 0, buf->b_bcount); 473 if (status) { 474 PRN("ldc_mem_release() returned errno %d copying to client", 475 status); 476 } 477 478 /* Unmap the memory */ 479 status = ldc_mem_unmap(task->mhdl); 480 if (status) { 481 PRN("ldc_mem_unmap() returned errno %d copying to client", 482 status); 483 } 484 485 biofini(buf); 486 487 /* Update the dring element for a dring client */ 488 if ((status == 0) && (vd->xfer_mode == VIO_DRING_MODE)) 489 status = vd_mark_elem_done(vd, task->index, request->status); 490 491 /* 492 * If a transport error occurred, arrange to "nack" the message when 493 * the final task in the descriptor element range completes 494 */ 495 if (status != 0) 496 task->msg->tag.vio_subtype = VIO_SUBTYPE_NACK; 497 498 /* 499 * Only the final task for a range of elements will respond to and 500 * free the message 501 */ 502 if (task->type == VD_NONFINAL_RANGE_TASK) 503 return; 504 505 /* 506 * Send the "ack" or "nack" back to the client; if sending the message 507 * via LDC fails, arrange to reset both the connection state and LDC 508 * itself 509 */ 510 PR1("Sending %s", 511 (task->msg->tag.vio_subtype == VIO_SUBTYPE_ACK) ? 
"ACK" : "NACK"); 512 if (send_msg(vd->ldc_handle, task->msg, task->msglen) != 0) 513 vd_need_reset(vd, B_TRUE); 514 515 /* Free the message now that it has been used for the reply */ 516 kmem_free(task->msg, task->msgsize); 517 } 518 519 static void 520 vd_geom2dk_geom(void *vd_buf, void *ioctl_arg) 521 { 522 VD_GEOM2DK_GEOM((vd_geom_t *)vd_buf, (struct dk_geom *)ioctl_arg); 523 } 524 525 static void 526 vd_vtoc2vtoc(void *vd_buf, void *ioctl_arg) 527 { 528 VD_VTOC2VTOC((vd_vtoc_t *)vd_buf, (struct vtoc *)ioctl_arg); 529 } 530 531 static void 532 dk_geom2vd_geom(void *ioctl_arg, void *vd_buf) 533 { 534 DK_GEOM2VD_GEOM((struct dk_geom *)ioctl_arg, (vd_geom_t *)vd_buf); 535 } 536 537 static void 538 vtoc2vd_vtoc(void *ioctl_arg, void *vd_buf) 539 { 540 VTOC2VD_VTOC((struct vtoc *)ioctl_arg, (vd_vtoc_t *)vd_buf); 541 } 542 543 static void 544 vd_get_efi_in(void *vd_buf, void *ioctl_arg) 545 { 546 vd_efi_t *vd_efi = (vd_efi_t *)vd_buf; 547 dk_efi_t *dk_efi = (dk_efi_t *)ioctl_arg; 548 549 dk_efi->dki_lba = vd_efi->lba; 550 dk_efi->dki_length = vd_efi->length; 551 dk_efi->dki_data = kmem_zalloc(vd_efi->length, KM_SLEEP); 552 } 553 554 static void 555 vd_get_efi_out(void *ioctl_arg, void *vd_buf) 556 { 557 int len; 558 vd_efi_t *vd_efi = (vd_efi_t *)vd_buf; 559 dk_efi_t *dk_efi = (dk_efi_t *)ioctl_arg; 560 561 len = vd_efi->length; 562 DK_EFI2VD_EFI(dk_efi, vd_efi); 563 kmem_free(dk_efi->dki_data, len); 564 } 565 566 static void 567 vd_set_efi_in(void *vd_buf, void *ioctl_arg) 568 { 569 vd_efi_t *vd_efi = (vd_efi_t *)vd_buf; 570 dk_efi_t *dk_efi = (dk_efi_t *)ioctl_arg; 571 572 dk_efi->dki_data = kmem_alloc(vd_efi->length, KM_SLEEP); 573 VD_EFI2DK_EFI(vd_efi, dk_efi); 574 } 575 576 static void 577 vd_set_efi_out(void *ioctl_arg, void *vd_buf) 578 { 579 vd_efi_t *vd_efi = (vd_efi_t *)vd_buf; 580 dk_efi_t *dk_efi = (dk_efi_t *)ioctl_arg; 581 582 kmem_free(dk_efi->dki_data, vd_efi->length); 583 } 584 585 static int 586 vd_read_vtoc(ldi_handle_t handle, struct vtoc *vtoc, vd_disk_label_t *label) 587 { 588 int status, rval; 589 struct dk_gpt *efi; 590 size_t efi_len; 591 592 *label = VD_DISK_LABEL_UNK; 593 594 status = ldi_ioctl(handle, DKIOCGVTOC, (intptr_t)vtoc, 595 (vd_open_flags | FKIOCTL), kcred, &rval); 596 597 if (status == 0) { 598 *label = VD_DISK_LABEL_VTOC; 599 return (0); 600 } else if (status != ENOTSUP) { 601 PRN("ldi_ioctl(DKIOCGVTOC) returned error %d", status); 602 return (status); 603 } 604 605 status = vds_efi_alloc_and_read(handle, &efi, &efi_len); 606 607 if (status) { 608 PRN("vds_efi_alloc_and_read returned error %d", status); 609 return (status); 610 } 611 612 *label = VD_DISK_LABEL_EFI; 613 vd_efi_to_vtoc(efi, vtoc); 614 vd_efi_free(efi, efi_len); 615 616 return (0); 617 } 618 619 static int 620 vd_do_slice_ioctl(vd_t *vd, int cmd, void *ioctl_arg) 621 { 622 dk_efi_t *dk_ioc; 623 624 switch (vd->vdisk_label) { 625 626 case VD_DISK_LABEL_VTOC: 627 628 switch (cmd) { 629 case DKIOCGGEOM: 630 ASSERT(ioctl_arg != NULL); 631 bcopy(&vd->dk_geom, ioctl_arg, sizeof (vd->dk_geom)); 632 return (0); 633 case DKIOCGVTOC: 634 ASSERT(ioctl_arg != NULL); 635 bcopy(&vd->vtoc, ioctl_arg, sizeof (vd->vtoc)); 636 return (0); 637 default: 638 return (ENOTSUP); 639 } 640 641 case VD_DISK_LABEL_EFI: 642 643 switch (cmd) { 644 case DKIOCGETEFI: 645 ASSERT(ioctl_arg != NULL); 646 dk_ioc = (dk_efi_t *)ioctl_arg; 647 if (dk_ioc->dki_length < vd->dk_efi.dki_length) 648 return (EINVAL); 649 bcopy(vd->dk_efi.dki_data, dk_ioc->dki_data, 650 vd->dk_efi.dki_length); 651 return (0); 652 default: 653 return 
(ENOTSUP); 654 } 655 656 default: 657 return (ENOTSUP); 658 } 659 } 660 661 static int 662 vd_do_ioctl(vd_t *vd, vd_dring_payload_t *request, void* buf, vd_ioctl_t *ioctl) 663 { 664 int rval = 0, status; 665 size_t nbytes = request->nbytes; /* modifiable copy */ 666 667 668 ASSERT(request->slice < vd->nslices); 669 PR0("Performing %s", ioctl->operation_name); 670 671 /* Get data from client and convert, if necessary */ 672 if (ioctl->copyin != NULL) { 673 ASSERT(nbytes != 0 && buf != NULL); 674 PR1("Getting \"arg\" data from client"); 675 if ((status = ldc_mem_copy(vd->ldc_handle, buf, 0, &nbytes, 676 request->cookie, request->ncookies, 677 LDC_COPY_IN)) != 0) { 678 PRN("ldc_mem_copy() returned errno %d " 679 "copying from client", status); 680 return (status); 681 } 682 683 /* Convert client's data, if necessary */ 684 if (ioctl->copyin == VD_IDENTITY) /* use client buffer */ 685 ioctl->arg = buf; 686 else /* convert client vdisk operation data to ioctl data */ 687 (ioctl->copyin)(buf, (void *)ioctl->arg); 688 } 689 690 /* 691 * Handle single-slice block devices internally; otherwise, have the 692 * real driver perform the ioctl() 693 */ 694 if (vd->vdisk_type == VD_DISK_TYPE_SLICE && !vd->pseudo) { 695 if ((status = vd_do_slice_ioctl(vd, ioctl->cmd, 696 (void *)ioctl->arg)) != 0) 697 return (status); 698 } else if ((status = ldi_ioctl(vd->ldi_handle[request->slice], 699 ioctl->cmd, (intptr_t)ioctl->arg, (vd_open_flags | FKIOCTL), 700 kcred, &rval)) != 0) { 701 PR0("ldi_ioctl(%s) = errno %d", ioctl->cmd_name, status); 702 return (status); 703 } 704 #ifdef DEBUG 705 if (rval != 0) { 706 PRN("%s set rval = %d, which is not being returned to client", 707 ioctl->cmd_name, rval); 708 } 709 #endif /* DEBUG */ 710 711 /* Convert data and send to client, if necessary */ 712 if (ioctl->copyout != NULL) { 713 ASSERT(nbytes != 0 && buf != NULL); 714 PR1("Sending \"arg\" data to client"); 715 716 /* Convert ioctl data to vdisk operation data, if necessary */ 717 if (ioctl->copyout != VD_IDENTITY) 718 (ioctl->copyout)((void *)ioctl->arg, buf); 719 720 if ((status = ldc_mem_copy(vd->ldc_handle, buf, 0, &nbytes, 721 request->cookie, request->ncookies, 722 LDC_COPY_OUT)) != 0) { 723 PRN("ldc_mem_copy() returned errno %d " 724 "copying to client", status); 725 return (status); 726 } 727 } 728 729 return (status); 730 } 731 732 /* 733 * Open any slices which have become non-empty as a result of performing a 734 * set-VTOC operation for the client. 735 * 736 * When serving a full disk, vds attempts to exclusively open all of the 737 * disk's slices to prevent another thread or process in the service domain 738 * from "stealing" a slice or from performing I/O to a slice while a vds 739 * client is accessing it. Unfortunately, underlying drivers, such as sd(7d) 740 * and cmdk(7d), return an error when attempting to open the device file for a 741 * slice which is currently empty according to the VTOC. This driver behavior 742 * means that vds must skip opening empty slices when initializing a vdisk for 743 * full-disk service and try to open slices that become non-empty (via a 744 * set-VTOC operation) during use of the full disk in order to begin serving 745 * such slices to the client. 
This approach has an inherent (and therefore
 * unavoidable) race condition; it also means that failure to open a
 * newly-non-empty slice has different semantics than failure to open an
 * initially-non-empty slice:  Due to driver behavior, opening a
 * newly-non-empty slice is a necessary side effect of vds performing a
 * (successful) set-VTOC operation for a client on an in-service (and in-use)
 * disk in order to begin serving the slice; failure of this side-effect
 * operation does not mean that the client's set-VTOC operation failed or that
 * operations on other slices must fail.  Therefore, this function prints an
 * error message on failure to open a slice, but does not return an error to
 * its caller--unlike failure to open a slice initially, which results in an
 * error that prevents serving the vdisk (and thereby requires an
 * administrator to resolve the problem).  Note that, apart from another
 * thread or process opening a new slice during the race-condition window,
 * failure to open a slice in this function will likely indicate an underlying
 * drive problem, which will also likely become evident in errors returned by
 * operations on other slices, and which will require administrative
 * intervention and possibly servicing the drive.
 */
static void
vd_open_new_slices(vd_t *vd)
{
	int		status;
	struct vtoc	vtoc;

	/* Get the (new) partitions for updated slice sizes */
	if ((status = vd_read_vtoc(vd->ldi_handle[0], &vtoc,
	    &vd->vdisk_label)) != 0) {
		PRN("vd_read_vtoc returned error %d", status);
		return;
	}

	/* Open any newly-non-empty slices */
	for (int slice = 0; slice < vd->nslices; slice++) {
		/* Skip zero-length slices */
		if (vtoc.v_part[slice].p_size == 0) {
			if (vd->ldi_handle[slice] != NULL)
				PR0("Open slice %u now has zero length", slice);
			continue;
		}

		/* Skip already-open slices */
		if (vd->ldi_handle[slice] != NULL)
			continue;

		PR0("Opening newly-non-empty slice %u", slice);
		if ((status = ldi_open_by_dev(&vd->dev[slice], OTYP_BLK,
		    vd_open_flags, kcred, &vd->ldi_handle[slice],
		    vd->vds->ldi_ident)) != 0) {
			PRN("ldi_open_by_dev() returned errno %d "
			    "for slice %u", status, slice);
		}
	}
}

#define	RNDSIZE(expr)	P2ROUNDUP(sizeof (expr), sizeof (uint64_t))
static int
vd_ioctl(vd_task_t *task)
{
	int		i, status;
	void		*buf = NULL;
	struct dk_geom	dk_geom = {0};
	struct vtoc	vtoc = {0};
	struct dk_efi	dk_efi = {0};
	vd_t		*vd = task->vd;
	vd_dring_payload_t	*request = task->request;
	vd_ioctl_t	ioctl[] = {
		/* Command (no-copy) operations */
		{VD_OP_FLUSH, STRINGIZE(VD_OP_FLUSH), 0,
		    DKIOCFLUSHWRITECACHE, STRINGIZE(DKIOCFLUSHWRITECACHE),
		    NULL, NULL, NULL},

		/* "Get" (copy-out) operations */
		{VD_OP_GET_WCE, STRINGIZE(VD_OP_GET_WCE), RNDSIZE(int),
		    DKIOCGETWCE, STRINGIZE(DKIOCGETWCE),
		    NULL, VD_IDENTITY, VD_IDENTITY},
		{VD_OP_GET_DISKGEOM, STRINGIZE(VD_OP_GET_DISKGEOM),
		    RNDSIZE(vd_geom_t),
		    DKIOCGGEOM, STRINGIZE(DKIOCGGEOM),
		    &dk_geom, NULL, dk_geom2vd_geom},
		{VD_OP_GET_VTOC, STRINGIZE(VD_OP_GET_VTOC), RNDSIZE(vd_vtoc_t),
		    DKIOCGVTOC, STRINGIZE(DKIOCGVTOC),
		    &vtoc, NULL, vtoc2vd_vtoc},
		{VD_OP_GET_EFI, STRINGIZE(VD_OP_GET_EFI), RNDSIZE(vd_efi_t),
		    DKIOCGETEFI, STRINGIZE(DKIOCGETEFI),
		    &dk_efi, vd_get_efi_in, vd_get_efi_out},

		/* "Set" (copy-in) operations */
		{VD_OP_SET_WCE,
STRINGIZE(VD_OP_SET_WCE), RNDSIZE(int), 834 DKIOCSETWCE, STRINGIZE(DKIOCSETWCE), 835 NULL, VD_IDENTITY, VD_IDENTITY}, 836 {VD_OP_SET_DISKGEOM, STRINGIZE(VD_OP_SET_DISKGEOM), 837 RNDSIZE(vd_geom_t), 838 DKIOCSGEOM, STRINGIZE(DKIOCSGEOM), 839 &dk_geom, vd_geom2dk_geom, NULL}, 840 {VD_OP_SET_VTOC, STRINGIZE(VD_OP_SET_VTOC), RNDSIZE(vd_vtoc_t), 841 DKIOCSVTOC, STRINGIZE(DKIOCSVTOC), 842 &vtoc, vd_vtoc2vtoc, NULL}, 843 {VD_OP_SET_EFI, STRINGIZE(VD_OP_SET_EFI), RNDSIZE(vd_efi_t), 844 DKIOCSETEFI, STRINGIZE(DKIOCSETEFI), 845 &dk_efi, vd_set_efi_in, vd_set_efi_out}, 846 }; 847 size_t nioctls = (sizeof (ioctl))/(sizeof (ioctl[0])); 848 849 850 ASSERT(vd != NULL); 851 ASSERT(request != NULL); 852 ASSERT(request->slice < vd->nslices); 853 854 /* 855 * Determine ioctl corresponding to caller's "operation" and 856 * validate caller's "nbytes" 857 */ 858 for (i = 0; i < nioctls; i++) { 859 if (request->operation == ioctl[i].operation) { 860 /* LDC memory operations require 8-byte multiples */ 861 ASSERT(ioctl[i].nbytes % sizeof (uint64_t) == 0); 862 863 if (request->operation == VD_OP_GET_EFI || 864 request->operation == VD_OP_SET_EFI) { 865 if (request->nbytes >= ioctl[i].nbytes) 866 break; 867 PRN("%s: Expected at least nbytes = %lu, " 868 "got %lu", ioctl[i].operation_name, 869 ioctl[i].nbytes, request->nbytes); 870 return (EINVAL); 871 } 872 873 if (request->nbytes != ioctl[i].nbytes) { 874 PRN("%s: Expected nbytes = %lu, got %lu", 875 ioctl[i].operation_name, ioctl[i].nbytes, 876 request->nbytes); 877 return (EINVAL); 878 } 879 880 break; 881 } 882 } 883 ASSERT(i < nioctls); /* because "operation" already validated */ 884 885 if (request->nbytes) 886 buf = kmem_zalloc(request->nbytes, KM_SLEEP); 887 status = vd_do_ioctl(vd, request, buf, &ioctl[i]); 888 if (request->nbytes) 889 kmem_free(buf, request->nbytes); 890 if (vd->vdisk_type == VD_DISK_TYPE_DISK && 891 (request->operation == VD_OP_SET_VTOC || 892 request->operation == VD_OP_SET_EFI)) 893 vd_open_new_slices(vd); 894 PR0("Returning %d", status); 895 return (status); 896 } 897 898 static int 899 vd_get_devid(vd_task_t *task) 900 { 901 vd_t *vd = task->vd; 902 vd_dring_payload_t *request = task->request; 903 vd_devid_t *vd_devid; 904 impl_devid_t *devid; 905 int status, bufid_len, devid_len, len; 906 907 PR1("Get Device ID"); 908 909 if (ddi_lyr_get_devid(vd->dev[request->slice], 910 (ddi_devid_t *)&devid) != DDI_SUCCESS) { 911 /* the most common failure is that no devid is available */ 912 return (ENOENT); 913 } 914 915 bufid_len = request->nbytes - sizeof (vd_devid_t) + 1; 916 devid_len = DEVID_GETLEN(devid); 917 918 vd_devid = kmem_zalloc(request->nbytes, KM_SLEEP); 919 vd_devid->length = devid_len; 920 vd_devid->type = DEVID_GETTYPE(devid); 921 922 len = (devid_len > bufid_len)? 
bufid_len : devid_len; 923 924 bcopy(devid->did_id, vd_devid->id, len); 925 926 /* LDC memory operations require 8-byte multiples */ 927 ASSERT(request->nbytes % sizeof (uint64_t) == 0); 928 929 if ((status = ldc_mem_copy(vd->ldc_handle, (caddr_t)vd_devid, 0, 930 &request->nbytes, request->cookie, request->ncookies, 931 LDC_COPY_OUT)) != 0) { 932 PRN("ldc_mem_copy() returned errno %d copying to client", 933 status); 934 } 935 936 kmem_free(vd_devid, request->nbytes); 937 ddi_devid_free((ddi_devid_t)devid); 938 939 return (status); 940 } 941 942 /* 943 * Define the supported operations once the functions for performing them have 944 * been defined 945 */ 946 static const vds_operation_t vds_operation[] = { 947 {VD_OP_BREAD, vd_start_bio, vd_complete_bio}, 948 {VD_OP_BWRITE, vd_start_bio, vd_complete_bio}, 949 {VD_OP_FLUSH, vd_ioctl, NULL}, 950 {VD_OP_GET_WCE, vd_ioctl, NULL}, 951 {VD_OP_SET_WCE, vd_ioctl, NULL}, 952 {VD_OP_GET_VTOC, vd_ioctl, NULL}, 953 {VD_OP_SET_VTOC, vd_ioctl, NULL}, 954 {VD_OP_GET_DISKGEOM, vd_ioctl, NULL}, 955 {VD_OP_SET_DISKGEOM, vd_ioctl, NULL}, 956 {VD_OP_GET_EFI, vd_ioctl, NULL}, 957 {VD_OP_SET_EFI, vd_ioctl, NULL}, 958 {VD_OP_GET_DEVID, vd_get_devid, NULL}, 959 }; 960 961 static const size_t vds_noperations = 962 (sizeof (vds_operation))/(sizeof (vds_operation[0])); 963 964 /* 965 * Process a task specifying a client I/O request 966 */ 967 static int 968 vd_process_task(vd_task_t *task) 969 { 970 int i, status; 971 vd_t *vd = task->vd; 972 vd_dring_payload_t *request = task->request; 973 974 975 ASSERT(vd != NULL); 976 ASSERT(request != NULL); 977 978 /* Find the requested operation */ 979 for (i = 0; i < vds_noperations; i++) 980 if (request->operation == vds_operation[i].operation) 981 break; 982 if (i == vds_noperations) { 983 PRN("Unsupported operation %u", request->operation); 984 return (ENOTSUP); 985 } 986 987 /* Handle client using absolute disk offsets */ 988 if ((vd->vdisk_type == VD_DISK_TYPE_DISK) && 989 (request->slice == UINT8_MAX)) 990 request->slice = VD_ENTIRE_DISK_SLICE; 991 992 /* Range-check slice */ 993 if (request->slice >= vd->nslices) { 994 PRN("Invalid \"slice\" %u (max %u) for virtual disk", 995 request->slice, (vd->nslices - 1)); 996 return (EINVAL); 997 } 998 999 /* Start the operation */ 1000 if ((status = vds_operation[i].start(task)) != EINPROGRESS) { 1001 request->status = status; /* op succeeded or failed */ 1002 return (0); /* but request completed */ 1003 } 1004 1005 ASSERT(vds_operation[i].complete != NULL); /* debug case */ 1006 if (vds_operation[i].complete == NULL) { /* non-debug case */ 1007 PRN("Unexpected return of EINPROGRESS " 1008 "with no I/O completion handler"); 1009 request->status = EIO; /* operation failed */ 1010 return (0); /* but request completed */ 1011 } 1012 1013 /* Queue a task to complete the operation */ 1014 status = ddi_taskq_dispatch(vd->completionq, vds_operation[i].complete, 1015 task, DDI_SLEEP); 1016 /* ddi_taskq_dispatch(9f) guarantees success with DDI_SLEEP */ 1017 ASSERT(status == DDI_SUCCESS); 1018 1019 PR1("Operation in progress"); 1020 return (EINPROGRESS); /* completion handler will finish request */ 1021 } 1022 1023 /* 1024 * Return true if the "type", "subtype", and "env" fields of the "tag" first 1025 * argument match the corresponding remaining arguments; otherwise, return false 1026 */ 1027 boolean_t 1028 vd_msgtype(vio_msg_tag_t *tag, int type, int subtype, int env) 1029 { 1030 return ((tag->vio_msgtype == type) && 1031 (tag->vio_subtype == subtype) && 1032 (tag->vio_subtype_env == 
env)) ? B_TRUE : B_FALSE; 1033 } 1034 1035 /* 1036 * Check whether the major/minor version specified in "ver_msg" is supported 1037 * by this server. 1038 */ 1039 static boolean_t 1040 vds_supported_version(vio_ver_msg_t *ver_msg) 1041 { 1042 for (int i = 0; i < vds_num_versions; i++) { 1043 ASSERT(vds_version[i].major > 0); 1044 ASSERT((i == 0) || 1045 (vds_version[i].major < vds_version[i-1].major)); 1046 1047 /* 1048 * If the major versions match, adjust the minor version, if 1049 * necessary, down to the highest value supported by this 1050 * server and return true so this message will get "ack"ed; 1051 * the client should also support all minor versions lower 1052 * than the value it sent 1053 */ 1054 if (ver_msg->ver_major == vds_version[i].major) { 1055 if (ver_msg->ver_minor > vds_version[i].minor) { 1056 PR0("Adjusting minor version from %u to %u", 1057 ver_msg->ver_minor, vds_version[i].minor); 1058 ver_msg->ver_minor = vds_version[i].minor; 1059 } 1060 return (B_TRUE); 1061 } 1062 1063 /* 1064 * If the message contains a higher major version number, set 1065 * the message's major/minor versions to the current values 1066 * and return false, so this message will get "nack"ed with 1067 * these values, and the client will potentially try again 1068 * with the same or a lower version 1069 */ 1070 if (ver_msg->ver_major > vds_version[i].major) { 1071 ver_msg->ver_major = vds_version[i].major; 1072 ver_msg->ver_minor = vds_version[i].minor; 1073 return (B_FALSE); 1074 } 1075 1076 /* 1077 * Otherwise, the message's major version is less than the 1078 * current major version, so continue the loop to the next 1079 * (lower) supported version 1080 */ 1081 } 1082 1083 /* 1084 * No common version was found; "ground" the version pair in the 1085 * message to terminate negotiation 1086 */ 1087 ver_msg->ver_major = 0; 1088 ver_msg->ver_minor = 0; 1089 return (B_FALSE); 1090 } 1091 1092 /* 1093 * Process a version message from a client. vds expects to receive version 1094 * messages from clients seeking service, but never issues version messages 1095 * itself; therefore, vds can ACK or NACK client version messages, but does 1096 * not expect to receive version-message ACKs or NACKs (and will treat such 1097 * messages as invalid). 1098 */ 1099 static int 1100 vd_process_ver_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 1101 { 1102 vio_ver_msg_t *ver_msg = (vio_ver_msg_t *)msg; 1103 1104 1105 ASSERT(msglen >= sizeof (msg->tag)); 1106 1107 if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, 1108 VIO_VER_INFO)) { 1109 return (ENOMSG); /* not a version message */ 1110 } 1111 1112 if (msglen != sizeof (*ver_msg)) { 1113 PRN("Expected %lu-byte version message; " 1114 "received %lu bytes", sizeof (*ver_msg), msglen); 1115 return (EBADMSG); 1116 } 1117 1118 if (ver_msg->dev_class != VDEV_DISK) { 1119 PRN("Expected device class %u (disk); received %u", 1120 VDEV_DISK, ver_msg->dev_class); 1121 return (EBADMSG); 1122 } 1123 1124 /* 1125 * We're talking to the expected kind of client; set our device class 1126 * for "ack/nack" back to the client 1127 */ 1128 ver_msg->dev_class = VDEV_DISK_SERVER; 1129 1130 /* 1131 * Check whether the (valid) version message specifies a version 1132 * supported by this server. 
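	 * For example, with vds_version[] = {{1, 0}}, a client proposing
	 * version 1.2 is "ack"ed with the minor version adjusted down to 0,
	 * while a client proposing 2.0 is "nack"ed and offered 1.0.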
	 * If the version is not supported, return
	 * EBADMSG so the message will get "nack"ed; vds_supported_version()
	 * will have updated the message with a supported version for the
	 * client to consider
	 */
	if (!vds_supported_version(ver_msg))
		return (EBADMSG);


	/*
	 * A version has been agreed upon; use the client's SID for
	 * communication on this channel now
	 */
	ASSERT(!(vd->initialized & VD_SID));
	vd->sid = ver_msg->tag.vio_sid;
	vd->initialized |= VD_SID;

	/*
	 * When multiple versions are supported, this function should store
	 * the negotiated major and minor version values in the "vd" data
	 * structure to govern further communication; in particular, note that
	 * the client might have specified a lower minor version for the
	 * agreed major version than specified in the vds_version[] array.  The
	 * following assertions should help remind future maintainers to make
	 * the appropriate changes to support multiple versions.
	 */
	ASSERT(vds_num_versions == 1);
	ASSERT(ver_msg->ver_major == vds_version[0].major);
	ASSERT(ver_msg->ver_minor == vds_version[0].minor);

	PR0("Using major version %u, minor version %u",
	    ver_msg->ver_major, ver_msg->ver_minor);
	return (0);
}

static int
vd_process_attr_msg(vd_t *vd, vio_msg_t *msg, size_t msglen)
{
	vd_attr_msg_t	*attr_msg = (vd_attr_msg_t *)msg;


	ASSERT(msglen >= sizeof (msg->tag));

	if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO,
	    VIO_ATTR_INFO)) {
		PR0("Message is not an attribute message");
		return (ENOMSG);
	}

	if (msglen != sizeof (*attr_msg)) {
		PRN("Expected %lu-byte attribute message; "
		    "received %lu bytes", sizeof (*attr_msg), msglen);
		return (EBADMSG);
	}

	if (attr_msg->max_xfer_sz == 0) {
		PRN("Received maximum transfer size of 0 from client");
		return (EBADMSG);
	}

	if ((attr_msg->xfer_mode != VIO_DESC_MODE) &&
	    (attr_msg->xfer_mode != VIO_DRING_MODE)) {
		PRN("Client requested unsupported transfer mode");
		return (EBADMSG);
	}


	/* Success:  valid message and transfer mode */
	vd->xfer_mode = attr_msg->xfer_mode;
	if (vd->xfer_mode == VIO_DESC_MODE) {
		/*
		 * The vd_dring_inband_msg_t contains one cookie; need room
		 * for up to n-1 more cookies, where "n" is the number of full
		 * pages plus possibly one partial page required to cover
		 * "max_xfer_sz".  Add room for one more cookie if
		 * "max_xfer_sz" isn't an integral multiple of the page size.
		 * Must first get the maximum transfer size in bytes.
		 */
		size_t	max_xfer_bytes = attr_msg->vdisk_block_size ?
		    attr_msg->vdisk_block_size*attr_msg->max_xfer_sz :
		    attr_msg->max_xfer_sz;
		size_t	max_inband_msglen =
		    sizeof (vd_dring_inband_msg_t) +
		    ((max_xfer_bytes/PAGESIZE +
			((max_xfer_bytes % PAGESIZE) ? 1 : 0))*
			(sizeof (ldc_mem_cookie_t)));

		/*
		 * Set the maximum expected message length to
		 * accommodate in-band-descriptor messages with all
		 * their cookies
		 */
		vd->max_msglen = MAX(vd->max_msglen, max_inband_msglen);

		/*
		 * Initialize the data structure for processing in-band I/O
		 * request descriptors
		 */
		vd->inband_task.vd	= vd;
		vd->inband_task.index	= 0;
		vd->inband_task.type	= VD_FINAL_RANGE_TASK;	/* range == 1 */
	}

	/* Return the device's block size and max transfer size to the client */
	attr_msg->vdisk_block_size = DEV_BSIZE;
	attr_msg->max_xfer_sz = vd->max_xfer_sz;

	attr_msg->vdisk_size = vd->vdisk_size;
	attr_msg->vdisk_type = vd->vdisk_type;
	attr_msg->operations = vds_operations;
	PR0("%s", VD_CLIENT(vd));
	return (0);
}

static int
vd_process_dring_reg_msg(vd_t *vd, vio_msg_t *msg, size_t msglen)
{
	int		status;
	size_t		expected;
	ldc_mem_info_t	dring_minfo;
	vio_dring_reg_msg_t	*reg_msg = (vio_dring_reg_msg_t *)msg;


	ASSERT(msglen >= sizeof (msg->tag));

	if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO,
	    VIO_DRING_REG)) {
		PR0("Message is not a register-dring message");
		return (ENOMSG);
	}

	if (msglen < sizeof (*reg_msg)) {
		PRN("Expected at least %lu-byte register-dring message; "
		    "received %lu bytes", sizeof (*reg_msg), msglen);
		return (EBADMSG);
	}

	expected = sizeof (*reg_msg) +
	    (reg_msg->ncookies - 1)*(sizeof (reg_msg->cookie[0]));
	if (msglen != expected) {
		PRN("Expected %lu-byte register-dring message; "
		    "received %lu bytes", expected, msglen);
		return (EBADMSG);
	}

	if (vd->initialized & VD_DRING) {
		PRN("A dring was previously registered; only support one");
		return (EBADMSG);
	}

	if (reg_msg->num_descriptors > INT32_MAX) {
		PRN("reg_msg->num_descriptors = %u; must be <= %u (%s)",
		    reg_msg->num_descriptors, INT32_MAX, STRINGIZE(INT32_MAX));
		return (EBADMSG);
	}

	if (reg_msg->ncookies != 1) {
		/*
		 * In addition to fixing the assertion in the success case
		 * below, supporting drings which require more than one
		 * "cookie" requires increasing the value of vd->max_msglen
		 * somewhere in the code path prior to receiving the message
		 * which results in calling this function.  Note that without
		 * making this change, the larger message size required to
		 * accommodate multiple cookies cannot be successfully
		 * received, so this function will not even get called.
1298 * Gracefully accommodating more dring cookies might 1299 * reasonably demand exchanging an additional attribute or 1300 * making a minor protocol adjustment 1301 */ 1302 PRN("reg_msg->ncookies = %u != 1", reg_msg->ncookies); 1303 return (EBADMSG); 1304 } 1305 1306 status = ldc_mem_dring_map(vd->ldc_handle, reg_msg->cookie, 1307 reg_msg->ncookies, reg_msg->num_descriptors, 1308 reg_msg->descriptor_size, LDC_DIRECT_MAP, &vd->dring_handle); 1309 if (status != 0) { 1310 PRN("ldc_mem_dring_map() returned errno %d", status); 1311 return (status); 1312 } 1313 1314 /* 1315 * To remove the need for this assertion, must call 1316 * ldc_mem_dring_nextcookie() successfully ncookies-1 times after a 1317 * successful call to ldc_mem_dring_map() 1318 */ 1319 ASSERT(reg_msg->ncookies == 1); 1320 1321 if ((status = 1322 ldc_mem_dring_info(vd->dring_handle, &dring_minfo)) != 0) { 1323 PRN("ldc_mem_dring_info() returned errno %d", status); 1324 if ((status = ldc_mem_dring_unmap(vd->dring_handle)) != 0) 1325 PRN("ldc_mem_dring_unmap() returned errno %d", status); 1326 return (status); 1327 } 1328 1329 if (dring_minfo.vaddr == NULL) { 1330 PRN("Descriptor ring virtual address is NULL"); 1331 return (ENXIO); 1332 } 1333 1334 1335 /* Initialize for valid message and mapped dring */ 1336 PR1("descriptor size = %u, dring length = %u", 1337 vd->descriptor_size, vd->dring_len); 1338 vd->initialized |= VD_DRING; 1339 vd->dring_ident = 1; /* "There Can Be Only One" */ 1340 vd->dring = dring_minfo.vaddr; 1341 vd->descriptor_size = reg_msg->descriptor_size; 1342 vd->dring_len = reg_msg->num_descriptors; 1343 reg_msg->dring_ident = vd->dring_ident; 1344 1345 /* 1346 * Allocate and initialize a "shadow" array of data structures for 1347 * tasks to process I/O requests in dring elements 1348 */ 1349 vd->dring_task = 1350 kmem_zalloc((sizeof (*vd->dring_task)) * vd->dring_len, KM_SLEEP); 1351 for (int i = 0; i < vd->dring_len; i++) { 1352 vd->dring_task[i].vd = vd; 1353 vd->dring_task[i].index = i; 1354 vd->dring_task[i].request = &VD_DRING_ELEM(i)->payload; 1355 1356 status = ldc_mem_alloc_handle(vd->ldc_handle, 1357 &(vd->dring_task[i].mhdl)); 1358 if (status) { 1359 PRN("ldc_mem_alloc_handle() returned err %d ", status); 1360 return (ENXIO); 1361 } 1362 } 1363 1364 return (0); 1365 } 1366 1367 static int 1368 vd_process_dring_unreg_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) 1369 { 1370 vio_dring_unreg_msg_t *unreg_msg = (vio_dring_unreg_msg_t *)msg; 1371 1372 1373 ASSERT(msglen >= sizeof (msg->tag)); 1374 1375 if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, 1376 VIO_DRING_UNREG)) { 1377 PR0("Message is not an unregister-dring message"); 1378 return (ENOMSG); 1379 } 1380 1381 if (msglen != sizeof (*unreg_msg)) { 1382 PRN("Expected %lu-byte unregister-dring message; " 1383 "received %lu bytes", sizeof (*unreg_msg), msglen); 1384 return (EBADMSG); 1385 } 1386 1387 if (unreg_msg->dring_ident != vd->dring_ident) { 1388 PRN("Expected dring ident %lu; received %lu", 1389 vd->dring_ident, unreg_msg->dring_ident); 1390 return (EBADMSG); 1391 } 1392 1393 return (0); 1394 } 1395 1396 static int 1397 process_rdx_msg(vio_msg_t *msg, size_t msglen) 1398 { 1399 ASSERT(msglen >= sizeof (msg->tag)); 1400 1401 if (!vd_msgtype(&msg->tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_RDX)) { 1402 PR0("Message is not an RDX message"); 1403 return (ENOMSG); 1404 } 1405 1406 if (msglen != sizeof (vio_rdx_msg_t)) { 1407 PRN("Expected %lu-byte RDX message; received %lu bytes", 1408 sizeof (vio_rdx_msg_t), msglen); 1409 return 
(EBADMSG); 1410 } 1411 1412 PR0("Valid RDX message"); 1413 return (0); 1414 } 1415 1416 static int 1417 vd_check_seq_num(vd_t *vd, uint64_t seq_num) 1418 { 1419 if ((vd->initialized & VD_SEQ_NUM) && (seq_num != vd->seq_num + 1)) { 1420 PRN("Received seq_num %lu; expected %lu", 1421 seq_num, (vd->seq_num + 1)); 1422 vd_need_reset(vd, B_FALSE); 1423 return (1); 1424 } 1425 1426 vd->seq_num = seq_num; 1427 vd->initialized |= VD_SEQ_NUM; /* superfluous after first time... */ 1428 return (0); 1429 } 1430 1431 /* 1432 * Return the expected size of an inband-descriptor message with all the 1433 * cookies it claims to include 1434 */ 1435 static size_t 1436 expected_inband_size(vd_dring_inband_msg_t *msg) 1437 { 1438 return ((sizeof (*msg)) + 1439 (msg->payload.ncookies - 1)*(sizeof (msg->payload.cookie[0]))); 1440 } 1441 1442 /* 1443 * Process an in-band descriptor message: used with clients like OBP, with 1444 * which vds exchanges descriptors within VIO message payloads, rather than 1445 * operating on them within a descriptor ring 1446 */ 1447 static int 1448 vd_process_desc_msg(vd_t *vd, vio_msg_t *msg, size_t msglen, size_t msgsize) 1449 { 1450 size_t expected; 1451 vd_dring_inband_msg_t *desc_msg = (vd_dring_inband_msg_t *)msg; 1452 1453 1454 ASSERT(msglen >= sizeof (msg->tag)); 1455 1456 if (!vd_msgtype(&msg->tag, VIO_TYPE_DATA, VIO_SUBTYPE_INFO, 1457 VIO_DESC_DATA)) { 1458 PR1("Message is not an in-band-descriptor message"); 1459 return (ENOMSG); 1460 } 1461 1462 if (msglen < sizeof (*desc_msg)) { 1463 PRN("Expected at least %lu-byte descriptor message; " 1464 "received %lu bytes", sizeof (*desc_msg), msglen); 1465 return (EBADMSG); 1466 } 1467 1468 if (msglen != (expected = expected_inband_size(desc_msg))) { 1469 PRN("Expected %lu-byte descriptor message; " 1470 "received %lu bytes", expected, msglen); 1471 return (EBADMSG); 1472 } 1473 1474 if (vd_check_seq_num(vd, desc_msg->hdr.seq_num) != 0) 1475 return (EBADMSG); 1476 1477 /* 1478 * Valid message: Set up the in-band descriptor task and process the 1479 * request. 
	 * Arrange to acknowledge the client's message, unless an
	 * error processing the descriptor task results in setting
	 * VIO_SUBTYPE_NACK
	 */
	PR1("Valid in-band-descriptor message");
	msg->tag.vio_subtype = VIO_SUBTYPE_ACK;
	vd->inband_task.msg	= msg;
	vd->inband_task.msglen	= msglen;
	vd->inband_task.msgsize	= msgsize;
	vd->inband_task.request	= &desc_msg->payload;
	return (vd_process_task(&vd->inband_task));
}

static int
vd_process_element(vd_t *vd, vd_task_type_t type, uint32_t idx,
    vio_msg_t *msg, size_t msglen, size_t msgsize)
{
	int			status;
	boolean_t		ready;
	vd_dring_entry_t	*elem = VD_DRING_ELEM(idx);


	/* Accept the updated dring element */
	if ((status = ldc_mem_dring_acquire(vd->dring_handle, idx, idx)) != 0) {
		PRN("ldc_mem_dring_acquire() returned errno %d", status);
		return (status);
	}
	ready = (elem->hdr.dstate == VIO_DESC_READY);
	if (ready) {
		elem->hdr.dstate = VIO_DESC_ACCEPTED;
	} else {
		PRN("descriptor %u not ready", idx);
		VD_DUMP_DRING_ELEM(elem);
	}
	if ((status = ldc_mem_dring_release(vd->dring_handle, idx, idx)) != 0) {
		PRN("ldc_mem_dring_release() returned errno %d", status);
		return (status);
	}
	if (!ready)
		return (EBUSY);


	/* Initialize a task and process the accepted element */
	PR1("Processing dring element %u", idx);
	vd->dring_task[idx].type	= type;
	vd->dring_task[idx].msg		= msg;
	vd->dring_task[idx].msglen	= msglen;
	vd->dring_task[idx].msgsize	= msgsize;
	if ((status = vd_process_task(&vd->dring_task[idx])) != EINPROGRESS)
		status = vd_mark_elem_done(vd, idx, elem->payload.status);

	return (status);
}

static int
vd_process_element_range(vd_t *vd, int start, int end,
    vio_msg_t *msg, size_t msglen, size_t msgsize)
{
	int		i, n, nelem, status = 0;
	boolean_t	inprogress = B_FALSE;
	vd_task_type_t	type;


	ASSERT(start >= 0);
	ASSERT(end >= 0);

	/*
	 * Arrange to acknowledge the client's message, unless an error
	 * processing one of the dring elements results in setting
	 * VIO_SUBTYPE_NACK
	 */
	msg->tag.vio_subtype = VIO_SUBTYPE_ACK;

	/*
	 * Process the dring elements in the range
	 */
	nelem = ((end < start) ? end + vd->dring_len : end) - start + 1;
	for (i = start, n = nelem; n > 0; i = (i + 1) % vd->dring_len, n--) {
		((vio_dring_msg_t *)msg)->end_idx = i;
		type = (n == 1) ? VD_FINAL_RANGE_TASK : VD_NONFINAL_RANGE_TASK;
		status = vd_process_element(vd, type, i, msg, msglen, msgsize);
		if (status == EINPROGRESS)
			inprogress = B_TRUE;
		else if (status != 0)
			break;
	}

	/*
	 * If some, but not all, operations of a multi-element range are in
	 * progress, wait for other operations to complete before returning
	 * (which will result in "ack" or "nack" of the message).  Note that
	 * all outstanding operations will need to complete, not just the ones
	 * corresponding to the current range of dring elements; however, as
	 * this situation is an error case, performance is less critical.
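	 * As with vd_reset_if_needed(), waiting on the "completionq" here is
	 * safe because element-range processing runs from the "startq".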
1573 */ 1574 if ((nelem > 1) && (status != EINPROGRESS) && inprogress) 1575 ddi_taskq_wait(vd->completionq); 1576 1577 return (status); 1578 } 1579 1580 static int 1581 vd_process_dring_msg(vd_t *vd, vio_msg_t *msg, size_t msglen, size_t msgsize) 1582 { 1583 vio_dring_msg_t *dring_msg = (vio_dring_msg_t *)msg; 1584 1585 1586 ASSERT(msglen >= sizeof (msg->tag)); 1587 1588 if (!vd_msgtype(&msg->tag, VIO_TYPE_DATA, VIO_SUBTYPE_INFO, 1589 VIO_DRING_DATA)) { 1590 PR1("Message is not a dring-data message"); 1591 return (ENOMSG); 1592 } 1593 1594 if (msglen != sizeof (*dring_msg)) { 1595 PRN("Expected %lu-byte dring message; received %lu bytes", 1596 sizeof (*dring_msg), msglen); 1597 return (EBADMSG); 1598 } 1599 1600 if (vd_check_seq_num(vd, dring_msg->seq_num) != 0) 1601 return (EBADMSG); 1602 1603 if (dring_msg->dring_ident != vd->dring_ident) { 1604 PRN("Expected dring ident %lu; received ident %lu", 1605 vd->dring_ident, dring_msg->dring_ident); 1606 return (EBADMSG); 1607 } 1608 1609 if (dring_msg->start_idx >= vd->dring_len) { 1610 PRN("\"start_idx\" = %u; must be less than %u", 1611 dring_msg->start_idx, vd->dring_len); 1612 return (EBADMSG); 1613 } 1614 1615 if ((dring_msg->end_idx < 0) || 1616 (dring_msg->end_idx >= vd->dring_len)) { 1617 PRN("\"end_idx\" = %u; must be >= 0 and less than %u", 1618 dring_msg->end_idx, vd->dring_len); 1619 return (EBADMSG); 1620 } 1621 1622 /* Valid message; process range of updated dring elements */ 1623 PR1("Processing descriptor range, start = %u, end = %u", 1624 dring_msg->start_idx, dring_msg->end_idx); 1625 return (vd_process_element_range(vd, dring_msg->start_idx, 1626 dring_msg->end_idx, msg, msglen, msgsize)); 1627 } 1628 1629 static int 1630 recv_msg(ldc_handle_t ldc_handle, void *msg, size_t *nbytes) 1631 { 1632 int retry, status; 1633 size_t size = *nbytes; 1634 1635 1636 for (retry = 0, status = ETIMEDOUT; 1637 retry < vds_ldc_retries && status == ETIMEDOUT; 1638 retry++) { 1639 PR1("ldc_read() attempt %d", (retry + 1)); 1640 *nbytes = size; 1641 status = ldc_read(ldc_handle, msg, nbytes); 1642 } 1643 1644 if (status != 0) { 1645 PRN("ldc_read() returned errno %d", status); 1646 return (status); 1647 } else if (*nbytes == 0) { 1648 PR1("ldc_read() returned 0 and no message read"); 1649 return (ENOMSG); 1650 } 1651 1652 PR1("RCVD %lu-byte message", *nbytes); 1653 return (0); 1654 } 1655 1656 static int 1657 vd_do_process_msg(vd_t *vd, vio_msg_t *msg, size_t msglen, size_t msgsize) 1658 { 1659 int status; 1660 1661 1662 PR1("Processing (%x/%x/%x) message", msg->tag.vio_msgtype, 1663 msg->tag.vio_subtype, msg->tag.vio_subtype_env); 1664 1665 /* 1666 * Validate session ID up front, since it applies to all messages 1667 * once set 1668 */ 1669 if ((msg->tag.vio_sid != vd->sid) && (vd->initialized & VD_SID)) { 1670 PRN("Expected SID %u, received %u", vd->sid, 1671 msg->tag.vio_sid); 1672 return (EBADMSG); 1673 } 1674 1675 1676 /* 1677 * Process the received message based on connection state 1678 */ 1679 switch (vd->state) { 1680 case VD_STATE_INIT: /* expect version message */ 1681 if ((status = vd_process_ver_msg(vd, msg, msglen)) != 0) 1682 return (status); 1683 1684 /* Version negotiated, move to that state */ 1685 vd->state = VD_STATE_VER; 1686 return (0); 1687 1688 case VD_STATE_VER: /* expect attribute message */ 1689 if ((status = vd_process_attr_msg(vd, msg, msglen)) != 0) 1690 return (status); 1691 1692 /* Attributes exchanged, move to that state */ 1693 vd->state = VD_STATE_ATTR; 1694 return (0); 1695 1696 case VD_STATE_ATTR: 1697 switch 
(vd->xfer_mode) { 1698 case VIO_DESC_MODE: /* expect RDX message */ 1699 if ((status = process_rdx_msg(msg, msglen)) != 0) 1700 return (status); 1701 1702 /* Ready to receive in-band descriptors */ 1703 vd->state = VD_STATE_DATA; 1704 return (0); 1705 1706 case VIO_DRING_MODE: /* expect register-dring message */ 1707 if ((status = 1708 vd_process_dring_reg_msg(vd, msg, msglen)) != 0) 1709 return (status); 1710 1711 /* One dring negotiated, move to that state */ 1712 vd->state = VD_STATE_DRING; 1713 return (0); 1714 1715 default: 1716 ASSERT("Unsupported transfer mode"); 1717 PRN("Unsupported transfer mode"); 1718 return (ENOTSUP); 1719 } 1720 1721 case VD_STATE_DRING: /* expect RDX, register-dring, or unreg-dring */ 1722 if ((status = process_rdx_msg(msg, msglen)) == 0) { 1723 /* Ready to receive data */ 1724 vd->state = VD_STATE_DATA; 1725 return (0); 1726 } else if (status != ENOMSG) { 1727 return (status); 1728 } 1729 1730 1731 /* 1732 * If another register-dring message is received, stay in 1733 * dring state in case the client sends RDX; although the 1734 * protocol allows multiple drings, this server does not 1735 * support using more than one 1736 */ 1737 if ((status = 1738 vd_process_dring_reg_msg(vd, msg, msglen)) != ENOMSG) 1739 return (status); 1740 1741 /* 1742 * Acknowledge an unregister-dring message, but reset the 1743 * connection anyway: Although the protocol allows 1744 * unregistering drings, this server cannot serve a vdisk 1745 * without its only dring 1746 */ 1747 status = vd_process_dring_unreg_msg(vd, msg, msglen); 1748 return ((status == 0) ? ENOTSUP : status); 1749 1750 case VD_STATE_DATA: 1751 switch (vd->xfer_mode) { 1752 case VIO_DESC_MODE: /* expect in-band-descriptor message */ 1753 return (vd_process_desc_msg(vd, msg, msglen, msgsize)); 1754 1755 case VIO_DRING_MODE: /* expect dring-data or unreg-dring */ 1756 /* 1757 * Typically expect dring-data messages, so handle 1758 * them first 1759 */ 1760 if ((status = vd_process_dring_msg(vd, msg, 1761 msglen, msgsize)) != ENOMSG) 1762 return (status); 1763 1764 /* 1765 * Acknowledge an unregister-dring message, but reset 1766 * the connection anyway: Although the protocol 1767 * allows unregistering drings, this server cannot 1768 * serve a vdisk without its only dring 1769 */ 1770 status = vd_process_dring_unreg_msg(vd, msg, msglen); 1771 return ((status == 0) ? 
ENOTSUP : status); 1772 1773 default: 1774 ASSERT("Unsupported transfer mode"); 1775 PRN("Unsupported transfer mode"); 1776 return (ENOTSUP); 1777 } 1778 1779 default: 1780 ASSERT("Invalid client connection state"); 1781 PRN("Invalid client connection state"); 1782 return (ENOTSUP); 1783 } 1784 } 1785 1786 static int 1787 vd_process_msg(vd_t *vd, vio_msg_t *msg, size_t msglen, size_t msgsize) 1788 { 1789 int status; 1790 boolean_t reset_ldc = B_FALSE; 1791 1792 1793 /* 1794 * Check that the message is at least big enough for a "tag", so that 1795 * message processing can proceed based on tag-specified message type 1796 */ 1797 if (msglen < sizeof (vio_msg_tag_t)) { 1798 PRN("Received short (%lu-byte) message", msglen); 1799 /* Can't "nack" short message, so drop the big hammer */ 1800 vd_need_reset(vd, B_TRUE); 1801 return (EBADMSG); 1802 } 1803 1804 /* 1805 * Process the message 1806 */ 1807 switch (status = vd_do_process_msg(vd, msg, msglen, msgsize)) { 1808 case 0: 1809 /* "ack" valid, successfully-processed messages */ 1810 msg->tag.vio_subtype = VIO_SUBTYPE_ACK; 1811 break; 1812 1813 case EINPROGRESS: 1814 /* The completion handler will "ack" or "nack" the message */ 1815 return (EINPROGRESS); 1816 case ENOMSG: 1817 PRN("Received unexpected message"); 1818 _NOTE(FALLTHROUGH); 1819 case EBADMSG: 1820 case ENOTSUP: 1821 /* "nack" invalid messages */ 1822 msg->tag.vio_subtype = VIO_SUBTYPE_NACK; 1823 break; 1824 1825 default: 1826 /* "nack" failed messages */ 1827 msg->tag.vio_subtype = VIO_SUBTYPE_NACK; 1828 /* An LDC error probably occurred, so try resetting it */ 1829 reset_ldc = B_TRUE; 1830 break; 1831 } 1832 1833 /* Send the "ack" or "nack" to the client */ 1834 PR1("Sending %s", 1835 (msg->tag.vio_subtype == VIO_SUBTYPE_ACK) ? "ACK" : "NACK"); 1836 if (send_msg(vd->ldc_handle, msg, msglen) != 0) 1837 reset_ldc = B_TRUE; 1838 1839 /* Arrange to reset the connection for nack'ed or failed messages */ 1840 if ((status != 0) || reset_ldc) 1841 vd_need_reset(vd, reset_ldc); 1842 1843 return (status); 1844 } 1845 1846 static boolean_t 1847 vd_enabled(vd_t *vd) 1848 { 1849 boolean_t enabled; 1850 1851 1852 mutex_enter(&vd->lock); 1853 enabled = vd->enabled; 1854 mutex_exit(&vd->lock); 1855 return (enabled); 1856 } 1857 1858 static void 1859 vd_recv_msg(void *arg) 1860 { 1861 vd_t *vd = (vd_t *)arg; 1862 int status = 0; 1863 1864 1865 ASSERT(vd != NULL); 1866 PR2("New task to receive incoming message(s)"); 1867 while (vd_enabled(vd) && status == 0) { 1868 size_t msglen, msgsize; 1869 vio_msg_t *vio_msg; 1870 1871 1872 /* 1873 * Receive and process a message 1874 */ 1875 vd_reset_if_needed(vd); /* can change vd->max_msglen */ 1876 msgsize = vd->max_msglen; /* stable copy for alloc/free */ 1877 msglen = msgsize; /* actual length after recv_msg() */ 1878 vio_msg = kmem_alloc(msgsize, KM_SLEEP); 1879 if ((status = recv_msg(vd->ldc_handle, vio_msg, &msglen)) == 1880 0) { 1881 if (vd_process_msg(vd, vio_msg, msglen, msgsize) == 1882 EINPROGRESS) 1883 continue; /* handler will free msg */ 1884 } else if (status != ENOMSG) { 1885 /* Probably an LDC failure; arrange to reset it */ 1886 vd_need_reset(vd, B_TRUE); 1887 } 1888 kmem_free(vio_msg, msgsize); 1889 } 1890 PR2("Task finished"); 1891 } 1892 1893 static uint_t 1894 vd_handle_ldc_events(uint64_t event, caddr_t arg) 1895 { 1896 vd_t *vd = (vd_t *)(void *)arg; 1897 1898 1899 ASSERT(vd != NULL); 1900 1901 if (!vd_enabled(vd)) 1902 return (LDC_SUCCESS); 1903 1904 if (event & LDC_EVT_RESET) { 1905 PR0("LDC channel was reset"); 1906 return 
		return (LDC_SUCCESS);
	}

	if (event & LDC_EVT_UP) {
		PR0("LDC channel came up: Resetting client connection state");
		vd_need_reset(vd, B_FALSE);
	}

	if (event & LDC_EVT_READ) {
		int	status;

		PR1("New data available");
		/* Queue a task to receive the new data */
		status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, vd,
		    DDI_SLEEP);
		/* ddi_taskq_dispatch(9f) guarantees success with DDI_SLEEP */
		ASSERT(status == DDI_SUCCESS);
	}

	return (LDC_SUCCESS);
}

static uint_t
vds_check_for_vd(mod_hash_key_t key, mod_hash_val_t *val, void *arg)
{
	_NOTE(ARGUNUSED(key, val))
	(*((uint_t *)arg))++;
	return (MH_WALK_TERMINATE);
}


static int
vds_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	uint_t	vd_present = 0;
	minor_t	instance;
	vds_t	*vds;


	switch (cmd) {
	case DDI_DETACH:
		/* the real work happens below */
		break;
	case DDI_SUSPEND:
		PR0("No action required for DDI_SUSPEND");
		return (DDI_SUCCESS);
	default:
		PRN("Unrecognized \"cmd\"");
		return (DDI_FAILURE);
	}

	ASSERT(cmd == DDI_DETACH);
	instance = ddi_get_instance(dip);
	if ((vds = ddi_get_soft_state(vds_state, instance)) == NULL) {
		PRN("Could not get state for instance %u", instance);
		ddi_soft_state_free(vds_state, instance);
		return (DDI_FAILURE);
	}

	/* Do not detach when serving any vdisks */
	mod_hash_walk(vds->vd_table, vds_check_for_vd, &vd_present);
	if (vd_present) {
		PR0("Not detaching because serving vdisks");
		return (DDI_FAILURE);
	}

	PR0("Detaching");
	if (vds->initialized & VDS_MDEG)
		(void) mdeg_unregister(vds->mdeg);
	if (vds->initialized & VDS_LDI)
		(void) ldi_ident_release(vds->ldi_ident);
	mod_hash_destroy_hash(vds->vd_table);
	ddi_soft_state_free(vds_state, instance);
	return (DDI_SUCCESS);
}

static boolean_t
is_pseudo_device(dev_info_t *dip)
{
	dev_info_t *parent, *root = ddi_root_node();


	for (parent = ddi_get_parent(dip); (parent != NULL) && (parent != root);
	    parent = ddi_get_parent(parent)) {
		if (strcmp(ddi_get_name(parent), DEVI_PSEUDO_NEXNAME) == 0)
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Set up a vdisk that represents an entire disk:  determine the true disk
 * size (the entire-disk slice does not necessarily cover it), and open each
 * of the disk's remaining non-empty slices for serving to the client.
 */
static int
vd_setup_full_disk(vd_t *vd)
{
	int		rval, status;
	major_t		major = getmajor(vd->dev[0]);
	minor_t		minor = getminor(vd->dev[0]) - VD_ENTIRE_DISK_SLICE;
	struct dk_minfo	dk_minfo;

	/*
	 * At this point, vdisk_size is set to the size of partition 2 but
	 * this does not represent the size of the disk because partition 2
	 * may not cover the entire disk and its size does not include reserved
	 * blocks.  So we update vdisk_size to be the size of the entire disk.
	 */
	if ((status = ldi_ioctl(vd->ldi_handle[0], DKIOCGMEDIAINFO,
	    (intptr_t)&dk_minfo, (vd_open_flags | FKIOCTL),
	    kcred, &rval)) != 0) {
		PRN("ldi_ioctl(DKIOCGMEDIAINFO) returned errno %d",
		    status);
		return (status);
	}
	vd->vdisk_size = dk_minfo.dki_capacity;

	/* Set full-disk parameters */
	vd->vdisk_type = VD_DISK_TYPE_DISK;
	vd->nslices = (sizeof (vd->dev))/(sizeof (vd->dev[0]));

	/* Move dev number and LDI handle to entire-disk-slice array elements */
	vd->dev[VD_ENTIRE_DISK_SLICE] = vd->dev[0];
	vd->dev[0] = 0;
	vd->ldi_handle[VD_ENTIRE_DISK_SLICE] = vd->ldi_handle[0];
	vd->ldi_handle[0] = NULL;

	/* Initialize device numbers for remaining slices and open them */
	for (int slice = 0; slice < vd->nslices; slice++) {
		/*
		 * Skip the entire-disk slice, as it's already open and its
		 * device known
		 */
		if (slice == VD_ENTIRE_DISK_SLICE)
			continue;
		ASSERT(vd->dev[slice] == 0);
		ASSERT(vd->ldi_handle[slice] == NULL);

		/*
		 * Construct the device number for the current slice
		 */
		vd->dev[slice] = makedevice(major, (minor + slice));

		/*
		 * At least some underlying drivers refuse to open
		 * devices for (currently) zero-length slices, so skip
		 * them for now
		 */
		if (vd->vtoc.v_part[slice].p_size == 0) {
			PR0("Skipping zero-length slice %u", slice);
			continue;
		}

		/*
		 * Open all non-empty slices of the disk to serve them to the
		 * client.  Slices are opened exclusively to prevent other
		 * threads or processes in the service domain from performing
		 * I/O to slices being accessed by a client.  Failure to open
		 * a slice results in vds not serving this disk, as the client
		 * could attempt (and should be able) to access any non-empty
		 * slice immediately.  Any slices successfully opened before a
		 * failure will get closed by vds_destroy_vd() as a result of
		 * the error returned by this function.
		 */
		PR0("Opening device major %u, minor %u = slice %u",
		    major, minor, slice);
		if ((status = ldi_open_by_dev(&vd->dev[slice], OTYP_BLK,
		    vd_open_flags, kcred, &vd->ldi_handle[slice],
		    vd->vds->ldi_ident)) != 0) {
			PRN("ldi_open_by_dev() returned errno %d "
			    "for slice %u", status, slice);
			/* vds_destroy_vd() will close any open slices */
			return (status);
		}
	}

	return (0);
}

static int
vd_setup_partition_efi(vd_t *vd)
{
	efi_gpt_t *gpt;
	efi_gpe_t *gpe;
	struct uuid uuid = EFI_RESERVED;
	uint32_t crc;
	int length;

	length = sizeof (efi_gpt_t) + sizeof (efi_gpe_t);

	gpt = kmem_zalloc(length, KM_SLEEP);
	gpe = (efi_gpe_t *)(gpt + 1);

	gpt->efi_gpt_Signature = LE_64(EFI_SIGNATURE);
	gpt->efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
	gpt->efi_gpt_HeaderSize = LE_32(sizeof (efi_gpt_t));
	gpt->efi_gpt_FirstUsableLBA = LE_64(0ULL);
	gpt->efi_gpt_LastUsableLBA = LE_64(vd->vdisk_size - 1);
	gpt->efi_gpt_NumberOfPartitionEntries = LE_32(1);
	gpt->efi_gpt_SizeOfPartitionEntry = LE_32(sizeof (efi_gpe_t));

	UUID_LE_CONVERT(gpe->efi_gpe_PartitionTypeGUID, uuid);
	gpe->efi_gpe_StartingLBA = gpt->efi_gpt_FirstUsableLBA;
	gpe->efi_gpe_EndingLBA = gpt->efi_gpt_LastUsableLBA;

	CRC32(crc, gpe, sizeof (efi_gpe_t), -1U, crc32_table);
	gpt->efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);

	CRC32(crc, gpt, sizeof (efi_gpt_t), -1U, crc32_table);
	gpt->efi_gpt_HeaderCRC32 = LE_32(~crc);

	vd->dk_efi.dki_lba = 0;
	vd->dk_efi.dki_length = length;
	vd->dk_efi.dki_data = gpt;

	return (0);
}

/*
 * Open the backing device for a vdisk and initialize its parameters
 * (device numbers, size, label, and geometry), setting it up as either a
 * single-slice or an entire-disk vdisk.
 */
static int
vd_setup_vd(char *device_path, vd_t *vd)
{
	int		rval, status;
	dev_info_t	*dip;
	struct dk_cinfo	dk_cinfo;

	/*
	 * We need to open with FNDELAY so that opening an empty partition
	 * does not fail.
	 */
	if ((status = ldi_open_by_name(device_path, vd_open_flags | FNDELAY,
	    kcred, &vd->ldi_handle[0], vd->vds->ldi_ident)) != 0) {
		PRN("ldi_open_by_name(%s) = errno %d", device_path, status);
		return (status);
	}

	/*
	 * nslices must be updated now so that vds_destroy_vd() will close
	 * the slice we have just opened in case of an error.
	 */
	vd->nslices = 1;

	/* Get device number and size of backing device */
	if ((status = ldi_get_dev(vd->ldi_handle[0], &vd->dev[0])) != 0) {
		PRN("ldi_get_dev() returned errno %d for %s",
		    status, device_path);
		return (status);
	}
	if (ldi_get_size(vd->ldi_handle[0], &vd->vdisk_size) != DDI_SUCCESS) {
		PRN("ldi_get_size() failed for %s", device_path);
		return (EIO);
	}
	vd->vdisk_size = lbtodb(vd->vdisk_size);	/* convert to blocks */

	/* Verify backing device supports dk_cinfo, dk_geom, and vtoc */
	if ((status = ldi_ioctl(vd->ldi_handle[0], DKIOCINFO,
	    (intptr_t)&dk_cinfo, (vd_open_flags | FKIOCTL), kcred,
	    &rval)) != 0) {
		PRN("ldi_ioctl(DKIOCINFO) returned errno %d for %s",
		    status, device_path);
		return (status);
	}
	if (dk_cinfo.dki_partition >= V_NUMPAR) {
		PRN("slice %u >= maximum slice %u for %s",
		    dk_cinfo.dki_partition, V_NUMPAR, device_path);
		return (EIO);
	}

	status = vd_read_vtoc(vd->ldi_handle[0], &vd->vtoc, &vd->vdisk_label);

	if (status != 0) {
		PRN("vd_read_vtoc returned errno %d for %s",
		    status, device_path);
		return (status);
	}

	if (vd->vdisk_label == VD_DISK_LABEL_VTOC &&
	    (status = ldi_ioctl(vd->ldi_handle[0], DKIOCGGEOM,
	    (intptr_t)&vd->dk_geom, (vd_open_flags | FKIOCTL),
	    kcred, &rval)) != 0) {
		PRN("ldi_ioctl(DKIOCGGEOM) returned errno %d for %s",
		    status, device_path);
		return (status);
	}

	/* Store the device's max transfer size for return to the client */
	vd->max_xfer_sz = dk_cinfo.dki_maxtransfer;


	/* Determine if backing device is a pseudo device */
	if ((dip = ddi_hold_devi_by_instance(getmajor(vd->dev[0]),
	    dev_to_instance(vd->dev[0]), 0)) == NULL) {
		PRN("%s is no longer accessible", device_path);
		return (EIO);
	}
	vd->pseudo = is_pseudo_device(dip);
	ddi_release_devi(dip);
	if (vd->pseudo) {
		vd->vdisk_type = VD_DISK_TYPE_SLICE;
		vd->nslices = 1;
		return (0);	/* ...and we're done */
	}


	/* If slice is entire-disk slice, initialize for full disk */
	if (dk_cinfo.dki_partition == VD_ENTIRE_DISK_SLICE)
		return (vd_setup_full_disk(vd));


	/* Otherwise, we have a non-entire slice of a device */
	vd->vdisk_type = VD_DISK_TYPE_SLICE;
	vd->nslices = 1;

	if (vd->vdisk_label == VD_DISK_LABEL_EFI) {
		status = vd_setup_partition_efi(vd);
		return (status);
	}

	/* Initialize dk_geom structure for single-slice device */
	if (vd->dk_geom.dkg_nsect == 0) {
		PRN("%s geometry claims 0 sectors per track", device_path);
		return (EIO);
	}
	if (vd->dk_geom.dkg_nhead == 0) {
		PRN("%s geometry claims 0 heads", device_path);
		return (EIO);
	}
	vd->dk_geom.dkg_ncyl =
	    vd->vdisk_size/vd->dk_geom.dkg_nsect/vd->dk_geom.dkg_nhead;
	vd->dk_geom.dkg_acyl = 0;
	vd->dk_geom.dkg_pcyl = vd->dk_geom.dkg_ncyl + vd->dk_geom.dkg_acyl;


	/* Initialize vtoc structure for single-slice device */
	bcopy(VD_VOLUME_NAME, vd->vtoc.v_volume,
	    MIN(sizeof (VD_VOLUME_NAME), sizeof (vd->vtoc.v_volume)));
	bzero(vd->vtoc.v_part, sizeof (vd->vtoc.v_part));
	vd->vtoc.v_nparts = 1;
	vd->vtoc.v_part[0].p_tag = V_UNASSIGNED;
	vd->vtoc.v_part[0].p_flag = 0;
	vd->vtoc.v_part[0].p_start = 0;
	vd->vtoc.v_part[0].p_size = vd->vdisk_size;
	bcopy(VD_ASCIILABEL, vd->vtoc.v_asciilabel,
	    MIN(sizeof (VD_ASCIILABEL), sizeof (vd->vtoc.v_asciilabel)));


	return (0);
}

static int
vds_do_init_vd(vds_t *vds, uint64_t id, char *device_path, uint64_t ldc_id,
    vd_t **vdp)
{
	char			tq_name[TASKQ_NAMELEN];
	int			status;
	ddi_iblock_cookie_t	iblock = NULL;
	ldc_attr_t		ldc_attr;
	vd_t			*vd;


	ASSERT(vds != NULL);
	ASSERT(device_path != NULL);
	ASSERT(vdp != NULL);
	PR0("Adding vdisk for %s", device_path);

	if ((vd = kmem_zalloc(sizeof (*vd), KM_NOSLEEP)) == NULL) {
		PRN("No memory for virtual disk");
		return (EAGAIN);
	}
	*vdp = vd;	/* assign here so vds_destroy_vd() can cleanup later */
	vd->vds = vds;


	/* Open vdisk and initialize parameters */
	if ((status = vd_setup_vd(device_path, vd)) != 0)
		return (status);
	ASSERT(vd->nslices > 0 && vd->nslices <= V_NUMPAR);
	PR0("vdisk_type = %s, pseudo = %s, nslices = %u",
	    ((vd->vdisk_type == VD_DISK_TYPE_DISK) ? "disk" : "slice"),
	    (vd->pseudo ? "yes" : "no"), vd->nslices);


	/* Initialize locking */
	if (ddi_get_soft_iblock_cookie(vds->dip, DDI_SOFTINT_MED,
	    &iblock) != DDI_SUCCESS) {
		PRN("Could not get iblock cookie.");
		return (EIO);
	}

	mutex_init(&vd->lock, NULL, MUTEX_DRIVER, iblock);
	vd->initialized |= VD_LOCKING;


	/* Create start and completion task queues for the vdisk */
	(void) snprintf(tq_name, sizeof (tq_name), "vd_startq%lu", id);
	PR1("tq_name = %s", tq_name);
	if ((vd->startq = ddi_taskq_create(vds->dip, tq_name, 1,
	    TASKQ_DEFAULTPRI, 0)) == NULL) {
		PRN("Could not create task queue");
		return (EIO);
	}
	(void) snprintf(tq_name, sizeof (tq_name), "vd_completionq%lu", id);
	PR1("tq_name = %s", tq_name);
	if ((vd->completionq = ddi_taskq_create(vds->dip, tq_name, 1,
	    TASKQ_DEFAULTPRI, 0)) == NULL) {
		PRN("Could not create task queue");
		return (EIO);
	}
	vd->enabled = 1;	/* before callback can dispatch to startq */


	/* Bring up LDC */
	ldc_attr.devclass = LDC_DEV_BLK_SVC;
	ldc_attr.instance = ddi_get_instance(vds->dip);
	ldc_attr.mode = LDC_MODE_UNRELIABLE;
	ldc_attr.mtu = VD_LDC_MTU;
	if ((status = ldc_init(ldc_id, &ldc_attr, &vd->ldc_handle)) != 0) {
		PRN("ldc_init(%lu) = errno %d", ldc_id, status);
		return (status);
	}
	vd->initialized |= VD_LDC;

	if ((status = ldc_reg_callback(vd->ldc_handle, vd_handle_ldc_events,
	    (caddr_t)vd)) != 0) {
		PRN("ldc_reg_callback() returned errno %d", status);
		return (status);
	}

	if ((status = ldc_open(vd->ldc_handle)) != 0) {
		PRN("ldc_open() returned errno %d", status);
		return (status);
	}

	/* Allocate the inband task memory handle */
	status = ldc_mem_alloc_handle(vd->ldc_handle, &(vd->inband_task.mhdl));
	if (status) {
		PRN("ldc_mem_alloc_handle() returned err %d", status);
		return (ENXIO);
	}

	/* Add the successfully-initialized vdisk to the server's table */
	if (mod_hash_insert(vds->vd_table, (mod_hash_key_t)id, vd) != 0) {
		PRN("Error adding vdisk ID %lu to table", id);
		return (EIO);
	}

	return (0);
}

/*
 * Destroy the state associated with a virtual disk
 */
static void
vds_destroy_vd(void *arg)
{
	vd_t	*vd = (vd_t *)arg;


	if (vd == NULL)
		return;

	PR0("Destroying vdisk state");

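	/*
	 * This function is also the value destructor for vds->vd_table and
	 * is invoked by vds_init_vd() on a partially initialized vdisk, so
	 * each step below first checks that the resource it releases was
	 * actually set up.
	 */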
	if (vd->dk_efi.dki_data != NULL)
		kmem_free(vd->dk_efi.dki_data, vd->dk_efi.dki_length);

	/* Disable queuing requests for the vdisk */
	if (vd->initialized & VD_LOCKING) {
		mutex_enter(&vd->lock);
		vd->enabled = 0;
		mutex_exit(&vd->lock);
	}

	/* Drain and destroy start queue (*before* destroying completionq) */
	if (vd->startq != NULL)
		ddi_taskq_destroy(vd->startq);	/* waits for queued tasks */

	/* Drain and destroy completion queue (*before* shutting down LDC) */
	if (vd->completionq != NULL)
		ddi_taskq_destroy(vd->completionq);	/* waits for tasks */

	if (vd->dring_task != NULL) {
		ASSERT(vd->dring_len != 0);
		/* Free all dring_task memory handles */
		for (int i = 0; i < vd->dring_len; i++)
			(void) ldc_mem_free_handle(vd->dring_task[i].mhdl);
		kmem_free(vd->dring_task,
		    (sizeof (*vd->dring_task)) * vd->dring_len);
	}

	/* Free the inband task memory handle */
	(void) ldc_mem_free_handle(vd->inband_task.mhdl);

	/* Shut down LDC */
	if (vd->initialized & VD_LDC) {
		if (vd->initialized & VD_DRING)
			(void) ldc_mem_dring_unmap(vd->dring_handle);
		(void) ldc_unreg_callback(vd->ldc_handle);
		(void) ldc_close(vd->ldc_handle);
		(void) ldc_fini(vd->ldc_handle);
	}

	/* Close any open backing-device slices */
	for (uint_t slice = 0; slice < vd->nslices; slice++) {
		if (vd->ldi_handle[slice] != NULL) {
			PR0("Closing slice %u", slice);
			(void) ldi_close(vd->ldi_handle[slice],
			    vd_open_flags | FNDELAY, kcred);
		}
	}

	/* Free lock */
	if (vd->initialized & VD_LOCKING)
		mutex_destroy(&vd->lock);

	/* Finally, free the vdisk structure itself */
	kmem_free(vd, sizeof (*vd));
}

static int
vds_init_vd(vds_t *vds, uint64_t id, char *device_path, uint64_t ldc_id)
{
	int	status;
	vd_t	*vd = NULL;


#ifdef lint
	(void) vd;
#endif	/* lint */

	if ((status = vds_do_init_vd(vds, id, device_path, ldc_id, &vd)) != 0)
		vds_destroy_vd(vd);

	return (status);
}

static int
vds_do_get_ldc_id(md_t *md, mde_cookie_t vd_node, mde_cookie_t *channel,
    uint64_t *ldc_id)
{
	int	num_channels;


	/* Look for channel endpoint child(ren) of the vdisk MD node */
	if ((num_channels = md_scan_dag(md, vd_node,
	    md_find_name(md, VD_CHANNEL_ENDPOINT),
	    md_find_name(md, "fwd"), channel)) <= 0) {
		PRN("No \"%s\" found for virtual disk", VD_CHANNEL_ENDPOINT);
		return (-1);
	}

	/* Get the "id" value for the first channel endpoint node */
	if (md_get_prop_val(md, channel[0], VD_ID_PROP, ldc_id) != 0) {
		PRN("No \"%s\" property found for \"%s\" of vdisk",
		    VD_ID_PROP, VD_CHANNEL_ENDPOINT);
		return (-1);
	}

	if (num_channels > 1) {
		PRN("Using ID of first of multiple channels for this vdisk");
	}

	return (0);
}

static int
vds_get_ldc_id(md_t *md, mde_cookie_t vd_node, uint64_t *ldc_id)
{
	int		num_nodes, status;
	size_t		size;
	mde_cookie_t	*channel;


	if ((num_nodes = md_node_count(md)) <= 0) {
		PRN("Invalid node count in Machine Description subtree");
		return (-1);
	}
	size = num_nodes*(sizeof (*channel));
	channel = kmem_zalloc(size, KM_SLEEP);
	status = vds_do_get_ldc_id(md, vd_node, channel, ldc_id);
	kmem_free(channel, size);

	return (status);
}

static void
vds_add_vd(vds_t *vds, md_t *md, mde_cookie_t vd_node)
{
	char		*device_path = NULL;
	uint64_t	id = 0, ldc_id = 0;


	if (md_get_prop_val(md, vd_node, VD_ID_PROP, &id) != 0) {
		PRN("Error getting vdisk \"%s\"", VD_ID_PROP);
		return;
	}
	PR0("Adding vdisk ID %lu", id);
	if (md_get_prop_str(md, vd_node, VD_BLOCK_DEVICE_PROP,
	    &device_path) != 0) {
		PRN("Error getting vdisk \"%s\"", VD_BLOCK_DEVICE_PROP);
		return;
	}

	if (vds_get_ldc_id(md, vd_node, &ldc_id) != 0) {
		PRN("Error getting LDC ID for vdisk %lu", id);
		return;
	}

	if (vds_init_vd(vds, id, device_path, ldc_id) != 0) {
		PRN("Failed to add vdisk ID %lu", id);
		return;
	}
}

static void
vds_remove_vd(vds_t *vds, md_t *md, mde_cookie_t vd_node)
{
	uint64_t	id = 0;


	if (md_get_prop_val(md, vd_node, VD_ID_PROP, &id) != 0) {
		PRN("Unable to get \"%s\" property from vdisk's MD node",
		    VD_ID_PROP);
		return;
	}
	PR0("Removing vdisk ID %lu", id);
	if (mod_hash_destroy(vds->vd_table, (mod_hash_key_t)id) != 0)
		PRN("No vdisk entry found for vdisk ID %lu", id);
}

static void
vds_change_vd(vds_t *vds, md_t *prev_md, mde_cookie_t prev_vd_node,
    md_t *curr_md, mde_cookie_t curr_vd_node)
{
	char		*curr_dev, *prev_dev;
	uint64_t	curr_id = 0, curr_ldc_id = 0;
	uint64_t	prev_id = 0, prev_ldc_id = 0;
	size_t		len;


	/* Validate that vdisk ID has not changed */
	if (md_get_prop_val(prev_md, prev_vd_node, VD_ID_PROP, &prev_id) != 0) {
		PRN("Error getting previous vdisk \"%s\" property",
		    VD_ID_PROP);
		return;
	}
	if (md_get_prop_val(curr_md, curr_vd_node, VD_ID_PROP, &curr_id) != 0) {
		PRN("Error getting current vdisk \"%s\" property", VD_ID_PROP);
		return;
	}
	if (curr_id != prev_id) {
		PRN("Not changing vdisk: ID changed from %lu to %lu",
		    prev_id, curr_id);
		return;
	}

	/* Validate that LDC ID has not changed */
	if (vds_get_ldc_id(prev_md, prev_vd_node, &prev_ldc_id) != 0) {
		PRN("Error getting LDC ID for vdisk %lu", prev_id);
		return;
	}

	if (vds_get_ldc_id(curr_md, curr_vd_node, &curr_ldc_id) != 0) {
		PRN("Error getting LDC ID for vdisk %lu", curr_id);
		return;
	}
	if (curr_ldc_id != prev_ldc_id) {
		_NOTE(NOTREACHED);	/* lint is confused */
		PRN("Not changing vdisk: "
		    "LDC ID changed from %lu to %lu", prev_ldc_id, curr_ldc_id);
		return;
	}

	/* Determine whether device path has changed */
	if (md_get_prop_str(prev_md, prev_vd_node, VD_BLOCK_DEVICE_PROP,
	    &prev_dev) != 0) {
		PRN("Error getting previous vdisk \"%s\"",
		    VD_BLOCK_DEVICE_PROP);
		return;
	}
	if (md_get_prop_str(curr_md, curr_vd_node, VD_BLOCK_DEVICE_PROP,
	    &curr_dev) != 0) {
		PRN("Error getting current vdisk \"%s\"", VD_BLOCK_DEVICE_PROP);
		return;
	}
	if (((len = strlen(curr_dev)) == strlen(prev_dev)) &&
	    (strncmp(curr_dev, prev_dev, len) == 0))
		return;	/* no relevant (supported) change */

	PR0("Changing vdisk ID %lu", prev_id);
	/* Remove old state, which will close vdisk and reset */
	if (mod_hash_destroy(vds->vd_table, (mod_hash_key_t)prev_id) != 0)
		PRN("No entry found for vdisk ID %lu", prev_id);
	/* Re-initialize vdisk with new state */
	if (vds_init_vd(vds, curr_id, curr_dev, curr_ldc_id) != 0) {
		PRN("Failed to change vdisk ID %lu", curr_id);
		return;
	}
}

static int
vds_process_md(void *arg, mdeg_result_t *md)
{
	int	i;
	vds_t	*vds = arg;


	if (md == NULL)
		return (MDEG_FAILURE);
	ASSERT(vds != NULL);

	for (i = 0; i < md->removed.nelem; i++)
		vds_remove_vd(vds, md->removed.mdp, md->removed.mdep[i]);
	for (i = 0; i < md->match_curr.nelem; i++)
		vds_change_vd(vds, md->match_prev.mdp, md->match_prev.mdep[i],
		    md->match_curr.mdp, md->match_curr.mdep[i]);
	for (i = 0; i < md->added.nelem; i++)
		vds_add_vd(vds, md->added.mdp, md->added.mdep[i]);

	return (MDEG_SUCCESS);
}

static int
vds_do_attach(dev_info_t *dip)
{
	static char	reg_prop[] = "reg";	/* devinfo ID prop */

	/* MDEG specification for a (particular) vds node */
	static mdeg_prop_spec_t	vds_prop_spec[] = {
		{MDET_PROP_STR, "name", {VDS_NAME}},
		{MDET_PROP_VAL, "cfg-handle", {0}},
		{MDET_LIST_END, NULL, {0}}};
	static mdeg_node_spec_t	vds_spec = {"virtual-device", vds_prop_spec};

	/* MDEG specification for matching a vd node */
	static md_prop_match_t	vd_prop_spec[] = {
		{MDET_PROP_VAL, VD_ID_PROP},
		{MDET_LIST_END, NULL}};
	static mdeg_node_match_t vd_spec = {"virtual-device-port",
	    vd_prop_spec};

	int		status;
	uint64_t	cfg_handle;
	minor_t		instance = ddi_get_instance(dip);
	vds_t		*vds;


	/*
	 * The "cfg-handle" property of a vds node in an MD contains the MD's
	 * notion of "instance", or unique identifier, for that node; OBP
	 * stores the value of the "cfg-handle" MD property as the value of
	 * the "reg" property on the node in the device tree it builds from
	 * the MD and passes to Solaris.  Thus, we look up the devinfo node's
	 * "reg" property value to uniquely identify this device instance when
	 * registering with the MD event-generation framework.  If the "reg"
	 * property cannot be found, the device tree state is presumably so
	 * broken that there is no point in continuing.
	 */
	if (!ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, reg_prop)) {
		PRN("vds \"%s\" property does not exist", reg_prop);
		return (DDI_FAILURE);
	}

	/* Get the MD instance for later MDEG registration */
	cfg_handle = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    reg_prop, -1);

	if (ddi_soft_state_zalloc(vds_state, instance) != DDI_SUCCESS) {
		PRN("Could not allocate state for instance %u", instance);
		return (DDI_FAILURE);
	}

	if ((vds = ddi_get_soft_state(vds_state, instance)) == NULL) {
		PRN("Could not get state for instance %u", instance);
		ddi_soft_state_free(vds_state, instance);
		return (DDI_FAILURE);
	}


	vds->dip = dip;
	vds->vd_table = mod_hash_create_ptrhash("vds_vd_table", VDS_NCHAINS,
	    vds_destroy_vd,
	    sizeof (void *));
	ASSERT(vds->vd_table != NULL);

	if ((status = ldi_ident_from_dip(dip, &vds->ldi_ident)) != 0) {
		PRN("ldi_ident_from_dip() returned errno %d", status);
		return (DDI_FAILURE);
	}
	vds->initialized |= VDS_LDI;

	/* Register for MD updates */
	vds_prop_spec[1].ps_val = cfg_handle;
	if (mdeg_register(&vds_spec, &vd_spec, vds_process_md, vds,
	    &vds->mdeg) != MDEG_SUCCESS) {
		PRN("Unable to register for MD updates");
		return (DDI_FAILURE);
	}
	vds->initialized |= VDS_MDEG;

	/* Prevent auto-detaching so driver is available whenever MD changes */
	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip, DDI_NO_AUTODETACH, 1) !=
	    DDI_PROP_SUCCESS) {
		PRN("failed to set \"%s\" property for instance %u",
		    DDI_NO_AUTODETACH, instance);
	}

	ddi_report_dev(dip);
	return (DDI_SUCCESS);
}

static int
vds_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int	status;

	switch (cmd) {
	case DDI_ATTACH:
		PR0("Attaching");
		if ((status = vds_do_attach(dip)) != DDI_SUCCESS)
			(void) vds_detach(dip, DDI_DETACH);
		return (status);
	case DDI_RESUME:
		PR0("No action required for DDI_RESUME");
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}
}

static struct dev_ops vds_ops = {
	DEVO_REV,	/* devo_rev */
	0,		/* devo_refcnt */
	ddi_no_info,	/* devo_getinfo */
	nulldev,	/* devo_identify */
	nulldev,	/* devo_probe */
	vds_attach,	/* devo_attach */
	vds_detach,	/* devo_detach */
	nodev,		/* devo_reset */
	NULL,		/* devo_cb_ops */
	NULL,		/* devo_bus_ops */
	nulldev		/* devo_power */
};

static struct modldrv modldrv = {
	&mod_driverops,
	"virtual disk server v%I%",
	&vds_ops,
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};


int
_init(void)
{
	int	i, status;


	if ((status = ddi_soft_state_init(&vds_state, sizeof (vds_t), 1)) != 0)
		return (status);
	if ((status = mod_install(&modlinkage)) != 0) {
		ddi_soft_state_fini(&vds_state);
		return (status);
	}

	/* Fill in the bit-mask of server-supported operations */
	for (i = 0; i < vds_noperations; i++)
		vds_operations |= 1 << (vds_operation[i].operation - 1);

	return (0);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	int	status;


	if ((status = mod_remove(&modlinkage)) != 0)
		return (status);
	ddi_soft_state_fini(&vds_state);
	return (0);
}