/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012 Garrett D'Amore <garrett@damore.org>. All rights reserved.
 * Copyright 2012 Alexey Zaytsev <alexey.zaytsev@gmail.com> All rights reserved.
 * Copyright 2017 The MathWorks, Inc. All rights reserved.
 * Copyright 2020 Joyent, Inc.
 * Copyright 2022 OmniOS Community Edition (OmniOSce) Association.
 * Copyright 2022 Tintri by DDN, Inc. All rights reserved.
 */

#include <sys/types.h>
#include <sys/ksynch.h>
#include <sys/kmem.h>
#include <sys/file.h>
#include <sys/errno.h>
#include <sys/open.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/aio_req.h>
#include <sys/cred.h>
#include <sys/modctl.h>
#include <sys/cmlb.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/list.h>
#include <sys/sysmacros.h>
#include <sys/dkio.h>
#include <sys/dkioc_free_util.h>
#include <sys/vtoc.h>
#include <sys/scsi/scsi.h>	/* for DTYPE_DIRECT */
#include <sys/kstat.h>
#include <sys/fs/dv_node.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/note.h>
#include <sys/blkdev.h>
#include <sys/scsi/impl/inquiry.h>
#include <sys/taskq.h>
#include <sys/taskq_impl.h>
#include <sys/disp.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>

/*
 * blkdev is a driver which provides much of the common functionality that
 * a block device driver may need, removing code that would otherwise be
 * duplicated in each block device driver.
 *
 * Within this driver, all of the struct cb_ops functions required for a
 * block device driver are implemented in terms of callback functions
 * provided by the parent driver.
 *
 * To use blkdev, a driver needs to:
 *	1. Create a bd_ops_t structure which has the callback operations
 *	   blkdev will use.
 *	2. Create a handle by calling bd_alloc_handle().  One of the
 *	   arguments to this function is the bd_ops_t.
 *	3. Call bd_attach_handle().  This will instantiate a blkdev device
 *	   as a child device node of the calling driver.
 *	(A sketch of these three steps appears at the end of this comment.)
 *
 * A parent driver is not restricted to allocating and attaching a
 * single instance; it may attach as many as it wishes.  For each handle
 * attached, appropriate entries in /dev/[r]dsk are created.
 *
 * The bd_ops_t routines that a parent of blkdev needs to provide are:
 *
 * o_drive_info: Provide information to blkdev such as how many I/O queues
 *		to create and the size of those queues.  Also some device
 *		specifics such as EUI, vendor, product, model, serial
 *		number ....
 *
 * o_media_info: Provide information about the media, e.g. size and block size.
 *
 * o_devid_init: Creates and initializes the device id.  Typically calls
 *		ddi_devid_init().
 *
 * o_sync_cache: Issues a device appropriate command to flush any write
 *		caches.
 *
 * o_read: Read data as described by bd_xfer_t argument.
 *
 * o_write: Write data as described by bd_xfer_t argument.
 *
 * o_free_space: Free the space described by bd_xfer_t argument (optional).
 *
 * Queues
 * ------
 * Part of the drive_info data is a queue count.  blkdev will create
 * "queue count" number of waitq/runq pairs.  Each waitq/runq pair
 * operates independently.  As an I/O is scheduled up to the parent
 * driver via o_read or o_write, its queue number is given.  If the
 * parent driver supports multiple hardware queues, it can then select
 * where to submit the I/O request.
 *
 * Currently blkdev uses a simplistic round-robin queue selection method.
 * It has the advantage that it is lockless.  In the future it will be
 * worthwhile reviewing this strategy for something which prioritizes queues
 * depending on how busy they are.
 *
 * Each waitq/runq pair is protected by its mutex (q_iomutex).  Incoming
 * I/O requests are initially added to the waitq.  They are taken off the
 * waitq, added to the runq and submitted, provided the runq is shorter
 * than the qsize specified in the drive_info.  As an I/O request
 * completes, the parent driver is required to call bd_xfer_done(), which
 * will remove the I/O request from the runq and pass I/O completion
 * status up the stack.
 *
 * Locks
 * -----
 * There are five instance-global locks: d_ocmutex, d_ksmutex, d_errmutex,
 * d_statemutex and d_dle_mutex.  There is also a q_iomutex per
 * waitq/runq pair.
 *
 * Lock Hierarchy
 * --------------
 * The only two locks which may be held simultaneously are q_iomutex and
 * d_ksmutex.  In all cases q_iomutex must be acquired before d_ksmutex.
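 *
 * Example
 * -------
 * As an illustrative sketch only (not part of blkdev itself), a
 * hypothetical parent driver "xd" might wire itself up to blkdev roughly
 * as follows; xd_drive_info, xd_media_info, xd_devid_init, xd_sync_cache,
 * xd_read, xd_write, xd_free_space, xd_dma_attr and the xd soft state are
 * assumed names, and o_free_space may be left NULL if the device cannot
 * free blocks:
 *
 *	static bd_ops_t xd_bd_ops = {
 *		.o_version	= BD_OPS_VERSION_2,
 *		.o_drive_info	= xd_drive_info,
 *		.o_media_info	= xd_media_info,
 *		.o_devid_init	= xd_devid_init,
 *		.o_sync_cache	= xd_sync_cache,
 *		.o_read		= xd_read,
 *		.o_write	= xd_write,
 *		.o_free_space	= xd_free_space,
 *	};
 *
 * Then, from xd's attach(9E) entry point, once the hardware is ready:
 *
 *	xd->xd_bdh = bd_alloc_handle(xd, &xd_bd_ops, &xd_dma_attr, KM_SLEEP);
 *	if (xd->xd_bdh == NULL)
 *		return (DDI_FAILURE);
 *	if (bd_attach_handle(dip, xd->xd_bdh) != DDI_SUCCESS) {
 *		bd_free_handle(xd->xd_bdh);
 *		xd->xd_bdh = NULL;
 *		return (DDI_FAILURE);
 *	}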
 */

#define	BD_MAXPART	64
#define	BDINST(dev)	(getminor(dev) / BD_MAXPART)
#define	BDPART(dev)	(getminor(dev) % BD_MAXPART)

typedef struct bd bd_t;
typedef struct bd_xfer_impl bd_xfer_impl_t;
typedef struct bd_queue bd_queue_t;

typedef enum {
	BD_DLE_PENDING	= 1 << 0,
	BD_DLE_RUNNING	= 1 << 1
} bd_dle_state_t;

struct bd {
	void		*d_private;
	dev_info_t	*d_dip;
	kmutex_t	d_ocmutex;		/* open/close */
	kmutex_t	d_ksmutex;		/* kstat */
	kmutex_t	d_errmutex;
	kmutex_t	d_statemutex;
	kcondvar_t	d_statecv;
	enum dkio_state	d_state;
	cmlb_handle_t	d_cmlbh;
	unsigned	d_open_lyr[BD_MAXPART];	/* open count */
	uint64_t	d_open_excl;	/* bit mask indexed by partition */
	uint64_t	d_open_reg[OTYPCNT];	/* bit mask */
	uint64_t	d_io_counter;

	uint32_t	d_qcount;
	uint32_t	d_qactive;
	uint32_t	d_maxxfer;
	uint32_t	d_blkshift;
	uint32_t	d_pblkshift;
	uint64_t	d_numblks;
	ddi_devid_t	d_devid;

	uint64_t	d_max_free_seg;
	uint64_t	d_max_free_blks;
	uint64_t	d_max_free_seg_blks;
	uint64_t	d_free_align;

	kmem_cache_t	*d_cache;
	bd_queue_t	*d_queues;
	kstat_t		*d_ksp;
	kstat_io_t	*d_kiop;
	kstat_t		*d_errstats;
	struct bd_errstats *d_kerr;

	boolean_t	d_rdonly;
	boolean_t	d_ssd;
	boolean_t	d_removable;
	boolean_t	d_hotpluggable;
	boolean_t	d_use_dma;

	ddi_dma_attr_t	d_dma;
	bd_ops_t	d_ops;
	bd_handle_t	d_handle;

	kmutex_t	d_dle_mutex;
	taskq_ent_t	d_dle_ent;
	bd_dle_state_t	d_dle_state;
};

struct bd_handle {
	bd_ops_t	h_ops;
	ddi_dma_attr_t	*h_dma;
	dev_info_t	*h_parent;
	dev_info_t	*h_child;
	void		*h_private;
	bd_t		*h_bd;
	char		*h_name;
	char		h_addr[50];	/* enough for w%0.32x,%X */
};

struct bd_xfer_impl {
	bd_xfer_t	i_public;
	list_node_t	i_linkage;
	bd_t		*i_bd;
	buf_t		*i_bp;
	bd_queue_t	*i_bq;
	uint_t		i_num_win;
	uint_t		i_cur_win;
	off_t		i_offset;
	int		(*i_func)(void *, bd_xfer_t *);
	uint32_t	i_blkshift;
	size_t		i_len;
	size_t		i_resid;
};

struct bd_queue {
	kmutex_t	q_iomutex;
	uint32_t	q_qsize;
	uint32_t	q_qactive;
	list_t		q_runq;
	list_t		q_waitq;
};

#define	i_dmah		i_public.x_dmah
#define	i_dmac		i_public.x_dmac
#define	i_ndmac		i_public.x_ndmac
#define	i_kaddr		i_public.x_kaddr
#define	i_nblks		i_public.x_nblks
#define	i_blkno		i_public.x_blkno
#define	i_flags		i_public.x_flags
#define	i_qnum		i_public.x_qnum
#define	i_dfl		i_public.x_dfl

#define	CAN_FREESPACE(bd) \
	(((bd)->d_ops.o_free_space == NULL) ? B_FALSE : B_TRUE)

/*
 * Private prototypes.
 */

static void bd_prop_update_inqstring(dev_info_t *, char *, char *, size_t);
static void bd_create_inquiry_props(dev_info_t *, bd_drive_t *);
static void bd_create_errstats(bd_t *, int, bd_drive_t *);
static void bd_destroy_errstats(bd_t *);
static void bd_errstats_setstr(kstat_named_t *, char *, size_t, char *);
static void bd_init_errstats(bd_t *, bd_drive_t *);
static void bd_fini_errstats(bd_t *);

static int bd_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int bd_attach(dev_info_t *, ddi_attach_cmd_t);
static int bd_detach(dev_info_t *, ddi_detach_cmd_t);

static int bd_open(dev_t *, int, int, cred_t *);
static int bd_close(dev_t, int, int, cred_t *);
static int bd_strategy(struct buf *);
static int bd_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int bd_dump(dev_t, caddr_t, daddr_t, int);
static int bd_read(dev_t, struct uio *, cred_t *);
static int bd_write(dev_t, struct uio *, cred_t *);
static int bd_aread(dev_t, struct aio_req *, cred_t *);
static int bd_awrite(dev_t, struct aio_req *, cred_t *);
static int bd_prop_op(dev_t, dev_info_t *, ddi_prop_op_t, int, char *,
    caddr_t, int *);

static int bd_tg_rdwr(dev_info_t *, uchar_t, void *, diskaddr_t, size_t,
    void *);
static int bd_tg_getinfo(dev_info_t *, int, void *, void *);
static int bd_xfer_ctor(void *, void *, int);
static void bd_xfer_dtor(void *, void *);
static void bd_sched(bd_t *, bd_queue_t *);
static void bd_submit(bd_t *, bd_xfer_impl_t *);
static void bd_runq_exit(bd_xfer_impl_t *, int);
static void bd_update_state(bd_t *);
static int bd_check_state(bd_t *, enum dkio_state *);
static int bd_flush_write_cache(bd_t *, struct dk_callback *);
static int bd_check_uio(dev_t, struct uio *);
static int bd_free_space(dev_t, bd_t *, dkioc_free_list_t *);

struct cmlb_tg_ops bd_tg_ops = {
	TG_DK_OPS_VERSION_1,
	bd_tg_rdwr,
	bd_tg_getinfo,
};

static struct cb_ops bd_cb_ops = {
	bd_open,		/* open */
	bd_close,		/* close */
	bd_strategy,		/* strategy */
	nodev,			/* print */
	bd_dump,		/* dump */
	bd_read,		/* read */
	bd_write,		/* write */
	bd_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	bd_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_64BIT | D_MP,		/* Driver compatibility flag */
	CB_REV,			/* cb_rev */
	bd_aread,		/* async read */
	bd_awrite		/* async write */
};

struct dev_ops bd_dev_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt */
	bd_getinfo,		/* getinfo */
	nulldev,		/* identify */
	nulldev,		/* probe */
	bd_attach,		/* attach */
	bd_detach,		/* detach */
	nodev,			/* reset */
	&bd_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	NULL,			/* power */
	ddi_quiesce_not_needed,	/* quiesce */
};

static struct modldrv modldrv = {
	&mod_driverops,
	"Generic Block Device",
	&bd_dev_ops,
};

static struct modlinkage modlinkage = {
	MODREV_1, { &modldrv, NULL }
};

static void *bd_state;
static krwlock_t bd_lock;
static taskq_t *bd_taskq;

int
_init(void)
{
	char taskq_name[TASKQ_NAMELEN];
	const char *name;
	int rv;

	rv = ddi_soft_state_init(&bd_state, sizeof (struct bd), 2);
	if (rv != DDI_SUCCESS)
		return (rv);

	name = mod_modname(&modlinkage);
	(void) snprintf(taskq_name, sizeof (taskq_name), "%s_taskq", name);
	bd_taskq = taskq_create(taskq_name, 1, minclsyspri, 0, 0, 0);
	if (bd_taskq == NULL) {
		cmn_err(CE_WARN, "%s: unable to create %s", name, taskq_name);
		ddi_soft_state_fini(&bd_state);
		return (DDI_FAILURE);
	}

	rw_init(&bd_lock, NULL, RW_DRIVER, NULL);

	rv = mod_install(&modlinkage);
	if (rv != DDI_SUCCESS) {
		rw_destroy(&bd_lock);
		taskq_destroy(bd_taskq);
		ddi_soft_state_fini(&bd_state);
	}
	return (rv);
}

int
_fini(void)
{
	int rv;

	rv = mod_remove(&modlinkage);
	if (rv == DDI_SUCCESS) {
		rw_destroy(&bd_lock);
		taskq_destroy(bd_taskq);
		ddi_soft_state_fini(&bd_state);
	}
	return (rv);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

static int
bd_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
{
	bd_t	*bd;
	minor_t	inst;

	_NOTE(ARGUNUSED(dip));

	inst = BDINST((dev_t)arg);

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		bd = ddi_get_soft_state(bd_state, inst);
		if (bd == NULL) {
			return (DDI_FAILURE);
		}
		*resultp = (void *)bd->d_dip;
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*resultp = (void *)(intptr_t)inst;
		break;

	default:
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

static void
bd_prop_update_inqstring(dev_info_t *dip, char *name, char *data, size_t len)
{
	int	ilen;
	char	*data_string;

	ilen = scsi_ascii_inquiry_len(data, len);
	ASSERT3U(ilen, <=, len);
	if (ilen <= 0)
		return;
	/* ensure null termination */
	data_string = kmem_zalloc(ilen + 1, KM_SLEEP);
	bcopy(data, data_string, ilen);
	(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip, name, data_string);
	kmem_free(data_string, ilen + 1);
}

static void
bd_create_inquiry_props(dev_info_t *dip, bd_drive_t *drive)
{
	if (drive->d_vendor_len > 0)
		bd_prop_update_inqstring(dip, INQUIRY_VENDOR_ID,
		    drive->d_vendor, drive->d_vendor_len);

	if (drive->d_product_len > 0)
		bd_prop_update_inqstring(dip, INQUIRY_PRODUCT_ID,
		    drive->d_product, drive->d_product_len);

	if (drive->d_serial_len > 0)
		bd_prop_update_inqstring(dip, INQUIRY_SERIAL_NO,
		    drive->d_serial, drive->d_serial_len);

	if (drive->d_revision_len > 0)
		bd_prop_update_inqstring(dip, INQUIRY_REVISION_ID,
		    drive->d_revision, drive->d_revision_len);
}

static void
bd_create_errstats(bd_t *bd, int inst, bd_drive_t *drive)
{
	char	ks_module[KSTAT_STRLEN];
	char	ks_name[KSTAT_STRLEN];
	int	ndata = sizeof (struct bd_errstats) / sizeof (kstat_named_t);

	if (bd->d_errstats != NULL)
		return;

	(void) snprintf(ks_module, sizeof (ks_module), "%serr",
	    ddi_driver_name(bd->d_dip));
	(void) snprintf(ks_name, sizeof (ks_name), "%s%d,err",
	    ddi_driver_name(bd->d_dip), inst);

	bd->d_errstats = kstat_create(ks_module, inst, ks_name, "device_error",
	    KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT);

	mutex_init(&bd->d_errmutex, NULL, MUTEX_DRIVER, NULL);
	if (bd->d_errstats == NULL) {
		/*
		 * Even if we cannot create the kstat, we create a
		 * scratch kstat.  The reason for this is to ensure
		 * that we can update the kstat all of the time,
		 * without adding an extra branch instruction.
		 */
		bd->d_kerr = kmem_zalloc(sizeof (struct bd_errstats),
		    KM_SLEEP);
	} else {
		bd->d_errstats->ks_lock = &bd->d_errmutex;
		bd->d_kerr = (struct bd_errstats *)bd->d_errstats->ks_data;
	}

	kstat_named_init(&bd->d_kerr->bd_softerrs, "Soft Errors",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&bd->d_kerr->bd_harderrs, "Hard Errors",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&bd->d_kerr->bd_transerrs, "Transport Errors",
	    KSTAT_DATA_UINT32);

	if (drive->d_model_len > 0) {
		kstat_named_init(&bd->d_kerr->bd_model, "Model",
		    KSTAT_DATA_STRING);
	} else {
		kstat_named_init(&bd->d_kerr->bd_vid, "Vendor",
		    KSTAT_DATA_STRING);
		kstat_named_init(&bd->d_kerr->bd_pid, "Product",
		    KSTAT_DATA_STRING);
	}

	kstat_named_init(&bd->d_kerr->bd_revision, "Revision",
	    KSTAT_DATA_STRING);
	kstat_named_init(&bd->d_kerr->bd_serial, "Serial No",
	    KSTAT_DATA_STRING);
	kstat_named_init(&bd->d_kerr->bd_capacity, "Size",
	    KSTAT_DATA_ULONGLONG);
	kstat_named_init(&bd->d_kerr->bd_rq_media_err, "Media Error",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&bd->d_kerr->bd_rq_ntrdy_err, "Device Not Ready",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&bd->d_kerr->bd_rq_nodev_err, "No Device",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&bd->d_kerr->bd_rq_recov_err, "Recoverable",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&bd->d_kerr->bd_rq_illrq_err, "Illegal Request",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&bd->d_kerr->bd_rq_pfa_err,
	    "Predictive Failure Analysis", KSTAT_DATA_UINT32);

	bd->d_errstats->ks_private = bd;

	kstat_install(bd->d_errstats);
	bd_init_errstats(bd, drive);
}

static void
bd_destroy_errstats(bd_t *bd)
{
	if (bd->d_errstats != NULL) {
		bd_fini_errstats(bd);
		kstat_delete(bd->d_errstats);
		bd->d_errstats = NULL;
	} else {
		kmem_free(bd->d_kerr, sizeof (struct bd_errstats));
		bd->d_kerr = NULL;
		mutex_destroy(&bd->d_errmutex);
	}
}

static void
bd_errstats_setstr(kstat_named_t *k, char *str, size_t len, char *alt)
{
	char	*tmp;
	size_t	km_len;

	if (KSTAT_NAMED_STR_PTR(k) == NULL) {
		if (len > 0)
			km_len = strnlen(str, len);
		else if (alt != NULL)
			km_len = strlen(alt);
		else
			return;

		tmp = kmem_alloc(km_len + 1, KM_SLEEP);
		bcopy(len > 0 ? str : alt, tmp, km_len);
		tmp[km_len] = '\0';

		kstat_named_setstr(k, tmp);
	}
}

static void
bd_errstats_clrstr(kstat_named_t *k)
{
	if (KSTAT_NAMED_STR_PTR(k) == NULL)
		return;

	kmem_free(KSTAT_NAMED_STR_PTR(k), KSTAT_NAMED_STR_BUFLEN(k));
	kstat_named_setstr(k, NULL);
}

static void
bd_init_errstats(bd_t *bd, bd_drive_t *drive)
{
	struct bd_errstats	*est = bd->d_kerr;

	mutex_enter(&bd->d_errmutex);

	if (drive->d_model_len > 0 &&
	    KSTAT_NAMED_STR_PTR(&est->bd_model) == NULL) {
		bd_errstats_setstr(&est->bd_model, drive->d_model,
		    drive->d_model_len, NULL);
	} else {
		bd_errstats_setstr(&est->bd_vid, drive->d_vendor,
		    drive->d_vendor_len, "Unknown ");
		bd_errstats_setstr(&est->bd_pid, drive->d_product,
		    drive->d_product_len, "Unknown ");
	}

	bd_errstats_setstr(&est->bd_revision, drive->d_revision,
	    drive->d_revision_len, "0001");
	bd_errstats_setstr(&est->bd_serial, drive->d_serial,
	    drive->d_serial_len, "0 ");

	mutex_exit(&bd->d_errmutex);
}

static void
bd_fini_errstats(bd_t *bd)
{
	struct bd_errstats	*est = bd->d_kerr;

	mutex_enter(&bd->d_errmutex);

	bd_errstats_clrstr(&est->bd_model);
	bd_errstats_clrstr(&est->bd_vid);
	bd_errstats_clrstr(&est->bd_pid);
	bd_errstats_clrstr(&est->bd_revision);
	bd_errstats_clrstr(&est->bd_serial);

	mutex_exit(&bd->d_errmutex);
}

static void
bd_queues_free(bd_t *bd)
{
	uint32_t i;

	for (i = 0; i < bd->d_qcount; i++) {
		bd_queue_t *bq = &bd->d_queues[i];

		mutex_destroy(&bq->q_iomutex);
		list_destroy(&bq->q_waitq);
		list_destroy(&bq->q_runq);
	}

	kmem_free(bd->d_queues, sizeof (*bd->d_queues) * bd->d_qcount);
}

static int
bd_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int		inst;
	bd_handle_t	hdl;
	bd_t		*bd;
	bd_drive_t	drive;
	uint32_t	i;
	int		rv;
	char		name[16];
	char		kcache[32];
	char		*node_type;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		/* We don't do anything native for suspend/resume */
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	inst = ddi_get_instance(dip);
	hdl = ddi_get_parent_data(dip);

	(void) snprintf(name, sizeof (name), "%s%d",
	    ddi_driver_name(dip), ddi_get_instance(dip));
	(void) snprintf(kcache, sizeof (kcache), "%s_xfer", name);

	if (hdl == NULL) {
		cmn_err(CE_WARN, "%s: missing parent data!", name);
		return (DDI_FAILURE);
	}

	if (ddi_soft_state_zalloc(bd_state, inst) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: unable to zalloc soft state!", name);
		return (DDI_FAILURE);
	}
	bd = ddi_get_soft_state(bd_state, inst);

	if (hdl->h_dma) {
		bd->d_dma = *(hdl->h_dma);
		bd->d_dma.dma_attr_granular =
		    max(DEV_BSIZE, bd->d_dma.dma_attr_granular);
		bd->d_use_dma = B_TRUE;

		if (bd->d_maxxfer &&
		    (bd->d_maxxfer != bd->d_dma.dma_attr_maxxfer)) {
			cmn_err(CE_WARN,
			    "%s: inconsistent maximum transfer size!",
			    name);
			/* We force it */
			bd->d_maxxfer = bd->d_dma.dma_attr_maxxfer;
		} else {
			bd->d_maxxfer = bd->d_dma.dma_attr_maxxfer;
		}
	} else {
		bd->d_use_dma = B_FALSE;
		if (bd->d_maxxfer == 0) {
			bd->d_maxxfer = 1024 * 1024;
		}
	}
	bd->d_ops = hdl->h_ops;
	bd->d_private = hdl->h_private;
	bd->d_blkshift = DEV_BSHIFT;	/* 512 bytes, to start */

	if (bd->d_maxxfer % DEV_BSIZE) {
		cmn_err(CE_WARN, "%s: maximum transfer misaligned!", name);
		bd->d_maxxfer &= ~(DEV_BSIZE - 1);
	}
	if (bd->d_maxxfer < DEV_BSIZE) {
		cmn_err(CE_WARN, "%s: maximum transfer size too small!", name);
		ddi_soft_state_free(bd_state, inst);
		return (DDI_FAILURE);
	}

	bd->d_dip = dip;
	bd->d_handle = hdl;
	ddi_set_driver_private(dip, bd);

	mutex_init(&bd->d_ksmutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&bd->d_ocmutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&bd->d_statemutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&bd->d_statecv, NULL, CV_DRIVER, NULL);
	mutex_init(&bd->d_dle_mutex, NULL, MUTEX_DRIVER, NULL);
	bd->d_dle_state = 0;

	bd->d_cache = kmem_cache_create(kcache, sizeof (bd_xfer_impl_t), 8,
	    bd_xfer_ctor, bd_xfer_dtor, NULL, bd, NULL, 0);

	bd->d_ksp = kstat_create(ddi_driver_name(dip), inst, NULL, "disk",
	    KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
	if (bd->d_ksp != NULL) {
		bd->d_ksp->ks_lock = &bd->d_ksmutex;
		kstat_install(bd->d_ksp);
		bd->d_kiop = bd->d_ksp->ks_data;
	} else {
		/*
		 * Even if we cannot create the kstat, we create a
		 * scratch kstat.  The reason for this is to ensure
		 * that we can update the kstat all of the time,
		 * without adding an extra branch instruction.
		 */
		bd->d_kiop = kmem_zalloc(sizeof (kstat_io_t), KM_SLEEP);
	}

	cmlb_alloc_handle(&bd->d_cmlbh);

	bd->d_state = DKIO_NONE;

	bzero(&drive, sizeof (drive));
	/*
	 * Default to one queue and no restrictions on free space requests
	 * (if the driver provides a method); the parent driver can override
	 * either.
	 */
	drive.d_qcount = 1;
	drive.d_free_align = 1;
	bd->d_ops.o_drive_info(bd->d_private, &drive);

	/*
	 * Several checks to make sure o_drive_info() didn't return bad
	 * values:
	 *
	 * There must be at least one queue.
	 */
	if (drive.d_qcount == 0)
		goto fail_drive_info;

	/* FREE/UNMAP/TRIM alignment needs to be at least 1 block */
	if (drive.d_free_align == 0)
		goto fail_drive_info;

	/*
	 * If d_max_free_blks is not unlimited (not 0), then we cannot allow
	 * an unlimited segment size.  It is however permissible to not impose
	 * a limit on the total number of blocks freed while limiting the
	 * amount allowed in an individual segment.
	 */
	if ((drive.d_max_free_blks > 0 && drive.d_max_free_seg_blks == 0))
		goto fail_drive_info;

	/*
	 * If a limit is set on d_max_free_blks (by the above check, we know
	 * that if there is a limit on d_max_free_blks, d_max_free_seg_blks
	 * cannot be unlimited), it cannot be smaller than the limit on an
	 * individual segment.
	 */
	if ((drive.d_max_free_blks > 0 &&
	    drive.d_max_free_seg_blks > drive.d_max_free_blks)) {
		goto fail_drive_info;
	}

	bd->d_qcount = drive.d_qcount;
	bd->d_removable = drive.d_removable;
	bd->d_hotpluggable = drive.d_hotpluggable;

	if (drive.d_maxxfer && drive.d_maxxfer < bd->d_maxxfer)
		bd->d_maxxfer = drive.d_maxxfer;

	bd->d_free_align = drive.d_free_align;
	bd->d_max_free_seg = drive.d_max_free_seg;
	bd->d_max_free_blks = drive.d_max_free_blks;
	bd->d_max_free_seg_blks = drive.d_max_free_seg_blks;

	bd_create_inquiry_props(dip, &drive);
	bd_create_errstats(bd, inst, &drive);
	bd_update_state(bd);

	bd->d_queues = kmem_alloc(sizeof (*bd->d_queues) * bd->d_qcount,
	    KM_SLEEP);
	for (i = 0; i < bd->d_qcount; i++) {
		bd_queue_t *bq = &bd->d_queues[i];

		bq->q_qsize = drive.d_qsize;
		bq->q_qactive = 0;
		mutex_init(&bq->q_iomutex, NULL, MUTEX_DRIVER, NULL);

		list_create(&bq->q_waitq, sizeof (bd_xfer_impl_t),
		    offsetof(struct bd_xfer_impl, i_linkage));
		list_create(&bq->q_runq, sizeof (bd_xfer_impl_t),
		    offsetof(struct bd_xfer_impl, i_linkage));
	}

	if (*(uint64_t *)drive.d_eui64 != 0 ||
	    *(uint64_t *)drive.d_guid != 0 ||
	    *((uint64_t *)drive.d_guid + 1) != 0)
		node_type = DDI_NT_BLOCK_BLKDEV;
	else if (drive.d_lun >= 0)
		node_type = DDI_NT_BLOCK_CHAN;
	else
		node_type = DDI_NT_BLOCK;

	rv = cmlb_attach(dip, &bd_tg_ops, DTYPE_DIRECT,
	    bd->d_removable, bd->d_hotpluggable, node_type,
	    CMLB_FAKE_LABEL_ONE_PARTITION, bd->d_cmlbh, 0);
	if (rv != 0) {
		goto fail_cmlb_attach;
	}

	if (bd->d_ops.o_devid_init != NULL) {
		rv = bd->d_ops.o_devid_init(bd->d_private, dip, &bd->d_devid);
		if (rv == DDI_SUCCESS) {
			if (ddi_devid_register(dip, bd->d_devid) !=
			    DDI_SUCCESS) {
				cmn_err(CE_WARN,
				    "%s: unable to register devid", name);
			}
		}
	}

	/*
	 * Add a zero-length attribute to tell the world we support
	 * kernel ioctls (for layered drivers).  Also set up properties
	 * used by HAL to identify removable media.
	 */
	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    DDI_KERNEL_IOCTL, NULL, 0);
	if (bd->d_removable) {
		(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
		    "removable-media", NULL, 0);
	}
	if (bd->d_hotpluggable) {
		(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
		    "hotpluggable", NULL, 0);
	}

	hdl->h_bd = bd;
	ddi_report_dev(dip);

	return (DDI_SUCCESS);

fail_cmlb_attach:
	bd_queues_free(bd);
	bd_destroy_errstats(bd);

fail_drive_info:
	cmlb_free_handle(&bd->d_cmlbh);

	if (bd->d_ksp != NULL) {
		kstat_delete(bd->d_ksp);
		bd->d_ksp = NULL;
	} else {
		kmem_free(bd->d_kiop, sizeof (kstat_io_t));
	}

	kmem_cache_destroy(bd->d_cache);
	cv_destroy(&bd->d_statecv);
	mutex_destroy(&bd->d_statemutex);
	mutex_destroy(&bd->d_ocmutex);
	mutex_destroy(&bd->d_ksmutex);
	mutex_destroy(&bd->d_dle_mutex);
	ddi_soft_state_free(bd_state, inst);
	return (DDI_FAILURE);
}

static int
bd_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	bd_handle_t	hdl;
	bd_t		*bd;

	bd = ddi_get_driver_private(dip);
	hdl = ddi_get_parent_data(dip);

	switch (cmd) {
	case DDI_DETACH:
		break;
	case DDI_SUSPEND:
		/* We don't suspend, but our parent does */
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	hdl->h_bd = NULL;

	if (bd->d_ksp != NULL) {
		kstat_delete(bd->d_ksp);
		bd->d_ksp = NULL;
	} else {
		kmem_free(bd->d_kiop, sizeof (kstat_io_t));
	}

	bd_destroy_errstats(bd);
	cmlb_detach(bd->d_cmlbh, 0);
	cmlb_free_handle(&bd->d_cmlbh);
	if (bd->d_devid)
		ddi_devid_free(bd->d_devid);
	kmem_cache_destroy(bd->d_cache);
	mutex_destroy(&bd->d_ksmutex);
	mutex_destroy(&bd->d_ocmutex);
	mutex_destroy(&bd->d_statemutex);
	cv_destroy(&bd->d_statecv);
	mutex_destroy(&bd->d_dle_mutex);
	bd_queues_free(bd);
	ddi_soft_state_free(bd_state, ddi_get_instance(dip));
	return (DDI_SUCCESS);
}

static int
bd_xfer_ctor(void *buf, void *arg, int kmflag)
{
	bd_xfer_impl_t	*xi;
	bd_t		*bd = arg;
	int		(*dcb)(caddr_t);

	if (kmflag == KM_PUSHPAGE || kmflag == KM_SLEEP) {
		dcb = DDI_DMA_SLEEP;
	} else {
		dcb = DDI_DMA_DONTWAIT;
	}

	xi = buf;
	bzero(xi, sizeof (*xi));
	xi->i_bd = bd;

	if (bd->d_use_dma) {
		if (ddi_dma_alloc_handle(bd->d_dip, &bd->d_dma, dcb, NULL,
		    &xi->i_dmah) != DDI_SUCCESS) {
			return (-1);
		}
	}

	return (0);
}

static void
bd_xfer_dtor(void *buf, void *arg)
{
	bd_xfer_impl_t	*xi = buf;

	_NOTE(ARGUNUSED(arg));

	if (xi->i_dmah)
		ddi_dma_free_handle(&xi->i_dmah);
	xi->i_dmah = NULL;
}

static bd_xfer_impl_t *
bd_xfer_alloc(bd_t *bd, struct buf *bp, int (*func)(void *, bd_xfer_t *),
    int kmflag)
{
	bd_xfer_impl_t	*xi;
	int		rv = 0;
	int		status;
	unsigned	dir;
	int		(*cb)(caddr_t);
	size_t		len;
	uint32_t	shift;

	if (kmflag == KM_SLEEP) {
		cb = DDI_DMA_SLEEP;
	} else {
		cb = DDI_DMA_DONTWAIT;
	}

	xi = kmem_cache_alloc(bd->d_cache, kmflag);
	if (xi == NULL) {
		bioerror(bp, ENOMEM);
		return (NULL);
	}

	ASSERT(bp);

	xi->i_bp = bp;
	xi->i_func = func;
	xi->i_blkno = bp->b_lblkno >> (bd->d_blkshift - DEV_BSHIFT);

	if (bp->b_bcount == 0) {
		xi->i_len = 0;
		xi->i_nblks = 0;
		xi->i_kaddr = NULL;
		xi->i_resid = 0;
		xi->i_num_win = 0;
		goto done;
	}

	if (bp->b_flags & B_READ) {
		dir = DDI_DMA_READ;
		xi->i_func = bd->d_ops.o_read;
	} else {
		dir = DDI_DMA_WRITE;
		xi->i_func = bd->d_ops.o_write;
	}

	shift = bd->d_blkshift;
	xi->i_blkshift = shift;

	if (!bd->d_use_dma) {
		bp_mapin(bp);
		rv = 0;
		xi->i_offset = 0;
		xi->i_num_win =
		    (bp->b_bcount + (bd->d_maxxfer - 1)) / bd->d_maxxfer;
		xi->i_cur_win = 0;
		xi->i_len = min(bp->b_bcount, bd->d_maxxfer);
		xi->i_nblks = xi->i_len >> shift;
		xi->i_kaddr = bp->b_un.b_addr;
		xi->i_resid = bp->b_bcount;
	} else {

		/*
		 * We have to use consistent DMA if the address is misaligned.
		 */
		if (((bp->b_flags & (B_PAGEIO | B_REMAPPED)) != B_PAGEIO) &&
		    ((uintptr_t)bp->b_un.b_addr & 0x7)) {
			dir |= DDI_DMA_CONSISTENT | DDI_DMA_PARTIAL;
		} else {
			dir |= DDI_DMA_STREAMING | DDI_DMA_PARTIAL;
		}

		status = ddi_dma_buf_bind_handle(xi->i_dmah, bp, dir, cb,
		    NULL, &xi->i_dmac, &xi->i_ndmac);
		switch (status) {
		case DDI_DMA_MAPPED:
			xi->i_num_win = 1;
			xi->i_cur_win = 0;
			xi->i_offset = 0;
			xi->i_len = bp->b_bcount;
			xi->i_nblks = xi->i_len >> shift;
			xi->i_resid = bp->b_bcount;
			rv = 0;
			break;
		case DDI_DMA_PARTIAL_MAP:
			xi->i_cur_win = 0;

			if ((ddi_dma_numwin(xi->i_dmah, &xi->i_num_win) !=
			    DDI_SUCCESS) ||
			    (ddi_dma_getwin(xi->i_dmah, 0, &xi->i_offset,
			    &len, &xi->i_dmac, &xi->i_ndmac) !=
			    DDI_SUCCESS) ||
			    (P2PHASE(len, (1U << shift)) != 0)) {
				(void) ddi_dma_unbind_handle(xi->i_dmah);
				rv = EFAULT;
				goto done;
			}
			xi->i_len = len;
			xi->i_nblks = xi->i_len >> shift;
			xi->i_resid = bp->b_bcount;
			rv = 0;
			break;
		case DDI_DMA_NORESOURCES:
			rv = EAGAIN;
			goto done;
		case DDI_DMA_TOOBIG:
			rv = EINVAL;
			goto done;
		case DDI_DMA_NOMAPPING:
		case DDI_DMA_INUSE:
		default:
			rv = EFAULT;
			goto done;
		}
	}

done:
	if (rv != 0) {
		kmem_cache_free(bd->d_cache, xi);
		bioerror(bp, rv);
		return (NULL);
	}

	return (xi);
}

static void
bd_xfer_free(bd_xfer_impl_t *xi)
{
	if (xi->i_dmah) {
		(void) ddi_dma_unbind_handle(xi->i_dmah);
	}
	if (xi->i_dfl != NULL) {
		dfl_free((dkioc_free_list_t *)xi->i_dfl);
		xi->i_dfl = NULL;
	}
	kmem_cache_free(xi->i_bd->d_cache, xi);
}

static int
bd_open(dev_t *devp, int flag, int otyp, cred_t *credp)
{
	dev_t		dev = *devp;
	bd_t		*bd;
	minor_t		part;
	minor_t		inst;
	uint64_t	mask;
	boolean_t	ndelay;
	int		rv;
	diskaddr_t	nblks;
	diskaddr_t	lba;

	_NOTE(ARGUNUSED(credp));

	part = BDPART(dev);
	inst = BDINST(dev);

	if (otyp >= OTYPCNT)
		return (EINVAL);

	ndelay = (flag & (FNDELAY | FNONBLOCK)) ? B_TRUE : B_FALSE;

	/*
	 * Block any DR events from changing the set of registered
	 * devices while we function.
	 */
	rw_enter(&bd_lock, RW_READER);
	if ((bd = ddi_get_soft_state(bd_state, inst)) == NULL) {
		rw_exit(&bd_lock);
		return (ENXIO);
	}

	mutex_enter(&bd->d_ocmutex);

	ASSERT(part < 64);
	mask = (1U << part);

	bd_update_state(bd);

	if (cmlb_validate(bd->d_cmlbh, 0, 0) != 0) {

		/* non-blocking opens are allowed to succeed */
		if (!ndelay) {
			rv = ENXIO;
			goto done;
		}
	} else if (cmlb_partinfo(bd->d_cmlbh, part, &nblks, &lba,
	    NULL, NULL, 0) == 0) {

		/*
		 * We read the partinfo, verify valid ranges.  If the
		 * partition is invalid, and we aren't blocking or
		 * doing a raw access, then fail.  (Non-blocking and
		 * raw accesses can still succeed to allow a disk with
		 * bad partition data to be opened by format and fdisk.)
		 */
		if ((!nblks) && ((!ndelay) || (otyp != OTYP_CHR))) {
			rv = ENXIO;
			goto done;
		}
	} else if (!ndelay) {
		/*
		 * cmlb_partinfo failed -- invalid partition or no
		 * disk label.
		 */
		rv = ENXIO;
		goto done;
	}

	if ((flag & FWRITE) && bd->d_rdonly) {
		rv = EROFS;
		goto done;
	}

	if ((bd->d_open_excl) & (mask)) {
		rv = EBUSY;
		goto done;
	}
	if (flag & FEXCL) {
		if (bd->d_open_lyr[part]) {
			rv = EBUSY;
			goto done;
		}
		for (int i = 0; i < OTYP_LYR; i++) {
			if (bd->d_open_reg[i] & mask) {
				rv = EBUSY;
				goto done;
			}
		}
	}

	if (otyp == OTYP_LYR) {
		bd->d_open_lyr[part]++;
	} else {
		bd->d_open_reg[otyp] |= mask;
	}
	if (flag & FEXCL) {
		bd->d_open_excl |= mask;
	}

	rv = 0;
done:
	mutex_exit(&bd->d_ocmutex);
	rw_exit(&bd_lock);

	return (rv);
}

static int
bd_close(dev_t dev, int flag, int otyp, cred_t *credp)
{
	bd_t		*bd;
	minor_t		inst;
	minor_t		part;
	uint64_t	mask;
	boolean_t	last = B_TRUE;

	_NOTE(ARGUNUSED(flag));
	_NOTE(ARGUNUSED(credp));

	part = BDPART(dev);
	inst = BDINST(dev);

	ASSERT(part < 64);
	mask = (1U << part);

	rw_enter(&bd_lock, RW_READER);

	if ((bd = ddi_get_soft_state(bd_state, inst)) == NULL) {
		rw_exit(&bd_lock);
		return (ENXIO);
	}

	mutex_enter(&bd->d_ocmutex);
	if (bd->d_open_excl & mask) {
		bd->d_open_excl &= ~mask;
	}
	if (otyp == OTYP_LYR) {
		bd->d_open_lyr[part]--;
	} else {
		bd->d_open_reg[otyp] &= ~mask;
	}
	for (int i = 0; i < 64; i++) {
		if (bd->d_open_lyr[part]) {
			last = B_FALSE;
		}
	}
	for (int i = 0; last && (i < OTYP_LYR); i++) {
		if (bd->d_open_reg[i]) {
			last = B_FALSE;
		}
	}
	mutex_exit(&bd->d_ocmutex);

	if (last) {
		cmlb_invalidate(bd->d_cmlbh, 0);
	}
	rw_exit(&bd_lock);

	return (0);
}

static int
bd_dump(dev_t dev, caddr_t caddr, daddr_t blkno, int nblk)
{
	minor_t		inst;
	minor_t		part;
	diskaddr_t	pstart;
	diskaddr_t	psize;
	bd_t		*bd;
	bd_xfer_impl_t	*xi;
	buf_t		*bp;
	int		rv;
	uint32_t	shift;
	daddr_t		d_blkno;
	int		d_nblk;

	rw_enter(&bd_lock, RW_READER);

	part = BDPART(dev);
	inst = BDINST(dev);

	if ((bd = ddi_get_soft_state(bd_state, inst)) == NULL) {
		rw_exit(&bd_lock);
		return (ENXIO);
	}
	shift = bd->d_blkshift;
	d_blkno = blkno >> (shift - DEV_BSHIFT);
	d_nblk = nblk >> (shift - DEV_BSHIFT);
	/*
	 * do cmlb, but do it synchronously unless we already have the
	 * partition (which we probably should.)
	 */
	if (cmlb_partinfo(bd->d_cmlbh, part, &psize, &pstart, NULL, NULL,
	    (void *)1)) {
		rw_exit(&bd_lock);
		return (ENXIO);
	}

	if ((d_blkno + d_nblk) > psize) {
		rw_exit(&bd_lock);
		return (EINVAL);
	}
	bp = getrbuf(KM_NOSLEEP);
	if (bp == NULL) {
		rw_exit(&bd_lock);
		return (ENOMEM);
	}

	bp->b_bcount = nblk << DEV_BSHIFT;
	bp->b_resid = bp->b_bcount;
	bp->b_lblkno = blkno;
	bp->b_un.b_addr = caddr;

	xi = bd_xfer_alloc(bd, bp, bd->d_ops.o_write, KM_NOSLEEP);
	if (xi == NULL) {
		rw_exit(&bd_lock);
		freerbuf(bp);
		return (ENOMEM);
	}
	xi->i_blkno = d_blkno + pstart;
	xi->i_flags = BD_XFER_POLL;
	bd_submit(bd, xi);
	rw_exit(&bd_lock);

	/*
	 * Generally, we should have run this entirely synchronously
	 * at this point and the biowait call should be a no-op.  If
	 * it didn't happen this way, it's a bug in the underlying
	 * driver not honoring BD_XFER_POLL.
	 */
	(void) biowait(bp);
	rv = geterror(bp);
	freerbuf(bp);
	return (rv);
}

void
bd_minphys(struct buf *bp)
{
	minor_t inst;
	bd_t	*bd;
	inst = BDINST(bp->b_edev);

	bd = ddi_get_soft_state(bd_state, inst);

	/*
	 * In a non-debug kernel, bd_strategy will catch !bd as
	 * well, and will fail nicely.
	 */
	ASSERT(bd);

	if (bp->b_bcount > bd->d_maxxfer)
		bp->b_bcount = bd->d_maxxfer;
}

static int
bd_check_uio(dev_t dev, struct uio *uio)
{
	bd_t		*bd;
	uint32_t	shift;

	if ((bd = ddi_get_soft_state(bd_state, BDINST(dev))) == NULL) {
		return (ENXIO);
	}

	shift = bd->d_blkshift;
	if ((P2PHASE(uio->uio_loffset, (1U << shift)) != 0) ||
	    (P2PHASE(uio->uio_iov->iov_len, (1U << shift)) != 0)) {
		return (EINVAL);
	}

	return (0);
}

static int
bd_read(dev_t dev, struct uio *uio, cred_t *credp)
{
	_NOTE(ARGUNUSED(credp));
	int ret = bd_check_uio(dev, uio);
	if (ret != 0) {
		return (ret);
	}
	return (physio(bd_strategy, NULL, dev, B_READ, bd_minphys, uio));
}

static int
bd_write(dev_t dev, struct uio *uio, cred_t *credp)
{
	_NOTE(ARGUNUSED(credp));
	int ret = bd_check_uio(dev, uio);
	if (ret != 0) {
		return (ret);
	}
	return (physio(bd_strategy, NULL, dev, B_WRITE, bd_minphys, uio));
}

static int
bd_aread(dev_t dev, struct aio_req *aio, cred_t *credp)
{
	_NOTE(ARGUNUSED(credp));
	int ret = bd_check_uio(dev, aio->aio_uio);
	if (ret != 0) {
		return (ret);
	}
	return (aphysio(bd_strategy, anocancel, dev, B_READ, bd_minphys, aio));
}

static int
bd_awrite(dev_t dev, struct aio_req *aio, cred_t *credp)
{
	_NOTE(ARGUNUSED(credp));
	int ret = bd_check_uio(dev, aio->aio_uio);
	if (ret != 0) {
		return (ret);
	}
	return (aphysio(bd_strategy, anocancel, dev, B_WRITE, bd_minphys, aio));
}

static int
bd_strategy(struct buf *bp)
{
	minor_t		inst;
	minor_t		part;
	bd_t		*bd;
	diskaddr_t	p_lba;
	diskaddr_t	p_nblks;
	diskaddr_t	b_nblks;
	bd_xfer_impl_t	*xi;
	uint32_t	shift;
	int		(*func)(void *, bd_xfer_t *);
	diskaddr_t	lblkno;

	part = BDPART(bp->b_edev);
	inst = BDINST(bp->b_edev);

	ASSERT(bp);

	bp->b_resid = bp->b_bcount;

	if ((bd = ddi_get_soft_state(bd_state, inst)) == NULL) {
		bioerror(bp, ENXIO);
		biodone(bp);
		return (0);
	}

	if (cmlb_partinfo(bd->d_cmlbh, part, &p_nblks, &p_lba,
	    NULL, NULL, 0)) {
		bioerror(bp, ENXIO);
		biodone(bp);
		return (0);
	}

	shift = bd->d_blkshift;
	lblkno = bp->b_lblkno >> (shift - DEV_BSHIFT);
	if ((P2PHASE(bp->b_lblkno, (1U << (shift - DEV_BSHIFT))) != 0) ||
	    (P2PHASE(bp->b_bcount, (1U << shift)) != 0) ||
	    (lblkno > p_nblks)) {
		bioerror(bp, EINVAL);
		biodone(bp);
		return (0);
	}
	b_nblks = bp->b_bcount >> shift;
	if ((lblkno == p_nblks) || (bp->b_bcount == 0)) {
		biodone(bp);
		return (0);
	}

	if ((b_nblks + lblkno) > p_nblks) {
		bp->b_resid = ((lblkno + b_nblks - p_nblks) << shift);
		bp->b_bcount -= bp->b_resid;
	} else {
		bp->b_resid = 0;
	}
	func = (bp->b_flags & B_READ) ? bd->d_ops.o_read : bd->d_ops.o_write;

	xi = bd_xfer_alloc(bd, bp, func, KM_NOSLEEP);
	if (xi == NULL) {
		xi = bd_xfer_alloc(bd, bp, func, KM_PUSHPAGE);
	}
	if (xi == NULL) {
		/* bd_request_alloc will have done bioerror */
		biodone(bp);
		return (0);
	}
	xi->i_blkno = lblkno + p_lba;

	bd_submit(bd, xi);

	return (0);
}

static int
bd_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *credp, int *rvalp)
{
	minor_t		inst;
	uint16_t	part;
	bd_t		*bd;
	void		*ptr = (void *)arg;
	int		rv;

	part = BDPART(dev);
	inst = BDINST(dev);

	if ((bd = ddi_get_soft_state(bd_state, inst)) == NULL) {
		return (ENXIO);
	}

	rv = cmlb_ioctl(bd->d_cmlbh, dev, cmd, arg, flag, credp, rvalp, 0);
	if (rv != ENOTTY)
		return (rv);

	if (rvalp != NULL) {
		/* the return value of the ioctl is 0 by default */
		*rvalp = 0;
	}

	switch (cmd) {
	case DKIOCGMEDIAINFO: {
		struct dk_minfo minfo;

		/* make sure our state information is current */
		bd_update_state(bd);
		bzero(&minfo, sizeof (minfo));
		minfo.dki_media_type = DK_FIXED_DISK;
		minfo.dki_lbsize = (1U << bd->d_blkshift);
		minfo.dki_capacity = bd->d_numblks;
		if (ddi_copyout(&minfo, ptr, sizeof (minfo), flag)) {
			return (EFAULT);
		}
		return (0);
	}
	case DKIOCGMEDIAINFOEXT: {
		struct dk_minfo_ext miext;
		size_t len;

		/* make sure our state information is current */
		bd_update_state(bd);
		bzero(&miext, sizeof (miext));
		miext.dki_media_type = DK_FIXED_DISK;
		miext.dki_lbsize = (1U << bd->d_blkshift);
		miext.dki_pbsize = (1U << bd->d_pblkshift);
		miext.dki_capacity = bd->d_numblks;

		switch (ddi_model_convert_from(flag & FMODELS)) {
		case DDI_MODEL_ILP32:
			len = sizeof (struct dk_minfo_ext32);
			break;
		default:
			len = sizeof (struct dk_minfo_ext);
			break;
		}

		if (ddi_copyout(&miext, ptr, len, flag)) {
			return (EFAULT);
		}
		return (0);
	}
	case DKIOCINFO: {
		struct dk_cinfo cinfo;
		bzero(&cinfo, sizeof (cinfo));
		cinfo.dki_ctype = DKC_BLKDEV;
		cinfo.dki_cnum = ddi_get_instance(ddi_get_parent(bd->d_dip));
		(void) snprintf(cinfo.dki_cname, sizeof (cinfo.dki_cname),
		    "%s", ddi_driver_name(ddi_get_parent(bd->d_dip)));
		(void) snprintf(cinfo.dki_dname, sizeof (cinfo.dki_dname),
		    "%s", ddi_driver_name(bd->d_dip));
		cinfo.dki_unit = inst;
		cinfo.dki_flags = DKI_FMTVOL;
		cinfo.dki_partition = part;
		cinfo.dki_maxtransfer = bd->d_maxxfer / DEV_BSIZE;
		cinfo.dki_addr = 0;
		cinfo.dki_slave = 0;
		cinfo.dki_space = 0;
		cinfo.dki_prio = 0;
		cinfo.dki_vec = 0;
		if (ddi_copyout(&cinfo, ptr, sizeof (cinfo), flag)) {
			return (EFAULT);
		}
		return (0);
	}
	case DKIOCREMOVABLE: {
		int i;
		i = bd->d_removable ? 1 : 0;
		if (ddi_copyout(&i, ptr, sizeof (i), flag)) {
			return (EFAULT);
		}
		return (0);
	}
	case DKIOCHOTPLUGGABLE: {
		int i;
		i = bd->d_hotpluggable ? 1 : 0;
		if (ddi_copyout(&i, ptr, sizeof (i), flag)) {
			return (EFAULT);
		}
		return (0);
	}
	case DKIOCREADONLY: {
		int i;
		i = bd->d_rdonly ? 1 : 0;
		if (ddi_copyout(&i, ptr, sizeof (i), flag)) {
			return (EFAULT);
		}
		return (0);
	}
	case DKIOCSOLIDSTATE: {
		int i;
		i = bd->d_ssd ? 1 : 0;
		if (ddi_copyout(&i, ptr, sizeof (i), flag)) {
			return (EFAULT);
		}
		return (0);
	}
	case DKIOCSTATE: {
		enum dkio_state state;
		if (ddi_copyin(ptr, &state, sizeof (state), flag)) {
			return (EFAULT);
		}
		if ((rv = bd_check_state(bd, &state)) != 0) {
			return (rv);
		}
		if (ddi_copyout(&state, ptr, sizeof (state), flag)) {
			return (EFAULT);
		}
		return (0);
	}
	case DKIOCFLUSHWRITECACHE: {
		struct dk_callback *dkc = NULL;

		if (flag & FKIOCTL)
			dkc = (void *)arg;

		rv = bd_flush_write_cache(bd, dkc);
		return (rv);
	}
	case DKIOCFREE: {
		dkioc_free_list_t *dfl = NULL;

		/*
		 * Check free space support early to avoid copyin/allocation
		 * when unnecessary.
		 */
		if (!CAN_FREESPACE(bd))
			return (ENOTSUP);

		rv = dfl_copyin(ptr, &dfl, flag, KM_SLEEP);
		if (rv != 0)
			return (rv);

		/*
		 * bd_free_space() consumes 'dfl'.  bd_free_space() will
		 * call dfl_iter() which will normally try to pass dfl through
		 * to bd_free_space_cb() which attaches dfl to the bd_xfer_t
		 * that is then queued for the underlying driver.  Once the
		 * driver processes the request, the bd_xfer_t instance is
		 * disposed of, including any attached dkioc_free_list_t.
		 *
		 * If dfl cannot be processed by the underlying driver due to
		 * size or alignment requirements of the driver, dfl_iter()
		 * will replace dfl with one or more new dkioc_free_list_t
		 * instances with the correct alignment and sizes for the
		 * driver (and free the original dkioc_free_list_t).
		 */
		rv = bd_free_space(dev, bd, dfl);
		return (rv);
	}

	case DKIOC_CANFREE: {
		boolean_t supported = CAN_FREESPACE(bd);

		if (ddi_copyout(&supported, (void *)arg, sizeof (supported),
		    flag) != 0) {
			return (EFAULT);
		}

		return (0);
	}

	default:
		break;

	}
	return (ENOTTY);
}

static int
bd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	bd_t	*bd;

	bd = ddi_get_soft_state(bd_state, ddi_get_instance(dip));
	if (bd == NULL)
		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp));

	return (cmlb_prop_op(bd->d_cmlbh, dev, dip, prop_op, mod_flags, name,
	    valuep, lengthp, BDPART(dev), 0));
}


static int
bd_tg_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr, diskaddr_t start,
    size_t length, void *tg_cookie)
{
	bd_t		*bd;
	buf_t		*bp;
	bd_xfer_impl_t	*xi;
	int		rv;
	int		(*func)(void *, bd_xfer_t *);
	int		kmflag;

	/*
	 * If we are running in polled mode (such as during dump(9e)
	 * execution), then we cannot sleep for kernel allocations.
	 */
	kmflag = tg_cookie ? KM_NOSLEEP : KM_SLEEP;

	bd = ddi_get_soft_state(bd_state, ddi_get_instance(dip));

	if (P2PHASE(length, (1U << bd->d_blkshift)) != 0) {
		/* We can only transfer whole blocks at a time! */
		return (EINVAL);
	}

	if ((bp = getrbuf(kmflag)) == NULL) {
		return (ENOMEM);
	}

	switch (cmd) {
	case TG_READ:
		bp->b_flags = B_READ;
		func = bd->d_ops.o_read;
		break;
	case TG_WRITE:
		bp->b_flags = B_WRITE;
		func = bd->d_ops.o_write;
		break;
	default:
		freerbuf(bp);
		return (EINVAL);
	}

	bp->b_un.b_addr = bufaddr;
	bp->b_bcount = length;
	xi = bd_xfer_alloc(bd, bp, func, kmflag);
	if (xi == NULL) {
		rv = geterror(bp);
		freerbuf(bp);
		return (rv);
	}
	xi->i_flags = tg_cookie ? BD_XFER_POLL : 0;
	xi->i_blkno = start;
	bd_submit(bd, xi);
	(void) biowait(bp);
	rv = geterror(bp);
	freerbuf(bp);

	return (rv);
}

static int
bd_tg_getinfo(dev_info_t *dip, int cmd, void *arg, void *tg_cookie)
{
	bd_t		*bd;

	_NOTE(ARGUNUSED(tg_cookie));
	bd = ddi_get_soft_state(bd_state, ddi_get_instance(dip));

	switch (cmd) {
	case TG_GETPHYGEOM:
	case TG_GETVIRTGEOM:
		/*
		 * We don't have any "geometry" as such, let cmlb
		 * fabricate something.
		 */
		return (ENOTTY);

	case TG_GETCAPACITY:
		bd_update_state(bd);
		*(diskaddr_t *)arg = bd->d_numblks;
		return (0);

	case TG_GETBLOCKSIZE:
		*(uint32_t *)arg = (1U << bd->d_blkshift);
		return (0);

	case TG_GETATTR:
		/*
		 * It turns out that cmlb really doesn't do much for
		 * non-writable media, but let's make the information
		 * available for it in case it does more in the
		 * future.  (The value is currently used for
		 * triggering special behavior for CD-ROMs.)
		 */
		bd_update_state(bd);
		((tg_attribute_t *)arg)->media_is_writable =
		    bd->d_rdonly ? B_FALSE : B_TRUE;
		((tg_attribute_t *)arg)->media_is_solid_state = bd->d_ssd;
		((tg_attribute_t *)arg)->media_is_rotational = B_FALSE;
		return (0);

	default:
		return (EINVAL);
	}
}


static void
bd_sched(bd_t *bd, bd_queue_t *bq)
{
	bd_xfer_impl_t	*xi;
	struct buf	*bp;
	int		rv;

	mutex_enter(&bq->q_iomutex);

	while ((bq->q_qactive < bq->q_qsize) &&
	    ((xi = list_remove_head(&bq->q_waitq)) != NULL)) {
		mutex_enter(&bd->d_ksmutex);
		kstat_waitq_to_runq(bd->d_kiop);
		mutex_exit(&bd->d_ksmutex);

		bq->q_qactive++;
		list_insert_tail(&bq->q_runq, xi);

		/*
		 * Submit the job to the driver.  We drop the I/O mutex
		 * so that we can deal with the case where the driver
		 * completion routine calls back into us synchronously.
		 */

		mutex_exit(&bq->q_iomutex);

		rv = xi->i_func(bd->d_private, &xi->i_public);
		if (rv != 0) {
			bp = xi->i_bp;
			bioerror(bp, rv);
			biodone(bp);

			atomic_inc_32(&bd->d_kerr->bd_transerrs.value.ui32);

			mutex_enter(&bq->q_iomutex);

			mutex_enter(&bd->d_ksmutex);
			kstat_runq_exit(bd->d_kiop);
			mutex_exit(&bd->d_ksmutex);

			bq->q_qactive--;
			list_remove(&bq->q_runq, xi);
			bd_xfer_free(xi);
		} else {
			mutex_enter(&bq->q_iomutex);
		}
	}

	mutex_exit(&bq->q_iomutex);
}

static void
bd_submit(bd_t *bd, bd_xfer_impl_t *xi)
{
	uint64_t	nv = atomic_inc_64_nv(&bd->d_io_counter);
	unsigned	q = nv % bd->d_qcount;
	bd_queue_t	*bq = &bd->d_queues[q];

	xi->i_bq = bq;
	xi->i_qnum = q;

	mutex_enter(&bq->q_iomutex);

	list_insert_tail(&bq->q_waitq, xi);

	mutex_enter(&bd->d_ksmutex);
	kstat_waitq_enter(bd->d_kiop);
	mutex_exit(&bd->d_ksmutex);

	mutex_exit(&bq->q_iomutex);

	bd_sched(bd, bq);
}

static void
bd_runq_exit(bd_xfer_impl_t *xi, int err)
{
	bd_t		*bd = xi->i_bd;
	buf_t		*bp = xi->i_bp;
	bd_queue_t	*bq = xi->i_bq;

	mutex_enter(&bq->q_iomutex);
	bq->q_qactive--;

	mutex_enter(&bd->d_ksmutex);
	kstat_runq_exit(bd->d_kiop);
	mutex_exit(&bd->d_ksmutex);

	list_remove(&bq->q_runq, xi);
	mutex_exit(&bq->q_iomutex);

	if (err == 0) {
		if (bp->b_flags & B_READ) {
			atomic_inc_uint(&bd->d_kiop->reads);
			atomic_add_64((uint64_t *)&bd->d_kiop->nread,
			    bp->b_bcount - xi->i_resid);
		} else {
			atomic_inc_uint(&bd->d_kiop->writes);
			atomic_add_64((uint64_t *)&bd->d_kiop->nwritten,
			    bp->b_bcount - xi->i_resid);
		}
	}
	bd_sched(bd, bq);
}

static void
bd_dle_sysevent_task(void *arg)
{
	nvlist_t	*attr = NULL;
	char		*path = NULL;
	bd_t		*bd = arg;
	dev_info_t	*dip = bd->d_dip;
	size_t		n;

	mutex_enter(&bd->d_dle_mutex);
	bd->d_dle_state &= ~BD_DLE_PENDING;
	bd->d_dle_state |= BD_DLE_RUNNING;
	mutex_exit(&bd->d_dle_mutex);

	dev_err(dip, CE_NOTE, "!dynamic LUN expansion");

	if (nvlist_alloc(&attr, NV_UNIQUE_NAME_TYPE, KM_SLEEP) != 0) {
		mutex_enter(&bd->d_dle_mutex);
		bd->d_dle_state &= ~(BD_DLE_RUNNING|BD_DLE_PENDING);
		mutex_exit(&bd->d_dle_mutex);
		return;
	}

	path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

	n = snprintf(path, MAXPATHLEN, "/devices");
	(void) ddi_pathname(dip, path + n);
	n = strlen(path);
	n += snprintf(path + n, MAXPATHLEN - n, ":x");

	for (;;) {
		/*
		 * On receipt of this event, the ZFS sysevent module will scan
		 * active zpools for child vdevs matching this physical path.
		 * In order to catch both whole disk pools and those with an
		 * EFI boot partition, generate separate sysevents for minor
		 * node 'a' and 'b'.
		 */
		for (char c = 'a'; c < 'c'; c++) {
			path[n - 1] = c;

			if (nvlist_add_string(attr, DEV_PHYS_PATH, path) != 0)
				break;

			(void) ddi_log_sysevent(dip, DDI_VENDOR_SUNW,
			    EC_DEV_STATUS, ESC_DEV_DLE, attr, NULL, DDI_SLEEP);
		}

		mutex_enter(&bd->d_dle_mutex);
		if ((bd->d_dle_state & BD_DLE_PENDING) == 0) {
			bd->d_dle_state &= ~BD_DLE_RUNNING;
			mutex_exit(&bd->d_dle_mutex);
			break;
		}
		bd->d_dle_state &= ~BD_DLE_PENDING;
		mutex_exit(&bd->d_dle_mutex);
	}

	nvlist_free(attr);
	kmem_free(path, MAXPATHLEN);
}

static void
bd_update_state(bd_t *bd)
{
	enum dkio_state	state = DKIO_INSERTED;
	boolean_t	docmlb = B_FALSE;
	bd_media_t	media;

	bzero(&media, sizeof (media));

	mutex_enter(&bd->d_statemutex);
	if (bd->d_ops.o_media_info(bd->d_private, &media) != 0) {
		bd->d_numblks = 0;
		state = DKIO_EJECTED;
		goto done;
	}

	if ((media.m_blksize < 512) ||
	    (!ISP2(media.m_blksize)) ||
	    (P2PHASE(bd->d_maxxfer, media.m_blksize))) {
		dev_err(bd->d_dip, CE_WARN, "Invalid media block size (%d)",
		    media.m_blksize);
		/*
		 * We can't use the media, treat it as not present.
		 */
		state = DKIO_EJECTED;
		bd->d_numblks = 0;
		goto done;
	}

	if (((1U << bd->d_blkshift) != media.m_blksize) ||
	    (bd->d_numblks != media.m_nblks)) {
		/* Device size changed */
		docmlb = B_TRUE;
	}

	bd->d_blkshift = ddi_ffs(media.m_blksize) - 1;
	bd->d_pblkshift = bd->d_blkshift;
	bd->d_numblks = media.m_nblks;
	bd->d_rdonly = media.m_readonly;
	bd->d_ssd = media.m_solidstate;

	/*
	 * Only use the supplied physical block size if it is non-zero,
	 * greater than or equal to the logical block size, and a power
	 * of 2.  Ignore it if not; it's only informational and we can
	 * still use the media.
	 */
	if ((media.m_pblksize != 0) &&
	    (media.m_pblksize >= media.m_blksize) &&
	    (ISP2(media.m_pblksize)))
		bd->d_pblkshift = ddi_ffs(media.m_pblksize) - 1;

done:
	if (state != bd->d_state) {
		bd->d_state = state;
		cv_broadcast(&bd->d_statecv);
		docmlb = B_TRUE;
	}
	mutex_exit(&bd->d_statemutex);

	bd->d_kerr->bd_capacity.value.ui64 = bd->d_numblks << bd->d_blkshift;

	if (docmlb) {
		if (state == DKIO_INSERTED) {
			(void) cmlb_validate(bd->d_cmlbh, 0, 0);

			mutex_enter(&bd->d_dle_mutex);
			/*
			 * If there is already an event pending, there's
			 * nothing to do; we coalesce multiple events.
			 */
			if ((bd->d_dle_state & BD_DLE_PENDING) == 0) {
				if ((bd->d_dle_state & BD_DLE_RUNNING) == 0) {
					taskq_dispatch_ent(bd_taskq,
					    bd_dle_sysevent_task, bd, 0,
					    &bd->d_dle_ent);
				}
				bd->d_dle_state |= BD_DLE_PENDING;
			}
			mutex_exit(&bd->d_dle_mutex);
		} else {
			cmlb_invalidate(bd->d_cmlbh, 0);
		}
	}
}

static int
bd_check_state(bd_t *bd, enum dkio_state *state)
{
	clock_t		when;

	for (;;) {

		bd_update_state(bd);

		mutex_enter(&bd->d_statemutex);

		if (bd->d_state != *state) {
			*state = bd->d_state;
			mutex_exit(&bd->d_statemutex);
			break;
		}

		when = drv_usectohz(1000000);
		if (cv_reltimedwait_sig(&bd->d_statecv, &bd->d_statemutex,
		    when, TR_CLOCK_TICK) == 0) {
			mutex_exit(&bd->d_statemutex);
			return (EINTR);
		}

		mutex_exit(&bd->d_statemutex);
	}

	return (0);
}

static int
bd_flush_write_cache_done(struct buf *bp)
{
	struct dk_callback *dc = (void *)bp->b_private;

	(*dc->dkc_callback)(dc->dkc_cookie, geterror(bp));
	kmem_free(dc, sizeof (*dc));
	freerbuf(bp);
	return (0);
}

static int
bd_flush_write_cache(bd_t *bd, struct dk_callback *dkc)
{
	buf_t			*bp;
	struct dk_callback	*dc;
	bd_xfer_impl_t		*xi;
	int			rv;

	if (bd->d_ops.o_sync_cache == NULL) {
		return (ENOTSUP);
	}
	if ((bp = getrbuf(KM_SLEEP)) == NULL) {
		return (ENOMEM);
	}
	bp->b_resid = 0;
	bp->b_bcount = 0;

	xi = bd_xfer_alloc(bd, bp, bd->d_ops.o_sync_cache, KM_SLEEP);
	if (xi == NULL) {
		rv = geterror(bp);
		freerbuf(bp);
		return (rv);
	}

	/* Make an asynchronous flush, but only if there is a callback */
	if (dkc != NULL && dkc->dkc_callback != NULL) {
		/* Make a private copy of the callback structure */
		dc = kmem_alloc(sizeof (*dc), KM_SLEEP);
		*dc = *dkc;
		bp->b_private = dc;
		bp->b_iodone = bd_flush_write_cache_done;

		bd_submit(bd, xi);
		return (0);
	}

	/* In case there is no callback, perform a synchronous flush */
	bd_submit(bd, xi);
	(void) biowait(bp);
	rv = geterror(bp);
	freerbuf(bp);

	return (rv);
}

static int
bd_free_space_done(struct buf *bp)
{
	freerbuf(bp);
	return (0);
}

static int
bd_free_space_cb(dkioc_free_list_t *dfl, void *arg, int kmflag)
{
	bd_t		*bd = arg;
	buf_t		*bp = NULL;
	bd_xfer_impl_t	*xi = NULL;
	boolean_t	sync = DFL_ISSYNC(dfl) ? B_TRUE : B_FALSE;
static int
bd_free_space_done(struct buf *bp)
{
	freerbuf(bp);
	return (0);
}

static int
bd_free_space_cb(dkioc_free_list_t *dfl, void *arg, int kmflag)
{
	bd_t *bd = arg;
	buf_t *bp = NULL;
	bd_xfer_impl_t *xi = NULL;
	boolean_t sync = DFL_ISSYNC(dfl) ? B_TRUE : B_FALSE;
	int rv = 0;

	bp = getrbuf(KM_SLEEP);
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_lblkno = 0;

	xi = bd_xfer_alloc(bd, bp, bd->d_ops.o_free_space, kmflag);
	xi->i_dfl = dfl;

	if (!sync) {
		bp->b_iodone = bd_free_space_done;
		bd_submit(bd, xi);
		return (0);
	}

	xi->i_flags |= BD_XFER_POLL;
	bd_submit(bd, xi);

	(void) biowait(bp);
	rv = geterror(bp);
	freerbuf(bp);

	return (rv);
}

static int
bd_free_space(dev_t dev, bd_t *bd, dkioc_free_list_t *dfl)
{
	diskaddr_t p_len, p_offset;
	uint64_t offset_bytes, len_bytes;
	minor_t part = BDPART(dev);
	const uint_t bshift = bd->d_blkshift;
	dkioc_free_info_t dfi = {
		.dfi_bshift = bshift,
		.dfi_align = bd->d_free_align << bshift,
		.dfi_max_bytes = bd->d_max_free_blks << bshift,
		.dfi_max_ext = bd->d_max_free_seg,
		.dfi_max_ext_bytes = bd->d_max_free_seg_blks << bshift,
	};

	if (cmlb_partinfo(bd->d_cmlbh, part, &p_len, &p_offset, NULL,
	    NULL, 0) != 0) {
		dfl_free(dfl);
		return (ENXIO);
	}

	/*
	 * bd_ioctl created our own copy of dfl, so we can modify it as
	 * necessary.
	 */
	offset_bytes = (uint64_t)p_offset << bshift;
	len_bytes = (uint64_t)p_len << bshift;

	dfl->dfl_offset += offset_bytes;
	if (dfl->dfl_offset < offset_bytes) {
		dfl_free(dfl);
		return (EOVERFLOW);
	}

	return (dfl_iter(dfl, &dfi, offset_bytes + len_bytes, bd_free_space_cb,
	    bd, KM_SLEEP));
}

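/*
 * A short worked example of the offset adjustment above: with a 512-byte
 * logical block size (bshift = 9) and a partition starting at block 2048
 * with a length of 1000000 blocks, offset_bytes is 2048 << 9 = 1048576 and
 * len_bytes is 1000000 << 9 = 512000000. A partition-relative dfl_offset of
 * 0 therefore becomes an absolute byte offset of 1048576, and dfl_iter() is
 * bounded by offset_bytes + len_bytes = 513048576. The numbers are
 * illustrative only.
 */
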
/*
 * Nexus support.
 */
int
bd_bus_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
    void *arg, void *result)
{
	bd_handle_t hdl;

	switch (ctlop) {
	case DDI_CTLOPS_REPORTDEV:
		cmn_err(CE_CONT, "?Block device: %s@%s, %s%d\n",
		    ddi_node_name(rdip), ddi_get_name_addr(rdip),
		    ddi_driver_name(rdip), ddi_get_instance(rdip));
		return (DDI_SUCCESS);

	case DDI_CTLOPS_INITCHILD:
		hdl = ddi_get_parent_data((dev_info_t *)arg);
		if (hdl == NULL) {
			return (DDI_NOT_WELL_FORMED);
		}
		ddi_set_name_addr((dev_info_t *)arg, hdl->h_addr);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_UNINITCHILD:
		ddi_set_name_addr((dev_info_t *)arg, NULL);
		ndi_prop_remove_all((dev_info_t *)arg);
		return (DDI_SUCCESS);

	default:
		return (ddi_ctlops(dip, rdip, ctlop, arg, result));
	}
}

/*
 * Functions for device drivers.
 */
bd_handle_t
bd_alloc_handle(void *private, bd_ops_t *ops, ddi_dma_attr_t *dma, int kmflag)
{
	bd_handle_t hdl;

	switch (ops->o_version) {
	case BD_OPS_VERSION_0:
	case BD_OPS_VERSION_1:
	case BD_OPS_VERSION_2:
		break;

	default:
		/* Unsupported version */
		return (NULL);
	}

	hdl = kmem_zalloc(sizeof (*hdl), kmflag);
	if (hdl == NULL) {
		return (NULL);
	}

	switch (ops->o_version) {
	case BD_OPS_VERSION_2:
		hdl->h_ops.o_free_space = ops->o_free_space;
		/*FALLTHRU*/
	case BD_OPS_VERSION_1:
	case BD_OPS_VERSION_0:
		hdl->h_ops.o_drive_info = ops->o_drive_info;
		hdl->h_ops.o_media_info = ops->o_media_info;
		hdl->h_ops.o_devid_init = ops->o_devid_init;
		hdl->h_ops.o_sync_cache = ops->o_sync_cache;
		hdl->h_ops.o_read = ops->o_read;
		hdl->h_ops.o_write = ops->o_write;
		break;
	}

	hdl->h_dma = dma;
	hdl->h_private = private;

	return (hdl);
}

void
bd_free_handle(bd_handle_t hdl)
{
	kmem_free(hdl, sizeof (*hdl));
}

int
bd_attach_handle(dev_info_t *dip, bd_handle_t hdl)
{
	bd_drive_t drive = { 0 };
	dev_info_t *child;
	size_t len;

	/*
	 * It's not an error if bd_attach_handle() is called on a handle that
	 * is already attached. We just ignore the request to attach and
	 * return. This way drivers using blkdev don't have to keep track of
	 * blkdev state; they can just call this function to make sure it is
	 * attached.
	 */
	if (hdl->h_child != NULL) {
		return (DDI_SUCCESS);
	}

	/* If the driver doesn't override this, assume there is no LUN. */
	drive.d_lun = -1;
	hdl->h_ops.o_drive_info(hdl->h_private, &drive);

	hdl->h_parent = dip;
	hdl->h_name = "blkdev";

	/*
	 * Prefer the GUID over the EUI64.
	 */
	if (*(uint64_t *)drive.d_guid != 0 ||
	    *((uint64_t *)drive.d_guid + 1) != 0) {
		len = snprintf(hdl->h_addr, sizeof (hdl->h_addr),
		    "w%02X%02X%02X%02X%02X%02X%02X%02X"
		    "%02X%02X%02X%02X%02X%02X%02X%02X",
		    drive.d_guid[0], drive.d_guid[1], drive.d_guid[2],
		    drive.d_guid[3], drive.d_guid[4], drive.d_guid[5],
		    drive.d_guid[6], drive.d_guid[7], drive.d_guid[8],
		    drive.d_guid[9], drive.d_guid[10], drive.d_guid[11],
		    drive.d_guid[12], drive.d_guid[13], drive.d_guid[14],
		    drive.d_guid[15]);
	} else if (*(uint64_t *)drive.d_eui64 != 0) {
		len = snprintf(hdl->h_addr, sizeof (hdl->h_addr),
		    "w%02X%02X%02X%02X%02X%02X%02X%02X",
		    drive.d_eui64[0], drive.d_eui64[1],
		    drive.d_eui64[2], drive.d_eui64[3],
		    drive.d_eui64[4], drive.d_eui64[5],
		    drive.d_eui64[6], drive.d_eui64[7]);
	} else {
		len = snprintf(hdl->h_addr, sizeof (hdl->h_addr),
		    "%X", drive.d_target);
	}

	VERIFY(len <= sizeof (hdl->h_addr));

	if (drive.d_lun >= 0) {
		(void) snprintf(hdl->h_addr + len, sizeof (hdl->h_addr) - len,
		    ",%X", drive.d_lun);
	}

	if (ndi_devi_alloc(dip, hdl->h_name, (pnode_t)DEVI_SID_NODEID,
	    &child) != NDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: unable to allocate node %s@%s",
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    "blkdev", hdl->h_addr);
		return (DDI_FAILURE);
	}

	ddi_set_parent_data(child, hdl);
	hdl->h_child = child;

	if (ndi_devi_online(child, 0) != NDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: failed bringing node %s@%s online",
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    hdl->h_name, hdl->h_addr);
		(void) ndi_devi_free(child);
		hdl->h_child = NULL;
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

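/*
 * For illustration, a hypothetical parent driver "xx" could wire itself up
 * to blkdev from its attach(9E) entry point roughly as follows. The softc
 * layout, callback names and DMA attributes are assumptions, not part of
 * blkdev itself:
 *
 *	static bd_ops_t xx_bd_ops = {
 *		.o_version	= BD_OPS_VERSION_2,
 *		.o_drive_info	= xx_drive_info,
 *		.o_media_info	= xx_media_info,
 *		.o_devid_init	= xx_devid_init,
 *		.o_sync_cache	= xx_sync_cache,
 *		.o_read		= xx_read,
 *		.o_write	= xx_write,
 *		.o_free_space	= xx_free_space,
 *	};
 *
 *	sc->xx_bd_handle = bd_alloc_handle(sc, &xx_bd_ops,
 *	    &xx_dma_attr, KM_SLEEP);
 *	if (sc->xx_bd_handle == NULL)
 *		return (DDI_FAILURE);
 *	if (bd_attach_handle(dip, sc->xx_bd_handle) != DDI_SUCCESS) {
 *		bd_free_handle(sc->xx_bd_handle);
 *		return (DDI_FAILURE);
 *	}
 *
 * The corresponding detach path would call bd_detach_handle() (below) and
 * then bd_free_handle().
 */
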
int
bd_detach_handle(bd_handle_t hdl)
{
	int circ;
	int rv;
	char *devnm;

	/*
	 * It's not an error if bd_detach_handle() is called on a handle that
	 * is already detached. We just ignore the request to detach and
	 * return. This way drivers using blkdev don't have to keep track of
	 * blkdev state; they can just call this function to make sure it is
	 * detached.
	 */
	if (hdl->h_child == NULL) {
		return (DDI_SUCCESS);
	}
	ndi_devi_enter(hdl->h_parent, &circ);
	if (i_ddi_node_state(hdl->h_child) < DS_INITIALIZED) {
		rv = ddi_remove_child(hdl->h_child, 0);
	} else {
		devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
		(void) ddi_deviname(hdl->h_child, devnm);
		(void) devfs_clean(hdl->h_parent, devnm + 1, DV_CLEAN_FORCE);
		rv = ndi_devi_unconfig_one(hdl->h_parent, devnm + 1, NULL,
		    NDI_DEVI_REMOVE | NDI_UNCONFIG);
		kmem_free(devnm, MAXNAMELEN + 1);
	}
	if (rv == 0) {
		hdl->h_child = NULL;
	}

	ndi_devi_exit(hdl->h_parent, circ);
	return (rv == NDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
}

void
bd_xfer_done(bd_xfer_t *xfer, int err)
{
	bd_xfer_impl_t *xi = (void *)xfer;
	buf_t *bp = xi->i_bp;
	int rv = DDI_SUCCESS;
	bd_t *bd = xi->i_bd;
	size_t len;

	if (err != 0) {
		bd_runq_exit(xi, err);
		atomic_inc_32(&bd->d_kerr->bd_harderrs.value.ui32);

		bp->b_resid += xi->i_resid;
		bd_xfer_free(xi);
		bioerror(bp, err);
		biodone(bp);
		return;
	}

	xi->i_cur_win++;
	xi->i_resid -= xi->i_len;

	if (xi->i_resid == 0) {
		/* Job completed successfully! */
		bd_runq_exit(xi, 0);

		bd_xfer_free(xi);
		biodone(bp);
		return;
	}

	xi->i_blkno += xi->i_nblks;

	if (bd->d_use_dma) {
		/* More transfer still pending... advance to next DMA window */
		rv = ddi_dma_getwin(xi->i_dmah, xi->i_cur_win,
		    &xi->i_offset, &len, &xi->i_dmac, &xi->i_ndmac);
	} else {
		/* Advance memory window. */
		xi->i_kaddr += xi->i_len;
		xi->i_offset += xi->i_len;
		len = min(bp->b_bcount - xi->i_offset, bd->d_maxxfer);
	}

	if ((rv != DDI_SUCCESS) ||
	    (P2PHASE(len, (1U << xi->i_blkshift)) != 0)) {
		bd_runq_exit(xi, EFAULT);

		bp->b_resid += xi->i_resid;
		bd_xfer_free(xi);
		bioerror(bp, EFAULT);
		biodone(bp);
		return;
	}
	xi->i_len = len;
	xi->i_nblks = len >> xi->i_blkshift;

	/* Submit next window to hardware. */
	rv = xi->i_func(bd->d_private, &xi->i_public);
	if (rv != 0) {
		bd_runq_exit(xi, rv);

		atomic_inc_32(&bd->d_kerr->bd_transerrs.value.ui32);

		bp->b_resid += xi->i_resid;
		bd_xfer_free(xi);
		bioerror(bp, rv);
		biodone(bp);
	}
}

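/*
 * bd_xfer_done() and bd_error() (below) together form the completion
 * interface used by a parent driver. A minimal sketch of a hypothetical
 * "xx" driver's hardware-completion routine follows; the command structure
 * and status decoding are assumptions for illustration only:
 *
 *	static void
 *	xx_cmd_complete(xx_cmd_t *cmd)
 *	{
 *		bd_xfer_t *xfer = cmd->xc_xfer;
 *
 *		if (cmd->xc_status == XX_OK) {
 *			bd_xfer_done(xfer, 0);
 *		} else {
 *			if (cmd->xc_status == XX_MEDIA_ERROR)
 *				bd_error(xfer, BD_ERR_MEDIA);
 *			else
 *				bd_error(xfer, BD_ERR_ILLRQ);
 *			bd_xfer_done(xfer, EIO);
 *		}
 *	}
 *
 * Note that bd_error() only updates the error kstats; the transfer itself
 * is always retired through bd_xfer_done().
 */
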
void
bd_error(bd_xfer_t *xfer, int error)
{
	bd_xfer_impl_t *xi = (void *)xfer;
	bd_t *bd = xi->i_bd;

	switch (error) {
	case BD_ERR_MEDIA:
		atomic_inc_32(&bd->d_kerr->bd_rq_media_err.value.ui32);
		break;
	case BD_ERR_NTRDY:
		atomic_inc_32(&bd->d_kerr->bd_rq_ntrdy_err.value.ui32);
		break;
	case BD_ERR_NODEV:
		atomic_inc_32(&bd->d_kerr->bd_rq_nodev_err.value.ui32);
		break;
	case BD_ERR_RECOV:
		atomic_inc_32(&bd->d_kerr->bd_rq_recov_err.value.ui32);
		break;
	case BD_ERR_ILLRQ:
		atomic_inc_32(&bd->d_kerr->bd_rq_illrq_err.value.ui32);
		break;
	case BD_ERR_PFA:
		atomic_inc_32(&bd->d_kerr->bd_rq_pfa_err.value.ui32);
		break;
	default:
		cmn_err(CE_PANIC, "bd_error: unknown error type %d", error);
		break;
	}
}

void
bd_state_change(bd_handle_t hdl)
{
	bd_t *bd;

	if ((bd = hdl->h_bd) != NULL) {
		bd_update_state(bd);
	}
}

void
bd_mod_init(struct dev_ops *devops)
{
	static struct bus_ops bd_bus_ops = {
		BUSO_REV,		/* busops_rev */
		nullbusmap,		/* bus_map */
		NULL,			/* bus_get_intrspec (OBSOLETE) */
		NULL,			/* bus_add_intrspec (OBSOLETE) */
		NULL,			/* bus_remove_intrspec (OBSOLETE) */
		i_ddi_map_fault,	/* bus_map_fault */
		NULL,			/* bus_dma_map (OBSOLETE) */
		ddi_dma_allochdl,	/* bus_dma_allochdl */
		ddi_dma_freehdl,	/* bus_dma_freehdl */
		ddi_dma_bindhdl,	/* bus_dma_bindhdl */
		ddi_dma_unbindhdl,	/* bus_dma_unbindhdl */
		ddi_dma_flush,		/* bus_dma_flush */
		ddi_dma_win,		/* bus_dma_win */
		ddi_dma_mctl,		/* bus_dma_ctl */
		bd_bus_ctl,		/* bus_ctl */
		ddi_bus_prop_op,	/* bus_prop_op */
		NULL,			/* bus_get_eventcookie */
		NULL,			/* bus_add_eventcall */
		NULL,			/* bus_remove_eventcall */
		NULL,			/* bus_post_event */
		NULL,			/* bus_intr_ctl (OBSOLETE) */
		NULL,			/* bus_config */
		NULL,			/* bus_unconfig */
		NULL,			/* bus_fm_init */
		NULL,			/* bus_fm_fini */
		NULL,			/* bus_fm_access_enter */
		NULL,			/* bus_fm_access_exit */
		NULL,			/* bus_power */
		NULL,			/* bus_intr_op */
	};

	devops->devo_bus_ops = &bd_bus_ops;

	/*
	 * NB: The device driver is free to supply its own
	 * character device entry points.
	 */
}

2601 */ 2602 } 2603 2604 void 2605 bd_mod_fini(struct dev_ops *devops) 2606 { 2607 devops->devo_bus_ops = NULL; 2608 } 2609