/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/scsi/scsi.h>
#include <sys/dktp/cm.h>
#include <sys/dktp/quetypes.h>
#include <sys/dktp/queue.h>
#include <sys/dktp/fctypes.h>
#include <sys/dktp/flowctrl.h>
#include <sys/dktp/cmdev.h>
#include <sys/dkio.h>
#include <sys/dktp/tgdk.h>
#include <sys/dktp/dadk.h>
#include <sys/dktp/bbh.h>
#include <sys/dktp/altsctr.h>
#include <sys/dktp/cmdk.h>

#include <sys/stat.h>
#include <sys/vtoc.h>
#include <sys/file.h>
#include <sys/dktp/dadkio.h>
#include <sys/aio_req.h>

#include <sys/cmlb.h>

/*
 * Local Static Data
 */
#ifdef CMDK_DEBUG
#define DENT    0x0001
#define DIO     0x0002

static int cmdk_debug = DIO;
#endif

#ifndef TRUE
#define TRUE    1
#endif

#ifndef FALSE
#define FALSE   0
#endif

/*
 * NDKMAP is the base number for accessing the fdisk partitions.
 * c?d?p0 --> cmdk@?,?:q
 */
#define PARTITION0_INDEX        (NDKMAP + 0)

#define DKTP_DATA               (dkp->dk_tgobjp)->tg_data
#define DKTP_EXT                (dkp->dk_tgobjp)->tg_ext
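/*
 * Note on device numbering (a sketch; CMDKUNIT, CMDKPART and
 * CMDK_UNITSHF are assumed to come from <sys/dktp/cmdk.h>): the
 * minor number carries the instance in the bits above CMDK_UNITSHF
 * (see the makedevice() call in cmdkprobe) and the partition index
 * in the low bits, so CMDKUNIT(dev) recovers the instance and
 * CMDKPART(dev) the partition.  Indexes 0..NDKMAP-1 are the label
 * slices; fdisk partitions start at PARTITION0_INDEX, so c?d?p0
 * maps to partition index NDKMAP + 0 as noted above.
 */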
void *cmdk_state;

/*
 * the cmdk_attach_mutex protects cmdk_max_instance in multi-threaded
 * attach situations
 */
static kmutex_t cmdk_attach_mutex;
static int cmdk_max_instance = 0;

/*
 * Panic dumpsys state
 * There is only a single flag that is not mutex locked, since
 * the system is prevented from thread switching and cmdk_dump
 * will only be called in a single-threaded operation.
 */
static int cmdk_indump;

/*
 * Local Function Prototypes
 */
static int cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp);
static void cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp);
static void cmdkmin(struct buf *bp);
static int cmdkrw(dev_t dev, struct uio *uio, int flag);
static int cmdkarw(dev_t dev, struct aio_req *aio, int flag);

/*
 * Bad Block Handling Function Prototypes
 */
static void cmdk_bbh_reopen(struct cmdk *dkp);
static opaque_t cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp);
static bbh_cookie_t cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle);
static void cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle);
static void cmdk_bbh_close(struct cmdk *dkp);
static void cmdk_bbh_setalts_idx(struct cmdk *dkp);
static int cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key);

static struct bbh_objops cmdk_bbh_ops = {
        nulldev,
        nulldev,
        cmdk_bbh_gethandle,
        cmdk_bbh_htoc,
        cmdk_bbh_freehandle,
        0, 0
};

static int cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp);
static int cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp);
static int cmdkstrategy(struct buf *bp);
static int cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int cmdkioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int cmdkread(dev_t dev, struct uio *uio, cred_t *credp);
static int cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp);
static int cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);
static int cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp);
static int cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp);

/*
 * Device driver ops vector
 */

static struct cb_ops cmdk_cb_ops = {
        cmdkopen,                       /* open */
        cmdkclose,                      /* close */
        cmdkstrategy,                   /* strategy */
        nodev,                          /* print */
        cmdkdump,                       /* dump */
        cmdkread,                       /* read */
        cmdkwrite,                      /* write */
        cmdkioctl,                      /* ioctl */
        nodev,                          /* devmap */
        nodev,                          /* mmap */
        nodev,                          /* segmap */
        nochpoll,                       /* poll */
        cmdk_prop_op,                   /* cb_prop_op */
        0,                              /* streamtab */
        D_64BIT | D_MP | D_NEW,         /* Driver compatibility flag */
        CB_REV,                         /* cb_rev */
        cmdkaread,                      /* async read */
        cmdkawrite                      /* async write */
};

static int cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int cmdkprobe(dev_info_t *dip);
static int cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd);

static void cmdk_setup_pm(dev_info_t *dip, struct cmdk *dkp);
static int cmdkresume(dev_info_t *dip);
static int cmdksuspend(dev_info_t *dip);
static int cmdkpower(dev_info_t *dip, int component, int level);

struct dev_ops cmdk_ops = {
        DEVO_REV,                       /* devo_rev */
        0,                              /* refcnt */
        cmdkinfo,                       /* info */
        nulldev,                        /* identify */
        cmdkprobe,                      /* probe */
        cmdkattach,                     /* attach */
        cmdkdetach,                     /* detach */
        nodev,                          /* reset */
        &cmdk_cb_ops,                   /* driver operations */
        (struct bus_ops *)0,            /* bus operations */
        cmdkpower,                      /* power */
        ddi_quiesce_not_needed,         /* quiesce */
};

/*
 * This is the loadable module wrapper.
 */
#include <sys/modctl.h>

#ifndef XPV_HVM_DRIVER
static struct modldrv modldrv = {
        &mod_driverops,         /* Type of module. This one is a driver */
        "Common Direct Access Disk",
        &cmdk_ops,              /* driver ops */
};

static struct modlinkage modlinkage = {
        MODREV_1, (void *)&modldrv, NULL
};

#else /* XPV_HVM_DRIVER */
static struct modlmisc modlmisc = {
        &mod_miscops,           /* Type of module. This one is a misc */
        "HVM Common Direct Access Disk",
};

static struct modlinkage modlinkage = {
        MODREV_1, (void *)&modlmisc, NULL
};

#endif /* XPV_HVM_DRIVER */

/* Function prototypes for cmlb callbacks */

static int cmdk_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr,
    diskaddr_t start, size_t length, void *tg_cookie);

static int cmdk_lb_getinfo(dev_info_t *dip, int cmd, void *arg,
    void *tg_cookie);

static void cmdk_devid_setup(struct cmdk *dkp);
static int cmdk_devid_modser(struct cmdk *dkp);
static int cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len);
static int cmdk_devid_fabricate(struct cmdk *dkp);
static int cmdk_devid_read(struct cmdk *dkp);

static cmlb_tg_ops_t cmdk_lb_ops = {
        TG_DK_OPS_VERSION_1,
        cmdk_lb_rdwr,
        cmdk_lb_getinfo
};
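/*
 * Open-state accounting, summarized from cmdk_isopen(), cmdkopen()
 * and cmdkclose() below: for layered opens (OTYP_LYR) dk_open_lyr[]
 * keeps a per-partition reference count; for every other open type
 * dk_open_reg[otyp] keeps a bitmask of open partitions; dk_open_exl
 * is a bitmask of partitions currently opened with FEXCL.  A
 * partition counts as open while any of the three still references
 * it.
 */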
static boolean_t
cmdk_isopen(struct cmdk *dkp, dev_t dev)
{
        int             part, otyp;
        ulong_t         partbit;

        ASSERT(MUTEX_HELD((&dkp->dk_mutex)));

        part = CMDKPART(dev);
        partbit = 1 << part;

        /* account for close */
        if (dkp->dk_open_lyr[part] != 0)
                return (B_TRUE);
        for (otyp = 0; otyp < OTYPCNT; otyp++)
                if (dkp->dk_open_reg[otyp] & partbit)
                        return (B_TRUE);
        return (B_FALSE);
}

int
_init(void)
{
        int rval;

#ifndef XPV_HVM_DRIVER
        if (rval = ddi_soft_state_init(&cmdk_state, sizeof (struct cmdk), 7))
                return (rval);
#endif /* !XPV_HVM_DRIVER */

        mutex_init(&cmdk_attach_mutex, NULL, MUTEX_DRIVER, NULL);
        if ((rval = mod_install(&modlinkage)) != 0) {
                mutex_destroy(&cmdk_attach_mutex);
#ifndef XPV_HVM_DRIVER
                ddi_soft_state_fini(&cmdk_state);
#endif /* !XPV_HVM_DRIVER */
        }
        return (rval);
}

int
_fini(void)
{
        /* this driver cannot be unloaded */
        return (EBUSY);
}

int
_info(struct modinfo *modinfop)
{
        return (mod_info(&modlinkage, modinfop));
}

/*
 * Autoconfiguration Routines
 */
static int
cmdkprobe(dev_info_t *dip)
{
        int             instance;
        int             status;
        struct cmdk     *dkp;

        instance = ddi_get_instance(dip);

#ifndef XPV_HVM_DRIVER
        if (ddi_get_soft_state(cmdk_state, instance))
                return (DDI_PROBE_PARTIAL);

        if (ddi_soft_state_zalloc(cmdk_state, instance) != DDI_SUCCESS)
                return (DDI_PROBE_PARTIAL);
#endif /* !XPV_HVM_DRIVER */

        if ((dkp = ddi_get_soft_state(cmdk_state, instance)) == NULL)
                return (DDI_PROBE_PARTIAL);

        mutex_init(&dkp->dk_mutex, NULL, MUTEX_DRIVER, NULL);
        rw_init(&dkp->dk_bbh_mutex, NULL, RW_DRIVER, NULL);
        dkp->dk_dip = dip;
        mutex_enter(&dkp->dk_mutex);

        dkp->dk_dev = makedevice(ddi_driver_major(dip),
            ddi_get_instance(dip) << CMDK_UNITSHF);

        /* linkage to dadk and strategy */
        if (cmdk_create_obj(dip, dkp) != DDI_SUCCESS) {
                mutex_exit(&dkp->dk_mutex);
                mutex_destroy(&dkp->dk_mutex);
                rw_destroy(&dkp->dk_bbh_mutex);
#ifndef XPV_HVM_DRIVER
                ddi_soft_state_free(cmdk_state, instance);
#endif /* !XPV_HVM_DRIVER */
                return (DDI_PROBE_PARTIAL);
        }

        status = dadk_probe(DKTP_DATA, KM_NOSLEEP);
        if (status != DDI_PROBE_SUCCESS) {
                cmdk_destroy_obj(dip, dkp);     /* dadk/strategy linkage */
                mutex_exit(&dkp->dk_mutex);
                mutex_destroy(&dkp->dk_mutex);
                rw_destroy(&dkp->dk_bbh_mutex);
#ifndef XPV_HVM_DRIVER
                ddi_soft_state_free(cmdk_state, instance);
#endif /* !XPV_HVM_DRIVER */
                return (status);
        }

        mutex_exit(&dkp->dk_mutex);
#ifdef CMDK_DEBUG
        if (cmdk_debug & DENT)
                PRF("cmdkprobe: instance= %d name= `%s`\n",
                    instance, ddi_get_name_addr(dip));
#endif
        return (status);
}

static int
cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
        int             instance;
        struct cmdk     *dkp;
        char            *node_type;

        switch (cmd) {
        case DDI_ATTACH:
                break;
        case DDI_RESUME:
                return (cmdkresume(dip));
        default:
                return (DDI_FAILURE);
        }

        instance = ddi_get_instance(dip);
        if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
                return (DDI_FAILURE);

        dkp->dk_pm_level = CMDK_SPINDLE_UNINIT;
        mutex_init(&dkp->dk_mutex, NULL, MUTEX_DRIVER, NULL);

        mutex_enter(&dkp->dk_mutex);

        /* dadk_attach is an empty function that only returns SUCCESS */
        (void) dadk_attach(DKTP_DATA);

        node_type = (DKTP_EXT->tg_nodetype);

        /*
         * this open allows cmlb to read the device
         * and determine the label types
         * so that cmlb can create minor nodes for the device
         */

        /* open the target disk */
        if (dadk_open(DKTP_DATA, 0) != DDI_SUCCESS)
                goto fail2;

#ifdef _ILP32
        {
                struct tgdk_geom phyg;
                (void) dadk_getphygeom(DKTP_DATA, &phyg);
                if ((phyg.g_cap - 1) > DK_MAX_BLOCKS) {
                        (void) dadk_close(DKTP_DATA);
                        goto fail2;
                }
        }
#endif

        /* mark as having opened target */
        dkp->dk_flag |= CMDK_TGDK_OPEN;

        cmlb_alloc_handle((cmlb_handle_t *)&dkp->dk_cmlbhandle);

        if (cmlb_attach(dip,
            &cmdk_lb_ops,
            DTYPE_DIRECT,               /* device_type */
            B_FALSE,                    /* removable */
            B_FALSE,                    /* hot pluggable XXX */
            node_type,
            CMLB_CREATE_ALTSLICE_VTOC_16_DTYPE_DIRECT,  /* alter_behaviour */
            dkp->dk_cmlbhandle,
            0) != 0)
                goto fail1;

        /* Calling validate will create minor nodes according to disk label */
        (void) cmlb_validate(dkp->dk_cmlbhandle, 0, 0);

        /* set bbh (Bad Block Handling) */
        cmdk_bbh_reopen(dkp);

        /* setup devid string */
        cmdk_devid_setup(dkp);

        mutex_enter(&cmdk_attach_mutex);
        if (instance > cmdk_max_instance)
                cmdk_max_instance = instance;
        mutex_exit(&cmdk_attach_mutex);

        mutex_exit(&dkp->dk_mutex);

        /*
         * Add a zero-length attribute to tell the world we support
         * kernel ioctls (for layered drivers)
         */
        (void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
            DDI_KERNEL_IOCTL, NULL, 0);
        ddi_report_dev(dip);

        /*
         * Initialize power management
         */
        mutex_init(&dkp->dk_pm_mutex, NULL, MUTEX_DRIVER, NULL);
        cv_init(&dkp->dk_suspend_cv, NULL, CV_DRIVER, NULL);
        cmdk_setup_pm(dip, dkp);

        return (DDI_SUCCESS);

fail1:
        cmlb_free_handle(&dkp->dk_cmlbhandle);
        (void) dadk_close(DKTP_DATA);
fail2:
        cmdk_destroy_obj(dip, dkp);
        rw_destroy(&dkp->dk_bbh_mutex);
        mutex_exit(&dkp->dk_mutex);
        mutex_destroy(&dkp->dk_mutex);
#ifndef XPV_HVM_DRIVER
        ddi_soft_state_free(cmdk_state, instance);
#endif /* !XPV_HVM_DRIVER */
        return (DDI_FAILURE);
}
static int
cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
        struct cmdk     *dkp;
        int             instance;
        int             max_instance;

        switch (cmd) {
        case DDI_DETACH:
                /* return (DDI_FAILURE); */
                break;
        case DDI_SUSPEND:
                return (cmdksuspend(dip));
        default:
#ifdef CMDK_DEBUG
                if (cmdk_debug & DIO) {
                        PRF("cmdkdetach: cmd = %d unknown\n", cmd);
                }
#endif
                return (DDI_FAILURE);
        }

        mutex_enter(&cmdk_attach_mutex);
        max_instance = cmdk_max_instance;
        mutex_exit(&cmdk_attach_mutex);

        /* check if any instance of driver is open */
        for (instance = 0; instance < max_instance; instance++) {
                dkp = ddi_get_soft_state(cmdk_state, instance);
                if (!dkp)
                        continue;
                if (dkp->dk_flag & CMDK_OPEN)
                        return (DDI_FAILURE);
        }

        instance = ddi_get_instance(dip);
        if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
                return (DDI_SUCCESS);

        mutex_enter(&dkp->dk_mutex);

        /*
         * The cmdk_part_info call at the end of cmdkattach may have
         * caused cmdk_reopen to do a TGDK_OPEN; make sure we close it
         * on detach for the case when cmdkopen/cmdkclose never occurs.
         */
        if (dkp->dk_flag & CMDK_TGDK_OPEN) {
                dkp->dk_flag &= ~CMDK_TGDK_OPEN;
                (void) dadk_close(DKTP_DATA);
        }

        cmlb_detach(dkp->dk_cmlbhandle, 0);
        cmlb_free_handle(&dkp->dk_cmlbhandle);
        ddi_prop_remove_all(dip);

        cmdk_destroy_obj(dip, dkp);     /* dadk/strategy linkage */
        mutex_exit(&dkp->dk_mutex);
        mutex_destroy(&dkp->dk_mutex);
        rw_destroy(&dkp->dk_bbh_mutex);
        mutex_destroy(&dkp->dk_pm_mutex);
        cv_destroy(&dkp->dk_suspend_cv);
#ifndef XPV_HVM_DRIVER
        ddi_soft_state_free(cmdk_state, instance);
#endif /* !XPV_HVM_DRIVER */

        return (DDI_SUCCESS);
}

static int
cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
        dev_t           dev = (dev_t)arg;
        int             instance;
        struct cmdk     *dkp;

#ifdef lint
        dip = dip;      /* no one ever uses this */
#endif
#ifdef CMDK_DEBUG
        if (cmdk_debug & DENT)
                PRF("cmdkinfo: call\n");
#endif
        instance = CMDKUNIT(dev);

        switch (infocmd) {
        case DDI_INFO_DEVT2DEVINFO:
                if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
                        return (DDI_FAILURE);
                *result = (void *) dkp->dk_dip;
                break;
        case DDI_INFO_DEVT2INSTANCE:
                *result = (void *)(intptr_t)instance;
                break;
        default:
                return (DDI_FAILURE);
        }
        return (DDI_SUCCESS);
}
/*
 * Initialize the power management components
 */
static void
cmdk_setup_pm(dev_info_t *dip, struct cmdk *dkp)
{
        char *pm_comp[] = { "NAME=cmdk", "0=off", "1=on", NULL };

        /*
         * Since the cmdk device does not have the 'reg' property,
         * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
         * The following code is to tell cpr that this device
         * DOES need to be suspended and resumed.
         */
        (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
            "pm-hardware-state", "needs-suspend-resume");

        if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
            "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) {
                if (pm_raise_power(dip, 0, CMDK_SPINDLE_ON) == DDI_SUCCESS) {
                        mutex_enter(&dkp->dk_pm_mutex);
                        dkp->dk_pm_level = CMDK_SPINDLE_ON;
                        dkp->dk_pm_is_enabled = 1;
                        mutex_exit(&dkp->dk_pm_mutex);
                } else {
                        mutex_enter(&dkp->dk_pm_mutex);
                        dkp->dk_pm_level = CMDK_SPINDLE_OFF;
                        dkp->dk_pm_is_enabled = 0;
                        mutex_exit(&dkp->dk_pm_mutex);
                }
        } else {
                mutex_enter(&dkp->dk_pm_mutex);
                dkp->dk_pm_level = CMDK_SPINDLE_UNINIT;
                dkp->dk_pm_is_enabled = 0;
                mutex_exit(&dkp->dk_pm_mutex);
        }
}

/*
 * Suspend routine; it is run when the DDI_SUSPEND command is passed
 * to detach(9E) by system power management.
 */
static int
cmdksuspend(dev_info_t *dip)
{
        struct cmdk     *dkp;
        int             instance;
        clock_t         count = 0;

        instance = ddi_get_instance(dip);
        if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
                return (DDI_FAILURE);
        mutex_enter(&dkp->dk_mutex);
        if (dkp->dk_flag & CMDK_SUSPEND) {
                mutex_exit(&dkp->dk_mutex);
                return (DDI_SUCCESS);
        }
        dkp->dk_flag |= CMDK_SUSPEND;

        /* need to wait a while */
        while (dadk_getcmds(DKTP_DATA) != 0) {
                delay(drv_usectohz(1000000));
                if (count > 60) {
                        dkp->dk_flag &= ~CMDK_SUSPEND;
                        cv_broadcast(&dkp->dk_suspend_cv);
                        mutex_exit(&dkp->dk_mutex);
                        return (DDI_FAILURE);
                }
                count++;
        }
        mutex_exit(&dkp->dk_mutex);
        return (DDI_SUCCESS);
}

/*
 * Resume routine; it is run when the DDI_RESUME command is passed
 * to attach(9E) by system power management.
 */
static int
cmdkresume(dev_info_t *dip)
{
        struct cmdk     *dkp;
        int             instance;

        instance = ddi_get_instance(dip);
        if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
                return (DDI_FAILURE);
        mutex_enter(&dkp->dk_mutex);
        if (!(dkp->dk_flag & CMDK_SUSPEND)) {
                mutex_exit(&dkp->dk_mutex);
                return (DDI_FAILURE);
        }
        dkp->dk_pm_level = CMDK_SPINDLE_ON;
        dkp->dk_flag &= ~CMDK_SUSPEND;
        cv_broadcast(&dkp->dk_suspend_cv);
        mutex_exit(&dkp->dk_mutex);
        return (DDI_SUCCESS);
}
/*
 * Power management entry point; it is used to change the power level
 * of the power management component.  The real hard drive
 * suspend/resume is handled in the ata driver, so this function does
 * no real work other than verifying that the disk is idle.
 */
static int
cmdkpower(dev_info_t *dip, int component, int level)
{
        struct cmdk     *dkp;
        int             instance;

        instance = ddi_get_instance(dip);
        if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
            component != 0 || level > CMDK_SPINDLE_ON ||
            level < CMDK_SPINDLE_OFF) {
                return (DDI_FAILURE);
        }

        mutex_enter(&dkp->dk_pm_mutex);
        if (dkp->dk_pm_is_enabled && dkp->dk_pm_level == level) {
                mutex_exit(&dkp->dk_pm_mutex);
                return (DDI_SUCCESS);
        }
        mutex_exit(&dkp->dk_pm_mutex);

        if ((level == CMDK_SPINDLE_OFF) &&
            (dadk_getcmds(DKTP_DATA) != 0)) {
                return (DDI_FAILURE);
        }

        mutex_enter(&dkp->dk_pm_mutex);
        dkp->dk_pm_level = level;
        mutex_exit(&dkp->dk_pm_mutex);
        return (DDI_SUCCESS);
}

static int
cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
        struct cmdk     *dkp;

#ifdef CMDK_DEBUG
        if (cmdk_debug & DENT)
                PRF("cmdk_prop_op: call\n");
#endif

        dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
        if (dkp == NULL)
                return (ddi_prop_op(dev, dip, prop_op, mod_flags,
                    name, valuep, lengthp));

        return (cmlb_prop_op(dkp->dk_cmlbhandle,
            dev, dip, prop_op, mod_flags, name, valuep, lengthp,
            CMDKPART(dev), NULL));
}

/*
 * dump routine
 */
static int
cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
{
        int             instance;
        struct cmdk     *dkp;
        diskaddr_t      p_lblksrt;
        diskaddr_t      p_lblkcnt;
        struct buf      local;
        struct buf      *bp;

#ifdef CMDK_DEBUG
        if (cmdk_debug & DENT)
                PRF("cmdkdump: call\n");
#endif
        instance = CMDKUNIT(dev);
        if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) || (blkno < 0))
                return (ENXIO);

        if (cmlb_partinfo(
            dkp->dk_cmlbhandle,
            CMDKPART(dev),
            &p_lblkcnt,
            &p_lblksrt,
            NULL,
            NULL,
            0)) {
                return (ENXIO);
        }

        if ((blkno + nblk) > p_lblkcnt)
                return (EINVAL);

        cmdk_indump = 1;        /* Tell disk targets we are panic dumping */

        bp = &local;
        bzero(bp, sizeof (*bp));
        bp->b_flags = B_BUSY;
        bp->b_un.b_addr = addr;
        bp->b_bcount = nblk << SCTRSHFT;
        SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + blkno)));

        (void) dadk_dump(DKTP_DATA, bp);
        return (bp->b_error);
}
/*
 * Copy in the dadkio_rwcmd according to the user's data model.  If needed,
 * convert it for our internal use.
 */
static int
rwcmd_copyin(struct dadkio_rwcmd *rwcmdp, caddr_t inaddr, int flag)
{
        switch (ddi_model_convert_from(flag)) {
        case DDI_MODEL_ILP32: {
                struct dadkio_rwcmd32 cmd32;

                if (ddi_copyin(inaddr, &cmd32,
                    sizeof (struct dadkio_rwcmd32), flag)) {
                        return (EFAULT);
                }

                rwcmdp->cmd = cmd32.cmd;
                rwcmdp->flags = cmd32.flags;
                rwcmdp->blkaddr = (blkaddr_t)cmd32.blkaddr;
                rwcmdp->buflen = cmd32.buflen;
                rwcmdp->bufaddr = (caddr_t)(intptr_t)cmd32.bufaddr;
                /*
                 * Note: we do not convert the 'status' field,
                 * as it should not contain valid data at this
                 * point.
                 */
                bzero(&rwcmdp->status, sizeof (rwcmdp->status));
                break;
        }
        case DDI_MODEL_NONE: {
                if (ddi_copyin(inaddr, rwcmdp,
                    sizeof (struct dadkio_rwcmd), flag)) {
                        return (EFAULT);
                }
        }
        }
        return (0);
}

/*
 * If necessary, convert the internal rwcmdp and status to the appropriate
 * data model and copy it out to the user.
 */
static int
rwcmd_copyout(struct dadkio_rwcmd *rwcmdp, caddr_t outaddr, int flag)
{
        switch (ddi_model_convert_from(flag)) {
        case DDI_MODEL_ILP32: {
                struct dadkio_rwcmd32 cmd32;

                cmd32.cmd = rwcmdp->cmd;
                cmd32.flags = rwcmdp->flags;
                cmd32.blkaddr = rwcmdp->blkaddr;
                cmd32.buflen = rwcmdp->buflen;
                ASSERT64(((uintptr_t)rwcmdp->bufaddr >> 32) == 0);
                cmd32.bufaddr = (caddr32_t)(uintptr_t)rwcmdp->bufaddr;

                cmd32.status.status = rwcmdp->status.status;
                cmd32.status.resid = rwcmdp->status.resid;
                cmd32.status.failed_blk_is_valid =
                    rwcmdp->status.failed_blk_is_valid;
                cmd32.status.failed_blk = rwcmdp->status.failed_blk;
                cmd32.status.fru_code_is_valid =
                    rwcmdp->status.fru_code_is_valid;
                cmd32.status.fru_code = rwcmdp->status.fru_code;

                bcopy(rwcmdp->status.add_error_info,
                    cmd32.status.add_error_info, DADKIO_ERROR_INFO_LEN);

                if (ddi_copyout(&cmd32, outaddr,
                    sizeof (struct dadkio_rwcmd32), flag))
                        return (EFAULT);
                break;
        }
        case DDI_MODEL_NONE: {
                if (ddi_copyout(rwcmdp, outaddr,
                    sizeof (struct dadkio_rwcmd), flag))
                        return (EFAULT);
        }
        }
        return (0);
}
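/*
 * Dispatch summary for cmdkioctl() below: label and geometry ioctls
 * (the DKIOCGVTOC/DKIOCGETEFI family) are delegated to cmlb via
 * cmlb_ioctl(); DIOCTL_RWCMD is converted with the copyin/copyout
 * helpers above and handed to dadk_ioctl(); any unrecognized command
 * falls through to dadk_ioctl() unchanged.
 */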
/*
 * ioctl routine
 */
static int
cmdkioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *credp, int *rvalp)
{
        int             instance;
        struct scsi_device *devp;
        struct cmdk     *dkp;
        char            data[NBPSCTR];

        instance = CMDKUNIT(dev);
        if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
                return (ENXIO);

        mutex_enter(&dkp->dk_mutex);
        while (dkp->dk_flag & CMDK_SUSPEND) {
                cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
        }
        mutex_exit(&dkp->dk_mutex);

        bzero(data, sizeof (data));

        switch (cmd) {

        case DKIOCGMEDIAINFO: {
                struct dk_minfo media_info;
                struct tgdk_geom phyg;

                /* dadk_getphygeom always returns success */
                (void) dadk_getphygeom(DKTP_DATA, &phyg);

                media_info.dki_lbsize = phyg.g_secsiz;
                media_info.dki_capacity = phyg.g_cap;
                media_info.dki_media_type = DK_FIXED_DISK;

                if (ddi_copyout(&media_info, (void *)arg,
                    sizeof (struct dk_minfo), flag)) {
                        return (EFAULT);
                } else {
                        return (0);
                }
        }

        case DKIOCINFO: {
                struct dk_cinfo *info = (struct dk_cinfo *)data;

                /* controller information */
                info->dki_ctype = (DKTP_EXT->tg_ctype);
                info->dki_cnum = ddi_get_instance(ddi_get_parent(dkp->dk_dip));
                (void) strcpy(info->dki_cname,
                    ddi_get_name(ddi_get_parent(dkp->dk_dip)));

                /* Unit Information */
                info->dki_unit = ddi_get_instance(dkp->dk_dip);
                devp = ddi_get_driver_private(dkp->dk_dip);
                info->dki_slave = (CMDEV_TARG(devp) << 3) | CMDEV_LUN(devp);
                (void) strcpy(info->dki_dname, ddi_driver_name(dkp->dk_dip));
                info->dki_flags = DKI_FMTVOL;
                info->dki_partition = CMDKPART(dev);

                info->dki_maxtransfer = maxphys / DEV_BSIZE;
                info->dki_addr = 1;
                info->dki_space = 0;
                info->dki_prio = 0;
                info->dki_vec = 0;

                if (ddi_copyout(data, (void *)arg, sizeof (*info), flag))
                        return (EFAULT);
                else
                        return (0);
        }

        case DKIOCSTATE: {
                int     state;
                int     rval;
                diskaddr_t      p_lblksrt;
                diskaddr_t      p_lblkcnt;

                if (ddi_copyin((void *)arg, &state, sizeof (int), flag))
                        return (EFAULT);

                /* dadk_check_media blocks until state changes */
                if (rval = dadk_check_media(DKTP_DATA, &state))
                        return (rval);

                if (state == DKIO_INSERTED) {

                        if (cmlb_validate(dkp->dk_cmlbhandle, 0, 0) != 0)
                                return (ENXIO);

                        if (cmlb_partinfo(dkp->dk_cmlbhandle, CMDKPART(dev),
                            &p_lblkcnt, &p_lblksrt, NULL, NULL, 0))
                                return (ENXIO);

                        if (p_lblkcnt <= 0)
                                return (ENXIO);
                }

                if (ddi_copyout(&state, (caddr_t)arg, sizeof (int), flag))
                        return (EFAULT);

                return (0);
        }

        /*
         * is media removable?
         */
        case DKIOCREMOVABLE: {
                int i;

                i = (DKTP_EXT->tg_rmb) ? 1 : 0;

                if (ddi_copyout(&i, (caddr_t)arg, sizeof (int), flag))
                        return (EFAULT);

                return (0);
        }

        case DKIOCADDBAD:
                /*
                 * This is not an update mechanism to add bad blocks
                 * to the bad block structures stored on disk.
                 *
                 * addbadsec(1M) will update the bad block data on disk
                 * and use this ioctl to force the driver to re-initialize
                 * the list of bad blocks in the driver.
                 */

                /* start BBH */
                cmdk_bbh_reopen(dkp);
                return (0);

        case DKIOCG_PHYGEOM:
        case DKIOCG_VIRTGEOM:
        case DKIOCGGEOM:
        case DKIOCSGEOM:
        case DKIOCGAPART:
        case DKIOCSAPART:
        case DKIOCGVTOC:
        case DKIOCSVTOC:
        case DKIOCPARTINFO:
        case DKIOCGEXTVTOC:
        case DKIOCSEXTVTOC:
        case DKIOCEXTPARTINFO:
        case DKIOCGMBOOT:
        case DKIOCSMBOOT:
        case DKIOCGETEFI:
        case DKIOCSETEFI:
        case DKIOCPARTITION:
        case DKIOCSETEXTPART:
        {
                int rc;

                rc = cmlb_ioctl(dkp->dk_cmlbhandle, dev, cmd, arg, flag,
                    credp, rvalp, 0);
                if (cmd == DKIOCSVTOC || cmd == DKIOCSEXTVTOC)
                        cmdk_devid_setup(dkp);
                return (rc);
        }

        case DIOCTL_RWCMD: {
                struct dadkio_rwcmd *rwcmdp;
                int     status;

                rwcmdp = kmem_alloc(sizeof (struct dadkio_rwcmd), KM_SLEEP);

                status = rwcmd_copyin(rwcmdp, (caddr_t)arg, flag);

                if (status == 0) {
                        bzero(&(rwcmdp->status),
                            sizeof (struct dadkio_status));
                        status = dadk_ioctl(DKTP_DATA,
                            dev,
                            cmd,
                            (uintptr_t)rwcmdp,
                            flag,
                            credp,
                            rvalp);
                }
                if (status == 0)
                        status = rwcmd_copyout(rwcmdp, (caddr_t)arg, flag);

                kmem_free(rwcmdp, sizeof (struct dadkio_rwcmd));
                return (status);
        }

        default:
                return (dadk_ioctl(DKTP_DATA,
                    dev,
                    cmd,
                    arg,
                    flag,
                    credp,
                    rvalp));
        }
}
/*ARGSUSED1*/
static int
cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp)
{
        int             part;
        ulong_t         partbit;
        int             instance;
        struct cmdk     *dkp;
        int             lastclose = 1;
        int             i;

        instance = CMDKUNIT(dev);
        if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
            (otyp >= OTYPCNT))
                return (ENXIO);

        mutex_enter(&dkp->dk_mutex);

        /* check if device has been opened */
        ASSERT(cmdk_isopen(dkp, dev));
        if (!(dkp->dk_flag & CMDK_OPEN)) {
                mutex_exit(&dkp->dk_mutex);
                return (ENXIO);
        }

        while (dkp->dk_flag & CMDK_SUSPEND) {
                cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
        }

        part = CMDKPART(dev);
        partbit = 1 << part;

        /* account for close */
        if (otyp == OTYP_LYR) {
                ASSERT(dkp->dk_open_lyr[part] > 0);
                if (dkp->dk_open_lyr[part])
                        dkp->dk_open_lyr[part]--;
        } else {
                ASSERT((dkp->dk_open_reg[otyp] & partbit) != 0);
                dkp->dk_open_reg[otyp] &= ~partbit;
        }
        dkp->dk_open_exl &= ~partbit;

        for (i = 0; i < CMDK_MAXPART; i++)
                if (dkp->dk_open_lyr[i] != 0) {
                        lastclose = 0;
                        break;
                }

        if (lastclose)
                for (i = 0; i < OTYPCNT; i++)
                        if (dkp->dk_open_reg[i] != 0) {
                                lastclose = 0;
                                break;
                        }

        mutex_exit(&dkp->dk_mutex);

        if (lastclose)
                cmlb_invalidate(dkp->dk_cmlbhandle, 0);

        return (DDI_SUCCESS);
}

/*ARGSUSED3*/
static int
cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp)
{
        dev_t           dev = *dev_p;
        int             part;
        ulong_t         partbit;
        int             instance;
        struct cmdk     *dkp;
        diskaddr_t      p_lblksrt;
        diskaddr_t      p_lblkcnt;
        int             i;
        int             nodelay;

        instance = CMDKUNIT(dev);
        if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
                return (ENXIO);

        if (otyp >= OTYPCNT)
                return (EINVAL);

        mutex_enter(&dkp->dk_mutex);
        while (dkp->dk_flag & CMDK_SUSPEND) {
                cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
        }
        mutex_exit(&dkp->dk_mutex);

        part = CMDKPART(dev);
        partbit = 1 << part;
        nodelay = (flag & (FNDELAY | FNONBLOCK));

        mutex_enter(&dkp->dk_mutex);

        if (cmlb_validate(dkp->dk_cmlbhandle, 0, 0) != 0) {

                /* fail if not doing a nonblocking open */
                if (!nodelay) {
                        mutex_exit(&dkp->dk_mutex);
                        return (ENXIO);
                }
        } else if (cmlb_partinfo(dkp->dk_cmlbhandle, part, &p_lblkcnt,
            &p_lblksrt, NULL, NULL, 0) == 0) {

                if (p_lblkcnt <= 0 && (!nodelay || otyp != OTYP_CHR)) {
                        mutex_exit(&dkp->dk_mutex);
                        return (ENXIO);
                }
        } else {
                /* fail if not doing a nonblocking open */
                if (!nodelay) {
                        mutex_exit(&dkp->dk_mutex);
                        return (ENXIO);
                }
        }

        if ((DKTP_EXT->tg_rdonly) && (flag & FWRITE)) {
                mutex_exit(&dkp->dk_mutex);
                return (EROFS);
        }

        /* check if the partition is already opened exclusively */
        if (dkp->dk_open_exl & partbit)
                goto excl_open_fail;

        /* check if we can establish exclusive open */
        if (flag & FEXCL) {
                if (dkp->dk_open_lyr[part])
                        goto excl_open_fail;
                for (i = 0; i < OTYPCNT; i++) {
                        if (dkp->dk_open_reg[i] & partbit)
                                goto excl_open_fail;
                }
        }

        /* open will succeed, account for open */
        dkp->dk_flag |= CMDK_OPEN;
        if (otyp == OTYP_LYR)
                dkp->dk_open_lyr[part]++;
        else
                dkp->dk_open_reg[otyp] |= partbit;
        if (flag & FEXCL)
                dkp->dk_open_exl |= partbit;

        mutex_exit(&dkp->dk_mutex);
        return (DDI_SUCCESS);

excl_open_fail:
        mutex_exit(&dkp->dk_mutex);
        return (EBUSY);
}
/*
 * read routine
 */
/*ARGSUSED2*/
static int
cmdkread(dev_t dev, struct uio *uio, cred_t *credp)
{
        return (cmdkrw(dev, uio, B_READ));
}

/*
 * async read routine
 */
/*ARGSUSED2*/
static int
cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp)
{
        return (cmdkarw(dev, aio, B_READ));
}

/*
 * write routine
 */
/*ARGSUSED2*/
static int
cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp)
{
        return (cmdkrw(dev, uio, B_WRITE));
}

/*
 * async write routine
 */
/*ARGSUSED2*/
static int
cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp)
{
        return (cmdkarw(dev, aio, B_WRITE));
}

static void
cmdkmin(struct buf *bp)
{
        if (bp->b_bcount > DK_MAXRECSIZE)
                bp->b_bcount = DK_MAXRECSIZE;
}

static int
cmdkrw(dev_t dev, struct uio *uio, int flag)
{
        int             instance;
        struct cmdk     *dkp;

        instance = CMDKUNIT(dev);
        if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
                return (ENXIO);

        mutex_enter(&dkp->dk_mutex);
        while (dkp->dk_flag & CMDK_SUSPEND) {
                cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
        }
        mutex_exit(&dkp->dk_mutex);

        return (physio(cmdkstrategy, (struct buf *)0, dev, flag, cmdkmin, uio));
}

static int
cmdkarw(dev_t dev, struct aio_req *aio, int flag)
{
        int             instance;
        struct cmdk     *dkp;

        instance = CMDKUNIT(dev);
        if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
                return (ENXIO);

        mutex_enter(&dkp->dk_mutex);
        while (dkp->dk_flag & CMDK_SUSPEND) {
                cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
        }
        mutex_exit(&dkp->dk_mutex);

        return (aphysio(cmdkstrategy, anocancel, dev, flag, cmdkmin, aio));
}

/*
 * strategy routine
 */
static int
cmdkstrategy(struct buf *bp)
{
        int             instance;
        struct cmdk     *dkp;
        long            d_cnt;
        diskaddr_t      p_lblksrt;
        diskaddr_t      p_lblkcnt;

        instance = CMDKUNIT(bp->b_edev);
        if (cmdk_indump || !(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
            (dkblock(bp) < 0)) {
                bp->b_resid = bp->b_bcount;
                SETBPERR(bp, ENXIO);
                biodone(bp);
                return (0);
        }

        mutex_enter(&dkp->dk_mutex);
        ASSERT(cmdk_isopen(dkp, bp->b_edev));
        while (dkp->dk_flag & CMDK_SUSPEND) {
                cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
        }
        mutex_exit(&dkp->dk_mutex);

        bp->b_flags &= ~(B_DONE|B_ERROR);
        bp->b_resid = 0;
        bp->av_back = NULL;

        /*
         * only re-read the vtoc if necessary (force == FALSE)
         */
        if (cmlb_partinfo(dkp->dk_cmlbhandle, CMDKPART(bp->b_edev),
            &p_lblkcnt, &p_lblksrt, NULL, NULL, 0)) {
                SETBPERR(bp, ENXIO);
        }

        if ((bp->b_bcount & (NBPSCTR-1)) || (dkblock(bp) > p_lblkcnt))
                SETBPERR(bp, ENXIO);

        if ((bp->b_flags & B_ERROR) || (dkblock(bp) == p_lblkcnt)) {
                bp->b_resid = bp->b_bcount;
                biodone(bp);
                return (0);
        }

        d_cnt = bp->b_bcount >> SCTRSHFT;
        if ((dkblock(bp) + d_cnt) > p_lblkcnt) {
                bp->b_resid = ((dkblock(bp) + d_cnt) - p_lblkcnt) << SCTRSHFT;
                bp->b_bcount -= bp->b_resid;
        }

        SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + dkblock(bp))));
        if (dadk_strategy(DKTP_DATA, bp) != DDI_SUCCESS) {
                bp->b_resid += bp->b_bcount;
                biodone(bp);
        }
        return (0);
}
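/*
 * Worked example for the clipping logic in cmdkstrategy() above
 * (numbers are illustrative only): with p_lblkcnt = 100 blocks in
 * the partition, a request at dkblock(bp) = 95 for 10 sectors
 * extends 5 blocks past the end, so b_resid is set to 5 << SCTRSHFT
 * and b_bcount is trimmed to the 5 sectors that fit before the
 * request is passed to dadk_strategy().
 */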
static int
cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp)
{
        struct scsi_device *devp;
        opaque_t        queobjp = NULL;
        opaque_t        flcobjp = NULL;
        char            que_keyvalp[64];
        int             que_keylen;
        char            flc_keyvalp[64];
        int             flc_keylen;

        ASSERT(mutex_owned(&dkp->dk_mutex));

        /* Create linkage to queueing routines based on property */
        que_keylen = sizeof (que_keyvalp);
        if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
            DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) !=
            DDI_PROP_SUCCESS) {
                cmn_err(CE_WARN, "cmdk_create_obj: queue property undefined");
                return (DDI_FAILURE);
        }
        que_keyvalp[que_keylen] = (char)0;

        if (strcmp(que_keyvalp, "qfifo") == 0) {
                queobjp = (opaque_t)qfifo_create();
        } else if (strcmp(que_keyvalp, "qsort") == 0) {
                queobjp = (opaque_t)qsort_create();
        } else {
                return (DDI_FAILURE);
        }

        /* Create linkage to dequeueing routines based on property */
        flc_keylen = sizeof (flc_keyvalp);
        if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
            DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) !=
            DDI_PROP_SUCCESS) {
                cmn_err(CE_WARN,
                    "cmdk_create_obj: flow-control property undefined");
                return (DDI_FAILURE);
        }

        flc_keyvalp[flc_keylen] = (char)0;

        if (strcmp(flc_keyvalp, "dsngl") == 0) {
                flcobjp = (opaque_t)dsngl_create();
        } else if (strcmp(flc_keyvalp, "dmult") == 0) {
                flcobjp = (opaque_t)dmult_create();
        } else {
                return (DDI_FAILURE);
        }

        /* populate bbh_obj object stored in dkp */
        dkp->dk_bbh_obj.bbh_data = dkp;
        dkp->dk_bbh_obj.bbh_ops = &cmdk_bbh_ops;

        /* create linkage to dadk */
        dkp->dk_tgobjp = (opaque_t)dadk_create();

        devp = ddi_get_driver_private(dip);
        (void) dadk_init(DKTP_DATA, devp, flcobjp, queobjp, &dkp->dk_bbh_obj,
            NULL);

        return (DDI_SUCCESS);
}

static void
cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp)
{
        char            que_keyvalp[64];
        int             que_keylen;
        char            flc_keyvalp[64];
        int             flc_keylen;

        ASSERT(mutex_owned(&dkp->dk_mutex));

        (void) dadk_free((dkp->dk_tgobjp));
        dkp->dk_tgobjp = NULL;

        que_keylen = sizeof (que_keyvalp);
        if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
            DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) !=
            DDI_PROP_SUCCESS) {
                cmn_err(CE_WARN, "cmdk_destroy_obj: queue property undefined");
                return;
        }
        que_keyvalp[que_keylen] = (char)0;

        flc_keylen = sizeof (flc_keyvalp);
        if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
            DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) !=
            DDI_PROP_SUCCESS) {
                cmn_err(CE_WARN,
                    "cmdk_destroy_obj: flow-control property undefined");
                return;
        }
        flc_keyvalp[flc_keylen] = (char)0;
}

/*ARGSUSED5*/
static int
cmdk_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr,
    diskaddr_t start, size_t count, void *tg_cookie)
{
        struct cmdk     *dkp;
        opaque_t        handle;
        int             rc = 0;
        char            *bufa;
        size_t          buflen;

        dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
        if (dkp == NULL)
                return (ENXIO);

        if (cmd != TG_READ && cmd != TG_WRITE)
                return (EINVAL);

        /* buflen must be a multiple of 512 */
        buflen = (count + NBPSCTR - 1) & -NBPSCTR;
        handle = dadk_iob_alloc(DKTP_DATA, start, buflen, KM_SLEEP);
        if (!handle)
                return (ENOMEM);

        if (cmd == TG_READ) {
                bufa = dadk_iob_xfer(DKTP_DATA, handle, B_READ);
                if (!bufa)
                        rc = EIO;
                else
                        bcopy(bufa, bufaddr, count);
        } else {
                bufa = dadk_iob_htoc(DKTP_DATA, handle);
                bcopy(bufaddr, bufa, count);
                bufa = dadk_iob_xfer(DKTP_DATA, handle, B_WRITE);
                if (!bufa)
                        rc = EIO;
        }
        (void) dadk_iob_free(DKTP_DATA, handle);

        return (rc);
}
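/*
 * The buffer-length round-up in cmdk_lb_rdwr() above relies on the
 * power-of-two identity (count + NBPSCTR - 1) & -NBPSCTR, which
 * equals (count + NBPSCTR - 1) & ~(NBPSCTR - 1): e.g. with
 * NBPSCTR = 512, count = 700 rounds up to 1024, keeping the
 * transfer sector-aligned while bcopy() still moves only `count'
 * bytes.
 */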
/*ARGSUSED3*/
static int
cmdk_lb_getinfo(dev_info_t *dip, int cmd, void *arg, void *tg_cookie)
{
        struct cmdk             *dkp;
        struct tgdk_geom        phyg;

        dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
        if (dkp == NULL)
                return (ENXIO);

        switch (cmd) {
        case TG_GETPHYGEOM: {
                cmlb_geom_t *phygeomp = (cmlb_geom_t *)arg;

                /* dadk_getphygeom always returns success */
                (void) dadk_getphygeom(DKTP_DATA, &phyg);

                phygeomp->g_capacity = phyg.g_cap;
                phygeomp->g_nsect = phyg.g_sec;
                phygeomp->g_nhead = phyg.g_head;
                phygeomp->g_acyl = phyg.g_acyl;
                phygeomp->g_ncyl = phyg.g_cyl;
                phygeomp->g_secsize = phyg.g_secsiz;
                phygeomp->g_intrlv = 1;
                phygeomp->g_rpm = 3600;

                return (0);
        }

        case TG_GETVIRTGEOM: {
                cmlb_geom_t *virtgeomp = (cmlb_geom_t *)arg;
                diskaddr_t capacity;

                (void) dadk_getgeom(DKTP_DATA, &phyg);
                capacity = phyg.g_cap;

                /*
                 * If the controller returned us something that doesn't
                 * really fit into an Int 13/function 8 geometry
                 * result, just fail the ioctl.  See PSARC 1998/313.
                 */
                if (capacity < 0 || capacity >= 63 * 254 * 1024)
                        return (EINVAL);

                virtgeomp->g_capacity = capacity;
                virtgeomp->g_nsect = 63;
                virtgeomp->g_nhead = 254;
                virtgeomp->g_ncyl = capacity / (63 * 254);
                virtgeomp->g_acyl = 0;
                virtgeomp->g_secsize = 512;
                virtgeomp->g_intrlv = 1;
                virtgeomp->g_rpm = 3600;

                return (0);
        }

        case TG_GETCAPACITY:
        case TG_GETBLOCKSIZE: {

                /* dadk_getphygeom always returns success */
                (void) dadk_getphygeom(DKTP_DATA, &phyg);
                if (cmd == TG_GETCAPACITY)
                        *(diskaddr_t *)arg = phyg.g_cap;
                else
                        *(uint32_t *)arg = (uint32_t)phyg.g_secsiz;

                return (0);
        }

        case TG_GETATTR: {
                tg_attribute_t *tgattribute = (tg_attribute_t *)arg;

                if ((DKTP_EXT->tg_rdonly))
                        tgattribute->media_is_writable = FALSE;
                else
                        tgattribute->media_is_writable = TRUE;

                return (0);
        }

        default:
                return (ENOTTY);
        }
}


/*
 * Create and register the devid.
 * There are 4 different ways we can get a device id:
 *    1. Already have one - nothing to do
 *    2. Build one from the drive's model and serial numbers
 *    3. Read one from the disk (first sector of last track)
 *    4. Fabricate one and write it on the disk.
 * If any of these succeeds, register the device id.
 */
static void
cmdk_devid_setup(struct cmdk *dkp)
{
        int     rc;

        /* Try options until one succeeds, or all have failed */

        /* 1. All done if already registered */
        if (dkp->dk_devid != NULL)
                return;

        /* 2. Build a devid from the model and serial number */
        rc = cmdk_devid_modser(dkp);
        if (rc != DDI_SUCCESS) {
                /* 3. Read devid from the disk, if present */
                rc = cmdk_devid_read(dkp);

                /* 4. otherwise make one up and write it on the disk */
                if (rc != DDI_SUCCESS)
                        rc = cmdk_devid_fabricate(dkp);
        }

        /* If we managed to get a devid any of the above ways, register it */
        if (rc == DDI_SUCCESS)
                (void) ddi_devid_register(dkp->dk_dip, dkp->dk_devid);
}
/*
 * Build a devid from the model and serial number.
 * Return DDI_SUCCESS or DDI_FAILURE.
 */
static int
cmdk_devid_modser(struct cmdk *dkp)
{
        int     rc = DDI_FAILURE;
        char    *hwid;
        int     modlen;
        int     serlen;

        /*
         * device ID is a concatenation of model number, '=',
         * serial number.
         */
        hwid = kmem_alloc(CMDK_HWIDLEN, KM_SLEEP);
        modlen = cmdk_get_modser(dkp, DIOCTL_GETMODEL, hwid, CMDK_HWIDLEN);
        if (modlen == 0) {
                rc = DDI_FAILURE;
                goto err;
        }
        hwid[modlen++] = '=';
        serlen = cmdk_get_modser(dkp, DIOCTL_GETSERIAL,
            hwid + modlen, CMDK_HWIDLEN - modlen);
        if (serlen == 0) {
                rc = DDI_FAILURE;
                goto err;
        }
        hwid[modlen + serlen] = 0;

        /* Initialize the device ID, trailing NULL not included */
        rc = ddi_devid_init(dkp->dk_dip, DEVID_ATA_SERIAL, modlen + serlen,
            hwid, &dkp->dk_devid);
        if (rc != DDI_SUCCESS) {
                rc = DDI_FAILURE;
                goto err;
        }

        rc = DDI_SUCCESS;

err:
        kmem_free(hwid, CMDK_HWIDLEN);
        return (rc);
}

static int
cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len)
{
        dadk_ioc_string_t strarg;
        int             rval;
        char            *s;
        char            ch;
        boolean_t       ret;
        int             i;
        int             tb;

        strarg.is_buf = buf;
        strarg.is_size = len;
        if (dadk_ioctl(DKTP_DATA,
            dkp->dk_dev,
            ioccmd,
            (uintptr_t)&strarg,
            FNATIVE | FKIOCTL,
            NULL,
            &rval) != 0)
                return (0);

        /*
         * A valid model/serial string must contain a character that is
         * not a space, NULL, or '0'; trim trailing spaces/NULLs.
         */
        ret = B_FALSE;
        s = buf;
        for (i = 0; i < strarg.is_size; i++) {
                ch = *s++;
                if (ch != ' ' && ch != '\0')
                        tb = i + 1;
                if (ch != ' ' && ch != '\0' && ch != '0')
                        ret = B_TRUE;
        }

        if (ret == B_FALSE)
                return (0);

        return (tb);
}

/*
 * Read a devid from the first block of the last track of
 * the last cylinder.  Make sure what we read is a valid devid.
 * Return DDI_SUCCESS or DDI_FAILURE.
 */
static int
cmdk_devid_read(struct cmdk *dkp)
{
        diskaddr_t      blk;
        struct dk_devid *dkdevidp;
        uint_t          *ip;
        int             chksum;
        int             i, sz;
        tgdk_iob_handle handle = NULL;
        int             rc = DDI_FAILURE;

        if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk, 0))
                goto err;

        /* read the devid */
        handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP);
        if (handle == NULL)
                goto err;

        dkdevidp = (struct dk_devid *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
        if (dkdevidp == NULL)
                goto err;

        /* Validate the revision */
        if ((dkdevidp->dkd_rev_hi != DK_DEVID_REV_MSB) ||
            (dkdevidp->dkd_rev_lo != DK_DEVID_REV_LSB))
                goto err;

        /* Calculate the checksum */
        chksum = 0;
        ip = (uint_t *)dkdevidp;
        for (i = 0; i < ((NBPSCTR - sizeof (int)) / sizeof (int)); i++)
                chksum ^= ip[i];
        if (DKD_GETCHKSUM(dkdevidp) != chksum)
                goto err;

        /* Validate the device id */
        if (ddi_devid_valid((ddi_devid_t)dkdevidp->dkd_devid) != DDI_SUCCESS)
                goto err;

        /* keep a copy of the device id */
        sz = ddi_devid_sizeof((ddi_devid_t)dkdevidp->dkd_devid);
        dkp->dk_devid = kmem_alloc(sz, KM_SLEEP);
        bcopy(dkdevidp->dkd_devid, dkp->dk_devid, sz);

        rc = DDI_SUCCESS;

err:
        if (handle != NULL)
                (void) dadk_iob_free(DKTP_DATA, handle);
        return (rc);
}
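/*
 * On-disk devid sector sketch, as checked by cmdk_devid_read() above
 * and produced by cmdk_devid_fabricate() below: the 512-byte block
 * holds the revision bytes, the devid itself, and a checksum in its
 * last 32-bit word.  The checksum is the XOR of all preceding words:
 *
 *      chksum = 0;
 *      ip = (uint_t *)dkdevidp;
 *      for (i = 0; i < ((NBPSCTR - sizeof (int)) / sizeof (int)); i++)
 *              chksum ^= ip[i];
 *
 * and must match DKD_GETCHKSUM() for the block to be accepted.
 */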
/*
 * Create a devid and write it on the first block of the last track of
 * the last cylinder.
 * Return DDI_SUCCESS or DDI_FAILURE.
 */
static int
cmdk_devid_fabricate(struct cmdk *dkp)
{
        ddi_devid_t     devid = NULL;   /* devid made by ddi_devid_init */
        struct dk_devid *dkdevidp;      /* devid struct stored on disk */
        diskaddr_t      blk;
        tgdk_iob_handle handle = NULL;
        uint_t          *ip, chksum;
        int             i;
        int             rc = DDI_FAILURE;

        if (ddi_devid_init(dkp->dk_dip, DEVID_FAB, 0, NULL, &devid) !=
            DDI_SUCCESS)
                goto err;

        if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk, 0)) {
                /* no device id block address */
                goto err;
        }

        handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP);
        if (!handle)
                goto err;

        /* Locate the buffer */
        dkdevidp = (struct dk_devid *)dadk_iob_htoc(DKTP_DATA, handle);

        /* Fill in the revision */
        bzero(dkdevidp, NBPSCTR);
        dkdevidp->dkd_rev_hi = DK_DEVID_REV_MSB;
        dkdevidp->dkd_rev_lo = DK_DEVID_REV_LSB;

        /* Copy in the device id */
        i = ddi_devid_sizeof(devid);
        if (i > DK_DEVID_SIZE)
                goto err;
        bcopy(devid, dkdevidp->dkd_devid, i);

        /* Calculate the checksum */
        chksum = 0;
        ip = (uint_t *)dkdevidp;
        for (i = 0; i < ((NBPSCTR - sizeof (int)) / sizeof (int)); i++)
                chksum ^= ip[i];

        /* Fill in the checksum */
        DKD_FORMCHKSUM(chksum, dkdevidp);

        /* write the devid */
        (void) dadk_iob_xfer(DKTP_DATA, handle, B_WRITE);

        dkp->dk_devid = devid;

        rc = DDI_SUCCESS;

err:
        if (handle != NULL)
                (void) dadk_iob_free(DKTP_DATA, handle);

        if (rc != DDI_SUCCESS && devid != NULL)
                ddi_devid_free(devid);

        return (rc);
}

static void
cmdk_bbh_free_alts(struct cmdk *dkp)
{
        if (dkp->dk_alts_hdl) {
                (void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
                kmem_free(dkp->dk_slc_cnt,
                    NDKMAP * (sizeof (uint32_t) + sizeof (struct alts_ent *)));
                dkp->dk_alts_hdl = NULL;
        }
}
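/*
 * Bad-block (ALTS) data layout, as consumed by cmdk_bbh_reopen()
 * below (structures from <sys/dktp/altsctr.h>): the slice tagged
 * V_ALTSCTR begins with an alts_parttbl label block carrying a
 * sanity word (ALTS_SANITY), the entry count and the block range of
 * the entry table; that table is an array of alts_ent records, each
 * mapping a bad range [bad_start, bad_end] to replacement sectors
 * starting at good_start.
 */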
static void
cmdk_bbh_reopen(struct cmdk *dkp)
{
        tgdk_iob_handle         handle = NULL;
        diskaddr_t              slcb, slcn, slce;
        struct  alts_parttbl    *ap;
        struct  alts_ent        *enttblp;
        uint32_t                altused;
        uint32_t                altbase;
        uint32_t                altlast;
        int                     alts;
        uint16_t                vtoctag;
        int                     i, j;

        /* find slice with V_ALTSCTR tag */
        for (alts = 0; alts < NDKMAP; alts++) {
                if (cmlb_partinfo(
                    dkp->dk_cmlbhandle,
                    alts,
                    &slcn,
                    &slcb,
                    NULL,
                    &vtoctag,
                    0)) {
                        goto empty;     /* no partition table exists */
                }

                if (vtoctag == V_ALTSCTR && slcn > 1)
                        break;
        }
        if (alts >= NDKMAP) {
                goto empty;     /* no V_ALTSCTR slice defined */
        }

        /* read in ALTS label block */
        handle = dadk_iob_alloc(DKTP_DATA, slcb, NBPSCTR, KM_SLEEP);
        if (!handle) {
                goto empty;
        }

        ap = (struct alts_parttbl *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
        if (!ap || (ap->alts_sanity != ALTS_SANITY)) {
                goto empty;
        }

        altused = ap->alts_ent_used;    /* number of BB entries */
        altbase = ap->alts_ent_base;    /* blk offset from begin slice */
        altlast = ap->alts_ent_end;     /* blk offset to last block */
        /* ((altused * sizeof (struct alts_ent) + NBPSCTR - 1) & ~NBPSCTR) */

        if (altused == 0 ||
            altbase < 1 ||
            altbase > altlast ||
            altlast >= slcn) {
                goto empty;
        }
        (void) dadk_iob_free(DKTP_DATA, handle);

        /* read in ALTS remapping table */
        handle = dadk_iob_alloc(DKTP_DATA,
            slcb + altbase,
            (altlast - altbase + 1) << SCTRSHFT, KM_SLEEP);
        if (!handle) {
                goto empty;
        }

        enttblp = (struct alts_ent *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
        if (!enttblp) {
                goto empty;
        }

        rw_enter(&dkp->dk_bbh_mutex, RW_WRITER);

        /* allocate space for dk_slc_cnt and dk_slc_ent tables */
        if (dkp->dk_slc_cnt == NULL) {
                dkp->dk_slc_cnt = kmem_alloc(NDKMAP *
                    (sizeof (uint32_t) + sizeof (struct alts_ent *)),
                    KM_SLEEP);
        }
        dkp->dk_slc_ent = (struct alts_ent **)(dkp->dk_slc_cnt + NDKMAP);

        /* free previous BB table (if any) */
        if (dkp->dk_alts_hdl) {
                (void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
                dkp->dk_alts_hdl = NULL;
                dkp->dk_altused = 0;
        }

        /* save linkage to new BB table */
        dkp->dk_alts_hdl = handle;
        dkp->dk_altused = altused;

        /*
         * build indexes to BB table by slice
         * effectively we have
         *      struct alts_ent *enttblp[altused];
         *
         *      uint32_t        dk_slc_cnt[NDKMAP];
         *      struct alts_ent *dk_slc_ent[NDKMAP];
         */
        for (i = 0; i < NDKMAP; i++) {
                if (cmlb_partinfo(
                    dkp->dk_cmlbhandle,
                    i,
                    &slcn,
                    &slcb,
                    NULL,
                    NULL,
                    0)) {
                        goto empty1;
                }

                dkp->dk_slc_cnt[i] = 0;
                if (slcn == 0)
                        continue;       /* slice is not allocated */

                /* last block in slice */
                slce = slcb + slcn - 1;

                /*
                 * find the first remap entry at or after the
                 * beginning of the slice
                 */
                for (j = 0; j < altused; j++) {
                        if (enttblp[j].bad_start + enttblp[j].bad_end >= slcb)
                                break;
                }
                dkp->dk_slc_ent[i] = enttblp + j;

                /* count remap entries until the end of the slice */
                for (; j < altused && enttblp[j].bad_start <= slce; j++) {
                        dkp->dk_slc_cnt[i] += 1;
                }
        }

        rw_exit(&dkp->dk_bbh_mutex);
        return;

empty:
        rw_enter(&dkp->dk_bbh_mutex, RW_WRITER);
empty1:
        if (handle && handle != dkp->dk_alts_hdl)
                (void) dadk_iob_free(DKTP_DATA, handle);

        if (dkp->dk_alts_hdl) {
                (void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
                dkp->dk_alts_hdl = NULL;
        }

        rw_exit(&dkp->dk_bbh_mutex);
}

/*ARGSUSED*/
static bbh_cookie_t
cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle)
{
        struct  bbh_handle *hp;
        bbh_cookie_t ckp;

        hp = (struct bbh_handle *)handle;
        ckp = hp->h_cktab + hp->h_idx;
        hp->h_idx++;
        return (ckp);
}

/*ARGSUSED*/
static void
cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle)
{
        struct  bbh_handle *hp;

        hp = (struct bbh_handle *)handle;
        kmem_free(handle, (sizeof (struct bbh_handle) +
            (hp->h_totck * (sizeof (struct bbh_cookie)))));
}
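/*
 * Cookie capacity, the reasoning behind `cnt <<= 1; cnt++' in
 * cmdk_bbh_gethandle() below: each overlapping bad-sector cluster
 * can split the request into at most one additional pass-through
 * cookie plus one remapped cookie, so a request overlapping cnt
 * clusters needs at most 2 * cnt + 1 cookies; the handle is sized
 * for that worst case.
 */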
/*
 * cmdk_bbh_gethandle remaps the bad sectors to alternates.
 * There are 7 different cases when the comparison is made
 * between the bad sector cluster and the disk section.
 *
 *      bad sector cluster      gggggggggggbbbbbbbggggggggggg
 *      case 1:                    ddddd
 *      case 2:                            -d-----
 *      case 3:                                      ddddd
 *      case 4:                          dddddddddddd
 *      case 5:                       ddddddd-----
 *      case 6:                           ---ddddddd
 *      case 7:                            ddddddd
 *
 * where:  g = good sector,      b = bad sector
 *         d = sector in disk section
 *         - = disk section may be extended to cover those disk areas
 */

static opaque_t
cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp)
{
        struct cmdk             *dkp = (struct cmdk *)bbh_data;
        struct bbh_handle       *hp;
        struct bbh_cookie       *ckp;
        struct alts_ent         *altp;
        uint32_t                alts_used;
        uint32_t                part = CMDKPART(bp->b_edev);
        daddr32_t               lastsec;
        long                    d_count;
        int                     i;
        int                     idx;
        int                     cnt;

        if (part >= V_NUMPAR)
                return (NULL);

        /*
         * This if statement is atomic and it will succeed
         * if there are no bad blocks (almost always)
         *
         * so this if is performed outside of the rw_enter for speed
         * and then repeated inside the rw_enter for safety
         */
        if (!dkp->dk_alts_hdl) {
                return (NULL);
        }

        rw_enter(&dkp->dk_bbh_mutex, RW_READER);

        if (dkp->dk_alts_hdl == NULL) {
                rw_exit(&dkp->dk_bbh_mutex);
                return (NULL);
        }

        alts_used = dkp->dk_slc_cnt[part];
        if (alts_used == 0) {
                rw_exit(&dkp->dk_bbh_mutex);
                return (NULL);
        }
        altp = dkp->dk_slc_ent[part];

        /*
         * binary search for the first entry in the alternate entry
         * table that overlaps or lies above the starting sector
         */
        i = cmdk_bbh_bsearch(altp, alts_used, GET_BP_SEC(bp));
        /* if starting sector is > the largest bad sector, return */
        if (i == -1) {
                rw_exit(&dkp->dk_bbh_mutex);
                return (NULL);
        }
        /* i is the starting index.  Set altp to the starting entry addr */
        altp += i;

        d_count = bp->b_bcount >> SCTRSHFT;
        lastsec = GET_BP_SEC(bp) + d_count - 1;

        /* count the bad sector clusters that overlap this request */
        for (idx = i, cnt = 0; idx < alts_used; idx++, altp++, cnt++) {
                if (lastsec < altp->bad_start)
                        break;
        }

        if (!cnt) {
                rw_exit(&dkp->dk_bbh_mutex);
                return (NULL);
        }

        /* calculate the maximum number of reserved cookies */
        cnt <<= 1;
        cnt++;

        /* allocate the handle */
        hp = (struct bbh_handle *)kmem_zalloc((sizeof (*hp) +
            (cnt * sizeof (*ckp))), KM_SLEEP);

        hp->h_idx = 0;
        hp->h_totck = cnt;
        ckp = hp->h_cktab = (struct bbh_cookie *)(hp + 1);
        ckp[0].ck_sector = GET_BP_SEC(bp);
        ckp[0].ck_seclen = d_count;

        altp = dkp->dk_slc_ent[part];
        altp += i;
        for (idx = 0; i < alts_used; i++, altp++) {
                /* CASE 1: */
                if (lastsec < altp->bad_start)
                        break;

                /* CASE 3: */
                if (ckp[idx].ck_sector > altp->bad_end)
                        continue;

                /* CASE 2 and 7: */
                if ((ckp[idx].ck_sector >= altp->bad_start) &&
                    (lastsec <= altp->bad_end)) {
                        ckp[idx].ck_sector = altp->good_start +
                            ckp[idx].ck_sector - altp->bad_start;
                        break;
                }

                /* at least one bad sector in our section; split it */
                /* CASE 5: */
                if ((lastsec >= altp->bad_start) &&
                    (lastsec <= altp->bad_end)) {
                        ckp[idx+1].ck_seclen = lastsec - altp->bad_start + 1;
                        ckp[idx].ck_seclen -= ckp[idx+1].ck_seclen;
                        ckp[idx+1].ck_sector = altp->good_start;
                        break;
                }
                /* CASE 6: */
                if ((ckp[idx].ck_sector <= altp->bad_end) &&
                    (ckp[idx].ck_sector >= altp->bad_start)) {
                        ckp[idx+1].ck_seclen = ckp[idx].ck_seclen;
                        ckp[idx].ck_seclen = altp->bad_end -
                            ckp[idx].ck_sector + 1;
                        ckp[idx+1].ck_seclen -= ckp[idx].ck_seclen;
                        ckp[idx].ck_sector = altp->good_start +
                            ckp[idx].ck_sector - altp->bad_start;
                        idx++;
                        ckp[idx].ck_sector = altp->bad_end + 1;
                        continue;       /* check rest of section */
                }

                /* CASE 4: */
                ckp[idx].ck_seclen = altp->bad_start - ckp[idx].ck_sector;
                ckp[idx+1].ck_sector = altp->good_start;
                ckp[idx+1].ck_seclen = altp->bad_end - altp->bad_start + 1;
                idx += 2;
                ckp[idx].ck_sector = altp->bad_end + 1;
                ckp[idx].ck_seclen = lastsec - altp->bad_end;
        }

        rw_exit(&dkp->dk_bbh_mutex);
        return ((opaque_t)hp);
}
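/*
 * cmdk_bbh_bsearch() notes (behavior inferred from the code): the
 * entry table is sorted by bad_start.  The search returns the index
 * of the entry whose bad range contains `key', else the index of the
 * first entry lying entirely above `key' (so the caller sees every
 * cluster that could overlap a request starting at `key'), or -1
 * when `key' is beyond the last cluster.
 */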
static int
cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key)
{
        int     i;
        int     ind;
        int     interval;
        int     mystatus = -1;

        if (!cnt)
                return (mystatus);

        ind = 1;        /* compiler complains about possible uninitialized var */
        for (i = 1; i <= cnt; i <<= 1)
                ind = i;

        for (interval = ind; interval; ) {
                if ((key >= buf[ind-1].bad_start) &&
                    (key <= buf[ind-1].bad_end)) {
                        return (ind-1);
                } else {
                        interval >>= 1;
                        if (key < buf[ind-1].bad_start) {
                                /* record the largest bad sector index */
                                mystatus = ind-1;
                                if (!interval)
                                        break;
                                ind = ind - interval;
                        } else {
                                /*
                                 * if key is larger than the last element
                                 * then break
                                 */
                                if ((ind == cnt) || !interval)
                                        break;
                                if ((ind + interval) <= cnt)
                                        ind += interval;
                        }
                }
        }
        return (mystatus);
}