/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/scsi/scsi.h>
#include <sys/dktp/cm.h>
#include <sys/dktp/quetypes.h>
#include <sys/dktp/queue.h>
#include <sys/dktp/fctypes.h>
#include <sys/dktp/flowctrl.h>
#include <sys/dktp/cmdev.h>
#include <sys/dkio.h>
#include <sys/dktp/tgdk.h>
#include <sys/dktp/dadk.h>
#include <sys/dktp/bbh.h>
#include <sys/dktp/altsctr.h>
#include <sys/dktp/cmdk.h>

#include <sys/stat.h>
#include <sys/vtoc.h>
#include <sys/file.h>
#include <sys/dktp/dadkio.h>
#include <sys/aio_req.h>

#include <sys/cmlb.h>

/*
 * Local Static Data
 */
#ifdef CMDK_DEBUG
#define	DENT	0x0001
#define	DIO	0x0002

static	int	cmdk_debug = DIO;
#endif

#ifndef	TRUE
#define	TRUE	1
#endif

#ifndef	FALSE
#define	FALSE	0
#endif

/*
 * NDKMAP is the base number for accessing the fdisk partitions.
 * c?d?p0 --> cmdk@?,?:q
 */
#define	PARTITION0_INDEX	(NDKMAP + 0)

#define	DKTP_DATA		(dkp->dk_tgobjp)->tg_data
#define	DKTP_EXT		(dkp->dk_tgobjp)->tg_ext

void *cmdk_state;

/*
 * the cmdk_attach_mutex protects cmdk_max_instance in multi-threaded
 * attach situations
 */
static kmutex_t cmdk_attach_mutex;
static int cmdk_max_instance = 0;

/*
 * Panic dumpsys state
 * There is only a single flag that is not mutex locked since
 * the system is prevented from thread switching and cmdk_dump
 * will only be called in a single threaded operation.
 */
static int	cmdk_indump;

/*
 * Local Function Prototypes
 */
static int cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp);
static void cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp);
static void cmdkmin(struct buf *bp);
static int cmdkrw(dev_t dev, struct uio *uio, int flag);
static int cmdkarw(dev_t dev, struct aio_req *aio, int flag);

/*
 * Bad Block Handling Functions Prototypes
 */
static void cmdk_bbh_reopen(struct cmdk *dkp);
static opaque_t cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp);
static bbh_cookie_t cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle);
static void cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle);
static void cmdk_bbh_close(struct cmdk *dkp);
static void cmdk_bbh_setalts_idx(struct cmdk *dkp);
static int cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key);

static struct bbh_objops cmdk_bbh_ops = {
	nulldev,
	nulldev,
	cmdk_bbh_gethandle,
	cmdk_bbh_htoc,
	cmdk_bbh_freehandle,
	0, 0
};

static int cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp);
static int cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp);
static int cmdkstrategy(struct buf *bp);
static int cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int cmdkioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int cmdkread(dev_t dev, struct uio *uio, cred_t *credp);
static int cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp);
static int cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);
static int cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp);
static int cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp);

/*
 * Device driver ops vector
 */

static struct cb_ops cmdk_cb_ops = {
	cmdkopen,		/* open */
	cmdkclose,		/* close */
	cmdkstrategy,		/* strategy */
	nodev,			/* print */
	cmdkdump,		/* dump */
	cmdkread,		/* read */
	cmdkwrite,		/* write */
	cmdkioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	cmdk_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_64BIT | D_MP | D_NEW,	/* Driver compatibility flag */
	CB_REV,			/* cb_rev */
	cmdkaread,		/* async read */
	cmdkawrite		/* async write */
};

static int cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int cmdkprobe(dev_info_t *dip);
static int cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd);

static void cmdk_setup_pm(dev_info_t *dip, struct cmdk *dkp);
static int cmdkresume(dev_info_t *dip);
static int cmdksuspend(dev_info_t *dip);
static int cmdkpower(dev_info_t *dip, int component, int level);

struct dev_ops cmdk_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt */
	cmdkinfo,		/* info */
	nulldev,		/* identify */
	cmdkprobe,		/* probe */
	cmdkattach,		/* attach */
	cmdkdetach,		/* detach */
	nodev,			/* reset */
	&cmdk_cb_ops,		/* driver operations */
	(struct bus_ops *)0,	/* bus operations */
	cmdkpower,		/* power */
	ddi_quiesce_not_needed,	/* quiesce */
};

/*
 * This is the loadable module wrapper.
 */
#include <sys/modctl.h>

#ifndef XPV_HVM_DRIVER
static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	"Common Direct Access Disk",
	&cmdk_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

#else /* XPV_HVM_DRIVER */
static struct modlmisc modlmisc = {
	&mod_miscops,		/* Type of module. This one is a misc */
	"HVM Common Direct Access Disk",
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modlmisc, NULL
};

#endif /* XPV_HVM_DRIVER */

/* Function prototypes for cmlb callbacks */

static int cmdk_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr,
    diskaddr_t start, size_t length, void *tg_cookie);

static int cmdk_lb_getinfo(dev_info_t *dip, int cmd, void *arg,
    void *tg_cookie);

static void cmdk_devid_setup(struct cmdk *dkp);
static int cmdk_devid_modser(struct cmdk *dkp);
static int cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len);
static int cmdk_devid_fabricate(struct cmdk *dkp);
static int cmdk_devid_read(struct cmdk *dkp);

static cmlb_tg_ops_t cmdk_lb_ops = {
	TG_DK_OPS_VERSION_1,
	cmdk_lb_rdwr,
	cmdk_lb_getinfo
};

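/*
 * Return B_TRUE if any layered or regular open is outstanding against
 * the partition selected by 'dev'.  The caller must hold dk_mutex.
 */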
static boolean_t
cmdk_isopen(struct cmdk *dkp, dev_t dev)
{
	int		part, otyp;
	ulong_t		partbit;

	ASSERT(MUTEX_HELD((&dkp->dk_mutex)));

	part = CMDKPART(dev);
	partbit = 1 << part;

	/* account for close */
	if (dkp->dk_open_lyr[part] != 0)
		return (B_TRUE);
	for (otyp = 0; otyp < OTYPCNT; otyp++)
		if (dkp->dk_open_reg[otyp] & partbit)
			return (B_TRUE);
	return (B_FALSE);
}

int
_init(void)
{
	int	rval;

#ifndef XPV_HVM_DRIVER
	if (rval = ddi_soft_state_init(&cmdk_state, sizeof (struct cmdk), 7))
		return (rval);
#endif /* !XPV_HVM_DRIVER */

	mutex_init(&cmdk_attach_mutex, NULL, MUTEX_DRIVER, NULL);
	if ((rval = mod_install(&modlinkage)) != 0) {
		mutex_destroy(&cmdk_attach_mutex);
#ifndef XPV_HVM_DRIVER
		ddi_soft_state_fini(&cmdk_state);
#endif /* !XPV_HVM_DRIVER */
	}
	return (rval);
}

int
_fini(void)
{
	return (EBUSY);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Autoconfiguration Routines
 */
static int
cmdkprobe(dev_info_t *dip)
{
	int	instance;
	int	status;
	struct	cmdk	*dkp;

	instance = ddi_get_instance(dip);

#ifndef XPV_HVM_DRIVER
	if (ddi_get_soft_state(cmdk_state, instance))
		return (DDI_PROBE_PARTIAL);

	if (ddi_soft_state_zalloc(cmdk_state, instance) != DDI_SUCCESS)
		return (DDI_PROBE_PARTIAL);
#endif /* !XPV_HVM_DRIVER */

	if ((dkp = ddi_get_soft_state(cmdk_state, instance)) == NULL)
		return (DDI_PROBE_PARTIAL);

	mutex_init(&dkp->dk_mutex, NULL, MUTEX_DRIVER, NULL);
	rw_init(&dkp->dk_bbh_mutex, NULL, RW_DRIVER, NULL);
	dkp->dk_dip = dip;
	mutex_enter(&dkp->dk_mutex);

	dkp->dk_dev = makedevice(ddi_driver_major(dip),
	    ddi_get_instance(dip) << CMDK_UNITSHF);

	/* linkage to dadk and strategy */
	if (cmdk_create_obj(dip, dkp) != DDI_SUCCESS) {
		mutex_exit(&dkp->dk_mutex);
		mutex_destroy(&dkp->dk_mutex);
		rw_destroy(&dkp->dk_bbh_mutex);
#ifndef XPV_HVM_DRIVER
		ddi_soft_state_free(cmdk_state, instance);
#endif /* !XPV_HVM_DRIVER */
		return (DDI_PROBE_PARTIAL);
	}

	status = dadk_probe(DKTP_DATA, KM_NOSLEEP);
	if (status != DDI_PROBE_SUCCESS) {
		cmdk_destroy_obj(dip, dkp);	/* dadk/strategy linkage */
		mutex_exit(&dkp->dk_mutex);
		mutex_destroy(&dkp->dk_mutex);
		rw_destroy(&dkp->dk_bbh_mutex);
#ifndef XPV_HVM_DRIVER
		ddi_soft_state_free(cmdk_state, instance);
#endif /* !XPV_HVM_DRIVER */
		return (status);
	}

	mutex_exit(&dkp->dk_mutex);
#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdkprobe: instance= %d name= `%s`\n",
		    instance, ddi_get_name_addr(dip));
#endif
	return (status);
}

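/*
 * attach(9E) entry point.  For DDI_ATTACH, open the target disk, attach
 * cmlb, initialize bad block handling, the devid and power management;
 * DDI_RESUME is handled by cmdkresume().
 */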
static int
cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int	instance;
	struct	cmdk	*dkp;
	char	*node_type;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (cmdkresume(dip));
	default:
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_FAILURE);

	dkp->dk_pm_level = CMDK_SPINDLE_UNINIT;
	mutex_init(&dkp->dk_mutex, NULL, MUTEX_DRIVER, NULL);

	mutex_enter(&dkp->dk_mutex);

	/* dadk_attach is an empty function that only returns SUCCESS */
	(void) dadk_attach(DKTP_DATA);

	node_type = (DKTP_EXT->tg_nodetype);

	/*
	 * this open allows cmlb to read the device
	 * and determine the label types
	 * so that cmlb can create minor nodes for the device
	 */

	/* open the target disk */
	if (dadk_open(DKTP_DATA, 0) != DDI_SUCCESS)
		goto fail2;

#ifdef _ILP32
	{
		struct tgdk_geom phyg;
		(void) dadk_getphygeom(DKTP_DATA, &phyg);
		if ((phyg.g_cap - 1) > DK_MAX_BLOCKS) {
			(void) dadk_close(DKTP_DATA);
			goto fail2;
		}
	}
#endif

	/* mark as having opened target */
	dkp->dk_flag |= CMDK_TGDK_OPEN;

	cmlb_alloc_handle((cmlb_handle_t *)&dkp->dk_cmlbhandle);

	if (cmlb_attach(dip,
	    &cmdk_lb_ops,
	    DTYPE_DIRECT,		/* device_type */
	    B_FALSE,			/* removable */
	    B_FALSE,			/* hot pluggable XXX */
	    node_type,
	    CMLB_CREATE_ALTSLICE_VTOC_16_DTYPE_DIRECT,	/* alter_behaviour */
	    dkp->dk_cmlbhandle,
	    0) != 0)
		goto fail1;

	/* Calling validate will create minor nodes according to disk label */
	(void) cmlb_validate(dkp->dk_cmlbhandle, 0, 0);

	/* set bbh (Bad Block Handling) */
	cmdk_bbh_reopen(dkp);

	/* setup devid string */
	cmdk_devid_setup(dkp);

	mutex_enter(&cmdk_attach_mutex);
	if (instance > cmdk_max_instance)
		cmdk_max_instance = instance;
	mutex_exit(&cmdk_attach_mutex);

	mutex_exit(&dkp->dk_mutex);

	/*
	 * Add a zero-length attribute to tell the world we support
	 * kernel ioctls (for layered drivers)
	 */
	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    DDI_KERNEL_IOCTL, NULL, 0);
	ddi_report_dev(dip);

	/*
	 * Initialize power management
	 */
	mutex_init(&dkp->dk_pm_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&dkp->dk_suspend_cv, NULL, CV_DRIVER, NULL);
	cmdk_setup_pm(dip, dkp);

	return (DDI_SUCCESS);

fail1:
	cmlb_free_handle(&dkp->dk_cmlbhandle);
	(void) dadk_close(DKTP_DATA);
fail2:
	cmdk_destroy_obj(dip, dkp);
	rw_destroy(&dkp->dk_bbh_mutex);
	mutex_exit(&dkp->dk_mutex);
	mutex_destroy(&dkp->dk_mutex);
#ifndef XPV_HVM_DRIVER
	ddi_soft_state_free(cmdk_state, instance);
#endif /* !XPV_HVM_DRIVER */
	return (DDI_FAILURE);
}

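/*
 * detach(9E) entry point.  DDI_DETACH fails while any cmdk instance is
 * still open; DDI_SUSPEND is handled by cmdksuspend().
 */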
static int
cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct cmdk	*dkp;
	int		instance;
	int		max_instance;

	switch (cmd) {
	case DDI_DETACH:
		/* return (DDI_FAILURE); */
		break;
	case DDI_SUSPEND:
		return (cmdksuspend(dip));
	default:
#ifdef CMDK_DEBUG
		if (cmdk_debug & DIO) {
			PRF("cmdkdetach: cmd = %d unknown\n", cmd);
		}
#endif
		return (DDI_FAILURE);
	}

	mutex_enter(&cmdk_attach_mutex);
	max_instance = cmdk_max_instance;
	mutex_exit(&cmdk_attach_mutex);

	/* check if any instance of driver is open */
	for (instance = 0; instance < max_instance; instance++) {
		dkp = ddi_get_soft_state(cmdk_state, instance);
		if (!dkp)
			continue;
		if (dkp->dk_flag & CMDK_OPEN)
			return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_SUCCESS);

	mutex_enter(&dkp->dk_mutex);

	/*
	 * The cmdk_part_info call at the end of cmdkattach may have
	 * caused cmdk_reopen to do a TGDK_OPEN; make sure we close it on
	 * detach for the case when cmdkopen/cmdkclose never occurs.
	 */
	if (dkp->dk_flag & CMDK_TGDK_OPEN) {
		dkp->dk_flag &= ~CMDK_TGDK_OPEN;
		(void) dadk_close(DKTP_DATA);
	}

	cmlb_detach(dkp->dk_cmlbhandle, 0);
	cmlb_free_handle(&dkp->dk_cmlbhandle);
	ddi_prop_remove_all(dip);

	cmdk_destroy_obj(dip, dkp);	/* dadk/strategy linkage */
	mutex_exit(&dkp->dk_mutex);
	mutex_destroy(&dkp->dk_mutex);
	rw_destroy(&dkp->dk_bbh_mutex);
	mutex_destroy(&dkp->dk_pm_mutex);
	cv_destroy(&dkp->dk_suspend_cv);
#ifndef XPV_HVM_DRIVER
	ddi_soft_state_free(cmdk_state, instance);
#endif /* !XPV_HVM_DRIVER */

	return (DDI_SUCCESS);
}

static int
cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	dev_t		dev = (dev_t)arg;
	int		instance;
	struct	cmdk	*dkp;

#ifdef lint
	dip = dip;	/* no one ever uses this */
#endif
#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdkinfo: call\n");
#endif
	instance = CMDKUNIT(dev);

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
			return (DDI_FAILURE);
		*result = (void *) dkp->dk_dip;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(intptr_t)instance;
		break;
	default:
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

/*
 * Initialize the power management components
 */
static void
cmdk_setup_pm(dev_info_t *dip, struct cmdk *dkp)
{
	char *pm_comp[] = { "NAME=cmdk", "0=off", "1=on", NULL };

	/*
	 * Since the cmdk device does not have the 'reg' property,
	 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
	 * The following code is to tell cpr that this device
	 * DOES need to be suspended and resumed.
	 */
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
	    "pm-hardware-state", "needs-suspend-resume");

	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
	    "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) {
		if (pm_raise_power(dip, 0, CMDK_SPINDLE_ON) == DDI_SUCCESS) {
			mutex_enter(&dkp->dk_pm_mutex);
			dkp->dk_pm_level = CMDK_SPINDLE_ON;
			dkp->dk_pm_is_enabled = 1;
			mutex_exit(&dkp->dk_pm_mutex);
		} else {
			mutex_enter(&dkp->dk_pm_mutex);
			dkp->dk_pm_level = CMDK_SPINDLE_OFF;
			dkp->dk_pm_is_enabled = 0;
			mutex_exit(&dkp->dk_pm_mutex);
		}
	} else {
		mutex_enter(&dkp->dk_pm_mutex);
		dkp->dk_pm_level = CMDK_SPINDLE_UNINIT;
		dkp->dk_pm_is_enabled = 0;
		mutex_exit(&dkp->dk_pm_mutex);
	}
}

/*
 * suspend routine; runs when the driver receives a DDI_SUSPEND command
 * at detach(9E) from system power management
 */
static int
cmdksuspend(dev_info_t *dip)
{
	struct cmdk	*dkp;
	int		instance;
	clock_t		count = 0;

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_FAILURE);
	mutex_enter(&dkp->dk_mutex);
	if (dkp->dk_flag & CMDK_SUSPEND) {
		mutex_exit(&dkp->dk_mutex);
		return (DDI_SUCCESS);
	}
	dkp->dk_flag |= CMDK_SUSPEND;

	/* need to wait a while */
	while (dadk_getcmds(DKTP_DATA) != 0) {
		delay(drv_usectohz(1000000));
		if (count > 60) {
			dkp->dk_flag &= ~CMDK_SUSPEND;
			cv_broadcast(&dkp->dk_suspend_cv);
			mutex_exit(&dkp->dk_mutex);
			return (DDI_FAILURE);
		}
		count++;
	}
	mutex_exit(&dkp->dk_mutex);
	return (DDI_SUCCESS);
}

/*
 * resume routine; runs when the driver receives a DDI_RESUME command
 * at attach(9E) from system power management
 */
static int
cmdkresume(dev_info_t *dip)
{
	struct cmdk	*dkp;
	int		instance;

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_FAILURE);
	mutex_enter(&dkp->dk_mutex);
	if (!(dkp->dk_flag & CMDK_SUSPEND)) {
		mutex_exit(&dkp->dk_mutex);
		return (DDI_FAILURE);
	}
	dkp->dk_pm_level = CMDK_SPINDLE_ON;
	dkp->dk_flag &= ~CMDK_SUSPEND;
	cv_broadcast(&dkp->dk_suspend_cv);
	mutex_exit(&dkp->dk_mutex);
	return (DDI_SUCCESS);
}

/*
 * power(9E) entry point; changes the state of a power management
 * component.  The actual hard drive suspend/resume is handled in ata,
 * so this function does no real work other than verifying that the
 * disk is idle.
 */
static int
cmdkpower(dev_info_t *dip, int component, int level)
{
	struct cmdk	*dkp;
	int		instance;

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
	    component != 0 || level > CMDK_SPINDLE_ON ||
	    level < CMDK_SPINDLE_OFF) {
		return (DDI_FAILURE);
	}

	mutex_enter(&dkp->dk_pm_mutex);
	if (dkp->dk_pm_is_enabled && dkp->dk_pm_level == level) {
		mutex_exit(&dkp->dk_pm_mutex);
		return (DDI_SUCCESS);
	}
	mutex_exit(&dkp->dk_pm_mutex);

	if ((level == CMDK_SPINDLE_OFF) &&
	    (dadk_getcmds(DKTP_DATA) != 0)) {
		return (DDI_FAILURE);
	}

	mutex_enter(&dkp->dk_pm_mutex);
	dkp->dk_pm_level = level;
	mutex_exit(&dkp->dk_pm_mutex);
	return (DDI_SUCCESS);
}

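/*
 * prop_op(9E) entry point.  Defer to cmlb_prop_op() for the partition
 * properties; fall back to ddi_prop_op() if the instance has no soft
 * state yet.
 */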
static int
cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	struct	cmdk	*dkp;

#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdk_prop_op: call\n");
#endif

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
	if (dkp == NULL)
		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp));

	return (cmlb_prop_op(dkp->dk_cmlbhandle,
	    dev, dip, prop_op, mod_flags, name, valuep, lengthp,
	    CMDKPART(dev), NULL));
}

/*
 * dump routine
 */
static int
cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
{
	int		instance;
	struct	cmdk	*dkp;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;
	struct	buf	local;
	struct	buf	*bp;

#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdkdump: call\n");
#endif
	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) || (blkno < 0))
		return (ENXIO);

	if (cmlb_partinfo(
	    dkp->dk_cmlbhandle,
	    CMDKPART(dev),
	    &p_lblkcnt,
	    &p_lblksrt,
	    NULL,
	    NULL,
	    0)) {
		return (ENXIO);
	}

	if ((blkno+nblk) > p_lblkcnt)
		return (EINVAL);

	cmdk_indump = 1;	/* Tell disk targets we are panic dumping */

	bp = &local;
	bzero(bp, sizeof (*bp));
	bp->b_flags = B_BUSY;
	bp->b_un.b_addr = addr;
	bp->b_bcount = nblk << SCTRSHFT;
	SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + blkno)));

	(void) dadk_dump(DKTP_DATA, bp);
	return (bp->b_error);
}

/*
 * Copy in the dadkio_rwcmd according to the user's data model.  If needed,
 * convert it for our internal use.
 */
static int
rwcmd_copyin(struct dadkio_rwcmd *rwcmdp, caddr_t inaddr, int flag)
{
	switch (ddi_model_convert_from(flag)) {
	case DDI_MODEL_ILP32: {
		struct dadkio_rwcmd32 cmd32;

		if (ddi_copyin(inaddr, &cmd32,
		    sizeof (struct dadkio_rwcmd32), flag)) {
			return (EFAULT);
		}

		rwcmdp->cmd = cmd32.cmd;
		rwcmdp->flags = cmd32.flags;
		rwcmdp->blkaddr = (blkaddr_t)cmd32.blkaddr;
		rwcmdp->buflen = cmd32.buflen;
		rwcmdp->bufaddr = (caddr_t)(intptr_t)cmd32.bufaddr;
		/*
		 * Note: we do not convert the 'status' field,
		 * as it should not contain valid data at this
		 * point.
		 */
		bzero(&rwcmdp->status, sizeof (rwcmdp->status));
		break;
	}
	case DDI_MODEL_NONE: {
		if (ddi_copyin(inaddr, rwcmdp,
		    sizeof (struct dadkio_rwcmd), flag)) {
			return (EFAULT);
		}
	}
	}
	return (0);
}

/*
 * If necessary, convert the internal rwcmdp and status to the appropriate
 * data model and copy it out to the user.
 */
static int
rwcmd_copyout(struct dadkio_rwcmd *rwcmdp, caddr_t outaddr, int flag)
{
	switch (ddi_model_convert_from(flag)) {
	case DDI_MODEL_ILP32: {
		struct dadkio_rwcmd32 cmd32;

		cmd32.cmd = rwcmdp->cmd;
		cmd32.flags = rwcmdp->flags;
		cmd32.blkaddr = rwcmdp->blkaddr;
		cmd32.buflen = rwcmdp->buflen;
		ASSERT64(((uintptr_t)rwcmdp->bufaddr >> 32) == 0);
		cmd32.bufaddr = (caddr32_t)(uintptr_t)rwcmdp->bufaddr;

		cmd32.status.status = rwcmdp->status.status;
		cmd32.status.resid = rwcmdp->status.resid;
		cmd32.status.failed_blk_is_valid =
		    rwcmdp->status.failed_blk_is_valid;
		cmd32.status.failed_blk = rwcmdp->status.failed_blk;
		cmd32.status.fru_code_is_valid =
		    rwcmdp->status.fru_code_is_valid;
		cmd32.status.fru_code = rwcmdp->status.fru_code;

		bcopy(rwcmdp->status.add_error_info,
		    cmd32.status.add_error_info, DADKIO_ERROR_INFO_LEN);

		if (ddi_copyout(&cmd32, outaddr,
		    sizeof (struct dadkio_rwcmd32), flag))
			return (EFAULT);
		break;
	}
	case DDI_MODEL_NONE: {
		if (ddi_copyout(rwcmdp, outaddr,
		    sizeof (struct dadkio_rwcmd), flag))
			return (EFAULT);
	}
	}
	return (0);
}

/*
 * ioctl routine
 */
static int
cmdkioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *credp, int *rvalp)
{
	int		instance;
	struct scsi_device *devp;
	struct cmdk	*dkp;
	char		data[NBPSCTR];

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	mutex_enter(&dkp->dk_mutex);
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	bzero(data, sizeof (data));

	switch (cmd) {

	case DKIOCGMEDIAINFO: {
		struct dk_minfo	media_info;
		struct tgdk_geom phyg;

		/* dadk_getphygeom always returns success */
		(void) dadk_getphygeom(DKTP_DATA, &phyg);

		media_info.dki_lbsize = phyg.g_secsiz;
		media_info.dki_capacity = phyg.g_cap;
		media_info.dki_media_type = DK_FIXED_DISK;

		if (ddi_copyout(&media_info, (void *)arg,
		    sizeof (struct dk_minfo), flag)) {
			return (EFAULT);
		} else {
			return (0);
		}
	}

	case DKIOCINFO: {
		struct dk_cinfo *info = (struct dk_cinfo *)data;

		/* controller information */
		info->dki_ctype = (DKTP_EXT->tg_ctype);
		info->dki_cnum = ddi_get_instance(ddi_get_parent(dkp->dk_dip));
		(void) strcpy(info->dki_cname,
		    ddi_get_name(ddi_get_parent(dkp->dk_dip)));

		/* Unit Information */
		info->dki_unit = ddi_get_instance(dkp->dk_dip);
		devp = ddi_get_driver_private(dkp->dk_dip);
		info->dki_slave = (CMDEV_TARG(devp)<<3) | CMDEV_LUN(devp);
		(void) strcpy(info->dki_dname, ddi_driver_name(dkp->dk_dip));
		info->dki_flags = DKI_FMTVOL;
		info->dki_partition = CMDKPART(dev);

		info->dki_maxtransfer = maxphys / DEV_BSIZE;
		info->dki_addr = 1;
		info->dki_space = 0;
		info->dki_prio = 0;
		info->dki_vec = 0;

		if (ddi_copyout(data, (void *)arg, sizeof (*info), flag))
			return (EFAULT);
		else
			return (0);
	}

	case DKIOCSTATE: {
		int	state;
		int	rval;
		diskaddr_t	p_lblksrt;
		diskaddr_t	p_lblkcnt;

		if (ddi_copyin((void *)arg, &state, sizeof (int), flag))
			return (EFAULT);

		/* dadk_check_media blocks until state changes */
		if (rval = dadk_check_media(DKTP_DATA, &state))
			return (rval);

		if (state == DKIO_INSERTED) {

			if (cmlb_validate(dkp->dk_cmlbhandle, 0, 0) != 0)
				return (ENXIO);

			if (cmlb_partinfo(dkp->dk_cmlbhandle, CMDKPART(dev),
			    &p_lblkcnt, &p_lblksrt, NULL, NULL, 0))
				return (ENXIO);

			if (p_lblkcnt <= 0)
				return (ENXIO);
		}

		if (ddi_copyout(&state, (caddr_t)arg, sizeof (int), flag))
			return (EFAULT);

		return (0);
	}

	/*
	 * is media removable?
	 */
	case DKIOCREMOVABLE: {
		int i;

		i = (DKTP_EXT->tg_rmb) ? 1 : 0;

		if (ddi_copyout(&i, (caddr_t)arg, sizeof (int), flag))
			return (EFAULT);

		return (0);
	}

	case DKIOCADDBAD:
		/*
		 * This is not an update mechanism to add bad blocks
		 * to the bad block structures stored on disk.
		 *
		 * addbadsec(1M) will update the bad block data on disk
		 * and use this ioctl to force the driver to re-initialize
		 * the list of bad blocks in the driver.
		 */

		/* start BBH */
		cmdk_bbh_reopen(dkp);
		return (0);

	case DKIOCG_PHYGEOM:
	case DKIOCG_VIRTGEOM:
	case DKIOCGGEOM:
	case DKIOCSGEOM:
	case DKIOCGAPART:
	case DKIOCSAPART:
	case DKIOCGVTOC:
	case DKIOCSVTOC:
	case DKIOCPARTINFO:
	case DKIOCGEXTVTOC:
	case DKIOCSEXTVTOC:
	case DKIOCEXTPARTINFO:
	case DKIOCGMBOOT:
	case DKIOCSMBOOT:
	case DKIOCGETEFI:
	case DKIOCSETEFI:
	case DKIOCPARTITION:
	case DKIOCSETEXTPART:
	{
		int rc;

		rc = cmlb_ioctl(dkp->dk_cmlbhandle, dev, cmd, arg, flag,
		    credp, rvalp, 0);
		if (cmd == DKIOCSVTOC || cmd == DKIOCSEXTVTOC)
			cmdk_devid_setup(dkp);
		return (rc);
	}

	case DIOCTL_RWCMD: {
		struct	dadkio_rwcmd *rwcmdp;
		int	status;

		rwcmdp = kmem_alloc(sizeof (struct dadkio_rwcmd), KM_SLEEP);

		status = rwcmd_copyin(rwcmdp, (caddr_t)arg, flag);

		if (status == 0) {
			bzero(&(rwcmdp->status), sizeof (struct dadkio_status));
			status = dadk_ioctl(DKTP_DATA,
			    dev,
			    cmd,
			    (uintptr_t)rwcmdp,
			    flag,
			    credp,
			    rvalp);
		}
		if (status == 0)
			status = rwcmd_copyout(rwcmdp, (caddr_t)arg, flag);

		kmem_free(rwcmdp, sizeof (struct dadkio_rwcmd));
		return (status);
	}

	default:
		return (dadk_ioctl(DKTP_DATA,
		    dev,
		    cmd,
		    arg,
		    flag,
		    credp,
		    rvalp));
	}
}

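/*
 * close(9E) entry point.  Drop the open accounting for this partition
 * and open type; on the last close of the device the cached label is
 * invalidated.
 */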
/*ARGSUSED1*/
static int
cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp)
{
	int		part;
	ulong_t		partbit;
	int		instance;
	struct cmdk	*dkp;
	int		lastclose = 1;
	int		i;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
	    (otyp >= OTYPCNT))
		return (ENXIO);

	mutex_enter(&dkp->dk_mutex);

	/* check if device has been opened */
	ASSERT(cmdk_isopen(dkp, dev));
	if (!(dkp->dk_flag & CMDK_OPEN)) {
		mutex_exit(&dkp->dk_mutex);
		return (ENXIO);
	}

	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}

	part = CMDKPART(dev);
	partbit = 1 << part;

	/* account for close */
	if (otyp == OTYP_LYR) {
		ASSERT(dkp->dk_open_lyr[part] > 0);
		if (dkp->dk_open_lyr[part])
			dkp->dk_open_lyr[part]--;
	} else {
		ASSERT((dkp->dk_open_reg[otyp] & partbit) != 0);
		dkp->dk_open_reg[otyp] &= ~partbit;
	}
	dkp->dk_open_exl &= ~partbit;

	for (i = 0; i < CMDK_MAXPART; i++)
		if (dkp->dk_open_lyr[i] != 0) {
			lastclose = 0;
			break;
		}

	if (lastclose)
		for (i = 0; i < OTYPCNT; i++)
			if (dkp->dk_open_reg[i] != 0) {
				lastclose = 0;
				break;
			}

	mutex_exit(&dkp->dk_mutex);

	if (lastclose)
		cmlb_invalidate(dkp->dk_cmlbhandle, 0);

	return (DDI_SUCCESS);
}

/*ARGSUSED3*/
static int
cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp)
{
	dev_t		dev = *dev_p;
	int		part;
	ulong_t		partbit;
	int		instance;
	struct	cmdk	*dkp;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;
	int		i;
	int		nodelay;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	if (otyp >= OTYPCNT)
		return (EINVAL);

	mutex_enter(&dkp->dk_mutex);
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	part = CMDKPART(dev);
	partbit = 1 << part;
	nodelay = (flag & (FNDELAY | FNONBLOCK));

	mutex_enter(&dkp->dk_mutex);

	if (cmlb_validate(dkp->dk_cmlbhandle, 0, 0) != 0) {

		/* fail if not doing non block open */
		if (!nodelay) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	} else if (cmlb_partinfo(dkp->dk_cmlbhandle, part, &p_lblkcnt,
	    &p_lblksrt, NULL, NULL, 0) == 0) {

		if (p_lblkcnt <= 0 && (!nodelay || otyp != OTYP_CHR)) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	} else {
		/* fail if not doing non block open */
		if (!nodelay) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	}

	if ((DKTP_EXT->tg_rdonly) && (flag & FWRITE)) {
		mutex_exit(&dkp->dk_mutex);
		return (EROFS);
	}

	/* check for part already opened exclusively */
	if (dkp->dk_open_exl & partbit)
		goto excl_open_fail;

	/* check if we can establish exclusive open */
	if (flag & FEXCL) {
		if (dkp->dk_open_lyr[part])
			goto excl_open_fail;
		for (i = 0; i < OTYPCNT; i++) {
			if (dkp->dk_open_reg[i] & partbit)
				goto excl_open_fail;
		}
	}

	/* open will succeed, account for open */
	dkp->dk_flag |= CMDK_OPEN;
	if (otyp == OTYP_LYR)
		dkp->dk_open_lyr[part]++;
	else
		dkp->dk_open_reg[otyp] |= partbit;
	if (flag & FEXCL)
		dkp->dk_open_exl |= partbit;

	mutex_exit(&dkp->dk_mutex);
	return (DDI_SUCCESS);

excl_open_fail:
	mutex_exit(&dkp->dk_mutex);
	return (EBUSY);
}

/*
 * read routine
 */
/*ARGSUSED2*/
static int
cmdkread(dev_t dev, struct uio *uio, cred_t *credp)
{
	return (cmdkrw(dev, uio, B_READ));
}

/*
 * async read routine
 */
/*ARGSUSED2*/
static int
cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp)
{
	return (cmdkarw(dev, aio, B_READ));
}

/*
 * write routine
 */
/*ARGSUSED2*/
static int
cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp)
{
	return (cmdkrw(dev, uio, B_WRITE));
}

/*
 * async write routine
 */
/*ARGSUSED2*/
static int
cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp)
{
	return (cmdkarw(dev, aio, B_WRITE));
}

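/*
 * minphys routine passed to physio/aphysio: clamp the size of a single
 * transfer to DK_MAXRECSIZE.
 */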
static void
cmdkmin(struct buf *bp)
{
	if (bp->b_bcount > DK_MAXRECSIZE)
		bp->b_bcount = DK_MAXRECSIZE;
}

static int
cmdkrw(dev_t dev, struct uio *uio, int flag)
{
	int		instance;
	struct	cmdk	*dkp;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	mutex_enter(&dkp->dk_mutex);
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	return (physio(cmdkstrategy, (struct buf *)0, dev, flag, cmdkmin, uio));
}

static int
cmdkarw(dev_t dev, struct aio_req *aio, int flag)
{
	int		instance;
	struct	cmdk	*dkp;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	mutex_enter(&dkp->dk_mutex);
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	return (aphysio(cmdkstrategy, anocancel, dev, flag, cmdkmin, aio));
}

/*
 * strategy routine
 */
static int
cmdkstrategy(struct buf *bp)
{
	int		instance;
	struct	cmdk	*dkp;
	long		d_cnt;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;

	instance = CMDKUNIT(bp->b_edev);
	if (cmdk_indump || !(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
	    (dkblock(bp) < 0)) {
		bp->b_resid = bp->b_bcount;
		SETBPERR(bp, ENXIO);
		biodone(bp);
		return (0);
	}

	mutex_enter(&dkp->dk_mutex);
	ASSERT(cmdk_isopen(dkp, bp->b_edev));
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	bp->b_flags &= ~(B_DONE|B_ERROR);
	bp->b_resid = 0;
	bp->av_back = NULL;

	/*
	 * only re-read the vtoc if necessary (force == FALSE)
	 */
	if (cmlb_partinfo(dkp->dk_cmlbhandle, CMDKPART(bp->b_edev),
	    &p_lblkcnt, &p_lblksrt, NULL, NULL, 0)) {
		SETBPERR(bp, ENXIO);
	}

	if ((bp->b_bcount & (NBPSCTR-1)) || (dkblock(bp) > p_lblkcnt))
		SETBPERR(bp, ENXIO);

	if ((bp->b_flags & B_ERROR) || (dkblock(bp) == p_lblkcnt)) {
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}

	d_cnt = bp->b_bcount >> SCTRSHFT;
	if ((dkblock(bp) + d_cnt) > p_lblkcnt) {
		bp->b_resid = ((dkblock(bp) + d_cnt) - p_lblkcnt) << SCTRSHFT;
		bp->b_bcount -= bp->b_resid;
	}

	SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + dkblock(bp))));
	if (dadk_strategy(DKTP_DATA, bp) != DDI_SUCCESS) {
		bp->b_resid += bp->b_bcount;
		biodone(bp);
	}
	return (0);
}

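/*
 * Build the target-disk linkage: instantiate the queueing and flow
 * control objects named by the "queue" and "flow_control" properties,
 * then create and initialize the dadk object that uses them.
 */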
static int
cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp)
{
	struct scsi_device *devp;
	opaque_t	queobjp = NULL;
	opaque_t	flcobjp = NULL;
	char		que_keyvalp[64];
	int		que_keylen;
	char		flc_keyvalp[64];
	int		flc_keylen;

	ASSERT(mutex_owned(&dkp->dk_mutex));

	/* Create linkage to queueing routines based on property */
	que_keylen = sizeof (que_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "cmdk_create_obj: queue property undefined");
		return (DDI_FAILURE);
	}
	que_keyvalp[que_keylen] = (char)0;

	if (strcmp(que_keyvalp, "qfifo") == 0) {
		queobjp = (opaque_t)qfifo_create();
	} else if (strcmp(que_keyvalp, "qsort") == 0) {
		queobjp = (opaque_t)qsort_create();
	} else {
		return (DDI_FAILURE);
	}

	/* Create linkage to flow-control routines based on property */
	flc_keylen = sizeof (flc_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "cmdk_create_obj: flow-control property undefined");
		return (DDI_FAILURE);
	}

	flc_keyvalp[flc_keylen] = (char)0;

	if (strcmp(flc_keyvalp, "dsngl") == 0) {
		flcobjp = (opaque_t)dsngl_create();
	} else if (strcmp(flc_keyvalp, "dmult") == 0) {
		flcobjp = (opaque_t)dmult_create();
	} else {
		return (DDI_FAILURE);
	}

	/* populate bbh_obj object stored in dkp */
	dkp->dk_bbh_obj.bbh_data = dkp;
	dkp->dk_bbh_obj.bbh_ops = &cmdk_bbh_ops;

	/* create linkage to dadk */
	dkp->dk_tgobjp = (opaque_t)dadk_create();

	devp = ddi_get_driver_private(dip);
	(void) dadk_init(DKTP_DATA, devp, flcobjp, queobjp, &dkp->dk_bbh_obj,
	    NULL);

	return (DDI_SUCCESS);
}

static void
cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp)
{
	char		que_keyvalp[64];
	int		que_keylen;
	char		flc_keyvalp[64];
	int		flc_keylen;

	ASSERT(mutex_owned(&dkp->dk_mutex));

	(void) dadk_free((dkp->dk_tgobjp));
	dkp->dk_tgobjp = NULL;

	que_keylen = sizeof (que_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "cmdk_destroy_obj: queue property undefined");
		return;
	}
	que_keyvalp[que_keylen] = (char)0;

	flc_keylen = sizeof (flc_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "cmdk_destroy_obj: flow-control property undefined");
		return;
	}
	flc_keyvalp[flc_keylen] = (char)0;
}

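/*
 * cmlb TG_READ/TG_WRITE callback: move label data between 'bufaddr'
 * and the disk through a dadk iob buffer.
 */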
/*ARGSUSED5*/
static int
cmdk_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr,
    diskaddr_t start, size_t count, void *tg_cookie)
{
	struct cmdk	*dkp;
	opaque_t	handle;
	int		rc = 0;
	char		*bufa;

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
	if (dkp == NULL)
		return (ENXIO);

	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	/* count must be multiple of 512 */
	count = (count + NBPSCTR - 1) & -NBPSCTR;
	handle = dadk_iob_alloc(DKTP_DATA, start, count, KM_SLEEP);
	if (!handle)
		return (ENOMEM);

	if (cmd == TG_READ) {
		bufa = dadk_iob_xfer(DKTP_DATA, handle, B_READ);
		if (!bufa)
			rc = EIO;
		else
			bcopy(bufa, bufaddr, count);
	} else {
		bufa = dadk_iob_htoc(DKTP_DATA, handle);
		bcopy(bufaddr, bufa, count);
		bufa = dadk_iob_xfer(DKTP_DATA, handle, B_WRITE);
		if (!bufa)
			rc = EIO;
	}
	(void) dadk_iob_free(DKTP_DATA, handle);

	return (rc);
}

/*ARGSUSED3*/
static int
cmdk_lb_getinfo(dev_info_t *dip, int cmd, void *arg, void *tg_cookie)
{
	struct cmdk		*dkp;
	struct tgdk_geom	phyg;

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
	if (dkp == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM: {
		cmlb_geom_t *phygeomp = (cmlb_geom_t *)arg;

		/* dadk_getphygeom always returns success */
		(void) dadk_getphygeom(DKTP_DATA, &phyg);

		phygeomp->g_capacity = phyg.g_cap;
		phygeomp->g_nsect = phyg.g_sec;
		phygeomp->g_nhead = phyg.g_head;
		phygeomp->g_acyl = phyg.g_acyl;
		phygeomp->g_ncyl = phyg.g_cyl;
		phygeomp->g_secsize = phyg.g_secsiz;
		phygeomp->g_intrlv = 1;
		phygeomp->g_rpm = 3600;

		return (0);
	}

	case TG_GETVIRTGEOM: {
		cmlb_geom_t *virtgeomp = (cmlb_geom_t *)arg;
		diskaddr_t	capacity;

		(void) dadk_getgeom(DKTP_DATA, &phyg);
		capacity = phyg.g_cap;

		/*
		 * If the controller returned us something that doesn't
		 * really fit into an Int 13/function 8 geometry
		 * result, just fail the ioctl.  See PSARC 1998/313.
		 */
		if (capacity < 0 || capacity >= 63 * 254 * 1024)
			return (EINVAL);

		virtgeomp->g_capacity = capacity;
		virtgeomp->g_nsect = 63;
		virtgeomp->g_nhead = 254;
		virtgeomp->g_ncyl = capacity / (63 * 254);
		virtgeomp->g_acyl = 0;
		virtgeomp->g_secsize = 512;
		virtgeomp->g_intrlv = 1;
		virtgeomp->g_rpm = 3600;

		return (0);
	}

	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE:
	{
		/* dadk_getphygeom always returns success */
		(void) dadk_getphygeom(DKTP_DATA, &phyg);
		if (cmd == TG_GETCAPACITY)
			*(diskaddr_t *)arg = phyg.g_cap;
		else
			*(uint32_t *)arg = (uint32_t)phyg.g_secsiz;

		return (0);
	}

	case TG_GETATTR: {
		tg_attribute_t *tgattribute = (tg_attribute_t *)arg;
		if ((DKTP_EXT->tg_rdonly))
			tgattribute->media_is_writable = FALSE;
		else
			tgattribute->media_is_writable = TRUE;

		return (0);
	}

	default:
		return (ENOTTY);
	}
}

/*
 * Create and register the devid.
 * There are 4 different ways we can get a device id:
 * 1. Already have one - nothing to do
 * 2. Build one from the drive's model and serial numbers
 * 3. Read one from the disk (first sector of last track)
 * 4. Fabricate one and write it on the disk.
 * If any of these succeeds, register the device id
 */
static void
cmdk_devid_setup(struct cmdk *dkp)
{
	int	rc;

	/* Try options until one succeeds, or all have failed */

	/* 1. All done if already registered */
	if (dkp->dk_devid != NULL)
		return;

	/* 2. Build a devid from the model and serial number */
	rc = cmdk_devid_modser(dkp);
	if (rc != DDI_SUCCESS) {
		/* 3. Read devid from the disk, if present */
		rc = cmdk_devid_read(dkp);

		/* 4. otherwise make one up and write it on the disk */
		if (rc != DDI_SUCCESS)
			rc = cmdk_devid_fabricate(dkp);
	}

	/* If we managed to get a devid any of the above ways, register it */
	if (rc == DDI_SUCCESS)
		(void) ddi_devid_register(dkp->dk_dip, dkp->dk_devid);
}

/*
 * Build a devid from the model and serial number.
 * Return DDI_SUCCESS or DDI_FAILURE.
 */
static int
cmdk_devid_modser(struct cmdk *dkp)
{
	int	rc = DDI_FAILURE;
	char	*hwid;
	int	modlen;
	int	serlen;

	/*
	 * device ID is a concatenation of model number, '=', serial number.
	 */
	hwid = kmem_alloc(CMDK_HWIDLEN, KM_SLEEP);
	modlen = cmdk_get_modser(dkp, DIOCTL_GETMODEL, hwid, CMDK_HWIDLEN);
	if (modlen == 0) {
		rc = DDI_FAILURE;
		goto err;
	}
	hwid[modlen++] = '=';
	serlen = cmdk_get_modser(dkp, DIOCTL_GETSERIAL,
	    hwid + modlen, CMDK_HWIDLEN - modlen);
	if (serlen == 0) {
		rc = DDI_FAILURE;
		goto err;
	}
	hwid[modlen + serlen] = 0;

	/* Initialize the device ID, trailing NULL not included */
	rc = ddi_devid_init(dkp->dk_dip, DEVID_ATA_SERIAL, modlen + serlen,
	    hwid, &dkp->dk_devid);
	if (rc != DDI_SUCCESS) {
		rc = DDI_FAILURE;
		goto err;
	}

	rc = DDI_SUCCESS;

err:
	kmem_free(hwid, CMDK_HWIDLEN);
	return (rc);
}

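/*
 * Fetch the model or serial number string through dadk_ioctl() and trim
 * trailing spaces/NULs.  Returns the trimmed length, or 0 if the string
 * is missing or blank.
 */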
static int
cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len)
{
	dadk_ioc_string_t strarg;
	int		rval;
	char		*s;
	char		ch;
	boolean_t	ret;
	int		i;
	int		tb;

	strarg.is_buf = buf;
	strarg.is_size = len;
	if (dadk_ioctl(DKTP_DATA,
	    dkp->dk_dev,
	    ioccmd,
	    (uintptr_t)&strarg,
	    FNATIVE | FKIOCTL,
	    NULL,
	    &rval) != 0)
		return (0);

	/*
	 * a valid model/serial string must contain a non-zero non-space
	 * character; trim trailing spaces/NULLs
	 */
	ret = B_FALSE;
	s = buf;
	for (i = 0; i < strarg.is_size; i++) {
		ch = *s++;
		if (ch != ' ' && ch != '\0')
			tb = i + 1;
		if (ch != ' ' && ch != '\0' && ch != '0')
			ret = B_TRUE;
	}

	if (ret == B_FALSE)
		return (0);

	return (tb);
}

/*
 * Read a devid from the first block of the last track of
 * the last cylinder.  Make sure what we read is a valid devid.
 * Return DDI_SUCCESS or DDI_FAILURE.
 */
static int
cmdk_devid_read(struct cmdk *dkp)
{
	diskaddr_t	blk;
	struct dk_devid *dkdevidp;
	uint_t		*ip;
	int		chksum;
	int		i, sz;
	tgdk_iob_handle	handle = NULL;
	int		rc = DDI_FAILURE;

	if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk, 0))
		goto err;

	/* read the devid */
	handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP);
	if (handle == NULL)
		goto err;

	dkdevidp = (struct dk_devid *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
	if (dkdevidp == NULL)
		goto err;

	/* Validate the revision */
	if ((dkdevidp->dkd_rev_hi != DK_DEVID_REV_MSB) ||
	    (dkdevidp->dkd_rev_lo != DK_DEVID_REV_LSB))
		goto err;

	/* Calculate the checksum */
	chksum = 0;
	ip = (uint_t *)dkdevidp;
	for (i = 0; i < ((NBPSCTR - sizeof (int))/sizeof (int)); i++)
		chksum ^= ip[i];
	if (DKD_GETCHKSUM(dkdevidp) != chksum)
		goto err;

	/* Validate the device id */
	if (ddi_devid_valid((ddi_devid_t)dkdevidp->dkd_devid) != DDI_SUCCESS)
		goto err;

	/* keep a copy of the device id */
	sz = ddi_devid_sizeof((ddi_devid_t)dkdevidp->dkd_devid);
	dkp->dk_devid = kmem_alloc(sz, KM_SLEEP);
	bcopy(dkdevidp->dkd_devid, dkp->dk_devid, sz);

	rc = DDI_SUCCESS;

err:
	if (handle != NULL)
		(void) dadk_iob_free(DKTP_DATA, handle);
	return (rc);
}

/*
 * Create a devid and write it on the first block of the last track of
 * the last cylinder.
 * Return DDI_SUCCESS or DDI_FAILURE.
 */
static int
cmdk_devid_fabricate(struct cmdk *dkp)
{
	ddi_devid_t	devid = NULL;	/* devid made by ddi_devid_init */
	struct dk_devid	*dkdevidp;	/* devid struct stored on disk */
	diskaddr_t	blk;
	tgdk_iob_handle	handle = NULL;
	uint_t		*ip, chksum;
	int		i;
	int		rc = DDI_FAILURE;

	if (ddi_devid_init(dkp->dk_dip, DEVID_FAB, 0, NULL, &devid) !=
	    DDI_SUCCESS)
		goto err;

	if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk, 0)) {
		/* no device id block address */
		goto err;
	}

	handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP);
	if (!handle)
		goto err;

	/* Locate the buffer */
	dkdevidp = (struct dk_devid *)dadk_iob_htoc(DKTP_DATA, handle);

	/* Fill in the revision */
	bzero(dkdevidp, NBPSCTR);
	dkdevidp->dkd_rev_hi = DK_DEVID_REV_MSB;
	dkdevidp->dkd_rev_lo = DK_DEVID_REV_LSB;

	/* Copy in the device id */
	i = ddi_devid_sizeof(devid);
	if (i > DK_DEVID_SIZE)
		goto err;
	bcopy(devid, dkdevidp->dkd_devid, i);

	/* Calculate the chksum */
	chksum = 0;
	ip = (uint_t *)dkdevidp;
	for (i = 0; i < ((NBPSCTR - sizeof (int))/sizeof (int)); i++)
		chksum ^= ip[i];

	/* Fill in the checksum */
	DKD_FORMCHKSUM(chksum, dkdevidp);

	/* write the devid */
	(void) dadk_iob_xfer(DKTP_DATA, handle, B_WRITE);

	dkp->dk_devid = devid;

	rc = DDI_SUCCESS;

err:
	if (handle != NULL)
		(void) dadk_iob_free(DKTP_DATA, handle);

	if (rc != DDI_SUCCESS && devid != NULL)
		ddi_devid_free(devid);

	return (rc);
}

static void
cmdk_bbh_free_alts(struct cmdk *dkp)
{
	if (dkp->dk_alts_hdl) {
		(void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
		kmem_free(dkp->dk_slc_cnt,
		    NDKMAP * (sizeof (uint32_t) + sizeof (struct alts_ent *)));
		dkp->dk_alts_hdl = NULL;
	}
}

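/*
 * (Re)load the bad block remap tables from the V_ALTSCTR slice, if one
 * exists, and build the per-slice indexes into them; otherwise tear
 * down any previously loaded tables.
 */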
static void
cmdk_bbh_reopen(struct cmdk *dkp)
{
	tgdk_iob_handle		handle = NULL;
	diskaddr_t		slcb, slcn, slce;
	struct	alts_parttbl	*ap;
	struct	alts_ent	*enttblp;
	uint32_t		altused;
	uint32_t		altbase;
	uint32_t		altlast;
	int			alts;
	uint16_t		vtoctag;
	int			i, j;

	/* find slice with V_ALTSCTR tag */
	for (alts = 0; alts < NDKMAP; alts++) {
		if (cmlb_partinfo(
		    dkp->dk_cmlbhandle,
		    alts,
		    &slcn,
		    &slcb,
		    NULL,
		    &vtoctag,
		    0)) {
			goto empty;	/* no partition table exists */
		}

		if (vtoctag == V_ALTSCTR && slcn > 1)
			break;
	}
	if (alts >= NDKMAP) {
		goto empty;	/* no V_ALTSCTR slice defined */
	}

	/* read in ALTS label block */
	handle = dadk_iob_alloc(DKTP_DATA, slcb, NBPSCTR, KM_SLEEP);
	if (!handle) {
		goto empty;
	}

	ap = (struct alts_parttbl *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
	if (!ap || (ap->alts_sanity != ALTS_SANITY)) {
		goto empty;
	}

	altused = ap->alts_ent_used;	/* number of BB entries */
	altbase = ap->alts_ent_base;	/* blk offset from begin slice */
	altlast = ap->alts_ent_end;	/* blk offset to last block */
	/* ((altused * sizeof (struct alts_ent) + NBPSCTR - 1) & ~NBPSCTR) */

	if (altused == 0 ||
	    altbase < 1 ||
	    altbase > altlast ||
	    altlast >= slcn) {
		goto empty;
	}
	(void) dadk_iob_free(DKTP_DATA, handle);

	/* read in ALTS remapping table */
	handle = dadk_iob_alloc(DKTP_DATA,
	    slcb + altbase,
	    (altlast - altbase + 1) << SCTRSHFT, KM_SLEEP);
	if (!handle) {
		goto empty;
	}

	enttblp = (struct alts_ent *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
	if (!enttblp) {
		goto empty;
	}

	rw_enter(&dkp->dk_bbh_mutex, RW_WRITER);

	/* allocate space for dk_slc_cnt and dk_slc_ent tables */
	if (dkp->dk_slc_cnt == NULL) {
		dkp->dk_slc_cnt = kmem_alloc(NDKMAP *
		    (sizeof (uint32_t) + sizeof (struct alts_ent *)), KM_SLEEP);
	}
	dkp->dk_slc_ent = (struct alts_ent **)(dkp->dk_slc_cnt + NDKMAP);

	/* free previous BB table (if any) */
	if (dkp->dk_alts_hdl) {
		(void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
		dkp->dk_alts_hdl = NULL;
		dkp->dk_altused = 0;
	}

	/* save linkage to new BB table */
	dkp->dk_alts_hdl = handle;
	dkp->dk_altused = altused;

	/*
	 * build indexes to BB table by slice
	 * effectively we have
	 *	struct alts_ent *enttblp[altused];
	 *
	 *	uint32_t	dk_slc_cnt[NDKMAP];
	 *	struct alts_ent *dk_slc_ent[NDKMAP];
	 */
	for (i = 0; i < NDKMAP; i++) {
		if (cmlb_partinfo(
		    dkp->dk_cmlbhandle,
		    i,
		    &slcn,
		    &slcb,
		    NULL,
		    NULL,
		    0)) {
			goto empty1;
		}

		dkp->dk_slc_cnt[i] = 0;
		if (slcn == 0)
			continue;	/* slice is not allocated */

		/* last block in slice */
		slce = slcb + slcn - 1;

		/* find the first remap entry at or after start of slice */
		for (j = 0; j < altused; j++) {
			if (enttblp[j].bad_start + enttblp[j].bad_end >= slcb)
				break;
		}
		dkp->dk_slc_ent[i] = enttblp + j;

		/* count remap entries until end of slice */
		for (; j < altused && enttblp[j].bad_start <= slce; j++) {
			dkp->dk_slc_cnt[i] += 1;
		}
	}

	rw_exit(&dkp->dk_bbh_mutex);
	return;

empty:
	rw_enter(&dkp->dk_bbh_mutex, RW_WRITER);
empty1:
	if (handle && handle != dkp->dk_alts_hdl)
		(void) dadk_iob_free(DKTP_DATA, handle);

	if (dkp->dk_alts_hdl) {
		(void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
		dkp->dk_alts_hdl = NULL;
	}

	rw_exit(&dkp->dk_bbh_mutex);
}

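/*
 * Return the next remap cookie from a bbh handle and advance the
 * handle's cookie index.
 */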
/*ARGSUSED*/
static bbh_cookie_t
cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle)
{
	struct	bbh_handle *hp;
	bbh_cookie_t ckp;

	hp = (struct bbh_handle *)handle;
	ckp = hp->h_cktab + hp->h_idx;
	hp->h_idx++;
	return (ckp);
}

/*ARGSUSED*/
static void
cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle)
{
	struct bbh_handle *hp;

	hp = (struct bbh_handle *)handle;
	kmem_free(handle, (sizeof (struct bbh_handle) +
	    (hp->h_totck * (sizeof (struct bbh_cookie)))));
}

/*
 * cmdk_bbh_gethandle remaps the bad sectors to alternates.
 * There are 7 different cases when the comparison is made
 * between the bad sector cluster and the disk section.
 *
 *	bad sector cluster	gggggggggggbbbbbbbggggggggggg
 *	case 1:			   ddddd
 *	case 2:				   -d-----
 *	case 3:					     ddddd
 *	case 4:			         dddddddddddd
 *	case 5:			      ddddddd-----
 *	case 6:				 ---ddddddd
 *	case 7:				 ddddddd
 *
 * where:  g = good sector,	b = bad sector
 *	   d = sector in disk section
 *	   - = disk section may be extended to cover that disk area
 */

static opaque_t
cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp)
{
	struct cmdk		*dkp = (struct cmdk *)bbh_data;
	struct bbh_handle	*hp;
	struct bbh_cookie	*ckp;
	struct alts_ent		*altp;
	uint32_t		alts_used;
	uint32_t		part = CMDKPART(bp->b_edev);
	daddr32_t		lastsec;
	long			d_count;
	int			i;
	int			idx;
	int			cnt;

	if (part >= V_NUMPAR)
		return (NULL);

	/*
	 * This if statement is atomic and it will succeed
	 * if there are no bad blocks (almost always)
	 *
	 * so this if is performed outside of the rw_enter for speed
	 * and then repeated inside the rw_enter for safety
	 */
	if (!dkp->dk_alts_hdl) {
		return (NULL);
	}

	rw_enter(&dkp->dk_bbh_mutex, RW_READER);

	if (dkp->dk_alts_hdl == NULL) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}

	alts_used = dkp->dk_slc_cnt[part];
	if (alts_used == 0) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}
	altp = dkp->dk_slc_ent[part];

	/*
	 * binary search for the largest bad sector index in the alternate
	 * entry table which overlaps or is larger than the starting d_sec
	 */
	i = cmdk_bbh_bsearch(altp, alts_used, GET_BP_SEC(bp));
	/* if starting sector is > the largest bad sector, return */
	if (i == -1) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}
	/* i is the starting index.  Set altp to the starting entry addr */
	altp += i;

	d_count = bp->b_bcount >> SCTRSHFT;
	lastsec = GET_BP_SEC(bp) + d_count - 1;

	/* calculate the number of bad sectors */
	for (idx = i, cnt = 0; idx < alts_used; idx++, altp++, cnt++) {
		if (lastsec < altp->bad_start)
			break;
	}

	if (!cnt) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}

	/* calculate the maximum number of reserved cookies */
	cnt <<= 1;
	cnt++;

	/* allocate the handle */
	hp = (struct bbh_handle *)kmem_zalloc((sizeof (*hp) +
	    (cnt * sizeof (*ckp))), KM_SLEEP);

	hp->h_idx = 0;
	hp->h_totck = cnt;
	ckp = hp->h_cktab = (struct bbh_cookie *)(hp + 1);
	ckp[0].ck_sector = GET_BP_SEC(bp);
	ckp[0].ck_seclen = d_count;

	altp = dkp->dk_slc_ent[part];
	altp += i;
	for (idx = 0; i < alts_used; i++, altp++) {
		/* CASE 1: */
		if (lastsec < altp->bad_start)
			break;

		/* CASE 3: */
		if (ckp[idx].ck_sector > altp->bad_end)
			continue;

		/* CASE 2 and 7: */
		if ((ckp[idx].ck_sector >= altp->bad_start) &&
		    (lastsec <= altp->bad_end)) {
			ckp[idx].ck_sector = altp->good_start +
			    ckp[idx].ck_sector - altp->bad_start;
			break;
		}

		/* at least one bad sector in our section.  break it. */
		/* CASE 5: */
		if ((lastsec >= altp->bad_start) &&
		    (lastsec <= altp->bad_end)) {
			ckp[idx+1].ck_seclen = lastsec - altp->bad_start + 1;
			ckp[idx].ck_seclen -= ckp[idx+1].ck_seclen;
			ckp[idx+1].ck_sector = altp->good_start;
			break;
		}
		/* CASE 6: */
		if ((ckp[idx].ck_sector <= altp->bad_end) &&
		    (ckp[idx].ck_sector >= altp->bad_start)) {
			ckp[idx+1].ck_seclen = ckp[idx].ck_seclen;
			ckp[idx].ck_seclen = altp->bad_end -
			    ckp[idx].ck_sector + 1;
			ckp[idx+1].ck_seclen -= ckp[idx].ck_seclen;
			ckp[idx].ck_sector = altp->good_start +
			    ckp[idx].ck_sector - altp->bad_start;
			idx++;
			ckp[idx].ck_sector = altp->bad_end + 1;
			continue;	/* check rest of section */
		}

		/* CASE 4: */
		ckp[idx].ck_seclen = altp->bad_start - ckp[idx].ck_sector;
		ckp[idx+1].ck_sector = altp->good_start;
		ckp[idx+1].ck_seclen = altp->bad_end - altp->bad_start + 1;
		idx += 2;
		ckp[idx].ck_sector = altp->bad_end + 1;
		ckp[idx].ck_seclen = lastsec - altp->bad_end;
	}

	rw_exit(&dkp->dk_bbh_mutex);
	return ((opaque_t)hp);
}

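/*
 * Binary search of the sorted remap table.  Returns the index of the
 * entry whose bad range contains 'key', or the index of the closest
 * entry beyond it; -1 if 'key' lies past the last bad sector.
 */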
static int
cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key)
{
	int	i;
	int	ind;
	int	interval;
	int	mystatus = -1;

	if (!cnt)
		return (mystatus);

	ind = 1; /* compiler complains about possible uninitialized var */
	for (i = 1; i <= cnt; i <<= 1)
		ind = i;

	for (interval = ind; interval; ) {
		if ((key >= buf[ind-1].bad_start) &&
		    (key <= buf[ind-1].bad_end)) {
			return (ind-1);
		} else {
			interval >>= 1;
			if (key < buf[ind-1].bad_start) {
				/* record the largest bad sector index */
				mystatus = ind-1;
				if (!interval)
					break;
				ind = ind - interval;
			} else {
				/*
				 * if key is larger than the last element
				 * then break
				 */
				if ((ind == cnt) || !interval)
					break;
				if ((ind+interval) <= cnt)
					ind += interval;
			}
		}
	}
	return (mystatus);
}