/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/scsi/scsi.h>
#include <sys/dktp/cm.h>
#include <sys/dktp/quetypes.h>
#include <sys/dktp/queue.h>
#include <sys/dktp/fctypes.h>
#include <sys/dktp/flowctrl.h>
#include <sys/dktp/cmdev.h>
#include <sys/dkio.h>
#include <sys/dktp/tgdk.h>
#include <sys/dktp/dadk.h>
#include <sys/dktp/bbh.h>
#include <sys/dktp/altsctr.h>
#include <sys/dktp/cmdk.h>

#include <sys/stat.h>
#include <sys/vtoc.h>
#include <sys/file.h>
#include <sys/dktp/dadkio.h>
#include <sys/aio_req.h>

#include <sys/cmlb.h>

/*
 * Local Static Data
 */
#ifdef CMDK_DEBUG
#define	DENT	0x0001
#define	DIO	0x0002

static	int	cmdk_debug = DIO;
#endif

#ifndef	TRUE
#define	TRUE	1
#endif

#ifndef	FALSE
#define	FALSE	0
#endif

/*
 * NDKMAP is the base number for accessing the fdisk partitions.
 * c?d?p0 --> cmdk@?,?:q
 */
#define	PARTITION0_INDEX	(NDKMAP + 0)

#define	DKTP_DATA		(dkp->dk_tgobjp)->tg_data
#define	DKTP_EXT		(dkp->dk_tgobjp)->tg_ext
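
/*
 * Editor's note (illustration, not in the original source): a cmdk
 * minor number packs the instance number in the high bits and the
 * partition in the low bits.  The decoding macros are assumed to come
 * from <sys/dktp/cmdk.h>; conceptually:
 *
 *	instance  = CMDKUNIT(dev);	-- getminor(dev) >> CMDK_UNITSHF
 *	partition = CMDKPART(dev);	-- the low CMDK_UNITSHF bits
 *
 * which matches the makedevice() call in cmdkprobe() below.
 */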

static void *cmdk_state;

/*
 * the cmdk_attach_mutex protects cmdk_max_instance in multi-threaded
 * attach situations
 */
static kmutex_t cmdk_attach_mutex;
static int cmdk_max_instance = 0;

/*
 * Panic dumpsys state
 * There is only a single flag; it is not mutex protected because the
 * system prevents thread switching during a panic dump, so cmdk_dump
 * is only ever called single-threaded.
 */
static int	cmdk_indump;

/*
 * Local Function Prototypes
 */
static int cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp);
static void cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp);
static void cmdkmin(struct buf *bp);
static int cmdkrw(dev_t dev, struct uio *uio, int flag);
static int cmdkarw(dev_t dev, struct aio_req *aio, int flag);

/*
 * Bad Block Handling Functions Prototypes
 */
static void cmdk_bbh_reopen(struct cmdk *dkp);
static opaque_t cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp);
static bbh_cookie_t cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle);
static void cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle);
static void cmdk_bbh_close(struct cmdk *dkp);
static void cmdk_bbh_setalts_idx(struct cmdk *dkp);
static int cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key);

static struct bbh_objops cmdk_bbh_ops = {
	nulldev,
	nulldev,
	cmdk_bbh_gethandle,
	cmdk_bbh_htoc,
	cmdk_bbh_freehandle,
	0, 0
};

static int cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp);
static int cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp);
static int cmdkstrategy(struct buf *bp);
static int cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int cmdkioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int cmdkread(dev_t dev, struct uio *uio, cred_t *credp);
static int cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp);
static int cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);
static int cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp);
static int cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp);

/*
 * Device driver ops vector
 */

static struct cb_ops cmdk_cb_ops = {
	cmdkopen,		/* open */
	cmdkclose,		/* close */
	cmdkstrategy,		/* strategy */
	nodev,			/* print */
	cmdkdump,		/* dump */
	cmdkread,		/* read */
	cmdkwrite,		/* write */
	cmdkioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	cmdk_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_64BIT | D_MP | D_NEW,	/* Driver compatibility flag */
	CB_REV,			/* cb_rev */
	cmdkaread,		/* async read */
	cmdkawrite		/* async write */
};

static int cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int cmdkprobe(dev_info_t *dip);
static int cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd);

static void cmdk_setup_pm(dev_info_t *dip, struct cmdk *dkp);
static int cmdkresume(dev_info_t *dip);
static int cmdksuspend(dev_info_t *dip);
static int cmdkpower(dev_info_t *dip, int component, int level);

struct dev_ops cmdk_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt */
	cmdkinfo,		/* info */
	nulldev,		/* identify */
	cmdkprobe,		/* probe */
	cmdkattach,		/* attach */
	cmdkdetach,		/* detach */
	nodev,			/* reset */
	&cmdk_cb_ops,		/* driver operations */
	(struct bus_ops *)0,	/* bus operations */
	cmdkpower		/* power */
};

/*
 * This is the loadable module wrapper.
 */
#include <sys/modctl.h>

extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	"Common Direct Access Disk %I%",
	&cmdk_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

/* Function prototypes for cmlb callbacks */

static int cmdk_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr,
    diskaddr_t start, size_t length, void *tg_cookie);

static int cmdk_lb_getinfo(dev_info_t *dip, int cmd, void *arg,
    void *tg_cookie);

static void cmdk_devid_setup(struct cmdk *dkp);
static int cmdk_devid_modser(struct cmdk *dkp);
static int cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len);
static int cmdk_devid_fabricate(struct cmdk *dkp);
static int cmdk_devid_read(struct cmdk *dkp);

static cmlb_tg_ops_t cmdk_lb_ops = {
	TG_DK_OPS_VERSION_1,
	cmdk_lb_rdwr,
	cmdk_lb_getinfo
};

static boolean_t
cmdk_isopen(struct cmdk *dkp, dev_t dev)
{
	int	part, otyp;
	ulong_t	partbit;

	ASSERT(MUTEX_HELD((&dkp->dk_mutex)));

	part = CMDKPART(dev);
	partbit = 1 << part;

	/* account for close */
	if (dkp->dk_open_lyr[part] != 0)
		return (B_TRUE);
	for (otyp = 0; otyp < OTYPCNT; otyp++)
		if (dkp->dk_open_reg[otyp] & partbit)
			return (B_TRUE);
	return (B_FALSE);
}

int
_init(void)
{
	int	rval;

	if (rval = ddi_soft_state_init(&cmdk_state, sizeof (struct cmdk), 7))
		return (rval);

	mutex_init(&cmdk_attach_mutex, NULL, MUTEX_DRIVER, NULL);
	if ((rval = mod_install(&modlinkage)) != 0) {
		mutex_destroy(&cmdk_attach_mutex);
		ddi_soft_state_fini(&cmdk_state);
	}
	return (rval);
}

int
_fini(void)
{
	return (EBUSY);

	/*
	 * This has been commented out until cmdk is a true
	 * unloadable module. Right now x86 systems panic on
	 * a diskless reconfig boot.
	 */

#if 0	/* bugid 1186679 */
	int	rval;

	rval = mod_remove(&modlinkage);
	if (rval != 0)
		return (rval);

	mutex_destroy(&cmdk_attach_mutex);
	ddi_soft_state_fini(&cmdk_state);

	return (0);
#endif
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Autoconfiguration Routines
 */
static int
cmdkprobe(dev_info_t *dip)
{
	int		instance;
	int		status;
	struct cmdk	*dkp;

	instance = ddi_get_instance(dip);

	if (ddi_get_soft_state(cmdk_state, instance))
		return (DDI_PROBE_PARTIAL);

	if ((ddi_soft_state_zalloc(cmdk_state, instance) != DDI_SUCCESS) ||
	    ((dkp = ddi_get_soft_state(cmdk_state, instance)) == NULL))
		return (DDI_PROBE_PARTIAL);

	mutex_init(&dkp->dk_mutex, NULL, MUTEX_DRIVER, NULL);
	rw_init(&dkp->dk_bbh_mutex, NULL, RW_DRIVER, NULL);
	dkp->dk_dip = dip;
	mutex_enter(&dkp->dk_mutex);

	dkp->dk_dev = makedevice(ddi_driver_major(dip),
	    ddi_get_instance(dip) << CMDK_UNITSHF);

	/* linkage to dadk and strategy */
	if (cmdk_create_obj(dip, dkp) != DDI_SUCCESS) {
		mutex_exit(&dkp->dk_mutex);
		mutex_destroy(&dkp->dk_mutex);
		rw_destroy(&dkp->dk_bbh_mutex);
		ddi_soft_state_free(cmdk_state, instance);
		return (DDI_PROBE_PARTIAL);
	}

	status = dadk_probe(DKTP_DATA, KM_NOSLEEP);
	if (status != DDI_PROBE_SUCCESS) {
		cmdk_destroy_obj(dip, dkp);	/* dadk/strategy linkage */
		mutex_exit(&dkp->dk_mutex);
		mutex_destroy(&dkp->dk_mutex);
		rw_destroy(&dkp->dk_bbh_mutex);
		ddi_soft_state_free(cmdk_state, instance);
		return (status);
	}

	mutex_exit(&dkp->dk_mutex);
#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdkprobe: instance= %d name= `%s`\n",
		    instance, ddi_get_name_addr(dip));
#endif
	return (status);
}

static int
cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int		instance;
	struct cmdk	*dkp;
	char		*node_type;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (cmdkresume(dip));
	default:
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_FAILURE);

	dkp->dk_pm_level = CMDK_SPINDLE_UNINIT;
	mutex_init(&dkp->dk_mutex, NULL, MUTEX_DRIVER, NULL);

	mutex_enter(&dkp->dk_mutex);

	/* dadk_attach is an empty function that only returns SUCCESS */
	(void) dadk_attach(DKTP_DATA);

	node_type = (DKTP_EXT->tg_nodetype);

	/*
	 * this open allows cmlb to read the device
	 * and determine the label types
	 * so that cmlb can create minor nodes for the device
	 */

	/* open the target disk */
	if (dadk_open(DKTP_DATA, 0) != DDI_SUCCESS)
		goto fail2;

	/* mark as having opened target */
	dkp->dk_flag |= CMDK_TGDK_OPEN;

	cmlb_alloc_handle((cmlb_handle_t *)&dkp->dk_cmlbhandle);

	if (cmlb_attach(dip,
	    &cmdk_lb_ops,
	    DTYPE_DIRECT,		/* device_type */
	    0,				/* removable */
	    0,				/* hot pluggable XXX */
	    node_type,
	    CMLB_CREATE_ALTSLICE_VTOC_16_DTYPE_DIRECT,	/* alter_behaviour */
	    dkp->dk_cmlbhandle,
	    0) != 0)
		goto fail1;

	/* Calling validate will create minor nodes according to disk label */
	(void) cmlb_validate(dkp->dk_cmlbhandle, 0, 0);

	/* set bbh (Bad Block Handling) */
	cmdk_bbh_reopen(dkp);

	/* setup devid string */
	cmdk_devid_setup(dkp);

	mutex_enter(&cmdk_attach_mutex);
	if (instance > cmdk_max_instance)
		cmdk_max_instance = instance;
	mutex_exit(&cmdk_attach_mutex);

	mutex_exit(&dkp->dk_mutex);

	/*
	 * Add a zero-length attribute to tell the world we support
	 * kernel ioctls (for layered drivers)
	 */
	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    DDI_KERNEL_IOCTL, NULL, 0);
	ddi_report_dev(dip);

	/*
	 * Initialize power management
	 */
	mutex_init(&dkp->dk_pm_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&dkp->dk_suspend_cv, NULL, CV_DRIVER, NULL);
	cmdk_setup_pm(dip, dkp);

	return (DDI_SUCCESS);

fail1:
	cmlb_free_handle(&dkp->dk_cmlbhandle);
	(void) dadk_close(DKTP_DATA);
fail2:
	cmdk_destroy_obj(dip, dkp);
	rw_destroy(&dkp->dk_bbh_mutex);
	mutex_exit(&dkp->dk_mutex);
	mutex_destroy(&dkp->dk_mutex);
	ddi_soft_state_free(cmdk_state, instance);
	return (DDI_FAILURE);
}
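
/*
 * Editor's note: on the failure paths above, fail1 undoes the steps
 * taken before cmlb_attach() failed (frees the cmlb handle and closes
 * the target disk) and then falls through into fail2, which undoes the
 * work common to both paths: the dadk/strategy linkage, the locks, and
 * the soft state.
 */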

static int
cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct cmdk	*dkp;
	int		instance;
	int		max_instance;

	switch (cmd) {
	case DDI_DETACH:
		/* return (DDI_FAILURE); */
		break;
	case DDI_SUSPEND:
		return (cmdksuspend(dip));
	default:
#ifdef CMDK_DEBUG
		if (cmdk_debug & DIO) {
			PRF("cmdkdetach: cmd = %d unknown\n", cmd);
		}
#endif
		return (DDI_FAILURE);
	}

	mutex_enter(&cmdk_attach_mutex);
	max_instance = cmdk_max_instance;
	mutex_exit(&cmdk_attach_mutex);

	/* check if any instance of driver is open */
	for (instance = 0; instance < max_instance; instance++) {
		dkp = ddi_get_soft_state(cmdk_state, instance);
		if (!dkp)
			continue;
		if (dkp->dk_flag & CMDK_OPEN)
			return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_SUCCESS);

	mutex_enter(&dkp->dk_mutex);

	/*
	 * The cmdk_part_info call at the end of cmdkattach may have
	 * caused cmdk_reopen to do a TGDK_OPEN; make sure we close it
	 * on detach for the case where cmdkopen/cmdkclose never occurs.
	 */
	if (dkp->dk_flag & CMDK_TGDK_OPEN) {
		dkp->dk_flag &= ~CMDK_TGDK_OPEN;
		(void) dadk_close(DKTP_DATA);
	}

	cmlb_detach(dkp->dk_cmlbhandle, 0);
	cmlb_free_handle(&dkp->dk_cmlbhandle);
	ddi_prop_remove_all(dip);

	cmdk_destroy_obj(dip, dkp);	/* dadk/strategy linkage */
	mutex_exit(&dkp->dk_mutex);
	mutex_destroy(&dkp->dk_mutex);
	rw_destroy(&dkp->dk_bbh_mutex);
	mutex_destroy(&dkp->dk_pm_mutex);
	cv_destroy(&dkp->dk_suspend_cv);
	ddi_soft_state_free(cmdk_state, instance);

	return (DDI_SUCCESS);
}

static int
cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	dev_t		dev = (dev_t)arg;
	int		instance;
	struct cmdk	*dkp;

#ifdef lint
	dip = dip;	/* no one ever uses this */
#endif
#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdkinfo: call\n");
#endif
	instance = CMDKUNIT(dev);

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
			return (DDI_FAILURE);
		*result = (void *) dkp->dk_dip;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(intptr_t)instance;
		break;
	default:
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

/*
 * Initialize the power management components
 */
static void
cmdk_setup_pm(dev_info_t *dip, struct cmdk *dkp)
{
	char *pm_comp[] = { "NAME=cmdk", "0=off", "1=on", NULL };

	/*
	 * Since the cmdk device does not have the 'reg' property,
	 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
	 * The following code is to tell cpr that this device
	 * DOES need to be suspended and resumed.
	 */
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
	    "pm-hardware-state", "needs-suspend-resume");

	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
	    "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) {
		if (pm_raise_power(dip, 0, CMDK_SPINDLE_ON) == DDI_SUCCESS) {
			mutex_enter(&dkp->dk_pm_mutex);
			dkp->dk_pm_level = CMDK_SPINDLE_ON;
			dkp->dk_pm_is_enabled = 1;
			mutex_exit(&dkp->dk_pm_mutex);
		} else {
			mutex_enter(&dkp->dk_pm_mutex);
			dkp->dk_pm_level = CMDK_SPINDLE_OFF;
			dkp->dk_pm_is_enabled = 0;
			mutex_exit(&dkp->dk_pm_mutex);
		}
	} else {
		mutex_enter(&dkp->dk_pm_mutex);
		dkp->dk_pm_level = CMDK_SPINDLE_UNINIT;
		dkp->dk_pm_is_enabled = 0;
		mutex_exit(&dkp->dk_pm_mutex);
	}
}

/*
 * Suspend routine; called when detach(9E) receives the DDI_SUSPEND
 * command from system power management.
 */
static int
cmdksuspend(dev_info_t *dip)
{
	struct cmdk	*dkp;
	int		instance;
	clock_t		count = 0;

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_FAILURE);
	mutex_enter(&dkp->dk_mutex);
	if (dkp->dk_flag & CMDK_SUSPEND) {
		mutex_exit(&dkp->dk_mutex);
		return (DDI_SUCCESS);
	}
	dkp->dk_flag |= CMDK_SUSPEND;

	/* need to wait a while */
	while (dadk_getcmds(DKTP_DATA) != 0) {
		delay(drv_usectohz(1000000));
		if (count > 60) {
			dkp->dk_flag &= ~CMDK_SUSPEND;
			cv_broadcast(&dkp->dk_suspend_cv);
			mutex_exit(&dkp->dk_mutex);
			return (DDI_FAILURE);
		}
		count++;
	}
	mutex_exit(&dkp->dk_mutex);
	return (DDI_SUCCESS);
}
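
/*
 * Editor's note: drv_usectohz(1000000) converts one second to clock
 * ticks, so the loop above polls dadk_getcmds() roughly once a second
 * and gives up on the suspend after about a minute of outstanding
 * target commands.
 */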

/*
 * Resume routine; called when attach(9E) receives the DDI_RESUME
 * command from system power management.
 */
static int
cmdkresume(dev_info_t *dip)
{
	struct cmdk	*dkp;
	int		instance;

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_FAILURE);
	mutex_enter(&dkp->dk_mutex);
	if (!(dkp->dk_flag & CMDK_SUSPEND)) {
		mutex_exit(&dkp->dk_mutex);
		return (DDI_FAILURE);
	}
	dkp->dk_pm_level = CMDK_SPINDLE_ON;
	dkp->dk_flag &= ~CMDK_SUSPEND;
	cv_broadcast(&dkp->dk_suspend_cv);
	mutex_exit(&dkp->dk_mutex);
	return (DDI_SUCCESS);
}

/*
 * Power management entry point; used to change the power level of the
 * power management component.  The actual hard drive suspend/resume is
 * handled in the ata driver, so this function does no real work other
 * than verifying that the disk is idle.
 */
static int
cmdkpower(dev_info_t *dip, int component, int level)
{
	struct cmdk	*dkp;
	int		instance;

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
	    component != 0 || level > CMDK_SPINDLE_ON ||
	    level < CMDK_SPINDLE_OFF) {
		return (DDI_FAILURE);
	}

	mutex_enter(&dkp->dk_pm_mutex);
	if (dkp->dk_pm_is_enabled && dkp->dk_pm_level == level) {
		mutex_exit(&dkp->dk_pm_mutex);
		return (DDI_SUCCESS);
	}
	mutex_exit(&dkp->dk_pm_mutex);

	if ((level == CMDK_SPINDLE_OFF) &&
	    (dadk_getcmds(DKTP_DATA) != 0)) {
		return (DDI_FAILURE);
	}

	mutex_enter(&dkp->dk_pm_mutex);
	dkp->dk_pm_level = level;
	mutex_exit(&dkp->dk_pm_mutex);
	return (DDI_SUCCESS);
}

static int
cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	struct cmdk	*dkp;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;

#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdk_prop_op: call\n");
#endif

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));

	/*
	 * Our dynamic properties are all device specific and size oriented.
	 * Requests issued under conditions where size is valid are passed
	 * to ddi_prop_op_nblocks with the size information, otherwise the
	 * request is passed to ddi_prop_op. Size depends on valid label.
	 */
	if ((dev != DDI_DEV_T_ANY) && (dkp != NULL)) {
		if (!cmlb_partinfo(
		    dkp->dk_cmlbhandle,
		    CMDKPART(dev),
		    &p_lblkcnt,
		    &p_lblksrt,
		    NULL,
		    NULL,
		    0))
			return (ddi_prop_op_nblocks(dev, dip,
			    prop_op, mod_flags,
			    name, valuep, lengthp,
			    (uint64_t)p_lblkcnt));
	}

	return (ddi_prop_op(dev, dip,
	    prop_op, mod_flags,
	    name, valuep, lengthp));
}
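
/*
 * Editor's note: ddi_prop_op_nblocks(9F) answers the standard dynamic
 * size properties ("Nblocks"/"nblocks" and the derived "Size"/"size")
 * from the partition's block count, so layered consumers see sizes
 * that track the current label without the driver maintaining its own
 * properties.
 */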

/*
 * dump routine
 */
static int
cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
{
	int		instance;
	struct cmdk	*dkp;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;
	struct buf	local;
	struct buf	*bp;

#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdkdump: call\n");
#endif
	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) || (blkno < 0))
		return (ENXIO);

	if (cmlb_partinfo(
	    dkp->dk_cmlbhandle,
	    CMDKPART(dev),
	    &p_lblkcnt,
	    &p_lblksrt,
	    NULL,
	    NULL,
	    0)) {
		return (ENXIO);
	}

	if ((blkno+nblk) > p_lblkcnt)
		return (EINVAL);

	cmdk_indump = 1;	/* Tell disk targets we are panic dumping */

	bp = &local;
	bzero(bp, sizeof (*bp));
	bp->b_flags = B_BUSY;
	bp->b_un.b_addr = addr;
	bp->b_bcount = nblk << SCTRSHFT;
	SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + blkno)));

	(void) dadk_dump(DKTP_DATA, bp);
	return (bp->b_error);
}

/*
 * Copy in the dadkio_rwcmd according to the user's data model. If needed,
 * convert it for our internal use.
 */
static int
rwcmd_copyin(struct dadkio_rwcmd *rwcmdp, caddr_t inaddr, int flag)
{
	switch (ddi_model_convert_from(flag)) {
		case DDI_MODEL_ILP32: {
			struct dadkio_rwcmd32 cmd32;

			if (ddi_copyin(inaddr, &cmd32,
			    sizeof (struct dadkio_rwcmd32), flag)) {
				return (EFAULT);
			}

			rwcmdp->cmd = cmd32.cmd;
			rwcmdp->flags = cmd32.flags;
			rwcmdp->blkaddr = (daddr_t)cmd32.blkaddr;
			rwcmdp->buflen = cmd32.buflen;
			rwcmdp->bufaddr = (caddr_t)(intptr_t)cmd32.bufaddr;
			/*
			 * Note: we do not convert the 'status' field,
			 * as it should not contain valid data at this
			 * point.
			 */
			bzero(&rwcmdp->status, sizeof (rwcmdp->status));
			break;
		}
		case DDI_MODEL_NONE: {
			if (ddi_copyin(inaddr, rwcmdp,
			    sizeof (struct dadkio_rwcmd), flag)) {
				return (EFAULT);
			}
		}
	}
	return (0);
}

/*
 * If necessary, convert the internal rwcmdp and status to the appropriate
 * data model and copy it out to the user.
 */
static int
rwcmd_copyout(struct dadkio_rwcmd *rwcmdp, caddr_t outaddr, int flag)
{
	switch (ddi_model_convert_from(flag)) {
		case DDI_MODEL_ILP32: {
			struct dadkio_rwcmd32 cmd32;

			cmd32.cmd = rwcmdp->cmd;
			cmd32.flags = rwcmdp->flags;
			cmd32.blkaddr = rwcmdp->blkaddr;
			cmd32.buflen = rwcmdp->buflen;
			ASSERT64(((uintptr_t)rwcmdp->bufaddr >> 32) == 0);
			cmd32.bufaddr = (caddr32_t)(uintptr_t)rwcmdp->bufaddr;

			cmd32.status.status = rwcmdp->status.status;
			cmd32.status.resid = rwcmdp->status.resid;
			cmd32.status.failed_blk_is_valid =
			    rwcmdp->status.failed_blk_is_valid;
			cmd32.status.failed_blk = rwcmdp->status.failed_blk;
			cmd32.status.fru_code_is_valid =
			    rwcmdp->status.fru_code_is_valid;
			cmd32.status.fru_code = rwcmdp->status.fru_code;

			bcopy(rwcmdp->status.add_error_info,
			    cmd32.status.add_error_info, DADKIO_ERROR_INFO_LEN);

			if (ddi_copyout(&cmd32, outaddr,
			    sizeof (struct dadkio_rwcmd32), flag))
				return (EFAULT);
			break;
		}
		case DDI_MODEL_NONE: {
			if (ddi_copyout(rwcmdp, outaddr,
			    sizeof (struct dadkio_rwcmd), flag))
				return (EFAULT);
		}
	}
	return (0);
}
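
/*
 * Editor's note: the ILP32 conversion above exists because
 * struct dadkio_rwcmd embeds a pointer (bufaddr) and long-derived
 * types whose sizes differ between a 32-bit consumer and a 64-bit
 * kernel; dadkio_rwcmd32 pins those fields to fixed 32-bit widths
 * (e.g. caddr32_t) so the copyin/copyout layouts match the caller.
 */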

/*
 * ioctl routine
 */
static int
cmdkioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *credp, int *rvalp)
{
	int		instance;
	struct scsi_device *devp;
	struct cmdk	*dkp;
	char		data[NBPSCTR];

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	mutex_enter(&dkp->dk_mutex);
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	bzero(data, sizeof (data));

	switch (cmd) {

	case DKIOCGMEDIAINFO: {
		struct dk_minfo	media_info;
		struct tgdk_geom phyg;

		/* dadk_getphygeom always returns success */
		(void) dadk_getphygeom(DKTP_DATA, &phyg);

		media_info.dki_lbsize = phyg.g_secsiz;
		media_info.dki_capacity = phyg.g_cap;
		media_info.dki_media_type = DK_FIXED_DISK;

		if (ddi_copyout(&media_info, (void *)arg,
		    sizeof (struct dk_minfo), flag)) {
			return (EFAULT);
		} else {
			return (0);
		}
	}

	case DKIOCINFO: {
		struct dk_cinfo *info = (struct dk_cinfo *)data;

		/* controller information */
		info->dki_ctype = (DKTP_EXT->tg_ctype);
		info->dki_cnum = ddi_get_instance(ddi_get_parent(dkp->dk_dip));
		(void) strcpy(info->dki_cname,
		    ddi_get_name(ddi_get_parent(dkp->dk_dip)));

		/* Unit Information */
		info->dki_unit = ddi_get_instance(dkp->dk_dip);
		devp = ddi_get_driver_private(dkp->dk_dip);
		info->dki_slave = (CMDEV_TARG(devp)<<3) | CMDEV_LUN(devp);
		(void) strcpy(info->dki_dname, ddi_driver_name(dkp->dk_dip));
		info->dki_flags = DKI_FMTVOL;
		info->dki_partition = CMDKPART(dev);

		info->dki_maxtransfer = maxphys / DEV_BSIZE;
		info->dki_addr = 1;
		info->dki_space = 0;
		info->dki_prio = 0;
		info->dki_vec = 0;

		if (ddi_copyout(data, (void *)arg, sizeof (*info), flag))
			return (EFAULT);
		else
			return (0);
	}

	case DKIOCSTATE: {
		int	state;
		int	rval;
		diskaddr_t	p_lblksrt;
		diskaddr_t	p_lblkcnt;

		if (ddi_copyin((void *)arg, &state, sizeof (int), flag))
			return (EFAULT);

		/* dadk_check_media blocks until state changes */
		if (rval = dadk_check_media(DKTP_DATA, &state))
			return (rval);

		if (state == DKIO_INSERTED) {

			if (cmlb_validate(dkp->dk_cmlbhandle, 0, 0) != 0)
				return (ENXIO);

			if (cmlb_partinfo(dkp->dk_cmlbhandle, CMDKPART(dev),
			    &p_lblkcnt, &p_lblksrt, NULL, NULL, 0))
				return (ENXIO);

			if (p_lblkcnt <= 0)
				return (ENXIO);
		}

		if (ddi_copyout(&state, (caddr_t)arg, sizeof (int), flag))
			return (EFAULT);

		return (0);
	}

	/*
	 * is media removable?
	 */
	case DKIOCREMOVABLE: {
		int i;

		i = (DKTP_EXT->tg_rmb) ? 1 : 0;

		if (ddi_copyout(&i, (caddr_t)arg, sizeof (int), flag))
			return (EFAULT);

		return (0);
	}

	case DKIOCADDBAD:
		/*
		 * This is not an update mechanism to add bad blocks
		 * to the bad block structures stored on disk.
		 *
		 * addbadsec(1M) will update the bad block data on disk
		 * and use this ioctl to force the driver to re-initialize
		 * the list of bad blocks in the driver.
		 */

		/* start BBH */
		cmdk_bbh_reopen(dkp);
		return (0);

	case DKIOCG_PHYGEOM:
	case DKIOCG_VIRTGEOM:
	case DKIOCGGEOM:
	case DKIOCSGEOM:
	case DKIOCGAPART:
	case DKIOCSAPART:
	case DKIOCGVTOC:
	case DKIOCSVTOC:
	case DKIOCPARTINFO:
	case DKIOCGMBOOT:
	case DKIOCSMBOOT:
	case DKIOCGETEFI:
	case DKIOCSETEFI:
	case DKIOCPARTITION:
	{
		int rc;

		rc = cmlb_ioctl(dkp->dk_cmlbhandle, dev, cmd, arg, flag,
		    credp, rvalp, 0);
		if (cmd == DKIOCSVTOC)
			cmdk_devid_setup(dkp);
		return (rc);
	}

	case DIOCTL_RWCMD: {
		struct dadkio_rwcmd *rwcmdp;
		int	status;

		rwcmdp = kmem_alloc(sizeof (struct dadkio_rwcmd), KM_SLEEP);

		status = rwcmd_copyin(rwcmdp, (caddr_t)arg, flag);

		if (status == 0) {
			bzero(&(rwcmdp->status), sizeof (struct dadkio_status));
			status = dadk_ioctl(DKTP_DATA,
			    dev,
			    cmd,
			    (uintptr_t)rwcmdp,
			    flag,
			    credp,
			    rvalp);
		}
		if (status == 0)
			status = rwcmd_copyout(rwcmdp, (caddr_t)arg, flag);

		kmem_free(rwcmdp, sizeof (struct dadkio_rwcmd));
		return (status);
	}

	default:
		return (dadk_ioctl(DKTP_DATA,
		    dev,
		    cmd,
		    arg,
		    flag,
		    credp,
		    rvalp));
	}
}

/*ARGSUSED1*/
static int
cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp)
{
	int		part;
	ulong_t		partbit;
	int		instance;
	struct cmdk	*dkp;
	int		lastclose = 1;
	int		i;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
	    (otyp >= OTYPCNT))
		return (ENXIO);

	mutex_enter(&dkp->dk_mutex);

	/* check if device has been opened */
	ASSERT(cmdk_isopen(dkp, dev));
	if (!(dkp->dk_flag & CMDK_OPEN)) {
		mutex_exit(&dkp->dk_mutex);
		return (ENXIO);
	}

	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}

	part = CMDKPART(dev);
	partbit = 1 << part;

	/* account for close */
	if (otyp == OTYP_LYR) {
		ASSERT(dkp->dk_open_lyr[part] > 0);
		if (dkp->dk_open_lyr[part])
			dkp->dk_open_lyr[part]--;
	} else {
		ASSERT((dkp->dk_open_reg[otyp] & partbit) != 0);
		dkp->dk_open_reg[otyp] &= ~partbit;
	}
	dkp->dk_open_exl &= ~partbit;

	for (i = 0; i < CMDK_MAXPART; i++)
		if (dkp->dk_open_lyr[i] != 0) {
			lastclose = 0;
			break;
		}

	if (lastclose)
		for (i = 0; i < OTYPCNT; i++)
			if (dkp->dk_open_reg[i] != 0) {
				lastclose = 0;
				break;
			}

	mutex_exit(&dkp->dk_mutex);

	if (lastclose)
		cmlb_invalidate(dkp->dk_cmlbhandle, 0);

	return (DDI_SUCCESS);
}
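
/*
 * Editor's note: cmdkclose() above (and cmdkopen() below) return
 * DDI_SUCCESS where close(9E)/open(9E) expect 0 or an errno value;
 * this works only because DDI_SUCCESS is defined as 0.
 */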

/*ARGSUSED3*/
static int
cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp)
{
	dev_t		dev = *dev_p;
	int		part;
	ulong_t		partbit;
	int		instance;
	struct cmdk	*dkp;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;
	int		i;
	int		nodelay;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	if (otyp >= OTYPCNT)
		return (EINVAL);

	mutex_enter(&dkp->dk_mutex);
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	part = CMDKPART(dev);
	partbit = 1 << part;
	nodelay = (flag & (FNDELAY | FNONBLOCK));

	mutex_enter(&dkp->dk_mutex);

	if (cmlb_validate(dkp->dk_cmlbhandle, 0, 0) != 0) {

		/* fail if not doing a non-blocking open */
		if (!nodelay) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	} else if (cmlb_partinfo(dkp->dk_cmlbhandle, part, &p_lblkcnt,
	    &p_lblksrt, NULL, NULL, 0) == 0) {

		if (p_lblkcnt <= 0 && (!nodelay || otyp != OTYP_CHR)) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	} else {
		/* fail if not doing a non-blocking open */
		if (!nodelay) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	}

	if ((DKTP_EXT->tg_rdonly) && (flag & FWRITE)) {
		mutex_exit(&dkp->dk_mutex);
		return (EROFS);
	}

	/* check if part is already opened exclusively */
	if (dkp->dk_open_exl & partbit)
		goto excl_open_fail;

	/* check if we can establish exclusive open */
	if (flag & FEXCL) {
		if (dkp->dk_open_lyr[part])
			goto excl_open_fail;
		for (i = 0; i < OTYPCNT; i++) {
			if (dkp->dk_open_reg[i] & partbit)
				goto excl_open_fail;
		}
	}

	/* open will succeed, account for open */
	dkp->dk_flag |= CMDK_OPEN;
	if (otyp == OTYP_LYR)
		dkp->dk_open_lyr[part]++;
	else
		dkp->dk_open_reg[otyp] |= partbit;
	if (flag & FEXCL)
		dkp->dk_open_exl |= partbit;

	mutex_exit(&dkp->dk_mutex);
	return (DDI_SUCCESS);

excl_open_fail:
	mutex_exit(&dkp->dk_mutex);
	return (EBUSY);
}

/*
 * read routine
 */
/*ARGSUSED2*/
static int
cmdkread(dev_t dev, struct uio *uio, cred_t *credp)
{
	return (cmdkrw(dev, uio, B_READ));
}

/*
 * async read routine
 */
/*ARGSUSED2*/
static int
cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp)
{
	return (cmdkarw(dev, aio, B_READ));
}

/*
 * write routine
 */
/*ARGSUSED2*/
static int
cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp)
{
	return (cmdkrw(dev, uio, B_WRITE));
}

/*
 * async write routine
 */
/*ARGSUSED2*/
static int
cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp)
{
	return (cmdkarw(dev, aio, B_WRITE));
}

static void
cmdkmin(struct buf *bp)
{
	if (bp->b_bcount > DK_MAXRECSIZE)
		bp->b_bcount = DK_MAXRECSIZE;
}

static int
cmdkrw(dev_t dev, struct uio *uio, int flag)
{
	int		instance;
	struct cmdk	*dkp;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	mutex_enter(&dkp->dk_mutex);
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	return (physio(cmdkstrategy, (struct buf *)0, dev, flag, cmdkmin, uio));
}

static int
cmdkarw(dev_t dev, struct aio_req *aio, int flag)
{
	int		instance;
	struct cmdk	*dkp;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	mutex_enter(&dkp->dk_mutex);
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	return (aphysio(cmdkstrategy, anocancel, dev, flag, cmdkmin, aio));
}
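
/*
 * Editor's note: physio(9F) builds a buf from the caller's uio, caps
 * each transfer with the minphys routine (cmdkmin, which limits
 * b_bcount to DK_MAXRECSIZE), and calls cmdkstrategy() synchronously;
 * aphysio() is the asynchronous counterpart used for aio requests.
 */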

/*
 * strategy routine
 */
static int
cmdkstrategy(struct buf *bp)
{
	int		instance;
	struct cmdk	*dkp;
	long		d_cnt;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;

	instance = CMDKUNIT(bp->b_edev);
	if (cmdk_indump || !(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
	    (dkblock(bp) < 0)) {
		bp->b_resid = bp->b_bcount;
		SETBPERR(bp, ENXIO);
		biodone(bp);
		return (0);
	}

	mutex_enter(&dkp->dk_mutex);
	ASSERT(cmdk_isopen(dkp, bp->b_edev));
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	bp->b_flags &= ~(B_DONE|B_ERROR);
	bp->b_resid = 0;
	bp->av_back = NULL;

	/*
	 * only re-read the vtoc if necessary (force == FALSE)
	 */
	if (cmlb_partinfo(dkp->dk_cmlbhandle, CMDKPART(bp->b_edev),
	    &p_lblkcnt, &p_lblksrt, NULL, NULL, 0)) {
		SETBPERR(bp, ENXIO);
	}

	if ((bp->b_bcount & (NBPSCTR-1)) || (dkblock(bp) > p_lblkcnt))
		SETBPERR(bp, ENXIO);

	if ((bp->b_flags & B_ERROR) || (dkblock(bp) == p_lblkcnt)) {
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}

	d_cnt = bp->b_bcount >> SCTRSHFT;
	if ((dkblock(bp) + d_cnt) > p_lblkcnt) {
		bp->b_resid = ((dkblock(bp) + d_cnt) - p_lblkcnt) << SCTRSHFT;
		bp->b_bcount -= bp->b_resid;
	}

	SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + dkblock(bp))));
	if (dadk_strategy(DKTP_DATA, bp) != DDI_SUCCESS) {
		bp->b_resid += bp->b_bcount;
		biodone(bp);
	}
	return (0);
}

static int
cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp)
{
	struct scsi_device *devp;
	opaque_t	queobjp = NULL;
	opaque_t	flcobjp = NULL;
	char		que_keyvalp[64];
	int		que_keylen;
	char		flc_keyvalp[64];
	int		flc_keylen;

	ASSERT(mutex_owned(&dkp->dk_mutex));

	/* Create linkage to queueing routines based on property */
	que_keylen = sizeof (que_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "cmdk_create_obj: queue property undefined");
		return (DDI_FAILURE);
	}
	que_keyvalp[que_keylen] = (char)0;

	if (strcmp(que_keyvalp, "qfifo") == 0) {
		queobjp = (opaque_t)qfifo_create();
	} else if (strcmp(que_keyvalp, "qsort") == 0) {
		queobjp = (opaque_t)qsort_create();
	} else {
		return (DDI_FAILURE);
	}

	/* Create linkage to dequeueing routines based on property */
	flc_keylen = sizeof (flc_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "cmdk_create_obj: flow-control property undefined");
		return (DDI_FAILURE);
	}

	flc_keyvalp[flc_keylen] = (char)0;

	if (strcmp(flc_keyvalp, "dsngl") == 0) {
		flcobjp = (opaque_t)dsngl_create();
	} else if (strcmp(flc_keyvalp, "dmult") == 0) {
		flcobjp = (opaque_t)dmult_create();
	} else {
		return (DDI_FAILURE);
	}

	/* populate bbh_obj object stored in dkp */
	dkp->dk_bbh_obj.bbh_data = dkp;
	dkp->dk_bbh_obj.bbh_ops = &cmdk_bbh_ops;

	/* create linkage to dadk */
	dkp->dk_tgobjp = (opaque_t)dadk_create();

	devp = ddi_get_driver_private(dip);
	(void) dadk_init(DKTP_DATA, devp, flcobjp, queobjp, &dkp->dk_bbh_obj,
	    NULL);

	return (DDI_SUCCESS);
}

static void
cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp)
{
	char		que_keyvalp[64];
	int		que_keylen;
	char		flc_keyvalp[64];
	int		flc_keylen;

	ASSERT(mutex_owned(&dkp->dk_mutex));

	(void) dadk_free((dkp->dk_tgobjp));
	dkp->dk_tgobjp = NULL;

	que_keylen = sizeof (que_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "cmdk_destroy_obj: queue property undefined");
		return;
	}
	que_keyvalp[que_keylen] = (char)0;

	flc_keylen = sizeof (flc_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "cmdk_destroy_obj: flow-control property undefined");
		return;
	}
	flc_keyvalp[flc_keylen] = (char)0;
}
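
/*
 * Editor's note on the sector rounding in cmdk_lb_rdwr() below:
 * (count + NBPSCTR - 1) & -NBPSCTR rounds count up to a whole number
 * of 512-byte sectors, since -NBPSCTR == ~(NBPSCTR - 1) for a power
 * of two; e.g. a 700-byte request becomes 1024 bytes, while 512 stays
 * 512.
 */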

/*ARGSUSED5*/
static int
cmdk_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr,
    diskaddr_t start, size_t count, void *tg_cookie)
{
	struct cmdk	*dkp;
	opaque_t	handle;
	int		rc = 0;
	char		*bufa;

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
	if (dkp == NULL)
		return (ENXIO);

	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	/* round count up to a whole number of 512-byte sectors */
	count = (count + NBPSCTR - 1) & -NBPSCTR;
	handle = dadk_iob_alloc(DKTP_DATA, start, count, KM_SLEEP);
	if (!handle)
		return (ENOMEM);

	if (cmd == TG_READ) {
		bufa = dadk_iob_xfer(DKTP_DATA, handle, B_READ);
		if (!bufa)
			rc = EIO;
		else
			bcopy(bufa, bufaddr, count);
	} else {
		bufa = dadk_iob_htoc(DKTP_DATA, handle);
		bcopy(bufaddr, bufa, count);
		bufa = dadk_iob_xfer(DKTP_DATA, handle, B_WRITE);
		if (!bufa)
			rc = EIO;
	}
	(void) dadk_iob_free(DKTP_DATA, handle);

	return (rc);
}
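
/*
 * Editor's note: the TG_GETVIRTGEOM bound below (63 * 254 * 1024 ==
 * 16386048 blocks, about 8.4 GB at 512 bytes per block) is the largest
 * capacity representable in an Int 13/function 8 style geometry, so
 * larger disks simply fail that request (see PSARC 1998/313).
 */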

/*ARGSUSED3*/
static int
cmdk_lb_getinfo(dev_info_t *dip, int cmd, void *arg, void *tg_cookie)
{
	struct cmdk		*dkp;
	struct tgdk_geom	phyg;

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
	if (dkp == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM: {
		cmlb_geom_t *phygeomp = (cmlb_geom_t *)arg;

		/* dadk_getphygeom always returns success */
		(void) dadk_getphygeom(DKTP_DATA, &phyg);

		phygeomp->g_capacity	= phyg.g_cap;
		phygeomp->g_nsect	= phyg.g_sec;
		phygeomp->g_nhead	= phyg.g_head;
		phygeomp->g_acyl	= phyg.g_acyl;
		phygeomp->g_ncyl	= phyg.g_cyl;
		phygeomp->g_secsize	= phyg.g_secsiz;
		phygeomp->g_intrlv	= 1;
		phygeomp->g_rpm		= 3600;

		return (0);
	}

	case TG_GETVIRTGEOM: {
		cmlb_geom_t *virtgeomp = (cmlb_geom_t *)arg;
		diskaddr_t	capacity;

		(void) dadk_getgeom(DKTP_DATA, &phyg);
		capacity = phyg.g_cap;

		/*
		 * If the controller returned us something that doesn't
		 * really fit into an Int 13/function 8 geometry
		 * result, just fail the ioctl.  See PSARC 1998/313.
		 */
		if (capacity < 0 || capacity >= 63 * 254 * 1024)
			return (EINVAL);

		virtgeomp->g_capacity	= capacity;
		virtgeomp->g_nsect	= 63;
		virtgeomp->g_nhead	= 254;
		virtgeomp->g_ncyl	= capacity / (63 * 254);
		virtgeomp->g_acyl	= 0;
		virtgeomp->g_secsize	= 512;
		virtgeomp->g_intrlv	= 1;
		virtgeomp->g_rpm	= 3600;

		return (0);
	}

	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE:
	{

		/* dadk_getphygeom always returns success */
		(void) dadk_getphygeom(DKTP_DATA, &phyg);
		if (cmd == TG_GETCAPACITY)
			*(diskaddr_t *)arg = phyg.g_cap;
		else
			*(uint32_t *)arg = (uint32_t)phyg.g_secsiz;

		return (0);
	}

	case TG_GETATTR: {
		tg_attribute_t *tgattribute = (tg_attribute_t *)arg;
		if ((DKTP_EXT->tg_rdonly))
			tgattribute->media_is_writable = FALSE;
		else
			tgattribute->media_is_writable = TRUE;

		return (0);
	}

	default:
		return (ENOTTY);
	}
}

/*
 * Create and register the devid.
 * There are 4 different ways we can get a device id:
 *    1. Already have one - nothing to do
 *    2. Build one from the drive's model and serial numbers
 *    3. Read one from the disk (first sector of last track)
 *    4. Fabricate one and write it on the disk.
 * If any of these succeeds, register the deviceid
 */
static void
cmdk_devid_setup(struct cmdk *dkp)
{
	int	rc;

	/* Try options until one succeeds, or all have failed */

	/* 1. All done if already registered */
	if (dkp->dk_devid != NULL)
		return;

	/* 2. Build a devid from the model and serial number */
	rc = cmdk_devid_modser(dkp);
	if (rc != DDI_SUCCESS) {
		/* 3. Read devid from the disk, if present */
		rc = cmdk_devid_read(dkp);

		/* 4. otherwise make one up and write it on the disk */
		if (rc != DDI_SUCCESS)
			rc = cmdk_devid_fabricate(dkp);
	}

	/* If we managed to get a devid any of the above ways, register it */
	if (rc == DDI_SUCCESS)
		(void) ddi_devid_register(dkp->dk_dip, dkp->dk_devid);
}

/*
 * Build a devid from the model and serial number
 * Return DDI_SUCCESS or DDI_FAILURE.
 */
static int
cmdk_devid_modser(struct cmdk *dkp)
{
	int	rc = DDI_FAILURE;
	char	*hwid;
	int	modlen;
	int	serlen;

	/*
	 * device ID is a concatenation of model number, '=', serial number.
	 */
	hwid = kmem_alloc(CMDK_HWIDLEN, KM_SLEEP);
	modlen = cmdk_get_modser(dkp, DIOCTL_GETMODEL, hwid, CMDK_HWIDLEN);
	if (modlen == 0) {
		rc = DDI_FAILURE;
		goto err;
	}
	hwid[modlen++] = '=';
	serlen = cmdk_get_modser(dkp, DIOCTL_GETSERIAL,
	    hwid + modlen, CMDK_HWIDLEN - modlen);
	if (serlen == 0) {
		rc = DDI_FAILURE;
		goto err;
	}
	hwid[modlen + serlen] = 0;

	/* Initialize the device ID, trailing NULL not included */
	rc = ddi_devid_init(dkp->dk_dip, DEVID_ATA_SERIAL, modlen + serlen,
	    hwid, (ddi_devid_t *)&dkp->dk_devid);
	if (rc != DDI_SUCCESS) {
		rc = DDI_FAILURE;
		goto err;
	}

	rc = DDI_SUCCESS;

err:
	kmem_free(hwid, CMDK_HWIDLEN);
	return (rc);
}
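
/*
 * Editor's note (illustrative values, not from the source): for a
 * drive reporting model "ST3500630AS" and serial "9QG12345", the hwid
 * built above would be the string "ST3500630AS=9QG12345", registered
 * with devid type DEVID_ATA_SERIAL.
 */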

static int
cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len)
{
	dadk_ioc_string_t strarg;
	int		rval;
	char		*s;
	char		ch;
	boolean_t	ret;
	int		i;
	int		tb;

	strarg.is_buf = buf;
	strarg.is_size = len;
	if (dadk_ioctl(DKTP_DATA,
	    dkp->dk_dev,
	    ioccmd,
	    (uintptr_t)&strarg,
	    FNATIVE | FKIOCTL,
	    NULL,
	    &rval) != 0)
		return (0);

	/*
	 * A valid model/serial string must contain a character that is
	 * neither a space, a NUL, nor a '0'; also trim trailing
	 * spaces/NULs.
	 */
	ret = B_FALSE;
	s = buf;
	for (i = 0; i < strarg.is_size; i++) {
		ch = *s++;
		if (ch != ' ' && ch != '\0')
			tb = i + 1;
		if (ch != ' ' && ch != '\0' && ch != '0')
			ret = B_TRUE;
	}

	if (ret == B_FALSE)
		return (0);

	return (tb);
}

/*
 * Read a devid from the first block of the last track of
 * the last cylinder.  Make sure what we read is a valid devid.
 * Return DDI_SUCCESS or DDI_FAILURE.
 */
static int
cmdk_devid_read(struct cmdk *dkp)
{
	diskaddr_t	blk;
	struct dk_devid *dkdevidp;
	uint_t		*ip;
	int		chksum;
	int		i, sz;
	tgdk_iob_handle	handle = NULL;	/* NULL so the err path is safe */
	int		rc = DDI_FAILURE;

	if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk, 0))
		goto err;

	/* read the devid */
	handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP);
	if (handle == NULL)
		goto err;

	dkdevidp = (struct dk_devid *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
	if (dkdevidp == NULL)
		goto err;

	/* Validate the revision */
	if ((dkdevidp->dkd_rev_hi != DK_DEVID_REV_MSB) ||
	    (dkdevidp->dkd_rev_lo != DK_DEVID_REV_LSB))
		goto err;

	/* Calculate the checksum */
	chksum = 0;
	ip = (uint_t *)dkdevidp;
	for (i = 0; i < ((NBPSCTR - sizeof (int))/sizeof (int)); i++)
		chksum ^= ip[i];
	if (DKD_GETCHKSUM(dkdevidp) != chksum)
		goto err;

	/* Validate the device id */
	if (ddi_devid_valid((ddi_devid_t)dkdevidp->dkd_devid) != DDI_SUCCESS)
		goto err;

	/* keep a copy of the device id */
	sz = ddi_devid_sizeof((ddi_devid_t)dkdevidp->dkd_devid);
	dkp->dk_devid = kmem_alloc(sz, KM_SLEEP);
	bcopy(dkdevidp->dkd_devid, dkp->dk_devid, sz);

	rc = DDI_SUCCESS;

err:
	if (handle != NULL)
		(void) dadk_iob_free(DKTP_DATA, handle);
	return (rc);
}
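
/*
 * Editor's note: both the read path above and the fabricate path below
 * use the same on-disk layout for the devid sector: a two-byte
 * revision (dkd_rev_hi/dkd_rev_lo), the devid payload (dkd_devid), and
 * a checksum that is the XOR of the sector's first NBPSCTR - 4 bytes
 * taken as 32-bit words, accessed through the DKD_GETCHKSUM/
 * DKD_FORMCHKSUM macros.
 */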

/*
 * Create a devid and write it on the first block of the last track of
 * the last cylinder.
 * Return DDI_SUCCESS or DDI_FAILURE.
 */
static int
cmdk_devid_fabricate(struct cmdk *dkp)
{
	ddi_devid_t	devid = NULL;	/* devid made by ddi_devid_init */
	struct dk_devid	*dkdevidp;	/* devid struct stored on disk */
	diskaddr_t	blk;
	tgdk_iob_handle	handle = NULL;
	uint_t		*ip, chksum;
	int		i;
	int		rc;

	rc = ddi_devid_init(dkp->dk_dip, DEVID_FAB, 0, NULL, &devid);
	if (rc != DDI_SUCCESS)
		goto err;

	if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk, 0)) {
		/* no device id block address; free the devid via err */
		rc = DDI_FAILURE;
		goto err;
	}

	handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP);
	if (!handle) {
		rc = DDI_FAILURE;
		goto err;
	}

	/* Locate the buffer */
	dkdevidp = (struct dk_devid *)dadk_iob_htoc(DKTP_DATA, handle);

	/* Fill in the revision */
	bzero(dkdevidp, NBPSCTR);
	dkdevidp->dkd_rev_hi = DK_DEVID_REV_MSB;
	dkdevidp->dkd_rev_lo = DK_DEVID_REV_LSB;

	/* Copy in the device id */
	i = ddi_devid_sizeof(devid);
	if (i > DK_DEVID_SIZE) {
		rc = DDI_FAILURE;
		goto err;
	}
	bcopy(devid, dkdevidp->dkd_devid, i);

	/* Calculate the chksum */
	chksum = 0;
	ip = (uint_t *)dkdevidp;
	for (i = 0; i < ((NBPSCTR - sizeof (int))/sizeof (int)); i++)
		chksum ^= ip[i];

	/* Fill in the checksum */
	DKD_FORMCHKSUM(chksum, dkdevidp);

	/* write the devid */
	(void) dadk_iob_xfer(DKTP_DATA, handle, B_WRITE);

	dkp->dk_devid = devid;

	rc = DDI_SUCCESS;

err:
	if (handle != NULL)
		(void) dadk_iob_free(DKTP_DATA, handle);

	if (rc != DDI_SUCCESS && devid != NULL)
		ddi_devid_free(devid);

	return (rc);
}

static void
cmdk_bbh_free_alts(struct cmdk *dkp)
{
	if (dkp->dk_alts_hdl) {
		(void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
		kmem_free(dkp->dk_slc_cnt,
		    NDKMAP * (sizeof (uint32_t) + sizeof (struct alts_ent *)));
		dkp->dk_alts_hdl = NULL;
	}
}

static void
cmdk_bbh_reopen(struct cmdk *dkp)
{
	tgdk_iob_handle		handle = NULL;
	diskaddr_t		slcb, slcn, slce;
	struct alts_parttbl	*ap;
	struct alts_ent		*enttblp;
	uint32_t		altused;
	uint32_t		altbase;
	uint32_t		altlast;
	int			alts;
	uint16_t		vtoctag;
	int			i, j;

	/* find slice with V_ALTSCTR tag */
	for (alts = 0; alts < NDKMAP; alts++) {
		if (cmlb_partinfo(
		    dkp->dk_cmlbhandle,
		    alts,
		    &slcn,
		    &slcb,
		    NULL,
		    &vtoctag,
		    0)) {
			goto empty;	/* no partition table exists */
		}

		if (vtoctag == V_ALTSCTR && slcn > 1)
			break;
	}
	if (alts >= NDKMAP) {
		goto empty;	/* no V_ALTSCTR slice defined */
	}

	/* read in ALTS label block */
	handle = dadk_iob_alloc(DKTP_DATA, slcb, NBPSCTR, KM_SLEEP);
	if (!handle) {
		goto empty;
	}

	ap = (struct alts_parttbl *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
	if (!ap || (ap->alts_sanity != ALTS_SANITY)) {
		goto empty;
	}

	altused = ap->alts_ent_used;	/* number of BB entries */
	altbase = ap->alts_ent_base;	/* blk offset from begin slice */
	altlast = ap->alts_ent_end;	/* blk offset to last block */
	/* ((altused * sizeof (struct alts_ent) + NBPSCTR - 1) & ~NBPSCTR) */

	if (altused == 0 ||
	    altbase < 1 ||
	    altbase > altlast ||
	    altlast >= slcn) {
		goto empty;
	}
	(void) dadk_iob_free(DKTP_DATA, handle);

	/* read in ALTS remapping table */
	handle = dadk_iob_alloc(DKTP_DATA,
	    slcb + altbase,
	    (altlast - altbase + 1) << SCTRSHFT, KM_SLEEP);
	if (!handle) {
		goto empty;
	}

	enttblp = (struct alts_ent *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
	if (!enttblp) {
		goto empty;
	}

	rw_enter(&dkp->dk_bbh_mutex, RW_WRITER);

	/* allocate space for dk_slc_cnt and dk_slc_ent tables */
	if (dkp->dk_slc_cnt == NULL) {
		/* sized to match the kmem_free in cmdk_bbh_free_alts */
		dkp->dk_slc_cnt = kmem_alloc(NDKMAP *
		    (sizeof (uint32_t) + sizeof (struct alts_ent *)), KM_SLEEP);
	}
	dkp->dk_slc_ent = (struct alts_ent **)(dkp->dk_slc_cnt + NDKMAP);

	/* free previous BB table (if any) */
	if (dkp->dk_alts_hdl) {
		(void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
		dkp->dk_alts_hdl = NULL;
		dkp->dk_altused = 0;
	}

	/* save linkage to new BB table */
	dkp->dk_alts_hdl = handle;
	dkp->dk_altused = altused;

	/*
	 * build indexes to BB table by slice
	 * effectively we have
	 *	struct alts_ent *enttblp[altused];
	 *
	 *	uint32_t	dk_slc_cnt[NDKMAP];
	 *	struct alts_ent *dk_slc_ent[NDKMAP];
	 */
	for (i = 0; i < NDKMAP; i++) {
		if (cmlb_partinfo(
		    dkp->dk_cmlbhandle,
		    i,
		    &slcn,
		    &slcb,
		    NULL,
		    NULL,
		    0)) {
			goto empty1;
		}

		dkp->dk_slc_cnt[i] = 0;
		if (slcn == 0)
			continue;	/* slice is not allocated */

		/* last block in slice */
		slce = slcb + slcn - 1;

		/* find the first remap entry at or after the slice start */
		for (j = 0; j < altused; j++) {
			if (enttblp[j].bad_start + enttblp[j].bad_end >= slcb)
				break;
		}
		dkp->dk_slc_ent[i] = enttblp + j;

		/* count remap entries until the end of the slice */
		for (; j < altused && enttblp[j].bad_start <= slce; j++) {
			dkp->dk_slc_cnt[i] += 1;
		}
	}

	rw_exit(&dkp->dk_bbh_mutex);
	return;

empty:
	rw_enter(&dkp->dk_bbh_mutex, RW_WRITER);
empty1:
	if (handle && handle != dkp->dk_alts_hdl)
		(void) dadk_iob_free(DKTP_DATA, handle);

	if (dkp->dk_alts_hdl) {
		(void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
		dkp->dk_alts_hdl = NULL;
	}

	rw_exit(&dkp->dk_bbh_mutex);
}

/*ARGSUSED*/
static bbh_cookie_t
cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle)
{
	struct bbh_handle *hp;
	bbh_cookie_t ckp;

	hp = (struct bbh_handle *)handle;
	ckp = hp->h_cktab + hp->h_idx;
	hp->h_idx++;
	return (ckp);
}

/*ARGSUSED*/
static void
cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle)
{
	struct bbh_handle *hp;

	hp = (struct bbh_handle *)handle;
	kmem_free(handle, (sizeof (struct bbh_handle) +
	    (hp->h_totck * (sizeof (struct bbh_cookie)))));
}


/*
 * cmdk_bbh_gethandle remaps the bad sectors to alternates.
 * There are 7 different cases when the comparison is made
 * between the bad sector cluster and the disk section.
 *
 *	bad sector cluster	gggggggggggbbbbbbbggggggggggg
 *	case 1:			   ddddd
 *	case 2:				   -d-----
 *	case 3:					     ddddd
 *	case 4:				 dddddddddddd
 *	case 5:			 ddddddd-----
 *	case 6:				   ---ddddddd
 *	case 7:				   ddddddd
 *
 *	where:	g = good sector,	b = bad sector
 *		d = sector in disk section
 *		- = disk section may be extended to cover those disk areas
 */
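
/*
 * Editor's note (worked example, values invented for illustration):
 * for a request of sectors 100..109 with one remap entry
 * { bad_start = 104, bad_end = 105, good_start = 5000 }, case 4
 * below splits the transfer into three cookies:
 *	{ ck_sector = 100,  ck_seclen = 4 }	sectors 100..103
 *	{ ck_sector = 5000, ck_seclen = 2 }	remapped 104..105
 *	{ ck_sector = 106,  ck_seclen = 4 }	sectors 106..109
 */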

static opaque_t
cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp)
{
	struct cmdk		*dkp = (struct cmdk *)bbh_data;
	struct bbh_handle	*hp;
	struct bbh_cookie	*ckp;
	struct alts_ent		*altp;
	uint32_t		alts_used;
	uint32_t		part = CMDKPART(bp->b_edev);
	daddr32_t		lastsec;
	long			d_count;
	int			i;
	int			idx;
	int			cnt;

	if (part >= V_NUMPAR)
		return (NULL);

	/*
	 * This if statement is atomic and it will succeed
	 * if there are no bad blocks (almost always)
	 *
	 * so this if is performed outside of the rw_enter for speed
	 * and then repeated inside the rw_enter for safety
	 */
	if (!dkp->dk_alts_hdl) {
		return (NULL);
	}

	rw_enter(&dkp->dk_bbh_mutex, RW_READER);

	if (dkp->dk_alts_hdl == NULL) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}

	alts_used = dkp->dk_slc_cnt[part];
	if (alts_used == 0) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}
	altp = dkp->dk_slc_ent[part];

	/*
	 * binary search for the first alternate entry that overlaps or
	 * follows the starting sector
	 */
	i = cmdk_bbh_bsearch(altp, alts_used, GET_BP_SEC(bp));
	/* if the starting sector is past the last bad sector, return */
	if (i == -1) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}
	/* i is the starting index.  Set altp to the starting entry addr */
	altp += i;

	d_count = bp->b_bcount >> SCTRSHFT;
	lastsec = GET_BP_SEC(bp) + d_count - 1;

	/* count the remap entries that could overlap this transfer */
	for (idx = i, cnt = 0; idx < alts_used; idx++, altp++, cnt++) {
		if (lastsec < altp->bad_start)
			break;
	}

	if (!cnt) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}

	/* calculate the maximum number of reserved cookies */
	cnt <<= 1;
	cnt++;

	/* allocate the handle */
	hp = (struct bbh_handle *)kmem_zalloc((sizeof (*hp) +
	    (cnt * sizeof (*ckp))), KM_SLEEP);

	hp->h_idx = 0;
	hp->h_totck = cnt;
	ckp = hp->h_cktab = (struct bbh_cookie *)(hp + 1);
	ckp[0].ck_sector = GET_BP_SEC(bp);
	ckp[0].ck_seclen = d_count;

	altp = dkp->dk_slc_ent[part];
	altp += i;
	for (idx = 0; i < alts_used; i++, altp++) {
		/* CASE 1: */
		if (lastsec < altp->bad_start)
			break;

		/* CASE 3: */
		if (ckp[idx].ck_sector > altp->bad_end)
			continue;

		/* CASE 2 and 7: */
		if ((ckp[idx].ck_sector >= altp->bad_start) &&
		    (lastsec <= altp->bad_end)) {
			ckp[idx].ck_sector = altp->good_start +
			    ckp[idx].ck_sector - altp->bad_start;
			break;
		}

		/* at least one bad sector in our section.  break it. */
		/* CASE 5: */
		if ((lastsec >= altp->bad_start) &&
		    (lastsec <= altp->bad_end)) {
			ckp[idx+1].ck_seclen = lastsec - altp->bad_start + 1;
			ckp[idx].ck_seclen -= ckp[idx+1].ck_seclen;
			ckp[idx+1].ck_sector = altp->good_start;
			break;
		}
		/* CASE 6: */
		if ((ckp[idx].ck_sector <= altp->bad_end) &&
		    (ckp[idx].ck_sector >= altp->bad_start)) {
			ckp[idx+1].ck_seclen = ckp[idx].ck_seclen;
			ckp[idx].ck_seclen = altp->bad_end -
			    ckp[idx].ck_sector + 1;
			ckp[idx+1].ck_seclen -= ckp[idx].ck_seclen;
			ckp[idx].ck_sector = altp->good_start +
			    ckp[idx].ck_sector - altp->bad_start;
			idx++;
			ckp[idx].ck_sector = altp->bad_end + 1;
			continue;	/* check rest of section */
		}

		/* CASE 4: */
		ckp[idx].ck_seclen = altp->bad_start - ckp[idx].ck_sector;
		ckp[idx+1].ck_sector = altp->good_start;
		ckp[idx+1].ck_seclen = altp->bad_end - altp->bad_start + 1;
		idx += 2;
		ckp[idx].ck_sector = altp->bad_end + 1;
		ckp[idx].ck_seclen = lastsec - altp->bad_end;
	}

	rw_exit(&dkp->dk_bbh_mutex);
	return ((opaque_t)hp);
}
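
/*
 * Editor's note (worked example, values invented for illustration):
 * given entries with bad ranges [10-12], [50-55], [90-91],
 * cmdk_bbh_bsearch() returns 1 for key 51 (the key falls inside
 * [50-55]), 2 for key 60 (the first entry above the key), and -1 for
 * key 95 (past the last bad sector).
 */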

static int
cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key)
{
	int	i;
	int	ind;
	int	interval;
	int	mystatus = -1;

	if (!cnt)
		return (mystatus);

	ind = 1; /* compiler complains about possible uninitialized var */
	for (i = 1; i <= cnt; i <<= 1)
		ind = i;

	for (interval = ind; interval; ) {
		if ((key >= buf[ind-1].bad_start) &&
		    (key <= buf[ind-1].bad_end)) {
			return (ind-1);
		} else {
			interval >>= 1;
			if (key < buf[ind-1].bad_start) {
				/* record the largest bad sector index */
				mystatus = ind-1;
				if (!interval)
					break;
				ind = ind - interval;
			} else {
				/*
				 * if key is larger than the last element
				 * then break
				 */
				if ((ind == cnt) || !interval)
					break;
				if ((ind+interval) <= cnt)
					ind += interval;
			}
		}
	}
	return (mystatus);
}