/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/scsi/scsi.h>
#include <sys/dktp/cm.h>
#include <sys/dktp/quetypes.h>
#include <sys/dktp/queue.h>
#include <sys/dktp/fctypes.h>
#include <sys/dktp/flowctrl.h>
#include <sys/dktp/cmdev.h>
#include <sys/dkio.h>
#include <sys/dktp/tgdk.h>
#include <sys/dktp/dadk.h>
#include <sys/dktp/bbh.h>
#include <sys/dktp/altsctr.h>
#include <sys/dktp/cmdk.h>

#include <sys/stat.h>
#include <sys/vtoc.h>
#include <sys/file.h>
#include <sys/dktp/dadkio.h>
#include <sys/aio_req.h>

#include <sys/cmlb.h>

/*
 * Local Static Data
 */
#ifdef CMDK_DEBUG
#define	DENT	0x0001
#define	DIO	0x0002

static int cmdk_debug = DIO;
#endif

#ifndef	TRUE
#define	TRUE	1
#endif

#ifndef	FALSE
#define	FALSE	0
#endif

/*
 * NDKMAP is the base number for accessing the fdisk partitions.
 * c?d?p0 --> cmdk@?,?:q
 */
#define	PARTITION0_INDEX	(NDKMAP + 0)

#define	DKTP_DATA		(dkp->dk_tgobjp)->tg_data
#define	DKTP_EXT		(dkp->dk_tgobjp)->tg_ext

static void *cmdk_state;

/*
 * the cmdk_attach_mutex protects cmdk_max_instance in multi-threaded
 * attach situations
 */
static kmutex_t cmdk_attach_mutex;
static int cmdk_max_instance = 0;

/*
 * Panic dumpsys state
 * There is only a single flag that is not mutex locked since
 * the system is prevented from thread switching and cmdk_dump
 * will only be called in a single threaded operation.
 */
static int cmdk_indump;
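
/*
 * Note on minor numbers (a sketch; the authoritative encoding lives in
 * <sys/dktp/cmdk.h>): the instance number is shifted up by CMDK_UNITSHF
 * and the low bits select the partition, so that
 *
 *	minor = (instance << CMDK_UNITSHF) | partition
 *
 * The CMDKUNIT(dev) and CMDKPART(dev) macros used throughout this file
 * recover the two halves of that encoding.
 */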

/*
 * Local Function Prototypes
 */
static int cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp);
static void cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp);
static void cmdkmin(struct buf *bp);
static int cmdkrw(dev_t dev, struct uio *uio, int flag);
static int cmdkarw(dev_t dev, struct aio_req *aio, int flag);

/*
 * Bad Block Handling Functions Prototypes
 */
static void cmdk_bbh_reopen(struct cmdk *dkp);
static opaque_t cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp);
static bbh_cookie_t cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle);
static void cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle);
static void cmdk_bbh_close(struct cmdk *dkp);
static void cmdk_bbh_setalts_idx(struct cmdk *dkp);
static int cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key);

static struct bbh_objops cmdk_bbh_ops = {
	nulldev,
	nulldev,
	cmdk_bbh_gethandle,
	cmdk_bbh_htoc,
	cmdk_bbh_freehandle,
	0, 0
};

static int cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp);
static int cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp);
static int cmdkstrategy(struct buf *bp);
static int cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int cmdkioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int cmdkread(dev_t dev, struct uio *uio, cred_t *credp);
static int cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp);
static int cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);
static int cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp);
static int cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp);

/*
 * Device driver ops vector
 */

static struct cb_ops cmdk_cb_ops = {
	cmdkopen,		/* open */
	cmdkclose,		/* close */
	cmdkstrategy,		/* strategy */
	nodev,			/* print */
	cmdkdump,		/* dump */
	cmdkread,		/* read */
	cmdkwrite,		/* write */
	cmdkioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	cmdk_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_64BIT | D_MP | D_NEW,	/* Driver compatibility flag */
	CB_REV,			/* cb_rev */
	cmdkaread,		/* async read */
	cmdkawrite		/* async write */
};

static int cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int cmdkprobe(dev_info_t *dip);
static int cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd);

struct dev_ops cmdk_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	cmdkinfo,		/* info */
	nulldev,		/* identify */
	cmdkprobe,		/* probe */
	cmdkattach,		/* attach */
	cmdkdetach,		/* detach */
	nodev,			/* reset */
	&cmdk_cb_ops,		/* driver operations */
	(struct bus_ops *)0	/* bus operations */
};

/*
 * This is the loadable module wrapper.
 */
#include <sys/modctl.h>

extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	"Common Direct Access Disk %I%",
	&cmdk_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

/* Function prototypes for cmlb callbacks */

static int cmdk_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr,
    diskaddr_t start, size_t length);
static int cmdk_lb_getphygeom(dev_info_t *dip, cmlb_geom_t *phygeomp);
static int cmdk_lb_getvirtgeom(dev_info_t *dip, cmlb_geom_t *virtgeomp);
static int cmdk_lb_getcapacity(dev_info_t *dip, diskaddr_t *capp);
static int cmdk_lb_getattribute(dev_info_t *dip, tg_attribute_t *tgattribute);

static void cmdk_devid_setup(struct cmdk *dkp);
static int cmdk_devid_modser(struct cmdk *dkp);
static int cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len);
static int cmdk_devid_fabricate(struct cmdk *dkp);
static int cmdk_devid_read(struct cmdk *dkp);

static cmlb_tg_ops_t cmdk_lb_ops = {
	TG_DK_OPS_VERSION_0,
	cmdk_lb_rdwr,
	cmdk_lb_getphygeom,
	cmdk_lb_getvirtgeom,
	cmdk_lb_getcapacity,
	cmdk_lb_getattribute
};

int
_init(void)
{
	int rval;

	if (rval = ddi_soft_state_init(&cmdk_state, sizeof (struct cmdk), 7))
		return (rval);

	mutex_init(&cmdk_attach_mutex, NULL, MUTEX_DRIVER, NULL);
	if ((rval = mod_install(&modlinkage)) != 0) {
		mutex_destroy(&cmdk_attach_mutex);
		ddi_soft_state_fini(&cmdk_state);
	}
	return (rval);
}

int
_fini(void)
{
	return (EBUSY);

	/*
	 * This has been commented out until cmdk is a true
	 * unloadable module. Right now x86's are panicking on
	 * a diskless reconfig boot.
	 */

#if 0	/* bugid 1186679 */
	int rval;

	rval = mod_remove(&modlinkage);
	if (rval != 0)
		return (rval);

	mutex_destroy(&cmdk_attach_mutex);
	ddi_soft_state_fini(&cmdk_state);

	return (0);
#endif
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
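
/*
 * Module lifecycle summary: _init() must set up cmdk_state and the
 * attach mutex before mod_install(), since probe/attach can run as soon
 * as the module is installed; on mod_install() failure the allocations
 * are torn down again. _fini() deliberately returns EBUSY (see bugid
 * 1186679 above), so in practice the driver is never unloaded.
 */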

/*
 * Autoconfiguration Routines
 */
static int
cmdkprobe(dev_info_t *dip)
{
	int		instance;
	int		status;
	struct cmdk	*dkp;

	instance = ddi_get_instance(dip);

	if (ddi_get_soft_state(cmdk_state, instance))
		return (DDI_PROBE_PARTIAL);

	if ((ddi_soft_state_zalloc(cmdk_state, instance) != DDI_SUCCESS) ||
	    ((dkp = ddi_get_soft_state(cmdk_state, instance)) == NULL))
		return (DDI_PROBE_PARTIAL);

	mutex_init(&dkp->dk_mutex, NULL, MUTEX_DRIVER, NULL);
	rw_init(&dkp->dk_bbh_mutex, NULL, RW_DRIVER, NULL);
	dkp->dk_dip = dip;
	mutex_enter(&dkp->dk_mutex);

	dkp->dk_dev = makedevice(ddi_driver_major(dip),
	    ddi_get_instance(dip) << CMDK_UNITSHF);

	/* linkage to dadk and strategy */
	if (cmdk_create_obj(dip, dkp) != DDI_SUCCESS) {
		mutex_exit(&dkp->dk_mutex);
		mutex_destroy(&dkp->dk_mutex);
		rw_destroy(&dkp->dk_bbh_mutex);
		ddi_soft_state_free(cmdk_state, instance);
		return (DDI_PROBE_PARTIAL);
	}

	status = dadk_probe(DKTP_DATA, KM_NOSLEEP);
	if (status != DDI_PROBE_SUCCESS) {
		cmdk_destroy_obj(dip, dkp);	/* dadk/strategy linkage */
		mutex_exit(&dkp->dk_mutex);
		mutex_destroy(&dkp->dk_mutex);
		rw_destroy(&dkp->dk_bbh_mutex);
		ddi_soft_state_free(cmdk_state, instance);
		return (status);
	}

	mutex_exit(&dkp->dk_mutex);
#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdkprobe: instance= %d name= `%s`\n",
		    instance, ddi_get_name_addr(dip));
#endif
	return (status);
}

static int
cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int		instance;
	struct cmdk	*dkp;
	char		*node_type;

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_FAILURE);

	mutex_enter(&dkp->dk_mutex);

	/* dadk_attach is an empty function that only returns SUCCESS */
	(void) dadk_attach(DKTP_DATA);

	node_type = (DKTP_EXT->tg_nodetype);

	/*
	 * this open allows cmlb to read the device
	 * and determine the label types
	 * so that cmlb can create minor nodes for the device
	 */

	/* open the target disk */
	if (dadk_open(DKTP_DATA, 0) != DDI_SUCCESS)
		goto fail2;

	/* mark as having opened target */
	dkp->dk_flag |= CMDK_TGDK_OPEN;

	cmlb_alloc_handle((cmlb_handle_t *)&dkp->dk_cmlbhandle);

	if (cmlb_attach(dip,
	    &cmdk_lb_ops,
	    DTYPE_DIRECT,		/* device_type */
	    0,				/* removable */
	    node_type,
	    CMLB_CREATE_ALTSLICE_VTOC_16_DTYPE_DIRECT,	/* alter_behaviour */
	    dkp->dk_cmlbhandle) != 0)
		goto fail1;

	/* Calling validate will create minor nodes according to disk label */
	(void) cmlb_validate(dkp->dk_cmlbhandle);

	/* set bbh (Bad Block Handling) */
	cmdk_bbh_reopen(dkp);

	/* setup devid string */
	cmdk_devid_setup(dkp);

	mutex_enter(&cmdk_attach_mutex);
	if (instance > cmdk_max_instance)
		cmdk_max_instance = instance;
	mutex_exit(&cmdk_attach_mutex);

	mutex_exit(&dkp->dk_mutex);

	/*
	 * Add a zero-length attribute to tell the world we support
	 * kernel ioctls (for layered drivers)
	 */
	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    DDI_KERNEL_IOCTL, NULL, 0);
	ddi_report_dev(dip);

	return (DDI_SUCCESS);

fail1:
	cmlb_free_handle(&dkp->dk_cmlbhandle);
	(void) dadk_close(DKTP_DATA);
fail2:
	cmdk_destroy_obj(dip, dkp);
	rw_destroy(&dkp->dk_bbh_mutex);
	mutex_exit(&dkp->dk_mutex);
	mutex_destroy(&dkp->dk_mutex);
	ddi_soft_state_free(cmdk_state, instance);
	return (DDI_FAILURE);
}

static int
cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct cmdk	*dkp;
	int		instance;
	int		max_instance;

	if (cmd != DDI_DETACH) {
#ifdef CMDK_DEBUG
		if (cmdk_debug & DIO) {
			PRF("cmdkdetach: cmd = %d unknown\n", cmd);
		}
#endif
		return (DDI_FAILURE);
	}

	mutex_enter(&cmdk_attach_mutex);
	max_instance = cmdk_max_instance;
	mutex_exit(&cmdk_attach_mutex);

	/* check if any instance of driver is open */
	for (instance = 0; instance < max_instance; instance++) {
		dkp = ddi_get_soft_state(cmdk_state, instance);
		if (!dkp)
			continue;
		if (dkp->dk_flag & CMDK_OPEN)
			return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (DDI_SUCCESS);

	mutex_enter(&dkp->dk_mutex);

	/*
	 * The cmdk_part_info call at the end of cmdkattach may have
	 * caused cmdk_reopen to do a TGDK_OPEN; make sure we close on
	 * detach for the case when cmdkopen/cmdkclose never occurs.
	 */
	if (dkp->dk_flag & CMDK_TGDK_OPEN) {
		dkp->dk_flag &= ~CMDK_TGDK_OPEN;
		(void) dadk_close(DKTP_DATA);
	}

	cmlb_detach(dkp->dk_cmlbhandle);
	cmlb_free_handle(&dkp->dk_cmlbhandle);
	ddi_prop_remove_all(dip);

	cmdk_destroy_obj(dip, dkp);	/* dadk/strategy linkage */
	mutex_exit(&dkp->dk_mutex);
	mutex_destroy(&dkp->dk_mutex);
	rw_destroy(&dkp->dk_bbh_mutex);
	ddi_soft_state_free(cmdk_state, instance);

	return (DDI_SUCCESS);
}

static int
cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	dev_t		dev = (dev_t)arg;
	int		instance;
	struct cmdk	*dkp;

#ifdef lint
	dip = dip;	/* no one ever uses this */
#endif
#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdkinfo: call\n");
#endif
	instance = CMDKUNIT(dev);

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
			return (DDI_FAILURE);
		*result = (void *) dkp->dk_dip;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(intptr_t)instance;
		break;
	default:
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

static int
cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	struct cmdk	*dkp;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;

#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdk_prop_op: call\n");
#endif

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));

	/*
	 * Our dynamic properties are all device specific and size oriented.
	 * Requests issued under conditions where size is valid are passed
	 * to ddi_prop_op_nblocks with the size information, otherwise the
	 * request is passed to ddi_prop_op. Size depends on valid label.
	 */
	if ((dev != DDI_DEV_T_ANY) && (dkp != NULL)) {
		if (!cmlb_partinfo(
		    dkp->dk_cmlbhandle,
		    CMDKPART(dev),
		    &p_lblkcnt,
		    &p_lblksrt,
		    NULL,
		    NULL))
			return (ddi_prop_op_nblocks(dev, dip,
			    prop_op, mod_flags,
			    name, valuep, lengthp,
			    (uint64_t)p_lblkcnt));
	}

	return (ddi_prop_op(dev, dip,
	    prop_op, mod_flags,
	    name, valuep, lengthp));
}

/*
 * dump routine
 */
static int
cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
{
	int		instance;
	struct cmdk	*dkp;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;
	struct buf	local;
	struct buf	*bp;

#ifdef CMDK_DEBUG
	if (cmdk_debug & DENT)
		PRF("cmdkdump: call\n");
#endif
	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) || (blkno < 0))
		return (ENXIO);

	if (cmlb_partinfo(
	    dkp->dk_cmlbhandle,
	    CMDKPART(dev),
	    &p_lblkcnt,
	    &p_lblksrt,
	    NULL,
	    NULL)) {
		return (ENXIO);
	}

	if ((blkno+nblk) > p_lblkcnt)
		return (EINVAL);

	cmdk_indump = 1;	/* Tell disk targets we are panic dumping */

	bp = &local;
	bzero(bp, sizeof (*bp));
	bp->b_flags = B_BUSY;
	bp->b_un.b_addr = addr;
	bp->b_bcount = nblk << SCTRSHFT;
	SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + blkno)));

	(void) dadk_dump(DKTP_DATA, bp);
	return (bp->b_error);
}
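
/*
 * The DIOCTL_RWCMD argument is copied through a 32-bit overlay
 * (struct dadkio_rwcmd32) when the caller is a 32-bit process on a
 * 64-bit kernel; ddi_model_convert_from() tells us which layout the
 * user buffer has. Only the pointer- and long-sized fields differ, so
 * the two helpers below widen and narrow the structure field by field.
 */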

/*
 * Copy in the dadkio_rwcmd according to the user's data model. If needed,
 * convert it for our internal use.
 */
static int
rwcmd_copyin(struct dadkio_rwcmd *rwcmdp, caddr_t inaddr, int flag)
{
	switch (ddi_model_convert_from(flag)) {
	case DDI_MODEL_ILP32: {
		struct dadkio_rwcmd32 cmd32;

		if (ddi_copyin(inaddr, &cmd32,
		    sizeof (struct dadkio_rwcmd32), flag)) {
			return (EFAULT);
		}

		rwcmdp->cmd = cmd32.cmd;
		rwcmdp->flags = cmd32.flags;
		rwcmdp->blkaddr = (daddr_t)cmd32.blkaddr;
		rwcmdp->buflen = cmd32.buflen;
		rwcmdp->bufaddr = (caddr_t)(intptr_t)cmd32.bufaddr;
		/*
		 * Note: we do not convert the 'status' field,
		 * as it should not contain valid data at this
		 * point.
		 */
		bzero(&rwcmdp->status, sizeof (rwcmdp->status));
		break;
	}
	case DDI_MODEL_NONE: {
		if (ddi_copyin(inaddr, rwcmdp,
		    sizeof (struct dadkio_rwcmd), flag)) {
			return (EFAULT);
		}
	}
	}
	return (0);
}

/*
 * If necessary, convert the internal rwcmdp and status to the appropriate
 * data model and copy it out to the user.
 */
static int
rwcmd_copyout(struct dadkio_rwcmd *rwcmdp, caddr_t outaddr, int flag)
{
	switch (ddi_model_convert_from(flag)) {
	case DDI_MODEL_ILP32: {
		struct dadkio_rwcmd32 cmd32;

		cmd32.cmd = rwcmdp->cmd;
		cmd32.flags = rwcmdp->flags;
		cmd32.blkaddr = rwcmdp->blkaddr;
		cmd32.buflen = rwcmdp->buflen;
		ASSERT64(((uintptr_t)rwcmdp->bufaddr >> 32) == 0);
		cmd32.bufaddr = (caddr32_t)(uintptr_t)rwcmdp->bufaddr;

		cmd32.status.status = rwcmdp->status.status;
		cmd32.status.resid = rwcmdp->status.resid;
		cmd32.status.failed_blk_is_valid =
		    rwcmdp->status.failed_blk_is_valid;
		cmd32.status.failed_blk = rwcmdp->status.failed_blk;
		cmd32.status.fru_code_is_valid =
		    rwcmdp->status.fru_code_is_valid;
		cmd32.status.fru_code = rwcmdp->status.fru_code;

		bcopy(rwcmdp->status.add_error_info,
		    cmd32.status.add_error_info, DADKIO_ERROR_INFO_LEN);

		if (ddi_copyout(&cmd32, outaddr,
		    sizeof (struct dadkio_rwcmd32), flag))
			return (EFAULT);
		break;
	}
	case DDI_MODEL_NONE: {
		if (ddi_copyout(rwcmdp, outaddr,
		    sizeof (struct dadkio_rwcmd), flag))
			return (EFAULT);
	}
	}
	return (0);
}
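
/*
 * cmdkioctl() below handles three classes of requests: a few DKIOC*
 * queries answered directly from the target-disk geometry, the label
 * and partition ioctls that are forwarded wholesale to cmlb_ioctl(),
 * and everything else (including DIOCTL_RWCMD, bracketed by the two
 * copy helpers above), which falls through to dadk_ioctl().
 */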

/*
 * ioctl routine
 */
static int
cmdkioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *credp, int *rvalp)
{
	int		instance;
	struct scsi_device *devp;
	struct cmdk	*dkp;
	char		data[NBPSCTR];

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	bzero(data, sizeof (data));

	switch (cmd) {

	case DKIOCGMEDIAINFO: {
		struct dk_minfo	media_info;
		struct tgdk_geom phyg;

		/* dadk_getphygeom always returns success */
		(void) dadk_getphygeom(DKTP_DATA, &phyg);

		media_info.dki_lbsize = phyg.g_secsiz;
		media_info.dki_capacity = phyg.g_cap;
		media_info.dki_media_type = DK_FIXED_DISK;

		if (ddi_copyout(&media_info, (void *)arg,
		    sizeof (struct dk_minfo), flag)) {
			return (EFAULT);
		} else {
			return (0);
		}
	}

	case DKIOCINFO: {
		struct dk_cinfo *info = (struct dk_cinfo *)data;

		/* controller information */
		info->dki_ctype = (DKTP_EXT->tg_ctype);
		info->dki_cnum = ddi_get_instance(ddi_get_parent(dkp->dk_dip));
		(void) strcpy(info->dki_cname,
		    ddi_get_name(ddi_get_parent(dkp->dk_dip)));

		/* Unit Information */
		info->dki_unit = ddi_get_instance(dkp->dk_dip);
		devp = ddi_get_driver_private(dkp->dk_dip);
		info->dki_slave = (CMDEV_TARG(devp)<<3) | CMDEV_LUN(devp);
		(void) strcpy(info->dki_dname, ddi_driver_name(dkp->dk_dip));
		info->dki_flags = DKI_FMTVOL;
		info->dki_partition = CMDKPART(dev);

		info->dki_maxtransfer = maxphys / DEV_BSIZE;
		info->dki_addr = 1;
		info->dki_space = 0;
		info->dki_prio = 0;
		info->dki_vec = 0;

		if (ddi_copyout(data, (void *)arg, sizeof (*info), flag))
			return (EFAULT);
		else
			return (0);
	}

	case DKIOCSTATE: {
		int		state;
		int		rval;
		diskaddr_t	p_lblksrt;
		diskaddr_t	p_lblkcnt;

		if (ddi_copyin((void *)arg, &state, sizeof (int), flag))
			return (EFAULT);

		/* dadk_check_media blocks until state changes */
		if (rval = dadk_check_media(DKTP_DATA, &state))
			return (rval);

		if (state == DKIO_INSERTED) {

			if (cmlb_validate(dkp->dk_cmlbhandle) != 0)
				return (ENXIO);

			if (cmlb_partinfo(dkp->dk_cmlbhandle, CMDKPART(dev),
			    &p_lblkcnt, &p_lblksrt, NULL, NULL))
				return (ENXIO);

			if (p_lblkcnt <= 0)
				return (ENXIO);
		}

		if (ddi_copyout(&state, (caddr_t)arg, sizeof (int), flag))
			return (EFAULT);

		return (0);
	}

	/*
	 * is media removable?
	 */
	case DKIOCREMOVABLE: {
		int i;

		i = (DKTP_EXT->tg_rmb) ? 1 : 0;

		if (ddi_copyout(&i, (caddr_t)arg, sizeof (int), flag))
			return (EFAULT);

		return (0);
	}

	case DKIOCADDBAD:
		/*
		 * This is not an update mechanism to add bad blocks
		 * to the bad block structures stored on disk.
		 *
		 * addbadsec(1M) will update the bad block data on disk
		 * and use this ioctl to force the driver to re-initialize
		 * the list of bad blocks in the driver.
		 */

		/* start BBH */
		cmdk_bbh_reopen(dkp);
		return (0);

	case DKIOCG_PHYGEOM:
	case DKIOCG_VIRTGEOM:
	case DKIOCGGEOM:
	case DKIOCSGEOM:
	case DKIOCGAPART:
	case DKIOCSAPART:
	case DKIOCGVTOC:
	case DKIOCSVTOC:
	case DKIOCPARTINFO:
	case DKIOCGMBOOT:
	case DKIOCSMBOOT:
	case DKIOCGETEFI:
	case DKIOCSETEFI:
	case DKIOCPARTITION:
	{
		int rc;

		rc = cmlb_ioctl(
		    dkp->dk_cmlbhandle,
		    dev,
		    cmd,
		    arg,
		    flag,
		    credp,
		    rvalp);
		if (cmd == DKIOCSVTOC)
			cmdk_devid_setup(dkp);
		return (rc);
	}

	case DIOCTL_RWCMD: {
		struct dadkio_rwcmd *rwcmdp;
		int status;

		rwcmdp = kmem_alloc(sizeof (struct dadkio_rwcmd), KM_SLEEP);

		status = rwcmd_copyin(rwcmdp, (caddr_t)arg, flag);

		if (status == 0) {
			bzero(&(rwcmdp->status), sizeof (struct dadkio_status));
			status = dadk_ioctl(DKTP_DATA,
			    dev,
			    cmd,
			    (uintptr_t)rwcmdp,
			    flag,
			    credp,
			    rvalp);
		}
		if (status == 0)
			status = rwcmd_copyout(rwcmdp, (caddr_t)arg, flag);

		kmem_free(rwcmdp, sizeof (struct dadkio_rwcmd));
		return (status);
	}

	default:
		return (dadk_ioctl(DKTP_DATA,
		    dev,
		    cmd,
		    arg,
		    flag,
		    credp,
		    rvalp));
	}
}
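
/*
 * Open/close accounting, shared by cmdkopen() and cmdkclose() below:
 * layered opens (OTYP_LYR) are counted per partition in dk_open_lyr[],
 * regular opens are one bit per partition in dk_open_reg[otyp], and
 * dk_open_exl is a bitmask of partitions currently opened with FEXCL.
 * The cmlb label is invalidated only on the last close of the unit.
 */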

/*ARGSUSED1*/
static int
cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp)
{
	int		part;
	ulong_t		partbit;
	int		instance;
	struct cmdk	*dkp;
	int		lastclose = 1;
	int		i;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
	    (otyp >= OTYPCNT))
		return (ENXIO);

	mutex_enter(&dkp->dk_mutex);

	/* check if device has been opened */
	if (!(dkp->dk_flag & CMDK_OPEN)) {
		mutex_exit(&dkp->dk_mutex);
		return (ENXIO);
	}

	part = CMDKPART(dev);
	partbit = 1 << part;

	/* account for close */
	if (otyp == OTYP_LYR) {
		if (dkp->dk_open_lyr[part])
			dkp->dk_open_lyr[part]--;
	} else
		dkp->dk_open_reg[otyp] &= ~partbit;
	dkp->dk_open_exl &= ~partbit;

	for (i = 0; i < CMDK_MAXPART; i++)
		if (dkp->dk_open_lyr[i] != 0) {
			lastclose = 0;
			break;
		}

	if (lastclose)
		for (i = 0; i < OTYPCNT; i++)
			if (dkp->dk_open_reg[i] != 0) {
				lastclose = 0;
				break;
			}

	mutex_exit(&dkp->dk_mutex);

	if (lastclose)
		cmlb_invalidate(dkp->dk_cmlbhandle);

	return (DDI_SUCCESS);
}

/*ARGSUSED3*/
static int
cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp)
{
	dev_t		dev = *dev_p;
	int		part;
	ulong_t		partbit;
	int		instance;
	struct cmdk	*dkp;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;
	int		i;
	int		nodelay;

	instance = CMDKUNIT(dev);
	if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
		return (ENXIO);

	if (otyp >= OTYPCNT)
		return (EINVAL);

	part = CMDKPART(dev);
	partbit = 1 << part;
	nodelay = (flag & (FNDELAY | FNONBLOCK));

	mutex_enter(&dkp->dk_mutex);

	if (cmlb_validate(dkp->dk_cmlbhandle) != 0) {

		/* fail if not doing a non-blocking open */
		if (!nodelay) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	} else if (cmlb_partinfo(dkp->dk_cmlbhandle, part, &p_lblkcnt,
	    &p_lblksrt, NULL, NULL) == 0) {

		if (p_lblkcnt <= 0 && (!nodelay || otyp != OTYP_CHR)) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	} else {
		/* fail if not doing a non-blocking open */
		if (!nodelay) {
			mutex_exit(&dkp->dk_mutex);
			return (ENXIO);
		}
	}

	if ((DKTP_EXT->tg_rdonly) && (flag & FWRITE)) {
		mutex_exit(&dkp->dk_mutex);
		return (EROFS);
	}

	/* check for part already opened exclusively */
	if (dkp->dk_open_exl & partbit)
		goto excl_open_fail;

	/* check if we can establish exclusive open */
	if (flag & FEXCL) {
		if (dkp->dk_open_lyr[part])
			goto excl_open_fail;
		for (i = 0; i < OTYPCNT; i++) {
			if (dkp->dk_open_reg[i] & partbit)
				goto excl_open_fail;
		}
	}

	/* open will succeed, account for open */
	dkp->dk_flag |= CMDK_OPEN;
	if (otyp == OTYP_LYR)
		dkp->dk_open_lyr[part]++;
	else
		dkp->dk_open_reg[otyp] |= partbit;
	if (flag & FEXCL)
		dkp->dk_open_exl |= partbit;

	mutex_exit(&dkp->dk_mutex);
	return (DDI_SUCCESS);

excl_open_fail:
	mutex_exit(&dkp->dk_mutex);
	return (EBUSY);
}

/*
 * read routine
 */
/*ARGSUSED2*/
static int
cmdkread(dev_t dev, struct uio *uio, cred_t *credp)
{
	return (cmdkrw(dev, uio, B_READ));
}

/*
 * async read routine
 */
/*ARGSUSED2*/
static int
cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp)
{
	return (cmdkarw(dev, aio, B_READ));
}

/*
 * write routine
 */
/*ARGSUSED2*/
static int
cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp)
{
	return (cmdkrw(dev, uio, B_WRITE));
}

/*
 * async write routine
 */
/*ARGSUSED2*/
static int
cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp)
{
	return (cmdkarw(dev, aio, B_WRITE));
}

static void
cmdkmin(struct buf *bp)
{
	if (bp->b_bcount > DK_MAXRECSIZE)
		bp->b_bcount = DK_MAXRECSIZE;
}
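
/*
 * cmdkrw/cmdkarw below hand the transfer to physio(9F)/aphysio(9F),
 * which lock down the user pages and call cmdkstrategy() in pieces;
 * cmdkmin() above is the minphys routine they use to clamp each buf
 * to DK_MAXRECSIZE.
 */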

static int
cmdkrw(dev_t dev, struct uio *uio, int flag)
{
	return (physio(cmdkstrategy, (struct buf *)0, dev, flag, cmdkmin, uio));
}

static int
cmdkarw(dev_t dev, struct aio_req *aio, int flag)
{
	return (aphysio(cmdkstrategy, anocancel, dev, flag, cmdkmin, aio));
}

/*
 * strategy routine
 */
static int
cmdkstrategy(struct buf *bp)
{
	int		instance;
	struct cmdk	*dkp;
	long		d_cnt;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;

	instance = CMDKUNIT(bp->b_edev);
	if (cmdk_indump || !(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
	    (dkblock(bp) < 0)) {
		bp->b_resid = bp->b_bcount;
		SETBPERR(bp, ENXIO);
		biodone(bp);
		return (0);
	}

	bp->b_flags &= ~(B_DONE|B_ERROR);
	bp->b_resid = 0;
	bp->av_back = NULL;

	/*
	 * only re-read the vtoc if necessary (force == FALSE)
	 */
	if (cmlb_partinfo(
	    dkp->dk_cmlbhandle,
	    CMDKPART(bp->b_edev),
	    &p_lblkcnt,
	    &p_lblksrt,
	    NULL,
	    NULL)) {
		SETBPERR(bp, ENXIO);
	}

	if ((bp->b_bcount & (NBPSCTR-1)) || (dkblock(bp) > p_lblkcnt))
		SETBPERR(bp, ENXIO);

	if ((bp->b_flags & B_ERROR) || (dkblock(bp) == p_lblkcnt)) {
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}

	d_cnt = bp->b_bcount >> SCTRSHFT;
	if ((dkblock(bp) + d_cnt) > p_lblkcnt) {
		bp->b_resid = ((dkblock(bp) + d_cnt) - p_lblkcnt) << SCTRSHFT;
		bp->b_bcount -= bp->b_resid;
	}

	SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + dkblock(bp))));
	if (dadk_strategy(DKTP_DATA, bp) != DDI_SUCCESS) {
		bp->b_resid += bp->b_bcount;
		biodone(bp);
	}
	return (0);
}
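
/*
 * cmdk_create_obj() below picks its queueing and flow-control objects
 * from driver properties, so a cmdk.conf fragment would look something
 * like this (a sketch; "qsort"/"dmult" are simply the values the code
 * below accepts):
 *
 *	queue="qsort" flow_control="dmult";
 *
 * As the names suggest, "qfifo" queues requests first-come-first-served
 * while "qsort" keeps them sorted by block address, and "dsngl" issues
 * one command at a time where "dmult" allows several outstanding.
 */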

static int
cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp)
{
	struct scsi_device *devp;
	opaque_t	queobjp = NULL;
	opaque_t	flcobjp = NULL;
	char		que_keyvalp[64];
	int		que_keylen;
	char		flc_keyvalp[64];
	int		flc_keylen;

	ASSERT(mutex_owned(&dkp->dk_mutex));

	/* Create linkage to queueing routines based on property */
	que_keylen = sizeof (que_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "cmdk_create_obj: queue property undefined");
		return (DDI_FAILURE);
	}
	que_keyvalp[que_keylen] = (char)0;

	if (strcmp(que_keyvalp, "qfifo") == 0) {
		queobjp = (opaque_t)qfifo_create();
	} else if (strcmp(que_keyvalp, "qsort") == 0) {
		queobjp = (opaque_t)qsort_create();
	} else {
		return (DDI_FAILURE);
	}

	/* Create linkage to dequeueing routines based on property */
	flc_keylen = sizeof (flc_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "cmdk_create_obj: flow-control property undefined");
		return (DDI_FAILURE);
	}

	flc_keyvalp[flc_keylen] = (char)0;

	if (strcmp(flc_keyvalp, "dsngl") == 0) {
		flcobjp = (opaque_t)dsngl_create();
	} else if (strcmp(flc_keyvalp, "dmult") == 0) {
		flcobjp = (opaque_t)dmult_create();
	} else {
		return (DDI_FAILURE);
	}

	/* populate bbh_obj object stored in dkp */
	dkp->dk_bbh_obj.bbh_data = dkp;
	dkp->dk_bbh_obj.bbh_ops = &cmdk_bbh_ops;

	/* create linkage to dadk */
	dkp->dk_tgobjp = (opaque_t)dadk_create();

	devp = ddi_get_driver_private(dip);
	(void) dadk_init(DKTP_DATA, devp, flcobjp, queobjp, &dkp->dk_bbh_obj,
	    NULL);

	return (DDI_SUCCESS);
}

static void
cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp)
{
	char		que_keyvalp[64];
	int		que_keylen;
	char		flc_keyvalp[64];
	int		flc_keylen;

	ASSERT(mutex_owned(&dkp->dk_mutex));

	(void) dadk_free((dkp->dk_tgobjp));
	dkp->dk_tgobjp = NULL;

	que_keylen = sizeof (que_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "cmdk_destroy_obj: queue property undefined");
		return;
	}
	que_keyvalp[que_keylen] = (char)0;

	flc_keylen = sizeof (flc_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "cmdk_destroy_obj: flow-control property undefined");
		return;
	}
	flc_keyvalp[flc_keylen] = (char)0;
}

static int
cmdk_lb_rdwr(
	dev_info_t *dip,
	uchar_t cmd,
	void *bufaddr,
	diskaddr_t start,
	size_t count)
{
	struct cmdk	*dkp;
	opaque_t	handle;
	int		rc = 0;
	char		*bufa;

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
	if (dkp == NULL)
		return (ENXIO);

	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	/* count must be a multiple of 512; round up */
	count = (count + NBPSCTR - 1) & -NBPSCTR;
	handle = dadk_iob_alloc(DKTP_DATA, start, count, KM_SLEEP);
	if (!handle)
		return (ENOMEM);

	if (cmd == TG_READ) {
		bufa = dadk_iob_xfer(DKTP_DATA, handle, B_READ);
		if (!bufa)
			rc = EIO;
		else
			bcopy(bufa, bufaddr, count);
	} else {
		bufa = dadk_iob_htoc(DKTP_DATA, handle);
		bcopy(bufaddr, bufa, count);
		bufa = dadk_iob_xfer(DKTP_DATA, handle, B_WRITE);
		if (!bufa)
			rc = EIO;
	}
	(void) dadk_iob_free(DKTP_DATA, handle);

	return (rc);
}

static int
cmdk_lb_getcapacity(
	dev_info_t *dip,
	diskaddr_t *capp)
{
	struct cmdk	*dkp;
	struct tgdk_geom phyg;

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
	if (dkp == NULL)
		return (ENXIO);

	/* dadk_getphygeom always returns success */
	(void) dadk_getphygeom(DKTP_DATA, &phyg);

	*capp = phyg.g_cap;

	return (0);
}
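
/*
 * cmdk_lb_getvirtgeom() below fabricates a BIOS-style geometry of
 * 254 heads x 63 sectors/track. The 63 * 254 * 1024 cap works out to
 * 16,386,048 blocks (about 7.8 GB at 512 bytes/block), the most that
 * fits the 10-bit cylinder count of an Int 13/function 8 reply.
 */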

static int
cmdk_lb_getvirtgeom(
	dev_info_t *dip,
	cmlb_geom_t *virtgeomp)
{
	struct cmdk	*dkp;
	struct tgdk_geom phyg;
	diskaddr_t	capacity;

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
	if (dkp == NULL)
		return (ENXIO);

	(void) dadk_getgeom(DKTP_DATA, &phyg);
	capacity = phyg.g_cap;

	/*
	 * If the controller returned us something that doesn't
	 * really fit into an Int 13/function 8 geometry
	 * result, just fail the ioctl. See PSARC 1998/313.
	 */
	if (capacity < 0 || capacity >= 63 * 254 * 1024)
		return (EINVAL);

	virtgeomp->g_capacity	= capacity;
	virtgeomp->g_nsect	= 63;
	virtgeomp->g_nhead	= 254;
	virtgeomp->g_ncyl	= capacity / (63 * 254);
	virtgeomp->g_acyl	= 0;
	virtgeomp->g_secsize	= 512;
	virtgeomp->g_intrlv	= 1;
	virtgeomp->g_rpm	= 3600;

	return (0);
}

static int
cmdk_lb_getphygeom(
	dev_info_t *dip,
	cmlb_geom_t *phygeomp)
{
	struct cmdk	*dkp;
	struct tgdk_geom phyg;

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
	if (dkp == NULL)
		return (ENXIO);

	/* dadk_getphygeom always returns success */
	(void) dadk_getphygeom(DKTP_DATA, &phyg);

	phygeomp->g_capacity	= phyg.g_cap;
	phygeomp->g_nsect	= phyg.g_sec;
	phygeomp->g_nhead	= phyg.g_head;
	phygeomp->g_acyl	= phyg.g_acyl;
	phygeomp->g_ncyl	= phyg.g_cyl;
	phygeomp->g_secsize	= phyg.g_secsiz;
	phygeomp->g_intrlv	= 1;
	phygeomp->g_rpm		= 3600;

	return (0);
}

static int
cmdk_lb_getattribute(
	dev_info_t *dip,
	tg_attribute_t *tgattribute)
{
	struct cmdk *dkp;

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
	if (dkp == NULL)
		return (ENXIO);

	if ((DKTP_EXT->tg_rdonly))
		tgattribute->media_is_writable = FALSE;
	else
		tgattribute->media_is_writable = TRUE;

	return (0);
}

/*
 * Create and register the devid.
 * There are 4 different ways we can get a device id:
 *    1. Already have one - nothing to do
 *    2. Build one from the drive's model and serial numbers
 *    3. Read one from the disk (first sector of last track)
 *    4. Fabricate one and write it on the disk.
 * If any of these succeeds, register the deviceid
 */
static void
cmdk_devid_setup(struct cmdk *dkp)
{
	int	rc;

	/* Try options until one succeeds, or all have failed */

	/* 1. All done if already registered */
	if (dkp->dk_devid != NULL)
		return;

	/* 2. Build a devid from the model and serial number */
	rc = cmdk_devid_modser(dkp);
	if (rc != DDI_SUCCESS) {
		/* 3. Read devid from the disk, if present */
		rc = cmdk_devid_read(dkp);

		/* 4. otherwise make one up and write it on the disk */
		if (rc != DDI_SUCCESS)
			rc = cmdk_devid_fabricate(dkp);
	}

	/* If we managed to get a devid any of the above ways, register it */
	if (rc == DDI_SUCCESS)
		(void) ddi_devid_register(dkp->dk_dip, dkp->dk_devid);
}
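
/*
 * The devid payload built below is simply "<model>=<serial>", e.g.
 * (a made-up illustration) "ST3120026A=3JT1ABCD", registered with type
 * DEVID_ATA_SERIAL.
 */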

/*
 * Build a devid from the model and serial number.
 * Return DDI_SUCCESS or DDI_FAILURE.
 */
static int
cmdk_devid_modser(struct cmdk *dkp)
{
	int	rc = DDI_FAILURE;
	char	*hwid;
	int	modlen;
	int	serlen;

	/*
	 * device ID is a concatenation of model number, '=', serial number.
	 */
	hwid = kmem_alloc(CMDK_HWIDLEN, KM_SLEEP);
	modlen = cmdk_get_modser(dkp, DIOCTL_GETMODEL, hwid, CMDK_HWIDLEN);
	if (modlen == 0) {
		rc = DDI_FAILURE;
		goto err;
	}
	hwid[modlen++] = '=';
	serlen = cmdk_get_modser(dkp, DIOCTL_GETSERIAL,
	    hwid + modlen, CMDK_HWIDLEN - modlen);
	if (serlen == 0) {
		rc = DDI_FAILURE;
		goto err;
	}
	hwid[modlen + serlen] = 0;

	/* Initialize the device ID, trailing NULL not included */
	rc = ddi_devid_init(dkp->dk_dip, DEVID_ATA_SERIAL, modlen + serlen,
	    hwid, (ddi_devid_t *)&dkp->dk_devid);
	if (rc != DDI_SUCCESS) {
		rc = DDI_FAILURE;
		goto err;
	}

	rc = DDI_SUCCESS;

err:
	kmem_free(hwid, CMDK_HWIDLEN);
	return (rc);
}

static int
cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len)
{
	dadk_ioc_string_t strarg;
	int		rval;
	char		*s;
	char		ch;
	boolean_t	ret;
	int		i;
	int		tb;

	strarg.is_buf = buf;
	strarg.is_size = len;
	if (dadk_ioctl(DKTP_DATA,
	    dkp->dk_dev,
	    ioccmd,
	    (uintptr_t)&strarg,
	    FNATIVE | FKIOCTL,
	    NULL,
	    &rval) != 0)
		return (0);

	/*
	 * a valid model/serial string must contain a non-zero non-space;
	 * trim trailing spaces/NULs
	 */
	ret = B_FALSE;
	s = buf;
	for (i = 0; i < strarg.is_size; i++) {
		ch = *s++;
		if (ch != ' ' && ch != '\0')
			tb = i + 1;
		if (ch != ' ' && ch != '\0' && ch != '0')
			ret = B_TRUE;
	}

	if (ret == B_FALSE)
		return (0);

	return (tb);
}

/*
 * Read a devid from the first block of the last track of
 * the last cylinder.  Make sure what we read is a valid devid.
 * Return DDI_SUCCESS or DDI_FAILURE.
 */
static int
cmdk_devid_read(struct cmdk *dkp)
{
	diskaddr_t	blk;
	struct dk_devid *dkdevidp;
	uint_t		*ip;
	int		chksum;
	int		i, sz;
	tgdk_iob_handle	handle = NULL;	/* NULL so the err: path is safe */
	int		rc = DDI_FAILURE;

	if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk))
		goto err;

	/* read the devid */
	handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP);
	if (handle == NULL)
		goto err;

	dkdevidp = (struct dk_devid *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
	if (dkdevidp == NULL)
		goto err;

	/* Validate the revision */
	if ((dkdevidp->dkd_rev_hi != DK_DEVID_REV_MSB) ||
	    (dkdevidp->dkd_rev_lo != DK_DEVID_REV_LSB))
		goto err;

	/* Calculate the checksum */
	chksum = 0;
	ip = (uint_t *)dkdevidp;
	for (i = 0; i < ((NBPSCTR - sizeof (int))/sizeof (int)); i++)
		chksum ^= ip[i];
	if (DKD_GETCHKSUM(dkdevidp) != chksum)
		goto err;

	/* Validate the device id */
	if (ddi_devid_valid((ddi_devid_t)dkdevidp->dkd_devid) != DDI_SUCCESS)
		goto err;

	/* keep a copy of the device id */
	sz = ddi_devid_sizeof((ddi_devid_t)dkdevidp->dkd_devid);
	dkp->dk_devid = kmem_alloc(sz, KM_SLEEP);
	bcopy(dkdevidp->dkd_devid, dkp->dk_devid, sz);

	rc = DDI_SUCCESS;

err:
	if (handle != NULL)
		(void) dadk_iob_free(DKTP_DATA, handle);
	return (rc);
}
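
/*
 * On-disk devid sector layout, as the routines above and below use it:
 * a single 512-byte block holding a revision, the devid itself, and a
 * checksum in the final 4-byte word. The checksum is just the XOR of
 * the first 127 32-bit words ((NBPSCTR - sizeof (int)) / sizeof (int)),
 * so, for example, a sector whose words were all zero except word 0 =
 * 0x00010203 would store 0x00010203 as its checksum.
 */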

/*
 * Create a devid and write it on the first block of the last track of
 * the last cylinder.
 * Return DDI_SUCCESS or DDI_FAILURE.
 */
static int
cmdk_devid_fabricate(struct cmdk *dkp)
{
	ddi_devid_t	devid = NULL;	/* devid made by ddi_devid_init */
	struct dk_devid	*dkdevidp;	/* devid struct stored on disk */
	diskaddr_t	blk;
	tgdk_iob_handle	handle = NULL;
	uint_t		*ip, chksum;
	int		i;
	int		rc;

	rc = ddi_devid_init(dkp->dk_dip, DEVID_FAB, 0, NULL, &devid);
	if (rc != DDI_SUCCESS)
		goto err;

	if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk)) {
		/* no device id block address; free the devid made above */
		rc = DDI_FAILURE;
		goto err;
	}

	handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP);
	if (!handle)
		goto err;

	/* Locate the buffer */
	dkdevidp = (struct dk_devid *)dadk_iob_htoc(DKTP_DATA, handle);

	/* Fill in the revision */
	bzero(dkdevidp, NBPSCTR);
	dkdevidp->dkd_rev_hi = DK_DEVID_REV_MSB;
	dkdevidp->dkd_rev_lo = DK_DEVID_REV_LSB;

	/* Copy in the device id */
	i = ddi_devid_sizeof(devid);
	if (i > DK_DEVID_SIZE)
		goto err;
	bcopy(devid, dkdevidp->dkd_devid, i);

	/* Calculate the chksum */
	chksum = 0;
	ip = (uint_t *)dkdevidp;
	for (i = 0; i < ((NBPSCTR - sizeof (int))/sizeof (int)); i++)
		chksum ^= ip[i];

	/* Fill in the checksum */
	DKD_FORMCHKSUM(chksum, dkdevidp);

	/* write the devid */
	(void) dadk_iob_xfer(DKTP_DATA, handle, B_WRITE);

	dkp->dk_devid = devid;

	rc = DDI_SUCCESS;

err:
	if (handle != NULL)
		(void) dadk_iob_free(DKTP_DATA, handle);

	if (rc != DDI_SUCCESS && devid != NULL)
		ddi_devid_free(devid);

	return (rc);
}

static void
cmdk_bbh_free_alts(struct cmdk *dkp)
{
	if (dkp->dk_alts_hdl) {
		(void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
		kmem_free(dkp->dk_slc_cnt,
		    NDKMAP * (sizeof (uint32_t) + sizeof (struct alts_ent *)));
		dkp->dk_alts_hdl = NULL;
	}
}
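
/*
 * Bad-block remapping state: cmdk_bbh_reopen() below locates the slice
 * tagged V_ALTSCTR, whose first block holds a struct alts_parttbl
 * (validated by ALTS_SANITY) describing where, within that same slice,
 * the table of struct alts_ent remap entries lives. The entries are
 * kept sorted by bad_start, which is what allows cmdk_bbh_bsearch() to
 * binary-search them later.
 */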

static void
cmdk_bbh_reopen(struct cmdk *dkp)
{
	tgdk_iob_handle		handle = NULL;
	diskaddr_t		slcb, slcn, slce;
	struct alts_parttbl	*ap;
	struct alts_ent		*enttblp;
	uint32_t		altused;
	uint32_t		altbase;
	uint32_t		altlast;
	int			alts;
	uint16_t		vtoctag;
	int			i, j;

	/* find slice with V_ALTSCTR tag */
	for (alts = 0; alts < NDKMAP; alts++) {
		if (cmlb_partinfo(
		    dkp->dk_cmlbhandle,
		    alts,
		    &slcn,
		    &slcb,
		    NULL,
		    &vtoctag)) {
			goto empty;	/* no partition table exists */
		}

		if (vtoctag == V_ALTSCTR && slcn > 1)
			break;
	}
	if (alts >= NDKMAP) {
		goto empty;	/* no V_ALTSCTR slice defined */
	}

	/* read in ALTS label block */
	handle = dadk_iob_alloc(DKTP_DATA, slcb, NBPSCTR, KM_SLEEP);
	if (!handle) {
		goto empty;
	}

	ap = (struct alts_parttbl *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
	if (!ap || (ap->alts_sanity != ALTS_SANITY)) {
		goto empty;
	}

	altused = ap->alts_ent_used;	/* number of BB entries */
	altbase = ap->alts_ent_base;	/* blk offset from begin slice */
	altlast = ap->alts_ent_end;	/* blk offset to last block */
	/* ((altused * sizeof (struct alts_ent) + NBPSCTR - 1) & ~NBPSCTR) */

	if (altused == 0 ||
	    altbase < 1 ||
	    altbase > altlast ||
	    altlast >= slcn) {
		goto empty;
	}
	(void) dadk_iob_free(DKTP_DATA, handle);

	/* read in ALTS remapping table */
	handle = dadk_iob_alloc(DKTP_DATA,
	    slcb + altbase,
	    (altlast - altbase + 1) << SCTRSHFT, KM_SLEEP);
	if (!handle) {
		goto empty;
	}

	enttblp = (struct alts_ent *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
	if (!enttblp) {
		goto empty;
	}

	rw_enter(&dkp->dk_bbh_mutex, RW_WRITER);

	/* allocate space for dk_slc_cnt and dk_slc_ent tables */
	if (dkp->dk_slc_cnt == NULL) {
		dkp->dk_slc_cnt = kmem_alloc(NDKMAP *
		    (sizeof (long) + sizeof (struct alts_ent *)), KM_SLEEP);
	}
	dkp->dk_slc_ent = (struct alts_ent **)(dkp->dk_slc_cnt + NDKMAP);

	/* free previous BB table (if any) */
	if (dkp->dk_alts_hdl) {
		(void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
		dkp->dk_alts_hdl = NULL;
		dkp->dk_altused = 0;
	}

	/* save linkage to new BB table */
	dkp->dk_alts_hdl = handle;
	dkp->dk_altused = altused;

	/*
	 * build indexes to BB table by slice
	 * effectively we have
	 *	struct alts_ent *enttblp[altused];
	 *
	 *	uint32_t	dk_slc_cnt[NDKMAP];
	 *	struct alts_ent *dk_slc_ent[NDKMAP];
	 */
	for (i = 0; i < NDKMAP; i++) {
		if (cmlb_partinfo(
		    dkp->dk_cmlbhandle,
		    i,
		    &slcn,
		    &slcb,
		    NULL,
		    NULL)) {
			goto empty1;
		}

		dkp->dk_slc_cnt[i] = 0;
		if (slcn == 0)
			continue;	/* slice is not allocated */

		/* last block in slice */
		slce = slcb + slcn - 1;

		/* find the first remap entry at or after the start of slice */
		for (j = 0; j < altused; j++) {
			if (enttblp[j].bad_start + enttblp[j].bad_end >= slcb)
				break;
		}
		dkp->dk_slc_ent[i] = enttblp + j;

		/* count remap entries until end of slice */
		for (; j < altused && enttblp[j].bad_start <= slce; j++) {
			dkp->dk_slc_cnt[i] += 1;
		}
	}

	rw_exit(&dkp->dk_bbh_mutex);
	return;

empty:
	rw_enter(&dkp->dk_bbh_mutex, RW_WRITER);
empty1:
	if (handle && handle != dkp->dk_alts_hdl)
		(void) dadk_iob_free(DKTP_DATA, handle);

	if (dkp->dk_alts_hdl) {
		(void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
		dkp->dk_alts_hdl = NULL;
	}

	rw_exit(&dkp->dk_bbh_mutex);
}

/*ARGSUSED*/
static bbh_cookie_t
cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle)
{
	struct bbh_handle *hp;
	bbh_cookie_t ckp;

	hp = (struct bbh_handle *)handle;
	ckp = hp->h_cktab + hp->h_idx;
	hp->h_idx++;
	return (ckp);
}

/*ARGSUSED*/
static void
cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle)
{
	struct bbh_handle *hp;

	hp = (struct bbh_handle *)handle;
	kmem_free(handle, (sizeof (struct bbh_handle) +
	    (hp->h_totck * (sizeof (struct bbh_cookie)))));
}
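
/*
 * cmdk_bbh_htoc() and cmdk_bbh_freehandle() above, together with
 * cmdk_bbh_gethandle() below, implement the bbh_objops vector handed to
 * dadk in cmdk_create_obj(): dadk calls gethandle once per buf to plan
 * the split, htoc repeatedly to walk the resulting cookies, and
 * freehandle when the transfer is done (a sketch of the protocol as
 * used here, not a formal specification).
 */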

/*
 * cmdk_bbh_gethandle remaps the bad sectors to alternates.
 * There are 7 different cases when the comparison is made
 * between the bad sector cluster and the disk section.
 *
 *	bad sector cluster	gggggggggggbbbbbbbggggggggggg
 *	case 1:			   ddddd
 *	case 2:				   -d-----
 *	case 3:					     ddddd
 *	case 4:			        dddddddddddd
 *	case 5:			     ddddddd-----
 *	case 6:				  ---ddddddd
 *	case 7:				  ddddddd
 *
 * where:  g = good sector,	b = bad sector
 *	   d = sector in disk section
 *	   - = disk section may be extended to cover that disk area
 */
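
/*
 * Worked example (made-up numbers) of the CASE 4 split below: for a
 * request covering sectors 100..119 that crosses a bad cluster at
 * 105..107 with alternates at good_start = 5000, the handle ends up
 * with three cookies:
 *
 *	ck[0] = { .ck_sector = 100,  .ck_seclen = 5 }	sectors 100..104
 *	ck[1] = { .ck_sector = 5000, .ck_seclen = 3 }	remapped 105..107
 *	ck[2] = { .ck_sector = 108,  .ck_seclen = 12 }	sectors 108..119
 */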

static opaque_t
cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp)
{
	struct cmdk		*dkp = (struct cmdk *)bbh_data;
	struct bbh_handle	*hp;
	struct bbh_cookie	*ckp;
	struct alts_ent		*altp;
	uint32_t		alts_used;
	uint32_t		part = CMDKPART(bp->b_edev);
	daddr32_t		lastsec;
	long			d_count;
	int			i;
	int			idx;
	int			cnt;

	if (part >= V_NUMPAR)
		return (NULL);

	/*
	 * This if statement is atomic and it will succeed
	 * if there are no bad blocks (almost always)
	 *
	 * so this if is performed outside of the rw_enter for speed
	 * and then repeated inside the rw_enter for safety
	 */
	if (!dkp->dk_alts_hdl) {
		return (NULL);
	}

	rw_enter(&dkp->dk_bbh_mutex, RW_READER);

	if (dkp->dk_alts_hdl == NULL) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}

	alts_used = dkp->dk_slc_cnt[part];
	if (alts_used == 0) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}
	altp = dkp->dk_slc_ent[part];

	/*
	 * binary search for the largest bad sector index in the alternate
	 * entry table which overlaps or is larger than the starting d_sec
	 */
	i = cmdk_bbh_bsearch(altp, alts_used, GET_BP_SEC(bp));
	/* if starting sector is > the largest bad sector, return */
	if (i == -1) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}
	/* i is the starting index.  Set altp to the starting entry addr */
	altp += i;

	d_count = bp->b_bcount >> SCTRSHFT;
	lastsec = GET_BP_SEC(bp) + d_count - 1;

	/* calculate the number of bad sectors */
	for (idx = i, cnt = 0; idx < alts_used; idx++, altp++, cnt++) {
		if (lastsec < altp->bad_start)
			break;
	}

	if (!cnt) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}

	/* calculate the maximum number of reserved cookies */
	cnt <<= 1;
	cnt++;

	/* allocate the handle */
	hp = (struct bbh_handle *)kmem_zalloc((sizeof (*hp) +
	    (cnt * sizeof (*ckp))), KM_SLEEP);

	hp->h_idx = 0;
	hp->h_totck = cnt;
	ckp = hp->h_cktab = (struct bbh_cookie *)(hp + 1);
	ckp[0].ck_sector = GET_BP_SEC(bp);
	ckp[0].ck_seclen = d_count;

	altp = dkp->dk_slc_ent[part];
	altp += i;
	for (idx = 0; i < alts_used; i++, altp++) {
		/* CASE 1: */
		if (lastsec < altp->bad_start)
			break;

		/* CASE 3: */
		if (ckp[idx].ck_sector > altp->bad_end)
			continue;

		/* CASE 2 and 7: */
		if ((ckp[idx].ck_sector >= altp->bad_start) &&
		    (lastsec <= altp->bad_end)) {
			ckp[idx].ck_sector = altp->good_start +
			    ckp[idx].ck_sector - altp->bad_start;
			break;
		}

		/* at least one bad sector in our section.  break it. */
		/* CASE 5: */
		if ((lastsec >= altp->bad_start) &&
		    (lastsec <= altp->bad_end)) {
			ckp[idx+1].ck_seclen = lastsec - altp->bad_start + 1;
			ckp[idx].ck_seclen -= ckp[idx+1].ck_seclen;
			ckp[idx+1].ck_sector = altp->good_start;
			break;
		}
		/* CASE 6: */
		if ((ckp[idx].ck_sector <= altp->bad_end) &&
		    (ckp[idx].ck_sector >= altp->bad_start)) {
			ckp[idx+1].ck_seclen = ckp[idx].ck_seclen;
			ckp[idx].ck_seclen = altp->bad_end -
			    ckp[idx].ck_sector + 1;
			ckp[idx+1].ck_seclen -= ckp[idx].ck_seclen;
			ckp[idx].ck_sector = altp->good_start +
			    ckp[idx].ck_sector - altp->bad_start;
			idx++;
			ckp[idx].ck_sector = altp->bad_end + 1;
			continue;	/* check rest of section */
		}

		/* CASE 4: */
		ckp[idx].ck_seclen = altp->bad_start - ckp[idx].ck_sector;
		ckp[idx+1].ck_sector = altp->good_start;
		ckp[idx+1].ck_seclen = altp->bad_end - altp->bad_start + 1;
		idx += 2;
		ckp[idx].ck_sector = altp->bad_end + 1;
		ckp[idx].ck_seclen = lastsec - altp->bad_end;
	}

	rw_exit(&dkp->dk_bbh_mutex);
	return ((opaque_t)hp);
}

static int
cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key)
{
	int	i;
	int	ind;
	int	interval;
	int	mystatus = -1;

	if (!cnt)
		return (mystatus);

	ind = 1;	/* compiler complains about possible uninitialized var */
	for (i = 1; i <= cnt; i <<= 1)
		ind = i;

	for (interval = ind; interval; ) {
		if ((key >= buf[ind-1].bad_start) &&
		    (key <= buf[ind-1].bad_end)) {
			return (ind-1);
		} else {
			interval >>= 1;
			if (key < buf[ind-1].bad_start) {
				/* record the largest bad sector index */
				mystatus = ind-1;
				if (!interval)
					break;
				ind = ind - interval;
			} else {
				/*
				 * if key is larger than the last element
				 * then break
				 */
				if ((ind == cnt) || !interval)
					break;
				if ((ind+interval) <= cnt)
					ind += interval;
			}
		}
	}
	return (mystatus);
}
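
/*
 * cmdk_bbh_bsearch() returns the index of the first entry whose bad
 * range contains or follows the key, or -1 if the key lies past the
 * last entry. For example (made-up numbers), with entries whose bad
 * ranges are {10..15, 100..107, 500..503}: key 103 yields 1 (direct
 * hit), key 120 yields 2 (the next cluster up), and key 600 yields -1.
 */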