/*
 * skd.c: Solaris 11/10 Driver for sTec, Inc. S112x PCIe SSD card
 *
 * Solaris driver is based on the Linux driver authored by:
 *
 * Authors/Alphabetical:	Dragan Stancevic <dstancevic@stec-inc.com>
 *				Gordon Waidhofer <gwaidhofer@stec-inc.com>
 *				John Hamilton	 <jhamilton@stec-inc.com>
 */

/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2013 STEC, Inc.  All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
 * Copyright 2019 Joyent, Inc.
 * Copyright 2019 Western Digital Corporation.
 */

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#include <sys/file.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/cred.h>
#include <sys/modctl.h>
#include <sys/debug.h>
#include <sys/list.h>
#include <sys/sysmacros.h>
#include <sys/errno.h>
#include <sys/pcie.h>
#include <sys/pci.h>
#include <sys/ddi.h>
#include <sys/dditypes.h>
#include <sys/sunddi.h>
#include <sys/atomic.h>
#include <sys/mutex.h>
#include <sys/param.h>
#include <sys/devops.h>
#include <sys/blkdev.h>
#include <sys/queue.h>
#include <sys/scsi/impl/inquiry.h>

#include "skd_s1120.h"
#include "skd.h"

int		skd_dbg_level = 0;

void		*skd_state = NULL;
int		skd_disable_msi = 0;
int		skd_disable_msix = 0;

/* Initialized in _init() and tunable, see _init(). */
clock_t		skd_timer_ticks;

/* I/O DMA attribute structure. */
static ddi_dma_attr_t skd_64bit_io_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	SKD_DMA_LOW_ADDRESS,		/* low DMA address range */
	SKD_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
	SKD_DMA_XFER_COUNTER,		/* DMA counter register */
	SKD_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment */
	SKD_DMA_BURSTSIZES,		/* DMA burstsizes */
	SKD_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
	SKD_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
	SKD_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	SKD_DMA_SG_LIST_LENGTH,		/* s/g list length */
	SKD_DMA_GRANULARITY,		/* granularity of device */
	SKD_DMA_XFER_FLAGS		/* DMA transfer flags */
};
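
/*
 * Illustrative sketch (not part of the driver proper): a DMA handle
 * bound with the attributes above would typically be allocated via
 * ddi_dma_alloc_handle(9F), e.g.
 *
 *	ddi_dma_handle_t hdl;
 *	if (ddi_dma_alloc_handle(dip, &skd_64bit_io_dma_attr,
 *	    DDI_DMA_SLEEP, NULL, &hdl) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *
 * The SKD_DMA_* values themselves come from skd.h and bound what the
 * s1120 can address: alignment, burst sizes, maximum transfer size and
 * the scatter/gather list length consumed by skd_blkdev_preop_sg_list().
 */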

int		skd_isr_type = -1;

#define	SKD_MAX_QUEUE_DEPTH	    255
#define	SKD_MAX_QUEUE_DEPTH_DEFAULT 64
int		skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;

#define	SKD_MAX_REQ_PER_MSG	    14
#define	SKD_MAX_REQ_PER_MSG_DEFAULT 1
int		skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;

#define	SKD_MAX_N_SG_PER_REQ	    4096
int		skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;

static int skd_sys_quiesce_dev(dev_info_t *);
static int skd_quiesce_dev(skd_device_t *);
static int skd_list_skmsg(skd_device_t *, int);
static int skd_list_skreq(skd_device_t *, int);
static int skd_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int skd_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static int skd_format_internal_skspcl(struct skd_device *skdev);
static void skd_start(skd_device_t *);
static void skd_destroy_mutex(skd_device_t *skdev);
static void skd_enable_interrupts(struct skd_device *);
static void skd_request_fn_not_online(skd_device_t *skdev);
static void skd_send_internal_skspcl(struct skd_device *,
    struct skd_special_context *, uint8_t);
static void skd_queue(skd_device_t *, skd_buf_private_t *);
static void *skd_alloc_dma_mem(skd_device_t *, dma_mem_t *, uint8_t);
static void skd_release_intr(skd_device_t *skdev);
static void skd_isr_fwstate(struct skd_device *skdev);
static void skd_isr_msg_from_dev(struct skd_device *skdev);
static void skd_soft_reset(struct skd_device *skdev);
static void skd_refresh_device_data(struct skd_device *skdev);
static void skd_update_props(skd_device_t *, dev_info_t *);
static void skd_end_request_abnormal(struct skd_device *, skd_buf_private_t *,
    int, int);
static char *skd_pci_info(struct skd_device *skdev, char *str, size_t len);

static skd_buf_private_t *skd_get_queued_pbuf(skd_device_t *);

static void skd_bd_driveinfo(void *arg, bd_drive_t *drive);
static int  skd_bd_mediainfo(void *arg, bd_media_t *media);
static int  skd_bd_read(void *arg, bd_xfer_t *xfer);
static int  skd_bd_write(void *arg, bd_xfer_t *xfer);
static int  skd_devid_init(void *arg, dev_info_t *, ddi_devid_t *);


static bd_ops_t skd_bd_ops = {
	BD_OPS_CURRENT_VERSION,
	skd_bd_driveinfo,
	skd_bd_mediainfo,
	skd_devid_init,
	NULL,			/* sync_cache */
	skd_bd_read,
	skd_bd_write,
	NULL,			/* free_space */
};

static ddi_device_acc_attr_t	dev_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Solaris module loading/unloading structures
 */
struct dev_ops skd_dev_ops = {
	DEVO_REV,			/* devo_rev */
	0,				/* refcnt */
	ddi_no_info,			/* getinfo */
	nulldev,			/* identify */
	nulldev,			/* probe */
	skd_attach,			/* attach */
	skd_detach,			/* detach */
	nodev,				/* reset */
	NULL,				/* char/block ops */
	NULL,				/* bus operations */
	NULL,				/* power management */
	skd_sys_quiesce_dev		/* quiesce */
};

static struct modldrv modldrv = {
	&mod_driverops,			/* type of module: driver */
	"sTec skd v" DRV_VER_COMPL,	/* name of module */
	&skd_dev_ops			/* driver dev_ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};

/*
 * sTec-required wrapper for debug printing.
 */
/*PRINTFLIKE2*/
static inline void
Dcmn_err(int lvl, const char *fmt, ...)
{
	va_list ap;

	if (skd_dbg_level == 0)
		return;

	va_start(ap, fmt);
	vcmn_err(lvl, fmt, ap);
	va_end(ap);
}

/*
 * Solaris module loading/unloading routines
 */

/*
 *
 * Name:	_init, performs initial installation
 *
 * Inputs:	None.
 *
 * Returns:	The value returned by ddi_soft_state_init() on a failure
 *		to create the device state structure, otherwise the
 *		result of the module install routines.
 *
 */
int
_init(void)
{
	int		rval = 0;
	int		tgts = 0;

	tgts |= 0x02;
	tgts |= 0x08;	/* In #ifdef NEXENTA block from original sTec drop. */

	/*
	 * drv_usectohz() is a function, so can't initialize it at
	 * instantiation.
	 */
	skd_timer_ticks = drv_usectohz(1000000);

	Dcmn_err(CE_NOTE,
	    "<# Installing skd Driver dbg-lvl=%d %s %x>",
	    skd_dbg_level, DRV_BUILD_ID, tgts);

	rval = ddi_soft_state_init(&skd_state, sizeof (skd_device_t), 0);
	if (rval != DDI_SUCCESS)
		return (rval);

	bd_mod_init(&skd_dev_ops);

	rval = mod_install(&modlinkage);
	if (rval != DDI_SUCCESS) {
		ddi_soft_state_fini(&skd_state);
		bd_mod_fini(&skd_dev_ops);
	}

	return (rval);
}

/*
 *
 * Name:	_info, returns information about loadable module.
 *
 * Inputs:	modinfop, pointer to module information structure.
 *
 * Returns:	Value returned by mod_info().
 *
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * _fini	Prepares a module for unloading. It is called when the system
 *		wants to unload a module. If the module determines that it can
 *		be unloaded, then _fini() returns the value returned by
 *		mod_remove(). Upon successful return from _fini() no other
 *		routine in the module will be called before _init() is called.
 *
 * Inputs:	None.
 *
 * Returns:	DDI_SUCCESS or DDI_FAILURE.
 *
 */
int
_fini(void)
{
	int rval;

	rval = mod_remove(&modlinkage);
	if (rval == DDI_SUCCESS) {
		ddi_soft_state_fini(&skd_state);
		bd_mod_fini(&skd_dev_ops);
	}

	return (rval);
}

/*
 * Solaris Register read/write routines
 */

/*
 *
 * Name:	skd_reg_write64, writes a 64-bit value to specified address
 *
 * Inputs:	skdev		- device state structure.
 *		val		- 64-bit value to be written.
 *		offset		- offset from PCI base address.
 *
 * Returns:	Nothing.
 *
 */
/*
 * Local vars are to keep lint silent.  Any compiler worth its weight will
 * optimize it all right out...
 */
static inline void
skd_reg_write64(struct skd_device *skdev, uint64_t val, uint32_t offset)
{
	uint64_t *addr;

	ASSERT((offset & 0x7) == 0);
	/* LINTED */
	addr = (uint64_t *)(skdev->dev_iobase + offset);
	ddi_put64(skdev->dev_handle, addr, val);
}
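
/*
 * Usage note (illustrative, an assumption about skd.h): the SKD_WRITEQ(),
 * SKD_READL() and SKD_WRITEL() macros used throughout this file are
 * presumed to be thin wrappers around these accessors, e.g. something like
 *
 *	#define	SKD_WRITEQ(skdev, val, off)	skd_reg_write64(skdev, val, off)
 *
 * so that every device access goes through ddi_get*()/ddi_put*() with the
 * little-endian, strictly-ordered attributes in dev_acc_attr above.
 */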

/*
 *
 * Name:	skd_reg_read32, reads a 32-bit value from specified address
 *
 * Inputs:	skdev		- device state structure.
 *		offset		- offset from PCI base address.
 *
 * Returns:	val, 32-bit value read from specified PCI address.
 *
 */
static inline uint32_t
skd_reg_read32(struct skd_device *skdev, uint32_t offset)
{
	uint32_t *addr;

	ASSERT((offset & 0x3) == 0);
	/* LINTED */
	addr = (uint32_t *)(skdev->dev_iobase + offset);
	return (ddi_get32(skdev->dev_handle, addr));
}

/*
 *
 * Name:	skd_reg_write32, writes a 32-bit value to specified address
 *
 * Inputs:	skdev		- device state structure.
 *		val		- value to be written.
 *		offset		- offset from PCI base address.
 *
 * Returns:	Nothing.
 *
 */
static inline void
skd_reg_write32(struct skd_device *skdev, uint32_t val, uint32_t offset)
{
	uint32_t *addr;

	ASSERT((offset & 0x3) == 0);
	/* LINTED */
	addr = (uint32_t *)(skdev->dev_iobase + offset);
	ddi_put32(skdev->dev_handle, addr, val);
}


/*
 * Solaris skd routines
 */

/*
 *
 * Name:	skd_name, generates the name of the driver.
 *
 * Inputs:	skdev	- device state structure
 *
 * Returns:	char pointer to generated driver name.
 *
 */
static const char *
skd_name(struct skd_device *skdev)
{
	(void) snprintf(skdev->id_str, sizeof (skdev->id_str), "%s:", DRV_NAME);

	return (skdev->id_str);
}

/*
 *
 * Name:	skd_pci_find_capability, searches the PCI capability
 *		list for the specified capability.
 *
 * Inputs:	skdev		- device state structure.
 *		cap		- capability sought.
 *
 * Returns:	Returns position where capability was found.
 *		If not found, returns zero.
 *
 */
static int
skd_pci_find_capability(struct skd_device *skdev, int cap)
{
	uint16_t status;
	uint8_t	 pos, id, hdr;
	int	 ttl = 48;

	status = pci_config_get16(skdev->pci_handle, PCI_CONF_STAT);

	if (!(status & PCI_STAT_CAP))
		return (0);

	hdr = pci_config_get8(skdev->pci_handle, PCI_CONF_HEADER);

	if ((hdr & PCI_HEADER_TYPE_M) != 0)
		return (0);

	pos = pci_config_get8(skdev->pci_handle, PCI_CONF_CAP_PTR);

	while (ttl-- && pos >= 0x40) {
		pos &= ~3;
		id = pci_config_get8(skdev->pci_handle, pos+PCI_CAP_ID);
		if (id == 0xff)
			break;
		if (id == cap)
			return (pos);
		pos = pci_config_get8(skdev->pci_handle, pos+PCI_CAP_NEXT_PTR);
	}

	return (0);
}
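
/*
 * For reference (standard PCI layout, not driver-specific): the capability
 * walk above follows a singly linked list in config space.  Each node is
 * at least two bytes:
 *
 *	pos + 0:  capability ID (e.g. 0x05 = MSI, 0x10 = PCI Express)
 *	pos + 1:  offset of the next capability; 0x00 terminates the list
 *
 * The first node lives at the offset stored in PCI_CONF_CAP_PTR (0x34),
 * valid nodes must sit at or above offset 0x40, and the ttl counter of 48
 * bounds the walk so a corrupt list cannot loop forever.
 */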

/*
 *
 * Name:	skd_io_done, called to conclude an I/O operation.
 *
 * Inputs:	skdev		- device state structure.
 *		pbuf		- I/O request
 *		error		- contains error value.
 *		mode		- debug only.
 *
 * Returns:	Nothing.
 *
 */
static void
skd_io_done(skd_device_t *skdev, skd_buf_private_t *pbuf,
    int error, int mode)
{
	bd_xfer_t *xfer;

	ASSERT(pbuf != NULL);

	xfer = pbuf->x_xfer;

	switch (mode) {
	case SKD_IODONE_WIOC:
		skdev->iodone_wioc++;
		break;
	case SKD_IODONE_WNIOC:
		skdev->iodone_wnioc++;
		break;
	case SKD_IODONE_WDEBUG:
		skdev->iodone_wdebug++;
		break;
	default:
		skdev->iodone_unknown++;
	}

	if (error) {
		skdev->ios_errors++;
		cmn_err(CE_WARN,
		    "!%s:skd_io_done:ERR=%d %lld-%ld %s", skdev->name,
		    error, xfer->x_blkno, xfer->x_nblks,
		    (pbuf->dir & B_READ) ? "Read" : "Write");
	}

	kmem_free(pbuf, sizeof (skd_buf_private_t));

	bd_xfer_done(xfer, error);
}

/*
 * QUIESCE DEVICE
 */

/*
 *
 * Name:	skd_sys_quiesce_dev, quiets the device
 *
 * Inputs:	dip		- dev info structure
 *
 * Returns:	Zero.
 *
 */
static int
skd_sys_quiesce_dev(dev_info_t *dip)
{
	skd_device_t	*skdev;

	skdev = ddi_get_soft_state(skd_state, ddi_get_instance(dip));

	/* make sure Dcmn_err() doesn't actually print anything */
	skd_dbg_level = 0;

	skd_disable_interrupts(skdev);
	skd_soft_reset(skdev);

	return (0);
}

/*
 *
 * Name:	skd_quiesce_dev, quiets the device, but doesn't really do much.
 *
 * Inputs:	skdev		- Device state.
 *
 * Returns:	-EINVAL if device is not in proper state otherwise
 *		returns zero.
 *
 */
static int
skd_quiesce_dev(skd_device_t *skdev)
{
	int rc = 0;

	if (skd_dbg_level)
		Dcmn_err(CE_NOTE, "skd_quiesce_dev:");

	switch (skdev->state) {
	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
		Dcmn_err(CE_NOTE, "%s: stopping queue", skdev->name);
		break;
	case SKD_DRVR_STATE_ONLINE:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
	case SKD_DRVR_STATE_STARTING:
	case SKD_DRVR_STATE_RESTARTING:
	case SKD_DRVR_STATE_RESUMING:
	default:
		rc = -EINVAL;
		cmn_err(CE_NOTE, "state [%d] not implemented", skdev->state);
	}

	return (rc);
}

/*
 * UNQUIESCE DEVICE:
 * Note: Assumes lock is held to protect device state.
 */
/*
 *
 * Name:	skd_unquiesce_dev, awakens the device
 *
 * Inputs:	skdev		- Device state.
 *
 * Returns:	-EINVAL if device is not in proper state otherwise
 *		returns zero.
 *
 */
static int
skd_unquiesce_dev(struct skd_device *skdev)
{
	Dcmn_err(CE_NOTE, "skd_unquiesce_dev:");

	skd_log_skdev(skdev, "unquiesce");
	if (skdev->state == SKD_DRVR_STATE_ONLINE) {
		Dcmn_err(CE_NOTE, "**** device already ONLINE");

		return (0);
	}
	if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
		/*
		 * If there has been a state change to other than
		 * ONLINE, we will rely on controller state change
		 * to come back online and restart the queue.
		 * The BUSY state means that driver is ready to
		 * continue normal processing but waiting for controller
		 * to become available.
		 */
		skdev->state = SKD_DRVR_STATE_BUSY;
		Dcmn_err(CE_NOTE, "drive BUSY state\n");

		return (0);
	}
	/*
	 * Drive just came online, driver is either in startup,
	 * paused performing a task, or busy waiting for hardware.
	 */
	switch (skdev->state) {
	case SKD_DRVR_STATE_PAUSED:
	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
	case SKD_DRVR_STATE_STARTING:
	case SKD_DRVR_STATE_RESTARTING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_IDLE:
	case SKD_DRVR_STATE_LOAD:
		skdev->state = SKD_DRVR_STATE_ONLINE;
		Dcmn_err(CE_NOTE, "%s: sTec s1120 ONLINE", skdev->name);
		Dcmn_err(CE_NOTE, "%s: Starting request queue", skdev->name);
		Dcmn_err(CE_NOTE,
		    "%s: queue depth limit=%d hard=%d soft=%d lowat=%d",
		    skdev->name,
		    skdev->queue_depth_limit,
		    skdev->hard_queue_depth_limit,
		    skdev->soft_queue_depth_limit,
		    skdev->queue_depth_lowat);

		skdev->gendisk_on = 1;
		cv_signal(&skdev->cv_waitq);
		break;
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		cmn_err(CE_NOTE, "**** driver state %d, not implemented\n",
		    skdev->state);
		return (-EBUSY);
	}

	return (0);
}

/*
 * READ/WRITE REQUESTS
 */

/*
 *
 * Name:	skd_blkdev_preop_sg_list, builds the S/G list from info
 *		passed in by the blkdev driver.
 *
 * Inputs:	skdev		- device state structure.
 *		skreq		- request structure.
 *		sg_byte_count	- data transfer byte count.
 *
 * Returns:	Nothing.
 *
 */
/*ARGSUSED*/
static void
skd_blkdev_preop_sg_list(struct skd_device *skdev,
    struct skd_request_context *skreq, uint32_t *sg_byte_count)
{
	bd_xfer_t		*xfer;
	skd_buf_private_t	*pbuf;
	int			i, bcount = 0;
	uint_t			n_sg;

	*sg_byte_count = 0;

	ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
	    skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST);

	pbuf = skreq->pbuf;
	ASSERT(pbuf != NULL);

	xfer = pbuf->x_xfer;
	n_sg = xfer->x_ndmac;

	ASSERT(n_sg <= skdev->sgs_per_request);

	skreq->n_sg = n_sg;

	skreq->io_dma_handle = xfer->x_dmah;

	skreq->total_sg_bcount = 0;

	for (i = 0; i < n_sg; i++) {
		ddi_dma_cookie_t *cookiep = &xfer->x_dmac;
		struct fit_sg_descriptor *sgd;
		uint32_t cnt = (uint32_t)cookiep->dmac_size;

		bcount += cnt;

		sgd			= &skreq->sksg_list[i];
		sgd->control		= FIT_SGD_CONTROL_NOT_LAST;
		sgd->byte_count		= cnt;
		sgd->host_side_addr	= cookiep->dmac_laddress;
		sgd->dev_side_addr	= 0; /* not used */
		*sg_byte_count		+= cnt;

		skreq->total_sg_bcount += cnt;

		if ((i + 1) != n_sg)
			ddi_dma_nextcookie(skreq->io_dma_handle, &xfer->x_dmac);
	}

	skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
	skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;

	(void) ddi_dma_sync(skreq->sksg_dma_address.dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
}

/*
 *
 * Name:	skd_blkdev_postop_sg_list, deallocates DMA
 *
 * Inputs:	skdev		- device state structure.
 *		skreq		- skreq data structure.
 *
 * Returns:	Nothing.
 *
 */
/* ARGSUSED */	/* Upstream common source with other platforms. */
static void
skd_blkdev_postop_sg_list(struct skd_device *skdev,
    struct skd_request_context *skreq)
{
	/*
	 * restore the next ptr for next IO request so we
	 * don't have to set it every time.
	 */
	skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
	    skreq->sksg_dma_address.cookies->dmac_laddress +
	    ((skreq->n_sg) * sizeof (struct fit_sg_descriptor));
}
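
/*
 * Illustrative layout (derived from the two routines above): each request
 * owns a pre-allocated, DMA-visible array of fit_sg_descriptor entries
 * whose next_desc_ptr fields normally chain each entry to the one after
 * it.  The preop routine overwrites the first n_sg entries from the
 * blkdev cookies and terminates the chain; the postop routine re-links
 * the last used entry so the array is fully chained again for the next
 * I/O:
 *
 *	sksg_list[0] --> sksg_list[1] --> ... --> sksg_list[n_sg-1] --> 0
 *						  (FIT_SGD_CONTROL_LAST)
 */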

/*
 *
 * Name:	skd_start, initiates an I/O.
 *
 * Inputs:	skdev		- device state structure.
 *
 * Returns:	Nothing.
 *
 */
/* Upstream common source with other platforms. */
static void
skd_start(skd_device_t *skdev)
{
	struct skd_fitmsg_context	*skmsg = NULL;
	struct fit_msg_hdr		*fmh = NULL;
	struct skd_request_context	*skreq = NULL;
	struct waitqueue		*waitq = &skdev->waitqueue;
	struct skd_scsi_request		*scsi_req;
	skd_buf_private_t		*pbuf = NULL;
	int				bcount;

	uint32_t			lba;
	uint32_t			count;
	uint32_t			timo_slot;
	void				*cmd_ptr;
	uint32_t			sg_byte_count = 0;

	/*
	 * Stop conditions:
	 *  - There are no more native requests
	 *  - There are already the maximum number of requests in progress
	 *  - There are no more skd_request_context entries
	 *  - There are no more FIT msg buffers
	 */
	for (;;) {
		/* Are too many requests already in progress? */
		if (skdev->queue_depth_busy >= skdev->queue_depth_limit) {
			Dcmn_err(CE_NOTE, "qdepth %d, limit %d\n",
			    skdev->queue_depth_busy,
			    skdev->queue_depth_limit);
			break;
		}

		WAITQ_LOCK(skdev);
		if (SIMPLEQ_EMPTY(waitq)) {
			WAITQ_UNLOCK(skdev);
			break;
		}

		/* Is a skd_request_context available? */
		skreq = skdev->skreq_free_list;
		if (skreq == NULL) {
			WAITQ_UNLOCK(skdev);
			break;
		}

		ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
		ASSERT((skreq->id & SKD_ID_INCR) == 0);

		skdev->skreq_free_list = skreq->next;

		skreq->state = SKD_REQ_STATE_BUSY;
		skreq->id += SKD_ID_INCR;

		/* Start a new FIT msg if there is none in progress. */
		if (skmsg == NULL) {
			/* Are there any FIT msg buffers available? */
			skmsg = skdev->skmsg_free_list;
			if (skmsg == NULL) {
				WAITQ_UNLOCK(skdev);
				break;
			}

			ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
			ASSERT((skmsg->id & SKD_ID_INCR) == 0);

			skdev->skmsg_free_list = skmsg->next;

			skmsg->state = SKD_MSG_STATE_BUSY;
			skmsg->id += SKD_ID_INCR;

			/* Initialize the FIT msg header */
			fmh = (struct fit_msg_hdr *)skmsg->msg_buf64;
			bzero(fmh, sizeof (*fmh)); /* Too expensive */
			fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
			skmsg->length = sizeof (struct fit_msg_hdr);
		}

		/*
		 * At this point we are committed to either start or reject
		 * the native request. Note that a FIT msg may have just been
		 * started but contains no SoFIT requests yet.
		 * Now - dequeue pbuf.
		 */
		pbuf = skd_get_queued_pbuf(skdev);
		WAITQ_UNLOCK(skdev);

		skreq->pbuf = pbuf;
		lba = pbuf->x_xfer->x_blkno;
		count = pbuf->x_xfer->x_nblks;
		skreq->did_complete = 0;

		skreq->fitmsg_id = skmsg->id;

		Dcmn_err(CE_NOTE,
		    "pbuf=%p lba=%u(0x%x) count=%u(0x%x) dir=%x\n",
		    (void *)pbuf, lba, lba, count, count, pbuf->dir);

		/*
		 * Transcode the request.
		 */
		cmd_ptr = &skmsg->msg_buf[skmsg->length];
		bzero(cmd_ptr, 32); /* This is too expensive */

		scsi_req = cmd_ptr;
		scsi_req->hdr.tag = skreq->id;
		scsi_req->hdr.sg_list_dma_address =
		    cpu_to_be64(skreq->sksg_dma_address.cookies->dmac_laddress);
		scsi_req->cdb[1] = 0;
		scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
		scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
		scsi_req->cdb[4] = (lba & 0xff00) >> 8;
		scsi_req->cdb[5] = (lba & 0xff);
		scsi_req->cdb[6] = 0;
		scsi_req->cdb[7] = (count & 0xff00) >> 8;
		scsi_req->cdb[8] = count & 0xff;
		scsi_req->cdb[9] = 0;

		if (pbuf->dir & B_READ) {
			scsi_req->cdb[0] = 0x28;
			skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
		} else {
			scsi_req->cdb[0] = 0x2a;
			skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
		}

		skd_blkdev_preop_sg_list(skdev, skreq, &sg_byte_count);

		scsi_req->hdr.sg_list_len_bytes = cpu_to_be32(sg_byte_count);

		bcount = (sg_byte_count + 511) / 512;
		scsi_req->cdb[7] = (bcount & 0xff00) >> 8;
		scsi_req->cdb[8] = bcount & 0xff;

		Dcmn_err(CE_NOTE,
		    "skd_start: pbuf=%p skreq->id=%x opc=%x ====>>>>>",
		    (void *)pbuf, skreq->id, *scsi_req->cdb);

		skmsg->length += sizeof (struct skd_scsi_request);
		fmh->num_protocol_cmds_coalesced++;

		/*
		 * Update the active request counts.
		 * Capture the timeout timestamp.
		 */
		skreq->timeout_stamp = skdev->timeout_stamp;
		timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;

		atomic_inc_32(&skdev->timeout_slot[timo_slot]);
		atomic_inc_32(&skdev->queue_depth_busy);

		Dcmn_err(CE_NOTE, "req=0x%x busy=%d timo_slot=%d",
		    skreq->id, skdev->queue_depth_busy, timo_slot);
		/*
		 * If the FIT msg buffer is full send it.
		 */
		if (skmsg->length >= SKD_N_FITMSG_BYTES ||
		    fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {

			atomic_inc_64(&skdev->active_cmds);
			pbuf->skreq = skreq;

			skdev->fitmsg_sent1++;
			skd_send_fitmsg(skdev, skmsg);

			skmsg = NULL;
			fmh = NULL;
		}
	}

	/*
	 * Is a FIT msg in progress? If it is empty put the buffer back
	 * on the free list. If it is non-empty send what we got.
	 * This minimizes latency when there are fewer requests than
	 * what fits in a FIT msg.
	 */
	if (skmsg != NULL) {
		ASSERT(skmsg->length > sizeof (struct fit_msg_hdr));
		Dcmn_err(CE_NOTE, "sending msg=%p, len %d",
		    (void *)skmsg, skmsg->length);

		skdev->active_cmds++;

		skdev->fitmsg_sent2++;
		skd_send_fitmsg(skdev, skmsg);
	}
}
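
/*
 * For reference, the command built above is a standard 10-byte SCSI
 * READ(10)/WRITE(10) CDB, so the transcoding is just byte packing:
 *
 *	cdb[0]		opcode: 0x28 = READ(10), 0x2a = WRITE(10)
 *	cdb[2..5]	logical block address, big-endian
 *	cdb[7..8]	transfer length in blocks, big-endian
 *
 * Note that the transfer length is rewritten after
 * skd_blkdev_preop_sg_list() runs, using the actual mapped byte count
 * rounded up to 512-byte blocks.
 */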

/*
 *
 * Name:	skd_end_request
 *
 * Inputs:	skdev		- device state structure.
 *		skreq		- request structure.
 *		error		- I/O error value.
 *
 * Returns:	Nothing.
 *
 */
static void
skd_end_request(struct skd_device *skdev,
    struct skd_request_context *skreq, int error)
{
	skdev->ios_completed++;
	skd_io_done(skdev, skreq->pbuf, error, SKD_IODONE_WIOC);
	skreq->pbuf = NULL;
	skreq->did_complete = 1;
}

/*
 *
 * Name:	skd_end_request_abnormal
 *
 * Inputs:	skdev		- device state structure.
 *		pbuf		- I/O request.
 *		error		- I/O error value.
 *		mode		- debug
 *
 * Returns:	Nothing.
 *
 */
static void
skd_end_request_abnormal(skd_device_t *skdev, skd_buf_private_t *pbuf,
    int error, int mode)
{
	skd_io_done(skdev, pbuf, error, mode);
}

/*
 *
 * Name:	skd_request_fn_not_online, handles the condition
 *		of the device not being online.
 *
 * Inputs:	skdev		- device state structure.
 *
 * Returns:	nothing (void).
 *
 */
static void
skd_request_fn_not_online(skd_device_t *skdev)
{
	int error;
	skd_buf_private_t *pbuf;

	ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);

	skd_log_skdev(skdev, "req_not_online");

	switch (skdev->state) {
	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
	case SKD_DRVR_STATE_STARTING:
	case SKD_DRVR_STATE_RESTARTING:
	case SKD_DRVR_STATE_WAIT_BOOT:
		/*
		 * In case of starting, we haven't started the queue,
		 * so we can't get here... but requests are
		 * possibly hanging out waiting for us because we
		 * reported the dev/skd/0 already. They'll wait
		 * forever if connect doesn't complete.
		 * What to do??? delay dev/skd/0 ??
		 */
	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		return;

	case SKD_DRVR_STATE_BUSY_SANITIZE:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		error = -EIO;
		break;
	}

	/*
	 * If we get here, terminate all pending block requests
	 * with EIO and any scsi pass thru with appropriate sense
	 */
	ASSERT(WAITQ_LOCK_HELD(skdev));
	if (SIMPLEQ_EMPTY(&skdev->waitqueue))
		return;

	while ((pbuf = skd_get_queued_pbuf(skdev)))
		skd_end_request_abnormal(skdev, pbuf, error, SKD_IODONE_WNIOC);

	cv_signal(&skdev->cv_waitq);
}

/*
 * TIMER
 */

static void skd_timer_tick_not_online(struct skd_device *skdev);

/*
 *
 * Name:	skd_timer_tick, monitors requests for timeouts.
 *
 * Inputs:	skdev		- device state structure.
 *
 * Returns:	Nothing.
 *
 */
static void
skd_timer_tick(skd_device_t *skdev)
{
	uint32_t timo_slot;

	skdev->timer_active = 1;

	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
		skd_timer_tick_not_online(skdev);
		goto timer_func_out;
	}

	skdev->timeout_stamp++;
	timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;

	/*
	 * All requests that happened during the previous use of
	 * this slot should be done by now. The previous use was
	 * over 7 seconds ago.
	 */
	if (skdev->timeout_slot[timo_slot] == 0) {
		goto timer_func_out;
	}

	/* Something is overdue */
	Dcmn_err(CE_NOTE, "found %d timeouts, draining busy=%d",
	    skdev->timeout_slot[timo_slot],
	    skdev->queue_depth_busy);
	skdev->timer_countdown = SKD_TIMER_SECONDS(3);
	skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
	skdev->timo_slot = timo_slot;

timer_func_out:
	skdev->timer_active = 0;
}
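
/*
 * How the timeout slots work (a sketch; assumes SKD_TIMEOUT_SLOT_MASK is
 * 0x7, i.e. eight slots, which matches the "over 7 seconds ago" remark
 * above): timeout_stamp advances once per one-second tick, and each
 * in-flight request is counted in the slot
 *
 *	slot = timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
 *
 * captured when it was issued.  The timer revisits a given slot every
 * eight ticks, so any nonzero count still found there belongs to a
 * request that has been outstanding for at least seven seconds and is
 * therefore overdue.
 */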

/*
 *
 * Name:	skd_timer_tick_not_online, handles various device
 *		state transitions.
 *
 * Inputs:	skdev		- device state structure.
 *
 * Returns:	Nothing.
 *
 */
static void
skd_timer_tick_not_online(struct skd_device *skdev)
{
	Dcmn_err(CE_NOTE, "skd_timer_tick_not_online: state=%d tmo=%d",
	    skdev->state, skdev->timer_countdown);

	ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);

	switch (skdev->state) {
	case SKD_DRVR_STATE_IDLE:
	case SKD_DRVR_STATE_LOAD:
		break;
	case SKD_DRVR_STATE_BUSY_SANITIZE:
		cmn_err(CE_WARN, "!drive busy sanitize[%x], driver[%x]\n",
		    skdev->drive_state, skdev->state);
		break;

	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
		Dcmn_err(CE_NOTE, "busy[%x], countdown=%d\n",
		    skdev->state, skdev->timer_countdown);
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		cmn_err(CE_WARN, "!busy[%x], timed out=%d, restarting device.",
		    skdev->state, skdev->timer_countdown);
		skd_restart_device(skdev);
		break;

	case SKD_DRVR_STATE_WAIT_BOOT:
	case SKD_DRVR_STATE_STARTING:
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		/*
		 * For now, we fault the drive.  Could attempt resets to
		 * recover at some point.
		 */
		skdev->state = SKD_DRVR_STATE_FAULT;

		cmn_err(CE_WARN, "!(%s): DriveFault Connect Timeout (%x)",
		    skd_name(skdev), skdev->drive_state);

		/* start the queue so we can respond with error to requests */
		skd_start(skdev);

		/* wakeup anyone waiting for startup complete */
		skdev->gendisk_on = -1;

		cv_signal(&skdev->cv_waitq);
		break;


	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
		break;

	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		cmn_err(CE_WARN,
		    "!%s: draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
		    skdev->name,
		    skdev->timo_slot,
		    skdev->timer_countdown,
		    skdev->queue_depth_busy,
		    skdev->timeout_slot[skdev->timo_slot]);
		/* if the slot has cleared we can let the I/O continue */
		if (skdev->timeout_slot[skdev->timo_slot] == 0) {
			Dcmn_err(CE_NOTE, "Slot drained, starting queue.");
			skdev->state = SKD_DRVR_STATE_ONLINE;
			skd_start(skdev);
			return;
		}
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		skd_restart_device(skdev);
		break;

	case SKD_DRVR_STATE_RESTARTING:
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;

			return;
		}
		/*
		 * For now, we fault the drive.  Could attempt resets to
		 * recover at some point.
		 */
		skdev->state = SKD_DRVR_STATE_FAULT;
		cmn_err(CE_WARN, "!(%s): DriveFault Reconnect Timeout (%x)\n",
		    skd_name(skdev), skdev->drive_state);

		/*
		 * Recovering does two things:
		 * 1. completes IO with error
		 * 2. reclaims dma resources
		 * When is it safe to recover requests?
		 * - if the drive state is faulted
		 * - if the state is still soft reset after our timeout
		 * - if the drive registers are dead (state = FF)
		 */

		if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
		    (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
		    (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK)) {
			/*
			 * It never came out of soft reset. Try to
			 * recover the requests and then let them
			 * fail. This is to mitigate hung processes.
			 *
			 * Acquire the interrupt lock since these lists are
			 * manipulated by interrupt handlers.
			 */
			ASSERT(!WAITQ_LOCK_HELD(skdev));
			INTR_LOCK(skdev);
			skd_recover_requests(skdev);
			INTR_UNLOCK(skdev);
		}
		/* start the queue so we can respond with error to requests */
		skd_start(skdev);
		/* wakeup anyone waiting for startup complete */
		skdev->gendisk_on = -1;
		cv_signal(&skdev->cv_waitq);
		break;

	case SKD_DRVR_STATE_RESUMING:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		break;
	}
}

/*
 *
 * Name:	skd_timer, kicks off the timer processing.
 *
 * Inputs:	skdev		- device state structure.
 *
 * Returns:	Nothing.
 *
 */
static void
skd_timer(void *arg)
{
	skd_device_t *skdev = (skd_device_t *)arg;

	/* Someone set us to 0, don't bother rescheduling. */
	ADAPTER_STATE_LOCK(skdev);
	if (skdev->skd_timer_timeout_id != 0) {
		ADAPTER_STATE_UNLOCK(skdev);
		/* Pardon the drop-and-then-acquire logic here. */
		skd_timer_tick(skdev);
		ADAPTER_STATE_LOCK(skdev);
		/* Restart timer, if not being stopped. */
		if (skdev->skd_timer_timeout_id != 0) {
			skdev->skd_timer_timeout_id =
			    timeout(skd_timer, arg, skd_timer_ticks);
		}
	}
	ADAPTER_STATE_UNLOCK(skdev);
}

/*
 *
 * Name:	skd_start_timer, kicks off the 1-second timer.
 *
 * Inputs:	skdev		- device state structure.
 *
 * Returns:	Nothing.
 *
 */
static void
skd_start_timer(struct skd_device *skdev)
{
	/* Start one second driver timer. */
	ADAPTER_STATE_LOCK(skdev);
	ASSERT(skdev->skd_timer_timeout_id == 0);

	/*
	 * Do first "timeout tick" right away, but not in this
	 * thread.
	 */
	skdev->skd_timer_timeout_id = timeout(skd_timer, skdev, 1);
	ADAPTER_STATE_UNLOCK(skdev);
}

/*
 * INTERNAL REQUESTS -- generated by driver itself
 */

/*
 *
 * Name:	skd_format_internal_skspcl, sets up the internal
 *		FIT request message.
 *
 * Inputs:	skdev		- device state structure.
 *
 * Returns:	One.
 *
 */
static int
skd_format_internal_skspcl(struct skd_device *skdev)
{
	struct skd_special_context *skspcl = &skdev->internal_skspcl;
	struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
	struct fit_msg_hdr *fmh;
	uint64_t dma_address;
	struct skd_scsi_request *scsi;

	fmh = (struct fit_msg_hdr *)&skspcl->msg_buf64[0];
	fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
	fmh->num_protocol_cmds_coalesced = 1;

	/* Instead of 64-bytes in, use 8-(64-bit-words) for linted alignment. */
	scsi = (struct skd_scsi_request *)&skspcl->msg_buf64[8];
	bzero(scsi, sizeof (*scsi));
	dma_address = skspcl->req.sksg_dma_address.cookies->_dmu._dmac_ll;
	scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
	sgd->control = FIT_SGD_CONTROL_LAST;
	sgd->byte_count = 0;
	sgd->host_side_addr = skspcl->db_dma_address.cookies->_dmu._dmac_ll;
	sgd->dev_side_addr = 0; /* not used */
	sgd->next_desc_ptr = 0LL;

	return (1);
}
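
/*
 * Layout note (follows from the indexing above): msg_buf64 is the same
 * message buffer viewed as 64-bit words, so &msg_buf64[8] is byte offset
 * 64.  A FIT message for an internal request is therefore the 64-byte
 * FIT header immediately followed by one 64-byte SSDI (SCSI) command,
 * which is also why skd_send_special_fitmsg() below always queues it
 * with FIT_QCMD_MSGSIZE_128.
 */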

/*
 *
 * Name:	skd_send_internal_skspcl, send internal requests to
 *		the hardware.
 *
 * Inputs:	skdev		- device state structure.
 *		skspcl		- request structure
 *		opcode		- just what it says
 *
 * Returns:	Nothing.
 *
 */
void
skd_send_internal_skspcl(struct skd_device *skdev,
    struct skd_special_context *skspcl, uint8_t opcode)
{
	struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
	struct skd_scsi_request *scsi;

	if (SKD_REQ_STATE_IDLE != skspcl->req.state) {
		/*
		 * A refresh is already in progress.
		 * Just wait for it to finish.
		 */
		return;
	}

	ASSERT(0 == (skspcl->req.id & SKD_ID_INCR));
	skspcl->req.state = SKD_REQ_STATE_BUSY;
	skspcl->req.id += SKD_ID_INCR;

	/* Instead of 64-bytes in, use 8-(64-bit-words) for linted alignment. */
	scsi = (struct skd_scsi_request *)&skspcl->msg_buf64[8];
	scsi->hdr.tag = skspcl->req.id;

	Dcmn_err(CE_NOTE, "internal skspcl: opcode=%x req.id=%x ==========>",
	    opcode, skspcl->req.id);

	switch (opcode) {
	case TEST_UNIT_READY:
		scsi->cdb[0] = TEST_UNIT_READY;
		scsi->cdb[1] = 0x00;
		scsi->cdb[2] = 0x00;
		scsi->cdb[3] = 0x00;
		scsi->cdb[4] = 0x00;
		scsi->cdb[5] = 0x00;
		sgd->byte_count = 0;
		scsi->hdr.sg_list_len_bytes = 0;
		break;
	case READ_CAPACITY_EXT:
		scsi->cdb[0]  = READ_CAPACITY_EXT;
		scsi->cdb[1]  = 0x10;
		scsi->cdb[2]  = 0x00;
		scsi->cdb[3]  = 0x00;
		scsi->cdb[4]  = 0x00;
		scsi->cdb[5]  = 0x00;
		scsi->cdb[6]  = 0x00;
		scsi->cdb[7]  = 0x00;
		scsi->cdb[8]  = 0x00;
		scsi->cdb[9]  = 0x00;
		scsi->cdb[10] = 0x00;
		scsi->cdb[11] = 0x00;
		scsi->cdb[12] = 0x00;
		scsi->cdb[13] = 0x20;
		scsi->cdb[14] = 0x00;
		scsi->cdb[15] = 0x00;
		sgd->byte_count = SKD_N_READ_CAP_EXT_BYTES;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		break;
	case 0x28:
		(void) memset(skspcl->data_buf, 0x65, SKD_N_INTERNAL_BYTES);

		scsi->cdb[0] = 0x28;
		scsi->cdb[1] = 0x00;
		scsi->cdb[2] = 0x00;
		scsi->cdb[3] = 0x00;
		scsi->cdb[4] = 0x00;
		scsi->cdb[5] = 0x00;
		scsi->cdb[6] = 0x00;
		scsi->cdb[7] = 0x00;
		scsi->cdb[8] = 0x01;
		scsi->cdb[9] = 0x00;
		sgd->byte_count = SKD_N_INTERNAL_BYTES;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(SKD_N_INTERNAL_BYTES);
		break;
	case INQUIRY:
		scsi->cdb[0] = INQUIRY;
		scsi->cdb[1] = 0x01;	/* evpd */
		scsi->cdb[2] = 0x80;	/* serial number page */
		scsi->cdb[3] = 0x00;
		scsi->cdb[4] = 0x10;
		scsi->cdb[5] = 0x00;
		sgd->byte_count = 16;	/* SKD_N_INQ_BYTES */
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		break;
	case INQUIRY2:
		scsi->cdb[0] = INQUIRY;
		scsi->cdb[1] = 0x00;
		scsi->cdb[2] = 0x00;	/* standard INQUIRY data */
		scsi->cdb[3] = 0x00;
		scsi->cdb[4] = 0x24;
		scsi->cdb[5] = 0x00;
		sgd->byte_count = 36;	/* SKD_N_INQ_BYTES */
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		break;
	case SYNCHRONIZE_CACHE:
		scsi->cdb[0] = SYNCHRONIZE_CACHE;
		scsi->cdb[1] = 0x00;
		scsi->cdb[2] = 0x00;
		scsi->cdb[3] = 0x00;
		scsi->cdb[4] = 0x00;
		scsi->cdb[5] = 0x00;
		scsi->cdb[6] = 0x00;
		scsi->cdb[7] = 0x00;
		scsi->cdb[8] = 0x00;
		scsi->cdb[9] = 0x00;
		sgd->byte_count = 0;
		scsi->hdr.sg_list_len_bytes = 0;
		break;
	default:
		ASSERT("Don't know what to send");
		return;

	}

	skd_send_special_fitmsg(skdev, skspcl);
}
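
/*
 * Decoding note for the READ_CAPACITY_EXT case above (standard SPC/SBC
 * encoding; READ_CAPACITY_EXT is presumably 0x9e in skd.h): cdb[1] = 0x10
 * is the READ CAPACITY(16) service action, and cdb[10..13] hold the
 * big-endian allocation length, here 0x20 = 32 bytes, matching
 * SKD_N_READ_CAP_EXT_BYTES.  The response parsed in
 * skd_complete_internal() carries the last LBA in bytes 0-7 and the
 * logical block size in bytes 8-11.
 */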

/*
 *
 * Name:	skd_refresh_device_data, sends a TUR command.
 *
 * Inputs:	skdev		- device state structure.
 *
 * Returns:	Nothing.
 *
 */
static void
skd_refresh_device_data(struct skd_device *skdev)
{
	struct skd_special_context *skspcl = &skdev->internal_skspcl;

	Dcmn_err(CE_NOTE, "refresh_device_data: state=%d", skdev->state);

	skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
}

/*
 *
 * Name:	skd_complete_internal, handles the completion of
 *		driver-initiated I/O requests.
 *
 * Inputs:	skdev		- device state structure.
 *		skcomp		- completion structure.
 *		skerr		- error structure.
 *		skspcl		- request structure.
 *
 * Returns:	Nothing.
 *
 */
/* ARGSUSED */	/* Upstream common source with other platforms. */
static void
skd_complete_internal(struct skd_device *skdev,
    volatile struct fit_completion_entry_v1 *skcomp,
    volatile struct fit_comp_error_info *skerr,
    struct skd_special_context *skspcl)
{
	uint8_t *buf = skspcl->data_buf;
	uint8_t status = 2;
	/* Instead of 64-bytes in, use 8-(64-bit-words) for linted alignment. */
	struct skd_scsi_request *scsi =
	    (struct skd_scsi_request *)&skspcl->msg_buf64[8];

	ASSERT(skspcl == &skdev->internal_skspcl);

	(void) ddi_dma_sync(skspcl->db_dma_address.dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORKERNEL);
	(void) ddi_dma_sync(skspcl->mb_dma_address.dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORKERNEL);

	Dcmn_err(CE_NOTE, "complete internal %x", scsi->cdb[0]);

	skspcl->req.completion = *skcomp;
	skspcl->req.state = SKD_REQ_STATE_IDLE;
	skspcl->req.id += SKD_ID_INCR;

	status = skspcl->req.completion.status;

	Dcmn_err(CE_NOTE, "<<<<====== complete_internal: opc=%x", *scsi->cdb);

	switch (scsi->cdb[0]) {
	case TEST_UNIT_READY:
		if (SAM_STAT_GOOD == status) {
			skd_send_internal_skspcl(skdev, skspcl,
			    READ_CAPACITY_EXT);
		} else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				cmn_err(CE_WARN,
				    "!%s: TUR failed, don't send anymore, "
				    "state 0x%x", skdev->name, skdev->state);

				return;
			}

			Dcmn_err(CE_NOTE, "%s: TUR failed, retry skerr",
			    skdev->name);
			skd_send_internal_skspcl(skdev, skspcl, 0x00);
		}
		break;
	case READ_CAPACITY_EXT: {
		uint64_t cap, Nblocks;
		uint64_t xbuf[1];

		skdev->read_cap_is_valid = 0;
		if (SAM_STAT_GOOD == status) {
			bcopy(buf, xbuf, 8);
			cap = be64_to_cpu(*xbuf);
			skdev->read_cap_last_lba = cap;
			skdev->read_cap_blocksize =
			    (buf[8] << 24) | (buf[9] << 16) |
			    (buf[10] << 8) | buf[11];

			cap *= skdev->read_cap_blocksize;
			Dcmn_err(CE_NOTE, "  Last LBA: %" PRIu64 " (0x%" PRIx64
			    "), blk sz: %d, Capacity: %" PRIu64 "GB\n",
			    skdev->read_cap_last_lba,
			    skdev->read_cap_last_lba,
			    skdev->read_cap_blocksize,
			    cap >> 30ULL);

			Nblocks = skdev->read_cap_last_lba + 1;

			skdev->Nblocks = Nblocks;
			skdev->read_cap_is_valid = 1;

			skd_send_internal_skspcl(skdev, skspcl, INQUIRY2);

		} else {
			Dcmn_err(CE_NOTE, "**** READCAP failed, retry TUR");
			skd_send_internal_skspcl(skdev, skspcl,
			    TEST_UNIT_READY);
		}
		break;
	}
	case INQUIRY:
		skdev->inquiry_is_valid = 0;
		if (SAM_STAT_GOOD == status) {
			skdev->inquiry_is_valid = 1;

			if (scsi->cdb[1] == 0x1) {
				bcopy(&buf[4], skdev->inq_serial_num, 12);
				skdev->inq_serial_num[12] = '\0';
			} else {
				char *tmp = skdev->inq_vendor_id;

				bcopy(&buf[8], tmp, 8);
				tmp[8] = '\0';

				tmp = skdev->inq_product_id;
				bcopy(&buf[16], tmp, 16);
				tmp[16] = '\0';

				tmp = skdev->inq_product_rev;
				bcopy(&buf[32], tmp, 4);
				tmp[4] = '\0';
			}
		}

		if (skdev->state != SKD_DRVR_STATE_ONLINE)
			if (skd_unquiesce_dev(skdev) < 0)
				cmn_err(CE_NOTE, "** failed to ONLINE device");
		break;
	case SYNCHRONIZE_CACHE:
		skdev->sync_done = (SAM_STAT_GOOD == status) ? 1 : -1;

		cv_signal(&skdev->cv_waitq);
		break;

	default:
		ASSERT("we didn't send this");
	}
}
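
/*
 * For reference, the INQUIRY parsing above follows the standard SPC
 * response layouts: with EVPD set (cdb[1] == 0x1, page 0x80) the unit
 * serial number starts at byte 4; for standard INQUIRY data the vendor
 * ID lives at bytes 8-15, the product ID at bytes 16-31 and the product
 * revision at bytes 32-35, which is exactly what is copied into the
 * inq_* fields.
 */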

/*
 * FIT MESSAGES
 */

/*
 *
 * Name:	skd_send_fitmsg, send a FIT message to the hardware.
 *
 * Inputs:	skdev		- device state structure.
 *		skmsg		- FIT message structure.
 *
 * Returns:	Nothing.
 *
 */
/* ARGSUSED */	/* Upstream common source with other platforms. */
static void
skd_send_fitmsg(struct skd_device *skdev,
    struct skd_fitmsg_context *skmsg)
{
	uint64_t qcmd;
	struct fit_msg_hdr *fmh;

	Dcmn_err(CE_NOTE, "msgbuf's DMA addr: 0x%" PRIx64 ", qdepth_busy=%d",
	    skmsg->mb_dma_address.cookies->dmac_laddress,
	    skdev->queue_depth_busy);

	Dcmn_err(CE_NOTE, "msg_buf 0x%p, offset %x", (void *)skmsg->msg_buf,
	    skmsg->offset);

	qcmd = skmsg->mb_dma_address.cookies->dmac_laddress;
	qcmd |= FIT_QCMD_QID_NORMAL;

	fmh = (struct fit_msg_hdr *)skmsg->msg_buf64;
	skmsg->outstanding = fmh->num_protocol_cmds_coalesced;

	if (skdev->dbg_level > 1) {
		uint8_t *bp = skmsg->msg_buf;
		int i;

		for (i = 0; i < skmsg->length; i += 8) {
			Dcmn_err(CE_NOTE, "  msg[%2d] %02x %02x %02x %02x "
			    "%02x %02x %02x %02x",
			    i, bp[i + 0], bp[i + 1], bp[i + 2],
			    bp[i + 3], bp[i + 4], bp[i + 5],
			    bp[i + 6], bp[i + 7]);
			if (i == 0)
				i = 64 - 8;
		}
	}

	(void) ddi_dma_sync(skmsg->mb_dma_address.dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	ASSERT(skmsg->length > sizeof (struct fit_msg_hdr));
	if (skmsg->length > 256) {
		qcmd |= FIT_QCMD_MSGSIZE_512;
	} else if (skmsg->length > 128) {
		qcmd |= FIT_QCMD_MSGSIZE_256;
	} else if (skmsg->length > 64) {
		qcmd |= FIT_QCMD_MSGSIZE_128;
	}

	skdev->ios_started++;

	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
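
/*
 * Sketch of the queue-command encoding above (inferred from the masking
 * here and in skd_send_special_fitmsg() below): the 64-bit value written
 * to FIT_Q_COMMAND is the physical address of the message buffer with
 * the queue ID and a message-size code OR'ed into the low-order bits,
 * which the buffer's alignment guarantees are otherwise zero:
 *
 *	qcmd = dma_address | FIT_QCMD_QID_NORMAL | FIT_QCMD_MSGSIZE_xxx;
 *
 * The size code tells the card how many 64-byte chunks to fetch, so a
 * message of up to 128/256/512 bytes is transferred with one doorbell
 * write.
 */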

/*
 *
 * Name:	skd_send_special_fitmsg, send a special FIT message
 *		to the hardware for driver-originated I/O requests.
 *
 * Inputs:	skdev		- device state structure.
 *		skspcl		- skspcl structure.
 *
 * Returns:	Nothing.
 *
 */
static void
skd_send_special_fitmsg(struct skd_device *skdev,
    struct skd_special_context *skspcl)
{
	uint64_t qcmd;

	Dcmn_err(CE_NOTE, "send_special_fitmsg: pt 1");

	if (skdev->dbg_level > 1) {
		uint8_t *bp = skspcl->msg_buf;
		int i;

		for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
			cmn_err(CE_NOTE,
			    "  spcl[%2d] %02x %02x %02x %02x "
			    "%02x %02x %02x %02x\n", i,
			    bp[i + 0], bp[i + 1], bp[i + 2], bp[i + 3],
			    bp[i + 4], bp[i + 5], bp[i + 6], bp[i + 7]);
			if (i == 0)
				i = 64 - 8;
		}

		for (i = 0; i < skspcl->req.n_sg; i++) {
			struct fit_sg_descriptor *sgd =
			    &skspcl->req.sksg_list[i];

			cmn_err(CE_NOTE, "  sg[%d] count=%u ctrl=0x%x "
			    "addr=0x%" PRIx64 " next=0x%" PRIx64,
			    i, sgd->byte_count, sgd->control,
			    sgd->host_side_addr, sgd->next_desc_ptr);
		}
	}

	(void) ddi_dma_sync(skspcl->mb_dma_address.dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	(void) ddi_dma_sync(skspcl->db_dma_address.dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
	 * and one 64-byte SSDI command.
	 */
	qcmd = skspcl->mb_dma_address.cookies->dmac_laddress;

	qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;

	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}

/*
 * COMPLETION QUEUE
 */

static void skd_complete_other(struct skd_device *skdev,
    volatile struct fit_completion_entry_v1 *skcomp,
    volatile struct fit_comp_error_info *skerr);

struct sns_info {
	uint8_t type;
	uint8_t stat;
	uint8_t key;
	uint8_t asc;
	uint8_t ascq;
	uint8_t mask;
	enum skd_check_status_action action;
};

static struct sns_info skd_chkstat_table[] = {
	/* Good */
	{0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c, SKD_CHECK_STATUS_REPORT_GOOD},

	/* Smart alerts */
	{0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E,	/* warnings */
	    SKD_CHECK_STATUS_REPORT_SMART_ALERT},
	{0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E,	/* thresholds */
	    SKD_CHECK_STATUS_REPORT_SMART_ALERT},
	{0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F,	/* temp over trigger */
	    SKD_CHECK_STATUS_REPORT_SMART_ALERT},

	/* Retry (with limits) */
	{0x70, 0x02, ABORTED_COMMAND, 0, 0, 0x1C,	/* DMA errors */
	    SKD_CHECK_STATUS_REQUEUE_REQUEST},
	{0x70, 0x02, UNIT_ATTENTION, 0x0B, 0x00, 0x1E,	/* warnings */
	    SKD_CHECK_STATUS_REQUEUE_REQUEST},
	{0x70, 0x02, UNIT_ATTENTION, 0x5D, 0x00, 0x1E,	/* thresholds */
	    SKD_CHECK_STATUS_REQUEUE_REQUEST},
	{0x70, 0x02, UNIT_ATTENTION, 0x80, 0x30, 0x1F,	/* backup power */
	    SKD_CHECK_STATUS_REQUEUE_REQUEST},

	/* Busy (or about to be) */
	{0x70, 0x02, UNIT_ATTENTION, 0x3f, 0x01, 0x1F,	/* fw changed */
	    SKD_CHECK_STATUS_BUSY_IMMINENT},
};

/*
 *
 * Name:	skd_check_status, checks the return status from a
 *		completed I/O request.
 *
 * Inputs:	skdev		- device state structure.
 *		cmp_status	- SCSI status byte.
 *		skerr		- the error data structure.
 *
 * Returns:	Depending on the error condition, return the action
 *		to be taken as specified in the skd_chkstat_table.
 *		If no entry matches, return SKD_CHECK_STATUS_REPORT_GOOD
 *		when there is no error, otherwise return
 *		SKD_CHECK_STATUS_REPORT_ERROR.
 *
 */
static enum skd_check_status_action
skd_check_status(struct skd_device *skdev, uint8_t cmp_status,
    volatile struct fit_comp_error_info *skerr)
{
	/*
	 * Look up status and sense data to decide how to handle the error
	 * from the device.
	 * mask says which fields must match e.g., mask=0x18 means check
	 * type and stat, ignore key, asc, ascq.
	 */
	int i, n;

	Dcmn_err(CE_NOTE, "(%s): key/asc/ascq %02x/%02x/%02x",
	    skd_name(skdev), skerr->key, skerr->code, skerr->qual);

	Dcmn_err(CE_NOTE, "stat: t=%02x stat=%02x k=%02x c=%02x q=%02x",
	    skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual);

	/* Does the info match an entry in the good category? */
	n = sizeof (skd_chkstat_table) / sizeof (skd_chkstat_table[0]);
	for (i = 0; i < n; i++) {
		struct sns_info *sns = &skd_chkstat_table[i];

		if (sns->mask & 0x10)
			if (skerr->type != sns->type)
				continue;

		if (sns->mask & 0x08)
			if (cmp_status != sns->stat)
				continue;

		if (sns->mask & 0x04)
			if (skerr->key != sns->key)
				continue;

		if (sns->mask & 0x02)
			if (skerr->code != sns->asc)
				continue;

		if (sns->mask & 0x01)
			if (skerr->qual != sns->ascq)
				continue;

		if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
			cmn_err(CE_WARN, "!(%s):SMART Alert: sense key/asc/ascq"
			    " %02x/%02x/%02x",
			    skd_name(skdev), skerr->key,
			    skerr->code, skerr->qual);
		}

		Dcmn_err(CE_NOTE, "skd_check_status: returning %x",
		    sns->action);

		return (sns->action);
	}

	/*
	 * No other match, so nonzero status means error,
	 * zero status means good
	 */
	if (cmp_status) {
		cmn_err(CE_WARN,
		    "!%s: status check: qdepth=%d skmfl=%p (%d) skrfl=%p (%d)",
		    skdev->name,
		    skdev->queue_depth_busy,
		    (void *)skdev->skmsg_free_list, skd_list_skmsg(skdev, 0),
		    (void *)skdev->skreq_free_list, skd_list_skreq(skdev, 0));

		cmn_err(CE_WARN, "!%s: t=%02x stat=%02x k=%02x c=%02x q=%02x",
		    skdev->name, skerr->type, cmp_status, skerr->key,
		    skerr->code, skerr->qual);

		return (SKD_CHECK_STATUS_REPORT_ERROR);
	}

	Dcmn_err(CE_NOTE, "status check good default");

	return (SKD_CHECK_STATUS_REPORT_GOOD);
}
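
/*
 * Worked example of the mask matching above: the "fw changed" entry is
 * {0x70, 0x02, UNIT_ATTENTION, 0x3f, 0x01, 0x1F, ...}.  Its mask 0x1F
 * sets all five bits, so a completion matches it only when type == 0x70
 * (current, fixed-format sense), status == 0x02 (CHECK CONDITION),
 * key == UNIT_ATTENTION, asc == 0x3f and ascq == 0x01; such an I/O is
 * requeued while the driver goes BUSY_IMMINENT.  By contrast the
 * "DMA errors" entry uses mask 0x1C, so asc and ascq are ignored there.
 */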

/*
 *
 * Name:	skd_isr_completion_posted, handles I/O completions.
 *
 * Inputs:	skdev		- device state structure.
 *
 * Returns:	Nothing.
 *
 */
static void
skd_isr_completion_posted(struct skd_device *skdev)
{
	volatile struct fit_completion_entry_v1 *skcmp = NULL;
	volatile struct fit_comp_error_info *skerr;
	struct skd_fitmsg_context	*skmsg;
	struct skd_request_context	*skreq;
	skd_buf_private_t	*pbuf;
	uint16_t req_id;
	uint32_t req_slot;
	uint32_t timo_slot;
	uint32_t msg_slot;
	uint16_t cmp_cntxt = 0;
	uint8_t cmp_status = 0;
	uint8_t cmp_cycle = 0;
	uint32_t cmp_bytes = 0;

	(void) ddi_dma_sync(skdev->cq_dma_address.dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORKERNEL);

	for (;;) {
		ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);

		WAITQ_LOCK(skdev);

		skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
		cmp_cycle = skcmp->cycle;
		cmp_cntxt = skcmp->tag;
		cmp_status = skcmp->status;
		cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);

		skerr = &skdev->skerr_table[skdev->skcomp_ix];

		Dcmn_err(CE_NOTE,
		    "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d "
		    "qdepth_busy=%d rbytes=0x%x proto=%d",
		    skdev->skcomp_cycle, skdev->skcomp_ix,
		    cmp_cycle, cmp_cntxt, cmp_status,
		    skdev->queue_depth_busy, cmp_bytes, skdev->proto_ver);

		if (cmp_cycle != skdev->skcomp_cycle) {
			Dcmn_err(CE_NOTE, "%s:end of completions", skdev->name);

			WAITQ_UNLOCK(skdev);
			break;
		}


		skdev->n_req++;

		/*
		 * Update the completion queue head index and possibly
		 * the completion cycle count.
		 */
		skdev->skcomp_ix++;
		if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
			skdev->skcomp_ix = 0;
			skdev->skcomp_cycle++; /* 8-bit wrap-around */
		}


		/*
		 * The command context is a unique 32-bit ID. The low order
		 * bits help locate the request. The request is usually a
		 * r/w request (see skd_start() above) or a special request.
		 */
		req_id = cmp_cntxt;
		req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;

		Dcmn_err(CE_NOTE,
		    "<<<< completion_posted 1: req_id=%x req_slot=%x",
		    req_id, req_slot);

		/* Is this other than a r/w request? */
		if (req_slot >= skdev->num_req_context) {
			/*
			 * This is not a completion for a r/w request.
			 */
			skd_complete_other(skdev, skcmp, skerr);
			WAITQ_UNLOCK(skdev);
			continue;
		}

		skreq = &skdev->skreq_table[req_slot];

		/*
		 * Make sure the request ID for the slot matches.
		 */
		ASSERT(skreq->id == req_id);

		if (SKD_REQ_STATE_ABORTED == skreq->state) {
			Dcmn_err(CE_NOTE, "reclaim req %p id=%04x\n",
			    (void *)skreq, skreq->id);
			/*
			 * a previously timed out command can
			 * now be cleaned up
			 */
			msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
			ASSERT(msg_slot < skdev->num_fitmsg_context);
			skmsg = &skdev->skmsg_table[msg_slot];
			if (skmsg->id == skreq->fitmsg_id) {
				ASSERT(skmsg->outstanding > 0);
				skmsg->outstanding--;
				if (skmsg->outstanding == 0) {
					ASSERT(SKD_MSG_STATE_BUSY ==
					    skmsg->state);
					skmsg->state = SKD_MSG_STATE_IDLE;
					skmsg->id += SKD_ID_INCR;
					skmsg->next = skdev->skmsg_free_list;
					skdev->skmsg_free_list = skmsg;
				}
			}
			/*
			 * Reclaim the skd_request_context
			 */
			skreq->state = SKD_REQ_STATE_IDLE;
			skreq->id += SKD_ID_INCR;
			skreq->next = skdev->skreq_free_list;
			skdev->skreq_free_list = skreq;
			WAITQ_UNLOCK(skdev);
			continue;
		}

		skreq->completion.status = cmp_status;

		pbuf = skreq->pbuf;
		ASSERT(pbuf != NULL);

		Dcmn_err(CE_NOTE, "<<<< completion_posted 2: pbuf=%p "
		    "req_id=%x req_slot=%x", (void *)pbuf, req_id, req_slot);
		if (cmp_status && skdev->disks_initialized) {
			cmn_err(CE_WARN, "!%s: "
			    "I/O err: pbuf=%p blkno=%lld (%llx) nblks=%ld ",
			    skdev->name, (void *)pbuf, pbuf->x_xfer->x_blkno,
			    pbuf->x_xfer->x_blkno, pbuf->x_xfer->x_nblks);
		}

		ASSERT(skdev->active_cmds);
		atomic_dec_64(&skdev->active_cmds);

		if (SAM_STAT_GOOD == cmp_status) {
			/* Release DMA resources for the request. */
			if (pbuf->x_xfer->x_nblks != 0)
				skd_blkdev_postop_sg_list(skdev, skreq);
			WAITQ_UNLOCK(skdev);
			skd_end_request(skdev, skreq, 0);
			WAITQ_LOCK(skdev);
		} else {
			switch (skd_check_status(skdev, cmp_status, skerr)) {
			case SKD_CHECK_STATUS_REPORT_GOOD:
			case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
				WAITQ_UNLOCK(skdev);
				skd_end_request(skdev, skreq, 0);
				WAITQ_LOCK(skdev);
				break;

			case SKD_CHECK_STATUS_BUSY_IMMINENT:
				skd_log_skreq(skdev, skreq, "retry(busy)");
				skd_queue(skdev, pbuf);
				skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
				skdev->timer_countdown = SKD_TIMER_MINUTES(20);

				(void) skd_quiesce_dev(skdev);
				break;

			case SKD_CHECK_STATUS_REPORT_ERROR:
				/* fall thru to report error */
			default:
				/*
				 * Save the entire completion
				 * and error entries for
				 * later error interpretation.
				 */
				skreq->completion = *skcmp;
				skreq->err_info = *skerr;
				WAITQ_UNLOCK(skdev);
				skd_end_request(skdev, skreq, -EIO);
				WAITQ_LOCK(skdev);
				break;
			}
		}
2066 */ 2067 msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK; 2068 2069 ASSERT(msg_slot < skdev->num_fitmsg_context); 2070 skmsg = &skdev->skmsg_table[msg_slot]; 2071 if (skmsg->id == skreq->fitmsg_id) { 2072 ASSERT(SKD_MSG_STATE_BUSY == skmsg->state); 2073 skmsg->state = SKD_MSG_STATE_IDLE; 2074 skmsg->id += SKD_ID_INCR; 2075 skmsg->next = skdev->skmsg_free_list; 2076 skdev->skmsg_free_list = skmsg; 2077 } 2078 2079 /* 2080 * Decrease the number of active requests. 2081 * This also decrements the count in the 2082 * timeout slot. 2083 */ 2084 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK; 2085 ASSERT(skdev->timeout_slot[timo_slot] > 0); 2086 ASSERT(skdev->queue_depth_busy > 0); 2087 2088 atomic_dec_32(&skdev->timeout_slot[timo_slot]); 2089 atomic_dec_32(&skdev->queue_depth_busy); 2090 2091 /* 2092 * Reclaim the skd_request_context 2093 */ 2094 skreq->state = SKD_REQ_STATE_IDLE; 2095 skreq->id += SKD_ID_INCR; 2096 skreq->next = skdev->skreq_free_list; 2097 skdev->skreq_free_list = skreq; 2098 2099 WAITQ_UNLOCK(skdev); 2100 2101 /* 2102 * The caller holds the interrupt lock; this state test is stable. 2103 */ 2104 if ((skdev->state == SKD_DRVR_STATE_PAUSING) && 2105 (0 == skdev->queue_depth_busy)) { 2106 skdev->state = SKD_DRVR_STATE_PAUSED; 2107 cv_signal(&skdev->cv_waitq); 2108 } 2109 } /* for(;;) */ 2110 } 2111 2112 /* 2113 * 2114 * Name: skd_complete_other, handles the completion of a 2115 * non-r/w request. 2116 * 2117 * Inputs: skdev - device state structure. 2118 * skcomp - FIT completion structure. 2119 * skerr - error structure. 2120 * 2121 * Returns: Nothing. 2122 * 2123 */ 2124 static void 2125 skd_complete_other(struct skd_device *skdev, 2126 volatile struct fit_completion_entry_v1 *skcomp, 2127 volatile struct fit_comp_error_info *skerr) 2128 { 2129 uint32_t req_id = 0; 2130 uint32_t req_table; 2131 uint32_t req_slot; 2132 struct skd_special_context *skspcl; 2133 2134 req_id = skcomp->tag; 2135 req_table = req_id & SKD_ID_TABLE_MASK; 2136 req_slot = req_id & SKD_ID_SLOT_MASK; 2137 2138 Dcmn_err(CE_NOTE, "complete_other: table=0x%x id=0x%x slot=%d", 2139 req_table, req_id, req_slot); 2140 2141 /* 2142 * Based on the request id, determine how to dispatch this completion. 2143 * Only internal (SKD_ID_INTERNAL) completions are expected here; the 2144 * ASSERTs below enforce that before the entry is forwarded. 2145 */ 2146 ASSERT(req_table == SKD_ID_INTERNAL); 2147 ASSERT(req_slot == 0); 2148 2149 skspcl = &skdev->internal_skspcl; 2150 ASSERT(skspcl->req.id == req_id); 2151 ASSERT(skspcl->req.state == SKD_REQ_STATE_BUSY); 2152 2153 Dcmn_err(CE_NOTE, "<<<<== complete_other: ID_INTERNAL"); 2154 skd_complete_internal(skdev, skcomp, skerr, skspcl); 2155 } 2156 2157 /* 2158 * 2159 * Name: skd_reset_skcomp, resets the completion and error 2160 * tables and the completion cycle counter. 2161 * 2162 * Inputs: skdev - device state structure. 2163 * 2164 * Returns: Nothing. 2165 * 2166 */ 2167 static void 2168 skd_reset_skcomp(struct skd_device *skdev) 2169 { 2170 uint32_t nbytes; 2171 2172 nbytes = sizeof (struct fit_completion_entry_v1) * 2173 SKD_N_COMPLETION_ENTRY; 2174 nbytes += sizeof (struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY; 2175 2176 if (skdev->skcomp_table) 2177 bzero(skdev->skcomp_table, nbytes); 2178 2179 skdev->skcomp_ix = 0; 2180 skdev->skcomp_cycle = 1; 2181 } 2182 2183 2184 2185 /* 2186 * INTERRUPTS 2187 */ 2188 2189 /* 2190 * 2191 * Name: skd_isr_aif, handles the device interrupts. 2192 * 2193 * Inputs: arg - skdev device state structure.
2194 * intvec - not referenced 2195 * 2196 * Returns: DDI_INTR_CLAIMED if the interrupt was handled, otherwise 2197 * DDI_INTR_UNCLAIMED. 2198 * 2199 */ 2200 /* ARGSUSED */ /* Upstream common source with other platforms. */ 2201 static uint_t 2202 skd_isr_aif(caddr_t arg, caddr_t intvec) 2203 { 2204 uint32_t intstat; 2205 uint32_t ack; 2206 int rc = DDI_INTR_UNCLAIMED; 2207 struct skd_device *skdev; 2208 2209 skdev = (skd_device_t *)(uintptr_t)arg; 2210 2211 ASSERT(skdev != NULL); 2212 2213 skdev->intr_cntr++; 2214 2215 Dcmn_err(CE_NOTE, "skd_isr_aif: intr=%" PRId64 "\n", skdev->intr_cntr); 2216 2217 for (;;) { 2218 2219 ASSERT(!WAITQ_LOCK_HELD(skdev)); 2220 INTR_LOCK(skdev); 2221 2222 intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST); 2223 2224 ack = FIT_INT_DEF_MASK; 2225 ack &= intstat; 2226 2227 Dcmn_err(CE_NOTE, "intstat=0x%x ack=0x%x", intstat, ack); 2228 2229 /* 2230 * As long as an interrupt is pending on the device, keep 2231 * looping. When none remain, get out; if nothing was 2232 * claimed at all, still run the completion processor below. 2233 */ 2234 if (ack == 0) { 2235 /* 2236 * No interrupt pending, but run the completion 2237 * processor anyway while the device is online. 2238 */ 2239 if (rc == DDI_INTR_UNCLAIMED && 2240 skdev->state == SKD_DRVR_STATE_ONLINE) { 2241 Dcmn_err(CE_NOTE, 2242 "1: Want isr_comp_posted call"); 2243 skd_isr_completion_posted(skdev); 2244 } 2245 INTR_UNLOCK(skdev); 2246 2247 break; 2248 } 2249 rc = DDI_INTR_CLAIMED; 2250 2251 SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST); 2252 2253 if ((skdev->state != SKD_DRVR_STATE_LOAD) && 2254 (skdev->state != SKD_DRVR_STATE_STOPPING)) { 2255 if (intstat & FIT_ISH_COMPLETION_POSTED) { 2256 Dcmn_err(CE_NOTE, 2257 "2: Want isr_comp_posted call"); 2258 skd_isr_completion_posted(skdev); 2259 } 2260 2261 if (intstat & FIT_ISH_FW_STATE_CHANGE) { 2262 Dcmn_err(CE_NOTE, "isr: fwstate change"); 2263 2264 skd_isr_fwstate(skdev); 2265 if (skdev->state == SKD_DRVR_STATE_FAULT || 2266 skdev->state == 2267 SKD_DRVR_STATE_DISAPPEARED) { 2268 INTR_UNLOCK(skdev); 2269 2270 return (rc); 2271 } 2272 } 2273 2274 if (intstat & FIT_ISH_MSG_FROM_DEV) { 2275 Dcmn_err(CE_NOTE, "isr: msg_from_dev change"); 2276 skd_isr_msg_from_dev(skdev); 2277 } 2278 } 2279 2280 INTR_UNLOCK(skdev); 2281 } 2282 2283 if (!SIMPLEQ_EMPTY(&skdev->waitqueue)) 2284 skd_start(skdev); 2285 2286 return (rc); 2287 } 2288 2289 /* 2290 * 2291 * Name: skd_drive_fault, sets the drive state to SKD_DRVR_STATE_FAULT. 2292 * 2293 * Inputs: skdev - device state structure. 2294 * 2295 * Returns: Nothing. 2296 * 2297 */ 2298 static void 2299 skd_drive_fault(struct skd_device *skdev) 2300 { 2301 skdev->state = SKD_DRVR_STATE_FAULT; 2302 cmn_err(CE_WARN, "!(%s): Drive FAULT\n", 2303 skd_name(skdev)); 2304 } 2305 2306 /* 2307 * 2308 * Name: skd_drive_disappeared, sets the drive state to DISAPPEARED. 2309 * 2310 * Inputs: skdev - device state structure. 2311 * 2312 * Returns: Nothing. 2313 * 2314 */ 2315 static void 2316 skd_drive_disappeared(struct skd_device *skdev) 2317 { 2318 skdev->state = SKD_DRVR_STATE_DISAPPEARED; 2319 cmn_err(CE_WARN, "!(%s): Drive DISAPPEARED\n", 2320 skd_name(skdev)); 2321 } 2322 2323 /* 2324 * 2325 * Name: skd_isr_fwstate, handles the various device states. 2326 * 2327 * Inputs: skdev - device state structure. 2328 * 2329 * Returns: Nothing.
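 *
 * A rough map of the transitions handled below (drive state =>
 * driver action):
 *   INIT            => continue the FIT handshake (FIT_MTD_FITFW_INIT)
 *   ONLINE          => adopt queue-depth limits, refresh device data
 *   BUSY/BUSY_ERASE => quiesce and arm a 20 minute timer
 *   SOFT_RESET      => go to RESTARTING unless already (re)starting
 *   FW_BOOTING      => wait for boot, bounded by SKD_WAIT_BOOT_TO
 *   FAULT and 0xFF  => recover outstanding requests, restart the queue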
2330 * 2331 */ 2332 static void 2333 skd_isr_fwstate(struct skd_device *skdev) 2334 { 2335 uint32_t sense; 2336 uint32_t state; 2337 int prev_driver_state; 2338 uint32_t mtd; 2339 2340 prev_driver_state = skdev->state; 2341 2342 sense = SKD_READL(skdev, FIT_STATUS); 2343 state = sense & FIT_SR_DRIVE_STATE_MASK; 2344 2345 Dcmn_err(CE_NOTE, "s1120 state %s(%d)=>%s(%d)", 2346 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, 2347 skd_drive_state_to_str(state), state); 2348 2349 skdev->drive_state = state; 2350 2351 switch (skdev->drive_state) { 2352 case FIT_SR_DRIVE_INIT: 2353 if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) { 2354 skd_disable_interrupts(skdev); 2355 break; 2356 } 2357 if (skdev->state == SKD_DRVR_STATE_RESTARTING) { 2358 skd_recover_requests(skdev); 2359 } 2360 if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) { 2361 skdev->timer_countdown = 2362 SKD_TIMER_SECONDS(SKD_STARTING_TO); 2363 skdev->state = SKD_DRVR_STATE_STARTING; 2364 skd_soft_reset(skdev); 2365 break; 2366 } 2367 mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0); 2368 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 2369 skdev->last_mtd = mtd; 2370 break; 2371 2372 case FIT_SR_DRIVE_ONLINE: 2373 skdev->queue_depth_limit = skdev->soft_queue_depth_limit; 2374 if (skdev->queue_depth_limit > skdev->hard_queue_depth_limit) { 2375 skdev->queue_depth_limit = 2376 skdev->hard_queue_depth_limit; 2377 } 2378 2379 skdev->queue_depth_lowat = skdev->queue_depth_limit * 2 / 3 + 1; 2380 if (skdev->queue_depth_lowat < 1) 2381 skdev->queue_depth_lowat = 1; 2382 Dcmn_err(CE_NOTE, 2383 "%s queue depth limit=%d hard=%d soft=%d lowat=%d", 2384 DRV_NAME, 2385 skdev->queue_depth_limit, 2386 skdev->hard_queue_depth_limit, 2387 skdev->soft_queue_depth_limit, 2388 skdev->queue_depth_lowat); 2389 2390 skd_refresh_device_data(skdev); 2391 break; 2392 case FIT_SR_DRIVE_BUSY: 2393 skdev->state = SKD_DRVR_STATE_BUSY; 2394 skdev->timer_countdown = SKD_TIMER_MINUTES(20); 2395 (void) skd_quiesce_dev(skdev); 2396 break; 2397 case FIT_SR_DRIVE_BUSY_SANITIZE: 2398 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE; 2399 skd_start(skdev); 2400 break; 2401 case FIT_SR_DRIVE_BUSY_ERASE: 2402 skdev->state = SKD_DRVR_STATE_BUSY_ERASE; 2403 skdev->timer_countdown = SKD_TIMER_MINUTES(20); 2404 break; 2405 case FIT_SR_DRIVE_OFFLINE: 2406 skdev->state = SKD_DRVR_STATE_IDLE; 2407 break; 2408 case FIT_SR_DRIVE_SOFT_RESET: 2409 /* keep STARTING/RESTARTING; otherwise force a restart */ 2410 2411 switch (skdev->state) { 2412 case SKD_DRVR_STATE_STARTING: 2413 case SKD_DRVR_STATE_RESTARTING: 2414 break; 2415 default: 2416 skdev->state = SKD_DRVR_STATE_RESTARTING; 2417 break; 2418 } 2419 break; 2420 case FIT_SR_DRIVE_FW_BOOTING: 2421 Dcmn_err(CE_NOTE, 2422 "ISR FIT_SR_DRIVE_FW_BOOTING %s", skdev->name); 2423 skdev->state = SKD_DRVR_STATE_WAIT_BOOT; 2424 skdev->timer_countdown = SKD_TIMER_SECONDS(SKD_WAIT_BOOT_TO); 2425 break; 2426 2427 case FIT_SR_DRIVE_DEGRADED: 2428 case FIT_SR_PCIE_LINK_DOWN: 2429 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD: 2430 break; 2431 2432 case FIT_SR_DRIVE_FAULT: 2433 skd_drive_fault(skdev); 2434 skd_recover_requests(skdev); 2435 skd_start(skdev); 2436 break; 2437 2438 case 0xFF: 2439 skd_drive_disappeared(skdev); 2440 skd_recover_requests(skdev); 2441 skd_start(skdev); 2442 break; 2443 default: 2444 /* 2445 * Unknown FW state. Wait for a state we recognize.
2446 */ 2447 break; 2448 } 2449 2450 Dcmn_err(CE_NOTE, "Driver state %s(%d)=>%s(%d)", 2451 skd_skdev_state_to_str(prev_driver_state), prev_driver_state, 2452 skd_skdev_state_to_str(skdev->state), skdev->state); 2453 } 2454 2455 /* 2456 * 2457 * Name: skd_recover_requests, attempts to recover requests. 2458 * 2459 * Inputs: skdev - device state structure. 2460 * 2461 * Returns: Nothing. 2462 * 2463 */ 2464 static void 2465 skd_recover_requests(struct skd_device *skdev) 2466 { 2467 int i; 2468 2469 ASSERT(INTR_LOCK_HELD(skdev)); 2470 2471 for (i = 0; i < skdev->num_req_context; i++) { 2472 struct skd_request_context *skreq = &skdev->skreq_table[i]; 2473 2474 if (skreq->state == SKD_REQ_STATE_BUSY) { 2475 skd_log_skreq(skdev, skreq, "requeue"); 2476 2477 ASSERT(0 != (skreq->id & SKD_ID_INCR)); 2478 ASSERT(skreq->pbuf != NULL); 2479 /* Release DMA resources for the request. */ 2480 skd_blkdev_postop_sg_list(skdev, skreq); 2481 2482 skd_end_request(skdev, skreq, EAGAIN); 2483 skreq->pbuf = NULL; 2484 skreq->state = SKD_REQ_STATE_IDLE; 2485 skreq->id += SKD_ID_INCR; 2486 } 2487 if (i > 0) { 2488 skreq[-1].next = skreq; 2489 } 2490 skreq->next = NULL; 2491 } 2492 2493 WAITQ_LOCK(skdev); 2494 skdev->skreq_free_list = skdev->skreq_table; 2495 WAITQ_UNLOCK(skdev); 2496 2497 for (i = 0; i < skdev->num_fitmsg_context; i++) { 2498 struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i]; 2499 2500 if (skmsg->state == SKD_MSG_STATE_BUSY) { 2501 skd_log_skmsg(skdev, skmsg, "salvaged"); 2502 ASSERT((skmsg->id & SKD_ID_INCR) != 0); 2503 skmsg->state = SKD_MSG_STATE_IDLE; 2504 skmsg->id &= ~SKD_ID_INCR; 2505 } 2506 if (i > 0) { 2507 skmsg[-1].next = skmsg; 2508 } 2509 skmsg->next = NULL; 2510 } 2511 WAITQ_LOCK(skdev); 2512 skdev->skmsg_free_list = skdev->skmsg_table; 2513 WAITQ_UNLOCK(skdev); 2514 2515 for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++) { 2516 skdev->timeout_slot[i] = 0; 2517 } 2518 skdev->queue_depth_busy = 0; 2519 } 2520 2521 /* 2522 * 2523 * Name: skd_isr_msg_from_dev, handles a message from the device. 2524 * 2525 * Inputs: skdev - device state structure. 2526 * 2527 * Returns: Nothing. 
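 *
 * The normal bring-up handshake walks the cases below in order, each
 * device ack triggering the next FIT_MTD_* write:
 *   FITFW_INIT -> GET_CMDQ_DEPTH -> SET_COMPQ_DEPTH ->
 *   SET_COMPQ_ADDR -> ARM_QUEUE,
 * after which the drive is expected to report FIT_SR_DRIVE_ONLINE.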
2528 * 2529 */ 2530 static void 2531 skd_isr_msg_from_dev(struct skd_device *skdev) 2532 { 2533 uint32_t mfd; 2534 uint32_t mtd; 2535 2536 Dcmn_err(CE_NOTE, "skd_isr_msg_from_dev:"); 2537 2538 mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE); 2539 2540 Dcmn_err(CE_NOTE, "mfd=0x%x last_mtd=0x%x\n", mfd, skdev->last_mtd); 2541 2542 /* 2543 * ignore any mfd that acks something we didn't send 2544 */ 2545 if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd)) { 2546 return; 2547 } 2548 2549 switch (FIT_MXD_TYPE(mfd)) { 2550 case FIT_MTD_FITFW_INIT: 2551 skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd); 2552 2553 if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) { 2554 cmn_err(CE_WARN, "!(%s): protocol mismatch\n", 2555 skdev->name); 2556 cmn_err(CE_WARN, "!(%s): got=%d support=%d\n", 2557 skdev->name, skdev->proto_ver, 2558 FIT_PROTOCOL_VERSION_1); 2559 cmn_err(CE_WARN, "!(%s): please upgrade driver\n", 2560 skdev->name); 2561 skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH; 2562 skd_soft_reset(skdev); 2563 break; 2564 } 2565 mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0); 2566 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 2567 skdev->last_mtd = mtd; 2568 break; 2569 2570 case FIT_MTD_GET_CMDQ_DEPTH: 2571 skdev->hard_queue_depth_limit = FIT_MXD_DATA(mfd); 2572 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0, 2573 SKD_N_COMPLETION_ENTRY); 2574 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 2575 skdev->last_mtd = mtd; 2576 break; 2577 2578 case FIT_MTD_SET_COMPQ_DEPTH: 2579 SKD_WRITEQ(skdev, skdev->cq_dma_address.cookies->dmac_laddress, 2580 FIT_MSG_TO_DEVICE_ARG); 2581 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0); 2582 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 2583 skdev->last_mtd = mtd; 2584 break; 2585 2586 case FIT_MTD_SET_COMPQ_ADDR: 2587 skd_reset_skcomp(skdev); 2588 mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0); 2589 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 2590 skdev->last_mtd = mtd; 2591 break; 2592 2593 case FIT_MTD_ARM_QUEUE: 2594 skdev->last_mtd = 0; 2595 /* 2596 * State should be, or soon will be, FIT_SR_DRIVE_ONLINE. 2597 */ 2598 break; 2599 2600 default: 2601 break; 2602 } 2603 } 2604 2605 2606 /* 2607 * 2608 * Name: skd_disable_interrupts, issues command to disable 2609 * device interrupts. 2610 * 2611 * Inputs: skdev - device state structure. 2612 * 2613 * Returns: Nothing. 2614 * 2615 */ 2616 static void 2617 skd_disable_interrupts(struct skd_device *skdev) 2618 { 2619 uint32_t sense; 2620 2621 Dcmn_err(CE_NOTE, "skd_disable_interrupts:"); 2622 2623 sense = SKD_READL(skdev, FIT_CONTROL); 2624 sense &= ~FIT_CR_ENABLE_INTERRUPTS; 2625 SKD_WRITEL(skdev, sense, FIT_CONTROL); 2626 2627 Dcmn_err(CE_NOTE, "sense 0x%x", sense); 2628 2629 /* 2630 * Note that all 1s are written. A 1 bit means 2631 * disabled, a 0 means enabled. 2632 */ 2633 SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST); 2634 } 2635 2636 /* 2637 * 2638 * Name: skd_enable_interrupts, issues command to enable 2639 * device interrupts. 2640 * 2641 * Inputs: skdev - device state structure. 2642 * 2643 * Returns: Nothing. 2644 * 2645 */ 2646 static void 2647 skd_enable_interrupts(struct skd_device *skdev) 2648 { 2649 uint32_t val; 2650 2651 Dcmn_err(CE_NOTE, "skd_enable_interrupts:"); 2652 2653 /* the interrupt sources to unmask */ 2654 val = FIT_ISH_FW_STATE_CHANGE + 2655 FIT_ISH_COMPLETION_POSTED + 2656 FIT_ISH_MSG_FROM_DEV; 2657 2658 /* 2659 * Note that the complement of the mask is written. A 1 bit means 2660 * disabled, a 0 means enabled.
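 *
 * A worked example with hypothetical bit values: if the three
 * FIT_ISH_* sources were 0x01, 0x02 and 0x10, then val == 0x13 and
 * ~val == 0xFFFFFFEC is written, leaving exactly those three sources
 * unmasked and every other source disabled.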
2661 */ 2662 SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST); 2663 2664 Dcmn_err(CE_NOTE, "interrupt mask=0x%x", ~val); 2665 2666 val = SKD_READL(skdev, FIT_CONTROL); 2667 val |= FIT_CR_ENABLE_INTERRUPTS; 2668 2669 Dcmn_err(CE_NOTE, "control=0x%x", val); 2670 2671 SKD_WRITEL(skdev, val, FIT_CONTROL); 2672 } 2673 2674 /* 2675 * 2676 * Name: skd_soft_reset, issues a soft reset to the hardware. 2677 * 2678 * Inputs: skdev - device state structure. 2679 * 2680 * Returns: Nothing. 2681 * 2682 */ 2683 static void 2684 skd_soft_reset(struct skd_device *skdev) 2685 { 2686 uint32_t val; 2687 2688 Dcmn_err(CE_NOTE, "skd_soft_reset:"); 2689 2690 val = SKD_READL(skdev, FIT_CONTROL); 2691 val |= (FIT_CR_SOFT_RESET); 2692 2693 Dcmn_err(CE_NOTE, "soft_reset: control=0x%x", val); 2694 2695 SKD_WRITEL(skdev, val, FIT_CONTROL); 2696 } 2697 2698 /* 2699 * 2700 * Name: skd_start_device, gets the device going. 2701 * 2702 * Inputs: skdev - device state structure. 2703 * 2704 * Returns: Nothing. 2705 * 2706 */ 2707 static void 2708 skd_start_device(struct skd_device *skdev) 2709 { 2710 uint32_t state; 2711 int delay_action = 0; 2712 2713 Dcmn_err(CE_NOTE, "skd_start_device:"); 2714 2715 /* ack all ghost interrupts */ 2716 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); 2717 2718 state = SKD_READL(skdev, FIT_STATUS); 2719 2720 Dcmn_err(CE_NOTE, "initial status=0x%x", state); 2721 2722 state &= FIT_SR_DRIVE_STATE_MASK; 2723 skdev->drive_state = state; 2724 skdev->last_mtd = 0; 2725 2726 skdev->state = SKD_DRVR_STATE_STARTING; 2727 skdev->timer_countdown = SKD_TIMER_SECONDS(SKD_STARTING_TO); 2728 2729 skd_enable_interrupts(skdev); 2730 2731 switch (skdev->drive_state) { 2732 case FIT_SR_DRIVE_OFFLINE: 2733 Dcmn_err(CE_NOTE, "(%s): Drive offline...", 2734 skd_name(skdev)); 2735 break; 2736 2737 case FIT_SR_DRIVE_FW_BOOTING: 2738 Dcmn_err(CE_NOTE, "FIT_SR_DRIVE_FW_BOOTING %s\n", skdev->name); 2739 skdev->state = SKD_DRVR_STATE_WAIT_BOOT; 2740 skdev->timer_countdown = SKD_TIMER_SECONDS(SKD_WAIT_BOOT_TO); 2741 break; 2742 2743 case FIT_SR_DRIVE_BUSY_SANITIZE: 2744 Dcmn_err(CE_NOTE, "(%s): Start: BUSY_SANITIZE\n", 2745 skd_name(skdev)); 2746 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE; 2747 skdev->timer_countdown = SKD_TIMER_SECONDS(60); 2748 break; 2749 2750 case FIT_SR_DRIVE_BUSY_ERASE: 2751 Dcmn_err(CE_NOTE, "(%s): Start: BUSY_ERASE\n", 2752 skd_name(skdev)); 2753 skdev->state = SKD_DRVR_STATE_BUSY_ERASE; 2754 skdev->timer_countdown = SKD_TIMER_SECONDS(60); 2755 break; 2756 2757 case FIT_SR_DRIVE_INIT: 2758 case FIT_SR_DRIVE_ONLINE: 2759 skd_soft_reset(skdev); 2760 2761 break; 2762 2763 case FIT_SR_DRIVE_BUSY: 2764 Dcmn_err(CE_NOTE, "(%s): Drive Busy...\n", 2765 skd_name(skdev)); 2766 skdev->state = SKD_DRVR_STATE_BUSY; 2767 skdev->timer_countdown = SKD_TIMER_SECONDS(60); 2768 break; 2769 2770 case FIT_SR_DRIVE_SOFT_RESET: 2771 Dcmn_err(CE_NOTE, "(%s) drive soft reset in prog\n", 2772 skd_name(skdev)); 2773 break; 2774 2775 case FIT_SR_DRIVE_FAULT: 2776 /* 2777 * Fault state is bad...soft reset won't do it... 2778 * Hard reset, maybe, but does it work on device? 2779 * For now, just fault so the system doesn't hang. 
2780 */ 2781 skd_drive_fault(skdev); 2782 2783 delay_action = 1; 2784 break; 2785 2786 case 0xFF: 2787 skd_drive_disappeared(skdev); 2788 2789 delay_action = 1; 2790 break; 2791 2792 default: 2793 Dcmn_err(CE_NOTE, "(%s) Start: unknown state %x\n", 2794 skd_name(skdev), skdev->drive_state); 2795 break; 2796 } 2797 2798 state = SKD_READL(skdev, FIT_CONTROL); 2799 Dcmn_err(CE_NOTE, "FIT Control Status=0x%x\n", state); 2800 2801 state = SKD_READL(skdev, FIT_INT_STATUS_HOST); 2802 Dcmn_err(CE_NOTE, "Intr Status=0x%x\n", state); 2803 2804 state = SKD_READL(skdev, FIT_INT_MASK_HOST); 2805 Dcmn_err(CE_NOTE, "Intr Mask=0x%x\n", state); 2806 2807 state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE); 2808 Dcmn_err(CE_NOTE, "Msg from Dev=0x%x\n", state); 2809 2810 state = SKD_READL(skdev, FIT_HW_VERSION); 2811 Dcmn_err(CE_NOTE, "HW version=0x%x\n", state); 2812 2813 if (delay_action) { 2814 /* start the queue so we can respond with error to requests */ 2815 Dcmn_err(CE_NOTE, "Starting %s queue\n", skdev->name); 2816 skd_start(skdev); 2817 skdev->gendisk_on = -1; 2818 cv_signal(&skdev->cv_waitq); 2819 } 2820 } 2821 2822 /* 2823 * 2824 * Name: skd_restart_device, restart the hardware. 2825 * 2826 * Inputs: skdev - device state structure. 2827 * 2828 * Returns: Nothing. 2829 * 2830 */ 2831 static void 2832 skd_restart_device(struct skd_device *skdev) 2833 { 2834 uint32_t state; 2835 2836 Dcmn_err(CE_NOTE, "skd_restart_device:"); 2837 2838 /* ack all ghost interrupts */ 2839 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); 2840 2841 state = SKD_READL(skdev, FIT_STATUS); 2842 2843 Dcmn_err(CE_NOTE, "skd_restart_device: drive status=0x%x\n", state); 2844 2845 state &= FIT_SR_DRIVE_STATE_MASK; 2846 skdev->drive_state = state; 2847 skdev->last_mtd = 0; 2848 2849 skdev->state = SKD_DRVR_STATE_RESTARTING; 2850 skdev->timer_countdown = SKD_TIMER_MINUTES(4); 2851 2852 skd_soft_reset(skdev); 2853 } 2854 2855 /* 2856 * 2857 * Name: skd_stop_device, stops the device. 2858 * 2859 * Inputs: skdev - device state structure. 2860 * 2861 * Returns: Nothing. 
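 *
 * The stop sequence is: issue SYNCHRONIZE_CACHE through the internal
 * special context, wait for its completion (10 second cv timeout),
 * then disable interrupts, clear any pending ones, and soft-reset the
 * device so it unloads with a clean slate.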
2862 * 2863 */ 2864 static void 2865 skd_stop_device(struct skd_device *skdev) 2866 { 2867 clock_t cur_ticks, tmo; 2868 int secs; 2869 struct skd_special_context *skspcl = &skdev->internal_skspcl; 2870 2871 if (SKD_DRVR_STATE_ONLINE != skdev->state) { 2872 Dcmn_err(CE_NOTE, "(%s): skd_stop_device not online no sync\n", 2873 skdev->name); 2874 goto stop_out; 2875 } 2876 2877 if (SKD_REQ_STATE_IDLE != skspcl->req.state) { 2878 Dcmn_err(CE_NOTE, "(%s): skd_stop_device no special\n", 2879 skdev->name); 2880 goto stop_out; 2881 } 2882 2883 skdev->state = SKD_DRVR_STATE_SYNCING; 2884 skdev->sync_done = 0; 2885 2886 skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE); 2887 2888 secs = 10; 2889 mutex_enter(&skdev->skd_internalio_mutex); 2890 while (skdev->sync_done == 0) { 2891 cur_ticks = ddi_get_lbolt(); 2892 tmo = cur_ticks + drv_usectohz(1000000 * secs); 2893 if (cv_timedwait(&skdev->cv_waitq, 2894 &skdev->skd_internalio_mutex, tmo) == -1) { 2895 /* Oops - timed out; stop waiting */ 2896 Dcmn_err(CE_NOTE, "stop_device - %d secs TMO", secs); 2897 break; 2898 } 2899 } 2900 2901 mutex_exit(&skdev->skd_internalio_mutex); 2902 2903 switch (skdev->sync_done) { 2904 case 0: 2905 Dcmn_err(CE_NOTE, "(%s): skd_stop_device no sync\n", 2906 skdev->name); 2907 break; 2908 case 1: 2909 Dcmn_err(CE_NOTE, "(%s): skd_stop_device sync done\n", 2910 skdev->name); 2911 break; 2912 default: 2913 Dcmn_err(CE_NOTE, "(%s): skd_stop_device sync error\n", 2914 skdev->name); 2915 } 2916 2917 2918 stop_out: 2919 skdev->state = SKD_DRVR_STATE_STOPPING; 2920 2921 skd_disable_interrupts(skdev); 2922 2923 /* ensure all ints on device are cleared */ 2924 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); 2925 /* soft reset the device to unload with a clean slate */ 2926 SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL); 2927 } 2928 2929 /* 2930 * CONSTRUCT 2931 */ 2932 2933 static int skd_cons_skcomp(struct skd_device *); 2934 static int skd_cons_skmsg(struct skd_device *); 2935 static int skd_cons_skreq(struct skd_device *); 2936 static int skd_cons_sksb(struct skd_device *); 2937 static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *, uint32_t, 2938 dma_mem_t *); 2939 2940 /* 2941 * 2942 * Name: skd_construct, calls other routines to build device 2943 * interface structures. 2944 * 2945 * Inputs: skdev - device state structure. 2946 * instance - DDI instance number. 2947 * 2948 * Returns: DDI_FAILURE on any failure, otherwise 2949 * DDI_SUCCESS. 2950 * 2951 */ 2952 /* ARGSUSED */ /* Upstream common source with other platforms.
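 */
/*
 * Construction below runs skcomp -> skmsg -> skreq -> sksb; a failure
 * at any step falls through to skd_destruct(), which frees in the
 * reverse order and tolerates partially constructed state.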
*/ 2953 static int 2954 skd_construct(skd_device_t *skdev, int instance) 2955 { 2956 int rc = 0; 2957 2958 skdev->state = SKD_DRVR_STATE_LOAD; 2959 skdev->irq_type = skd_isr_type; 2960 skdev->soft_queue_depth_limit = skd_max_queue_depth; 2961 skdev->hard_queue_depth_limit = 10; /* until GET_CMDQ_DEPTH */ 2962 2963 skdev->num_req_context = skd_max_queue_depth; 2964 skdev->num_fitmsg_context = skd_max_queue_depth; 2965 2966 skdev->queue_depth_limit = skdev->hard_queue_depth_limit; 2967 skdev->queue_depth_lowat = 1; 2968 skdev->proto_ver = 99; /* initialize to invalid value */ 2969 skdev->sgs_per_request = skd_sgs_per_request; 2970 skdev->dbg_level = skd_dbg_level; 2971 2972 rc = skd_cons_skcomp(skdev); 2973 if (rc < 0) { 2974 goto err_out; 2975 } 2976 2977 rc = skd_cons_skmsg(skdev); 2978 if (rc < 0) { 2979 goto err_out; 2980 } 2981 2982 rc = skd_cons_skreq(skdev); 2983 if (rc < 0) { 2984 goto err_out; 2985 } 2986 2987 rc = skd_cons_sksb(skdev); 2988 if (rc < 0) { 2989 goto err_out; 2990 } 2991 2992 Dcmn_err(CE_NOTE, "CONSTRUCT VICTORY"); 2993 2994 return (DDI_SUCCESS); 2995 2996 err_out: 2997 Dcmn_err(CE_NOTE, "construct failed\n"); 2998 skd_destruct(skdev); 2999 3000 return (DDI_FAILURE); 3001 } 3002 3003 /* 3004 * 3005 * Name: skd_free_phys, frees DMA memory. 3006 * 3007 * Inputs: skdev - device state structure. 3008 * mem - DMA info. 3009 * 3010 * Returns: Nothing. 3011 * 3012 */ 3013 static void 3014 skd_free_phys(skd_device_t *skdev, dma_mem_t *mem) 3015 { 3016 _NOTE(ARGUNUSED(skdev)); 3017 3018 if (mem == NULL || mem->dma_handle == NULL) 3019 return; 3020 3021 (void) ddi_dma_unbind_handle(mem->dma_handle); 3022 3023 if (mem->acc_handle != NULL) { 3024 ddi_dma_mem_free(&mem->acc_handle); 3025 mem->acc_handle = NULL; 3026 } 3027 3028 mem->bp = NULL; 3029 ddi_dma_free_handle(&mem->dma_handle); 3030 mem->dma_handle = NULL; 3031 } 3032 3033 /* 3034 * 3035 * Name: skd_alloc_dma_mem, allocates DMA memory. 3036 * 3037 * Inputs: skdev - device state structure. 3038 * mem - DMA data structure. 3039 * atype - specifies a 32 or 64 bit 3040 * DMA address range. 3041 * 3042 * Returns: Void pointer to mem->bp on success else NULL. 3043 * NOTE: the DDI calls always sleep (DDI_DMA_SLEEP), yet 3044 * failure modes remain, so callers MUST check the 3045 * return value. 3046 * 3047 */ 3048 static void * 3049 skd_alloc_dma_mem(skd_device_t *skdev, dma_mem_t *mem, uint8_t atype) 3050 { 3051 size_t rlen; 3052 uint_t cnt; 3053 ddi_dma_attr_t dma_attr = skd_64bit_io_dma_attr; 3054 ddi_device_acc_attr_t acc_attr = { 3055 DDI_DEVICE_ATTR_V0, 3056 DDI_STRUCTURE_LE_ACC, 3057 DDI_STRICTORDER_ACC 3058 }; 3059 3060 if (atype == ATYPE_32BIT) 3061 dma_attr.dma_attr_addr_hi = SKD_DMA_HIGH_32BIT_ADDRESS; 3062 3063 dma_attr.dma_attr_sgllen = 1; 3064 3065 /* 3066 * Allocate DMA memory.
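 * This is the usual three-step DDI sequence:
 *   1. ddi_dma_alloc_handle()     - create the DMA handle;
 *   2. ddi_dma_mem_alloc()        - allocate suitable memory;
 *   3. ddi_dma_addr_bind_handle() - bind it and obtain the cookie.
 * Each failure leg unwinds whatever steps already succeeded, and more
 * than one cookie is treated as failure because the attributes above
 * requested a single segment (dma_attr_sgllen = 1).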
3067 */ 3068 if (ddi_dma_alloc_handle(skdev->dip, &dma_attr, DDI_DMA_SLEEP, NULL, 3069 &mem->dma_handle) != DDI_SUCCESS) { 3070 cmn_err(CE_WARN, "!skd_alloc_dma_mem-1, failed"); 3071 3072 mem->dma_handle = NULL; 3073 3074 return (NULL); 3075 } 3076 3077 if (ddi_dma_mem_alloc(mem->dma_handle, mem->size, &acc_attr, 3078 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, (caddr_t *)&mem->bp, &rlen, 3079 &mem->acc_handle) != DDI_SUCCESS) { 3080 cmn_err(CE_WARN, "!skd_alloc_dma_mem-2, failed"); 3081 ddi_dma_free_handle(&mem->dma_handle); 3082 mem->dma_handle = NULL; 3083 mem->acc_handle = NULL; 3084 mem->bp = NULL; 3085 3086 return (NULL); 3087 } 3088 bzero(mem->bp, mem->size); 3089 3090 if (ddi_dma_addr_bind_handle(mem->dma_handle, NULL, mem->bp, 3091 mem->size, (DDI_DMA_CONSISTENT | DDI_DMA_RDWR), DDI_DMA_SLEEP, NULL, 3092 &mem->cookie, &cnt) != DDI_DMA_MAPPED) { 3093 cmn_err(CE_WARN, "!skd_alloc_dma_mem-3, failed"); 3094 ddi_dma_mem_free(&mem->acc_handle); 3095 ddi_dma_free_handle(&mem->dma_handle); 3096 3097 return (NULL); 3098 } 3099 3100 if (cnt > 1) { 3101 (void) ddi_dma_unbind_handle(mem->dma_handle); 3102 cmn_err(CE_WARN, "!skd_alloc_dma_mem-4, failed, " 3103 "cookie_count %d > 1", cnt); 3104 skd_free_phys(skdev, mem); 3105 3106 return (NULL); 3107 } 3108 mem->cookies = &mem->cookie; 3109 mem->cookies->dmac_size = mem->size; 3110 3111 return (mem->bp); 3112 } 3113 3114 /* 3115 * 3116 * Name: skd_cons_skcomp, allocates space for the skcomp table. 3117 * 3118 * Inputs: skdev - device state structure. 3119 * 3120 * Returns: -ENOMEM if no memory, otherwise 0. 3121 * 3122 */ 3123 static int 3124 skd_cons_skcomp(struct skd_device *skdev) 3125 { 3126 uint64_t *dma_alloc; 3127 struct fit_completion_entry_v1 *skcomp; 3128 int rc = 0; 3129 uint32_t nbytes; 3130 dma_mem_t *mem; 3131 3132 nbytes = sizeof (*skcomp) * SKD_N_COMPLETION_ENTRY; 3133 nbytes += sizeof (struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY; 3134 3135 Dcmn_err(CE_NOTE, "cons_skcomp: nbytes=%d, entries=%d", nbytes, 3136 SKD_N_COMPLETION_ENTRY); 3137 3138 mem = &skdev->cq_dma_address; 3139 mem->size = nbytes; 3140 3141 dma_alloc = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT); 3142 skcomp = (struct fit_completion_entry_v1 *)dma_alloc; 3143 if (skcomp == NULL) { 3144 rc = -ENOMEM; 3145 goto err_out; 3146 } 3147 3148 bzero(skcomp, nbytes); 3149 3150 Dcmn_err(CE_NOTE, "cons_skcomp: skcomp=%p nbytes=%d", 3151 (void *)skcomp, nbytes); 3152 3153 skdev->skcomp_table = skcomp; 3154 skdev->skerr_table = (struct fit_comp_error_info *)(dma_alloc + 3155 (SKD_N_COMPLETION_ENTRY * sizeof (*skcomp) / sizeof (uint64_t))); 3156 3157 err_out: 3158 return (rc); 3159 } 3160 3161 /* 3162 * 3163 * Name: skd_cons_skmsg, allocates space for the skmsg table. 3164 * 3165 * Inputs: skdev - device state structure. 3166 * 3167 * Returns: -ENOMEM if no memory, otherwise 0.
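 *
 * Each constructor uses the same allocation idiom; a minimal caller
 * sketch (sizes are illustrative only):
 *
 *	dma_mem_t *mem = &skmsg->mb_dma_address;
 *	mem->size = SKD_N_FITMSG_BYTES + 64;
 *	skmsg->msg_buf = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT);
 *	if (skmsg->msg_buf == NULL)
 *		return (-ENOMEM);
 *
 * The NULL check is mandatory even though the DDI calls sleep; see
 * the NOTE above skd_alloc_dma_mem().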
3168 * 3169 */ 3170 static int 3171 skd_cons_skmsg(struct skd_device *skdev) 3172 { 3173 dma_mem_t *mem; 3174 int rc = 0; 3175 uint32_t i; 3176 3177 Dcmn_err(CE_NOTE, "skmsg_table kmem_zalloc, struct %lu, count %u total %lu", 3178 (ulong_t)sizeof (struct skd_fitmsg_context), 3179 skdev->num_fitmsg_context, 3180 (ulong_t)(sizeof (struct skd_fitmsg_context) * 3181 skdev->num_fitmsg_context)); 3182 3183 skdev->skmsg_table = (struct skd_fitmsg_context *)kmem_zalloc( 3184 sizeof (struct skd_fitmsg_context) * skdev->num_fitmsg_context, 3185 KM_SLEEP); 3186 3187 for (i = 0; i < skdev->num_fitmsg_context; i++) { 3188 struct skd_fitmsg_context *skmsg; 3189 3190 skmsg = &skdev->skmsg_table[i]; 3191 3192 skmsg->id = i + SKD_ID_FIT_MSG; 3193 3194 skmsg->state = SKD_MSG_STATE_IDLE; 3195 3196 mem = &skmsg->mb_dma_address; 3197 mem->size = SKD_N_FITMSG_BYTES + 64; 3198 3199 skmsg->msg_buf = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT); 3200 3201 if (NULL == skmsg->msg_buf) { 3202 rc = -ENOMEM; 3203 i++; 3204 break; 3205 } 3206 3207 skmsg->offset = 0; 3208 3209 bzero(skmsg->msg_buf, SKD_N_FITMSG_BYTES); 3210 3211 skmsg->next = &skmsg[1]; 3212 } 3213 3214 /* Free list is in order starting with the 0th entry. */ 3215 skdev->skmsg_table[i - 1].next = NULL; 3216 skdev->skmsg_free_list = skdev->skmsg_table; 3217 3218 return (rc); 3219 } 3220 3221 /* 3222 * 3223 * Name: skd_cons_skreq, allocates space for the skreq table. 3224 * 3225 * Inputs: skdev - device state structure. 3226 * 3227 * Returns: -ENOMEM if no memory, otherwise 0. 3228 * 3229 */ 3230 static int 3231 skd_cons_skreq(struct skd_device *skdev) 3232 { 3233 int rc = 0; 3234 uint32_t i; 3235 3236 Dcmn_err(CE_NOTE, 3237 "skreq_table kmem_zalloc, struct %lu, count %u total %lu", 3238 (ulong_t)sizeof (struct skd_request_context), 3239 skdev->num_req_context, 3240 (ulong_t)(sizeof (struct skd_request_context) * 3241 skdev->num_req_context)); 3242 3243 skdev->skreq_table = (struct skd_request_context *)kmem_zalloc( 3244 sizeof (struct skd_request_context) * skdev->num_req_context, 3245 KM_SLEEP); 3246 3247 for (i = 0; i < skdev->num_req_context; i++) { 3248 struct skd_request_context *skreq; 3249 3250 skreq = &skdev->skreq_table[i]; 3251 3252 skreq->id = (uint16_t)(i + SKD_ID_RW_REQUEST); 3253 skreq->state = SKD_REQ_STATE_IDLE; 3254 3255 skreq->sksg_list = skd_cons_sg_list(skdev, 3256 skdev->sgs_per_request, 3257 &skreq->sksg_dma_address); 3258 3259 if (NULL == skreq->sksg_list) { 3260 rc = -ENOMEM; 3261 goto err_out; 3262 } 3263 3264 skreq->next = &skreq[1]; 3265 } 3266 3267 /* Free list is in order starting with the 0th entry. */ 3268 skdev->skreq_table[i - 1].next = NULL; 3269 skdev->skreq_free_list = skdev->skreq_table; 3270 3271 err_out: 3272 return (rc); 3273 } 3274 3275 /* 3276 * 3277 * Name: skd_cons_sksb, allocates space for the skspcl msg buf 3278 * and data buf. 3279 * 3280 * Inputs: skdev - device state structure. 3281 * 3282 * Returns: -ENOMEM if no memory, -EINVAL on format failure, otherwise 0.
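 *
 * The internal special context built here carries driver-generated
 * commands (for example the SYNCHRONIZE_CACHE issued from
 * skd_stop_device()) outside the normal r/w request tables.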
3283 * 3284 */ 3285 static int 3286 skd_cons_sksb(struct skd_device *skdev) 3287 { 3288 int rc = 0; 3289 struct skd_special_context *skspcl; 3290 dma_mem_t *mem; 3291 uint32_t nbytes; 3292 3293 skspcl = &skdev->internal_skspcl; 3294 3295 skspcl->req.id = 0 + SKD_ID_INTERNAL; 3296 skspcl->req.state = SKD_REQ_STATE_IDLE; 3297 3298 nbytes = SKD_N_INTERNAL_BYTES; 3299 3300 mem = &skspcl->db_dma_address; 3301 mem->size = nbytes; 3302 3303 /* data_buf's DMA pointer is skspcl->db_dma_address */ 3304 skspcl->data_buf = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT); 3305 if (skspcl->data_buf == NULL) { 3306 rc = -ENOMEM; 3307 goto err_out; 3308 } 3309 3310 bzero(skspcl->data_buf, nbytes); 3311 3312 nbytes = SKD_N_SPECIAL_FITMSG_BYTES; 3313 3314 mem = &skspcl->mb_dma_address; 3315 mem->size = nbytes; 3316 3317 /* msg_buf DMA pointer is skspcl->mb_dma_address */ 3318 skspcl->msg_buf = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT); 3319 if (skspcl->msg_buf == NULL) { 3320 rc = -ENOMEM; 3321 goto err_out; 3322 } 3323 3324 3325 bzero(skspcl->msg_buf, nbytes); 3326 3327 skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1, 3328 &skspcl->req.sksg_dma_address); 3329 3330 3331 if (skspcl->req.sksg_list == NULL) { 3332 rc = -ENOMEM; 3333 goto err_out; 3334 } 3335 3336 if (skd_format_internal_skspcl(skdev) == 0) { 3337 rc = -EINVAL; 3338 goto err_out; 3339 } 3340 3341 err_out: 3342 return (rc); 3343 } 3344 3345 /* 3346 * 3347 * Name: skd_cons_sg_list, allocates the S/G list. 3348 * 3349 * Inputs: skdev - device state structure. 3350 * n_sg - Number of scatter-gather entries. 3351 * ret_dma_addr - S/G list DMA pointer. 3352 * 3353 * Returns: A list of FIT S/G descriptors, or NULL on failure. 3354 * 3355 */ 3356 static struct fit_sg_descriptor 3357 *skd_cons_sg_list(struct skd_device *skdev, 3358 uint32_t n_sg, dma_mem_t *ret_dma_addr) 3359 { 3360 struct fit_sg_descriptor *sg_list; 3361 uint32_t nbytes; 3362 dma_mem_t *mem; 3363 3364 nbytes = sizeof (*sg_list) * n_sg; 3365 3366 mem = ret_dma_addr; 3367 mem->size = nbytes; 3368 3369 /* sg_list's DMA pointer is *ret_dma_addr */ 3370 sg_list = skd_alloc_dma_mem(skdev, mem, ATYPE_32BIT); 3371 3372 if (sg_list != NULL) { 3373 uint64_t dma_address = ret_dma_addr->cookie.dmac_laddress; 3374 uint32_t i; 3375 3376 bzero(sg_list, nbytes); 3377 3378 for (i = 0; i < n_sg - 1; i++) { 3379 uint64_t ndp_off; 3380 ndp_off = (i + 1) * sizeof (struct fit_sg_descriptor); 3381 3382 sg_list[i].next_desc_ptr = dma_address + ndp_off; 3383 } 3384 sg_list[i].next_desc_ptr = 0LL; 3385 } 3386 3387 return (sg_list); 3388 } 3389 3390 /* 3391 * DESTRUCT (FREE) 3392 */ 3393 3394 static void skd_free_skcomp(struct skd_device *skdev); 3395 static void skd_free_skmsg(struct skd_device *skdev); 3396 static void skd_free_skreq(struct skd_device *skdev); 3397 static void skd_free_sksb(struct skd_device *skdev); 3398 3399 static void skd_free_sg_list(struct skd_device *skdev, 3400 struct fit_sg_descriptor *sg_list, 3401 uint32_t n_sg, dma_mem_t dma_addr); 3402 3403 /* 3404 * 3405 * Name: skd_destruct, calls various routines to deallocate 3406 * space acquired during initialization. 3407 * 3408 * Inputs: skdev - device state structure. 3409 * 3410 * Returns: Nothing.
3411 * 3412 */ 3413 static void 3414 skd_destruct(struct skd_device *skdev) 3415 { 3416 if (skdev == NULL) { 3417 return; 3418 } 3419 3420 Dcmn_err(CE_NOTE, "destruct sksb"); 3421 skd_free_sksb(skdev); 3422 3423 Dcmn_err(CE_NOTE, "destruct skreq"); 3424 skd_free_skreq(skdev); 3425 3426 Dcmn_err(CE_NOTE, "destruct skmsg"); 3427 skd_free_skmsg(skdev); 3428 3429 Dcmn_err(CE_NOTE, "destruct skcomp"); 3430 skd_free_skcomp(skdev); 3431 3432 Dcmn_err(CE_NOTE, "DESTRUCT VICTORY"); 3433 } 3434 3435 /* 3436 * 3437 * Name: skd_free_skcomp, deallocates skcomp table DMA resources. 3438 * 3439 * Inputs: skdev - device state structure. 3440 * 3441 * Returns: Nothing. 3442 * 3443 */ 3444 static void 3445 skd_free_skcomp(struct skd_device *skdev) 3446 { 3447 if (skdev->skcomp_table != NULL) { 3448 skd_free_phys(skdev, &skdev->cq_dma_address); 3449 } 3450 3451 skdev->skcomp_table = NULL; 3452 } 3453 3454 /* 3455 * 3456 * Name: skd_free_skmsg, deallocates skmsg table DMA resources. 3457 * 3458 * Inputs: skdev - device state structure. 3459 * 3460 * Returns: Nothing. 3461 * 3462 */ 3463 static void 3464 skd_free_skmsg(struct skd_device *skdev) 3465 { 3466 uint32_t i; 3467 3468 if (NULL == skdev->skmsg_table) 3469 return; 3470 3471 for (i = 0; i < skdev->num_fitmsg_context; i++) { 3472 struct skd_fitmsg_context *skmsg; 3473 3474 skmsg = &skdev->skmsg_table[i]; 3475 3476 if (skmsg->msg_buf != NULL) { 3477 skd_free_phys(skdev, &skmsg->mb_dma_address); 3478 } 3479 3480 3481 skmsg->msg_buf = NULL; 3482 } 3483 3484 kmem_free(skdev->skmsg_table, sizeof (struct skd_fitmsg_context) * 3485 skdev->num_fitmsg_context); 3486 3487 skdev->skmsg_table = NULL; 3488 3489 } 3490 3491 /* 3492 * 3493 * Name: skd_free_skreq, deallocates skreq table DMA resources. 3494 * 3495 * Inputs: skdev - device state structure. 3496 * 3497 * Returns: Nothing. 3498 * 3499 */ 3500 static void 3501 skd_free_skreq(struct skd_device *skdev) 3502 { 3503 uint32_t i; 3504 3505 if (NULL == skdev->skreq_table) 3506 return; 3507 3508 for (i = 0; i < skdev->num_req_context; i++) { 3509 struct skd_request_context *skreq; 3510 3511 skreq = &skdev->skreq_table[i]; 3512 3513 skd_free_sg_list(skdev, skreq->sksg_list, 3514 skdev->sgs_per_request, skreq->sksg_dma_address); 3515 3516 skreq->sksg_list = NULL; 3517 } 3518 3519 kmem_free(skdev->skreq_table, sizeof (struct skd_request_context) * 3520 skdev->num_req_context); 3521 3522 skdev->skreq_table = NULL; 3523 3524 } 3525 3526 /* 3527 * 3528 * Name: skd_free_sksb, deallocates skspcl data buf and 3529 * msg buf DMA resources. 3530 * 3531 * Inputs: skdev - device state structure. 3532 * 3533 * Returns: Nothing. 3534 * 3535 */ 3536 static void 3537 skd_free_sksb(struct skd_device *skdev) 3538 { 3539 struct skd_special_context *skspcl; 3540 3541 skspcl = &skdev->internal_skspcl; 3542 3543 if (skspcl->data_buf != NULL) { 3544 skd_free_phys(skdev, &skspcl->db_dma_address); 3545 } 3546 3547 skspcl->data_buf = NULL; 3548 3549 if (skspcl->msg_buf != NULL) { 3550 skd_free_phys(skdev, &skspcl->mb_dma_address); 3551 } 3552 3553 skspcl->msg_buf = NULL; 3554 3555 skd_free_sg_list(skdev, skspcl->req.sksg_list, 1, 3556 skspcl->req.sksg_dma_address); 3557 3558 skspcl->req.sksg_list = NULL; 3559 } 3560 3561 /* 3562 * 3563 * Name: skd_free_sg_list, deallocates S/G DMA resources. 3564 * 3565 * Inputs: skdev - device state structure. 3566 * sg_list - S/G list itself. 3567 * n_sg - number of segments. 3568 * dma_addr - S/G list DMA address. 3569 * 3570 * Returns: Nothing.
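 *
 * The descriptor array was carved from a single DMA allocation in
 * skd_cons_sg_list(), so one skd_free_phys() of dma_addr releases it
 * regardless of n_sg (hence the ARGSUSED annotation below).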
3571 * 3572 */ 3573 /* ARGSUSED */ /* Upstream common source with other platforms. */ 3574 static void 3575 skd_free_sg_list(struct skd_device *skdev, 3576 struct fit_sg_descriptor *sg_list, 3577 uint32_t n_sg, dma_mem_t dma_addr) 3578 { 3579 if (sg_list != NULL) { 3580 skd_free_phys(skdev, &dma_addr); 3581 } 3582 } 3583 3584 /* 3585 * 3586 * Name: skd_queue, queues the I/O request. 3587 * 3588 * Inputs: skdev - device state structure. 3589 * pbuf - I/O request. 3590 * 3591 * Returns: Nothing. 3592 * 3593 */ 3594 static void 3595 skd_queue(skd_device_t *skdev, skd_buf_private_t *pbuf) 3596 { 3597 struct waitqueue *waitq; 3598 3599 ASSERT(skdev != NULL); 3600 ASSERT(pbuf != NULL); 3601 3602 ASSERT(WAITQ_LOCK_HELD(skdev)); 3603 3604 waitq = &skdev->waitqueue; 3605 3606 if (SIMPLEQ_EMPTY(waitq)) 3607 SIMPLEQ_INSERT_HEAD(waitq, pbuf, sq); 3608 else 3609 SIMPLEQ_INSERT_TAIL(waitq, pbuf, sq); 3610 } 3611 3612 /* 3613 * 3614 * Name: skd_list_skreq, displays the skreq table entries. 3615 * 3616 * Inputs: skdev - device state structure. 3617 * list - flag, if true displays the entry address. 3618 * 3619 * Returns: Returns number of skreq entries found. 3620 * 3621 */ 3622 /* ARGSUSED */ /* Upstream common source with other platforms. */ 3623 static int 3624 skd_list_skreq(skd_device_t *skdev, int list) 3625 { 3626 int inx = 0; 3627 struct skd_request_context *skreq; 3628 3629 if (list) { 3630 Dcmn_err(CE_NOTE, "skreq_table[0]\n"); 3631 3632 skreq = &skdev->skreq_table[0]; 3633 while (skreq) { 3634 if (list) 3635 Dcmn_err(CE_NOTE, 3636 "%d: skreq=%p state=%d id=%x fid=%x " 3637 "pbuf=%p dir=%d comp=%d\n", 3638 inx, (void *)skreq, skreq->state, 3639 skreq->id, skreq->fitmsg_id, 3640 (void *)skreq->pbuf, 3641 skreq->sg_data_dir, skreq->did_complete); 3642 inx++; 3643 skreq = skreq->next; 3644 } 3645 } 3646 3647 inx = 0; 3648 skreq = skdev->skreq_free_list; 3649 3650 if (list) 3651 Dcmn_err(CE_NOTE, "skreq_free_list\n"); 3652 while (skreq) { 3653 if (list) 3654 Dcmn_err(CE_NOTE, "%d: skreq=%p state=%d id=%x fid=%x " 3655 "pbuf=%p dir=%d\n", inx, (void *)skreq, 3656 skreq->state, skreq->id, skreq->fitmsg_id, 3657 (void *)skreq->pbuf, skreq->sg_data_dir); 3658 inx++; 3659 skreq = skreq->next; 3660 } 3661 3662 return (inx); 3663 } 3664 3665 /* 3666 * 3667 * Name: skd_list_skmsg, displays the skmsg table entries. 3668 * 3669 * Inputs: skdev - device state structure. 3670 * list - flag, if true displays the entry address. 3671 * 3672 * Returns: Returns number of skmsg entries found.
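 *
 * Like skd_list_skreq() above, this walker is a diagnostic aid; it
 * takes no locks and so assumes the lists are quiescent while it
 * runs.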
3673 * 3674 */ 3675 static int 3676 skd_list_skmsg(skd_device_t *skdev, int list) 3677 { 3678 int inx = 0; 3679 struct skd_fitmsg_context *skmsgp; 3680 3681 skmsgp = &skdev->skmsg_table[0]; 3682 3683 if (list) { 3684 Dcmn_err(CE_NOTE, "skmsg_table[0]\n"); 3685 3686 while (skmsgp) { 3687 if (list) 3688 Dcmn_err(CE_NOTE, "%d: skmsgp=%p id=%x outs=%d " 3689 "l=%d o=%d nxt=%p\n", inx, (void *)skmsgp, 3690 skmsgp->id, skmsgp->outstanding, 3691 skmsgp->length, skmsgp->offset, 3692 (void *)skmsgp->next); 3693 inx++; 3694 skmsgp = skmsgp->next; 3695 } 3696 } 3697 3698 inx = 0; 3699 if (list) 3700 Dcmn_err(CE_NOTE, "skmsg_free_list\n"); 3701 skmsgp = skdev->skmsg_free_list; 3702 while (skmsgp) { 3703 if (list) 3704 Dcmn_err(CE_NOTE, "%d: skmsgp=%p id=%x outs=%d l=%d " 3705 "o=%d nxt=%p\n", 3706 inx, (void *)skmsgp, skmsgp->id, 3707 skmsgp->outstanding, skmsgp->length, 3708 skmsgp->offset, (void *)skmsgp->next); 3709 inx++; 3710 skmsgp = skmsgp->next; 3711 } 3712 3713 return (inx); 3714 } 3715 3716 /* 3717 * 3718 * Name: skd_get_queued_pbuf, retrieves the top-of-queue entry and 3719 * delinks it from the queue. 3720 * 3721 * Inputs: skdev - device state structure. 3722 * 3723 * 3724 * Returns: Returns the top job queue entry, or NULL if empty. 3725 * 3726 */ 3727 static skd_buf_private_t 3728 *skd_get_queued_pbuf(skd_device_t *skdev) 3729 { 3730 skd_buf_private_t *pbuf; 3731 3732 ASSERT(WAITQ_LOCK_HELD(skdev)); 3733 pbuf = SIMPLEQ_FIRST(&skdev->waitqueue); 3734 if (pbuf != NULL) 3735 SIMPLEQ_REMOVE_HEAD(&skdev->waitqueue, sq); 3736 return (pbuf); 3737 } 3738 3739 /* 3740 * PCI DRIVER GLUE 3741 */ 3742 3743 /* 3744 * 3745 * Name: skd_pci_info, logs certain device PCI info. 3746 * 3747 * Inputs: skdev - device state structure. 3748 * 3749 * Returns: str, which contains the device speed and width info. 3750 * 3751 */ 3752 static char * 3753 skd_pci_info(struct skd_device *skdev, char *str, size_t len) 3754 { 3755 int pcie_reg; 3756 3757 str[0] = '\0'; 3758 3759 pcie_reg = skd_pci_find_capability(skdev, PCI_CAP_ID_EXP); 3760 3761 if (pcie_reg) { 3762 uint16_t lstat, lspeed, lwidth; 3763 3764 pcie_reg += 0x12; /* PCIe Link Status register */ 3765 lstat = pci_config_get16(skdev->pci_handle, pcie_reg); 3766 lspeed = lstat & (0xF); 3767 lwidth = (lstat & 0x3F0) >> 4; 3768 3769 (void) snprintf(str, len, "PCIe (%s x%d)", 3770 lspeed == 1 ? "2.5GT/s" : 3771 lspeed == 2 ? "5.0GT/s" : "<unknown>", 3772 lwidth); 3773 } 3774 3775 return (str); 3776 } 3777 3778 /* 3779 * MODULE GLUE 3780 */ 3781 3782 /* 3783 * 3784 * Name: skd_init, initializes certain values. 3785 * 3786 * Inputs: skdev - device state structure. 3787 * 3788 * Returns: Zero. 3789 * 3790 */ 3791 /* ARGSUSED */ /* Upstream common source with other platforms.
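 */
/*
 * The tunables validated in skd_init() below are normally set from
 * /etc/system, e.g. (illustrative values):
 *
 *	set skd:skd_max_queue_depth = 64
 *	set skd:skd_dbg_level = 1
 *
 * Out-of-range values are reset to their defaults at module load.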
*/ 3792 static int 3793 skd_init(skd_device_t *skdev) 3794 { 3795 Dcmn_err(CE_NOTE, "skd_init: v%s-b%s\n", DRV_VERSION, DRV_BUILD_ID); 3796 3797 if (skd_max_queue_depth < 1 || 3798 skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) { 3799 cmn_err(CE_NOTE, "skd_max_queue_depth %d invalid, re-set to %d\n", 3800 skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT); 3801 skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT; 3802 } 3803 3804 if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > SKD_MAX_REQ_PER_MSG) { 3805 cmn_err(CE_NOTE, "skd_max_req_per_msg %d invalid, set to %d\n", 3806 skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT); 3807 skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT; 3808 } 3809 3810 3811 if (skd_sgs_per_request < 1 || skd_sgs_per_request > SKD_MAX_N_SG_PER_REQ) { 3812 cmn_err(CE_NOTE, "skd_sgs_per_request %d invalid, set to %d\n", 3813 skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT); 3814 skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT; 3815 } 3816 3817 if (skd_dbg_level < 0 || skd_dbg_level > 2) { 3818 cmn_err(CE_NOTE, "skd_dbg_level %d invalid, re-set to %d\n", 3819 skd_dbg_level, 0); 3820 skd_dbg_level = 0; 3821 } 3822 3823 return (0); 3824 } 3825 3826 /* 3827 * 3828 * Name: skd_exit, exits the driver & logs the fact. 3829 * 3830 * Inputs: none. 3831 * 3832 * Returns: Nothing. 3833 * 3834 */ 3835 static void 3836 skd_exit(void) 3837 { 3838 cmn_err(CE_NOTE, "skd v%s unloading", DRV_VERSION); 3839 } 3840 3841 /* 3842 * 3843 * Name: skd_drive_state_to_str, converts binary drive state 3844 * to its corresponding string value. 3845 * 3846 * Inputs: Drive state. 3847 * 3848 * Returns: String representing drive state. 3849 * 3850 */ 3851 const char * 3852 skd_drive_state_to_str(int state) 3853 { 3854 switch (state) { 3855 case FIT_SR_DRIVE_OFFLINE: return ("OFFLINE"); 3856 case FIT_SR_DRIVE_INIT: return ("INIT"); 3857 case FIT_SR_DRIVE_ONLINE: return ("ONLINE"); 3858 case FIT_SR_DRIVE_BUSY: return ("BUSY"); 3859 case FIT_SR_DRIVE_FAULT: return ("FAULT"); 3860 case FIT_SR_DRIVE_DEGRADED: return ("DEGRADED"); 3861 case FIT_SR_PCIE_LINK_DOWN: return ("LINK_DOWN"); 3862 case FIT_SR_DRIVE_SOFT_RESET: return ("SOFT_RESET"); 3863 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD: return ("NEED_FW"); 3864 case FIT_SR_DRIVE_INIT_FAULT: return ("INIT_FAULT"); 3865 case FIT_SR_DRIVE_BUSY_SANITIZE: return ("BUSY_SANITIZE"); 3866 case FIT_SR_DRIVE_BUSY_ERASE: return ("BUSY_ERASE"); 3867 case FIT_SR_DRIVE_FW_BOOTING: return ("FW_BOOTING"); 3868 default: return ("???"); 3869 } 3870 } 3871 3872 /* 3873 * 3874 * Name: skd_skdev_state_to_str, converts binary driver state 3875 * to its corresponding string value. 3876 * 3877 * Inputs: Driver state. 3878 * 3879 * Returns: String representing driver state.
3880 * 3881 */ 3882 static const char * 3883 skd_skdev_state_to_str(enum skd_drvr_state state) 3884 { 3885 switch (state) { 3886 case SKD_DRVR_STATE_LOAD: return ("LOAD"); 3887 case SKD_DRVR_STATE_IDLE: return ("IDLE"); 3888 case SKD_DRVR_STATE_BUSY: return ("BUSY"); 3889 case SKD_DRVR_STATE_STARTING: return ("STARTING"); 3890 case SKD_DRVR_STATE_ONLINE: return ("ONLINE"); 3891 case SKD_DRVR_STATE_PAUSING: return ("PAUSING"); 3892 case SKD_DRVR_STATE_PAUSED: return ("PAUSED"); 3893 case SKD_DRVR_STATE_DRAINING_TIMEOUT: return ("DRAINING_TIMEOUT"); 3894 case SKD_DRVR_STATE_RESTARTING: return ("RESTARTING"); 3895 case SKD_DRVR_STATE_RESUMING: return ("RESUMING"); 3896 case SKD_DRVR_STATE_STOPPING: return ("STOPPING"); 3897 case SKD_DRVR_STATE_SYNCING: return ("SYNCING"); 3898 case SKD_DRVR_STATE_FAULT: return ("FAULT"); 3899 case SKD_DRVR_STATE_DISAPPEARED: return ("DISAPPEARED"); 3900 case SKD_DRVR_STATE_BUSY_ERASE: return ("BUSY_ERASE"); 3901 case SKD_DRVR_STATE_BUSY_SANITIZE:return ("BUSY_SANITIZE"); 3902 case SKD_DRVR_STATE_BUSY_IMMINENT: return ("BUSY_IMMINENT"); 3903 case SKD_DRVR_STATE_WAIT_BOOT: return ("WAIT_BOOT"); 3904 3905 default: return ("???"); 3906 } 3907 } 3908 3909 /* 3910 * 3911 * Name: skd_skmsg_state_to_str, converts binary driver state 3912 * to its corresponding string value. 3913 * 3914 * Inputs: Msg state. 3915 * 3916 * Returns: String representing msg state. 3917 * 3918 */ 3919 static const char * 3920 skd_skmsg_state_to_str(enum skd_fit_msg_state state) 3921 { 3922 switch (state) { 3923 case SKD_MSG_STATE_IDLE: return ("IDLE"); 3924 case SKD_MSG_STATE_BUSY: return ("BUSY"); 3925 default: return ("???"); 3926 } 3927 } 3928 3929 /* 3930 * 3931 * Name: skd_skreq_state_to_str, converts binary req state 3932 * to its corresponding string value. 3933 * 3934 * Inputs: Req state. 3935 * 3936 * Returns: String representing req state. 3937 * 3938 */ 3939 static const char * 3940 skd_skreq_state_to_str(enum skd_req_state state) 3941 { 3942 switch (state) { 3943 case SKD_REQ_STATE_IDLE: return ("IDLE"); 3944 case SKD_REQ_STATE_SETUP: return ("SETUP"); 3945 case SKD_REQ_STATE_BUSY: return ("BUSY"); 3946 case SKD_REQ_STATE_COMPLETED: return ("COMPLETED"); 3947 case SKD_REQ_STATE_TIMEOUT: return ("TIMEOUT"); 3948 case SKD_REQ_STATE_ABORTED: return ("ABORTED"); 3949 default: return ("???"); 3950 } 3951 } 3952 3953 /* 3954 * 3955 * Name: skd_log_skdev, logs device state & parameters. 3956 * 3957 * Inputs: skdev - device state structure. 3958 * event - event (string) to log. 3959 * 3960 * Returns: Nothing. 3961 * 3962 */ 3963 static void 3964 skd_log_skdev(struct skd_device *skdev, const char *event) 3965 { 3966 Dcmn_err(CE_NOTE, "log_skdev(%s) skdev=%p event='%s'", 3967 skdev->name, (void *)skdev, event); 3968 Dcmn_err(CE_NOTE, " drive_state=%s(%d) driver_state=%s(%d)", 3969 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, 3970 skd_skdev_state_to_str(skdev->state), skdev->state); 3971 Dcmn_err(CE_NOTE, " busy=%d limit=%d soft=%d hard=%d lowat=%d", 3972 skdev->queue_depth_busy, skdev->queue_depth_limit, 3973 skdev->soft_queue_depth_limit, skdev->hard_queue_depth_limit, 3974 skdev->queue_depth_lowat); 3975 Dcmn_err(CE_NOTE, " timestamp=0x%x cycle=%d cycle_ix=%d", 3976 skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix); 3977 } 3978 3979 /* 3980 * 3981 * Name: skd_log_skmsg, logs the skmsg event. 3982 * 3983 * Inputs: skdev - device state structure. 3984 * skmsg - FIT message structure. 3985 * event - event string to log. 3986 * 3987 * Returns: Nothing. 
3988 * 3989 */ 3990 static void 3991 skd_log_skmsg(struct skd_device *skdev, 3992 struct skd_fitmsg_context *skmsg, const char *event) 3993 { 3994 Dcmn_err(CE_NOTE, "log_skmsg:(%s) skmsg=%p event='%s'", 3995 skdev->name, (void *)skmsg, event); 3996 Dcmn_err(CE_NOTE, " state=%s(%d) id=0x%04x length=%d", 3997 skd_skmsg_state_to_str(skmsg->state), skmsg->state, 3998 skmsg->id, skmsg->length); 3999 } 4000 4001 /* 4002 * 4003 * Name: skd_log_skreq, logs the skreq event. 4004 * 4005 * Inputs: skdev - device state structure. 4006 * skreq -skreq structure. 4007 * event - event string to log. 4008 * 4009 * Returns: Nothing. 4010 * 4011 */ 4012 static void 4013 skd_log_skreq(struct skd_device *skdev, 4014 struct skd_request_context *skreq, const char *event) 4015 { 4016 skd_buf_private_t *pbuf; 4017 4018 Dcmn_err(CE_NOTE, "log_skreq: (%s) skreq=%p pbuf=%p event='%s'", 4019 skdev->name, (void *)skreq, (void *)skreq->pbuf, event); 4020 4021 Dcmn_err(CE_NOTE, " state=%s(%d) id=0x%04x fitmsg=0x%04x", 4022 skd_skreq_state_to_str(skreq->state), skreq->state, 4023 skreq->id, skreq->fitmsg_id); 4024 Dcmn_err(CE_NOTE, " timo=0x%x sg_dir=%d n_sg=%d", 4025 skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg); 4026 4027 if ((pbuf = skreq->pbuf) != NULL) { 4028 uint32_t lba, count; 4029 lba = pbuf->x_xfer->x_blkno; 4030 count = pbuf->x_xfer->x_nblks; 4031 Dcmn_err(CE_NOTE, " pbuf=%p lba=%u(0x%x) count=%u(0x%x) ", 4032 (void *)pbuf, lba, lba, count, count); 4033 Dcmn_err(CE_NOTE, " dir=%s " 4034 " intrs=%" PRId64 " qdepth=%d", 4035 (pbuf->dir & B_READ) ? "Read" : "Write", 4036 skdev->intr_cntr, skdev->queue_depth_busy); 4037 } else { 4038 Dcmn_err(CE_NOTE, " req=NULL\n"); 4039 } 4040 } 4041 4042 /* 4043 * 4044 * Name: skd_init_mutex, initializes all mutexes. 4045 * 4046 * Inputs: skdev - device state structure. 4047 * 4048 * Returns: DDI_FAILURE on failure otherwise DDI_SUCCESS. 4049 * 4050 */ 4051 static int 4052 skd_init_mutex(skd_device_t *skdev) 4053 { 4054 void *intr; 4055 4056 Dcmn_err(CE_CONT, "(%s%d): init_mutex flags=%x", DRV_NAME, 4057 skdev->instance, skdev->flags); 4058 4059 intr = (void *)(uintptr_t)skdev->intr_pri; 4060 4061 if (skdev->flags & SKD_MUTEX_INITED) 4062 cmn_err(CE_NOTE, "init_mutex: Oh-Oh - already INITED"); 4063 4064 /* mutexes to protect the adapter state structure. */ 4065 mutex_init(&skdev->skd_lock_mutex, NULL, MUTEX_DRIVER, 4066 DDI_INTR_PRI(intr)); 4067 mutex_init(&skdev->skd_intr_mutex, NULL, MUTEX_DRIVER, 4068 DDI_INTR_PRI(intr)); 4069 mutex_init(&skdev->waitqueue_mutex, NULL, MUTEX_DRIVER, 4070 DDI_INTR_PRI(intr)); 4071 mutex_init(&skdev->skd_internalio_mutex, NULL, MUTEX_DRIVER, 4072 DDI_INTR_PRI(intr)); 4073 4074 cv_init(&skdev->cv_waitq, NULL, CV_DRIVER, NULL); 4075 4076 skdev->flags |= SKD_MUTEX_INITED; 4077 if (skdev->flags & SKD_MUTEX_DESTROYED) 4078 skdev->flags &= ~SKD_MUTEX_DESTROYED; 4079 4080 Dcmn_err(CE_CONT, "init_mutex (%s%d): done, flags=%x", DRV_NAME, 4081 skdev->instance, skdev->flags); 4082 4083 return (DDI_SUCCESS); 4084 } 4085 4086 /* 4087 * 4088 * Name: skd_destroy_mutex, destroys all mutexes. 4089 * 4090 * Inputs: skdev - device state structure. 4091 * 4092 * Returns: Nothing. 
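 *
 * These mutexes were created at interrupt priority (DDI_INTR_PRI) in
 * skd_init_mutex() because skd_isr_aif() acquires them; the
 * SKD_MUTEX_INITED/SKD_MUTEX_DESTROYED flags keep the init/destroy
 * pair idempotent.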
4093 * 4094 */ 4095 static void 4096 skd_destroy_mutex(skd_device_t *skdev) 4097 { 4098 if ((skdev->flags & SKD_MUTEX_DESTROYED) == 0) { 4099 if (skdev->flags & SKD_MUTEX_INITED) { 4100 mutex_destroy(&skdev->waitqueue_mutex); 4101 mutex_destroy(&skdev->skd_intr_mutex); 4102 mutex_destroy(&skdev->skd_lock_mutex); 4103 mutex_destroy(&skdev->skd_internalio_mutex); 4104 4105 cv_destroy(&skdev->cv_waitq); 4106 4107 skdev->flags |= SKD_MUTEX_DESTROYED; 4108 4109 if (skdev->flags & SKD_MUTEX_INITED) 4110 skdev->flags &= ~SKD_MUTEX_INITED; 4111 } 4112 } 4113 } 4114 4115 /* 4116 * 4117 * Name: skd_setup_intr, sets up the interrupt handling. 4118 * 4119 * Inputs: skdev - device state structure. 4120 * intr_type - requested DDI interrupt type. 4121 * 4122 * Returns: DDI_FAILURE on failure otherwise DDI_SUCCESS. 4123 * 4124 */ 4125 static int 4126 skd_setup_intr(skd_device_t *skdev, int intr_type) 4127 { 4128 int32_t count = 0; 4129 int32_t avail = 0; 4130 int32_t actual = 0; 4131 int32_t ret; 4132 uint32_t i; 4133 4134 Dcmn_err(CE_CONT, "(%s%d): setup_intr", DRV_NAME, skdev->instance); 4135 4136 /* Get number of interrupts the platform h/w supports */ 4137 if (((ret = ddi_intr_get_nintrs(skdev->dip, intr_type, &count)) != 4138 DDI_SUCCESS) || count == 0) { 4139 cmn_err(CE_WARN, "!intr_setup failed, nintrs ret=%xh, cnt=%xh", 4140 ret, count); 4141 4142 return (DDI_FAILURE); 4143 } 4144 4145 /* Get number of available system interrupts */ 4146 if (((ret = ddi_intr_get_navail(skdev->dip, intr_type, &avail)) != 4147 DDI_SUCCESS) || avail == 0) { 4148 cmn_err(CE_WARN, "!intr_setup failed, navail ret=%xh, " 4149 "avail=%xh", ret, avail); 4150 4151 return (DDI_FAILURE); 4152 } 4153 4154 if (intr_type == DDI_INTR_TYPE_MSIX && avail < SKD_MSIX_MAXAIF) { 4155 cmn_err(CE_WARN, "!intr_setup failed, min MSI-X h/w vectors " 4156 "req'd: %d, avail: %d", 4157 SKD_MSIX_MAXAIF, avail); 4158 4159 return (DDI_FAILURE); 4160 } 4161 4162 /* Allocate space for interrupt handles */ 4163 skdev->hsize = sizeof (ddi_intr_handle_t) * avail; 4164 skdev->htable = kmem_zalloc(skdev->hsize, KM_SLEEP); 4165 4166 /* Allocate the interrupts */ 4167 if ((ret = ddi_intr_alloc(skdev->dip, skdev->htable, intr_type, 4168 0, count, &actual, 0)) != DDI_SUCCESS) { 4169 cmn_err(CE_WARN, "!intr_setup failed, intr_alloc ret=%xh, " 4170 "count = %xh, " "actual=%xh", ret, count, actual); 4171 4172 skd_release_intr(skdev); 4173 4174 return (DDI_FAILURE); 4175 } 4176 4177 skdev->intr_cnt = actual; 4178 4179 if (intr_type == DDI_INTR_TYPE_FIXED) 4180 (void) ddi_intr_set_pri(skdev->htable[0], 10); 4181 4182 /* Get interrupt priority */ 4183 if ((ret = ddi_intr_get_pri(skdev->htable[0], &skdev->intr_pri)) != 4184 DDI_SUCCESS) { 4185 cmn_err(CE_WARN, "!intr_setup failed, get_pri ret=%xh", ret); 4186 skd_release_intr(skdev); 4187 4188 return (ret); 4189 } 4190 4191 /* Add the interrupt handlers */ 4192 for (i = 0; i < actual; i++) { 4193 if ((ret = ddi_intr_add_handler(skdev->htable[i], 4194 skd_isr_aif, (void *)skdev, (void *)((ulong_t)i))) != 4195 DDI_SUCCESS) { 4196 cmn_err(CE_WARN, "!intr_setup failed, addh#=%xh, " 4197 "act=%xh, ret=%xh", i, actual, ret); 4198 skd_release_intr(skdev); 4199 4200 return (ret); 4201 } 4202 } 4203 4204 /* Setup mutexes */ 4205 if ((ret = skd_init_mutex(skdev)) != DDI_SUCCESS) { 4206 cmn_err(CE_WARN, "!intr_setup failed, mutex init ret=%xh", ret); 4207 skd_release_intr(skdev); 4208 4209 return (ret); 4210 } 4211 4212 /* Get the capabilities */ 4213 (void) ddi_intr_get_cap(skdev->htable[0], &skdev->intr_cap); 4214 4215 /*
	/* Enable interrupts */
	if (skdev->intr_cap & DDI_INTR_FLAG_BLOCK) {
		if ((ret = ddi_intr_block_enable(skdev->htable,
		    skdev->intr_cnt)) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "!failed, intr_setup block enable, "
			    "ret=%xh", ret);
			skd_destroy_mutex(skdev);
			skd_release_intr(skdev);

			return (ret);
		}
	} else {
		for (i = 0; i < skdev->intr_cnt; i++) {
			if ((ret = ddi_intr_enable(skdev->htable[i])) !=
			    DDI_SUCCESS) {
				cmn_err(CE_WARN, "!intr_setup failed, "
				    "intr enable, ret=%xh", ret);
				skd_destroy_mutex(skdev);
				skd_release_intr(skdev);

				return (ret);
			}
		}
	}

	if (intr_type == DDI_INTR_TYPE_FIXED)
		(void) ddi_intr_clr_mask(skdev->htable[0]);

	skdev->irq_type = intr_type;

	return (DDI_SUCCESS);
}

/*
 *
 * Name: skd_disable_intr, disables interrupt handling.
 *
 * Inputs: skdev - device state structure.
 *
 * Returns: Nothing.
 *
 */
static void
skd_disable_intr(skd_device_t *skdev)
{
	uint32_t i, rval;

	if (skdev->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Disable AIF block interrupts (MSI/MSI-X) as a group. */
		if ((rval = ddi_intr_block_disable(skdev->htable,
		    skdev->intr_cnt)) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "!failed intr block disable, rval=%x",
			    rval);
		}
	} else {
		/* Disable AIF non-block (fixed) interrupts individually. */
		for (i = 0; i < skdev->intr_cnt; i++) {
			if ((rval = ddi_intr_disable(skdev->htable[i])) !=
			    DDI_SUCCESS) {
				cmn_err(CE_WARN, "!failed intr disable, "
				    "intr#=%xh, rval=%xh", i, rval);
			}
		}
	}
}

/*
 *
 * Name: skd_release_intr, releases interrupt resources.
 *
 * Inputs: skdev - device state structure.
 *
 * Returns: Nothing.
 *
 */
static void
skd_release_intr(skd_device_t *skdev)
{
	int32_t i;
	int rval;

	Dcmn_err(CE_CONT, "REL_INTR intr_cnt=%d", skdev->intr_cnt);

	if (skdev->irq_type == 0) {
		Dcmn_err(CE_CONT, "release_intr: (%s%d): done",
		    DRV_NAME, skdev->instance);
		return;
	}

	if (skdev->htable != NULL && skdev->hsize > 0) {
		i = (int32_t)skdev->hsize /
		    (int32_t)sizeof (ddi_intr_handle_t);
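
		/*
		 * hsize was sized from the "avail" count in
		 * skd_setup_intr(), so walk every handle slot; handlers
		 * were only added for the first intr_cnt entries, hence
		 * the i < intr_cnt check before removing a handler.
		 */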
		while (i-- > 0) {
			if (skdev->htable[i] == NULL) {
				Dcmn_err(CE_NOTE, "htable[%x]=0h", i);
				continue;
			}

			if ((rval = ddi_intr_disable(skdev->htable[i])) !=
			    DDI_SUCCESS)
				Dcmn_err(CE_NOTE, "release_intr: intr_disable "
				    "htable[%d], rval=%d", i, rval);

			if (i < skdev->intr_cnt) {
				if ((rval = ddi_intr_remove_handler(
				    skdev->htable[i])) != DDI_SUCCESS)
					cmn_err(CE_WARN, "!release_intr: "
					    "intr_remove_handler FAILED, "
					    "rval=%d", rval);

				Dcmn_err(CE_NOTE, "release_intr: "
				    "remove_handler htable[%d]", i);
			}

			if ((rval = ddi_intr_free(skdev->htable[i])) !=
			    DDI_SUCCESS)
				cmn_err(CE_WARN, "!release_intr: intr_free "
				    "FAILED, rval=%d", rval);
			Dcmn_err(CE_NOTE, "release_intr: intr_free htable[%d]",
			    i);
		}

		kmem_free(skdev->htable, skdev->hsize);
		skdev->htable = NULL;
	}

	skdev->hsize = 0;
	skdev->intr_cnt = 0;
	skdev->intr_pri = 0;
	skdev->intr_cap = 0;
	skdev->irq_type = 0;
}

/*
 *
 * Name: skd_dealloc_resources, deallocates resources allocated
 *       during attach.
 *
 * Inputs: dip - DDI device info pointer.
 *         skdev - device state structure.
 *         seq - bit flags representing allocated items.
 *         instance - device instance.
 *
 * Returns: Nothing.
 *
 */
/* ARGSUSED */	/* Upstream common source with other platforms. */
static void
skd_dealloc_resources(dev_info_t *dip, skd_device_t *skdev,
    uint32_t seq, int instance)
{
	if (skdev == NULL)
		return;

	if (seq & SKD_CONSTRUCTED)
		skd_destruct(skdev);

	if (seq & SKD_INTR_ADDED) {
		skd_disable_intr(skdev);
		skd_release_intr(skdev);
	}

	if (seq & SKD_DEV_IOBASE_MAPPED)
		ddi_regs_map_free(&skdev->dev_handle);

	if (seq & SKD_IOMAP_IOBASE_MAPPED)
		ddi_regs_map_free(&skdev->iomap_handle);

	if (seq & SKD_REGS_MAPPED)
		ddi_regs_map_free(&skdev->iobase_handle);

	if (seq & SKD_CONFIG_SPACE_SETUP)
		pci_config_teardown(&skdev->pci_handle);

	if (seq & SKD_SOFT_STATE_ALLOCED) {
		if (skdev->pathname &&
		    (skdev->flags & SKD_PATHNAME_ALLOCED)) {
			kmem_free(skdev->pathname,
			    strlen(skdev->pathname)+1);
		}
	}

	if (skdev->s1120_devid)
		ddi_devid_free(skdev->s1120_devid);
}

/*
 *
 * Name: skd_setup_interrupts, sets up the appropriate interrupt type:
 *       MSI-X, MSI, or fixed.
 *
 * Inputs: skdev - device state structure.
 *
 * Returns: DDI_FAILURE on failure, otherwise DDI_SUCCESS.
 *
 */
static int
skd_setup_interrupts(skd_device_t *skdev)
{
	int32_t rval = DDI_FAILURE;
	int32_t i;
	int32_t itypes = 0;

	/*
	 * See what types of interrupts this adapter and platform support
	 */
	if ((i = ddi_intr_get_supported_types(skdev->dip, &itypes)) !=
	    DDI_SUCCESS) {
		cmn_err(CE_NOTE, "intr supported types failed, rval=%xh", i);
		return (DDI_FAILURE);
	}

	Dcmn_err(CE_NOTE, "%s: supported interrupt types: %x",
	    skdev->name, itypes);

	itypes &= skdev->irq_type;

	if (!skd_disable_msix && (itypes & DDI_INTR_TYPE_MSIX) &&
	    (rval = skd_setup_intr(skdev, DDI_INTR_TYPE_MSIX)) ==
	    DDI_SUCCESS) {
		cmn_err(CE_NOTE, "!%s: successful MSI-X setup",
		    skdev->name);
	} else if (!skd_disable_msi && (itypes & DDI_INTR_TYPE_MSI) &&
	    (rval = skd_setup_intr(skdev, DDI_INTR_TYPE_MSI)) ==
	    DDI_SUCCESS) {
		cmn_err(CE_NOTE, "!%s: successful MSI setup",
		    skdev->name);
	} else if ((itypes & DDI_INTR_TYPE_FIXED) &&
	    (rval = skd_setup_intr(skdev, DDI_INTR_TYPE_FIXED))
	    == DDI_SUCCESS) {
		cmn_err(CE_NOTE, "!%s: successful fixed intr setup",
		    skdev->name);
	} else {
		cmn_err(CE_WARN, "!%s: no supported interrupt types",
		    skdev->name);
		return (DDI_FAILURE);
	}

	Dcmn_err(CE_CONT, "%s: setup interrupts done", skdev->name);

	return (rval);
}

/*
 *
 * Name: skd_get_properties, retrieves properties from skd.conf.
 *
 * Inputs: dip - dev_info data structure.
 *         skdev - device state structure.
 *
 * Returns: Nothing.
 *
 */
/* ARGSUSED */	/* Upstream common source with other platforms. */
static void
skd_get_properties(dev_info_t *dip, skd_device_t *skdev)
{
	int prop_value;

	skd_isr_type = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    "intr-type-cap", -1);

	prop_value = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    "max-scsi-reqs", -1);
	if (prop_value >= 1 && prop_value <= SKD_MAX_QUEUE_DEPTH)
		skd_max_queue_depth = prop_value;

	prop_value = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    "max-scsi-reqs-per-msg", -1);
	if (prop_value >= 1 && prop_value <= SKD_MAX_REQ_PER_MSG)
		skd_max_req_per_msg = prop_value;

	prop_value = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    "max-sgs-per-req", -1);
	if (prop_value >= 1 && prop_value <= SKD_MAX_N_SG_PER_REQ)
		skd_sgs_per_request = prop_value;

	prop_value = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    "dbg-level", -1);
	if (prop_value >= 1 && prop_value <= 2)
		skd_dbg_level = prop_value;
}
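
/*
 * Example skd.conf fragment (illustrative values, not from the
 * original source); the property names match those read above and
 * each value falls within the range the code accepts:
 *
 *	intr-type-cap=2;
 *	max-scsi-reqs=64;
 *	max-scsi-reqs-per-msg=1;
 *	max-sgs-per-req=256;
 *	dbg-level=1;
 */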

/*
 *
 * Name: skd_wait_for_s1120, waits for the device to finish
 *       its initialization.
 *
 * Inputs: skdev - device state structure.
 *
 * Returns: DDI_SUCCESS or DDI_FAILURE.
 *
 */
static int
skd_wait_for_s1120(skd_device_t *skdev)
{
	clock_t cur_ticks, tmo;
	int loop_cntr = 0;
	int rc = DDI_FAILURE;

	mutex_enter(&skdev->skd_internalio_mutex);

	while (skdev->gendisk_on == 0) {
		cur_ticks = ddi_get_lbolt();
		tmo = cur_ticks + drv_usectohz(MICROSEC);
		if (cv_timedwait(&skdev->cv_waitq,
		    &skdev->skd_internalio_mutex, tmo) == -1) {
			/* Oops - timed out */
			if (loop_cntr++ > 10)
				break;
		}
	}

	mutex_exit(&skdev->skd_internalio_mutex);

	if (skdev->gendisk_on == 1)
		rc = DDI_SUCCESS;

	return (rc);
}
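
/*
 * Note: the wait above polls gendisk_on in one-second cv_timedwait()
 * slices (drv_usectohz(MICROSEC) is one second of ticks) and gives up
 * after about ten timeouts; gendisk_on is set elsewhere once the
 * device reports that it is online.
 */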

/*
 *
 * Name: skd_update_props, updates certain device properties.
 *
 * Inputs: skdev - device state structure.
 *         dip - dev info structure.
 *
 * Returns: Nothing.
 *
 */
static void
skd_update_props(skd_device_t *skdev, dev_info_t *dip)
{
	int blksize = 512;

	if ((ddi_prop_update_int64(DDI_DEV_T_NONE, dip, "device-nblocks",
	    skdev->Nblocks) != DDI_SUCCESS) ||
	    (ddi_prop_update_int(DDI_DEV_T_NONE, dip, "device-blksize",
	    blksize) != DDI_SUCCESS)) {
		cmn_err(CE_NOTE, "%s: FAILED to create driver properties",
		    skdev->name);
	}
}

/*
 *
 * Name: skd_setup_devid, sets up device ID info.
 *
 * Inputs: skdev - device state structure.
 *         devid - Device ID for the DDI.
 *
 * Returns: DDI_SUCCESS or DDI_FAILURE.
 *
 */
static int
skd_setup_devid(skd_device_t *skdev, ddi_devid_t *devid)
{
	int rc, sz_model, sz_sn, sz;

	sz_model = scsi_ascii_inquiry_len(skdev->inq_product_id,
	    strlen(skdev->inq_product_id));
	sz_sn = scsi_ascii_inquiry_len(skdev->inq_serial_num,
	    strlen(skdev->inq_serial_num));
	sz = sz_model + sz_sn + 1;

	(void) snprintf(skdev->devid_str, sizeof (skdev->devid_str),
	    "%.*s=%.*s", sz_model, skdev->inq_product_id, sz_sn,
	    skdev->inq_serial_num);
	rc = ddi_devid_init(skdev->dip, DEVID_SCSI_SERIAL, sz,
	    skdev->devid_str, devid);

	if (rc != DDI_SUCCESS)
		cmn_err(CE_WARN, "!%s: devid_init FAILED", skdev->name);

	return (rc);
}

/*
 *
 * Name: skd_bd_attach, attaches to the blkdev driver.
 *
 * Inputs: dip - device info structure.
 *         skdev - device state structure.
 *
 * Returns: DDI_SUCCESS or DDI_FAILURE.
 *
 */
static int
skd_bd_attach(dev_info_t *dip, skd_device_t *skdev)
{
	int rv;

	skdev->s_bdh = bd_alloc_handle(skdev, &skd_bd_ops,
	    &skd_64bit_io_dma_attr, KM_SLEEP);

	if (skdev->s_bdh == NULL) {
		cmn_err(CE_WARN, "!skd_bd_attach: FAILED");

		return (DDI_FAILURE);
	}

	rv = bd_attach_handle(dip, skdev->s_bdh);

	if (rv != DDI_SUCCESS) {
		cmn_err(CE_WARN, "!bd_attach_handle FAILED\n");
	} else {
		Dcmn_err(CE_NOTE, "bd_attach_handle OK\n");
		skdev->bd_attached++;
	}

	return (rv);
}

/*
 *
 * Name: skd_bd_detach, detaches from the blkdev driver.
 *
 * Inputs: skdev - device state structure.
 *
 * Returns: Nothing.
 *
 */
static void
skd_bd_detach(skd_device_t *skdev)
{
	if (skdev->bd_attached)
		(void) bd_detach_handle(skdev->s_bdh);

	bd_free_handle(skdev->s_bdh);
}

/*
 *
 * Name: skd_attach, attaches the skd device driver.
 *
 * Inputs: dip - device info structure.
 *         cmd - DDI attach argument (ATTACH, RESUME, etc.)
 *
 * Returns: DDI_SUCCESS or DDI_FAILURE.
 *
 */
static int
skd_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance;
	int nregs;
	skd_device_t *skdev = NULL;
	int inx;
	uint16_t cmd_reg;
	int progress = 0;
	char name[MAXPATHLEN];
	off_t regsize;
	char pci_str[32];
	char fw_version[8];

	instance = ddi_get_instance(dip);

	(void) ddi_get_parent_data(dip);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		skdev = ddi_get_soft_state(skd_state, instance);
		if (skdev == NULL)
			return (DDI_FAILURE);

		/* Re-enable timer */
		skd_start_timer(skdev);

		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	Dcmn_err(CE_NOTE, "sTec S1120 Driver v%s Instance: %d",
	    VERSIONSTR, instance);

	/*
	 * Check that hardware is installed in a DMA-capable slot
	 */
	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
		cmn_err(CE_WARN, "!%s%d: installed in a "
		    "slot that isn't DMA-capable", DRV_NAME, instance);
		return (DDI_FAILURE);
	}

	/*
	 * No support for high-level interrupts
	 */
	if (ddi_intr_hilevel(dip, 0) != 0) {
		cmn_err(CE_WARN, "!%s%d: High level interrupt not supported",
		    DRV_NAME, instance);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate our per-device-instance structure
	 */
	if (ddi_soft_state_zalloc(skd_state, instance) !=
	    DDI_SUCCESS) {
		cmn_err(CE_WARN, "!%s%d: soft state zalloc failed",
		    DRV_NAME, instance);
		return (DDI_FAILURE);
	}

	progress |= SKD_SOFT_STATE_ALLOCED;

	skdev = ddi_get_soft_state(skd_state, instance);
	if (skdev == NULL) {
		cmn_err(CE_WARN, "!%s%d: Unable to get soft state structure",
		    DRV_NAME, instance);
		goto skd_attach_failed;
	}

	(void) snprintf(skdev->name, sizeof (skdev->name),
	    DRV_NAME "%d", instance);

	skdev->dip = dip;
	skdev->instance = instance;

	ddi_set_driver_private(dip, skdev);
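
	/*
	 * Trim the device pathname: cut it at the trailing ','
	 * (minor-name separator), but stop once the '@' unit-address
	 * is reached.
	 */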
	(void) ddi_pathname(dip, name);
	for (inx = strlen(name); inx; inx--) {
		if (name[inx] == ',') {
			name[inx] = '\0';
			break;
		}
		if (name[inx] == '@') {
			break;
		}
	}

	/* Save adapter path. */
	skdev->pathname = kmem_zalloc(strlen(name) + 1, KM_SLEEP);
	(void) strlcpy(skdev->pathname, name, strlen(name) + 1);

	progress |= SKD_PATHNAME_ALLOCED;
	skdev->flags |= SKD_PATHNAME_ALLOCED;

	if (pci_config_setup(dip, &skdev->pci_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "!%s%d: pci_config_setup FAILED",
		    DRV_NAME, instance);
		goto skd_attach_failed;
	}

	progress |= SKD_CONFIG_SPACE_SETUP;

	(void) ddi_dev_nregs(dip, &nregs);

	/*
	 * 0x0 Configuration Space
	 * 0x1 I/O Space
	 * 0x2 s1120 register space
	 */
	if (ddi_dev_regsize(dip, 1, &regsize) != DDI_SUCCESS ||
	    ddi_regs_map_setup(dip, 1, &skdev->iobase, 0, regsize,
	    &dev_acc_attr, &skdev->iobase_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "!%s%d: regs_map_setup(regnum 1) failed",
		    DRV_NAME, instance);
		goto skd_attach_failed;
	}
	progress |= SKD_REGS_MAPPED;

	skdev->iomap_iobase = skdev->iobase;
	skdev->iomap_handle = skdev->iobase_handle;

	Dcmn_err(CE_NOTE, "%s: PCI iobase=%ph, iomap=%ph, regnum=%d, "
	    "regsize=%ld", skdev->name, (void *)skdev->iobase,
	    (void *)skdev->iomap_iobase, 1, regsize);

	if (ddi_dev_regsize(dip, 2, &regsize) != DDI_SUCCESS ||
	    ddi_regs_map_setup(dip, 2, &skdev->dev_iobase, 0, regsize,
	    &dev_acc_attr, &skdev->dev_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "!%s%d: regs_map_setup(regnum 2) failed",
		    DRV_NAME, instance);

		goto skd_attach_failed;
	}

	skdev->dev_memsize = (int)regsize;

	Dcmn_err(CE_NOTE, "%s: DEV iobase=%ph regsize=%d",
	    skdev->name, (void *)skdev->dev_iobase,
	    skdev->dev_memsize);

	progress |= SKD_DEV_IOBASE_MAPPED;

	cmd_reg = pci_config_get16(skdev->pci_handle, PCI_CONF_COMM);
	cmd_reg |= (PCI_COMM_ME | PCI_COMM_INTX_DISABLE);
	cmd_reg &= ~PCI_COMM_PARITY_DETECT;
	pci_config_put16(skdev->pci_handle, PCI_CONF_COMM, cmd_reg);

	/* Get adapter PCI device information. */
	skdev->vendor_id = pci_config_get16(skdev->pci_handle, PCI_CONF_VENID);
	skdev->device_id = pci_config_get16(skdev->pci_handle, PCI_CONF_DEVID);

	Dcmn_err(CE_NOTE, "%s: %x-%x card detected",
	    skdev->name, skdev->vendor_id, skdev->device_id);

	skd_get_properties(dip, skdev);

	(void) skd_init(skdev);

	if (skd_construct(skdev, instance)) {
		cmn_err(CE_WARN, "!%s: construct FAILED", skdev->name);
		goto skd_attach_failed;
	}

	progress |= SKD_PROBED;
	progress |= SKD_CONSTRUCTED;

	SIMPLEQ_INIT(&skdev->waitqueue);

	/*
	 * Setup interrupt handler
	 */
	if (skd_setup_interrupts(skdev) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "!%s: Unable to add interrupt",
		    skdev->name);
		goto skd_attach_failed;
	}

	progress |= SKD_INTR_ADDED;

	ADAPTER_STATE_LOCK(skdev);
	skdev->flags |= SKD_ATTACHED;
	ADAPTER_STATE_UNLOCK(skdev);
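
	/* 512-byte logical blocks: 1 << 9, matching the blksize
	 * advertised to blkdev in skd_update_props(). */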
	skdev->d_blkshift = 9;
	progress |= SKD_ATTACHED;

	skd_start_device(skdev);

	ADAPTER_STATE_LOCK(skdev);
	skdev->progress = progress;
	ADAPTER_STATE_UNLOCK(skdev);

	/*
	 * Give the board a chance to
	 * complete its initialization.
	 */
	if (skdev->gendisk_on != 1)
		(void) skd_wait_for_s1120(skdev);

	if (skdev->gendisk_on != 1) {
		cmn_err(CE_WARN, "!%s: s1120 failed to come ONLINE",
		    skdev->name);
		goto skd_attach_failed;
	}

	ddi_report_dev(dip);

	skd_send_internal_skspcl(skdev, &skdev->internal_skspcl, INQUIRY);

	skdev->disks_initialized++;

	(void) strcpy(fw_version, "???");
	(void) skd_pci_info(skdev, pci_str, sizeof (pci_str));
	Dcmn_err(CE_NOTE, " sTec S1120 Driver(%s) version %s-b%s",
	    DRV_NAME, DRV_VERSION, DRV_BUILD_ID);

	Dcmn_err(CE_NOTE, " sTec S1120 %04x:%04x %s 64 bit",
	    skdev->vendor_id, skdev->device_id, pci_str);

	Dcmn_err(CE_NOTE, " sTec S1120 %s\n", skdev->pathname);

	if (*skdev->inq_serial_num)
		Dcmn_err(CE_NOTE, " sTec S1120 serial#=%s",
		    skdev->inq_serial_num);

	if (*skdev->inq_product_id &&
	    *skdev->inq_product_rev)
		Dcmn_err(CE_NOTE, " sTec S1120 prod ID=%s prod rev=%s",
		    skdev->inq_product_id, skdev->inq_product_rev);

	Dcmn_err(CE_NOTE, "%s: intr-type-cap: %d",
	    skdev->name, skdev->irq_type);
	Dcmn_err(CE_NOTE, "%s: max-scsi-reqs: %d",
	    skdev->name, skd_max_queue_depth);
	Dcmn_err(CE_NOTE, "%s: max-sgs-per-req: %d",
	    skdev->name, skd_sgs_per_request);
	Dcmn_err(CE_NOTE, "%s: max-scsi-reqs-per-msg: %d",
	    skdev->name, skd_max_req_per_msg);

	if (skd_bd_attach(dip, skdev) == DDI_FAILURE)
		goto skd_attach_failed;

	skd_update_props(skdev, dip);

	/* Enable timer */
	skd_start_timer(skdev);

	ADAPTER_STATE_LOCK(skdev);
	skdev->progress = progress;
	ADAPTER_STATE_UNLOCK(skdev);

	skdev->attached = 1;
	return (DDI_SUCCESS);

skd_attach_failed:
	skd_dealloc_resources(dip, skdev, progress, instance);

	if (skdev != NULL &&
	    (skdev->flags & SKD_MUTEX_DESTROYED) == 0) {
		skd_destroy_mutex(skdev);
	}

	ddi_soft_state_free(skd_state, instance);

	cmn_err(CE_WARN, "!skd_attach FAILED: progress=%x", progress);
	return (DDI_FAILURE);
}
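
/*
 * Note: attach failures unwind through the "progress" bitmask above;
 * each SKD_* flag is set only after its resource is acquired, and
 * skd_dealloc_resources() releases exactly the flagged resources, so
 * a partial attach never frees something it did not allocate.
 */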

/*
 *
 * Name: skd_halt
 *
 * Inputs: skdev - device state structure.
 *
 * Returns: Nothing.
 *
 */
static void
skd_halt(skd_device_t *skdev)
{
	Dcmn_err(CE_NOTE, "%s: halt/suspend ......", skdev->name);
}

/*
 *
 * Name: skd_detach, detaches driver from the system.
 *
 * Inputs: dip - device info structure.
 *
 * Returns: DDI_SUCCESS on successful detach, otherwise DDI_FAILURE.
 *
 */
static int
skd_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	skd_buf_private_t *pbuf;
	skd_device_t *skdev;
	int instance;
	timeout_id_t timer_id = NULL;
	int rv1 = DDI_SUCCESS;
	struct skd_special_context *skspcl;

	instance = ddi_get_instance(dip);

	skdev = ddi_get_soft_state(skd_state, instance);
	if (skdev == NULL) {
		cmn_err(CE_WARN, "!detach failed: NULL skd state");

		return (DDI_FAILURE);
	}

	Dcmn_err(CE_CONT, "skd_detach(%d): entered", instance);

	switch (cmd) {
	case DDI_DETACH:
		/* Test for packet cache inuse. */
		ADAPTER_STATE_LOCK(skdev);

		/* Stop command/event processing. */
		skdev->flags |= (SKD_SUSPENDED | SKD_CMD_ABORT_TMO);

		/* Disable driver timer if no adapters. */
		if (skdev->skd_timer_timeout_id != 0) {
			timer_id = skdev->skd_timer_timeout_id;
			skdev->skd_timer_timeout_id = 0;
		}
		ADAPTER_STATE_UNLOCK(skdev);

		if (timer_id != 0) {
			(void) untimeout(timer_id);
		}

#ifdef	SKD_PM
		if (skdev->power_level != LOW_POWER_LEVEL) {
			skd_halt(skdev);
			skdev->power_level = LOW_POWER_LEVEL;
		}
#endif

		skspcl = &skdev->internal_skspcl;
		skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);

		skd_stop_device(skdev);

		/*
		 * Clear request queue.
		 */
		while (!SIMPLEQ_EMPTY(&skdev->waitqueue)) {
			pbuf = skd_get_queued_pbuf(skdev);
			skd_end_request_abnormal(skdev, pbuf, ECANCELED,
			    SKD_IODONE_WNIOC);
			Dcmn_err(CE_NOTE,
			    "detach: cancelled pbuf %p %ld <%s> %lld\n",
			    (void *)pbuf, pbuf->x_xfer->x_nblks,
			    (pbuf->dir & B_READ) ? "Read" : "Write",
			    pbuf->x_xfer->x_blkno);
		}

		skd_bd_detach(skdev);

		skd_dealloc_resources(dip, skdev, skdev->progress, instance);

		if ((skdev->flags & SKD_MUTEX_DESTROYED) == 0) {
			skd_destroy_mutex(skdev);
		}

		ddi_soft_state_free(skd_state, instance);

		skd_exit();

		break;

	case DDI_SUSPEND:
		/* Block timer. */
		ADAPTER_STATE_LOCK(skdev);
		skdev->flags |= SKD_SUSPENDED;

		/* Disable driver timer if last adapter. */
		if (skdev->skd_timer_timeout_id != 0) {
			timer_id = skdev->skd_timer_timeout_id;
			skdev->skd_timer_timeout_id = 0;
		}
		ADAPTER_STATE_UNLOCK(skdev);

		if (timer_id != 0) {
			(void) untimeout(timer_id);
		}

		ddi_prop_remove_all(dip);

		skd_halt(skdev);

		break;

	default:
		rv1 = DDI_FAILURE;
		break;
	}

	if (rv1 != DDI_SUCCESS) {
		cmn_err(CE_WARN, "!skd_detach, failed, rv1=%x", rv1);
	} else {
		Dcmn_err(CE_CONT, "skd_detach: exiting");
	}

	return (rv1);
}

/*
 *
 * Name: skd_devid_init, calls skd_setup_devid to set up
 *       the device's devid structure.
 *
 * Inputs: arg - device state structure.
 *         dip - dev_info structure.
 *         devid - devid structure.
 *
 * Returns: Zero.
 *
 */
/* ARGSUSED */	/* Upstream common source with other platforms. */
static int
skd_devid_init(void *arg, dev_info_t *dip, ddi_devid_t *devid)
{
	skd_device_t *skdev = arg;

	(void) skd_setup_devid(skdev, devid);

	return (0);
}
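
/*
 * skd_devid_init() is registered as the devid callback in skd_bd_ops;
 * blkdev calls it when the handle is attached to obtain a persistent
 * device ID built from the inquiry product ID and serial number.
 */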

/*
 *
 * Name: skd_bd_driveinfo, retrieves drive information for blkdev.
 *
 * Inputs: arg - device state structure.
 *         drive - drive data structure.
 *
 * Returns: Nothing.
 *
 */
static void
skd_bd_driveinfo(void *arg, bd_drive_t *drive)
{
	skd_device_t *skdev = arg;

	drive->d_qsize = (skdev->queue_depth_limit * 4) / 5;
	drive->d_maxxfer = SKD_DMA_MAXXFER;
	drive->d_removable = B_FALSE;
	drive->d_hotpluggable = B_FALSE;
	drive->d_target = 0;
	drive->d_lun = 0;

	if (skdev->inquiry_is_valid != 0) {
		drive->d_vendor = skdev->inq_vendor_id;
		drive->d_vendor_len = strlen(drive->d_vendor);

		drive->d_product = skdev->inq_product_id;
		drive->d_product_len = strlen(drive->d_product);

		drive->d_serial = skdev->inq_serial_num;
		drive->d_serial_len = strlen(drive->d_serial);

		drive->d_revision = skdev->inq_product_rev;
		drive->d_revision_len = strlen(drive->d_revision);
	}
}

/*
 *
 * Name: skd_bd_mediainfo, retrieves device media info.
 *
 * Inputs: arg - device state structure.
 *         media - container for media info.
 *
 * Returns: Zero.
 *
 */
static int
skd_bd_mediainfo(void *arg, bd_media_t *media)
{
	skd_device_t *skdev = arg;

	media->m_nblks = skdev->Nblocks;
	media->m_blksize = 512;
	media->m_pblksize = 4096;
	media->m_readonly = B_FALSE;
	media->m_solidstate = B_TRUE;

	return (0);
}

/*
 *
 * Name: skd_rw, performs R/W requests for the blkdev driver.
 *
 * Inputs: skdev - device state structure.
 *         xfer - transfer structure.
 *         dir - I/O direction.
 *
 * Returns: EAGAIN if the device is not online, EIO for polled (dump)
 *          transfers, which are not yet supported, ENOMEM if a
 *          request structure cannot be allocated, otherwise zero.
 *
 */
static int
skd_rw(skd_device_t *skdev, bd_xfer_t *xfer, int dir)
{
	skd_buf_private_t *pbuf;

	/*
	 * The x_flags structure element is not defined in Oracle Solaris.
	 * We'll need to fix this in order to support dump on this device.
	 */
	if (xfer->x_flags & BD_XFER_POLL)
		return (EIO);

	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
		Dcmn_err(CE_NOTE, "Device - not ONLINE");

		skd_request_fn_not_online(skdev);

		return (EAGAIN);
	}

	pbuf = kmem_zalloc(sizeof (skd_buf_private_t), KM_NOSLEEP);
	if (pbuf == NULL)
		return (ENOMEM);

	WAITQ_LOCK(skdev);
	pbuf->dir = dir;
	pbuf->x_xfer = xfer;

	skd_queue(skdev, pbuf);
	skdev->ios_queued++;
	WAITQ_UNLOCK(skdev);

	skd_start(skdev);

	return (0);
}

/*
 *
 * Name: skd_bd_read, performs blkdev read requests.
 *
 * Inputs: arg - device state structure.
 *         xfer - transfer request structure.
 *
 * Returns: Value returned by skd_rw().
 *
 */
static int
skd_bd_read(void *arg, bd_xfer_t *xfer)
{
	return (skd_rw(arg, xfer, B_READ));
}

/*
 *
 * Name: skd_bd_write, performs blkdev write requests.
 *
 * Inputs: arg - device state structure.
 *         xfer - transfer request structure.
 *
 * Returns: Value returned by skd_rw().
 *
 */
static int
skd_bd_write(void *arg, bd_xfer_t *xfer)
{
	return (skd_rw(arg, xfer, B_WRITE));
}