1 /* 2 * 3 * skd.c: Solaris 11/10 Driver for sTec, Inc. S112x PCIe SSD card 4 * 5 * Solaris driver is based on the Linux driver authored by: 6 * 7 * Authors/Alphabetical: Dragan Stancevic <dstancevic@stec-inc.com> 8 * Gordon Waidhofer <gwaidhofer@stec-inc.com> 9 * John Hamilton <jhamilton@stec-inc.com> 10 */ 11 12 /* 13 * This file and its contents are supplied under the terms of the 14 * Common Development and Distribution License ("CDDL"), version 1.0. 15 * You may only use this file in accordance with the terms of version 16 * 1.0 of the CDDL. 17 * 18 * A full copy of the text of the CDDL should have accompanied this 19 * source. A copy of the CDDL is also available via the Internet at 20 * http://www.illumos.org/license/CDDL. 21 */ 22 23 /* 24 * Copyright 2013 STEC, Inc. All rights reserved. 25 * Copyright 2015 Nexenta Systems, Inc. All rights reserved. 26 */ 27 28 #include <sys/types.h> 29 #include <sys/stream.h> 30 #include <sys/cmn_err.h> 31 #include <sys/kmem.h> 32 #include <sys/file.h> 33 #include <sys/buf.h> 34 #include <sys/uio.h> 35 #include <sys/cred.h> 36 #include <sys/modctl.h> 37 #include <sys/debug.h> 38 #include <sys/modctl.h> 39 #include <sys/list.h> 40 #include <sys/sysmacros.h> 41 #include <sys/errno.h> 42 #include <sys/pcie.h> 43 #include <sys/pci.h> 44 #include <sys/ddi.h> 45 #include <sys/dditypes.h> 46 #include <sys/sunddi.h> 47 #include <sys/atomic.h> 48 #include <sys/mutex.h> 49 #include <sys/param.h> 50 #include <sys/devops.h> 51 #include <sys/blkdev.h> 52 #include <sys/queue.h> 53 54 #include "skd_s1120.h" 55 #include "skd.h" 56 57 int skd_dbg_level = 0; 58 59 void *skd_state = NULL; 60 int skd_disable_msi = 0; 61 int skd_disable_msix = 0; 62 63 /* Initialized in _init() and tunable, see _init(). */ 64 clock_t skd_timer_ticks; 65 66 /* I/O DMA attributes structures. */ 67 static ddi_dma_attr_t skd_64bit_io_dma_attr = { 68 DMA_ATTR_V0, /* dma_attr_version */ 69 SKD_DMA_LOW_ADDRESS, /* low DMA address range */ 70 SKD_DMA_HIGH_64BIT_ADDRESS, /* high DMA address range */ 71 SKD_DMA_XFER_COUNTER, /* DMA counter register */ 72 SKD_DMA_ADDRESS_ALIGNMENT, /* DMA address alignment */ 73 SKD_DMA_BURSTSIZES, /* DMA burstsizes */ 74 SKD_DMA_MIN_XFER_SIZE, /* min effective DMA size */ 75 SKD_DMA_MAX_XFER_SIZE, /* max DMA xfer size */ 76 SKD_DMA_SEGMENT_BOUNDARY, /* segment boundary */ 77 SKD_DMA_SG_LIST_LENGTH, /* s/g list length */ 78 SKD_DMA_GRANULARITY, /* granularity of device */ 79 SKD_DMA_XFER_FLAGS /* DMA transfer flags */ 80 }; 81 82 int skd_isr_type = -1; 83 84 #define SKD_MAX_QUEUE_DEPTH 255 85 #define SKD_MAX_QUEUE_DEPTH_DEFAULT 64 86 int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT; 87 88 #define SKD_MAX_REQ_PER_MSG 14 89 #define SKD_MAX_REQ_PER_MSG_DEFAULT 1 90 int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT; 91 92 #define SKD_MAX_N_SG_PER_REQ 4096 93 int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT; 94 95 static int skd_sys_quiesce_dev(dev_info_t *); 96 static int skd_quiesce_dev(skd_device_t *); 97 static int skd_list_skmsg(skd_device_t *, int); 98 static int skd_list_skreq(skd_device_t *, int); 99 static int skd_attach(dev_info_t *dip, ddi_attach_cmd_t cmd); 100 static int skd_detach(dev_info_t *dip, ddi_detach_cmd_t cmd); 101 static int skd_format_internal_skspcl(struct skd_device *skdev); 102 static void skd_start(skd_device_t *); 103 static void skd_destroy_mutex(skd_device_t *skdev); 104 static void skd_enable_interrupts(struct skd_device *); 105 static void skd_request_fn_not_online(skd_device_t *skdev); 106 static void skd_send_internal_skspcl(struct skd_device *, 107 struct skd_special_context *, uint8_t); 108 static void skd_queue(skd_device_t *, skd_buf_private_t *); 109 static void *skd_alloc_dma_mem(skd_device_t *, dma_mem_t *, uint8_t); 110 static void skd_release_intr(skd_device_t *skdev); 111 static void skd_isr_fwstate(struct skd_device *skdev); 112 static void skd_isr_msg_from_dev(struct skd_device *skdev); 113 static void skd_soft_reset(struct skd_device *skdev); 114 static void skd_refresh_device_data(struct skd_device *skdev); 115 static void skd_update_props(skd_device_t *, dev_info_t *); 116 static void skd_end_request_abnormal(struct skd_device *, skd_buf_private_t *, 117 int, int); 118 static char *skd_pci_info(struct skd_device *skdev, char *str, size_t len); 119 120 static skd_buf_private_t *skd_get_queued_pbuf(skd_device_t *); 121 122 static void skd_bd_driveinfo(void *arg, bd_drive_t *drive); 123 static int skd_bd_mediainfo(void *arg, bd_media_t *media); 124 static int skd_bd_read(void *arg, bd_xfer_t *xfer); 125 static int skd_bd_write(void *arg, bd_xfer_t *xfer); 126 static int skd_devid_init(void *arg, dev_info_t *, ddi_devid_t *); 127 128 129 static bd_ops_t skd_bd_ops = { 130 BD_OPS_VERSION_0, 131 skd_bd_driveinfo, 132 skd_bd_mediainfo, 133 skd_devid_init, 134 NULL, /* sync_cache */ 135 skd_bd_read, 136 skd_bd_write, 137 }; 138 139 static ddi_device_acc_attr_t dev_acc_attr = { 140 DDI_DEVICE_ATTR_V0, 141 DDI_STRUCTURE_LE_ACC, 142 DDI_STRICTORDER_ACC 143 }; 144 145 /* 146 * Solaris module loading/unloading structures 147 */ 148 struct dev_ops skd_dev_ops = { 149 DEVO_REV, /* devo_rev */ 150 0, /* refcnt */ 151 ddi_no_info, /* getinfo */ 152 nulldev, /* identify */ 153 nulldev, /* probe */ 154 skd_attach, /* attach */ 155 skd_detach, /* detach */ 156 nodev, /* reset */ 157 NULL, /* char/block ops */ 158 NULL, /* bus operations */ 159 NULL, /* power management */ 160 skd_sys_quiesce_dev /* quiesce */ 161 }; 162 163 static struct modldrv modldrv = { 164 &mod_driverops, /* type of module: driver */ 165 "sTec skd v" DRV_VER_COMPL, /* name of module */ 166 &skd_dev_ops /* driver dev_ops */ 167 }; 168 169 static struct modlinkage modlinkage = { 170 MODREV_1, 171 &modldrv, 172 NULL 173 }; 174 175 /* 176 * sTec-required wrapper for debug printing. 177 */ 178 /*PRINTFLIKE2*/ 179 static inline void 180 Dcmn_err(int lvl, const char *fmt, ...) 181 { 182 va_list ap; 183 184 if (skd_dbg_level == 0) 185 return; 186 187 va_start(ap, fmt); 188 vcmn_err(lvl, fmt, ap); 189 va_end(ap); 190 } 191 192 /* 193 * Solaris module loading/unloading routines 194 */ 195 196 /* 197 * 198 * Name: _init, performs initial installation 199 * 200 * Inputs: None. 201 * 202 * Returns: Returns the value returned by the ddi_softstate_init function 203 * on a failure to create the device state structure or the result 204 * of the module install routines. 205 * 206 */ 207 int 208 _init(void) 209 { 210 int rval = 0; 211 int tgts = 0; 212 213 tgts |= 0x02; 214 tgts |= 0x08; /* In #ifdef NEXENTA block from original sTec drop. */ 215 216 /* 217 * drv_usectohz() is a function, so can't initialize it at 218 * instantiation. 219 */ 220 skd_timer_ticks = drv_usectohz(1000000); 221 222 Dcmn_err(CE_NOTE, 223 "<# Installing skd Driver dbg-lvl=%d %s %x>", 224 skd_dbg_level, DRV_BUILD_ID, tgts); 225 226 rval = ddi_soft_state_init(&skd_state, sizeof (skd_device_t), 0); 227 if (rval != DDI_SUCCESS) 228 return (rval); 229 230 bd_mod_init(&skd_dev_ops); 231 232 rval = mod_install(&modlinkage); 233 if (rval != DDI_SUCCESS) { 234 ddi_soft_state_fini(&skd_state); 235 bd_mod_fini(&skd_dev_ops); 236 } 237 238 return (rval); 239 } 240 241 /* 242 * 243 * Name: _info, returns information about loadable module. 244 * 245 * Inputs: modinfo, pointer to module information structure. 246 * 247 * Returns: Value returned by mod_info(). 248 * 249 */ 250 int 251 _info(struct modinfo *modinfop) 252 { 253 return (mod_info(&modlinkage, modinfop)); 254 } 255 256 /* 257 * _fini Prepares a module for unloading. It is called when the system 258 * wants to unload a module. If the module determines that it can 259 * be unloaded, then _fini() returns the value returned by 260 * mod_remove(). Upon successful return from _fini() no other 261 * routine in the module will be called before _init() is called. 262 * 263 * Inputs: None. 264 * 265 * Returns: DDI_SUCCESS or DDI_FAILURE. 266 * 267 */ 268 int 269 _fini(void) 270 { 271 int rval; 272 273 rval = mod_remove(&modlinkage); 274 if (rval == DDI_SUCCESS) { 275 ddi_soft_state_fini(&skd_state); 276 bd_mod_fini(&skd_dev_ops); 277 } 278 279 return (rval); 280 } 281 282 /* 283 * Solaris Register read/write routines 284 */ 285 286 /* 287 * 288 * Name: skd_reg_write64, writes a 64-bit value to specified address 289 * 290 * Inputs: skdev - device state structure. 291 * val - 64-bit value to be written. 292 * offset - offset from PCI base address. 293 * 294 * Returns: Nothing. 295 * 296 */ 297 /* 298 * Local vars are to keep lint silent. Any compiler worth its weight will 299 * optimize it all right out... 300 */ 301 static inline void 302 skd_reg_write64(struct skd_device *skdev, uint64_t val, uint32_t offset) 303 { 304 uint64_t *addr; 305 306 ASSERT((offset & 0x7) == 0); 307 /* LINTED */ 308 addr = (uint64_t *)(skdev->dev_iobase + offset); 309 ddi_put64(skdev->dev_handle, addr, val); 310 } 311 312 /* 313 * 314 * Name: skd_reg_read32, reads a 32-bit value to specified address 315 * 316 * Inputs: skdev - device state structure. 317 * offset - offset from PCI base address. 318 * 319 * Returns: val, 32-bit value read from specified PCI address. 320 * 321 */ 322 static inline uint32_t 323 skd_reg_read32(struct skd_device *skdev, uint32_t offset) 324 { 325 uint32_t *addr; 326 327 ASSERT((offset & 0x3) == 0); 328 /* LINTED */ 329 addr = (uint32_t *)(skdev->dev_iobase + offset); 330 return (ddi_get32(skdev->dev_handle, addr)); 331 } 332 333 /* 334 * 335 * Name: skd_reg_write32, writes a 32-bit value to specified address 336 * 337 * Inputs: skdev - device state structure. 338 * val - value to be written. 339 * offset - offset from PCI base address. 340 * 341 * Returns: Nothing. 342 * 343 */ 344 static inline void 345 skd_reg_write32(struct skd_device *skdev, uint32_t val, uint32_t offset) 346 { 347 uint32_t *addr; 348 349 ASSERT((offset & 0x3) == 0); 350 /* LINTED */ 351 addr = (uint32_t *)(skdev->dev_iobase + offset); 352 ddi_put32(skdev->dev_handle, addr, val); 353 } 354 355 356 /* 357 * Solaris skd routines 358 */ 359 360 /* 361 * 362 * Name: skd_name, generates the name of the driver. 363 * 364 * Inputs: skdev - device state structure 365 * 366 * Returns: char pointer to generated driver name. 367 * 368 */ 369 static const char * 370 skd_name(struct skd_device *skdev) 371 { 372 (void) snprintf(skdev->id_str, sizeof (skdev->id_str), "%s:", DRV_NAME); 373 374 return (skdev->id_str); 375 } 376 377 /* 378 * 379 * Name: skd_pci_find_capability, searches the PCI capability 380 * list for the specified capability. 381 * 382 * Inputs: skdev - device state structure. 383 * cap - capability sought. 384 * 385 * Returns: Returns position where capability was found. 386 * If not found, returns zero. 387 * 388 */ 389 static int 390 skd_pci_find_capability(struct skd_device *skdev, int cap) 391 { 392 uint16_t status; 393 uint8_t pos, id, hdr; 394 int ttl = 48; 395 396 status = pci_config_get16(skdev->pci_handle, PCI_CONF_STAT); 397 398 if (!(status & PCI_STAT_CAP)) 399 return (0); 400 401 hdr = pci_config_get8(skdev->pci_handle, PCI_CONF_HEADER); 402 403 if ((hdr & PCI_HEADER_TYPE_M) != 0) 404 return (0); 405 406 pos = pci_config_get8(skdev->pci_handle, PCI_CONF_CAP_PTR); 407 408 while (ttl-- && pos >= 0x40) { 409 pos &= ~3; 410 id = pci_config_get8(skdev->pci_handle, pos+PCI_CAP_ID); 411 if (id == 0xff) 412 break; 413 if (id == cap) 414 return (pos); 415 pos = pci_config_get8(skdev->pci_handle, pos+PCI_CAP_NEXT_PTR); 416 } 417 418 return (0); 419 } 420 421 /* 422 * 423 * Name: skd_io_done, called to conclude an I/O operation. 424 * 425 * Inputs: skdev - device state structure. 426 * pbuf - I/O request 427 * error - contain error value. 428 * mode - debug only. 429 * 430 * Returns: Nothing. 431 * 432 */ 433 static void 434 skd_io_done(skd_device_t *skdev, skd_buf_private_t *pbuf, 435 int error, int mode) 436 { 437 bd_xfer_t *xfer; 438 439 ASSERT(pbuf != NULL); 440 441 xfer = pbuf->x_xfer; 442 443 switch (mode) { 444 case SKD_IODONE_WIOC: 445 skdev->iodone_wioc++; 446 break; 447 case SKD_IODONE_WNIOC: 448 skdev->iodone_wnioc++; 449 break; 450 case SKD_IODONE_WDEBUG: 451 skdev->iodone_wdebug++; 452 break; 453 default: 454 skdev->iodone_unknown++; 455 } 456 457 if (error) { 458 skdev->ios_errors++; 459 cmn_err(CE_WARN, 460 "!%s:skd_io_done:ERR=%d %lld-%ld %s", skdev->name, 461 error, xfer->x_blkno, xfer->x_nblks, 462 (pbuf->dir & B_READ) ? "Read" : "Write"); 463 } 464 465 kmem_free(pbuf, sizeof (skd_buf_private_t)); 466 467 bd_xfer_done(xfer, error); 468 } 469 470 /* 471 * QUIESCE DEVICE 472 */ 473 474 /* 475 * 476 * Name: skd_sys_quiesce_dev, quiets the device 477 * 478 * Inputs: dip - dev info strucuture 479 * 480 * Returns: Zero. 481 * 482 */ 483 static int 484 skd_sys_quiesce_dev(dev_info_t *dip) 485 { 486 skd_device_t *skdev; 487 488 skdev = ddi_get_soft_state(skd_state, ddi_get_instance(dip)); 489 490 /* make sure Dcmn_err() doesn't actually print anything */ 491 skd_dbg_level = 0; 492 493 skd_disable_interrupts(skdev); 494 skd_soft_reset(skdev); 495 496 return (0); 497 } 498 499 /* 500 * 501 * Name: skd_quiesce_dev, quiets the device, but doesn't really do much. 502 * 503 * Inputs: skdev - Device state. 504 * 505 * Returns: -EINVAL if device is not in proper state otherwise 506 * returns zero. 507 * 508 */ 509 static int 510 skd_quiesce_dev(skd_device_t *skdev) 511 { 512 int rc = 0; 513 514 if (skd_dbg_level) 515 Dcmn_err(CE_NOTE, "skd_quiece_dev:"); 516 517 switch (skdev->state) { 518 case SKD_DRVR_STATE_BUSY: 519 case SKD_DRVR_STATE_BUSY_IMMINENT: 520 Dcmn_err(CE_NOTE, "%s: stopping queue", skdev->name); 521 break; 522 case SKD_DRVR_STATE_ONLINE: 523 case SKD_DRVR_STATE_STOPPING: 524 case SKD_DRVR_STATE_SYNCING: 525 case SKD_DRVR_STATE_PAUSING: 526 case SKD_DRVR_STATE_PAUSED: 527 case SKD_DRVR_STATE_STARTING: 528 case SKD_DRVR_STATE_RESTARTING: 529 case SKD_DRVR_STATE_RESUMING: 530 default: 531 rc = -EINVAL; 532 cmn_err(CE_NOTE, "state [%d] not implemented", skdev->state); 533 } 534 535 return (rc); 536 } 537 538 /* 539 * UNQUIESCE DEVICE: 540 * Note: Assumes lock is held to protect device state. 541 */ 542 /* 543 * 544 * Name: skd_unquiesce_dev, awkens the device 545 * 546 * Inputs: skdev - Device state. 547 * 548 * Returns: -EINVAL if device is not in proper state otherwise 549 * returns zero. 550 * 551 */ 552 static int 553 skd_unquiesce_dev(struct skd_device *skdev) 554 { 555 Dcmn_err(CE_NOTE, "skd_unquiece_dev:"); 556 557 skd_log_skdev(skdev, "unquiesce"); 558 if (skdev->state == SKD_DRVR_STATE_ONLINE) { 559 Dcmn_err(CE_NOTE, "**** device already ONLINE"); 560 561 return (0); 562 } 563 if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) { 564 /* 565 * If there has been an state change to other than 566 * ONLINE, we will rely on controller state change 567 * to come back online and restart the queue. 568 * The BUSY state means that driver is ready to 569 * continue normal processing but waiting for controller 570 * to become available. 571 */ 572 skdev->state = SKD_DRVR_STATE_BUSY; 573 Dcmn_err(CE_NOTE, "drive BUSY state\n"); 574 575 return (0); 576 } 577 /* 578 * Drive just come online, driver is either in startup, 579 * paused performing a task, or bust waiting for hardware. 580 */ 581 switch (skdev->state) { 582 case SKD_DRVR_STATE_PAUSED: 583 case SKD_DRVR_STATE_BUSY: 584 case SKD_DRVR_STATE_BUSY_IMMINENT: 585 case SKD_DRVR_STATE_BUSY_ERASE: 586 case SKD_DRVR_STATE_STARTING: 587 case SKD_DRVR_STATE_RESTARTING: 588 case SKD_DRVR_STATE_FAULT: 589 case SKD_DRVR_STATE_IDLE: 590 case SKD_DRVR_STATE_LOAD: 591 skdev->state = SKD_DRVR_STATE_ONLINE; 592 Dcmn_err(CE_NOTE, "%s: sTec s1120 ONLINE", skdev->name); 593 Dcmn_err(CE_NOTE, "%s: Starting request queue", skdev->name); 594 Dcmn_err(CE_NOTE, 595 "%s: queue depth limit=%d hard=%d soft=%d lowat=%d", 596 skdev->name, 597 skdev->queue_depth_limit, 598 skdev->hard_queue_depth_limit, 599 skdev->soft_queue_depth_limit, 600 skdev->queue_depth_lowat); 601 602 skdev->gendisk_on = 1; 603 cv_signal(&skdev->cv_waitq); 604 break; 605 case SKD_DRVR_STATE_DISAPPEARED: 606 default: 607 cmn_err(CE_NOTE, "**** driver state %d, not implemented \n", 608 skdev->state); 609 return (-EBUSY); 610 } 611 612 return (0); 613 } 614 615 /* 616 * READ/WRITE REQUESTS 617 */ 618 619 /* 620 * 621 * Name: skd_blkdev_preop_sg_list, builds the S/G list from info 622 * passed in by the blkdev driver. 623 * 624 * Inputs: skdev - device state structure. 625 * skreq - request structure. 626 * sg_byte_count - data transfer byte count. 627 * 628 * Returns: Nothing. 629 * 630 */ 631 /*ARGSUSED*/ 632 static void 633 skd_blkdev_preop_sg_list(struct skd_device *skdev, 634 struct skd_request_context *skreq, uint32_t *sg_byte_count) 635 { 636 bd_xfer_t *xfer; 637 skd_buf_private_t *pbuf; 638 int i, bcount = 0; 639 uint_t n_sg; 640 641 *sg_byte_count = 0; 642 643 ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD || 644 skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); 645 646 pbuf = skreq->pbuf; 647 ASSERT(pbuf != NULL); 648 649 xfer = pbuf->x_xfer; 650 n_sg = xfer->x_ndmac; 651 652 ASSERT(n_sg <= skdev->sgs_per_request); 653 654 skreq->n_sg = n_sg; 655 656 skreq->io_dma_handle = xfer->x_dmah; 657 658 skreq->total_sg_bcount = 0; 659 660 for (i = 0; i < n_sg; i++) { 661 ddi_dma_cookie_t *cookiep = &xfer->x_dmac; 662 struct fit_sg_descriptor *sgd; 663 uint32_t cnt = (uint32_t)cookiep->dmac_size; 664 665 bcount += cnt; 666 667 sgd = &skreq->sksg_list[i]; 668 sgd->control = FIT_SGD_CONTROL_NOT_LAST; 669 sgd->byte_count = cnt; 670 sgd->host_side_addr = cookiep->dmac_laddress; 671 sgd->dev_side_addr = 0; /* not used */ 672 *sg_byte_count += cnt; 673 674 skreq->total_sg_bcount += cnt; 675 676 if ((i + 1) != n_sg) 677 ddi_dma_nextcookie(skreq->io_dma_handle, &xfer->x_dmac); 678 } 679 680 skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL; 681 skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST; 682 683 (void) ddi_dma_sync(skreq->sksg_dma_address.dma_handle, 0, 0, 684 DDI_DMA_SYNC_FORDEV); 685 } 686 687 /* 688 * 689 * Name: skd_blkdev_postop_sg_list, deallocates DMA 690 * 691 * Inputs: skdev - device state structure. 692 * skreq - skreq data structure. 693 * 694 * Returns: Nothing. 695 * 696 */ 697 /* ARGSUSED */ /* Upstream common source with other platforms. */ 698 static void 699 skd_blkdev_postop_sg_list(struct skd_device *skdev, 700 struct skd_request_context *skreq) 701 { 702 /* 703 * restore the next ptr for next IO request so we 704 * don't have to set it every time. 705 */ 706 skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr = 707 skreq->sksg_dma_address.cookies->dmac_laddress + 708 ((skreq->n_sg) * sizeof (struct fit_sg_descriptor)); 709 } 710 711 /* 712 * 713 * Name: skd_start, initiates an I/O. 714 * 715 * Inputs: skdev - device state structure. 716 * 717 * Returns: EAGAIN if devicfe is not ONLINE. 718 * On error, if the caller is the blkdev driver, return 719 * the error value. Otherwise, return zero. 720 * 721 */ 722 /* Upstream common source with other platforms. */ 723 static void 724 skd_start(skd_device_t *skdev) 725 { 726 struct skd_fitmsg_context *skmsg = NULL; 727 struct fit_msg_hdr *fmh = NULL; 728 struct skd_request_context *skreq = NULL; 729 struct waitqueue *waitq = &skdev->waitqueue; 730 struct skd_scsi_request *scsi_req; 731 skd_buf_private_t *pbuf = NULL; 732 int bcount; 733 734 uint32_t lba; 735 uint32_t count; 736 uint32_t timo_slot; 737 void *cmd_ptr; 738 uint32_t sg_byte_count = 0; 739 740 /* 741 * Stop conditions: 742 * - There are no more native requests 743 * - There are already the maximum number of requests is progress 744 * - There are no more skd_request_context entries 745 * - There are no more FIT msg buffers 746 */ 747 for (;;) { 748 /* Are too many requests already in progress? */ 749 if (skdev->queue_depth_busy >= skdev->queue_depth_limit) { 750 Dcmn_err(CE_NOTE, "qdepth %d, limit %d\n", 751 skdev->queue_depth_busy, 752 skdev->queue_depth_limit); 753 break; 754 } 755 756 WAITQ_LOCK(skdev); 757 if (SIMPLEQ_EMPTY(waitq)) { 758 WAITQ_UNLOCK(skdev); 759 break; 760 } 761 762 /* Is a skd_request_context available? */ 763 skreq = skdev->skreq_free_list; 764 if (skreq == NULL) { 765 WAITQ_UNLOCK(skdev); 766 break; 767 } 768 769 ASSERT(skreq->state == SKD_REQ_STATE_IDLE); 770 ASSERT((skreq->id & SKD_ID_INCR) == 0); 771 772 skdev->skreq_free_list = skreq->next; 773 774 skreq->state = SKD_REQ_STATE_BUSY; 775 skreq->id += SKD_ID_INCR; 776 777 /* Start a new FIT msg if there is none in progress. */ 778 if (skmsg == NULL) { 779 /* Are there any FIT msg buffers available? */ 780 skmsg = skdev->skmsg_free_list; 781 if (skmsg == NULL) { 782 WAITQ_UNLOCK(skdev); 783 break; 784 } 785 786 ASSERT(skmsg->state == SKD_MSG_STATE_IDLE); 787 ASSERT((skmsg->id & SKD_ID_INCR) == 0); 788 789 skdev->skmsg_free_list = skmsg->next; 790 791 skmsg->state = SKD_MSG_STATE_BUSY; 792 skmsg->id += SKD_ID_INCR; 793 794 /* Initialize the FIT msg header */ 795 fmh = (struct fit_msg_hdr *)skmsg->msg_buf64; 796 bzero(fmh, sizeof (*fmh)); /* Too expensive */ 797 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT; 798 skmsg->length = sizeof (struct fit_msg_hdr); 799 } 800 801 /* 802 * At this point we are committed to either start or reject 803 * the native request. Note that a FIT msg may have just been 804 * started but contains no SoFIT requests yet. 805 * Now - dequeue pbuf. 806 */ 807 pbuf = skd_get_queued_pbuf(skdev); 808 WAITQ_UNLOCK(skdev); 809 810 skreq->pbuf = pbuf; 811 lba = pbuf->x_xfer->x_blkno; 812 count = pbuf->x_xfer->x_nblks; 813 skreq->did_complete = 0; 814 815 skreq->fitmsg_id = skmsg->id; 816 817 Dcmn_err(CE_NOTE, 818 "pbuf=%p lba=%u(0x%x) count=%u(0x%x) dir=%x\n", 819 (void *)pbuf, lba, lba, count, count, pbuf->dir); 820 821 /* 822 * Transcode the request. 823 */ 824 cmd_ptr = &skmsg->msg_buf[skmsg->length]; 825 bzero(cmd_ptr, 32); /* This is too expensive */ 826 827 scsi_req = cmd_ptr; 828 scsi_req->hdr.tag = skreq->id; 829 scsi_req->hdr.sg_list_dma_address = 830 cpu_to_be64(skreq->sksg_dma_address.cookies->dmac_laddress); 831 scsi_req->cdb[1] = 0; 832 scsi_req->cdb[2] = (lba & 0xff000000) >> 24; 833 scsi_req->cdb[3] = (lba & 0xff0000) >> 16; 834 scsi_req->cdb[4] = (lba & 0xff00) >> 8; 835 scsi_req->cdb[5] = (lba & 0xff); 836 scsi_req->cdb[6] = 0; 837 scsi_req->cdb[7] = (count & 0xff00) >> 8; 838 scsi_req->cdb[8] = count & 0xff; 839 scsi_req->cdb[9] = 0; 840 841 if (pbuf->dir & B_READ) { 842 scsi_req->cdb[0] = 0x28; 843 skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST; 844 } else { 845 scsi_req->cdb[0] = 0x2a; 846 skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD; 847 } 848 849 skd_blkdev_preop_sg_list(skdev, skreq, &sg_byte_count); 850 851 scsi_req->hdr.sg_list_len_bytes = cpu_to_be32(sg_byte_count); 852 853 bcount = (sg_byte_count + 511) / 512; 854 scsi_req->cdb[7] = (bcount & 0xff00) >> 8; 855 scsi_req->cdb[8] = bcount & 0xff; 856 857 Dcmn_err(CE_NOTE, 858 "skd_start: pbuf=%p skreq->id=%x opc=%x ====>>>>>", 859 (void *)pbuf, skreq->id, *scsi_req->cdb); 860 861 skmsg->length += sizeof (struct skd_scsi_request); 862 fmh->num_protocol_cmds_coalesced++; 863 864 /* 865 * Update the active request counts. 866 * Capture the timeout timestamp. 867 */ 868 skreq->timeout_stamp = skdev->timeout_stamp; 869 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK; 870 871 atomic_inc_32(&skdev->timeout_slot[timo_slot]); 872 atomic_inc_32(&skdev->queue_depth_busy); 873 874 Dcmn_err(CE_NOTE, "req=0x%x busy=%d timo_slot=%d", 875 skreq->id, skdev->queue_depth_busy, timo_slot); 876 /* 877 * If the FIT msg buffer is full send it. 878 */ 879 if (skmsg->length >= SKD_N_FITMSG_BYTES || 880 fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) { 881 882 atomic_inc_64(&skdev->active_cmds); 883 pbuf->skreq = skreq; 884 885 skdev->fitmsg_sent1++; 886 skd_send_fitmsg(skdev, skmsg); 887 888 skmsg = NULL; 889 fmh = NULL; 890 } 891 } 892 893 /* 894 * Is a FIT msg in progress? If it is empty put the buffer back 895 * on the free list. If it is non-empty send what we got. 896 * This minimizes latency when there are fewer requests than 897 * what fits in a FIT msg. 898 */ 899 if (skmsg != NULL) { 900 ASSERT(skmsg->length > sizeof (struct fit_msg_hdr)); 901 Dcmn_err(CE_NOTE, "sending msg=%p, len %d", 902 (void *)skmsg, skmsg->length); 903 904 skdev->active_cmds++; 905 906 skdev->fitmsg_sent2++; 907 skd_send_fitmsg(skdev, skmsg); 908 } 909 } 910 911 /* 912 * 913 * Name: skd_end_request 914 * 915 * Inputs: skdev - device state structure. 916 * skreq - request structure. 917 * error - I/O error value. 918 * 919 * Returns: Nothing. 920 * 921 */ 922 static void 923 skd_end_request(struct skd_device *skdev, 924 struct skd_request_context *skreq, int error) 925 { 926 skdev->ios_completed++; 927 skd_io_done(skdev, skreq->pbuf, error, SKD_IODONE_WIOC); 928 skreq->pbuf = NULL; 929 skreq->did_complete = 1; 930 } 931 932 /* 933 * 934 * Name: skd_end_request_abnormal 935 * 936 * Inputs: skdev - device state structure. 937 * pbuf - I/O request. 938 * error - I/O error value. 939 * mode - debug 940 * 941 * Returns: Nothing. 942 * 943 */ 944 static void 945 skd_end_request_abnormal(skd_device_t *skdev, skd_buf_private_t *pbuf, 946 int error, int mode) 947 { 948 skd_io_done(skdev, pbuf, error, mode); 949 } 950 951 /* 952 * 953 * Name: skd_request_fn_not_online, handles the condition 954 * of the device not being online. 955 * 956 * Inputs: skdev - device state structure. 957 * 958 * Returns: nothing (void). 959 * 960 */ 961 static void 962 skd_request_fn_not_online(skd_device_t *skdev) 963 { 964 int error; 965 skd_buf_private_t *pbuf; 966 967 ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE); 968 969 skd_log_skdev(skdev, "req_not_online"); 970 971 switch (skdev->state) { 972 case SKD_DRVR_STATE_PAUSING: 973 case SKD_DRVR_STATE_PAUSED: 974 case SKD_DRVR_STATE_STARTING: 975 case SKD_DRVR_STATE_RESTARTING: 976 case SKD_DRVR_STATE_WAIT_BOOT: 977 /* 978 * In case of starting, we haven't started the queue, 979 * so we can't get here... but requests are 980 * possibly hanging out waiting for us because we 981 * reported the dev/skd/0 already. They'll wait 982 * forever if connect doesn't complete. 983 * What to do??? delay dev/skd/0 ?? 984 */ 985 case SKD_DRVR_STATE_BUSY: 986 case SKD_DRVR_STATE_BUSY_IMMINENT: 987 case SKD_DRVR_STATE_BUSY_ERASE: 988 case SKD_DRVR_STATE_DRAINING_TIMEOUT: 989 return; 990 991 case SKD_DRVR_STATE_BUSY_SANITIZE: 992 case SKD_DRVR_STATE_STOPPING: 993 case SKD_DRVR_STATE_SYNCING: 994 case SKD_DRVR_STATE_FAULT: 995 case SKD_DRVR_STATE_DISAPPEARED: 996 default: 997 error = -EIO; 998 break; 999 } 1000 1001 /* 1002 * If we get here, terminate all pending block requeusts 1003 * with EIO and any scsi pass thru with appropriate sense 1004 */ 1005 ASSERT(WAITQ_LOCK_HELD(skdev)); 1006 if (SIMPLEQ_EMPTY(&skdev->waitqueue)) 1007 return; 1008 1009 while ((pbuf = skd_get_queued_pbuf(skdev))) 1010 skd_end_request_abnormal(skdev, pbuf, error, SKD_IODONE_WNIOC); 1011 1012 cv_signal(&skdev->cv_waitq); 1013 } 1014 1015 /* 1016 * TIMER 1017 */ 1018 1019 static void skd_timer_tick_not_online(struct skd_device *skdev); 1020 1021 /* 1022 * 1023 * Name: skd_timer_tick, monitors requests for timeouts. 1024 * 1025 * Inputs: skdev - device state structure. 1026 * 1027 * Returns: Nothing. 1028 * 1029 */ 1030 static void 1031 skd_timer_tick(skd_device_t *skdev) 1032 { 1033 uint32_t timo_slot; 1034 1035 skdev->timer_active = 1; 1036 1037 if (skdev->state != SKD_DRVR_STATE_ONLINE) { 1038 skd_timer_tick_not_online(skdev); 1039 goto timer_func_out; 1040 } 1041 1042 skdev->timeout_stamp++; 1043 timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK; 1044 1045 /* 1046 * All requests that happened during the previous use of 1047 * this slot should be done by now. The previous use was 1048 * over 7 seconds ago. 1049 */ 1050 if (skdev->timeout_slot[timo_slot] == 0) { 1051 goto timer_func_out; 1052 } 1053 1054 /* Something is overdue */ 1055 Dcmn_err(CE_NOTE, "found %d timeouts, draining busy=%d", 1056 skdev->timeout_slot[timo_slot], 1057 skdev->queue_depth_busy); 1058 skdev->timer_countdown = SKD_TIMER_SECONDS(3); 1059 skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT; 1060 skdev->timo_slot = timo_slot; 1061 1062 timer_func_out: 1063 skdev->timer_active = 0; 1064 } 1065 1066 /* 1067 * 1068 * Name: skd_timer_tick_not_online, handles various device 1069 * state transitions. 1070 * 1071 * Inputs: skdev - device state structure. 1072 * 1073 * Returns: Nothing. 1074 * 1075 */ 1076 static void 1077 skd_timer_tick_not_online(struct skd_device *skdev) 1078 { 1079 Dcmn_err(CE_NOTE, "skd_skd_timer_tick_not_online: state=%d tmo=%d", 1080 skdev->state, skdev->timer_countdown); 1081 1082 ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE); 1083 1084 switch (skdev->state) { 1085 case SKD_DRVR_STATE_IDLE: 1086 case SKD_DRVR_STATE_LOAD: 1087 break; 1088 case SKD_DRVR_STATE_BUSY_SANITIZE: 1089 cmn_err(CE_WARN, "!drive busy sanitize[%x], driver[%x]\n", 1090 skdev->drive_state, skdev->state); 1091 break; 1092 1093 case SKD_DRVR_STATE_BUSY: 1094 case SKD_DRVR_STATE_BUSY_IMMINENT: 1095 case SKD_DRVR_STATE_BUSY_ERASE: 1096 Dcmn_err(CE_NOTE, "busy[%x], countdown=%d\n", 1097 skdev->state, skdev->timer_countdown); 1098 if (skdev->timer_countdown > 0) { 1099 skdev->timer_countdown--; 1100 return; 1101 } 1102 cmn_err(CE_WARN, "!busy[%x], timedout=%d, restarting device.", 1103 skdev->state, skdev->timer_countdown); 1104 skd_restart_device(skdev); 1105 break; 1106 1107 case SKD_DRVR_STATE_WAIT_BOOT: 1108 case SKD_DRVR_STATE_STARTING: 1109 if (skdev->timer_countdown > 0) { 1110 skdev->timer_countdown--; 1111 return; 1112 } 1113 /* 1114 * For now, we fault the drive. Could attempt resets to 1115 * revcover at some point. 1116 */ 1117 skdev->state = SKD_DRVR_STATE_FAULT; 1118 1119 cmn_err(CE_WARN, "!(%s): DriveFault Connect Timeout (%x)", 1120 skd_name(skdev), skdev->drive_state); 1121 1122 /* start the queue so we can respond with error to requests */ 1123 skd_start(skdev); 1124 1125 /* wakeup anyone waiting for startup complete */ 1126 skdev->gendisk_on = -1; 1127 1128 cv_signal(&skdev->cv_waitq); 1129 break; 1130 1131 1132 case SKD_DRVR_STATE_PAUSING: 1133 case SKD_DRVR_STATE_PAUSED: 1134 break; 1135 1136 case SKD_DRVR_STATE_DRAINING_TIMEOUT: 1137 cmn_err(CE_WARN, 1138 "!%s: draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n", 1139 skdev->name, 1140 skdev->timo_slot, 1141 skdev->timer_countdown, 1142 skdev->queue_depth_busy, 1143 skdev->timeout_slot[skdev->timo_slot]); 1144 /* if the slot has cleared we can let the I/O continue */ 1145 if (skdev->timeout_slot[skdev->timo_slot] == 0) { 1146 Dcmn_err(CE_NOTE, "Slot drained, starting queue."); 1147 skdev->state = SKD_DRVR_STATE_ONLINE; 1148 skd_start(skdev); 1149 return; 1150 } 1151 if (skdev->timer_countdown > 0) { 1152 skdev->timer_countdown--; 1153 return; 1154 } 1155 skd_restart_device(skdev); 1156 break; 1157 1158 case SKD_DRVR_STATE_RESTARTING: 1159 if (skdev->timer_countdown > 0) { 1160 skdev->timer_countdown--; 1161 1162 return; 1163 } 1164 /* 1165 * For now, we fault the drive. Could attempt resets to 1166 * revcover at some point. 1167 */ 1168 skdev->state = SKD_DRVR_STATE_FAULT; 1169 cmn_err(CE_WARN, "!(%s): DriveFault Reconnect Timeout (%x)\n", 1170 skd_name(skdev), skdev->drive_state); 1171 1172 /* 1173 * Recovering does two things: 1174 * 1. completes IO with error 1175 * 2. reclaims dma resources 1176 * When is it safe to recover requests? 1177 * - if the drive state is faulted 1178 * - if the state is still soft reset after out timeout 1179 * - if the drive registers are dead (state = FF) 1180 */ 1181 1182 if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) || 1183 (skdev->drive_state == FIT_SR_DRIVE_FAULT) || 1184 (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK)) { 1185 /* 1186 * It never came out of soft reset. Try to 1187 * recover the requests and then let them 1188 * fail. This is to mitigate hung processes. 1189 * 1190 * Acquire the interrupt lock since these lists are 1191 * manipulated by interrupt handlers. 1192 */ 1193 ASSERT(!WAITQ_LOCK_HELD(skdev)); 1194 INTR_LOCK(skdev); 1195 skd_recover_requests(skdev); 1196 INTR_UNLOCK(skdev); 1197 } 1198 /* start the queue so we can respond with error to requests */ 1199 skd_start(skdev); 1200 /* wakeup anyone waiting for startup complete */ 1201 skdev->gendisk_on = -1; 1202 cv_signal(&skdev->cv_waitq); 1203 break; 1204 1205 case SKD_DRVR_STATE_RESUMING: 1206 case SKD_DRVR_STATE_STOPPING: 1207 case SKD_DRVR_STATE_SYNCING: 1208 case SKD_DRVR_STATE_FAULT: 1209 case SKD_DRVR_STATE_DISAPPEARED: 1210 default: 1211 break; 1212 } 1213 } 1214 1215 /* 1216 * 1217 * Name: skd_timer, kicks off the timer processing. 1218 * 1219 * Inputs: skdev - device state structure. 1220 * 1221 * Returns: Nothing. 1222 * 1223 */ 1224 static void 1225 skd_timer(void *arg) 1226 { 1227 skd_device_t *skdev = (skd_device_t *)arg; 1228 1229 /* Someone set us to 0, don't bother rescheduling. */ 1230 ADAPTER_STATE_LOCK(skdev); 1231 if (skdev->skd_timer_timeout_id != 0) { 1232 ADAPTER_STATE_UNLOCK(skdev); 1233 /* Pardon the drop-and-then-acquire logic here. */ 1234 skd_timer_tick(skdev); 1235 ADAPTER_STATE_LOCK(skdev); 1236 /* Restart timer, if not being stopped. */ 1237 if (skdev->skd_timer_timeout_id != 0) { 1238 skdev->skd_timer_timeout_id = 1239 timeout(skd_timer, arg, skd_timer_ticks); 1240 } 1241 } 1242 ADAPTER_STATE_UNLOCK(skdev); 1243 } 1244 1245 /* 1246 * 1247 * Name: skd_start_timer, kicks off the 1-second timer. 1248 * 1249 * Inputs: skdev - device state structure. 1250 * 1251 * Returns: Zero. 1252 * 1253 */ 1254 static void 1255 skd_start_timer(struct skd_device *skdev) 1256 { 1257 /* Start one second driver timer. */ 1258 ADAPTER_STATE_LOCK(skdev); 1259 ASSERT(skdev->skd_timer_timeout_id == 0); 1260 1261 /* 1262 * Do first "timeout tick" right away, but not in this 1263 * thread. 1264 */ 1265 skdev->skd_timer_timeout_id = timeout(skd_timer, skdev, 1); 1266 ADAPTER_STATE_UNLOCK(skdev); 1267 } 1268 1269 /* 1270 * INTERNAL REQUESTS -- generated by driver itself 1271 */ 1272 1273 /* 1274 * 1275 * Name: skd_format_internal_skspcl, setups the internal 1276 * FIT request message. 1277 * 1278 * Inputs: skdev - device state structure. 1279 * 1280 * Returns: One. 1281 * 1282 */ 1283 static int 1284 skd_format_internal_skspcl(struct skd_device *skdev) 1285 { 1286 struct skd_special_context *skspcl = &skdev->internal_skspcl; 1287 struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0]; 1288 struct fit_msg_hdr *fmh; 1289 uint64_t dma_address; 1290 struct skd_scsi_request *scsi; 1291 1292 fmh = (struct fit_msg_hdr *)&skspcl->msg_buf64[0]; 1293 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT; 1294 fmh->num_protocol_cmds_coalesced = 1; 1295 1296 /* Instead of 64-bytes in, use 8-(64-bit-words) for linted alignment. */ 1297 scsi = (struct skd_scsi_request *)&skspcl->msg_buf64[8]; 1298 bzero(scsi, sizeof (*scsi)); 1299 dma_address = skspcl->req.sksg_dma_address.cookies->_dmu._dmac_ll; 1300 scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address); 1301 sgd->control = FIT_SGD_CONTROL_LAST; 1302 sgd->byte_count = 0; 1303 sgd->host_side_addr = skspcl->db_dma_address.cookies->_dmu._dmac_ll; 1304 sgd->dev_side_addr = 0; /* not used */ 1305 sgd->next_desc_ptr = 0LL; 1306 1307 return (1); 1308 } 1309 1310 /* 1311 * 1312 * Name: skd_send_internal_skspcl, send internal requests to 1313 * the hardware. 1314 * 1315 * Inputs: skdev - device state structure. 1316 * skspcl - request structure 1317 * opcode - just what it says 1318 * 1319 * Returns: Nothing. 1320 * 1321 */ 1322 void 1323 skd_send_internal_skspcl(struct skd_device *skdev, 1324 struct skd_special_context *skspcl, uint8_t opcode) 1325 { 1326 struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0]; 1327 struct skd_scsi_request *scsi; 1328 1329 if (SKD_REQ_STATE_IDLE != skspcl->req.state) { 1330 /* 1331 * A refresh is already in progress. 1332 * Just wait for it to finish. 1333 */ 1334 return; 1335 } 1336 1337 ASSERT(0 == (skspcl->req.id & SKD_ID_INCR)); 1338 skspcl->req.state = SKD_REQ_STATE_BUSY; 1339 skspcl->req.id += SKD_ID_INCR; 1340 1341 /* Instead of 64-bytes in, use 8-(64-bit-words) for linted alignment. */ 1342 scsi = (struct skd_scsi_request *)&skspcl->msg_buf64[8]; 1343 scsi->hdr.tag = skspcl->req.id; 1344 1345 Dcmn_err(CE_NOTE, "internal skspcl: opcode=%x req.id=%x ==========>", 1346 opcode, skspcl->req.id); 1347 1348 switch (opcode) { 1349 case TEST_UNIT_READY: 1350 scsi->cdb[0] = TEST_UNIT_READY; 1351 scsi->cdb[1] = 0x00; 1352 scsi->cdb[2] = 0x00; 1353 scsi->cdb[3] = 0x00; 1354 scsi->cdb[4] = 0x00; 1355 scsi->cdb[5] = 0x00; 1356 sgd->byte_count = 0; 1357 scsi->hdr.sg_list_len_bytes = 0; 1358 break; 1359 case READ_CAPACITY_EXT: 1360 scsi->cdb[0] = READ_CAPACITY_EXT; 1361 scsi->cdb[1] = 0x10; 1362 scsi->cdb[2] = 0x00; 1363 scsi->cdb[3] = 0x00; 1364 scsi->cdb[4] = 0x00; 1365 scsi->cdb[5] = 0x00; 1366 scsi->cdb[6] = 0x00; 1367 scsi->cdb[7] = 0x00; 1368 scsi->cdb[8] = 0x00; 1369 scsi->cdb[9] = 0x00; 1370 scsi->cdb[10] = 0x00; 1371 scsi->cdb[11] = 0x00; 1372 scsi->cdb[12] = 0x00; 1373 scsi->cdb[13] = 0x20; 1374 scsi->cdb[14] = 0x00; 1375 scsi->cdb[15] = 0x00; 1376 sgd->byte_count = SKD_N_READ_CAP_EXT_BYTES; 1377 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); 1378 break; 1379 case 0x28: 1380 (void) memset(skspcl->data_buf, 0x65, SKD_N_INTERNAL_BYTES); 1381 1382 scsi->cdb[0] = 0x28; 1383 scsi->cdb[1] = 0x00; 1384 scsi->cdb[2] = 0x00; 1385 scsi->cdb[3] = 0x00; 1386 scsi->cdb[4] = 0x00; 1387 scsi->cdb[5] = 0x00; 1388 scsi->cdb[6] = 0x00; 1389 scsi->cdb[7] = 0x00; 1390 scsi->cdb[8] = 0x01; 1391 scsi->cdb[9] = 0x00; 1392 sgd->byte_count = SKD_N_INTERNAL_BYTES; 1393 scsi->hdr.sg_list_len_bytes = cpu_to_be32(SKD_N_INTERNAL_BYTES); 1394 break; 1395 case INQUIRY: 1396 scsi->cdb[0] = INQUIRY; 1397 scsi->cdb[1] = 0x01; /* evpd */ 1398 scsi->cdb[2] = 0x80; /* serial number page */ 1399 scsi->cdb[3] = 0x00; 1400 scsi->cdb[4] = 0x10; 1401 scsi->cdb[5] = 0x00; 1402 sgd->byte_count = 16; /* SKD_N_INQ_BYTES */; 1403 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); 1404 break; 1405 case INQUIRY2: 1406 scsi->cdb[0] = INQUIRY; 1407 scsi->cdb[1] = 0x00; 1408 scsi->cdb[2] = 0x00; /* serial number page */ 1409 scsi->cdb[3] = 0x00; 1410 scsi->cdb[4] = 0x24; 1411 scsi->cdb[5] = 0x00; 1412 sgd->byte_count = 36; /* SKD_N_INQ_BYTES */; 1413 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); 1414 break; 1415 case SYNCHRONIZE_CACHE: 1416 scsi->cdb[0] = SYNCHRONIZE_CACHE; 1417 scsi->cdb[1] = 0x00; 1418 scsi->cdb[2] = 0x00; 1419 scsi->cdb[3] = 0x00; 1420 scsi->cdb[4] = 0x00; 1421 scsi->cdb[5] = 0x00; 1422 scsi->cdb[6] = 0x00; 1423 scsi->cdb[7] = 0x00; 1424 scsi->cdb[8] = 0x00; 1425 scsi->cdb[9] = 0x00; 1426 sgd->byte_count = 0; 1427 scsi->hdr.sg_list_len_bytes = 0; 1428 break; 1429 default: 1430 ASSERT("Don't know what to send"); 1431 return; 1432 1433 } 1434 1435 skd_send_special_fitmsg(skdev, skspcl); 1436 } 1437 1438 /* 1439 * 1440 * Name: skd_refresh_device_data, sends a TUR command. 1441 * 1442 * Inputs: skdev - device state structure. 1443 * 1444 * Returns: Nothing. 1445 * 1446 */ 1447 static void 1448 skd_refresh_device_data(struct skd_device *skdev) 1449 { 1450 struct skd_special_context *skspcl = &skdev->internal_skspcl; 1451 1452 Dcmn_err(CE_NOTE, "refresh_device_data: state=%d", skdev->state); 1453 1454 skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY); 1455 } 1456 1457 /* 1458 * 1459 * Name: skd_complete_internal, handles the completion of 1460 * driver-initiated I/O requests. 1461 * 1462 * Inputs: skdev - device state structure. 1463 * skcomp - completion structure. 1464 * skerr - error structure. 1465 * skspcl - request structure. 1466 * 1467 * Returns: Nothing. 1468 * 1469 */ 1470 /* ARGSUSED */ /* Upstream common source with other platforms. */ 1471 static void 1472 skd_complete_internal(struct skd_device *skdev, 1473 volatile struct fit_completion_entry_v1 *skcomp, 1474 volatile struct fit_comp_error_info *skerr, 1475 struct skd_special_context *skspcl) 1476 { 1477 uint8_t *buf = skspcl->data_buf; 1478 uint8_t status = 2; 1479 int i; 1480 /* Instead of 64-bytes in, use 8-(64-bit-words) for linted alignment. */ 1481 struct skd_scsi_request *scsi = 1482 (struct skd_scsi_request *)&skspcl->msg_buf64[8]; 1483 1484 ASSERT(skspcl == &skdev->internal_skspcl); 1485 1486 (void) ddi_dma_sync(skspcl->db_dma_address.dma_handle, 0, 0, 1487 DDI_DMA_SYNC_FORKERNEL); 1488 (void) ddi_dma_sync(skspcl->mb_dma_address.dma_handle, 0, 0, 1489 DDI_DMA_SYNC_FORKERNEL); 1490 1491 Dcmn_err(CE_NOTE, "complete internal %x", scsi->cdb[0]); 1492 1493 skspcl->req.completion = *skcomp; 1494 skspcl->req.state = SKD_REQ_STATE_IDLE; 1495 skspcl->req.id += SKD_ID_INCR; 1496 1497 status = skspcl->req.completion.status; 1498 1499 Dcmn_err(CE_NOTE, "<<<<====== complete_internal: opc=%x", *scsi->cdb); 1500 1501 switch (scsi->cdb[0]) { 1502 case TEST_UNIT_READY: 1503 if (SAM_STAT_GOOD == status) { 1504 skd_send_internal_skspcl(skdev, skspcl, 1505 READ_CAPACITY_EXT); 1506 } else { 1507 if (skdev->state == SKD_DRVR_STATE_STOPPING) { 1508 cmn_err(CE_WARN, 1509 "!%s: TUR failed, don't send anymore" 1510 "state 0x%x", skdev->name, skdev->state); 1511 1512 return; 1513 } 1514 1515 Dcmn_err(CE_NOTE, "%s: TUR failed, retry skerr", 1516 skdev->name); 1517 skd_send_internal_skspcl(skdev, skspcl, 0x00); 1518 } 1519 break; 1520 case READ_CAPACITY_EXT: { 1521 uint64_t cap, Nblocks; 1522 uint64_t xbuf[1]; 1523 1524 skdev->read_cap_is_valid = 0; 1525 if (SAM_STAT_GOOD == status) { 1526 bcopy(buf, xbuf, 8); 1527 cap = be64_to_cpu(*xbuf); 1528 skdev->read_cap_last_lba = cap; 1529 skdev->read_cap_blocksize = 1530 (buf[8] << 24) | (buf[9] << 16) | 1531 (buf[10] << 8) | buf[11]; 1532 1533 cap *= skdev->read_cap_blocksize; 1534 Dcmn_err(CE_NOTE, " Last LBA: %" PRIu64 " (0x%" PRIx64 1535 "), blk sz: %d, Capacity: %" PRIu64 "GB\n", 1536 skdev->read_cap_last_lba, 1537 skdev->read_cap_last_lba, 1538 skdev->read_cap_blocksize, 1539 cap >> 30ULL); 1540 1541 Nblocks = skdev->read_cap_last_lba + 1; 1542 1543 skdev->Nblocks = Nblocks; 1544 skdev->read_cap_is_valid = 1; 1545 1546 skd_send_internal_skspcl(skdev, skspcl, INQUIRY2); 1547 1548 } else { 1549 Dcmn_err(CE_NOTE, "**** READCAP failed, retry TUR"); 1550 skd_send_internal_skspcl(skdev, skspcl, 1551 TEST_UNIT_READY); 1552 } 1553 break; 1554 } 1555 case INQUIRY: 1556 skdev->inquiry_is_valid = 0; 1557 if (SAM_STAT_GOOD == status) { 1558 skdev->inquiry_is_valid = 1; 1559 1560 if (scsi->cdb[1] == 0x1) { 1561 bcopy(&buf[4], skdev->inq_serial_num, 12); 1562 skdev->inq_serial_num[12] = '\0'; 1563 } else { 1564 char *tmp = skdev->inq_vendor_id; 1565 1566 bcopy(&buf[8], tmp, 8); 1567 tmp[8] = '\0'; 1568 for (i = 7; i >= 0 && tmp[i] != '\0'; i--) 1569 if (tmp[i] == ' ') 1570 tmp[i] = '\0'; 1571 1572 tmp = skdev->inq_product_id; 1573 bcopy(&buf[16], tmp, 16); 1574 tmp[16] = '\0'; 1575 1576 for (i = 15; i >= 0 && tmp[i] != '\0'; i--) 1577 if (tmp[i] == ' ') 1578 tmp[i] = '\0'; 1579 1580 tmp = skdev->inq_product_rev; 1581 bcopy(&buf[32], tmp, 4); 1582 tmp[4] = '\0'; 1583 } 1584 } 1585 1586 if (skdev->state != SKD_DRVR_STATE_ONLINE) 1587 if (skd_unquiesce_dev(skdev) < 0) 1588 cmn_err(CE_NOTE, "** failed, to ONLINE device"); 1589 break; 1590 case SYNCHRONIZE_CACHE: 1591 skdev->sync_done = (SAM_STAT_GOOD == status) ? 1 : -1; 1592 1593 cv_signal(&skdev->cv_waitq); 1594 break; 1595 1596 default: 1597 ASSERT("we didn't send this"); 1598 } 1599 } 1600 1601 /* 1602 * FIT MESSAGES 1603 */ 1604 1605 /* 1606 * 1607 * Name: skd_send_fitmsg, send a FIT message to the hardware. 1608 * 1609 * Inputs: skdev - device state structure. 1610 * skmsg - FIT message structure. 1611 * 1612 * Returns: Nothing. 1613 * 1614 */ 1615 /* ARGSUSED */ /* Upstream common source with other platforms. */ 1616 static void 1617 skd_send_fitmsg(struct skd_device *skdev, 1618 struct skd_fitmsg_context *skmsg) 1619 { 1620 uint64_t qcmd; 1621 struct fit_msg_hdr *fmh; 1622 1623 Dcmn_err(CE_NOTE, "msgbuf's DMA addr: 0x%" PRIx64 ", qdepth_busy=%d", 1624 skmsg->mb_dma_address.cookies->dmac_laddress, 1625 skdev->queue_depth_busy); 1626 1627 Dcmn_err(CE_NOTE, "msg_buf 0x%p, offset %x", (void *)skmsg->msg_buf, 1628 skmsg->offset); 1629 1630 qcmd = skmsg->mb_dma_address.cookies->dmac_laddress; 1631 qcmd |= FIT_QCMD_QID_NORMAL; 1632 1633 fmh = (struct fit_msg_hdr *)skmsg->msg_buf64; 1634 skmsg->outstanding = fmh->num_protocol_cmds_coalesced; 1635 1636 if (skdev->dbg_level > 1) { 1637 uint8_t *bp = skmsg->msg_buf; 1638 int i; 1639 1640 for (i = 0; i < skmsg->length; i += 8) { 1641 Dcmn_err(CE_NOTE, " msg[%2d] %02x %02x %02x %02x " 1642 "%02x %02x %02x %02x", 1643 i, bp[i + 0], bp[i + 1], bp[i + 2], 1644 bp[i + 3], bp[i + 4], bp[i + 5], 1645 bp[i + 6], bp[i + 7]); 1646 if (i == 0) i = 64 - 8; 1647 } 1648 } 1649 1650 (void) ddi_dma_sync(skmsg->mb_dma_address.dma_handle, 0, 0, 1651 DDI_DMA_SYNC_FORDEV); 1652 1653 ASSERT(skmsg->length > sizeof (struct fit_msg_hdr)); 1654 if (skmsg->length > 256) { 1655 qcmd |= FIT_QCMD_MSGSIZE_512; 1656 } else if (skmsg->length > 128) { 1657 qcmd |= FIT_QCMD_MSGSIZE_256; 1658 } else if (skmsg->length > 64) { 1659 qcmd |= FIT_QCMD_MSGSIZE_128; 1660 } 1661 1662 skdev->ios_started++; 1663 1664 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND); 1665 } 1666 1667 /* 1668 * 1669 * Name: skd_send_special_fitmsg, send a special FIT message 1670 * to the hardware used driver-originated I/O requests. 1671 * 1672 * Inputs: skdev - device state structure. 1673 * skspcl - skspcl structure. 1674 * 1675 * Returns: Nothing. 1676 * 1677 */ 1678 static void 1679 skd_send_special_fitmsg(struct skd_device *skdev, 1680 struct skd_special_context *skspcl) 1681 { 1682 uint64_t qcmd; 1683 1684 Dcmn_err(CE_NOTE, "send_special_fitmsg: pt 1"); 1685 1686 if (skdev->dbg_level > 1) { 1687 uint8_t *bp = skspcl->msg_buf; 1688 int i; 1689 1690 for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) { 1691 cmn_err(CE_NOTE, 1692 " spcl[%2d] %02x %02x %02x %02x " 1693 "%02x %02x %02x %02x\n", i, 1694 bp[i + 0], bp[i + 1], bp[i + 2], bp[i + 3], 1695 bp[i + 4], bp[i + 5], bp[i + 6], bp[i + 7]); 1696 if (i == 0) i = 64 - 8; 1697 } 1698 1699 for (i = 0; i < skspcl->req.n_sg; i++) { 1700 struct fit_sg_descriptor *sgd = 1701 &skspcl->req.sksg_list[i]; 1702 1703 cmn_err(CE_NOTE, " sg[%d] count=%u ctrl=0x%x " 1704 "addr=0x%" PRIx64 " next=0x%" PRIx64, 1705 i, sgd->byte_count, sgd->control, 1706 sgd->host_side_addr, sgd->next_desc_ptr); 1707 } 1708 } 1709 1710 (void) ddi_dma_sync(skspcl->mb_dma_address.dma_handle, 0, 0, 1711 DDI_DMA_SYNC_FORDEV); 1712 (void) ddi_dma_sync(skspcl->db_dma_address.dma_handle, 0, 0, 1713 DDI_DMA_SYNC_FORDEV); 1714 1715 /* 1716 * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr 1717 * and one 64-byte SSDI command. 1718 */ 1719 qcmd = skspcl->mb_dma_address.cookies->dmac_laddress; 1720 1721 qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128; 1722 1723 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND); 1724 } 1725 1726 /* 1727 * COMPLETION QUEUE 1728 */ 1729 1730 static void skd_complete_other(struct skd_device *skdev, 1731 volatile struct fit_completion_entry_v1 *skcomp, 1732 volatile struct fit_comp_error_info *skerr); 1733 1734 struct sns_info { 1735 uint8_t type; 1736 uint8_t stat; 1737 uint8_t key; 1738 uint8_t asc; 1739 uint8_t ascq; 1740 uint8_t mask; 1741 enum skd_check_status_action action; 1742 }; 1743 1744 static struct sns_info skd_chkstat_table[] = { 1745 /* Good */ 1746 {0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c, SKD_CHECK_STATUS_REPORT_GOOD}, 1747 1748 /* Smart alerts */ 1749 {0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */ 1750 SKD_CHECK_STATUS_REPORT_SMART_ALERT}, 1751 {0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */ 1752 SKD_CHECK_STATUS_REPORT_SMART_ALERT}, 1753 {0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temp over trigger */ 1754 SKD_CHECK_STATUS_REPORT_SMART_ALERT}, 1755 1756 /* Retry (with limits) */ 1757 {0x70, 0x02, ABORTED_COMMAND, 0, 0, 0x1C, /* DMA errors */ 1758 SKD_CHECK_STATUS_REQUEUE_REQUEST}, 1759 {0x70, 0x02, UNIT_ATTENTION, 0x0B, 0x00, 0x1E, /* warnings */ 1760 SKD_CHECK_STATUS_REQUEUE_REQUEST}, 1761 {0x70, 0x02, UNIT_ATTENTION, 0x5D, 0x00, 0x1E, /* thresholds */ 1762 SKD_CHECK_STATUS_REQUEUE_REQUEST}, 1763 {0x70, 0x02, UNIT_ATTENTION, 0x80, 0x30, 0x1F, /* backup power */ 1764 SKD_CHECK_STATUS_REQUEUE_REQUEST}, 1765 1766 /* Busy (or about to be) */ 1767 {0x70, 0x02, UNIT_ATTENTION, 0x3f, 0x01, 0x1F, /* fw changed */ 1768 SKD_CHECK_STATUS_BUSY_IMMINENT}, 1769 }; 1770 1771 /* 1772 * 1773 * Name: skd_check_status, checks the return status from a 1774 * completed I/O request. 1775 * 1776 * Inputs: skdev - device state structure. 1777 * cmp_status - SCSI status byte. 1778 * skerr - the error data structure. 1779 * 1780 * Returns: Depending on the error condition, return the action 1781 * to be taken as specified in the skd_chkstat_table. 1782 * If no corresponding value is found in the table 1783 * return SKD_CHECK_STATUS_REPORT_GOOD is no error otherwise 1784 * return SKD_CHECK_STATUS_REPORT_ERROR. 1785 * 1786 */ 1787 static enum skd_check_status_action 1788 skd_check_status(struct skd_device *skdev, uint8_t cmp_status, 1789 volatile struct fit_comp_error_info *skerr) 1790 { 1791 /* 1792 * Look up status and sense data to decide how to handle the error 1793 * from the device. 1794 * mask says which fields must match e.g., mask=0x18 means check 1795 * type and stat, ignore key, asc, ascq. 1796 */ 1797 int i, n; 1798 1799 Dcmn_err(CE_NOTE, "(%s): key/asc/ascq %02x/%02x/%02x", 1800 skd_name(skdev), skerr->key, skerr->code, skerr->qual); 1801 1802 Dcmn_err(CE_NOTE, "stat: t=%02x stat=%02x k=%02x c=%02x q=%02x", 1803 skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual); 1804 1805 /* Does the info match an entry in the good category? */ 1806 n = sizeof (skd_chkstat_table) / sizeof (skd_chkstat_table[0]); 1807 for (i = 0; i < n; i++) { 1808 struct sns_info *sns = &skd_chkstat_table[i]; 1809 1810 if (sns->mask & 0x10) 1811 if (skerr->type != sns->type) continue; 1812 1813 if (sns->mask & 0x08) 1814 if (cmp_status != sns->stat) continue; 1815 1816 if (sns->mask & 0x04) 1817 if (skerr->key != sns->key) continue; 1818 1819 if (sns->mask & 0x02) 1820 if (skerr->code != sns->asc) continue; 1821 1822 if (sns->mask & 0x01) 1823 if (skerr->qual != sns->ascq) continue; 1824 1825 if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) { 1826 cmn_err(CE_WARN, "!(%s):SMART Alert: sense key/asc/ascq" 1827 " %02x/%02x/%02x", 1828 skd_name(skdev), skerr->key, 1829 skerr->code, skerr->qual); 1830 } 1831 1832 Dcmn_err(CE_NOTE, "skd_check_status: returning %x", 1833 sns->action); 1834 1835 return (sns->action); 1836 } 1837 1838 /* 1839 * No other match, so nonzero status means error, 1840 * zero status means good 1841 */ 1842 if (cmp_status) { 1843 cmn_err(CE_WARN, 1844 "!%s: status check: qdepth=%d skmfl=%p (%d) skrfl=%p (%d)", 1845 skdev->name, 1846 skdev->queue_depth_busy, 1847 (void *)skdev->skmsg_free_list, skd_list_skmsg(skdev, 0), 1848 (void *)skdev->skreq_free_list, skd_list_skreq(skdev, 0)); 1849 1850 cmn_err(CE_WARN, "!%s: t=%02x stat=%02x k=%02x c=%02x q=%02x", 1851 skdev->name, skerr->type, cmp_status, skerr->key, 1852 skerr->code, skerr->qual); 1853 1854 return (SKD_CHECK_STATUS_REPORT_ERROR); 1855 } 1856 1857 Dcmn_err(CE_NOTE, "status check good default"); 1858 1859 return (SKD_CHECK_STATUS_REPORT_GOOD); 1860 } 1861 1862 /* 1863 * 1864 * Name: skd_isr_completion_posted, handles I/O completions. 1865 * 1866 * Inputs: skdev - device state structure. 1867 * 1868 * Returns: Nothing. 1869 * 1870 */ 1871 static void 1872 skd_isr_completion_posted(struct skd_device *skdev) 1873 { 1874 volatile struct fit_completion_entry_v1 *skcmp = NULL; 1875 volatile struct fit_comp_error_info *skerr; 1876 struct skd_fitmsg_context *skmsg; 1877 struct skd_request_context *skreq; 1878 skd_buf_private_t *pbuf; 1879 uint16_t req_id; 1880 uint32_t req_slot; 1881 uint32_t timo_slot; 1882 uint32_t msg_slot; 1883 uint16_t cmp_cntxt = 0; 1884 uint8_t cmp_status = 0; 1885 uint8_t cmp_cycle = 0; 1886 uint32_t cmp_bytes = 0; 1887 1888 (void) ddi_dma_sync(skdev->cq_dma_address.dma_handle, 0, 0, 1889 DDI_DMA_SYNC_FORKERNEL); 1890 1891 for (;;) { 1892 ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY); 1893 1894 WAITQ_LOCK(skdev); 1895 1896 skcmp = &skdev->skcomp_table[skdev->skcomp_ix]; 1897 cmp_cycle = skcmp->cycle; 1898 cmp_cntxt = skcmp->tag; 1899 cmp_status = skcmp->status; 1900 cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes); 1901 1902 skerr = &skdev->skerr_table[skdev->skcomp_ix]; 1903 1904 Dcmn_err(CE_NOTE, 1905 "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d " 1906 "qdepth_busy=%d rbytes=0x%x proto=%d", 1907 skdev->skcomp_cycle, skdev->skcomp_ix, 1908 cmp_cycle, cmp_cntxt, cmp_status, 1909 skdev->queue_depth_busy, cmp_bytes, skdev->proto_ver); 1910 1911 if (cmp_cycle != skdev->skcomp_cycle) { 1912 Dcmn_err(CE_NOTE, "%s:end of completions", skdev->name); 1913 1914 WAITQ_UNLOCK(skdev); 1915 break; 1916 } 1917 1918 1919 skdev->n_req++; 1920 1921 /* 1922 * Update the completion queue head index and possibly 1923 * the completion cycle count. 1924 */ 1925 skdev->skcomp_ix++; 1926 if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) { 1927 skdev->skcomp_ix = 0; 1928 skdev->skcomp_cycle++; /* 8-bit wrap-around */ 1929 } 1930 1931 1932 /* 1933 * The command context is a unique 32-bit ID. The low order 1934 * bits help locate the request. The request is usually a 1935 * r/w request (see skd_start() above) or a special request. 1936 */ 1937 req_id = cmp_cntxt; 1938 req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK; 1939 1940 Dcmn_err(CE_NOTE, 1941 "<<<< completion_posted 1: req_id=%x req_slot=%x", 1942 req_id, req_slot); 1943 1944 /* Is this other than a r/w request? */ 1945 if (req_slot >= skdev->num_req_context) { 1946 /* 1947 * This is not a completion for a r/w request. 1948 */ 1949 skd_complete_other(skdev, skcmp, skerr); 1950 WAITQ_UNLOCK(skdev); 1951 continue; 1952 } 1953 1954 skreq = &skdev->skreq_table[req_slot]; 1955 1956 /* 1957 * Make sure the request ID for the slot matches. 1958 */ 1959 ASSERT(skreq->id == req_id); 1960 1961 if (SKD_REQ_STATE_ABORTED == skreq->state) { 1962 Dcmn_err(CE_NOTE, "reclaim req %p id=%04x\n", 1963 (void *)skreq, skreq->id); 1964 /* 1965 * a previously timed out command can 1966 * now be cleaned up 1967 */ 1968 msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK; 1969 ASSERT(msg_slot < skdev->num_fitmsg_context); 1970 skmsg = &skdev->skmsg_table[msg_slot]; 1971 if (skmsg->id == skreq->fitmsg_id) { 1972 ASSERT(skmsg->outstanding > 0); 1973 skmsg->outstanding--; 1974 if (skmsg->outstanding == 0) { 1975 ASSERT(SKD_MSG_STATE_BUSY == 1976 skmsg->state); 1977 skmsg->state = SKD_MSG_STATE_IDLE; 1978 skmsg->id += SKD_ID_INCR; 1979 skmsg->next = skdev->skmsg_free_list; 1980 skdev->skmsg_free_list = skmsg; 1981 } 1982 } 1983 /* 1984 * Reclaim the skd_request_context 1985 */ 1986 skreq->state = SKD_REQ_STATE_IDLE; 1987 skreq->id += SKD_ID_INCR; 1988 skreq->next = skdev->skreq_free_list; 1989 skdev->skreq_free_list = skreq; 1990 WAITQ_UNLOCK(skdev); 1991 continue; 1992 } 1993 1994 skreq->completion.status = cmp_status; 1995 1996 pbuf = skreq->pbuf; 1997 ASSERT(pbuf != NULL); 1998 1999 Dcmn_err(CE_NOTE, "<<<< completion_posted 2: pbuf=%p " 2000 "req_id=%x req_slot=%x", (void *)pbuf, req_id, req_slot); 2001 if (cmp_status && skdev->disks_initialized) { 2002 cmn_err(CE_WARN, "!%s: " 2003 "I/O err: pbuf=%p blkno=%lld (%llx) nbklks=%ld ", 2004 skdev->name, (void *)pbuf, pbuf->x_xfer->x_blkno, 2005 pbuf->x_xfer->x_blkno, pbuf->x_xfer->x_nblks); 2006 } 2007 2008 ASSERT(skdev->active_cmds); 2009 atomic_dec_64(&skdev->active_cmds); 2010 2011 if (SAM_STAT_GOOD == cmp_status) { 2012 /* Release DMA resources for the request. */ 2013 if (pbuf->x_xfer->x_nblks != 0) 2014 skd_blkdev_postop_sg_list(skdev, skreq); 2015 WAITQ_UNLOCK(skdev); 2016 skd_end_request(skdev, skreq, 0); 2017 WAITQ_LOCK(skdev); 2018 } else { 2019 switch (skd_check_status(skdev, cmp_status, skerr)) { 2020 case SKD_CHECK_STATUS_REPORT_GOOD: 2021 case SKD_CHECK_STATUS_REPORT_SMART_ALERT: 2022 WAITQ_UNLOCK(skdev); 2023 skd_end_request(skdev, skreq, 0); 2024 WAITQ_LOCK(skdev); 2025 break; 2026 2027 case SKD_CHECK_STATUS_BUSY_IMMINENT: 2028 skd_log_skreq(skdev, skreq, "retry(busy)"); 2029 skd_queue(skdev, pbuf); 2030 skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT; 2031 skdev->timer_countdown = SKD_TIMER_MINUTES(20); 2032 2033 (void) skd_quiesce_dev(skdev); 2034 break; 2035 2036 /* FALLTHRU */ 2037 case SKD_CHECK_STATUS_REPORT_ERROR: 2038 /* fall thru to report error */ 2039 default: 2040 /* 2041 * Save the entire completion 2042 * and error entries for 2043 * later error interpretation. 2044 */ 2045 skreq->completion = *skcmp; 2046 skreq->err_info = *skerr; 2047 WAITQ_UNLOCK(skdev); 2048 skd_end_request(skdev, skreq, -EIO); 2049 WAITQ_LOCK(skdev); 2050 break; 2051 } 2052 } 2053 2054 /* 2055 * Reclaim the FIT msg buffer if this is 2056 * the first of the requests it carried to 2057 * be completed. The FIT msg buffer used to 2058 * send this request cannot be reused until 2059 * we are sure the s1120 card has copied 2060 * it to its memory. The FIT msg might have 2061 * contained several requests. As soon as 2062 * any of them are completed we know that 2063 * the entire FIT msg was transferred. 2064 * Only the first completed request will 2065 * match the FIT msg buffer id. The FIT 2066 * msg buffer id is immediately updated. 2067 * When subsequent requests complete the FIT 2068 * msg buffer id won't match, so we know 2069 * quite cheaply that it is already done. 2070 */ 2071 msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK; 2072 2073 ASSERT(msg_slot < skdev->num_fitmsg_context); 2074 skmsg = &skdev->skmsg_table[msg_slot]; 2075 if (skmsg->id == skreq->fitmsg_id) { 2076 ASSERT(SKD_MSG_STATE_BUSY == skmsg->state); 2077 skmsg->state = SKD_MSG_STATE_IDLE; 2078 skmsg->id += SKD_ID_INCR; 2079 skmsg->next = skdev->skmsg_free_list; 2080 skdev->skmsg_free_list = skmsg; 2081 } 2082 2083 /* 2084 * Decrease the number of active requests. 2085 * This also decrements the count in the 2086 * timeout slot. 2087 */ 2088 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK; 2089 ASSERT(skdev->timeout_slot[timo_slot] > 0); 2090 ASSERT(skdev->queue_depth_busy > 0); 2091 2092 atomic_dec_32(&skdev->timeout_slot[timo_slot]); 2093 atomic_dec_32(&skdev->queue_depth_busy); 2094 2095 /* 2096 * Reclaim the skd_request_context 2097 */ 2098 skreq->state = SKD_REQ_STATE_IDLE; 2099 skreq->id += SKD_ID_INCR; 2100 skreq->next = skdev->skreq_free_list; 2101 skdev->skreq_free_list = skreq; 2102 2103 WAITQ_UNLOCK(skdev); 2104 2105 /* 2106 * make sure the lock is held by caller. 2107 */ 2108 if ((skdev->state == SKD_DRVR_STATE_PAUSING) && 2109 (0 == skdev->queue_depth_busy)) { 2110 skdev->state = SKD_DRVR_STATE_PAUSED; 2111 cv_signal(&skdev->cv_waitq); 2112 } 2113 } /* for(;;) */ 2114 } 2115 2116 /* 2117 * 2118 * Name: skd_complete_other, handle the completion of a 2119 * non-r/w request. 2120 * 2121 * Inputs: skdev - device state structure. 2122 * skcomp - FIT completion structure. 2123 * skerr - error structure. 2124 * 2125 * Returns: Nothing. 2126 * 2127 */ 2128 static void 2129 skd_complete_other(struct skd_device *skdev, 2130 volatile struct fit_completion_entry_v1 *skcomp, 2131 volatile struct fit_comp_error_info *skerr) 2132 { 2133 uint32_t req_id = 0; 2134 uint32_t req_table; 2135 uint32_t req_slot; 2136 struct skd_special_context *skspcl; 2137 2138 req_id = skcomp->tag; 2139 req_table = req_id & SKD_ID_TABLE_MASK; 2140 req_slot = req_id & SKD_ID_SLOT_MASK; 2141 2142 Dcmn_err(CE_NOTE, "complete_other: table=0x%x id=0x%x slot=%d", 2143 req_table, req_id, req_slot); 2144 2145 /* 2146 * Based on the request id, determine how to dispatch this completion. 2147 * This swich/case is finding the good cases and forwarding the 2148 * completion entry. Errors are reported below the switch. 2149 */ 2150 ASSERT(req_table == SKD_ID_INTERNAL); 2151 ASSERT(req_slot == 0); 2152 2153 skspcl = &skdev->internal_skspcl; 2154 ASSERT(skspcl->req.id == req_id); 2155 ASSERT(skspcl->req.state == SKD_REQ_STATE_BUSY); 2156 2157 Dcmn_err(CE_NOTE, "<<<<== complete_other: ID_INTERNAL"); 2158 skd_complete_internal(skdev, skcomp, skerr, skspcl); 2159 } 2160 2161 /* 2162 * 2163 * Name: skd_reset_skcomp, does what it says, resetting completion 2164 * tables. 2165 * 2166 * Inputs: skdev - device state structure. 2167 * 2168 * Returns: Nothing. 2169 * 2170 */ 2171 static void 2172 skd_reset_skcomp(struct skd_device *skdev) 2173 { 2174 uint32_t nbytes; 2175 2176 nbytes = sizeof (struct fit_completion_entry_v1) * 2177 SKD_N_COMPLETION_ENTRY; 2178 nbytes += sizeof (struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY; 2179 2180 if (skdev->skcomp_table) 2181 bzero(skdev->skcomp_table, nbytes); 2182 2183 skdev->skcomp_ix = 0; 2184 skdev->skcomp_cycle = 1; 2185 } 2186 2187 2188 2189 /* 2190 * INTERRUPTS 2191 */ 2192 2193 /* 2194 * 2195 * Name: skd_isr_aif, handles the device interrupts. 2196 * 2197 * Inputs: arg - skdev device state structure. 2198 * intvec - not referenced 2199 * 2200 * Returns: DDI_INTR_CLAIMED if interrupt is handled otherwise 2201 * return DDI_INTR_UNCLAIMED. 2202 * 2203 */ 2204 /* ARGSUSED */ /* Upstream common source with other platforms. */ 2205 static uint_t 2206 skd_isr_aif(caddr_t arg, caddr_t intvec) 2207 { 2208 uint32_t intstat; 2209 uint32_t ack; 2210 int rc = DDI_INTR_UNCLAIMED; 2211 struct skd_device *skdev; 2212 2213 skdev = (skd_device_t *)(uintptr_t)arg; 2214 2215 ASSERT(skdev != NULL); 2216 2217 skdev->intr_cntr++; 2218 2219 Dcmn_err(CE_NOTE, "skd_isr_aif: intr=%" PRId64 "\n", skdev->intr_cntr); 2220 2221 for (;;) { 2222 2223 ASSERT(!WAITQ_LOCK_HELD(skdev)); 2224 INTR_LOCK(skdev); 2225 2226 intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST); 2227 2228 ack = FIT_INT_DEF_MASK; 2229 ack &= intstat; 2230 2231 Dcmn_err(CE_NOTE, "intstat=0x%x ack=0x%x", intstat, ack); 2232 2233 /* 2234 * As long as there is an int pending on device, keep 2235 * running loop. When none, get out, but if we've never 2236 * done any processing, call completion handler? 2237 */ 2238 if (ack == 0) { 2239 /* 2240 * No interrupts on device, but run the completion 2241 * processor anyway? 2242 */ 2243 if (rc == DDI_INTR_UNCLAIMED && 2244 skdev->state == SKD_DRVR_STATE_ONLINE) { 2245 Dcmn_err(CE_NOTE, 2246 "1: Want isr_comp_posted call"); 2247 skd_isr_completion_posted(skdev); 2248 } 2249 INTR_UNLOCK(skdev); 2250 2251 break; 2252 } 2253 rc = DDI_INTR_CLAIMED; 2254 2255 SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST); 2256 2257 if ((skdev->state != SKD_DRVR_STATE_LOAD) && 2258 (skdev->state != SKD_DRVR_STATE_STOPPING)) { 2259 if (intstat & FIT_ISH_COMPLETION_POSTED) { 2260 Dcmn_err(CE_NOTE, 2261 "2: Want isr_comp_posted call"); 2262 skd_isr_completion_posted(skdev); 2263 } 2264 2265 if (intstat & FIT_ISH_FW_STATE_CHANGE) { 2266 Dcmn_err(CE_NOTE, "isr: fwstate change"); 2267 2268 skd_isr_fwstate(skdev); 2269 if (skdev->state == SKD_DRVR_STATE_FAULT || 2270 skdev->state == 2271 SKD_DRVR_STATE_DISAPPEARED) { 2272 INTR_UNLOCK(skdev); 2273 2274 return (rc); 2275 } 2276 } 2277 2278 if (intstat & FIT_ISH_MSG_FROM_DEV) { 2279 Dcmn_err(CE_NOTE, "isr: msg_from_dev change"); 2280 skd_isr_msg_from_dev(skdev); 2281 } 2282 } 2283 2284 INTR_UNLOCK(skdev); 2285 } 2286 2287 if (!SIMPLEQ_EMPTY(&skdev->waitqueue)) 2288 skd_start(skdev); 2289 2290 return (rc); 2291 } 2292 2293 /* 2294 * 2295 * Name: skd_drive_fault, set the drive state to DRV_STATE_FAULT. 2296 * 2297 * Inputs: skdev - device state structure. 2298 * 2299 * Returns: Nothing. 2300 * 2301 */ 2302 static void 2303 skd_drive_fault(struct skd_device *skdev) 2304 { 2305 skdev->state = SKD_DRVR_STATE_FAULT; 2306 cmn_err(CE_WARN, "!(%s): Drive FAULT\n", 2307 skd_name(skdev)); 2308 } 2309 2310 /* 2311 * 2312 * Name: skd_drive_disappeared, set the drive state to DISAPPEARED.. 2313 * 2314 * Inputs: skdev - device state structure. 2315 * 2316 * Returns: Nothing. 2317 * 2318 */ 2319 static void 2320 skd_drive_disappeared(struct skd_device *skdev) 2321 { 2322 skdev->state = SKD_DRVR_STATE_DISAPPEARED; 2323 cmn_err(CE_WARN, "!(%s): Drive DISAPPEARED\n", 2324 skd_name(skdev)); 2325 } 2326 2327 /* 2328 * 2329 * Name: skd_isr_fwstate, handles the various device states. 2330 * 2331 * Inputs: skdev - device state structure. 2332 * 2333 * Returns: Nothing. 2334 * 2335 */ 2336 static void 2337 skd_isr_fwstate(struct skd_device *skdev) 2338 { 2339 uint32_t sense; 2340 uint32_t state; 2341 int prev_driver_state; 2342 uint32_t mtd; 2343 2344 prev_driver_state = skdev->state; 2345 2346 sense = SKD_READL(skdev, FIT_STATUS); 2347 state = sense & FIT_SR_DRIVE_STATE_MASK; 2348 2349 Dcmn_err(CE_NOTE, "s1120 state %s(%d)=>%s(%d)", 2350 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, 2351 skd_drive_state_to_str(state), state); 2352 2353 skdev->drive_state = state; 2354 2355 switch (skdev->drive_state) { 2356 case FIT_SR_DRIVE_INIT: 2357 if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) { 2358 skd_disable_interrupts(skdev); 2359 break; 2360 } 2361 if (skdev->state == SKD_DRVR_STATE_RESTARTING) { 2362 skd_recover_requests(skdev); 2363 } 2364 if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) { 2365 skdev->timer_countdown = 2366 SKD_TIMER_SECONDS(SKD_STARTING_TO); 2367 skdev->state = SKD_DRVR_STATE_STARTING; 2368 skd_soft_reset(skdev); 2369 break; 2370 } 2371 mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0); 2372 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 2373 skdev->last_mtd = mtd; 2374 break; 2375 2376 case FIT_SR_DRIVE_ONLINE: 2377 skdev->queue_depth_limit = skdev->soft_queue_depth_limit; 2378 if (skdev->queue_depth_limit > skdev->hard_queue_depth_limit) { 2379 skdev->queue_depth_limit = 2380 skdev->hard_queue_depth_limit; 2381 } 2382 2383 skdev->queue_depth_lowat = skdev->queue_depth_limit * 2 / 3 + 1; 2384 if (skdev->queue_depth_lowat < 1) 2385 skdev->queue_depth_lowat = 1; 2386 Dcmn_err(CE_NOTE, 2387 "%s queue depth limit=%d hard=%d soft=%d lowat=%d", 2388 DRV_NAME, 2389 skdev->queue_depth_limit, 2390 skdev->hard_queue_depth_limit, 2391 skdev->soft_queue_depth_limit, 2392 skdev->queue_depth_lowat); 2393 2394 skd_refresh_device_data(skdev); 2395 break; 2396 case FIT_SR_DRIVE_BUSY: 2397 skdev->state = SKD_DRVR_STATE_BUSY; 2398 skdev->timer_countdown = SKD_TIMER_MINUTES(20); 2399 (void) skd_quiesce_dev(skdev); 2400 break; 2401 case FIT_SR_DRIVE_BUSY_SANITIZE: 2402 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE; 2403 skd_start(skdev); 2404 break; 2405 case FIT_SR_DRIVE_BUSY_ERASE: 2406 skdev->state = SKD_DRVR_STATE_BUSY_ERASE; 2407 skdev->timer_countdown = SKD_TIMER_MINUTES(20); 2408 break; 2409 case FIT_SR_DRIVE_OFFLINE: 2410 skdev->state = SKD_DRVR_STATE_IDLE; 2411 break; 2412 case FIT_SR_DRIVE_SOFT_RESET: 2413 skdev->state = SKD_DRVR_STATE_RESTARTING; 2414 2415 switch (skdev->state) { 2416 case SKD_DRVR_STATE_STARTING: 2417 case SKD_DRVR_STATE_RESTARTING: 2418 break; 2419 default: 2420 skdev->state = SKD_DRVR_STATE_RESTARTING; 2421 break; 2422 } 2423 break; 2424 case FIT_SR_DRIVE_FW_BOOTING: 2425 Dcmn_err(CE_NOTE, 2426 "ISR FIT_SR_DRIVE_FW_BOOTING %s", skdev->name); 2427 skdev->state = SKD_DRVR_STATE_WAIT_BOOT; 2428 skdev->timer_countdown = SKD_TIMER_SECONDS(SKD_WAIT_BOOT_TO); 2429 break; 2430 2431 case FIT_SR_DRIVE_DEGRADED: 2432 case FIT_SR_PCIE_LINK_DOWN: 2433 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD: 2434 break; 2435 2436 case FIT_SR_DRIVE_FAULT: 2437 skd_drive_fault(skdev); 2438 skd_recover_requests(skdev); 2439 skd_start(skdev); 2440 break; 2441 2442 case 0xFF: 2443 skd_drive_disappeared(skdev); 2444 skd_recover_requests(skdev); 2445 skd_start(skdev); 2446 break; 2447 default: 2448 /* 2449 * Uknown FW State. Wait for a state we recognize. 2450 */ 2451 break; 2452 } 2453 2454 Dcmn_err(CE_NOTE, "Driver state %s(%d)=>%s(%d)", 2455 skd_skdev_state_to_str(prev_driver_state), prev_driver_state, 2456 skd_skdev_state_to_str(skdev->state), skdev->state); 2457 } 2458 2459 /* 2460 * 2461 * Name: skd_recover_requests, attempts to recover requests. 2462 * 2463 * Inputs: skdev - device state structure. 2464 * 2465 * Returns: Nothing. 2466 * 2467 */ 2468 static void 2469 skd_recover_requests(struct skd_device *skdev) 2470 { 2471 int i; 2472 2473 ASSERT(INTR_LOCK_HELD(skdev)); 2474 2475 for (i = 0; i < skdev->num_req_context; i++) { 2476 struct skd_request_context *skreq = &skdev->skreq_table[i]; 2477 2478 if (skreq->state == SKD_REQ_STATE_BUSY) { 2479 skd_log_skreq(skdev, skreq, "requeue"); 2480 2481 ASSERT(0 != (skreq->id & SKD_ID_INCR)); 2482 ASSERT(skreq->pbuf != NULL); 2483 /* Release DMA resources for the request. */ 2484 skd_blkdev_postop_sg_list(skdev, skreq); 2485 2486 skd_end_request(skdev, skreq, EAGAIN); 2487 skreq->pbuf = NULL; 2488 skreq->state = SKD_REQ_STATE_IDLE; 2489 skreq->id += SKD_ID_INCR; 2490 } 2491 if (i > 0) { 2492 skreq[-1].next = skreq; 2493 } 2494 skreq->next = NULL; 2495 } 2496 2497 WAITQ_LOCK(skdev); 2498 skdev->skreq_free_list = skdev->skreq_table; 2499 WAITQ_UNLOCK(skdev); 2500 2501 for (i = 0; i < skdev->num_fitmsg_context; i++) { 2502 struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i]; 2503 2504 if (skmsg->state == SKD_MSG_STATE_BUSY) { 2505 skd_log_skmsg(skdev, skmsg, "salvaged"); 2506 ASSERT((skmsg->id & SKD_ID_INCR) != 0); 2507 skmsg->state = SKD_MSG_STATE_IDLE; 2508 skmsg->id &= ~SKD_ID_INCR; 2509 } 2510 if (i > 0) { 2511 skmsg[-1].next = skmsg; 2512 } 2513 skmsg->next = NULL; 2514 } 2515 WAITQ_LOCK(skdev); 2516 skdev->skmsg_free_list = skdev->skmsg_table; 2517 WAITQ_UNLOCK(skdev); 2518 2519 for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++) { 2520 skdev->timeout_slot[i] = 0; 2521 } 2522 skdev->queue_depth_busy = 0; 2523 } 2524 2525 /* 2526 * 2527 * Name: skd_isr_msg_from_dev, handles a message from the device. 2528 * 2529 * Inputs: skdev - device state structure. 2530 * 2531 * Returns: Nothing. 2532 * 2533 */ 2534 static void 2535 skd_isr_msg_from_dev(struct skd_device *skdev) 2536 { 2537 uint32_t mfd; 2538 uint32_t mtd; 2539 2540 Dcmn_err(CE_NOTE, "skd_isr_msg_from_dev:"); 2541 2542 mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE); 2543 2544 Dcmn_err(CE_NOTE, "mfd=0x%x last_mtd=0x%x\n", mfd, skdev->last_mtd); 2545 2546 /* 2547 * ignore any mtd that is an ack for something we didn't send 2548 */ 2549 if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd)) { 2550 return; 2551 } 2552 2553 switch (FIT_MXD_TYPE(mfd)) { 2554 case FIT_MTD_FITFW_INIT: 2555 skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd); 2556 2557 if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) { 2558 cmn_err(CE_WARN, "!(%s): protocol mismatch\n", 2559 skdev->name); 2560 cmn_err(CE_WARN, "!(%s): got=%d support=%d\n", 2561 skdev->name, skdev->proto_ver, 2562 FIT_PROTOCOL_VERSION_1); 2563 cmn_err(CE_WARN, "!(%s): please upgrade driver\n", 2564 skdev->name); 2565 skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH; 2566 skd_soft_reset(skdev); 2567 break; 2568 } 2569 mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0); 2570 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 2571 skdev->last_mtd = mtd; 2572 break; 2573 2574 case FIT_MTD_GET_CMDQ_DEPTH: 2575 skdev->hard_queue_depth_limit = FIT_MXD_DATA(mfd); 2576 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0, 2577 SKD_N_COMPLETION_ENTRY); 2578 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 2579 skdev->last_mtd = mtd; 2580 break; 2581 2582 case FIT_MTD_SET_COMPQ_DEPTH: 2583 SKD_WRITEQ(skdev, skdev->cq_dma_address.cookies->dmac_laddress, 2584 FIT_MSG_TO_DEVICE_ARG); 2585 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0); 2586 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 2587 skdev->last_mtd = mtd; 2588 break; 2589 2590 case FIT_MTD_SET_COMPQ_ADDR: 2591 skd_reset_skcomp(skdev); 2592 mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0); 2593 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 2594 skdev->last_mtd = mtd; 2595 break; 2596 2597 case FIT_MTD_ARM_QUEUE: 2598 skdev->last_mtd = 0; 2599 /* 2600 * State should be, or soon will be, FIT_SR_DRIVE_ONLINE. 2601 */ 2602 break; 2603 2604 default: 2605 break; 2606 } 2607 } 2608 2609 2610 /* 2611 * 2612 * Name: skd_disable_interrupts, issues command to disable 2613 * device interrupts. 2614 * 2615 * Inputs: skdev - device state structure. 2616 * 2617 * Returns: Nothing. 2618 * 2619 */ 2620 static void 2621 skd_disable_interrupts(struct skd_device *skdev) 2622 { 2623 uint32_t sense; 2624 2625 Dcmn_err(CE_NOTE, "skd_disable_interrupts:"); 2626 2627 sense = SKD_READL(skdev, FIT_CONTROL); 2628 sense &= ~FIT_CR_ENABLE_INTERRUPTS; 2629 SKD_WRITEL(skdev, sense, FIT_CONTROL); 2630 2631 Dcmn_err(CE_NOTE, "sense 0x%x", sense); 2632 2633 /* 2634 * Note that the 1s is written. A 1-bit means 2635 * disable, a 0 means enable. 2636 */ 2637 SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST); 2638 } 2639 2640 /* 2641 * 2642 * Name: skd_enable_interrupts, issues command to enable 2643 * device interrupts. 2644 * 2645 * Inputs: skdev - device state structure. 2646 * 2647 * Returns: Nothing. 2648 * 2649 */ 2650 static void 2651 skd_enable_interrupts(struct skd_device *skdev) 2652 { 2653 uint32_t val; 2654 2655 Dcmn_err(CE_NOTE, "skd_enable_interrupts:"); 2656 2657 /* unmask interrupts first */ 2658 val = FIT_ISH_FW_STATE_CHANGE + 2659 FIT_ISH_COMPLETION_POSTED + 2660 FIT_ISH_MSG_FROM_DEV; 2661 2662 /* 2663 * Note that the compliment of mask is written. A 1-bit means 2664 * disable, a 0 means enable. 2665 */ 2666 SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST); 2667 2668 Dcmn_err(CE_NOTE, "interrupt mask=0x%x", ~val); 2669 2670 val = SKD_READL(skdev, FIT_CONTROL); 2671 val |= FIT_CR_ENABLE_INTERRUPTS; 2672 2673 Dcmn_err(CE_NOTE, "control=0x%x", val); 2674 2675 SKD_WRITEL(skdev, val, FIT_CONTROL); 2676 } 2677 2678 /* 2679 * 2680 * Name: skd_soft_reset, issues a soft reset to the hardware. 2681 * 2682 * Inputs: skdev - device state structure. 2683 * 2684 * Returns: Nothing. 2685 * 2686 */ 2687 static void 2688 skd_soft_reset(struct skd_device *skdev) 2689 { 2690 uint32_t val; 2691 2692 Dcmn_err(CE_NOTE, "skd_soft_reset:"); 2693 2694 val = SKD_READL(skdev, FIT_CONTROL); 2695 val |= (FIT_CR_SOFT_RESET); 2696 2697 Dcmn_err(CE_NOTE, "soft_reset: control=0x%x", val); 2698 2699 SKD_WRITEL(skdev, val, FIT_CONTROL); 2700 } 2701 2702 /* 2703 * 2704 * Name: skd_start_device, gets the device going. 2705 * 2706 * Inputs: skdev - device state structure. 2707 * 2708 * Returns: Nothing. 2709 * 2710 */ 2711 static void 2712 skd_start_device(struct skd_device *skdev) 2713 { 2714 uint32_t state; 2715 int delay_action = 0; 2716 2717 Dcmn_err(CE_NOTE, "skd_start_device:"); 2718 2719 /* ack all ghost interrupts */ 2720 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); 2721 2722 state = SKD_READL(skdev, FIT_STATUS); 2723 2724 Dcmn_err(CE_NOTE, "initial status=0x%x", state); 2725 2726 state &= FIT_SR_DRIVE_STATE_MASK; 2727 skdev->drive_state = state; 2728 skdev->last_mtd = 0; 2729 2730 skdev->state = SKD_DRVR_STATE_STARTING; 2731 skdev->timer_countdown = SKD_TIMER_SECONDS(SKD_STARTING_TO); 2732 2733 skd_enable_interrupts(skdev); 2734 2735 switch (skdev->drive_state) { 2736 case FIT_SR_DRIVE_OFFLINE: 2737 Dcmn_err(CE_NOTE, "(%s): Drive offline...", 2738 skd_name(skdev)); 2739 break; 2740 2741 case FIT_SR_DRIVE_FW_BOOTING: 2742 Dcmn_err(CE_NOTE, "FIT_SR_DRIVE_FW_BOOTING %s\n", skdev->name); 2743 skdev->state = SKD_DRVR_STATE_WAIT_BOOT; 2744 skdev->timer_countdown = SKD_TIMER_SECONDS(SKD_WAIT_BOOT_TO); 2745 break; 2746 2747 case FIT_SR_DRIVE_BUSY_SANITIZE: 2748 Dcmn_err(CE_NOTE, "(%s): Start: BUSY_SANITIZE\n", 2749 skd_name(skdev)); 2750 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE; 2751 skdev->timer_countdown = SKD_TIMER_SECONDS(60); 2752 break; 2753 2754 case FIT_SR_DRIVE_BUSY_ERASE: 2755 Dcmn_err(CE_NOTE, "(%s): Start: BUSY_ERASE\n", 2756 skd_name(skdev)); 2757 skdev->state = SKD_DRVR_STATE_BUSY_ERASE; 2758 skdev->timer_countdown = SKD_TIMER_SECONDS(60); 2759 break; 2760 2761 case FIT_SR_DRIVE_INIT: 2762 case FIT_SR_DRIVE_ONLINE: 2763 skd_soft_reset(skdev); 2764 2765 break; 2766 2767 case FIT_SR_DRIVE_BUSY: 2768 Dcmn_err(CE_NOTE, "(%s): Drive Busy...\n", 2769 skd_name(skdev)); 2770 skdev->state = SKD_DRVR_STATE_BUSY; 2771 skdev->timer_countdown = SKD_TIMER_SECONDS(60); 2772 break; 2773 2774 case FIT_SR_DRIVE_SOFT_RESET: 2775 Dcmn_err(CE_NOTE, "(%s) drive soft reset in prog\n", 2776 skd_name(skdev)); 2777 break; 2778 2779 case FIT_SR_DRIVE_FAULT: 2780 /* 2781 * Fault state is bad...soft reset won't do it... 2782 * Hard reset, maybe, but does it work on device? 2783 * For now, just fault so the system doesn't hang. 2784 */ 2785 skd_drive_fault(skdev); 2786 2787 delay_action = 1; 2788 break; 2789 2790 case 0xFF: 2791 skd_drive_disappeared(skdev); 2792 2793 delay_action = 1; 2794 break; 2795 2796 default: 2797 Dcmn_err(CE_NOTE, "(%s) Start: unknown state %x\n", 2798 skd_name(skdev), skdev->drive_state); 2799 break; 2800 } 2801 2802 state = SKD_READL(skdev, FIT_CONTROL); 2803 Dcmn_err(CE_NOTE, "FIT Control Status=0x%x\n", state); 2804 2805 state = SKD_READL(skdev, FIT_INT_STATUS_HOST); 2806 Dcmn_err(CE_NOTE, "Intr Status=0x%x\n", state); 2807 2808 state = SKD_READL(skdev, FIT_INT_MASK_HOST); 2809 Dcmn_err(CE_NOTE, "Intr Mask=0x%x\n", state); 2810 2811 state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE); 2812 Dcmn_err(CE_NOTE, "Msg from Dev=0x%x\n", state); 2813 2814 state = SKD_READL(skdev, FIT_HW_VERSION); 2815 Dcmn_err(CE_NOTE, "HW version=0x%x\n", state); 2816 2817 if (delay_action) { 2818 /* start the queue so we can respond with error to requests */ 2819 Dcmn_err(CE_NOTE, "Starting %s queue\n", skdev->name); 2820 skd_start(skdev); 2821 skdev->gendisk_on = -1; 2822 cv_signal(&skdev->cv_waitq); 2823 } 2824 } 2825 2826 /* 2827 * 2828 * Name: skd_restart_device, restart the hardware. 2829 * 2830 * Inputs: skdev - device state structure. 2831 * 2832 * Returns: Nothing. 2833 * 2834 */ 2835 static void 2836 skd_restart_device(struct skd_device *skdev) 2837 { 2838 uint32_t state; 2839 2840 Dcmn_err(CE_NOTE, "skd_restart_device:"); 2841 2842 /* ack all ghost interrupts */ 2843 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); 2844 2845 state = SKD_READL(skdev, FIT_STATUS); 2846 2847 Dcmn_err(CE_NOTE, "skd_restart_device: drive status=0x%x\n", state); 2848 2849 state &= FIT_SR_DRIVE_STATE_MASK; 2850 skdev->drive_state = state; 2851 skdev->last_mtd = 0; 2852 2853 skdev->state = SKD_DRVR_STATE_RESTARTING; 2854 skdev->timer_countdown = SKD_TIMER_MINUTES(4); 2855 2856 skd_soft_reset(skdev); 2857 } 2858 2859 /* 2860 * 2861 * Name: skd_stop_device, stops the device. 2862 * 2863 * Inputs: skdev - device state structure. 2864 * 2865 * Returns: Nothing. 2866 * 2867 */ 2868 static void 2869 skd_stop_device(struct skd_device *skdev) 2870 { 2871 clock_t cur_ticks, tmo; 2872 int secs; 2873 struct skd_special_context *skspcl = &skdev->internal_skspcl; 2874 2875 if (SKD_DRVR_STATE_ONLINE != skdev->state) { 2876 Dcmn_err(CE_NOTE, "(%s): skd_stop_device not online no sync\n", 2877 skdev->name); 2878 goto stop_out; 2879 } 2880 2881 if (SKD_REQ_STATE_IDLE != skspcl->req.state) { 2882 Dcmn_err(CE_NOTE, "(%s): skd_stop_device no special\n", 2883 skdev->name); 2884 goto stop_out; 2885 } 2886 2887 skdev->state = SKD_DRVR_STATE_SYNCING; 2888 skdev->sync_done = 0; 2889 2890 skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE); 2891 2892 secs = 10; 2893 mutex_enter(&skdev->skd_internalio_mutex); 2894 while (skdev->sync_done == 0) { 2895 cur_ticks = ddi_get_lbolt(); 2896 tmo = cur_ticks + drv_usectohz(1000000 * secs); 2897 if (cv_timedwait(&skdev->cv_waitq, 2898 &skdev->skd_internalio_mutex, tmo) == -1) { 2899 /* Oops - timed out */ 2900 2901 Dcmn_err(CE_NOTE, "stop_device - %d secs TMO", secs); 2902 } 2903 } 2904 2905 mutex_exit(&skdev->skd_internalio_mutex); 2906 2907 switch (skdev->sync_done) { 2908 case 0: 2909 Dcmn_err(CE_NOTE, "(%s): skd_stop_device no sync\n", 2910 skdev->name); 2911 break; 2912 case 1: 2913 Dcmn_err(CE_NOTE, "(%s): skd_stop_device sync done\n", 2914 skdev->name); 2915 break; 2916 default: 2917 Dcmn_err(CE_NOTE, "(%s): skd_stop_device sync error\n", 2918 skdev->name); 2919 } 2920 2921 2922 stop_out: 2923 skdev->state = SKD_DRVR_STATE_STOPPING; 2924 2925 skd_disable_interrupts(skdev); 2926 2927 /* ensure all ints on device are cleared */ 2928 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); 2929 /* soft reset the device to unload with a clean slate */ 2930 SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL); 2931 } 2932 2933 /* 2934 * CONSTRUCT 2935 */ 2936 2937 static int skd_cons_skcomp(struct skd_device *); 2938 static int skd_cons_skmsg(struct skd_device *); 2939 static int skd_cons_skreq(struct skd_device *); 2940 static int skd_cons_sksb(struct skd_device *); 2941 static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *, uint32_t, 2942 dma_mem_t *); 2943 2944 /* 2945 * 2946 * Name: skd_construct, calls other routines to build device 2947 * interface structures. 2948 * 2949 * Inputs: skdev - device state structure. 2950 * instance - DDI instance number. 2951 * 2952 * Returns: Returns DDI_FAILURE on any failure otherwise returns 2953 * DDI_SUCCESS. 2954 * 2955 */ 2956 /* ARGSUSED */ /* Upstream common source with other platforms. */ 2957 static int 2958 skd_construct(skd_device_t *skdev, int instance) 2959 { 2960 int rc = 0; 2961 2962 skdev->state = SKD_DRVR_STATE_LOAD; 2963 skdev->irq_type = skd_isr_type; 2964 skdev->soft_queue_depth_limit = skd_max_queue_depth; 2965 skdev->hard_queue_depth_limit = 10; /* until GET_CMDQ_DEPTH */ 2966 2967 skdev->num_req_context = skd_max_queue_depth; 2968 skdev->num_fitmsg_context = skd_max_queue_depth; 2969 2970 skdev->queue_depth_limit = skdev->hard_queue_depth_limit; 2971 skdev->queue_depth_lowat = 1; 2972 skdev->proto_ver = 99; /* initialize to invalid value */ 2973 skdev->sgs_per_request = skd_sgs_per_request; 2974 skdev->dbg_level = skd_dbg_level; 2975 2976 rc = skd_cons_skcomp(skdev); 2977 if (rc < 0) { 2978 goto err_out; 2979 } 2980 2981 rc = skd_cons_skmsg(skdev); 2982 if (rc < 0) { 2983 goto err_out; 2984 } 2985 2986 rc = skd_cons_skreq(skdev); 2987 if (rc < 0) { 2988 goto err_out; 2989 } 2990 2991 rc = skd_cons_sksb(skdev); 2992 if (rc < 0) { 2993 goto err_out; 2994 } 2995 2996 Dcmn_err(CE_NOTE, "CONSTRUCT VICTORY"); 2997 2998 return (DDI_SUCCESS); 2999 3000 err_out: 3001 Dcmn_err(CE_NOTE, "construct failed\n"); 3002 skd_destruct(skdev); 3003 3004 return (DDI_FAILURE); 3005 } 3006 3007 /* 3008 * 3009 * Name: skd_free_phys, frees DMA memory. 3010 * 3011 * Inputs: skdev - device state structure. 3012 * mem - DMA info. 3013 * 3014 * Returns: Nothing. 3015 * 3016 */ 3017 static void 3018 skd_free_phys(skd_device_t *skdev, dma_mem_t *mem) 3019 { 3020 _NOTE(ARGUNUSED(skdev)); 3021 3022 if (mem == NULL || mem->dma_handle == NULL) 3023 return; 3024 3025 (void) ddi_dma_unbind_handle(mem->dma_handle); 3026 3027 if (mem->acc_handle != NULL) { 3028 ddi_dma_mem_free(&mem->acc_handle); 3029 mem->acc_handle = NULL; 3030 } 3031 3032 mem->bp = NULL; 3033 ddi_dma_free_handle(&mem->dma_handle); 3034 mem->dma_handle = NULL; 3035 } 3036 3037 /* 3038 * 3039 * Name: skd_alloc_dma_mem, allocates DMA memory. 3040 * 3041 * Inputs: skdev - device state structure. 3042 * mem - DMA data structure. 3043 * sleep - indicates whether called routine can sleep. 3044 * atype - specified 32 or 64 bit allocation. 3045 * 3046 * Returns: Void pointer to mem->bp on success else NULL. 3047 * NOTE: There are some failure modes even if sleep is set 3048 * to KM_SLEEP, so callers MUST check the return code even 3049 * if KM_SLEEP is passed in. 3050 * 3051 */ 3052 static void * 3053 skd_alloc_dma_mem(skd_device_t *skdev, dma_mem_t *mem, uint8_t atype) 3054 { 3055 size_t rlen; 3056 uint_t cnt; 3057 ddi_dma_attr_t dma_attr = skd_64bit_io_dma_attr; 3058 ddi_device_acc_attr_t acc_attr = { 3059 DDI_DEVICE_ATTR_V0, 3060 DDI_STRUCTURE_LE_ACC, 3061 DDI_STRICTORDER_ACC 3062 }; 3063 3064 if (atype == ATYPE_32BIT) 3065 dma_attr.dma_attr_addr_hi = SKD_DMA_HIGH_32BIT_ADDRESS; 3066 3067 dma_attr.dma_attr_sgllen = 1; 3068 3069 /* 3070 * Allocate DMA memory. 3071 */ 3072 if (ddi_dma_alloc_handle(skdev->dip, &dma_attr, DDI_DMA_SLEEP, NULL, 3073 &mem->dma_handle) != DDI_SUCCESS) { 3074 cmn_err(CE_WARN, "!alloc_dma_mem-1, failed"); 3075 3076 mem->dma_handle = NULL; 3077 3078 return (NULL); 3079 } 3080 3081 if (ddi_dma_mem_alloc(mem->dma_handle, mem->size, &acc_attr, 3082 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, (caddr_t *)&mem->bp, &rlen, 3083 &mem->acc_handle) != DDI_SUCCESS) { 3084 cmn_err(CE_WARN, "!skd_alloc_dma_mem-2, failed"); 3085 ddi_dma_free_handle(&mem->dma_handle); 3086 mem->dma_handle = NULL; 3087 mem->acc_handle = NULL; 3088 mem->bp = NULL; 3089 3090 return (NULL); 3091 } 3092 bzero(mem->bp, mem->size); 3093 3094 if (ddi_dma_addr_bind_handle(mem->dma_handle, NULL, mem->bp, 3095 mem->size, (DDI_DMA_CONSISTENT | DDI_DMA_RDWR), DDI_DMA_SLEEP, NULL, 3096 &mem->cookie, &cnt) != DDI_DMA_MAPPED) { 3097 cmn_err(CE_WARN, "!skd_alloc_dma_mem-3, failed"); 3098 ddi_dma_mem_free(&mem->acc_handle); 3099 ddi_dma_free_handle(&mem->dma_handle); 3100 3101 return (NULL); 3102 } 3103 3104 if (cnt > 1) { 3105 (void) ddi_dma_unbind_handle(mem->dma_handle); 3106 cmn_err(CE_WARN, "!skd_alloc_dma_mem-4, failed, " 3107 "cookie_count %d > 1", cnt); 3108 skd_free_phys(skdev, mem); 3109 3110 return (NULL); 3111 } 3112 mem->cookies = &mem->cookie; 3113 mem->cookies->dmac_size = mem->size; 3114 3115 return (mem->bp); 3116 } 3117 3118 /* 3119 * 3120 * Name: skd_cons_skcomp, allocates space for the skcomp table. 3121 * 3122 * Inputs: skdev - device state structure. 3123 * 3124 * Returns: -ENOMEM if no memory otherwise NULL. 3125 * 3126 */ 3127 static int 3128 skd_cons_skcomp(struct skd_device *skdev) 3129 { 3130 uint64_t *dma_alloc; 3131 struct fit_completion_entry_v1 *skcomp; 3132 int rc = 0; 3133 uint32_t nbytes; 3134 dma_mem_t *mem; 3135 3136 nbytes = sizeof (*skcomp) * SKD_N_COMPLETION_ENTRY; 3137 nbytes += sizeof (struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY; 3138 3139 Dcmn_err(CE_NOTE, "cons_skcomp: nbytes=%d,entries=%d", nbytes, 3140 SKD_N_COMPLETION_ENTRY); 3141 3142 mem = &skdev->cq_dma_address; 3143 mem->size = nbytes; 3144 3145 dma_alloc = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT); 3146 skcomp = (struct fit_completion_entry_v1 *)dma_alloc; 3147 if (skcomp == NULL) { 3148 rc = -ENOMEM; 3149 goto err_out; 3150 } 3151 3152 bzero(skcomp, nbytes); 3153 3154 Dcmn_err(CE_NOTE, "cons_skcomp: skcomp=%p nbytes=%d", 3155 (void *)skcomp, nbytes); 3156 3157 skdev->skcomp_table = skcomp; 3158 skdev->skerr_table = (struct fit_comp_error_info *)(dma_alloc + 3159 (SKD_N_COMPLETION_ENTRY * sizeof (*skcomp) / sizeof (uint64_t))); 3160 3161 err_out: 3162 return (rc); 3163 } 3164 3165 /* 3166 * 3167 * Name: skd_cons_skmsg, allocates space for the skmsg table. 3168 * 3169 * Inputs: skdev - device state structure. 3170 * 3171 * Returns: -ENOMEM if no memory otherwise NULL. 3172 * 3173 */ 3174 static int 3175 skd_cons_skmsg(struct skd_device *skdev) 3176 { 3177 dma_mem_t *mem; 3178 int rc = 0; 3179 uint32_t i; 3180 3181 Dcmn_err(CE_NOTE, "skmsg_table kzalloc, struct %lu, count %u total %lu", 3182 (ulong_t)sizeof (struct skd_fitmsg_context), 3183 skdev->num_fitmsg_context, 3184 (ulong_t)(sizeof (struct skd_fitmsg_context) * 3185 skdev->num_fitmsg_context)); 3186 3187 skdev->skmsg_table = (struct skd_fitmsg_context *)kmem_zalloc( 3188 sizeof (struct skd_fitmsg_context) * skdev->num_fitmsg_context, 3189 KM_SLEEP); 3190 3191 for (i = 0; i < skdev->num_fitmsg_context; i++) { 3192 struct skd_fitmsg_context *skmsg; 3193 3194 skmsg = &skdev->skmsg_table[i]; 3195 3196 skmsg->id = i + SKD_ID_FIT_MSG; 3197 3198 skmsg->state = SKD_MSG_STATE_IDLE; 3199 3200 mem = &skmsg->mb_dma_address; 3201 mem->size = SKD_N_FITMSG_BYTES + 64; 3202 3203 skmsg->msg_buf = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT); 3204 3205 if (NULL == skmsg->msg_buf) { 3206 rc = -ENOMEM; 3207 i++; 3208 break; 3209 } 3210 3211 skmsg->offset = 0; 3212 3213 bzero(skmsg->msg_buf, SKD_N_FITMSG_BYTES); 3214 3215 skmsg->next = &skmsg[1]; 3216 } 3217 3218 /* Free list is in order starting with the 0th entry. */ 3219 skdev->skmsg_table[i - 1].next = NULL; 3220 skdev->skmsg_free_list = skdev->skmsg_table; 3221 3222 return (rc); 3223 } 3224 3225 /* 3226 * 3227 * Name: skd_cons_skreq, allocates space for the skreq table. 3228 * 3229 * Inputs: skdev - device state structure. 3230 * 3231 * Returns: -ENOMEM if no memory otherwise NULL. 3232 * 3233 */ 3234 static int 3235 skd_cons_skreq(struct skd_device *skdev) 3236 { 3237 int rc = 0; 3238 uint32_t i; 3239 3240 Dcmn_err(CE_NOTE, 3241 "skreq_table kmem_zalloc, struct %lu, count %u total %lu", 3242 (ulong_t)sizeof (struct skd_request_context), 3243 skdev->num_req_context, 3244 (ulong_t) (sizeof (struct skd_request_context) * 3245 skdev->num_req_context)); 3246 3247 skdev->skreq_table = (struct skd_request_context *)kmem_zalloc( 3248 sizeof (struct skd_request_context) * skdev->num_req_context, 3249 KM_SLEEP); 3250 3251 for (i = 0; i < skdev->num_req_context; i++) { 3252 struct skd_request_context *skreq; 3253 3254 skreq = &skdev->skreq_table[i]; 3255 3256 skreq->id = (uint16_t)(i + SKD_ID_RW_REQUEST); 3257 skreq->state = SKD_REQ_STATE_IDLE; 3258 3259 skreq->sksg_list = skd_cons_sg_list(skdev, 3260 skdev->sgs_per_request, 3261 &skreq->sksg_dma_address); 3262 3263 if (NULL == skreq->sksg_list) { 3264 rc = -ENOMEM; 3265 goto err_out; 3266 } 3267 3268 skreq->next = &skreq[1]; 3269 } 3270 3271 /* Free list is in order starting with the 0th entry. */ 3272 skdev->skreq_table[i - 1].next = NULL; 3273 skdev->skreq_free_list = skdev->skreq_table; 3274 3275 err_out: 3276 return (rc); 3277 } 3278 3279 /* 3280 * 3281 * Name: skd_cons_sksb, allocates space for the skspcl msg buf 3282 * and data buf. 3283 * 3284 * Inputs: skdev - device state structure. 3285 * 3286 * Returns: -ENOMEM if no memory otherwise NULL. 3287 * 3288 */ 3289 static int 3290 skd_cons_sksb(struct skd_device *skdev) 3291 { 3292 int rc = 0; 3293 struct skd_special_context *skspcl; 3294 dma_mem_t *mem; 3295 uint32_t nbytes; 3296 3297 skspcl = &skdev->internal_skspcl; 3298 3299 skspcl->req.id = 0 + SKD_ID_INTERNAL; 3300 skspcl->req.state = SKD_REQ_STATE_IDLE; 3301 3302 nbytes = SKD_N_INTERNAL_BYTES; 3303 3304 mem = &skspcl->db_dma_address; 3305 mem->size = nbytes; 3306 3307 /* data_buf's DMA pointer is skspcl->db_dma_address */ 3308 skspcl->data_buf = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT); 3309 if (skspcl->data_buf == NULL) { 3310 rc = -ENOMEM; 3311 goto err_out; 3312 } 3313 3314 bzero(skspcl->data_buf, nbytes); 3315 3316 nbytes = SKD_N_SPECIAL_FITMSG_BYTES; 3317 3318 mem = &skspcl->mb_dma_address; 3319 mem->size = nbytes; 3320 3321 /* msg_buf DMA pointer is skspcl->mb_dma_address */ 3322 skspcl->msg_buf = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT); 3323 if (skspcl->msg_buf == NULL) { 3324 rc = -ENOMEM; 3325 goto err_out; 3326 } 3327 3328 3329 bzero(skspcl->msg_buf, nbytes); 3330 3331 skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1, 3332 &skspcl->req.sksg_dma_address); 3333 3334 3335 if (skspcl->req.sksg_list == NULL) { 3336 rc = -ENOMEM; 3337 goto err_out; 3338 } 3339 3340 if (skd_format_internal_skspcl(skdev) == 0) { 3341 rc = -EINVAL; 3342 goto err_out; 3343 } 3344 3345 err_out: 3346 return (rc); 3347 } 3348 3349 /* 3350 * 3351 * Name: skd_cons_sg_list, allocates the S/G list. 3352 * 3353 * Inputs: skdev - device state structure. 3354 * n_sg - Number of scatter-gather entries. 3355 * ret_dma_addr - S/G list DMA pointer. 3356 * 3357 * Returns: A list of FIT message descriptors. 3358 * 3359 */ 3360 static struct fit_sg_descriptor 3361 *skd_cons_sg_list(struct skd_device *skdev, 3362 uint32_t n_sg, dma_mem_t *ret_dma_addr) 3363 { 3364 struct fit_sg_descriptor *sg_list; 3365 uint32_t nbytes; 3366 dma_mem_t *mem; 3367 3368 nbytes = sizeof (*sg_list) * n_sg; 3369 3370 mem = ret_dma_addr; 3371 mem->size = nbytes; 3372 3373 /* sg_list's DMA pointer is *ret_dma_addr */ 3374 sg_list = skd_alloc_dma_mem(skdev, mem, ATYPE_32BIT); 3375 3376 if (sg_list != NULL) { 3377 uint64_t dma_address = ret_dma_addr->cookie.dmac_laddress; 3378 uint32_t i; 3379 3380 bzero(sg_list, nbytes); 3381 3382 for (i = 0; i < n_sg - 1; i++) { 3383 uint64_t ndp_off; 3384 ndp_off = (i + 1) * sizeof (struct fit_sg_descriptor); 3385 3386 sg_list[i].next_desc_ptr = dma_address + ndp_off; 3387 } 3388 sg_list[i].next_desc_ptr = 0LL; 3389 } 3390 3391 return (sg_list); 3392 } 3393 3394 /* 3395 * DESTRUCT (FREE) 3396 */ 3397 3398 static void skd_free_skcomp(struct skd_device *skdev); 3399 static void skd_free_skmsg(struct skd_device *skdev); 3400 static void skd_free_skreq(struct skd_device *skdev); 3401 static void skd_free_sksb(struct skd_device *skdev); 3402 3403 static void skd_free_sg_list(struct skd_device *skdev, 3404 struct fit_sg_descriptor *sg_list, 3405 uint32_t n_sg, dma_mem_t dma_addr); 3406 3407 /* 3408 * 3409 * Name: skd_destruct, call various rouines to deallocate 3410 * space acquired during initialization. 3411 * 3412 * Inputs: skdev - device state structure. 3413 * 3414 * Returns: Nothing. 3415 * 3416 */ 3417 static void 3418 skd_destruct(struct skd_device *skdev) 3419 { 3420 if (skdev == NULL) { 3421 return; 3422 } 3423 3424 Dcmn_err(CE_NOTE, "destruct sksb"); 3425 skd_free_sksb(skdev); 3426 3427 Dcmn_err(CE_NOTE, "destruct skreq"); 3428 skd_free_skreq(skdev); 3429 3430 Dcmn_err(CE_NOTE, "destruct skmsg"); 3431 skd_free_skmsg(skdev); 3432 3433 Dcmn_err(CE_NOTE, "destruct skcomp"); 3434 skd_free_skcomp(skdev); 3435 3436 Dcmn_err(CE_NOTE, "DESTRUCT VICTORY"); 3437 } 3438 3439 /* 3440 * 3441 * Name: skd_free_skcomp, deallocates skcomp table DMA resources. 3442 * 3443 * Inputs: skdev - device state structure. 3444 * 3445 * Returns: Nothing. 3446 * 3447 */ 3448 static void 3449 skd_free_skcomp(struct skd_device *skdev) 3450 { 3451 if (skdev->skcomp_table != NULL) { 3452 skd_free_phys(skdev, &skdev->cq_dma_address); 3453 } 3454 3455 skdev->skcomp_table = NULL; 3456 } 3457 3458 /* 3459 * 3460 * Name: skd_free_skmsg, deallocates skmsg table DMA resources. 3461 * 3462 * Inputs: skdev - device state structure. 3463 * 3464 * Returns: Nothing. 3465 * 3466 */ 3467 static void 3468 skd_free_skmsg(struct skd_device *skdev) 3469 { 3470 uint32_t i; 3471 3472 if (NULL == skdev->skmsg_table) 3473 return; 3474 3475 for (i = 0; i < skdev->num_fitmsg_context; i++) { 3476 struct skd_fitmsg_context *skmsg; 3477 3478 skmsg = &skdev->skmsg_table[i]; 3479 3480 if (skmsg->msg_buf != NULL) { 3481 skd_free_phys(skdev, &skmsg->mb_dma_address); 3482 } 3483 3484 3485 skmsg->msg_buf = NULL; 3486 } 3487 3488 kmem_free(skdev->skmsg_table, sizeof (struct skd_fitmsg_context) * 3489 skdev->num_fitmsg_context); 3490 3491 skdev->skmsg_table = NULL; 3492 3493 } 3494 3495 /* 3496 * 3497 * Name: skd_free_skreq, deallocates skspcl table DMA resources. 3498 * 3499 * Inputs: skdev - device state structure. 3500 * 3501 * Returns: Nothing. 3502 * 3503 */ 3504 static void 3505 skd_free_skreq(struct skd_device *skdev) 3506 { 3507 uint32_t i; 3508 3509 if (NULL == skdev->skreq_table) 3510 return; 3511 3512 for (i = 0; i < skdev->num_req_context; i++) { 3513 struct skd_request_context *skreq; 3514 3515 skreq = &skdev->skreq_table[i]; 3516 3517 skd_free_sg_list(skdev, skreq->sksg_list, 3518 skdev->sgs_per_request, skreq->sksg_dma_address); 3519 3520 skreq->sksg_list = NULL; 3521 } 3522 3523 kmem_free(skdev->skreq_table, sizeof (struct skd_request_context) * 3524 skdev->num_req_context); 3525 3526 skdev->skreq_table = NULL; 3527 3528 } 3529 3530 /* 3531 * 3532 * Name: skd_free_sksb, deallocates skspcl data buf and 3533 * msg buf DMA resources. 3534 * 3535 * Inputs: skdev - device state structure. 3536 * 3537 * Returns: Nothing. 3538 * 3539 */ 3540 static void 3541 skd_free_sksb(struct skd_device *skdev) 3542 { 3543 struct skd_special_context *skspcl; 3544 3545 skspcl = &skdev->internal_skspcl; 3546 3547 if (skspcl->data_buf != NULL) { 3548 skd_free_phys(skdev, &skspcl->db_dma_address); 3549 } 3550 3551 skspcl->data_buf = NULL; 3552 3553 if (skspcl->msg_buf != NULL) { 3554 skd_free_phys(skdev, &skspcl->mb_dma_address); 3555 } 3556 3557 skspcl->msg_buf = NULL; 3558 3559 skd_free_sg_list(skdev, skspcl->req.sksg_list, 1, 3560 skspcl->req.sksg_dma_address); 3561 3562 skspcl->req.sksg_list = NULL; 3563 } 3564 3565 /* 3566 * 3567 * Name: skd_free_sg_list, deallocates S/G DMA resources. 3568 * 3569 * Inputs: skdev - device state structure. 3570 * sg_list - S/G list itself. 3571 * n_sg - nukmber of segments 3572 * dma_addr - S/G list DMA address. 3573 * 3574 * Returns: Nothing. 3575 * 3576 */ 3577 /* ARGSUSED */ /* Upstream common source with other platforms. */ 3578 static void 3579 skd_free_sg_list(struct skd_device *skdev, 3580 struct fit_sg_descriptor *sg_list, 3581 uint32_t n_sg, dma_mem_t dma_addr) 3582 { 3583 if (sg_list != NULL) { 3584 skd_free_phys(skdev, &dma_addr); 3585 } 3586 } 3587 3588 /* 3589 * 3590 * Name: skd_queue, queues the I/O request. 3591 * 3592 * Inputs: skdev - device state structure. 3593 * pbuf - I/O request 3594 * 3595 * Returns: Nothing. 3596 * 3597 */ 3598 static void 3599 skd_queue(skd_device_t *skdev, skd_buf_private_t *pbuf) 3600 { 3601 struct waitqueue *waitq; 3602 3603 ASSERT(skdev != NULL); 3604 ASSERT(pbuf != NULL); 3605 3606 ASSERT(WAITQ_LOCK_HELD(skdev)); 3607 3608 waitq = &skdev->waitqueue; 3609 3610 if (SIMPLEQ_EMPTY(waitq)) 3611 SIMPLEQ_INSERT_HEAD(waitq, pbuf, sq); 3612 else 3613 SIMPLEQ_INSERT_TAIL(waitq, pbuf, sq); 3614 } 3615 3616 /* 3617 * 3618 * Name: skd_list_skreq, displays the skreq table entries. 3619 * 3620 * Inputs: skdev - device state structure. 3621 * list - flag, if true displays the entry address. 3622 * 3623 * Returns: Returns number of skmsg entries found. 3624 * 3625 */ 3626 /* ARGSUSED */ /* Upstream common source with other platforms. */ 3627 static int 3628 skd_list_skreq(skd_device_t *skdev, int list) 3629 { 3630 int inx = 0; 3631 struct skd_request_context *skreq; 3632 3633 if (list) { 3634 Dcmn_err(CE_NOTE, "skreq_table[0]\n"); 3635 3636 skreq = &skdev->skreq_table[0]; 3637 while (skreq) { 3638 if (list) 3639 Dcmn_err(CE_NOTE, 3640 "%d: skreq=%p state=%d id=%x fid=%x " 3641 "pbuf=%p dir=%d comp=%d\n", 3642 inx, (void *)skreq, skreq->state, 3643 skreq->id, skreq->fitmsg_id, 3644 (void *)skreq->pbuf, 3645 skreq->sg_data_dir, skreq->did_complete); 3646 inx++; 3647 skreq = skreq->next; 3648 } 3649 } 3650 3651 inx = 0; 3652 skreq = skdev->skreq_free_list; 3653 3654 if (list) 3655 Dcmn_err(CE_NOTE, "skreq_free_list\n"); 3656 while (skreq) { 3657 if (list) 3658 Dcmn_err(CE_NOTE, "%d: skreq=%p state=%d id=%x fid=%x " 3659 "pbuf=%p dir=%d\n", inx, (void *)skreq, 3660 skreq->state, skreq->id, skreq->fitmsg_id, 3661 (void *)skreq->pbuf, skreq->sg_data_dir); 3662 inx++; 3663 skreq = skreq->next; 3664 } 3665 3666 return (inx); 3667 } 3668 3669 /* 3670 * 3671 * Name: skd_list_skmsg, displays the skmsg table entries. 3672 * 3673 * Inputs: skdev - device state structure. 3674 * list - flag, if true displays the entry address. 3675 * 3676 * Returns: Returns number of skmsg entries found. 3677 * 3678 */ 3679 static int 3680 skd_list_skmsg(skd_device_t *skdev, int list) 3681 { 3682 int inx = 0; 3683 struct skd_fitmsg_context *skmsgp; 3684 3685 skmsgp = &skdev->skmsg_table[0]; 3686 3687 if (list) { 3688 Dcmn_err(CE_NOTE, "skmsg_table[0]\n"); 3689 3690 while (skmsgp) { 3691 if (list) 3692 Dcmn_err(CE_NOTE, "%d: skmsgp=%p id=%x outs=%d " 3693 "l=%d o=%d nxt=%p\n", inx, (void *)skmsgp, 3694 skmsgp->id, skmsgp->outstanding, 3695 skmsgp->length, skmsgp->offset, 3696 (void *)skmsgp->next); 3697 inx++; 3698 skmsgp = skmsgp->next; 3699 } 3700 } 3701 3702 inx = 0; 3703 if (list) 3704 Dcmn_err(CE_NOTE, "skmsg_free_list\n"); 3705 skmsgp = skdev->skmsg_free_list; 3706 while (skmsgp) { 3707 if (list) 3708 Dcmn_err(CE_NOTE, "%d: skmsgp=%p id=%x outs=%d l=%d " 3709 "o=%d nxt=%p\n", 3710 inx, (void *)skmsgp, skmsgp->id, 3711 skmsgp->outstanding, skmsgp->length, 3712 skmsgp->offset, (void *)skmsgp->next); 3713 inx++; 3714 skmsgp = skmsgp->next; 3715 } 3716 3717 return (inx); 3718 } 3719 3720 /* 3721 * 3722 * Name: skd_get_queue_pbuf, retrieves top of queue entry and 3723 * delinks entry from the queue. 3724 * 3725 * Inputs: skdev - device state structure. 3726 * drive - device number 3727 * 3728 * Returns: Returns the top of the job queue entry. 3729 * 3730 */ 3731 static skd_buf_private_t 3732 *skd_get_queued_pbuf(skd_device_t *skdev) 3733 { 3734 skd_buf_private_t *pbuf; 3735 3736 ASSERT(WAITQ_LOCK_HELD(skdev)); 3737 pbuf = SIMPLEQ_FIRST(&skdev->waitqueue); 3738 if (pbuf != NULL) 3739 SIMPLEQ_REMOVE_HEAD(&skdev->waitqueue, sq); 3740 return (pbuf); 3741 } 3742 3743 /* 3744 * PCI DRIVER GLUE 3745 */ 3746 3747 /* 3748 * 3749 * Name: skd_pci_info, logs certain device PCI info. 3750 * 3751 * Inputs: skdev - device state structure. 3752 * 3753 * Returns: str which contains the device speed info.. 3754 * 3755 */ 3756 static char * 3757 skd_pci_info(struct skd_device *skdev, char *str, size_t len) 3758 { 3759 int pcie_reg; 3760 3761 str[0] = '\0'; 3762 3763 pcie_reg = skd_pci_find_capability(skdev, PCI_CAP_ID_EXP); 3764 3765 if (pcie_reg) { 3766 uint16_t lstat, lspeed, lwidth; 3767 3768 pcie_reg += 0x12; 3769 lstat = pci_config_get16(skdev->pci_handle, pcie_reg); 3770 lspeed = lstat & (0xF); 3771 lwidth = (lstat & 0x3F0) >> 4; 3772 3773 (void) snprintf(str, len, "PCIe (%s rev %d)", 3774 lspeed == 1 ? "2.5GT/s" : 3775 lspeed == 2 ? "5.0GT/s" : "<unknown>", 3776 lwidth); 3777 } 3778 3779 return (str); 3780 } 3781 3782 /* 3783 * MODULE GLUE 3784 */ 3785 3786 /* 3787 * 3788 * Name: skd_init, initializes certain values. 3789 * 3790 * Inputs: skdev - device state structure. 3791 * 3792 * Returns: Zero. 3793 * 3794 */ 3795 /* ARGSUSED */ /* Upstream common source with other platforms. */ 3796 static int 3797 skd_init(skd_device_t *skdev) 3798 { 3799 Dcmn_err(CE_NOTE, "skd_init: v%s-b%s\n", DRV_VERSION, DRV_BUILD_ID); 3800 3801 if (skd_max_queue_depth < 1 || 3802 skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) { 3803 cmn_err(CE_NOTE, "skd_max_q_depth %d invalid, re-set to %d\n", 3804 skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT); 3805 skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT; 3806 } 3807 3808 if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) { 3809 cmn_err(CE_NOTE, "skd_max_req_per_msg %d invalid, set to %d\n", 3810 skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT); 3811 skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT; 3812 } 3813 3814 3815 if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) { 3816 cmn_err(CE_NOTE, "skd_sg_per_request %d invalid, set to %d\n", 3817 skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT); 3818 skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT; 3819 } 3820 3821 if (skd_dbg_level < 0 || skd_dbg_level > 2) { 3822 cmn_err(CE_NOTE, "skd_dbg_level %d invalid, re-set to %d\n", 3823 skd_dbg_level, 0); 3824 skd_dbg_level = 0; 3825 } 3826 3827 return (0); 3828 } 3829 3830 /* 3831 * 3832 * Name: skd_exit, exits the driver & logs the fact. 3833 * 3834 * Inputs: none. 3835 * 3836 * Returns: Nothing. 3837 * 3838 */ 3839 static void 3840 skd_exit(void) 3841 { 3842 cmn_err(CE_NOTE, "skd v%s unloading", DRV_VERSION); 3843 } 3844 3845 /* 3846 * 3847 * Name: skd_drive_state_to_str, converts binary drive state 3848 * to its corresponding string value. 3849 * 3850 * Inputs: Drive state. 3851 * 3852 * Returns: String representing drive state. 3853 * 3854 */ 3855 const char * 3856 skd_drive_state_to_str(int state) 3857 { 3858 switch (state) { 3859 case FIT_SR_DRIVE_OFFLINE: return ("OFFLINE"); 3860 case FIT_SR_DRIVE_INIT: return ("INIT"); 3861 case FIT_SR_DRIVE_ONLINE: return ("ONLINE"); 3862 case FIT_SR_DRIVE_BUSY: return ("BUSY"); 3863 case FIT_SR_DRIVE_FAULT: return ("FAULT"); 3864 case FIT_SR_DRIVE_DEGRADED: return ("DEGRADED"); 3865 case FIT_SR_PCIE_LINK_DOWN: return ("LINK_DOWN"); 3866 case FIT_SR_DRIVE_SOFT_RESET: return ("SOFT_RESET"); 3867 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD: return ("NEED_FW"); 3868 case FIT_SR_DRIVE_INIT_FAULT: return ("INIT_FAULT"); 3869 case FIT_SR_DRIVE_BUSY_SANITIZE:return ("BUSY_SANITIZE"); 3870 case FIT_SR_DRIVE_BUSY_ERASE: return ("BUSY_ERASE"); 3871 case FIT_SR_DRIVE_FW_BOOTING: return ("FW_BOOTING"); 3872 default: return ("???"); 3873 } 3874 } 3875 3876 /* 3877 * 3878 * Name: skd_skdev_state_to_str, converts binary driver state 3879 * to its corresponding string value. 3880 * 3881 * Inputs: Driver state. 3882 * 3883 * Returns: String representing driver state. 3884 * 3885 */ 3886 static const char * 3887 skd_skdev_state_to_str(enum skd_drvr_state state) 3888 { 3889 switch (state) { 3890 case SKD_DRVR_STATE_LOAD: return ("LOAD"); 3891 case SKD_DRVR_STATE_IDLE: return ("IDLE"); 3892 case SKD_DRVR_STATE_BUSY: return ("BUSY"); 3893 case SKD_DRVR_STATE_STARTING: return ("STARTING"); 3894 case SKD_DRVR_STATE_ONLINE: return ("ONLINE"); 3895 case SKD_DRVR_STATE_PAUSING: return ("PAUSING"); 3896 case SKD_DRVR_STATE_PAUSED: return ("PAUSED"); 3897 case SKD_DRVR_STATE_DRAINING_TIMEOUT: return ("DRAINING_TIMEOUT"); 3898 case SKD_DRVR_STATE_RESTARTING: return ("RESTARTING"); 3899 case SKD_DRVR_STATE_RESUMING: return ("RESUMING"); 3900 case SKD_DRVR_STATE_STOPPING: return ("STOPPING"); 3901 case SKD_DRVR_STATE_SYNCING: return ("SYNCING"); 3902 case SKD_DRVR_STATE_FAULT: return ("FAULT"); 3903 case SKD_DRVR_STATE_DISAPPEARED: return ("DISAPPEARED"); 3904 case SKD_DRVR_STATE_BUSY_ERASE: return ("BUSY_ERASE"); 3905 case SKD_DRVR_STATE_BUSY_SANITIZE:return ("BUSY_SANITIZE"); 3906 case SKD_DRVR_STATE_BUSY_IMMINENT: return ("BUSY_IMMINENT"); 3907 case SKD_DRVR_STATE_WAIT_BOOT: return ("WAIT_BOOT"); 3908 3909 default: return ("???"); 3910 } 3911 } 3912 3913 /* 3914 * 3915 * Name: skd_skmsg_state_to_str, converts binary driver state 3916 * to its corresponding string value. 3917 * 3918 * Inputs: Msg state. 3919 * 3920 * Returns: String representing msg state. 3921 * 3922 */ 3923 static const char * 3924 skd_skmsg_state_to_str(enum skd_fit_msg_state state) 3925 { 3926 switch (state) { 3927 case SKD_MSG_STATE_IDLE: return ("IDLE"); 3928 case SKD_MSG_STATE_BUSY: return ("BUSY"); 3929 default: return ("???"); 3930 } 3931 } 3932 3933 /* 3934 * 3935 * Name: skd_skreq_state_to_str, converts binary req state 3936 * to its corresponding string value. 3937 * 3938 * Inputs: Req state. 3939 * 3940 * Returns: String representing req state. 3941 * 3942 */ 3943 static const char * 3944 skd_skreq_state_to_str(enum skd_req_state state) 3945 { 3946 switch (state) { 3947 case SKD_REQ_STATE_IDLE: return ("IDLE"); 3948 case SKD_REQ_STATE_SETUP: return ("SETUP"); 3949 case SKD_REQ_STATE_BUSY: return ("BUSY"); 3950 case SKD_REQ_STATE_COMPLETED: return ("COMPLETED"); 3951 case SKD_REQ_STATE_TIMEOUT: return ("TIMEOUT"); 3952 case SKD_REQ_STATE_ABORTED: return ("ABORTED"); 3953 default: return ("???"); 3954 } 3955 } 3956 3957 /* 3958 * 3959 * Name: skd_log_skdev, logs device state & parameters. 3960 * 3961 * Inputs: skdev - device state structure. 3962 * event - event (string) to log. 3963 * 3964 * Returns: Nothing. 3965 * 3966 */ 3967 static void 3968 skd_log_skdev(struct skd_device *skdev, const char *event) 3969 { 3970 Dcmn_err(CE_NOTE, "log_skdev(%s) skdev=%p event='%s'", 3971 skdev->name, (void *)skdev, event); 3972 Dcmn_err(CE_NOTE, " drive_state=%s(%d) driver_state=%s(%d)", 3973 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, 3974 skd_skdev_state_to_str(skdev->state), skdev->state); 3975 Dcmn_err(CE_NOTE, " busy=%d limit=%d soft=%d hard=%d lowat=%d", 3976 skdev->queue_depth_busy, skdev->queue_depth_limit, 3977 skdev->soft_queue_depth_limit, skdev->hard_queue_depth_limit, 3978 skdev->queue_depth_lowat); 3979 Dcmn_err(CE_NOTE, " timestamp=0x%x cycle=%d cycle_ix=%d", 3980 skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix); 3981 } 3982 3983 /* 3984 * 3985 * Name: skd_log_skmsg, logs the skmsg event. 3986 * 3987 * Inputs: skdev - device state structure. 3988 * skmsg - FIT message structure. 3989 * event - event string to log. 3990 * 3991 * Returns: Nothing. 3992 * 3993 */ 3994 static void 3995 skd_log_skmsg(struct skd_device *skdev, 3996 struct skd_fitmsg_context *skmsg, const char *event) 3997 { 3998 Dcmn_err(CE_NOTE, "log_skmsg:(%s) skmsg=%p event='%s'", 3999 skdev->name, (void *)skmsg, event); 4000 Dcmn_err(CE_NOTE, " state=%s(%d) id=0x%04x length=%d", 4001 skd_skmsg_state_to_str(skmsg->state), skmsg->state, 4002 skmsg->id, skmsg->length); 4003 } 4004 4005 /* 4006 * 4007 * Name: skd_log_skreq, logs the skreq event. 4008 * 4009 * Inputs: skdev - device state structure. 4010 * skreq -skreq structure. 4011 * event - event string to log. 4012 * 4013 * Returns: Nothing. 4014 * 4015 */ 4016 static void 4017 skd_log_skreq(struct skd_device *skdev, 4018 struct skd_request_context *skreq, const char *event) 4019 { 4020 skd_buf_private_t *pbuf; 4021 4022 Dcmn_err(CE_NOTE, "log_skreq: (%s) skreq=%p pbuf=%p event='%s'", 4023 skdev->name, (void *)skreq, (void *)skreq->pbuf, event); 4024 4025 Dcmn_err(CE_NOTE, " state=%s(%d) id=0x%04x fitmsg=0x%04x", 4026 skd_skreq_state_to_str(skreq->state), skreq->state, 4027 skreq->id, skreq->fitmsg_id); 4028 Dcmn_err(CE_NOTE, " timo=0x%x sg_dir=%d n_sg=%d", 4029 skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg); 4030 4031 if ((pbuf = skreq->pbuf) != NULL) { 4032 uint32_t lba, count; 4033 lba = pbuf->x_xfer->x_blkno; 4034 count = pbuf->x_xfer->x_nblks; 4035 Dcmn_err(CE_NOTE, " pbuf=%p lba=%u(0x%x) count=%u(0x%x) ", 4036 (void *)pbuf, lba, lba, count, count); 4037 Dcmn_err(CE_NOTE, " dir=%s " 4038 " intrs=%" PRId64 " qdepth=%d", 4039 (pbuf->dir & B_READ) ? "Read" : "Write", 4040 skdev->intr_cntr, skdev->queue_depth_busy); 4041 } else { 4042 Dcmn_err(CE_NOTE, " req=NULL\n"); 4043 } 4044 } 4045 4046 /* 4047 * 4048 * Name: skd_init_mutex, initializes all mutexes. 4049 * 4050 * Inputs: skdev - device state structure. 4051 * 4052 * Returns: DDI_FAILURE on failure otherwise DDI_SUCCESS. 4053 * 4054 */ 4055 static int 4056 skd_init_mutex(skd_device_t *skdev) 4057 { 4058 void *intr; 4059 4060 Dcmn_err(CE_CONT, "(%s%d): init_mutex flags=%x", DRV_NAME, 4061 skdev->instance, skdev->flags); 4062 4063 intr = (void *)(uintptr_t)skdev->intr_pri; 4064 4065 if (skdev->flags & SKD_MUTEX_INITED) 4066 cmn_err(CE_NOTE, "init_mutex: Oh-Oh - already INITED"); 4067 4068 /* mutexes to protect the adapter state structure. */ 4069 mutex_init(&skdev->skd_lock_mutex, NULL, MUTEX_DRIVER, 4070 DDI_INTR_PRI(intr)); 4071 mutex_init(&skdev->skd_intr_mutex, NULL, MUTEX_DRIVER, 4072 DDI_INTR_PRI(intr)); 4073 mutex_init(&skdev->waitqueue_mutex, NULL, MUTEX_DRIVER, 4074 DDI_INTR_PRI(intr)); 4075 mutex_init(&skdev->skd_internalio_mutex, NULL, MUTEX_DRIVER, 4076 DDI_INTR_PRI(intr)); 4077 4078 cv_init(&skdev->cv_waitq, NULL, CV_DRIVER, NULL); 4079 4080 skdev->flags |= SKD_MUTEX_INITED; 4081 if (skdev->flags & SKD_MUTEX_DESTROYED) 4082 skdev->flags &= ~SKD_MUTEX_DESTROYED; 4083 4084 Dcmn_err(CE_CONT, "init_mutex (%s%d): done, flags=%x", DRV_NAME, 4085 skdev->instance, skdev->flags); 4086 4087 return (DDI_SUCCESS); 4088 } 4089 4090 /* 4091 * 4092 * Name: skd_destroy_mutex, destroys all mutexes. 4093 * 4094 * Inputs: skdev - device state structure. 4095 * 4096 * Returns: Nothing. 4097 * 4098 */ 4099 static void 4100 skd_destroy_mutex(skd_device_t *skdev) 4101 { 4102 if ((skdev->flags & SKD_MUTEX_DESTROYED) == 0) { 4103 if (skdev->flags & SKD_MUTEX_INITED) { 4104 mutex_destroy(&skdev->waitqueue_mutex); 4105 mutex_destroy(&skdev->skd_intr_mutex); 4106 mutex_destroy(&skdev->skd_lock_mutex); 4107 mutex_destroy(&skdev->skd_internalio_mutex); 4108 4109 cv_destroy(&skdev->cv_waitq); 4110 4111 skdev->flags |= SKD_MUTEX_DESTROYED; 4112 4113 if (skdev->flags & SKD_MUTEX_INITED) 4114 skdev->flags &= ~SKD_MUTEX_INITED; 4115 } 4116 } 4117 } 4118 4119 /* 4120 * 4121 * Name: skd_setup_intr, setup the interrupt handling 4122 * 4123 * Inputs: skdev - device state structure. 4124 * intr_type - requested DDI interrupt type. 4125 * 4126 * Returns: DDI_FAILURE on failure otherwise DDI_SUCCESS. 4127 * 4128 */ 4129 static int 4130 skd_setup_intr(skd_device_t *skdev, int intr_type) 4131 { 4132 int32_t count = 0; 4133 int32_t avail = 0; 4134 int32_t actual = 0; 4135 int32_t ret; 4136 uint32_t i; 4137 4138 Dcmn_err(CE_CONT, "(%s%d): setup_intr", DRV_NAME, skdev->instance); 4139 4140 /* Get number of interrupts the platform h/w supports */ 4141 if (((ret = ddi_intr_get_nintrs(skdev->dip, intr_type, &count)) != 4142 DDI_SUCCESS) || count == 0) { 4143 cmn_err(CE_WARN, "!intr_setup failed, nintrs ret=%xh, cnt=%xh", 4144 ret, count); 4145 4146 return (DDI_FAILURE); 4147 } 4148 4149 /* Get number of available system interrupts */ 4150 if (((ret = ddi_intr_get_navail(skdev->dip, intr_type, &avail)) != 4151 DDI_SUCCESS) || avail == 0) { 4152 cmn_err(CE_WARN, "!intr_setup failed, navail ret=%xh, " 4153 "avail=%xh", ret, avail); 4154 4155 return (DDI_FAILURE); 4156 } 4157 4158 if (intr_type == DDI_INTR_TYPE_MSIX && avail < SKD_MSIX_MAXAIF) { 4159 cmn_err(CE_WARN, "!intr_setup failed, min MSI-X h/w vectors " 4160 "req'd: %d, avail: %d", 4161 SKD_MSIX_MAXAIF, count); 4162 4163 return (DDI_FAILURE); 4164 } 4165 4166 /* Allocate space for interrupt handles */ 4167 skdev->hsize = sizeof (ddi_intr_handle_t) * avail; 4168 skdev->htable = kmem_zalloc(skdev->hsize, KM_SLEEP); 4169 4170 /* Allocate the interrupts */ 4171 if ((ret = ddi_intr_alloc(skdev->dip, skdev->htable, intr_type, 4172 0, count, &actual, 0)) != DDI_SUCCESS) { 4173 cmn_err(CE_WARN, "!intr_setup failed, intr_alloc ret=%xh, " 4174 "count = %xh, " "actual=%xh", ret, count, actual); 4175 4176 skd_release_intr(skdev); 4177 4178 return (DDI_FAILURE); 4179 } 4180 4181 skdev->intr_cnt = actual; 4182 4183 if (intr_type == DDI_INTR_TYPE_FIXED) 4184 (void) ddi_intr_set_pri(skdev->htable[0], 10); 4185 4186 /* Get interrupt priority */ 4187 if ((ret = ddi_intr_get_pri(skdev->htable[0], &skdev->intr_pri)) != 4188 DDI_SUCCESS) { 4189 cmn_err(CE_WARN, "!intr_setup failed, get_pri ret=%xh", ret); 4190 skd_release_intr(skdev); 4191 4192 return (ret); 4193 } 4194 4195 /* Add the interrupt handlers */ 4196 for (i = 0; i < actual; i++) { 4197 if ((ret = ddi_intr_add_handler(skdev->htable[i], 4198 skd_isr_aif, (void *)skdev, (void *)((ulong_t)i))) != 4199 DDI_SUCCESS) { 4200 cmn_err(CE_WARN, "!intr_setup failed, addh#=%xh, " 4201 "act=%xh, ret=%xh", i, actual, ret); 4202 skd_release_intr(skdev); 4203 4204 return (ret); 4205 } 4206 } 4207 4208 /* Setup mutexes */ 4209 if ((ret = skd_init_mutex(skdev)) != DDI_SUCCESS) { 4210 cmn_err(CE_WARN, "!intr_setup failed, mutex init ret=%xh", ret); 4211 skd_release_intr(skdev); 4212 4213 return (ret); 4214 } 4215 4216 /* Get the capabilities */ 4217 (void) ddi_intr_get_cap(skdev->htable[0], &skdev->intr_cap); 4218 4219 /* Enable interrupts */ 4220 if (skdev->intr_cap & DDI_INTR_FLAG_BLOCK) { 4221 if ((ret = ddi_intr_block_enable(skdev->htable, 4222 skdev->intr_cnt)) != DDI_SUCCESS) { 4223 cmn_err(CE_WARN, "!failed, intr_setup block enable, " 4224 "ret=%xh", ret); 4225 skd_destroy_mutex(skdev); 4226 skd_release_intr(skdev); 4227 4228 return (ret); 4229 } 4230 } else { 4231 for (i = 0; i < skdev->intr_cnt; i++) { 4232 if ((ret = ddi_intr_enable(skdev->htable[i])) != 4233 DDI_SUCCESS) { 4234 cmn_err(CE_WARN, "!intr_setup failed, " 4235 "intr enable, ret=%xh", ret); 4236 skd_destroy_mutex(skdev); 4237 skd_release_intr(skdev); 4238 4239 return (ret); 4240 } 4241 } 4242 } 4243 4244 if (intr_type == DDI_INTR_TYPE_FIXED) 4245 (void) ddi_intr_clr_mask(skdev->htable[0]); 4246 4247 skdev->irq_type = intr_type; 4248 4249 return (DDI_SUCCESS); 4250 } 4251 4252 /* 4253 * 4254 * Name: skd_disable_intr, disable interrupt handling. 4255 * 4256 * Inputs: skdev - device state structure. 4257 * 4258 * Returns: Nothing. 4259 * 4260 */ 4261 static void 4262 skd_disable_intr(skd_device_t *skdev) 4263 { 4264 uint32_t i, rval; 4265 4266 if (skdev->intr_cap & DDI_INTR_FLAG_BLOCK) { 4267 /* Remove AIF block interrupts (MSI/MSI-X) */ 4268 if ((rval = ddi_intr_block_disable(skdev->htable, 4269 skdev->intr_cnt)) != DDI_SUCCESS) { 4270 cmn_err(CE_WARN, "!failed intr block disable, rval=%x", 4271 rval); 4272 } 4273 } else { 4274 /* Remove AIF non-block interrupts (fixed). */ 4275 for (i = 0; i < skdev->intr_cnt; i++) { 4276 if ((rval = ddi_intr_disable(skdev->htable[i])) != 4277 DDI_SUCCESS) { 4278 cmn_err(CE_WARN, "!failed intr disable, " 4279 "intr#=%xh, " "rval=%xh", i, rval); 4280 } 4281 } 4282 } 4283 } 4284 4285 /* 4286 * 4287 * Name: skd_release_intr, disables interrupt handling. 4288 * 4289 * Inputs: skdev - device state structure. 4290 * 4291 * Returns: Nothing. 4292 * 4293 */ 4294 static void 4295 skd_release_intr(skd_device_t *skdev) 4296 { 4297 int32_t i; 4298 int rval; 4299 4300 4301 Dcmn_err(CE_CONT, "REL_INTR intr_cnt=%d", skdev->intr_cnt); 4302 4303 if (skdev->irq_type == 0) { 4304 Dcmn_err(CE_CONT, "release_intr: (%s%d): done", 4305 DRV_NAME, skdev->instance); 4306 return; 4307 } 4308 4309 if (skdev->htable != NULL && skdev->hsize > 0) { 4310 i = (int32_t)skdev->hsize / (int32_t)sizeof (ddi_intr_handle_t); 4311 4312 while (i-- > 0) { 4313 if (skdev->htable[i] == 0) { 4314 Dcmn_err(CE_NOTE, "htable[%x]=0h", i); 4315 continue; 4316 } 4317 4318 if ((rval = ddi_intr_disable(skdev->htable[i])) != 4319 DDI_SUCCESS) 4320 Dcmn_err(CE_NOTE, "release_intr: intr_disable " 4321 "htable[%d], rval=%d", i, rval); 4322 4323 if (i < skdev->intr_cnt) { 4324 if ((rval = ddi_intr_remove_handler( 4325 skdev->htable[i])) != DDI_SUCCESS) 4326 cmn_err(CE_WARN, "!release_intr: " 4327 "intr_remove_handler FAILED, " 4328 "rval=%d", rval); 4329 4330 Dcmn_err(CE_NOTE, "release_intr: " 4331 "remove_handler htable[%d]", i); 4332 } 4333 4334 if ((rval = ddi_intr_free(skdev->htable[i])) != 4335 DDI_SUCCESS) 4336 cmn_err(CE_WARN, "!release_intr: intr_free " 4337 "FAILED, rval=%d", rval); 4338 Dcmn_err(CE_NOTE, "release_intr: intr_free htable[%d]", 4339 i); 4340 } 4341 4342 kmem_free(skdev->htable, skdev->hsize); 4343 skdev->htable = NULL; 4344 } 4345 4346 skdev->hsize = 0; 4347 skdev->intr_cnt = 0; 4348 skdev->intr_pri = 0; 4349 skdev->intr_cap = 0; 4350 skdev->irq_type = 0; 4351 } 4352 4353 /* 4354 * 4355 * Name: skd_dealloc_resources, deallocate resources allocated 4356 * during attach. 4357 * 4358 * Inputs: dip - DDI device info pointer. 4359 * skdev - device state structure. 4360 * seq - bit flag representing allocated item. 4361 * instance - device instance. 4362 * 4363 * Returns: Nothing. 4364 * 4365 */ 4366 /* ARGSUSED */ /* Upstream common source with other platforms. */ 4367 static void 4368 skd_dealloc_resources(dev_info_t *dip, skd_device_t *skdev, 4369 uint32_t seq, int instance) 4370 { 4371 4372 if (skdev == NULL) 4373 return; 4374 4375 if (seq & SKD_CONSTRUCTED) 4376 skd_destruct(skdev); 4377 4378 if (seq & SKD_INTR_ADDED) { 4379 skd_disable_intr(skdev); 4380 skd_release_intr(skdev); 4381 } 4382 4383 if (seq & SKD_DEV_IOBASE_MAPPED) 4384 ddi_regs_map_free(&skdev->dev_handle); 4385 4386 if (seq & SKD_IOMAP_IOBASE_MAPPED) 4387 ddi_regs_map_free(&skdev->iomap_handle); 4388 4389 if (seq & SKD_REGS_MAPPED) 4390 ddi_regs_map_free(&skdev->iobase_handle); 4391 4392 if (seq & SKD_CONFIG_SPACE_SETUP) 4393 pci_config_teardown(&skdev->pci_handle); 4394 4395 if (seq & SKD_SOFT_STATE_ALLOCED) { 4396 if (skdev->pathname && 4397 (skdev->flags & SKD_PATHNAME_ALLOCED)) { 4398 kmem_free(skdev->pathname, 4399 strlen(skdev->pathname)+1); 4400 } 4401 } 4402 4403 if (skdev->s1120_devid) 4404 ddi_devid_free(skdev->s1120_devid); 4405 } 4406 4407 /* 4408 * 4409 * Name: skd_setup_interrupt, sets up the appropriate interrupt type 4410 * msi, msix, or fixed. 4411 * 4412 * Inputs: skdev - device state structure. 4413 * 4414 * Returns: DDI_FAILURE on failure otherwise DDI_SUCCESS. 4415 * 4416 */ 4417 static int 4418 skd_setup_interrupts(skd_device_t *skdev) 4419 { 4420 int32_t rval = DDI_FAILURE; 4421 int32_t i; 4422 int32_t itypes = 0; 4423 4424 /* 4425 * See what types of interrupts this adapter and platform support 4426 */ 4427 if ((i = ddi_intr_get_supported_types(skdev->dip, &itypes)) != 4428 DDI_SUCCESS) { 4429 cmn_err(CE_NOTE, "intr supported types failed, rval=%xh, ", i); 4430 return (DDI_FAILURE); 4431 } 4432 4433 Dcmn_err(CE_NOTE, "%s:supported interrupts types: %x", 4434 skdev->name, itypes); 4435 4436 itypes &= skdev->irq_type; 4437 4438 if (!skd_disable_msix && (itypes & DDI_INTR_TYPE_MSIX) && 4439 (rval = skd_setup_intr(skdev, DDI_INTR_TYPE_MSIX)) == DDI_SUCCESS) { 4440 cmn_err(CE_NOTE, "!%s: successful MSI-X setup", 4441 skdev->name); 4442 } else if (!skd_disable_msi && (itypes & DDI_INTR_TYPE_MSI) && 4443 (rval = skd_setup_intr(skdev, DDI_INTR_TYPE_MSI)) == DDI_SUCCESS) { 4444 cmn_err(CE_NOTE, "!%s: successful MSI setup", 4445 skdev->name); 4446 } else if ((itypes & DDI_INTR_TYPE_FIXED) && 4447 (rval = skd_setup_intr(skdev, DDI_INTR_TYPE_FIXED)) 4448 == DDI_SUCCESS) { 4449 cmn_err(CE_NOTE, "!%s: successful fixed intr setup", 4450 skdev->name); 4451 } else { 4452 cmn_err(CE_WARN, "!%s: no supported interrupt types", 4453 skdev->name); 4454 return (DDI_FAILURE); 4455 } 4456 4457 Dcmn_err(CE_CONT, "%s: setup interrupts done", skdev->name); 4458 4459 return (rval); 4460 } 4461 4462 /* 4463 * 4464 * Name: skd_get_properties, retrieves properties from skd.conf. 4465 * 4466 * Inputs: skdev - device state structure. 4467 * dip - dev_info data structure. 4468 * 4469 * Returns: Nothing. 4470 * 4471 */ 4472 /* ARGSUSED */ /* Upstream common source with other platforms. */ 4473 static void 4474 skd_get_properties(dev_info_t *dip, skd_device_t *skdev) 4475 { 4476 int prop_value; 4477 4478 skd_isr_type = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 4479 "intr-type-cap", -1); 4480 4481 prop_value = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 4482 "max-scsi-reqs", -1); 4483 if (prop_value >= 1 && prop_value <= SKD_MAX_QUEUE_DEPTH) 4484 skd_max_queue_depth = prop_value; 4485 4486 prop_value = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 4487 "max-scsi-reqs-per-msg", -1); 4488 if (prop_value >= 1 && prop_value <= SKD_MAX_REQ_PER_MSG) 4489 skd_max_req_per_msg = prop_value; 4490 4491 prop_value = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 4492 "max-sgs-per-req", -1); 4493 if (prop_value >= 1 && prop_value <= SKD_MAX_N_SG_PER_REQ) 4494 skd_sgs_per_request = prop_value; 4495 4496 prop_value = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 4497 "dbg-level", -1); 4498 if (prop_value >= 1 && prop_value <= 2) 4499 skd_dbg_level = prop_value; 4500 } 4501 4502 /* 4503 * 4504 * Name: skd_wait_for_s1120, wait for device to finish 4505 * its initialization. 4506 * 4507 * Inputs: skdev - device state structure. 4508 * 4509 * Returns: DDI_SUCCESS or DDI_FAILURE. 4510 * 4511 */ 4512 static int 4513 skd_wait_for_s1120(skd_device_t *skdev) 4514 { 4515 clock_t cur_ticks, tmo; 4516 int loop_cntr = 0; 4517 int rc = DDI_FAILURE; 4518 4519 mutex_enter(&skdev->skd_internalio_mutex); 4520 4521 while (skdev->gendisk_on == 0) { 4522 cur_ticks = ddi_get_lbolt(); 4523 tmo = cur_ticks + drv_usectohz(MICROSEC); 4524 if (cv_timedwait(&skdev->cv_waitq, 4525 &skdev->skd_internalio_mutex, tmo) == -1) { 4526 /* Oops - timed out */ 4527 if (loop_cntr++ > 10) 4528 break; 4529 } 4530 } 4531 4532 mutex_exit(&skdev->skd_internalio_mutex); 4533 4534 if (skdev->gendisk_on == 1) 4535 rc = DDI_SUCCESS; 4536 4537 return (rc); 4538 } 4539 4540 /* 4541 * 4542 * Name: skd_update_props, updates certain device properties. 4543 * 4544 * Inputs: skdev - device state structure. 4545 * dip - dev info structure 4546 * 4547 * Returns: Nothing. 4548 * 4549 */ 4550 static void 4551 skd_update_props(skd_device_t *skdev, dev_info_t *dip) 4552 { 4553 int blksize = 512; 4554 4555 if ((ddi_prop_update_int64(DDI_DEV_T_NONE, dip, "device-nblocks", 4556 skdev->Nblocks) != DDI_SUCCESS) || 4557 (ddi_prop_update_int(DDI_DEV_T_NONE, dip, "device-blksize", 4558 blksize) != DDI_SUCCESS)) { 4559 cmn_err(CE_NOTE, "%s: FAILED to create driver properties", 4560 skdev->name); 4561 } 4562 } 4563 4564 /* 4565 * 4566 * Name: skd_setup_devid, sets up device ID info. 4567 * 4568 * Inputs: skdev - device state structure. 4569 * devid - Device ID for the DDI. 4570 * 4571 * Returns: DDI_SUCCESS or DDI_FAILURE. 4572 * 4573 */ 4574 static int 4575 skd_setup_devid(skd_device_t *skdev, ddi_devid_t *devid) 4576 { 4577 int rc, sz_model, sz_sn, sz; 4578 4579 sz_model = strlen(skdev->inq_product_id); 4580 sz_sn = strlen(skdev->inq_serial_num); 4581 sz = sz_model + sz_sn + 1; 4582 4583 (void) snprintf(skdev->devid_str, sizeof (skdev->devid_str), "%s=%s", 4584 skdev->inq_product_id, skdev->inq_serial_num); 4585 rc = ddi_devid_init(skdev->dip, DEVID_SCSI_SERIAL, sz, 4586 skdev->devid_str, devid); 4587 4588 if (rc != DDI_SUCCESS) 4589 cmn_err(CE_WARN, "!%s: devid_init FAILED", skdev->name); 4590 4591 return (rc); 4592 4593 } 4594 4595 /* 4596 * 4597 * Name: skd_bd_attach, attach to blkdev driver 4598 * 4599 * Inputs: skdev - device state structure. 4600 * dip - device info structure. 4601 * 4602 * Returns: DDI_SUCCESS or DDI_FAILURE. 4603 * 4604 */ 4605 static int 4606 skd_bd_attach(dev_info_t *dip, skd_device_t *skdev) 4607 { 4608 int rv; 4609 4610 skdev->s_bdh = bd_alloc_handle(skdev, &skd_bd_ops, 4611 &skd_64bit_io_dma_attr, KM_SLEEP); 4612 4613 if (skdev->s_bdh == NULL) { 4614 cmn_err(CE_WARN, "!skd_bd_attach: FAILED"); 4615 4616 return (DDI_FAILURE); 4617 } 4618 4619 rv = bd_attach_handle(dip, skdev->s_bdh); 4620 4621 if (rv != DDI_SUCCESS) { 4622 cmn_err(CE_WARN, "!bd_attach_handle FAILED\n"); 4623 } else { 4624 Dcmn_err(CE_NOTE, "bd_attach_handle OK\n"); 4625 skdev->bd_attached++; 4626 } 4627 4628 return (rv); 4629 } 4630 4631 /* 4632 * 4633 * Name: skd_bd_detach, detach from the blkdev driver. 4634 * 4635 * Inputs: skdev - device state structure. 4636 * 4637 * Returns: Nothing. 4638 * 4639 */ 4640 static void 4641 skd_bd_detach(skd_device_t *skdev) 4642 { 4643 if (skdev->bd_attached) 4644 (void) bd_detach_handle(skdev->s_bdh); 4645 4646 bd_free_handle(skdev->s_bdh); 4647 } 4648 4649 /* 4650 * 4651 * Name: skd_attach, attach sdk device driver 4652 * 4653 * Inputs: dip - device info structure. 4654 * cmd - DDI attach argument (ATTACH, RESUME, etc.) 4655 * 4656 * Returns: DDI_SUCCESS or DDI_FAILURE. 4657 * 4658 */ 4659 static int 4660 skd_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 4661 { 4662 int instance; 4663 int nregs; 4664 skd_device_t *skdev = NULL; 4665 int inx; 4666 uint16_t cmd_reg; 4667 int progress = 0; 4668 char name[MAXPATHLEN]; 4669 off_t regsize; 4670 char pci_str[32]; 4671 char fw_version[8]; 4672 4673 instance = ddi_get_instance(dip); 4674 4675 (void) ddi_get_parent_data(dip); 4676 4677 switch (cmd) { 4678 case DDI_ATTACH: 4679 break; 4680 4681 case DDI_RESUME: 4682 /* Re-enable timer */ 4683 skd_start_timer(skdev); 4684 4685 return (DDI_SUCCESS); 4686 4687 default: 4688 return (DDI_FAILURE); 4689 } 4690 4691 Dcmn_err(CE_NOTE, "sTec S1120 Driver v%s Instance: %d", 4692 VERSIONSTR, instance); 4693 4694 /* 4695 * Check that hardware is installed in a DMA-capable slot 4696 */ 4697 if (ddi_slaveonly(dip) == DDI_SUCCESS) { 4698 cmn_err(CE_WARN, "!%s%d: installed in a " 4699 "slot that isn't DMA-capable slot", DRV_NAME, instance); 4700 return (DDI_FAILURE); 4701 } 4702 4703 /* 4704 * No support for high-level interrupts 4705 */ 4706 if (ddi_intr_hilevel(dip, 0) != 0) { 4707 cmn_err(CE_WARN, "!%s%d: High level interrupt not supported", 4708 DRV_NAME, instance); 4709 return (DDI_FAILURE); 4710 } 4711 4712 /* 4713 * Allocate our per-device-instance structure 4714 */ 4715 if (ddi_soft_state_zalloc(skd_state, instance) != 4716 DDI_SUCCESS) { 4717 cmn_err(CE_WARN, "!%s%d: soft state zalloc failed ", 4718 DRV_NAME, instance); 4719 return (DDI_FAILURE); 4720 } 4721 4722 progress |= SKD_SOFT_STATE_ALLOCED; 4723 4724 skdev = ddi_get_soft_state(skd_state, instance); 4725 if (skdev == NULL) { 4726 cmn_err(CE_WARN, "!%s%d: Unable to get soft state structure", 4727 DRV_NAME, instance); 4728 goto skd_attach_failed; 4729 } 4730 4731 (void) snprintf(skdev->name, sizeof (skdev->name), 4732 DRV_NAME "%d", instance); 4733 4734 skdev->dip = dip; 4735 skdev->instance = instance; 4736 4737 ddi_set_driver_private(dip, skdev); 4738 4739 (void) ddi_pathname(dip, name); 4740 for (inx = strlen(name); inx; inx--) { 4741 if (name[inx] == ',') { 4742 name[inx] = '\0'; 4743 break; 4744 } 4745 if (name[inx] == '@') { 4746 break; 4747 } 4748 } 4749 4750 skdev->pathname = kmem_zalloc(strlen(name) + 1, KM_SLEEP); 4751 (void) strlcpy(skdev->pathname, name, strlen(name) + 1); 4752 4753 progress |= SKD_PATHNAME_ALLOCED; 4754 skdev->flags |= SKD_PATHNAME_ALLOCED; 4755 4756 if (pci_config_setup(dip, &skdev->pci_handle) != DDI_SUCCESS) { 4757 cmn_err(CE_WARN, "!%s%d: pci_config_setup FAILED", 4758 DRV_NAME, instance); 4759 goto skd_attach_failed; 4760 } 4761 4762 progress |= SKD_CONFIG_SPACE_SETUP; 4763 4764 /* Save adapter path. */ 4765 4766 (void) ddi_dev_nregs(dip, &nregs); 4767 4768 /* 4769 * 0x0 Configuration Space 4770 * 0x1 I/O Space 4771 * 0x2 s1120 register space 4772 */ 4773 if (ddi_dev_regsize(dip, 1, ®size) != DDI_SUCCESS || 4774 ddi_regs_map_setup(dip, 1, &skdev->iobase, 0, regsize, 4775 &dev_acc_attr, &skdev->iobase_handle) != DDI_SUCCESS) { 4776 cmn_err(CE_WARN, "!%s%d: regs_map_setup(mem) failed", 4777 DRV_NAME, instance); 4778 goto skd_attach_failed; 4779 } 4780 progress |= SKD_REGS_MAPPED; 4781 4782 skdev->iomap_iobase = skdev->iobase; 4783 skdev->iomap_handle = skdev->iobase_handle; 4784 4785 Dcmn_err(CE_NOTE, "%s: PCI iobase=%ph, iomap=%ph, regnum=%d, " 4786 "regsize=%ld", skdev->name, (void *)skdev->iobase, 4787 (void *)skdev->iomap_iobase, 1, regsize); 4788 4789 if (ddi_dev_regsize(dip, 2, ®size) != DDI_SUCCESS || 4790 ddi_regs_map_setup(dip, 2, &skdev->dev_iobase, 0, regsize, 4791 &dev_acc_attr, &skdev->dev_handle) != DDI_SUCCESS) { 4792 cmn_err(CE_WARN, "!%s%d: regs_map_setup(mem) failed", 4793 DRV_NAME, instance); 4794 4795 goto skd_attach_failed; 4796 } 4797 4798 skdev->dev_memsize = (int)regsize; 4799 4800 Dcmn_err(CE_NOTE, "%s: DEV iobase=%ph regsize=%d", 4801 skdev->name, (void *)skdev->dev_iobase, 4802 skdev->dev_memsize); 4803 4804 progress |= SKD_DEV_IOBASE_MAPPED; 4805 4806 cmd_reg = pci_config_get16(skdev->pci_handle, PCI_CONF_COMM); 4807 cmd_reg |= (PCI_COMM_ME | PCI_COMM_INTX_DISABLE); 4808 cmd_reg &= ~PCI_COMM_PARITY_DETECT; 4809 pci_config_put16(skdev->pci_handle, PCI_CONF_COMM, cmd_reg); 4810 4811 /* Get adapter PCI device information. */ 4812 skdev->vendor_id = pci_config_get16(skdev->pci_handle, PCI_CONF_VENID); 4813 skdev->device_id = pci_config_get16(skdev->pci_handle, PCI_CONF_DEVID); 4814 4815 Dcmn_err(CE_NOTE, "%s: %x-%x card detected", 4816 skdev->name, skdev->vendor_id, skdev->device_id); 4817 4818 skd_get_properties(dip, skdev); 4819 4820 (void) skd_init(skdev); 4821 4822 if (skd_construct(skdev, instance)) { 4823 cmn_err(CE_WARN, "!%s: construct FAILED", skdev->name); 4824 goto skd_attach_failed; 4825 } 4826 4827 progress |= SKD_PROBED; 4828 progress |= SKD_CONSTRUCTED; 4829 4830 SIMPLEQ_INIT(&skdev->waitqueue); 4831 4832 /* 4833 * Setup interrupt handler 4834 */ 4835 if (skd_setup_interrupts(skdev) != DDI_SUCCESS) { 4836 cmn_err(CE_WARN, "!%s: Unable to add interrupt", 4837 skdev->name); 4838 goto skd_attach_failed; 4839 } 4840 4841 progress |= SKD_INTR_ADDED; 4842 4843 ADAPTER_STATE_LOCK(skdev); 4844 skdev->flags |= SKD_ATTACHED; 4845 ADAPTER_STATE_UNLOCK(skdev); 4846 4847 skdev->d_blkshift = 9; 4848 progress |= SKD_ATTACHED; 4849 4850 4851 skd_start_device(skdev); 4852 4853 ADAPTER_STATE_LOCK(skdev); 4854 skdev->progress = progress; 4855 ADAPTER_STATE_UNLOCK(skdev); 4856 4857 /* 4858 * Give the board a chance to 4859 * complete its initialization. 4860 */ 4861 if (skdev->gendisk_on != 1) 4862 (void) skd_wait_for_s1120(skdev); 4863 4864 if (skdev->gendisk_on != 1) { 4865 cmn_err(CE_WARN, "!%s: s1120 failed to come ONLINE", 4866 skdev->name); 4867 goto skd_attach_failed; 4868 } 4869 4870 ddi_report_dev(dip); 4871 4872 skd_send_internal_skspcl(skdev, &skdev->internal_skspcl, INQUIRY); 4873 4874 skdev->disks_initialized++; 4875 4876 (void) strcpy(fw_version, "???"); 4877 (void) skd_pci_info(skdev, pci_str, sizeof (pci_str)); 4878 Dcmn_err(CE_NOTE, " sTec S1120 Driver(%s) version %s-b%s", 4879 DRV_NAME, DRV_VERSION, DRV_BUILD_ID); 4880 4881 Dcmn_err(CE_NOTE, " sTec S1120 %04x:%04x %s 64 bit", 4882 skdev->vendor_id, skdev->device_id, pci_str); 4883 4884 Dcmn_err(CE_NOTE, " sTec S1120 %s\n", skdev->pathname); 4885 4886 if (*skdev->inq_serial_num) 4887 Dcmn_err(CE_NOTE, " sTec S1120 serial#=%s", 4888 skdev->inq_serial_num); 4889 4890 if (*skdev->inq_product_id && 4891 *skdev->inq_product_rev) 4892 Dcmn_err(CE_NOTE, " sTec S1120 prod ID=%s prod rev=%s", 4893 skdev->inq_product_id, skdev->inq_product_rev); 4894 4895 Dcmn_err(CE_NOTE, "%s: intr-type-cap: %d", 4896 skdev->name, skdev->irq_type); 4897 Dcmn_err(CE_NOTE, "%s: max-scsi-reqs: %d", 4898 skdev->name, skd_max_queue_depth); 4899 Dcmn_err(CE_NOTE, "%s: max-sgs-per-req: %d", 4900 skdev->name, skd_sgs_per_request); 4901 Dcmn_err(CE_NOTE, "%s: max-scsi-req-per-msg: %d", 4902 skdev->name, skd_max_req_per_msg); 4903 4904 if (skd_bd_attach(dip, skdev) == DDI_FAILURE) 4905 goto skd_attach_failed; 4906 4907 skd_update_props(skdev, dip); 4908 4909 /* Enable timer */ 4910 skd_start_timer(skdev); 4911 4912 ADAPTER_STATE_LOCK(skdev); 4913 skdev->progress = progress; 4914 ADAPTER_STATE_UNLOCK(skdev); 4915 4916 skdev->attached = 1; 4917 return (DDI_SUCCESS); 4918 4919 skd_attach_failed: 4920 skd_dealloc_resources(dip, skdev, progress, instance); 4921 4922 if ((skdev->flags & SKD_MUTEX_DESTROYED) == 0) { 4923 skd_destroy_mutex(skdev); 4924 } 4925 4926 ddi_soft_state_free(skd_state, instance); 4927 4928 cmn_err(CE_WARN, "!skd_attach FAILED: progress=%x", progress); 4929 return (DDI_FAILURE); 4930 } 4931 4932 /* 4933 * 4934 * Name: skd_halt 4935 * 4936 * Inputs: skdev - device state structure. 4937 * 4938 * Returns: Nothing. 4939 * 4940 */ 4941 static void 4942 skd_halt(skd_device_t *skdev) 4943 { 4944 Dcmn_err(CE_NOTE, "%s: halt/suspend ......", skdev->name); 4945 } 4946 4947 /* 4948 * 4949 * Name: skd_detach, detaches driver from the system. 4950 * 4951 * Inputs: dip - device info structure. 4952 * 4953 * Returns: DDI_SUCCESS on successful detach otherwise DDI_FAILURE. 4954 * 4955 */ 4956 static int 4957 skd_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 4958 { 4959 skd_buf_private_t *pbuf; 4960 skd_device_t *skdev; 4961 int instance; 4962 timeout_id_t timer_id = NULL; 4963 int rv1 = DDI_SUCCESS; 4964 struct skd_special_context *skspcl; 4965 4966 instance = ddi_get_instance(dip); 4967 4968 skdev = ddi_get_soft_state(skd_state, instance); 4969 if (skdev == NULL) { 4970 cmn_err(CE_WARN, "!detach failed: NULL skd state"); 4971 4972 return (DDI_FAILURE); 4973 } 4974 4975 Dcmn_err(CE_CONT, "skd_detach(%d): entered", instance); 4976 4977 switch (cmd) { 4978 case DDI_DETACH: 4979 /* Test for packet cache inuse. */ 4980 ADAPTER_STATE_LOCK(skdev); 4981 4982 /* Stop command/event processing. */ 4983 skdev->flags |= (SKD_SUSPENDED | SKD_CMD_ABORT_TMO); 4984 4985 /* Disable driver timer if no adapters. */ 4986 if (skdev->skd_timer_timeout_id != 0) { 4987 timer_id = skdev->skd_timer_timeout_id; 4988 skdev->skd_timer_timeout_id = 0; 4989 } 4990 ADAPTER_STATE_UNLOCK(skdev); 4991 4992 if (timer_id != 0) { 4993 (void) untimeout(timer_id); 4994 } 4995 4996 #ifdef SKD_PM 4997 if (skdev->power_level != LOW_POWER_LEVEL) { 4998 skd_halt(skdev); 4999 skdev->power_level = LOW_POWER_LEVEL; 5000 } 5001 #endif 5002 skspcl = &skdev->internal_skspcl; 5003 skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE); 5004 5005 skd_stop_device(skdev); 5006 5007 /* 5008 * Clear request queue. 5009 */ 5010 while (!SIMPLEQ_EMPTY(&skdev->waitqueue)) { 5011 pbuf = skd_get_queued_pbuf(skdev); 5012 skd_end_request_abnormal(skdev, pbuf, ECANCELED, 5013 SKD_IODONE_WNIOC); 5014 Dcmn_err(CE_NOTE, 5015 "detach: cancelled pbuf %p %ld <%s> %lld\n", 5016 (void *)pbuf, pbuf->x_xfer->x_nblks, 5017 (pbuf->dir & B_READ) ? "Read" : "Write", 5018 pbuf->x_xfer->x_blkno); 5019 } 5020 5021 skd_bd_detach(skdev); 5022 5023 skd_dealloc_resources(dip, skdev, skdev->progress, instance); 5024 5025 if ((skdev->flags & SKD_MUTEX_DESTROYED) == 0) { 5026 skd_destroy_mutex(skdev); 5027 } 5028 5029 ddi_soft_state_free(skd_state, instance); 5030 5031 skd_exit(); 5032 5033 break; 5034 5035 case DDI_SUSPEND: 5036 /* Block timer. */ 5037 5038 ADAPTER_STATE_LOCK(skdev); 5039 skdev->flags |= SKD_SUSPENDED; 5040 5041 /* Disable driver timer if last adapter. */ 5042 if (skdev->skd_timer_timeout_id != 0) { 5043 timer_id = skdev->skd_timer_timeout_id; 5044 skdev->skd_timer_timeout_id = 0; 5045 } 5046 ADAPTER_STATE_UNLOCK(skdev); 5047 5048 if (timer_id != 0) { 5049 (void) untimeout(timer_id); 5050 } 5051 5052 ddi_prop_remove_all(dip); 5053 5054 skd_halt(skdev); 5055 5056 break; 5057 default: 5058 rv1 = DDI_FAILURE; 5059 break; 5060 } 5061 5062 if (rv1 != DDI_SUCCESS) { 5063 cmn_err(CE_WARN, "!skd_detach, failed, rv1=%x", rv1); 5064 } else { 5065 Dcmn_err(CE_CONT, "skd_detach: exiting"); 5066 } 5067 5068 if (rv1 != DDI_SUCCESS) 5069 return (DDI_FAILURE); 5070 5071 return (rv1); 5072 } 5073 5074 /* 5075 * 5076 * Name: skd_devid_init, calls skd_setup_devid to setup 5077 * the device's devid structure. 5078 * 5079 * Inputs: arg - device state structure. 5080 * dip - dev_info structure. 5081 * devid - devid structure. 5082 * 5083 * Returns: Nothing. 5084 * 5085 */ 5086 /* ARGSUSED */ /* Upstream common source with other platforms. */ 5087 static int 5088 skd_devid_init(void *arg, dev_info_t *dip, ddi_devid_t *devid) 5089 { 5090 skd_device_t *skdev = arg; 5091 5092 (void) skd_setup_devid(skdev, devid); 5093 5094 return (0); 5095 } 5096 5097 /* 5098 * 5099 * Name: skd_bd_driveinfo, retrieves device's info. 5100 * 5101 * Inputs: drive - drive data structure. 5102 * arg - device state structure. 5103 * 5104 * Returns: Nothing. 5105 * 5106 */ 5107 static void 5108 skd_bd_driveinfo(void *arg, bd_drive_t *drive) 5109 { 5110 skd_device_t *skdev = arg; 5111 5112 drive->d_qsize = (skdev->queue_depth_limit * 4) / 5; 5113 drive->d_maxxfer = SKD_DMA_MAXXFER; 5114 drive->d_removable = B_FALSE; 5115 drive->d_hotpluggable = B_FALSE; 5116 drive->d_target = 0; 5117 drive->d_lun = 0; 5118 5119 if (skdev->inquiry_is_valid != 0) { 5120 drive->d_vendor = skdev->inq_vendor_id; 5121 drive->d_vendor_len = strlen(drive->d_vendor); 5122 5123 drive->d_product = skdev->inq_product_id; 5124 drive->d_product_len = strlen(drive->d_product); 5125 5126 drive->d_serial = skdev->inq_serial_num; 5127 drive->d_serial_len = strlen(drive->d_serial); 5128 5129 drive->d_revision = skdev->inq_product_rev; 5130 drive->d_revision_len = strlen(drive->d_revision); 5131 } 5132 } 5133 5134 /* 5135 * 5136 * Name: skd_bd_mediainfo, retrieves device media info. 5137 * 5138 * Inputs: arg - device state structure. 5139 * media - container for media info. 5140 * 5141 * Returns: Zero. 5142 * 5143 */ 5144 static int 5145 skd_bd_mediainfo(void *arg, bd_media_t *media) 5146 { 5147 skd_device_t *skdev = arg; 5148 5149 media->m_nblks = skdev->Nblocks; 5150 media->m_blksize = 512; 5151 media->m_pblksize = 4096; 5152 media->m_readonly = B_FALSE; 5153 media->m_solidstate = B_TRUE; 5154 5155 return (0); 5156 } 5157 5158 /* 5159 * 5160 * Name: skd_rw, performs R/W requests for blkdev driver. 5161 * 5162 * Inputs: skdev - device state structure. 5163 * xfer - tranfer structure. 5164 * dir - I/O direction. 5165 * 5166 * Returns: EAGAIN if device is not online. EIO if blkdev wants us to 5167 * be a dump device (for now). 5168 * Value returned by skd_start(). 5169 * 5170 */ 5171 static int 5172 skd_rw(skd_device_t *skdev, bd_xfer_t *xfer, int dir) 5173 { 5174 skd_buf_private_t *pbuf; 5175 5176 /* 5177 * The x_flags structure element is not defined in Oracle Solaris 5178 */ 5179 /* We'll need to fix this in order to support dump on this device. */ 5180 if (xfer->x_flags & BD_XFER_POLL) 5181 return (EIO); 5182 5183 if (skdev->state != SKD_DRVR_STATE_ONLINE) { 5184 Dcmn_err(CE_NOTE, "Device - not ONLINE"); 5185 5186 skd_request_fn_not_online(skdev); 5187 5188 return (EAGAIN); 5189 } 5190 5191 pbuf = kmem_zalloc(sizeof (skd_buf_private_t), KM_NOSLEEP); 5192 if (pbuf == NULL) 5193 return (ENOMEM); 5194 5195 WAITQ_LOCK(skdev); 5196 pbuf->dir = dir; 5197 pbuf->x_xfer = xfer; 5198 5199 skd_queue(skdev, pbuf); 5200 skdev->ios_queued++; 5201 WAITQ_UNLOCK(skdev); 5202 5203 skd_start(skdev); 5204 5205 return (0); 5206 } 5207 5208 /* 5209 * 5210 * Name: skd_bd_read, performs blkdev read requests. 5211 * 5212 * Inputs: arg - device state structure. 5213 * xfer - tranfer request structure. 5214 * 5215 * Returns: Value return by skd_rw(). 5216 * 5217 */ 5218 static int 5219 skd_bd_read(void *arg, bd_xfer_t *xfer) 5220 { 5221 return (skd_rw(arg, xfer, B_READ)); 5222 } 5223 5224 /* 5225 * 5226 * Name: skd_bd_write, performs blkdev write requests. 5227 * 5228 * Inputs: arg - device state structure. 5229 * xfer - tranfer request structure. 5230 * 5231 * Returns: Value return by skd_rw(). 5232 * 5233 */ 5234 static int 5235 skd_bd_write(void *arg, bd_xfer_t *xfer) 5236 { 5237 return (skd_rw(arg, xfer, B_WRITE)); 5238 } 5239