1 /* 2 * 3 * skd.c: Solaris 11/10 Driver for sTec, Inc. S112x PCIe SSD card 4 * 5 * Solaris driver is based on the Linux driver authored by: 6 * 7 * Authors/Alphabetical: Dragan Stancevic <dstancevic@stec-inc.com> 8 * Gordon Waidhofer <gwaidhofer@stec-inc.com> 9 * John Hamilton <jhamilton@stec-inc.com> 10 */ 11 12 /* 13 * This file and its contents are supplied under the terms of the 14 * Common Development and Distribution License ("CDDL"), version 1.0. 15 * You may only use this file in accordance with the terms of version 16 * 1.0 of the CDDL. 17 * 18 * A full copy of the text of the CDDL should have accompanied this 19 * source. A copy of the CDDL is also available via the Internet at 20 * http://www.illumos.org/license/CDDL. 21 */ 22 23 /* 24 * Copyright 2013 STEC, Inc. All rights reserved. 25 * Copyright 2015 Nexenta Systems, Inc. All rights reserved. 26 * Copyright (c) 2018, Joyent, Inc. 27 */ 28 29 #include <sys/types.h> 30 #include <sys/stream.h> 31 #include <sys/cmn_err.h> 32 #include <sys/kmem.h> 33 #include <sys/file.h> 34 #include <sys/buf.h> 35 #include <sys/uio.h> 36 #include <sys/cred.h> 37 #include <sys/modctl.h> 38 #include <sys/debug.h> 39 #include <sys/modctl.h> 40 #include <sys/list.h> 41 #include <sys/sysmacros.h> 42 #include <sys/errno.h> 43 #include <sys/pcie.h> 44 #include <sys/pci.h> 45 #include <sys/ddi.h> 46 #include <sys/dditypes.h> 47 #include <sys/sunddi.h> 48 #include <sys/atomic.h> 49 #include <sys/mutex.h> 50 #include <sys/param.h> 51 #include <sys/devops.h> 52 #include <sys/blkdev.h> 53 #include <sys/queue.h> 54 #include <sys/scsi/impl/inquiry.h> 55 56 #include "skd_s1120.h" 57 #include "skd.h" 58 59 int skd_dbg_level = 0; 60 61 void *skd_state = NULL; 62 int skd_disable_msi = 0; 63 int skd_disable_msix = 0; 64 65 /* Initialized in _init() and tunable, see _init(). */ 66 clock_t skd_timer_ticks; 67 68 /* I/O DMA attributes structures. */ 69 static ddi_dma_attr_t skd_64bit_io_dma_attr = { 70 DMA_ATTR_V0, /* dma_attr_version */ 71 SKD_DMA_LOW_ADDRESS, /* low DMA address range */ 72 SKD_DMA_HIGH_64BIT_ADDRESS, /* high DMA address range */ 73 SKD_DMA_XFER_COUNTER, /* DMA counter register */ 74 SKD_DMA_ADDRESS_ALIGNMENT, /* DMA address alignment */ 75 SKD_DMA_BURSTSIZES, /* DMA burstsizes */ 76 SKD_DMA_MIN_XFER_SIZE, /* min effective DMA size */ 77 SKD_DMA_MAX_XFER_SIZE, /* max DMA xfer size */ 78 SKD_DMA_SEGMENT_BOUNDARY, /* segment boundary */ 79 SKD_DMA_SG_LIST_LENGTH, /* s/g list length */ 80 SKD_DMA_GRANULARITY, /* granularity of device */ 81 SKD_DMA_XFER_FLAGS /* DMA transfer flags */ 82 }; 83 84 int skd_isr_type = -1; 85 86 #define SKD_MAX_QUEUE_DEPTH 255 87 #define SKD_MAX_QUEUE_DEPTH_DEFAULT 64 88 int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT; 89 90 #define SKD_MAX_REQ_PER_MSG 14 91 #define SKD_MAX_REQ_PER_MSG_DEFAULT 1 92 int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT; 93 94 #define SKD_MAX_N_SG_PER_REQ 4096 95 int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT; 96 97 static int skd_sys_quiesce_dev(dev_info_t *); 98 static int skd_quiesce_dev(skd_device_t *); 99 static int skd_list_skmsg(skd_device_t *, int); 100 static int skd_list_skreq(skd_device_t *, int); 101 static int skd_attach(dev_info_t *dip, ddi_attach_cmd_t cmd); 102 static int skd_detach(dev_info_t *dip, ddi_detach_cmd_t cmd); 103 static int skd_format_internal_skspcl(struct skd_device *skdev); 104 static void skd_start(skd_device_t *); 105 static void skd_destroy_mutex(skd_device_t *skdev); 106 static void skd_enable_interrupts(struct skd_device *); 107 static void skd_request_fn_not_online(skd_device_t *skdev); 108 static void skd_send_internal_skspcl(struct skd_device *, 109 struct skd_special_context *, uint8_t); 110 static void skd_queue(skd_device_t *, skd_buf_private_t *); 111 static void *skd_alloc_dma_mem(skd_device_t *, dma_mem_t *, uint8_t); 112 static void skd_release_intr(skd_device_t *skdev); 113 static void skd_isr_fwstate(struct skd_device *skdev); 114 static void skd_isr_msg_from_dev(struct skd_device *skdev); 115 static void skd_soft_reset(struct skd_device *skdev); 116 static void skd_refresh_device_data(struct skd_device *skdev); 117 static void skd_update_props(skd_device_t *, dev_info_t *); 118 static void skd_end_request_abnormal(struct skd_device *, skd_buf_private_t *, 119 int, int); 120 static char *skd_pci_info(struct skd_device *skdev, char *str, size_t len); 121 122 static skd_buf_private_t *skd_get_queued_pbuf(skd_device_t *); 123 124 static void skd_bd_driveinfo(void *arg, bd_drive_t *drive); 125 static int skd_bd_mediainfo(void *arg, bd_media_t *media); 126 static int skd_bd_read(void *arg, bd_xfer_t *xfer); 127 static int skd_bd_write(void *arg, bd_xfer_t *xfer); 128 static int skd_devid_init(void *arg, dev_info_t *, ddi_devid_t *); 129 130 131 static bd_ops_t skd_bd_ops = { 132 BD_OPS_VERSION_0, 133 skd_bd_driveinfo, 134 skd_bd_mediainfo, 135 skd_devid_init, 136 NULL, /* sync_cache */ 137 skd_bd_read, 138 skd_bd_write, 139 }; 140 141 static ddi_device_acc_attr_t dev_acc_attr = { 142 DDI_DEVICE_ATTR_V0, 143 DDI_STRUCTURE_LE_ACC, 144 DDI_STRICTORDER_ACC 145 }; 146 147 /* 148 * Solaris module loading/unloading structures 149 */ 150 struct dev_ops skd_dev_ops = { 151 DEVO_REV, /* devo_rev */ 152 0, /* refcnt */ 153 ddi_no_info, /* getinfo */ 154 nulldev, /* identify */ 155 nulldev, /* probe */ 156 skd_attach, /* attach */ 157 skd_detach, /* detach */ 158 nodev, /* reset */ 159 NULL, /* char/block ops */ 160 NULL, /* bus operations */ 161 NULL, /* power management */ 162 skd_sys_quiesce_dev /* quiesce */ 163 }; 164 165 static struct modldrv modldrv = { 166 &mod_driverops, /* type of module: driver */ 167 "sTec skd v" DRV_VER_COMPL, /* name of module */ 168 &skd_dev_ops /* driver dev_ops */ 169 }; 170 171 static struct modlinkage modlinkage = { 172 MODREV_1, 173 &modldrv, 174 NULL 175 }; 176 177 /* 178 * sTec-required wrapper for debug printing. 179 */ 180 /*PRINTFLIKE2*/ 181 static inline void 182 Dcmn_err(int lvl, const char *fmt, ...) 183 { 184 va_list ap; 185 186 if (skd_dbg_level == 0) 187 return; 188 189 va_start(ap, fmt); 190 vcmn_err(lvl, fmt, ap); 191 va_end(ap); 192 } 193 194 /* 195 * Solaris module loading/unloading routines 196 */ 197 198 /* 199 * 200 * Name: _init, performs initial installation 201 * 202 * Inputs: None. 203 * 204 * Returns: Returns the value returned by the ddi_softstate_init function 205 * on a failure to create the device state structure or the result 206 * of the module install routines. 207 * 208 */ 209 int 210 _init(void) 211 { 212 int rval = 0; 213 int tgts = 0; 214 215 tgts |= 0x02; 216 tgts |= 0x08; /* In #ifdef NEXENTA block from original sTec drop. */ 217 218 /* 219 * drv_usectohz() is a function, so can't initialize it at 220 * instantiation. 221 */ 222 skd_timer_ticks = drv_usectohz(1000000); 223 224 Dcmn_err(CE_NOTE, 225 "<# Installing skd Driver dbg-lvl=%d %s %x>", 226 skd_dbg_level, DRV_BUILD_ID, tgts); 227 228 rval = ddi_soft_state_init(&skd_state, sizeof (skd_device_t), 0); 229 if (rval != DDI_SUCCESS) 230 return (rval); 231 232 bd_mod_init(&skd_dev_ops); 233 234 rval = mod_install(&modlinkage); 235 if (rval != DDI_SUCCESS) { 236 ddi_soft_state_fini(&skd_state); 237 bd_mod_fini(&skd_dev_ops); 238 } 239 240 return (rval); 241 } 242 243 /* 244 * 245 * Name: _info, returns information about loadable module. 246 * 247 * Inputs: modinfo, pointer to module information structure. 248 * 249 * Returns: Value returned by mod_info(). 250 * 251 */ 252 int 253 _info(struct modinfo *modinfop) 254 { 255 return (mod_info(&modlinkage, modinfop)); 256 } 257 258 /* 259 * _fini Prepares a module for unloading. It is called when the system 260 * wants to unload a module. If the module determines that it can 261 * be unloaded, then _fini() returns the value returned by 262 * mod_remove(). Upon successful return from _fini() no other 263 * routine in the module will be called before _init() is called. 264 * 265 * Inputs: None. 266 * 267 * Returns: DDI_SUCCESS or DDI_FAILURE. 268 * 269 */ 270 int 271 _fini(void) 272 { 273 int rval; 274 275 rval = mod_remove(&modlinkage); 276 if (rval == DDI_SUCCESS) { 277 ddi_soft_state_fini(&skd_state); 278 bd_mod_fini(&skd_dev_ops); 279 } 280 281 return (rval); 282 } 283 284 /* 285 * Solaris Register read/write routines 286 */ 287 288 /* 289 * 290 * Name: skd_reg_write64, writes a 64-bit value to specified address 291 * 292 * Inputs: skdev - device state structure. 293 * val - 64-bit value to be written. 294 * offset - offset from PCI base address. 295 * 296 * Returns: Nothing. 297 * 298 */ 299 /* 300 * Local vars are to keep lint silent. Any compiler worth its weight will 301 * optimize it all right out... 302 */ 303 static inline void 304 skd_reg_write64(struct skd_device *skdev, uint64_t val, uint32_t offset) 305 { 306 uint64_t *addr; 307 308 ASSERT((offset & 0x7) == 0); 309 /* LINTED */ 310 addr = (uint64_t *)(skdev->dev_iobase + offset); 311 ddi_put64(skdev->dev_handle, addr, val); 312 } 313 314 /* 315 * 316 * Name: skd_reg_read32, reads a 32-bit value to specified address 317 * 318 * Inputs: skdev - device state structure. 319 * offset - offset from PCI base address. 320 * 321 * Returns: val, 32-bit value read from specified PCI address. 322 * 323 */ 324 static inline uint32_t 325 skd_reg_read32(struct skd_device *skdev, uint32_t offset) 326 { 327 uint32_t *addr; 328 329 ASSERT((offset & 0x3) == 0); 330 /* LINTED */ 331 addr = (uint32_t *)(skdev->dev_iobase + offset); 332 return (ddi_get32(skdev->dev_handle, addr)); 333 } 334 335 /* 336 * 337 * Name: skd_reg_write32, writes a 32-bit value to specified address 338 * 339 * Inputs: skdev - device state structure. 340 * val - value to be written. 341 * offset - offset from PCI base address. 342 * 343 * Returns: Nothing. 344 * 345 */ 346 static inline void 347 skd_reg_write32(struct skd_device *skdev, uint32_t val, uint32_t offset) 348 { 349 uint32_t *addr; 350 351 ASSERT((offset & 0x3) == 0); 352 /* LINTED */ 353 addr = (uint32_t *)(skdev->dev_iobase + offset); 354 ddi_put32(skdev->dev_handle, addr, val); 355 } 356 357 358 /* 359 * Solaris skd routines 360 */ 361 362 /* 363 * 364 * Name: skd_name, generates the name of the driver. 365 * 366 * Inputs: skdev - device state structure 367 * 368 * Returns: char pointer to generated driver name. 369 * 370 */ 371 static const char * 372 skd_name(struct skd_device *skdev) 373 { 374 (void) snprintf(skdev->id_str, sizeof (skdev->id_str), "%s:", DRV_NAME); 375 376 return (skdev->id_str); 377 } 378 379 /* 380 * 381 * Name: skd_pci_find_capability, searches the PCI capability 382 * list for the specified capability. 383 * 384 * Inputs: skdev - device state structure. 385 * cap - capability sought. 386 * 387 * Returns: Returns position where capability was found. 388 * If not found, returns zero. 389 * 390 */ 391 static int 392 skd_pci_find_capability(struct skd_device *skdev, int cap) 393 { 394 uint16_t status; 395 uint8_t pos, id, hdr; 396 int ttl = 48; 397 398 status = pci_config_get16(skdev->pci_handle, PCI_CONF_STAT); 399 400 if (!(status & PCI_STAT_CAP)) 401 return (0); 402 403 hdr = pci_config_get8(skdev->pci_handle, PCI_CONF_HEADER); 404 405 if ((hdr & PCI_HEADER_TYPE_M) != 0) 406 return (0); 407 408 pos = pci_config_get8(skdev->pci_handle, PCI_CONF_CAP_PTR); 409 410 while (ttl-- && pos >= 0x40) { 411 pos &= ~3; 412 id = pci_config_get8(skdev->pci_handle, pos+PCI_CAP_ID); 413 if (id == 0xff) 414 break; 415 if (id == cap) 416 return (pos); 417 pos = pci_config_get8(skdev->pci_handle, pos+PCI_CAP_NEXT_PTR); 418 } 419 420 return (0); 421 } 422 423 /* 424 * 425 * Name: skd_io_done, called to conclude an I/O operation. 426 * 427 * Inputs: skdev - device state structure. 428 * pbuf - I/O request 429 * error - contain error value. 430 * mode - debug only. 431 * 432 * Returns: Nothing. 433 * 434 */ 435 static void 436 skd_io_done(skd_device_t *skdev, skd_buf_private_t *pbuf, 437 int error, int mode) 438 { 439 bd_xfer_t *xfer; 440 441 ASSERT(pbuf != NULL); 442 443 xfer = pbuf->x_xfer; 444 445 switch (mode) { 446 case SKD_IODONE_WIOC: 447 skdev->iodone_wioc++; 448 break; 449 case SKD_IODONE_WNIOC: 450 skdev->iodone_wnioc++; 451 break; 452 case SKD_IODONE_WDEBUG: 453 skdev->iodone_wdebug++; 454 break; 455 default: 456 skdev->iodone_unknown++; 457 } 458 459 if (error) { 460 skdev->ios_errors++; 461 cmn_err(CE_WARN, 462 "!%s:skd_io_done:ERR=%d %lld-%ld %s", skdev->name, 463 error, xfer->x_blkno, xfer->x_nblks, 464 (pbuf->dir & B_READ) ? "Read" : "Write"); 465 } 466 467 kmem_free(pbuf, sizeof (skd_buf_private_t)); 468 469 bd_xfer_done(xfer, error); 470 } 471 472 /* 473 * QUIESCE DEVICE 474 */ 475 476 /* 477 * 478 * Name: skd_sys_quiesce_dev, quiets the device 479 * 480 * Inputs: dip - dev info strucuture 481 * 482 * Returns: Zero. 483 * 484 */ 485 static int 486 skd_sys_quiesce_dev(dev_info_t *dip) 487 { 488 skd_device_t *skdev; 489 490 skdev = ddi_get_soft_state(skd_state, ddi_get_instance(dip)); 491 492 /* make sure Dcmn_err() doesn't actually print anything */ 493 skd_dbg_level = 0; 494 495 skd_disable_interrupts(skdev); 496 skd_soft_reset(skdev); 497 498 return (0); 499 } 500 501 /* 502 * 503 * Name: skd_quiesce_dev, quiets the device, but doesn't really do much. 504 * 505 * Inputs: skdev - Device state. 506 * 507 * Returns: -EINVAL if device is not in proper state otherwise 508 * returns zero. 509 * 510 */ 511 static int 512 skd_quiesce_dev(skd_device_t *skdev) 513 { 514 int rc = 0; 515 516 if (skd_dbg_level) 517 Dcmn_err(CE_NOTE, "skd_quiece_dev:"); 518 519 switch (skdev->state) { 520 case SKD_DRVR_STATE_BUSY: 521 case SKD_DRVR_STATE_BUSY_IMMINENT: 522 Dcmn_err(CE_NOTE, "%s: stopping queue", skdev->name); 523 break; 524 case SKD_DRVR_STATE_ONLINE: 525 case SKD_DRVR_STATE_STOPPING: 526 case SKD_DRVR_STATE_SYNCING: 527 case SKD_DRVR_STATE_PAUSING: 528 case SKD_DRVR_STATE_PAUSED: 529 case SKD_DRVR_STATE_STARTING: 530 case SKD_DRVR_STATE_RESTARTING: 531 case SKD_DRVR_STATE_RESUMING: 532 default: 533 rc = -EINVAL; 534 cmn_err(CE_NOTE, "state [%d] not implemented", skdev->state); 535 } 536 537 return (rc); 538 } 539 540 /* 541 * UNQUIESCE DEVICE: 542 * Note: Assumes lock is held to protect device state. 543 */ 544 /* 545 * 546 * Name: skd_unquiesce_dev, awkens the device 547 * 548 * Inputs: skdev - Device state. 549 * 550 * Returns: -EINVAL if device is not in proper state otherwise 551 * returns zero. 552 * 553 */ 554 static int 555 skd_unquiesce_dev(struct skd_device *skdev) 556 { 557 Dcmn_err(CE_NOTE, "skd_unquiece_dev:"); 558 559 skd_log_skdev(skdev, "unquiesce"); 560 if (skdev->state == SKD_DRVR_STATE_ONLINE) { 561 Dcmn_err(CE_NOTE, "**** device already ONLINE"); 562 563 return (0); 564 } 565 if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) { 566 /* 567 * If there has been an state change to other than 568 * ONLINE, we will rely on controller state change 569 * to come back online and restart the queue. 570 * The BUSY state means that driver is ready to 571 * continue normal processing but waiting for controller 572 * to become available. 573 */ 574 skdev->state = SKD_DRVR_STATE_BUSY; 575 Dcmn_err(CE_NOTE, "drive BUSY state\n"); 576 577 return (0); 578 } 579 /* 580 * Drive just come online, driver is either in startup, 581 * paused performing a task, or bust waiting for hardware. 582 */ 583 switch (skdev->state) { 584 case SKD_DRVR_STATE_PAUSED: 585 case SKD_DRVR_STATE_BUSY: 586 case SKD_DRVR_STATE_BUSY_IMMINENT: 587 case SKD_DRVR_STATE_BUSY_ERASE: 588 case SKD_DRVR_STATE_STARTING: 589 case SKD_DRVR_STATE_RESTARTING: 590 case SKD_DRVR_STATE_FAULT: 591 case SKD_DRVR_STATE_IDLE: 592 case SKD_DRVR_STATE_LOAD: 593 skdev->state = SKD_DRVR_STATE_ONLINE; 594 Dcmn_err(CE_NOTE, "%s: sTec s1120 ONLINE", skdev->name); 595 Dcmn_err(CE_NOTE, "%s: Starting request queue", skdev->name); 596 Dcmn_err(CE_NOTE, 597 "%s: queue depth limit=%d hard=%d soft=%d lowat=%d", 598 skdev->name, 599 skdev->queue_depth_limit, 600 skdev->hard_queue_depth_limit, 601 skdev->soft_queue_depth_limit, 602 skdev->queue_depth_lowat); 603 604 skdev->gendisk_on = 1; 605 cv_signal(&skdev->cv_waitq); 606 break; 607 case SKD_DRVR_STATE_DISAPPEARED: 608 default: 609 cmn_err(CE_NOTE, "**** driver state %d, not implemented \n", 610 skdev->state); 611 return (-EBUSY); 612 } 613 614 return (0); 615 } 616 617 /* 618 * READ/WRITE REQUESTS 619 */ 620 621 /* 622 * 623 * Name: skd_blkdev_preop_sg_list, builds the S/G list from info 624 * passed in by the blkdev driver. 625 * 626 * Inputs: skdev - device state structure. 627 * skreq - request structure. 628 * sg_byte_count - data transfer byte count. 629 * 630 * Returns: Nothing. 631 * 632 */ 633 /*ARGSUSED*/ 634 static void 635 skd_blkdev_preop_sg_list(struct skd_device *skdev, 636 struct skd_request_context *skreq, uint32_t *sg_byte_count) 637 { 638 bd_xfer_t *xfer; 639 skd_buf_private_t *pbuf; 640 int i, bcount = 0; 641 uint_t n_sg; 642 643 *sg_byte_count = 0; 644 645 ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD || 646 skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); 647 648 pbuf = skreq->pbuf; 649 ASSERT(pbuf != NULL); 650 651 xfer = pbuf->x_xfer; 652 n_sg = xfer->x_ndmac; 653 654 ASSERT(n_sg <= skdev->sgs_per_request); 655 656 skreq->n_sg = n_sg; 657 658 skreq->io_dma_handle = xfer->x_dmah; 659 660 skreq->total_sg_bcount = 0; 661 662 for (i = 0; i < n_sg; i++) { 663 ddi_dma_cookie_t *cookiep = &xfer->x_dmac; 664 struct fit_sg_descriptor *sgd; 665 uint32_t cnt = (uint32_t)cookiep->dmac_size; 666 667 bcount += cnt; 668 669 sgd = &skreq->sksg_list[i]; 670 sgd->control = FIT_SGD_CONTROL_NOT_LAST; 671 sgd->byte_count = cnt; 672 sgd->host_side_addr = cookiep->dmac_laddress; 673 sgd->dev_side_addr = 0; /* not used */ 674 *sg_byte_count += cnt; 675 676 skreq->total_sg_bcount += cnt; 677 678 if ((i + 1) != n_sg) 679 ddi_dma_nextcookie(skreq->io_dma_handle, &xfer->x_dmac); 680 } 681 682 skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL; 683 skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST; 684 685 (void) ddi_dma_sync(skreq->sksg_dma_address.dma_handle, 0, 0, 686 DDI_DMA_SYNC_FORDEV); 687 } 688 689 /* 690 * 691 * Name: skd_blkdev_postop_sg_list, deallocates DMA 692 * 693 * Inputs: skdev - device state structure. 694 * skreq - skreq data structure. 695 * 696 * Returns: Nothing. 697 * 698 */ 699 /* ARGSUSED */ /* Upstream common source with other platforms. */ 700 static void 701 skd_blkdev_postop_sg_list(struct skd_device *skdev, 702 struct skd_request_context *skreq) 703 { 704 /* 705 * restore the next ptr for next IO request so we 706 * don't have to set it every time. 707 */ 708 skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr = 709 skreq->sksg_dma_address.cookies->dmac_laddress + 710 ((skreq->n_sg) * sizeof (struct fit_sg_descriptor)); 711 } 712 713 /* 714 * 715 * Name: skd_start, initiates an I/O. 716 * 717 * Inputs: skdev - device state structure. 718 * 719 * Returns: EAGAIN if devicfe is not ONLINE. 720 * On error, if the caller is the blkdev driver, return 721 * the error value. Otherwise, return zero. 722 * 723 */ 724 /* Upstream common source with other platforms. */ 725 static void 726 skd_start(skd_device_t *skdev) 727 { 728 struct skd_fitmsg_context *skmsg = NULL; 729 struct fit_msg_hdr *fmh = NULL; 730 struct skd_request_context *skreq = NULL; 731 struct waitqueue *waitq = &skdev->waitqueue; 732 struct skd_scsi_request *scsi_req; 733 skd_buf_private_t *pbuf = NULL; 734 int bcount; 735 736 uint32_t lba; 737 uint32_t count; 738 uint32_t timo_slot; 739 void *cmd_ptr; 740 uint32_t sg_byte_count = 0; 741 742 /* 743 * Stop conditions: 744 * - There are no more native requests 745 * - There are already the maximum number of requests is progress 746 * - There are no more skd_request_context entries 747 * - There are no more FIT msg buffers 748 */ 749 for (;;) { 750 /* Are too many requests already in progress? */ 751 if (skdev->queue_depth_busy >= skdev->queue_depth_limit) { 752 Dcmn_err(CE_NOTE, "qdepth %d, limit %d\n", 753 skdev->queue_depth_busy, 754 skdev->queue_depth_limit); 755 break; 756 } 757 758 WAITQ_LOCK(skdev); 759 if (SIMPLEQ_EMPTY(waitq)) { 760 WAITQ_UNLOCK(skdev); 761 break; 762 } 763 764 /* Is a skd_request_context available? */ 765 skreq = skdev->skreq_free_list; 766 if (skreq == NULL) { 767 WAITQ_UNLOCK(skdev); 768 break; 769 } 770 771 ASSERT(skreq->state == SKD_REQ_STATE_IDLE); 772 ASSERT((skreq->id & SKD_ID_INCR) == 0); 773 774 skdev->skreq_free_list = skreq->next; 775 776 skreq->state = SKD_REQ_STATE_BUSY; 777 skreq->id += SKD_ID_INCR; 778 779 /* Start a new FIT msg if there is none in progress. */ 780 if (skmsg == NULL) { 781 /* Are there any FIT msg buffers available? */ 782 skmsg = skdev->skmsg_free_list; 783 if (skmsg == NULL) { 784 WAITQ_UNLOCK(skdev); 785 break; 786 } 787 788 ASSERT(skmsg->state == SKD_MSG_STATE_IDLE); 789 ASSERT((skmsg->id & SKD_ID_INCR) == 0); 790 791 skdev->skmsg_free_list = skmsg->next; 792 793 skmsg->state = SKD_MSG_STATE_BUSY; 794 skmsg->id += SKD_ID_INCR; 795 796 /* Initialize the FIT msg header */ 797 fmh = (struct fit_msg_hdr *)skmsg->msg_buf64; 798 bzero(fmh, sizeof (*fmh)); /* Too expensive */ 799 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT; 800 skmsg->length = sizeof (struct fit_msg_hdr); 801 } 802 803 /* 804 * At this point we are committed to either start or reject 805 * the native request. Note that a FIT msg may have just been 806 * started but contains no SoFIT requests yet. 807 * Now - dequeue pbuf. 808 */ 809 pbuf = skd_get_queued_pbuf(skdev); 810 WAITQ_UNLOCK(skdev); 811 812 skreq->pbuf = pbuf; 813 lba = pbuf->x_xfer->x_blkno; 814 count = pbuf->x_xfer->x_nblks; 815 skreq->did_complete = 0; 816 817 skreq->fitmsg_id = skmsg->id; 818 819 Dcmn_err(CE_NOTE, 820 "pbuf=%p lba=%u(0x%x) count=%u(0x%x) dir=%x\n", 821 (void *)pbuf, lba, lba, count, count, pbuf->dir); 822 823 /* 824 * Transcode the request. 825 */ 826 cmd_ptr = &skmsg->msg_buf[skmsg->length]; 827 bzero(cmd_ptr, 32); /* This is too expensive */ 828 829 scsi_req = cmd_ptr; 830 scsi_req->hdr.tag = skreq->id; 831 scsi_req->hdr.sg_list_dma_address = 832 cpu_to_be64(skreq->sksg_dma_address.cookies->dmac_laddress); 833 scsi_req->cdb[1] = 0; 834 scsi_req->cdb[2] = (lba & 0xff000000) >> 24; 835 scsi_req->cdb[3] = (lba & 0xff0000) >> 16; 836 scsi_req->cdb[4] = (lba & 0xff00) >> 8; 837 scsi_req->cdb[5] = (lba & 0xff); 838 scsi_req->cdb[6] = 0; 839 scsi_req->cdb[7] = (count & 0xff00) >> 8; 840 scsi_req->cdb[8] = count & 0xff; 841 scsi_req->cdb[9] = 0; 842 843 if (pbuf->dir & B_READ) { 844 scsi_req->cdb[0] = 0x28; 845 skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST; 846 } else { 847 scsi_req->cdb[0] = 0x2a; 848 skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD; 849 } 850 851 skd_blkdev_preop_sg_list(skdev, skreq, &sg_byte_count); 852 853 scsi_req->hdr.sg_list_len_bytes = cpu_to_be32(sg_byte_count); 854 855 bcount = (sg_byte_count + 511) / 512; 856 scsi_req->cdb[7] = (bcount & 0xff00) >> 8; 857 scsi_req->cdb[8] = bcount & 0xff; 858 859 Dcmn_err(CE_NOTE, 860 "skd_start: pbuf=%p skreq->id=%x opc=%x ====>>>>>", 861 (void *)pbuf, skreq->id, *scsi_req->cdb); 862 863 skmsg->length += sizeof (struct skd_scsi_request); 864 fmh->num_protocol_cmds_coalesced++; 865 866 /* 867 * Update the active request counts. 868 * Capture the timeout timestamp. 869 */ 870 skreq->timeout_stamp = skdev->timeout_stamp; 871 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK; 872 873 atomic_inc_32(&skdev->timeout_slot[timo_slot]); 874 atomic_inc_32(&skdev->queue_depth_busy); 875 876 Dcmn_err(CE_NOTE, "req=0x%x busy=%d timo_slot=%d", 877 skreq->id, skdev->queue_depth_busy, timo_slot); 878 /* 879 * If the FIT msg buffer is full send it. 880 */ 881 if (skmsg->length >= SKD_N_FITMSG_BYTES || 882 fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) { 883 884 atomic_inc_64(&skdev->active_cmds); 885 pbuf->skreq = skreq; 886 887 skdev->fitmsg_sent1++; 888 skd_send_fitmsg(skdev, skmsg); 889 890 skmsg = NULL; 891 fmh = NULL; 892 } 893 } 894 895 /* 896 * Is a FIT msg in progress? If it is empty put the buffer back 897 * on the free list. If it is non-empty send what we got. 898 * This minimizes latency when there are fewer requests than 899 * what fits in a FIT msg. 900 */ 901 if (skmsg != NULL) { 902 ASSERT(skmsg->length > sizeof (struct fit_msg_hdr)); 903 Dcmn_err(CE_NOTE, "sending msg=%p, len %d", 904 (void *)skmsg, skmsg->length); 905 906 skdev->active_cmds++; 907 908 skdev->fitmsg_sent2++; 909 skd_send_fitmsg(skdev, skmsg); 910 } 911 } 912 913 /* 914 * 915 * Name: skd_end_request 916 * 917 * Inputs: skdev - device state structure. 918 * skreq - request structure. 919 * error - I/O error value. 920 * 921 * Returns: Nothing. 922 * 923 */ 924 static void 925 skd_end_request(struct skd_device *skdev, 926 struct skd_request_context *skreq, int error) 927 { 928 skdev->ios_completed++; 929 skd_io_done(skdev, skreq->pbuf, error, SKD_IODONE_WIOC); 930 skreq->pbuf = NULL; 931 skreq->did_complete = 1; 932 } 933 934 /* 935 * 936 * Name: skd_end_request_abnormal 937 * 938 * Inputs: skdev - device state structure. 939 * pbuf - I/O request. 940 * error - I/O error value. 941 * mode - debug 942 * 943 * Returns: Nothing. 944 * 945 */ 946 static void 947 skd_end_request_abnormal(skd_device_t *skdev, skd_buf_private_t *pbuf, 948 int error, int mode) 949 { 950 skd_io_done(skdev, pbuf, error, mode); 951 } 952 953 /* 954 * 955 * Name: skd_request_fn_not_online, handles the condition 956 * of the device not being online. 957 * 958 * Inputs: skdev - device state structure. 959 * 960 * Returns: nothing (void). 961 * 962 */ 963 static void 964 skd_request_fn_not_online(skd_device_t *skdev) 965 { 966 int error; 967 skd_buf_private_t *pbuf; 968 969 ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE); 970 971 skd_log_skdev(skdev, "req_not_online"); 972 973 switch (skdev->state) { 974 case SKD_DRVR_STATE_PAUSING: 975 case SKD_DRVR_STATE_PAUSED: 976 case SKD_DRVR_STATE_STARTING: 977 case SKD_DRVR_STATE_RESTARTING: 978 case SKD_DRVR_STATE_WAIT_BOOT: 979 /* 980 * In case of starting, we haven't started the queue, 981 * so we can't get here... but requests are 982 * possibly hanging out waiting for us because we 983 * reported the dev/skd/0 already. They'll wait 984 * forever if connect doesn't complete. 985 * What to do??? delay dev/skd/0 ?? 986 */ 987 case SKD_DRVR_STATE_BUSY: 988 case SKD_DRVR_STATE_BUSY_IMMINENT: 989 case SKD_DRVR_STATE_BUSY_ERASE: 990 case SKD_DRVR_STATE_DRAINING_TIMEOUT: 991 return; 992 993 case SKD_DRVR_STATE_BUSY_SANITIZE: 994 case SKD_DRVR_STATE_STOPPING: 995 case SKD_DRVR_STATE_SYNCING: 996 case SKD_DRVR_STATE_FAULT: 997 case SKD_DRVR_STATE_DISAPPEARED: 998 default: 999 error = -EIO; 1000 break; 1001 } 1002 1003 /* 1004 * If we get here, terminate all pending block requeusts 1005 * with EIO and any scsi pass thru with appropriate sense 1006 */ 1007 ASSERT(WAITQ_LOCK_HELD(skdev)); 1008 if (SIMPLEQ_EMPTY(&skdev->waitqueue)) 1009 return; 1010 1011 while ((pbuf = skd_get_queued_pbuf(skdev))) 1012 skd_end_request_abnormal(skdev, pbuf, error, SKD_IODONE_WNIOC); 1013 1014 cv_signal(&skdev->cv_waitq); 1015 } 1016 1017 /* 1018 * TIMER 1019 */ 1020 1021 static void skd_timer_tick_not_online(struct skd_device *skdev); 1022 1023 /* 1024 * 1025 * Name: skd_timer_tick, monitors requests for timeouts. 1026 * 1027 * Inputs: skdev - device state structure. 1028 * 1029 * Returns: Nothing. 1030 * 1031 */ 1032 static void 1033 skd_timer_tick(skd_device_t *skdev) 1034 { 1035 uint32_t timo_slot; 1036 1037 skdev->timer_active = 1; 1038 1039 if (skdev->state != SKD_DRVR_STATE_ONLINE) { 1040 skd_timer_tick_not_online(skdev); 1041 goto timer_func_out; 1042 } 1043 1044 skdev->timeout_stamp++; 1045 timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK; 1046 1047 /* 1048 * All requests that happened during the previous use of 1049 * this slot should be done by now. The previous use was 1050 * over 7 seconds ago. 1051 */ 1052 if (skdev->timeout_slot[timo_slot] == 0) { 1053 goto timer_func_out; 1054 } 1055 1056 /* Something is overdue */ 1057 Dcmn_err(CE_NOTE, "found %d timeouts, draining busy=%d", 1058 skdev->timeout_slot[timo_slot], 1059 skdev->queue_depth_busy); 1060 skdev->timer_countdown = SKD_TIMER_SECONDS(3); 1061 skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT; 1062 skdev->timo_slot = timo_slot; 1063 1064 timer_func_out: 1065 skdev->timer_active = 0; 1066 } 1067 1068 /* 1069 * 1070 * Name: skd_timer_tick_not_online, handles various device 1071 * state transitions. 1072 * 1073 * Inputs: skdev - device state structure. 1074 * 1075 * Returns: Nothing. 1076 * 1077 */ 1078 static void 1079 skd_timer_tick_not_online(struct skd_device *skdev) 1080 { 1081 Dcmn_err(CE_NOTE, "skd_skd_timer_tick_not_online: state=%d tmo=%d", 1082 skdev->state, skdev->timer_countdown); 1083 1084 ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE); 1085 1086 switch (skdev->state) { 1087 case SKD_DRVR_STATE_IDLE: 1088 case SKD_DRVR_STATE_LOAD: 1089 break; 1090 case SKD_DRVR_STATE_BUSY_SANITIZE: 1091 cmn_err(CE_WARN, "!drive busy sanitize[%x], driver[%x]\n", 1092 skdev->drive_state, skdev->state); 1093 break; 1094 1095 case SKD_DRVR_STATE_BUSY: 1096 case SKD_DRVR_STATE_BUSY_IMMINENT: 1097 case SKD_DRVR_STATE_BUSY_ERASE: 1098 Dcmn_err(CE_NOTE, "busy[%x], countdown=%d\n", 1099 skdev->state, skdev->timer_countdown); 1100 if (skdev->timer_countdown > 0) { 1101 skdev->timer_countdown--; 1102 return; 1103 } 1104 cmn_err(CE_WARN, "!busy[%x], timedout=%d, restarting device.", 1105 skdev->state, skdev->timer_countdown); 1106 skd_restart_device(skdev); 1107 break; 1108 1109 case SKD_DRVR_STATE_WAIT_BOOT: 1110 case SKD_DRVR_STATE_STARTING: 1111 if (skdev->timer_countdown > 0) { 1112 skdev->timer_countdown--; 1113 return; 1114 } 1115 /* 1116 * For now, we fault the drive. Could attempt resets to 1117 * revcover at some point. 1118 */ 1119 skdev->state = SKD_DRVR_STATE_FAULT; 1120 1121 cmn_err(CE_WARN, "!(%s): DriveFault Connect Timeout (%x)", 1122 skd_name(skdev), skdev->drive_state); 1123 1124 /* start the queue so we can respond with error to requests */ 1125 skd_start(skdev); 1126 1127 /* wakeup anyone waiting for startup complete */ 1128 skdev->gendisk_on = -1; 1129 1130 cv_signal(&skdev->cv_waitq); 1131 break; 1132 1133 1134 case SKD_DRVR_STATE_PAUSING: 1135 case SKD_DRVR_STATE_PAUSED: 1136 break; 1137 1138 case SKD_DRVR_STATE_DRAINING_TIMEOUT: 1139 cmn_err(CE_WARN, 1140 "!%s: draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n", 1141 skdev->name, 1142 skdev->timo_slot, 1143 skdev->timer_countdown, 1144 skdev->queue_depth_busy, 1145 skdev->timeout_slot[skdev->timo_slot]); 1146 /* if the slot has cleared we can let the I/O continue */ 1147 if (skdev->timeout_slot[skdev->timo_slot] == 0) { 1148 Dcmn_err(CE_NOTE, "Slot drained, starting queue."); 1149 skdev->state = SKD_DRVR_STATE_ONLINE; 1150 skd_start(skdev); 1151 return; 1152 } 1153 if (skdev->timer_countdown > 0) { 1154 skdev->timer_countdown--; 1155 return; 1156 } 1157 skd_restart_device(skdev); 1158 break; 1159 1160 case SKD_DRVR_STATE_RESTARTING: 1161 if (skdev->timer_countdown > 0) { 1162 skdev->timer_countdown--; 1163 1164 return; 1165 } 1166 /* 1167 * For now, we fault the drive. Could attempt resets to 1168 * revcover at some point. 1169 */ 1170 skdev->state = SKD_DRVR_STATE_FAULT; 1171 cmn_err(CE_WARN, "!(%s): DriveFault Reconnect Timeout (%x)\n", 1172 skd_name(skdev), skdev->drive_state); 1173 1174 /* 1175 * Recovering does two things: 1176 * 1. completes IO with error 1177 * 2. reclaims dma resources 1178 * When is it safe to recover requests? 1179 * - if the drive state is faulted 1180 * - if the state is still soft reset after out timeout 1181 * - if the drive registers are dead (state = FF) 1182 */ 1183 1184 if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) || 1185 (skdev->drive_state == FIT_SR_DRIVE_FAULT) || 1186 (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK)) { 1187 /* 1188 * It never came out of soft reset. Try to 1189 * recover the requests and then let them 1190 * fail. This is to mitigate hung processes. 1191 * 1192 * Acquire the interrupt lock since these lists are 1193 * manipulated by interrupt handlers. 1194 */ 1195 ASSERT(!WAITQ_LOCK_HELD(skdev)); 1196 INTR_LOCK(skdev); 1197 skd_recover_requests(skdev); 1198 INTR_UNLOCK(skdev); 1199 } 1200 /* start the queue so we can respond with error to requests */ 1201 skd_start(skdev); 1202 /* wakeup anyone waiting for startup complete */ 1203 skdev->gendisk_on = -1; 1204 cv_signal(&skdev->cv_waitq); 1205 break; 1206 1207 case SKD_DRVR_STATE_RESUMING: 1208 case SKD_DRVR_STATE_STOPPING: 1209 case SKD_DRVR_STATE_SYNCING: 1210 case SKD_DRVR_STATE_FAULT: 1211 case SKD_DRVR_STATE_DISAPPEARED: 1212 default: 1213 break; 1214 } 1215 } 1216 1217 /* 1218 * 1219 * Name: skd_timer, kicks off the timer processing. 1220 * 1221 * Inputs: skdev - device state structure. 1222 * 1223 * Returns: Nothing. 1224 * 1225 */ 1226 static void 1227 skd_timer(void *arg) 1228 { 1229 skd_device_t *skdev = (skd_device_t *)arg; 1230 1231 /* Someone set us to 0, don't bother rescheduling. */ 1232 ADAPTER_STATE_LOCK(skdev); 1233 if (skdev->skd_timer_timeout_id != 0) { 1234 ADAPTER_STATE_UNLOCK(skdev); 1235 /* Pardon the drop-and-then-acquire logic here. */ 1236 skd_timer_tick(skdev); 1237 ADAPTER_STATE_LOCK(skdev); 1238 /* Restart timer, if not being stopped. */ 1239 if (skdev->skd_timer_timeout_id != 0) { 1240 skdev->skd_timer_timeout_id = 1241 timeout(skd_timer, arg, skd_timer_ticks); 1242 } 1243 } 1244 ADAPTER_STATE_UNLOCK(skdev); 1245 } 1246 1247 /* 1248 * 1249 * Name: skd_start_timer, kicks off the 1-second timer. 1250 * 1251 * Inputs: skdev - device state structure. 1252 * 1253 * Returns: Zero. 1254 * 1255 */ 1256 static void 1257 skd_start_timer(struct skd_device *skdev) 1258 { 1259 /* Start one second driver timer. */ 1260 ADAPTER_STATE_LOCK(skdev); 1261 ASSERT(skdev->skd_timer_timeout_id == 0); 1262 1263 /* 1264 * Do first "timeout tick" right away, but not in this 1265 * thread. 1266 */ 1267 skdev->skd_timer_timeout_id = timeout(skd_timer, skdev, 1); 1268 ADAPTER_STATE_UNLOCK(skdev); 1269 } 1270 1271 /* 1272 * INTERNAL REQUESTS -- generated by driver itself 1273 */ 1274 1275 /* 1276 * 1277 * Name: skd_format_internal_skspcl, setups the internal 1278 * FIT request message. 1279 * 1280 * Inputs: skdev - device state structure. 1281 * 1282 * Returns: One. 1283 * 1284 */ 1285 static int 1286 skd_format_internal_skspcl(struct skd_device *skdev) 1287 { 1288 struct skd_special_context *skspcl = &skdev->internal_skspcl; 1289 struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0]; 1290 struct fit_msg_hdr *fmh; 1291 uint64_t dma_address; 1292 struct skd_scsi_request *scsi; 1293 1294 fmh = (struct fit_msg_hdr *)&skspcl->msg_buf64[0]; 1295 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT; 1296 fmh->num_protocol_cmds_coalesced = 1; 1297 1298 /* Instead of 64-bytes in, use 8-(64-bit-words) for linted alignment. */ 1299 scsi = (struct skd_scsi_request *)&skspcl->msg_buf64[8]; 1300 bzero(scsi, sizeof (*scsi)); 1301 dma_address = skspcl->req.sksg_dma_address.cookies->_dmu._dmac_ll; 1302 scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address); 1303 sgd->control = FIT_SGD_CONTROL_LAST; 1304 sgd->byte_count = 0; 1305 sgd->host_side_addr = skspcl->db_dma_address.cookies->_dmu._dmac_ll; 1306 sgd->dev_side_addr = 0; /* not used */ 1307 sgd->next_desc_ptr = 0LL; 1308 1309 return (1); 1310 } 1311 1312 /* 1313 * 1314 * Name: skd_send_internal_skspcl, send internal requests to 1315 * the hardware. 1316 * 1317 * Inputs: skdev - device state structure. 1318 * skspcl - request structure 1319 * opcode - just what it says 1320 * 1321 * Returns: Nothing. 1322 * 1323 */ 1324 void 1325 skd_send_internal_skspcl(struct skd_device *skdev, 1326 struct skd_special_context *skspcl, uint8_t opcode) 1327 { 1328 struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0]; 1329 struct skd_scsi_request *scsi; 1330 1331 if (SKD_REQ_STATE_IDLE != skspcl->req.state) { 1332 /* 1333 * A refresh is already in progress. 1334 * Just wait for it to finish. 1335 */ 1336 return; 1337 } 1338 1339 ASSERT(0 == (skspcl->req.id & SKD_ID_INCR)); 1340 skspcl->req.state = SKD_REQ_STATE_BUSY; 1341 skspcl->req.id += SKD_ID_INCR; 1342 1343 /* Instead of 64-bytes in, use 8-(64-bit-words) for linted alignment. */ 1344 scsi = (struct skd_scsi_request *)&skspcl->msg_buf64[8]; 1345 scsi->hdr.tag = skspcl->req.id; 1346 1347 Dcmn_err(CE_NOTE, "internal skspcl: opcode=%x req.id=%x ==========>", 1348 opcode, skspcl->req.id); 1349 1350 switch (opcode) { 1351 case TEST_UNIT_READY: 1352 scsi->cdb[0] = TEST_UNIT_READY; 1353 scsi->cdb[1] = 0x00; 1354 scsi->cdb[2] = 0x00; 1355 scsi->cdb[3] = 0x00; 1356 scsi->cdb[4] = 0x00; 1357 scsi->cdb[5] = 0x00; 1358 sgd->byte_count = 0; 1359 scsi->hdr.sg_list_len_bytes = 0; 1360 break; 1361 case READ_CAPACITY_EXT: 1362 scsi->cdb[0] = READ_CAPACITY_EXT; 1363 scsi->cdb[1] = 0x10; 1364 scsi->cdb[2] = 0x00; 1365 scsi->cdb[3] = 0x00; 1366 scsi->cdb[4] = 0x00; 1367 scsi->cdb[5] = 0x00; 1368 scsi->cdb[6] = 0x00; 1369 scsi->cdb[7] = 0x00; 1370 scsi->cdb[8] = 0x00; 1371 scsi->cdb[9] = 0x00; 1372 scsi->cdb[10] = 0x00; 1373 scsi->cdb[11] = 0x00; 1374 scsi->cdb[12] = 0x00; 1375 scsi->cdb[13] = 0x20; 1376 scsi->cdb[14] = 0x00; 1377 scsi->cdb[15] = 0x00; 1378 sgd->byte_count = SKD_N_READ_CAP_EXT_BYTES; 1379 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); 1380 break; 1381 case 0x28: 1382 (void) memset(skspcl->data_buf, 0x65, SKD_N_INTERNAL_BYTES); 1383 1384 scsi->cdb[0] = 0x28; 1385 scsi->cdb[1] = 0x00; 1386 scsi->cdb[2] = 0x00; 1387 scsi->cdb[3] = 0x00; 1388 scsi->cdb[4] = 0x00; 1389 scsi->cdb[5] = 0x00; 1390 scsi->cdb[6] = 0x00; 1391 scsi->cdb[7] = 0x00; 1392 scsi->cdb[8] = 0x01; 1393 scsi->cdb[9] = 0x00; 1394 sgd->byte_count = SKD_N_INTERNAL_BYTES; 1395 scsi->hdr.sg_list_len_bytes = cpu_to_be32(SKD_N_INTERNAL_BYTES); 1396 break; 1397 case INQUIRY: 1398 scsi->cdb[0] = INQUIRY; 1399 scsi->cdb[1] = 0x01; /* evpd */ 1400 scsi->cdb[2] = 0x80; /* serial number page */ 1401 scsi->cdb[3] = 0x00; 1402 scsi->cdb[4] = 0x10; 1403 scsi->cdb[5] = 0x00; 1404 sgd->byte_count = 16; /* SKD_N_INQ_BYTES */; 1405 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); 1406 break; 1407 case INQUIRY2: 1408 scsi->cdb[0] = INQUIRY; 1409 scsi->cdb[1] = 0x00; 1410 scsi->cdb[2] = 0x00; /* serial number page */ 1411 scsi->cdb[3] = 0x00; 1412 scsi->cdb[4] = 0x24; 1413 scsi->cdb[5] = 0x00; 1414 sgd->byte_count = 36; /* SKD_N_INQ_BYTES */; 1415 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); 1416 break; 1417 case SYNCHRONIZE_CACHE: 1418 scsi->cdb[0] = SYNCHRONIZE_CACHE; 1419 scsi->cdb[1] = 0x00; 1420 scsi->cdb[2] = 0x00; 1421 scsi->cdb[3] = 0x00; 1422 scsi->cdb[4] = 0x00; 1423 scsi->cdb[5] = 0x00; 1424 scsi->cdb[6] = 0x00; 1425 scsi->cdb[7] = 0x00; 1426 scsi->cdb[8] = 0x00; 1427 scsi->cdb[9] = 0x00; 1428 sgd->byte_count = 0; 1429 scsi->hdr.sg_list_len_bytes = 0; 1430 break; 1431 default: 1432 ASSERT("Don't know what to send"); 1433 return; 1434 1435 } 1436 1437 skd_send_special_fitmsg(skdev, skspcl); 1438 } 1439 1440 /* 1441 * 1442 * Name: skd_refresh_device_data, sends a TUR command. 1443 * 1444 * Inputs: skdev - device state structure. 1445 * 1446 * Returns: Nothing. 1447 * 1448 */ 1449 static void 1450 skd_refresh_device_data(struct skd_device *skdev) 1451 { 1452 struct skd_special_context *skspcl = &skdev->internal_skspcl; 1453 1454 Dcmn_err(CE_NOTE, "refresh_device_data: state=%d", skdev->state); 1455 1456 skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY); 1457 } 1458 1459 /* 1460 * 1461 * Name: skd_complete_internal, handles the completion of 1462 * driver-initiated I/O requests. 1463 * 1464 * Inputs: skdev - device state structure. 1465 * skcomp - completion structure. 1466 * skerr - error structure. 1467 * skspcl - request structure. 1468 * 1469 * Returns: Nothing. 1470 * 1471 */ 1472 /* ARGSUSED */ /* Upstream common source with other platforms. */ 1473 static void 1474 skd_complete_internal(struct skd_device *skdev, 1475 volatile struct fit_completion_entry_v1 *skcomp, 1476 volatile struct fit_comp_error_info *skerr, 1477 struct skd_special_context *skspcl) 1478 { 1479 uint8_t *buf = skspcl->data_buf; 1480 uint8_t status = 2; 1481 /* Instead of 64-bytes in, use 8-(64-bit-words) for linted alignment. */ 1482 struct skd_scsi_request *scsi = 1483 (struct skd_scsi_request *)&skspcl->msg_buf64[8]; 1484 1485 ASSERT(skspcl == &skdev->internal_skspcl); 1486 1487 (void) ddi_dma_sync(skspcl->db_dma_address.dma_handle, 0, 0, 1488 DDI_DMA_SYNC_FORKERNEL); 1489 (void) ddi_dma_sync(skspcl->mb_dma_address.dma_handle, 0, 0, 1490 DDI_DMA_SYNC_FORKERNEL); 1491 1492 Dcmn_err(CE_NOTE, "complete internal %x", scsi->cdb[0]); 1493 1494 skspcl->req.completion = *skcomp; 1495 skspcl->req.state = SKD_REQ_STATE_IDLE; 1496 skspcl->req.id += SKD_ID_INCR; 1497 1498 status = skspcl->req.completion.status; 1499 1500 Dcmn_err(CE_NOTE, "<<<<====== complete_internal: opc=%x", *scsi->cdb); 1501 1502 switch (scsi->cdb[0]) { 1503 case TEST_UNIT_READY: 1504 if (SAM_STAT_GOOD == status) { 1505 skd_send_internal_skspcl(skdev, skspcl, 1506 READ_CAPACITY_EXT); 1507 } else { 1508 if (skdev->state == SKD_DRVR_STATE_STOPPING) { 1509 cmn_err(CE_WARN, 1510 "!%s: TUR failed, don't send anymore" 1511 "state 0x%x", skdev->name, skdev->state); 1512 1513 return; 1514 } 1515 1516 Dcmn_err(CE_NOTE, "%s: TUR failed, retry skerr", 1517 skdev->name); 1518 skd_send_internal_skspcl(skdev, skspcl, 0x00); 1519 } 1520 break; 1521 case READ_CAPACITY_EXT: { 1522 uint64_t cap, Nblocks; 1523 uint64_t xbuf[1]; 1524 1525 skdev->read_cap_is_valid = 0; 1526 if (SAM_STAT_GOOD == status) { 1527 bcopy(buf, xbuf, 8); 1528 cap = be64_to_cpu(*xbuf); 1529 skdev->read_cap_last_lba = cap; 1530 skdev->read_cap_blocksize = 1531 (buf[8] << 24) | (buf[9] << 16) | 1532 (buf[10] << 8) | buf[11]; 1533 1534 cap *= skdev->read_cap_blocksize; 1535 Dcmn_err(CE_NOTE, " Last LBA: %" PRIu64 " (0x%" PRIx64 1536 "), blk sz: %d, Capacity: %" PRIu64 "GB\n", 1537 skdev->read_cap_last_lba, 1538 skdev->read_cap_last_lba, 1539 skdev->read_cap_blocksize, 1540 cap >> 30ULL); 1541 1542 Nblocks = skdev->read_cap_last_lba + 1; 1543 1544 skdev->Nblocks = Nblocks; 1545 skdev->read_cap_is_valid = 1; 1546 1547 skd_send_internal_skspcl(skdev, skspcl, INQUIRY2); 1548 1549 } else { 1550 Dcmn_err(CE_NOTE, "**** READCAP failed, retry TUR"); 1551 skd_send_internal_skspcl(skdev, skspcl, 1552 TEST_UNIT_READY); 1553 } 1554 break; 1555 } 1556 case INQUIRY: 1557 skdev->inquiry_is_valid = 0; 1558 if (SAM_STAT_GOOD == status) { 1559 skdev->inquiry_is_valid = 1; 1560 1561 if (scsi->cdb[1] == 0x1) { 1562 bcopy(&buf[4], skdev->inq_serial_num, 12); 1563 skdev->inq_serial_num[12] = '\0'; 1564 } else { 1565 char *tmp = skdev->inq_vendor_id; 1566 1567 bcopy(&buf[8], tmp, 8); 1568 tmp[8] = '\0'; 1569 1570 tmp = skdev->inq_product_id; 1571 bcopy(&buf[16], tmp, 16); 1572 tmp[16] = '\0'; 1573 1574 tmp = skdev->inq_product_rev; 1575 bcopy(&buf[32], tmp, 4); 1576 tmp[4] = '\0'; 1577 } 1578 } 1579 1580 if (skdev->state != SKD_DRVR_STATE_ONLINE) 1581 if (skd_unquiesce_dev(skdev) < 0) 1582 cmn_err(CE_NOTE, "** failed, to ONLINE device"); 1583 break; 1584 case SYNCHRONIZE_CACHE: 1585 skdev->sync_done = (SAM_STAT_GOOD == status) ? 1 : -1; 1586 1587 cv_signal(&skdev->cv_waitq); 1588 break; 1589 1590 default: 1591 ASSERT("we didn't send this"); 1592 } 1593 } 1594 1595 /* 1596 * FIT MESSAGES 1597 */ 1598 1599 /* 1600 * 1601 * Name: skd_send_fitmsg, send a FIT message to the hardware. 1602 * 1603 * Inputs: skdev - device state structure. 1604 * skmsg - FIT message structure. 1605 * 1606 * Returns: Nothing. 1607 * 1608 */ 1609 /* ARGSUSED */ /* Upstream common source with other platforms. */ 1610 static void 1611 skd_send_fitmsg(struct skd_device *skdev, 1612 struct skd_fitmsg_context *skmsg) 1613 { 1614 uint64_t qcmd; 1615 struct fit_msg_hdr *fmh; 1616 1617 Dcmn_err(CE_NOTE, "msgbuf's DMA addr: 0x%" PRIx64 ", qdepth_busy=%d", 1618 skmsg->mb_dma_address.cookies->dmac_laddress, 1619 skdev->queue_depth_busy); 1620 1621 Dcmn_err(CE_NOTE, "msg_buf 0x%p, offset %x", (void *)skmsg->msg_buf, 1622 skmsg->offset); 1623 1624 qcmd = skmsg->mb_dma_address.cookies->dmac_laddress; 1625 qcmd |= FIT_QCMD_QID_NORMAL; 1626 1627 fmh = (struct fit_msg_hdr *)skmsg->msg_buf64; 1628 skmsg->outstanding = fmh->num_protocol_cmds_coalesced; 1629 1630 if (skdev->dbg_level > 1) { 1631 uint8_t *bp = skmsg->msg_buf; 1632 int i; 1633 1634 for (i = 0; i < skmsg->length; i += 8) { 1635 Dcmn_err(CE_NOTE, " msg[%2d] %02x %02x %02x %02x " 1636 "%02x %02x %02x %02x", 1637 i, bp[i + 0], bp[i + 1], bp[i + 2], 1638 bp[i + 3], bp[i + 4], bp[i + 5], 1639 bp[i + 6], bp[i + 7]); 1640 if (i == 0) i = 64 - 8; 1641 } 1642 } 1643 1644 (void) ddi_dma_sync(skmsg->mb_dma_address.dma_handle, 0, 0, 1645 DDI_DMA_SYNC_FORDEV); 1646 1647 ASSERT(skmsg->length > sizeof (struct fit_msg_hdr)); 1648 if (skmsg->length > 256) { 1649 qcmd |= FIT_QCMD_MSGSIZE_512; 1650 } else if (skmsg->length > 128) { 1651 qcmd |= FIT_QCMD_MSGSIZE_256; 1652 } else if (skmsg->length > 64) { 1653 qcmd |= FIT_QCMD_MSGSIZE_128; 1654 } 1655 1656 skdev->ios_started++; 1657 1658 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND); 1659 } 1660 1661 /* 1662 * 1663 * Name: skd_send_special_fitmsg, send a special FIT message 1664 * to the hardware used driver-originated I/O requests. 1665 * 1666 * Inputs: skdev - device state structure. 1667 * skspcl - skspcl structure. 1668 * 1669 * Returns: Nothing. 1670 * 1671 */ 1672 static void 1673 skd_send_special_fitmsg(struct skd_device *skdev, 1674 struct skd_special_context *skspcl) 1675 { 1676 uint64_t qcmd; 1677 1678 Dcmn_err(CE_NOTE, "send_special_fitmsg: pt 1"); 1679 1680 if (skdev->dbg_level > 1) { 1681 uint8_t *bp = skspcl->msg_buf; 1682 int i; 1683 1684 for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) { 1685 cmn_err(CE_NOTE, 1686 " spcl[%2d] %02x %02x %02x %02x " 1687 "%02x %02x %02x %02x\n", i, 1688 bp[i + 0], bp[i + 1], bp[i + 2], bp[i + 3], 1689 bp[i + 4], bp[i + 5], bp[i + 6], bp[i + 7]); 1690 if (i == 0) i = 64 - 8; 1691 } 1692 1693 for (i = 0; i < skspcl->req.n_sg; i++) { 1694 struct fit_sg_descriptor *sgd = 1695 &skspcl->req.sksg_list[i]; 1696 1697 cmn_err(CE_NOTE, " sg[%d] count=%u ctrl=0x%x " 1698 "addr=0x%" PRIx64 " next=0x%" PRIx64, 1699 i, sgd->byte_count, sgd->control, 1700 sgd->host_side_addr, sgd->next_desc_ptr); 1701 } 1702 } 1703 1704 (void) ddi_dma_sync(skspcl->mb_dma_address.dma_handle, 0, 0, 1705 DDI_DMA_SYNC_FORDEV); 1706 (void) ddi_dma_sync(skspcl->db_dma_address.dma_handle, 0, 0, 1707 DDI_DMA_SYNC_FORDEV); 1708 1709 /* 1710 * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr 1711 * and one 64-byte SSDI command. 1712 */ 1713 qcmd = skspcl->mb_dma_address.cookies->dmac_laddress; 1714 1715 qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128; 1716 1717 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND); 1718 } 1719 1720 /* 1721 * COMPLETION QUEUE 1722 */ 1723 1724 static void skd_complete_other(struct skd_device *skdev, 1725 volatile struct fit_completion_entry_v1 *skcomp, 1726 volatile struct fit_comp_error_info *skerr); 1727 1728 struct sns_info { 1729 uint8_t type; 1730 uint8_t stat; 1731 uint8_t key; 1732 uint8_t asc; 1733 uint8_t ascq; 1734 uint8_t mask; 1735 enum skd_check_status_action action; 1736 }; 1737 1738 static struct sns_info skd_chkstat_table[] = { 1739 /* Good */ 1740 {0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c, SKD_CHECK_STATUS_REPORT_GOOD}, 1741 1742 /* Smart alerts */ 1743 {0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */ 1744 SKD_CHECK_STATUS_REPORT_SMART_ALERT}, 1745 {0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */ 1746 SKD_CHECK_STATUS_REPORT_SMART_ALERT}, 1747 {0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temp over trigger */ 1748 SKD_CHECK_STATUS_REPORT_SMART_ALERT}, 1749 1750 /* Retry (with limits) */ 1751 {0x70, 0x02, ABORTED_COMMAND, 0, 0, 0x1C, /* DMA errors */ 1752 SKD_CHECK_STATUS_REQUEUE_REQUEST}, 1753 {0x70, 0x02, UNIT_ATTENTION, 0x0B, 0x00, 0x1E, /* warnings */ 1754 SKD_CHECK_STATUS_REQUEUE_REQUEST}, 1755 {0x70, 0x02, UNIT_ATTENTION, 0x5D, 0x00, 0x1E, /* thresholds */ 1756 SKD_CHECK_STATUS_REQUEUE_REQUEST}, 1757 {0x70, 0x02, UNIT_ATTENTION, 0x80, 0x30, 0x1F, /* backup power */ 1758 SKD_CHECK_STATUS_REQUEUE_REQUEST}, 1759 1760 /* Busy (or about to be) */ 1761 {0x70, 0x02, UNIT_ATTENTION, 0x3f, 0x01, 0x1F, /* fw changed */ 1762 SKD_CHECK_STATUS_BUSY_IMMINENT}, 1763 }; 1764 1765 /* 1766 * 1767 * Name: skd_check_status, checks the return status from a 1768 * completed I/O request. 1769 * 1770 * Inputs: skdev - device state structure. 1771 * cmp_status - SCSI status byte. 1772 * skerr - the error data structure. 1773 * 1774 * Returns: Depending on the error condition, return the action 1775 * to be taken as specified in the skd_chkstat_table. 1776 * If no corresponding value is found in the table 1777 * return SKD_CHECK_STATUS_REPORT_GOOD is no error otherwise 1778 * return SKD_CHECK_STATUS_REPORT_ERROR. 1779 * 1780 */ 1781 static enum skd_check_status_action 1782 skd_check_status(struct skd_device *skdev, uint8_t cmp_status, 1783 volatile struct fit_comp_error_info *skerr) 1784 { 1785 /* 1786 * Look up status and sense data to decide how to handle the error 1787 * from the device. 1788 * mask says which fields must match e.g., mask=0x18 means check 1789 * type and stat, ignore key, asc, ascq. 1790 */ 1791 int i, n; 1792 1793 Dcmn_err(CE_NOTE, "(%s): key/asc/ascq %02x/%02x/%02x", 1794 skd_name(skdev), skerr->key, skerr->code, skerr->qual); 1795 1796 Dcmn_err(CE_NOTE, "stat: t=%02x stat=%02x k=%02x c=%02x q=%02x", 1797 skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual); 1798 1799 /* Does the info match an entry in the good category? */ 1800 n = sizeof (skd_chkstat_table) / sizeof (skd_chkstat_table[0]); 1801 for (i = 0; i < n; i++) { 1802 struct sns_info *sns = &skd_chkstat_table[i]; 1803 1804 if (sns->mask & 0x10) 1805 if (skerr->type != sns->type) continue; 1806 1807 if (sns->mask & 0x08) 1808 if (cmp_status != sns->stat) continue; 1809 1810 if (sns->mask & 0x04) 1811 if (skerr->key != sns->key) continue; 1812 1813 if (sns->mask & 0x02) 1814 if (skerr->code != sns->asc) continue; 1815 1816 if (sns->mask & 0x01) 1817 if (skerr->qual != sns->ascq) continue; 1818 1819 if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) { 1820 cmn_err(CE_WARN, "!(%s):SMART Alert: sense key/asc/ascq" 1821 " %02x/%02x/%02x", 1822 skd_name(skdev), skerr->key, 1823 skerr->code, skerr->qual); 1824 } 1825 1826 Dcmn_err(CE_NOTE, "skd_check_status: returning %x", 1827 sns->action); 1828 1829 return (sns->action); 1830 } 1831 1832 /* 1833 * No other match, so nonzero status means error, 1834 * zero status means good 1835 */ 1836 if (cmp_status) { 1837 cmn_err(CE_WARN, 1838 "!%s: status check: qdepth=%d skmfl=%p (%d) skrfl=%p (%d)", 1839 skdev->name, 1840 skdev->queue_depth_busy, 1841 (void *)skdev->skmsg_free_list, skd_list_skmsg(skdev, 0), 1842 (void *)skdev->skreq_free_list, skd_list_skreq(skdev, 0)); 1843 1844 cmn_err(CE_WARN, "!%s: t=%02x stat=%02x k=%02x c=%02x q=%02x", 1845 skdev->name, skerr->type, cmp_status, skerr->key, 1846 skerr->code, skerr->qual); 1847 1848 return (SKD_CHECK_STATUS_REPORT_ERROR); 1849 } 1850 1851 Dcmn_err(CE_NOTE, "status check good default"); 1852 1853 return (SKD_CHECK_STATUS_REPORT_GOOD); 1854 } 1855 1856 /* 1857 * 1858 * Name: skd_isr_completion_posted, handles I/O completions. 1859 * 1860 * Inputs: skdev - device state structure. 1861 * 1862 * Returns: Nothing. 1863 * 1864 */ 1865 static void 1866 skd_isr_completion_posted(struct skd_device *skdev) 1867 { 1868 volatile struct fit_completion_entry_v1 *skcmp = NULL; 1869 volatile struct fit_comp_error_info *skerr; 1870 struct skd_fitmsg_context *skmsg; 1871 struct skd_request_context *skreq; 1872 skd_buf_private_t *pbuf; 1873 uint16_t req_id; 1874 uint32_t req_slot; 1875 uint32_t timo_slot; 1876 uint32_t msg_slot; 1877 uint16_t cmp_cntxt = 0; 1878 uint8_t cmp_status = 0; 1879 uint8_t cmp_cycle = 0; 1880 uint32_t cmp_bytes = 0; 1881 1882 (void) ddi_dma_sync(skdev->cq_dma_address.dma_handle, 0, 0, 1883 DDI_DMA_SYNC_FORKERNEL); 1884 1885 for (;;) { 1886 ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY); 1887 1888 WAITQ_LOCK(skdev); 1889 1890 skcmp = &skdev->skcomp_table[skdev->skcomp_ix]; 1891 cmp_cycle = skcmp->cycle; 1892 cmp_cntxt = skcmp->tag; 1893 cmp_status = skcmp->status; 1894 cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes); 1895 1896 skerr = &skdev->skerr_table[skdev->skcomp_ix]; 1897 1898 Dcmn_err(CE_NOTE, 1899 "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d " 1900 "qdepth_busy=%d rbytes=0x%x proto=%d", 1901 skdev->skcomp_cycle, skdev->skcomp_ix, 1902 cmp_cycle, cmp_cntxt, cmp_status, 1903 skdev->queue_depth_busy, cmp_bytes, skdev->proto_ver); 1904 1905 if (cmp_cycle != skdev->skcomp_cycle) { 1906 Dcmn_err(CE_NOTE, "%s:end of completions", skdev->name); 1907 1908 WAITQ_UNLOCK(skdev); 1909 break; 1910 } 1911 1912 1913 skdev->n_req++; 1914 1915 /* 1916 * Update the completion queue head index and possibly 1917 * the completion cycle count. 1918 */ 1919 skdev->skcomp_ix++; 1920 if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) { 1921 skdev->skcomp_ix = 0; 1922 skdev->skcomp_cycle++; /* 8-bit wrap-around */ 1923 } 1924 1925 1926 /* 1927 * The command context is a unique 32-bit ID. The low order 1928 * bits help locate the request. The request is usually a 1929 * r/w request (see skd_start() above) or a special request. 1930 */ 1931 req_id = cmp_cntxt; 1932 req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK; 1933 1934 Dcmn_err(CE_NOTE, 1935 "<<<< completion_posted 1: req_id=%x req_slot=%x", 1936 req_id, req_slot); 1937 1938 /* Is this other than a r/w request? */ 1939 if (req_slot >= skdev->num_req_context) { 1940 /* 1941 * This is not a completion for a r/w request. 1942 */ 1943 skd_complete_other(skdev, skcmp, skerr); 1944 WAITQ_UNLOCK(skdev); 1945 continue; 1946 } 1947 1948 skreq = &skdev->skreq_table[req_slot]; 1949 1950 /* 1951 * Make sure the request ID for the slot matches. 1952 */ 1953 ASSERT(skreq->id == req_id); 1954 1955 if (SKD_REQ_STATE_ABORTED == skreq->state) { 1956 Dcmn_err(CE_NOTE, "reclaim req %p id=%04x\n", 1957 (void *)skreq, skreq->id); 1958 /* 1959 * a previously timed out command can 1960 * now be cleaned up 1961 */ 1962 msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK; 1963 ASSERT(msg_slot < skdev->num_fitmsg_context); 1964 skmsg = &skdev->skmsg_table[msg_slot]; 1965 if (skmsg->id == skreq->fitmsg_id) { 1966 ASSERT(skmsg->outstanding > 0); 1967 skmsg->outstanding--; 1968 if (skmsg->outstanding == 0) { 1969 ASSERT(SKD_MSG_STATE_BUSY == 1970 skmsg->state); 1971 skmsg->state = SKD_MSG_STATE_IDLE; 1972 skmsg->id += SKD_ID_INCR; 1973 skmsg->next = skdev->skmsg_free_list; 1974 skdev->skmsg_free_list = skmsg; 1975 } 1976 } 1977 /* 1978 * Reclaim the skd_request_context 1979 */ 1980 skreq->state = SKD_REQ_STATE_IDLE; 1981 skreq->id += SKD_ID_INCR; 1982 skreq->next = skdev->skreq_free_list; 1983 skdev->skreq_free_list = skreq; 1984 WAITQ_UNLOCK(skdev); 1985 continue; 1986 } 1987 1988 skreq->completion.status = cmp_status; 1989 1990 pbuf = skreq->pbuf; 1991 ASSERT(pbuf != NULL); 1992 1993 Dcmn_err(CE_NOTE, "<<<< completion_posted 2: pbuf=%p " 1994 "req_id=%x req_slot=%x", (void *)pbuf, req_id, req_slot); 1995 if (cmp_status && skdev->disks_initialized) { 1996 cmn_err(CE_WARN, "!%s: " 1997 "I/O err: pbuf=%p blkno=%lld (%llx) nbklks=%ld ", 1998 skdev->name, (void *)pbuf, pbuf->x_xfer->x_blkno, 1999 pbuf->x_xfer->x_blkno, pbuf->x_xfer->x_nblks); 2000 } 2001 2002 ASSERT(skdev->active_cmds); 2003 atomic_dec_64(&skdev->active_cmds); 2004 2005 if (SAM_STAT_GOOD == cmp_status) { 2006 /* Release DMA resources for the request. */ 2007 if (pbuf->x_xfer->x_nblks != 0) 2008 skd_blkdev_postop_sg_list(skdev, skreq); 2009 WAITQ_UNLOCK(skdev); 2010 skd_end_request(skdev, skreq, 0); 2011 WAITQ_LOCK(skdev); 2012 } else { 2013 switch (skd_check_status(skdev, cmp_status, skerr)) { 2014 case SKD_CHECK_STATUS_REPORT_GOOD: 2015 case SKD_CHECK_STATUS_REPORT_SMART_ALERT: 2016 WAITQ_UNLOCK(skdev); 2017 skd_end_request(skdev, skreq, 0); 2018 WAITQ_LOCK(skdev); 2019 break; 2020 2021 case SKD_CHECK_STATUS_BUSY_IMMINENT: 2022 skd_log_skreq(skdev, skreq, "retry(busy)"); 2023 skd_queue(skdev, pbuf); 2024 skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT; 2025 skdev->timer_countdown = SKD_TIMER_MINUTES(20); 2026 2027 (void) skd_quiesce_dev(skdev); 2028 break; 2029 2030 /* FALLTHRU */ 2031 case SKD_CHECK_STATUS_REPORT_ERROR: 2032 /* fall thru to report error */ 2033 default: 2034 /* 2035 * Save the entire completion 2036 * and error entries for 2037 * later error interpretation. 2038 */ 2039 skreq->completion = *skcmp; 2040 skreq->err_info = *skerr; 2041 WAITQ_UNLOCK(skdev); 2042 skd_end_request(skdev, skreq, -EIO); 2043 WAITQ_LOCK(skdev); 2044 break; 2045 } 2046 } 2047 2048 /* 2049 * Reclaim the FIT msg buffer if this is 2050 * the first of the requests it carried to 2051 * be completed. The FIT msg buffer used to 2052 * send this request cannot be reused until 2053 * we are sure the s1120 card has copied 2054 * it to its memory. The FIT msg might have 2055 * contained several requests. As soon as 2056 * any of them are completed we know that 2057 * the entire FIT msg was transferred. 2058 * Only the first completed request will 2059 * match the FIT msg buffer id. The FIT 2060 * msg buffer id is immediately updated. 2061 * When subsequent requests complete the FIT 2062 * msg buffer id won't match, so we know 2063 * quite cheaply that it is already done. 2064 */ 2065 msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK; 2066 2067 ASSERT(msg_slot < skdev->num_fitmsg_context); 2068 skmsg = &skdev->skmsg_table[msg_slot]; 2069 if (skmsg->id == skreq->fitmsg_id) { 2070 ASSERT(SKD_MSG_STATE_BUSY == skmsg->state); 2071 skmsg->state = SKD_MSG_STATE_IDLE; 2072 skmsg->id += SKD_ID_INCR; 2073 skmsg->next = skdev->skmsg_free_list; 2074 skdev->skmsg_free_list = skmsg; 2075 } 2076 2077 /* 2078 * Decrease the number of active requests. 2079 * This also decrements the count in the 2080 * timeout slot. 2081 */ 2082 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK; 2083 ASSERT(skdev->timeout_slot[timo_slot] > 0); 2084 ASSERT(skdev->queue_depth_busy > 0); 2085 2086 atomic_dec_32(&skdev->timeout_slot[timo_slot]); 2087 atomic_dec_32(&skdev->queue_depth_busy); 2088 2089 /* 2090 * Reclaim the skd_request_context 2091 */ 2092 skreq->state = SKD_REQ_STATE_IDLE; 2093 skreq->id += SKD_ID_INCR; 2094 skreq->next = skdev->skreq_free_list; 2095 skdev->skreq_free_list = skreq; 2096 2097 WAITQ_UNLOCK(skdev); 2098 2099 /* 2100 * make sure the lock is held by caller. 2101 */ 2102 if ((skdev->state == SKD_DRVR_STATE_PAUSING) && 2103 (0 == skdev->queue_depth_busy)) { 2104 skdev->state = SKD_DRVR_STATE_PAUSED; 2105 cv_signal(&skdev->cv_waitq); 2106 } 2107 } /* for(;;) */ 2108 } 2109 2110 /* 2111 * 2112 * Name: skd_complete_other, handle the completion of a 2113 * non-r/w request. 2114 * 2115 * Inputs: skdev - device state structure. 2116 * skcomp - FIT completion structure. 2117 * skerr - error structure. 2118 * 2119 * Returns: Nothing. 2120 * 2121 */ 2122 static void 2123 skd_complete_other(struct skd_device *skdev, 2124 volatile struct fit_completion_entry_v1 *skcomp, 2125 volatile struct fit_comp_error_info *skerr) 2126 { 2127 uint32_t req_id = 0; 2128 uint32_t req_table; 2129 uint32_t req_slot; 2130 struct skd_special_context *skspcl; 2131 2132 req_id = skcomp->tag; 2133 req_table = req_id & SKD_ID_TABLE_MASK; 2134 req_slot = req_id & SKD_ID_SLOT_MASK; 2135 2136 Dcmn_err(CE_NOTE, "complete_other: table=0x%x id=0x%x slot=%d", 2137 req_table, req_id, req_slot); 2138 2139 /* 2140 * Based on the request id, determine how to dispatch this completion. 2141 * This swich/case is finding the good cases and forwarding the 2142 * completion entry. Errors are reported below the switch. 2143 */ 2144 ASSERT(req_table == SKD_ID_INTERNAL); 2145 ASSERT(req_slot == 0); 2146 2147 skspcl = &skdev->internal_skspcl; 2148 ASSERT(skspcl->req.id == req_id); 2149 ASSERT(skspcl->req.state == SKD_REQ_STATE_BUSY); 2150 2151 Dcmn_err(CE_NOTE, "<<<<== complete_other: ID_INTERNAL"); 2152 skd_complete_internal(skdev, skcomp, skerr, skspcl); 2153 } 2154 2155 /* 2156 * 2157 * Name: skd_reset_skcomp, does what it says, resetting completion 2158 * tables. 2159 * 2160 * Inputs: skdev - device state structure. 2161 * 2162 * Returns: Nothing. 2163 * 2164 */ 2165 static void 2166 skd_reset_skcomp(struct skd_device *skdev) 2167 { 2168 uint32_t nbytes; 2169 2170 nbytes = sizeof (struct fit_completion_entry_v1) * 2171 SKD_N_COMPLETION_ENTRY; 2172 nbytes += sizeof (struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY; 2173 2174 if (skdev->skcomp_table) 2175 bzero(skdev->skcomp_table, nbytes); 2176 2177 skdev->skcomp_ix = 0; 2178 skdev->skcomp_cycle = 1; 2179 } 2180 2181 2182 2183 /* 2184 * INTERRUPTS 2185 */ 2186 2187 /* 2188 * 2189 * Name: skd_isr_aif, handles the device interrupts. 2190 * 2191 * Inputs: arg - skdev device state structure. 2192 * intvec - not referenced 2193 * 2194 * Returns: DDI_INTR_CLAIMED if interrupt is handled otherwise 2195 * return DDI_INTR_UNCLAIMED. 2196 * 2197 */ 2198 /* ARGSUSED */ /* Upstream common source with other platforms. */ 2199 static uint_t 2200 skd_isr_aif(caddr_t arg, caddr_t intvec) 2201 { 2202 uint32_t intstat; 2203 uint32_t ack; 2204 int rc = DDI_INTR_UNCLAIMED; 2205 struct skd_device *skdev; 2206 2207 skdev = (skd_device_t *)(uintptr_t)arg; 2208 2209 ASSERT(skdev != NULL); 2210 2211 skdev->intr_cntr++; 2212 2213 Dcmn_err(CE_NOTE, "skd_isr_aif: intr=%" PRId64 "\n", skdev->intr_cntr); 2214 2215 for (;;) { 2216 2217 ASSERT(!WAITQ_LOCK_HELD(skdev)); 2218 INTR_LOCK(skdev); 2219 2220 intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST); 2221 2222 ack = FIT_INT_DEF_MASK; 2223 ack &= intstat; 2224 2225 Dcmn_err(CE_NOTE, "intstat=0x%x ack=0x%x", intstat, ack); 2226 2227 /* 2228 * As long as there is an int pending on device, keep 2229 * running loop. When none, get out, but if we've never 2230 * done any processing, call completion handler? 2231 */ 2232 if (ack == 0) { 2233 /* 2234 * No interrupts on device, but run the completion 2235 * processor anyway? 2236 */ 2237 if (rc == DDI_INTR_UNCLAIMED && 2238 skdev->state == SKD_DRVR_STATE_ONLINE) { 2239 Dcmn_err(CE_NOTE, 2240 "1: Want isr_comp_posted call"); 2241 skd_isr_completion_posted(skdev); 2242 } 2243 INTR_UNLOCK(skdev); 2244 2245 break; 2246 } 2247 rc = DDI_INTR_CLAIMED; 2248 2249 SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST); 2250 2251 if ((skdev->state != SKD_DRVR_STATE_LOAD) && 2252 (skdev->state != SKD_DRVR_STATE_STOPPING)) { 2253 if (intstat & FIT_ISH_COMPLETION_POSTED) { 2254 Dcmn_err(CE_NOTE, 2255 "2: Want isr_comp_posted call"); 2256 skd_isr_completion_posted(skdev); 2257 } 2258 2259 if (intstat & FIT_ISH_FW_STATE_CHANGE) { 2260 Dcmn_err(CE_NOTE, "isr: fwstate change"); 2261 2262 skd_isr_fwstate(skdev); 2263 if (skdev->state == SKD_DRVR_STATE_FAULT || 2264 skdev->state == 2265 SKD_DRVR_STATE_DISAPPEARED) { 2266 INTR_UNLOCK(skdev); 2267 2268 return (rc); 2269 } 2270 } 2271 2272 if (intstat & FIT_ISH_MSG_FROM_DEV) { 2273 Dcmn_err(CE_NOTE, "isr: msg_from_dev change"); 2274 skd_isr_msg_from_dev(skdev); 2275 } 2276 } 2277 2278 INTR_UNLOCK(skdev); 2279 } 2280 2281 if (!SIMPLEQ_EMPTY(&skdev->waitqueue)) 2282 skd_start(skdev); 2283 2284 return (rc); 2285 } 2286 2287 /* 2288 * 2289 * Name: skd_drive_fault, set the drive state to DRV_STATE_FAULT. 2290 * 2291 * Inputs: skdev - device state structure. 2292 * 2293 * Returns: Nothing. 2294 * 2295 */ 2296 static void 2297 skd_drive_fault(struct skd_device *skdev) 2298 { 2299 skdev->state = SKD_DRVR_STATE_FAULT; 2300 cmn_err(CE_WARN, "!(%s): Drive FAULT\n", 2301 skd_name(skdev)); 2302 } 2303 2304 /* 2305 * 2306 * Name: skd_drive_disappeared, set the drive state to DISAPPEARED.. 2307 * 2308 * Inputs: skdev - device state structure. 2309 * 2310 * Returns: Nothing. 2311 * 2312 */ 2313 static void 2314 skd_drive_disappeared(struct skd_device *skdev) 2315 { 2316 skdev->state = SKD_DRVR_STATE_DISAPPEARED; 2317 cmn_err(CE_WARN, "!(%s): Drive DISAPPEARED\n", 2318 skd_name(skdev)); 2319 } 2320 2321 /* 2322 * 2323 * Name: skd_isr_fwstate, handles the various device states. 2324 * 2325 * Inputs: skdev - device state structure. 2326 * 2327 * Returns: Nothing. 2328 * 2329 */ 2330 static void 2331 skd_isr_fwstate(struct skd_device *skdev) 2332 { 2333 uint32_t sense; 2334 uint32_t state; 2335 int prev_driver_state; 2336 uint32_t mtd; 2337 2338 prev_driver_state = skdev->state; 2339 2340 sense = SKD_READL(skdev, FIT_STATUS); 2341 state = sense & FIT_SR_DRIVE_STATE_MASK; 2342 2343 Dcmn_err(CE_NOTE, "s1120 state %s(%d)=>%s(%d)", 2344 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, 2345 skd_drive_state_to_str(state), state); 2346 2347 skdev->drive_state = state; 2348 2349 switch (skdev->drive_state) { 2350 case FIT_SR_DRIVE_INIT: 2351 if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) { 2352 skd_disable_interrupts(skdev); 2353 break; 2354 } 2355 if (skdev->state == SKD_DRVR_STATE_RESTARTING) { 2356 skd_recover_requests(skdev); 2357 } 2358 if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) { 2359 skdev->timer_countdown = 2360 SKD_TIMER_SECONDS(SKD_STARTING_TO); 2361 skdev->state = SKD_DRVR_STATE_STARTING; 2362 skd_soft_reset(skdev); 2363 break; 2364 } 2365 mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0); 2366 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 2367 skdev->last_mtd = mtd; 2368 break; 2369 2370 case FIT_SR_DRIVE_ONLINE: 2371 skdev->queue_depth_limit = skdev->soft_queue_depth_limit; 2372 if (skdev->queue_depth_limit > skdev->hard_queue_depth_limit) { 2373 skdev->queue_depth_limit = 2374 skdev->hard_queue_depth_limit; 2375 } 2376 2377 skdev->queue_depth_lowat = skdev->queue_depth_limit * 2 / 3 + 1; 2378 if (skdev->queue_depth_lowat < 1) 2379 skdev->queue_depth_lowat = 1; 2380 Dcmn_err(CE_NOTE, 2381 "%s queue depth limit=%d hard=%d soft=%d lowat=%d", 2382 DRV_NAME, 2383 skdev->queue_depth_limit, 2384 skdev->hard_queue_depth_limit, 2385 skdev->soft_queue_depth_limit, 2386 skdev->queue_depth_lowat); 2387 2388 skd_refresh_device_data(skdev); 2389 break; 2390 case FIT_SR_DRIVE_BUSY: 2391 skdev->state = SKD_DRVR_STATE_BUSY; 2392 skdev->timer_countdown = SKD_TIMER_MINUTES(20); 2393 (void) skd_quiesce_dev(skdev); 2394 break; 2395 case FIT_SR_DRIVE_BUSY_SANITIZE: 2396 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE; 2397 skd_start(skdev); 2398 break; 2399 case FIT_SR_DRIVE_BUSY_ERASE: 2400 skdev->state = SKD_DRVR_STATE_BUSY_ERASE; 2401 skdev->timer_countdown = SKD_TIMER_MINUTES(20); 2402 break; 2403 case FIT_SR_DRIVE_OFFLINE: 2404 skdev->state = SKD_DRVR_STATE_IDLE; 2405 break; 2406 case FIT_SR_DRIVE_SOFT_RESET: 2407 skdev->state = SKD_DRVR_STATE_RESTARTING; 2408 2409 switch (skdev->state) { 2410 case SKD_DRVR_STATE_STARTING: 2411 case SKD_DRVR_STATE_RESTARTING: 2412 break; 2413 default: 2414 skdev->state = SKD_DRVR_STATE_RESTARTING; 2415 break; 2416 } 2417 break; 2418 case FIT_SR_DRIVE_FW_BOOTING: 2419 Dcmn_err(CE_NOTE, 2420 "ISR FIT_SR_DRIVE_FW_BOOTING %s", skdev->name); 2421 skdev->state = SKD_DRVR_STATE_WAIT_BOOT; 2422 skdev->timer_countdown = SKD_TIMER_SECONDS(SKD_WAIT_BOOT_TO); 2423 break; 2424 2425 case FIT_SR_DRIVE_DEGRADED: 2426 case FIT_SR_PCIE_LINK_DOWN: 2427 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD: 2428 break; 2429 2430 case FIT_SR_DRIVE_FAULT: 2431 skd_drive_fault(skdev); 2432 skd_recover_requests(skdev); 2433 skd_start(skdev); 2434 break; 2435 2436 case 0xFF: 2437 skd_drive_disappeared(skdev); 2438 skd_recover_requests(skdev); 2439 skd_start(skdev); 2440 break; 2441 default: 2442 /* 2443 * Uknown FW State. Wait for a state we recognize. 2444 */ 2445 break; 2446 } 2447 2448 Dcmn_err(CE_NOTE, "Driver state %s(%d)=>%s(%d)", 2449 skd_skdev_state_to_str(prev_driver_state), prev_driver_state, 2450 skd_skdev_state_to_str(skdev->state), skdev->state); 2451 } 2452 2453 /* 2454 * 2455 * Name: skd_recover_requests, attempts to recover requests. 2456 * 2457 * Inputs: skdev - device state structure. 2458 * 2459 * Returns: Nothing. 2460 * 2461 */ 2462 static void 2463 skd_recover_requests(struct skd_device *skdev) 2464 { 2465 int i; 2466 2467 ASSERT(INTR_LOCK_HELD(skdev)); 2468 2469 for (i = 0; i < skdev->num_req_context; i++) { 2470 struct skd_request_context *skreq = &skdev->skreq_table[i]; 2471 2472 if (skreq->state == SKD_REQ_STATE_BUSY) { 2473 skd_log_skreq(skdev, skreq, "requeue"); 2474 2475 ASSERT(0 != (skreq->id & SKD_ID_INCR)); 2476 ASSERT(skreq->pbuf != NULL); 2477 /* Release DMA resources for the request. */ 2478 skd_blkdev_postop_sg_list(skdev, skreq); 2479 2480 skd_end_request(skdev, skreq, EAGAIN); 2481 skreq->pbuf = NULL; 2482 skreq->state = SKD_REQ_STATE_IDLE; 2483 skreq->id += SKD_ID_INCR; 2484 } 2485 if (i > 0) { 2486 skreq[-1].next = skreq; 2487 } 2488 skreq->next = NULL; 2489 } 2490 2491 WAITQ_LOCK(skdev); 2492 skdev->skreq_free_list = skdev->skreq_table; 2493 WAITQ_UNLOCK(skdev); 2494 2495 for (i = 0; i < skdev->num_fitmsg_context; i++) { 2496 struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i]; 2497 2498 if (skmsg->state == SKD_MSG_STATE_BUSY) { 2499 skd_log_skmsg(skdev, skmsg, "salvaged"); 2500 ASSERT((skmsg->id & SKD_ID_INCR) != 0); 2501 skmsg->state = SKD_MSG_STATE_IDLE; 2502 skmsg->id &= ~SKD_ID_INCR; 2503 } 2504 if (i > 0) { 2505 skmsg[-1].next = skmsg; 2506 } 2507 skmsg->next = NULL; 2508 } 2509 WAITQ_LOCK(skdev); 2510 skdev->skmsg_free_list = skdev->skmsg_table; 2511 WAITQ_UNLOCK(skdev); 2512 2513 for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++) { 2514 skdev->timeout_slot[i] = 0; 2515 } 2516 skdev->queue_depth_busy = 0; 2517 } 2518 2519 /* 2520 * 2521 * Name: skd_isr_msg_from_dev, handles a message from the device. 2522 * 2523 * Inputs: skdev - device state structure. 2524 * 2525 * Returns: Nothing. 2526 * 2527 */ 2528 static void 2529 skd_isr_msg_from_dev(struct skd_device *skdev) 2530 { 2531 uint32_t mfd; 2532 uint32_t mtd; 2533 2534 Dcmn_err(CE_NOTE, "skd_isr_msg_from_dev:"); 2535 2536 mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE); 2537 2538 Dcmn_err(CE_NOTE, "mfd=0x%x last_mtd=0x%x\n", mfd, skdev->last_mtd); 2539 2540 /* 2541 * ignore any mtd that is an ack for something we didn't send 2542 */ 2543 if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd)) { 2544 return; 2545 } 2546 2547 switch (FIT_MXD_TYPE(mfd)) { 2548 case FIT_MTD_FITFW_INIT: 2549 skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd); 2550 2551 if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) { 2552 cmn_err(CE_WARN, "!(%s): protocol mismatch\n", 2553 skdev->name); 2554 cmn_err(CE_WARN, "!(%s): got=%d support=%d\n", 2555 skdev->name, skdev->proto_ver, 2556 FIT_PROTOCOL_VERSION_1); 2557 cmn_err(CE_WARN, "!(%s): please upgrade driver\n", 2558 skdev->name); 2559 skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH; 2560 skd_soft_reset(skdev); 2561 break; 2562 } 2563 mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0); 2564 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 2565 skdev->last_mtd = mtd; 2566 break; 2567 2568 case FIT_MTD_GET_CMDQ_DEPTH: 2569 skdev->hard_queue_depth_limit = FIT_MXD_DATA(mfd); 2570 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0, 2571 SKD_N_COMPLETION_ENTRY); 2572 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 2573 skdev->last_mtd = mtd; 2574 break; 2575 2576 case FIT_MTD_SET_COMPQ_DEPTH: 2577 SKD_WRITEQ(skdev, skdev->cq_dma_address.cookies->dmac_laddress, 2578 FIT_MSG_TO_DEVICE_ARG); 2579 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0); 2580 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 2581 skdev->last_mtd = mtd; 2582 break; 2583 2584 case FIT_MTD_SET_COMPQ_ADDR: 2585 skd_reset_skcomp(skdev); 2586 mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0); 2587 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 2588 skdev->last_mtd = mtd; 2589 break; 2590 2591 case FIT_MTD_ARM_QUEUE: 2592 skdev->last_mtd = 0; 2593 /* 2594 * State should be, or soon will be, FIT_SR_DRIVE_ONLINE. 2595 */ 2596 break; 2597 2598 default: 2599 break; 2600 } 2601 } 2602 2603 2604 /* 2605 * 2606 * Name: skd_disable_interrupts, issues command to disable 2607 * device interrupts. 2608 * 2609 * Inputs: skdev - device state structure. 2610 * 2611 * Returns: Nothing. 2612 * 2613 */ 2614 static void 2615 skd_disable_interrupts(struct skd_device *skdev) 2616 { 2617 uint32_t sense; 2618 2619 Dcmn_err(CE_NOTE, "skd_disable_interrupts:"); 2620 2621 sense = SKD_READL(skdev, FIT_CONTROL); 2622 sense &= ~FIT_CR_ENABLE_INTERRUPTS; 2623 SKD_WRITEL(skdev, sense, FIT_CONTROL); 2624 2625 Dcmn_err(CE_NOTE, "sense 0x%x", sense); 2626 2627 /* 2628 * Note that the 1s is written. A 1-bit means 2629 * disable, a 0 means enable. 2630 */ 2631 SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST); 2632 } 2633 2634 /* 2635 * 2636 * Name: skd_enable_interrupts, issues command to enable 2637 * device interrupts. 2638 * 2639 * Inputs: skdev - device state structure. 2640 * 2641 * Returns: Nothing. 2642 * 2643 */ 2644 static void 2645 skd_enable_interrupts(struct skd_device *skdev) 2646 { 2647 uint32_t val; 2648 2649 Dcmn_err(CE_NOTE, "skd_enable_interrupts:"); 2650 2651 /* unmask interrupts first */ 2652 val = FIT_ISH_FW_STATE_CHANGE + 2653 FIT_ISH_COMPLETION_POSTED + 2654 FIT_ISH_MSG_FROM_DEV; 2655 2656 /* 2657 * Note that the compliment of mask is written. A 1-bit means 2658 * disable, a 0 means enable. 2659 */ 2660 SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST); 2661 2662 Dcmn_err(CE_NOTE, "interrupt mask=0x%x", ~val); 2663 2664 val = SKD_READL(skdev, FIT_CONTROL); 2665 val |= FIT_CR_ENABLE_INTERRUPTS; 2666 2667 Dcmn_err(CE_NOTE, "control=0x%x", val); 2668 2669 SKD_WRITEL(skdev, val, FIT_CONTROL); 2670 } 2671 2672 /* 2673 * 2674 * Name: skd_soft_reset, issues a soft reset to the hardware. 2675 * 2676 * Inputs: skdev - device state structure. 2677 * 2678 * Returns: Nothing. 2679 * 2680 */ 2681 static void 2682 skd_soft_reset(struct skd_device *skdev) 2683 { 2684 uint32_t val; 2685 2686 Dcmn_err(CE_NOTE, "skd_soft_reset:"); 2687 2688 val = SKD_READL(skdev, FIT_CONTROL); 2689 val |= (FIT_CR_SOFT_RESET); 2690 2691 Dcmn_err(CE_NOTE, "soft_reset: control=0x%x", val); 2692 2693 SKD_WRITEL(skdev, val, FIT_CONTROL); 2694 } 2695 2696 /* 2697 * 2698 * Name: skd_start_device, gets the device going. 2699 * 2700 * Inputs: skdev - device state structure. 2701 * 2702 * Returns: Nothing. 2703 * 2704 */ 2705 static void 2706 skd_start_device(struct skd_device *skdev) 2707 { 2708 uint32_t state; 2709 int delay_action = 0; 2710 2711 Dcmn_err(CE_NOTE, "skd_start_device:"); 2712 2713 /* ack all ghost interrupts */ 2714 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); 2715 2716 state = SKD_READL(skdev, FIT_STATUS); 2717 2718 Dcmn_err(CE_NOTE, "initial status=0x%x", state); 2719 2720 state &= FIT_SR_DRIVE_STATE_MASK; 2721 skdev->drive_state = state; 2722 skdev->last_mtd = 0; 2723 2724 skdev->state = SKD_DRVR_STATE_STARTING; 2725 skdev->timer_countdown = SKD_TIMER_SECONDS(SKD_STARTING_TO); 2726 2727 skd_enable_interrupts(skdev); 2728 2729 switch (skdev->drive_state) { 2730 case FIT_SR_DRIVE_OFFLINE: 2731 Dcmn_err(CE_NOTE, "(%s): Drive offline...", 2732 skd_name(skdev)); 2733 break; 2734 2735 case FIT_SR_DRIVE_FW_BOOTING: 2736 Dcmn_err(CE_NOTE, "FIT_SR_DRIVE_FW_BOOTING %s\n", skdev->name); 2737 skdev->state = SKD_DRVR_STATE_WAIT_BOOT; 2738 skdev->timer_countdown = SKD_TIMER_SECONDS(SKD_WAIT_BOOT_TO); 2739 break; 2740 2741 case FIT_SR_DRIVE_BUSY_SANITIZE: 2742 Dcmn_err(CE_NOTE, "(%s): Start: BUSY_SANITIZE\n", 2743 skd_name(skdev)); 2744 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE; 2745 skdev->timer_countdown = SKD_TIMER_SECONDS(60); 2746 break; 2747 2748 case FIT_SR_DRIVE_BUSY_ERASE: 2749 Dcmn_err(CE_NOTE, "(%s): Start: BUSY_ERASE\n", 2750 skd_name(skdev)); 2751 skdev->state = SKD_DRVR_STATE_BUSY_ERASE; 2752 skdev->timer_countdown = SKD_TIMER_SECONDS(60); 2753 break; 2754 2755 case FIT_SR_DRIVE_INIT: 2756 case FIT_SR_DRIVE_ONLINE: 2757 skd_soft_reset(skdev); 2758 2759 break; 2760 2761 case FIT_SR_DRIVE_BUSY: 2762 Dcmn_err(CE_NOTE, "(%s): Drive Busy...\n", 2763 skd_name(skdev)); 2764 skdev->state = SKD_DRVR_STATE_BUSY; 2765 skdev->timer_countdown = SKD_TIMER_SECONDS(60); 2766 break; 2767 2768 case FIT_SR_DRIVE_SOFT_RESET: 2769 Dcmn_err(CE_NOTE, "(%s) drive soft reset in prog\n", 2770 skd_name(skdev)); 2771 break; 2772 2773 case FIT_SR_DRIVE_FAULT: 2774 /* 2775 * Fault state is bad...soft reset won't do it... 2776 * Hard reset, maybe, but does it work on device? 2777 * For now, just fault so the system doesn't hang. 2778 */ 2779 skd_drive_fault(skdev); 2780 2781 delay_action = 1; 2782 break; 2783 2784 case 0xFF: 2785 skd_drive_disappeared(skdev); 2786 2787 delay_action = 1; 2788 break; 2789 2790 default: 2791 Dcmn_err(CE_NOTE, "(%s) Start: unknown state %x\n", 2792 skd_name(skdev), skdev->drive_state); 2793 break; 2794 } 2795 2796 state = SKD_READL(skdev, FIT_CONTROL); 2797 Dcmn_err(CE_NOTE, "FIT Control Status=0x%x\n", state); 2798 2799 state = SKD_READL(skdev, FIT_INT_STATUS_HOST); 2800 Dcmn_err(CE_NOTE, "Intr Status=0x%x\n", state); 2801 2802 state = SKD_READL(skdev, FIT_INT_MASK_HOST); 2803 Dcmn_err(CE_NOTE, "Intr Mask=0x%x\n", state); 2804 2805 state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE); 2806 Dcmn_err(CE_NOTE, "Msg from Dev=0x%x\n", state); 2807 2808 state = SKD_READL(skdev, FIT_HW_VERSION); 2809 Dcmn_err(CE_NOTE, "HW version=0x%x\n", state); 2810 2811 if (delay_action) { 2812 /* start the queue so we can respond with error to requests */ 2813 Dcmn_err(CE_NOTE, "Starting %s queue\n", skdev->name); 2814 skd_start(skdev); 2815 skdev->gendisk_on = -1; 2816 cv_signal(&skdev->cv_waitq); 2817 } 2818 } 2819 2820 /* 2821 * 2822 * Name: skd_restart_device, restart the hardware. 2823 * 2824 * Inputs: skdev - device state structure. 2825 * 2826 * Returns: Nothing. 2827 * 2828 */ 2829 static void 2830 skd_restart_device(struct skd_device *skdev) 2831 { 2832 uint32_t state; 2833 2834 Dcmn_err(CE_NOTE, "skd_restart_device:"); 2835 2836 /* ack all ghost interrupts */ 2837 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); 2838 2839 state = SKD_READL(skdev, FIT_STATUS); 2840 2841 Dcmn_err(CE_NOTE, "skd_restart_device: drive status=0x%x\n", state); 2842 2843 state &= FIT_SR_DRIVE_STATE_MASK; 2844 skdev->drive_state = state; 2845 skdev->last_mtd = 0; 2846 2847 skdev->state = SKD_DRVR_STATE_RESTARTING; 2848 skdev->timer_countdown = SKD_TIMER_MINUTES(4); 2849 2850 skd_soft_reset(skdev); 2851 } 2852 2853 /* 2854 * 2855 * Name: skd_stop_device, stops the device. 2856 * 2857 * Inputs: skdev - device state structure. 2858 * 2859 * Returns: Nothing. 2860 * 2861 */ 2862 static void 2863 skd_stop_device(struct skd_device *skdev) 2864 { 2865 clock_t cur_ticks, tmo; 2866 int secs; 2867 struct skd_special_context *skspcl = &skdev->internal_skspcl; 2868 2869 if (SKD_DRVR_STATE_ONLINE != skdev->state) { 2870 Dcmn_err(CE_NOTE, "(%s): skd_stop_device not online no sync\n", 2871 skdev->name); 2872 goto stop_out; 2873 } 2874 2875 if (SKD_REQ_STATE_IDLE != skspcl->req.state) { 2876 Dcmn_err(CE_NOTE, "(%s): skd_stop_device no special\n", 2877 skdev->name); 2878 goto stop_out; 2879 } 2880 2881 skdev->state = SKD_DRVR_STATE_SYNCING; 2882 skdev->sync_done = 0; 2883 2884 skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE); 2885 2886 secs = 10; 2887 mutex_enter(&skdev->skd_internalio_mutex); 2888 while (skdev->sync_done == 0) { 2889 cur_ticks = ddi_get_lbolt(); 2890 tmo = cur_ticks + drv_usectohz(1000000 * secs); 2891 if (cv_timedwait(&skdev->cv_waitq, 2892 &skdev->skd_internalio_mutex, tmo) == -1) { 2893 /* Oops - timed out */ 2894 2895 Dcmn_err(CE_NOTE, "stop_device - %d secs TMO", secs); 2896 } 2897 } 2898 2899 mutex_exit(&skdev->skd_internalio_mutex); 2900 2901 switch (skdev->sync_done) { 2902 case 0: 2903 Dcmn_err(CE_NOTE, "(%s): skd_stop_device no sync\n", 2904 skdev->name); 2905 break; 2906 case 1: 2907 Dcmn_err(CE_NOTE, "(%s): skd_stop_device sync done\n", 2908 skdev->name); 2909 break; 2910 default: 2911 Dcmn_err(CE_NOTE, "(%s): skd_stop_device sync error\n", 2912 skdev->name); 2913 } 2914 2915 2916 stop_out: 2917 skdev->state = SKD_DRVR_STATE_STOPPING; 2918 2919 skd_disable_interrupts(skdev); 2920 2921 /* ensure all ints on device are cleared */ 2922 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); 2923 /* soft reset the device to unload with a clean slate */ 2924 SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL); 2925 } 2926 2927 /* 2928 * CONSTRUCT 2929 */ 2930 2931 static int skd_cons_skcomp(struct skd_device *); 2932 static int skd_cons_skmsg(struct skd_device *); 2933 static int skd_cons_skreq(struct skd_device *); 2934 static int skd_cons_sksb(struct skd_device *); 2935 static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *, uint32_t, 2936 dma_mem_t *); 2937 2938 /* 2939 * 2940 * Name: skd_construct, calls other routines to build device 2941 * interface structures. 2942 * 2943 * Inputs: skdev - device state structure. 2944 * instance - DDI instance number. 2945 * 2946 * Returns: Returns DDI_FAILURE on any failure otherwise returns 2947 * DDI_SUCCESS. 2948 * 2949 */ 2950 /* ARGSUSED */ /* Upstream common source with other platforms. */ 2951 static int 2952 skd_construct(skd_device_t *skdev, int instance) 2953 { 2954 int rc = 0; 2955 2956 skdev->state = SKD_DRVR_STATE_LOAD; 2957 skdev->irq_type = skd_isr_type; 2958 skdev->soft_queue_depth_limit = skd_max_queue_depth; 2959 skdev->hard_queue_depth_limit = 10; /* until GET_CMDQ_DEPTH */ 2960 2961 skdev->num_req_context = skd_max_queue_depth; 2962 skdev->num_fitmsg_context = skd_max_queue_depth; 2963 2964 skdev->queue_depth_limit = skdev->hard_queue_depth_limit; 2965 skdev->queue_depth_lowat = 1; 2966 skdev->proto_ver = 99; /* initialize to invalid value */ 2967 skdev->sgs_per_request = skd_sgs_per_request; 2968 skdev->dbg_level = skd_dbg_level; 2969 2970 rc = skd_cons_skcomp(skdev); 2971 if (rc < 0) { 2972 goto err_out; 2973 } 2974 2975 rc = skd_cons_skmsg(skdev); 2976 if (rc < 0) { 2977 goto err_out; 2978 } 2979 2980 rc = skd_cons_skreq(skdev); 2981 if (rc < 0) { 2982 goto err_out; 2983 } 2984 2985 rc = skd_cons_sksb(skdev); 2986 if (rc < 0) { 2987 goto err_out; 2988 } 2989 2990 Dcmn_err(CE_NOTE, "CONSTRUCT VICTORY"); 2991 2992 return (DDI_SUCCESS); 2993 2994 err_out: 2995 Dcmn_err(CE_NOTE, "construct failed\n"); 2996 skd_destruct(skdev); 2997 2998 return (DDI_FAILURE); 2999 } 3000 3001 /* 3002 * 3003 * Name: skd_free_phys, frees DMA memory. 3004 * 3005 * Inputs: skdev - device state structure. 3006 * mem - DMA info. 3007 * 3008 * Returns: Nothing. 3009 * 3010 */ 3011 static void 3012 skd_free_phys(skd_device_t *skdev, dma_mem_t *mem) 3013 { 3014 _NOTE(ARGUNUSED(skdev)); 3015 3016 if (mem == NULL || mem->dma_handle == NULL) 3017 return; 3018 3019 (void) ddi_dma_unbind_handle(mem->dma_handle); 3020 3021 if (mem->acc_handle != NULL) { 3022 ddi_dma_mem_free(&mem->acc_handle); 3023 mem->acc_handle = NULL; 3024 } 3025 3026 mem->bp = NULL; 3027 ddi_dma_free_handle(&mem->dma_handle); 3028 mem->dma_handle = NULL; 3029 } 3030 3031 /* 3032 * 3033 * Name: skd_alloc_dma_mem, allocates DMA memory. 3034 * 3035 * Inputs: skdev - device state structure. 3036 * mem - DMA data structure. 3037 * sleep - indicates whether called routine can sleep. 3038 * atype - specified 32 or 64 bit allocation. 3039 * 3040 * Returns: Void pointer to mem->bp on success else NULL. 3041 * NOTE: There are some failure modes even if sleep is set 3042 * to KM_SLEEP, so callers MUST check the return code even 3043 * if KM_SLEEP is passed in. 3044 * 3045 */ 3046 static void * 3047 skd_alloc_dma_mem(skd_device_t *skdev, dma_mem_t *mem, uint8_t atype) 3048 { 3049 size_t rlen; 3050 uint_t cnt; 3051 ddi_dma_attr_t dma_attr = skd_64bit_io_dma_attr; 3052 ddi_device_acc_attr_t acc_attr = { 3053 DDI_DEVICE_ATTR_V0, 3054 DDI_STRUCTURE_LE_ACC, 3055 DDI_STRICTORDER_ACC 3056 }; 3057 3058 if (atype == ATYPE_32BIT) 3059 dma_attr.dma_attr_addr_hi = SKD_DMA_HIGH_32BIT_ADDRESS; 3060 3061 dma_attr.dma_attr_sgllen = 1; 3062 3063 /* 3064 * Allocate DMA memory. 3065 */ 3066 if (ddi_dma_alloc_handle(skdev->dip, &dma_attr, DDI_DMA_SLEEP, NULL, 3067 &mem->dma_handle) != DDI_SUCCESS) { 3068 cmn_err(CE_WARN, "!alloc_dma_mem-1, failed"); 3069 3070 mem->dma_handle = NULL; 3071 3072 return (NULL); 3073 } 3074 3075 if (ddi_dma_mem_alloc(mem->dma_handle, mem->size, &acc_attr, 3076 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, (caddr_t *)&mem->bp, &rlen, 3077 &mem->acc_handle) != DDI_SUCCESS) { 3078 cmn_err(CE_WARN, "!skd_alloc_dma_mem-2, failed"); 3079 ddi_dma_free_handle(&mem->dma_handle); 3080 mem->dma_handle = NULL; 3081 mem->acc_handle = NULL; 3082 mem->bp = NULL; 3083 3084 return (NULL); 3085 } 3086 bzero(mem->bp, mem->size); 3087 3088 if (ddi_dma_addr_bind_handle(mem->dma_handle, NULL, mem->bp, 3089 mem->size, (DDI_DMA_CONSISTENT | DDI_DMA_RDWR), DDI_DMA_SLEEP, NULL, 3090 &mem->cookie, &cnt) != DDI_DMA_MAPPED) { 3091 cmn_err(CE_WARN, "!skd_alloc_dma_mem-3, failed"); 3092 ddi_dma_mem_free(&mem->acc_handle); 3093 ddi_dma_free_handle(&mem->dma_handle); 3094 3095 return (NULL); 3096 } 3097 3098 if (cnt > 1) { 3099 (void) ddi_dma_unbind_handle(mem->dma_handle); 3100 cmn_err(CE_WARN, "!skd_alloc_dma_mem-4, failed, " 3101 "cookie_count %d > 1", cnt); 3102 skd_free_phys(skdev, mem); 3103 3104 return (NULL); 3105 } 3106 mem->cookies = &mem->cookie; 3107 mem->cookies->dmac_size = mem->size; 3108 3109 return (mem->bp); 3110 } 3111 3112 /* 3113 * 3114 * Name: skd_cons_skcomp, allocates space for the skcomp table. 3115 * 3116 * Inputs: skdev - device state structure. 3117 * 3118 * Returns: -ENOMEM if no memory otherwise NULL. 3119 * 3120 */ 3121 static int 3122 skd_cons_skcomp(struct skd_device *skdev) 3123 { 3124 uint64_t *dma_alloc; 3125 struct fit_completion_entry_v1 *skcomp; 3126 int rc = 0; 3127 uint32_t nbytes; 3128 dma_mem_t *mem; 3129 3130 nbytes = sizeof (*skcomp) * SKD_N_COMPLETION_ENTRY; 3131 nbytes += sizeof (struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY; 3132 3133 Dcmn_err(CE_NOTE, "cons_skcomp: nbytes=%d,entries=%d", nbytes, 3134 SKD_N_COMPLETION_ENTRY); 3135 3136 mem = &skdev->cq_dma_address; 3137 mem->size = nbytes; 3138 3139 dma_alloc = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT); 3140 skcomp = (struct fit_completion_entry_v1 *)dma_alloc; 3141 if (skcomp == NULL) { 3142 rc = -ENOMEM; 3143 goto err_out; 3144 } 3145 3146 bzero(skcomp, nbytes); 3147 3148 Dcmn_err(CE_NOTE, "cons_skcomp: skcomp=%p nbytes=%d", 3149 (void *)skcomp, nbytes); 3150 3151 skdev->skcomp_table = skcomp; 3152 skdev->skerr_table = (struct fit_comp_error_info *)(dma_alloc + 3153 (SKD_N_COMPLETION_ENTRY * sizeof (*skcomp) / sizeof (uint64_t))); 3154 3155 err_out: 3156 return (rc); 3157 } 3158 3159 /* 3160 * 3161 * Name: skd_cons_skmsg, allocates space for the skmsg table. 3162 * 3163 * Inputs: skdev - device state structure. 3164 * 3165 * Returns: -ENOMEM if no memory otherwise NULL. 3166 * 3167 */ 3168 static int 3169 skd_cons_skmsg(struct skd_device *skdev) 3170 { 3171 dma_mem_t *mem; 3172 int rc = 0; 3173 uint32_t i; 3174 3175 Dcmn_err(CE_NOTE, "skmsg_table kzalloc, struct %lu, count %u total %lu", 3176 (ulong_t)sizeof (struct skd_fitmsg_context), 3177 skdev->num_fitmsg_context, 3178 (ulong_t)(sizeof (struct skd_fitmsg_context) * 3179 skdev->num_fitmsg_context)); 3180 3181 skdev->skmsg_table = (struct skd_fitmsg_context *)kmem_zalloc( 3182 sizeof (struct skd_fitmsg_context) * skdev->num_fitmsg_context, 3183 KM_SLEEP); 3184 3185 for (i = 0; i < skdev->num_fitmsg_context; i++) { 3186 struct skd_fitmsg_context *skmsg; 3187 3188 skmsg = &skdev->skmsg_table[i]; 3189 3190 skmsg->id = i + SKD_ID_FIT_MSG; 3191 3192 skmsg->state = SKD_MSG_STATE_IDLE; 3193 3194 mem = &skmsg->mb_dma_address; 3195 mem->size = SKD_N_FITMSG_BYTES + 64; 3196 3197 skmsg->msg_buf = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT); 3198 3199 if (NULL == skmsg->msg_buf) { 3200 rc = -ENOMEM; 3201 i++; 3202 break; 3203 } 3204 3205 skmsg->offset = 0; 3206 3207 bzero(skmsg->msg_buf, SKD_N_FITMSG_BYTES); 3208 3209 skmsg->next = &skmsg[1]; 3210 } 3211 3212 /* Free list is in order starting with the 0th entry. */ 3213 skdev->skmsg_table[i - 1].next = NULL; 3214 skdev->skmsg_free_list = skdev->skmsg_table; 3215 3216 return (rc); 3217 } 3218 3219 /* 3220 * 3221 * Name: skd_cons_skreq, allocates space for the skreq table. 3222 * 3223 * Inputs: skdev - device state structure. 3224 * 3225 * Returns: -ENOMEM if no memory otherwise NULL. 3226 * 3227 */ 3228 static int 3229 skd_cons_skreq(struct skd_device *skdev) 3230 { 3231 int rc = 0; 3232 uint32_t i; 3233 3234 Dcmn_err(CE_NOTE, 3235 "skreq_table kmem_zalloc, struct %lu, count %u total %lu", 3236 (ulong_t)sizeof (struct skd_request_context), 3237 skdev->num_req_context, 3238 (ulong_t) (sizeof (struct skd_request_context) * 3239 skdev->num_req_context)); 3240 3241 skdev->skreq_table = (struct skd_request_context *)kmem_zalloc( 3242 sizeof (struct skd_request_context) * skdev->num_req_context, 3243 KM_SLEEP); 3244 3245 for (i = 0; i < skdev->num_req_context; i++) { 3246 struct skd_request_context *skreq; 3247 3248 skreq = &skdev->skreq_table[i]; 3249 3250 skreq->id = (uint16_t)(i + SKD_ID_RW_REQUEST); 3251 skreq->state = SKD_REQ_STATE_IDLE; 3252 3253 skreq->sksg_list = skd_cons_sg_list(skdev, 3254 skdev->sgs_per_request, 3255 &skreq->sksg_dma_address); 3256 3257 if (NULL == skreq->sksg_list) { 3258 rc = -ENOMEM; 3259 goto err_out; 3260 } 3261 3262 skreq->next = &skreq[1]; 3263 } 3264 3265 /* Free list is in order starting with the 0th entry. */ 3266 skdev->skreq_table[i - 1].next = NULL; 3267 skdev->skreq_free_list = skdev->skreq_table; 3268 3269 err_out: 3270 return (rc); 3271 } 3272 3273 /* 3274 * 3275 * Name: skd_cons_sksb, allocates space for the skspcl msg buf 3276 * and data buf. 3277 * 3278 * Inputs: skdev - device state structure. 3279 * 3280 * Returns: -ENOMEM if no memory otherwise NULL. 3281 * 3282 */ 3283 static int 3284 skd_cons_sksb(struct skd_device *skdev) 3285 { 3286 int rc = 0; 3287 struct skd_special_context *skspcl; 3288 dma_mem_t *mem; 3289 uint32_t nbytes; 3290 3291 skspcl = &skdev->internal_skspcl; 3292 3293 skspcl->req.id = 0 + SKD_ID_INTERNAL; 3294 skspcl->req.state = SKD_REQ_STATE_IDLE; 3295 3296 nbytes = SKD_N_INTERNAL_BYTES; 3297 3298 mem = &skspcl->db_dma_address; 3299 mem->size = nbytes; 3300 3301 /* data_buf's DMA pointer is skspcl->db_dma_address */ 3302 skspcl->data_buf = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT); 3303 if (skspcl->data_buf == NULL) { 3304 rc = -ENOMEM; 3305 goto err_out; 3306 } 3307 3308 bzero(skspcl->data_buf, nbytes); 3309 3310 nbytes = SKD_N_SPECIAL_FITMSG_BYTES; 3311 3312 mem = &skspcl->mb_dma_address; 3313 mem->size = nbytes; 3314 3315 /* msg_buf DMA pointer is skspcl->mb_dma_address */ 3316 skspcl->msg_buf = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT); 3317 if (skspcl->msg_buf == NULL) { 3318 rc = -ENOMEM; 3319 goto err_out; 3320 } 3321 3322 3323 bzero(skspcl->msg_buf, nbytes); 3324 3325 skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1, 3326 &skspcl->req.sksg_dma_address); 3327 3328 3329 if (skspcl->req.sksg_list == NULL) { 3330 rc = -ENOMEM; 3331 goto err_out; 3332 } 3333 3334 if (skd_format_internal_skspcl(skdev) == 0) { 3335 rc = -EINVAL; 3336 goto err_out; 3337 } 3338 3339 err_out: 3340 return (rc); 3341 } 3342 3343 /* 3344 * 3345 * Name: skd_cons_sg_list, allocates the S/G list. 3346 * 3347 * Inputs: skdev - device state structure. 3348 * n_sg - Number of scatter-gather entries. 3349 * ret_dma_addr - S/G list DMA pointer. 3350 * 3351 * Returns: A list of FIT message descriptors. 3352 * 3353 */ 3354 static struct fit_sg_descriptor 3355 *skd_cons_sg_list(struct skd_device *skdev, 3356 uint32_t n_sg, dma_mem_t *ret_dma_addr) 3357 { 3358 struct fit_sg_descriptor *sg_list; 3359 uint32_t nbytes; 3360 dma_mem_t *mem; 3361 3362 nbytes = sizeof (*sg_list) * n_sg; 3363 3364 mem = ret_dma_addr; 3365 mem->size = nbytes; 3366 3367 /* sg_list's DMA pointer is *ret_dma_addr */ 3368 sg_list = skd_alloc_dma_mem(skdev, mem, ATYPE_32BIT); 3369 3370 if (sg_list != NULL) { 3371 uint64_t dma_address = ret_dma_addr->cookie.dmac_laddress; 3372 uint32_t i; 3373 3374 bzero(sg_list, nbytes); 3375 3376 for (i = 0; i < n_sg - 1; i++) { 3377 uint64_t ndp_off; 3378 ndp_off = (i + 1) * sizeof (struct fit_sg_descriptor); 3379 3380 sg_list[i].next_desc_ptr = dma_address + ndp_off; 3381 } 3382 sg_list[i].next_desc_ptr = 0LL; 3383 } 3384 3385 return (sg_list); 3386 } 3387 3388 /* 3389 * DESTRUCT (FREE) 3390 */ 3391 3392 static void skd_free_skcomp(struct skd_device *skdev); 3393 static void skd_free_skmsg(struct skd_device *skdev); 3394 static void skd_free_skreq(struct skd_device *skdev); 3395 static void skd_free_sksb(struct skd_device *skdev); 3396 3397 static void skd_free_sg_list(struct skd_device *skdev, 3398 struct fit_sg_descriptor *sg_list, 3399 uint32_t n_sg, dma_mem_t dma_addr); 3400 3401 /* 3402 * 3403 * Name: skd_destruct, call various rouines to deallocate 3404 * space acquired during initialization. 3405 * 3406 * Inputs: skdev - device state structure. 3407 * 3408 * Returns: Nothing. 3409 * 3410 */ 3411 static void 3412 skd_destruct(struct skd_device *skdev) 3413 { 3414 if (skdev == NULL) { 3415 return; 3416 } 3417 3418 Dcmn_err(CE_NOTE, "destruct sksb"); 3419 skd_free_sksb(skdev); 3420 3421 Dcmn_err(CE_NOTE, "destruct skreq"); 3422 skd_free_skreq(skdev); 3423 3424 Dcmn_err(CE_NOTE, "destruct skmsg"); 3425 skd_free_skmsg(skdev); 3426 3427 Dcmn_err(CE_NOTE, "destruct skcomp"); 3428 skd_free_skcomp(skdev); 3429 3430 Dcmn_err(CE_NOTE, "DESTRUCT VICTORY"); 3431 } 3432 3433 /* 3434 * 3435 * Name: skd_free_skcomp, deallocates skcomp table DMA resources. 3436 * 3437 * Inputs: skdev - device state structure. 3438 * 3439 * Returns: Nothing. 3440 * 3441 */ 3442 static void 3443 skd_free_skcomp(struct skd_device *skdev) 3444 { 3445 if (skdev->skcomp_table != NULL) { 3446 skd_free_phys(skdev, &skdev->cq_dma_address); 3447 } 3448 3449 skdev->skcomp_table = NULL; 3450 } 3451 3452 /* 3453 * 3454 * Name: skd_free_skmsg, deallocates skmsg table DMA resources. 3455 * 3456 * Inputs: skdev - device state structure. 3457 * 3458 * Returns: Nothing. 3459 * 3460 */ 3461 static void 3462 skd_free_skmsg(struct skd_device *skdev) 3463 { 3464 uint32_t i; 3465 3466 if (NULL == skdev->skmsg_table) 3467 return; 3468 3469 for (i = 0; i < skdev->num_fitmsg_context; i++) { 3470 struct skd_fitmsg_context *skmsg; 3471 3472 skmsg = &skdev->skmsg_table[i]; 3473 3474 if (skmsg->msg_buf != NULL) { 3475 skd_free_phys(skdev, &skmsg->mb_dma_address); 3476 } 3477 3478 3479 skmsg->msg_buf = NULL; 3480 } 3481 3482 kmem_free(skdev->skmsg_table, sizeof (struct skd_fitmsg_context) * 3483 skdev->num_fitmsg_context); 3484 3485 skdev->skmsg_table = NULL; 3486 3487 } 3488 3489 /* 3490 * 3491 * Name: skd_free_skreq, deallocates skspcl table DMA resources. 3492 * 3493 * Inputs: skdev - device state structure. 3494 * 3495 * Returns: Nothing. 3496 * 3497 */ 3498 static void 3499 skd_free_skreq(struct skd_device *skdev) 3500 { 3501 uint32_t i; 3502 3503 if (NULL == skdev->skreq_table) 3504 return; 3505 3506 for (i = 0; i < skdev->num_req_context; i++) { 3507 struct skd_request_context *skreq; 3508 3509 skreq = &skdev->skreq_table[i]; 3510 3511 skd_free_sg_list(skdev, skreq->sksg_list, 3512 skdev->sgs_per_request, skreq->sksg_dma_address); 3513 3514 skreq->sksg_list = NULL; 3515 } 3516 3517 kmem_free(skdev->skreq_table, sizeof (struct skd_request_context) * 3518 skdev->num_req_context); 3519 3520 skdev->skreq_table = NULL; 3521 3522 } 3523 3524 /* 3525 * 3526 * Name: skd_free_sksb, deallocates skspcl data buf and 3527 * msg buf DMA resources. 3528 * 3529 * Inputs: skdev - device state structure. 3530 * 3531 * Returns: Nothing. 3532 * 3533 */ 3534 static void 3535 skd_free_sksb(struct skd_device *skdev) 3536 { 3537 struct skd_special_context *skspcl; 3538 3539 skspcl = &skdev->internal_skspcl; 3540 3541 if (skspcl->data_buf != NULL) { 3542 skd_free_phys(skdev, &skspcl->db_dma_address); 3543 } 3544 3545 skspcl->data_buf = NULL; 3546 3547 if (skspcl->msg_buf != NULL) { 3548 skd_free_phys(skdev, &skspcl->mb_dma_address); 3549 } 3550 3551 skspcl->msg_buf = NULL; 3552 3553 skd_free_sg_list(skdev, skspcl->req.sksg_list, 1, 3554 skspcl->req.sksg_dma_address); 3555 3556 skspcl->req.sksg_list = NULL; 3557 } 3558 3559 /* 3560 * 3561 * Name: skd_free_sg_list, deallocates S/G DMA resources. 3562 * 3563 * Inputs: skdev - device state structure. 3564 * sg_list - S/G list itself. 3565 * n_sg - nukmber of segments 3566 * dma_addr - S/G list DMA address. 3567 * 3568 * Returns: Nothing. 3569 * 3570 */ 3571 /* ARGSUSED */ /* Upstream common source with other platforms. */ 3572 static void 3573 skd_free_sg_list(struct skd_device *skdev, 3574 struct fit_sg_descriptor *sg_list, 3575 uint32_t n_sg, dma_mem_t dma_addr) 3576 { 3577 if (sg_list != NULL) { 3578 skd_free_phys(skdev, &dma_addr); 3579 } 3580 } 3581 3582 /* 3583 * 3584 * Name: skd_queue, queues the I/O request. 3585 * 3586 * Inputs: skdev - device state structure. 3587 * pbuf - I/O request 3588 * 3589 * Returns: Nothing. 3590 * 3591 */ 3592 static void 3593 skd_queue(skd_device_t *skdev, skd_buf_private_t *pbuf) 3594 { 3595 struct waitqueue *waitq; 3596 3597 ASSERT(skdev != NULL); 3598 ASSERT(pbuf != NULL); 3599 3600 ASSERT(WAITQ_LOCK_HELD(skdev)); 3601 3602 waitq = &skdev->waitqueue; 3603 3604 if (SIMPLEQ_EMPTY(waitq)) 3605 SIMPLEQ_INSERT_HEAD(waitq, pbuf, sq); 3606 else 3607 SIMPLEQ_INSERT_TAIL(waitq, pbuf, sq); 3608 } 3609 3610 /* 3611 * 3612 * Name: skd_list_skreq, displays the skreq table entries. 3613 * 3614 * Inputs: skdev - device state structure. 3615 * list - flag, if true displays the entry address. 3616 * 3617 * Returns: Returns number of skmsg entries found. 3618 * 3619 */ 3620 /* ARGSUSED */ /* Upstream common source with other platforms. */ 3621 static int 3622 skd_list_skreq(skd_device_t *skdev, int list) 3623 { 3624 int inx = 0; 3625 struct skd_request_context *skreq; 3626 3627 if (list) { 3628 Dcmn_err(CE_NOTE, "skreq_table[0]\n"); 3629 3630 skreq = &skdev->skreq_table[0]; 3631 while (skreq) { 3632 if (list) 3633 Dcmn_err(CE_NOTE, 3634 "%d: skreq=%p state=%d id=%x fid=%x " 3635 "pbuf=%p dir=%d comp=%d\n", 3636 inx, (void *)skreq, skreq->state, 3637 skreq->id, skreq->fitmsg_id, 3638 (void *)skreq->pbuf, 3639 skreq->sg_data_dir, skreq->did_complete); 3640 inx++; 3641 skreq = skreq->next; 3642 } 3643 } 3644 3645 inx = 0; 3646 skreq = skdev->skreq_free_list; 3647 3648 if (list) 3649 Dcmn_err(CE_NOTE, "skreq_free_list\n"); 3650 while (skreq) { 3651 if (list) 3652 Dcmn_err(CE_NOTE, "%d: skreq=%p state=%d id=%x fid=%x " 3653 "pbuf=%p dir=%d\n", inx, (void *)skreq, 3654 skreq->state, skreq->id, skreq->fitmsg_id, 3655 (void *)skreq->pbuf, skreq->sg_data_dir); 3656 inx++; 3657 skreq = skreq->next; 3658 } 3659 3660 return (inx); 3661 } 3662 3663 /* 3664 * 3665 * Name: skd_list_skmsg, displays the skmsg table entries. 3666 * 3667 * Inputs: skdev - device state structure. 3668 * list - flag, if true displays the entry address. 3669 * 3670 * Returns: Returns number of skmsg entries found. 3671 * 3672 */ 3673 static int 3674 skd_list_skmsg(skd_device_t *skdev, int list) 3675 { 3676 int inx = 0; 3677 struct skd_fitmsg_context *skmsgp; 3678 3679 skmsgp = &skdev->skmsg_table[0]; 3680 3681 if (list) { 3682 Dcmn_err(CE_NOTE, "skmsg_table[0]\n"); 3683 3684 while (skmsgp) { 3685 if (list) 3686 Dcmn_err(CE_NOTE, "%d: skmsgp=%p id=%x outs=%d " 3687 "l=%d o=%d nxt=%p\n", inx, (void *)skmsgp, 3688 skmsgp->id, skmsgp->outstanding, 3689 skmsgp->length, skmsgp->offset, 3690 (void *)skmsgp->next); 3691 inx++; 3692 skmsgp = skmsgp->next; 3693 } 3694 } 3695 3696 inx = 0; 3697 if (list) 3698 Dcmn_err(CE_NOTE, "skmsg_free_list\n"); 3699 skmsgp = skdev->skmsg_free_list; 3700 while (skmsgp) { 3701 if (list) 3702 Dcmn_err(CE_NOTE, "%d: skmsgp=%p id=%x outs=%d l=%d " 3703 "o=%d nxt=%p\n", 3704 inx, (void *)skmsgp, skmsgp->id, 3705 skmsgp->outstanding, skmsgp->length, 3706 skmsgp->offset, (void *)skmsgp->next); 3707 inx++; 3708 skmsgp = skmsgp->next; 3709 } 3710 3711 return (inx); 3712 } 3713 3714 /* 3715 * 3716 * Name: skd_get_queue_pbuf, retrieves top of queue entry and 3717 * delinks entry from the queue. 3718 * 3719 * Inputs: skdev - device state structure. 3720 * drive - device number 3721 * 3722 * Returns: Returns the top of the job queue entry. 3723 * 3724 */ 3725 static skd_buf_private_t 3726 *skd_get_queued_pbuf(skd_device_t *skdev) 3727 { 3728 skd_buf_private_t *pbuf; 3729 3730 ASSERT(WAITQ_LOCK_HELD(skdev)); 3731 pbuf = SIMPLEQ_FIRST(&skdev->waitqueue); 3732 if (pbuf != NULL) 3733 SIMPLEQ_REMOVE_HEAD(&skdev->waitqueue, sq); 3734 return (pbuf); 3735 } 3736 3737 /* 3738 * PCI DRIVER GLUE 3739 */ 3740 3741 /* 3742 * 3743 * Name: skd_pci_info, logs certain device PCI info. 3744 * 3745 * Inputs: skdev - device state structure. 3746 * 3747 * Returns: str which contains the device speed info.. 3748 * 3749 */ 3750 static char * 3751 skd_pci_info(struct skd_device *skdev, char *str, size_t len) 3752 { 3753 int pcie_reg; 3754 3755 str[0] = '\0'; 3756 3757 pcie_reg = skd_pci_find_capability(skdev, PCI_CAP_ID_EXP); 3758 3759 if (pcie_reg) { 3760 uint16_t lstat, lspeed, lwidth; 3761 3762 pcie_reg += 0x12; 3763 lstat = pci_config_get16(skdev->pci_handle, pcie_reg); 3764 lspeed = lstat & (0xF); 3765 lwidth = (lstat & 0x3F0) >> 4; 3766 3767 (void) snprintf(str, len, "PCIe (%s rev %d)", 3768 lspeed == 1 ? "2.5GT/s" : 3769 lspeed == 2 ? "5.0GT/s" : "<unknown>", 3770 lwidth); 3771 } 3772 3773 return (str); 3774 } 3775 3776 /* 3777 * MODULE GLUE 3778 */ 3779 3780 /* 3781 * 3782 * Name: skd_init, initializes certain values. 3783 * 3784 * Inputs: skdev - device state structure. 3785 * 3786 * Returns: Zero. 3787 * 3788 */ 3789 /* ARGSUSED */ /* Upstream common source with other platforms. */ 3790 static int 3791 skd_init(skd_device_t *skdev) 3792 { 3793 Dcmn_err(CE_NOTE, "skd_init: v%s-b%s\n", DRV_VERSION, DRV_BUILD_ID); 3794 3795 if (skd_max_queue_depth < 1 || 3796 skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) { 3797 cmn_err(CE_NOTE, "skd_max_q_depth %d invalid, re-set to %d\n", 3798 skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT); 3799 skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT; 3800 } 3801 3802 if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) { 3803 cmn_err(CE_NOTE, "skd_max_req_per_msg %d invalid, set to %d\n", 3804 skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT); 3805 skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT; 3806 } 3807 3808 3809 if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) { 3810 cmn_err(CE_NOTE, "skd_sg_per_request %d invalid, set to %d\n", 3811 skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT); 3812 skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT; 3813 } 3814 3815 if (skd_dbg_level < 0 || skd_dbg_level > 2) { 3816 cmn_err(CE_NOTE, "skd_dbg_level %d invalid, re-set to %d\n", 3817 skd_dbg_level, 0); 3818 skd_dbg_level = 0; 3819 } 3820 3821 return (0); 3822 } 3823 3824 /* 3825 * 3826 * Name: skd_exit, exits the driver & logs the fact. 3827 * 3828 * Inputs: none. 3829 * 3830 * Returns: Nothing. 3831 * 3832 */ 3833 static void 3834 skd_exit(void) 3835 { 3836 cmn_err(CE_NOTE, "skd v%s unloading", DRV_VERSION); 3837 } 3838 3839 /* 3840 * 3841 * Name: skd_drive_state_to_str, converts binary drive state 3842 * to its corresponding string value. 3843 * 3844 * Inputs: Drive state. 3845 * 3846 * Returns: String representing drive state. 3847 * 3848 */ 3849 const char * 3850 skd_drive_state_to_str(int state) 3851 { 3852 switch (state) { 3853 case FIT_SR_DRIVE_OFFLINE: return ("OFFLINE"); 3854 case FIT_SR_DRIVE_INIT: return ("INIT"); 3855 case FIT_SR_DRIVE_ONLINE: return ("ONLINE"); 3856 case FIT_SR_DRIVE_BUSY: return ("BUSY"); 3857 case FIT_SR_DRIVE_FAULT: return ("FAULT"); 3858 case FIT_SR_DRIVE_DEGRADED: return ("DEGRADED"); 3859 case FIT_SR_PCIE_LINK_DOWN: return ("LINK_DOWN"); 3860 case FIT_SR_DRIVE_SOFT_RESET: return ("SOFT_RESET"); 3861 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD: return ("NEED_FW"); 3862 case FIT_SR_DRIVE_INIT_FAULT: return ("INIT_FAULT"); 3863 case FIT_SR_DRIVE_BUSY_SANITIZE:return ("BUSY_SANITIZE"); 3864 case FIT_SR_DRIVE_BUSY_ERASE: return ("BUSY_ERASE"); 3865 case FIT_SR_DRIVE_FW_BOOTING: return ("FW_BOOTING"); 3866 default: return ("???"); 3867 } 3868 } 3869 3870 /* 3871 * 3872 * Name: skd_skdev_state_to_str, converts binary driver state 3873 * to its corresponding string value. 3874 * 3875 * Inputs: Driver state. 3876 * 3877 * Returns: String representing driver state. 3878 * 3879 */ 3880 static const char * 3881 skd_skdev_state_to_str(enum skd_drvr_state state) 3882 { 3883 switch (state) { 3884 case SKD_DRVR_STATE_LOAD: return ("LOAD"); 3885 case SKD_DRVR_STATE_IDLE: return ("IDLE"); 3886 case SKD_DRVR_STATE_BUSY: return ("BUSY"); 3887 case SKD_DRVR_STATE_STARTING: return ("STARTING"); 3888 case SKD_DRVR_STATE_ONLINE: return ("ONLINE"); 3889 case SKD_DRVR_STATE_PAUSING: return ("PAUSING"); 3890 case SKD_DRVR_STATE_PAUSED: return ("PAUSED"); 3891 case SKD_DRVR_STATE_DRAINING_TIMEOUT: return ("DRAINING_TIMEOUT"); 3892 case SKD_DRVR_STATE_RESTARTING: return ("RESTARTING"); 3893 case SKD_DRVR_STATE_RESUMING: return ("RESUMING"); 3894 case SKD_DRVR_STATE_STOPPING: return ("STOPPING"); 3895 case SKD_DRVR_STATE_SYNCING: return ("SYNCING"); 3896 case SKD_DRVR_STATE_FAULT: return ("FAULT"); 3897 case SKD_DRVR_STATE_DISAPPEARED: return ("DISAPPEARED"); 3898 case SKD_DRVR_STATE_BUSY_ERASE: return ("BUSY_ERASE"); 3899 case SKD_DRVR_STATE_BUSY_SANITIZE:return ("BUSY_SANITIZE"); 3900 case SKD_DRVR_STATE_BUSY_IMMINENT: return ("BUSY_IMMINENT"); 3901 case SKD_DRVR_STATE_WAIT_BOOT: return ("WAIT_BOOT"); 3902 3903 default: return ("???"); 3904 } 3905 } 3906 3907 /* 3908 * 3909 * Name: skd_skmsg_state_to_str, converts binary driver state 3910 * to its corresponding string value. 3911 * 3912 * Inputs: Msg state. 3913 * 3914 * Returns: String representing msg state. 3915 * 3916 */ 3917 static const char * 3918 skd_skmsg_state_to_str(enum skd_fit_msg_state state) 3919 { 3920 switch (state) { 3921 case SKD_MSG_STATE_IDLE: return ("IDLE"); 3922 case SKD_MSG_STATE_BUSY: return ("BUSY"); 3923 default: return ("???"); 3924 } 3925 } 3926 3927 /* 3928 * 3929 * Name: skd_skreq_state_to_str, converts binary req state 3930 * to its corresponding string value. 3931 * 3932 * Inputs: Req state. 3933 * 3934 * Returns: String representing req state. 3935 * 3936 */ 3937 static const char * 3938 skd_skreq_state_to_str(enum skd_req_state state) 3939 { 3940 switch (state) { 3941 case SKD_REQ_STATE_IDLE: return ("IDLE"); 3942 case SKD_REQ_STATE_SETUP: return ("SETUP"); 3943 case SKD_REQ_STATE_BUSY: return ("BUSY"); 3944 case SKD_REQ_STATE_COMPLETED: return ("COMPLETED"); 3945 case SKD_REQ_STATE_TIMEOUT: return ("TIMEOUT"); 3946 case SKD_REQ_STATE_ABORTED: return ("ABORTED"); 3947 default: return ("???"); 3948 } 3949 } 3950 3951 /* 3952 * 3953 * Name: skd_log_skdev, logs device state & parameters. 3954 * 3955 * Inputs: skdev - device state structure. 3956 * event - event (string) to log. 3957 * 3958 * Returns: Nothing. 3959 * 3960 */ 3961 static void 3962 skd_log_skdev(struct skd_device *skdev, const char *event) 3963 { 3964 Dcmn_err(CE_NOTE, "log_skdev(%s) skdev=%p event='%s'", 3965 skdev->name, (void *)skdev, event); 3966 Dcmn_err(CE_NOTE, " drive_state=%s(%d) driver_state=%s(%d)", 3967 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, 3968 skd_skdev_state_to_str(skdev->state), skdev->state); 3969 Dcmn_err(CE_NOTE, " busy=%d limit=%d soft=%d hard=%d lowat=%d", 3970 skdev->queue_depth_busy, skdev->queue_depth_limit, 3971 skdev->soft_queue_depth_limit, skdev->hard_queue_depth_limit, 3972 skdev->queue_depth_lowat); 3973 Dcmn_err(CE_NOTE, " timestamp=0x%x cycle=%d cycle_ix=%d", 3974 skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix); 3975 } 3976 3977 /* 3978 * 3979 * Name: skd_log_skmsg, logs the skmsg event. 3980 * 3981 * Inputs: skdev - device state structure. 3982 * skmsg - FIT message structure. 3983 * event - event string to log. 3984 * 3985 * Returns: Nothing. 3986 * 3987 */ 3988 static void 3989 skd_log_skmsg(struct skd_device *skdev, 3990 struct skd_fitmsg_context *skmsg, const char *event) 3991 { 3992 Dcmn_err(CE_NOTE, "log_skmsg:(%s) skmsg=%p event='%s'", 3993 skdev->name, (void *)skmsg, event); 3994 Dcmn_err(CE_NOTE, " state=%s(%d) id=0x%04x length=%d", 3995 skd_skmsg_state_to_str(skmsg->state), skmsg->state, 3996 skmsg->id, skmsg->length); 3997 } 3998 3999 /* 4000 * 4001 * Name: skd_log_skreq, logs the skreq event. 4002 * 4003 * Inputs: skdev - device state structure. 4004 * skreq -skreq structure. 4005 * event - event string to log. 4006 * 4007 * Returns: Nothing. 4008 * 4009 */ 4010 static void 4011 skd_log_skreq(struct skd_device *skdev, 4012 struct skd_request_context *skreq, const char *event) 4013 { 4014 skd_buf_private_t *pbuf; 4015 4016 Dcmn_err(CE_NOTE, "log_skreq: (%s) skreq=%p pbuf=%p event='%s'", 4017 skdev->name, (void *)skreq, (void *)skreq->pbuf, event); 4018 4019 Dcmn_err(CE_NOTE, " state=%s(%d) id=0x%04x fitmsg=0x%04x", 4020 skd_skreq_state_to_str(skreq->state), skreq->state, 4021 skreq->id, skreq->fitmsg_id); 4022 Dcmn_err(CE_NOTE, " timo=0x%x sg_dir=%d n_sg=%d", 4023 skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg); 4024 4025 if ((pbuf = skreq->pbuf) != NULL) { 4026 uint32_t lba, count; 4027 lba = pbuf->x_xfer->x_blkno; 4028 count = pbuf->x_xfer->x_nblks; 4029 Dcmn_err(CE_NOTE, " pbuf=%p lba=%u(0x%x) count=%u(0x%x) ", 4030 (void *)pbuf, lba, lba, count, count); 4031 Dcmn_err(CE_NOTE, " dir=%s " 4032 " intrs=%" PRId64 " qdepth=%d", 4033 (pbuf->dir & B_READ) ? "Read" : "Write", 4034 skdev->intr_cntr, skdev->queue_depth_busy); 4035 } else { 4036 Dcmn_err(CE_NOTE, " req=NULL\n"); 4037 } 4038 } 4039 4040 /* 4041 * 4042 * Name: skd_init_mutex, initializes all mutexes. 4043 * 4044 * Inputs: skdev - device state structure. 4045 * 4046 * Returns: DDI_FAILURE on failure otherwise DDI_SUCCESS. 4047 * 4048 */ 4049 static int 4050 skd_init_mutex(skd_device_t *skdev) 4051 { 4052 void *intr; 4053 4054 Dcmn_err(CE_CONT, "(%s%d): init_mutex flags=%x", DRV_NAME, 4055 skdev->instance, skdev->flags); 4056 4057 intr = (void *)(uintptr_t)skdev->intr_pri; 4058 4059 if (skdev->flags & SKD_MUTEX_INITED) 4060 cmn_err(CE_NOTE, "init_mutex: Oh-Oh - already INITED"); 4061 4062 /* mutexes to protect the adapter state structure. */ 4063 mutex_init(&skdev->skd_lock_mutex, NULL, MUTEX_DRIVER, 4064 DDI_INTR_PRI(intr)); 4065 mutex_init(&skdev->skd_intr_mutex, NULL, MUTEX_DRIVER, 4066 DDI_INTR_PRI(intr)); 4067 mutex_init(&skdev->waitqueue_mutex, NULL, MUTEX_DRIVER, 4068 DDI_INTR_PRI(intr)); 4069 mutex_init(&skdev->skd_internalio_mutex, NULL, MUTEX_DRIVER, 4070 DDI_INTR_PRI(intr)); 4071 4072 cv_init(&skdev->cv_waitq, NULL, CV_DRIVER, NULL); 4073 4074 skdev->flags |= SKD_MUTEX_INITED; 4075 if (skdev->flags & SKD_MUTEX_DESTROYED) 4076 skdev->flags &= ~SKD_MUTEX_DESTROYED; 4077 4078 Dcmn_err(CE_CONT, "init_mutex (%s%d): done, flags=%x", DRV_NAME, 4079 skdev->instance, skdev->flags); 4080 4081 return (DDI_SUCCESS); 4082 } 4083 4084 /* 4085 * 4086 * Name: skd_destroy_mutex, destroys all mutexes. 4087 * 4088 * Inputs: skdev - device state structure. 4089 * 4090 * Returns: Nothing. 4091 * 4092 */ 4093 static void 4094 skd_destroy_mutex(skd_device_t *skdev) 4095 { 4096 if ((skdev->flags & SKD_MUTEX_DESTROYED) == 0) { 4097 if (skdev->flags & SKD_MUTEX_INITED) { 4098 mutex_destroy(&skdev->waitqueue_mutex); 4099 mutex_destroy(&skdev->skd_intr_mutex); 4100 mutex_destroy(&skdev->skd_lock_mutex); 4101 mutex_destroy(&skdev->skd_internalio_mutex); 4102 4103 cv_destroy(&skdev->cv_waitq); 4104 4105 skdev->flags |= SKD_MUTEX_DESTROYED; 4106 4107 if (skdev->flags & SKD_MUTEX_INITED) 4108 skdev->flags &= ~SKD_MUTEX_INITED; 4109 } 4110 } 4111 } 4112 4113 /* 4114 * 4115 * Name: skd_setup_intr, setup the interrupt handling 4116 * 4117 * Inputs: skdev - device state structure. 4118 * intr_type - requested DDI interrupt type. 4119 * 4120 * Returns: DDI_FAILURE on failure otherwise DDI_SUCCESS. 4121 * 4122 */ 4123 static int 4124 skd_setup_intr(skd_device_t *skdev, int intr_type) 4125 { 4126 int32_t count = 0; 4127 int32_t avail = 0; 4128 int32_t actual = 0; 4129 int32_t ret; 4130 uint32_t i; 4131 4132 Dcmn_err(CE_CONT, "(%s%d): setup_intr", DRV_NAME, skdev->instance); 4133 4134 /* Get number of interrupts the platform h/w supports */ 4135 if (((ret = ddi_intr_get_nintrs(skdev->dip, intr_type, &count)) != 4136 DDI_SUCCESS) || count == 0) { 4137 cmn_err(CE_WARN, "!intr_setup failed, nintrs ret=%xh, cnt=%xh", 4138 ret, count); 4139 4140 return (DDI_FAILURE); 4141 } 4142 4143 /* Get number of available system interrupts */ 4144 if (((ret = ddi_intr_get_navail(skdev->dip, intr_type, &avail)) != 4145 DDI_SUCCESS) || avail == 0) { 4146 cmn_err(CE_WARN, "!intr_setup failed, navail ret=%xh, " 4147 "avail=%xh", ret, avail); 4148 4149 return (DDI_FAILURE); 4150 } 4151 4152 if (intr_type == DDI_INTR_TYPE_MSIX && avail < SKD_MSIX_MAXAIF) { 4153 cmn_err(CE_WARN, "!intr_setup failed, min MSI-X h/w vectors " 4154 "req'd: %d, avail: %d", 4155 SKD_MSIX_MAXAIF, count); 4156 4157 return (DDI_FAILURE); 4158 } 4159 4160 /* Allocate space for interrupt handles */ 4161 skdev->hsize = sizeof (ddi_intr_handle_t) * avail; 4162 skdev->htable = kmem_zalloc(skdev->hsize, KM_SLEEP); 4163 4164 /* Allocate the interrupts */ 4165 if ((ret = ddi_intr_alloc(skdev->dip, skdev->htable, intr_type, 4166 0, count, &actual, 0)) != DDI_SUCCESS) { 4167 cmn_err(CE_WARN, "!intr_setup failed, intr_alloc ret=%xh, " 4168 "count = %xh, " "actual=%xh", ret, count, actual); 4169 4170 skd_release_intr(skdev); 4171 4172 return (DDI_FAILURE); 4173 } 4174 4175 skdev->intr_cnt = actual; 4176 4177 if (intr_type == DDI_INTR_TYPE_FIXED) 4178 (void) ddi_intr_set_pri(skdev->htable[0], 10); 4179 4180 /* Get interrupt priority */ 4181 if ((ret = ddi_intr_get_pri(skdev->htable[0], &skdev->intr_pri)) != 4182 DDI_SUCCESS) { 4183 cmn_err(CE_WARN, "!intr_setup failed, get_pri ret=%xh", ret); 4184 skd_release_intr(skdev); 4185 4186 return (ret); 4187 } 4188 4189 /* Add the interrupt handlers */ 4190 for (i = 0; i < actual; i++) { 4191 if ((ret = ddi_intr_add_handler(skdev->htable[i], 4192 skd_isr_aif, (void *)skdev, (void *)((ulong_t)i))) != 4193 DDI_SUCCESS) { 4194 cmn_err(CE_WARN, "!intr_setup failed, addh#=%xh, " 4195 "act=%xh, ret=%xh", i, actual, ret); 4196 skd_release_intr(skdev); 4197 4198 return (ret); 4199 } 4200 } 4201 4202 /* Setup mutexes */ 4203 if ((ret = skd_init_mutex(skdev)) != DDI_SUCCESS) { 4204 cmn_err(CE_WARN, "!intr_setup failed, mutex init ret=%xh", ret); 4205 skd_release_intr(skdev); 4206 4207 return (ret); 4208 } 4209 4210 /* Get the capabilities */ 4211 (void) ddi_intr_get_cap(skdev->htable[0], &skdev->intr_cap); 4212 4213 /* Enable interrupts */ 4214 if (skdev->intr_cap & DDI_INTR_FLAG_BLOCK) { 4215 if ((ret = ddi_intr_block_enable(skdev->htable, 4216 skdev->intr_cnt)) != DDI_SUCCESS) { 4217 cmn_err(CE_WARN, "!failed, intr_setup block enable, " 4218 "ret=%xh", ret); 4219 skd_destroy_mutex(skdev); 4220 skd_release_intr(skdev); 4221 4222 return (ret); 4223 } 4224 } else { 4225 for (i = 0; i < skdev->intr_cnt; i++) { 4226 if ((ret = ddi_intr_enable(skdev->htable[i])) != 4227 DDI_SUCCESS) { 4228 cmn_err(CE_WARN, "!intr_setup failed, " 4229 "intr enable, ret=%xh", ret); 4230 skd_destroy_mutex(skdev); 4231 skd_release_intr(skdev); 4232 4233 return (ret); 4234 } 4235 } 4236 } 4237 4238 if (intr_type == DDI_INTR_TYPE_FIXED) 4239 (void) ddi_intr_clr_mask(skdev->htable[0]); 4240 4241 skdev->irq_type = intr_type; 4242 4243 return (DDI_SUCCESS); 4244 } 4245 4246 /* 4247 * 4248 * Name: skd_disable_intr, disable interrupt handling. 4249 * 4250 * Inputs: skdev - device state structure. 4251 * 4252 * Returns: Nothing. 4253 * 4254 */ 4255 static void 4256 skd_disable_intr(skd_device_t *skdev) 4257 { 4258 uint32_t i, rval; 4259 4260 if (skdev->intr_cap & DDI_INTR_FLAG_BLOCK) { 4261 /* Remove AIF block interrupts (MSI/MSI-X) */ 4262 if ((rval = ddi_intr_block_disable(skdev->htable, 4263 skdev->intr_cnt)) != DDI_SUCCESS) { 4264 cmn_err(CE_WARN, "!failed intr block disable, rval=%x", 4265 rval); 4266 } 4267 } else { 4268 /* Remove AIF non-block interrupts (fixed). */ 4269 for (i = 0; i < skdev->intr_cnt; i++) { 4270 if ((rval = ddi_intr_disable(skdev->htable[i])) != 4271 DDI_SUCCESS) { 4272 cmn_err(CE_WARN, "!failed intr disable, " 4273 "intr#=%xh, " "rval=%xh", i, rval); 4274 } 4275 } 4276 } 4277 } 4278 4279 /* 4280 * 4281 * Name: skd_release_intr, disables interrupt handling. 4282 * 4283 * Inputs: skdev - device state structure. 4284 * 4285 * Returns: Nothing. 4286 * 4287 */ 4288 static void 4289 skd_release_intr(skd_device_t *skdev) 4290 { 4291 int32_t i; 4292 int rval; 4293 4294 4295 Dcmn_err(CE_CONT, "REL_INTR intr_cnt=%d", skdev->intr_cnt); 4296 4297 if (skdev->irq_type == 0) { 4298 Dcmn_err(CE_CONT, "release_intr: (%s%d): done", 4299 DRV_NAME, skdev->instance); 4300 return; 4301 } 4302 4303 if (skdev->htable != NULL && skdev->hsize > 0) { 4304 i = (int32_t)skdev->hsize / (int32_t)sizeof (ddi_intr_handle_t); 4305 4306 while (i-- > 0) { 4307 if (skdev->htable[i] == 0) { 4308 Dcmn_err(CE_NOTE, "htable[%x]=0h", i); 4309 continue; 4310 } 4311 4312 if ((rval = ddi_intr_disable(skdev->htable[i])) != 4313 DDI_SUCCESS) 4314 Dcmn_err(CE_NOTE, "release_intr: intr_disable " 4315 "htable[%d], rval=%d", i, rval); 4316 4317 if (i < skdev->intr_cnt) { 4318 if ((rval = ddi_intr_remove_handler( 4319 skdev->htable[i])) != DDI_SUCCESS) 4320 cmn_err(CE_WARN, "!release_intr: " 4321 "intr_remove_handler FAILED, " 4322 "rval=%d", rval); 4323 4324 Dcmn_err(CE_NOTE, "release_intr: " 4325 "remove_handler htable[%d]", i); 4326 } 4327 4328 if ((rval = ddi_intr_free(skdev->htable[i])) != 4329 DDI_SUCCESS) 4330 cmn_err(CE_WARN, "!release_intr: intr_free " 4331 "FAILED, rval=%d", rval); 4332 Dcmn_err(CE_NOTE, "release_intr: intr_free htable[%d]", 4333 i); 4334 } 4335 4336 kmem_free(skdev->htable, skdev->hsize); 4337 skdev->htable = NULL; 4338 } 4339 4340 skdev->hsize = 0; 4341 skdev->intr_cnt = 0; 4342 skdev->intr_pri = 0; 4343 skdev->intr_cap = 0; 4344 skdev->irq_type = 0; 4345 } 4346 4347 /* 4348 * 4349 * Name: skd_dealloc_resources, deallocate resources allocated 4350 * during attach. 4351 * 4352 * Inputs: dip - DDI device info pointer. 4353 * skdev - device state structure. 4354 * seq - bit flag representing allocated item. 4355 * instance - device instance. 4356 * 4357 * Returns: Nothing. 4358 * 4359 */ 4360 /* ARGSUSED */ /* Upstream common source with other platforms. */ 4361 static void 4362 skd_dealloc_resources(dev_info_t *dip, skd_device_t *skdev, 4363 uint32_t seq, int instance) 4364 { 4365 4366 if (skdev == NULL) 4367 return; 4368 4369 if (seq & SKD_CONSTRUCTED) 4370 skd_destruct(skdev); 4371 4372 if (seq & SKD_INTR_ADDED) { 4373 skd_disable_intr(skdev); 4374 skd_release_intr(skdev); 4375 } 4376 4377 if (seq & SKD_DEV_IOBASE_MAPPED) 4378 ddi_regs_map_free(&skdev->dev_handle); 4379 4380 if (seq & SKD_IOMAP_IOBASE_MAPPED) 4381 ddi_regs_map_free(&skdev->iomap_handle); 4382 4383 if (seq & SKD_REGS_MAPPED) 4384 ddi_regs_map_free(&skdev->iobase_handle); 4385 4386 if (seq & SKD_CONFIG_SPACE_SETUP) 4387 pci_config_teardown(&skdev->pci_handle); 4388 4389 if (seq & SKD_SOFT_STATE_ALLOCED) { 4390 if (skdev->pathname && 4391 (skdev->flags & SKD_PATHNAME_ALLOCED)) { 4392 kmem_free(skdev->pathname, 4393 strlen(skdev->pathname)+1); 4394 } 4395 } 4396 4397 if (skdev->s1120_devid) 4398 ddi_devid_free(skdev->s1120_devid); 4399 } 4400 4401 /* 4402 * 4403 * Name: skd_setup_interrupt, sets up the appropriate interrupt type 4404 * msi, msix, or fixed. 4405 * 4406 * Inputs: skdev - device state structure. 4407 * 4408 * Returns: DDI_FAILURE on failure otherwise DDI_SUCCESS. 4409 * 4410 */ 4411 static int 4412 skd_setup_interrupts(skd_device_t *skdev) 4413 { 4414 int32_t rval = DDI_FAILURE; 4415 int32_t i; 4416 int32_t itypes = 0; 4417 4418 /* 4419 * See what types of interrupts this adapter and platform support 4420 */ 4421 if ((i = ddi_intr_get_supported_types(skdev->dip, &itypes)) != 4422 DDI_SUCCESS) { 4423 cmn_err(CE_NOTE, "intr supported types failed, rval=%xh, ", i); 4424 return (DDI_FAILURE); 4425 } 4426 4427 Dcmn_err(CE_NOTE, "%s:supported interrupts types: %x", 4428 skdev->name, itypes); 4429 4430 itypes &= skdev->irq_type; 4431 4432 if (!skd_disable_msix && (itypes & DDI_INTR_TYPE_MSIX) && 4433 (rval = skd_setup_intr(skdev, DDI_INTR_TYPE_MSIX)) == DDI_SUCCESS) { 4434 cmn_err(CE_NOTE, "!%s: successful MSI-X setup", 4435 skdev->name); 4436 } else if (!skd_disable_msi && (itypes & DDI_INTR_TYPE_MSI) && 4437 (rval = skd_setup_intr(skdev, DDI_INTR_TYPE_MSI)) == DDI_SUCCESS) { 4438 cmn_err(CE_NOTE, "!%s: successful MSI setup", 4439 skdev->name); 4440 } else if ((itypes & DDI_INTR_TYPE_FIXED) && 4441 (rval = skd_setup_intr(skdev, DDI_INTR_TYPE_FIXED)) 4442 == DDI_SUCCESS) { 4443 cmn_err(CE_NOTE, "!%s: successful fixed intr setup", 4444 skdev->name); 4445 } else { 4446 cmn_err(CE_WARN, "!%s: no supported interrupt types", 4447 skdev->name); 4448 return (DDI_FAILURE); 4449 } 4450 4451 Dcmn_err(CE_CONT, "%s: setup interrupts done", skdev->name); 4452 4453 return (rval); 4454 } 4455 4456 /* 4457 * 4458 * Name: skd_get_properties, retrieves properties from skd.conf. 4459 * 4460 * Inputs: skdev - device state structure. 4461 * dip - dev_info data structure. 4462 * 4463 * Returns: Nothing. 4464 * 4465 */ 4466 /* ARGSUSED */ /* Upstream common source with other platforms. */ 4467 static void 4468 skd_get_properties(dev_info_t *dip, skd_device_t *skdev) 4469 { 4470 int prop_value; 4471 4472 skd_isr_type = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 4473 "intr-type-cap", -1); 4474 4475 prop_value = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 4476 "max-scsi-reqs", -1); 4477 if (prop_value >= 1 && prop_value <= SKD_MAX_QUEUE_DEPTH) 4478 skd_max_queue_depth = prop_value; 4479 4480 prop_value = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 4481 "max-scsi-reqs-per-msg", -1); 4482 if (prop_value >= 1 && prop_value <= SKD_MAX_REQ_PER_MSG) 4483 skd_max_req_per_msg = prop_value; 4484 4485 prop_value = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 4486 "max-sgs-per-req", -1); 4487 if (prop_value >= 1 && prop_value <= SKD_MAX_N_SG_PER_REQ) 4488 skd_sgs_per_request = prop_value; 4489 4490 prop_value = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 4491 "dbg-level", -1); 4492 if (prop_value >= 1 && prop_value <= 2) 4493 skd_dbg_level = prop_value; 4494 } 4495 4496 /* 4497 * 4498 * Name: skd_wait_for_s1120, wait for device to finish 4499 * its initialization. 4500 * 4501 * Inputs: skdev - device state structure. 4502 * 4503 * Returns: DDI_SUCCESS or DDI_FAILURE. 4504 * 4505 */ 4506 static int 4507 skd_wait_for_s1120(skd_device_t *skdev) 4508 { 4509 clock_t cur_ticks, tmo; 4510 int loop_cntr = 0; 4511 int rc = DDI_FAILURE; 4512 4513 mutex_enter(&skdev->skd_internalio_mutex); 4514 4515 while (skdev->gendisk_on == 0) { 4516 cur_ticks = ddi_get_lbolt(); 4517 tmo = cur_ticks + drv_usectohz(MICROSEC); 4518 if (cv_timedwait(&skdev->cv_waitq, 4519 &skdev->skd_internalio_mutex, tmo) == -1) { 4520 /* Oops - timed out */ 4521 if (loop_cntr++ > 10) 4522 break; 4523 } 4524 } 4525 4526 mutex_exit(&skdev->skd_internalio_mutex); 4527 4528 if (skdev->gendisk_on == 1) 4529 rc = DDI_SUCCESS; 4530 4531 return (rc); 4532 } 4533 4534 /* 4535 * 4536 * Name: skd_update_props, updates certain device properties. 4537 * 4538 * Inputs: skdev - device state structure. 4539 * dip - dev info structure 4540 * 4541 * Returns: Nothing. 4542 * 4543 */ 4544 static void 4545 skd_update_props(skd_device_t *skdev, dev_info_t *dip) 4546 { 4547 int blksize = 512; 4548 4549 if ((ddi_prop_update_int64(DDI_DEV_T_NONE, dip, "device-nblocks", 4550 skdev->Nblocks) != DDI_SUCCESS) || 4551 (ddi_prop_update_int(DDI_DEV_T_NONE, dip, "device-blksize", 4552 blksize) != DDI_SUCCESS)) { 4553 cmn_err(CE_NOTE, "%s: FAILED to create driver properties", 4554 skdev->name); 4555 } 4556 } 4557 4558 /* 4559 * 4560 * Name: skd_setup_devid, sets up device ID info. 4561 * 4562 * Inputs: skdev - device state structure. 4563 * devid - Device ID for the DDI. 4564 * 4565 * Returns: DDI_SUCCESS or DDI_FAILURE. 4566 * 4567 */ 4568 static int 4569 skd_setup_devid(skd_device_t *skdev, ddi_devid_t *devid) 4570 { 4571 int rc, sz_model, sz_sn, sz; 4572 4573 sz_model = scsi_ascii_inquiry_len(skdev->inq_product_id, 4574 strlen(skdev->inq_product_id)); 4575 sz_sn = scsi_ascii_inquiry_len(skdev->inq_serial_num, 4576 strlen(skdev->inq_serial_num)); 4577 sz = sz_model + sz_sn + 1; 4578 4579 (void) snprintf(skdev->devid_str, sizeof (skdev->devid_str), 4580 "%.*s=%.*s", sz_model, skdev->inq_product_id, sz_sn, 4581 skdev->inq_serial_num); 4582 rc = ddi_devid_init(skdev->dip, DEVID_SCSI_SERIAL, sz, 4583 skdev->devid_str, devid); 4584 4585 if (rc != DDI_SUCCESS) 4586 cmn_err(CE_WARN, "!%s: devid_init FAILED", skdev->name); 4587 4588 return (rc); 4589 4590 } 4591 4592 /* 4593 * 4594 * Name: skd_bd_attach, attach to blkdev driver 4595 * 4596 * Inputs: skdev - device state structure. 4597 * dip - device info structure. 4598 * 4599 * Returns: DDI_SUCCESS or DDI_FAILURE. 4600 * 4601 */ 4602 static int 4603 skd_bd_attach(dev_info_t *dip, skd_device_t *skdev) 4604 { 4605 int rv; 4606 4607 skdev->s_bdh = bd_alloc_handle(skdev, &skd_bd_ops, 4608 &skd_64bit_io_dma_attr, KM_SLEEP); 4609 4610 if (skdev->s_bdh == NULL) { 4611 cmn_err(CE_WARN, "!skd_bd_attach: FAILED"); 4612 4613 return (DDI_FAILURE); 4614 } 4615 4616 rv = bd_attach_handle(dip, skdev->s_bdh); 4617 4618 if (rv != DDI_SUCCESS) { 4619 cmn_err(CE_WARN, "!bd_attach_handle FAILED\n"); 4620 } else { 4621 Dcmn_err(CE_NOTE, "bd_attach_handle OK\n"); 4622 skdev->bd_attached++; 4623 } 4624 4625 return (rv); 4626 } 4627 4628 /* 4629 * 4630 * Name: skd_bd_detach, detach from the blkdev driver. 4631 * 4632 * Inputs: skdev - device state structure. 4633 * 4634 * Returns: Nothing. 4635 * 4636 */ 4637 static void 4638 skd_bd_detach(skd_device_t *skdev) 4639 { 4640 if (skdev->bd_attached) 4641 (void) bd_detach_handle(skdev->s_bdh); 4642 4643 bd_free_handle(skdev->s_bdh); 4644 } 4645 4646 /* 4647 * 4648 * Name: skd_attach, attach sdk device driver 4649 * 4650 * Inputs: dip - device info structure. 4651 * cmd - DDI attach argument (ATTACH, RESUME, etc.) 4652 * 4653 * Returns: DDI_SUCCESS or DDI_FAILURE. 4654 * 4655 */ 4656 static int 4657 skd_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 4658 { 4659 int instance; 4660 int nregs; 4661 skd_device_t *skdev = NULL; 4662 int inx; 4663 uint16_t cmd_reg; 4664 int progress = 0; 4665 char name[MAXPATHLEN]; 4666 off_t regsize; 4667 char pci_str[32]; 4668 char fw_version[8]; 4669 4670 instance = ddi_get_instance(dip); 4671 4672 (void) ddi_get_parent_data(dip); 4673 4674 switch (cmd) { 4675 case DDI_ATTACH: 4676 break; 4677 4678 case DDI_RESUME: 4679 /* Re-enable timer */ 4680 skd_start_timer(skdev); 4681 4682 return (DDI_SUCCESS); 4683 4684 default: 4685 return (DDI_FAILURE); 4686 } 4687 4688 Dcmn_err(CE_NOTE, "sTec S1120 Driver v%s Instance: %d", 4689 VERSIONSTR, instance); 4690 4691 /* 4692 * Check that hardware is installed in a DMA-capable slot 4693 */ 4694 if (ddi_slaveonly(dip) == DDI_SUCCESS) { 4695 cmn_err(CE_WARN, "!%s%d: installed in a " 4696 "slot that isn't DMA-capable slot", DRV_NAME, instance); 4697 return (DDI_FAILURE); 4698 } 4699 4700 /* 4701 * No support for high-level interrupts 4702 */ 4703 if (ddi_intr_hilevel(dip, 0) != 0) { 4704 cmn_err(CE_WARN, "!%s%d: High level interrupt not supported", 4705 DRV_NAME, instance); 4706 return (DDI_FAILURE); 4707 } 4708 4709 /* 4710 * Allocate our per-device-instance structure 4711 */ 4712 if (ddi_soft_state_zalloc(skd_state, instance) != 4713 DDI_SUCCESS) { 4714 cmn_err(CE_WARN, "!%s%d: soft state zalloc failed ", 4715 DRV_NAME, instance); 4716 return (DDI_FAILURE); 4717 } 4718 4719 progress |= SKD_SOFT_STATE_ALLOCED; 4720 4721 skdev = ddi_get_soft_state(skd_state, instance); 4722 if (skdev == NULL) { 4723 cmn_err(CE_WARN, "!%s%d: Unable to get soft state structure", 4724 DRV_NAME, instance); 4725 goto skd_attach_failed; 4726 } 4727 4728 (void) snprintf(skdev->name, sizeof (skdev->name), 4729 DRV_NAME "%d", instance); 4730 4731 skdev->dip = dip; 4732 skdev->instance = instance; 4733 4734 ddi_set_driver_private(dip, skdev); 4735 4736 (void) ddi_pathname(dip, name); 4737 for (inx = strlen(name); inx; inx--) { 4738 if (name[inx] == ',') { 4739 name[inx] = '\0'; 4740 break; 4741 } 4742 if (name[inx] == '@') { 4743 break; 4744 } 4745 } 4746 4747 skdev->pathname = kmem_zalloc(strlen(name) + 1, KM_SLEEP); 4748 (void) strlcpy(skdev->pathname, name, strlen(name) + 1); 4749 4750 progress |= SKD_PATHNAME_ALLOCED; 4751 skdev->flags |= SKD_PATHNAME_ALLOCED; 4752 4753 if (pci_config_setup(dip, &skdev->pci_handle) != DDI_SUCCESS) { 4754 cmn_err(CE_WARN, "!%s%d: pci_config_setup FAILED", 4755 DRV_NAME, instance); 4756 goto skd_attach_failed; 4757 } 4758 4759 progress |= SKD_CONFIG_SPACE_SETUP; 4760 4761 /* Save adapter path. */ 4762 4763 (void) ddi_dev_nregs(dip, &nregs); 4764 4765 /* 4766 * 0x0 Configuration Space 4767 * 0x1 I/O Space 4768 * 0x2 s1120 register space 4769 */ 4770 if (ddi_dev_regsize(dip, 1, ®size) != DDI_SUCCESS || 4771 ddi_regs_map_setup(dip, 1, &skdev->iobase, 0, regsize, 4772 &dev_acc_attr, &skdev->iobase_handle) != DDI_SUCCESS) { 4773 cmn_err(CE_WARN, "!%s%d: regs_map_setup(mem) failed", 4774 DRV_NAME, instance); 4775 goto skd_attach_failed; 4776 } 4777 progress |= SKD_REGS_MAPPED; 4778 4779 skdev->iomap_iobase = skdev->iobase; 4780 skdev->iomap_handle = skdev->iobase_handle; 4781 4782 Dcmn_err(CE_NOTE, "%s: PCI iobase=%ph, iomap=%ph, regnum=%d, " 4783 "regsize=%ld", skdev->name, (void *)skdev->iobase, 4784 (void *)skdev->iomap_iobase, 1, regsize); 4785 4786 if (ddi_dev_regsize(dip, 2, ®size) != DDI_SUCCESS || 4787 ddi_regs_map_setup(dip, 2, &skdev->dev_iobase, 0, regsize, 4788 &dev_acc_attr, &skdev->dev_handle) != DDI_SUCCESS) { 4789 cmn_err(CE_WARN, "!%s%d: regs_map_setup(mem) failed", 4790 DRV_NAME, instance); 4791 4792 goto skd_attach_failed; 4793 } 4794 4795 skdev->dev_memsize = (int)regsize; 4796 4797 Dcmn_err(CE_NOTE, "%s: DEV iobase=%ph regsize=%d", 4798 skdev->name, (void *)skdev->dev_iobase, 4799 skdev->dev_memsize); 4800 4801 progress |= SKD_DEV_IOBASE_MAPPED; 4802 4803 cmd_reg = pci_config_get16(skdev->pci_handle, PCI_CONF_COMM); 4804 cmd_reg |= (PCI_COMM_ME | PCI_COMM_INTX_DISABLE); 4805 cmd_reg &= ~PCI_COMM_PARITY_DETECT; 4806 pci_config_put16(skdev->pci_handle, PCI_CONF_COMM, cmd_reg); 4807 4808 /* Get adapter PCI device information. */ 4809 skdev->vendor_id = pci_config_get16(skdev->pci_handle, PCI_CONF_VENID); 4810 skdev->device_id = pci_config_get16(skdev->pci_handle, PCI_CONF_DEVID); 4811 4812 Dcmn_err(CE_NOTE, "%s: %x-%x card detected", 4813 skdev->name, skdev->vendor_id, skdev->device_id); 4814 4815 skd_get_properties(dip, skdev); 4816 4817 (void) skd_init(skdev); 4818 4819 if (skd_construct(skdev, instance)) { 4820 cmn_err(CE_WARN, "!%s: construct FAILED", skdev->name); 4821 goto skd_attach_failed; 4822 } 4823 4824 progress |= SKD_PROBED; 4825 progress |= SKD_CONSTRUCTED; 4826 4827 SIMPLEQ_INIT(&skdev->waitqueue); 4828 4829 /* 4830 * Setup interrupt handler 4831 */ 4832 if (skd_setup_interrupts(skdev) != DDI_SUCCESS) { 4833 cmn_err(CE_WARN, "!%s: Unable to add interrupt", 4834 skdev->name); 4835 goto skd_attach_failed; 4836 } 4837 4838 progress |= SKD_INTR_ADDED; 4839 4840 ADAPTER_STATE_LOCK(skdev); 4841 skdev->flags |= SKD_ATTACHED; 4842 ADAPTER_STATE_UNLOCK(skdev); 4843 4844 skdev->d_blkshift = 9; 4845 progress |= SKD_ATTACHED; 4846 4847 4848 skd_start_device(skdev); 4849 4850 ADAPTER_STATE_LOCK(skdev); 4851 skdev->progress = progress; 4852 ADAPTER_STATE_UNLOCK(skdev); 4853 4854 /* 4855 * Give the board a chance to 4856 * complete its initialization. 4857 */ 4858 if (skdev->gendisk_on != 1) 4859 (void) skd_wait_for_s1120(skdev); 4860 4861 if (skdev->gendisk_on != 1) { 4862 cmn_err(CE_WARN, "!%s: s1120 failed to come ONLINE", 4863 skdev->name); 4864 goto skd_attach_failed; 4865 } 4866 4867 ddi_report_dev(dip); 4868 4869 skd_send_internal_skspcl(skdev, &skdev->internal_skspcl, INQUIRY); 4870 4871 skdev->disks_initialized++; 4872 4873 (void) strcpy(fw_version, "???"); 4874 (void) skd_pci_info(skdev, pci_str, sizeof (pci_str)); 4875 Dcmn_err(CE_NOTE, " sTec S1120 Driver(%s) version %s-b%s", 4876 DRV_NAME, DRV_VERSION, DRV_BUILD_ID); 4877 4878 Dcmn_err(CE_NOTE, " sTec S1120 %04x:%04x %s 64 bit", 4879 skdev->vendor_id, skdev->device_id, pci_str); 4880 4881 Dcmn_err(CE_NOTE, " sTec S1120 %s\n", skdev->pathname); 4882 4883 if (*skdev->inq_serial_num) 4884 Dcmn_err(CE_NOTE, " sTec S1120 serial#=%s", 4885 skdev->inq_serial_num); 4886 4887 if (*skdev->inq_product_id && 4888 *skdev->inq_product_rev) 4889 Dcmn_err(CE_NOTE, " sTec S1120 prod ID=%s prod rev=%s", 4890 skdev->inq_product_id, skdev->inq_product_rev); 4891 4892 Dcmn_err(CE_NOTE, "%s: intr-type-cap: %d", 4893 skdev->name, skdev->irq_type); 4894 Dcmn_err(CE_NOTE, "%s: max-scsi-reqs: %d", 4895 skdev->name, skd_max_queue_depth); 4896 Dcmn_err(CE_NOTE, "%s: max-sgs-per-req: %d", 4897 skdev->name, skd_sgs_per_request); 4898 Dcmn_err(CE_NOTE, "%s: max-scsi-req-per-msg: %d", 4899 skdev->name, skd_max_req_per_msg); 4900 4901 if (skd_bd_attach(dip, skdev) == DDI_FAILURE) 4902 goto skd_attach_failed; 4903 4904 skd_update_props(skdev, dip); 4905 4906 /* Enable timer */ 4907 skd_start_timer(skdev); 4908 4909 ADAPTER_STATE_LOCK(skdev); 4910 skdev->progress = progress; 4911 ADAPTER_STATE_UNLOCK(skdev); 4912 4913 skdev->attached = 1; 4914 return (DDI_SUCCESS); 4915 4916 skd_attach_failed: 4917 skd_dealloc_resources(dip, skdev, progress, instance); 4918 4919 if ((skdev->flags & SKD_MUTEX_DESTROYED) == 0) { 4920 skd_destroy_mutex(skdev); 4921 } 4922 4923 ddi_soft_state_free(skd_state, instance); 4924 4925 cmn_err(CE_WARN, "!skd_attach FAILED: progress=%x", progress); 4926 return (DDI_FAILURE); 4927 } 4928 4929 /* 4930 * 4931 * Name: skd_halt 4932 * 4933 * Inputs: skdev - device state structure. 4934 * 4935 * Returns: Nothing. 4936 * 4937 */ 4938 static void 4939 skd_halt(skd_device_t *skdev) 4940 { 4941 Dcmn_err(CE_NOTE, "%s: halt/suspend ......", skdev->name); 4942 } 4943 4944 /* 4945 * 4946 * Name: skd_detach, detaches driver from the system. 4947 * 4948 * Inputs: dip - device info structure. 4949 * 4950 * Returns: DDI_SUCCESS on successful detach otherwise DDI_FAILURE. 4951 * 4952 */ 4953 static int 4954 skd_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 4955 { 4956 skd_buf_private_t *pbuf; 4957 skd_device_t *skdev; 4958 int instance; 4959 timeout_id_t timer_id = NULL; 4960 int rv1 = DDI_SUCCESS; 4961 struct skd_special_context *skspcl; 4962 4963 instance = ddi_get_instance(dip); 4964 4965 skdev = ddi_get_soft_state(skd_state, instance); 4966 if (skdev == NULL) { 4967 cmn_err(CE_WARN, "!detach failed: NULL skd state"); 4968 4969 return (DDI_FAILURE); 4970 } 4971 4972 Dcmn_err(CE_CONT, "skd_detach(%d): entered", instance); 4973 4974 switch (cmd) { 4975 case DDI_DETACH: 4976 /* Test for packet cache inuse. */ 4977 ADAPTER_STATE_LOCK(skdev); 4978 4979 /* Stop command/event processing. */ 4980 skdev->flags |= (SKD_SUSPENDED | SKD_CMD_ABORT_TMO); 4981 4982 /* Disable driver timer if no adapters. */ 4983 if (skdev->skd_timer_timeout_id != 0) { 4984 timer_id = skdev->skd_timer_timeout_id; 4985 skdev->skd_timer_timeout_id = 0; 4986 } 4987 ADAPTER_STATE_UNLOCK(skdev); 4988 4989 if (timer_id != 0) { 4990 (void) untimeout(timer_id); 4991 } 4992 4993 #ifdef SKD_PM 4994 if (skdev->power_level != LOW_POWER_LEVEL) { 4995 skd_halt(skdev); 4996 skdev->power_level = LOW_POWER_LEVEL; 4997 } 4998 #endif 4999 skspcl = &skdev->internal_skspcl; 5000 skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE); 5001 5002 skd_stop_device(skdev); 5003 5004 /* 5005 * Clear request queue. 5006 */ 5007 while (!SIMPLEQ_EMPTY(&skdev->waitqueue)) { 5008 pbuf = skd_get_queued_pbuf(skdev); 5009 skd_end_request_abnormal(skdev, pbuf, ECANCELED, 5010 SKD_IODONE_WNIOC); 5011 Dcmn_err(CE_NOTE, 5012 "detach: cancelled pbuf %p %ld <%s> %lld\n", 5013 (void *)pbuf, pbuf->x_xfer->x_nblks, 5014 (pbuf->dir & B_READ) ? "Read" : "Write", 5015 pbuf->x_xfer->x_blkno); 5016 } 5017 5018 skd_bd_detach(skdev); 5019 5020 skd_dealloc_resources(dip, skdev, skdev->progress, instance); 5021 5022 if ((skdev->flags & SKD_MUTEX_DESTROYED) == 0) { 5023 skd_destroy_mutex(skdev); 5024 } 5025 5026 ddi_soft_state_free(skd_state, instance); 5027 5028 skd_exit(); 5029 5030 break; 5031 5032 case DDI_SUSPEND: 5033 /* Block timer. */ 5034 5035 ADAPTER_STATE_LOCK(skdev); 5036 skdev->flags |= SKD_SUSPENDED; 5037 5038 /* Disable driver timer if last adapter. */ 5039 if (skdev->skd_timer_timeout_id != 0) { 5040 timer_id = skdev->skd_timer_timeout_id; 5041 skdev->skd_timer_timeout_id = 0; 5042 } 5043 ADAPTER_STATE_UNLOCK(skdev); 5044 5045 if (timer_id != 0) { 5046 (void) untimeout(timer_id); 5047 } 5048 5049 ddi_prop_remove_all(dip); 5050 5051 skd_halt(skdev); 5052 5053 break; 5054 default: 5055 rv1 = DDI_FAILURE; 5056 break; 5057 } 5058 5059 if (rv1 != DDI_SUCCESS) { 5060 cmn_err(CE_WARN, "!skd_detach, failed, rv1=%x", rv1); 5061 } else { 5062 Dcmn_err(CE_CONT, "skd_detach: exiting"); 5063 } 5064 5065 if (rv1 != DDI_SUCCESS) 5066 return (DDI_FAILURE); 5067 5068 return (rv1); 5069 } 5070 5071 /* 5072 * 5073 * Name: skd_devid_init, calls skd_setup_devid to setup 5074 * the device's devid structure. 5075 * 5076 * Inputs: arg - device state structure. 5077 * dip - dev_info structure. 5078 * devid - devid structure. 5079 * 5080 * Returns: Nothing. 5081 * 5082 */ 5083 /* ARGSUSED */ /* Upstream common source with other platforms. */ 5084 static int 5085 skd_devid_init(void *arg, dev_info_t *dip, ddi_devid_t *devid) 5086 { 5087 skd_device_t *skdev = arg; 5088 5089 (void) skd_setup_devid(skdev, devid); 5090 5091 return (0); 5092 } 5093 5094 /* 5095 * 5096 * Name: skd_bd_driveinfo, retrieves device's info. 5097 * 5098 * Inputs: drive - drive data structure. 5099 * arg - device state structure. 5100 * 5101 * Returns: Nothing. 5102 * 5103 */ 5104 static void 5105 skd_bd_driveinfo(void *arg, bd_drive_t *drive) 5106 { 5107 skd_device_t *skdev = arg; 5108 5109 drive->d_qsize = (skdev->queue_depth_limit * 4) / 5; 5110 drive->d_maxxfer = SKD_DMA_MAXXFER; 5111 drive->d_removable = B_FALSE; 5112 drive->d_hotpluggable = B_FALSE; 5113 drive->d_target = 0; 5114 drive->d_lun = 0; 5115 5116 if (skdev->inquiry_is_valid != 0) { 5117 drive->d_vendor = skdev->inq_vendor_id; 5118 drive->d_vendor_len = strlen(drive->d_vendor); 5119 5120 drive->d_product = skdev->inq_product_id; 5121 drive->d_product_len = strlen(drive->d_product); 5122 5123 drive->d_serial = skdev->inq_serial_num; 5124 drive->d_serial_len = strlen(drive->d_serial); 5125 5126 drive->d_revision = skdev->inq_product_rev; 5127 drive->d_revision_len = strlen(drive->d_revision); 5128 } 5129 } 5130 5131 /* 5132 * 5133 * Name: skd_bd_mediainfo, retrieves device media info. 5134 * 5135 * Inputs: arg - device state structure. 5136 * media - container for media info. 5137 * 5138 * Returns: Zero. 5139 * 5140 */ 5141 static int 5142 skd_bd_mediainfo(void *arg, bd_media_t *media) 5143 { 5144 skd_device_t *skdev = arg; 5145 5146 media->m_nblks = skdev->Nblocks; 5147 media->m_blksize = 512; 5148 media->m_pblksize = 4096; 5149 media->m_readonly = B_FALSE; 5150 media->m_solidstate = B_TRUE; 5151 5152 return (0); 5153 } 5154 5155 /* 5156 * 5157 * Name: skd_rw, performs R/W requests for blkdev driver. 5158 * 5159 * Inputs: skdev - device state structure. 5160 * xfer - tranfer structure. 5161 * dir - I/O direction. 5162 * 5163 * Returns: EAGAIN if device is not online. EIO if blkdev wants us to 5164 * be a dump device (for now). 5165 * Value returned by skd_start(). 5166 * 5167 */ 5168 static int 5169 skd_rw(skd_device_t *skdev, bd_xfer_t *xfer, int dir) 5170 { 5171 skd_buf_private_t *pbuf; 5172 5173 /* 5174 * The x_flags structure element is not defined in Oracle Solaris 5175 */ 5176 /* We'll need to fix this in order to support dump on this device. */ 5177 if (xfer->x_flags & BD_XFER_POLL) 5178 return (EIO); 5179 5180 if (skdev->state != SKD_DRVR_STATE_ONLINE) { 5181 Dcmn_err(CE_NOTE, "Device - not ONLINE"); 5182 5183 skd_request_fn_not_online(skdev); 5184 5185 return (EAGAIN); 5186 } 5187 5188 pbuf = kmem_zalloc(sizeof (skd_buf_private_t), KM_NOSLEEP); 5189 if (pbuf == NULL) 5190 return (ENOMEM); 5191 5192 WAITQ_LOCK(skdev); 5193 pbuf->dir = dir; 5194 pbuf->x_xfer = xfer; 5195 5196 skd_queue(skdev, pbuf); 5197 skdev->ios_queued++; 5198 WAITQ_UNLOCK(skdev); 5199 5200 skd_start(skdev); 5201 5202 return (0); 5203 } 5204 5205 /* 5206 * 5207 * Name: skd_bd_read, performs blkdev read requests. 5208 * 5209 * Inputs: arg - device state structure. 5210 * xfer - tranfer request structure. 5211 * 5212 * Returns: Value return by skd_rw(). 5213 * 5214 */ 5215 static int 5216 skd_bd_read(void *arg, bd_xfer_t *xfer) 5217 { 5218 return (skd_rw(arg, xfer, B_READ)); 5219 } 5220 5221 /* 5222 * 5223 * Name: skd_bd_write, performs blkdev write requests. 5224 * 5225 * Inputs: arg - device state structure. 5226 * xfer - tranfer request structure. 5227 * 5228 * Returns: Value return by skd_rw(). 5229 * 5230 */ 5231 static int 5232 skd_bd_write(void *arg, bd_xfer_t *xfer) 5233 { 5234 return (skd_rw(arg, xfer, B_WRITE)); 5235 } 5236