/*
 *
 *  skd.c: Solaris 11/10 Driver for sTec, Inc. S112x PCIe SSD card
 *
 *  Solaris driver is based on the Linux driver authored by:
 *
 *  Authors/Alphabetical:	Dragan Stancevic <dstancevic@stec-inc.com>
 *				Gordon Waidhofer <gwaidhofer@stec-inc.com>
 *				John Hamilton	 <jhamilton@stec-inc.com>
 */

/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2013 STEC, Inc.  All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2018, Joyent, Inc.
 * Copyright 2019 Western Digital Corporation.
 */

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#include <sys/file.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/cred.h>
#include <sys/modctl.h>
#include <sys/debug.h>
#include <sys/list.h>
#include <sys/sysmacros.h>
#include <sys/errno.h>
#include <sys/pcie.h>
#include <sys/pci.h>
#include <sys/ddi.h>
#include <sys/dditypes.h>
#include <sys/sunddi.h>
#include <sys/atomic.h>
#include <sys/mutex.h>
#include <sys/param.h>
#include <sys/devops.h>
#include <sys/blkdev.h>
#include <sys/queue.h>
#include <sys/scsi/impl/inquiry.h>

#include "skd_s1120.h"
#include "skd.h"

int		skd_dbg_level	  = 0;

void		*skd_state	  = NULL;
int		skd_disable_msi	  = 0;
int		skd_disable_msix  = 0;

/* Initialized in _init() and tunable, see _init(). */
clock_t		skd_timer_ticks;
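
/*
 * The globals above are patchable in the usual illumos fashion, e.g.
 * via /etc/system (values below are examples only):
 *
 *	set skd:skd_dbg_level = 1
 *	set skd:skd_disable_msix = 1
 */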

/* I/O DMA attributes structure. */
static ddi_dma_attr_t skd_64bit_io_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	SKD_DMA_LOW_ADDRESS,		/* low DMA address range */
	SKD_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
	SKD_DMA_XFER_COUNTER,		/* DMA counter register */
	SKD_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment */
	SKD_DMA_BURSTSIZES,		/* DMA burstsizes */
	SKD_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
	SKD_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
	SKD_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	SKD_DMA_SG_LIST_LENGTH,		/* s/g list length */
	SKD_DMA_GRANULARITY,		/* granularity of device */
	SKD_DMA_XFER_FLAGS		/* DMA transfer flags */
};

int skd_isr_type = -1;

#define	SKD_MAX_QUEUE_DEPTH	    255
#define	SKD_MAX_QUEUE_DEPTH_DEFAULT 64
int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;

#define	SKD_MAX_REQ_PER_MSG	    14
#define	SKD_MAX_REQ_PER_MSG_DEFAULT 1
int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;

#define	SKD_MAX_N_SG_PER_REQ	    4096
int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;

static int skd_sys_quiesce_dev(dev_info_t *);
static int skd_quiesce_dev(skd_device_t *);
static int skd_list_skmsg(skd_device_t *, int);
static int skd_list_skreq(skd_device_t *, int);
static int skd_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int skd_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static int skd_format_internal_skspcl(struct skd_device *skdev);
static void skd_start(skd_device_t *);
static void skd_destroy_mutex(skd_device_t *skdev);
static void skd_enable_interrupts(struct skd_device *);
static void skd_request_fn_not_online(skd_device_t *skdev);
static void skd_send_internal_skspcl(struct skd_device *,
    struct skd_special_context *, uint8_t);
static void skd_queue(skd_device_t *, skd_buf_private_t *);
static void *skd_alloc_dma_mem(skd_device_t *, dma_mem_t *, uint8_t);
static void skd_release_intr(skd_device_t *skdev);
static void skd_isr_fwstate(struct skd_device *skdev);
static void skd_isr_msg_from_dev(struct skd_device *skdev);
static void skd_soft_reset(struct skd_device *skdev);
static void skd_refresh_device_data(struct skd_device *skdev);
static void skd_update_props(skd_device_t *, dev_info_t *);
static void skd_end_request_abnormal(struct skd_device *, skd_buf_private_t *,
    int, int);
static char *skd_pci_info(struct skd_device *skdev, char *str, size_t len);

static skd_buf_private_t *skd_get_queued_pbuf(skd_device_t *);

static void skd_bd_driveinfo(void *arg, bd_drive_t *drive);
static int  skd_bd_mediainfo(void *arg, bd_media_t *media);
static int  skd_bd_read(void *arg, bd_xfer_t *xfer);
static int  skd_bd_write(void *arg, bd_xfer_t *xfer);
static int  skd_devid_init(void *arg, dev_info_t *, ddi_devid_t *);


static bd_ops_t skd_bd_ops = {
	BD_OPS_CURRENT_VERSION,
	skd_bd_driveinfo,
	skd_bd_mediainfo,
	skd_devid_init,
	NULL,			/* sync_cache */
	skd_bd_read,
	skd_bd_write,
};
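
/*
 * blkdev entry points.  skd does not implement its own cb_ops; instead
 * these callbacks are registered with the generic blkdev framework
 * (presumably via bd_alloc_handle()/bd_attach_handle() at attach time),
 * which creates the block device nodes and issues bd_xfer_t requests to
 * skd_bd_read() and skd_bd_write().  sync_cache is NULL here; cache
 * flushes go through the internal SYNCHRONIZE_CACHE path instead.
 */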

static ddi_device_acc_attr_t	dev_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Solaris module loading/unloading structures
 */
struct dev_ops skd_dev_ops = {
	DEVO_REV,			/* devo_rev */
	0,				/* refcnt */
	ddi_no_info,			/* getinfo */
	nulldev,			/* identify */
	nulldev,			/* probe */
	skd_attach,			/* attach */
	skd_detach,			/* detach */
	nodev,				/* reset */
	NULL,				/* char/block ops */
	NULL,				/* bus operations */
	NULL,				/* power management */
	skd_sys_quiesce_dev		/* quiesce */
};

static struct modldrv modldrv = {
	&mod_driverops,			/* type of module: driver */
	"sTec skd v" DRV_VER_COMPL,	/* name of module */
	&skd_dev_ops			/* driver dev_ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};

/*
 * sTec-required wrapper for debug printing.
 */
/*PRINTFLIKE2*/
static inline void
Dcmn_err(int lvl, const char *fmt, ...)
{
	va_list ap;

	if (skd_dbg_level == 0)
		return;

	va_start(ap, fmt);
	vcmn_err(lvl, fmt, ap);
	va_end(ap);
}

/*
 * Solaris module loading/unloading routines
 */

/*
 *
 * Name:	_init, performs initial installation
 *
 * Inputs:	None.
 *
 * Returns:	Returns the value returned by ddi_soft_state_init() on a
 *		failure to create the device state structure, or the result
 *		of the module install routines.
 *
 */
int
_init(void)
{
	int		rval = 0;
	int		tgts = 0;

	tgts |= 0x02;
	tgts |= 0x08;	/* In #ifdef NEXENTA block from original sTec drop. */

	/*
	 * drv_usectohz() is a function, so the tunable can't be
	 * initialized at instantiation.
	 */
	skd_timer_ticks = drv_usectohz(1000000);

	Dcmn_err(CE_NOTE,
	    "<# Installing skd Driver dbg-lvl=%d %s %x>",
	    skd_dbg_level, DRV_BUILD_ID, tgts);

	rval = ddi_soft_state_init(&skd_state, sizeof (skd_device_t), 0);
	if (rval != DDI_SUCCESS)
		return (rval);

	bd_mod_init(&skd_dev_ops);

	rval = mod_install(&modlinkage);
	if (rval != DDI_SUCCESS) {
		ddi_soft_state_fini(&skd_state);
		bd_mod_fini(&skd_dev_ops);
	}

	return (rval);
}

/*
 *
 * Name:	_info, returns information about loadable module.
 *
 * Inputs:	modinfo, pointer to module information structure.
 *
 * Returns:	Value returned by mod_info().
 *
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * _fini	Prepares a module for unloading. It is called when the system
 *		wants to unload a module. If the module determines that it can
 *		be unloaded, then _fini() returns the value returned by
 *		mod_remove(). Upon successful return from _fini() no other
 *		routine in the module will be called before _init() is called.
 *
 * Inputs:	None.
 *
 * Returns:	DDI_SUCCESS or DDI_FAILURE.
 *
 */
int
_fini(void)
{
	int rval;

	rval = mod_remove(&modlinkage);
	if (rval == DDI_SUCCESS) {
		ddi_soft_state_fini(&skd_state);
		bd_mod_fini(&skd_dev_ops);
	}

	return (rval);
}
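
/*
 * Note that mod_remove() fails (and _fini() leaves everything in place)
 * while any instance is still attached, so the soft-state and blkdev
 * teardown above only runs once the driver is truly unloadable.
 */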

/*
 * Solaris Register read/write routines
 */

/*
 *
 * Name:	skd_reg_write64, writes a 64-bit value to specified address
 *
 * Inputs:	skdev		- device state structure.
 *		val		- 64-bit value to be written.
 *		offset		- offset from PCI base address.
 *
 * Returns:	Nothing.
 *
 */
/*
 * Local vars are to keep lint silent.  Any compiler worth its weight will
 * optimize it all right out...
 */
static inline void
skd_reg_write64(struct skd_device *skdev, uint64_t val, uint32_t offset)
{
	uint64_t *addr;

	ASSERT((offset & 0x7) == 0);
	/* LINTED */
	addr = (uint64_t *)(skdev->dev_iobase + offset);
	ddi_put64(skdev->dev_handle, addr, val);
}

/*
 *
 * Name:	skd_reg_read32, reads a 32-bit value from specified address
 *
 * Inputs:	skdev		- device state structure.
 *		offset		- offset from PCI base address.
 *
 * Returns:	val, 32-bit value read from specified PCI address.
 *
 */
static inline uint32_t
skd_reg_read32(struct skd_device *skdev, uint32_t offset)
{
	uint32_t *addr;

	ASSERT((offset & 0x3) == 0);
	/* LINTED */
	addr = (uint32_t *)(skdev->dev_iobase + offset);
	return (ddi_get32(skdev->dev_handle, addr));
}

/*
 *
 * Name:	skd_reg_write32, writes a 32-bit value to specified address
 *
 * Inputs:	skdev		- device state structure.
 *		val		- value to be written.
 *		offset		- offset from PCI base address.
 *
 * Returns:	Nothing.
 *
 */
static inline void
skd_reg_write32(struct skd_device *skdev, uint32_t val, uint32_t offset)
{
	uint32_t *addr;

	ASSERT((offset & 0x3) == 0);
	/* LINTED */
	addr = (uint32_t *)(skdev->dev_iobase + offset);
	ddi_put32(skdev->dev_handle, addr, val);
}


/*
 * Solaris skd routines
 */

/*
 *
 * Name:	skd_name, generates the name of the driver.
 *
 * Inputs:	skdev	- device state structure
 *
 * Returns:	char pointer to generated driver name.
 *
 */
static const char *
skd_name(struct skd_device *skdev)
{
	(void) snprintf(skdev->id_str, sizeof (skdev->id_str), "%s:", DRV_NAME);

	return (skdev->id_str);
}

/*
 *
 * Name:	skd_pci_find_capability, searches the PCI capability
 *		list for the specified capability.
 *
 * Inputs:	skdev		- device state structure.
 *		cap		- capability sought.
 *
 * Returns:	Returns position where capability was found.
 *		If not found, returns zero.
 *
 */
static int
skd_pci_find_capability(struct skd_device *skdev, int cap)
{
	uint16_t	status;
	uint8_t		pos, id, hdr;
	int		ttl = 48;

	status = pci_config_get16(skdev->pci_handle, PCI_CONF_STAT);

	if (!(status & PCI_STAT_CAP))
		return (0);

	hdr = pci_config_get8(skdev->pci_handle, PCI_CONF_HEADER);

	if ((hdr & PCI_HEADER_TYPE_M) != 0)
		return (0);

	pos = pci_config_get8(skdev->pci_handle, PCI_CONF_CAP_PTR);

	while (ttl-- && pos >= 0x40) {
		pos &= ~3;
		id = pci_config_get8(skdev->pci_handle, pos+PCI_CAP_ID);
		if (id == 0xff)
			break;
		if (id == cap)
			return (pos);
		pos = pci_config_get8(skdev->pci_handle, pos+PCI_CAP_NEXT_PTR);
	}

	return (0);
}
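
/*
 * The walk above follows the standard PCI capability chain: each entry
 * holds an ID byte and a next pointer, the chain lives above offset 0x40
 * in config space, and ttl bounds the walk so a corrupt list can't loop
 * forever.  A typical caller might look up the PCI Express capability,
 * e.g. skd_pci_find_capability(skdev, PCI_CAP_ID_PCI_E).
 */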

/*
 *
 * Name:	skd_io_done, called to conclude an I/O operation.
 *
 * Inputs:	skdev		- device state structure.
 *		pbuf		- I/O request
 *		error		- contains the error value.
 *		mode		- debug only.
 *
 * Returns:	Nothing.
 *
 */
static void
skd_io_done(skd_device_t *skdev, skd_buf_private_t *pbuf,
    int error, int mode)
{
	bd_xfer_t *xfer;

	ASSERT(pbuf != NULL);

	xfer = pbuf->x_xfer;

	switch (mode) {
	case SKD_IODONE_WIOC:
		skdev->iodone_wioc++;
		break;
	case SKD_IODONE_WNIOC:
		skdev->iodone_wnioc++;
		break;
	case SKD_IODONE_WDEBUG:
		skdev->iodone_wdebug++;
		break;
	default:
		skdev->iodone_unknown++;
	}

	if (error) {
		skdev->ios_errors++;
		cmn_err(CE_WARN,
		    "!%s:skd_io_done:ERR=%d %lld-%ld %s", skdev->name,
		    error, xfer->x_blkno, xfer->x_nblks,
		    (pbuf->dir & B_READ) ? "Read" : "Write");
	}

	kmem_free(pbuf, sizeof (skd_buf_private_t));

	bd_xfer_done(xfer, error);
}

/*
 * QUIESCE DEVICE
 */

/*
 *
 * Name:	skd_sys_quiesce_dev, quiets the device
 *
 * Inputs:	dip		- dev info structure
 *
 * Returns:	Zero.
 *
 */
static int
skd_sys_quiesce_dev(dev_info_t *dip)
{
	skd_device_t	*skdev;

	skdev = ddi_get_soft_state(skd_state, ddi_get_instance(dip));

	/* make sure Dcmn_err() doesn't actually print anything */
	skd_dbg_level = 0;

	skd_disable_interrupts(skdev);
	skd_soft_reset(skdev);

	return (0);
}

/*
 *
 * Name:	skd_quiesce_dev, quiets the device, but doesn't really do much.
 *
 * Inputs:	skdev		- Device state.
 *
 * Returns:	-EINVAL if device is not in proper state otherwise
 *		returns zero.
 *
 */
static int
skd_quiesce_dev(skd_device_t *skdev)
{
	int rc = 0;

	if (skd_dbg_level)
		Dcmn_err(CE_NOTE, "skd_quiesce_dev:");

	switch (skdev->state) {
	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
		Dcmn_err(CE_NOTE, "%s: stopping queue", skdev->name);
		break;
	case SKD_DRVR_STATE_ONLINE:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
	case SKD_DRVR_STATE_STARTING:
	case SKD_DRVR_STATE_RESTARTING:
	case SKD_DRVR_STATE_RESUMING:
	default:
		rc = -EINVAL;
		cmn_err(CE_NOTE, "state [%d] not implemented", skdev->state);
	}

	return (rc);
}
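
/*
 * skd_sys_quiesce_dev() above is the quiesce(9E) entry point used by
 * fast reboot: it runs single-threaded with interrupts disabled, so it
 * must not take locks or sleep, which is why it only masks interrupts
 * and soft-resets the card.  skd_quiesce_dev() is internal driver state
 * management, despite the similar name.
 */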

/*
 * UNQUIESCE DEVICE:
 * Note: Assumes lock is held to protect device state.
 */
/*
 *
 * Name:	skd_unquiesce_dev, awakens the device
 *
 * Inputs:	skdev		- Device state.
 *
 * Returns:	-EINVAL if device is not in proper state otherwise
 *		returns zero.
 *
 */
static int
skd_unquiesce_dev(struct skd_device *skdev)
{
	Dcmn_err(CE_NOTE, "skd_unquiesce_dev:");

	skd_log_skdev(skdev, "unquiesce");
	if (skdev->state == SKD_DRVR_STATE_ONLINE) {
		Dcmn_err(CE_NOTE, "**** device already ONLINE");

		return (0);
	}
	if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
		/*
		 * If there has been a state change to other than
		 * ONLINE, we will rely on controller state change
		 * to come back online and restart the queue.
		 * The BUSY state means that driver is ready to
		 * continue normal processing but waiting for controller
		 * to become available.
		 */
		skdev->state = SKD_DRVR_STATE_BUSY;
		Dcmn_err(CE_NOTE, "drive BUSY state\n");

		return (0);
	}
	/*
	 * Drive just came online, driver is either in startup,
	 * paused performing a task, or busy waiting for hardware.
	 */
	switch (skdev->state) {
	case SKD_DRVR_STATE_PAUSED:
	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
	case SKD_DRVR_STATE_STARTING:
	case SKD_DRVR_STATE_RESTARTING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_IDLE:
	case SKD_DRVR_STATE_LOAD:
		skdev->state = SKD_DRVR_STATE_ONLINE;
		Dcmn_err(CE_NOTE, "%s: sTec s1120 ONLINE", skdev->name);
		Dcmn_err(CE_NOTE, "%s: Starting request queue", skdev->name);
		Dcmn_err(CE_NOTE,
		    "%s: queue depth limit=%d hard=%d soft=%d lowat=%d",
		    skdev->name,
		    skdev->queue_depth_limit,
		    skdev->hard_queue_depth_limit,
		    skdev->soft_queue_depth_limit,
		    skdev->queue_depth_lowat);

		skdev->gendisk_on = 1;
		cv_signal(&skdev->cv_waitq);
		break;
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		cmn_err(CE_NOTE, "**** driver state %d, not implemented\n",
		    skdev->state);
		return (-EBUSY);
	}

	return (0);
}

/*
 * READ/WRITE REQUESTS
 */

/*
 *
 * Name:	skd_blkdev_preop_sg_list, builds the S/G list from info
 *		passed in by the blkdev driver.
 *
 * Inputs:	skdev		- device state structure.
 *		skreq		- request structure.
 *		sg_byte_count	- data transfer byte count.
 *
 * Returns:	Nothing.
 *
 */
/*ARGSUSED*/
static void
skd_blkdev_preop_sg_list(struct skd_device *skdev,
    struct skd_request_context *skreq, uint32_t *sg_byte_count)
{
	bd_xfer_t		*xfer;
	skd_buf_private_t	*pbuf;
	int			i, bcount = 0;
	uint_t			n_sg;

	*sg_byte_count = 0;

	ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
	    skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST);

	pbuf = skreq->pbuf;
	ASSERT(pbuf != NULL);

	xfer = pbuf->x_xfer;
	n_sg = xfer->x_ndmac;

	ASSERT(n_sg <= skdev->sgs_per_request);

	skreq->n_sg = n_sg;

	skreq->io_dma_handle = xfer->x_dmah;

	skreq->total_sg_bcount = 0;

	for (i = 0; i < n_sg; i++) {
		ddi_dma_cookie_t *cookiep = &xfer->x_dmac;
		struct fit_sg_descriptor *sgd;
		uint32_t cnt = (uint32_t)cookiep->dmac_size;

		bcount += cnt;

		sgd			= &skreq->sksg_list[i];
		sgd->control		= FIT_SGD_CONTROL_NOT_LAST;
		sgd->byte_count		= cnt;
		sgd->host_side_addr	= cookiep->dmac_laddress;
		sgd->dev_side_addr	= 0;	/* not used */
		*sg_byte_count		+= cnt;

		skreq->total_sg_bcount += cnt;

		if ((i + 1) != n_sg)
			ddi_dma_nextcookie(skreq->io_dma_handle, &xfer->x_dmac);
	}

	skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
	skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;

	(void) ddi_dma_sync(skreq->sksg_dma_address.dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
}

/*
 *
 * Name:	skd_blkdev_postop_sg_list, deallocates DMA
 *
 * Inputs:	skdev		- device state structure.
 *		skreq		- skreq data structure.
 *
 * Returns:	Nothing.
 *
 */
/* ARGSUSED */	/* Upstream common source with other platforms. */
static void
skd_blkdev_postop_sg_list(struct skd_device *skdev,
    struct skd_request_context *skreq)
{
	/*
	 * restore the next ptr for next IO request so we
	 * don't have to set it every time.
	 */
	skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
	    skreq->sksg_dma_address.cookies->dmac_laddress +
	    ((skreq->n_sg) * sizeof (struct fit_sg_descriptor));
}
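
/*
 * Each request owns a preallocated array of fit_sg_descriptor entries
 * (sksg_list) that is itself DMA-mapped, so the card can chase
 * next_desc_ptr from one descriptor to the next.  preop marks the final
 * descriptor FIT_SGD_CONTROL_LAST and zeroes its next pointer; postop
 * re-links that next pointer to the following descriptor slot so the
 * fully chained list doesn't have to be rebuilt for the next I/O.
 */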

/*
 *
 * Name:	skd_start, initiates an I/O.
 *
 * Inputs:	skdev		- device state structure.
 *
 * Returns:	EAGAIN if device is not ONLINE.
 *		On error, if the caller is the blkdev driver, return
 *		the error value. Otherwise, return zero.
 *
 */
/* Upstream common source with other platforms. */
static void
skd_start(skd_device_t *skdev)
{
	struct skd_fitmsg_context	*skmsg = NULL;
	struct fit_msg_hdr		*fmh = NULL;
	struct skd_request_context	*skreq = NULL;
	struct waitqueue		*waitq = &skdev->waitqueue;
	struct skd_scsi_request		*scsi_req;
	skd_buf_private_t		*pbuf = NULL;
	int				bcount;

	uint32_t			lba;
	uint32_t			count;
	uint32_t			timo_slot;
	void				*cmd_ptr;
	uint32_t			sg_byte_count = 0;

	/*
	 * Stop conditions:
	 *  - There are no more native requests
	 *  - There are already the maximum number of requests in progress
	 *  - There are no more skd_request_context entries
	 *  - There are no more FIT msg buffers
	 */
	for (;;) {
		/* Are too many requests already in progress? */
		if (skdev->queue_depth_busy >= skdev->queue_depth_limit) {
			Dcmn_err(CE_NOTE, "qdepth %d, limit %d\n",
			    skdev->queue_depth_busy,
			    skdev->queue_depth_limit);
			break;
		}

		WAITQ_LOCK(skdev);
		if (SIMPLEQ_EMPTY(waitq)) {
			WAITQ_UNLOCK(skdev);
			break;
		}

		/* Is a skd_request_context available? */
		skreq = skdev->skreq_free_list;
		if (skreq == NULL) {
			WAITQ_UNLOCK(skdev);
			break;
		}

		ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
		ASSERT((skreq->id & SKD_ID_INCR) == 0);

		skdev->skreq_free_list = skreq->next;

		skreq->state = SKD_REQ_STATE_BUSY;
		skreq->id += SKD_ID_INCR;

		/* Start a new FIT msg if there is none in progress. */
		if (skmsg == NULL) {
			/* Are there any FIT msg buffers available? */
			skmsg = skdev->skmsg_free_list;
			if (skmsg == NULL) {
				WAITQ_UNLOCK(skdev);
				break;
			}

			ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
			ASSERT((skmsg->id & SKD_ID_INCR) == 0);

			skdev->skmsg_free_list = skmsg->next;

			skmsg->state = SKD_MSG_STATE_BUSY;
			skmsg->id += SKD_ID_INCR;

			/* Initialize the FIT msg header */
			fmh = (struct fit_msg_hdr *)skmsg->msg_buf64;
			bzero(fmh, sizeof (*fmh));	/* Too expensive */
			fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
			skmsg->length = sizeof (struct fit_msg_hdr);
		}

		/*
		 * At this point we are committed to either start or reject
		 * the native request. Note that a FIT msg may have just been
		 * started but contains no SoFIT requests yet.
		 * Now - dequeue pbuf.
		 */
		pbuf = skd_get_queued_pbuf(skdev);
		WAITQ_UNLOCK(skdev);

		skreq->pbuf = pbuf;
		lba = pbuf->x_xfer->x_blkno;
		count = pbuf->x_xfer->x_nblks;
		skreq->did_complete = 0;

		skreq->fitmsg_id = skmsg->id;

		Dcmn_err(CE_NOTE,
		    "pbuf=%p lba=%u(0x%x) count=%u(0x%x) dir=%x\n",
		    (void *)pbuf, lba, lba, count, count, pbuf->dir);
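
		/*
		 * The request is transcoded into a SCSI READ(10)/WRITE(10)
		 * CDB below: opcode 0x28 or 0x2a, big-endian LBA in CDB
		 * bytes 2-5, and a big-endian block count in bytes 7-8
		 * (rewritten after the S/G list is built, from the actual
		 * byte count rounded up to 512-byte blocks).
		 */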

		/*
		 * Transcode the request.
		 */
		cmd_ptr = &skmsg->msg_buf[skmsg->length];
		bzero(cmd_ptr, 32);	/* This is too expensive */

		scsi_req = cmd_ptr;
		scsi_req->hdr.tag = skreq->id;
		scsi_req->hdr.sg_list_dma_address =
		    cpu_to_be64(skreq->sksg_dma_address.cookies->dmac_laddress);
		scsi_req->cdb[1] = 0;
		scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
		scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
		scsi_req->cdb[4] = (lba & 0xff00) >> 8;
		scsi_req->cdb[5] = (lba & 0xff);
		scsi_req->cdb[6] = 0;
		scsi_req->cdb[7] = (count & 0xff00) >> 8;
		scsi_req->cdb[8] = count & 0xff;
		scsi_req->cdb[9] = 0;

		if (pbuf->dir & B_READ) {
			scsi_req->cdb[0] = 0x28;
			skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
		} else {
			scsi_req->cdb[0] = 0x2a;
			skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
		}

		skd_blkdev_preop_sg_list(skdev, skreq, &sg_byte_count);

		scsi_req->hdr.sg_list_len_bytes = cpu_to_be32(sg_byte_count);

		bcount = (sg_byte_count + 511) / 512;
		scsi_req->cdb[7] = (bcount & 0xff00) >> 8;
		scsi_req->cdb[8] = bcount & 0xff;

		Dcmn_err(CE_NOTE,
		    "skd_start: pbuf=%p skreq->id=%x opc=%x ====>>>>>",
		    (void *)pbuf, skreq->id, *scsi_req->cdb);

		skmsg->length += sizeof (struct skd_scsi_request);
		fmh->num_protocol_cmds_coalesced++;

		/*
		 * Update the active request counts.
		 * Capture the timeout timestamp.
		 */
		skreq->timeout_stamp = skdev->timeout_stamp;
		timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;

		atomic_inc_32(&skdev->timeout_slot[timo_slot]);
		atomic_inc_32(&skdev->queue_depth_busy);

		Dcmn_err(CE_NOTE, "req=0x%x busy=%d timo_slot=%d",
		    skreq->id, skdev->queue_depth_busy, timo_slot);

		/*
		 * If the FIT msg buffer is full send it.
		 */
		if (skmsg->length >= SKD_N_FITMSG_BYTES ||
		    fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {

			atomic_inc_64(&skdev->active_cmds);
			pbuf->skreq = skreq;

			skdev->fitmsg_sent1++;
			skd_send_fitmsg(skdev, skmsg);

			skmsg = NULL;
			fmh = NULL;
		}
	}

	/*
	 * Is a FIT msg in progress? If it is empty put the buffer back
	 * on the free list. If it is non-empty send what we got.
	 * This minimizes latency when there are fewer requests than
	 * what fits in a FIT msg.
	 */
	if (skmsg != NULL) {
		ASSERT(skmsg->length > sizeof (struct fit_msg_hdr));
		Dcmn_err(CE_NOTE, "sending msg=%p, len %d",
		    (void *)skmsg, skmsg->length);

		skdev->active_cmds++;

		skdev->fitmsg_sent2++;
		skd_send_fitmsg(skdev, skmsg);
	}
}

/*
 *
 * Name:	skd_end_request
 *
 * Inputs:	skdev		- device state structure.
 *		skreq		- request structure.
 *		error		- I/O error value.
 *
 * Returns:	Nothing.
 *
 */
static void
skd_end_request(struct skd_device *skdev,
    struct skd_request_context *skreq, int error)
{
	skdev->ios_completed++;
	skd_io_done(skdev, skreq->pbuf, error, SKD_IODONE_WIOC);
	skreq->pbuf = NULL;
	skreq->did_complete = 1;
}
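
/*
 * A note on coalescing (a sketch of the arithmetic, not normative): a
 * FIT message is a fit_msg_hdr followed by one skd_scsi_request per
 * command, and skd_start() keeps appending commands until either
 * skmsg->length reaches SKD_N_FITMSG_BYTES or
 * num_protocol_cmds_coalesced reaches skd_max_req_per_msg.  With the
 * default skd_max_req_per_msg of 1 every message carries a single
 * command; raising the tunable (up to SKD_MAX_REQ_PER_MSG) trades a
 * little latency for fewer doorbell writes in skd_send_fitmsg().
 */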

/*
 *
 * Name:	skd_end_request_abnormal
 *
 * Inputs:	skdev		- device state structure.
 *		pbuf		- I/O request.
 *		error		- I/O error value.
 *		mode		- debug
 *
 * Returns:	Nothing.
 *
 */
static void
skd_end_request_abnormal(skd_device_t *skdev, skd_buf_private_t *pbuf,
    int error, int mode)
{
	skd_io_done(skdev, pbuf, error, mode);
}

/*
 *
 * Name:	skd_request_fn_not_online, handles the condition
 *		of the device not being online.
 *
 * Inputs:	skdev		- device state structure.
 *
 * Returns:	nothing (void).
 *
 */
static void
skd_request_fn_not_online(skd_device_t *skdev)
{
	int error;
	skd_buf_private_t *pbuf;

	ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);

	skd_log_skdev(skdev, "req_not_online");

	switch (skdev->state) {
	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
	case SKD_DRVR_STATE_STARTING:
	case SKD_DRVR_STATE_RESTARTING:
	case SKD_DRVR_STATE_WAIT_BOOT:
		/*
		 * In case of starting, we haven't started the queue,
		 * so we can't get here... but requests are
		 * possibly hanging out waiting for us because we
		 * reported the dev/skd/0 already. They'll wait
		 * forever if connect doesn't complete.
		 * What to do??? delay dev/skd/0 ??
		 */
	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		return;

	case SKD_DRVR_STATE_BUSY_SANITIZE:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		error = -EIO;
		break;
	}

	/*
	 * If we get here, terminate all pending block requests
	 * with EIO and any scsi pass thru with appropriate sense
	 */
	ASSERT(WAITQ_LOCK_HELD(skdev));
	if (SIMPLEQ_EMPTY(&skdev->waitqueue))
		return;

	while ((pbuf = skd_get_queued_pbuf(skdev)))
		skd_end_request_abnormal(skdev, pbuf, error, SKD_IODONE_WNIOC);

	cv_signal(&skdev->cv_waitq);
}

/*
 * TIMER
 */

static void skd_timer_tick_not_online(struct skd_device *skdev);

/*
 *
 * Name:	skd_timer_tick, monitors requests for timeouts.
 *
 * Inputs:	skdev		- device state structure.
 *
 * Returns:	Nothing.
 *
 */
static void
skd_timer_tick(skd_device_t *skdev)
{
	uint32_t timo_slot;

	skdev->timer_active = 1;

	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
		skd_timer_tick_not_online(skdev);
		goto timer_func_out;
	}

	skdev->timeout_stamp++;
	timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;

	/*
	 * All requests that happened during the previous use of
	 * this slot should be done by now. The previous use was
	 * over 7 seconds ago.
	 */
	if (skdev->timeout_slot[timo_slot] == 0) {
		goto timer_func_out;
	}

	/* Something is overdue */
	Dcmn_err(CE_NOTE, "found %d timeouts, draining busy=%d",
	    skdev->timeout_slot[timo_slot],
	    skdev->queue_depth_busy);
	skdev->timer_countdown = SKD_TIMER_SECONDS(3);
	skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
	skdev->timo_slot = timo_slot;

timer_func_out:
	skdev->timer_active = 0;
}
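
/*
 * The timeout machinery is a coarse bucket scheme: every one-second tick
 * advances timeout_stamp, and each in-flight request is counted in the
 * bucket timeout_slot[stamp & SKD_TIMEOUT_SLOT_MASK] it was issued
 * under.  When the stamp wraps back around to a bucket that still has a
 * nonzero count, some request from a full cycle ago (the "over 7
 * seconds" above, assuming eight slots) has not completed, so the driver
 * enters DRAINING_TIMEOUT rather than tracking per-request deadlines.
 */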

/*
 *
 * Name:	skd_timer_tick_not_online, handles various device
 *		state transitions.
 *
 * Inputs:	skdev		- device state structure.
 *
 * Returns:	Nothing.
 *
 */
static void
skd_timer_tick_not_online(struct skd_device *skdev)
{
	Dcmn_err(CE_NOTE, "skd_timer_tick_not_online: state=%d tmo=%d",
	    skdev->state, skdev->timer_countdown);

	ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);

	switch (skdev->state) {
	case SKD_DRVR_STATE_IDLE:
	case SKD_DRVR_STATE_LOAD:
		break;
	case SKD_DRVR_STATE_BUSY_SANITIZE:
		cmn_err(CE_WARN, "!drive busy sanitize[%x], driver[%x]\n",
		    skdev->drive_state, skdev->state);
		break;

	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
		Dcmn_err(CE_NOTE, "busy[%x], countdown=%d\n",
		    skdev->state, skdev->timer_countdown);
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		cmn_err(CE_WARN, "!busy[%x], timedout=%d, restarting device.",
		    skdev->state, skdev->timer_countdown);
		skd_restart_device(skdev);
		break;

	case SKD_DRVR_STATE_WAIT_BOOT:
	case SKD_DRVR_STATE_STARTING:
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		/*
		 * For now, we fault the drive.  Could attempt resets to
		 * recover at some point.
		 */
		skdev->state = SKD_DRVR_STATE_FAULT;

		cmn_err(CE_WARN, "!(%s): DriveFault Connect Timeout (%x)",
		    skd_name(skdev), skdev->drive_state);

		/* start the queue so we can respond with error to requests */
		skd_start(skdev);

		/* wakeup anyone waiting for startup complete */
		skdev->gendisk_on = -1;

		cv_signal(&skdev->cv_waitq);
		break;


	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
		break;

	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		cmn_err(CE_WARN,
		    "!%s: draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
		    skdev->name,
		    skdev->timo_slot,
		    skdev->timer_countdown,
		    skdev->queue_depth_busy,
		    skdev->timeout_slot[skdev->timo_slot]);
		/* if the slot has cleared we can let the I/O continue */
		if (skdev->timeout_slot[skdev->timo_slot] == 0) {
			Dcmn_err(CE_NOTE, "Slot drained, starting queue.");
			skdev->state = SKD_DRVR_STATE_ONLINE;
			skd_start(skdev);
			return;
		}
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		skd_restart_device(skdev);
		break;

	case SKD_DRVR_STATE_RESTARTING:
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;

			return;
		}
		/*
		 * For now, we fault the drive. Could attempt resets to
		 * recover at some point.
		 */
		skdev->state = SKD_DRVR_STATE_FAULT;
		cmn_err(CE_WARN, "!(%s): DriveFault Reconnect Timeout (%x)\n",
		    skd_name(skdev), skdev->drive_state);

		/*
		 * Recovering does two things:
		 * 1. completes IO with error
		 * 2. reclaims dma resources
		 * When is it safe to recover requests?
		 * - if the drive state is faulted
		 * - if the state is still soft reset after our timeout
		 * - if the drive registers are dead (state = FF)
		 */

		if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
		    (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
		    (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK)) {
			/*
			 * It never came out of soft reset. Try to
			 * recover the requests and then let them
			 * fail. This is to mitigate hung processes.
			 *
			 * Acquire the interrupt lock since these lists are
			 * manipulated by interrupt handlers.
			 */
			ASSERT(!WAITQ_LOCK_HELD(skdev));
			INTR_LOCK(skdev);
			skd_recover_requests(skdev);
			INTR_UNLOCK(skdev);
		}
		/* start the queue so we can respond with error to requests */
		skd_start(skdev);
		/* wakeup anyone waiting for startup complete */
		skdev->gendisk_on = -1;
		cv_signal(&skdev->cv_waitq);
		break;

	case SKD_DRVR_STATE_RESUMING:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		break;
	}
}

/*
 *
 * Name:	skd_timer, kicks off the timer processing.
 *
 * Inputs:	skdev		- device state structure.
 *
 * Returns:	Nothing.
 *
 */
static void
skd_timer(void *arg)
{
	skd_device_t *skdev = (skd_device_t *)arg;

	/* Someone set us to 0, don't bother rescheduling. */
	ADAPTER_STATE_LOCK(skdev);
	if (skdev->skd_timer_timeout_id != 0) {
		ADAPTER_STATE_UNLOCK(skdev);
		/* Pardon the drop-and-then-acquire logic here. */
		skd_timer_tick(skdev);
		ADAPTER_STATE_LOCK(skdev);
		/* Restart timer, if not being stopped. */
		if (skdev->skd_timer_timeout_id != 0) {
			skdev->skd_timer_timeout_id =
			    timeout(skd_timer, arg, skd_timer_ticks);
		}
	}
	ADAPTER_STATE_UNLOCK(skdev);
}

/*
 *
 * Name:	skd_start_timer, kicks off the 1-second timer.
 *
 * Inputs:	skdev		- device state structure.
 *
 * Returns:	Zero.
 *
 */
static void
skd_start_timer(struct skd_device *skdev)
{
	/* Start one second driver timer. */
	ADAPTER_STATE_LOCK(skdev);
	ASSERT(skdev->skd_timer_timeout_id == 0);

	/*
	 * Do first "timeout tick" right away, but not in this
	 * thread.
	 */
	skdev->skd_timer_timeout_id = timeout(skd_timer, skdev, 1);
	ADAPTER_STATE_UNLOCK(skdev);
}

/*
 * INTERNAL REQUESTS -- generated by driver itself
 */

/*
 *
 * Name:	skd_format_internal_skspcl, sets up the internal
 *		FIT request message.
 *
 * Inputs:	skdev		- device state structure.
 *
 * Returns:	One.
 *
 */
static int
skd_format_internal_skspcl(struct skd_device *skdev)
{
	struct skd_special_context *skspcl = &skdev->internal_skspcl;
	struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
	struct fit_msg_hdr *fmh;
	uint64_t dma_address;
	struct skd_scsi_request *scsi;

	fmh = (struct fit_msg_hdr *)&skspcl->msg_buf64[0];
	fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
	fmh->num_protocol_cmds_coalesced = 1;

	/* Instead of 64-bytes in, use 8-(64-bit-words) for linted alignment. */
	scsi = (struct skd_scsi_request *)&skspcl->msg_buf64[8];
	bzero(scsi, sizeof (*scsi));
	dma_address = skspcl->req.sksg_dma_address.cookies->_dmu._dmac_ll;
	scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
	sgd->control = FIT_SGD_CONTROL_LAST;
	sgd->byte_count = 0;
	sgd->host_side_addr = skspcl->db_dma_address.cookies->_dmu._dmac_ll;
	sgd->dev_side_addr = 0;	/* not used */
	sgd->next_desc_ptr = 0LL;

	return (1);
}

/*
 *
 * Name:	skd_send_internal_skspcl, send internal requests to
 *		the hardware.
 *
 * Inputs:	skdev		- device state structure.
 *		skspcl		- request structure
 *		opcode		- just what it says
 *
 * Returns:	Nothing.
 *
 */
void
skd_send_internal_skspcl(struct skd_device *skdev,
    struct skd_special_context *skspcl, uint8_t opcode)
{
	struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
	struct skd_scsi_request *scsi;

	if (SKD_REQ_STATE_IDLE != skspcl->req.state) {
		/*
		 * A refresh is already in progress.
		 * Just wait for it to finish.
		 */
		return;
	}

	ASSERT(0 == (skspcl->req.id & SKD_ID_INCR));
	skspcl->req.state = SKD_REQ_STATE_BUSY;
	skspcl->req.id += SKD_ID_INCR;

	/* Instead of 64-bytes in, use 8-(64-bit-words) for linted alignment. */
	scsi = (struct skd_scsi_request *)&skspcl->msg_buf64[8];
	scsi->hdr.tag = skspcl->req.id;

	Dcmn_err(CE_NOTE, "internal skspcl: opcode=%x req.id=%x ==========>",
	    opcode, skspcl->req.id);

	switch (opcode) {
	case TEST_UNIT_READY:
		scsi->cdb[0] = TEST_UNIT_READY;
		scsi->cdb[1] = 0x00;
		scsi->cdb[2] = 0x00;
		scsi->cdb[3] = 0x00;
		scsi->cdb[4] = 0x00;
		scsi->cdb[5] = 0x00;
		sgd->byte_count = 0;
		scsi->hdr.sg_list_len_bytes = 0;
		break;
	case READ_CAPACITY_EXT:
		scsi->cdb[0]  = READ_CAPACITY_EXT;
		scsi->cdb[1]  = 0x10;
		scsi->cdb[2]  = 0x00;
		scsi->cdb[3]  = 0x00;
		scsi->cdb[4]  = 0x00;
		scsi->cdb[5]  = 0x00;
		scsi->cdb[6]  = 0x00;
		scsi->cdb[7]  = 0x00;
		scsi->cdb[8]  = 0x00;
		scsi->cdb[9]  = 0x00;
		scsi->cdb[10] = 0x00;
		scsi->cdb[11] = 0x00;
		scsi->cdb[12] = 0x00;
		scsi->cdb[13] = 0x20;
		scsi->cdb[14] = 0x00;
		scsi->cdb[15] = 0x00;
		sgd->byte_count = SKD_N_READ_CAP_EXT_BYTES;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		break;
	case 0x28:
		(void) memset(skspcl->data_buf, 0x65, SKD_N_INTERNAL_BYTES);

		scsi->cdb[0] = 0x28;
		scsi->cdb[1] = 0x00;
		scsi->cdb[2] = 0x00;
		scsi->cdb[3] = 0x00;
		scsi->cdb[4] = 0x00;
		scsi->cdb[5] = 0x00;
		scsi->cdb[6] = 0x00;
		scsi->cdb[7] = 0x00;
		scsi->cdb[8] = 0x01;
		scsi->cdb[9] = 0x00;
		sgd->byte_count = SKD_N_INTERNAL_BYTES;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(SKD_N_INTERNAL_BYTES);
		break;
	case INQUIRY:
		scsi->cdb[0] = INQUIRY;
		scsi->cdb[1] = 0x01;	/* evpd */
		scsi->cdb[2] = 0x80;	/* serial number page */
		scsi->cdb[3] = 0x00;
		scsi->cdb[4] = 0x10;
		scsi->cdb[5] = 0x00;
		sgd->byte_count = 16;	/* SKD_N_INQ_BYTES */
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		break;
	case INQUIRY2:
		scsi->cdb[0] = INQUIRY;
		scsi->cdb[1] = 0x00;
		scsi->cdb[2] = 0x00;	/* standard inquiry page */
		scsi->cdb[3] = 0x00;
		scsi->cdb[4] = 0x24;
		scsi->cdb[5] = 0x00;
		sgd->byte_count = 36;	/* SKD_N_INQ_BYTES */
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		break;
	case SYNCHRONIZE_CACHE:
		scsi->cdb[0] = SYNCHRONIZE_CACHE;
		scsi->cdb[1] = 0x00;
		scsi->cdb[2] = 0x00;
		scsi->cdb[3] = 0x00;
		scsi->cdb[4] = 0x00;
		scsi->cdb[5] = 0x00;
		scsi->cdb[6] = 0x00;
		scsi->cdb[7] = 0x00;
		scsi->cdb[8] = 0x00;
		scsi->cdb[9] = 0x00;
		sgd->byte_count = 0;
		scsi->hdr.sg_list_len_bytes = 0;
		break;
	default:
		ASSERT("Don't know what to send");
		return;

	}

	skd_send_special_fitmsg(skdev, skspcl);
}
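
/*
 * These internal commands form a chain driven by skd_complete_internal():
 * TEST_UNIT_READY kicks things off (see skd_refresh_device_data() below),
 * a good TUR triggers READ_CAPACITY_EXT, a good read-capacity triggers
 * INQUIRY2, and the INQUIRY results finally unquiesce the device.  Since
 * there is only one internal skspcl, the SKD_REQ_STATE_IDLE check above
 * serializes the whole sequence.
 */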

/*
 *
 * Name:	skd_refresh_device_data, sends a TUR command.
 *
 * Inputs:	skdev		- device state structure.
 *
 * Returns:	Nothing.
 *
 */
static void
skd_refresh_device_data(struct skd_device *skdev)
{
	struct skd_special_context *skspcl = &skdev->internal_skspcl;

	Dcmn_err(CE_NOTE, "refresh_device_data: state=%d", skdev->state);

	skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
}

/*
 *
 * Name:	skd_complete_internal, handles the completion of
 *		driver-initiated I/O requests.
 *
 * Inputs:	skdev		- device state structure.
 *		skcomp		- completion structure.
 *		skerr		- error structure.
 *		skspcl		- request structure.
 *
 * Returns:	Nothing.
 *
 */
/* ARGSUSED */	/* Upstream common source with other platforms. */
static void
skd_complete_internal(struct skd_device *skdev,
    volatile struct fit_completion_entry_v1 *skcomp,
    volatile struct fit_comp_error_info *skerr,
    struct skd_special_context *skspcl)
{
	uint8_t *buf = skspcl->data_buf;
	uint8_t status = 2;
	/* Instead of 64-bytes in, use 8-(64-bit-words) for linted alignment. */
	struct skd_scsi_request *scsi =
	    (struct skd_scsi_request *)&skspcl->msg_buf64[8];

	ASSERT(skspcl == &skdev->internal_skspcl);

	(void) ddi_dma_sync(skspcl->db_dma_address.dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORKERNEL);
	(void) ddi_dma_sync(skspcl->mb_dma_address.dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORKERNEL);

	Dcmn_err(CE_NOTE, "complete internal %x", scsi->cdb[0]);

	skspcl->req.completion = *skcomp;
	skspcl->req.state = SKD_REQ_STATE_IDLE;
	skspcl->req.id += SKD_ID_INCR;

	status = skspcl->req.completion.status;

	Dcmn_err(CE_NOTE, "<<<<====== complete_internal: opc=%x", *scsi->cdb);

	switch (scsi->cdb[0]) {
	case TEST_UNIT_READY:
		if (SAM_STAT_GOOD == status) {
			skd_send_internal_skspcl(skdev, skspcl,
			    READ_CAPACITY_EXT);
		} else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				cmn_err(CE_WARN,
				    "!%s: TUR failed, don't send anymore "
				    "state 0x%x", skdev->name, skdev->state);

				return;
			}

			Dcmn_err(CE_NOTE, "%s: TUR failed, retry skerr",
			    skdev->name);
			skd_send_internal_skspcl(skdev, skspcl, 0x00);
		}
		break;
	case READ_CAPACITY_EXT: {
		uint64_t	cap, Nblocks;
		uint64_t	xbuf[1];

		skdev->read_cap_is_valid = 0;
		if (SAM_STAT_GOOD == status) {
			bcopy(buf, xbuf, 8);
			cap = be64_to_cpu(*xbuf);
			skdev->read_cap_last_lba = cap;
			skdev->read_cap_blocksize =
			    (buf[8] << 24) | (buf[9] << 16) |
			    (buf[10] << 8) | buf[11];

			cap *= skdev->read_cap_blocksize;
			Dcmn_err(CE_NOTE, "  Last LBA: %" PRIu64 " (0x%" PRIx64
			    "), blk sz: %d, Capacity: %" PRIu64 "GB\n",
			    skdev->read_cap_last_lba,
			    skdev->read_cap_last_lba,
			    skdev->read_cap_blocksize,
			    cap >> 30ULL);

			Nblocks = skdev->read_cap_last_lba + 1;

			skdev->Nblocks = Nblocks;
			skdev->read_cap_is_valid = 1;

			skd_send_internal_skspcl(skdev, skspcl, INQUIRY2);

		} else {
			Dcmn_err(CE_NOTE, "**** READCAP failed, retry TUR");
			skd_send_internal_skspcl(skdev, skspcl,
			    TEST_UNIT_READY);
		}
		break;
	}
	case INQUIRY:
		skdev->inquiry_is_valid = 0;
		if (SAM_STAT_GOOD == status) {
			skdev->inquiry_is_valid = 1;

			if (scsi->cdb[1] == 0x1) {
				bcopy(&buf[4], skdev->inq_serial_num, 12);
				skdev->inq_serial_num[12] = '\0';
			} else {
				char *tmp = skdev->inq_vendor_id;

				bcopy(&buf[8], tmp, 8);
				tmp[8] = '\0';

				tmp = skdev->inq_product_id;
				bcopy(&buf[16], tmp, 16);
				tmp[16] = '\0';

				tmp = skdev->inq_product_rev;
				bcopy(&buf[32], tmp, 4);
				tmp[4] = '\0';
			}
		}

		if (skdev->state != SKD_DRVR_STATE_ONLINE)
			if (skd_unquiesce_dev(skdev) < 0)
				cmn_err(CE_NOTE, "** failed to ONLINE device");
		break;
	case SYNCHRONIZE_CACHE:
		skdev->sync_done = (SAM_STAT_GOOD == status) ? 1 : -1;

		cv_signal(&skdev->cv_waitq);
		break;

	default:
		ASSERT("we didn't send this");
	}
}

/*
 * FIT MESSAGES
 */

/*
 *
 * Name:	skd_send_fitmsg, send a FIT message to the hardware.
 *
 * Inputs:	skdev		- device state structure.
 *		skmsg		- FIT message structure.
 *
 * Returns:	Nothing.
 *
 */
/* ARGSUSED */	/* Upstream common source with other platforms. */
static void
skd_send_fitmsg(struct skd_device *skdev,
    struct skd_fitmsg_context *skmsg)
{
	uint64_t qcmd;
	struct fit_msg_hdr *fmh;

	Dcmn_err(CE_NOTE, "msgbuf's DMA addr: 0x%" PRIx64 ", qdepth_busy=%d",
	    skmsg->mb_dma_address.cookies->dmac_laddress,
	    skdev->queue_depth_busy);

	Dcmn_err(CE_NOTE, "msg_buf 0x%p, offset %x", (void *)skmsg->msg_buf,
	    skmsg->offset);

	qcmd = skmsg->mb_dma_address.cookies->dmac_laddress;
	qcmd |= FIT_QCMD_QID_NORMAL;

	fmh = (struct fit_msg_hdr *)skmsg->msg_buf64;
	skmsg->outstanding = fmh->num_protocol_cmds_coalesced;

	if (skdev->dbg_level > 1) {
		uint8_t *bp = skmsg->msg_buf;
		int i;

		for (i = 0; i < skmsg->length; i += 8) {
			Dcmn_err(CE_NOTE, "  msg[%2d] %02x %02x %02x %02x "
			    "%02x %02x %02x %02x",
			    i, bp[i + 0], bp[i + 1], bp[i + 2],
			    bp[i + 3], bp[i + 4], bp[i + 5],
			    bp[i + 6], bp[i + 7]);
			if (i == 0)
				i = 64 - 8;	/* skip ahead to byte 64 */
		}
	}

	(void) ddi_dma_sync(skmsg->mb_dma_address.dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	ASSERT(skmsg->length > sizeof (struct fit_msg_hdr));
	if (skmsg->length > 256) {
		qcmd |= FIT_QCMD_MSGSIZE_512;
	} else if (skmsg->length > 128) {
		qcmd |= FIT_QCMD_MSGSIZE_256;
	} else if (skmsg->length > 64) {
		qcmd |= FIT_QCMD_MSGSIZE_128;
	}

	skdev->ios_started++;

	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
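
/*
 * The FIT_Q_COMMAND doorbell encodes everything in a single 64-bit
 * write: the message's DMA address with the low bits carrying the queue
 * id and a size hint (FIT_QCMD_MSGSIZE_128/256/512 for messages larger
 * than 64/128/256 bytes).  This is presumably why the message buffers
 * must be sufficiently aligned: the address and flag bits cannot
 * overlap.
 */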

/*
 *
 * Name:	skd_send_special_fitmsg, send a special FIT message
 *		to the hardware, used for driver-originated I/O requests.
 *
 * Inputs:	skdev		- device state structure.
 *		skspcl		- skspcl structure.
 *
 * Returns:	Nothing.
 *
 */
static void
skd_send_special_fitmsg(struct skd_device *skdev,
    struct skd_special_context *skspcl)
{
	uint64_t qcmd;

	Dcmn_err(CE_NOTE, "send_special_fitmsg: pt 1");

	if (skdev->dbg_level > 1) {
		uint8_t *bp = skspcl->msg_buf;
		int i;

		for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
			cmn_err(CE_NOTE,
			    "  spcl[%2d] %02x %02x %02x %02x "
			    "%02x %02x %02x %02x\n", i,
			    bp[i + 0], bp[i + 1], bp[i + 2], bp[i + 3],
			    bp[i + 4], bp[i + 5], bp[i + 6], bp[i + 7]);
			if (i == 0)
				i = 64 - 8;	/* skip ahead to byte 64 */
		}

		for (i = 0; i < skspcl->req.n_sg; i++) {
			struct fit_sg_descriptor *sgd =
			    &skspcl->req.sksg_list[i];

			cmn_err(CE_NOTE, "  sg[%d] count=%u ctrl=0x%x "
			    "addr=0x%" PRIx64 " next=0x%" PRIx64,
			    i, sgd->byte_count, sgd->control,
			    sgd->host_side_addr, sgd->next_desc_ptr);
		}
	}

	(void) ddi_dma_sync(skspcl->mb_dma_address.dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	(void) ddi_dma_sync(skspcl->db_dma_address.dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
	 * and one 64-byte SSDI command.
	 */
	qcmd = skspcl->mb_dma_address.cookies->dmac_laddress;

	qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;

	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}

/*
 * COMPLETION QUEUE
 */

static void skd_complete_other(struct skd_device *skdev,
    volatile struct fit_completion_entry_v1 *skcomp,
    volatile struct fit_comp_error_info *skerr);

struct sns_info {
	uint8_t type;
	uint8_t stat;
	uint8_t key;
	uint8_t asc;
	uint8_t ascq;
	uint8_t mask;
	enum skd_check_status_action action;
};

static struct sns_info skd_chkstat_table[] = {
	/* Good */
	{0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c, SKD_CHECK_STATUS_REPORT_GOOD},

	/* Smart alerts */
	{0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E,	/* warnings */
	    SKD_CHECK_STATUS_REPORT_SMART_ALERT},
	{0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E,	/* thresholds */
	    SKD_CHECK_STATUS_REPORT_SMART_ALERT},
	{0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F,	/* temp over trigger */
	    SKD_CHECK_STATUS_REPORT_SMART_ALERT},

	/* Retry (with limits) */
	{0x70, 0x02, ABORTED_COMMAND, 0, 0, 0x1C,	/* DMA errors */
	    SKD_CHECK_STATUS_REQUEUE_REQUEST},
	{0x70, 0x02, UNIT_ATTENTION, 0x0B, 0x00, 0x1E,	/* warnings */
	    SKD_CHECK_STATUS_REQUEUE_REQUEST},
	{0x70, 0x02, UNIT_ATTENTION, 0x5D, 0x00, 0x1E,	/* thresholds */
	    SKD_CHECK_STATUS_REQUEUE_REQUEST},
	{0x70, 0x02, UNIT_ATTENTION, 0x80, 0x30, 0x1F,	/* backup power */
	    SKD_CHECK_STATUS_REQUEUE_REQUEST},

	/* Busy (or about to be) */
	{0x70, 0x02, UNIT_ATTENTION, 0x3f, 0x01, 0x1F,	/* fw changed */
	    SKD_CHECK_STATUS_BUSY_IMMINENT},
};
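
/*
 * The mask field selects which columns of an entry must match the sense
 * data: bit 0x10 = type, 0x08 = stat, 0x04 = key, 0x02 = asc,
 * 0x01 = ascq (see skd_check_status() below).  For example, the first
 * entry's mask of 0x1c matches on type, stat, and key and ignores
 * asc/ascq, so any RECOVERED_ERROR sense is reported as good.
 */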

/*
 *
 * Name:	skd_check_status, checks the return status from a
 *		completed I/O request.
 *
 * Inputs:	skdev		- device state structure.
 *		cmp_status	- SCSI status byte.
 *		skerr		- the error data structure.
 *
 * Returns:	Depending on the error condition, return the action
 *		to be taken as specified in the skd_chkstat_table.
 *		If no corresponding entry is found in the table, return
 *		SKD_CHECK_STATUS_REPORT_GOOD if there is no error,
 *		otherwise return SKD_CHECK_STATUS_REPORT_ERROR.
 *
 */
static enum skd_check_status_action
skd_check_status(struct skd_device *skdev, uint8_t cmp_status,
    volatile struct fit_comp_error_info *skerr)
{
	/*
	 * Look up status and sense data to decide how to handle the error
	 * from the device.
	 * mask says which fields must match e.g., mask=0x18 means check
	 * type and stat, ignore key, asc, ascq.
	 */
	int i, n;

	Dcmn_err(CE_NOTE, "(%s): key/asc/ascq %02x/%02x/%02x",
	    skd_name(skdev), skerr->key, skerr->code, skerr->qual);

	Dcmn_err(CE_NOTE, "stat: t=%02x stat=%02x k=%02x c=%02x q=%02x",
	    skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual);

	/* Does the info match an entry in the good category? */
	n = sizeof (skd_chkstat_table) / sizeof (skd_chkstat_table[0]);
	for (i = 0; i < n; i++) {
		struct sns_info *sns = &skd_chkstat_table[i];

		if (sns->mask & 0x10)
			if (skerr->type != sns->type)
				continue;

		if (sns->mask & 0x08)
			if (cmp_status != sns->stat)
				continue;

		if (sns->mask & 0x04)
			if (skerr->key != sns->key)
				continue;

		if (sns->mask & 0x02)
			if (skerr->code != sns->asc)
				continue;

		if (sns->mask & 0x01)
			if (skerr->qual != sns->ascq)
				continue;

		if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
			cmn_err(CE_WARN, "!(%s):SMART Alert: sense key/asc/ascq"
			    " %02x/%02x/%02x",
			    skd_name(skdev), skerr->key,
			    skerr->code, skerr->qual);
		}

		Dcmn_err(CE_NOTE, "skd_check_status: returning %x",
		    sns->action);

		return (sns->action);
	}

	/*
	 * No other match, so nonzero status means error,
	 * zero status means good
	 */
	if (cmp_status) {
		cmn_err(CE_WARN,
		    "!%s: status check: qdepth=%d skmfl=%p (%d) skrfl=%p (%d)",
		    skdev->name,
		    skdev->queue_depth_busy,
		    (void *)skdev->skmsg_free_list, skd_list_skmsg(skdev, 0),
		    (void *)skdev->skreq_free_list, skd_list_skreq(skdev, 0));

		cmn_err(CE_WARN, "!%s: t=%02x stat=%02x k=%02x c=%02x q=%02x",
		    skdev->name, skerr->type, cmp_status, skerr->key,
		    skerr->code, skerr->qual);

		return (SKD_CHECK_STATUS_REPORT_ERROR);
	}

	Dcmn_err(CE_NOTE, "status check good default");

	return (SKD_CHECK_STATUS_REPORT_GOOD);
}
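
/*
 * The completion ring below uses a phase (cycle) tag rather than a
 * producer index register: the device stamps each
 * fit_completion_entry_v1 with the current pass's cycle value, and the
 * host consumes entries while entry->cycle == skdev->skcomp_cycle,
 * bumping its own cycle when the index wraps.  A stale entry from the
 * previous pass therefore fails the comparison, which is the
 * "end of completions" exit below.
 */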

/*
 *
 * Name:	skd_isr_completion_posted, handles I/O completions.
 *
 * Inputs:	skdev		- device state structure.
 *
 * Returns:	Nothing.
 *
 */
static void
skd_isr_completion_posted(struct skd_device *skdev)
{
	volatile struct fit_completion_entry_v1 *skcmp = NULL;
	volatile struct fit_comp_error_info *skerr;
	struct skd_fitmsg_context	*skmsg;
	struct skd_request_context	*skreq;
	skd_buf_private_t	*pbuf;
	uint16_t		req_id;
	uint32_t		req_slot;
	uint32_t		timo_slot;
	uint32_t		msg_slot;
	uint16_t		cmp_cntxt = 0;
	uint8_t			cmp_status = 0;
	uint8_t			cmp_cycle = 0;
	uint32_t		cmp_bytes = 0;

	(void) ddi_dma_sync(skdev->cq_dma_address.dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORKERNEL);

	for (;;) {
		ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);

		WAITQ_LOCK(skdev);

		skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
		cmp_cycle = skcmp->cycle;
		cmp_cntxt = skcmp->tag;
		cmp_status = skcmp->status;
		cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);

		skerr = &skdev->skerr_table[skdev->skcomp_ix];

		Dcmn_err(CE_NOTE,
		    "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d "
		    "qdepth_busy=%d rbytes=0x%x proto=%d",
		    skdev->skcomp_cycle, skdev->skcomp_ix,
		    cmp_cycle, cmp_cntxt, cmp_status,
		    skdev->queue_depth_busy, cmp_bytes, skdev->proto_ver);

		if (cmp_cycle != skdev->skcomp_cycle) {
			Dcmn_err(CE_NOTE, "%s:end of completions", skdev->name);

			WAITQ_UNLOCK(skdev);
			break;
		}


		skdev->n_req++;

		/*
		 * Update the completion queue head index and possibly
		 * the completion cycle count.
		 */
		skdev->skcomp_ix++;
		if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
			skdev->skcomp_ix = 0;
			skdev->skcomp_cycle++;	/* 8-bit wrap-around */
		}


		/*
		 * The command context is a unique 32-bit ID. The low order
		 * bits help locate the request. The request is usually a
		 * r/w request (see skd_start() above) or a special request.
		 */
		req_id = cmp_cntxt;
		req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;

		Dcmn_err(CE_NOTE,
		    "<<<< completion_posted 1: req_id=%x req_slot=%x",
		    req_id, req_slot);

		/* Is this other than a r/w request? */
		if (req_slot >= skdev->num_req_context) {
			/*
			 * This is not a completion for a r/w request.
			 */
			skd_complete_other(skdev, skcmp, skerr);
			WAITQ_UNLOCK(skdev);
			continue;
		}

		skreq = &skdev->skreq_table[req_slot];
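
		/*
		 * Besides the slot bits, the id carries a generation: it
		 * is advanced by SKD_ID_INCR when the request is taken in
		 * skd_start() and again when it is reclaimed, so in-flight
		 * requests have the SKD_ID_INCR bit set and a completion
		 * whose tag no longer equals skreq->id belongs to an
		 * earlier incarnation of the slot.  That is what the
		 * ASSERT below and the fitmsg_id comparisons rely on.
		 */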

		/*
		 * Make sure the request ID for the slot matches.
		 */
		ASSERT(skreq->id == req_id);

		if (SKD_REQ_STATE_ABORTED == skreq->state) {
			Dcmn_err(CE_NOTE, "reclaim req %p id=%04x\n",
			    (void *)skreq, skreq->id);
			/*
			 * a previously timed out command can
			 * now be cleaned up
			 */
			msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
			ASSERT(msg_slot < skdev->num_fitmsg_context);
			skmsg = &skdev->skmsg_table[msg_slot];
			if (skmsg->id == skreq->fitmsg_id) {
				ASSERT(skmsg->outstanding > 0);
				skmsg->outstanding--;
				if (skmsg->outstanding == 0) {
					ASSERT(SKD_MSG_STATE_BUSY ==
					    skmsg->state);
					skmsg->state = SKD_MSG_STATE_IDLE;
					skmsg->id += SKD_ID_INCR;
					skmsg->next = skdev->skmsg_free_list;
					skdev->skmsg_free_list = skmsg;
				}
			}
			/*
			 * Reclaim the skd_request_context
			 */
			skreq->state = SKD_REQ_STATE_IDLE;
			skreq->id += SKD_ID_INCR;
			skreq->next = skdev->skreq_free_list;
			skdev->skreq_free_list = skreq;
			WAITQ_UNLOCK(skdev);
			continue;
		}

		skreq->completion.status = cmp_status;

		pbuf = skreq->pbuf;
		ASSERT(pbuf != NULL);

		Dcmn_err(CE_NOTE, "<<<< completion_posted 2: pbuf=%p "
		    "req_id=%x req_slot=%x", (void *)pbuf, req_id, req_slot);
		if (cmp_status && skdev->disks_initialized) {
			cmn_err(CE_WARN, "!%s: "
			    "I/O err: pbuf=%p blkno=%lld (%llx) nbklks=%ld ",
			    skdev->name, (void *)pbuf, pbuf->x_xfer->x_blkno,
			    pbuf->x_xfer->x_blkno, pbuf->x_xfer->x_nblks);
		}

		ASSERT(skdev->active_cmds);
		atomic_dec_64(&skdev->active_cmds);

		if (SAM_STAT_GOOD == cmp_status) {
			/* Release DMA resources for the request. */
			if (pbuf->x_xfer->x_nblks != 0)
				skd_blkdev_postop_sg_list(skdev, skreq);
			WAITQ_UNLOCK(skdev);
			skd_end_request(skdev, skreq, 0);
			WAITQ_LOCK(skdev);
		} else {
			switch (skd_check_status(skdev, cmp_status, skerr)) {
			case SKD_CHECK_STATUS_REPORT_GOOD:
			case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
				WAITQ_UNLOCK(skdev);
				skd_end_request(skdev, skreq, 0);
				WAITQ_LOCK(skdev);
				break;

			case SKD_CHECK_STATUS_BUSY_IMMINENT:
				skd_log_skreq(skdev, skreq, "retry(busy)");
				skd_queue(skdev, pbuf);
				skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
				skdev->timer_countdown = SKD_TIMER_MINUTES(20);

				(void) skd_quiesce_dev(skdev);
				break;

			case SKD_CHECK_STATUS_REPORT_ERROR:
				/* FALLTHRU - report error */
			default:
				/*
				 * Save the entire completion
				 * and error entries for
				 * later error interpretation.
				 */
				skreq->completion = *skcomp;
				skreq->err_info = *skerr;
				WAITQ_UNLOCK(skdev);
				skd_end_request(skdev, skreq, -EIO);
				WAITQ_LOCK(skdev);
				break;
			}
		}

		/*
		 * Reclaim the FIT msg buffer if this is
		 * the first of the requests it carried to
		 * be completed. The FIT msg buffer used to
		 * send this request cannot be reused until
		 * we are sure the s1120 card has copied
		 * it to its memory. The FIT msg might have
		 * contained several requests. As soon as
		 * any of them are completed we know that
		 * the entire FIT msg was transferred.
		 * Only the first completed request will
		 * match the FIT msg buffer id. The FIT
		 * msg buffer id is immediately updated.
		 * When subsequent requests complete the FIT
		 * msg buffer id won't match, so we know
		 * quite cheaply that it is already done.
		 */
2065 */ 2066 msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK; 2067 2068 ASSERT(msg_slot < skdev->num_fitmsg_context); 2069 skmsg = &skdev->skmsg_table[msg_slot]; 2070 if (skmsg->id == skreq->fitmsg_id) { 2071 ASSERT(SKD_MSG_STATE_BUSY == skmsg->state); 2072 skmsg->state = SKD_MSG_STATE_IDLE; 2073 skmsg->id += SKD_ID_INCR; 2074 skmsg->next = skdev->skmsg_free_list; 2075 skdev->skmsg_free_list = skmsg; 2076 } 2077 2078 /* 2079 * Decrease the number of active requests. 2080 * This also decrements the count in the 2081 * timeout slot. 2082 */ 2083 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK; 2084 ASSERT(skdev->timeout_slot[timo_slot] > 0); 2085 ASSERT(skdev->queue_depth_busy > 0); 2086 2087 atomic_dec_32(&skdev->timeout_slot[timo_slot]); 2088 atomic_dec_32(&skdev->queue_depth_busy); 2089 2090 /* 2091 * Reclaim the skd_request_context 2092 */ 2093 skreq->state = SKD_REQ_STATE_IDLE; 2094 skreq->id += SKD_ID_INCR; 2095 skreq->next = skdev->skreq_free_list; 2096 skdev->skreq_free_list = skreq; 2097 2098 WAITQ_UNLOCK(skdev); 2099 2100 /* 2101 * The interrupt lock is held by the caller. 2102 */ 2103 if ((skdev->state == SKD_DRVR_STATE_PAUSING) && 2104 (0 == skdev->queue_depth_busy)) { 2105 skdev->state = SKD_DRVR_STATE_PAUSED; 2106 cv_signal(&skdev->cv_waitq); 2107 } 2108 } /* for(;;) */ 2109 } 2110 2111 /* 2112 * 2113 * Name: skd_complete_other, handle the completion of a 2114 * non-r/w request. 2115 * 2116 * Inputs: skdev - device state structure. 2117 * skcomp - FIT completion structure. 2118 * skerr - error structure. 2119 * 2120 * Returns: Nothing. 2121 * 2122 */ 2123 static void 2124 skd_complete_other(struct skd_device *skdev, 2125 volatile struct fit_completion_entry_v1 *skcomp, 2126 volatile struct fit_comp_error_info *skerr) 2127 { 2128 uint32_t req_id = 0; 2129 uint32_t req_table; 2130 uint32_t req_slot; 2131 struct skd_special_context *skspcl; 2132 2133 req_id = skcomp->tag; 2134 req_table = req_id & SKD_ID_TABLE_MASK; 2135 req_slot = req_id & SKD_ID_SLOT_MASK; 2136 2137 Dcmn_err(CE_NOTE, "complete_other: table=0x%x id=0x%x slot=%d", 2138 req_table, req_id, req_slot); 2139 2140 /* 2141 * Based on the request id, determine how to dispatch this completion. 2142 * Only internal requests are expected here; the ASSERTs below 2143 * catch anything else. 2144 */ 2145 ASSERT(req_table == SKD_ID_INTERNAL); 2146 ASSERT(req_slot == 0); 2147 2148 skspcl = &skdev->internal_skspcl; 2149 ASSERT(skspcl->req.id == req_id); 2150 ASSERT(skspcl->req.state == SKD_REQ_STATE_BUSY); 2151 2152 Dcmn_err(CE_NOTE, "<<<<== complete_other: ID_INTERNAL"); 2153 skd_complete_internal(skdev, skcomp, skerr, skspcl); 2154 } 2155 2156 /* 2157 * 2158 * Name: skd_reset_skcomp, resets the completion and error 2159 * tables. 2160 * 2161 * Inputs: skdev - device state structure. 2162 * 2163 * Returns: Nothing. 2164 * 2165 */ 2166 static void 2167 skd_reset_skcomp(struct skd_device *skdev) 2168 { 2169 uint32_t nbytes; 2170 2171 nbytes = sizeof (struct fit_completion_entry_v1) * 2172 SKD_N_COMPLETION_ENTRY; 2173 nbytes += sizeof (struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY; 2174 2175 if (skdev->skcomp_table) 2176 bzero(skdev->skcomp_table, nbytes); 2177 2178 skdev->skcomp_ix = 0; 2179 skdev->skcomp_cycle = 1; 2180 } 2181 2182 2183 2184 /* 2185 * INTERRUPTS 2186 */ 2187 2188 /* 2189 * 2190 * Name: skd_isr_aif, handles the device interrupts. 2191 * 2192 * Inputs: arg - skdev device state structure.
2193 * intvec - not referenced 2194 * 2195 * Returns: DDI_INTR_CLAIMED if the interrupt is handled, 2196 * otherwise DDI_INTR_UNCLAIMED. 2197 * 2198 */ 2199 /* ARGSUSED */ /* Upstream common source with other platforms. */ 2200 static uint_t 2201 skd_isr_aif(caddr_t arg, caddr_t intvec) 2202 { 2203 uint32_t intstat; 2204 uint32_t ack; 2205 int rc = DDI_INTR_UNCLAIMED; 2206 struct skd_device *skdev; 2207 2208 skdev = (skd_device_t *)(uintptr_t)arg; 2209 2210 ASSERT(skdev != NULL); 2211 2212 skdev->intr_cntr++; 2213 2214 Dcmn_err(CE_NOTE, "skd_isr_aif: intr=%" PRId64 "\n", skdev->intr_cntr); 2215 2216 for (;;) { 2217 2218 ASSERT(!WAITQ_LOCK_HELD(skdev)); 2219 INTR_LOCK(skdev); 2220 2221 intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST); 2222 2223 ack = FIT_INT_DEF_MASK; 2224 ack &= intstat; 2225 2226 Dcmn_err(CE_NOTE, "intstat=0x%x ack=0x%x", intstat, ack); 2227 2228 /* 2229 * As long as there is an interrupt pending on the device, 2230 * keep looping. When none remain, get out; if we never 2231 * claimed an interrupt, give the completion processor a pass. 2232 */ 2233 if (ack == 0) { 2234 /* 2235 * Nothing pending, but run the completion processor 2236 * once if we claimed nothing and the device is online. 2237 */ 2238 if (rc == DDI_INTR_UNCLAIMED && 2239 skdev->state == SKD_DRVR_STATE_ONLINE) { 2240 Dcmn_err(CE_NOTE, 2241 "1: Want isr_comp_posted call"); 2242 skd_isr_completion_posted(skdev); 2243 } 2244 INTR_UNLOCK(skdev); 2245 2246 break; 2247 } 2248 rc = DDI_INTR_CLAIMED; 2249 2250 SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST); 2251 2252 if ((skdev->state != SKD_DRVR_STATE_LOAD) && 2253 (skdev->state != SKD_DRVR_STATE_STOPPING)) { 2254 if (intstat & FIT_ISH_COMPLETION_POSTED) { 2255 Dcmn_err(CE_NOTE, 2256 "2: Want isr_comp_posted call"); 2257 skd_isr_completion_posted(skdev); 2258 } 2259 2260 if (intstat & FIT_ISH_FW_STATE_CHANGE) { 2261 Dcmn_err(CE_NOTE, "isr: fwstate change"); 2262 2263 skd_isr_fwstate(skdev); 2264 if (skdev->state == SKD_DRVR_STATE_FAULT || 2265 skdev->state == 2266 SKD_DRVR_STATE_DISAPPEARED) { 2267 INTR_UNLOCK(skdev); 2268 2269 return (rc); 2270 } 2271 } 2272 2273 if (intstat & FIT_ISH_MSG_FROM_DEV) { 2274 Dcmn_err(CE_NOTE, "isr: msg_from_dev change"); 2275 skd_isr_msg_from_dev(skdev); 2276 } 2277 } 2278 2279 INTR_UNLOCK(skdev); 2280 } 2281 2282 if (!SIMPLEQ_EMPTY(&skdev->waitqueue)) 2283 skd_start(skdev); 2284 2285 return (rc); 2286 } 2287 2288 /* 2289 * 2290 * Name: skd_drive_fault, set the drive state to SKD_DRVR_STATE_FAULT. 2291 * 2292 * Inputs: skdev - device state structure. 2293 * 2294 * Returns: Nothing. 2295 * 2296 */ 2297 static void 2298 skd_drive_fault(struct skd_device *skdev) 2299 { 2300 skdev->state = SKD_DRVR_STATE_FAULT; 2301 cmn_err(CE_WARN, "!(%s): Drive FAULT\n", 2302 skd_name(skdev)); 2303 } 2304 2305 /* 2306 * 2307 * Name: skd_drive_disappeared, set the drive state to SKD_DRVR_STATE_DISAPPEARED. 2308 * 2309 * Inputs: skdev - device state structure. 2310 * 2311 * Returns: Nothing. 2312 * 2313 */ 2314 static void 2315 skd_drive_disappeared(struct skd_device *skdev) 2316 { 2317 skdev->state = SKD_DRVR_STATE_DISAPPEARED; 2318 cmn_err(CE_WARN, "!(%s): Drive DISAPPEARED\n", 2319 skd_name(skdev)); 2320 } 2321 2322 /* 2323 * 2324 * Name: skd_isr_fwstate, handles the various device states. 2325 * 2326 * Inputs: skdev - device state structure. 2327 * 2328 * Returns: Nothing.
2329 * 2330 */ 2331 static void 2332 skd_isr_fwstate(struct skd_device *skdev) 2333 { 2334 uint32_t sense; 2335 uint32_t state; 2336 int prev_driver_state; 2337 uint32_t mtd; 2338 2339 prev_driver_state = skdev->state; 2340 2341 sense = SKD_READL(skdev, FIT_STATUS); 2342 state = sense & FIT_SR_DRIVE_STATE_MASK; 2343 2344 Dcmn_err(CE_NOTE, "s1120 state %s(%d)=>%s(%d)", 2345 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, 2346 skd_drive_state_to_str(state), state); 2347 2348 skdev->drive_state = state; 2349 2350 switch (skdev->drive_state) { 2351 case FIT_SR_DRIVE_INIT: 2352 if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) { 2353 skd_disable_interrupts(skdev); 2354 break; 2355 } 2356 if (skdev->state == SKD_DRVR_STATE_RESTARTING) { 2357 skd_recover_requests(skdev); 2358 } 2359 if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) { 2360 skdev->timer_countdown = 2361 SKD_TIMER_SECONDS(SKD_STARTING_TO); 2362 skdev->state = SKD_DRVR_STATE_STARTING; 2363 skd_soft_reset(skdev); 2364 break; 2365 } 2366 mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0); 2367 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 2368 skdev->last_mtd = mtd; 2369 break; 2370 2371 case FIT_SR_DRIVE_ONLINE: 2372 skdev->queue_depth_limit = skdev->soft_queue_depth_limit; 2373 if (skdev->queue_depth_limit > skdev->hard_queue_depth_limit) { 2374 skdev->queue_depth_limit = 2375 skdev->hard_queue_depth_limit; 2376 } 2377 2378 skdev->queue_depth_lowat = skdev->queue_depth_limit * 2 / 3 + 1; 2379 if (skdev->queue_depth_lowat < 1) 2380 skdev->queue_depth_lowat = 1; 2381 Dcmn_err(CE_NOTE, 2382 "%s queue depth limit=%d hard=%d soft=%d lowat=%d", 2383 DRV_NAME, 2384 skdev->queue_depth_limit, 2385 skdev->hard_queue_depth_limit, 2386 skdev->soft_queue_depth_limit, 2387 skdev->queue_depth_lowat); 2388 2389 skd_refresh_device_data(skdev); 2390 break; 2391 case FIT_SR_DRIVE_BUSY: 2392 skdev->state = SKD_DRVR_STATE_BUSY; 2393 skdev->timer_countdown = SKD_TIMER_MINUTES(20); 2394 (void) skd_quiesce_dev(skdev); 2395 break; 2396 case FIT_SR_DRIVE_BUSY_SANITIZE: 2397 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE; 2398 skd_start(skdev); 2399 break; 2400 case FIT_SR_DRIVE_BUSY_ERASE: 2401 skdev->state = SKD_DRVR_STATE_BUSY_ERASE; 2402 skdev->timer_countdown = SKD_TIMER_MINUTES(20); 2403 break; 2404 case FIT_SR_DRIVE_OFFLINE: 2405 skdev->state = SKD_DRVR_STATE_IDLE; 2406 break; 2407 case FIT_SR_DRIVE_SOFT_RESET: 2408 /* Preserve STARTING/RESTARTING; otherwise restart. */ 2409 2410 switch (skdev->state) { 2411 case SKD_DRVR_STATE_STARTING: 2412 case SKD_DRVR_STATE_RESTARTING: 2413 break; 2414 default: 2415 skdev->state = SKD_DRVR_STATE_RESTARTING; 2416 break; 2417 } 2418 break; 2419 case FIT_SR_DRIVE_FW_BOOTING: 2420 Dcmn_err(CE_NOTE, 2421 "ISR FIT_SR_DRIVE_FW_BOOTING %s", skdev->name); 2422 skdev->state = SKD_DRVR_STATE_WAIT_BOOT; 2423 skdev->timer_countdown = SKD_TIMER_SECONDS(SKD_WAIT_BOOT_TO); 2424 break; 2425 2426 case FIT_SR_DRIVE_DEGRADED: 2427 case FIT_SR_PCIE_LINK_DOWN: 2428 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD: 2429 break; 2430 2431 case FIT_SR_DRIVE_FAULT: 2432 skd_drive_fault(skdev); 2433 skd_recover_requests(skdev); 2434 skd_start(skdev); 2435 break; 2436 2437 case 0xFF: 2438 skd_drive_disappeared(skdev); 2439 skd_recover_requests(skdev); 2440 skd_start(skdev); 2441 break; 2442 default: 2443 /* 2444 * Unknown FW State. Wait for a state we recognize.
2445 */ 2446 break; 2447 } 2448 2449 Dcmn_err(CE_NOTE, "Driver state %s(%d)=>%s(%d)", 2450 skd_skdev_state_to_str(prev_driver_state), prev_driver_state, 2451 skd_skdev_state_to_str(skdev->state), skdev->state); 2452 } 2453 2454 /* 2455 * 2456 * Name: skd_recover_requests, attempts to recover requests. 2457 * 2458 * Inputs: skdev - device state structure. 2459 * 2460 * Returns: Nothing. 2461 * 2462 */ 2463 static void 2464 skd_recover_requests(struct skd_device *skdev) 2465 { 2466 int i; 2467 2468 ASSERT(INTR_LOCK_HELD(skdev)); 2469 2470 for (i = 0; i < skdev->num_req_context; i++) { 2471 struct skd_request_context *skreq = &skdev->skreq_table[i]; 2472 2473 if (skreq->state == SKD_REQ_STATE_BUSY) { 2474 skd_log_skreq(skdev, skreq, "requeue"); 2475 2476 ASSERT(0 != (skreq->id & SKD_ID_INCR)); 2477 ASSERT(skreq->pbuf != NULL); 2478 /* Release DMA resources for the request. */ 2479 skd_blkdev_postop_sg_list(skdev, skreq); 2480 2481 skd_end_request(skdev, skreq, EAGAIN); 2482 skreq->pbuf = NULL; 2483 skreq->state = SKD_REQ_STATE_IDLE; 2484 skreq->id += SKD_ID_INCR; 2485 } 2486 if (i > 0) { 2487 skreq[-1].next = skreq; 2488 } 2489 skreq->next = NULL; 2490 } 2491 2492 WAITQ_LOCK(skdev); 2493 skdev->skreq_free_list = skdev->skreq_table; 2494 WAITQ_UNLOCK(skdev); 2495 2496 for (i = 0; i < skdev->num_fitmsg_context; i++) { 2497 struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i]; 2498 2499 if (skmsg->state == SKD_MSG_STATE_BUSY) { 2500 skd_log_skmsg(skdev, skmsg, "salvaged"); 2501 ASSERT((skmsg->id & SKD_ID_INCR) != 0); 2502 skmsg->state = SKD_MSG_STATE_IDLE; 2503 skmsg->id &= ~SKD_ID_INCR; 2504 } 2505 if (i > 0) { 2506 skmsg[-1].next = skmsg; 2507 } 2508 skmsg->next = NULL; 2509 } 2510 WAITQ_LOCK(skdev); 2511 skdev->skmsg_free_list = skdev->skmsg_table; 2512 WAITQ_UNLOCK(skdev); 2513 2514 for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++) { 2515 skdev->timeout_slot[i] = 0; 2516 } 2517 skdev->queue_depth_busy = 0; 2518 } 2519 2520 /* 2521 * 2522 * Name: skd_isr_msg_from_dev, handles a message from the device. 2523 * 2524 * Inputs: skdev - device state structure. 2525 * 2526 * Returns: Nothing. 
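 * * Note: this routine advances one step of the FIT boot handshake. Each mtd written to FIT_MSG_TO_DEVICE is acked by an mfd of the same type, and each ack triggers the next step: * * FIT_MTD_FITFW_INIT -> FIT_MTD_GET_CMDQ_DEPTH -> * FIT_MTD_SET_COMPQ_DEPTH -> FIT_MTD_SET_COMPQ_ADDR -> * FIT_MTD_ARM_QUEUE * * Once FIT_MTD_ARM_QUEUE is acked, the drive state should move to * FIT_SR_DRIVE_ONLINE.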
2527 * 2528 */ 2529 static void 2530 skd_isr_msg_from_dev(struct skd_device *skdev) 2531 { 2532 uint32_t mfd; 2533 uint32_t mtd; 2534 2535 Dcmn_err(CE_NOTE, "skd_isr_msg_from_dev:"); 2536 2537 mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE); 2538 2539 Dcmn_err(CE_NOTE, "mfd=0x%x last_mtd=0x%x\n", mfd, skdev->last_mtd); 2540 2541 /* 2542 * Ignore any mfd that is not an ack for the last mtd we sent. 2543 */ 2544 if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd)) { 2545 return; 2546 } 2547 2548 switch (FIT_MXD_TYPE(mfd)) { 2549 case FIT_MTD_FITFW_INIT: 2550 skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd); 2551 2552 if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) { 2553 cmn_err(CE_WARN, "!(%s): protocol mismatch\n", 2554 skdev->name); 2555 cmn_err(CE_WARN, "!(%s): got=%d support=%d\n", 2556 skdev->name, skdev->proto_ver, 2557 FIT_PROTOCOL_VERSION_1); 2558 cmn_err(CE_WARN, "!(%s): please upgrade driver\n", 2559 skdev->name); 2560 skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH; 2561 skd_soft_reset(skdev); 2562 break; 2563 } 2564 mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0); 2565 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 2566 skdev->last_mtd = mtd; 2567 break; 2568 2569 case FIT_MTD_GET_CMDQ_DEPTH: 2570 skdev->hard_queue_depth_limit = FIT_MXD_DATA(mfd); 2571 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0, 2572 SKD_N_COMPLETION_ENTRY); 2573 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 2574 skdev->last_mtd = mtd; 2575 break; 2576 2577 case FIT_MTD_SET_COMPQ_DEPTH: 2578 SKD_WRITEQ(skdev, skdev->cq_dma_address.cookies->dmac_laddress, 2579 FIT_MSG_TO_DEVICE_ARG); 2580 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0); 2581 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 2582 skdev->last_mtd = mtd; 2583 break; 2584 2585 case FIT_MTD_SET_COMPQ_ADDR: 2586 skd_reset_skcomp(skdev); 2587 mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0); 2588 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 2589 skdev->last_mtd = mtd; 2590 break; 2591 2592 case FIT_MTD_ARM_QUEUE: 2593 skdev->last_mtd = 0; 2594 /* 2595 * State should be, or soon will be, FIT_SR_DRIVE_ONLINE. 2596 */ 2597 break; 2598 2599 default: 2600 break; 2601 } 2602 } 2603 2604 2605 /* 2606 * 2607 * Name: skd_disable_interrupts, issues command to disable 2608 * device interrupts. 2609 * 2610 * Inputs: skdev - device state structure. 2611 * 2612 * Returns: Nothing. 2613 * 2614 */ 2615 static void 2616 skd_disable_interrupts(struct skd_device *skdev) 2617 { 2618 uint32_t sense; 2619 2620 Dcmn_err(CE_NOTE, "skd_disable_interrupts:"); 2621 2622 sense = SKD_READL(skdev, FIT_CONTROL); 2623 sense &= ~FIT_CR_ENABLE_INTERRUPTS; 2624 SKD_WRITEL(skdev, sense, FIT_CONTROL); 2625 2626 Dcmn_err(CE_NOTE, "sense 0x%x", sense); 2627 2628 /* 2629 * Note that all 1s are written. A 1-bit means 2630 * disable, a 0-bit means enable. 2631 */ 2632 SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST); 2633 } 2634 2635 /* 2636 * 2637 * Name: skd_enable_interrupts, issues command to enable 2638 * device interrupts. 2639 * 2640 * Inputs: skdev - device state structure. 2641 * 2642 * Returns: Nothing. 2643 * 2644 */ 2645 static void 2646 skd_enable_interrupts(struct skd_device *skdev) 2647 { 2648 uint32_t val; 2649 2650 Dcmn_err(CE_NOTE, "skd_enable_interrupts:"); 2651 2652 /* unmask interrupts first */ 2653 val = FIT_ISH_FW_STATE_CHANGE + 2654 FIT_ISH_COMPLETION_POSTED + 2655 FIT_ISH_MSG_FROM_DEV; 2656 2657 /* 2658 * Note that the complement of the mask is written. A 1-bit means 2659 * disable, a 0-bit means enable.
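 * The three sources are combined with '+' above, which is equivalent to '|' for these disjoint single-bit flags. As a worked example (illustrative only): if val collects exactly those three bits, then writing ~val to FIT_INT_MASK_HOST leaves only those three bits clear (enabled) and masks every other source.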
2660 */ 2661 SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST); 2662 2663 Dcmn_err(CE_NOTE, "interrupt mask=0x%x", ~val); 2664 2665 val = SKD_READL(skdev, FIT_CONTROL); 2666 val |= FIT_CR_ENABLE_INTERRUPTS; 2667 2668 Dcmn_err(CE_NOTE, "control=0x%x", val); 2669 2670 SKD_WRITEL(skdev, val, FIT_CONTROL); 2671 } 2672 2673 /* 2674 * 2675 * Name: skd_soft_reset, issues a soft reset to the hardware. 2676 * 2677 * Inputs: skdev - device state structure. 2678 * 2679 * Returns: Nothing. 2680 * 2681 */ 2682 static void 2683 skd_soft_reset(struct skd_device *skdev) 2684 { 2685 uint32_t val; 2686 2687 Dcmn_err(CE_NOTE, "skd_soft_reset:"); 2688 2689 val = SKD_READL(skdev, FIT_CONTROL); 2690 val |= (FIT_CR_SOFT_RESET); 2691 2692 Dcmn_err(CE_NOTE, "soft_reset: control=0x%x", val); 2693 2694 SKD_WRITEL(skdev, val, FIT_CONTROL); 2695 } 2696 2697 /* 2698 * 2699 * Name: skd_start_device, gets the device going. 2700 * 2701 * Inputs: skdev - device state structure. 2702 * 2703 * Returns: Nothing. 2704 * 2705 */ 2706 static void 2707 skd_start_device(struct skd_device *skdev) 2708 { 2709 uint32_t state; 2710 int delay_action = 0; 2711 2712 Dcmn_err(CE_NOTE, "skd_start_device:"); 2713 2714 /* ack all ghost interrupts */ 2715 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); 2716 2717 state = SKD_READL(skdev, FIT_STATUS); 2718 2719 Dcmn_err(CE_NOTE, "initial status=0x%x", state); 2720 2721 state &= FIT_SR_DRIVE_STATE_MASK; 2722 skdev->drive_state = state; 2723 skdev->last_mtd = 0; 2724 2725 skdev->state = SKD_DRVR_STATE_STARTING; 2726 skdev->timer_countdown = SKD_TIMER_SECONDS(SKD_STARTING_TO); 2727 2728 skd_enable_interrupts(skdev); 2729 2730 switch (skdev->drive_state) { 2731 case FIT_SR_DRIVE_OFFLINE: 2732 Dcmn_err(CE_NOTE, "(%s): Drive offline...", 2733 skd_name(skdev)); 2734 break; 2735 2736 case FIT_SR_DRIVE_FW_BOOTING: 2737 Dcmn_err(CE_NOTE, "FIT_SR_DRIVE_FW_BOOTING %s\n", skdev->name); 2738 skdev->state = SKD_DRVR_STATE_WAIT_BOOT; 2739 skdev->timer_countdown = SKD_TIMER_SECONDS(SKD_WAIT_BOOT_TO); 2740 break; 2741 2742 case FIT_SR_DRIVE_BUSY_SANITIZE: 2743 Dcmn_err(CE_NOTE, "(%s): Start: BUSY_SANITIZE\n", 2744 skd_name(skdev)); 2745 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE; 2746 skdev->timer_countdown = SKD_TIMER_SECONDS(60); 2747 break; 2748 2749 case FIT_SR_DRIVE_BUSY_ERASE: 2750 Dcmn_err(CE_NOTE, "(%s): Start: BUSY_ERASE\n", 2751 skd_name(skdev)); 2752 skdev->state = SKD_DRVR_STATE_BUSY_ERASE; 2753 skdev->timer_countdown = SKD_TIMER_SECONDS(60); 2754 break; 2755 2756 case FIT_SR_DRIVE_INIT: 2757 case FIT_SR_DRIVE_ONLINE: 2758 skd_soft_reset(skdev); 2759 2760 break; 2761 2762 case FIT_SR_DRIVE_BUSY: 2763 Dcmn_err(CE_NOTE, "(%s): Drive Busy...\n", 2764 skd_name(skdev)); 2765 skdev->state = SKD_DRVR_STATE_BUSY; 2766 skdev->timer_countdown = SKD_TIMER_SECONDS(60); 2767 break; 2768 2769 case FIT_SR_DRIVE_SOFT_RESET: 2770 Dcmn_err(CE_NOTE, "(%s) drive soft reset in prog\n", 2771 skd_name(skdev)); 2772 break; 2773 2774 case FIT_SR_DRIVE_FAULT: 2775 /* 2776 * Fault state is bad...soft reset won't do it... 2777 * Hard reset, maybe, but does it work on device? 2778 * For now, just fault so the system doesn't hang. 
2779 */ 2780 skd_drive_fault(skdev); 2781 2782 delay_action = 1; 2783 break; 2784 2785 case 0xFF: 2786 skd_drive_disappeared(skdev); 2787 2788 delay_action = 1; 2789 break; 2790 2791 default: 2792 Dcmn_err(CE_NOTE, "(%s) Start: unknown state %x\n", 2793 skd_name(skdev), skdev->drive_state); 2794 break; 2795 } 2796 2797 state = SKD_READL(skdev, FIT_CONTROL); 2798 Dcmn_err(CE_NOTE, "FIT Control Status=0x%x\n", state); 2799 2800 state = SKD_READL(skdev, FIT_INT_STATUS_HOST); 2801 Dcmn_err(CE_NOTE, "Intr Status=0x%x\n", state); 2802 2803 state = SKD_READL(skdev, FIT_INT_MASK_HOST); 2804 Dcmn_err(CE_NOTE, "Intr Mask=0x%x\n", state); 2805 2806 state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE); 2807 Dcmn_err(CE_NOTE, "Msg from Dev=0x%x\n", state); 2808 2809 state = SKD_READL(skdev, FIT_HW_VERSION); 2810 Dcmn_err(CE_NOTE, "HW version=0x%x\n", state); 2811 2812 if (delay_action) { 2813 /* start the queue so we can respond with error to requests */ 2814 Dcmn_err(CE_NOTE, "Starting %s queue\n", skdev->name); 2815 skd_start(skdev); 2816 skdev->gendisk_on = -1; 2817 cv_signal(&skdev->cv_waitq); 2818 } 2819 } 2820 2821 /* 2822 * 2823 * Name: skd_restart_device, restart the hardware. 2824 * 2825 * Inputs: skdev - device state structure. 2826 * 2827 * Returns: Nothing. 2828 * 2829 */ 2830 static void 2831 skd_restart_device(struct skd_device *skdev) 2832 { 2833 uint32_t state; 2834 2835 Dcmn_err(CE_NOTE, "skd_restart_device:"); 2836 2837 /* ack all ghost interrupts */ 2838 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); 2839 2840 state = SKD_READL(skdev, FIT_STATUS); 2841 2842 Dcmn_err(CE_NOTE, "skd_restart_device: drive status=0x%x\n", state); 2843 2844 state &= FIT_SR_DRIVE_STATE_MASK; 2845 skdev->drive_state = state; 2846 skdev->last_mtd = 0; 2847 2848 skdev->state = SKD_DRVR_STATE_RESTARTING; 2849 skdev->timer_countdown = SKD_TIMER_MINUTES(4); 2850 2851 skd_soft_reset(skdev); 2852 } 2853 2854 /* 2855 * 2856 * Name: skd_stop_device, stops the device. 2857 * 2858 * Inputs: skdev - device state structure. 2859 * 2860 * Returns: Nothing. 
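 * * Note: on an online device this first pushes a SYNCHRONIZE_CACHE * through the internal special context and waits, bounded, for its * completion before disabling interrupts and soft-resetting the card. * The wait uses the standard DDI absolute-timeout idiom, sketched here * (illustrative only): * * tmo = ddi_get_lbolt() + drv_usectohz(1000000 * secs); * (void) cv_timedwait(&cv, &mutex, tmo);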
2861 * 2862 */ 2863 static void 2864 skd_stop_device(struct skd_device *skdev) 2865 { 2866 clock_t cur_ticks, tmo; 2867 int secs; 2868 struct skd_special_context *skspcl = &skdev->internal_skspcl; 2869 2870 if (SKD_DRVR_STATE_ONLINE != skdev->state) { 2871 Dcmn_err(CE_NOTE, "(%s): skd_stop_device not online no sync\n", 2872 skdev->name); 2873 goto stop_out; 2874 } 2875 2876 if (SKD_REQ_STATE_IDLE != skspcl->req.state) { 2877 Dcmn_err(CE_NOTE, "(%s): skd_stop_device no special\n", 2878 skdev->name); 2879 goto stop_out; 2880 } 2881 2882 skdev->state = SKD_DRVR_STATE_SYNCING; 2883 skdev->sync_done = 0; 2884 2885 skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE); 2886 2887 secs = 10; 2888 mutex_enter(&skdev->skd_internalio_mutex); 2889 while (skdev->sync_done == 0) { 2890 cur_ticks = ddi_get_lbolt(); 2891 tmo = cur_ticks + drv_usectohz(1000000 * secs); 2892 if (cv_timedwait(&skdev->cv_waitq, 2893 &skdev->skd_internalio_mutex, tmo) == -1) { 2894 /* Oops - timed out; give up so unload can proceed. */ 2895 Dcmn_err(CE_NOTE, "stop_device - %d secs TMO", secs); 2896 break; 2897 } 2898 } 2899 2900 mutex_exit(&skdev->skd_internalio_mutex); 2901 2902 switch (skdev->sync_done) { 2903 case 0: 2904 Dcmn_err(CE_NOTE, "(%s): skd_stop_device no sync\n", 2905 skdev->name); 2906 break; 2907 case 1: 2908 Dcmn_err(CE_NOTE, "(%s): skd_stop_device sync done\n", 2909 skdev->name); 2910 break; 2911 default: 2912 Dcmn_err(CE_NOTE, "(%s): skd_stop_device sync error\n", 2913 skdev->name); 2914 } 2915 2916 2917 stop_out: 2918 skdev->state = SKD_DRVR_STATE_STOPPING; 2919 2920 skd_disable_interrupts(skdev); 2921 2922 /* ensure all ints on device are cleared */ 2923 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); 2924 /* soft reset the device to unload with a clean slate */ 2925 SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL); 2926 } 2927 2928 /* 2929 * CONSTRUCT 2930 */ 2931 2932 static int skd_cons_skcomp(struct skd_device *); 2933 static int skd_cons_skmsg(struct skd_device *); 2934 static int skd_cons_skreq(struct skd_device *); 2935 static int skd_cons_sksb(struct skd_device *); 2936 static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *, uint32_t, 2937 dma_mem_t *); 2938 2939 /* 2940 * 2941 * Name: skd_construct, calls other routines to build device 2942 * interface structures. 2943 * 2944 * Inputs: skdev - device state structure. 2945 * instance - DDI instance number. 2946 * 2947 * Returns: DDI_FAILURE on any failure, otherwise 2948 * DDI_SUCCESS. 2949 * 2950 */ 2951 /* ARGSUSED */ /* Upstream common source with other platforms.
*/ 2952 static int 2953 skd_construct(skd_device_t *skdev, int instance) 2954 { 2955 int rc = 0; 2956 2957 skdev->state = SKD_DRVR_STATE_LOAD; 2958 skdev->irq_type = skd_isr_type; 2959 skdev->soft_queue_depth_limit = skd_max_queue_depth; 2960 skdev->hard_queue_depth_limit = 10; /* until GET_CMDQ_DEPTH */ 2961 2962 skdev->num_req_context = skd_max_queue_depth; 2963 skdev->num_fitmsg_context = skd_max_queue_depth; 2964 2965 skdev->queue_depth_limit = skdev->hard_queue_depth_limit; 2966 skdev->queue_depth_lowat = 1; 2967 skdev->proto_ver = 99; /* initialize to invalid value */ 2968 skdev->sgs_per_request = skd_sgs_per_request; 2969 skdev->dbg_level = skd_dbg_level; 2970 2971 rc = skd_cons_skcomp(skdev); 2972 if (rc < 0) { 2973 goto err_out; 2974 } 2975 2976 rc = skd_cons_skmsg(skdev); 2977 if (rc < 0) { 2978 goto err_out; 2979 } 2980 2981 rc = skd_cons_skreq(skdev); 2982 if (rc < 0) { 2983 goto err_out; 2984 } 2985 2986 rc = skd_cons_sksb(skdev); 2987 if (rc < 0) { 2988 goto err_out; 2989 } 2990 2991 Dcmn_err(CE_NOTE, "CONSTRUCT VICTORY"); 2992 2993 return (DDI_SUCCESS); 2994 2995 err_out: 2996 Dcmn_err(CE_NOTE, "construct failed\n"); 2997 skd_destruct(skdev); 2998 2999 return (DDI_FAILURE); 3000 } 3001 3002 /* 3003 * 3004 * Name: skd_free_phys, frees DMA memory. 3005 * 3006 * Inputs: skdev - device state structure. 3007 * mem - DMA info. 3008 * 3009 * Returns: Nothing. 3010 * 3011 */ 3012 static void 3013 skd_free_phys(skd_device_t *skdev, dma_mem_t *mem) 3014 { 3015 _NOTE(ARGUNUSED(skdev)); 3016 3017 if (mem == NULL || mem->dma_handle == NULL) 3018 return; 3019 3020 (void) ddi_dma_unbind_handle(mem->dma_handle); 3021 3022 if (mem->acc_handle != NULL) { 3023 ddi_dma_mem_free(&mem->acc_handle); 3024 mem->acc_handle = NULL; 3025 } 3026 3027 mem->bp = NULL; 3028 ddi_dma_free_handle(&mem->dma_handle); 3029 mem->dma_handle = NULL; 3030 } 3031 3032 /* 3033 * 3034 * Name: skd_alloc_dma_mem, allocates DMA memory. 3035 * 3036 * Inputs: skdev - device state structure. 3037 * mem - DMA data structure. 3038 * atype - specifies a 32- or 64-bit allocation. 3039 * 3040 * Returns: Void pointer to mem->bp on success else NULL. 3041 * NOTE: There are failure modes even though this 3042 * routine sleeps (DDI_DMA_SLEEP), so callers 3043 * MUST check the return value. 3044 * 3045 * 3046 */ 3047 static void * 3048 skd_alloc_dma_mem(skd_device_t *skdev, dma_mem_t *mem, uint8_t atype) 3049 { 3050 size_t rlen; 3051 uint_t cnt; 3052 ddi_dma_attr_t dma_attr = skd_64bit_io_dma_attr; 3053 ddi_device_acc_attr_t acc_attr = { 3054 DDI_DEVICE_ATTR_V0, 3055 DDI_STRUCTURE_LE_ACC, 3056 DDI_STRICTORDER_ACC 3057 }; 3058 3059 if (atype == ATYPE_32BIT) 3060 dma_attr.dma_attr_addr_hi = SKD_DMA_HIGH_32BIT_ADDRESS; 3061 3062 dma_attr.dma_attr_sgllen = 1; 3063 3064 /* 3065 * Allocate DMA memory.
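 * This follows the standard three-step DDI DMA idiom, sketched below * (illustrative only): allocate a handle, allocate and map the memory, * then bind it, insisting on a single cookie since dma_attr_sgllen was * forced to 1 above: * * ddi_dma_alloc_handle(dip, &attr, DDI_DMA_SLEEP, NULL, &hdl); * ddi_dma_mem_alloc(hdl, size, &acc, DDI_DMA_CONSISTENT, * DDI_DMA_SLEEP, NULL, &kaddr, &rlen, &acc_hdl); * ddi_dma_addr_bind_handle(hdl, NULL, kaddr, size, * DDI_DMA_CONSISTENT | DDI_DMA_RDWR, DDI_DMA_SLEEP, NULL, * &cookie, &cnt);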
3066 */ 3067 if (ddi_dma_alloc_handle(skdev->dip, &dma_attr, DDI_DMA_SLEEP, NULL, 3068 &mem->dma_handle) != DDI_SUCCESS) { 3069 cmn_err(CE_WARN, "!alloc_dma_mem-1, failed"); 3070 3071 mem->dma_handle = NULL; 3072 3073 return (NULL); 3074 } 3075 3076 if (ddi_dma_mem_alloc(mem->dma_handle, mem->size, &acc_attr, 3077 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, (caddr_t *)&mem->bp, &rlen, 3078 &mem->acc_handle) != DDI_SUCCESS) { 3079 cmn_err(CE_WARN, "!skd_alloc_dma_mem-2, failed"); 3080 ddi_dma_free_handle(&mem->dma_handle); 3081 mem->dma_handle = NULL; 3082 mem->acc_handle = NULL; 3083 mem->bp = NULL; 3084 3085 return (NULL); 3086 } 3087 bzero(mem->bp, mem->size); 3088 3089 if (ddi_dma_addr_bind_handle(mem->dma_handle, NULL, mem->bp, 3090 mem->size, (DDI_DMA_CONSISTENT | DDI_DMA_RDWR), DDI_DMA_SLEEP, NULL, 3091 &mem->cookie, &cnt) != DDI_DMA_MAPPED) { 3092 cmn_err(CE_WARN, "!skd_alloc_dma_mem-3, failed"); 3093 ddi_dma_mem_free(&mem->acc_handle); 3094 ddi_dma_free_handle(&mem->dma_handle); 3095 mem->acc_handle = NULL; mem->dma_handle = NULL; mem->bp = NULL; 3096 return (NULL); 3097 } 3098 3099 if (cnt > 1) { 3100 cmn_err(CE_WARN, "!skd_alloc_dma_mem-4, failed, " 3101 "cookie_count %d > 1", cnt); 3102 /* skd_free_phys() unbinds and releases the handles. */ 3103 skd_free_phys(skdev, mem); 3104 3105 return (NULL); 3106 } 3107 mem->cookies = &mem->cookie; 3108 mem->cookies->dmac_size = mem->size; 3109 3110 return (mem->bp); 3111 } 3112 3113 /* 3114 * 3115 * Name: skd_cons_skcomp, allocates space for the skcomp table. 3116 * 3117 * Inputs: skdev - device state structure. 3118 * 3119 * Returns: -ENOMEM if memory could not be allocated, otherwise zero. 3120 * 3121 */ 3122 static int 3123 skd_cons_skcomp(struct skd_device *skdev) 3124 { 3125 uint64_t *dma_alloc; 3126 struct fit_completion_entry_v1 *skcomp; 3127 int rc = 0; 3128 uint32_t nbytes; 3129 dma_mem_t *mem; 3130 3131 nbytes = sizeof (*skcomp) * SKD_N_COMPLETION_ENTRY; 3132 nbytes += sizeof (struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY; 3133 3134 Dcmn_err(CE_NOTE, "cons_skcomp: nbytes=%d,entries=%d", nbytes, 3135 SKD_N_COMPLETION_ENTRY); 3136 3137 mem = &skdev->cq_dma_address; 3138 mem->size = nbytes; 3139 3140 dma_alloc = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT); 3141 skcomp = (struct fit_completion_entry_v1 *)dma_alloc; 3142 if (skcomp == NULL) { 3143 rc = -ENOMEM; 3144 goto err_out; 3145 } 3146 3147 bzero(skcomp, nbytes); 3148 3149 Dcmn_err(CE_NOTE, "cons_skcomp: skcomp=%p nbytes=%d", 3150 (void *)skcomp, nbytes); 3151 3152 skdev->skcomp_table = skcomp; 3153 skdev->skerr_table = (struct fit_comp_error_info *)(dma_alloc + 3154 (SKD_N_COMPLETION_ENTRY * sizeof (*skcomp) / sizeof (uint64_t))); 3155 3156 err_out: 3157 return (rc); 3158 } 3159 3160 /* 3161 * 3162 * Name: skd_cons_skmsg, allocates space for the skmsg table. 3163 * 3164 * Inputs: skdev - device state structure. 3165 * 3166 * Returns: -ENOMEM if memory could not be allocated, otherwise zero.
3167 * 3168 */ 3169 static int 3170 skd_cons_skmsg(struct skd_device *skdev) 3171 { 3172 dma_mem_t *mem; 3173 int rc = 0; 3174 uint32_t i; 3175 3176 Dcmn_err(CE_NOTE, "skmsg_table kzalloc, struct %lu, count %u total %lu", 3177 (ulong_t)sizeof (struct skd_fitmsg_context), 3178 skdev->num_fitmsg_context, 3179 (ulong_t)(sizeof (struct skd_fitmsg_context) * 3180 skdev->num_fitmsg_context)); 3181 3182 skdev->skmsg_table = (struct skd_fitmsg_context *)kmem_zalloc( 3183 sizeof (struct skd_fitmsg_context) * skdev->num_fitmsg_context, 3184 KM_SLEEP); 3185 3186 for (i = 0; i < skdev->num_fitmsg_context; i++) { 3187 struct skd_fitmsg_context *skmsg; 3188 3189 skmsg = &skdev->skmsg_table[i]; 3190 3191 skmsg->id = i + SKD_ID_FIT_MSG; 3192 3193 skmsg->state = SKD_MSG_STATE_IDLE; 3194 3195 mem = &skmsg->mb_dma_address; 3196 mem->size = SKD_N_FITMSG_BYTES + 64; 3197 3198 skmsg->msg_buf = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT); 3199 3200 if (NULL == skmsg->msg_buf) { 3201 rc = -ENOMEM; 3202 i++; 3203 break; 3204 } 3205 3206 skmsg->offset = 0; 3207 3208 bzero(skmsg->msg_buf, SKD_N_FITMSG_BYTES); 3209 3210 skmsg->next = &skmsg[1]; 3211 } 3212 3213 /* Free list is in order starting with the 0th entry. */ 3214 skdev->skmsg_table[i - 1].next = NULL; 3215 skdev->skmsg_free_list = skdev->skmsg_table; 3216 3217 return (rc); 3218 } 3219 3220 /* 3221 * 3222 * Name: skd_cons_skreq, allocates space for the skreq table. 3223 * 3224 * Inputs: skdev - device state structure. 3225 * 3226 * Returns: -ENOMEM if memory could not be allocated, otherwise zero. 3227 * 3228 */ 3229 static int 3230 skd_cons_skreq(struct skd_device *skdev) 3231 { 3232 int rc = 0; 3233 uint32_t i; 3234 3235 Dcmn_err(CE_NOTE, 3236 "skreq_table kmem_zalloc, struct %lu, count %u total %lu", 3237 (ulong_t)sizeof (struct skd_request_context), 3238 skdev->num_req_context, 3239 (ulong_t) (sizeof (struct skd_request_context) * 3240 skdev->num_req_context)); 3241 3242 skdev->skreq_table = (struct skd_request_context *)kmem_zalloc( 3243 sizeof (struct skd_request_context) * skdev->num_req_context, 3244 KM_SLEEP); 3245 3246 for (i = 0; i < skdev->num_req_context; i++) { 3247 struct skd_request_context *skreq; 3248 3249 skreq = &skdev->skreq_table[i]; 3250 3251 skreq->id = (uint16_t)(i + SKD_ID_RW_REQUEST); 3252 skreq->state = SKD_REQ_STATE_IDLE; 3253 3254 skreq->sksg_list = skd_cons_sg_list(skdev, 3255 skdev->sgs_per_request, 3256 &skreq->sksg_dma_address); 3257 3258 if (NULL == skreq->sksg_list) { 3259 rc = -ENOMEM; 3260 goto err_out; 3261 } 3262 3263 skreq->next = &skreq[1]; 3264 } 3265 3266 /* Free list is in order starting with the 0th entry. */ 3267 skdev->skreq_table[i - 1].next = NULL; 3268 skdev->skreq_free_list = skdev->skreq_table; 3269 3270 err_out: 3271 return (rc); 3272 } 3273 3274 /* 3275 * 3276 * Name: skd_cons_sksb, allocates space for the skspcl msg buf 3277 * and data buf. 3278 * 3279 * Inputs: skdev - device state structure. 3280 * 3281 * Returns: -ENOMEM if memory could not be allocated, otherwise zero.
3282 * 3283 */ 3284 static int 3285 skd_cons_sksb(struct skd_device *skdev) 3286 { 3287 int rc = 0; 3288 struct skd_special_context *skspcl; 3289 dma_mem_t *mem; 3290 uint32_t nbytes; 3291 3292 skspcl = &skdev->internal_skspcl; 3293 3294 skspcl->req.id = 0 + SKD_ID_INTERNAL; 3295 skspcl->req.state = SKD_REQ_STATE_IDLE; 3296 3297 nbytes = SKD_N_INTERNAL_BYTES; 3298 3299 mem = &skspcl->db_dma_address; 3300 mem->size = nbytes; 3301 3302 /* data_buf's DMA pointer is skspcl->db_dma_address */ 3303 skspcl->data_buf = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT); 3304 if (skspcl->data_buf == NULL) { 3305 rc = -ENOMEM; 3306 goto err_out; 3307 } 3308 3309 bzero(skspcl->data_buf, nbytes); 3310 3311 nbytes = SKD_N_SPECIAL_FITMSG_BYTES; 3312 3313 mem = &skspcl->mb_dma_address; 3314 mem->size = nbytes; 3315 3316 /* msg_buf DMA pointer is skspcl->mb_dma_address */ 3317 skspcl->msg_buf = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT); 3318 if (skspcl->msg_buf == NULL) { 3319 rc = -ENOMEM; 3320 goto err_out; 3321 } 3322 3323 3324 bzero(skspcl->msg_buf, nbytes); 3325 3326 skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1, 3327 &skspcl->req.sksg_dma_address); 3328 3329 3330 if (skspcl->req.sksg_list == NULL) { 3331 rc = -ENOMEM; 3332 goto err_out; 3333 } 3334 3335 if (skd_format_internal_skspcl(skdev) == 0) { 3336 rc = -EINVAL; 3337 goto err_out; 3338 } 3339 3340 err_out: 3341 return (rc); 3342 } 3343 3344 /* 3345 * 3346 * Name: skd_cons_sg_list, allocates the S/G list. 3347 * 3348 * Inputs: skdev - device state structure. 3349 * n_sg - Number of scatter-gather entries. 3350 * ret_dma_addr - S/G list DMA pointer. 3351 * 3352 * Returns: A list of FIT S/G descriptors, or NULL on failure. 3353 * 3354 */ 3355 static struct fit_sg_descriptor 3356 *skd_cons_sg_list(struct skd_device *skdev, 3357 uint32_t n_sg, dma_mem_t *ret_dma_addr) 3358 { 3359 struct fit_sg_descriptor *sg_list; 3360 uint32_t nbytes; 3361 dma_mem_t *mem; 3362 3363 nbytes = sizeof (*sg_list) * n_sg; 3364 3365 mem = ret_dma_addr; 3366 mem->size = nbytes; 3367 3368 /* sg_list's DMA pointer is *ret_dma_addr */ 3369 sg_list = skd_alloc_dma_mem(skdev, mem, ATYPE_32BIT); 3370 3371 if (sg_list != NULL) { 3372 uint64_t dma_address = ret_dma_addr->cookie.dmac_laddress; 3373 uint32_t i; 3374 3375 bzero(sg_list, nbytes); 3376 3377 for (i = 0; i < n_sg - 1; i++) { 3378 uint64_t ndp_off; 3379 ndp_off = (i + 1) * sizeof (struct fit_sg_descriptor); 3380 3381 sg_list[i].next_desc_ptr = dma_address + ndp_off; 3382 } 3383 sg_list[i].next_desc_ptr = 0LL; 3384 } 3385 3386 return (sg_list); 3387 } 3388 3389 /* 3390 * DESTRUCT (FREE) 3391 */ 3392 3393 static void skd_free_skcomp(struct skd_device *skdev); 3394 static void skd_free_skmsg(struct skd_device *skdev); 3395 static void skd_free_skreq(struct skd_device *skdev); 3396 static void skd_free_sksb(struct skd_device *skdev); 3397 3398 static void skd_free_sg_list(struct skd_device *skdev, 3399 struct fit_sg_descriptor *sg_list, 3400 uint32_t n_sg, dma_mem_t dma_addr); 3401 3402 /* 3403 * 3404 * Name: skd_destruct, calls various routines to deallocate 3405 * space acquired during initialization. 3406 * 3407 * Inputs: skdev - device state structure. 3408 * 3409 * Returns: Nothing.
3410 * 3411 */ 3412 static void 3413 skd_destruct(struct skd_device *skdev) 3414 { 3415 if (skdev == NULL) { 3416 return; 3417 } 3418 3419 Dcmn_err(CE_NOTE, "destruct sksb"); 3420 skd_free_sksb(skdev); 3421 3422 Dcmn_err(CE_NOTE, "destruct skreq"); 3423 skd_free_skreq(skdev); 3424 3425 Dcmn_err(CE_NOTE, "destruct skmsg"); 3426 skd_free_skmsg(skdev); 3427 3428 Dcmn_err(CE_NOTE, "destruct skcomp"); 3429 skd_free_skcomp(skdev); 3430 3431 Dcmn_err(CE_NOTE, "DESTRUCT VICTORY"); 3432 } 3433 3434 /* 3435 * 3436 * Name: skd_free_skcomp, deallocates skcomp table DMA resources. 3437 * 3438 * Inputs: skdev - device state structure. 3439 * 3440 * Returns: Nothing. 3441 * 3442 */ 3443 static void 3444 skd_free_skcomp(struct skd_device *skdev) 3445 { 3446 if (skdev->skcomp_table != NULL) { 3447 skd_free_phys(skdev, &skdev->cq_dma_address); 3448 } 3449 3450 skdev->skcomp_table = NULL; 3451 } 3452 3453 /* 3454 * 3455 * Name: skd_free_skmsg, deallocates skmsg table DMA resources. 3456 * 3457 * Inputs: skdev - device state structure. 3458 * 3459 * Returns: Nothing. 3460 * 3461 */ 3462 static void 3463 skd_free_skmsg(struct skd_device *skdev) 3464 { 3465 uint32_t i; 3466 3467 if (NULL == skdev->skmsg_table) 3468 return; 3469 3470 for (i = 0; i < skdev->num_fitmsg_context; i++) { 3471 struct skd_fitmsg_context *skmsg; 3472 3473 skmsg = &skdev->skmsg_table[i]; 3474 3475 if (skmsg->msg_buf != NULL) { 3476 skd_free_phys(skdev, &skmsg->mb_dma_address); 3477 } 3478 3479 3480 skmsg->msg_buf = NULL; 3481 } 3482 3483 kmem_free(skdev->skmsg_table, sizeof (struct skd_fitmsg_context) * 3484 skdev->num_fitmsg_context); 3485 3486 skdev->skmsg_table = NULL; 3487 3488 } 3489 3490 /* 3491 * 3492 * Name: skd_free_skreq, deallocates skreq table DMA resources. 3493 * 3494 * Inputs: skdev - device state structure. 3495 * 3496 * Returns: Nothing. 3497 * 3498 */ 3499 static void 3500 skd_free_skreq(struct skd_device *skdev) 3501 { 3502 uint32_t i; 3503 3504 if (NULL == skdev->skreq_table) 3505 return; 3506 3507 for (i = 0; i < skdev->num_req_context; i++) { 3508 struct skd_request_context *skreq; 3509 3510 skreq = &skdev->skreq_table[i]; 3511 3512 skd_free_sg_list(skdev, skreq->sksg_list, 3513 skdev->sgs_per_request, skreq->sksg_dma_address); 3514 3515 skreq->sksg_list = NULL; 3516 } 3517 3518 kmem_free(skdev->skreq_table, sizeof (struct skd_request_context) * 3519 skdev->num_req_context); 3520 3521 skdev->skreq_table = NULL; 3522 3523 } 3524 3525 /* 3526 * 3527 * Name: skd_free_sksb, deallocates skspcl data buf and 3528 * msg buf DMA resources. 3529 * 3530 * Inputs: skdev - device state structure. 3531 * 3532 * Returns: Nothing. 3533 * 3534 */ 3535 static void 3536 skd_free_sksb(struct skd_device *skdev) 3537 { 3538 struct skd_special_context *skspcl; 3539 3540 skspcl = &skdev->internal_skspcl; 3541 3542 if (skspcl->data_buf != NULL) { 3543 skd_free_phys(skdev, &skspcl->db_dma_address); 3544 } 3545 3546 skspcl->data_buf = NULL; 3547 3548 if (skspcl->msg_buf != NULL) { 3549 skd_free_phys(skdev, &skspcl->mb_dma_address); 3550 } 3551 3552 skspcl->msg_buf = NULL; 3553 3554 skd_free_sg_list(skdev, skspcl->req.sksg_list, 1, 3555 skspcl->req.sksg_dma_address); 3556 3557 skspcl->req.sksg_list = NULL; 3558 } 3559 3560 /* 3561 * 3562 * Name: skd_free_sg_list, deallocates S/G DMA resources. 3563 * 3564 * Inputs: skdev - device state structure. 3565 * sg_list - S/G list itself. 3566 * n_sg - number of segments. 3567 * dma_addr - S/G list DMA address. 3568 * 3569 * Returns: Nothing.
3570 * 3571 */ 3572 /* ARGSUSED */ /* Upstream common source with other platforms. */ 3573 static void 3574 skd_free_sg_list(struct skd_device *skdev, 3575 struct fit_sg_descriptor *sg_list, 3576 uint32_t n_sg, dma_mem_t dma_addr) 3577 { 3578 if (sg_list != NULL) { 3579 skd_free_phys(skdev, &dma_addr); 3580 } 3581 } 3582 3583 /* 3584 * 3585 * Name: skd_queue, queues the I/O request. 3586 * 3587 * Inputs: skdev - device state structure. 3588 * pbuf - I/O request 3589 * 3590 * Returns: Nothing. 3591 * 3592 */ 3593 static void 3594 skd_queue(skd_device_t *skdev, skd_buf_private_t *pbuf) 3595 { 3596 struct waitqueue *waitq; 3597 3598 ASSERT(skdev != NULL); 3599 ASSERT(pbuf != NULL); 3600 3601 ASSERT(WAITQ_LOCK_HELD(skdev)); 3602 3603 waitq = &skdev->waitqueue; 3604 3605 if (SIMPLEQ_EMPTY(waitq)) 3606 SIMPLEQ_INSERT_HEAD(waitq, pbuf, sq); 3607 else 3608 SIMPLEQ_INSERT_TAIL(waitq, pbuf, sq); 3609 } 3610 3611 /* 3612 * 3613 * Name: skd_list_skreq, displays the skreq table entries. 3614 * 3615 * Inputs: skdev - device state structure. 3616 * list - flag, if true displays the entry address. 3617 * 3618 * Returns: The number of skreq entries found. 3619 * 3620 */ 3621 /* ARGSUSED */ /* Upstream common source with other platforms. */ 3622 static int 3623 skd_list_skreq(skd_device_t *skdev, int list) 3624 { 3625 int inx = 0; 3626 struct skd_request_context *skreq; 3627 3628 if (list) { 3629 Dcmn_err(CE_NOTE, "skreq_table[0]\n"); 3630 3631 skreq = &skdev->skreq_table[0]; 3632 while (skreq) { 3633 if (list) 3634 Dcmn_err(CE_NOTE, 3635 "%d: skreq=%p state=%d id=%x fid=%x " 3636 "pbuf=%p dir=%d comp=%d\n", 3637 inx, (void *)skreq, skreq->state, 3638 skreq->id, skreq->fitmsg_id, 3639 (void *)skreq->pbuf, 3640 skreq->sg_data_dir, skreq->did_complete); 3641 inx++; 3642 skreq = skreq->next; 3643 } 3644 } 3645 3646 inx = 0; 3647 skreq = skdev->skreq_free_list; 3648 3649 if (list) 3650 Dcmn_err(CE_NOTE, "skreq_free_list\n"); 3651 while (skreq) { 3652 if (list) 3653 Dcmn_err(CE_NOTE, "%d: skreq=%p state=%d id=%x fid=%x " 3654 "pbuf=%p dir=%d\n", inx, (void *)skreq, 3655 skreq->state, skreq->id, skreq->fitmsg_id, 3656 (void *)skreq->pbuf, skreq->sg_data_dir); 3657 inx++; 3658 skreq = skreq->next; 3659 } 3660 3661 return (inx); 3662 } 3663 3664 /* 3665 * 3666 * Name: skd_list_skmsg, displays the skmsg table entries. 3667 * 3668 * Inputs: skdev - device state structure. 3669 * list - flag, if true displays the entry address. 3670 * 3671 * Returns: The number of skmsg entries found.
3672 * 3673 */ 3674 static int 3675 skd_list_skmsg(skd_device_t *skdev, int list) 3676 { 3677 int inx = 0; 3678 struct skd_fitmsg_context *skmsgp; 3679 3680 skmsgp = &skdev->skmsg_table[0]; 3681 3682 if (list) { 3683 Dcmn_err(CE_NOTE, "skmsg_table[0]\n"); 3684 3685 while (skmsgp) { 3686 if (list) 3687 Dcmn_err(CE_NOTE, "%d: skmsgp=%p id=%x outs=%d " 3688 "l=%d o=%d nxt=%p\n", inx, (void *)skmsgp, 3689 skmsgp->id, skmsgp->outstanding, 3690 skmsgp->length, skmsgp->offset, 3691 (void *)skmsgp->next); 3692 inx++; 3693 skmsgp = skmsgp->next; 3694 } 3695 } 3696 3697 inx = 0; 3698 if (list) 3699 Dcmn_err(CE_NOTE, "skmsg_free_list\n"); 3700 skmsgp = skdev->skmsg_free_list; 3701 while (skmsgp) { 3702 if (list) 3703 Dcmn_err(CE_NOTE, "%d: skmsgp=%p id=%x outs=%d l=%d " 3704 "o=%d nxt=%p\n", 3705 inx, (void *)skmsgp, skmsgp->id, 3706 skmsgp->outstanding, skmsgp->length, 3707 skmsgp->offset, (void *)skmsgp->next); 3708 inx++; 3709 skmsgp = skmsgp->next; 3710 } 3711 3712 return (inx); 3713 } 3714 3715 /* 3716 * 3717 * Name: skd_get_queued_pbuf, retrieves the top-of-queue entry and 3718 * delinks it from the queue. 3719 * 3720 * Inputs: skdev - device state structure. 3721 * 3722 * 3723 * Returns: The top job queue entry, or NULL if the queue is empty. 3724 * 3725 */ 3726 static skd_buf_private_t 3727 *skd_get_queued_pbuf(skd_device_t *skdev) 3728 { 3729 skd_buf_private_t *pbuf; 3730 3731 ASSERT(WAITQ_LOCK_HELD(skdev)); 3732 pbuf = SIMPLEQ_FIRST(&skdev->waitqueue); 3733 if (pbuf != NULL) 3734 SIMPLEQ_REMOVE_HEAD(&skdev->waitqueue, sq); 3735 return (pbuf); 3736 } 3737 3738 /* 3739 * PCI DRIVER GLUE 3740 */ 3741 3742 /* 3743 * 3744 * Name: skd_pci_info, logs certain device PCI info. 3745 * 3746 * Inputs: skdev - device state structure. 3747 * 3748 * Returns: str which contains the device speed info. 3749 * 3750 */ 3751 static char * 3752 skd_pci_info(struct skd_device *skdev, char *str, size_t len) 3753 { 3754 int pcie_reg; 3755 3756 str[0] = '\0'; 3757 3758 pcie_reg = skd_pci_find_capability(skdev, PCI_CAP_ID_EXP); 3759 3760 if (pcie_reg) { 3761 uint16_t lstat, lspeed, lwidth; 3762 3763 pcie_reg += 0x12; 3764 lstat = pci_config_get16(skdev->pci_handle, pcie_reg); 3765 lspeed = lstat & (0xF); 3766 lwidth = (lstat & 0x3F0) >> 4; 3767 3768 (void) snprintf(str, len, "PCIe (%s x%d)", 3769 lspeed == 1 ? "2.5GT/s" : 3770 lspeed == 2 ? "5.0GT/s" : "<unknown>", 3771 lwidth); 3772 } 3773 3774 return (str); 3775 } 3776 3777 /* 3778 * MODULE GLUE 3779 */ 3780 3781 /* 3782 * 3783 * Name: skd_init, initializes certain values. 3784 * 3785 * Inputs: skdev - device state structure. 3786 * 3787 * Returns: Zero. 3788 * 3789 */ 3790 /* ARGSUSED */ /* Upstream common source with other platforms.
*/ 3791 static int 3792 skd_init(skd_device_t *skdev) 3793 { 3794 Dcmn_err(CE_NOTE, "skd_init: v%s-b%s\n", DRV_VERSION, DRV_BUILD_ID); 3795 3796 if (skd_max_queue_depth < 1 || 3797 skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) { 3798 cmn_err(CE_NOTE, "skd_max_queue_depth %d invalid, reset to %d\n", 3799 skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT); 3800 skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT; 3801 } 3802 3803 if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > SKD_MAX_REQ_PER_MSG) { 3804 cmn_err(CE_NOTE, "skd_max_req_per_msg %d invalid, set to %d\n", 3805 skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT); 3806 skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT; 3807 } 3808 3809 3810 if (skd_sgs_per_request < 1 || skd_sgs_per_request > SKD_MAX_N_SG_PER_REQ) { 3811 cmn_err(CE_NOTE, "skd_sgs_per_request %d invalid, set to %d\n", 3812 skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT); 3813 skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT; 3814 } 3815 3816 if (skd_dbg_level < 0 || skd_dbg_level > 2) { 3817 cmn_err(CE_NOTE, "skd_dbg_level %d invalid, reset to %d\n", 3818 skd_dbg_level, 0); 3819 skd_dbg_level = 0; 3820 } 3821 3822 return (0); 3823 } 3824 3825 /* 3826 * 3827 * Name: skd_exit, exits the driver & logs the fact. 3828 * 3829 * Inputs: none. 3830 * 3831 * Returns: Nothing. 3832 * 3833 */ 3834 static void 3835 skd_exit(void) 3836 { 3837 cmn_err(CE_NOTE, "skd v%s unloading", DRV_VERSION); 3838 } 3839 3840 /* 3841 * 3842 * Name: skd_drive_state_to_str, converts binary drive state 3843 * to its corresponding string value. 3844 * 3845 * Inputs: Drive state. 3846 * 3847 * Returns: String representing drive state. 3848 * 3849 */ 3850 const char * 3851 skd_drive_state_to_str(int state) 3852 { 3853 switch (state) { 3854 case FIT_SR_DRIVE_OFFLINE: return ("OFFLINE"); 3855 case FIT_SR_DRIVE_INIT: return ("INIT"); 3856 case FIT_SR_DRIVE_ONLINE: return ("ONLINE"); 3857 case FIT_SR_DRIVE_BUSY: return ("BUSY"); 3858 case FIT_SR_DRIVE_FAULT: return ("FAULT"); 3859 case FIT_SR_DRIVE_DEGRADED: return ("DEGRADED"); 3860 case FIT_SR_PCIE_LINK_DOWN: return ("LINK_DOWN"); 3861 case FIT_SR_DRIVE_SOFT_RESET: return ("SOFT_RESET"); 3862 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD: return ("NEED_FW"); 3863 case FIT_SR_DRIVE_INIT_FAULT: return ("INIT_FAULT"); 3864 case FIT_SR_DRIVE_BUSY_SANITIZE:return ("BUSY_SANITIZE"); 3865 case FIT_SR_DRIVE_BUSY_ERASE: return ("BUSY_ERASE"); 3866 case FIT_SR_DRIVE_FW_BOOTING: return ("FW_BOOTING"); 3867 default: return ("???"); 3868 } 3869 } 3870 3871 /* 3872 * 3873 * Name: skd_skdev_state_to_str, converts binary driver state 3874 * to its corresponding string value. 3875 * 3876 * Inputs: Driver state. 3877 * 3878 * Returns: String representing driver state.
3879 * 3880 */ 3881 static const char * 3882 skd_skdev_state_to_str(enum skd_drvr_state state) 3883 { 3884 switch (state) { 3885 case SKD_DRVR_STATE_LOAD: return ("LOAD"); 3886 case SKD_DRVR_STATE_IDLE: return ("IDLE"); 3887 case SKD_DRVR_STATE_BUSY: return ("BUSY"); 3888 case SKD_DRVR_STATE_STARTING: return ("STARTING"); 3889 case SKD_DRVR_STATE_ONLINE: return ("ONLINE"); 3890 case SKD_DRVR_STATE_PAUSING: return ("PAUSING"); 3891 case SKD_DRVR_STATE_PAUSED: return ("PAUSED"); 3892 case SKD_DRVR_STATE_DRAINING_TIMEOUT: return ("DRAINING_TIMEOUT"); 3893 case SKD_DRVR_STATE_RESTARTING: return ("RESTARTING"); 3894 case SKD_DRVR_STATE_RESUMING: return ("RESUMING"); 3895 case SKD_DRVR_STATE_STOPPING: return ("STOPPING"); 3896 case SKD_DRVR_STATE_SYNCING: return ("SYNCING"); 3897 case SKD_DRVR_STATE_FAULT: return ("FAULT"); 3898 case SKD_DRVR_STATE_DISAPPEARED: return ("DISAPPEARED"); 3899 case SKD_DRVR_STATE_BUSY_ERASE: return ("BUSY_ERASE"); 3900 case SKD_DRVR_STATE_BUSY_SANITIZE:return ("BUSY_SANITIZE"); 3901 case SKD_DRVR_STATE_BUSY_IMMINENT: return ("BUSY_IMMINENT"); 3902 case SKD_DRVR_STATE_WAIT_BOOT: return ("WAIT_BOOT"); 3903 3904 default: return ("???"); 3905 } 3906 } 3907 3908 /* 3909 * 3910 * Name: skd_skmsg_state_to_str, converts binary msg state 3911 * to its corresponding string value. 3912 * 3913 * Inputs: Msg state. 3914 * 3915 * Returns: String representing msg state. 3916 * 3917 */ 3918 static const char * 3919 skd_skmsg_state_to_str(enum skd_fit_msg_state state) 3920 { 3921 switch (state) { 3922 case SKD_MSG_STATE_IDLE: return ("IDLE"); 3923 case SKD_MSG_STATE_BUSY: return ("BUSY"); 3924 default: return ("???"); 3925 } 3926 } 3927 3928 /* 3929 * 3930 * Name: skd_skreq_state_to_str, converts binary req state 3931 * to its corresponding string value. 3932 * 3933 * Inputs: Req state. 3934 * 3935 * Returns: String representing req state. 3936 * 3937 */ 3938 static const char * 3939 skd_skreq_state_to_str(enum skd_req_state state) 3940 { 3941 switch (state) { 3942 case SKD_REQ_STATE_IDLE: return ("IDLE"); 3943 case SKD_REQ_STATE_SETUP: return ("SETUP"); 3944 case SKD_REQ_STATE_BUSY: return ("BUSY"); 3945 case SKD_REQ_STATE_COMPLETED: return ("COMPLETED"); 3946 case SKD_REQ_STATE_TIMEOUT: return ("TIMEOUT"); 3947 case SKD_REQ_STATE_ABORTED: return ("ABORTED"); 3948 default: return ("???"); 3949 } 3950 } 3951 3952 /* 3953 * 3954 * Name: skd_log_skdev, logs device state & parameters. 3955 * 3956 * Inputs: skdev - device state structure. 3957 * event - event (string) to log. 3958 * 3959 * Returns: Nothing. 3960 * 3961 */ 3962 static void 3963 skd_log_skdev(struct skd_device *skdev, const char *event) 3964 { 3965 Dcmn_err(CE_NOTE, "log_skdev(%s) skdev=%p event='%s'", 3966 skdev->name, (void *)skdev, event); 3967 Dcmn_err(CE_NOTE, " drive_state=%s(%d) driver_state=%s(%d)", 3968 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, 3969 skd_skdev_state_to_str(skdev->state), skdev->state); 3970 Dcmn_err(CE_NOTE, " busy=%d limit=%d soft=%d hard=%d lowat=%d", 3971 skdev->queue_depth_busy, skdev->queue_depth_limit, 3972 skdev->soft_queue_depth_limit, skdev->hard_queue_depth_limit, 3973 skdev->queue_depth_lowat); 3974 Dcmn_err(CE_NOTE, " timestamp=0x%x cycle=%d cycle_ix=%d", 3975 skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix); 3976 } 3977 3978 /* 3979 * 3980 * Name: skd_log_skmsg, logs the skmsg event. 3981 * 3982 * Inputs: skdev - device state structure. 3983 * skmsg - FIT message structure. 3984 * event - event string to log. 3985 * 3986 * Returns: Nothing.
3987 * 3988 */ 3989 static void 3990 skd_log_skmsg(struct skd_device *skdev, 3991 struct skd_fitmsg_context *skmsg, const char *event) 3992 { 3993 Dcmn_err(CE_NOTE, "log_skmsg:(%s) skmsg=%p event='%s'", 3994 skdev->name, (void *)skmsg, event); 3995 Dcmn_err(CE_NOTE, " state=%s(%d) id=0x%04x length=%d", 3996 skd_skmsg_state_to_str(skmsg->state), skmsg->state, 3997 skmsg->id, skmsg->length); 3998 } 3999 4000 /* 4001 * 4002 * Name: skd_log_skreq, logs the skreq event. 4003 * 4004 * Inputs: skdev - device state structure. 4005 * skreq - skreq structure. 4006 * event - event string to log. 4007 * 4008 * Returns: Nothing. 4009 * 4010 */ 4011 static void 4012 skd_log_skreq(struct skd_device *skdev, 4013 struct skd_request_context *skreq, const char *event) 4014 { 4015 skd_buf_private_t *pbuf; 4016 4017 Dcmn_err(CE_NOTE, "log_skreq: (%s) skreq=%p pbuf=%p event='%s'", 4018 skdev->name, (void *)skreq, (void *)skreq->pbuf, event); 4019 4020 Dcmn_err(CE_NOTE, " state=%s(%d) id=0x%04x fitmsg=0x%04x", 4021 skd_skreq_state_to_str(skreq->state), skreq->state, 4022 skreq->id, skreq->fitmsg_id); 4023 Dcmn_err(CE_NOTE, " timo=0x%x sg_dir=%d n_sg=%d", 4024 skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg); 4025 4026 if ((pbuf = skreq->pbuf) != NULL) { 4027 uint32_t lba, count; 4028 lba = pbuf->x_xfer->x_blkno; 4029 count = pbuf->x_xfer->x_nblks; 4030 Dcmn_err(CE_NOTE, " pbuf=%p lba=%u(0x%x) count=%u(0x%x) ", 4031 (void *)pbuf, lba, lba, count, count); 4032 Dcmn_err(CE_NOTE, " dir=%s " 4033 " intrs=%" PRId64 " qdepth=%d", 4034 (pbuf->dir & B_READ) ? "Read" : "Write", 4035 skdev->intr_cntr, skdev->queue_depth_busy); 4036 } else { 4037 Dcmn_err(CE_NOTE, " pbuf=NULL\n"); 4038 } 4039 } 4040 4041 /* 4042 * 4043 * Name: skd_init_mutex, initializes all mutexes. 4044 * 4045 * Inputs: skdev - device state structure. 4046 * 4047 * Returns: DDI_FAILURE on failure otherwise DDI_SUCCESS. 4048 * 4049 */ 4050 static int 4051 skd_init_mutex(skd_device_t *skdev) 4052 { 4053 void *intr; 4054 4055 Dcmn_err(CE_CONT, "(%s%d): init_mutex flags=%x", DRV_NAME, 4056 skdev->instance, skdev->flags); 4057 4058 intr = (void *)(uintptr_t)skdev->intr_pri; 4059 4060 if (skdev->flags & SKD_MUTEX_INITED) 4061 cmn_err(CE_NOTE, "init_mutex: Oh-Oh - already INITED"); 4062 4063 /* mutexes to protect the adapter state structure. */ 4064 mutex_init(&skdev->skd_lock_mutex, NULL, MUTEX_DRIVER, 4065 DDI_INTR_PRI(intr)); 4066 mutex_init(&skdev->skd_intr_mutex, NULL, MUTEX_DRIVER, 4067 DDI_INTR_PRI(intr)); 4068 mutex_init(&skdev->waitqueue_mutex, NULL, MUTEX_DRIVER, 4069 DDI_INTR_PRI(intr)); 4070 mutex_init(&skdev->skd_internalio_mutex, NULL, MUTEX_DRIVER, 4071 DDI_INTR_PRI(intr)); 4072 4073 cv_init(&skdev->cv_waitq, NULL, CV_DRIVER, NULL); 4074 4075 skdev->flags |= SKD_MUTEX_INITED; 4076 if (skdev->flags & SKD_MUTEX_DESTROYED) 4077 skdev->flags &= ~SKD_MUTEX_DESTROYED; 4078 4079 Dcmn_err(CE_CONT, "init_mutex (%s%d): done, flags=%x", DRV_NAME, 4080 skdev->instance, skdev->flags); 4081 4082 return (DDI_SUCCESS); 4083 } 4084 4085 /* 4086 * 4087 * Name: skd_destroy_mutex, destroys all mutexes. 4088 * 4089 * Inputs: skdev - device state structure. 4090 * 4091 * Returns: Nothing.
4092 * 4093 */ 4094 static void 4095 skd_destroy_mutex(skd_device_t *skdev) 4096 { 4097 if ((skdev->flags & SKD_MUTEX_DESTROYED) == 0) { 4098 if (skdev->flags & SKD_MUTEX_INITED) { 4099 mutex_destroy(&skdev->waitqueue_mutex); 4100 mutex_destroy(&skdev->skd_intr_mutex); 4101 mutex_destroy(&skdev->skd_lock_mutex); 4102 mutex_destroy(&skdev->skd_internalio_mutex); 4103 4104 cv_destroy(&skdev->cv_waitq); 4105 4106 skdev->flags |= SKD_MUTEX_DESTROYED; 4107 skdev->flags &= ~SKD_MUTEX_INITED; 4108 4109 4110 } 4111 } 4112 } 4113 4114 /* 4115 * 4116 * Name: skd_setup_intr, setup the interrupt handling 4117 * 4118 * Inputs: skdev - device state structure. 4119 * intr_type - requested DDI interrupt type. 4120 * 4121 * Returns: DDI_FAILURE on failure otherwise DDI_SUCCESS. 4122 * 4123 */ 4124 static int 4125 skd_setup_intr(skd_device_t *skdev, int intr_type) 4126 { 4127 int32_t count = 0; 4128 int32_t avail = 0; 4129 int32_t actual = 0; 4130 int32_t ret; 4131 uint32_t i; 4132 4133 Dcmn_err(CE_CONT, "(%s%d): setup_intr", DRV_NAME, skdev->instance); 4134 4135 /* Get number of interrupts the platform h/w supports */ 4136 if (((ret = ddi_intr_get_nintrs(skdev->dip, intr_type, &count)) != 4137 DDI_SUCCESS) || count == 0) { 4138 cmn_err(CE_WARN, "!intr_setup failed, nintrs ret=%xh, cnt=%xh", 4139 ret, count); 4140 4141 return (DDI_FAILURE); 4142 } 4143 4144 /* Get number of available system interrupts */ 4145 if (((ret = ddi_intr_get_navail(skdev->dip, intr_type, &avail)) != 4146 DDI_SUCCESS) || avail == 0) { 4147 cmn_err(CE_WARN, "!intr_setup failed, navail ret=%xh, " 4148 "avail=%xh", ret, avail); 4149 4150 return (DDI_FAILURE); 4151 } 4152 4153 if (intr_type == DDI_INTR_TYPE_MSIX && avail < SKD_MSIX_MAXAIF) { 4154 cmn_err(CE_WARN, "!intr_setup failed, min MSI-X h/w vectors " 4155 "req'd: %d, avail: %d", 4156 SKD_MSIX_MAXAIF, avail); 4157 4158 return (DDI_FAILURE); 4159 } 4160 4161 /* Allocate space for interrupt handles */ 4162 skdev->hsize = sizeof (ddi_intr_handle_t) * avail; 4163 skdev->htable = kmem_zalloc(skdev->hsize, KM_SLEEP); 4164 4165 /* Allocate the interrupts */ 4166 if ((ret = ddi_intr_alloc(skdev->dip, skdev->htable, intr_type, 4167 0, count, &actual, 0)) != DDI_SUCCESS) { 4168 cmn_err(CE_WARN, "!intr_setup failed, intr_alloc ret=%xh, " 4169 "count = %xh, " "actual=%xh", ret, count, actual); 4170 4171 skd_release_intr(skdev); 4172 4173 return (DDI_FAILURE); 4174 } 4175 4176 skdev->intr_cnt = actual; 4177 4178 if (intr_type == DDI_INTR_TYPE_FIXED) 4179 (void) ddi_intr_set_pri(skdev->htable[0], 10); 4180 4181 /* Get interrupt priority */ 4182 if ((ret = ddi_intr_get_pri(skdev->htable[0], &skdev->intr_pri)) != 4183 DDI_SUCCESS) { 4184 cmn_err(CE_WARN, "!intr_setup failed, get_pri ret=%xh", ret); 4185 skd_release_intr(skdev); 4186 4187 return (ret); 4188 } 4189 4190 /* Add the interrupt handlers */ 4191 for (i = 0; i < actual; i++) { 4192 if ((ret = ddi_intr_add_handler(skdev->htable[i], 4193 skd_isr_aif, (void *)skdev, (void *)((ulong_t)i))) != 4194 DDI_SUCCESS) { 4195 cmn_err(CE_WARN, "!intr_setup failed, addh#=%xh, " 4196 "act=%xh, ret=%xh", i, actual, ret); 4197 skd_release_intr(skdev); 4198 4199 return (ret); 4200 } 4201 } 4202 4203 /* Setup mutexes */ 4204 if ((ret = skd_init_mutex(skdev)) != DDI_SUCCESS) { 4205 cmn_err(CE_WARN, "!intr_setup failed, mutex init ret=%xh", ret); 4206 skd_release_intr(skdev); 4207 4208 return (ret); 4209 } 4210 4211 /* Get the capabilities */ 4212 (void) ddi_intr_get_cap(skdev->htable[0], &skdev->intr_cap); 4213 4214 /*
	if (skdev->intr_cap & DDI_INTR_FLAG_BLOCK) {
		if ((ret = ddi_intr_block_enable(skdev->htable,
		    skdev->intr_cnt)) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "!failed, intr_setup block enable, "
			    "ret=%xh", ret);
			skd_destroy_mutex(skdev);
			skd_release_intr(skdev);

			return (ret);
		}
	} else {
		for (i = 0; i < skdev->intr_cnt; i++) {
			if ((ret = ddi_intr_enable(skdev->htable[i])) !=
			    DDI_SUCCESS) {
				cmn_err(CE_WARN, "!intr_setup failed, "
				    "intr enable, ret=%xh", ret);
				skd_destroy_mutex(skdev);
				skd_release_intr(skdev);

				return (ret);
			}
		}
	}

	if (intr_type == DDI_INTR_TYPE_FIXED)
		(void) ddi_intr_clr_mask(skdev->htable[0]);

	skdev->irq_type = intr_type;

	return (DDI_SUCCESS);
}

/*
 *
 * Name: skd_disable_intr, disables interrupt handling.
 *
 * Inputs: skdev - device state structure.
 *
 * Returns: Nothing.
 *
 */
static void
skd_disable_intr(skd_device_t *skdev)
{
	uint32_t i, rval;

	if (skdev->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Remove AIF block interrupts (MSI/MSI-X). */
		if ((rval = ddi_intr_block_disable(skdev->htable,
		    skdev->intr_cnt)) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "!failed intr block disable, rval=%x",
			    rval);
		}
	} else {
		/* Remove AIF non-block interrupts (fixed). */
		for (i = 0; i < skdev->intr_cnt; i++) {
			if ((rval = ddi_intr_disable(skdev->htable[i])) !=
			    DDI_SUCCESS) {
				cmn_err(CE_WARN, "!failed intr disable, "
				    "intr#=%xh, rval=%xh", i, rval);
			}
		}
	}
}

/*
 *
 * Name: skd_release_intr, releases all interrupt resources.
 *
 * Inputs: skdev - device state structure.
 *
 * Returns: Nothing.
 *
 */
static void
skd_release_intr(skd_device_t *skdev)
{
	int32_t i;
	int rval;

	Dcmn_err(CE_CONT, "REL_INTR intr_cnt=%d", skdev->intr_cnt);

	if (skdev->irq_type == 0) {
		Dcmn_err(CE_CONT, "release_intr: (%s%d): done",
		    DRV_NAME, skdev->instance);
		return;
	}

	if (skdev->htable != NULL && skdev->hsize > 0) {
		i = (int32_t)skdev->hsize /
		    (int32_t)sizeof (ddi_intr_handle_t);

		while (i-- > 0) {
			if (skdev->htable[i] == 0) {
				Dcmn_err(CE_NOTE, "htable[%x]=0h", i);
				continue;
			}

			if ((rval = ddi_intr_disable(skdev->htable[i])) !=
			    DDI_SUCCESS)
				Dcmn_err(CE_NOTE, "release_intr: intr_disable "
				    "htable[%d], rval=%d", i, rval);

			if (i < skdev->intr_cnt) {
				if ((rval = ddi_intr_remove_handler(
				    skdev->htable[i])) != DDI_SUCCESS)
					cmn_err(CE_WARN, "!release_intr: "
					    "intr_remove_handler FAILED, "
					    "rval=%d", rval);

				Dcmn_err(CE_NOTE, "release_intr: "
				    "remove_handler htable[%d]", i);
			}

			if ((rval = ddi_intr_free(skdev->htable[i])) !=
			    DDI_SUCCESS)
				cmn_err(CE_WARN, "!release_intr: intr_free "
				    "FAILED, rval=%d", rval);
			Dcmn_err(CE_NOTE, "release_intr: intr_free htable[%d]",
			    i);
		}

		kmem_free(skdev->htable, skdev->hsize);
		skdev->htable = NULL;
	}

	skdev->hsize = 0;
	skdev->intr_cnt = 0;
	skdev->intr_pri = 0;
	skdev->intr_cap = 0;
	skdev->irq_type = 0;
}
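/*
 * Attach-time allocations are torn down in the reverse of the order in
 * which they were made; each SKD_* bit in "seq" records one completed
 * setup step (see the "progress" mask accumulated in skd_attach()
 * below).
 */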
/*
 *
 * Name: skd_dealloc_resources, deallocates resources allocated
 *	 during attach.
 *
 * Inputs: dip - DDI device info pointer.
 *	   skdev - device state structure.
 *	   seq - bit flags representing the allocated items.
 *	   instance - device instance.
 *
 * Returns: Nothing.
 *
 */
/* ARGSUSED */	/* Upstream common source with other platforms. */
static void
skd_dealloc_resources(dev_info_t *dip, skd_device_t *skdev,
    uint32_t seq, int instance)
{
	if (skdev == NULL)
		return;

	if (seq & SKD_CONSTRUCTED)
		skd_destruct(skdev);

	if (seq & SKD_INTR_ADDED) {
		skd_disable_intr(skdev);
		skd_release_intr(skdev);
	}

	if (seq & SKD_DEV_IOBASE_MAPPED)
		ddi_regs_map_free(&skdev->dev_handle);

	if (seq & SKD_IOMAP_IOBASE_MAPPED)
		ddi_regs_map_free(&skdev->iomap_handle);

	if (seq & SKD_REGS_MAPPED)
		ddi_regs_map_free(&skdev->iobase_handle);

	if (seq & SKD_CONFIG_SPACE_SETUP)
		pci_config_teardown(&skdev->pci_handle);

	if (seq & SKD_SOFT_STATE_ALLOCED) {
		if (skdev->pathname &&
		    (skdev->flags & SKD_PATHNAME_ALLOCED)) {
			kmem_free(skdev->pathname,
			    strlen(skdev->pathname)+1);
		}
	}

	if (skdev->s1120_devid)
		ddi_devid_free(skdev->s1120_devid);
}

/*
 *
 * Name: skd_setup_interrupts, sets up the appropriate interrupt type
 *	 (MSI-X, MSI, or fixed).
 *
 * Inputs: skdev - device state structure.
 *
 * Returns: DDI_FAILURE on failure, otherwise DDI_SUCCESS.
 *
 */
static int
skd_setup_interrupts(skd_device_t *skdev)
{
	int32_t rval = DDI_FAILURE;
	int32_t i;
	int32_t itypes = 0;

	/*
	 * See what types of interrupts this adapter and platform support.
	 */
	if ((i = ddi_intr_get_supported_types(skdev->dip, &itypes)) !=
	    DDI_SUCCESS) {
		cmn_err(CE_NOTE, "intr supported types failed, rval=%xh", i);
		return (DDI_FAILURE);
	}

	Dcmn_err(CE_NOTE, "%s: supported interrupt types: %x",
	    skdev->name, itypes);

	itypes &= skdev->irq_type;

	if (!skd_disable_msix && (itypes & DDI_INTR_TYPE_MSIX) &&
	    (rval = skd_setup_intr(skdev, DDI_INTR_TYPE_MSIX)) ==
	    DDI_SUCCESS) {
		cmn_err(CE_NOTE, "!%s: successful MSI-X setup",
		    skdev->name);
	} else if (!skd_disable_msi && (itypes & DDI_INTR_TYPE_MSI) &&
	    (rval = skd_setup_intr(skdev, DDI_INTR_TYPE_MSI)) ==
	    DDI_SUCCESS) {
		cmn_err(CE_NOTE, "!%s: successful MSI setup",
		    skdev->name);
	} else if ((itypes & DDI_INTR_TYPE_FIXED) &&
	    (rval = skd_setup_intr(skdev, DDI_INTR_TYPE_FIXED))
	    == DDI_SUCCESS) {
		cmn_err(CE_NOTE, "!%s: successful fixed intr setup",
		    skdev->name);
	} else {
		cmn_err(CE_WARN, "!%s: no supported interrupt types",
		    skdev->name);
		return (DDI_FAILURE);
	}

	Dcmn_err(CE_CONT, "%s: setup interrupts done", skdev->name);

	return (rval);
}
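/*
 * For illustration only, a hypothetical skd.conf fragment setting the
 * tunables read below (the values shown are examples, not defaults):
 *
 *	intr-type-cap=4;		(1 = fixed, 2 = MSI, 4 = MSI-X)
 *	max-scsi-reqs=64;
 *	max-scsi-reqs-per-msg=1;
 *	max-sgs-per-req=256;
 *	dbg-level=1;
 */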
/*
 *
 * Name: skd_get_properties, retrieves properties from skd.conf.
 *
 * Inputs: skdev - device state structure.
 *	   dip - dev_info data structure.
 *
 * Returns: Nothing.
 *
 */
/* ARGSUSED */	/* Upstream common source with other platforms. */
static void
skd_get_properties(dev_info_t *dip, skd_device_t *skdev)
{
	int prop_value;

	skd_isr_type = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    "intr-type-cap", -1);

	prop_value = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    "max-scsi-reqs", -1);
	if (prop_value >= 1 && prop_value <= SKD_MAX_QUEUE_DEPTH)
		skd_max_queue_depth = prop_value;

	prop_value = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    "max-scsi-reqs-per-msg", -1);
	if (prop_value >= 1 && prop_value <= SKD_MAX_REQ_PER_MSG)
		skd_max_req_per_msg = prop_value;

	prop_value = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    "max-sgs-per-req", -1);
	if (prop_value >= 1 && prop_value <= SKD_MAX_N_SG_PER_REQ)
		skd_sgs_per_request = prop_value;

	prop_value = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    "dbg-level", -1);
	if (prop_value >= 1 && prop_value <= 2)
		skd_dbg_level = prop_value;
}

/*
 *
 * Name: skd_wait_for_s1120, waits for the device to finish
 *	 its initialization.
 *
 * Inputs: skdev - device state structure.
 *
 * Returns: DDI_SUCCESS or DDI_FAILURE.
 *
 */
static int
skd_wait_for_s1120(skd_device_t *skdev)
{
	clock_t cur_ticks, tmo;
	int loop_cntr = 0;
	int rc = DDI_FAILURE;

	mutex_enter(&skdev->skd_internalio_mutex);

	while (skdev->gendisk_on == 0) {
		cur_ticks = ddi_get_lbolt();
		tmo = cur_ticks + drv_usectohz(MICROSEC);
		if (cv_timedwait(&skdev->cv_waitq,
		    &skdev->skd_internalio_mutex, tmo) == -1) {
			/* Oops - timed out */
			if (loop_cntr++ > 10)
				break;
		}
	}

	mutex_exit(&skdev->skd_internalio_mutex);

	if (skdev->gendisk_on == 1)
		rc = DDI_SUCCESS;

	return (rc);
}

/*
 *
 * Name: skd_update_props, updates certain device properties.
 *
 * Inputs: skdev - device state structure.
 *	   dip - dev_info structure.
 *
 * Returns: Nothing.
 *
 */
static void
skd_update_props(skd_device_t *skdev, dev_info_t *dip)
{
	int blksize = 512;

	if ((ddi_prop_update_int64(DDI_DEV_T_NONE, dip, "device-nblocks",
	    skdev->Nblocks) != DDI_SUCCESS) ||
	    (ddi_prop_update_int(DDI_DEV_T_NONE, dip, "device-blksize",
	    blksize) != DDI_SUCCESS)) {
		cmn_err(CE_NOTE, "%s: FAILED to create driver properties",
		    skdev->name);
	}
}

/*
 *
 * Name: skd_setup_devid, sets up device ID info.
 *
 * Inputs: skdev - device state structure.
 *	   devid - Device ID for the DDI.
 *
 * Returns: DDI_SUCCESS or DDI_FAILURE.
 *
 */
static int
skd_setup_devid(skd_device_t *skdev, ddi_devid_t *devid)
{
	int rc, sz_model, sz_sn, sz;

	sz_model = scsi_ascii_inquiry_len(skdev->inq_product_id,
	    strlen(skdev->inq_product_id));
	sz_sn = scsi_ascii_inquiry_len(skdev->inq_serial_num,
	    strlen(skdev->inq_serial_num));
	sz = sz_model + sz_sn + 1;

	(void) snprintf(skdev->devid_str, sizeof (skdev->devid_str),
	    "%.*s=%.*s", sz_model, skdev->inq_product_id, sz_sn,
	    skdev->inq_serial_num);
	rc = ddi_devid_init(skdev->dip, DEVID_SCSI_SERIAL, sz,
	    skdev->devid_str, devid);

	if (rc != DDI_SUCCESS)
		cmn_err(CE_WARN, "!%s: devid_init FAILED", skdev->name);

	return (rc);
}
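/*
 * skd_setup_devid() above fabricates the devid from the trimmed
 * INQUIRY product id and serial number, yielding a string of the form
 * "<product-id>=<serial>" (for example, hypothetically,
 * "S1120=0123456789AB"), registered as a DEVID_SCSI_SERIAL encoding.
 */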
/*
 *
 * Name: skd_bd_attach, attaches to the blkdev driver.
 *
 * Inputs: skdev - device state structure.
 *	   dip - device info structure.
 *
 * Returns: DDI_SUCCESS or DDI_FAILURE.
 *
 */
static int
skd_bd_attach(dev_info_t *dip, skd_device_t *skdev)
{
	int rv;

	skdev->s_bdh = bd_alloc_handle(skdev, &skd_bd_ops,
	    &skd_64bit_io_dma_attr, KM_SLEEP);

	if (skdev->s_bdh == NULL) {
		cmn_err(CE_WARN, "!skd_bd_attach: FAILED");

		return (DDI_FAILURE);
	}

	rv = bd_attach_handle(dip, skdev->s_bdh);

	if (rv != DDI_SUCCESS) {
		cmn_err(CE_WARN, "!bd_attach_handle FAILED\n");
	} else {
		Dcmn_err(CE_NOTE, "bd_attach_handle OK\n");
		skdev->bd_attached++;
	}

	return (rv);
}

/*
 *
 * Name: skd_bd_detach, detaches from the blkdev driver.
 *
 * Inputs: skdev - device state structure.
 *
 * Returns: Nothing.
 *
 */
static void
skd_bd_detach(skd_device_t *skdev)
{
	if (skdev->bd_attached)
		(void) bd_detach_handle(skdev->s_bdh);

	bd_free_handle(skdev->s_bdh);
}

/*
 *
 * Name: skd_attach, attaches the skd device driver.
 *
 * Inputs: dip - device info structure.
 *	   cmd - DDI attach argument (ATTACH, RESUME, etc.).
 *
 * Returns: DDI_SUCCESS or DDI_FAILURE.
 *
 */
static int
skd_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance;
	int nregs;
	skd_device_t *skdev = NULL;
	int inx;
	uint16_t cmd_reg;
	int progress = 0;
	char name[MAXPATHLEN];
	off_t regsize;
	char pci_str[32];
	char fw_version[8];

	instance = ddi_get_instance(dip);

	(void) ddi_get_parent_data(dip);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		skdev = ddi_get_soft_state(skd_state, instance);
		if (skdev == NULL)
			return (DDI_FAILURE);

		/* Re-enable timer */
		skd_start_timer(skdev);

		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	Dcmn_err(CE_NOTE, "sTec S1120 Driver v%s Instance: %d",
	    VERSIONSTR, instance);

	/*
	 * Check that the hardware is installed in a DMA-capable slot.
	 */
	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
		cmn_err(CE_WARN, "!%s%d: installed in a "
		    "slot that isn't DMA-capable", DRV_NAME, instance);
		return (DDI_FAILURE);
	}

	/*
	 * No support for high-level interrupts.
	 */
	if (ddi_intr_hilevel(dip, 0) != 0) {
		cmn_err(CE_WARN, "!%s%d: High level interrupt not supported",
		    DRV_NAME, instance);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate our per-device-instance structure.
	 */
	if (ddi_soft_state_zalloc(skd_state, instance) !=
	    DDI_SUCCESS) {
		cmn_err(CE_WARN, "!%s%d: soft state zalloc failed",
		    DRV_NAME, instance);
		return (DDI_FAILURE);
	}

	progress |= SKD_SOFT_STATE_ALLOCED;

	skdev = ddi_get_soft_state(skd_state, instance);
	if (skdev == NULL) {
		cmn_err(CE_WARN, "!%s%d: Unable to get soft state structure",
		    DRV_NAME, instance);
		goto skd_attach_failed;
	}

	(void) snprintf(skdev->name, sizeof (skdev->name),
	    DRV_NAME "%d", instance);

	skdev->dip = dip;
	skdev->instance = instance;

	ddi_set_driver_private(dip, skdev);

	/*
	 * Save the device pathname, trimming the function number
	 * (",F") from the unit address if one is present.
	 */
	(void) ddi_pathname(dip, name);
	for (inx = strlen(name); inx; inx--) {
		if (name[inx] == ',') {
			name[inx] = '\0';
			break;
		}
		if (name[inx] == '@') {
			break;
		}
	}

	skdev->pathname = kmem_zalloc(strlen(name) + 1, KM_SLEEP);
	(void) strlcpy(skdev->pathname, name, strlen(name) + 1);
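	/*
	 * The pathname allocation is recorded both in the local
	 * "progress" mask (passed to skd_dealloc_resources() on the
	 * failure path) and in skdev->flags, which
	 * skd_dealloc_resources() checks before freeing the string.
	 */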
	progress |= SKD_PATHNAME_ALLOCED;
	skdev->flags |= SKD_PATHNAME_ALLOCED;

	if (pci_config_setup(dip, &skdev->pci_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "!%s%d: pci_config_setup FAILED",
		    DRV_NAME, instance);
		goto skd_attach_failed;
	}

	progress |= SKD_CONFIG_SPACE_SETUP;

	/* Get the number of register sets. */
	(void) ddi_dev_nregs(dip, &nregs);

	/*
	 * 0x0 Configuration Space
	 * 0x1 I/O Space
	 * 0x2 s1120 register space
	 */
	if (ddi_dev_regsize(dip, 1, &regsize) != DDI_SUCCESS ||
	    ddi_regs_map_setup(dip, 1, &skdev->iobase, 0, regsize,
	    &dev_acc_attr, &skdev->iobase_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "!%s%d: regs_map_setup(mem) failed",
		    DRV_NAME, instance);
		goto skd_attach_failed;
	}
	progress |= SKD_REGS_MAPPED;

	skdev->iomap_iobase = skdev->iobase;
	skdev->iomap_handle = skdev->iobase_handle;

	Dcmn_err(CE_NOTE, "%s: PCI iobase=%ph, iomap=%ph, regnum=%d, "
	    "regsize=%ld", skdev->name, (void *)skdev->iobase,
	    (void *)skdev->iomap_iobase, 1, regsize);

	if (ddi_dev_regsize(dip, 2, &regsize) != DDI_SUCCESS ||
	    ddi_regs_map_setup(dip, 2, &skdev->dev_iobase, 0, regsize,
	    &dev_acc_attr, &skdev->dev_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "!%s%d: regs_map_setup(mem) failed",
		    DRV_NAME, instance);

		goto skd_attach_failed;
	}

	skdev->dev_memsize = (int)regsize;

	Dcmn_err(CE_NOTE, "%s: DEV iobase=%ph regsize=%d",
	    skdev->name, (void *)skdev->dev_iobase,
	    skdev->dev_memsize);

	progress |= SKD_DEV_IOBASE_MAPPED;

	/*
	 * Enable bus mastering, disable INTx (MSI/MSI-X is used), and
	 * clear parity-error detection in the PCI command register.
	 */
	cmd_reg = pci_config_get16(skdev->pci_handle, PCI_CONF_COMM);
	cmd_reg |= (PCI_COMM_ME | PCI_COMM_INTX_DISABLE);
	cmd_reg &= ~PCI_COMM_PARITY_DETECT;
	pci_config_put16(skdev->pci_handle, PCI_CONF_COMM, cmd_reg);

	/* Get adapter PCI device information. */
	skdev->vendor_id = pci_config_get16(skdev->pci_handle, PCI_CONF_VENID);
	skdev->device_id = pci_config_get16(skdev->pci_handle, PCI_CONF_DEVID);

	Dcmn_err(CE_NOTE, "%s: %x-%x card detected",
	    skdev->name, skdev->vendor_id, skdev->device_id);

	skd_get_properties(dip, skdev);

	(void) skd_init(skdev);

	if (skd_construct(skdev, instance)) {
		cmn_err(CE_WARN, "!%s: construct FAILED", skdev->name);
		goto skd_attach_failed;
	}

	progress |= SKD_PROBED;
	progress |= SKD_CONSTRUCTED;

	SIMPLEQ_INIT(&skdev->waitqueue);

	/*
	 * Set up the interrupt handler.
	 */
	if (skd_setup_interrupts(skdev) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "!%s: Unable to add interrupt",
		    skdev->name);
		goto skd_attach_failed;
	}

	progress |= SKD_INTR_ADDED;

	ADAPTER_STATE_LOCK(skdev);
	skdev->flags |= SKD_ATTACHED;
	ADAPTER_STATE_UNLOCK(skdev);

	skdev->d_blkshift = 9;	/* 512-byte logical blocks */
	progress |= SKD_ATTACHED;

	skd_start_device(skdev);

	ADAPTER_STATE_LOCK(skdev);
	skdev->progress = progress;
	ADAPTER_STATE_UNLOCK(skdev);
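	/*
	 * skd_wait_for_s1120() polls gendisk_on under the internal-I/O
	 * mutex, sleeping in one-second cv_timedwait() intervals and
	 * giving up after roughly ten timeouts.
	 */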
	/*
	 * Give the board a chance to
	 * complete its initialization.
	 */
	if (skdev->gendisk_on != 1)
		(void) skd_wait_for_s1120(skdev);

	if (skdev->gendisk_on != 1) {
		cmn_err(CE_WARN, "!%s: s1120 failed to come ONLINE",
		    skdev->name);
		goto skd_attach_failed;
	}

	ddi_report_dev(dip);

	skd_send_internal_skspcl(skdev, &skdev->internal_skspcl, INQUIRY);

	skdev->disks_initialized++;

	(void) strcpy(fw_version, "???");
	(void) skd_pci_info(skdev, pci_str, sizeof (pci_str));
	Dcmn_err(CE_NOTE, " sTec S1120 Driver(%s) version %s-b%s",
	    DRV_NAME, DRV_VERSION, DRV_BUILD_ID);

	Dcmn_err(CE_NOTE, " sTec S1120 %04x:%04x %s 64 bit",
	    skdev->vendor_id, skdev->device_id, pci_str);

	Dcmn_err(CE_NOTE, " sTec S1120 %s\n", skdev->pathname);

	if (*skdev->inq_serial_num)
		Dcmn_err(CE_NOTE, " sTec S1120 serial#=%s",
		    skdev->inq_serial_num);

	if (*skdev->inq_product_id &&
	    *skdev->inq_product_rev)
		Dcmn_err(CE_NOTE, " sTec S1120 prod ID=%s prod rev=%s",
		    skdev->inq_product_id, skdev->inq_product_rev);

	Dcmn_err(CE_NOTE, "%s: intr-type-cap: %d",
	    skdev->name, skdev->irq_type);
	Dcmn_err(CE_NOTE, "%s: max-scsi-reqs: %d",
	    skdev->name, skd_max_queue_depth);
	Dcmn_err(CE_NOTE, "%s: max-sgs-per-req: %d",
	    skdev->name, skd_sgs_per_request);
	Dcmn_err(CE_NOTE, "%s: max-scsi-req-per-msg: %d",
	    skdev->name, skd_max_req_per_msg);

	if (skd_bd_attach(dip, skdev) == DDI_FAILURE)
		goto skd_attach_failed;

	skd_update_props(skdev, dip);

	/* Enable timer */
	skd_start_timer(skdev);

	ADAPTER_STATE_LOCK(skdev);
	skdev->progress = progress;
	ADAPTER_STATE_UNLOCK(skdev);

	skdev->attached = 1;
	return (DDI_SUCCESS);

skd_attach_failed:
	skd_dealloc_resources(dip, skdev, progress, instance);

	if ((skdev->flags & SKD_MUTEX_DESTROYED) == 0) {
		skd_destroy_mutex(skdev);
	}

	ddi_soft_state_free(skd_state, instance);

	cmn_err(CE_WARN, "!skd_attach FAILED: progress=%x", progress);
	return (DDI_FAILURE);
}

/*
 *
 * Name: skd_halt, halt/suspend hook.
 *
 * Inputs: skdev - device state structure.
 *
 * Returns: Nothing.
 *
 */
static void
skd_halt(skd_device_t *skdev)
{
	Dcmn_err(CE_NOTE, "%s: halt/suspend ......", skdev->name);
}

/*
 *
 * Name: skd_detach, detaches driver from the system.
 *
 * Inputs: dip - device info structure.
 *	   cmd - DDI detach argument (DETACH or SUSPEND).
 *
 * Returns: DDI_SUCCESS on successful detach, otherwise DDI_FAILURE.
 *
 */
static int
skd_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	skd_buf_private_t *pbuf;
	skd_device_t *skdev;
	int instance;
	timeout_id_t timer_id = NULL;
	int rv1 = DDI_SUCCESS;
	struct skd_special_context *skspcl;

	instance = ddi_get_instance(dip);

	skdev = ddi_get_soft_state(skd_state, instance);
	if (skdev == NULL) {
		cmn_err(CE_WARN, "!detach failed: NULL skd state");

		return (DDI_FAILURE);
	}

	Dcmn_err(CE_CONT, "skd_detach(%d): entered", instance);

	switch (cmd) {
	case DDI_DETACH:
		ADAPTER_STATE_LOCK(skdev);

		/* Stop command/event processing. */
		skdev->flags |= (SKD_SUSPENDED | SKD_CMD_ABORT_TMO);

		/* Disable the driver timer. */
		if (skdev->skd_timer_timeout_id != 0) {
			timer_id = skdev->skd_timer_timeout_id;
			skdev->skd_timer_timeout_id = 0;
		}
		ADAPTER_STATE_UNLOCK(skdev);

		if (timer_id != 0) {
			(void) untimeout(timer_id);
		}

#ifdef	SKD_PM
		if (skdev->power_level != LOW_POWER_LEVEL) {
			skd_halt(skdev);
			skdev->power_level = LOW_POWER_LEVEL;
		}
#endif
		/* Flush the device write cache before stopping it. */
		skspcl = &skdev->internal_skspcl;
		skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);

		skd_stop_device(skdev);

		/*
		 * Clear the request queue.
		 */
		while (!SIMPLEQ_EMPTY(&skdev->waitqueue)) {
			pbuf = skd_get_queued_pbuf(skdev);
			skd_end_request_abnormal(skdev, pbuf, ECANCELED,
			    SKD_IODONE_WNIOC);
			Dcmn_err(CE_NOTE,
			    "detach: cancelled pbuf %p %ld <%s> %lld\n",
			    (void *)pbuf, pbuf->x_xfer->x_nblks,
			    (pbuf->dir & B_READ) ? "Read" : "Write",
			    pbuf->x_xfer->x_blkno);
		}

		skd_bd_detach(skdev);

		skd_dealloc_resources(dip, skdev, skdev->progress, instance);

		if ((skdev->flags & SKD_MUTEX_DESTROYED) == 0) {
			skd_destroy_mutex(skdev);
		}

		ddi_soft_state_free(skd_state, instance);

		skd_exit();

		break;

	case DDI_SUSPEND:
		/* Block the timer. */
		ADAPTER_STATE_LOCK(skdev);
		skdev->flags |= SKD_SUSPENDED;

		/* Disable the driver timer. */
		if (skdev->skd_timer_timeout_id != 0) {
			timer_id = skdev->skd_timer_timeout_id;
			skdev->skd_timer_timeout_id = 0;
		}
		ADAPTER_STATE_UNLOCK(skdev);

		if (timer_id != 0) {
			(void) untimeout(timer_id);
		}

		ddi_prop_remove_all(dip);

		skd_halt(skdev);

		break;

	default:
		rv1 = DDI_FAILURE;
		break;
	}

	if (rv1 != DDI_SUCCESS) {
		cmn_err(CE_WARN, "!skd_detach, failed, rv1=%x", rv1);
	} else {
		Dcmn_err(CE_CONT, "skd_detach: exiting");
	}

	return (rv1);
}

/*
 *
 * Name: skd_devid_init, calls skd_setup_devid to set up
 *	 the device's devid structure.
 *
 * Inputs: arg - device state structure.
 *	   dip - dev_info structure.
 *	   devid - devid structure.
 *
 * Returns: Zero.
 *
 */
/* ARGSUSED */	/* Upstream common source with other platforms. */
static int
skd_devid_init(void *arg, dev_info_t *dip, ddi_devid_t *devid)
{
	skd_device_t *skdev = arg;

	(void) skd_setup_devid(skdev, devid);

	return (0);
}
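/*
 * The functions below are the blkdev entry points; blkdev invokes them
 * through the bd_ops vector passed to bd_alloc_handle() in
 * skd_bd_attach() above.
 */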
/*
 *
 * Name: skd_bd_driveinfo, retrieves the device's info.
 *
 * Inputs: drive - drive data structure.
 *	   arg - device state structure.
 *
 * Returns: Nothing.
 *
 */
static void
skd_bd_driveinfo(void *arg, bd_drive_t *drive)
{
	skd_device_t *skdev = arg;

	/* Leave one fifth of the queue as headroom for internal commands. */
	drive->d_qsize = (skdev->queue_depth_limit * 4) / 5;
	drive->d_maxxfer = SKD_DMA_MAXXFER;
	drive->d_removable = B_FALSE;
	drive->d_hotpluggable = B_FALSE;
	drive->d_target = 0;
	drive->d_lun = 0;

	if (skdev->inquiry_is_valid != 0) {
		drive->d_vendor = skdev->inq_vendor_id;
		drive->d_vendor_len = strlen(drive->d_vendor);

		drive->d_product = skdev->inq_product_id;
		drive->d_product_len = strlen(drive->d_product);

		drive->d_serial = skdev->inq_serial_num;
		drive->d_serial_len = strlen(drive->d_serial);

		drive->d_revision = skdev->inq_product_rev;
		drive->d_revision_len = strlen(drive->d_revision);
	}
}

/*
 *
 * Name: skd_bd_mediainfo, retrieves device media info.
 *
 * Inputs: arg - device state structure.
 *	   media - container for media info.
 *
 * Returns: Zero.
 *
 */
static int
skd_bd_mediainfo(void *arg, bd_media_t *media)
{
	skd_device_t *skdev = arg;

	media->m_nblks = skdev->Nblocks;
	media->m_blksize = 512;
	media->m_pblksize = 4096;
	media->m_readonly = B_FALSE;
	media->m_solidstate = B_TRUE;

	return (0);
}

/*
 *
 * Name: skd_rw, performs R/W requests for the blkdev driver.
 *
 * Inputs: skdev - device state structure.
 *	   xfer - transfer structure.
 *	   dir - I/O direction.
 *
 * Returns: EAGAIN if the device is not online, EIO for polled (dump)
 *	    transfers, ENOMEM on allocation failure, otherwise zero
 *	    once the request is queued.
 *
 */
static int
skd_rw(skd_device_t *skdev, bd_xfer_t *xfer, int dir)
{
	skd_buf_private_t *pbuf;

	/*
	 * The x_flags structure element is not defined in Oracle Solaris.
	 * We'll need to fix this in order to support dump on this device.
	 */
	if (xfer->x_flags & BD_XFER_POLL)
		return (EIO);

	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
		Dcmn_err(CE_NOTE, "Device - not ONLINE");

		skd_request_fn_not_online(skdev);

		return (EAGAIN);
	}

	pbuf = kmem_zalloc(sizeof (skd_buf_private_t), KM_NOSLEEP);
	if (pbuf == NULL)
		return (ENOMEM);

	WAITQ_LOCK(skdev);
	pbuf->dir = dir;
	pbuf->x_xfer = xfer;

	skd_queue(skdev, pbuf);
	skdev->ios_queued++;
	WAITQ_UNLOCK(skdev);

	skd_start(skdev);

	return (0);
}

/*
 *
 * Name: skd_bd_read, performs blkdev read requests.
 *
 * Inputs: arg - device state structure.
 *	   xfer - transfer request structure.
 *
 * Returns: Value returned by skd_rw().
 *
 */
static int
skd_bd_read(void *arg, bd_xfer_t *xfer)
{
	return (skd_rw(arg, xfer, B_READ));
}

/*
 *
 * Name: skd_bd_write, performs blkdev write requests.
 *
 * Inputs: arg - device state structure.
 *	   xfer - transfer request structure.
 *
 * Returns: Value returned by skd_rw().
 *
 */
static int
skd_bd_write(void *arg, bd_xfer_t *xfer)
{
	return (skd_rw(arg, xfer, B_WRITE));
}