/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "sdhost.h"

typedef struct sdslot	sdslot_t;
typedef struct sdhost	sdhost_t;

/*
 * Per slot state.
 */
struct sdslot {
	sda_host_t	*ss_host;
	int		ss_num;
	ddi_acc_handle_t ss_acch;
	caddr_t		ss_regva;
	kmutex_t	ss_lock;
	uint32_t	ss_capab;
	uint32_t	ss_baseclk;	/* Hz */
	uint32_t	ss_cardclk;	/* Hz */
	uint8_t		ss_tmoutclk;
	uint32_t	ss_tmusecs;	/* timeout units in usecs */
	uint32_t	ss_ocr;		/* OCR formatted voltages */
	uint16_t	ss_mode;
	boolean_t	ss_suspended;

	/*
	 * Command in progress
	 */
	uint8_t		*ss_kvaddr;
	ddi_dma_cookie_t *ss_dmacs;
	uint_t		ss_ndmac;
	int		ss_blksz;
	uint16_t	ss_resid;	/* in blocks */

	/* scratch buffer, to receive extra PIO data */
	uint32_t	ss_bounce[2048 / 4];
};

/*
 * Per controller state.
 */
struct sdhost {
	int		sh_numslots;
	ddi_dma_attr_t	sh_dmaattr;
	sdslot_t	sh_slots[SDHOST_MAXSLOTS];
	sda_host_t	*sh_host;

	/*
	 * Interrupt related information.
	 */
	ddi_intr_handle_t sh_ihandle;
	int		sh_icap;
	uint_t		sh_ipri;
};


static int sdhost_attach(dev_info_t *, ddi_attach_cmd_t);
static int sdhost_detach(dev_info_t *, ddi_detach_cmd_t);
static int sdhost_suspend(dev_info_t *);
static int sdhost_resume(dev_info_t *);

static void sdhost_enable_interrupts(sdslot_t *);
static void sdhost_disable_interrupts(sdslot_t *);
static int sdhost_setup_intr(dev_info_t *, sdhost_t *);
static uint_t sdhost_intr(caddr_t, caddr_t);
static int sdhost_init_slot(dev_info_t *, sdhost_t *, int, int);
static void sdhost_uninit_slot(sdhost_t *, int);
static sda_err_t sdhost_soft_reset(sdslot_t *, uint8_t);
static sda_err_t sdhost_set_clock(sdslot_t *, uint32_t);
static void sdhost_xfer_done(sdslot_t *, sda_err_t);
static sda_err_t sdhost_wait_cmd(sdslot_t *, sda_cmd_t *);
static uint_t sdhost_slot_intr(sdslot_t *);

static sda_err_t sdhost_cmd(void *, sda_cmd_t *);
static sda_err_t sdhost_getprop(void *, sda_prop_t, uint32_t *);
static sda_err_t sdhost_setprop(void *, sda_prop_t, uint32_t);
static sda_err_t sdhost_poll(void *);
static sda_err_t sdhost_reset(void *);
static sda_err_t sdhost_halt(void *);

static struct dev_ops sdhost_dev_ops = {
	DEVO_REV,			/* devo_rev */
	0,				/* devo_refcnt */
	ddi_no_info,			/* devo_getinfo */
	nulldev,			/* devo_identify */
	nulldev,			/* devo_probe */
	sdhost_attach,			/* devo_attach */
	sdhost_detach,			/* devo_detach */
	nodev,				/* devo_reset */
	NULL,				/* devo_cb_ops */
	NULL,				/* devo_bus_ops */
	NULL,				/* devo_power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};

static struct modldrv sdhost_modldrv = {
	&mod_driverops,			/* drv_modops */
	"Standard SD Host Controller",	/* drv_linkinfo */
	&sdhost_dev_ops			/* drv_dev_ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,			/* ml_rev */
	{ &sdhost_modldrv, NULL }	/* ml_linkage */
};

static struct sda_ops sdhost_ops = {
	SDA_OPS_VERSION,
	sdhost_cmd,			/* so_cmd */
	sdhost_getprop,			/* so_getprop */
	sdhost_setprop,			/* so_setprop */
	sdhost_poll,			/* so_poll */
	sdhost_reset,			/* so_reset */
	sdhost_halt,			/* so_halt */
};

static ddi_device_acc_attr_t sdhost_regattr = {
	DDI_DEVICE_ATTR_V0,	/* devacc_attr_version */
	DDI_STRUCTURE_LE_ACC,	/* devacc_attr_endian_flags */
	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder */
	DDI_DEFAULT_ACC,	/* devacc_attr_access */
};

#define	GET16(ss, reg)	\
	ddi_get16(ss->ss_acch, (void *)(ss->ss_regva + reg))
#define	PUT16(ss, reg, val)	\
	ddi_put16(ss->ss_acch, (void *)(ss->ss_regva + reg), val)
#define	GET32(ss, reg)	\
	ddi_get32(ss->ss_acch, (void *)(ss->ss_regva + reg))
#define	PUT32(ss, reg, val)	\
	ddi_put32(ss->ss_acch, (void *)(ss->ss_regva + reg), val)
#define	GET64(ss, reg)	\
	ddi_get64(ss->ss_acch, (void *)(ss->ss_regva + reg))

#define	GET8(ss, reg)	\
	ddi_get8(ss->ss_acch, (void *)(ss->ss_regva + reg))
#define	PUT8(ss, reg, val)	\
	ddi_put8(ss->ss_acch, (void *)(ss->ss_regva + reg), val)

#define	CLR8(ss, reg, mask)	PUT8(ss, reg, GET8(ss, reg) & ~(mask))
#define	SET8(ss, reg, mask)	PUT8(ss, reg, GET8(ss, reg) | (mask))
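
/*
 * Usage note (added for illustration, not part of the original code):
 * CLR8() and SET8() are read-modify-write helpers built on GET8()/PUT8().
 * For example, SET8(ss, REG_HOST_CONTROL, HOST_CONTROL_LED_ON) reads the
 * 8-bit host control register through the slot's access handle, ORs in
 * the LED bit, and writes the result back.
 */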

/*
 * If ever anyone uses PIO on SPARC, we have to endian-swap.  But we
 * think that SD Host Controllers are likely to be uncommon on SPARC,
 * and hopefully when they exist at all they will be able to use DMA.
 */
#ifdef	_BIG_ENDIAN
#define	sw32(x)		ddi_swap32(x)
#define	sw16(x)		ddi_swap16(x)
#else
#define	sw32(x)		(x)
#define	sw16(x)		(x)
#endif

#define	GETDATA32(ss)		sw32(GET32(ss, REG_DATA))
#define	GETDATA16(ss)		sw16(GET16(ss, REG_DATA))
#define	GETDATA8(ss)		GET8(ss, REG_DATA)

#define	PUTDATA32(ss, val)	PUT32(ss, REG_DATA, sw32(val))
#define	PUTDATA16(ss, val)	PUT16(ss, REG_DATA, sw16(val))
#define	PUTDATA8(ss, val)	PUT8(ss, REG_DATA, val)

#define	CHECK_STATE(ss, nm)	\
	((GET32(ss, REG_PRS) & PRS_ ## nm) != 0)

int
_init(void)
{
	int	rv;

	sda_host_init_ops(&sdhost_dev_ops);

	if ((rv = mod_install(&modlinkage)) != 0) {
		sda_host_fini_ops(&sdhost_dev_ops);
	}

	return (rv);
}

int
_fini(void)
{
	int	rv;

	if ((rv = mod_remove(&modlinkage)) == 0) {
		sda_host_fini_ops(&sdhost_dev_ops);
	}
	return (rv);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
sdhost_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	sdhost_t	*shp;
	ddi_acc_handle_t pcih;
	uint8_t		slotinfo;
	uint8_t		bar;
	int		i;

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		return (sdhost_resume(dip));

	default:
		return (DDI_FAILURE);
	}

	/*
	 * Soft state allocation.
	 */
	shp = kmem_zalloc(sizeof (*shp), KM_SLEEP);
	ddi_set_driver_private(dip, shp);

	/*
	 * Initialize DMA attributes.  For now we initialize as for
	 * SDMA.  If we add ADMA support we can improve this.
	 */
	shp->sh_dmaattr.dma_attr_version = DMA_ATTR_V0;
	shp->sh_dmaattr.dma_attr_addr_lo = 0;
	shp->sh_dmaattr.dma_attr_addr_hi = 0xffffffffU;
	shp->sh_dmaattr.dma_attr_count_max = 0xffffffffU;
	shp->sh_dmaattr.dma_attr_align = 1;
	shp->sh_dmaattr.dma_attr_burstsizes = 0;	/* for now! */
	shp->sh_dmaattr.dma_attr_minxfer = 1;
	shp->sh_dmaattr.dma_attr_maxxfer = 0xffffffffU;
	shp->sh_dmaattr.dma_attr_sgllen = -1;		/* unlimited! */
	shp->sh_dmaattr.dma_attr_seg = 0xfff;		/* 4K segments */
	shp->sh_dmaattr.dma_attr_granular = 1;
	shp->sh_dmaattr.dma_attr_flags = 0;

	/*
	 * PCI configuration access to figure out number of slots present.
	 */
	if (pci_config_setup(dip, &pcih) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "pci_config_setup failed");
		goto failed;
	}

	slotinfo = pci_config_get8(pcih, SLOTINFO);
	shp->sh_numslots = SLOTINFO_NSLOT(slotinfo);

	if (shp->sh_numslots > SDHOST_MAXSLOTS) {
		cmn_err(CE_WARN, "Host reports too many slots: %d",
		    shp->sh_numslots);
		goto failed;
	}

	/*
	 * Enable master accesses and DMA.
	 */
	pci_config_put16(pcih, PCI_CONF_COMM,
	    pci_config_get16(pcih, PCI_CONF_COMM) |
	    PCI_COMM_MAE | PCI_COMM_ME);

	/*
	 * Figure out which BAR to use.  Note that we number BARs from
	 * 1, although PCI and SD Host number them from 0.  (We number
	 * from 1, because register number 0 means PCI configuration
	 * space in Solaris.)
	 */
	bar = SLOTINFO_BAR(slotinfo) + 1;

	pci_config_teardown(&pcih);

	/*
	 * Setup interrupts ... supports the new DDI interrupt API.  This
	 * will support MSI or MSI-X interrupts if a device is found to
	 * support it.
	 */
	if (sdhost_setup_intr(dip, shp) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "Failed to setup interrupts");
		goto failed;
	}

	shp->sh_host = sda_host_alloc(dip, shp->sh_numslots, &sdhost_ops,
	    &shp->sh_dmaattr);
	if (shp->sh_host == NULL) {
		cmn_err(CE_WARN, "Failed allocating SD host structure");
		goto failed;
	}

	/*
	 * Configure slots; this also maps registers, enables
	 * interrupts, etc.  Most of the hardware setup is done here.
	 */
	for (i = 0; i < shp->sh_numslots; i++) {
		if (sdhost_init_slot(dip, shp, i, bar + i) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "Failed initializing slot %d", i);
			goto failed;
		}
	}

	ddi_report_dev(dip);

	/*
	 * Enable device interrupts at the DDI layer.
	 */
	(void) ddi_intr_enable(shp->sh_ihandle);

	/*
	 * Mark the slots online with the framework.  This will cause
	 * the framework to probe them for the presence of cards.
	 */
	if (sda_host_attach(shp->sh_host) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "Failed attaching to SDA framework");
		(void) ddi_intr_disable(shp->sh_ihandle);
		goto failed;
	}

	return (DDI_SUCCESS);

failed:
	if (shp->sh_ihandle != NULL) {
		(void) ddi_intr_remove_handler(shp->sh_ihandle);
		(void) ddi_intr_free(shp->sh_ihandle);
	}
	for (i = 0; i < shp->sh_numslots; i++)
		sdhost_uninit_slot(shp, i);
	kmem_free(shp, sizeof (*shp));

	return (DDI_FAILURE);
}

int
sdhost_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	sdhost_t	*shp;
	int		i;

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		return (sdhost_suspend(dip));

	default:
		return (DDI_FAILURE);
	}

	shp = ddi_get_driver_private(dip);
	if (shp == NULL)
		return (DDI_FAILURE);

	/*
	 * Take host offline with the framework.
	 */
	sda_host_detach(shp->sh_host);

	/*
	 * Tear down interrupts.
	 */
	if (shp->sh_ihandle != NULL) {
		(void) ddi_intr_disable(shp->sh_ihandle);
		(void) ddi_intr_remove_handler(shp->sh_ihandle);
		(void) ddi_intr_free(shp->sh_ihandle);
	}

	/*
	 * Tear down register mappings, etc.
	 */
	for (i = 0; i < shp->sh_numslots; i++)
		sdhost_uninit_slot(shp, i);
	kmem_free(shp, sizeof (*shp));

	return (DDI_SUCCESS);
}

int
sdhost_suspend(dev_info_t *dip)
{
	sdhost_t	*shp;
	sdslot_t	*ss;
	int		i;

	shp = ddi_get_driver_private(dip);
	if (shp == NULL)
		return (DDI_FAILURE);

	/* disable the interrupts */
	(void) ddi_intr_disable(shp->sh_ihandle);

	for (i = 0; i < shp->sh_numslots; i++) {
		ss = &shp->sh_slots[i];
		mutex_enter(&ss->ss_lock);
		ss->ss_suspended = B_TRUE;
		sdhost_disable_interrupts(ss);
		(void) sdhost_soft_reset(ss, SOFT_RESET_ALL);
		mutex_exit(&ss->ss_lock);
	}
	return (DDI_SUCCESS);
}

int
sdhost_resume(dev_info_t *dip)
{
	sdhost_t	*shp;
	sdslot_t	*ss;
	int		i;

	shp = ddi_get_driver_private(dip);
	if (shp == NULL)
		return (DDI_FAILURE);

	for (i = 0; i < shp->sh_numslots; i++) {
		ss = &shp->sh_slots[i];
		mutex_enter(&ss->ss_lock);
		ss->ss_suspended = B_FALSE;
		(void) sdhost_soft_reset(ss, SOFT_RESET_ALL);
		sdhost_enable_interrupts(ss);
		mutex_exit(&ss->ss_lock);
	}

	/* re-enable the interrupts */
	(void) ddi_intr_enable(shp->sh_ihandle);

	/* kick off a new card detect task */
	for (i = 0; i < shp->sh_numslots; i++) {
		ss = &shp->sh_slots[i];
		sda_host_detect(ss->ss_host, ss->ss_num);
	}
	return (DDI_SUCCESS);
}

sda_err_t
sdhost_set_clock(sdslot_t *ss, uint32_t hz)
{
	uint16_t	div;
	uint32_t	val;
	uint32_t	clk;
	int		count;

	/*
	 * Shut off the clock to begin.
	 */
	ss->ss_cardclk = 0;
	PUT16(ss, REG_CLOCK_CONTROL, 0);
	if (hz == 0) {
		return (SDA_EOK);
	}

	if (ss->ss_baseclk == 0) {
		sda_host_log(ss->ss_host, ss->ss_num,
		    "Base clock frequency not established.");
		return (SDA_EINVAL);
	}

	if ((hz > 25000000) && ((ss->ss_capab & CAPAB_HIGH_SPEED) != 0)) {
		/* this clock requires high speed timings! */
		SET8(ss, REG_HOST_CONTROL, HOST_CONTROL_HIGH_SPEED_EN);
	} else {
		/* don't allow clock to run faster than 25MHz */
		hz = min(hz, 25000000);
		CLR8(ss, REG_HOST_CONTROL, HOST_CONTROL_HIGH_SPEED_EN);
	}

	/* figure out the divider */
	clk = ss->ss_baseclk;
	div = 1;
	while (clk > hz) {
		if (div > 0x80)
			break;
		clk >>= 1;	/* divide clock by two */
		div <<= 1;	/* double the divider */
	}
	div >>= 1;	/* 0 == divide by 1, 1 = divide by 2 */
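
	/*
	 * Worked example (added for clarity; numbers are illustrative):
	 * assuming a 50 MHz base clock and a requested 400 kHz card
	 * clock, the loop above stops at clk = 50 MHz / 128 ~= 390 kHz
	 * with div == 128, and the final shift yields div == 64, which
	 * in the standard divided-clock encoding selects base / (2 * 64).
	 */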

	/*
	 * Set the internal clock divider first, without enabling the
	 * card clock yet.
	 */
	PUT16(ss, REG_CLOCK_CONTROL,
	    (div << CLOCK_CONTROL_FREQ_SHIFT) | CLOCK_CONTROL_INT_CLOCK_EN);

	/*
	 * Wait up to 100 msec for the internal clock to stabilize.
	 * (The spec does not seem to indicate a maximum timeout, but
	 * it also suggests that an infinite loop be used, which is
	 * not appropriate for hardened Solaris drivers.)
	 */
	for (count = 100000; count; count -= 10) {

		val = GET16(ss, REG_CLOCK_CONTROL);

		if (val & CLOCK_CONTROL_INT_CLOCK_STABLE) {
			/* if clock is stable, enable the SD clock pin */
			PUT16(ss, REG_CLOCK_CONTROL, val |
			    CLOCK_CONTROL_SD_CLOCK_EN);

			ss->ss_cardclk = clk;
			return (SDA_EOK);
		}

		drv_usecwait(10);
	}

	return (SDA_ETIME);
}

sda_err_t
sdhost_soft_reset(sdslot_t *ss, uint8_t bits)
{
	int	count;

	/*
	 * There appears to be a bug where Ricoh hosts might have a
	 * problem if the host frequency is not set.  If the card
	 * isn't present, or we are doing a master reset, just enable
	 * the internal clock at its native speed.  (No dividers, and
	 * not exposed to the card.)
	 */
	if ((bits == SOFT_RESET_ALL) || !(CHECK_STATE(ss, CARD_INSERTED))) {
		PUT16(ss, REG_CLOCK_CONTROL, CLOCK_CONTROL_INT_CLOCK_EN);
		/* simple 1msec wait, don't wait for clock to stabilize */
		drv_usecwait(1000);
	}

	PUT8(ss, REG_SOFT_RESET, bits);
	for (count = 100000; count != 0; count -= 10) {
		if ((GET8(ss, REG_SOFT_RESET) & bits) == 0) {
			return (SDA_EOK);
		}
		drv_usecwait(10);
	}

	return (SDA_ETIME);
}

void
sdhost_disable_interrupts(sdslot_t *ss)
{
	/* disable slot interrupts for card insert and remove */
	PUT16(ss, REG_INT_MASK, 0);
	PUT16(ss, REG_INT_EN, 0);

	/* disable error interrupts */
	PUT16(ss, REG_ERR_MASK, 0);
	PUT16(ss, REG_ERR_EN, 0);
}

void
sdhost_enable_interrupts(sdslot_t *ss)
{
	/*
	 * Note that we want to enable reading of the CMD related
	 * bits, but we do not want them to generate an interrupt.
	 * (The busy wait for typical CMD stuff will normally be less
	 * than 10usec, so it's simpler/easier to just poll.  Even in
	 * the worst case of 100 kHz, the poll is at worst 2 msec.)
	 */

	/* enable slot interrupts for card insert and remove */
	PUT16(ss, REG_INT_MASK, INT_MASK);
	PUT16(ss, REG_INT_EN, INT_ENAB);

	/* enable error interrupts */
	PUT16(ss, REG_ERR_MASK, ERR_MASK);
	PUT16(ss, REG_ERR_EN, ERR_ENAB);
}

int
sdhost_setup_intr(dev_info_t *dip, sdhost_t *shp)
{
	int		itypes;
	int		itype;

	/*
	 * Set up interrupt handler.
	 */
	if (ddi_intr_get_supported_types(dip, &itypes) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "ddi_intr_get_supported_types failed");
		return (DDI_FAILURE);
	}

	/*
	 * Interrupt types are bits in a mask.  We know about these ones:
	 *	FIXED = 1
	 *	MSI = 2
	 *	MSIX = 4
	 */
	for (itype = DDI_INTR_TYPE_MSIX; itype != 0; itype >>= 1) {

		int	count;

		if ((itypes & itype) == 0) {
			/* this type is not supported on this device! */
			continue;
		}

		if ((ddi_intr_get_nintrs(dip, itype, &count) != DDI_SUCCESS) ||
		    (count == 0)) {
			cmn_err(CE_WARN, "ddi_intr_get_nintrs failed");
			continue;
		}

		/*
		 * We have not seen a host device with multiple
		 * interrupts (one per slot?), and the spec does not
		 * indicate that they exist.  But if one ever occurs,
		 * we spew a warning to help future debugging/support
		 * efforts.
		 */
		if (count > 1) {
			cmn_err(CE_WARN, "Controller offers %d interrupts, "
			    "but driver only supports one", count);
			continue;
		}

		if ((ddi_intr_alloc(dip, &shp->sh_ihandle, itype, 0, 1,
		    &count, DDI_INTR_ALLOC_NORMAL) != DDI_SUCCESS) ||
		    (count != 1)) {
			cmn_err(CE_WARN, "ddi_intr_alloc failed");
			continue;
		}

		if (ddi_intr_get_pri(shp->sh_ihandle, &shp->sh_ipri) !=
		    DDI_SUCCESS) {
			cmn_err(CE_WARN, "ddi_intr_get_pri failed");
			(void) ddi_intr_free(shp->sh_ihandle);
			shp->sh_ihandle = NULL;
			continue;
		}

		if (shp->sh_ipri >= ddi_intr_get_hilevel_pri()) {
			cmn_err(CE_WARN, "Hi level interrupt not supported");
			(void) ddi_intr_free(shp->sh_ihandle);
			shp->sh_ihandle = NULL;
			continue;
		}

		if (ddi_intr_get_cap(shp->sh_ihandle, &shp->sh_icap) !=
		    DDI_SUCCESS) {
			cmn_err(CE_WARN, "ddi_intr_get_cap failed");
			(void) ddi_intr_free(shp->sh_ihandle);
			shp->sh_ihandle = NULL;
			continue;
		}

		if (ddi_intr_add_handler(shp->sh_ihandle, sdhost_intr,
		    shp, NULL) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "ddi_intr_add_handler failed");
			(void) ddi_intr_free(shp->sh_ihandle);
			shp->sh_ihandle = NULL;
			continue;
		}

		return (DDI_SUCCESS);
	}

	return (DDI_FAILURE);
}

void
sdhost_xfer_done(sdslot_t *ss, sda_err_t errno)
{
	if ((errno == SDA_EOK) && (ss->ss_resid != 0)) {
		/* an unexpected partial transfer was found */
		errno = SDA_ERESID;
	}
	ss->ss_blksz = 0;
	ss->ss_resid = 0;

	if (errno != SDA_EOK) {
		(void) sdhost_soft_reset(ss, SOFT_RESET_CMD);
		(void) sdhost_soft_reset(ss, SOFT_RESET_DAT);

		/* send a STOP command if necessary */
		if (ss->ss_mode & XFR_MODE_AUTO_CMD12) {
			PUT32(ss, REG_ARGUMENT, 0);
			PUT16(ss, REG_COMMAND,
			    (CMD_STOP_TRANSMIT << 8) |
			    COMMAND_TYPE_NORM | COMMAND_INDEX_CHECK_EN |
			    COMMAND_CRC_CHECK_EN | COMMAND_RESP_48_BUSY);
		}
	}

	sda_host_transfer(ss->ss_host, ss->ss_num, errno);
}

uint_t
sdhost_slot_intr(sdslot_t *ss)
{
	uint16_t	intr;
	uint16_t	errs;
	uint8_t		*data;
	int		count;

	mutex_enter(&ss->ss_lock);

	if (ss->ss_suspended) {
		mutex_exit(&ss->ss_lock);
		return (DDI_INTR_UNCLAIMED);
	}

	intr = GET16(ss, REG_INT_STAT);
	if (intr == 0) {
		mutex_exit(&ss->ss_lock);
		return (DDI_INTR_UNCLAIMED);
	}
	errs = GET16(ss, REG_ERR_STAT);

	if (intr & (INT_REM | INT_INS)) {

		PUT16(ss, REG_INT_STAT, intr);
		mutex_exit(&ss->ss_lock);

		sda_host_detect(ss->ss_host, ss->ss_num);
		/* no further interrupt processing this cycle */
		return (DDI_INTR_CLAIMED);
	}

	if (intr & INT_DMA) {
		/*
		 * We have crossed a DMA/page boundary.  Cope with it.
		 */
		if (ss->ss_ndmac) {
			ss->ss_ndmac--;
			ss->ss_dmacs++;
			PUT16(ss, REG_INT_STAT, INT_DMA);
			PUT32(ss, REG_SDMA_ADDR, ss->ss_dmacs->dmac_address);

		} else {
			/*
			 * Apparently some sdhost controllers issue a
			 * final DMA interrupt if the DMA completes on
			 * a boundary, even though there is no further
			 * data to transfer.
			 *
			 * There might be a risk here of the
			 * controller continuing to access the same
			 * data over and over again, but we accept the
			 * risk.
			 */
			PUT16(ss, REG_INT_STAT, INT_DMA);
		}
	}

	if (intr & INT_RD) {
		/*
		 * PIO read!  PIO is quite suboptimal, but we expect
		 * performance critical applications to use DMA
		 * whenever possible.  We have to stage this through
		 * the bounce buffer to meet alignment considerations.
		 */

		PUT16(ss, REG_INT_STAT, INT_RD);

		while ((ss->ss_resid > 0) && CHECK_STATE(ss, BUF_RD_EN)) {

			data = (void *)ss->ss_bounce;
			count = ss->ss_blksz;

			ASSERT(count > 0);
			ASSERT(ss->ss_kvaddr != NULL);

			while (count >= sizeof (uint32_t)) {
				*(uint32_t *)(void *)data = GETDATA32(ss);
				data += sizeof (uint32_t);
				count -= sizeof (uint32_t);
			}
			while (count >= sizeof (uint16_t)) {
				*(uint16_t *)(void *)data = GETDATA16(ss);
				data += sizeof (uint16_t);
				count -= sizeof (uint16_t);
			}
			while (count >= sizeof (uint8_t)) {
				*(uint8_t *)data = GETDATA8(ss);
				data += sizeof (uint8_t);
				count -= sizeof (uint8_t);
			}

			bcopy(ss->ss_bounce, ss->ss_kvaddr, ss->ss_blksz);
			ss->ss_kvaddr += ss->ss_blksz;
			ss->ss_resid--;
		}
	}

	if (intr & INT_WR) {
		/*
		 * PIO write!  PIO is quite suboptimal, but we expect
		 * performance critical applications to use DMA
		 * whenever possible.  We have to stage this through
		 * the bounce buffer to meet alignment considerations.
		 */

		PUT16(ss, REG_INT_STAT, INT_WR);

		while ((ss->ss_resid > 0) && CHECK_STATE(ss, BUF_WR_EN)) {

			data = (void *)ss->ss_bounce;
			count = ss->ss_blksz;

			ASSERT(count > 0);
			ASSERT(ss->ss_kvaddr != NULL);

			bcopy(ss->ss_kvaddr, data, count);
			while (count >= sizeof (uint32_t)) {
				PUTDATA32(ss, *(uint32_t *)(void *)data);
				data += sizeof (uint32_t);
				count -= sizeof (uint32_t);
			}
			while (count >= sizeof (uint16_t)) {
				PUTDATA16(ss, *(uint16_t *)(void *)data);
				data += sizeof (uint16_t);
				count -= sizeof (uint16_t);
			}
			while (count >= sizeof (uint8_t)) {
				PUTDATA8(ss, *(uint8_t *)data);
				data += sizeof (uint8_t);
				count -= sizeof (uint8_t);
			}

			ss->ss_kvaddr += ss->ss_blksz;
			ss->ss_resid--;
		}
	}

	if (intr & INT_XFR) {
		PUT16(ss, REG_INT_STAT, INT_XFR);

		sdhost_xfer_done(ss, SDA_EOK);
	}

	if (intr & INT_ERR) {
		PUT16(ss, REG_ERR_STAT, errs);
		PUT16(ss, REG_INT_STAT, INT_ERR);

		if (errs & ERR_DAT) {
			if ((errs & ERR_DAT_END) == ERR_DAT_END) {
				sdhost_xfer_done(ss, SDA_EPROTO);
			} else if ((errs & ERR_DAT_CRC) == ERR_DAT_CRC) {
				sdhost_xfer_done(ss, SDA_ECRC7);
			} else {
				sdhost_xfer_done(ss, SDA_ETIME);
			}

		} else if (errs & ERR_ACMD12) {
			/*
			 * Generally, this is bad news.  We need a full
			 * reset to recover properly.
			 */
			sdhost_xfer_done(ss, SDA_ECMD12);
		}

		/*
		 * This asynchronous error leaves the slot more or less
		 * useless.  Report it to the framework.
		 */
		if (errs & ERR_CURRENT) {
			sda_host_fault(ss->ss_host, ss->ss_num,
			    SDA_FAULT_CURRENT);
		}
	}

	mutex_exit(&ss->ss_lock);

	return (DDI_INTR_CLAIMED);
}

/*ARGSUSED1*/
uint_t
sdhost_intr(caddr_t arg1, caddr_t arg2)
{
	sdhost_t	*shp = (void *)arg1;
	int		rv = DDI_INTR_UNCLAIMED;
	int		num;

	/* interrupt for each of the slots present in the system */
	for (num = 0; num < shp->sh_numslots; num++) {
		if (sdhost_slot_intr(&shp->sh_slots[num]) ==
		    DDI_INTR_CLAIMED) {
			rv = DDI_INTR_CLAIMED;
		}
	}
	return (rv);
}

int
sdhost_init_slot(dev_info_t *dip, sdhost_t *shp, int num, int bar)
{
	sdslot_t	*ss;
	uint32_t	capab;
	uint32_t	clk;

	/*
	 * Register the private state.
	 */
	ss = &shp->sh_slots[num];
	ss->ss_host = shp->sh_host;
	ss->ss_num = num;
	sda_host_set_private(shp->sh_host, num, ss);

	/*
	 * Initialize core data structure, locks, etc.
	 */
	mutex_init(&ss->ss_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(shp->sh_ipri));

	if (ddi_regs_map_setup(dip, bar, &ss->ss_regva, 0, 0, &sdhost_regattr,
	    &ss->ss_acch) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* reset before reading capabilities */
	if (sdhost_soft_reset(ss, SOFT_RESET_ALL) != SDA_EOK)
		return (DDI_FAILURE);

	capab = GET64(ss, REG_CAPAB) & 0xffffffffU;	/* upper bits reserved */
	ss->ss_capab = capab;

	/* host voltages in OCR format */
	ss->ss_ocr = 0;
	if (capab & CAPAB_18V)
		ss->ss_ocr |= OCR_18_19V;	/* 1.8V */
	if (capab & CAPAB_30V)
		ss->ss_ocr |= OCR_30_31V;
	if (capab & CAPAB_33V)
		ss->ss_ocr |= OCR_32_33V;

	/* base clock */
	ss->ss_baseclk =
	    ((capab & CAPAB_BASE_FREQ_MASK) >> CAPAB_BASE_FREQ_SHIFT);
	ss->ss_baseclk *= 1000000;

	/*
	 * Timeout clock.  We can calculate this using the following
	 * formula:
	 *
	 * (1000000 usec/1sec) * (1sec/tmoutclk) * base factor = clock time
	 *
	 * Clock time is the length of the base clock in usecs.
	 *
	 * Our base factor is 2^13, which is the shortest clock we
	 * can count.
	 *
	 * To simplify the math and avoid overflow, we cancel out the
	 * zeros for kHz or MHz.  Since we want to wait more clocks, not
	 * less, on error, we truncate the result rather than rounding
	 * up.
	 */
	clk = ((capab & CAPAB_TIMEOUT_FREQ_MASK) >> CAPAB_TIMEOUT_FREQ_SHIFT);
	if ((ss->ss_baseclk == 0) || (clk == 0)) {
		cmn_err(CE_WARN, "Unable to determine clock frequencies");
		return (DDI_FAILURE);
	}

	if (capab & CAPAB_TIMEOUT_UNITS) {
		/* MHz */
		ss->ss_tmusecs = (1 << 13) / clk;
		clk *= 1000000;
	} else {
		/* kHz */
		ss->ss_tmusecs = (1000 * (1 << 13)) / clk;
		clk *= 1000;
	}
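
	/*
	 * Worked example (added for clarity; numbers are illustrative):
	 * a timeout clock reported as 48 in MHz units gives ss_tmusecs =
	 * 8192 / 48 ~= 170 usec per 2^13 timeout clocks; one reported as
	 * 32 in kHz units gives ss_tmusecs = (1000 * 8192) / 32 = 256000
	 * usec.  The loop below then picks the smallest exponent whose
	 * resulting timeout reaches roughly 4 seconds (or hits the
	 * register maximum).
	 */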

	/*
	 * Calculation of the timeout.
	 *
	 * SDIO cards use a 1sec timeout, and SDHC cards use fixed
	 * 100msec for read and 250 msec for write.
	 *
	 * Legacy cards running at 375kHz have a worst case of about
	 * 15 seconds.  Running at 25MHz (the standard speed) it is
	 * about 100msec for read, and about 3.2 sec for write.
	 * Typical values are 1/100th that, or about 1msec for read,
	 * and 32 msec for write.
	 *
	 * No transaction at full speed should ever take more than 4
	 * seconds.  (Some slow legacy cards might have trouble, but
	 * we'll worry about them if they ever are seen.  Nobody wants
	 * to wait 4 seconds to access a single block anyway!)
	 *
	 * To get to 4 seconds, we continuously double usec until we
	 * get to the maximum value, or a timeout greater than 4
	 * seconds.
	 *
	 * Note that for high-speed timeout clocks, we might not be
	 * able to get to the full 4 seconds.  E.g. with a 48MHz
	 * timeout clock, we can only get to about 2.8 seconds.  It's
	 * possible that there could be some slow MMC cards that will
	 * timeout at this clock rate, but it seems unlikely.  (The
	 * device would have to be pressing the very worst times,
	 * against the 100-fold "permissive" window allowed, and
	 * running at only 12.5MHz.)
	 *
	 * XXX: this could easily be a tunable.  Someone dealing with only
	 * reasonable cards could set this to just 1 second.
	 */
	for (ss->ss_tmoutclk = 0; ss->ss_tmoutclk < 14; ss->ss_tmoutclk++) {
		if ((ss->ss_tmusecs * (1 << ss->ss_tmoutclk)) >= 4000000) {
			break;
		}
	}

	/*
	 * Enable slot interrupts.
	 */
	sdhost_enable_interrupts(ss);

	return (DDI_SUCCESS);
}

void
sdhost_uninit_slot(sdhost_t *shp, int num)
{
	sdslot_t	*ss;

	ss = &shp->sh_slots[num];
	if (ss->ss_acch == NULL)
		return;

	(void) sdhost_soft_reset(ss, SOFT_RESET_ALL);

	ddi_regs_map_free(&ss->ss_acch);
	mutex_destroy(&ss->ss_lock);
}

void
sdhost_get_response(sdslot_t *ss, sda_cmd_t *cmdp)
{
	uint32_t	*resp = cmdp->sc_response;
	int		i;

	resp[0] = GET32(ss, REG_RESP1);
	resp[1] = GET32(ss, REG_RESP2);
	resp[2] = GET32(ss, REG_RESP3);
	resp[3] = GET32(ss, REG_RESP4);

	/*
	 * Response 2 is goofy because the host drops the low
	 * order CRC bits.  This makes it a bit awkward, so we
	 * have to shift the bits to make it work out right.
	 *
	 * Note that the framework expects the 32 bit
	 * words to be ordered in LE fashion.  (The
	 * bits within the words are in native order.)
	 */
	if (cmdp->sc_rtype == R2) {
		for (i = 3; i > 0; i--) {
			resp[i] <<= 8;
			resp[i] |= (resp[i - 1] >> 24);
		}
		resp[0] <<= 8;
	}
}

sda_err_t
sdhost_wait_cmd(sdslot_t *ss, sda_cmd_t *cmdp)
{
	int		i;
	uint16_t	errs;

	/*
	 * Worst case for a 100kHz timeout is 2msec (200 clocks); we add
	 * a tiny bit for safety.  (Generally the timeout will be far, far
	 * less than that.)
	 *
	 * Note that at a more typical 12MHz (and normally it will be
	 * even faster than that!) the device timeout is only
	 * 16.67 usec.  We could be smarter and reduce the delay time,
	 * but that would require putting more intelligence into the
	 * code, and we don't expect CMD timeout to normally occur
	 * except during initialization.  (At which time we need the
	 * full timeout anyway.)
	 *
	 * Checking the ERR_STAT will normally cause the loop to
	 * terminate early if the device is healthy, anyway.
	 */

	for (i = 3000; i > 0; i -= 5) {
		if (GET16(ss, REG_INT_STAT) & INT_CMD) {

			PUT16(ss, REG_INT_STAT, INT_CMD);

			/* command completed */
			sdhost_get_response(ss, cmdp);
			return (SDA_EOK);
		}

		if ((errs = (GET16(ss, REG_ERR_STAT) & ERR_CMD)) != 0) {
			PUT16(ss, REG_ERR_STAT, errs);

			/* command timeout isn't a host failure */
			if ((errs & ERR_CMD_TMO) == ERR_CMD_TMO) {
				return (SDA_ETIME);
			}

			if ((errs & ERR_CMD_CRC) == ERR_CMD_CRC) {
				return (SDA_ECRC7);
			} else {
				return (SDA_EPROTO);
			}
		}

		drv_usecwait(5);
	}

	return (SDA_ETIME);
}

sda_err_t
sdhost_poll(void *arg)
{
	sdslot_t	*ss = arg;

	(void) sdhost_slot_intr(ss);
	return (SDA_EOK);
}

sda_err_t
sdhost_cmd(void *arg, sda_cmd_t *cmdp)
{
	sdslot_t	*ss = arg;
	uint16_t	command;
	uint16_t	mode;
	sda_err_t	rv;

	/*
	 * Command register:
	 *	bit 13-8	= command index
	 *	bit 7-6		= command type (always zero for us!)
	 *	bit 5		= data present select
	 *	bit 4		= command index check (always on!)
	 *	bit 3		= command CRC check enable
	 *	bit 2		= reserved
	 *	bit 1-0		= response type
	 */

	command = ((uint16_t)cmdp->sc_index << 8);
	command |= COMMAND_TYPE_NORM |
	    COMMAND_INDEX_CHECK_EN | COMMAND_CRC_CHECK_EN;

	switch (cmdp->sc_rtype) {
	case R0:
		command |= COMMAND_RESP_NONE;
		break;
	case R1:
	case R5:
	case R6:
	case R7:
		command |= COMMAND_RESP_48;
		break;
	case R1b:
	case R5b:
		command |= COMMAND_RESP_48_BUSY;
		break;
	case R2:
		command |= COMMAND_RESP_136;
		command &= ~(COMMAND_INDEX_CHECK_EN | COMMAND_CRC_CHECK_EN);
		break;
	case R3:
	case R4:
		command |= COMMAND_RESP_48;
		command &= ~COMMAND_CRC_CHECK_EN;
		command &= ~COMMAND_INDEX_CHECK_EN;
		break;
	default:
		return (SDA_EINVAL);
	}

	mutex_enter(&ss->ss_lock);
	if (ss->ss_suspended) {
		mutex_exit(&ss->ss_lock);
		return (SDA_ESUSPENDED);
	}

	if (cmdp->sc_nblks != 0) {
		uint16_t	blksz;
		uint16_t	nblks;

		blksz = cmdp->sc_blksz;
		nblks = cmdp->sc_nblks;

		/*
		 * Ensure that we have good data.
		 */
		if ((blksz < 1) || (blksz > 2048)) {
			mutex_exit(&ss->ss_lock);
			return (SDA_EINVAL);
		}
		command |= COMMAND_DATA_PRESENT;

		ss->ss_blksz = blksz;

		/*
		 * Only SDMA for now.  We can investigate ADMA2 later.
		 * (Right now we don't have ADMA2 capable hardware.)
		 */
		if (((ss->ss_capab & CAPAB_SDMA) != 0) &&
		    (cmdp->sc_ndmac != 0)) {
			ddi_dma_cookie_t	*dmacs = cmdp->sc_dmacs;

			ASSERT(dmacs != NULL);

			ss->ss_kvaddr = NULL;
			ss->ss_resid = 0;
			ss->ss_dmacs = dmacs;
			ss->ss_ndmac = cmdp->sc_ndmac - 1;

			PUT32(ss, REG_SDMA_ADDR, dmacs->dmac_address);
			mode = XFR_MODE_DMA_EN;
			PUT16(ss, REG_BLKSZ, blksz);

		} else {
			ss->ss_kvaddr = (void *)cmdp->sc_kvaddr;
			ss->ss_resid = nblks;
			ss->ss_dmacs = NULL;
			ss->ss_ndmac = 0;
			mode = 0;
			PUT16(ss, REG_BLKSZ, blksz);
		}

		if (nblks > 1) {
			mode |= XFR_MODE_MULTI | XFR_MODE_COUNT;
			if (cmdp->sc_flags & SDA_CMDF_AUTO_CMD12)
				mode |= XFR_MODE_AUTO_CMD12;
		}
		if ((cmdp->sc_flags & SDA_CMDF_READ) != 0) {
			mode |= XFR_MODE_READ;
		}

		ss->ss_mode = mode;

		PUT8(ss, REG_TIMEOUT_CONTROL, ss->ss_tmoutclk);
		PUT16(ss, REG_BLOCK_COUNT, nblks);
		PUT16(ss, REG_XFR_MODE, mode);
	}

	PUT32(ss, REG_ARGUMENT, cmdp->sc_argument);
	PUT16(ss, REG_COMMAND, command);

	rv = sdhost_wait_cmd(ss, cmdp);

	mutex_exit(&ss->ss_lock);

	return (rv);
}

sda_err_t
sdhost_getprop(void *arg, sda_prop_t prop, uint32_t *val)
{
	sdslot_t	*ss = arg;
	sda_err_t	rv = 0;

	mutex_enter(&ss->ss_lock);

	if (ss->ss_suspended) {
		mutex_exit(&ss->ss_lock);
		return (SDA_ESUSPENDED);
	}

	switch (prop) {
	case SDA_PROP_INSERTED:
		if (CHECK_STATE(ss, CARD_INSERTED)) {
			*val = B_TRUE;
		} else {
			*val = B_FALSE;
		}
		break;

	case SDA_PROP_WPROTECT:
		if (CHECK_STATE(ss, WRITE_ENABLE)) {
			*val = B_FALSE;
		} else {
			*val = B_TRUE;
		}
		break;

	case SDA_PROP_OCR:
		*val = ss->ss_ocr;
		break;

	case SDA_PROP_CLOCK:
		*val = ss->ss_cardclk;
		break;

	case SDA_PROP_CAP_HISPEED:
		if ((ss->ss_capab & CAPAB_HIGH_SPEED) != 0) {
			*val = B_TRUE;
		} else {
			*val = B_FALSE;
		}
		break;

	case SDA_PROP_CAP_4BITS:
		*val = B_TRUE;
		break;

	case SDA_PROP_CAP_NOPIO:
		if ((ss->ss_capab & CAPAB_SDMA) != 0) {
			*val = B_TRUE;
		} else {
			*val = B_FALSE;
		}
		break;

	case SDA_PROP_CAP_INTR:
	case SDA_PROP_CAP_8BITS:
		*val = B_FALSE;
		break;

	default:
		rv = SDA_ENOTSUP;
		break;
	}
	mutex_exit(&ss->ss_lock);

	return (rv);
}

sda_err_t
sdhost_setprop(void *arg, sda_prop_t prop, uint32_t val)
{
	sdslot_t	*ss = arg;
	sda_err_t	rv = SDA_EOK;

	mutex_enter(&ss->ss_lock);

	if (ss->ss_suspended) {
		mutex_exit(&ss->ss_lock);
		return (SDA_ESUSPENDED);
	}

	switch (prop) {
	case SDA_PROP_LED:
		if (val) {
			SET8(ss, REG_HOST_CONTROL, HOST_CONTROL_LED_ON);
		} else {
			CLR8(ss, REG_HOST_CONTROL, HOST_CONTROL_LED_ON);
		}
		break;

	case SDA_PROP_CLOCK:
		rv = sdhost_set_clock(arg, val);
		break;

	case SDA_PROP_BUSWIDTH:
		switch (val) {
		case 1:
			CLR8(ss, REG_HOST_CONTROL, HOST_CONTROL_DATA_WIDTH);
			break;
		case 4:
			SET8(ss, REG_HOST_CONTROL, HOST_CONTROL_DATA_WIDTH);
			break;
		default:
			rv = SDA_EINVAL;
		}
		break;

	case SDA_PROP_OCR:
		val &= ss->ss_ocr;

		if (val & OCR_17_18V) {
			PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_18V);
			PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_18V |
			    POWER_CONTROL_BUS_POWER);
		} else if (val & OCR_29_30V) {
			PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_30V);
			PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_30V |
			    POWER_CONTROL_BUS_POWER);
		} else if (val & OCR_32_33V) {
			PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_33V);
			PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_33V |
			    POWER_CONTROL_BUS_POWER);
		} else if (val == 0) {
			/* turn off power */
			PUT8(ss, REG_POWER_CONTROL, 0);
		} else {
			rv = SDA_EINVAL;
		}
		break;

	case SDA_PROP_HISPEED:
		if (val) {
			SET8(ss, REG_HOST_CONTROL, HOST_CONTROL_HIGH_SPEED_EN);
		} else {
			CLR8(ss, REG_HOST_CONTROL, HOST_CONTROL_HIGH_SPEED_EN);
		}
		/* give clocks time to settle */
		drv_usecwait(10);
		break;

	default:
		rv = SDA_ENOTSUP;
		break;
	}

	/*
	 * Apparently some controllers (ENE) have issues with changing
	 * certain parameters (bus width seems to be one), requiring
	 * a reset of the DAT and CMD lines.
	 */
	if (rv == SDA_EOK) {
		(void) sdhost_soft_reset(ss, SOFT_RESET_CMD);
		(void) sdhost_soft_reset(ss, SOFT_RESET_DAT);
	}
	mutex_exit(&ss->ss_lock);
	return (rv);
}

sda_err_t
sdhost_reset(void *arg)
{
	sdslot_t	*ss = arg;

	mutex_enter(&ss->ss_lock);
	if (!ss->ss_suspended) {
		if (sdhost_soft_reset(ss, SOFT_RESET_ALL) != SDA_EOK) {
			mutex_exit(&ss->ss_lock);
			return (SDA_ETIME);
		}
		sdhost_enable_interrupts(ss);
	}
	mutex_exit(&ss->ss_lock);
	return (SDA_EOK);
}

sda_err_t
sdhost_halt(void *arg)
{
	sdslot_t	*ss = arg;

	mutex_enter(&ss->ss_lock);
	if (!ss->ss_suspended) {
		sdhost_disable_interrupts(ss);
		/* this has the side effect of removing power from the card */
		if (sdhost_soft_reset(ss, SOFT_RESET_ALL) != SDA_EOK) {
			mutex_exit(&ss->ss_lock);
			return (SDA_ETIME);
		}
	}
	mutex_exit(&ss->ss_lock);
	return (SDA_EOK);
}