/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "sdhost.h"

typedef struct sdslot	sdslot_t;
typedef struct sdhost	sdhost_t;

/*
 * Per slot state.
 */
struct sdslot {
	sda_host_t	*ss_host;
	int		ss_num;
	ddi_acc_handle_t ss_acch;
	caddr_t		ss_regva;
	kmutex_t	ss_lock;
	uint32_t	ss_capab;
	uint32_t	ss_baseclk;	/* Hz */
	uint32_t	ss_cardclk;	/* Hz */
	uint8_t		ss_tmoutclk;
	uint32_t	ss_tmusecs;	/* timeout units in usecs */
	uint32_t	ss_ocr;		/* OCR formatted voltages */
	uint16_t	ss_mode;
	boolean_t	ss_suspended;

	/*
	 * Command in progress
	 */
	uint8_t		*ss_kvaddr;
	ddi_dma_cookie_t *ss_dmacs;
	uint_t		ss_ndmac;
	int		ss_blksz;
	uint16_t	ss_resid;	/* in blocks */

	/* scratch buffer, to receive extra PIO data */
	uint32_t	ss_bounce[2048 / 4];
};

/*
 * Per controller state.
 */
struct sdhost {
	int		sh_numslots;
	ddi_dma_attr_t	sh_dmaattr;
	sdslot_t	sh_slots[SDHOST_MAXSLOTS];
	sda_host_t	*sh_host;

	/*
	 * Interrupt related information.
	 */
	ddi_intr_handle_t sh_ihandle;
	int		sh_icap;
	uint_t		sh_ipri;
};


static int sdhost_attach(dev_info_t *, ddi_attach_cmd_t);
static int sdhost_detach(dev_info_t *, ddi_detach_cmd_t);
static int sdhost_quiesce(dev_info_t *);
static int sdhost_suspend(dev_info_t *);
static int sdhost_resume(dev_info_t *);

static void sdhost_enable_interrupts(sdslot_t *);
static void sdhost_disable_interrupts(sdslot_t *);
static int sdhost_setup_intr(dev_info_t *, sdhost_t *);
static uint_t sdhost_intr(caddr_t, caddr_t);
static int sdhost_init_slot(dev_info_t *, sdhost_t *, int, int);
static void sdhost_uninit_slot(sdhost_t *, int);
static sda_err_t sdhost_soft_reset(sdslot_t *, uint8_t);
static sda_err_t sdhost_set_clock(sdslot_t *, uint32_t);
static void sdhost_xfer_done(sdslot_t *, sda_err_t);
static sda_err_t sdhost_wait_cmd(sdslot_t *, sda_cmd_t *);
static uint_t sdhost_slot_intr(sdslot_t *);

static sda_err_t sdhost_cmd(void *, sda_cmd_t *);
static sda_err_t sdhost_getprop(void *, sda_prop_t, uint32_t *);
static sda_err_t sdhost_setprop(void *, sda_prop_t, uint32_t);
static sda_err_t sdhost_poll(void *);
static sda_err_t sdhost_reset(void *);
static sda_err_t sdhost_halt(void *);

static struct dev_ops sdhost_dev_ops = {
	DEVO_REV,			/* devo_rev */
	0,				/* devo_refcnt */
	ddi_no_info,			/* devo_getinfo */
	nulldev,			/* devo_identify */
	nulldev,			/* devo_probe */
	sdhost_attach,			/* devo_attach */
	sdhost_detach,			/* devo_detach */
	nodev,				/* devo_reset */
	NULL,				/* devo_cb_ops */
	NULL,				/* devo_bus_ops */
	NULL,				/* devo_power */
	sdhost_quiesce,			/* devo_quiesce */
};

static struct modldrv sdhost_modldrv = {
	&mod_driverops,			/* drv_modops */
	"Standard SD Host Controller",	/* drv_linkinfo */
	&sdhost_dev_ops			/* drv_dev_ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,			/* ml_rev */
	{ &sdhost_modldrv, NULL }	/* ml_linkage */
};

static struct sda_ops sdhost_ops = {
	SDA_OPS_VERSION,
	sdhost_cmd,			/* so_cmd */
	sdhost_getprop,			/* so_getprop */
	sdhost_setprop,			/* so_setprop */
	sdhost_poll,			/* so_poll */
	sdhost_reset,			/* so_reset */
	sdhost_halt,			/* so_halt */
};

static ddi_device_acc_attr_t sdhost_regattr = {
	DDI_DEVICE_ATTR_V0,		/* devacc_attr_version */
	DDI_STRUCTURE_LE_ACC,		/* devacc_attr_endian_flags */
	DDI_STRICTORDER_ACC,		/* devacc_attr_dataorder */
	DDI_DEFAULT_ACC,		/* devacc_attr_access */
};

#define	GET16(ss, reg)	\
	ddi_get16(ss->ss_acch, (void *)(ss->ss_regva + reg))
#define	PUT16(ss, reg, val)	\
	ddi_put16(ss->ss_acch, (void *)(ss->ss_regva + reg), val)
#define	GET32(ss, reg)	\
	ddi_get32(ss->ss_acch, (void *)(ss->ss_regva + reg))
#define	PUT32(ss, reg, val)	\
	ddi_put32(ss->ss_acch, (void *)(ss->ss_regva + reg), val)
#define	GET64(ss, reg)	\
	ddi_get64(ss->ss_acch, (void *)(ss->ss_regva + reg))

#define	GET8(ss, reg)	\
	ddi_get8(ss->ss_acch, (void *)(ss->ss_regva + reg))
#define	PUT8(ss, reg, val)	\
	ddi_put8(ss->ss_acch, (void *)(ss->ss_regva + reg), val)

#define	CLR8(ss, reg, mask)	PUT8(ss, reg, GET8(ss, reg) & ~(mask))
#define	SET8(ss, reg, mask)	PUT8(ss, reg, GET8(ss, reg) | (mask))

/*
 * If ever anyone uses PIO on SPARC, we have to endian-swap.  But we
 * think that SD Host Controllers are likely to be uncommon on SPARC,
 * and hopefully when they exist at all they will be able to use DMA.
 */
#ifdef	_BIG_ENDIAN
#define	sw32(x)		ddi_swap32(x)
#define	sw16(x)		ddi_swap16(x)
#else
#define	sw32(x)		(x)
#define	sw16(x)		(x)
#endif

#define	GETDATA32(ss)		sw32(GET32(ss, REG_DATA))
#define	GETDATA16(ss)		sw16(GET16(ss, REG_DATA))
#define	GETDATA8(ss)		GET8(ss, REG_DATA)

#define	PUTDATA32(ss, val)	PUT32(ss, REG_DATA, sw32(val))
#define	PUTDATA16(ss, val)	PUT16(ss, REG_DATA, sw16(val))
#define	PUTDATA8(ss, val)	PUT8(ss, REG_DATA, val)

#define	CHECK_STATE(ss, nm)	\
	((GET32(ss, REG_PRS) & PRS_ ## nm) != 0)

int
_init(void)
{
	int	rv;

	sda_host_init_ops(&sdhost_dev_ops);

	if ((rv = mod_install(&modlinkage)) != 0) {
		sda_host_fini_ops(&sdhost_dev_ops);
	}

	return (rv);
}

int
_fini(void)
{
	int	rv;

	if ((rv = mod_remove(&modlinkage)) == 0) {
		sda_host_fini_ops(&sdhost_dev_ops);
	}
	return (rv);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
sdhost_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	sdhost_t	*shp;
	ddi_acc_handle_t pcih;
	uint8_t		slotinfo;
	uint8_t		bar;
	int		i;
	int		rv;

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		return (sdhost_resume(dip));

	default:
		return (DDI_FAILURE);
	}

	/*
	 * Soft state allocation.
	 */
	shp = kmem_zalloc(sizeof (*shp), KM_SLEEP);
	ddi_set_driver_private(dip, shp);

	/*
	 * Initialize DMA attributes.  For now we initialize as for
	 * SDMA.  If we add ADMA support we can improve this.
	 */
	shp->sh_dmaattr.dma_attr_version = DMA_ATTR_V0;
	shp->sh_dmaattr.dma_attr_addr_lo = 0;
	shp->sh_dmaattr.dma_attr_addr_hi = 0xffffffffU;
	shp->sh_dmaattr.dma_attr_count_max = 0xffffffffU;
	shp->sh_dmaattr.dma_attr_align = 1;
	shp->sh_dmaattr.dma_attr_burstsizes = 0;	/* for now! */
	shp->sh_dmaattr.dma_attr_minxfer = 1;
	shp->sh_dmaattr.dma_attr_maxxfer = 0xffffffffU;
	shp->sh_dmaattr.dma_attr_sgllen = -1;		/* unlimited! */
	shp->sh_dmaattr.dma_attr_seg = 0xfff;		/* 4K segments */
	shp->sh_dmaattr.dma_attr_granular = 1;
	shp->sh_dmaattr.dma_attr_flags = 0;

	/*
	 * PCI configuration access to figure out number of slots present.
	 */
	if (pci_config_setup(dip, &pcih) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "pci_config_setup failed");
		goto failed;
	}

	slotinfo = pci_config_get8(pcih, SLOTINFO);
	shp->sh_numslots = SLOTINFO_NSLOT(slotinfo);

	if (shp->sh_numslots > SDHOST_MAXSLOTS) {
		cmn_err(CE_WARN, "Host reports to have too many slots: %d",
		    shp->sh_numslots);
		goto failed;
	}

	/*
	 * Enable master accesses and DMA.
	 */
	pci_config_put16(pcih, PCI_CONF_COMM,
	    pci_config_get16(pcih, PCI_CONF_COMM) |
	    PCI_COMM_MAE | PCI_COMM_ME);

	/*
	 * Figure out which BAR to use.  Note that we number BARs from
	 * 1, although PCI and SD Host numbers from 0.  (We number
	 * from 1, because register number 0 means PCI configuration
	 * space in Solaris.)
	 */
	bar = SLOTINFO_BAR(slotinfo) + 1;

	pci_config_teardown(&pcih);

	/*
	 * Setup interrupts ... supports the new DDI interrupt API.  This
	 * will support MSI or MSI-X interrupts if a device is found to
	 * support it.
	 */
	if (sdhost_setup_intr(dip, shp) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "Failed to setup interrupts");
		goto failed;
	}

	shp->sh_host = sda_host_alloc(dip, shp->sh_numslots, &sdhost_ops,
	    &shp->sh_dmaattr);
	if (shp->sh_host == NULL) {
		cmn_err(CE_WARN, "Failed allocating SD host structure");
		goto failed;
	}

	/*
	 * Configure slots, this also maps registers, enables
	 * interrupts, etc.  Most of the hardware setup is done here.
	 */
	for (i = 0; i < shp->sh_numslots; i++) {
		if (sdhost_init_slot(dip, shp, i, bar + i) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "Failed initializing slot %d", i);
			goto failed;
		}
	}

	ddi_report_dev(dip);

	/*
	 * Enable device interrupts at the DDI layer.
	 */
	if (shp->sh_icap & DDI_INTR_FLAG_BLOCK) {
		rv = ddi_intr_block_enable(&shp->sh_ihandle, 1);
	} else {
		rv = ddi_intr_enable(shp->sh_ihandle);
	}
	if (rv != DDI_SUCCESS) {
		cmn_err(CE_WARN, "Failed enabling interrupts");
		goto failed;
	}

	/*
	 * Mark the slots online with the framework.  This will cause
	 * the framework to probe them for the presence of cards.
	 */
	if (sda_host_attach(shp->sh_host) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "Failed attaching to SDA framework");
		if (shp->sh_icap & DDI_INTR_FLAG_BLOCK) {
			(void) ddi_intr_block_disable(&shp->sh_ihandle, 1);
		} else {
			(void) ddi_intr_disable(shp->sh_ihandle);
		}
		goto failed;
	}

	return (DDI_SUCCESS);

failed:
	if (shp->sh_ihandle != NULL) {
		(void) ddi_intr_remove_handler(shp->sh_ihandle);
		(void) ddi_intr_free(shp->sh_ihandle);
	}
	for (i = 0; i < shp->sh_numslots; i++)
		sdhost_uninit_slot(shp, i);
	kmem_free(shp, sizeof (*shp));

	return (DDI_FAILURE);
}

int
sdhost_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	sdhost_t	*shp;
	int		i;

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		return (sdhost_suspend(dip));

	default:
		return (DDI_FAILURE);
	}

	shp = ddi_get_driver_private(dip);

	/*
	 * Take host offline with the framework.
	 */
	sda_host_detach(shp->sh_host);

	/*
	 * Tear down interrupts.
	 */
	if (shp->sh_ihandle != NULL) {
		if (shp->sh_icap & DDI_INTR_FLAG_BLOCK) {
			(void) ddi_intr_block_disable(&shp->sh_ihandle, 1);
		} else {
			(void) ddi_intr_disable(shp->sh_ihandle);
		}
		(void) ddi_intr_remove_handler(shp->sh_ihandle);
		(void) ddi_intr_free(shp->sh_ihandle);
	}

	/*
	 * Tear down register mappings, etc.
	 */
	for (i = 0; i < shp->sh_numslots; i++)
		sdhost_uninit_slot(shp, i);
	kmem_free(shp, sizeof (*shp));

	return (DDI_SUCCESS);
}

int
sdhost_quiesce(dev_info_t *dip)
{
	sdhost_t	*shp;
	sdslot_t	*ss;

	shp = ddi_get_driver_private(dip);

	/* reset each slot separately */
	for (int i = 0; i < shp->sh_numslots; i++) {
		ss = &shp->sh_slots[i];
		if (ss->ss_acch == NULL)
			continue;

		(void) sdhost_soft_reset(ss, SOFT_RESET_ALL);
	}
	return (DDI_SUCCESS);
}

int
sdhost_suspend(dev_info_t *dip)
{
	sdhost_t	*shp;
	sdslot_t	*ss;
	int		i;

	shp = ddi_get_driver_private(dip);

	sda_host_suspend(shp->sh_host);

	for (i = 0; i < shp->sh_numslots; i++) {
		ss = &shp->sh_slots[i];
		mutex_enter(&ss->ss_lock);
		ss->ss_suspended = B_TRUE;
		sdhost_disable_interrupts(ss);
		(void) sdhost_soft_reset(ss, SOFT_RESET_ALL);
		mutex_exit(&ss->ss_lock);
	}
	return (DDI_SUCCESS);
}

int
sdhost_resume(dev_info_t *dip)
{
	sdhost_t	*shp;
	sdslot_t	*ss;
	int		i;

	shp = ddi_get_driver_private(dip);

	for (i = 0; i < shp->sh_numslots; i++) {
		ss = &shp->sh_slots[i];
		mutex_enter(&ss->ss_lock);
		ss->ss_suspended = B_FALSE;
		(void) sdhost_soft_reset(ss, SOFT_RESET_ALL);
		sdhost_enable_interrupts(ss);
		mutex_exit(&ss->ss_lock);
	}

	sda_host_resume(shp->sh_host);

	return (DDI_SUCCESS);
}

sda_err_t
sdhost_set_clock(sdslot_t *ss, uint32_t hz)
{
	uint16_t	div;
	uint32_t	val;
	uint32_t	clk;
	int		count;

	/*
	 * Shut off the clock to begin.
	 */
	ss->ss_cardclk = 0;
	PUT16(ss, REG_CLOCK_CONTROL, 0);
	if (hz == 0) {
		return (SDA_EOK);
	}

	if (ss->ss_baseclk == 0) {
		sda_host_log(ss->ss_host, ss->ss_num,
		    "Base clock frequency not established.");
		return (SDA_EINVAL);
	}

	if ((hz > 25000000) && ((ss->ss_capab & CAPAB_HIGH_SPEED) != 0)) {
		/* this clock requires high speed timings! */
		SET8(ss, REG_HOST_CONTROL, HOST_CONTROL_HIGH_SPEED_EN);
	} else {
		/* don't allow clock to run faster than 25MHz */
		hz = min(hz, 25000000);
		CLR8(ss, REG_HOST_CONTROL, HOST_CONTROL_HIGH_SPEED_EN);
	}

	/* figure out the divider */
	clk = ss->ss_baseclk;
	div = 1;
	while (clk > hz) {
		if (div > 0x80)
			break;
		clk >>= 1;	/* divide clock by two */
		div <<= 1;	/* divider goes up by one */
	}
	div >>= 1;	/* 0 == divide by 1, 1 = divide by 2 */
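
	/*
	 * For example, assuming a 48 MHz base clock: a 25 MHz request
	 * halves the clock once (clk = 24 MHz, div = 2), and div >>= 1
	 * leaves 1, i.e. "divide by 2".  A 400 kHz identification clock
	 * stops the loop at clk = 375 kHz with div = 128, so the divisor
	 * field written below becomes 64 (divide by 128).
	 */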

	/*
	 * Set the internal clock divider first, without enabling the
	 * card clock yet.
	 */
	PUT16(ss, REG_CLOCK_CONTROL,
	    (div << CLOCK_CONTROL_FREQ_SHIFT) | CLOCK_CONTROL_INT_CLOCK_EN);

	/*
	 * Wait up to 100 msec for the internal clock to stabilize.
	 * (The spec does not seem to indicate a maximum timeout, but
	 * it also suggests that an infinite loop be used, which is
	 * not appropriate for hardened Solaris drivers.)
	 */
	for (count = 100000; count; count -= 10) {

		val = GET16(ss, REG_CLOCK_CONTROL);

		if (val & CLOCK_CONTROL_INT_CLOCK_STABLE) {
			/* if clock is stable, enable the SD clock pin */
			PUT16(ss, REG_CLOCK_CONTROL, val |
			    CLOCK_CONTROL_SD_CLOCK_EN);

			ss->ss_cardclk = clk;
			return (SDA_EOK);
		}

		drv_usecwait(10);
	}

	return (SDA_ETIME);
}

sda_err_t
sdhost_soft_reset(sdslot_t *ss, uint8_t bits)
{
	int	count;

	/*
	 * There appears to be a bug where Ricoh hosts might have a
	 * problem if the host frequency is not set.  If the card
	 * isn't present, or we are doing a master reset, just enable
	 * the internal clock at its native speed.  (No dividers, and
	 * not exposed to the card.)
	 */
	if ((bits == SOFT_RESET_ALL) || !(CHECK_STATE(ss, CARD_INSERTED))) {
		PUT16(ss, REG_CLOCK_CONTROL, CLOCK_CONTROL_INT_CLOCK_EN);
		/* simple 1msec wait, don't wait for clock to stabilize */
		drv_usecwait(1000);
	}

	PUT8(ss, REG_SOFT_RESET, bits);
	for (count = 100000; count != 0; count -= 10) {
		if ((GET8(ss, REG_SOFT_RESET) & bits) == 0) {
			return (SDA_EOK);
		}
		drv_usecwait(10);
	}

	return (SDA_ETIME);
}

void
sdhost_disable_interrupts(sdslot_t *ss)
{
	/* disable slot interrupts for card insert and remove */
	PUT16(ss, REG_INT_MASK, 0);
	PUT16(ss, REG_INT_EN, 0);

	/* disable error interrupts */
	PUT16(ss, REG_ERR_MASK, 0);
	PUT16(ss, REG_ERR_EN, 0);
}

void
sdhost_enable_interrupts(sdslot_t *ss)
{
	/*
	 * Note that we want to enable reading of the CMD related
	 * bits, but we do not want them to generate an interrupt.
	 * (The busy wait for typical CMD stuff will normally be less
	 * than 10usec, so it's simpler/easier to just poll.  Even in
	 * the worst case of 100 kHz, the poll is at worst 2 msec.)
	 */

	/* enable slot interrupts for card insert and remove */
	PUT16(ss, REG_INT_MASK, INT_MASK);
	PUT16(ss, REG_INT_EN, INT_ENAB);

	/* enable error interrupts */
	PUT16(ss, REG_ERR_MASK, ERR_MASK);
	PUT16(ss, REG_ERR_EN, ERR_ENAB);
}

int
sdhost_setup_intr(dev_info_t *dip, sdhost_t *shp)
{
	int		itypes;
	int		itype;

	/*
	 * Set up interrupt handler.
	 */
	if (ddi_intr_get_supported_types(dip, &itypes) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "ddi_intr_get_supported_types failed");
		return (DDI_FAILURE);
	}

	/*
	 * Interrupt types are bits in a mask.  We know about these ones:
	 * FIXED = 1
	 * MSI = 2
	 * MSIX = 4
	 */
	for (itype = DDI_INTR_TYPE_MSIX; itype != 0; itype >>= 1) {

		int	count;

		if ((itypes & itype) == 0) {
			/* this type is not supported on this device! */
			continue;
		}

		if ((ddi_intr_get_nintrs(dip, itype, &count) != DDI_SUCCESS) ||
		    (count == 0)) {
			cmn_err(CE_WARN, "ddi_intr_get_nintrs failed");
			continue;
		}

		/*
		 * We have not seen a host device with multiple
		 * interrupts (one per slot?), and the spec does not
		 * indicate that they exist.  But if one ever occurs,
		 * we spew a warning to help future debugging/support
		 * efforts.
		 */
		if (count > 1) {
			cmn_err(CE_WARN, "Controller offers %d interrupts, "
			    "but driver only supports one", count);
			continue;
		}

		if ((ddi_intr_alloc(dip, &shp->sh_ihandle, itype, 0, 1,
		    &count, DDI_INTR_ALLOC_NORMAL) != DDI_SUCCESS) ||
		    (count != 1)) {
			cmn_err(CE_WARN, "ddi_intr_alloc failed");
			continue;
		}

		if (ddi_intr_get_pri(shp->sh_ihandle, &shp->sh_ipri) !=
		    DDI_SUCCESS) {
			cmn_err(CE_WARN, "ddi_intr_get_pri failed");
			(void) ddi_intr_free(shp->sh_ihandle);
			shp->sh_ihandle = NULL;
			continue;
		}

		if (shp->sh_ipri >= ddi_intr_get_hilevel_pri()) {
			cmn_err(CE_WARN, "Hi level interrupt not supported");
			(void) ddi_intr_free(shp->sh_ihandle);
			shp->sh_ihandle = NULL;
			continue;
		}

		if (ddi_intr_get_cap(shp->sh_ihandle, &shp->sh_icap) !=
		    DDI_SUCCESS) {
			cmn_err(CE_WARN, "ddi_intr_get_cap failed");
			(void) ddi_intr_free(shp->sh_ihandle);
			shp->sh_ihandle = NULL;
			continue;
		}

		if (ddi_intr_add_handler(shp->sh_ihandle, sdhost_intr,
		    shp, NULL) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "ddi_intr_add_handler failed");
			(void) ddi_intr_free(shp->sh_ihandle);
			shp->sh_ihandle = NULL;
			continue;
		}

		return (DDI_SUCCESS);
	}

	return (DDI_FAILURE);
}

void
sdhost_xfer_done(sdslot_t *ss, sda_err_t errno)
{
	if ((errno == SDA_EOK) && (ss->ss_resid != 0)) {
		/* an unexpected partial transfer was found */
		errno = SDA_ERESID;
	}
	ss->ss_blksz = 0;
	ss->ss_resid = 0;

	if (errno != SDA_EOK) {
		(void) sdhost_soft_reset(ss, SOFT_RESET_CMD);
		(void) sdhost_soft_reset(ss, SOFT_RESET_DAT);

		/* send a STOP command if necessary */
		if (ss->ss_mode & XFR_MODE_AUTO_CMD12) {
			PUT32(ss, REG_ARGUMENT, 0);
			PUT16(ss, REG_COMMAND,
			    (CMD_STOP_TRANSMIT << 8) |
			    COMMAND_TYPE_NORM | COMMAND_INDEX_CHECK_EN |
			    COMMAND_CRC_CHECK_EN | COMMAND_RESP_48_BUSY);
		}
	}

	sda_host_transfer(ss->ss_host, ss->ss_num, errno);
}

uint_t
sdhost_slot_intr(sdslot_t *ss)
{
	uint16_t	intr;
	uint16_t	errs;
	uint8_t		*data;
	int		count;

	mutex_enter(&ss->ss_lock);

	if (ss->ss_suspended) {
		mutex_exit(&ss->ss_lock);
		return (DDI_INTR_UNCLAIMED);
	}

	intr = GET16(ss, REG_INT_STAT);
	if (intr == 0) {
		mutex_exit(&ss->ss_lock);
		return (DDI_INTR_UNCLAIMED);
	}
	errs = GET16(ss, REG_ERR_STAT);

	if (intr & (INT_REM | INT_INS)) {

		PUT16(ss, REG_INT_STAT, intr);
		mutex_exit(&ss->ss_lock);

		sda_host_detect(ss->ss_host, ss->ss_num);
		/* no further interrupt processing this cycle */
		return (DDI_INTR_CLAIMED);
	}

	if (intr & INT_DMA) {
		/*
		 * We have crossed a DMA/page boundary.  Cope with it.
		 */
		if (ss->ss_ndmac) {
			ss->ss_ndmac--;
			ss->ss_dmacs++;
			PUT16(ss, REG_INT_STAT, INT_DMA);
			PUT32(ss, REG_SDMA_ADDR, ss->ss_dmacs->dmac_address);

		} else {
			/*
			 * Apparently some sdhost controllers issue a
			 * final DMA interrupt if the DMA completes on
			 * a boundary, even though there is no further
			 * data to transfer.
			 *
			 * There might be a risk here of the
			 * controller continuing to access the same
			 * data over and over again, but we accept the
			 * risk.
			 */
			PUT16(ss, REG_INT_STAT, INT_DMA);
		}
	}

	if (intr & INT_RD) {
		/*
		 * PIO read!  PIO is quite suboptimal, but we expect
		 * performance critical applications to use DMA
		 * whenever possible.  We have to stage this through
		 * the bounce buffer to meet alignment considerations.
		 */

		PUT16(ss, REG_INT_STAT, INT_RD);

		while ((ss->ss_resid > 0) && CHECK_STATE(ss, BUF_RD_EN)) {

			data = (void *)ss->ss_bounce;
			count = ss->ss_blksz;

			ASSERT(count > 0);
			ASSERT(ss->ss_kvaddr != NULL);

			while (count >= sizeof (uint32_t)) {
				*(uint32_t *)(void *)data = GETDATA32(ss);
				data += sizeof (uint32_t);
				count -= sizeof (uint32_t);
			}
			while (count >= sizeof (uint16_t)) {
				*(uint16_t *)(void *)data = GETDATA16(ss);
				data += sizeof (uint16_t);
				count -= sizeof (uint16_t);
			}
			while (count >= sizeof (uint8_t)) {
				*(uint8_t *)data = GETDATA8(ss);
				data += sizeof (uint8_t);
				count -= sizeof (uint8_t);
			}

			bcopy(ss->ss_bounce, ss->ss_kvaddr, ss->ss_blksz);
			ss->ss_kvaddr += ss->ss_blksz;
			ss->ss_resid--;
		}
	}

	if (intr & INT_WR) {
		/*
		 * PIO write!  PIO is quite suboptimal, but we expect
		 * performance critical applications to use DMA
		 * whenever possible.  We have to stage this through
		 * the bounce buffer to meet alignment considerations.
		 */

		PUT16(ss, REG_INT_STAT, INT_WR);

		while ((ss->ss_resid > 0) && CHECK_STATE(ss, BUF_WR_EN)) {

			data = (void *)ss->ss_bounce;
			count = ss->ss_blksz;

			ASSERT(count > 0);
			ASSERT(ss->ss_kvaddr != NULL);

			bcopy(ss->ss_kvaddr, data, count);
			while (count >= sizeof (uint32_t)) {
				PUTDATA32(ss, *(uint32_t *)(void *)data);
				data += sizeof (uint32_t);
				count -= sizeof (uint32_t);
			}
			while (count >= sizeof (uint16_t)) {
				PUTDATA16(ss, *(uint16_t *)(void *)data);
				data += sizeof (uint16_t);
				count -= sizeof (uint16_t);
			}
			while (count >= sizeof (uint8_t)) {
				PUTDATA8(ss, *(uint8_t *)data);
				data += sizeof (uint8_t);
				count -= sizeof (uint8_t);
			}

			ss->ss_kvaddr += ss->ss_blksz;
			ss->ss_resid--;
		}
	}

	if (intr & INT_XFR) {
		PUT16(ss, REG_INT_STAT, INT_XFR);

		sdhost_xfer_done(ss, SDA_EOK);
	}

	if (intr & INT_ERR) {
		PUT16(ss, REG_ERR_STAT, errs);
		PUT16(ss, REG_INT_STAT, INT_ERR);

		if (errs & ERR_DAT) {
			if ((errs & ERR_DAT_END) == ERR_DAT_END) {
				sdhost_xfer_done(ss, SDA_EPROTO);
			} else if ((errs & ERR_DAT_CRC) == ERR_DAT_CRC) {
				sdhost_xfer_done(ss, SDA_ECRC7);
			} else {
				sdhost_xfer_done(ss, SDA_ETIME);
			}

		} else if (errs & ERR_ACMD12) {
			/*
			 * Generally, this is bad news.  We need a full
			 * reset to recover properly.
			 */
			sdhost_xfer_done(ss, SDA_ECMD12);
		}

		/*
		 * This asynchronous error leaves the slot more or less
		 * useless.  Report it to the framework.
		 */
		if (errs & ERR_CURRENT) {
			sda_host_fault(ss->ss_host, ss->ss_num,
			    SDA_FAULT_CURRENT);
		}
	}

	mutex_exit(&ss->ss_lock);

	return (DDI_INTR_CLAIMED);
}

/*ARGSUSED1*/
uint_t
sdhost_intr(caddr_t arg1, caddr_t arg2)
{
	sdhost_t	*shp = (void *)arg1;
	int		rv = DDI_INTR_UNCLAIMED;
	int		num;

	/* interrupt for each of the slots present in the system */
	for (num = 0; num < shp->sh_numslots; num++) {
		if (sdhost_slot_intr(&shp->sh_slots[num]) ==
		    DDI_INTR_CLAIMED) {
			rv = DDI_INTR_CLAIMED;
		}
	}
	return (rv);
}

int
sdhost_init_slot(dev_info_t *dip, sdhost_t *shp, int num, int bar)
{
	sdslot_t	*ss;
	uint32_t	capab;
	uint32_t	clk;

	/*
	 * Register the private state.
	 */
	ss = &shp->sh_slots[num];
	ss->ss_host = shp->sh_host;
	ss->ss_num = num;
	sda_host_set_private(shp->sh_host, num, ss);

	/*
	 * Initialize core data structure, locks, etc.
	 */
	mutex_init(&ss->ss_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(shp->sh_ipri));

	if (ddi_regs_map_setup(dip, bar, &ss->ss_regva, 0, 0, &sdhost_regattr,
	    &ss->ss_acch) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "Failed to map registers!");
		return (DDI_FAILURE);
	}

	/* reset before reading capabilities */
	if (sdhost_soft_reset(ss, SOFT_RESET_ALL) != SDA_EOK)
		return (DDI_FAILURE);

	capab = GET64(ss, REG_CAPAB) & 0xffffffffU;	/* upper bits reserved */
	ss->ss_capab = capab;

	/* host voltages in OCR format */
	ss->ss_ocr = 0;
	if (capab & CAPAB_18V)
		ss->ss_ocr |= OCR_18_19V;	/* 1.8V */
	if (capab & CAPAB_30V)
		ss->ss_ocr |= OCR_30_31V;
	if (capab & CAPAB_33V)
		ss->ss_ocr |= OCR_32_33V;

	/* base clock */
	ss->ss_baseclk =
	    ((capab & CAPAB_BASE_FREQ_MASK) >> CAPAB_BASE_FREQ_SHIFT);
	ss->ss_baseclk *= 1000000;

	/*
	 * Timeout clock.  We can calculate this using the following
	 * formula:
	 *
	 * (1000000 usec/1sec) * (1sec/tmoutclk) * base factor = clock time
	 *
	 * Clock time is the length of the base clock in usecs.
	 *
	 * Our base factor is 2^13, which is the shortest clock we
	 * can count.
	 *
	 * To simplify the math and avoid overflow, we cancel out the
	 * zeros for kHz or MHz.  Since we want to wait more clocks, not
	 * less, on error, we truncate the result rather than rounding
	 * up.
	 */
	clk = ((capab & CAPAB_TIMEOUT_FREQ_MASK) >> CAPAB_TIMEOUT_FREQ_SHIFT);
	if ((ss->ss_baseclk == 0) || (clk == 0)) {
		cmn_err(CE_WARN, "Unable to determine clock frequencies");
		return (DDI_FAILURE);
	}

	if (capab & CAPAB_TIMEOUT_UNITS) {
		/* MHz */
		ss->ss_tmusecs = (1 << 13) / clk;
		clk *= 1000000;
	} else {
		/* kHz */
		ss->ss_tmusecs = (1000 * (1 << 13)) / clk;
		clk *= 1000;
	}
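
	/*
	 * For example, a 1 MHz timeout clock gives 8192 / 1 = 8192 usec
	 * per base timeout unit, while a 32 kHz timeout clock gives
	 * (1000 * 8192) / 32 = 256000 usec (256 msec) per unit.  (These
	 * frequencies are only illustrative; the real value comes from
	 * the capabilities register read above.)
	 */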

	/*
	 * Calculation of the timeout.
	 *
	 * SDIO cards use a 1 sec timeout, and SDHC cards use fixed
	 * 100 msec for read and 250 msec for write.
	 *
	 * Legacy cards running at 375 kHz have a worst case of about
	 * 15 seconds.  Running at 25 MHz (the standard speed) it is
	 * about 100 msec for read, and about 3.2 sec for write.
	 * Typical values are 1/100th that, or about 1 msec for read,
	 * and 32 msec for write.
	 *
	 * No transaction at full speed should ever take more than 4
	 * seconds.  (Some slow legacy cards might have trouble, but
	 * we'll worry about them if they ever are seen.  Nobody wants
	 * to wait 4 seconds to access a single block anyway!)
	 *
	 * To get to 4 seconds, we continuously double usec until we
	 * get to the maximum value, or a timeout greater than 4
	 * seconds.
	 *
	 * Note that for high-speed timeout clocks, we might not be
	 * able to get to the full 4 seconds.  E.g. with a 48 MHz
	 * timeout clock, we can only get to about 2.8 seconds.  It's
	 * possible that there could be some slow MMC cards that will
	 * timeout at this clock rate, but it seems unlikely.  (The
	 * device would have to be pressing the very worst times,
	 * against the 100-fold "permissive" window allowed, and
	 * running at only 12.5 MHz.)
	 *
	 * XXX: this could easily be a tunable.  Someone dealing with only
	 * reasonable cards could set this to just 1 second.
	 */
	for (ss->ss_tmoutclk = 0; ss->ss_tmoutclk < 14; ss->ss_tmoutclk++) {
		if ((ss->ss_tmusecs * (1 << ss->ss_tmoutclk)) >= 4000000) {
			break;
		}
	}
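
	/*
	 * For example, at 256000 usec per base timeout unit (a 32 kHz
	 * timeout clock), the loop above stops at ss_tmoutclk = 4, since
	 * 256000 * (1 << 4) is about 4.1 seconds, the first doubling at
	 * or above 4 seconds.
	 */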

	/*
	 * Enable slot interrupts.
	 */
	sdhost_enable_interrupts(ss);

	return (DDI_SUCCESS);
}

void
sdhost_uninit_slot(sdhost_t *shp, int num)
{
	sdslot_t	*ss;

	ss = &shp->sh_slots[num];
	if (ss->ss_acch == NULL)
		return;

	(void) sdhost_soft_reset(ss, SOFT_RESET_ALL);

	ddi_regs_map_free(&ss->ss_acch);
	mutex_destroy(&ss->ss_lock);
}

void
sdhost_get_response(sdslot_t *ss, sda_cmd_t *cmdp)
{
	uint32_t	*resp = cmdp->sc_response;
	int		i;

	resp[0] = GET32(ss, REG_RESP1);
	resp[1] = GET32(ss, REG_RESP2);
	resp[2] = GET32(ss, REG_RESP3);
	resp[3] = GET32(ss, REG_RESP4);

	/*
	 * Response 2 is goofy because the host drops the low
	 * order CRC bits.  This makes it a bit awkward, so we
	 * have to shift the bits to make it work out right.
	 *
	 * Note that the framework expects the 32 bit
	 * words to be ordered in LE fashion.  (The
	 * bits within the words are in native order).
	 */
	if (cmdp->sc_rtype == R2) {
		for (i = 3; i > 0; i--) {
			resp[i] <<= 8;
			resp[i] |= (resp[i - 1] >> 24);
		}
		resp[0] <<= 8;
	}
}

sda_err_t
sdhost_wait_cmd(sdslot_t *ss, sda_cmd_t *cmdp)
{
	int		i;
	uint16_t	errs;
	sda_err_t	rv;

	/*
	 * Worst case for 100 kHz timeout is 2 msec (200 clocks), we add
	 * a tiny bit for safety.  (Generally timeout will be far, far
	 * less than that.)
	 *
	 * Note that at more typical 12 MHz (and normally it will be
	 * even faster than that!) that the device timeout is only
	 * 16.67 usec.  We could be smarter and reduce the delay time,
	 * but that would require putting more intelligence into the
	 * code, and we don't expect CMD timeout to normally occur
	 * except during initialization.  (At which time we need the
	 * full timeout anyway.)
	 *
	 * Checking the ERR_STAT will normally cause the loop to
	 * finish early if the device is healthy, anyway.
	 */

	for (i = 3000; i > 0; i -= 5) {
		if (GET16(ss, REG_INT_STAT) & INT_CMD) {

			PUT16(ss, REG_INT_STAT, INT_CMD);

			/* command completed */
			sdhost_get_response(ss, cmdp);
			return (SDA_EOK);
		}

		if ((errs = (GET16(ss, REG_ERR_STAT) & ERR_CMD)) != 0) {
			PUT16(ss, REG_ERR_STAT, errs);

			/* command timeout isn't a host failure */
			if ((errs & ERR_CMD_TMO) == ERR_CMD_TMO) {
				rv = SDA_ETIME;
			} else if ((errs & ERR_CMD_CRC) == ERR_CMD_CRC) {
				rv = SDA_ECRC7;
			} else {
				rv = SDA_EPROTO;
			}
			goto error;
		}

		drv_usecwait(5);
	}

	rv = SDA_ETIME;

error:
	/*
	 * NB: We need to soft reset the CMD and DAT
	 * lines after a failure of this sort.
	 */
	(void) sdhost_soft_reset(ss, SOFT_RESET_CMD);
	(void) sdhost_soft_reset(ss, SOFT_RESET_DAT);

	return (rv);
}

sda_err_t
sdhost_poll(void *arg)
{
	sdslot_t	*ss = arg;

	(void) sdhost_slot_intr(ss);
	return (SDA_EOK);
}

sda_err_t
sdhost_cmd(void *arg, sda_cmd_t *cmdp)
{
	sdslot_t	*ss = arg;
	uint16_t	command;
	uint16_t	mode;
	sda_err_t	rv;

	/*
	 * Command register:
	 * bit 13-8	= command index
	 * bit 7-6	= command type (always zero for us!)
	 * bit 5	= data present select
	 * bit 4	= command index check (always on!)
	 * bit 3	= command CRC check enable
	 * bit 2	= reserved
	 * bit 1-0	= response type
	 */
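
	/*
	 * For example, a single block read (CMD17, R1 response) ends up
	 * as (17 << 8) with the index check, CRC check, and 48-bit
	 * response bits set; COMMAND_DATA_PRESENT is OR'ed in below once
	 * the data phase is set up.  (This assumes the COMMAND_* constants
	 * follow the standard SD Host Controller encoding of the layout
	 * shown above.)
	 */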

	command = ((uint16_t)cmdp->sc_index << 8);
	command |= COMMAND_TYPE_NORM |
	    COMMAND_INDEX_CHECK_EN | COMMAND_CRC_CHECK_EN;

	switch (cmdp->sc_rtype) {
	case R0:
		command |= COMMAND_RESP_NONE;
		break;
	case R1:
	case R5:
	case R6:
	case R7:
		command |= COMMAND_RESP_48;
		break;
	case R1b:
	case R5b:
		command |= COMMAND_RESP_48_BUSY;
		break;
	case R2:
		command |= COMMAND_RESP_136;
		command &= ~(COMMAND_INDEX_CHECK_EN | COMMAND_CRC_CHECK_EN);
		break;
	case R3:
	case R4:
		command |= COMMAND_RESP_48;
		command &= ~COMMAND_CRC_CHECK_EN;
		command &= ~COMMAND_INDEX_CHECK_EN;
		break;
	default:
		return (SDA_EINVAL);
	}

	mutex_enter(&ss->ss_lock);
	if (ss->ss_suspended) {
		mutex_exit(&ss->ss_lock);
		return (SDA_ESUSPENDED);
	}

	if (cmdp->sc_nblks != 0) {
		uint16_t	blksz;
		uint16_t	nblks;

		blksz = cmdp->sc_blksz;
		nblks = cmdp->sc_nblks;

		/*
		 * Ensure that we have good data.
		 */
		if ((blksz < 1) || (blksz > 2048)) {
			mutex_exit(&ss->ss_lock);
			return (SDA_EINVAL);
		}
		command |= COMMAND_DATA_PRESENT;

		ss->ss_blksz = blksz;

		/*
		 * Only SDMA for now.  We can investigate ADMA2 later.
		 * (Right now we don't have ADMA2 capable hardware.)
		 */
		if (((ss->ss_capab & CAPAB_SDMA) != 0) &&
		    (cmdp->sc_ndmac != 0)) {
			ddi_dma_cookie_t	*dmacs = cmdp->sc_dmacs;

			ASSERT(dmacs != NULL);

			ss->ss_kvaddr = NULL;
			ss->ss_resid = 0;
			ss->ss_dmacs = dmacs;
			ss->ss_ndmac = cmdp->sc_ndmac - 1;

			PUT32(ss, REG_SDMA_ADDR, dmacs->dmac_address);
			mode = XFR_MODE_DMA_EN;
			PUT16(ss, REG_BLKSZ, blksz);

		} else {
			ss->ss_kvaddr = (void *)cmdp->sc_kvaddr;
			ss->ss_resid = nblks;
			ss->ss_dmacs = NULL;
			ss->ss_ndmac = 0;
			mode = 0;
			PUT16(ss, REG_BLKSZ, blksz);
		}

		if (nblks > 1) {
			mode |= XFR_MODE_MULTI | XFR_MODE_COUNT;
			if (cmdp->sc_flags & SDA_CMDF_AUTO_CMD12)
				mode |= XFR_MODE_AUTO_CMD12;
		}
		if ((cmdp->sc_flags & SDA_CMDF_READ) != 0) {
			mode |= XFR_MODE_READ;
		}

		ss->ss_mode = mode;

		PUT8(ss, REG_TIMEOUT_CONTROL, ss->ss_tmoutclk);
		PUT16(ss, REG_BLOCK_COUNT, nblks);
		PUT16(ss, REG_XFR_MODE, mode);
	}

	PUT32(ss, REG_ARGUMENT, cmdp->sc_argument);
	PUT16(ss, REG_COMMAND, command);

	rv = sdhost_wait_cmd(ss, cmdp);

	mutex_exit(&ss->ss_lock);

	return (rv);
}

sda_err_t
sdhost_getprop(void *arg, sda_prop_t prop, uint32_t *val)
{
	sdslot_t	*ss = arg;
	sda_err_t	rv = 0;

	mutex_enter(&ss->ss_lock);

	if (ss->ss_suspended) {
		mutex_exit(&ss->ss_lock);
		return (SDA_ESUSPENDED);
	}
	switch (prop) {
	case SDA_PROP_INSERTED:
		if (CHECK_STATE(ss, CARD_INSERTED)) {
			*val = B_TRUE;
		} else {
			*val = B_FALSE;
		}
		break;

	case SDA_PROP_WPROTECT:
		if (CHECK_STATE(ss, WRITE_ENABLE)) {
			*val = B_FALSE;
		} else {
			*val = B_TRUE;
		}
		break;

	case SDA_PROP_OCR:
		*val = ss->ss_ocr;
		break;

	case SDA_PROP_CLOCK:
		*val = ss->ss_cardclk;
		break;

	case SDA_PROP_CAP_HISPEED:
		if ((ss->ss_capab & CAPAB_HIGH_SPEED) != 0) {
			*val = B_TRUE;
		} else {
			*val = B_FALSE;
		}
		break;

	case SDA_PROP_CAP_4BITS:
		*val = B_TRUE;
		break;

	case SDA_PROP_CAP_NOPIO:
		if ((ss->ss_capab & CAPAB_SDMA) != 0) {
			*val = B_TRUE;
		} else {
			*val = B_FALSE;
		}
		break;

	case SDA_PROP_CAP_INTR:
	case SDA_PROP_CAP_8BITS:
		*val = B_FALSE;
		break;

	default:
		rv = SDA_ENOTSUP;
		break;
	}
	mutex_exit(&ss->ss_lock);

	return (rv);
}

sda_err_t
sdhost_setprop(void *arg, sda_prop_t prop, uint32_t val)
{
	sdslot_t	*ss = arg;
	sda_err_t	rv = SDA_EOK;

	mutex_enter(&ss->ss_lock);

	if (ss->ss_suspended) {
		mutex_exit(&ss->ss_lock);
		return (SDA_ESUSPENDED);
	}

	switch (prop) {
	case SDA_PROP_LED:
		if (val) {
			SET8(ss, REG_HOST_CONTROL, HOST_CONTROL_LED_ON);
		} else {
			CLR8(ss, REG_HOST_CONTROL, HOST_CONTROL_LED_ON);
		}
		break;

	case SDA_PROP_CLOCK:
		rv = sdhost_set_clock(arg, val);
		break;

	case SDA_PROP_BUSWIDTH:
		switch (val) {
		case 1:
			CLR8(ss, REG_HOST_CONTROL, HOST_CONTROL_DATA_WIDTH);
			break;
		case 4:
			SET8(ss, REG_HOST_CONTROL, HOST_CONTROL_DATA_WIDTH);
			break;
		default:
			rv = SDA_EINVAL;
		}
		break;

	case SDA_PROP_OCR:
		val &= ss->ss_ocr;

		if (val & OCR_17_18V) {
			PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_18V);
			PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_18V |
			    POWER_CONTROL_BUS_POWER);
		} else if (val & OCR_29_30V) {
			PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_30V);
			PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_30V |
			    POWER_CONTROL_BUS_POWER);
		} else if (val & OCR_32_33V) {
			PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_33V);
			PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_33V |
			    POWER_CONTROL_BUS_POWER);
		} else if (val == 0) {
			/* turn off power */
			PUT8(ss, REG_POWER_CONTROL, 0);
		} else {
			rv = SDA_EINVAL;
		}
		break;

	case SDA_PROP_HISPEED:
		if (val) {
			SET8(ss, REG_HOST_CONTROL, HOST_CONTROL_HIGH_SPEED_EN);
		} else {
			CLR8(ss, REG_HOST_CONTROL, HOST_CONTROL_HIGH_SPEED_EN);
		}
		/* give clocks time to settle */
		drv_usecwait(10);
		break;

	default:
		rv = SDA_ENOTSUP;
		break;
	}

	/*
	 * Apparently some controllers (ENE) have issues with changing
	 * certain parameters (bus width seems to be one), requiring
	 * a reset of the DAT and CMD lines.
	 */
	if (rv == SDA_EOK) {
		(void) sdhost_soft_reset(ss, SOFT_RESET_CMD);
		(void) sdhost_soft_reset(ss, SOFT_RESET_DAT);
	}
	mutex_exit(&ss->ss_lock);
	return (rv);
}

sda_err_t
sdhost_reset(void *arg)
{
	sdslot_t	*ss = arg;

	mutex_enter(&ss->ss_lock);
	if (!ss->ss_suspended) {
		if (sdhost_soft_reset(ss, SOFT_RESET_ALL) != SDA_EOK) {
			mutex_exit(&ss->ss_lock);
			return (SDA_ETIME);
		}
		sdhost_enable_interrupts(ss);
	}
	mutex_exit(&ss->ss_lock);
	return (SDA_EOK);
}

sda_err_t
sdhost_halt(void *arg)
{
	sdslot_t	*ss = arg;

	mutex_enter(&ss->ss_lock);
	if (!ss->ss_suspended) {
		sdhost_disable_interrupts(ss);
		/* this has the side effect of removing power from the card */
		if (sdhost_soft_reset(ss, SOFT_RESET_ALL) != SDA_EOK) {
			mutex_exit(&ss->ss_lock);
			return (SDA_ETIME);
		}
	}
	mutex_exit(&ss->ss_lock);
	return (SDA_EOK);
}