1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25 #include "sdhost.h"
26
/*
 * Forward typedefs for the kstat, per-slot, and per-controller
 * structures defined below.
 */
typedef struct sdstats sdstats_t;
typedef struct sdslot sdslot_t;
typedef struct sdhost sdhost_t;

/*
 * Named kstats exported per slot (installed in sdhost_init_slot()).
 * The first eight are 64-bit event counters; the remainder are 32-bit
 * configuration/state values.  The ss_* accessor macros in sdslot
 * below provide shorthand access to the value fields.
 */
struct sdstats {
	kstat_named_t ks_ncmd;		/* "ncmd": commands issued */
	kstat_named_t ks_ixfr;		/* "ixfr": inbound transfers */
	kstat_named_t ks_oxfr;		/* "oxfr": outbound transfers */
	kstat_named_t ks_ibytes;	/* "ibytes": bytes read */
	kstat_named_t ks_obytes;	/* "obytes": bytes written */
	kstat_named_t ks_npio;		/* "npio": PIO transfers */
	kstat_named_t ks_ndma;		/* "ndma": DMA transfers */
	kstat_named_t ks_nmulti;	/* "nmulti": multi-block transfers */
	kstat_named_t ks_baseclk;	/* "baseclk": base clock, Hz */
	kstat_named_t ks_cardclk;	/* "cardclk": card clock, Hz */
	kstat_named_t ks_tmusecs;	/* "tmusecs": usecs per timeout tick */
	kstat_named_t ks_width;		/* "width": bus width */
	kstat_named_t ks_flags;		/* "flags": SDFLAG_* below */
	kstat_named_t ks_capab;		/* "capab": raw CAPAB register */
};

/* Values for ks_flags/ss_flags, set from driver properties. */
#define SDFLAG_FORCE_PIO (1U << 0)
#define SDFLAG_FORCE_DMA (1U << 1)
50
51 /*
52 * Per slot state.
53 */
struct sdslot {
	sda_host_t *ss_host;		/* SDA framework host handle */
	int ss_num;			/* slot index; -1 until initialized */
	ddi_acc_handle_t ss_acch;	/* slot register access handle */
	caddr_t ss_regva;		/* mapped slot register base */
	kmutex_t ss_lock;		/* protects slot state and kstats */
	uint8_t ss_tmoutclk;		/* data timeout value (set during */
					/* command setup -- not visible here) */
	uint32_t ss_ocr; /* OCR formatted voltages */
	uint16_t ss_mode;		/* XFR_MODE_* bits for current xfer */
	boolean_t ss_suspended;		/* B_TRUE while in DDI_SUSPEND */
	sdstats_t ss_stats;		/* kstat data (see accessors below) */
#define ss_ncmd ss_stats.ks_ncmd.value.ui64
#define ss_ixfr ss_stats.ks_ixfr.value.ui64
#define ss_oxfr ss_stats.ks_oxfr.value.ui64
#define ss_ibytes ss_stats.ks_ibytes.value.ui64
#define ss_obytes ss_stats.ks_obytes.value.ui64
#define ss_ndma ss_stats.ks_ndma.value.ui64
#define ss_npio ss_stats.ks_npio.value.ui64
#define ss_nmulti ss_stats.ks_nmulti.value.ui64

#define ss_baseclk ss_stats.ks_baseclk.value.ui32
#define ss_cardclk ss_stats.ks_cardclk.value.ui32
#define ss_tmusecs ss_stats.ks_tmusecs.value.ui32
#define ss_width ss_stats.ks_width.value.ui32
#define ss_flags ss_stats.ks_flags.value.ui32
#define ss_capab ss_stats.ks_capab.value.ui32
	kstat_t *ss_ksp;		/* virtual kstat over ss_stats */

	/*
	 * Command in progress
	 */
	uint8_t *ss_kvaddr;		/* caller buffer position for copies */
	int ss_blksz;			/* block size of current transfer */
	uint16_t ss_resid; /* in blocks */
	int ss_rcnt;			/* bytes to copy back after DMA read */

	/* scratch buffer, to receive extra PIO data */
	caddr_t ss_bounce;		/* bounce buffer kernel address */
	ddi_dma_handle_t ss_bufdmah;	/* bounce buffer DMA handle */
	ddi_acc_handle_t ss_bufacch;	/* bounce buffer access handle */
	ddi_dma_cookie_t ss_bufdmac;	/* bounce buffer DMA cookie */
};
96
97 /*
98 * This allocates a rather large chunk of contiguous memory for DMA.
99 * But doing so means that we'll almost never have to resort to PIO.
100 */
101 #define SDHOST_BOUNCESZ 65536
102
103 /*
104 * Per controller state.
105 */
struct sdhost {
	int sh_numslots;		/* slots reported in PCI SLOTINFO */
	ddi_dma_attr_t sh_dmaattr;	/* DMA attributes (SDMA for now) */
	sdslot_t sh_slots[SDHOST_MAXSLOTS];	/* per-slot state */
	sda_host_t *sh_host;		/* SDA framework host handle */

	/*
	 * Interrupt related information.  A single vector is shared
	 * by all slots (see sdhost_setup_intr()).
	 */
	ddi_intr_handle_t sh_ihandle;	/* NULL if no interrupt installed */
	int sh_icap;			/* capabilities (DDI_INTR_FLAG_*) */
	uint_t sh_ipri;			/* interrupt priority */
};
119
/*
 * True if the named integer property is set (non-zero) on the device
 * node.  NB: this macro relies on a variable named "dip" being in
 * scope at the point of use.
 */
#define PROPSET(x) \
	(ddi_prop_get_int(DDI_DEV_T_ANY, dip, \
	DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, x, 0) != 0)
123
124
125 static int sdhost_attach(dev_info_t *, ddi_attach_cmd_t);
126 static int sdhost_detach(dev_info_t *, ddi_detach_cmd_t);
127 static int sdhost_quiesce(dev_info_t *);
128 static int sdhost_suspend(dev_info_t *);
129 static int sdhost_resume(dev_info_t *);
130
131 static void sdhost_enable_interrupts(sdslot_t *);
132 static void sdhost_disable_interrupts(sdslot_t *);
133 static int sdhost_setup_intr(dev_info_t *, sdhost_t *);
134 static uint_t sdhost_intr(caddr_t, caddr_t);
135 static int sdhost_init_slot(dev_info_t *, sdhost_t *, int, int);
136 static void sdhost_uninit_slot(sdhost_t *, int);
137 static sda_err_t sdhost_soft_reset(sdslot_t *, uint8_t);
138 static sda_err_t sdhost_set_clock(sdslot_t *, uint32_t);
139 static void sdhost_xfer_done(sdslot_t *, sda_err_t);
140 static sda_err_t sdhost_wait_cmd(sdslot_t *, sda_cmd_t *);
141 static uint_t sdhost_slot_intr(sdslot_t *);
142
143 static sda_err_t sdhost_cmd(void *, sda_cmd_t *);
144 static sda_err_t sdhost_getprop(void *, sda_prop_t, uint32_t *);
145 static sda_err_t sdhost_setprop(void *, sda_prop_t, uint32_t);
146 static sda_err_t sdhost_poll(void *);
147 static sda_err_t sdhost_reset(void *);
148 static sda_err_t sdhost_halt(void *);
149
/*
 * Device operations vector.  The cb_ops and bus_ops fields are NULL;
 * sda_host_init_ops() (called from _init()) fills in what the SDA
 * framework needs.
 */
static struct dev_ops sdhost_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	ddi_no_info,		/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	sdhost_attach,		/* devo_attach */
	sdhost_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	NULL,			/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	NULL,			/* devo_power */
	sdhost_quiesce,		/* devo_quiesce */
};

/*
 * Loadable module linkage for a driver module.
 */
static struct modldrv sdhost_modldrv = {
	&mod_driverops,			/* drv_modops */
	"Standard SD Host Controller",	/* drv_linkinfo */
	&sdhost_dev_ops			/* drv_dev_ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,			/* ml_rev */
	{ &sdhost_modldrv, NULL }	/* ml_linkage */
};
175
/*
 * Entry points registered with the SDA framework via sda_host_alloc().
 */
static struct sda_ops sdhost_ops = {
	SDA_OPS_VERSION,
	sdhost_cmd,		/* so_cmd */
	sdhost_getprop,		/* so_getprop */
	sdhost_setprop,		/* so_setprop */
	sdhost_poll,		/* so_poll */
	sdhost_reset,		/* so_reset */
	sdhost_halt,		/* so_halt */
};

/*
 * Access attributes: slot registers are little-endian per the SD Host
 * spec; the DMA bounce buffer is raw memory and is never swapped.
 */
static ddi_device_acc_attr_t sdhost_regattr = {
	DDI_DEVICE_ATTR_V0,	/* devacc_attr_version */
	DDI_STRUCTURE_LE_ACC,	/* devacc_attr_endian_flags */
	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder */
	DDI_DEFAULT_ACC,	/* devacc_attr_access */
};
static ddi_device_acc_attr_t sdhost_bufattr = {
	DDI_DEVICE_ATTR_V0,	/* devacc_attr_version */
	DDI_NEVERSWAP_ACC,	/* devacc_attr_endian_flags */
	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder */
	DDI_DEFAULT_ACC,	/* devacc_attr_access */
};
198
/*
 * Register accessors.  All slot register access goes through the DDI
 * access handle, so byte order and ordering semantics are handled by
 * the framework.  "reg" is a byte offset from the slot register base.
 */
#define GET16(ss, reg) \
	ddi_get16(ss->ss_acch, (void *)(ss->ss_regva + reg))
#define PUT16(ss, reg, val) \
	ddi_put16(ss->ss_acch, (void *)(ss->ss_regva + reg), val)
#define GET32(ss, reg) \
	ddi_get32(ss->ss_acch, (void *)(ss->ss_regva + reg))
#define PUT32(ss, reg, val) \
	ddi_put32(ss->ss_acch, (void *)(ss->ss_regva + reg), val)
#define GET64(ss, reg) \
	ddi_get64(ss->ss_acch, (void *)(ss->ss_regva + reg))

#define GET8(ss, reg) \
	ddi_get8(ss->ss_acch, (void *)(ss->ss_regva + reg))
#define PUT8(ss, reg, val) \
	ddi_put8(ss->ss_acch, (void *)(ss->ss_regva + reg), val)

/* Read-modify-write helpers for single register bits. */
#define CLR8(ss, reg, mask) PUT8(ss, reg, GET8(ss, reg) & ~(mask))
#define SET8(ss, reg, mask) PUT8(ss, reg, GET8(ss, reg) | (mask))

/*
 * If ever anyone uses PIO on SPARC, we have to endian-swap.  But we
 * think that SD Host Controllers are likely to be uncommon on SPARC,
 * and hopefully when they exist at all they will be able to use DMA.
 */
#ifdef _BIG_ENDIAN
#define sw32(x) ddi_swap32(x)
#define sw16(x) ddi_swap16(x)
#else
#define sw32(x) (x)
#define sw16(x) (x)
#endif

/* PIO data-port accessors; see endian note above. */
#define GETDATA32(ss) sw32(GET32(ss, REG_DATA))
#define GETDATA16(ss) sw16(GET16(ss, REG_DATA))
#define GETDATA8(ss) GET8(ss, REG_DATA)

#define PUTDATA32(ss, val) PUT32(ss, REG_DATA, sw32(val))
#define PUTDATA16(ss, val) PUT16(ss, REG_DATA, sw16(val))
#define PUTDATA8(ss, val) PUT8(ss, REG_DATA, val)

/* Nonzero if the named PRS_* bit is set in the present state register. */
#define CHECK_STATE(ss, nm) \
	((GET32(ss, REG_PRS) & PRS_ ## nm) != 0)
241
242 int
_init(void)243 _init(void)
244 {
245 int rv;
246
247 sda_host_init_ops(&sdhost_dev_ops);
248
249 if ((rv = mod_install(&modlinkage)) != 0) {
250 sda_host_fini_ops(&sdhost_dev_ops);
251 }
252
253 return (rv);
254 }
255
256 int
_fini(void)257 _fini(void)
258 {
259 int rv;
260
261 if ((rv = mod_remove(&modlinkage)) == 0) {
262 sda_host_fini_ops(&sdhost_dev_ops);
263 }
264 return (rv);
265 }
266
/*
 * Loadable module information entry point; reports module linkage.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
272
/*
 * attach(9E) entry point.  DDI_RESUME is handed off to
 * sdhost_resume(); DDI_ATTACH performs full bring-up: soft state
 * allocation, DMA attribute setup (SDMA only for now), slot discovery
 * via PCI config space, interrupt setup, per-slot hardware init, and
 * finally registration of the slots with the SDA framework.
 */
int
sdhost_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	sdhost_t *shp;
	ddi_acc_handle_t pcih;
	uint8_t slotinfo;
	uint8_t bar;
	int i;
	int rv;

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		return (sdhost_resume(dip));

	default:
		return (DDI_FAILURE);
	}

	/*
	 * Soft state allocation.
	 */
	shp = kmem_zalloc(sizeof (*shp), KM_SLEEP);
	ddi_set_driver_private(dip, shp);

	/*
	 * Reset the "slot number", so uninit slot works properly.
	 */
	for (i = 0; i < SDHOST_MAXSLOTS; i++) {
		shp->sh_slots[i].ss_num = -1;
	}

	/*
	 * Initialize DMA attributes. For now we initialize as for
	 * SDMA. If we add ADMA support we can improve this.
	 */
	shp->sh_dmaattr.dma_attr_version = DMA_ATTR_V0;
	shp->sh_dmaattr.dma_attr_addr_lo = 0;
	shp->sh_dmaattr.dma_attr_addr_hi = 0xffffffffU;
	shp->sh_dmaattr.dma_attr_count_max = 0xffffffffU;
	shp->sh_dmaattr.dma_attr_align = 4096; /* Ricoh needs it */
	shp->sh_dmaattr.dma_attr_burstsizes = 0; /* for now! */
	shp->sh_dmaattr.dma_attr_minxfer = 1;
	shp->sh_dmaattr.dma_attr_maxxfer = 0x7ffffU;
	shp->sh_dmaattr.dma_attr_sgllen = 1; /* no scatter/gather */
	shp->sh_dmaattr.dma_attr_seg = 0x7ffffU; /* not to cross 512K */
	shp->sh_dmaattr.dma_attr_granular = 1;
	shp->sh_dmaattr.dma_attr_flags = 0;

	/*
	 * PCI configuration access to figure out number of slots present.
	 */
	if (pci_config_setup(dip, &pcih) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "pci_config_setup failed");
		goto failed;
	}

	slotinfo = pci_config_get8(pcih, SLOTINFO);
	shp->sh_numslots = SLOTINFO_NSLOT(slotinfo);

	if (shp->sh_numslots > SDHOST_MAXSLOTS) {
		cmn_err(CE_WARN, "Host reports to have too many slots: %d",
		    shp->sh_numslots);
		pci_config_teardown(&pcih);
		goto failed;
	}

	/*
	 * Enable master accesses and DMA.
	 */
	pci_config_put16(pcih, PCI_CONF_COMM,
	    pci_config_get16(pcih, PCI_CONF_COMM) |
	    PCI_COMM_MAE | PCI_COMM_ME);

	/*
	 * Figure out which BAR to use. Note that we number BARs from
	 * 1, although PCI and SD Host numbers from 0. (We number
	 * from 1, because register number 0 means PCI configuration
	 * space in Solaris.)
	 */
	bar = SLOTINFO_BAR(slotinfo) + 1;

	pci_config_teardown(&pcih);

	/*
	 * Setup interrupts ... supports the new DDI interrupt API. This
	 * will support MSI or MSI-X interrupts if a device is found to
	 * support it.
	 */
	if (sdhost_setup_intr(dip, shp) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "Failed to setup interrupts");
		goto failed;
	}

	shp->sh_host = sda_host_alloc(dip, shp->sh_numslots, &sdhost_ops,
	    NULL);
	if (shp->sh_host == NULL) {
		cmn_err(CE_WARN, "Failed allocating SD host structure");
		goto failed;
	}

	/*
	 * Configure slots, this also maps registers, enables
	 * interrupts, etc. Most of the hardware setup is done here.
	 */
	for (i = 0; i < shp->sh_numslots; i++) {
		if (sdhost_init_slot(dip, shp, i, bar + i) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "Failed initializing slot %d", i);
			goto failed;
		}
	}

	ddi_report_dev(dip);

	/*
	 * Enable device interrupts at the DDI layer.
	 */
	if (shp->sh_icap & DDI_INTR_FLAG_BLOCK) {
		rv = ddi_intr_block_enable(&shp->sh_ihandle, 1);
	} else {
		rv = ddi_intr_enable(shp->sh_ihandle);
	}
	if (rv != DDI_SUCCESS) {
		cmn_err(CE_WARN, "Failed enabling interrupts");
		goto failed;
	}

	/*
	 * Mark the slots online with the framework. This will cause
	 * the framework to probe them for the presence of cards.
	 */
	if (sda_host_attach(shp->sh_host) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "Failed attaching to SDA framework");
		/* disable interrupts again before the common unwind */
		if (shp->sh_icap & DDI_INTR_FLAG_BLOCK) {
			(void) ddi_intr_block_disable(&shp->sh_ihandle, 1);
		} else {
			(void) ddi_intr_disable(shp->sh_ihandle);
		}
		goto failed;
	}

	return (DDI_SUCCESS);

failed:
	/*
	 * Unwind everything set up so far.  sdhost_uninit_slot() copes
	 * with partially initialized slots (ss_num was preset to -1).
	 */
	if (shp->sh_ihandle != NULL) {
		(void) ddi_intr_remove_handler(shp->sh_ihandle);
		(void) ddi_intr_free(shp->sh_ihandle);
	}
	for (i = 0; i < shp->sh_numslots; i++)
		sdhost_uninit_slot(shp, i);
	if (shp->sh_host != NULL)
		sda_host_free(shp->sh_host);
	kmem_free(shp, sizeof (*shp));

	return (DDI_FAILURE);
}
431
/*
 * detach(9E) entry point.  DDI_SUSPEND is handed off to
 * sdhost_suspend(); DDI_DETACH tears down in reverse order of attach:
 * framework detach, interrupt teardown, slot teardown, soft state.
 */
int
sdhost_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	sdhost_t *shp;
	int i;

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		return (sdhost_suspend(dip));

	default:
		return (DDI_FAILURE);
	}

	shp = ddi_get_driver_private(dip);

	/*
	 * Take host offline with the framework.
	 */
	sda_host_detach(shp->sh_host);

	/*
	 * Tear down interrupts.
	 */
	if (shp->sh_ihandle != NULL) {
		if (shp->sh_icap & DDI_INTR_FLAG_BLOCK) {
			(void) ddi_intr_block_disable(&shp->sh_ihandle, 1);
		} else {
			(void) ddi_intr_disable(shp->sh_ihandle);
		}
		(void) ddi_intr_remove_handler(shp->sh_ihandle);
		(void) ddi_intr_free(shp->sh_ihandle);
	}

	/*
	 * Tear down register mappings, etc.
	 */
	for (i = 0; i < shp->sh_numslots; i++)
		sdhost_uninit_slot(shp, i);
	sda_host_free(shp->sh_host);
	kmem_free(shp, sizeof (*shp));

	return (DDI_SUCCESS);
}
479
480 int
sdhost_quiesce(dev_info_t * dip)481 sdhost_quiesce(dev_info_t *dip)
482 {
483 sdhost_t *shp;
484 sdslot_t *ss;
485
486 shp = ddi_get_driver_private(dip);
487
488 /* reset each slot separately */
489 for (int i = 0; i < shp->sh_numslots; i++) {
490 ss = &shp->sh_slots[i];
491 if (ss->ss_acch == NULL)
492 continue;
493
494 (void) sdhost_soft_reset(ss, SOFT_RESET_ALL);
495 }
496 return (DDI_SUCCESS);
497 }
498
499 int
sdhost_suspend(dev_info_t * dip)500 sdhost_suspend(dev_info_t *dip)
501 {
502 sdhost_t *shp;
503 sdslot_t *ss;
504 int i;
505
506 shp = ddi_get_driver_private(dip);
507
508 sda_host_suspend(shp->sh_host);
509
510 for (i = 0; i < shp->sh_numslots; i++) {
511 ss = &shp->sh_slots[i];
512 mutex_enter(&ss->ss_lock);
513 ss->ss_suspended = B_TRUE;
514 sdhost_disable_interrupts(ss);
515 (void) sdhost_soft_reset(ss, SOFT_RESET_ALL);
516 mutex_exit(&ss->ss_lock);
517 }
518 return (DDI_SUCCESS);
519 }
520
521 int
sdhost_resume(dev_info_t * dip)522 sdhost_resume(dev_info_t *dip)
523 {
524 sdhost_t *shp;
525 sdslot_t *ss;
526 int i;
527
528 shp = ddi_get_driver_private(dip);
529
530 for (i = 0; i < shp->sh_numslots; i++) {
531 ss = &shp->sh_slots[i];
532 mutex_enter(&ss->ss_lock);
533 ss->ss_suspended = B_FALSE;
534 (void) sdhost_soft_reset(ss, SOFT_RESET_ALL);
535 sdhost_enable_interrupts(ss);
536 mutex_exit(&ss->ss_lock);
537 }
538
539 sda_host_resume(shp->sh_host);
540
541 return (DDI_SUCCESS);
542 }
543
/*
 * Program the slot's card clock to at most "hz" Hz using the host
 * base clock and a power-of-two divider.  hz == 0 just gates the
 * clock off.  Returns SDA_EOK on success, SDA_EINVAL if the base
 * clock was never established, or SDA_ETIME if the internal clock
 * does not stabilize within ~100 msec.
 */
sda_err_t
sdhost_set_clock(sdslot_t *ss, uint32_t hz)
{
	uint16_t div;
	uint32_t val;
	uint32_t clk;
	int count;

	/*
	 * Shut off the clock to begin.
	 */
	ss->ss_cardclk = 0;
	PUT16(ss, REG_CLOCK_CONTROL, 0);
	if (hz == 0) {
		return (SDA_EOK);
	}

	if (ss->ss_baseclk == 0) {
		sda_host_log(ss->ss_host, ss->ss_num,
		    "Base clock frequency not established.");
		return (SDA_EINVAL);
	}

	if ((hz > 25000000) && ((ss->ss_capab & CAPAB_HIGH_SPEED) != 0)) {
		/* this clock requires high speed timings! */
		SET8(ss, REG_HOST_CONTROL, HOST_CONTROL_HIGH_SPEED_EN);
	} else {
		/* don't allow clock to run faster than 25MHz */
		hz = min(hz, 25000000);
		CLR8(ss, REG_HOST_CONTROL, HOST_CONTROL_HIGH_SPEED_EN);
	}

	/* figure out the divider: halve the clock until it fits under hz */
	clk = ss->ss_baseclk;
	div = 1;
	while (clk > hz) {
		if (div > 0x80)
			break;
		clk >>= 1; /* divide clock by two */
		div <<= 1; /* divider goes up by one */
	}
	div >>= 1; /* 0 == divide by 1, 1 = divide by 2 */

	/*
	 * Set the internal clock divider first, without enabling the
	 * card clock yet.
	 */
	PUT16(ss, REG_CLOCK_CONTROL,
	    (div << CLOCK_CONTROL_FREQ_SHIFT) | CLOCK_CONTROL_INT_CLOCK_EN);

	/*
	 * Wait up to 100 msec for the internal clock to stabilize.
	 * (The spec does not seem to indicate a maximum timeout, but
	 * it also suggests that an infinite loop be used, which is
	 * not appropriate for hardened Solaris drivers.)
	 */
	for (count = 100000; count; count -= 10) {

		val = GET16(ss, REG_CLOCK_CONTROL);

		if (val & CLOCK_CONTROL_INT_CLOCK_STABLE) {
			/* if clock is stable, enable the SD clock pin */
			PUT16(ss, REG_CLOCK_CONTROL, val |
			    CLOCK_CONTROL_SD_CLOCK_EN);

			/* record the frequency actually achieved */
			ss->ss_cardclk = clk;
			return (SDA_EOK);
		}

		drv_usecwait(10);
	}

	return (SDA_ETIME);
}
618
/*
 * Issue a software reset of the circuits named by "bits"
 * (SOFT_RESET_*) and wait up to ~100 msec for the controller to clear
 * them.  Returns SDA_EOK on success, SDA_ETIME if the bits never
 * clear.
 */
sda_err_t
sdhost_soft_reset(sdslot_t *ss, uint8_t bits)
{
	int count;

	/*
	 * There appears to be a bug where Ricoh hosts might have a
	 * problem if the host frequency is not set. If the card
	 * isn't present, or we are doing a master reset, just enable
	 * the internal clock at its native speed. (No dividers, and
	 * not exposed to card.).
	 */
	if ((bits == SOFT_RESET_ALL) || !(CHECK_STATE(ss, CARD_INSERTED))) {
		PUT16(ss, REG_CLOCK_CONTROL, CLOCK_CONTROL_INT_CLOCK_EN);
		/* simple 1msec wait, don't wait for clock to stabilize */
		drv_usecwait(1000);
		/*
		 * reset the card clock & width -- master reset also
		 * resets these
		 */
		ss->ss_cardclk = 0;
		ss->ss_width = 1;
	}


	PUT8(ss, REG_SOFT_RESET, bits);
	/* hardware clears the bits when the reset completes */
	for (count = 100000; count != 0; count -= 10) {
		if ((GET8(ss, REG_SOFT_RESET) & bits) == 0) {
			return (SDA_EOK);
		}
		drv_usecwait(10);
	}

	return (SDA_ETIME);
}
654
/*
 * Mask and disable all normal and error interrupt sources for the
 * slot.  Used on suspend; the inverse of sdhost_enable_interrupts().
 */
void
sdhost_disable_interrupts(sdslot_t *ss)
{
	/* disable slot interrupts for card insert and remove */
	PUT16(ss, REG_INT_MASK, 0);
	PUT16(ss, REG_INT_EN, 0);

	/* disable error interrupts */
	PUT16(ss, REG_ERR_MASK, 0);
	PUT16(ss, REG_ERR_EN, 0);
}
666
/*
 * Arm the slot's normal and error interrupt sources using the
 * driver's standard INT_MASK/INT_ENAB and ERR_MASK/ERR_ENAB sets.
 */
void
sdhost_enable_interrupts(sdslot_t *ss)
{
	/*
	 * Note that we want to enable reading of the CMD related
	 * bits, but we do not want them to generate an interrupt.
	 * (The busy wait for typical CMD stuff will normally be less
	 * than 10usec, so its simpler/easier to just poll. Even in
	 * the worst case of 100 kHz, the poll is at worst 2 msec.)
	 */

	/* enable slot interrupts for card insert and remove */
	PUT16(ss, REG_INT_MASK, INT_MASK);
	PUT16(ss, REG_INT_EN, INT_ENAB);

	/* enable error interrupts */
	PUT16(ss, REG_ERR_MASK, ERR_MASK);
	PUT16(ss, REG_ERR_EN, ERR_ENAB);
}
686
/*
 * Allocate and install the single interrupt handler for the
 * controller, trying MSI-X, then MSI, then FIXED.  MSI/MSI-X are
 * suppressed by default (see comment below) unless explicitly enabled
 * by property.  On success sh_ihandle, sh_ipri and sh_icap are set;
 * the interrupt is NOT yet enabled (attach does that later).
 */
int
sdhost_setup_intr(dev_info_t *dip, sdhost_t *shp)
{
	int itypes;
	int itype;

	/*
	 * Set up interrupt handler.
	 */
	if (ddi_intr_get_supported_types(dip, &itypes) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "ddi_intr_get_supported_types failed");
		return (DDI_FAILURE);
	}

	/*
	 * It turns out that some controllers don't properly implement MSI,
	 * but advertise MSI capability in their PCI config space.
	 *
	 * While this is really a chip-specific bug, the simplest solution
	 * is to just suppress MSI for now by default -- every device seen
	 * so far can use FIXED interrupts.
	 *
	 * We offer an override property, though, just in case someone really
	 * wants to force it.
	 *
	 * We don't do this if the FIXED type isn't supported though!
	 */
	if (itypes & DDI_INTR_TYPE_FIXED) {
		if (!PROPSET(SDHOST_PROP_ENABLE_MSI)) {
			itypes &= ~DDI_INTR_TYPE_MSI;
		}
		if (!PROPSET(SDHOST_PROP_ENABLE_MSIX)) {
			itypes &= ~DDI_INTR_TYPE_MSIX;
		}
	}

	/*
	 * Interrupt types are bits in a mask. We know about these ones:
	 * FIXED = 1
	 * MSI = 2
	 * MSIX = 4
	 */
	for (itype = DDI_INTR_TYPE_MSIX; itype != 0; itype >>= 1) {

		int count;

		if ((itypes & itype) == 0) {
			/* this type is not supported on this device! */
			continue;
		}

		if ((ddi_intr_get_nintrs(dip, itype, &count) != DDI_SUCCESS) ||
		    (count == 0)) {
			cmn_err(CE_WARN, "ddi_intr_get_nintrs failed");
			continue;
		}

		/*
		 * We have not seen a host device with multiple
		 * interrupts (one per slot?), and the spec does not
		 * indicate that they exist. But if one ever occurs,
		 * we spew a warning to help future debugging/support
		 * efforts.
		 */
		if (count > 1) {
			cmn_err(CE_WARN, "Controller offers %d interrupts, "
			    "but driver only supports one", count);
			continue;
		}

		if ((ddi_intr_alloc(dip, &shp->sh_ihandle, itype, 0, 1,
		    &count, DDI_INTR_ALLOC_NORMAL) != DDI_SUCCESS) ||
		    (count != 1)) {
			cmn_err(CE_WARN, "ddi_intr_alloc failed");
			continue;
		}

		/*
		 * Each of the remaining failures releases the handle
		 * and falls through to try the next weaker type.
		 */
		if (ddi_intr_get_pri(shp->sh_ihandle, &shp->sh_ipri) !=
		    DDI_SUCCESS) {
			cmn_err(CE_WARN, "ddi_intr_get_pri failed");
			(void) ddi_intr_free(shp->sh_ihandle);
			shp->sh_ihandle = NULL;
			continue;
		}

		if (shp->sh_ipri >= ddi_intr_get_hilevel_pri()) {
			cmn_err(CE_WARN, "Hi level interrupt not supported");
			(void) ddi_intr_free(shp->sh_ihandle);
			shp->sh_ihandle = NULL;
			continue;
		}

		if (ddi_intr_get_cap(shp->sh_ihandle, &shp->sh_icap) !=
		    DDI_SUCCESS) {
			cmn_err(CE_WARN, "ddi_intr_get_cap failed");
			(void) ddi_intr_free(shp->sh_ihandle);
			shp->sh_ihandle = NULL;
			continue;
		}

		if (ddi_intr_add_handler(shp->sh_ihandle, sdhost_intr,
		    shp, NULL) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "ddi_intr_add_handler failed");
			(void) ddi_intr_free(shp->sh_ihandle);
			shp->sh_ihandle = NULL;
			continue;
		}

		return (DDI_SUCCESS);
	}

	return (DDI_FAILURE);
}
800
/*
 * Complete the transfer in progress on the slot, reporting "errno"
 * (SDA_* error code; shadows the userland errno name, harmless in
 * kernel context) to the SDA framework.  A residual block count with
 * an otherwise clean completion is converted to SDA_ERESID.  On any
 * error the CMD and DAT circuits are reset and, if the transfer used
 * AUTO CMD12, an explicit STOP is issued.  All visible callers hold
 * ss_lock.
 */
void
sdhost_xfer_done(sdslot_t *ss, sda_err_t errno)
{
	if ((errno == SDA_EOK) && (ss->ss_resid != 0)) {
		/* an unexpected partial transfer was found */
		errno = SDA_ERESID;
	}
	ss->ss_blksz = 0;
	ss->ss_resid = 0;

	if (errno != SDA_EOK) {
		(void) sdhost_soft_reset(ss, SOFT_RESET_CMD);
		(void) sdhost_soft_reset(ss, SOFT_RESET_DAT);

		/* send a STOP command if necessary */
		if (ss->ss_mode & XFR_MODE_AUTO_CMD12) {
			PUT32(ss, REG_ARGUMENT, 0);
			PUT16(ss, REG_COMMAND,
			    (CMD_STOP_TRANSMIT << 8) |
			    COMMAND_TYPE_NORM | COMMAND_INDEX_CHECK_EN |
			    COMMAND_CRC_CHECK_EN | COMMAND_RESP_48_BUSY);
		}
	}

	sda_host_transfer(ss->ss_host, ss->ss_num, errno);
}
827
/*
 * Per-slot interrupt service routine.  Handles, in order: card
 * insert/remove detection, DMA boundary interrupts, PIO block reads
 * and writes (staged through the bounce buffer), transfer completion,
 * and error reporting.  Returns DDI_INTR_CLAIMED if this slot had any
 * pending interrupt status, DDI_INTR_UNCLAIMED otherwise.
 */
uint_t
sdhost_slot_intr(sdslot_t *ss)
{
	uint16_t intr;
	uint16_t errs;
	caddr_t data;
	int count;

	mutex_enter(&ss->ss_lock);

	/* during suspend the hardware is reset; nothing can be ours */
	if (ss->ss_suspended) {
		mutex_exit(&ss->ss_lock);
		return (DDI_INTR_UNCLAIMED);
	}

	intr = GET16(ss, REG_INT_STAT);
	if (intr == 0) {
		mutex_exit(&ss->ss_lock);
		return (DDI_INTR_UNCLAIMED);
	}
	errs = GET16(ss, REG_ERR_STAT);

	if (intr & (INT_REM | INT_INS)) {

		/* ack everything pending and let the framework re-probe */
		PUT16(ss, REG_INT_STAT, intr);
		mutex_exit(&ss->ss_lock);

		sda_host_detect(ss->ss_host, ss->ss_num);
		/* no further interrupt processing this cycle */
		return (DDI_INTR_CLAIMED);
	}

	if (intr & INT_DMA) {
		/*
		 * We have crossed a DMA/page boundary. Cope with it.
		 */
		/*
		 * Apparently some sdhost controllers issue a final
		 * DMA interrupt if the DMA completes on a boundary,
		 * even though there is no further data to transfer.
		 *
		 * There might be a risk here of the controller
		 * continuing to access the same data over and over
		 * again, but we accept the risk.
		 */
		PUT16(ss, REG_INT_STAT, INT_DMA);
	}

	if (intr & INT_RD) {
		/*
		 * PIO read! PIO is quite suboptimal, but we expect
		 * performance critical applications to use DMA
		 * whenever possible. We have to stage this through
		 * the bounce buffer to meet alignment considerations.
		 */

		PUT16(ss, REG_INT_STAT, INT_RD);

		while ((ss->ss_resid > 0) && CHECK_STATE(ss, BUF_RD_EN)) {

			data = ss->ss_bounce;
			count = ss->ss_blksz;

			ASSERT(count > 0);
			ASSERT(ss->ss_kvaddr != NULL);

			/* drain the data port in 32-, 16-, then 8-bit units */
			while (count >= sizeof (uint32_t)) {
				*(uint32_t *)(void *)data = GETDATA32(ss);
				data += sizeof (uint32_t);
				count -= sizeof (uint32_t);
			}
			while (count >= sizeof (uint16_t)) {
				*(uint16_t *)(void *)data = GETDATA16(ss);
				data += sizeof (uint16_t);
				count -= sizeof (uint16_t);
			}
			while (count >= sizeof (uint8_t)) {
				*(uint8_t *)data = GETDATA8(ss);
				data += sizeof (uint8_t);
				count -= sizeof (uint8_t);
			}

			bcopy(ss->ss_bounce, ss->ss_kvaddr, ss->ss_blksz);
			ss->ss_kvaddr += ss->ss_blksz;
			ss->ss_resid--;
		}
	}

	if (intr & INT_WR) {
		/*
		 * PIO write! PIO is quite suboptimal, but we expect
		 * performance critical applications to use DMA
		 * whenever possible. We have to stage this through
		 * the bounce buffer to meet alignment considerations.
		 */

		PUT16(ss, REG_INT_STAT, INT_WR);

		while ((ss->ss_resid > 0) && CHECK_STATE(ss, BUF_WR_EN)) {

			data = ss->ss_bounce;
			count = ss->ss_blksz;

			ASSERT(count > 0);
			ASSERT(ss->ss_kvaddr != NULL);

			/* stage one block, then feed the data port */
			bcopy(ss->ss_kvaddr, data, count);
			while (count >= sizeof (uint32_t)) {
				PUTDATA32(ss, *(uint32_t *)(void *)data);
				data += sizeof (uint32_t);
				count -= sizeof (uint32_t);
			}
			while (count >= sizeof (uint16_t)) {
				PUTDATA16(ss, *(uint16_t *)(void *)data);
				data += sizeof (uint16_t);
				count -= sizeof (uint16_t);
			}
			while (count >= sizeof (uint8_t)) {
				PUTDATA8(ss, *(uint8_t *)data);
				data += sizeof (uint8_t);
				count -= sizeof (uint8_t);
			}

			ss->ss_kvaddr += ss->ss_blksz;
			ss->ss_resid--;
		}
	}

	if (intr & INT_XFR) {
		/* DMA reads copy back from the bounce buffer here */
		if ((ss->ss_mode & (XFR_MODE_READ | XFR_MODE_DMA_EN)) ==
		    (XFR_MODE_READ | XFR_MODE_DMA_EN)) {
			(void) ddi_dma_sync(ss->ss_bufdmah, 0, 0,
			    DDI_DMA_SYNC_FORKERNEL);
			bcopy(ss->ss_bounce, ss->ss_kvaddr, ss->ss_rcnt);
			ss->ss_rcnt = 0;
		}
		PUT16(ss, REG_INT_STAT, INT_XFR);

		sdhost_xfer_done(ss, SDA_EOK);
	}

	if (intr & INT_ERR) {
		PUT16(ss, REG_ERR_STAT, errs);
		PUT16(ss, REG_INT_STAT, INT_ERR);

		if (errs & ERR_DAT) {
			if ((errs & ERR_DAT_END) == ERR_DAT_END) {
				sdhost_xfer_done(ss, SDA_EPROTO);
			} else if ((errs & ERR_DAT_CRC) == ERR_DAT_CRC) {
				sdhost_xfer_done(ss, SDA_ECRC7);
			} else {
				sdhost_xfer_done(ss, SDA_ETIME);
			}

		} else if (errs & ERR_ACMD12) {
			/*
			 * Generally, this is bad news. we need a full
			 * reset to recover properly.
			 */
			sdhost_xfer_done(ss, SDA_ECMD12);
		}

		/*
		 * This asynchronous error leaves the slot more or less
		 * useless. Report it to the framework.
		 */
		if (errs & ERR_CURRENT) {
			sda_host_fault(ss->ss_host, ss->ss_num,
			    SDA_FAULT_CURRENT);
		}
	}

	mutex_exit(&ss->ss_lock);

	return (DDI_INTR_CLAIMED);
}
1004
1005 /*ARGSUSED1*/
1006 uint_t
sdhost_intr(caddr_t arg1,caddr_t arg2)1007 sdhost_intr(caddr_t arg1, caddr_t arg2)
1008 {
1009 sdhost_t *shp = (void *)arg1;
1010 int rv = DDI_INTR_UNCLAIMED;
1011 int num;
1012
1013 /* interrupt for each of the slots present in the system */
1014 for (num = 0; num < shp->sh_numslots; num++) {
1015 if (sdhost_slot_intr(&shp->sh_slots[num]) ==
1016 DDI_INTR_CLAIMED) {
1017 rv = DDI_INTR_CLAIMED;
1018 }
1019 }
1020 return (rv);
1021 }
1022
1023 int
sdhost_init_slot(dev_info_t * dip,sdhost_t * shp,int num,int bar)1024 sdhost_init_slot(dev_info_t *dip, sdhost_t *shp, int num, int bar)
1025 {
1026 sdslot_t *ss;
1027 uint32_t capab;
1028 uint32_t clk;
1029 char ksname[16];
1030 size_t blen;
1031 unsigned ndmac;
1032 int rv;
1033
1034 /*
1035 * Register the private state.
1036 */
1037 ss = &shp->sh_slots[num];
1038 ss->ss_host = shp->sh_host;
1039 ss->ss_num = num;
1040 sda_host_set_private(shp->sh_host, num, ss);
1041 /*
1042 * Initialize core data structure, locks, etc.
1043 */
1044 mutex_init(&ss->ss_lock, NULL, MUTEX_DRIVER,
1045 DDI_INTR_PRI(shp->sh_ipri));
1046
1047 /*
1048 * Set up DMA.
1049 */
1050 rv = ddi_dma_alloc_handle(dip, &shp->sh_dmaattr,
1051 DDI_DMA_SLEEP, NULL, &ss->ss_bufdmah);
1052 if (rv != DDI_SUCCESS) {
1053 cmn_err(CE_WARN, "Failed to alloc dma handle (%d)!", rv);
1054 return (DDI_FAILURE);
1055 }
1056
1057 rv = ddi_dma_mem_alloc(ss->ss_bufdmah, SDHOST_BOUNCESZ,
1058 &sdhost_bufattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
1059 &ss->ss_bounce, &blen, &ss->ss_bufacch);
1060 if (rv != DDI_SUCCESS) {
1061 cmn_err(CE_WARN, "Failed to alloc bounce buffer (%d)!", rv);
1062 return (DDI_FAILURE);
1063 }
1064
1065 rv = ddi_dma_addr_bind_handle(ss->ss_bufdmah, NULL, ss->ss_bounce,
1066 blen, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
1067 &ss->ss_bufdmac, &ndmac);
1068 if ((rv != DDI_DMA_MAPPED) || (ndmac != 1)) {
1069 cmn_err(CE_WARN, "Failed to bind DMA bounce buffer (%d, %u)!",
1070 rv, ndmac);
1071 return (DDI_FAILURE);
1072 }
1073
1074 /*
1075 * Set up virtual kstats.
1076 */
1077 (void) snprintf(ksname, sizeof (ksname), "slot%d", num);
1078 ss->ss_ksp = kstat_create(ddi_driver_name(dip), ddi_get_instance(dip),
1079 ksname, "misc", KSTAT_TYPE_NAMED,
1080 sizeof (sdstats_t) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
1081 if (ss->ss_ksp != NULL) {
1082 sdstats_t *sp = &ss->ss_stats;
1083 ss->ss_ksp->ks_data = sp;
1084 ss->ss_ksp->ks_private = ss;
1085 ss->ss_ksp->ks_lock = &ss->ss_lock;
1086 /* counters are 64 bits wide */
1087 kstat_named_init(&sp->ks_ncmd, "ncmd", KSTAT_DATA_UINT64);
1088 kstat_named_init(&sp->ks_ixfr, "ixfr", KSTAT_DATA_UINT64);
1089 kstat_named_init(&sp->ks_oxfr, "oxfr", KSTAT_DATA_UINT64);
1090 kstat_named_init(&sp->ks_ibytes, "ibytes", KSTAT_DATA_UINT64);
1091 kstat_named_init(&sp->ks_obytes, "obytes", KSTAT_DATA_UINT64);
1092 kstat_named_init(&sp->ks_npio, "npio", KSTAT_DATA_UINT64);
1093 kstat_named_init(&sp->ks_ndma, "ndma", KSTAT_DATA_UINT64);
1094 kstat_named_init(&sp->ks_nmulti, "nmulti", KSTAT_DATA_UINT64);
1095 /* these aren't counters -- leave them at 32 bits */
1096 kstat_named_init(&sp->ks_baseclk, "baseclk", KSTAT_DATA_UINT32);
1097 kstat_named_init(&sp->ks_cardclk, "cardclk", KSTAT_DATA_UINT32);
1098 kstat_named_init(&sp->ks_tmusecs, "tmusecs", KSTAT_DATA_UINT32);
1099 kstat_named_init(&sp->ks_width, "width", KSTAT_DATA_UINT32);
1100 kstat_named_init(&sp->ks_flags, "flags", KSTAT_DATA_UINT32);
1101 kstat_named_init(&sp->ks_capab, "capab", KSTAT_DATA_UINT32);
1102 kstat_install(ss->ss_ksp);
1103 }
1104
1105 if (PROPSET(SDHOST_PROP_FORCE_PIO)) {
1106 ss->ss_flags |= SDFLAG_FORCE_PIO;
1107 }
1108 if (PROPSET(SDHOST_PROP_FORCE_DMA)) {
1109 ss->ss_flags |= SDFLAG_FORCE_DMA;
1110 }
1111
1112 if (ddi_regs_map_setup(dip, bar, &ss->ss_regva, 0, 0, &sdhost_regattr,
1113 &ss->ss_acch) != DDI_SUCCESS) {
1114 cmn_err(CE_WARN, "Failed to map registers!");
1115 return (DDI_FAILURE);
1116 }
1117
1118 /* reset before reading capabilities */
1119 if (sdhost_soft_reset(ss, SOFT_RESET_ALL) != SDA_EOK)
1120 return (DDI_FAILURE);
1121
1122 capab = GET64(ss, REG_CAPAB) & 0xffffffffU; /* upper bits reserved */
1123 ss->ss_capab = capab;
1124
1125 /* host voltages in OCR format */
1126 ss->ss_ocr = 0;
1127 if (capab & CAPAB_18V)
1128 ss->ss_ocr |= OCR_18_19V; /* 1.8V */
1129 if (capab & CAPAB_30V)
1130 ss->ss_ocr |= OCR_30_31V;
1131 if (capab & CAPAB_33V)
1132 ss->ss_ocr |= OCR_32_33V;
1133
1134 /* base clock */
1135 ss->ss_baseclk =
1136 ((capab & CAPAB_BASE_FREQ_MASK) >> CAPAB_BASE_FREQ_SHIFT);
1137 ss->ss_baseclk *= 1000000;
1138
1139 /*
1140 * Timeout clock. We can calculate this using the following
1141 * formula:
1142 *
1143 * (1000000 usec/1sec) * (1sec/tmoutclk) * base factor = clock time
1144 *
1145 * Clock time is the length of the base clock in usecs.
1146 *
1147 * Our base factor is 2^13, which is the shortest clock we
1148 * can count.
1149 *
1150 * To simplify the math and avoid overflow, we cancel out the
1151 * zeros for kHz or MHz. Since we want to wait more clocks, not
1152 * less, on error, we truncate the result rather than rounding
1153 * up.
1154 */
1155 clk = ((capab & CAPAB_TIMEOUT_FREQ_MASK) >> CAPAB_TIMEOUT_FREQ_SHIFT);
1156 if ((ss->ss_baseclk == 0) || (clk == 0)) {
1157 cmn_err(CE_WARN, "Unable to determine clock frequencies");
1158 return (DDI_FAILURE);
1159 }
1160
1161 if (capab & CAPAB_TIMEOUT_UNITS) {
1162 /* MHz */
1163 ss->ss_tmusecs = (1 << 13) / clk;
1164 clk *= 1000000;
1165 } else {
1166 /* kHz */
1167 ss->ss_tmusecs = (1000 * (1 << 13)) / clk;
1168 clk *= 1000;
1169 }
1170
1171 /*
1172 * Calculation of the timeout.
1173 *
1174 * SDIO cards use a 1sec timeout, and SDHC cards use fixed
1175 * 100msec for read and 250 msec for write.
1176 *
1177 * Legacy cards running at 375kHz have a worst case of about
1178 * 15 seconds. Running at 25MHz (the standard speed) it is
1179 * about 100msec for read, and about 3.2 sec for write.
1180 * Typical values are 1/100th that, or about 1msec for read,
1181 * and 32 msec for write.
1182 *
1183 * No transaction at full speed should ever take more than 4
1184 * seconds. (Some slow legacy cards might have trouble, but
1185 * we'll worry about them if they ever are seen. Nobody wants
1186 * to wait 4 seconds to access a single block anyway!)
1187 *
1188 * To get to 4 seconds, we continuously double usec until we
1189 * get to the maximum value, or a timeout greater than 4
1190 * seconds.
1191 *
1192 * Note that for high-speed timeout clocks, we might not be
1193 * able to get to the full 4 seconds. E.g. with a 48MHz
1194 * timeout clock, we can only get to about 2.8 seconds. Its
1195 * possible that there could be some slow MMC cards that will
1196 * timeout at this clock rate, but it seems unlikely. (The
1197 * device would have to be pressing the very worst times,
1198 * against the 100-fold "permissive" window allowed, and
1199 * running at only 12.5MHz.)
1200 *
1201 * XXX: this could easily be a tunable. Someone dealing with only
1202 * reasonable cards could set this to just 1 second.
1203 */
1204 for (ss->ss_tmoutclk = 0; ss->ss_tmoutclk < 14; ss->ss_tmoutclk++) {
1205 if ((ss->ss_tmusecs * (1 << ss->ss_tmoutclk)) >= 4000000) {
1206 break;
1207 }
1208 }
1209
1210 /*
1211 * Enable slot interrupts.
1212 */
1213 sdhost_enable_interrupts(ss);
1214
1215 return (DDI_SUCCESS);
1216 }
1217
1218 void
sdhost_uninit_slot(sdhost_t * shp,int num)1219 sdhost_uninit_slot(sdhost_t *shp, int num)
1220 {
1221 sdslot_t *ss;
1222
1223 ss = &shp->sh_slots[num];
1224
1225 if (ss->ss_acch != NULL)
1226 (void) sdhost_soft_reset(ss, SOFT_RESET_ALL);
1227
1228 if (ss->ss_bufdmac.dmac_address)
1229 (void) ddi_dma_unbind_handle(ss->ss_bufdmah);
1230
1231 if (ss->ss_bufacch != NULL)
1232 ddi_dma_mem_free(&ss->ss_bufacch);
1233
1234 if (ss->ss_bufdmah != NULL)
1235 ddi_dma_free_handle(&ss->ss_bufdmah);
1236
1237 if (ss->ss_ksp != NULL)
1238 kstat_delete(ss->ss_ksp);
1239
1240 if (ss->ss_acch != NULL)
1241 ddi_regs_map_free(&ss->ss_acch);
1242
1243 if (ss->ss_num != -1)
1244 mutex_destroy(&ss->ss_lock);
1245 }
1246
1247 void
sdhost_get_response(sdslot_t * ss,sda_cmd_t * cmdp)1248 sdhost_get_response(sdslot_t *ss, sda_cmd_t *cmdp)
1249 {
1250 uint32_t *resp = cmdp->sc_response;
1251 int i;
1252
1253 resp[0] = GET32(ss, REG_RESP1);
1254 resp[1] = GET32(ss, REG_RESP2);
1255 resp[2] = GET32(ss, REG_RESP3);
1256 resp[3] = GET32(ss, REG_RESP4);
1257
1258 /*
1259 * Response 2 is goofy because the host drops the low
1260 * order CRC bits. This makes it a bit awkward, so we
1261 * have to shift the bits to make it work out right.
1262 *
1263 * Note that the framework expects the 32 bit
1264 * words to be ordered in LE fashion. (The
1265 * bits within the words are in native order).
1266 */
1267 if (cmdp->sc_rtype == R2) {
1268 for (i = 3; i > 0; i--) {
1269 resp[i] <<= 8;
1270 resp[i] |= (resp[i - 1] >> 24);
1271 }
1272 resp[0] <<= 8;
1273 }
1274 }
1275
/*
 * Busy-wait for completion of the command most recently issued to
 * the slot, and collect its response on success.
 *
 * Returns SDA_EOK on success, SDA_ETIME on a command timeout
 * (either reported by the controller or because our poll loop
 * expired), SDA_ECRC7 on a command CRC error, or SDA_EPROTO for
 * any other command error.  On any failure the CMD and DAT lines
 * are soft reset before returning.
 *
 * Caller must hold ss->ss_lock (all callers in this file do).
 */
sda_err_t
sdhost_wait_cmd(sdslot_t *ss, sda_cmd_t *cmdp)
{
	int i;
	uint16_t errs;
	sda_err_t rv;

	/*
	 * Worst case for 100kHz timeout is 2msec (200 clocks), we add
	 * a tiny bit for safety. (Generally timeout will be far, far
	 * less than that.)
	 *
	 * Note that at more typical 12MHz (and normally it will be
	 * even faster than that!) that the device timeout is only
	 * 16.67 usec. We could be smarter and reduce the delay time,
	 * but that would require putting more intelligence into the
	 * code, and we don't expect CMD timeout to normally occur
	 * except during initialization. (At which time we need the
	 * full timeout anyway.)
	 *
	 * Checking the ERR_STAT will normally cause the timeout to
	 * terminate to finish early if the device is healthy, anyway.
	 */

	/* poll for up to ~3msec, in 5usec steps */
	for (i = 3000; i > 0; i -= 5) {
		if (GET16(ss, REG_INT_STAT) & INT_CMD) {

			/* acknowledge the command-complete interrupt */
			PUT16(ss, REG_INT_STAT, INT_CMD);

			/* command completed */
			sdhost_get_response(ss, cmdp);
			return (SDA_EOK);
		}

		if ((errs = (GET16(ss, REG_ERR_STAT) & ERR_CMD)) != 0) {
			/* acknowledge (clear) the error bits we saw */
			PUT16(ss, REG_ERR_STAT, errs);

			/* command timeout isn't a host failure */
			if ((errs & ERR_CMD_TMO) == ERR_CMD_TMO) {
				rv = SDA_ETIME;
			} else if ((errs & ERR_CMD_CRC) == ERR_CMD_CRC) {
				rv = SDA_ECRC7;
			} else {
				rv = SDA_EPROTO;
			}
			goto error;
		}

		drv_usecwait(5);
	}

	/* poll loop expired with no completion and no error reported */
	rv = SDA_ETIME;

error:
	/*
	 * NB: We need to soft reset the CMD and DAT
	 * lines after a failure of this sort.
	 */
	(void) sdhost_soft_reset(ss, SOFT_RESET_CMD);
	(void) sdhost_soft_reset(ss, SOFT_RESET_DAT);

	return (rv);
}
1339
1340 sda_err_t
sdhost_poll(void * arg)1341 sdhost_poll(void *arg)
1342 {
1343 sdslot_t *ss = arg;
1344
1345 (void) sdhost_slot_intr(ss);
1346 return (SDA_EOK);
1347 }
1348
/*
 * Framework entry point: issue a single command (with optional data
 * transfer) to the slot and busy-wait for its completion via
 * sdhost_wait_cmd().
 *
 * arg is the sdslot_t for this slot; cmdp describes the command,
 * its response type, and any associated data phase (sc_nblks != 0).
 * Returns SDA_EOK, SDA_EINVAL for malformed requests, SDA_ESUSPENDED
 * if the slot is suspended, or the error from sdhost_wait_cmd().
 */
sda_err_t
sdhost_cmd(void *arg, sda_cmd_t *cmdp)
{
	sdslot_t *ss = arg;
	uint16_t command;
	uint16_t mode;
	sda_err_t rv;

	/*
	 * Command register:
	 *	bit 13-8	= command index
	 *	bit 7-6		= command type (always zero for us!)
	 *	bit 5		= data present select
	 *	bit 4		= command index check (always on!)
	 *	bit 3		= command CRC check enable
	 *	bit 2		= reserved
	 *	bit 1-0		= response type
	 */

	command = ((uint16_t)cmdp->sc_index << 8);
	command |= COMMAND_TYPE_NORM |
	    COMMAND_INDEX_CHECK_EN | COMMAND_CRC_CHECK_EN;

	/*
	 * Map the framework response type onto the controller's
	 * response-type field, and strip the index/CRC checks for
	 * response formats that don't carry them (R2 has no index,
	 * R3/R4 have neither index nor CRC).
	 */
	switch (cmdp->sc_rtype) {
	case R0:
		command |= COMMAND_RESP_NONE;
		break;
	case R1:
	case R5:
	case R6:
	case R7:
		command |= COMMAND_RESP_48;
		break;
	case R1b:
	case R5b:
		command |= COMMAND_RESP_48_BUSY;
		break;
	case R2:
		command |= COMMAND_RESP_136;
		command &= ~(COMMAND_INDEX_CHECK_EN | COMMAND_CRC_CHECK_EN);
		break;
	case R3:
	case R4:
		command |= COMMAND_RESP_48;
		command &= ~COMMAND_CRC_CHECK_EN;
		command &= ~COMMAND_INDEX_CHECK_EN;
		break;
	default:
		return (SDA_EINVAL);
	}

	mutex_enter(&ss->ss_lock);
	if (ss->ss_suspended) {
		mutex_exit(&ss->ss_lock);
		return (SDA_ESUSPENDED);
	}

	/* nonzero block count means this command has a data phase */
	if (cmdp->sc_nblks != 0) {
		uint16_t blksz;
		uint16_t nblks;

		blksz = cmdp->sc_blksz;
		nblks = cmdp->sc_nblks;

		/*
		 * Ensure that we have good data.
		 */
		if ((blksz < 1) || (blksz > 2048)) {
			mutex_exit(&ss->ss_lock);
			return (SDA_EINVAL);
		}
		command |= COMMAND_DATA_PRESENT;

		ss->ss_blksz = blksz;

		ss->ss_kvaddr = (void *)cmdp->sc_kvaddr;
		ss->ss_rcnt = 0;
		ss->ss_resid = 0;

		/*
		 * Only SDMA for now.  We can investigate ADMA2 later.
		 * (Right now we don't have ADMA2 capable hardware.)
		 * We always use a bounce buffer, which solves weird
		 * problems with certain controllers.  Doing this with
		 * a large contiguous buffer may be faster than
		 * servicing all the little per-page interrupts
		 * anyway. (Bcopy of 64 K vs. 16 interrupts.)
		 */
		if (((ss->ss_capab & CAPAB_SDMA) != 0) &&
		    ((ss->ss_flags & SDFLAG_FORCE_PIO) == 0) &&
		    ((blksz * nblks) <= SDHOST_BOUNCESZ)) {

			if (cmdp->sc_flags & SDA_CMDF_WRITE) {
				/*
				 * if we're writing, prepare initial round
				 * of data
				 */
				bcopy(cmdp->sc_kvaddr, ss->ss_bounce,
				    nblks * blksz);
				(void) ddi_dma_sync(ss->ss_bufdmah, 0, 0,
				    DDI_DMA_SYNC_FORDEV);
			} else {
				/* reads drain the bounce buffer at intr time */
				ss->ss_rcnt = nblks * blksz;
			}
			PUT32(ss, REG_SDMA_ADDR, ss->ss_bufdmac.dmac_address);
			mode = XFR_MODE_DMA_EN;
			PUT16(ss, REG_BLKSZ, BLKSZ_BOUNDARY_512K | blksz);
			ss->ss_ndma++;

		} else {
			/* fall back to PIO; ss_resid counts blocks left */
			mode = 0;
			ss->ss_npio++;
			ss->ss_resid = nblks;
			PUT16(ss, REG_BLKSZ, blksz);
		}

		if (nblks > 1) {
			mode |= XFR_MODE_MULTI | XFR_MODE_COUNT;
			if (cmdp->sc_flags & SDA_CMDF_AUTO_CMD12)
				mode |= XFR_MODE_AUTO_CMD12;
			ss->ss_nmulti++;
		}
		if ((cmdp->sc_flags & SDA_CMDF_READ) != 0) {
			mode |= XFR_MODE_READ;
			ss->ss_ixfr++;
			ss->ss_ibytes += nblks * blksz;
		} else {
			ss->ss_oxfr++;
			ss->ss_obytes += nblks * blksz;
		}

		ss->ss_mode = mode;

		/*
		 * Program the data-phase registers before the command
		 * register write below kicks off the transfer.
		 */
		PUT8(ss, REG_TIMEOUT_CONTROL, ss->ss_tmoutclk);
		PUT16(ss, REG_BLOCK_COUNT, nblks);
		PUT16(ss, REG_XFR_MODE, mode);
	}

	/* writing the command register starts execution */
	PUT32(ss, REG_ARGUMENT, cmdp->sc_argument);
	PUT16(ss, REG_COMMAND, command);

	ss->ss_ncmd++;
	rv = sdhost_wait_cmd(ss, cmdp);

	mutex_exit(&ss->ss_lock);

	return (rv);
}
1497
1498 sda_err_t
sdhost_getprop(void * arg,sda_prop_t prop,uint32_t * val)1499 sdhost_getprop(void *arg, sda_prop_t prop, uint32_t *val)
1500 {
1501 sdslot_t *ss = arg;
1502 sda_err_t rv = 0;
1503
1504 mutex_enter(&ss->ss_lock);
1505
1506 if (ss->ss_suspended) {
1507 mutex_exit(&ss->ss_lock);
1508 return (SDA_ESUSPENDED);
1509 }
1510 switch (prop) {
1511 case SDA_PROP_INSERTED:
1512 if (CHECK_STATE(ss, CARD_INSERTED)) {
1513 *val = B_TRUE;
1514 } else {
1515 *val = B_FALSE;
1516 }
1517 break;
1518
1519 case SDA_PROP_WPROTECT:
1520 if (CHECK_STATE(ss, WRITE_ENABLE)) {
1521 *val = B_FALSE;
1522 } else {
1523 *val = B_TRUE;
1524 }
1525 break;
1526
1527 case SDA_PROP_OCR:
1528 *val = ss->ss_ocr;
1529 break;
1530
1531 case SDA_PROP_CLOCK:
1532 *val = ss->ss_cardclk;
1533 break;
1534
1535 case SDA_PROP_CAP_HISPEED:
1536 if ((ss->ss_capab & CAPAB_HIGH_SPEED) != 0) {
1537 *val = B_TRUE;
1538 } else {
1539 *val = B_FALSE;
1540 }
1541 break;
1542
1543 case SDA_PROP_CAP_4BITS:
1544 *val = B_TRUE;
1545 break;
1546
1547 case SDA_PROP_CAP_NOPIO:
1548 /*
1549 * We might have to use PIO for buffers that don't
1550 * have reasonable alignments. A few controllers seem
1551 * not to deal with granularity or alignments of
1552 * something other 32-bits.
1553 */
1554 *val = B_FALSE;
1555 break;
1556
1557 case SDA_PROP_CAP_INTR:
1558 case SDA_PROP_CAP_8BITS:
1559 *val = B_FALSE;
1560 break;
1561
1562 default:
1563 rv = SDA_ENOTSUP;
1564 break;
1565 }
1566 mutex_exit(&ss->ss_lock);
1567
1568 return (rv);
1569 }
1570
1571 sda_err_t
sdhost_setprop(void * arg,sda_prop_t prop,uint32_t val)1572 sdhost_setprop(void *arg, sda_prop_t prop, uint32_t val)
1573 {
1574 sdslot_t *ss = arg;
1575 sda_err_t rv = SDA_EOK;
1576
1577 mutex_enter(&ss->ss_lock);
1578
1579 if (ss->ss_suspended) {
1580 mutex_exit(&ss->ss_lock);
1581 return (SDA_ESUSPENDED);
1582 }
1583
1584 switch (prop) {
1585 case SDA_PROP_LED:
1586 if (val) {
1587 SET8(ss, REG_HOST_CONTROL, HOST_CONTROL_LED_ON);
1588 } else {
1589 CLR8(ss, REG_HOST_CONTROL, HOST_CONTROL_LED_ON);
1590 }
1591 break;
1592
1593 case SDA_PROP_CLOCK:
1594 rv = sdhost_set_clock(arg, val);
1595 break;
1596
1597 case SDA_PROP_BUSWIDTH:
1598 switch (val) {
1599 case 1:
1600 ss->ss_width = val;
1601 CLR8(ss, REG_HOST_CONTROL, HOST_CONTROL_DATA_WIDTH);
1602 break;
1603 case 4:
1604 ss->ss_width = val;
1605 SET8(ss, REG_HOST_CONTROL, HOST_CONTROL_DATA_WIDTH);
1606 break;
1607 default:
1608 rv = SDA_EINVAL;
1609 }
1610 break;
1611
1612 case SDA_PROP_OCR:
1613 val &= ss->ss_ocr;
1614
1615 if (val & OCR_17_18V) {
1616 PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_18V);
1617 PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_18V |
1618 POWER_CONTROL_BUS_POWER);
1619 } else if (val & OCR_29_30V) {
1620 PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_30V);
1621 PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_30V |
1622 POWER_CONTROL_BUS_POWER);
1623 } else if (val & OCR_32_33V) {
1624 PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_33V);
1625 PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_33V |
1626 POWER_CONTROL_BUS_POWER);
1627 } else if (val == 0) {
1628 /* turn off power */
1629 PUT8(ss, REG_POWER_CONTROL, 0);
1630 } else {
1631 rv = SDA_EINVAL;
1632 }
1633 break;
1634
1635 case SDA_PROP_HISPEED:
1636 if (val) {
1637 SET8(ss, REG_HOST_CONTROL, HOST_CONTROL_HIGH_SPEED_EN);
1638 } else {
1639 CLR8(ss, REG_HOST_CONTROL, HOST_CONTROL_HIGH_SPEED_EN);
1640 }
1641 /* give clocks time to settle */
1642 drv_usecwait(10);
1643 break;
1644
1645 default:
1646 rv = SDA_ENOTSUP;
1647 break;
1648 }
1649
1650 /*
1651 * Apparently some controllers (ENE) have issues with changing
1652 * certain parameters (bus width seems to be one), requiring
1653 * a reset of the DAT and CMD lines.
1654 */
1655 if (rv == SDA_EOK) {
1656 (void) sdhost_soft_reset(ss, SOFT_RESET_CMD);
1657 (void) sdhost_soft_reset(ss, SOFT_RESET_DAT);
1658 }
1659 mutex_exit(&ss->ss_lock);
1660 return (rv);
1661 }
1662
1663 sda_err_t
sdhost_reset(void * arg)1664 sdhost_reset(void *arg)
1665 {
1666 sdslot_t *ss = arg;
1667
1668 mutex_enter(&ss->ss_lock);
1669 if (!ss->ss_suspended) {
1670 if (sdhost_soft_reset(ss, SOFT_RESET_ALL) != SDA_EOK) {
1671 mutex_exit(&ss->ss_lock);
1672 return (SDA_ETIME);
1673 }
1674 sdhost_enable_interrupts(ss);
1675 }
1676 mutex_exit(&ss->ss_lock);
1677 return (SDA_EOK);
1678 }
1679
1680 sda_err_t
sdhost_halt(void * arg)1681 sdhost_halt(void *arg)
1682 {
1683 sdslot_t *ss = arg;
1684
1685 mutex_enter(&ss->ss_lock);
1686 if (!ss->ss_suspended) {
1687 sdhost_disable_interrupts(ss);
1688 /* this has the side effect of removing power from the card */
1689 if (sdhost_soft_reset(ss, SOFT_RESET_ALL) != SDA_EOK) {
1690 mutex_exit(&ss->ss_lock);
1691 return (SDA_ETIME);
1692 }
1693 }
1694 mutex_exit(&ss->ss_lock);
1695 return (SDA_EOK);
1696 }
1697