xref: /illumos-gate/usr/src/uts/common/io/sdcard/adapters/sdhost/sdhost.c (revision 6e375c8351497b82ffa4f33cbf61d712999b4605)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include "sdhost.h"
27 
28 typedef	struct sdslot	sdslot_t;
29 typedef	struct sdhost	sdhost_t;
30 
31 /*
32  * Per slot state.
33  */
34 struct sdslot {
35 	sda_host_t		*ss_host;
36 	int			ss_num;
37 	ddi_acc_handle_t	ss_acch;
38 	caddr_t 		ss_regva;
39 	kmutex_t		ss_lock;
40 	uint32_t		ss_capab;
41 	uint32_t		ss_baseclk;	/* Hz */
42 	uint32_t		ss_cardclk;	/* Hz */
43 	uint8_t			ss_tmoutclk;
44 	uint32_t		ss_tmusecs;	/* timeout units in usecs */
45 	uint32_t		ss_ocr;		/* OCR formatted voltages */
46 	uint16_t		ss_mode;
47 	boolean_t		ss_suspended;
48 
49 	/*
50 	 * Command in progress
51 	 */
52 	uint8_t			*ss_kvaddr;
53 	ddi_dma_cookie_t	*ss_dmacs;
54 	uint_t			ss_ndmac;
55 	int			ss_blksz;
56 	uint16_t		ss_resid;	/* in blocks */
57 
58 	/* scratch buffer, to receive extra PIO data */
59 	uint32_t		ss_bounce[2048 / 4];
60 };
61 
62 /*
63  * Per controller state.
64  */
65 struct sdhost {
66 	int			sh_numslots;
67 	ddi_dma_attr_t		sh_dmaattr;
68 	sdslot_t		sh_slots[SDHOST_MAXSLOTS];
69 	sda_host_t		*sh_host;
70 
71 	/*
72 	 * Interrupt related information.
73 	 */
74 	ddi_intr_handle_t	sh_ihandle;
75 	int			sh_icap;
76 	uint_t			sh_ipri;
77 };
78 
79 
80 static int sdhost_attach(dev_info_t *, ddi_attach_cmd_t);
81 static int sdhost_detach(dev_info_t *, ddi_detach_cmd_t);
82 static int sdhost_quiesce(dev_info_t *);
83 static int sdhost_suspend(dev_info_t *);
84 static int sdhost_resume(dev_info_t *);
85 
86 static void sdhost_enable_interrupts(sdslot_t *);
87 static void sdhost_disable_interrupts(sdslot_t *);
88 static int sdhost_setup_intr(dev_info_t *, sdhost_t *);
89 static uint_t sdhost_intr(caddr_t, caddr_t);
90 static int sdhost_init_slot(dev_info_t *, sdhost_t *, int, int);
91 static void sdhost_uninit_slot(sdhost_t *, int);
92 static sda_err_t sdhost_soft_reset(sdslot_t *, uint8_t);
93 static sda_err_t sdhost_set_clock(sdslot_t *, uint32_t);
94 static void sdhost_xfer_done(sdslot_t *, sda_err_t);
95 static sda_err_t sdhost_wait_cmd(sdslot_t *, sda_cmd_t *);
96 static uint_t sdhost_slot_intr(sdslot_t *);
97 
98 static sda_err_t sdhost_cmd(void *, sda_cmd_t *);
99 static sda_err_t sdhost_getprop(void *, sda_prop_t, uint32_t *);
100 static sda_err_t sdhost_setprop(void *, sda_prop_t, uint32_t);
101 static sda_err_t sdhost_poll(void *);
102 static sda_err_t sdhost_reset(void *);
103 static sda_err_t sdhost_halt(void *);
104 
105 static struct dev_ops sdhost_dev_ops = {
106 	DEVO_REV,			/* devo_rev */
107 	0,				/* devo_refcnt */
108 	ddi_no_info,			/* devo_getinfo */
109 	nulldev,			/* devo_identify */
110 	nulldev,			/* devo_probe */
111 	sdhost_attach,			/* devo_attach */
112 	sdhost_detach,			/* devo_detach */
113 	nodev,				/* devo_reset */
114 	NULL,				/* devo_cb_ops */
115 	NULL,				/* devo_bus_ops */
116 	NULL,				/* devo_power */
117 	sdhost_quiesce,			/* devo_quiesce */
118 };
119 
120 static struct modldrv sdhost_modldrv = {
121 	&mod_driverops,			/* drv_modops */
122 	"Standard SD Host Controller",	/* drv_linkinfo */
123 	&sdhost_dev_ops			/* drv_dev_ops */
124 };
125 
126 static struct modlinkage modlinkage = {
127 	MODREV_1,			/* ml_rev */
128 	{ &sdhost_modldrv, NULL }	/* ml_linkage */
129 };
130 
131 static struct sda_ops sdhost_ops = {
132 	SDA_OPS_VERSION,
133 	sdhost_cmd,			/* so_cmd */
134 	sdhost_getprop,			/* so_getprop */
135 	sdhost_setprop,			/* so_setprop */
136 	sdhost_poll,			/* so_poll */
137 	sdhost_reset,			/* so_reset */
138 	sdhost_halt,			/* so_halt */
139 };
140 
141 static ddi_device_acc_attr_t sdhost_regattr = {
142 	DDI_DEVICE_ATTR_V0,	/* devacc_attr_version */
143 	DDI_STRUCTURE_LE_ACC,	/* devacc_attr_endian_flags */
144 	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder */
145 	DDI_DEFAULT_ACC,	/* devacc_attr_access */
146 };
147 
148 #define	GET16(ss, reg)	\
149 	ddi_get16(ss->ss_acch, (void *)(ss->ss_regva + reg))
150 #define	PUT16(ss, reg, val)	\
151 	ddi_put16(ss->ss_acch, (void *)(ss->ss_regva + reg), val)
152 #define	GET32(ss, reg)	\
153 	ddi_get32(ss->ss_acch, (void *)(ss->ss_regva + reg))
154 #define	PUT32(ss, reg, val)	\
155 	ddi_put32(ss->ss_acch, (void *)(ss->ss_regva + reg), val)
156 #define	GET64(ss, reg)	\
157 	ddi_get64(ss->ss_acch, (void *)(ss->ss_regva + reg))
158 
159 #define	GET8(ss, reg)	\
160 	ddi_get8(ss->ss_acch, (void *)(ss->ss_regva + reg))
161 #define	PUT8(ss, reg, val)	\
162 	ddi_put8(ss->ss_acch, (void *)(ss->ss_regva + reg), val)
163 
164 #define	CLR8(ss, reg, mask)	PUT8(ss, reg, GET8(ss, reg) & ~(mask))
165 #define	SET8(ss, reg, mask)	PUT8(ss, reg, GET8(ss, reg) | (mask))
166 
167 /*
168  * If anyone ever uses PIO on SPARC, we will have to endian-swap.  But we
169  * think that SD Host Controllers are likely to be uncommon on SPARC,
170  * and hopefully when they exist at all they will be able to use DMA.
171  */
172 #ifdef	_BIG_ENDIAN
173 #define	sw32(x)		ddi_swap32(x)
174 #define	sw16(x)		ddi_swap16(x)
175 #else
176 #define	sw32(x)		(x)
177 #define	sw16(x)		(x)
178 #endif
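
/*
 * Note: the register handle uses DDI_STRUCTURE_LE_ACC, so ddi_get32()
 * and friends already byte-swap register values on a big-endian host.
 * PIO data moved through REG_DATA is a byte stream, though, so
 * sw32()/sw16() are here to undo that swap for raw data.
 */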
179 
180 #define	GETDATA32(ss)		sw32(GET32(ss, REG_DATA))
181 #define	GETDATA16(ss)		sw16(GET16(ss, REG_DATA))
182 #define	GETDATA8(ss)		GET8(ss, REG_DATA)
183 
184 #define	PUTDATA32(ss, val)	PUT32(ss, REG_DATA, sw32(val))
185 #define	PUTDATA16(ss, val)	PUT16(ss, REG_DATA, sw16(val))
186 #define	PUTDATA8(ss, val)	PUT8(ss, REG_DATA, val)
187 
188 #define	CHECK_STATE(ss, nm)	\
189 	((GET32(ss, REG_PRS) & PRS_ ## nm) != 0)
190 
191 int
192 _init(void)
193 {
194 	int	rv;
195 
196 	sda_host_init_ops(&sdhost_dev_ops);
197 
198 	if ((rv = mod_install(&modlinkage)) != 0) {
199 		sda_host_fini_ops(&sdhost_dev_ops);
200 	}
201 
202 	return (rv);
203 }
204 
205 int
206 _fini(void)
207 {
208 	int	rv;
209 
210 	if ((rv = mod_remove(&modlinkage)) == 0) {
211 		sda_host_fini_ops(&sdhost_dev_ops);
212 	}
213 	return (rv);
214 }
215 
216 int
217 _info(struct modinfo *modinfop)
218 {
219 	return (mod_info(&modlinkage, modinfop));
220 }
221 
222 int
223 sdhost_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
224 {
225 	sdhost_t		*shp;
226 	ddi_acc_handle_t	pcih;
227 	uint8_t			slotinfo;
228 	uint8_t			bar;
229 	int			i;
230 	int			rv;
231 
232 	switch (cmd) {
233 	case DDI_ATTACH:
234 		break;
235 
236 	case DDI_RESUME:
237 		return (sdhost_resume(dip));
238 
239 	default:
240 		return (DDI_FAILURE);
241 	}
242 
243 	/*
244 	 * Soft state allocation.
245 	 */
246 	shp = kmem_zalloc(sizeof (*shp), KM_SLEEP);
247 	ddi_set_driver_private(dip, shp);
248 
249 	/*
250 	 * Initialize DMA attributes.  For now we initialize as for
251 	 * SDMA.  If we add ADMA support we can improve this.
252 	 */
253 	shp->sh_dmaattr.dma_attr_version = DMA_ATTR_V0;
254 	shp->sh_dmaattr.dma_attr_addr_lo = 0;
255 	shp->sh_dmaattr.dma_attr_addr_hi = 0xffffffffU;
256 	shp->sh_dmaattr.dma_attr_count_max = 0xffffffffU;
257 	shp->sh_dmaattr.dma_attr_align = 1;
258 	shp->sh_dmaattr.dma_attr_burstsizes = 0;	/* for now! */
259 	shp->sh_dmaattr.dma_attr_minxfer = 1;
260 	shp->sh_dmaattr.dma_attr_maxxfer = 0xffffffffU;
261 	shp->sh_dmaattr.dma_attr_sgllen = -1;		/* unlimited! */
262 	shp->sh_dmaattr.dma_attr_seg = 0xfff;		/* 4K segments */
263 	shp->sh_dmaattr.dma_attr_granular = 1;
264 	shp->sh_dmaattr.dma_attr_flags = 0;
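
	/*
	 * Note that dma_attr_seg of 0xfff keeps each DMA cookie within a
	 * single 4K region.  That lines cookies up with the controller's
	 * SDMA buffer boundary (4K by default), so the DMA interrupt
	 * handler below can simply advance to the next cookie at each
	 * boundary crossing.
	 */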
265 
266 	/*
267 	 * PCI configuration access to figure out number of slots present.
268 	 */
269 	if (pci_config_setup(dip, &pcih) != DDI_SUCCESS) {
270 		cmn_err(CE_WARN, "pci_config_setup failed");
271 		goto failed;
272 	}
273 
274 	slotinfo = pci_config_get8(pcih, SLOTINFO);
275 	shp->sh_numslots = SLOTINFO_NSLOT(slotinfo);
276 
277 	if (shp->sh_numslots > SDHOST_MAXSLOTS) {
278 		cmn_err(CE_WARN, "Host reports too many slots: %d",
279 		    shp->sh_numslots);
280 		goto failed;
281 	}
282 
283 	/*
284 	 * Enable master accesses and DMA.
285 	 */
286 	pci_config_put16(pcih, PCI_CONF_COMM,
287 	    pci_config_get16(pcih, PCI_CONF_COMM) |
288 	    PCI_COMM_MAE | PCI_COMM_ME);
289 
290 	/*
291 	 * Figure out which BAR to use.  Note that we number BARs from
292  * 1, although PCI and the SD Host spec number from 0.  (We number
293 	 * from 1, because register number 0 means PCI configuration
294 	 * space in Solaris.)
295 	 */
296 	bar = SLOTINFO_BAR(slotinfo) + 1;
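	/*
	 * For example, a slot whose SLOTINFO BAR field is 0 (the usual
	 * case) is accessed through register set 1, i.e. the device's
	 * first memory BAR.
	 */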
297 
298 	pci_config_teardown(&pcih);
299 
300 	/*
301 	 * Set up interrupts using the new DDI interrupt API.  This will
302 	 * use MSI or MSI-X interrupts if the device is found to
303 	 * support them.
304 	 */
305 	if (sdhost_setup_intr(dip, shp) != DDI_SUCCESS) {
306 		cmn_err(CE_WARN, "Failed to setup interrupts");
307 		goto failed;
308 	}
309 
310 	shp->sh_host = sda_host_alloc(dip, shp->sh_numslots, &sdhost_ops,
311 	    &shp->sh_dmaattr);
312 	if (shp->sh_host == NULL) {
313 		cmn_err(CE_WARN, "Failed allocating SD host structure");
314 		goto failed;
315 	}
316 
317 	/*
318 	 * Configure slots, this also maps registers, enables
319 	 * interrupts, etc.  Most of the hardware setup is done here.
320 	 */
321 	for (i = 0; i < shp->sh_numslots; i++) {
322 		if (sdhost_init_slot(dip, shp, i, bar + i) != DDI_SUCCESS) {
323 			cmn_err(CE_WARN, "Failed initializing slot %d", i);
324 			goto failed;
325 		}
326 	}
327 
328 	ddi_report_dev(dip);
329 
330 	/*
331 	 * Enable device interrupts at the DDI layer.
332 	 */
333 	if (shp->sh_icap & DDI_INTR_FLAG_BLOCK) {
334 		rv = ddi_intr_block_enable(&shp->sh_ihandle, 1);
335 	} else {
336 		rv = ddi_intr_enable(shp->sh_ihandle);
337 	}
338 	if (rv != DDI_SUCCESS) {
339 		cmn_err(CE_WARN, "Failed enabling interrupts");
340 		goto failed;
341 	}
342 
343 	/*
344 	 * Mark the slots online with the framework.  This will cause
345 	 * the framework to probe them for the presence of cards.
346 	 */
347 	if (sda_host_attach(shp->sh_host) != DDI_SUCCESS) {
348 		cmn_err(CE_WARN, "Failed attaching to SDA framework");
349 		if (shp->sh_icap & DDI_INTR_FLAG_BLOCK) {
350 			(void) ddi_intr_block_disable(&shp->sh_ihandle, 1);
351 		} else {
352 			(void) ddi_intr_disable(shp->sh_ihandle);
353 		}
354 		goto failed;
355 	}
356 
357 	return (DDI_SUCCESS);
358 
359 failed:
360 	if (shp->sh_ihandle != NULL) {
361 		(void) ddi_intr_remove_handler(shp->sh_ihandle);
362 		(void) ddi_intr_free(shp->sh_ihandle);
363 	}
364 	for (i = 0; i < shp->sh_numslots; i++)
365 		sdhost_uninit_slot(shp, i);
366 	kmem_free(shp, sizeof (*shp));
367 
368 	return (DDI_FAILURE);
369 }
370 
371 int
372 sdhost_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
373 {
374 	sdhost_t	*shp;
375 	int		i;
376 
377 	switch (cmd) {
378 	case DDI_DETACH:
379 		break;
380 
381 	case DDI_SUSPEND:
382 		return (sdhost_suspend(dip));
383 
384 	default:
385 		return (DDI_FAILURE);
386 	}
387 
388 	shp = ddi_get_driver_private(dip);
389 
390 	/*
391 	 * Take host offline with the framework.
392 	 */
393 	sda_host_detach(shp->sh_host);
394 
395 	/*
396 	 * Tear down interrupts.
397 	 */
398 	if (shp->sh_ihandle != NULL) {
399 		if (shp->sh_icap & DDI_INTR_FLAG_BLOCK) {
400 			(void) ddi_intr_block_disable(&shp->sh_ihandle, 1);
401 		} else {
402 			(void) ddi_intr_disable(shp->sh_ihandle);
403 		}
404 		(void) ddi_intr_remove_handler(shp->sh_ihandle);
405 		(void) ddi_intr_free(shp->sh_ihandle);
406 	}
407 
408 	/*
409 	 * Tear down register mappings, etc.
410 	 */
411 	for (i = 0; i < shp->sh_numslots; i++)
412 		sdhost_uninit_slot(shp, i);
413 	kmem_free(shp, sizeof (*shp));
414 
415 	return (DDI_SUCCESS);
416 }
417 
418 int
419 sdhost_quiesce(dev_info_t *dip)
420 {
421 	sdhost_t	*shp;
422 	sdslot_t	*ss;
423 
424 	shp = ddi_get_driver_private(dip);
425 
426 	/* reset each slot separately */
427 	for (int i = 0; i < shp->sh_numslots; i++) {
428 		ss = &shp->sh_slots[i];
429 		if (ss->ss_acch == NULL)
430 			continue;
431 
432 		(void) sdhost_soft_reset(ss, SOFT_RESET_ALL);
433 	}
434 	return (DDI_SUCCESS);
435 }
436 
437 int
438 sdhost_suspend(dev_info_t *dip)
439 {
440 	sdhost_t	*shp;
441 	sdslot_t	*ss;
442 	int		i;
443 
444 	shp = ddi_get_driver_private(dip);
445 
446 	sda_host_suspend(shp->sh_host);
447 
448 	for (i = 0; i < shp->sh_numslots; i++) {
449 		ss = &shp->sh_slots[i];
450 		mutex_enter(&ss->ss_lock);
451 		ss->ss_suspended = B_TRUE;
452 		sdhost_disable_interrupts(ss);
453 		(void) sdhost_soft_reset(ss, SOFT_RESET_ALL);
454 		mutex_exit(&ss->ss_lock);
455 	}
456 	return (DDI_SUCCESS);
457 }
458 
459 int
460 sdhost_resume(dev_info_t *dip)
461 {
462 	sdhost_t	*shp;
463 	sdslot_t	*ss;
464 	int		i;
465 
466 	shp = ddi_get_driver_private(dip);
467 
468 	for (i = 0; i < shp->sh_numslots; i++) {
469 		ss = &shp->sh_slots[i];
470 		mutex_enter(&ss->ss_lock);
471 		ss->ss_suspended = B_FALSE;
472 		(void) sdhost_soft_reset(ss, SOFT_RESET_ALL);
473 		sdhost_enable_interrupts(ss);
474 		mutex_exit(&ss->ss_lock);
475 	}
476 
477 	sda_host_resume(shp->sh_host);
478 
479 	return (DDI_SUCCESS);
480 }
481 
482 sda_err_t
483 sdhost_set_clock(sdslot_t *ss, uint32_t hz)
484 {
485 	uint16_t	div;
486 	uint32_t	val;
487 	uint32_t	clk;
488 	int		count;
489 
490 	/*
491 	 * Shut off the clock to begin.
492 	 */
493 	ss->ss_cardclk = 0;
494 	PUT16(ss, REG_CLOCK_CONTROL, 0);
495 	if (hz == 0) {
496 		return (SDA_EOK);
497 	}
498 
499 	if (ss->ss_baseclk == 0) {
500 		sda_host_log(ss->ss_host, ss->ss_num,
501 		    "Base clock frequency not established.");
502 		return (SDA_EINVAL);
503 	}
504 
505 	if ((hz > 25000000) && ((ss->ss_capab & CAPAB_HIGH_SPEED) != 0)) {
506 		/* this clock requires high speed timings! */
507 		SET8(ss, REG_HOST_CONTROL, HOST_CONTROL_HIGH_SPEED_EN);
508 	} else {
509 		/* don't allow clock to run faster than 25MHz */
510 		hz = min(hz, 25000000);
511 		CLR8(ss, REG_HOST_CONTROL, HOST_CONTROL_HIGH_SPEED_EN);
512 	}
513 
514 	/* figure out the divider */
515 	clk = ss->ss_baseclk;
516 	div  = 1;
517 	while (clk > hz) {
518 		if (div > 0x80)
519 			break;
520 		clk >>= 1;	/* divide clock by two */
521 		div <<= 1;	/* divider doubles */
522 	}
523 	div >>= 1;	/* 0 == divide by 1, 1 = divide by 2 */
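	/*
	 * For example, with a 48 MHz base clock and a 25 MHz target, the
	 * loop above halves the clock once (clk == 24 MHz, div == 2), and
	 * the final shift leaves div == 1, i.e. "divide by 2" in the
	 * register encoding.
	 */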
524 
525 	/*
526 	 * Set the internal clock divider first, without enabling the
527 	 * card clock yet.
528 	 */
529 	PUT16(ss, REG_CLOCK_CONTROL,
530 	    (div << CLOCK_CONTROL_FREQ_SHIFT) | CLOCK_CONTROL_INT_CLOCK_EN);
531 
532 	/*
533 	 * Wait up to 100 msec for the internal clock to stabilize.
534 	 * (The spec does not seem to indicate a maximum timeout, but
535 	 * it also suggests that an infinite loop be used, which is
536 	 * not appropriate for hardened Solaris drivers.)
537 	 */
538 	for (count = 100000; count; count -= 10) {
539 
540 		val = GET16(ss, REG_CLOCK_CONTROL);
541 
542 		if (val & CLOCK_CONTROL_INT_CLOCK_STABLE) {
543 			/* if clock is stable, enable the SD clock pin */
544 			PUT16(ss, REG_CLOCK_CONTROL, val |
545 			    CLOCK_CONTROL_SD_CLOCK_EN);
546 
547 			ss->ss_cardclk = clk;
548 			return (SDA_EOK);
549 		}
550 
551 		drv_usecwait(10);
552 	}
553 
554 	return (SDA_ETIME);
555 }
556 
557 sda_err_t
558 sdhost_soft_reset(sdslot_t *ss, uint8_t bits)
559 {
560 	int	count;
561 
562 	/*
563 	 * There appears to be a bug where Ricoh hosts might have a
564 	 * problem if the host frequency is not set.  If the card
565 	 * isn't present, or we are doing a master reset, just enable
566 	 * the internal clock at its native speed.  (No dividers, and
567  * not exposed to the card.)
568 	 */
569 	if ((bits == SOFT_RESET_ALL) || !(CHECK_STATE(ss, CARD_INSERTED))) {
570 		PUT16(ss, REG_CLOCK_CONTROL, CLOCK_CONTROL_INT_CLOCK_EN);
571 		/* simple 1msec wait, don't wait for clock to stabilize */
572 		drv_usecwait(1000);
573 	}
574 
575 	PUT8(ss, REG_SOFT_RESET, bits);
576 	for (count = 100000; count != 0; count -= 10) {
577 		if ((GET8(ss, REG_SOFT_RESET) & bits) == 0) {
578 			return (SDA_EOK);
579 		}
580 		drv_usecwait(10);
581 	}
582 
583 	return (SDA_ETIME);
584 }
585 
586 void
587 sdhost_disable_interrupts(sdslot_t *ss)
588 {
589 	/* disable slot interrupts for card insert and remove */
590 	PUT16(ss, REG_INT_MASK, 0);
591 	PUT16(ss, REG_INT_EN, 0);
592 
593 	/* disable error interrupts */
594 	PUT16(ss, REG_ERR_MASK, 0);
595 	PUT16(ss, REG_ERR_EN, 0);
596 }
597 
598 void
599 sdhost_enable_interrupts(sdslot_t *ss)
600 {
601 	/*
602 	 * Note that we want to enable reading of the CMD related
603 	 * bits, but we do not want them to generate an interrupt.
604 	 * (The busy wait for typical CMD stuff will normally be less
605 	 * than 10 usec, so it's simpler to just poll.  Even in
606 	 * the worst case of 100 kHz, the poll is at worst 2 msec.)
607 	 */
608 
609 	/* enable slot interrupts for card insert and remove */
610 	PUT16(ss, REG_INT_MASK, INT_MASK);
611 	PUT16(ss, REG_INT_EN, INT_ENAB);
612 
613 	/* enable error interrupts */
614 	PUT16(ss, REG_ERR_MASK, ERR_MASK);
615 	PUT16(ss, REG_ERR_EN, ERR_ENAB);
616 }
617 
618 int
619 sdhost_setup_intr(dev_info_t *dip, sdhost_t *shp)
620 {
621 	int		itypes;
622 	int		itype;
623 
624 	/*
625 	 * Set up interrupt handler.
626 	 */
627 	if (ddi_intr_get_supported_types(dip, &itypes) != DDI_SUCCESS) {
628 		cmn_err(CE_WARN, "ddi_intr_get_supported_types failed");
629 		return (DDI_FAILURE);
630 	}
631 
632 	/*
633 	 * It turns out that some controllers don't properly implement MSI,
634 	 * but advertise MSI capability in their PCI config space.
635 	 *
636 	 * While this is really a chip-specific bug, the simplest solution
637 	 * is to just suppress MSI for now by default -- every device seen
638 	 * so far can use FIXED interrupts.
639 	 *
640 	 * We offer an override property, though, just in case someone really
641 	 * wants to force it.
642 	 *
643 	 * We don't do this if the FIXED type isn't supported though!
644 	 */
645 	if ((ddi_prop_get_int(DDI_DEV_T_ANY, dip,
646 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "enable-msi", 0) == 0) &&
647 	    (itypes & DDI_INTR_TYPE_FIXED)) {
648 		itypes &= ~DDI_INTR_TYPE_MSI;
649 	}
650 	if ((ddi_prop_get_int(DDI_DEV_T_ANY, dip,
651 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "enable-msix", 0) == 0) &&
652 	    (itypes & DDI_INTR_TYPE_FIXED)) {
653 		itypes &= ~DDI_INTR_TYPE_MSIX;
654 	}
655 
656 	/*
657 	 * Interrupt types are bits in a mask.  We know about these ones:
658 	 * FIXED = 1
659 	 * MSI = 2
660 	 * MSIX = 4
661 	 */
662 	for (itype = DDI_INTR_TYPE_MSIX; itype != 0; itype >>= 1) {
663 
664 		int			count;
665 
666 		if ((itypes & itype) == 0) {
667 			/* this type is not supported on this device! */
668 			continue;
669 		}
670 
671 		if ((ddi_intr_get_nintrs(dip, itype, &count) != DDI_SUCCESS) ||
672 		    (count == 0)) {
673 			cmn_err(CE_WARN, "ddi_intr_get_nintrs failed");
674 			continue;
675 		}
676 
677 		/*
678 		 * We have not seen a host device with multiple
679 		 * interrupts (one per slot?), and the spec does not
680 		 * indicate that they exist.  But if one ever occurs,
681 		 * we spew a warning to help future debugging/support
682 		 * efforts.
683 		 */
684 		if (count > 1) {
685 			cmn_err(CE_WARN, "Controller offers %d interrupts, "
686 			    "but driver only supports one", count);
687 			continue;
688 		}
689 
690 		if ((ddi_intr_alloc(dip, &shp->sh_ihandle, itype, 0, 1,
691 		    &count, DDI_INTR_ALLOC_NORMAL) != DDI_SUCCESS) ||
692 		    (count != 1)) {
693 			cmn_err(CE_WARN, "ddi_intr_alloc failed");
694 			continue;
695 		}
696 
697 		if (ddi_intr_get_pri(shp->sh_ihandle, &shp->sh_ipri) !=
698 		    DDI_SUCCESS) {
699 			cmn_err(CE_WARN, "ddi_intr_get_pri failed");
700 			(void) ddi_intr_free(shp->sh_ihandle);
701 			shp->sh_ihandle = NULL;
702 			continue;
703 		}
704 
705 		if (shp->sh_ipri >= ddi_intr_get_hilevel_pri()) {
706 			cmn_err(CE_WARN, "Hi level interrupt not supported");
707 			(void) ddi_intr_free(shp->sh_ihandle);
708 			shp->sh_ihandle = NULL;
709 			continue;
710 		}
711 
712 		if (ddi_intr_get_cap(shp->sh_ihandle, &shp->sh_icap) !=
713 		    DDI_SUCCESS) {
714 			cmn_err(CE_WARN, "ddi_intr_get_cap failed");
715 			(void) ddi_intr_free(shp->sh_ihandle);
716 			shp->sh_ihandle = NULL;
717 			continue;
718 		}
719 
720 		if (ddi_intr_add_handler(shp->sh_ihandle, sdhost_intr,
721 		    shp, NULL) != DDI_SUCCESS) {
722 			cmn_err(CE_WARN, "ddi_intr_add_handler failed");
723 			(void) ddi_intr_free(shp->sh_ihandle);
724 			shp->sh_ihandle = NULL;
725 			continue;
726 		}
727 
728 		return (DDI_SUCCESS);
729 	}
730 
731 	return (DDI_FAILURE);
732 }
733 
734 void
735 sdhost_xfer_done(sdslot_t *ss, sda_err_t errno)
736 {
737 	if ((errno == SDA_EOK) && (ss->ss_resid != 0)) {
738 		/* an unexpected partial transfer was found */
739 		errno = SDA_ERESID;
740 	}
741 	ss->ss_blksz = 0;
742 	ss->ss_resid = 0;
743 
744 	if (errno != SDA_EOK) {
745 		(void) sdhost_soft_reset(ss, SOFT_RESET_CMD);
746 		(void) sdhost_soft_reset(ss, SOFT_RESET_DAT);
747 
748 		/* send a STOP command if necessary */
749 		if (ss->ss_mode & XFR_MODE_AUTO_CMD12) {
750 			PUT32(ss, REG_ARGUMENT, 0);
751 			PUT16(ss, REG_COMMAND,
752 			    (CMD_STOP_TRANSMIT << 8) |
753 			    COMMAND_TYPE_NORM | COMMAND_INDEX_CHECK_EN |
754 			    COMMAND_CRC_CHECK_EN | COMMAND_RESP_48_BUSY);
755 		}
756 	}
757 
758 	sda_host_transfer(ss->ss_host, ss->ss_num, errno);
759 }
760 
761 uint_t
762 sdhost_slot_intr(sdslot_t *ss)
763 {
764 	uint16_t	intr;
765 	uint16_t	errs;
766 	uint8_t		*data;
767 	int		count;
768 
769 	mutex_enter(&ss->ss_lock);
770 
771 	if (ss->ss_suspended) {
772 		mutex_exit(&ss->ss_lock);
773 		return (DDI_INTR_UNCLAIMED);
774 	}
775 
776 	intr = GET16(ss, REG_INT_STAT);
777 	if (intr == 0) {
778 		mutex_exit(&ss->ss_lock);
779 		return (DDI_INTR_UNCLAIMED);
780 	}
781 	errs = GET16(ss, REG_ERR_STAT);
782 
783 	if (intr & (INT_REM | INT_INS)) {
784 
785 		PUT16(ss, REG_INT_STAT, intr);
786 		mutex_exit(&ss->ss_lock);
787 
788 		sda_host_detect(ss->ss_host, ss->ss_num);
789 		/* no further interrupt processing this cycle */
790 		return (DDI_INTR_CLAIMED);
791 	}
792 
793 	if (intr & INT_DMA) {
794 		/*
795 		 * We have crossed a DMA/page boundary.  Cope with it.
796 		 */
797 		if (ss->ss_ndmac) {
798 			ss->ss_ndmac--;
799 			ss->ss_dmacs++;
800 			PUT16(ss, REG_INT_STAT, INT_DMA);
801 			PUT32(ss, REG_SDMA_ADDR, ss->ss_dmacs->dmac_address);
802 
803 		} else {
804 			/*
805 			 * Apparently some sdhost controllers issue a
806 			 * final DMA interrupt if the DMA completes on
807 			 * a boundary, even though there is no further
808 			 * data to transfer.
809 			 *
810 			 * There might be a risk here of the
811 			 * controller continuing to access the same
812 			 * data over and over again, but we accept the
813 			 * risk.
814 			 */
815 			PUT16(ss, REG_INT_STAT, INT_DMA);
816 		}
817 	}
818 
819 	if (intr & INT_RD) {
820 		/*
821 		 * PIO read!  PIO is quite suboptimal, but we expect
822 		 * performance critical applications to use DMA
823 		 * whenever possible.  We have to stage this through
824 		 * the bounce buffer to meet alignment considerations.
825 		 */
826 
827 		PUT16(ss, REG_INT_STAT, INT_RD);
828 
829 		while ((ss->ss_resid > 0) && CHECK_STATE(ss, BUF_RD_EN)) {
830 
831 			data = (void *)ss->ss_bounce;
832 			count = ss->ss_blksz;
833 
834 			ASSERT(count > 0);
835 			ASSERT(ss->ss_kvaddr != NULL);
836 
837 			while (count >= sizeof (uint32_t)) {
838 				*(uint32_t *)(void *)data = GETDATA32(ss);
839 				data += sizeof (uint32_t);
840 				count -= sizeof (uint32_t);
841 			}
842 			while (count >= sizeof (uint16_t)) {
843 				*(uint16_t *)(void *)data = GETDATA16(ss);
844 				data += sizeof (uint16_t);
845 				count -= sizeof (uint16_t);
846 			}
847 			while (count >= sizeof (uint8_t)) {
848 				*(uint8_t *)data = GETDATA8(ss);
849 				data += sizeof (uint8_t);
850 				count -= sizeof (uint8_t);
851 			}
852 
853 			bcopy(ss->ss_bounce, ss->ss_kvaddr, ss->ss_blksz);
854 			ss->ss_kvaddr += ss->ss_blksz;
855 			ss->ss_resid--;
856 		}
857 	}
858 
859 	if (intr & INT_WR) {
860 		/*
861 		 * PIO write!  PIO is quite suboptimal, but we expect
862 		 * performance critical applications to use DMA
863 		 * whenever possible.  We have to stage this through
864 		 * the bounce buffer to meet alignment considerations.
865 		 */
866 
867 		PUT16(ss, REG_INT_STAT, INT_WR);
868 
869 		while ((ss->ss_resid > 0) && CHECK_STATE(ss, BUF_WR_EN)) {
870 
871 			data = (void *)ss->ss_bounce;
872 			count = ss->ss_blksz;
873 
874 			ASSERT(count > 0);
875 			ASSERT(ss->ss_kvaddr != NULL);
876 
877 			bcopy(ss->ss_kvaddr, data, count);
878 			while (count >= sizeof (uint32_t)) {
879 				PUTDATA32(ss, *(uint32_t *)(void *)data);
880 				data += sizeof (uint32_t);
881 				count -= sizeof (uint32_t);
882 			}
883 			while (count >= sizeof (uint16_t)) {
884 				PUTDATA16(ss, *(uint16_t *)(void *)data);
885 				data += sizeof (uint16_t);
886 				count -= sizeof (uint16_t);
887 			}
888 			while (count >= sizeof (uint8_t)) {
889 				PUTDATA8(ss, *(uint8_t *)data);
890 				data += sizeof (uint8_t);
891 				count -= sizeof (uint8_t);
892 			}
893 
894 			ss->ss_kvaddr += ss->ss_blksz;
895 			ss->ss_resid--;
896 		}
897 	}
898 
899 	if (intr & INT_XFR) {
900 		PUT16(ss, REG_INT_STAT, INT_XFR);
901 
902 		sdhost_xfer_done(ss, SDA_EOK);
903 	}
904 
905 	if (intr & INT_ERR) {
906 		PUT16(ss, REG_ERR_STAT, errs);
907 		PUT16(ss, REG_INT_STAT, INT_ERR);
908 
909 		if (errs & ERR_DAT) {
910 			if ((errs & ERR_DAT_END) == ERR_DAT_END) {
911 				sdhost_xfer_done(ss, SDA_EPROTO);
912 			} else if ((errs & ERR_DAT_CRC) == ERR_DAT_CRC) {
913 				sdhost_xfer_done(ss, SDA_ECRC7);
914 			} else {
915 				sdhost_xfer_done(ss, SDA_ETIME);
916 			}
917 
918 		} else if (errs & ERR_ACMD12) {
919 			/*
920 			 * Generally, this is bad news.  We need a full
921 			 * reset to recover properly.
922 			 */
923 			sdhost_xfer_done(ss, SDA_ECMD12);
924 		}
925 
926 		/*
927 		 * This asynchronous error leaves the slot more or less
928 		 * useless.  Report it to the framework.
929 		 */
930 		if (errs & ERR_CURRENT) {
931 			sda_host_fault(ss->ss_host, ss->ss_num,
932 			    SDA_FAULT_CURRENT);
933 		}
934 	}
935 
936 	mutex_exit(&ss->ss_lock);
937 
938 	return (DDI_INTR_CLAIMED);
939 }
940 
941 /*ARGSUSED1*/
942 uint_t
943 sdhost_intr(caddr_t arg1, caddr_t arg2)
944 {
945 	sdhost_t	*shp = (void *)arg1;
946 	int		rv = DDI_INTR_UNCLAIMED;
947 	int		num;
948 
949 	/* check each slot present in the system for an interrupt */
950 	for (num = 0; num < shp->sh_numslots; num++) {
951 		if (sdhost_slot_intr(&shp->sh_slots[num]) ==
952 		    DDI_INTR_CLAIMED) {
953 			rv = DDI_INTR_CLAIMED;
954 		}
955 	}
956 	return (rv);
957 }
958 
959 int
960 sdhost_init_slot(dev_info_t *dip, sdhost_t *shp, int num, int bar)
961 {
962 	sdslot_t	*ss;
963 	uint32_t	capab;
964 	uint32_t	clk;
965 
966 	/*
967 	 * Register the private state.
968 	 */
969 	ss = &shp->sh_slots[num];
970 	ss->ss_host = shp->sh_host;
971 	ss->ss_num = num;
972 	sda_host_set_private(shp->sh_host, num, ss);
973 
974 	/*
975 	 * Initialize core data structure, locks, etc.
976 	 */
977 	mutex_init(&ss->ss_lock, NULL, MUTEX_DRIVER,
978 	    DDI_INTR_PRI(shp->sh_ipri));
979 
980 	if (ddi_regs_map_setup(dip, bar, &ss->ss_regva, 0, 0, &sdhost_regattr,
981 	    &ss->ss_acch) != DDI_SUCCESS) {
982 		cmn_err(CE_WARN, "Failed to map registers!");
983 		return (DDI_FAILURE);
984 	}
985 
986 	/* reset before reading capabilities */
987 	if (sdhost_soft_reset(ss, SOFT_RESET_ALL) != SDA_EOK)
988 		return (DDI_FAILURE);
989 
990 	capab = GET64(ss, REG_CAPAB) & 0xffffffffU; /* upper bits reserved */
991 	ss->ss_capab = capab;
992 
993 	/* host voltages in OCR format */
994 	ss->ss_ocr = 0;
995 	if (capab & CAPAB_18V)
996 		ss->ss_ocr |= OCR_18_19V;	/* 1.8V */
997 	if (capab & CAPAB_30V)
998 		ss->ss_ocr |= OCR_30_31V;
999 	if (capab & CAPAB_33V)
1000 		ss->ss_ocr |= OCR_32_33V;
1001 
1002 	/* base clock */
1003 	ss->ss_baseclk =
1004 	    ((capab & CAPAB_BASE_FREQ_MASK) >> CAPAB_BASE_FREQ_SHIFT);
1005 	ss->ss_baseclk *= 1000000;
1006 
1007 	/*
1008 	 * Timeout clock.  We can calculate this using the following
1009 	 * formula:
1010 	 *
1011 	 * (1000000 usec/1sec) * (1sec/tmoutclk) * base factor = clock time
1012 	 *
1013 	 * Clock time is the length of the base clock in usecs.
1014 	 *
1015 	 * Our base factor is 2^13, which is the shortest clock we
1016 	 * can count.
1017 	 *
1018 	 * To simplify the math and avoid overflow, we cancel out the
1019 	 * zeros for kHz or MHz.  Since we want to wait more clocks, not
1020 	 * less, on error, we truncate the result rather than rounding
1021 	 * up.
1022 	 */
1023 	clk = ((capab & CAPAB_TIMEOUT_FREQ_MASK) >> CAPAB_TIMEOUT_FREQ_SHIFT);
1024 	if ((ss->ss_baseclk == 0) || (clk == 0)) {
1025 		cmn_err(CE_WARN, "Unable to determine clock frequencies");
1026 		return (DDI_FAILURE);
1027 	}
1028 
1029 	if (capab & CAPAB_TIMEOUT_UNITS) {
1030 		/* MHz */
1031 		ss->ss_tmusecs = (1 << 13) / clk;
1032 		clk *= 1000000;
1033 	} else {
1034 		/* kHz */
1035 		ss->ss_tmusecs = (1000 * (1 << 13)) / clk;
1036 		clk *= 1000;
1037 	}
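	/*
	 * For example, a 48 MHz timeout clock (CAPAB_TIMEOUT_UNITS set)
	 * gives ss_tmusecs = 8192 / 48 = 170, i.e. the shortest countable
	 * timeout (2^13 timeout clocks) is about 170 usec.
	 */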
1038 
1039 	/*
1040 	 * Calculation of the timeout.
1041 	 *
1042 	 * SDIO cards use a 1sec timeout, and SDHC cards use fixed
1043 	 * SDIO cards use a 1 sec timeout, and SDHC cards use a fixed
1044 	 * 100 msec for read and 250 msec for write.
1045 	 * Legacy cards running at 375kHz have a worst case of about
1046 	 * 15 seconds.  Running at 25MHz (the standard speed) it is
1047 	 * about 100msec for read, and about 3.2 sec for write.
1048 	 * about 100 msec for read, and about 3.2 sec for write.
1049 	 * Typical values are 1/100th that, or about 1 msec for read,
1050 	 *
1051 	 * No transaction at full speed should ever take more than 4
1052 	 * seconds.  (Some slow legacy cards might have trouble, but
1053 	 * we'll worry about them if they ever are seen.  Nobody wants
1054 	 * to wait 4 seconds to access a single block anyway!)
1055 	 *
1056 	 * To get to 4 seconds, we continuously double usec until we
1057 	 * get to the maximum value, or a timeout greater than 4
1058 	 * seconds.
1059 	 *
1060 	 * Note that for high-speed timeout clocks, we might not be
1061 	 * able to get to the full 4 seconds.  E.g. with a 48MHz
1062 	 * timeout clock, we can only get to about 2.8 seconds.  It's
1063 	 * possible that there could be some slow MMC cards that will
1064 	 * timeout at this clock rate, but it seems unlikely.  (The
1065 	 * device would have to be pressing the very worst times,
1066 	 * against the 100-fold "permissive" window allowed, and
1067 	 * running at only 12.5MHz.)
1068 	 *
1069 	 * XXX: this could easily be a tunable.  Someone dealing with only
1070 	 * reasonable cards could set this to just 1 second.
1071 	 */
1072 	for (ss->ss_tmoutclk = 0; ss->ss_tmoutclk < 14; ss->ss_tmoutclk++) {
1073 		if ((ss->ss_tmusecs * (1 << ss->ss_tmoutclk)) >= 4000000) {
1074 			break;
1075 		}
1076 	}
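	/*
	 * Continuing the 48 MHz example above (ss_tmusecs == 170), the
	 * 4 second threshold is never reached and the loop exits with
	 * ss_tmoutclk == 14, for a timeout of 170 * 2^14 usec, or
	 * roughly 2.8 seconds.
	 */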
1077 
1078 	/*
1079 	 * Enable slot interrupts.
1080 	 */
1081 	sdhost_enable_interrupts(ss);
1082 
1083 	return (DDI_SUCCESS);
1084 }
1085 
1086 void
1087 sdhost_uninit_slot(sdhost_t *shp, int num)
1088 {
1089 	sdslot_t	*ss;
1090 
1091 	ss = &shp->sh_slots[num];
1092 	if (ss->ss_acch == NULL)
1093 		return;
1094 
1095 	(void) sdhost_soft_reset(ss, SOFT_RESET_ALL);
1096 
1097 	ddi_regs_map_free(&ss->ss_acch);
1098 	mutex_destroy(&ss->ss_lock);
1099 }
1100 
1101 void
1102 sdhost_get_response(sdslot_t *ss, sda_cmd_t *cmdp)
1103 {
1104 	uint32_t	*resp = cmdp->sc_response;
1105 	int		i;
1106 
1107 	resp[0] = GET32(ss, REG_RESP1);
1108 	resp[1] = GET32(ss, REG_RESP2);
1109 	resp[2] = GET32(ss, REG_RESP3);
1110 	resp[3] = GET32(ss, REG_RESP4);
1111 
1112 	/*
1113 	 * Response 2 is goofy because the host drops the low
1114 	 * order CRC bits.  This makes it a bit awkward, so we
1115 	 * have to shift the bits to make it work out right.
1116 	 *
1117 	 * Note that the framework expects the 32 bit
1118 	 * words to be ordered in LE fashion.  (The
1119 	 * bits within the words are in native order).
1120 	 */
1121 	if (cmdp->sc_rtype == R2) {
1122 		for (i = 3; i > 0; i--) {
1123 			resp[i] <<= 8;
1124 			resp[i] |= (resp[i - 1] >> 24);
1125 		}
1126 		resp[0] <<= 8;
1127 	}
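	/*
	 * The net effect is to shift the whole 128-bit response image up
	 * by one byte, leaving zero in the low byte where the CRC7 field
	 * would have been.
	 */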
1128 }
1129 
1130 sda_err_t
1131 sdhost_wait_cmd(sdslot_t *ss, sda_cmd_t *cmdp)
1132 {
1133 	int		i;
1134 	uint16_t	errs;
1135 	sda_err_t	rv;
1136 
1137 	/*
1138 	 * Worst case for a 100 kHz timeout is 2 msec (200 clocks); we add
1139 	 * a tiny bit for safety.  (Generally timeout will be far, far
1140 	 * less than that.)
1141 	 *
1142 	 * Note that at a more typical 12 MHz (and normally it will be
1143 	 * even faster than that!) the device timeout is only
1144 	 * 16.67 usec.  We could be smarter and reduce the delay time,
1145 	 * but that would require putting more intelligence into the
1146 	 * code, and we don't expect CMD timeout to normally occur
1147 	 * except during initialization.  (At which time we need the
1148 	 * full timeout anyway.)
1149 	 *
1150 	 * Checking the ERR_STAT will normally cause the loop to
1151 	 * terminate early if the device is healthy, anyway.
1152 	 */
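	/*
	 * The loop below polls in 5 usec steps, for a total of roughly
	 * 3 msec before we give up.
	 */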
1153 
1154 	for (i = 3000; i > 0; i -= 5) {
1155 		if (GET16(ss, REG_INT_STAT) & INT_CMD) {
1156 
1157 			PUT16(ss, REG_INT_STAT, INT_CMD);
1158 
1159 			/* command completed */
1160 			sdhost_get_response(ss, cmdp);
1161 			return (SDA_EOK);
1162 		}
1163 
1164 		if ((errs = (GET16(ss, REG_ERR_STAT) & ERR_CMD)) != 0) {
1165 			PUT16(ss, REG_ERR_STAT, errs);
1166 
1167 			/* command timeout isn't a host failure */
1168 			if ((errs & ERR_CMD_TMO) == ERR_CMD_TMO) {
1169 				rv = SDA_ETIME;
1170 			} else if ((errs & ERR_CMD_CRC) == ERR_CMD_CRC) {
1171 				rv = SDA_ECRC7;
1172 			} else {
1173 				rv = SDA_EPROTO;
1174 			}
1175 			goto error;
1176 		}
1177 
1178 		drv_usecwait(5);
1179 	}
1180 
1181 	rv = SDA_ETIME;
1182 
1183 error:
1184 	/*
1185 	 * NB: We need to soft reset the CMD and DAT
1186 	 * lines after a failure of this sort.
1187 	 */
1188 	(void) sdhost_soft_reset(ss, SOFT_RESET_CMD);
1189 	(void) sdhost_soft_reset(ss, SOFT_RESET_DAT);
1190 
1191 	return (rv);
1192 }
1193 
1194 sda_err_t
1195 sdhost_poll(void *arg)
1196 {
1197 	sdslot_t	*ss = arg;
1198 
1199 	(void) sdhost_slot_intr(ss);
1200 	return (SDA_EOK);
1201 }
1202 
1203 sda_err_t
1204 sdhost_cmd(void *arg, sda_cmd_t *cmdp)
1205 {
1206 	sdslot_t	*ss = arg;
1207 	uint16_t	command;
1208 	uint16_t	mode;
1209 	sda_err_t	rv;
1210 
1211 	/*
1212 	 * Command register:
1213 	 * bit 13-8	= command index
1214 	 * bit 7-6	= command type (always zero for us!)
1215 	 * bit 5	= data present select
1216 	 * bit 4	= command index check (always on!)
1217 	 * bit 3	= command CRC check enable
1218 	 * bit 2	= reserved
1219 	 * bit 1-0	= response type
1220 	 */
1221 
1222 	command = ((uint16_t)cmdp->sc_index << 8);
1223 	command |= COMMAND_TYPE_NORM |
1224 	    COMMAND_INDEX_CHECK_EN | COMMAND_CRC_CHECK_EN;
1225 
1226 	switch (cmdp->sc_rtype) {
1227 	case R0:
1228 		command |= COMMAND_RESP_NONE;
1229 		break;
1230 	case R1:
1231 	case R5:
1232 	case R6:
1233 	case R7:
1234 		command |= COMMAND_RESP_48;
1235 		break;
1236 	case R1b:
1237 	case R5b:
1238 		command |= COMMAND_RESP_48_BUSY;
1239 		break;
1240 	case R2:
1241 		command |= COMMAND_RESP_136;
1242 		command &= ~(COMMAND_INDEX_CHECK_EN | COMMAND_CRC_CHECK_EN);
1243 		break;
1244 	case R3:
1245 	case R4:
1246 		command |= COMMAND_RESP_48;
1247 		command &= ~COMMAND_CRC_CHECK_EN;
1248 		command &= ~COMMAND_INDEX_CHECK_EN;
1249 		break;
1250 	default:
1251 		return (SDA_EINVAL);
1252 	}
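
	/*
	 * For example, a single-block read (CMD17, which uses an R1
	 * response) would yield command == (17 << 8) | COMMAND_TYPE_NORM |
	 * COMMAND_INDEX_CHECK_EN | COMMAND_CRC_CHECK_EN | COMMAND_RESP_48;
	 * COMMAND_DATA_PRESENT is OR'ed in below for data transfers.
	 */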
1253 
1254 	mutex_enter(&ss->ss_lock);
1255 	if (ss->ss_suspended) {
1256 		mutex_exit(&ss->ss_lock);
1257 		return (SDA_ESUSPENDED);
1258 	}
1259 
1260 	if (cmdp->sc_nblks != 0) {
1261 		uint16_t	blksz;
1262 		uint16_t	nblks;
1263 
1264 		blksz = cmdp->sc_blksz;
1265 		nblks = cmdp->sc_nblks;
1266 
1267 		/*
1268 		 * Ensure that we have good data.
1269 		 */
1270 		if ((blksz < 1) || (blksz > 2048)) {
1271 			mutex_exit(&ss->ss_lock);
1272 			return (SDA_EINVAL);
1273 		}
1274 		command |= COMMAND_DATA_PRESENT;
1275 
1276 		ss->ss_blksz = blksz;
1277 
1278 		/*
1279 		 * Only SDMA for now.  We can investigate ADMA2 later.
1280 		 * (Right now we don't have ADMA2 capable hardware.)
1281 		 */
1282 		if (((ss->ss_capab & CAPAB_SDMA) != 0) &&
1283 		    (cmdp->sc_ndmac != 0)) {
1284 			ddi_dma_cookie_t	*dmacs = cmdp->sc_dmacs;
1285 
1286 			ASSERT(dmacs != NULL);
1287 
1288 			ss->ss_kvaddr = NULL;
1289 			ss->ss_resid = 0;
1290 			ss->ss_dmacs = dmacs;
1291 			ss->ss_ndmac = cmdp->sc_ndmac - 1;
1292 
1293 			PUT32(ss, REG_SDMA_ADDR, dmacs->dmac_address);
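			/*
			 * Only the first cookie is programmed here; the
			 * remaining cookies are handed to the controller
			 * one at a time by the INT_DMA (boundary) case in
			 * sdhost_slot_intr().
			 */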
1294 			mode = XFR_MODE_DMA_EN;
1295 			PUT16(ss, REG_BLKSZ, blksz);
1296 
1297 		} else {
1298 			ss->ss_kvaddr = (void *)cmdp->sc_kvaddr;
1299 			ss->ss_resid = nblks;
1300 			ss->ss_dmacs = NULL;
1301 			ss->ss_ndmac = 0;
1302 			mode = 0;
1303 			PUT16(ss, REG_BLKSZ, blksz);
1304 		}
1305 
1306 		if (nblks > 1) {
1307 			mode |= XFR_MODE_MULTI | XFR_MODE_COUNT;
1308 			if (cmdp->sc_flags & SDA_CMDF_AUTO_CMD12)
1309 				mode |= XFR_MODE_AUTO_CMD12;
1310 		}
1311 		if ((cmdp->sc_flags & SDA_CMDF_READ) != 0) {
1312 			mode |= XFR_MODE_READ;
1313 		}
1314 
1315 		ss->ss_mode = mode;
1316 
1317 		PUT8(ss, REG_TIMEOUT_CONTROL, ss->ss_tmoutclk);
1318 		PUT16(ss, REG_BLOCK_COUNT, nblks);
1319 		PUT16(ss, REG_XFR_MODE, mode);
1320 	}
1321 
1322 	PUT32(ss, REG_ARGUMENT, cmdp->sc_argument);
1323 	PUT16(ss, REG_COMMAND, command);
1324 
1325 	rv = sdhost_wait_cmd(ss, cmdp);
1326 
1327 	mutex_exit(&ss->ss_lock);
1328 
1329 	return (rv);
1330 }
1331 
1332 sda_err_t
1333 sdhost_getprop(void *arg, sda_prop_t prop, uint32_t *val)
1334 {
1335 	sdslot_t	*ss = arg;
1336 	sda_err_t	rv = 0;
1337 
1338 	mutex_enter(&ss->ss_lock);
1339 
1340 	if (ss->ss_suspended) {
1341 		mutex_exit(&ss->ss_lock);
1342 		return (SDA_ESUSPENDED);
1343 	}
1344 	switch (prop) {
1345 	case SDA_PROP_INSERTED:
1346 		if (CHECK_STATE(ss, CARD_INSERTED)) {
1347 			*val = B_TRUE;
1348 		} else {
1349 			*val = B_FALSE;
1350 		}
1351 		break;
1352 
1353 	case SDA_PROP_WPROTECT:
1354 		if (CHECK_STATE(ss, WRITE_ENABLE)) {
1355 			*val = B_FALSE;
1356 		} else {
1357 			*val = B_TRUE;
1358 		}
1359 		break;
1360 
1361 	case SDA_PROP_OCR:
1362 		*val = ss->ss_ocr;
1363 		break;
1364 
1365 	case SDA_PROP_CLOCK:
1366 		*val = ss->ss_cardclk;
1367 		break;
1368 
1369 	case SDA_PROP_CAP_HISPEED:
1370 		if ((ss->ss_capab & CAPAB_HIGH_SPEED) != 0) {
1371 			*val = B_TRUE;
1372 		} else {
1373 			*val = B_FALSE;
1374 		}
1375 		break;
1376 
1377 	case SDA_PROP_CAP_4BITS:
1378 		*val = B_TRUE;
1379 		break;
1380 
1381 	case SDA_PROP_CAP_NOPIO:
1382 		if ((ss->ss_capab & CAPAB_SDMA) != 0) {
1383 			*val = B_TRUE;
1384 		} else {
1385 			*val = B_FALSE;
1386 		}
1387 		break;
1388 
1389 	case SDA_PROP_CAP_INTR:
1390 	case SDA_PROP_CAP_8BITS:
1391 		*val = B_FALSE;
1392 		break;
1393 
1394 	default:
1395 		rv = SDA_ENOTSUP;
1396 		break;
1397 	}
1398 	mutex_exit(&ss->ss_lock);
1399 
1400 	return (rv);
1401 }
1402 
1403 sda_err_t
1404 sdhost_setprop(void *arg, sda_prop_t prop, uint32_t val)
1405 {
1406 	sdslot_t	*ss = arg;
1407 	sda_err_t	rv = SDA_EOK;
1408 
1409 	mutex_enter(&ss->ss_lock);
1410 
1411 	if (ss->ss_suspended) {
1412 		mutex_exit(&ss->ss_lock);
1413 		return (SDA_ESUSPENDED);
1414 	}
1415 
1416 	switch (prop) {
1417 	case SDA_PROP_LED:
1418 		if (val) {
1419 			SET8(ss, REG_HOST_CONTROL, HOST_CONTROL_LED_ON);
1420 		} else {
1421 			CLR8(ss, REG_HOST_CONTROL, HOST_CONTROL_LED_ON);
1422 		}
1423 		break;
1424 
1425 	case SDA_PROP_CLOCK:
1426 		rv = sdhost_set_clock(arg, val);
1427 		break;
1428 
1429 	case SDA_PROP_BUSWIDTH:
1430 		switch (val) {
1431 		case 1:
1432 			CLR8(ss, REG_HOST_CONTROL, HOST_CONTROL_DATA_WIDTH);
1433 			break;
1434 		case 4:
1435 			SET8(ss, REG_HOST_CONTROL, HOST_CONTROL_DATA_WIDTH);
1436 			break;
1437 		default:
1438 			rv = SDA_EINVAL;
1439 		}
1440 		break;
1441 
1442 	case SDA_PROP_OCR:
1443 		val &= ss->ss_ocr;
1444 
1445 		if (val & OCR_17_18V) {
1446 			PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_18V);
1447 			PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_18V |
1448 			    POWER_CONTROL_BUS_POWER);
1449 		} else if (val & OCR_29_30V) {
1450 			PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_30V);
1451 			PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_30V |
1452 			    POWER_CONTROL_BUS_POWER);
1453 		} else if (val & OCR_32_33V) {
1454 			PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_33V);
1455 			PUT8(ss, REG_POWER_CONTROL, POWER_CONTROL_33V |
1456 			    POWER_CONTROL_BUS_POWER);
1457 		} else if (val == 0) {
1458 			/* turn off power */
1459 			PUT8(ss, REG_POWER_CONTROL, 0);
1460 		} else {
1461 			rv = SDA_EINVAL;
1462 		}
1463 		break;
1464 
1465 	case SDA_PROP_HISPEED:
1466 		if (val) {
1467 			SET8(ss, REG_HOST_CONTROL, HOST_CONTROL_HIGH_SPEED_EN);
1468 		} else {
1469 			CLR8(ss, REG_HOST_CONTROL, HOST_CONTROL_HIGH_SPEED_EN);
1470 		}
1471 		/* give clocks time to settle */
1472 		drv_usecwait(10);
1473 		break;
1474 
1475 	default:
1476 		rv = SDA_ENOTSUP;
1477 		break;
1478 	}
1479 
1480 	/*
1481 	 * Apparently some controllers (ENE) have issues with changing
1482 	 * certain parameters (bus width seems to be one), requiring
1483 	 * a reset of the DAT and CMD lines.
1484 	 */
1485 	if (rv == SDA_EOK) {
1486 		(void) sdhost_soft_reset(ss, SOFT_RESET_CMD);
1487 		(void) sdhost_soft_reset(ss, SOFT_RESET_DAT);
1488 	}
1489 	mutex_exit(&ss->ss_lock);
1490 	return (rv);
1491 }
1492 
1493 sda_err_t
1494 sdhost_reset(void *arg)
1495 {
1496 	sdslot_t	*ss = arg;
1497 
1498 	mutex_enter(&ss->ss_lock);
1499 	if (!ss->ss_suspended) {
1500 		if (sdhost_soft_reset(ss, SOFT_RESET_ALL) != SDA_EOK) {
1501 			mutex_exit(&ss->ss_lock);
1502 			return (SDA_ETIME);
1503 		}
1504 		sdhost_enable_interrupts(ss);
1505 	}
1506 	mutex_exit(&ss->ss_lock);
1507 	return (SDA_EOK);
1508 }
1509 
1510 sda_err_t
1511 sdhost_halt(void *arg)
1512 {
1513 	sdslot_t	*ss = arg;
1514 
1515 	mutex_enter(&ss->ss_lock);
1516 	if (!ss->ss_suspended) {
1517 		sdhost_disable_interrupts(ss);
1518 		/* this has the side effect of removing power from the card */
1519 		if (sdhost_soft_reset(ss, SOFT_RESET_ALL) != SDA_EOK) {
1520 			mutex_exit(&ss->ss_lock);
1521 			return (SDA_ETIME);
1522 		}
1523 	}
1524 	mutex_exit(&ss->ss_lock);
1525 	return (SDA_EOK);
1526 }
1527