1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2018 Emmanuel Vadot <manu@FreeBSD.org>
5 * Copyright (c) 2013 Alexander Fedorov
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/bus.h>
33 #include <sys/conf.h>
34 #include <sys/kernel.h>
35 #include <sys/lock.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/mutex.h>
39 #include <sys/resource.h>
40 #include <sys/rman.h>
41 #include <sys/sysctl.h>
42 #include <sys/queue.h>
43 #include <sys/taskqueue.h>
44
45 #include <machine/bus.h>
46
47 #include <dev/ofw/ofw_bus.h>
48 #include <dev/ofw/ofw_bus_subr.h>
49
50 #include <dev/mmc/bridge.h>
51 #include <dev/mmc/mmcbrvar.h>
52 #include <dev/mmc/mmc_fdt_helpers.h>
53
54 #include <arm/allwinner/aw_mmc.h>
55 #include <dev/clk/clk.h>
56 #include <dev/hwreset/hwreset.h>
57 #include <dev/regulator/regulator.h>
58
59 #include "opt_mmccam.h"
60
61 #ifdef MMCCAM
62 #include <cam/cam.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_debug.h>
65 #include <cam/cam_sim.h>
66 #include <cam/cam_xpt_sim.h>
67 #include <cam/mmc/mmc_sim.h>
68
69 #include "mmc_sim_if.h"
70 #endif
71
72 #include "mmc_pwrseq_if.h"
73
74 #define AW_MMC_MEMRES 0
75 #define AW_MMC_IRQRES 1
76 #define AW_MMC_RESSZ 2
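/* The IDMAC descriptor list below is sized to fill exactly one page. */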
77 #define AW_MMC_DMA_SEGS (PAGE_SIZE / sizeof(struct aw_mmc_dma_desc))
78 #define AW_MMC_DMA_DESC_SIZE (sizeof(struct aw_mmc_dma_desc) * AW_MMC_DMA_SEGS)
79 #define AW_MMC_DMA_FTRGLEVEL 0x20070008
80
81 #define AW_MMC_RESET_RETRY 1000
82
83 #define CARD_ID_FREQUENCY 400000
84
85 struct aw_mmc_conf {
86 uint32_t dma_xferlen;
87 bool mask_data0;
88 bool can_calibrate;
89 bool new_timing;
90 };
91
92 static const struct aw_mmc_conf a10_mmc_conf = {
93 .dma_xferlen = 0x2000,
94 };
95
96 static const struct aw_mmc_conf a13_mmc_conf = {
97 .dma_xferlen = 0x10000,
98 };
99
100 static const struct aw_mmc_conf a64_mmc_conf = {
101 .dma_xferlen = 0x10000,
102 .mask_data0 = true,
103 .can_calibrate = true,
104 .new_timing = true,
105 };
106
107 static const struct aw_mmc_conf a64_emmc_conf = {
108 .dma_xferlen = 0x2000,
109 .can_calibrate = true,
110 };
111
112 static struct ofw_compat_data compat_data[] = {
113 {"allwinner,sun4i-a10-mmc", (uintptr_t)&a10_mmc_conf},
114 {"allwinner,sun5i-a13-mmc", (uintptr_t)&a13_mmc_conf},
115 {"allwinner,sun7i-a20-mmc", (uintptr_t)&a13_mmc_conf},
116 {"allwinner,sun50i-a64-mmc", (uintptr_t)&a64_mmc_conf},
117 {"allwinner,sun50i-a64-emmc", (uintptr_t)&a64_emmc_conf},
118 {NULL, 0}
119 };
120
121 struct aw_mmc_softc {
122 device_t aw_dev;
123 clk_t aw_clk_ahb;
124 clk_t aw_clk_mmc;
125 hwreset_t aw_rst_ahb;
126 int aw_bus_busy;
127 int aw_resid;
128 int aw_timeout;
129 struct callout aw_timeoutc;
130 struct mmc_host aw_host;
131 struct mmc_helper mmc_helper;
132 #ifdef MMCCAM
133 union ccb * ccb;
134 struct mmc_sim mmc_sim;
135 #else
136 struct mmc_request * aw_req;
137 #endif
138 struct mtx aw_mtx;
139 struct resource * aw_res[AW_MMC_RESSZ];
140 struct aw_mmc_conf * aw_mmc_conf;
141 uint32_t aw_intr;
142 uint32_t aw_intr_wait;
143 void * aw_intrhand;
144 unsigned int aw_clock;
145 device_t child;
146
147 /* Fields required for DMA access. */
148 bus_addr_t aw_dma_desc_phys;
149 bus_dmamap_t aw_dma_map;
150 bus_dma_tag_t aw_dma_tag;
151 void * aw_dma_desc;
152 bus_dmamap_t aw_dma_buf_map;
153 bus_dma_tag_t aw_dma_buf_tag;
154 int aw_dma_map_err;
155 };
156
157 static struct resource_spec aw_mmc_res_spec[] = {
158 { SYS_RES_MEMORY, 0, RF_ACTIVE },
159 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
160 { -1, 0, 0 }
161 };
162
163 static int aw_mmc_probe(device_t);
164 static int aw_mmc_attach(device_t);
165 static int aw_mmc_detach(device_t);
166 static int aw_mmc_setup_dma(struct aw_mmc_softc *);
167 static void aw_mmc_teardown_dma(struct aw_mmc_softc *sc);
168 static int aw_mmc_reset(struct aw_mmc_softc *);
169 static int aw_mmc_init(struct aw_mmc_softc *);
170 static void aw_mmc_intr(void *);
171 static int aw_mmc_update_clock(struct aw_mmc_softc *, uint32_t);
172 static void aw_mmc_helper_cd_handler(device_t, bool);
173
174 static void aw_mmc_print_error(uint32_t);
175 static int aw_mmc_update_ios(device_t, device_t);
176 static int aw_mmc_request(device_t, device_t, struct mmc_request *);
177
178 #ifndef MMCCAM
179 static int aw_mmc_get_ro(device_t, device_t);
180 static int aw_mmc_acquire_host(device_t, device_t);
181 static int aw_mmc_release_host(device_t, device_t);
182 #endif
183
184 #define AW_MMC_LOCK(_sc) mtx_lock(&(_sc)->aw_mtx)
185 #define AW_MMC_UNLOCK(_sc) mtx_unlock(&(_sc)->aw_mtx)
186 #define AW_MMC_READ_4(_sc, _reg) \
187 bus_read_4((_sc)->aw_res[AW_MMC_MEMRES], _reg)
188 #define AW_MMC_WRITE_4(_sc, _reg, _value) \
189 bus_write_4((_sc)->aw_res[AW_MMC_MEMRES], _reg, _value)
190
191 SYSCTL_NODE(_hw, OID_AUTO, aw_mmc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
192 "aw_mmc driver");
193
194 static int aw_mmc_debug = 0;
195 SYSCTL_INT(_hw_aw_mmc, OID_AUTO, debug, CTLFLAG_RWTUN, &aw_mmc_debug, 0,
196 "Debug level bit0=card changes bit1=ios changes, bit2=interrupts, bit3=commands");
197 #define AW_MMC_DEBUG_CARD 0x1
198 #define AW_MMC_DEBUG_IOS 0x2
199 #define AW_MMC_DEBUG_INT 0x4
200 #define AW_MMC_DEBUG_CMD 0x8
201
202 #ifdef MMCCAM
203 static int
204 aw_mmc_get_tran_settings(device_t dev, struct ccb_trans_settings_mmc *cts)
205 {
206 struct aw_mmc_softc *sc;
207
208 sc = device_get_softc(dev);
209
210 cts->host_ocr = sc->aw_host.host_ocr;
211 cts->host_f_min = sc->aw_host.f_min;
212 cts->host_f_max = sc->aw_host.f_max;
213 cts->host_caps = sc->aw_host.caps;
214 cts->host_max_data = (sc->aw_mmc_conf->dma_xferlen *
215 AW_MMC_DMA_SEGS) / MMC_SECTOR_SIZE;
216 memcpy(&cts->ios, &sc->aw_host.ios, sizeof(struct mmc_ios));
217
218 return (0);
219 }
220
221 static int
222 aw_mmc_set_tran_settings(device_t dev, struct ccb_trans_settings_mmc *cts)
223 {
224 struct aw_mmc_softc *sc;
225 struct mmc_ios *ios;
226 struct mmc_ios *new_ios;
227
228 sc = device_get_softc(dev);
229 ios = &sc->aw_host.ios;
230 new_ios = &cts->ios;
231
232 /* Update only requested fields */
233 if (cts->ios_valid & MMC_CLK) {
234 ios->clock = new_ios->clock;
235 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
236 device_printf(sc->aw_dev, "Clock => %d\n", ios->clock);
237 }
238 if (cts->ios_valid & MMC_VDD) {
239 ios->vdd = new_ios->vdd;
240 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
241 device_printf(sc->aw_dev, "VDD => %d\n", ios->vdd);
242 }
243 if (cts->ios_valid & MMC_CS) {
244 ios->chip_select = new_ios->chip_select;
245 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
246 device_printf(sc->aw_dev, "CS => %d\n", ios->chip_select);
247 }
248 if (cts->ios_valid & MMC_BW) {
249 ios->bus_width = new_ios->bus_width;
250 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
251 device_printf(sc->aw_dev, "Bus width => %d\n", ios->bus_width);
252 }
253 if (cts->ios_valid & MMC_PM) {
254 ios->power_mode = new_ios->power_mode;
255 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
256 device_printf(sc->aw_dev, "Power mode => %d\n", ios->power_mode);
257 }
258 if (cts->ios_valid & MMC_BT) {
259 ios->timing = new_ios->timing;
260 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
261 device_printf(sc->aw_dev, "Timing => %d\n", ios->timing);
262 }
263 if (cts->ios_valid & MMC_BM) {
264 ios->bus_mode = new_ios->bus_mode;
265 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
266 device_printf(sc->aw_dev, "Bus mode => %d\n", ios->bus_mode);
267 }
268
269 return (aw_mmc_update_ios(sc->aw_dev, NULL));
270 }
271
272 static int
273 aw_mmc_cam_request(device_t dev, union ccb *ccb)
274 {
275 struct aw_mmc_softc *sc;
276 struct ccb_mmcio *mmcio;
277
278 sc = device_get_softc(dev);
279 mmcio = &ccb->mmcio;
280
281 AW_MMC_LOCK(sc);
282
283 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CMD)) {
284 device_printf(sc->aw_dev, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n",
285 mmcio->cmd.opcode, mmcio->cmd.arg, mmcio->cmd.flags,
286 mmcio->cmd.data != NULL ? (unsigned int) mmcio->cmd.data->len : 0,
287 mmcio->cmd.data != NULL ? mmcio->cmd.data->flags: 0);
288 }
289 if (mmcio->cmd.data != NULL) {
290 if (mmcio->cmd.data->len == 0 || mmcio->cmd.data->flags == 0)
291 panic("data->len = %d, data->flags = %d -- something is b0rked",
292 (int)mmcio->cmd.data->len, mmcio->cmd.data->flags);
293 }
294 if (sc->ccb != NULL) {
295 device_printf(sc->aw_dev, "Controller still has an active command\n");
AW_MMC_UNLOCK(sc);
296 return (EBUSY);
297 }
298 sc->ccb = ccb;
299 /* aw_mmc_request locks again */
300 AW_MMC_UNLOCK(sc);
301 aw_mmc_request(sc->aw_dev, NULL, NULL);
302
303 return (0);
304 }
305
306 static void
307 aw_mmc_cam_poll(device_t dev)
308 {
309 struct aw_mmc_softc *sc;
310
311 sc = device_get_softc(dev);
312 aw_mmc_intr(sc);
313 }
314 #endif /* MMCCAM */
315
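/*
 * Card-detect callback from the mmc_fdt_gpio helper: attach an mmc child
 * device when a card is inserted and detach it on removal.  With MMCCAM
 * the SIM is asked to rediscover the bus instead.
 */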
316 static void
317 aw_mmc_helper_cd_handler(device_t dev, bool present)
318 {
319 struct aw_mmc_softc *sc;
320
321 sc = device_get_softc(dev);
322 #ifdef MMCCAM
323 mmc_cam_sim_discover(&sc->mmc_sim);
324 #else
325 bus_topo_lock();
326 if (present) {
327 if (sc->child == NULL) {
328 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CARD))
329 device_printf(sc->aw_dev, "Card inserted\n");
330
331 sc->child = device_add_child(sc->aw_dev, "mmc", DEVICE_UNIT_ANY);
332 if (sc->child) {
333 device_set_ivars(sc->child, sc);
334 (void)device_probe_and_attach(sc->child);
335 }
336 }
337 } else {
338 /* Card isn't present, detach if necessary */
339 if (sc->child != NULL) {
340 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CARD))
341 device_printf(sc->aw_dev, "Card removed\n");
342
343 device_delete_child(sc->aw_dev, sc->child);
344 sc->child = NULL;
345 }
346 }
347 bus_topo_unlock();
348 #endif /* MMCCAM */
349 }
350
351 static int
352 aw_mmc_probe(device_t dev)
353 {
354
355 if (!ofw_bus_status_okay(dev))
356 return (ENXIO);
357 if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
358 return (ENXIO);
359
360 device_set_desc(dev, "Allwinner Integrated MMC/SD controller");
361
362 return (BUS_PROBE_DEFAULT);
363 }
364
365 static int
366 aw_mmc_attach(device_t dev)
367 {
368 struct aw_mmc_softc *sc;
369 struct sysctl_ctx_list *ctx;
370 struct sysctl_oid_list *tree;
371 int error;
372
373 sc = device_get_softc(dev);
374 sc->aw_dev = dev;
375
376 sc->aw_mmc_conf = (struct aw_mmc_conf *)ofw_bus_search_compatible(dev, compat_data)->ocd_data;
377
378 #ifndef MMCCAM
379 sc->aw_req = NULL;
380 #endif
381 if (bus_alloc_resources(dev, aw_mmc_res_spec, sc->aw_res) != 0) {
382 device_printf(dev, "cannot allocate device resources\n");
383 return (ENXIO);
384 }
385 if (bus_setup_intr(dev, sc->aw_res[AW_MMC_IRQRES],
386 INTR_TYPE_NET | INTR_MPSAFE, NULL, aw_mmc_intr, sc,
387 &sc->aw_intrhand)) {
388 bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);
389 device_printf(dev, "cannot setup interrupt handler\n");
390 return (ENXIO);
391 }
392 mtx_init(&sc->aw_mtx, device_get_nameunit(sc->aw_dev), "aw_mmc",
393 MTX_DEF);
394 callout_init_mtx(&sc->aw_timeoutc, &sc->aw_mtx, 0);
395
396 /* De-assert reset */
397 if (hwreset_get_by_ofw_name(dev, 0, "ahb", &sc->aw_rst_ahb) == 0) {
398 error = hwreset_deassert(sc->aw_rst_ahb);
399 if (error != 0) {
400 device_printf(dev, "cannot de-assert reset\n");
401 goto fail;
402 }
403 }
404
405 /* Activate the module clock. */
406 error = clk_get_by_ofw_name(dev, 0, "ahb", &sc->aw_clk_ahb);
407 if (error != 0) {
408 device_printf(dev, "cannot get ahb clock\n");
409 goto fail;
410 }
411 error = clk_enable(sc->aw_clk_ahb);
412 if (error != 0) {
413 device_printf(dev, "cannot enable ahb clock\n");
414 goto fail;
415 }
416 error = clk_get_by_ofw_name(dev, 0, "mmc", &sc->aw_clk_mmc);
417 if (error != 0) {
418 device_printf(dev, "cannot get mmc clock\n");
419 goto fail;
420 }
421 error = clk_set_freq(sc->aw_clk_mmc, CARD_ID_FREQUENCY,
422 CLK_SET_ROUND_DOWN);
423 if (error != 0) {
424 device_printf(dev, "cannot init mmc clock\n");
425 goto fail;
426 }
427 error = clk_enable(sc->aw_clk_mmc);
428 if (error != 0) {
429 device_printf(dev, "cannot enable mmc clock\n");
430 goto fail;
431 }
432
433 sc->aw_timeout = 10;
434 ctx = device_get_sysctl_ctx(dev);
435 tree = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
436 SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "req_timeout", CTLFLAG_RW,
437 &sc->aw_timeout, 0, "Request timeout in seconds");
438
439 /* Soft-reset the controller. */
440 if (aw_mmc_reset(sc) != 0) {
441 device_printf(dev, "cannot reset the controller\n");
442 goto fail;
443 }
444
445 if (aw_mmc_setup_dma(sc) != 0) {
446 device_printf(sc->aw_dev, "Couldn't setup DMA!\n");
447 goto fail;
448 }
449
450 /* Set defaults for the frequency range, OCR and supported modes. */
451 sc->aw_host.f_min = 400000;
452 sc->aw_host.f_max = 52000000;
453 sc->aw_host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
454 sc->aw_host.caps |= MMC_CAP_HSPEED | MMC_CAP_SIGNALING_330;
455 mmc_fdt_parse(dev, 0, &sc->mmc_helper, &sc->aw_host);
456 mmc_fdt_gpio_setup(dev, 0, &sc->mmc_helper, aw_mmc_helper_cd_handler);
457
458 #ifdef MMCCAM
459 sc->ccb = NULL;
460
461 if (mmc_cam_sim_alloc(dev, "aw_mmc", &sc->mmc_sim) != 0) {
462 device_printf(dev, "cannot alloc cam sim\n");
463 goto fail;
464 }
465 #endif /* MMCCAM */
466
467 return (0);
468
469 fail:
470 callout_drain(&sc->aw_timeoutc);
471 mtx_destroy(&sc->aw_mtx);
472 bus_teardown_intr(dev, sc->aw_res[AW_MMC_IRQRES], sc->aw_intrhand);
473 bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);
474
475 return (ENXIO);
476 }
477
478 static int
479 aw_mmc_detach(device_t dev)
480 {
481 struct aw_mmc_softc *sc;
482
483 sc = device_get_softc(dev);
484
485 clk_disable(sc->aw_clk_mmc);
486 clk_disable(sc->aw_clk_ahb);
487 hwreset_assert(sc->aw_rst_ahb);
488
489 mmc_fdt_gpio_teardown(&sc->mmc_helper);
490
491 callout_drain(&sc->aw_timeoutc);
492
493 device_delete_children(sc->aw_dev);
494
495 aw_mmc_teardown_dma(sc);
496
497 mtx_destroy(&sc->aw_mtx);
498
499 bus_teardown_intr(dev, sc->aw_res[AW_MMC_IRQRES], sc->aw_intrhand);
500 bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);
501
502 #ifdef MMCCAM
503 mmc_cam_sim_free(&sc->mmc_sim);
504 #endif
505
506 return (0);
507 }
508
509 static void
510 aw_dma_desc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
511 {
512 struct aw_mmc_softc *sc;
513
514 sc = (struct aw_mmc_softc *)arg;
515 if (err) {
516 sc->aw_dma_map_err = err;
517 return;
518 }
519 sc->aw_dma_desc_phys = segs[0].ds_addr;
520 }
521
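/*
 * Allocate the coherent IDMAC descriptor list and create the bus_dma tag
 * and map used to load the data buffers of individual transfers.
 */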
522 static int
523 aw_mmc_setup_dma(struct aw_mmc_softc *sc)
524 {
525 int error;
526
527 /* Allocate the DMA descriptor memory. */
528 error = bus_dma_tag_create(
529 bus_get_dma_tag(sc->aw_dev), /* parent */
530 AW_MMC_DMA_ALIGN, 0, /* align, boundary */
531 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
532 BUS_SPACE_MAXADDR, /* highaddr */
533 NULL, NULL, /* filter, filterarg*/
534 AW_MMC_DMA_DESC_SIZE, 1, /* maxsize, nsegment */
535 AW_MMC_DMA_DESC_SIZE, /* maxsegsize */
536 0, /* flags */
537 NULL, NULL, /* lock, lockarg*/
538 &sc->aw_dma_tag);
539 if (error)
540 return (error);
541
542 error = bus_dmamem_alloc(sc->aw_dma_tag, &sc->aw_dma_desc,
543 BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
544 &sc->aw_dma_map);
545 if (error)
546 return (error);
547
548 error = bus_dmamap_load(sc->aw_dma_tag,
549 sc->aw_dma_map,
550 sc->aw_dma_desc, AW_MMC_DMA_DESC_SIZE,
551 aw_dma_desc_cb, sc, 0);
552 if (error)
553 return (error);
554 if (sc->aw_dma_map_err)
555 return (sc->aw_dma_map_err);
556
557 /* Create the DMA map for data transfers. */
558 error = bus_dma_tag_create(
559 bus_get_dma_tag(sc->aw_dev), /* parent */
560 AW_MMC_DMA_ALIGN, 0, /* align, boundary */
561 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
562 BUS_SPACE_MAXADDR, /* highaddr */
563 NULL, NULL, /* filter, filterarg*/
564 sc->aw_mmc_conf->dma_xferlen *
565 AW_MMC_DMA_SEGS, AW_MMC_DMA_SEGS, /* maxsize, nsegments */
566 sc->aw_mmc_conf->dma_xferlen, /* maxsegsize */
567 BUS_DMA_ALLOCNOW, /* flags */
568 NULL, NULL, /* lock, lockarg*/
569 &sc->aw_dma_buf_tag);
570 if (error)
571 return (error);
572 error = bus_dmamap_create(sc->aw_dma_buf_tag, 0,
573 &sc->aw_dma_buf_map);
574 if (error)
575 return (error);
576
577 return (0);
578 }
579
580 static void
581 aw_mmc_teardown_dma(struct aw_mmc_softc *sc)
582 {
583
584 bus_dmamap_unload(sc->aw_dma_tag, sc->aw_dma_map);
585 bus_dmamem_free(sc->aw_dma_tag, sc->aw_dma_desc, sc->aw_dma_map);
586 if (bus_dma_tag_destroy(sc->aw_dma_tag) != 0)
587 device_printf(sc->aw_dev, "Cannot destroy the dma tag\n");
588
589 bus_dmamap_unload(sc->aw_dma_buf_tag, sc->aw_dma_buf_map);
590 bus_dmamap_destroy(sc->aw_dma_buf_tag, sc->aw_dma_buf_map);
591 if (bus_dma_tag_destroy(sc->aw_dma_buf_tag) != 0)
592 device_printf(sc->aw_dev, "Cannot destroy the dma buf tag\n");
593 }
594
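/*
 * bus_dmamap_load() callback for data buffers: build the chained IDMAC
 * descriptors from the DMA segments.  A buf_size of 0 tells the controller
 * to use the maximum transfer length for that descriptor.
 */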
595 static void
596 aw_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
597 {
598 int i;
599 struct aw_mmc_dma_desc *dma_desc;
600 struct aw_mmc_softc *sc;
601
602 sc = (struct aw_mmc_softc *)arg;
603 sc->aw_dma_map_err = err;
604
605 if (err)
606 return;
607
608 dma_desc = sc->aw_dma_desc;
609 for (i = 0; i < nsegs; i++) {
610 if (segs[i].ds_len == sc->aw_mmc_conf->dma_xferlen)
611 dma_desc[i].buf_size = 0; /* A size of 0 indicates max length */
612 else
613 dma_desc[i].buf_size = segs[i].ds_len;
614 dma_desc[i].buf_addr = segs[i].ds_addr;
615 dma_desc[i].config = AW_MMC_DMA_CONFIG_CH |
616 AW_MMC_DMA_CONFIG_OWN | AW_MMC_DMA_CONFIG_DIC;
617
618 dma_desc[i].next = sc->aw_dma_desc_phys +
619 ((i + 1) * sizeof(struct aw_mmc_dma_desc));
620 }
621
622 dma_desc[0].config |= AW_MMC_DMA_CONFIG_FD;
623 dma_desc[nsegs - 1].config |= AW_MMC_DMA_CONFIG_LD |
624 AW_MMC_DMA_CONFIG_ER;
625 dma_desc[nsegs - 1].config &= ~AW_MMC_DMA_CONFIG_DIC;
626 dma_desc[nsegs - 1].next = 0;
627 }
628
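/*
 * Map the command's data buffer, sync the maps and program the internal
 * DMA controller: soft reset, descriptor list base address, RX/TX
 * completion interrupt and FIFO watermark.
 */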
629 static int
630 aw_mmc_prepare_dma(struct aw_mmc_softc *sc)
631 {
632 bus_dmasync_op_t sync_op;
633 int error;
634 struct mmc_command *cmd;
635 uint32_t val;
636
637 #ifdef MMCCAM
638 cmd = &sc->ccb->mmcio.cmd;
639 #else
640 cmd = sc->aw_req->cmd;
641 #endif
642 if (cmd->data->len > (sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS))
643 return (EFBIG);
644 error = bus_dmamap_load(sc->aw_dma_buf_tag, sc->aw_dma_buf_map,
645 cmd->data->data, cmd->data->len, aw_dma_cb, sc, 0);
646 if (error)
647 return (error);
648 if (sc->aw_dma_map_err)
649 return (sc->aw_dma_map_err);
650
651 if (cmd->data->flags & MMC_DATA_WRITE)
652 sync_op = BUS_DMASYNC_PREWRITE;
653 else
654 sync_op = BUS_DMASYNC_PREREAD;
655 bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map, sync_op);
656 bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map, BUS_DMASYNC_PREWRITE);
657
658 /* Enable DMA */
659 val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
660 val &= ~AW_MMC_GCTL_FIFO_AC_MOD;
661 val |= AW_MMC_GCTL_DMA_ENB;
662 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val);
663
664 /* Reset DMA */
665 val |= AW_MMC_GCTL_DMA_RST;
666 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val);
667
668 AW_MMC_WRITE_4(sc, AW_MMC_DMAC, AW_MMC_DMAC_IDMAC_SOFT_RST);
669 AW_MMC_WRITE_4(sc, AW_MMC_DMAC,
670 AW_MMC_DMAC_IDMAC_IDMA_ON | AW_MMC_DMAC_IDMAC_FIX_BURST);
671
672 /* Enable RX or TX DMA interrupt */
673 val = AW_MMC_READ_4(sc, AW_MMC_IDIE);
674 if (cmd->data->flags & MMC_DATA_WRITE)
675 val |= AW_MMC_IDST_TX_INT;
676 else
677 val |= AW_MMC_IDST_RX_INT;
678 AW_MMC_WRITE_4(sc, AW_MMC_IDIE, val);
679
680 /* Set DMA descriptor list address */
681 AW_MMC_WRITE_4(sc, AW_MMC_DLBA, sc->aw_dma_desc_phys);
682
683 /* FIFO trigger level */
684 AW_MMC_WRITE_4(sc, AW_MMC_FWLR, AW_MMC_DMA_FTRGLEVEL);
685
686 return (0);
687 }
688
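/* Issue a controller soft reset and wait for the reset bit to self-clear. */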
689 static int
690 aw_mmc_reset(struct aw_mmc_softc *sc)
691 {
692 uint32_t reg;
693 int timeout;
694
695 reg = AW_MMC_READ_4(sc, AW_MMC_GCTL);
696 reg |= AW_MMC_GCTL_RESET;
697 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg);
698 timeout = AW_MMC_RESET_RETRY;
699 while (--timeout > 0) {
700 if ((AW_MMC_READ_4(sc, AW_MMC_GCTL) & AW_MMC_GCTL_RESET) == 0)
701 break;
702 DELAY(100);
703 }
704 if (timeout == 0)
705 return (ETIMEDOUT);
706
707 return (0);
708 }
709
710 static int
711 aw_mmc_init(struct aw_mmc_softc *sc)
712 {
713 uint32_t reg;
714 int ret;
715
716 ret = aw_mmc_reset(sc);
717 if (ret != 0)
718 return (ret);
719
720 /* Set the timeout. */
721 AW_MMC_WRITE_4(sc, AW_MMC_TMOR,
722 AW_MMC_TMOR_DTO_LMT_SHIFT(AW_MMC_TMOR_DTO_LMT_MASK) |
723 AW_MMC_TMOR_RTO_LMT_SHIFT(AW_MMC_TMOR_RTO_LMT_MASK));
724
725 /* Mask all interrupts; aw_mmc_request() enables the ones it needs. */
726 AW_MMC_WRITE_4(sc, AW_MMC_IMKR, 0);
727
728 /* Clear pending interrupts. */
729 AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);
730
731 /* Debug register, undocumented */
732 AW_MMC_WRITE_4(sc, AW_MMC_DBGC, 0xdeb);
733
734 /* Function select register */
735 AW_MMC_WRITE_4(sc, AW_MMC_FUNS, 0xceaa0000);
736
737 AW_MMC_WRITE_4(sc, AW_MMC_IDST, 0xffffffff);
738
739 /* Enable interrupts and disable AHB access. */
740 reg = AW_MMC_READ_4(sc, AW_MMC_GCTL);
741 reg |= AW_MMC_GCTL_INT_ENB;
742 reg &= ~AW_MMC_GCTL_FIFO_AC_MOD;
743 reg &= ~AW_MMC_GCTL_WAIT_MEM_ACCESS;
744 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg);
745
746 return (0);
747 }
748
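/*
 * Complete the current request: on error, reset the FIFO and DMA engines
 * and re-enable the card clock, then clear the per-request state and hand
 * the CCB or mmc_request back to its owner.
 */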
749 static void
750 aw_mmc_req_done(struct aw_mmc_softc *sc)
751 {
752 struct mmc_command *cmd;
753 #ifdef MMCCAM
754 union ccb *ccb;
755 #else
756 struct mmc_request *req;
757 #endif
758 uint32_t val, mask;
759 int retry;
760
761 #ifdef MMCCAM
762 ccb = sc->ccb;
763 cmd = &ccb->mmcio.cmd;
764 #else
765 cmd = sc->aw_req->cmd;
766 #endif
767 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CMD)) {
768 device_printf(sc->aw_dev, "%s: cmd %d err %d\n", __func__, cmd->opcode, cmd->error);
769 }
770 if (cmd->error != MMC_ERR_NONE) {
771 /* Reset the FIFO and DMA engines. */
772 mask = AW_MMC_GCTL_FIFO_RST | AW_MMC_GCTL_DMA_RST;
773 val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
774 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val | mask);
775
776 retry = AW_MMC_RESET_RETRY;
777 while (--retry > 0) {
778 if ((AW_MMC_READ_4(sc, AW_MMC_GCTL) &
779 AW_MMC_GCTL_RESET) == 0)
780 break;
781 DELAY(100);
782 }
783 if (retry == 0)
784 device_printf(sc->aw_dev,
785 "timeout resetting DMA/FIFO\n");
786 aw_mmc_update_clock(sc, 1);
787 }
788
789 if (!dumping)
790 callout_stop(&sc->aw_timeoutc);
791 sc->aw_intr = 0;
792 sc->aw_resid = 0;
793 sc->aw_dma_map_err = 0;
794 sc->aw_intr_wait = 0;
795 #ifdef MMCCAM
796 sc->ccb = NULL;
797 ccb->ccb_h.status =
798 (ccb->mmcio.cmd.error == 0 ? CAM_REQ_CMP : CAM_REQ_CMP_ERR);
799 xpt_done(ccb);
800 #else
801 req = sc->aw_req;
802 sc->aw_req = NULL;
803 req->done(req);
804 #endif
805 }
806
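/*
 * Successful completion path: wait for the card to leave the busy state,
 * copy the response registers and verify that all data was transferred.
 */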
807 static void
808 aw_mmc_req_ok(struct aw_mmc_softc *sc)
809 {
810 int timeout;
811 struct mmc_command *cmd;
812 uint32_t status;
813
814 timeout = 1000;
815 while (--timeout > 0) {
816 status = AW_MMC_READ_4(sc, AW_MMC_STAR);
817 if ((status & AW_MMC_STAR_CARD_BUSY) == 0)
818 break;
819 DELAY(1000);
820 }
821 #ifdef MMCCAM
822 cmd = &sc->ccb->mmcio.cmd;
823 #else
824 cmd = sc->aw_req->cmd;
825 #endif
826 if (timeout == 0) {
827 cmd->error = MMC_ERR_FAILED;
828 aw_mmc_req_done(sc);
829 return;
830 }
831 if (cmd->flags & MMC_RSP_PRESENT) {
832 if (cmd->flags & MMC_RSP_136) {
833 cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP3);
834 cmd->resp[1] = AW_MMC_READ_4(sc, AW_MMC_RESP2);
835 cmd->resp[2] = AW_MMC_READ_4(sc, AW_MMC_RESP1);
836 cmd->resp[3] = AW_MMC_READ_4(sc, AW_MMC_RESP0);
837 } else
838 cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP0);
839 }
840 /* Has all of the data been transferred? */
841 if (cmd->data != NULL && (sc->aw_resid << 2) < cmd->data->len)
842 cmd->error = MMC_ERR_FAILED;
843 aw_mmc_req_done(sc);
844 }
845
846 static inline void
847 set_mmc_error(struct aw_mmc_softc *sc, int error_code)
848 {
849 #ifdef MMCCAM
850 sc->ccb->mmcio.cmd.error = error_code;
851 #else
852 sc->aw_req->cmd->error = error_code;
853 #endif
854 }
855
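/* Request watchdog: fail the active request if it did not complete in time. */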
856 static void
857 aw_mmc_timeout(void *arg)
858 {
859 struct aw_mmc_softc *sc;
860
861 sc = (struct aw_mmc_softc *)arg;
862 #ifdef MMCCAM
863 if (sc->ccb != NULL) {
864 #else
865 if (sc->aw_req != NULL) {
866 #endif
867 device_printf(sc->aw_dev, "controller timeout\n");
868 set_mmc_error(sc, MMC_ERR_TIMEOUT);
869 aw_mmc_req_done(sc);
870 } else
871 device_printf(sc->aw_dev,
872 "Spurious timeout - no active request\n");
873 }
874
875 static void
876 aw_mmc_print_error(uint32_t err)
877 {
878 if (err & AW_MMC_INT_RESP_ERR)
879 printf("AW_MMC_INT_RESP_ERR ");
880 if (err & AW_MMC_INT_RESP_CRC_ERR)
881 printf("AW_MMC_INT_RESP_CRC_ERR ");
882 if (err & AW_MMC_INT_DATA_CRC_ERR)
883 printf("AW_MMC_INT_DATA_CRC_ERR ");
884 if (err & AW_MMC_INT_RESP_TIMEOUT)
885 printf("AW_MMC_INT_RESP_TIMEOUT ");
886 if (err & AW_MMC_INT_FIFO_RUN_ERR)
887 printf("AW_MMC_INT_FIFO_RUN_ERR ");
888 if (err & AW_MMC_INT_CMD_BUSY)
889 printf("AW_MMC_INT_CMD_BUSY ");
890 if (err & AW_MMC_INT_DATA_START_ERR)
891 printf("AW_MMC_INT_DATA_START_ERR ");
892 if (err & AW_MMC_INT_DATA_END_BIT_ERR)
893 printf("AW_MMC_INT_DATA_END_BIT_ERR");
894 printf("\n");
895 }
896
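/*
 * Interrupt handler: gather the raw interrupt and IDMAC status, record
 * errors, sync and unload the data map once DMA completes, and finish the
 * request when all of the awaited status bits have been seen.
 */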
897 static void
898 aw_mmc_intr(void *arg)
899 {
900 bus_dmasync_op_t sync_op;
901 struct aw_mmc_softc *sc;
902 struct mmc_data *data;
903 uint32_t idst, imask, rint;
904
905 sc = (struct aw_mmc_softc *)arg;
906 AW_MMC_LOCK(sc);
907 rint = AW_MMC_READ_4(sc, AW_MMC_RISR);
908 idst = AW_MMC_READ_4(sc, AW_MMC_IDST);
909 imask = AW_MMC_READ_4(sc, AW_MMC_IMKR);
910 if (idst == 0 && imask == 0 && rint == 0) {
911 AW_MMC_UNLOCK(sc);
912 return;
913 }
914 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_INT)) {
915 device_printf(sc->aw_dev, "idst: %#x, imask: %#x, rint: %#x\n",
916 idst, imask, rint);
917 }
918 #ifdef MMCCAM
919 if (sc->ccb == NULL) {
920 #else
921 if (sc->aw_req == NULL) {
922 #endif
923 device_printf(sc->aw_dev,
924 "Spurious interrupt - no active request, rint: 0x%08X\n",
925 rint);
926 aw_mmc_print_error(rint);
927 goto end;
928 }
929 if (rint & AW_MMC_INT_ERR_BIT) {
930 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_INT)) {
931 device_printf(sc->aw_dev, "error rint: 0x%08X\n", rint);
932 aw_mmc_print_error(rint);
933 }
934 if (rint & AW_MMC_INT_RESP_TIMEOUT)
935 set_mmc_error(sc, MMC_ERR_TIMEOUT);
936 else
937 set_mmc_error(sc, MMC_ERR_FAILED);
938 aw_mmc_req_done(sc);
939 goto end;
940 }
941 if (idst & AW_MMC_IDST_ERROR) {
942 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_INT))
943 device_printf(sc->aw_dev, "error idst: 0x%08x\n", idst);
944 set_mmc_error(sc, MMC_ERR_FAILED);
945 aw_mmc_req_done(sc);
946 goto end;
947 }
948
949 sc->aw_intr |= rint;
950 #ifdef MMCCAM
951 data = sc->ccb->mmcio.cmd.data;
952 #else
953 data = sc->aw_req->cmd->data;
954 #endif
955 if (data != NULL && (idst & AW_MMC_IDST_COMPLETE) != 0) {
956 if (data->flags & MMC_DATA_WRITE)
957 sync_op = BUS_DMASYNC_POSTWRITE;
958 else
959 sync_op = BUS_DMASYNC_POSTREAD;
960 bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map,
961 sync_op);
962 bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map,
963 BUS_DMASYNC_POSTWRITE);
964 bus_dmamap_unload(sc->aw_dma_buf_tag, sc->aw_dma_buf_map);
965 sc->aw_resid = data->len >> 2;
966 }
967 if ((sc->aw_intr & sc->aw_intr_wait) == sc->aw_intr_wait)
968 aw_mmc_req_ok(sc);
969
970 end:
971 AW_MMC_WRITE_4(sc, AW_MMC_IDST, idst);
972 AW_MMC_WRITE_4(sc, AW_MMC_RISR, rint);
973 AW_MMC_UNLOCK(sc);
974 }
975
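/*
 * Start a command: build the command register value, program the block
 * size and byte count, set up DMA for data transfers, write the command
 * register and arm the request timeout.
 */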
976 static int
977 aw_mmc_request(device_t bus, device_t child, struct mmc_request *req)
978 {
979 int blksz;
980 struct aw_mmc_softc *sc;
981 struct mmc_command *cmd;
982 uint32_t cmdreg, imask;
983 int err;
984
985 sc = device_get_softc(bus);
986
987 AW_MMC_LOCK(sc);
988 #ifdef MMCCAM
989 KASSERT(req == NULL, ("req should be NULL in MMCCAM case!"));
990 /*
991 * For MMCCAM, sc->ccb has been NULL-checked and populated
992 * by aw_mmc_cam_request() already.
993 */
994 cmd = &sc->ccb->mmcio.cmd;
995 #else
996 if (sc->aw_req) {
997 AW_MMC_UNLOCK(sc);
998 return (EBUSY);
999 }
1000 sc->aw_req = req;
1001 cmd = req->cmd;
1002
1003 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CMD)) {
1004 device_printf(sc->aw_dev, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n",
1005 cmd->opcode, cmd->arg, cmd->flags,
1006 cmd->data != NULL ? (unsigned int)cmd->data->len : 0,
1007 cmd->data != NULL ? cmd->data->flags: 0);
1008 }
1009 #endif
1010 cmdreg = AW_MMC_CMDR_LOAD;
1011 imask = AW_MMC_INT_ERR_BIT;
1012 sc->aw_intr_wait = 0;
1013 sc->aw_intr = 0;
1014 sc->aw_resid = 0;
1015 cmd->error = MMC_ERR_NONE;
1016
1017 if (cmd->opcode == MMC_GO_IDLE_STATE)
1018 cmdreg |= AW_MMC_CMDR_SEND_INIT_SEQ;
1019
1020 if (cmd->flags & MMC_RSP_PRESENT)
1021 cmdreg |= AW_MMC_CMDR_RESP_RCV;
1022 if (cmd->flags & MMC_RSP_136)
1023 cmdreg |= AW_MMC_CMDR_LONG_RESP;
1024 if (cmd->flags & MMC_RSP_CRC)
1025 cmdreg |= AW_MMC_CMDR_CHK_RESP_CRC;
1026
1027 if (cmd->data) {
1028 cmdreg |= AW_MMC_CMDR_DATA_TRANS | AW_MMC_CMDR_WAIT_PRE_OVER;
1029
1030 if (cmd->data->flags & MMC_DATA_MULTI) {
1031 cmdreg |= AW_MMC_CMDR_STOP_CMD_FLAG;
1032 imask |= AW_MMC_INT_AUTO_STOP_DONE;
1033 sc->aw_intr_wait |= AW_MMC_INT_AUTO_STOP_DONE;
1034 } else {
1035 sc->aw_intr_wait |= AW_MMC_INT_DATA_OVER;
1036 imask |= AW_MMC_INT_DATA_OVER;
1037 }
1038 if (cmd->data->flags & MMC_DATA_WRITE)
1039 cmdreg |= AW_MMC_CMDR_DIR_WRITE;
1040 #ifdef MMCCAM
1041 if (cmd->data->flags & MMC_DATA_BLOCK_SIZE) {
1042 AW_MMC_WRITE_4(sc, AW_MMC_BKSR, cmd->data->block_size);
1043 AW_MMC_WRITE_4(sc, AW_MMC_BYCR, cmd->data->len);
1044 } else
1045 #endif
1046 {
1047 blksz = min(cmd->data->len, MMC_SECTOR_SIZE);
1048 AW_MMC_WRITE_4(sc, AW_MMC_BKSR, blksz);
1049 AW_MMC_WRITE_4(sc, AW_MMC_BYCR, cmd->data->len);
1050 }
1051 } else {
1052 imask |= AW_MMC_INT_CMD_DONE;
1053 }
1054
1055 /* Enable the interrupts we are interested in */
1056 AW_MMC_WRITE_4(sc, AW_MMC_IMKR, imask);
1057 AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);
1058
1059 /* Enable auto stop if needed */
1060 AW_MMC_WRITE_4(sc, AW_MMC_A12A,
1061 cmdreg & AW_MMC_CMDR_STOP_CMD_FLAG ? 0 : 0xffff);
1062
1063 /* Write the command argument */
1064 AW_MMC_WRITE_4(sc, AW_MMC_CAGR, cmd->arg);
1065
1066 /*
1067 * If there is no data, start the request right away; otherwise prepare
1068 * the DMA transfer first and then start the request.
1069 */
1070 if (cmd->data == NULL) {
1071 AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg | cmd->opcode);
1072 } else {
1073 err = aw_mmc_prepare_dma(sc);
1074 if (err != 0)
1075 device_printf(sc->aw_dev, "prepare_dma failed: %d\n", err);
1076
1077 AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg | cmd->opcode);
1078 }
1079
1080 if (!dumping) {
1081 callout_reset(&sc->aw_timeoutc, sc->aw_timeout * hz,
1082 aw_mmc_timeout, sc);
1083 }
1084 AW_MMC_UNLOCK(sc);
1085
1086 return (0);
1087 }
1088
1089 static int
1090 aw_mmc_read_ivar(device_t bus, device_t child, int which,
1091 uintptr_t *result)
1092 {
1093 struct aw_mmc_softc *sc;
1094
1095 sc = device_get_softc(bus);
1096 switch (which) {
1097 default:
1098 return (EINVAL);
1099 case MMCBR_IVAR_BUS_MODE:
1100 *(int *)result = sc->aw_host.ios.bus_mode;
1101 break;
1102 case MMCBR_IVAR_BUS_WIDTH:
1103 *(int *)result = sc->aw_host.ios.bus_width;
1104 break;
1105 case MMCBR_IVAR_CHIP_SELECT:
1106 *(int *)result = sc->aw_host.ios.chip_select;
1107 break;
1108 case MMCBR_IVAR_CLOCK:
1109 *(int *)result = sc->aw_host.ios.clock;
1110 break;
1111 case MMCBR_IVAR_F_MIN:
1112 *(int *)result = sc->aw_host.f_min;
1113 break;
1114 case MMCBR_IVAR_F_MAX:
1115 *(int *)result = sc->aw_host.f_max;
1116 break;
1117 case MMCBR_IVAR_HOST_OCR:
1118 *(int *)result = sc->aw_host.host_ocr;
1119 break;
1120 case MMCBR_IVAR_MODE:
1121 *(int *)result = sc->aw_host.mode;
1122 break;
1123 case MMCBR_IVAR_OCR:
1124 *(int *)result = sc->aw_host.ocr;
1125 break;
1126 case MMCBR_IVAR_POWER_MODE:
1127 *(int *)result = sc->aw_host.ios.power_mode;
1128 break;
1129 case MMCBR_IVAR_VDD:
1130 *(int *)result = sc->aw_host.ios.vdd;
1131 break;
1132 case MMCBR_IVAR_VCCQ:
1133 *(int *)result = sc->aw_host.ios.vccq;
1134 break;
1135 case MMCBR_IVAR_CAPS:
1136 *(int *)result = sc->aw_host.caps;
1137 break;
1138 case MMCBR_IVAR_TIMING:
1139 *(int *)result = sc->aw_host.ios.timing;
1140 break;
1141 case MMCBR_IVAR_MAX_DATA:
1142 *(int *)result = (sc->aw_mmc_conf->dma_xferlen *
1143 AW_MMC_DMA_SEGS) / MMC_SECTOR_SIZE;
1144 break;
1145 case MMCBR_IVAR_RETUNE_REQ:
1146 *(int *)result = retune_req_none;
1147 break;
1148 }
1149
1150 return (0);
1151 }
1152
1153 static int
1154 aw_mmc_write_ivar(device_t bus, device_t child, int which,
1155 uintptr_t value)
1156 {
1157 struct aw_mmc_softc *sc;
1158
1159 sc = device_get_softc(bus);
1160 switch (which) {
1161 default:
1162 return (EINVAL);
1163 case MMCBR_IVAR_BUS_MODE:
1164 sc->aw_host.ios.bus_mode = value;
1165 break;
1166 case MMCBR_IVAR_BUS_WIDTH:
1167 sc->aw_host.ios.bus_width = value;
1168 break;
1169 case MMCBR_IVAR_CHIP_SELECT:
1170 sc->aw_host.ios.chip_select = value;
1171 break;
1172 case MMCBR_IVAR_CLOCK:
1173 sc->aw_host.ios.clock = value;
1174 break;
1175 case MMCBR_IVAR_MODE:
1176 sc->aw_host.mode = value;
1177 break;
1178 case MMCBR_IVAR_OCR:
1179 sc->aw_host.ocr = value;
1180 break;
1181 case MMCBR_IVAR_POWER_MODE:
1182 sc->aw_host.ios.power_mode = value;
1183 break;
1184 case MMCBR_IVAR_VDD:
1185 sc->aw_host.ios.vdd = value;
1186 break;
1187 case MMCBR_IVAR_VCCQ:
1188 sc->aw_host.ios.vccq = value;
1189 break;
1190 case MMCBR_IVAR_TIMING:
1191 sc->aw_host.ios.timing = value;
1192 break;
1193 /* These are read-only */
1194 case MMCBR_IVAR_CAPS:
1195 case MMCBR_IVAR_HOST_OCR:
1196 case MMCBR_IVAR_F_MIN:
1197 case MMCBR_IVAR_F_MAX:
1198 case MMCBR_IVAR_MAX_DATA:
1199 return (EINVAL);
1200 }
1201
1202 return (0);
1203 }
1204
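/*
 * Gate or ungate the card clock and have the controller latch the new
 * setting by issuing an internal "program clock" command, polling until
 * the command load bit clears.
 */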
1205 static int
1206 aw_mmc_update_clock(struct aw_mmc_softc *sc, uint32_t clkon)
1207 {
1208 uint32_t reg;
1209 int retry;
1210
1211 reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
1212 reg &= ~(AW_MMC_CKCR_ENB | AW_MMC_CKCR_LOW_POWER |
1213 AW_MMC_CKCR_MASK_DATA0);
1214
1215 if (clkon)
1216 reg |= AW_MMC_CKCR_ENB;
1217 if (sc->aw_mmc_conf->mask_data0)
1218 reg |= AW_MMC_CKCR_MASK_DATA0;
1219
1220 AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);
1221
1222 reg = AW_MMC_CMDR_LOAD | AW_MMC_CMDR_PRG_CLK |
1223 AW_MMC_CMDR_WAIT_PRE_OVER;
1224 AW_MMC_WRITE_4(sc, AW_MMC_CMDR, reg);
1225 retry = 0xfffff;
1226
1227 while (reg & AW_MMC_CMDR_LOAD && --retry > 0) {
1228 reg = AW_MMC_READ_4(sc, AW_MMC_CMDR);
1229 DELAY(10);
1230 }
1231 AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);
1232
1233 if (reg & AW_MMC_CMDR_LOAD) {
1234 device_printf(sc->aw_dev, "timeout updating clock\n");
1235 return (ETIMEDOUT);
1236 }
1237
1238 if (sc->aw_mmc_conf->mask_data0) {
1239 reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
1240 reg &= ~AW_MMC_CKCR_MASK_DATA0;
1241 AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);
1242 }
1243
1244 return (0);
1245 }
1246
1247 #ifndef MMCCAM
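/* Switch the card signalling voltage (vqmmc regulator) to 1.8 V or 3.3 V. */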
1248 static int
1249 aw_mmc_switch_vccq(device_t bus, device_t child)
1250 {
1251 struct aw_mmc_softc *sc;
1252 int uvolt, err;
1253
1254 sc = device_get_softc(bus);
1255
1256 if (sc->mmc_helper.vqmmc_supply == NULL)
1257 return EOPNOTSUPP;
1258
1259 switch (sc->aw_host.ios.vccq) {
1260 case vccq_180:
1261 uvolt = 1800000;
1262 break;
1263 case vccq_330:
1264 uvolt = 3300000;
1265 break;
1266 default:
1267 return EINVAL;
1268 }
1269
1270 err = regulator_set_voltage(sc->mmc_helper.vqmmc_supply, uvolt, uvolt);
1271 if (err != 0) {
1272 device_printf(sc->aw_dev,
1273 "Cannot set vqmmc to %d<->%d\n",
1274 uvolt,
1275 uvolt);
1276 return (err);
1277 }
1278
1279 return (0);
1280 }
1281 #endif
1282
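/*
 * Apply the requested host settings: bus width, power state (regulators
 * and power sequence), DDR mode and the card clock frequency and divider.
 */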
1283 static int
1284 aw_mmc_update_ios(device_t bus, device_t child)
1285 {
1286 int error;
1287 struct aw_mmc_softc *sc;
1288 struct mmc_ios *ios;
1289 unsigned int clock;
1290 uint32_t reg, div = 1;
1291 int reg_status;
1292 int rv;
1293
1294 sc = device_get_softc(bus);
1295
1296 ios = &sc->aw_host.ios;
1297
1298 /* Set the bus width. */
1299 switch (ios->bus_width) {
1300 case bus_width_1:
1301 AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR1);
1302 break;
1303 case bus_width_4:
1304 AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR4);
1305 break;
1306 case bus_width_8:
1307 AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR8);
1308 break;
1309 }
1310
1311 switch (ios->power_mode) {
1312 case power_on:
1313 break;
1314 case power_off:
1315 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CARD))
1316 device_printf(sc->aw_dev, "Powering down sd/mmc\n");
1317
1318 if (sc->mmc_helper.vmmc_supply) {
1319 rv = regulator_status(sc->mmc_helper.vmmc_supply, &reg_status);
1320 if (rv == 0 && reg_status == REGULATOR_STATUS_ENABLED)
1321 regulator_disable(sc->mmc_helper.vmmc_supply);
1322 }
1323 if (sc->mmc_helper.vqmmc_supply) {
1324 rv = regulator_status(sc->mmc_helper.vqmmc_supply, &reg_status);
1325 if (rv == 0 && reg_status == REGULATOR_STATUS_ENABLED)
1326 regulator_disable(sc->mmc_helper.vqmmc_supply);
1327 }
1328
1329 if (sc->mmc_helper.mmc_pwrseq)
1330 MMC_PWRSEQ_SET_POWER(sc->mmc_helper.mmc_pwrseq, false);
1331
1332 aw_mmc_reset(sc);
1333 break;
1334 case power_up:
1335 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CARD))
1336 device_printf(sc->aw_dev, "Powering up sd/mmc\n");
1337
1338 if (sc->mmc_helper.vmmc_supply) {
1339 rv = regulator_status(sc->mmc_helper.vmmc_supply, &reg_status);
1340 if (rv == 0 && reg_status != REGULATOR_STATUS_ENABLED)
1341 regulator_enable(sc->mmc_helper.vmmc_supply);
1342 }
1343 if (sc->mmc_helper.vqmmc_supply) {
1344 rv = regulator_status(sc->mmc_helper.vqmmc_supply, &reg_status);
1345 if (rv == 0 && reg_status != REGULATOR_STATUS_ENABLED)
1346 regulator_enable(sc->mmc_helper.vqmmc_supply);
1347 }
1348
1349 if (sc->mmc_helper.mmc_pwrseq)
1350 MMC_PWRSEQ_SET_POWER(sc->mmc_helper.mmc_pwrseq, true);
1351 aw_mmc_init(sc);
1352 break;
1353 };
1354
1355 /* Enable ddr mode if needed */
1356 reg = AW_MMC_READ_4(sc, AW_MMC_GCTL);
1357 if (ios->timing == bus_timing_uhs_ddr50 ||
1358 ios->timing == bus_timing_mmc_ddr52)
1359 reg |= AW_MMC_GCTL_DDR_MOD_SEL;
1360 else
1361 reg &= ~AW_MMC_GCTL_DDR_MOD_SEL;
1362 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg);
1363
1364 if (ios->clock && ios->clock != sc->aw_clock) {
1365 sc->aw_clock = clock = ios->clock;
1366
1367 /* Disable clock */
1368 error = aw_mmc_update_clock(sc, 0);
1369 if (error != 0)
1370 return (error);
1371
1372 if (ios->timing == bus_timing_mmc_ddr52 &&
1373 (sc->aw_mmc_conf->new_timing ||
1374 ios->bus_width == bus_width_8)) {
1375 div = 2;
1376 clock <<= 1;
1377 }
1378
1379 /* Reset the divider. */
1380 reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
1381 reg &= ~AW_MMC_CKCR_DIV;
1382 reg |= div - 1;
1383 AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);
1384
1385 /* New timing mode if needed */
1386 if (sc->aw_mmc_conf->new_timing) {
1387 reg = AW_MMC_READ_4(sc, AW_MMC_NTSR);
1388 reg |= AW_MMC_NTSR_MODE_SELECT;
1389 AW_MMC_WRITE_4(sc, AW_MMC_NTSR, reg);
1390 }
1391
1392 /* Set the MMC clock. */
1393 error = clk_disable(sc->aw_clk_mmc);
1394 if (error != 0 && bootverbose)
1395 device_printf(sc->aw_dev,
1396 "failed to disable mmc clock: %d\n", error);
1397 error = clk_set_freq(sc->aw_clk_mmc, clock,
1398 CLK_SET_ROUND_DOWN);
1399 if (error != 0) {
1400 device_printf(sc->aw_dev,
1401 "failed to set frequency to %u Hz: %d\n",
1402 clock, error);
1403 return (error);
1404 }
1405 error = clk_enable(sc->aw_clk_mmc);
1406 if (error != 0 && bootverbose)
1407 device_printf(sc->aw_dev,
1408 "failed to re-enable mmc clock: %d\n", error);
1409
1410 if (sc->aw_mmc_conf->can_calibrate)
1411 AW_MMC_WRITE_4(sc, AW_MMC_SAMP_DL, AW_MMC_SAMP_DL_SW_EN);
1412
1413 /* Enable clock. */
1414 error = aw_mmc_update_clock(sc, 1);
1415 if (error != 0)
1416 return (error);
1417 }
1418
1419 return (0);
1420 }
1421
1422 #ifndef MMCCAM
1423 static int
1424 aw_mmc_get_ro(device_t bus, device_t child)
1425 {
1426 struct aw_mmc_softc *sc;
1427
1428 sc = device_get_softc(bus);
1429
1430 return (mmc_fdt_gpio_get_readonly(&sc->mmc_helper));
1431 }
1432
1433 static int
1434 aw_mmc_acquire_host(device_t bus, device_t child)
1435 {
1436 struct aw_mmc_softc *sc;
1437 int error;
1438
1439 sc = device_get_softc(bus);
1440 AW_MMC_LOCK(sc);
1441 while (sc->aw_bus_busy) {
1442 error = msleep(sc, &sc->aw_mtx, PCATCH, "mmchw", 0);
1443 if (error != 0) {
1444 AW_MMC_UNLOCK(sc);
1445 return (error);
1446 }
1447 }
1448 sc->aw_bus_busy++;
1449 AW_MMC_UNLOCK(sc);
1450
1451 return (0);
1452 }
1453
1454 static int
1455 aw_mmc_release_host(device_t bus, device_t child)
1456 {
1457 struct aw_mmc_softc *sc;
1458
1459 sc = device_get_softc(bus);
1460 AW_MMC_LOCK(sc);
1461 sc->aw_bus_busy--;
1462 wakeup(sc);
1463 AW_MMC_UNLOCK(sc);
1464
1465 return (0);
1466 }
1467 #endif
1468
1469 static device_method_t aw_mmc_methods[] = {
1470 /* Device interface */
1471 DEVMETHOD(device_probe, aw_mmc_probe),
1472 DEVMETHOD(device_attach, aw_mmc_attach),
1473 DEVMETHOD(device_detach, aw_mmc_detach),
1474
1475 /* Bus interface */
1476 DEVMETHOD(bus_read_ivar, aw_mmc_read_ivar),
1477 DEVMETHOD(bus_write_ivar, aw_mmc_write_ivar),
1478 DEVMETHOD(bus_add_child, bus_generic_add_child),
1479
1480 #ifndef MMCCAM
1481 /* MMC bridge interface */
1482 DEVMETHOD(mmcbr_update_ios, aw_mmc_update_ios),
1483 DEVMETHOD(mmcbr_request, aw_mmc_request),
1484 DEVMETHOD(mmcbr_get_ro, aw_mmc_get_ro),
1485 DEVMETHOD(mmcbr_switch_vccq, aw_mmc_switch_vccq),
1486 DEVMETHOD(mmcbr_acquire_host, aw_mmc_acquire_host),
1487 DEVMETHOD(mmcbr_release_host, aw_mmc_release_host),
1488 #endif
1489
1490 #ifdef MMCCAM
1491 /* MMCCAM interface */
1492 DEVMETHOD(mmc_sim_get_tran_settings, aw_mmc_get_tran_settings),
1493 DEVMETHOD(mmc_sim_set_tran_settings, aw_mmc_set_tran_settings),
1494 DEVMETHOD(mmc_sim_cam_request, aw_mmc_cam_request),
1495 DEVMETHOD(mmc_sim_cam_poll, aw_mmc_cam_poll),
1496 #endif
1497
1498 DEVMETHOD_END
1499 };
1500
1501 static driver_t aw_mmc_driver = {
1502 "aw_mmc",
1503 aw_mmc_methods,
1504 sizeof(struct aw_mmc_softc),
1505 };
1506
1507 DRIVER_MODULE(aw_mmc, simplebus, aw_mmc_driver, NULL, NULL);
1508 #ifndef MMCCAM
1509 MMC_DECLARE_BRIDGE(aw_mmc);
1510 #endif
1511 SIMPLEBUS_PNP_INFO(compat_data);
1512