1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Copyright (C) 2018 Oleksij Rempel <linux@rempel-privat.de>
4 *
5 * Driver for Alcor Micro AU6601 and AU6621 controllers
6 */
7
8 /* Note: this driver was created without any documentation. Based
9 * on sniffing, testing and in some cases mimic of original driver.
10 * As soon as some one with documentation or more experience in SD/MMC, or
11 * reverse engineering then me, please review this driver and question every
12 * thing what I did. 2018 Oleksij Rempel <linux@rempel-privat.de>
13 */
14
15 #include <linux/delay.h>
16 #include <linux/pci.h>
17 #include <linux/module.h>
18 #include <linux/io.h>
19 #include <linux/pm.h>
20 #include <linux/irq.h>
21 #include <linux/interrupt.h>
22 #include <linux/platform_device.h>
23 #include <linux/string_choices.h>
24
25 #include <linux/mmc/host.h>
26 #include <linux/mmc/mmc.h>
27
28 #include <linux/alcor_pci.h>
29
/* DMA mapping state tracked in mmc_data->host_cookie. */
enum alcor_cookie {
	COOKIE_UNMAPPED,
	COOKIE_PRE_MAPPED,
	COOKIE_MAPPED,
};
35
/* One selectable PLL clock source and its valid divider range. */
struct alcor_pll_conf {
	unsigned int clk_src_freq;	/* source frequency in Hz */
	unsigned int clk_src_reg;	/* register value selecting this source */
	unsigned int min_div;
	unsigned int max_div;
};
42
/* Per-host driver state, stored as mmc_host private data. */
struct alcor_sdmmc_host {
	struct device *dev;
	struct alcor_pci_priv *alcor_pci;	/* shared PCI-level register accessors */

	/* in-flight request/command/data; serialized by cmd_mutex */
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_data *data;
	unsigned int dma_on:1;		/* set while a DMA transfer is active */

	struct mutex cmd_mutex;

	struct delayed_work timeout_work;	/* watchdog for lost HW interrupts */

	struct sg_mapping_iter sg_miter; /* SG state for PIO */
	struct scatterlist *sg;		/* next segment for single-page DMA */
	unsigned int blocks;		/* remaining PIO blocks */
	int sg_count;			/* remaining DMA segments */

	u32 irq_status_sd;		/* status latched for the IRQ thread */
	unsigned char cur_power_mode;	/* last MMC_POWER_* applied */
};
64
static const struct alcor_pll_conf alcor_pll_cfg[] = {
	/* clk src freq (Hz), CLK src reg, min div, max div */
	{ 31250000, AU6601_CLK_31_25_MHZ, 1, 511},
	{ 48000000, AU6601_CLK_48_MHZ,    1, 511},
	{125000000, AU6601_CLK_125_MHZ,   1, 511},
	{384000000, AU6601_CLK_384_MHZ,   1, 511},
};
72
/* Read-modify-write helper for an 8-bit controller register:
 * clears the bits in @clear, then sets the bits in @set.
 */
static inline void alcor_rmw8(struct alcor_sdmmc_host *host, unsigned int addr,
			      u8 clear, u8 set)
{
	struct alcor_pci_priv *priv = host->alcor_pci;
	u32 reg;

	reg = alcor_read8(priv, addr);
	reg = (reg & ~clear) | set;
	alcor_write8(priv, reg, addr);
}
84
85 /* As soon as irqs are masked, some status updates may be missed.
86 * Use this with care.
87 */
alcor_mask_sd_irqs(struct alcor_sdmmc_host * host)88 static inline void alcor_mask_sd_irqs(struct alcor_sdmmc_host *host)
89 {
90 struct alcor_pci_priv *priv = host->alcor_pci;
91
92 alcor_write32(priv, 0, AU6601_REG_INT_ENABLE);
93 }
94
alcor_unmask_sd_irqs(struct alcor_sdmmc_host * host)95 static inline void alcor_unmask_sd_irqs(struct alcor_sdmmc_host *host)
96 {
97 struct alcor_pci_priv *priv = host->alcor_pci;
98
99 alcor_write32(priv, AU6601_INT_CMD_MASK | AU6601_INT_DATA_MASK |
100 AU6601_INT_CARD_INSERT | AU6601_INT_CARD_REMOVE |
101 AU6601_INT_OVER_CURRENT_ERR,
102 AU6601_REG_INT_ENABLE);
103 }
104
/* Reset the controller state machine(s) selected by @val
 * (AU6601_RESET_CMD and/or AU6601_RESET_DATA) and poll until the
 * hardware clears the bit(s) again.
 */
static void alcor_reset(struct alcor_sdmmc_host *host, u8 val)
{
	struct alcor_pci_priv *priv = host->alcor_pci;
	int i;

	alcor_write8(priv, val | AU6601_BUF_CTRL_RESET,
		     AU6601_REG_SW_RESET);
	/* wait up to 100 * 50us = 5ms for the reset bits to self-clear */
	for (i = 0; i < 100; i++) {
		if (!(alcor_read8(priv, AU6601_REG_SW_RESET) & val))
			return;
		udelay(50);
	}
	dev_err(host->dev, "%s: timeout\n", __func__);
}
119
/*
 * Perform DMA I/O of a single page.
 *
 * Programs the address of the current scatterlist segment into the
 * SDMA address register and advances to the next segment. Called once
 * to kick off a transfer and again from the IRQ path on each
 * AU6601_INT_DMA_END.
 */
static void alcor_data_set_dma(struct alcor_sdmmc_host *host)
{
	struct alcor_pci_priv *priv = host->alcor_pci;
	u32 addr;

	/* all segments already queued */
	if (!host->sg_count)
		return;

	if (!host->sg) {
		dev_err(host->dev, "have blocks, but no SG\n");
		return;
	}

	if (!sg_dma_len(host->sg)) {
		dev_err(host->dev, "DMA SG len == 0\n");
		return;
	}


	/* NOTE(review): DMA address is truncated to 32 bits - presumably
	 * the controller only supports 32-bit addressing; confirm.
	 */
	addr = (u32)sg_dma_address(host->sg);

	alcor_write32(priv, addr, AU6601_REG_SDMA_ADDR);
	host->sg = sg_next(host->sg);
	host->sg_count--;
}
148
/* Start (or continue, for PIO) the data phase of the current request. */
static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host)
{
	struct alcor_pci_priv *priv = host->alcor_pci;
	struct mmc_data *data = host->data;
	u8 ctrl = 0;

	if (data->flags & MMC_DATA_WRITE)
		ctrl |= AU6601_DATA_WRITE;

	if (data->host_cookie == COOKIE_MAPPED) {
		/*
		 * For DMA transfers, this function is called just once,
		 * at the start of the operation. The hardware can only
		 * perform DMA I/O on a single page at a time, so here
		 * we kick off the transfer with the first page, and expect
		 * subsequent pages to be transferred upon IRQ events
		 * indicating that the single-page DMA was completed.
		 */
		alcor_data_set_dma(host);
		ctrl |= AU6601_DATA_DMA_MODE;
		host->dma_on = 1;
		/* each mapped segment is 0x1000 bytes (enforced in
		 * alcor_pre_req()), so this is the total byte count
		 */
		alcor_write32(priv, data->sg_count * 0x1000,
			      AU6601_REG_BLOCK_SIZE);
	} else {
		/*
		 * For PIO transfers, we break down each operation
		 * into several sector-sized transfers. When one sector has
		 * complete, the IRQ handler will call this function again
		 * to kick off the transfer of the next sector.
		 */
		alcor_write32(priv, data->blksz, AU6601_REG_BLOCK_SIZE);
	}

	alcor_write8(priv, ctrl | AU6601_DATA_START_XFER,
		     AU6601_DATA_XFER_CTRL);
}
185
/* Move one block between the controller FIFO and the SG buffer by PIO.
 * @read: true when reading from the card (FIFO -> memory).
 */
static void alcor_trf_block_pio(struct alcor_sdmmc_host *host, bool read)
{
	struct alcor_pci_priv *priv = host->alcor_pci;
	size_t blksize, len;
	u8 *buf;

	if (!host->blocks)
		return;

	/* buffer-ready IRQs are not expected while in DMA mode */
	if (host->dma_on) {
		dev_err(host->dev, "configured DMA but got PIO request.\n");
		return;
	}

	if (!!(host->data->flags & MMC_DATA_READ) != read) {
		dev_err(host->dev, "got unexpected direction %i != %i\n",
			!!(host->data->flags & MMC_DATA_READ), read);
	}

	if (!sg_miter_next(&host->sg_miter))
		return;

	blksize = host->data->blksz;
	len = min(host->sg_miter.length, blksize);

	dev_dbg(host->dev, "PIO, %s block size: 0x%zx\n",
		str_read_write(read), blksize);

	host->sg_miter.consumed = len;
	host->blocks--;

	buf = host->sg_miter.addr;

	/* word-at-a-time FIFO access; len is assumed 4-byte aligned */
	if (read)
		ioread32_rep(priv->iobase + AU6601_REG_BUFFER, buf, len >> 2);
	else
		iowrite32_rep(priv->iobase + AU6601_REG_BUFFER, buf, len >> 2);

	sg_miter_stop(&host->sg_miter);
}
226
alcor_prepare_sg_miter(struct alcor_sdmmc_host * host)227 static void alcor_prepare_sg_miter(struct alcor_sdmmc_host *host)
228 {
229 unsigned int flags = SG_MITER_ATOMIC;
230 struct mmc_data *data = host->data;
231
232 if (data->flags & MMC_DATA_READ)
233 flags |= SG_MITER_TO_SG;
234 else
235 flags |= SG_MITER_FROM_SG;
236 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
237 }
238
/* Record the data portion of @cmd in host state and prepare the
 * transfer machinery (SG iterator for PIO; nothing extra for DMA,
 * which was mapped in alcor_pre_req()).
 */
static void alcor_prepare_data(struct alcor_sdmmc_host *host,
			       struct mmc_command *cmd)
{
	struct alcor_pci_priv *priv = host->alcor_pci;
	struct mmc_data *data = cmd->data;

	if (!data)
		return;


	host->data = data;
	host->data->bytes_xfered = 0;
	host->blocks = data->blocks;
	host->sg = data->sg;
	host->sg_count = data->sg_count;
	dev_dbg(host->dev, "prepare DATA: sg %i, blocks: %i\n",
		host->sg_count, host->blocks);

	if (data->host_cookie != COOKIE_MAPPED)
		alcor_prepare_sg_miter(host);

	/* clear any stale transfer control state */
	alcor_write8(priv, 0, AU6601_DATA_XFER_CTRL);
}
262
/* Program and start an SD/MMC command on the controller.
 * @set_timeout: arm the software watchdog (skipped for the internal
 * stop command sent from alcor_finish_data()).
 */
static void alcor_send_cmd(struct alcor_sdmmc_host *host,
			   struct mmc_command *cmd, bool set_timeout)
{
	struct alcor_pci_priv *priv = host->alcor_pci;
	unsigned long timeout = 0;
	u8 ctrl = 0;

	host->cmd = cmd;
	alcor_prepare_data(host, cmd);

	dev_dbg(host->dev, "send CMD. opcode: 0x%02x, arg; 0x%08x\n",
		cmd->opcode, cmd->arg);
	/* NOTE(review): 0x40 ORed into the opcode mimics the original
	 * vendor driver; presumably the SD start/transmission bits.
	 */
	alcor_write8(priv, cmd->opcode | 0x40, AU6601_REG_CMD_OPCODE);
	alcor_write32be(priv, cmd->arg, AU6601_REG_CMD_ARG);

	/* translate the MMC response type to controller flags */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		ctrl = AU6601_CMD_NO_RESP;
		break;
	case MMC_RSP_R1:
		ctrl = AU6601_CMD_6_BYTE_CRC;
		break;
	case MMC_RSP_R1B:
		ctrl = AU6601_CMD_6_BYTE_CRC | AU6601_CMD_STOP_WAIT_RDY;
		break;
	case MMC_RSP_R2:
		ctrl = AU6601_CMD_17_BYTE_CRC;
		break;
	case MMC_RSP_R3:
		ctrl = AU6601_CMD_6_BYTE_WO_CRC;
		break;
	default:
		dev_err(host->dev, "%s: cmd->flag (0x%02x) is not valid\n",
			mmc_hostname(mmc_from_priv(host)), mmc_resp_type(cmd));
		break;
	}

	if (set_timeout) {
		/* busy_timeout is only meaningful for busy-wait commands
		 * without data; otherwise fall back to a 10s watchdog
		 */
		if (!cmd->data && cmd->busy_timeout)
			timeout = cmd->busy_timeout;
		else
			timeout = 10000;

		schedule_delayed_work(&host->timeout_work,
				      msecs_to_jiffies(timeout));
	}

	dev_dbg(host->dev, "xfer ctrl: 0x%02x; timeout: %lu\n", ctrl, timeout);
	alcor_write8(priv, ctrl | AU6601_CMD_START_XFER,
		     AU6601_CMD_XFER_CTRL);
}
314
/* Finish the current request and hand it back to the MMC core.
 * @cancel_timeout: also cancel the pending watchdog work (false when
 * called from the watchdog itself).
 */
static void alcor_request_complete(struct alcor_sdmmc_host *host,
				   bool cancel_timeout)
{
	struct mmc_request *mrq = host->mrq;

	/*
	 * If this work gets rescheduled while running, it will
	 * be run again afterwards but without any active request.
	 */
	if (!mrq)
		return;

	if (cancel_timeout)
		cancel_delayed_work(&host->timeout_work);

	/* drop all per-request state before notifying the core */
	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;
	host->dma_on = 0;

	mmc_request_done(mmc_from_priv(host), mrq);
}
339
/* Complete the data phase: account transferred bytes, issue CMD12 if
 * required, then finish the request.
 */
static void alcor_finish_data(struct alcor_sdmmc_host *host)
{
	struct mmc_data *data;

	data = host->data;
	host->data = NULL;
	host->dma_on = 0;

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !host->mrq->sbc)) {

		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error)
			alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA);

		/* the stop command completes via the normal IRQ path */
		alcor_unmask_sd_irqs(host);
		alcor_send_cmd(host, data->stop, false);
		return;
	}

	alcor_request_complete(host, 1);
}
383
/* Handle error interrupt bits: flag the active command/data with an
 * appropriate error, reset the controller and complete the request.
 */
static void alcor_err_irq(struct alcor_sdmmc_host *host, u32 intmask)
{
	dev_dbg(host->dev, "ERR IRQ %x\n", intmask);

	if (host->cmd) {
		if (intmask & AU6601_INT_CMD_TIMEOUT_ERR)
			host->cmd->error = -ETIMEDOUT;
		else
			host->cmd->error = -EILSEQ;
	}

	if (host->data) {
		if (intmask & AU6601_INT_DATA_TIMEOUT_ERR)
			host->data->error = -ETIMEDOUT;
		else
			host->data->error = -EILSEQ;

		host->data->bytes_xfered = 0;
	}

	/* controller state machines need a reset after any error */
	alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA);
	alcor_request_complete(host, 1);
}
407
/* Hard-IRQ (fast path) handling of command-end events.
 * Returns true when the event was fully handled here, false when the
 * threaded handler must take over (error or request completion).
 */
static int alcor_cmd_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
{
	struct alcor_pci_priv *priv = host->alcor_pci;

	intmask &= AU6601_INT_CMD_END;

	/* no command event - nothing to do on the fast path */
	if (!intmask)
		return true;

	/* got CMD_END but no CMD is in progress, wake thread an process the
	 * error
	 */
	if (!host->cmd)
		return false;

	if (host->cmd->flags & MMC_RSP_PRESENT) {
		struct mmc_command *cmd = host->cmd;

		cmd->resp[0] = alcor_read32be(priv, AU6601_REG_CMD_RSP0);
		dev_dbg(host->dev, "RSP0: 0x%04x\n", cmd->resp[0]);
		if (host->cmd->flags & MMC_RSP_136) {
			cmd->resp[1] =
				alcor_read32be(priv, AU6601_REG_CMD_RSP1);
			cmd->resp[2] =
				alcor_read32be(priv, AU6601_REG_CMD_RSP2);
			cmd->resp[3] =
				alcor_read32be(priv, AU6601_REG_CMD_RSP3);
			dev_dbg(host->dev, "RSP1,2,3: 0x%04x 0x%04x 0x%04x\n",
				cmd->resp[1], cmd->resp[2], cmd->resp[3]);
		}

	}

	host->cmd->error = 0;

	/* Processed actual command. */
	if (!host->data)
		return false;

	/* command had data attached - start the data phase now */
	alcor_trigger_data_transfer(host);
	host->cmd = NULL;
	return true;
}
451
/* Threaded-IRQ (slow path) handling of command-end events that the
 * fast path declined.
 */
static void alcor_cmd_irq_thread(struct alcor_sdmmc_host *host, u32 intmask)
{
	intmask &= AU6601_INT_CMD_END;

	if (!intmask)
		return;

	if (!host->cmd && intmask & AU6601_INT_CMD_END) {
		dev_dbg(host->dev, "Got command interrupt 0x%08x even though no command operation was in progress.\n",
			intmask);
	}

	/* Processed actual command. */
	if (!host->data)
		alcor_request_complete(host, 1);
	else
		alcor_trigger_data_transfer(host);
	host->cmd = NULL;
}
471
/* Hard-IRQ (fast path) handling of data events.
 * Returns 1 when handled (or nothing to do), 0 when the threaded
 * handler must finish the data phase or deal with an error.
 */
static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
{
	u32 tmp;

	intmask &= AU6601_INT_DATA_MASK;

	/* nothing here to do */
	if (!intmask)
		return 1;

	/* we was too fast and got DATA_END after it was processed?
	 * lets ignore it for now.
	 */
	if (!host->data && intmask == AU6601_INT_DATA_END)
		return 1;

	/* looks like an error, so lets handle it. */
	if (!host->data)
		return 0;

	tmp = intmask & (AU6601_INT_READ_BUF_RDY | AU6601_INT_WRITE_BUF_RDY
			 | AU6601_INT_DMA_END);
	switch (tmp) {
	case 0:
		break;
	case AU6601_INT_READ_BUF_RDY:
		/* FIFO has a block ready to be read out (PIO) */
		alcor_trf_block_pio(host, true);
		return 1;
	case AU6601_INT_WRITE_BUF_RDY:
		/* FIFO can accept the next block (PIO) */
		alcor_trf_block_pio(host, false);
		return 1;
	case AU6601_INT_DMA_END:
		if (!host->sg_count)
			break;

		/* queue the next single-page DMA segment */
		alcor_data_set_dma(host);
		break;
	default:
		dev_err(host->dev, "Got READ_BUF_RDY and WRITE_BUF_RDY at same time\n");
		break;
	}

	if (intmask & AU6601_INT_DATA_END) {
		if (!host->dma_on && host->blocks) {
			/* more PIO blocks remain - keep the transfer going */
			alcor_trigger_data_transfer(host);
			return 1;
		} else {
			/* transfer finished - thread must complete it */
			return 0;
		}
	}

	return 1;
}
525
/* Threaded-IRQ (slow path) handling of data events: spurious-event
 * recovery and data-phase completion.
 */
static void alcor_data_irq_thread(struct alcor_sdmmc_host *host, u32 intmask)
{
	intmask &= AU6601_INT_DATA_MASK;

	if (!intmask)
		return;

	if (!host->data) {
		dev_dbg(host->dev, "Got data interrupt 0x%08x even though no data operation was in progress.\n",
			intmask);
		alcor_reset(host, AU6601_RESET_DATA);
		return;
	}

	/* let the fast-path logic run first; bail out if it handled it */
	if (alcor_data_irq_done(host, intmask))
		return;

	if ((intmask & AU6601_INT_DATA_END) || !host->blocks ||
	    (host->dma_on && !host->sg_count))
		alcor_finish_data(host);
}
547
/* Handle a card insert/remove event: fail any in-flight request with
 * -ENOMEDIUM and ask the MMC core to rescan.
 */
static void alcor_cd_irq(struct alcor_sdmmc_host *host, u32 intmask)
{
	dev_dbg(host->dev, "card %s\n",
		intmask & AU6601_INT_CARD_REMOVE ? "removed" : "inserted");

	if (host->mrq) {
		dev_dbg(host->dev, "cancel all pending tasks.\n");

		if (host->data)
			host->data->error = -ENOMEDIUM;

		if (host->cmd)
			host->cmd->error = -ENOMEDIUM;
		else
			host->mrq->cmd->error = -ENOMEDIUM;

		alcor_request_complete(host, 1);
	}

	mmc_detect_change(mmc_from_priv(host), msecs_to_jiffies(1));
}
569
/* Threaded IRQ handler: processes the status word latched by
 * alcor_irq() under cmd_mutex, then re-enables the SD interrupts that
 * the hard handler masked.
 */
static irqreturn_t alcor_irq_thread(int irq, void *d)
{
	struct alcor_sdmmc_host *host = d;
	irqreturn_t ret = IRQ_HANDLED;
	u32 intmask, tmp;

	mutex_lock(&host->cmd_mutex);

	intmask = host->irq_status_sd;

	/* some thing bad */
	if (unlikely(!intmask || AU6601_INT_ALL_MASK == intmask)) {
		dev_dbg(host->dev, "unexpected IRQ: 0x%04x\n", intmask);
		ret = IRQ_NONE;
		goto exit;
	}

	tmp = intmask & (AU6601_INT_CMD_MASK | AU6601_INT_DATA_MASK);
	if (tmp) {
		if (tmp & AU6601_INT_ERROR_MASK)
			alcor_err_irq(host, tmp);
		else {
			alcor_cmd_irq_thread(host, tmp);
			alcor_data_irq_thread(host, tmp);
		}
		intmask &= ~(AU6601_INT_CMD_MASK | AU6601_INT_DATA_MASK);
	}

	if (intmask & (AU6601_INT_CARD_INSERT | AU6601_INT_CARD_REMOVE)) {
		alcor_cd_irq(host, intmask);
		intmask &= ~(AU6601_INT_CARD_INSERT | AU6601_INT_CARD_REMOVE);
	}

	if (intmask & AU6601_INT_OVER_CURRENT_ERR) {
		dev_warn(host->dev,
			 "warning: over current detected!\n");
		intmask &= ~AU6601_INT_OVER_CURRENT_ERR;
	}

	if (intmask)
		dev_dbg(host->dev, "got not handled IRQ: 0x%04x\n", intmask);

exit:
	mutex_unlock(&host->cmd_mutex);
	/* undo the masking done in alcor_irq() */
	alcor_unmask_sd_irqs(host);
	return ret;
}
617
618
/* Hard IRQ handler: acknowledge the status, try the lock-free fast
 * path for simple command/data events, otherwise latch the status and
 * defer to the threaded handler with SD interrupts masked.
 */
static irqreturn_t alcor_irq(int irq, void *d)
{
	struct alcor_sdmmc_host *host = d;
	struct alcor_pci_priv *priv = host->alcor_pci;
	u32 status, tmp;
	irqreturn_t ret;
	int cmd_done, data_done;

	status = alcor_read32(priv, AU6601_REG_INT_STATUS);
	if (!status)
		return IRQ_NONE;

	/* write-to-clear the bits we are about to handle */
	alcor_write32(priv, status, AU6601_REG_INT_STATUS);

	tmp = status & (AU6601_INT_READ_BUF_RDY | AU6601_INT_WRITE_BUF_RDY
			| AU6601_INT_DATA_END | AU6601_INT_DMA_END
			| AU6601_INT_CMD_END);
	if (tmp == status) {
		cmd_done = alcor_cmd_irq_done(host, tmp);
		data_done = alcor_data_irq_done(host, tmp);
		/* use fast path for simple tasks */
		if (cmd_done && data_done) {
			ret = IRQ_HANDLED;
			goto alcor_irq_done;
		}
	}

	/* slow path: thread re-enables IRQs when done */
	host->irq_status_sd = status;
	ret = IRQ_WAKE_THREAD;
	alcor_mask_sd_irqs(host);
alcor_irq_done:
	return ret;
}
652
/* Pick the PLL source/divider combination closest to @clock (Hz) and
 * program it; @clock == 0 gates the card clock entirely.
 */
static void alcor_set_clock(struct alcor_sdmmc_host *host, unsigned int clock)
{
	struct alcor_pci_priv *priv = host->alcor_pci;
	int i, diff = 0x7fffffff, tmp_clock = 0;
	u16 clk_src = 0;
	u8 clk_div = 0;

	if (clock == 0) {
		alcor_write16(priv, 0, AU6601_CLK_SELECT);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(alcor_pll_cfg); i++) {
		unsigned int tmp_div, tmp_diff;
		const struct alcor_pll_conf *cfg = &alcor_pll_cfg[i];

		/* round the divider up so we never exceed the target rate */
		tmp_div = DIV_ROUND_UP(cfg->clk_src_freq, clock);
		if (cfg->min_div > tmp_div || tmp_div > cfg->max_div)
			continue;

		tmp_clock = DIV_ROUND_UP(cfg->clk_src_freq, tmp_div);
		tmp_diff = abs(clock - tmp_clock);

		if (tmp_diff < diff) {
			diff = tmp_diff;
			clk_src = cfg->clk_src_reg;
			clk_div = tmp_div;
		}
	}

	/* NOTE(review): if no PLL entry matched, clk_div is still 0 and
	 * (clk_div - 1) wraps; with the current table every rate within
	 * f_min..f_max matches, but this deserves a sanity check.
	 */
	clk_src |= ((clk_div - 1) << 8);
	clk_src |= AU6601_CLK_ENABLE;

	dev_dbg(host->dev, "set freq %d cal freq %d, use div %d, mod %x\n",
		clock, tmp_clock, clk_div, clk_src);

	alcor_write16(priv, clk_src, AU6601_CLK_SELECT);

}
692
alcor_set_timing(struct mmc_host * mmc,struct mmc_ios * ios)693 static void alcor_set_timing(struct mmc_host *mmc, struct mmc_ios *ios)
694 {
695 struct alcor_sdmmc_host *host = mmc_priv(mmc);
696
697 if (ios->timing == MMC_TIMING_LEGACY) {
698 alcor_rmw8(host, AU6601_CLK_DELAY,
699 AU6601_CLK_POSITIVE_EDGE_ALL, 0);
700 } else {
701 alcor_rmw8(host, AU6601_CLK_DELAY,
702 0, AU6601_CLK_POSITIVE_EDGE_ALL);
703 }
704 }
705
alcor_set_bus_width(struct mmc_host * mmc,struct mmc_ios * ios)706 static void alcor_set_bus_width(struct mmc_host *mmc, struct mmc_ios *ios)
707 {
708 struct alcor_sdmmc_host *host = mmc_priv(mmc);
709 struct alcor_pci_priv *priv = host->alcor_pci;
710
711 if (ios->bus_width == MMC_BUS_WIDTH_1) {
712 alcor_write8(priv, 0, AU6601_REG_BUS_CTRL);
713 } else if (ios->bus_width == MMC_BUS_WIDTH_4) {
714 alcor_write8(priv, AU6601_BUS_WIDTH_4BIT,
715 AU6601_REG_BUS_CTRL);
716 } else
717 dev_err(host->dev, "Unknown BUS mode\n");
718
719 }
720
alcor_card_busy(struct mmc_host * mmc)721 static int alcor_card_busy(struct mmc_host *mmc)
722 {
723 struct alcor_sdmmc_host *host = mmc_priv(mmc);
724 struct alcor_pci_priv *priv = host->alcor_pci;
725 u8 status;
726
727 /* Check whether dat[0:3] low */
728 status = alcor_read8(priv, AU6601_DATA_PIN_STATE);
729
730 return !(status & AU6601_BUS_STAT_DAT_MASK);
731 }
732
alcor_get_cd(struct mmc_host * mmc)733 static int alcor_get_cd(struct mmc_host *mmc)
734 {
735 struct alcor_sdmmc_host *host = mmc_priv(mmc);
736 struct alcor_pci_priv *priv = host->alcor_pci;
737 u8 detect;
738
739 detect = alcor_read8(priv, AU6601_DETECT_STATUS)
740 & AU6601_DETECT_STATUS_M;
741 /* check if card is present then send command and data */
742 return (detect == AU6601_SD_DETECTED);
743 }
744
alcor_get_ro(struct mmc_host * mmc)745 static int alcor_get_ro(struct mmc_host *mmc)
746 {
747 struct alcor_sdmmc_host *host = mmc_priv(mmc);
748 struct alcor_pci_priv *priv = host->alcor_pci;
749 u8 status;
750
751 /* get write protect pin status */
752 status = alcor_read8(priv, AU6601_INTERFACE_MODE_CTRL);
753
754 return !!(status & AU6601_SD_CARD_WP);
755 }
756
alcor_request(struct mmc_host * mmc,struct mmc_request * mrq)757 static void alcor_request(struct mmc_host *mmc, struct mmc_request *mrq)
758 {
759 struct alcor_sdmmc_host *host = mmc_priv(mmc);
760
761 mutex_lock(&host->cmd_mutex);
762
763 host->mrq = mrq;
764
765 /* check if card is present then send command and data */
766 if (alcor_get_cd(mmc))
767 alcor_send_cmd(host, mrq->cmd, true);
768 else {
769 mrq->cmd->error = -ENOMEDIUM;
770 alcor_request_complete(host, 1);
771 }
772
773 mutex_unlock(&host->cmd_mutex);
774 }
775
/* mmc_host_ops pre_req callback: DMA-map the request ahead of time
 * when it qualifies for the controller's page-at-a-time DMA engine;
 * otherwise leave it for PIO (cookie stays COOKIE_UNMAPPED).
 */
static void alcor_pre_req(struct mmc_host *mmc,
			  struct mmc_request *mrq)
{
	struct alcor_sdmmc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct mmc_command *cmd = mrq->cmd;
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (!data || !cmd)
		return;

	data->host_cookie = COOKIE_UNMAPPED;

	/* FIXME: looks like the DMA engine works only with CMD18 */
	if (cmd->opcode != MMC_READ_MULTIPLE_BLOCK
	    && cmd->opcode != MMC_WRITE_MULTIPLE_BLOCK)
		return;
	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. A future improvement
	 * could be made to use temporary DMA bounce-buffers when these
	 * requirements are not met.
	 *
	 * Also, we don't bother with all the DMA setup overhead for
	 * short transfers.
	 */
	if (data->blocks * data->blksz < AU6601_MAX_DMA_BLOCK_SIZE)
		return;

	if (data->blksz & 3)
		return;

	/* every segment must be exactly one full DMA page, page-aligned */
	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->length != AU6601_MAX_DMA_BLOCK_SIZE)
			return;
		if (sg->offset != 0)
			return;
	}

	/* This data might be unmapped at this time */

	sg_len = dma_map_sg(host->dev, data->sg, data->sg_len,
			    mmc_get_dma_dir(data));
	if (sg_len)
		data->host_cookie = COOKIE_MAPPED;

	data->sg_count = sg_len;
}
825
alcor_post_req(struct mmc_host * mmc,struct mmc_request * mrq,int err)826 static void alcor_post_req(struct mmc_host *mmc,
827 struct mmc_request *mrq,
828 int err)
829 {
830 struct alcor_sdmmc_host *host = mmc_priv(mmc);
831 struct mmc_data *data = mrq->data;
832
833 if (!data)
834 return;
835
836 if (data->host_cookie == COOKIE_MAPPED) {
837 dma_unmap_sg(host->dev,
838 data->sg,
839 data->sg_len,
840 mmc_get_dma_dir(data));
841 }
842
843 data->host_cookie = COOKIE_UNMAPPED;
844 }
845
/* Apply a power-mode transition. The POWER_ON sequence is order- and
 * timing-sensitive (mimics the vendor driver) - do not reorder.
 */
static void alcor_set_power_mode(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct alcor_sdmmc_host *host = mmc_priv(mmc);
	struct alcor_pci_priv *priv = host->alcor_pci;

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		alcor_set_clock(host, ios->clock);
		/* set all pins to input */
		alcor_write8(priv, 0, AU6601_OUTPUT_ENABLE);
		/* turn of VDD */
		alcor_write8(priv, 0, AU6601_POWER_CONTROL);
		break;
	case MMC_POWER_UP:
		break;
	case MMC_POWER_ON:
		/* This is most trickiest part. The order and timings of
		 * instructions seems to play important role. Any changes may
		 * confuse internal state engine if this HW.
		 * FIXME: If we will ever get access to documentation, then this
		 * part should be reviewed again.
		 */

		/* enable SD card mode */
		alcor_write8(priv, AU6601_SD_CARD,
			     AU6601_ACTIVE_CTRL);
		/* set signal voltage to 3.3V */
		alcor_write8(priv, 0, AU6601_OPT);
		/* no documentation about clk delay, for now just try to mimic
		 * original driver.
		 */
		alcor_write8(priv, 0x20, AU6601_CLK_DELAY);
		/* set BUS width to 1 bit */
		alcor_write8(priv, 0, AU6601_REG_BUS_CTRL);
		/* set CLK first time */
		alcor_set_clock(host, ios->clock);
		/* power on VDD */
		alcor_write8(priv, AU6601_SD_CARD,
			     AU6601_POWER_CONTROL);
		/* wait until the CLK will get stable */
		mdelay(20);
		/* set CLK again, mimic original driver. */
		alcor_set_clock(host, ios->clock);

		/* enable output */
		alcor_write8(priv, AU6601_SD_CARD,
			     AU6601_OUTPUT_ENABLE);
		/* The clk will not work on au6621. We need to trigger data
		 * transfer.
		 */
		alcor_write8(priv, AU6601_DATA_WRITE,
			     AU6601_DATA_XFER_CTRL);
		/* configure timeout. Not clear what exactly it means. */
		alcor_write8(priv, 0x7d, AU6601_TIME_OUT_CTRL);
		mdelay(100);
		break;
	default:
		dev_err(host->dev, "Unknown power parameter\n");
	}
}
906
/* mmc_host_ops set_ios callback: power-mode changes take the dedicated
 * power sequence; otherwise update timing, bus width and clock.
 */
static void alcor_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct alcor_sdmmc_host *host = mmc_priv(mmc);

	mutex_lock(&host->cmd_mutex);

	dev_dbg(host->dev, "set ios. bus width: %x, power mode: %x\n",
		ios->bus_width, ios->power_mode);

	if (ios->power_mode != host->cur_power_mode) {
		alcor_set_power_mode(mmc, ios);
		host->cur_power_mode = ios->power_mode;
	} else {
		alcor_set_timing(mmc, ios);
		alcor_set_bus_width(mmc, ios);
		alcor_set_clock(host, ios->clock);
	}

	mutex_unlock(&host->cmd_mutex);
}
927
alcor_signal_voltage_switch(struct mmc_host * mmc,struct mmc_ios * ios)928 static int alcor_signal_voltage_switch(struct mmc_host *mmc,
929 struct mmc_ios *ios)
930 {
931 struct alcor_sdmmc_host *host = mmc_priv(mmc);
932
933 mutex_lock(&host->cmd_mutex);
934
935 switch (ios->signal_voltage) {
936 case MMC_SIGNAL_VOLTAGE_330:
937 alcor_rmw8(host, AU6601_OPT, AU6601_OPT_SD_18V, 0);
938 break;
939 case MMC_SIGNAL_VOLTAGE_180:
940 alcor_rmw8(host, AU6601_OPT, 0, AU6601_OPT_SD_18V);
941 break;
942 default:
943 /* No signal voltage switch required */
944 break;
945 }
946
947 mutex_unlock(&host->cmd_mutex);
948 return 0;
949 }
950
/* mmc_host_ops implemented by this driver. */
static const struct mmc_host_ops alcor_sdc_ops = {
	.card_busy	= alcor_card_busy,
	.get_cd		= alcor_get_cd,
	.get_ro		= alcor_get_ro,
	.post_req	= alcor_post_req,
	.pre_req	= alcor_pre_req,
	.request	= alcor_request,
	.set_ios	= alcor_set_ios,
	.start_signal_voltage_switch = alcor_signal_voltage_switch,
};
961
/* Delayed-work watchdog: fires when the hardware never raised the
 * interrupt that would have completed the current request.
 */
static void alcor_timeout_timer(struct work_struct *work)
{
	struct delayed_work *d = to_delayed_work(work);
	struct alcor_sdmmc_host *host = container_of(d, struct alcor_sdmmc_host,
						     timeout_work);
	mutex_lock(&host->cmd_mutex);

	dev_dbg(host->dev, "triggered timeout\n");
	if (host->mrq) {
		dev_err(host->dev, "Timeout waiting for hardware interrupt.\n");

		/* flag whichever stage was in flight */
		if (host->data) {
			host->data->error = -ETIMEDOUT;
		} else {
			if (host->cmd)
				host->cmd->error = -ETIMEDOUT;
			else
				host->mrq->cmd->error = -ETIMEDOUT;
		}

		alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA);
		/* don't cancel this very work item (false) */
		alcor_request_complete(host, 0);
	}

	mutex_unlock(&host->cmd_mutex);
}
988
/* Bring the controller into a known, powered-off, IRQ-enabled state.
 * Used at probe and on resume.
 */
static void alcor_hw_init(struct alcor_sdmmc_host *host)
{
	struct alcor_pci_priv *priv = host->alcor_pci;
	struct alcor_dev_cfg *cfg = priv->cfg;

	/* FIXME: This part is a mimics HW init of original driver.
	 * If we will ever get access to documentation, then this part
	 * should be reviewed again.
	 */

	/* reset command state engine */
	alcor_reset(host, AU6601_RESET_CMD);

	alcor_write8(priv, 0, AU6601_DMA_BOUNDARY);
	/* enable sd card mode */
	alcor_write8(priv, AU6601_SD_CARD, AU6601_ACTIVE_CTRL);

	/* set BUS width to 1 bit */
	alcor_write8(priv, 0, AU6601_REG_BUS_CTRL);

	/* reset data state engine */
	alcor_reset(host, AU6601_RESET_DATA);
	/* Not sure if a voodoo with AU6601_DMA_BOUNDARY is really needed */
	alcor_write8(priv, 0, AU6601_DMA_BOUNDARY);

	alcor_write8(priv, 0, AU6601_INTERFACE_MODE_CTRL);
	/* not clear what we are doing here. */
	alcor_write8(priv, 0x44, AU6601_PAD_DRIVE0);
	alcor_write8(priv, 0x44, AU6601_PAD_DRIVE1);
	alcor_write8(priv, 0x00, AU6601_PAD_DRIVE2);

	/* for 6601 - dma_boundary; for 6621 - dma_page_cnt
	 * exact meaning of this register is not clear.
	 */
	alcor_write8(priv, cfg->dma, AU6601_DMA_BOUNDARY);

	/* make sure all pins are set to input and VDD is off */
	alcor_write8(priv, 0, AU6601_OUTPUT_ENABLE);
	alcor_write8(priv, 0, AU6601_POWER_CONTROL);

	/* enable card-detect reporting */
	alcor_write8(priv, AU6601_DETECT_EN, AU6601_DETECT_STATUS);
	/* now we should be safe to enable IRQs */
	alcor_unmask_sd_irqs(host);
}
1033
/* Quiesce the controller: mask IRQs first, then reset the state
 * machines and power everything down. Used at remove and on suspend.
 */
static void alcor_hw_uninit(struct alcor_sdmmc_host *host)
{
	struct alcor_pci_priv *priv = host->alcor_pci;

	alcor_mask_sd_irqs(host);
	alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA);

	/* disable card-detect reporting */
	alcor_write8(priv, 0, AU6601_DETECT_STATUS);

	/* pins to input, VDD off */
	alcor_write8(priv, 0, AU6601_OUTPUT_ENABLE);
	alcor_write8(priv, 0, AU6601_POWER_CONTROL);

	alcor_write8(priv, 0, AU6601_OPT);
}
1048
/* Populate the mmc_host capabilities and transfer-size limits for this
 * controller.
 */
static void alcor_init_mmc(struct alcor_sdmmc_host *host)
{
	struct mmc_host *mmc = mmc_from_priv(host);

	mmc->f_min = AU6601_MIN_CLOCK;
	mmc->f_max = AU6601_MAX_CLOCK;
	mmc->ocr_avail = MMC_VDD_33_34;
	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED
		| MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
		| MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50;
	mmc->caps2 = MMC_CAP2_NO_SDIO;
	mmc->ops = &alcor_sdc_ops;

	/* The hardware does DMA data transfer of 4096 bytes to/from a single
	 * buffer address. Scatterlists are not supported at the hardware
	 * level, however we can work with them at the driver level,
	 * provided that each segment is exactly 4096 bytes in size.
	 * Upon DMA completion of a single segment (signalled via IRQ), we
	 * immediately proceed to transfer the next segment from the
	 * scatterlist.
	 *
	 * The overall request is limited to 240 sectors, matching the
	 * original vendor driver.
	 */
	mmc->max_segs = AU6601_MAX_DMA_SEGMENTS;
	mmc->max_seg_size = AU6601_MAX_DMA_BLOCK_SIZE;
	mmc->max_blk_count = 240;
	mmc->max_req_size = mmc->max_blk_count * mmc->max_blk_size;
	dma_set_max_seg_size(host->dev, mmc->max_seg_size);
}
1079
/* Platform-device probe: allocate the mmc_host, wire up the shared
 * (PCI-level) IRQ, initialise the hardware and register with the MMC
 * core. Returns 0 or a negative errno.
 */
static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
{
	struct alcor_pci_priv *priv = pdev->dev.platform_data;
	struct mmc_host *mmc;
	struct alcor_sdmmc_host *host;
	int ret;

	mmc = mmc_alloc_host(sizeof(*host), &pdev->dev);
	if (!mmc) {
		dev_err(&pdev->dev, "Can't allocate MMC\n");
		return -ENOMEM;
	}

	host = mmc_priv(mmc);
	host->dev = &pdev->dev;
	host->cur_power_mode = MMC_POWER_UNDEFINED;
	host->alcor_pci = priv;

	/* make sure irqs are disabled */
	alcor_write32(priv, 0, AU6601_REG_INT_ENABLE);
	alcor_write32(priv, 0, AU6601_MS_INT_ENABLE);

	ret = devm_request_threaded_irq(&pdev->dev, priv->irq,
			alcor_irq, alcor_irq_thread, IRQF_SHARED,
			DRV_NAME_ALCOR_PCI_SDMMC, host);

	if (ret) {
		dev_err(&pdev->dev, "Failed to get irq for data line\n");
		goto free_host;
	}

	mutex_init(&host->cmd_mutex);
	INIT_DELAYED_WORK(&host->timeout_work, alcor_timeout_timer);

	alcor_init_mmc(host);
	alcor_hw_init(host);

	dev_set_drvdata(&pdev->dev, host);
	ret = mmc_add_host(mmc);
	if (ret)
		goto free_host;

	return 0;

free_host:
	mmc_free_host(mmc);
	return ret;
}
1128
/* Platform-device remove: flush the watchdog (completing any request
 * it was guarding), quiesce the hardware and unregister the host.
 */
static void alcor_pci_sdmmc_drv_remove(struct platform_device *pdev)
{
	struct alcor_sdmmc_host *host = dev_get_drvdata(&pdev->dev);
	struct mmc_host *mmc = mmc_from_priv(host);

	/* if the watchdog was still pending, a request is in flight */
	if (cancel_delayed_work_sync(&host->timeout_work))
		alcor_request_complete(host, 0);

	alcor_hw_uninit(host);
	mmc_remove_host(mmc);
	mmc_free_host(mmc);
}
1141
1142 #ifdef CONFIG_PM_SLEEP
/* System suspend: complete any request guarded by the watchdog, then
 * power the controller down.
 */
static int alcor_pci_sdmmc_suspend(struct device *dev)
{
	struct alcor_sdmmc_host *host = dev_get_drvdata(dev);

	if (cancel_delayed_work_sync(&host->timeout_work))
		alcor_request_complete(host, 0);

	alcor_hw_uninit(host);

	return 0;
}
1154
/* System resume: re-program the controller from scratch. */
static int alcor_pci_sdmmc_resume(struct device *dev)
{
	struct alcor_sdmmc_host *host = dev_get_drvdata(dev);

	alcor_hw_init(host);

	return 0;
}
1163 #endif /* CONFIG_PM_SLEEP */
1164
static SIMPLE_DEV_PM_OPS(alcor_mmc_pm_ops, alcor_pci_sdmmc_suspend,
			 alcor_pci_sdmmc_resume);

/* Matched against the platform device created by the alcor_pci MFD. */
static const struct platform_device_id alcor_pci_sdmmc_ids[] = {
	{
		.name = DRV_NAME_ALCOR_PCI_SDMMC,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, alcor_pci_sdmmc_ids);

static struct platform_driver alcor_pci_sdmmc_driver = {
	.probe		= alcor_pci_sdmmc_drv_probe,
	.remove		= alcor_pci_sdmmc_drv_remove,
	.id_table	= alcor_pci_sdmmc_ids,
	.driver		= {
		.name	= DRV_NAME_ALCOR_PCI_SDMMC,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.pm	= &alcor_mmc_pm_ops
	},
};
module_platform_driver(alcor_pci_sdmmc_driver);

MODULE_AUTHOR("Oleksij Rempel <linux@rempel-privat.de>");
MODULE_DESCRIPTION("PCI driver for Alcor Micro AU6601 Secure Digital Host Controller Interface");
MODULE_LICENSE("GPL");
1192