/*
 * Amlogic SD/eMMC driver for the GX/S905 family SoCs
 *
 * Copyright (c) 2016 BayLibre, SAS.
 * Author: Kevin Hilman <khilman@baylibre.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/regulator/consumer.h>
#include <linux/interrupt.h>
#include <linux/bitfield.h>

#define DRIVER_NAME "meson-gx-mmc"

#define SD_EMMC_CLOCK 0x0
#define CLK_DIV_MASK GENMASK(5, 0)
#define CLK_DIV_MAX 63
#define CLK_SRC_MASK GENMASK(7, 6)
#define CLK_SRC_XTAL 0 /* external crystal */
#define CLK_SRC_XTAL_RATE 24000000
#define CLK_SRC_PLL 1 /* FCLK_DIV2 */
#define CLK_SRC_PLL_RATE 1000000000
#define CLK_CORE_PHASE_MASK GENMASK(9, 8)
#define CLK_TX_PHASE_MASK GENMASK(11, 10)
#define CLK_RX_PHASE_MASK GENMASK(13, 12)
#define CLK_PHASE_0 0
#define CLK_PHASE_90 1
#define CLK_PHASE_180 2
#define CLK_PHASE_270 3
#define CLK_ALWAYS_ON BIT(24)

#define SD_EMMC_DELAY 0x4
#define SD_EMMC_ADJUST 0x8
#define SD_EMMC_CALOUT 0x10
#define SD_EMMC_START 0x40
#define START_DESC_INIT BIT(0)
#define START_DESC_BUSY BIT(1)
#define START_DESC_ADDR_MASK GENMASK(31, 2)

#define SD_EMMC_CFG 0x44
#define CFG_BUS_WIDTH_MASK GENMASK(1, 0)
#define CFG_BUS_WIDTH_1 0x0
#define CFG_BUS_WIDTH_4 0x1
#define CFG_BUS_WIDTH_8 0x2
#define CFG_DDR BIT(2)
#define CFG_BLK_LEN_MASK GENMASK(7, 4)
#define CFG_RESP_TIMEOUT_MASK GENMASK(11, 8)
#define CFG_RC_CC_MASK GENMASK(15, 12)
#define CFG_CLK_ALWAYS_ON BIT(18)
#define CFG_CHK_DS BIT(20)
#define CFG_STOP_CLOCK BIT(22)
#define CFG_AUTO_CLK BIT(23)

#define SD_EMMC_STATUS 0x48
#define STATUS_BUSY BIT(31)

#define SD_EMMC_IRQ_EN 0x4c
#define IRQ_EN_MASK GENMASK(13, 0)
#define IRQ_RXD_ERR_MASK GENMASK(7, 0)
#define IRQ_TXD_ERR BIT(8)
#define IRQ_DESC_ERR BIT(9)
#define IRQ_RESP_ERR BIT(10)
#define IRQ_RESP_TIMEOUT BIT(11)
#define IRQ_DESC_TIMEOUT BIT(12)
#define IRQ_END_OF_CHAIN BIT(13)
#define IRQ_RESP_STATUS BIT(14)
#define IRQ_SDIO BIT(15)

#define SD_EMMC_CMD_CFG 0x50
#define SD_EMMC_CMD_ARG 0x54
#define SD_EMMC_CMD_DAT 0x58
#define SD_EMMC_CMD_RSP 0x5c
#define SD_EMMC_CMD_RSP1 0x60
#define SD_EMMC_CMD_RSP2 0x64
#define SD_EMMC_CMD_RSP3 0x68

#define SD_EMMC_RXD 0x94
#define SD_EMMC_TXD 0x94
#define SD_EMMC_LAST_REG SD_EMMC_TXD
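
/*
 * Note: all multi-bit fields defined above are manipulated with the
 * <linux/bitfield.h> helpers, e.g.:
 *
 *	writel(FIELD_PREP(CLK_DIV_MASK, div) |
 *	       FIELD_PREP(CLK_SRC_MASK, CLK_SRC_XTAL),
 *	       host->regs + SD_EMMC_CLOCK);
 *	div = FIELD_GET(CLK_DIV_MASK, readl(host->regs + SD_EMMC_CLOCK));
 */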

#define SD_EMMC_CFG_BLK_SIZE 512 /* internal buffer max: 512 bytes */
#define SD_EMMC_CFG_RESP_TIMEOUT 256 /* in clock cycles */
#define SD_EMMC_CMD_TIMEOUT 1024 /* in ms */
#define SD_EMMC_CMD_TIMEOUT_DATA 4096 /* in ms */
#define SD_EMMC_CFG_CMD_GAP 16 /* in clock cycles */
#define SD_EMMC_DESC_BUF_LEN PAGE_SIZE

#define SD_EMMC_PRE_REQ_DONE BIT(0)
#define SD_EMMC_DESC_CHAIN_MODE BIT(1)

#define MUX_CLK_NUM_PARENTS 2

struct meson_tuning_params {
	u8 core_phase;
	u8 tx_phase;
	u8 rx_phase;
};

struct sd_emmc_desc {
	u32 cmd_cfg;
	u32 cmd_arg;
	u32 cmd_data;
	u32 cmd_resp;
};

struct meson_host {
	struct device *dev;
	struct mmc_host *mmc;
	struct mmc_command *cmd;

	spinlock_t lock;
	void __iomem *regs;
	struct clk *core_clk;
	struct clk_mux mux;
	struct clk *mux_clk;
	unsigned long current_clock;

	struct clk_divider cfg_div;
	struct clk *cfg_div_clk;

	unsigned int bounce_buf_size;
	void *bounce_buf;
	dma_addr_t bounce_dma_addr;
	struct sd_emmc_desc *descs;
	dma_addr_t descs_dma_addr;

	struct meson_tuning_params tp;
	bool vqmmc_enabled;
};

#define CMD_CFG_LENGTH_MASK GENMASK(8, 0)
#define CMD_CFG_BLOCK_MODE BIT(9)
#define CMD_CFG_R1B BIT(10)
#define CMD_CFG_END_OF_CHAIN BIT(11)
#define CMD_CFG_TIMEOUT_MASK GENMASK(15, 12)
#define CMD_CFG_NO_RESP BIT(16)
#define CMD_CFG_NO_CMD BIT(17)
#define CMD_CFG_DATA_IO BIT(18)
#define CMD_CFG_DATA_WR BIT(19)
#define CMD_CFG_RESP_NOCRC BIT(20)
#define CMD_CFG_RESP_128 BIT(21)
#define CMD_CFG_RESP_NUM BIT(22)
#define CMD_CFG_DATA_NUM BIT(23)
#define CMD_CFG_CMD_INDEX_MASK GENMASK(29, 24)
#define CMD_CFG_ERROR BIT(30)
#define CMD_CFG_OWNER BIT(31)

#define CMD_DATA_MASK GENMASK(31, 2)
#define CMD_DATA_BIG_ENDIAN BIT(1)
#define CMD_DATA_SRAM BIT(0)
#define CMD_RESP_MASK GENMASK(31, 1)
#define CMD_RESP_SRAM BIT(0)

static unsigned int meson_mmc_get_timeout_msecs(struct mmc_data *data)
{
	unsigned int timeout = data->timeout_ns / NSEC_PER_MSEC;

	if (!timeout)
		return SD_EMMC_CMD_TIMEOUT_DATA;

	timeout = roundup_pow_of_two(timeout);

	return min(timeout, 32768U); /* max. 2^15 ms */
}

static struct mmc_command *meson_mmc_get_next_command(struct mmc_command *cmd)
{
	if (cmd->opcode == MMC_SET_BLOCK_COUNT && !cmd->error)
		return cmd->mrq->cmd;
	else if (mmc_op_multi(cmd->opcode) &&
		 (!cmd->mrq->sbc || cmd->error || cmd->data->error))
		return cmd->mrq->stop;
	else
		return NULL;
}
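
/*
 * Data is moved either by walking the scatterlist with a chain of
 * in-memory descriptors (zero copy) or, as a fallback, by staging
 * everything through a single coherent bounce buffer.  The mode chosen
 * below is recorded in data->host_cookie and honoured for the rest of
 * the request's life cycle.
 */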

static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
					struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;
	struct scatterlist *sg;
	int i;
	bool use_desc_chain_mode = true;

	/*
	 * Broken SDIO with AP6255-based WiFi on Khadas VIM Pro has been
	 * reported.  For some strange reason this occurs in descriptor
	 * chain mode only.  So let's fall back to bounce buffer mode
	 * for command SD_IO_RW_EXTENDED.
	 */
	if (mrq->cmd->opcode == SD_IO_RW_EXTENDED)
		return;

	for_each_sg(data->sg, sg, data->sg_len, i)
		/* check for 8 byte alignment */
		if (sg->offset & 7) {
			WARN_ONCE(1, "unaligned scatterlist buffer\n");
			use_desc_chain_mode = false;
			break;
		}

	if (use_desc_chain_mode)
		data->host_cookie |= SD_EMMC_DESC_CHAIN_MODE;
}

static inline bool meson_mmc_desc_chain_mode(const struct mmc_data *data)
{
	return data->host_cookie & SD_EMMC_DESC_CHAIN_MODE;
}

static inline bool meson_mmc_bounce_buf_read(const struct mmc_data *data)
{
	return data && data->flags & MMC_DATA_READ &&
	       !meson_mmc_desc_chain_mode(data);
}

static void meson_mmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data)
		return;

	meson_mmc_get_transfer_mode(mmc, mrq);
	data->host_cookie |= SD_EMMC_PRE_REQ_DONE;

	if (!meson_mmc_desc_chain_mode(data))
		return;

	data->sg_count = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len,
				    mmc_get_dma_dir(data));
	if (!data->sg_count)
		dev_err(mmc_dev(mmc), "dma_map_sg failed\n");
}

static void meson_mmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
			       int err)
{
	struct mmc_data *data = mrq->data;

	if (data && meson_mmc_desc_chain_mode(data) && data->sg_count)
		dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
			     mmc_get_dma_dir(data));
}
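
/*
 * Rate changes are applied with the card clock gated: CFG_STOP_CLOCK
 * is set, the divider is reprogrammed through the common clock
 * framework, and the gate is released again.  Requesting a rate of 0
 * leaves the clock stopped.
 */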
static int meson_mmc_clk_set(struct meson_host *host, unsigned long clk_rate)
{
	struct mmc_host *mmc = host->mmc;
	int ret;
	u32 cfg;

	if (clk_rate) {
		if (WARN_ON(clk_rate > mmc->f_max))
			clk_rate = mmc->f_max;
		else if (WARN_ON(clk_rate < mmc->f_min))
			clk_rate = mmc->f_min;
	}

	if (clk_rate == host->current_clock)
		return 0;

	/* stop clock */
	cfg = readl(host->regs + SD_EMMC_CFG);
	if (!(cfg & CFG_STOP_CLOCK)) {
		cfg |= CFG_STOP_CLOCK;
		writel(cfg, host->regs + SD_EMMC_CFG);
	}

	dev_dbg(host->dev, "change clock rate %u -> %lu\n",
		mmc->actual_clock, clk_rate);

	if (!clk_rate) {
		mmc->actual_clock = 0;
		host->current_clock = 0;
		/* return with clock being stopped */
		return 0;
	}

	ret = clk_set_rate(host->cfg_div_clk, clk_rate);
	if (ret) {
		dev_err(host->dev, "Unable to set cfg_div_clk to %lu. ret=%d\n",
			clk_rate, ret);
		return ret;
	}

	mmc->actual_clock = clk_get_rate(host->cfg_div_clk);
	host->current_clock = clk_rate;

	if (clk_rate != mmc->actual_clock)
		dev_dbg(host->dev,
			"divider requested rate %lu != actual rate %u\n",
			clk_rate, mmc->actual_clock);

	/* (re)start clock */
	cfg = readl(host->regs + SD_EMMC_CFG);
	cfg &= ~CFG_STOP_CLOCK;
	writel(cfg, host->regs + SD_EMMC_CFG);

	return 0;
}

/*
 * The SD/eMMC IP block has an internal mux and divider used for
 * generating the MMC clock.  Use the clock framework to create and
 * manage these clocks.
 */
static int meson_mmc_clk_init(struct meson_host *host)
{
	struct clk_init_data init;
	char clk_name[32];
	int i, ret = 0;
	const char *mux_parent_names[MUX_CLK_NUM_PARENTS];
	const char *clk_div_parents[1];
	u32 clk_reg, cfg;

	/* get the mux parents */
	for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) {
		struct clk *clk;
		char name[16];

		snprintf(name, sizeof(name), "clkin%d", i);
		clk = devm_clk_get(host->dev, name);
		if (IS_ERR(clk)) {
			if (clk != ERR_PTR(-EPROBE_DEFER))
				dev_err(host->dev, "Missing clock %s\n", name);
			return PTR_ERR(clk);
		}

		mux_parent_names[i] = __clk_get_name(clk);
	}

	/* create the mux */
	snprintf(clk_name, sizeof(clk_name), "%s#mux", dev_name(host->dev));
	init.name = clk_name;
	init.ops = &clk_mux_ops;
	init.flags = 0;
	init.parent_names = mux_parent_names;
	init.num_parents = MUX_CLK_NUM_PARENTS;
	host->mux.reg = host->regs + SD_EMMC_CLOCK;
	host->mux.shift = __bf_shf(CLK_SRC_MASK);
	/* clk_mux expects the mask shifted down to bit 0 */
	host->mux.mask = CLK_SRC_MASK >> host->mux.shift;
	host->mux.flags = 0;
	host->mux.table = NULL;
	host->mux.hw.init = &init;

	host->mux_clk = devm_clk_register(host->dev, &host->mux.hw);
	if (WARN_ON(IS_ERR(host->mux_clk)))
		return PTR_ERR(host->mux_clk);

	/* create the divider */
	snprintf(clk_name, sizeof(clk_name), "%s#div", dev_name(host->dev));
	init.name = clk_name;
	init.ops = &clk_divider_ops;
	init.flags = CLK_SET_RATE_PARENT;
	clk_div_parents[0] = __clk_get_name(host->mux_clk);
	init.parent_names = clk_div_parents;
	init.num_parents = ARRAY_SIZE(clk_div_parents);

	host->cfg_div.reg = host->regs + SD_EMMC_CLOCK;
	host->cfg_div.shift = __bf_shf(CLK_DIV_MASK);
	host->cfg_div.width = __builtin_popcountl(CLK_DIV_MASK);
	host->cfg_div.hw.init = &init;
	host->cfg_div.flags = CLK_DIVIDER_ONE_BASED |
		CLK_DIVIDER_ROUND_CLOSEST | CLK_DIVIDER_ALLOW_ZERO;

	host->cfg_div_clk = devm_clk_register(host->dev, &host->cfg_div.hw);
	if (WARN_ON(PTR_ERR_OR_ZERO(host->cfg_div_clk)))
		return PTR_ERR(host->cfg_div_clk);

	/* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
	clk_reg = 0;
	clk_reg |= FIELD_PREP(CLK_CORE_PHASE_MASK, host->tp.core_phase);
	clk_reg |= FIELD_PREP(CLK_TX_PHASE_MASK, host->tp.tx_phase);
	clk_reg |= FIELD_PREP(CLK_RX_PHASE_MASK, host->tp.rx_phase);
	clk_reg |= FIELD_PREP(CLK_SRC_MASK, CLK_SRC_XTAL);
	clk_reg |= FIELD_PREP(CLK_DIV_MASK, CLK_DIV_MAX);
	clk_reg &= ~CLK_ALWAYS_ON;
	writel(clk_reg, host->regs + SD_EMMC_CLOCK);

	/* Ensure clock starts in "auto" mode, not "always on" */
	cfg = readl(host->regs + SD_EMMC_CFG);
	cfg &= ~CFG_CLK_ALWAYS_ON;
	cfg |= CFG_AUTO_CLK;
	writel(cfg, host->regs + SD_EMMC_CFG);

	ret = clk_prepare_enable(host->cfg_div_clk);
	if (ret)
		return ret;

	/* Get the nearest minimum clock to 400 kHz */
	host->mmc->f_min = clk_round_rate(host->cfg_div_clk, 400000);

	ret = meson_mmc_clk_set(host, host->mmc->f_min);
	if (ret)
		clk_disable_unprepare(host->cfg_div_clk);

	return ret;
}
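
/*
 * Worked example for the boot-time clock set up in meson_mmc_clk_init(),
 * assuming the 24 MHz crystal (clkin0) as the selected mux parent: the
 * register is first programmed with the maximum divider, giving
 * 24 MHz / 63 ~= 381 kHz, and clk_round_rate() then picks the closest
 * achievable rate to 400 kHz, i.e. 24 MHz / 60 = 400 kHz exactly, which
 * meson_mmc_clk_set() programs as f_min.
 */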

static void meson_mmc_set_tuning_params(struct mmc_host *mmc)
{
	struct meson_host *host = mmc_priv(mmc);
	u32 regval;

	/* stop clock */
	regval = readl(host->regs + SD_EMMC_CFG);
	regval |= CFG_STOP_CLOCK;
	writel(regval, host->regs + SD_EMMC_CFG);

	regval = readl(host->regs + SD_EMMC_CLOCK);
	regval &= ~CLK_CORE_PHASE_MASK;
	regval |= FIELD_PREP(CLK_CORE_PHASE_MASK, host->tp.core_phase);
	regval &= ~CLK_TX_PHASE_MASK;
	regval |= FIELD_PREP(CLK_TX_PHASE_MASK, host->tp.tx_phase);
	regval &= ~CLK_RX_PHASE_MASK;
	regval |= FIELD_PREP(CLK_RX_PHASE_MASK, host->tp.rx_phase);
	writel(regval, host->regs + SD_EMMC_CLOCK);

	/* start clock */
	regval = readl(host->regs + SD_EMMC_CFG);
	regval &= ~CFG_STOP_CLOCK;
	writel(regval, host->regs + SD_EMMC_CFG);
}
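
/*
 * Note on the regulator handling in .set_ios below: vmmc follows the
 * core's MMC_POWER_UP/OFF requests through mmc_regulator_set_ocr(),
 * while vqmmc is only switched on at MMC_POWER_ON and tracked in
 * host->vqmmc_enabled so it is never enabled or disabled twice.
 */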
static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct meson_host *host = mmc_priv(mmc);
	u32 bus_width;
	u32 val, orig;

	/*
	 * GPIO regulator, only controls switching between 1v8 and
	 * 3v3, doesn't support MMC_POWER_OFF, MMC_POWER_ON.
	 */
	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			host->vqmmc_enabled = false;
		}

		break;

	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		break;

	case MMC_POWER_ON:
		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
			int ret = regulator_enable(mmc->supply.vqmmc);

			if (ret < 0)
				dev_err(mmc_dev(mmc),
					"failed to enable vqmmc regulator\n");
			else
				host->vqmmc_enabled = true;
		}

		break;
	}

	meson_mmc_clk_set(host, ios->clock);

	/* Bus width */
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		bus_width = CFG_BUS_WIDTH_1;
		break;
	case MMC_BUS_WIDTH_4:
		bus_width = CFG_BUS_WIDTH_4;
		break;
	case MMC_BUS_WIDTH_8:
		bus_width = CFG_BUS_WIDTH_8;
		break;
	default:
		dev_err(host->dev, "Invalid ios->bus_width: %u. Setting to 4.\n",
			ios->bus_width);
		bus_width = CFG_BUS_WIDTH_4;
	}

	val = readl(host->regs + SD_EMMC_CFG);
	orig = val;

	val &= ~CFG_BUS_WIDTH_MASK;
	val |= FIELD_PREP(CFG_BUS_WIDTH_MASK, bus_width);

	val &= ~CFG_DDR;
	if (ios->timing == MMC_TIMING_UHS_DDR50 ||
	    ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_MMC_HS400)
		val |= CFG_DDR;

	val &= ~CFG_CHK_DS;
	if (ios->timing == MMC_TIMING_MMC_HS400)
		val |= CFG_CHK_DS;

	if (val != orig) {
		writel(val, host->regs + SD_EMMC_CFG);
		dev_dbg(host->dev, "%s: SD_EMMC_CFG: 0x%08x -> 0x%08x\n",
			__func__, orig, val);
	}
}

static void meson_mmc_request_done(struct mmc_host *mmc,
				   struct mmc_request *mrq)
{
	struct meson_host *host = mmc_priv(mmc);

	host->cmd = NULL;
	mmc_request_done(host->mmc, mrq);
}

static void meson_mmc_set_blksz(struct mmc_host *mmc, unsigned int blksz)
{
	struct meson_host *host = mmc_priv(mmc);
	u32 cfg, blksz_old;

	cfg = readl(host->regs + SD_EMMC_CFG);
	blksz_old = FIELD_GET(CFG_BLK_LEN_MASK, cfg);

	if (!is_power_of_2(blksz))
		dev_err(host->dev, "blksz %u is not a power of 2\n", blksz);

	blksz = ilog2(blksz);

	/* check if block-size matches, if not update */
	if (blksz == blksz_old)
		return;

	dev_dbg(host->dev, "%s: update blk_len %d -> %d\n", __func__,
		blksz_old, blksz);

	cfg &= ~CFG_BLK_LEN_MASK;
	cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, blksz);
	writel(cfg, host->regs + SD_EMMC_CFG);
}

static void meson_mmc_set_response_bits(struct mmc_command *cmd, u32 *cmd_cfg)
{
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			*cmd_cfg |= CMD_CFG_RESP_128;
		*cmd_cfg |= CMD_CFG_RESP_NUM;

		if (!(cmd->flags & MMC_RSP_CRC))
			*cmd_cfg |= CMD_CFG_RESP_NOCRC;

		if (cmd->flags & MMC_RSP_BUSY)
			*cmd_cfg |= CMD_CFG_R1B;
	} else {
		*cmd_cfg |= CMD_CFG_NO_RESP;
	}
}

static void meson_mmc_desc_chain_transfer(struct mmc_host *mmc, u32 cmd_cfg)
{
	struct meson_host *host = mmc_priv(mmc);
	struct sd_emmc_desc *desc = host->descs;
	struct mmc_data *data = host->cmd->data;
	struct scatterlist *sg;
	u32 start;
	int i;

	if (data->flags & MMC_DATA_WRITE)
		cmd_cfg |= CMD_CFG_DATA_WR;

	if (data->blocks > 1) {
		cmd_cfg |= CMD_CFG_BLOCK_MODE;
		meson_mmc_set_blksz(mmc, data->blksz);
	}

	for_each_sg(data->sg, sg, data->sg_count, i) {
		unsigned int len = sg_dma_len(sg);

		if (data->blocks > 1)
			len /= data->blksz;

		desc[i].cmd_cfg = cmd_cfg;
		desc[i].cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, len);
		if (i > 0)
			desc[i].cmd_cfg |= CMD_CFG_NO_CMD;
		desc[i].cmd_arg = host->cmd->arg;
		desc[i].cmd_resp = 0;
		desc[i].cmd_data = sg_dma_address(sg);
	}
	desc[data->sg_count - 1].cmd_cfg |= CMD_CFG_END_OF_CHAIN;

	dma_wmb(); /* ensure descriptor is written before kicked */
	start = host->descs_dma_addr | START_DESC_BUSY;
	writel(start, host->regs + SD_EMMC_START);
}
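
/*
 * Non-chained transfers do not use the in-memory descriptor ring;
 * instead, a single descriptor is written straight into the
 * SD_EMMC_CMD_* registers, with the data pointer aimed at the bounce
 * buffer.  The final write to SD_EMMC_CMD_ARG is what starts the
 * command, hence the wmb() immediately before it.
 */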
static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct meson_host *host = mmc_priv(mmc);
	struct mmc_data *data = cmd->data;
	u32 cmd_cfg = 0, cmd_data = 0;
	unsigned int xfer_bytes = 0;

	/* Setup descriptors */
	dma_rmb();

	host->cmd = cmd;

	cmd_cfg |= FIELD_PREP(CMD_CFG_CMD_INDEX_MASK, cmd->opcode);
	cmd_cfg |= CMD_CFG_OWNER; /* owned by CPU */

	meson_mmc_set_response_bits(cmd, &cmd_cfg);

	/* data? */
	if (data) {
		data->bytes_xfered = 0;
		cmd_cfg |= CMD_CFG_DATA_IO;
		cmd_cfg |= FIELD_PREP(CMD_CFG_TIMEOUT_MASK,
				      ilog2(meson_mmc_get_timeout_msecs(data)));

		if (meson_mmc_desc_chain_mode(data)) {
			meson_mmc_desc_chain_transfer(mmc, cmd_cfg);
			return;
		}

		if (data->blocks > 1) {
			cmd_cfg |= CMD_CFG_BLOCK_MODE;
			cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK,
					      data->blocks);
			meson_mmc_set_blksz(mmc, data->blksz);
		} else {
			cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, data->blksz);
		}

		xfer_bytes = data->blksz * data->blocks;
		if (data->flags & MMC_DATA_WRITE) {
			cmd_cfg |= CMD_CFG_DATA_WR;
			WARN_ON(xfer_bytes > host->bounce_buf_size);
			sg_copy_to_buffer(data->sg, data->sg_len,
					  host->bounce_buf, xfer_bytes);
			dma_wmb();
		}

		cmd_data = host->bounce_dma_addr & CMD_DATA_MASK;
	} else {
		cmd_cfg |= FIELD_PREP(CMD_CFG_TIMEOUT_MASK,
				      ilog2(SD_EMMC_CMD_TIMEOUT));
	}

	/* Last descriptor */
	cmd_cfg |= CMD_CFG_END_OF_CHAIN;
	writel(cmd_cfg, host->regs + SD_EMMC_CMD_CFG);
	writel(cmd_data, host->regs + SD_EMMC_CMD_DAT);
	writel(0, host->regs + SD_EMMC_CMD_RSP);
	wmb(); /* ensure descriptor is written before kicked */
	writel(cmd->arg, host->regs + SD_EMMC_CMD_ARG);
}

static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct meson_host *host = mmc_priv(mmc);
	bool needs_pre_post_req = mrq->data &&
			!(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE);

	if (needs_pre_post_req) {
		meson_mmc_get_transfer_mode(mmc, mrq);
		if (!meson_mmc_desc_chain_mode(mrq->data))
			needs_pre_post_req = false;
	}

	if (needs_pre_post_req)
		meson_mmc_pre_req(mmc, mrq);

	/* Stop execution */
	writel(0, host->regs + SD_EMMC_START);

	meson_mmc_start_cmd(mmc, mrq->sbc ?: mrq->cmd);

	if (needs_pre_post_req)
		meson_mmc_post_req(mmc, mrq, 0);
}

static void meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct meson_host *host = mmc_priv(mmc);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP3);
		cmd->resp[1] = readl(host->regs + SD_EMMC_CMD_RSP2);
		cmd->resp[2] = readl(host->regs + SD_EMMC_CMD_RSP1);
		cmd->resp[3] = readl(host->regs + SD_EMMC_CMD_RSP);
	} else if (cmd->flags & MMC_RSP_PRESENT) {
		cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP);
	}
}
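
/*
 * The handler below runs in hard IRQ context with host->lock held: it
 * latches any error into cmd->error, acks the enabled status bits and,
 * when possible, completes the request right away.  Copying data out of
 * the bounce buffer on reads and issuing the next command of a chained
 * request are deferred to the threaded handler via IRQ_WAKE_THREAD.
 */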
static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
{
	struct meson_host *host = dev_id;
	struct mmc_command *cmd;
	struct mmc_data *data;
	u32 irq_en, status, raw_status;
	irqreturn_t ret = IRQ_HANDLED;

	if (WARN_ON(!host))
		return IRQ_NONE;

	cmd = host->cmd;

	if (WARN_ON(!cmd))
		return IRQ_NONE;

	data = cmd->data;

	spin_lock(&host->lock);
	irq_en = readl(host->regs + SD_EMMC_IRQ_EN);
	raw_status = readl(host->regs + SD_EMMC_STATUS);
	status = raw_status & irq_en;

	if (!status) {
		dev_warn(host->dev, "Spurious IRQ! status=0x%08x, irq_en=0x%08x\n",
			 raw_status, irq_en);
		ret = IRQ_NONE;
		goto out;
	}

	meson_mmc_read_resp(host->mmc, cmd);

	cmd->error = 0;
	if (status & IRQ_RXD_ERR_MASK) {
		dev_dbg(host->dev, "Unhandled IRQ: RXD error\n");
		cmd->error = -EILSEQ;
	}
	if (status & IRQ_TXD_ERR) {
		dev_dbg(host->dev, "Unhandled IRQ: TXD error\n");
		cmd->error = -EILSEQ;
	}
	if (status & IRQ_DESC_ERR)
		dev_dbg(host->dev, "Unhandled IRQ: Descriptor error\n");
	if (status & IRQ_RESP_ERR) {
		dev_dbg(host->dev, "Unhandled IRQ: Response error\n");
		cmd->error = -EILSEQ;
	}
	if (status & IRQ_RESP_TIMEOUT) {
		dev_dbg(host->dev, "Unhandled IRQ: Response timeout\n");
		cmd->error = -ETIMEDOUT;
	}
	if (status & IRQ_DESC_TIMEOUT) {
		dev_dbg(host->dev, "Unhandled IRQ: Descriptor timeout\n");
		cmd->error = -ETIMEDOUT;
	}
	if (status & IRQ_SDIO)
		dev_dbg(host->dev, "Unhandled IRQ: SDIO.\n");

	if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS)) {
		if (data && !cmd->error)
			data->bytes_xfered = data->blksz * data->blocks;
		if (meson_mmc_bounce_buf_read(data) ||
		    meson_mmc_get_next_command(cmd))
			ret = IRQ_WAKE_THREAD;
	} else {
		dev_warn(host->dev, "Unknown IRQ! status=0x%04x: MMC CMD%u arg=0x%08x flags=0x%08x stop=%d\n",
			 status, cmd->opcode, cmd->arg,
			 cmd->flags, cmd->mrq->stop ? 1 : 0);
		if (cmd->data) {
			struct mmc_data *data = cmd->data;

			dev_warn(host->dev, "\tblksz %u blocks %u flags 0x%08x (%s%s)",
				 data->blksz, data->blocks, data->flags,
				 data->flags & MMC_DATA_WRITE ? "write" : "",
				 data->flags & MMC_DATA_READ ? "read" : "");
		}
	}

out:
	/* ack all (enabled) interrupts */
	writel(status, host->regs + SD_EMMC_STATUS);

	if (ret == IRQ_HANDLED)
		meson_mmc_request_done(host->mmc, cmd->mrq);

	spin_unlock(&host->lock);
	return ret;
}

static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
{
	struct meson_host *host = dev_id;
	struct mmc_command *next_cmd, *cmd = host->cmd;
	struct mmc_data *data;
	unsigned int xfer_bytes;

	if (WARN_ON(!cmd))
		return IRQ_NONE;

	data = cmd->data;
	if (meson_mmc_bounce_buf_read(data)) {
		xfer_bytes = data->blksz * data->blocks;
		WARN_ON(xfer_bytes > host->bounce_buf_size);
		sg_copy_from_buffer(data->sg, data->sg_len,
				    host->bounce_buf, xfer_bytes);
	}

	next_cmd = meson_mmc_get_next_command(cmd);
	if (next_cmd)
		meson_mmc_start_cmd(host->mmc, next_cmd);
	else
		meson_mmc_request_done(host->mmc, cmd->mrq);

	return IRQ_HANDLED;
}

static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct meson_host *host = mmc_priv(mmc);
	struct meson_tuning_params tp_old = host->tp;
	int ret = -EINVAL, i, cmd_error;

	dev_info(mmc_dev(mmc), "(re)tuning...\n");

	for (i = CLK_PHASE_0; i <= CLK_PHASE_270; i++) {
		host->tp.rx_phase = i;
		/* exclude the active parameter set if retuning */
		if (!memcmp(&tp_old, &host->tp, sizeof(tp_old)) &&
		    mmc->doing_retune)
			continue;
		meson_mmc_set_tuning_params(mmc);
		ret = mmc_send_tuning(mmc, opcode, &cmd_error);
		if (!ret)
			break;
	}

	return ret;
}
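
/*
 * Only rx_phase is swept by meson_mmc_execute_tuning(); core_phase and
 * tx_phase keep their probe-time defaults (CLK_PHASE_180 and
 * CLK_PHASE_0, i.e. 180 and 0 degrees in the CLK_PHASE_* encoding).
 */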

/*
 * NOTE: we only need this until the GPIO/pinctrl driver can handle
 * interrupts.  For now, the MMC core will use this for polling.
 */
static int meson_mmc_get_cd(struct mmc_host *mmc)
{
	int status = mmc_gpio_get_cd(mmc);

	if (status == -ENOSYS)
		return 1; /* assume present */

	return status;
}

static void meson_mmc_cfg_init(struct meson_host *host)
{
	u32 cfg = 0;

	cfg |= FIELD_PREP(CFG_RESP_TIMEOUT_MASK,
			  ilog2(SD_EMMC_CFG_RESP_TIMEOUT));
	cfg |= FIELD_PREP(CFG_RC_CC_MASK, ilog2(SD_EMMC_CFG_CMD_GAP));
	cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, ilog2(SD_EMMC_CFG_BLK_SIZE));

	writel(cfg, host->regs + SD_EMMC_CFG);
}

static const struct mmc_host_ops meson_mmc_ops = {
	.request	= meson_mmc_request,
	.set_ios	= meson_mmc_set_ios,
	.get_cd		= meson_mmc_get_cd,
	.pre_req	= meson_mmc_pre_req,
	.post_req	= meson_mmc_post_req,
	.execute_tuning	= meson_mmc_execute_tuning,
};
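
/*
 * For reference, a minimal (illustrative) device-tree node, following
 * the clock names this driver requests ("core", "clkin0", "clkin1") and
 * the compatible strings it matches.  The unit address, interrupt
 * number and clock phandles are placeholders; see the binding document
 * for the authoritative example:
 *
 *	mmc@72000 {
 *		compatible = "amlogic,meson-gxbb-mmc";
 *		reg = <0x0 0x72000 0x0 0x800>;
 *		interrupts = <GIC_SPI 217 IRQ_TYPE_EDGE_RISING>;
 *		clocks = <&clkc CLKID_SD_EMMC_B>,
 *			 <&xtal>,
 *			 <&clkc CLKID_FCLK_DIV2>;
 *		clock-names = "core", "clkin0", "clkin1";
 *	};
 */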
static int meson_mmc_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct meson_host *host;
	struct mmc_host *mmc;
	int ret, irq;

	mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;
	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->dev = &pdev->dev;
	dev_set_drvdata(&pdev->dev, host);

	spin_lock_init(&host->lock);

	/* Get regulators and the supported OCR mask */
	host->vqmmc_enabled = false;
	ret = mmc_regulator_get_supply(mmc);
	if (ret == -EPROBE_DEFER)
		goto free_host;

	ret = mmc_of_parse(mmc);
	if (ret) {
		if (ret != -EPROBE_DEFER)
			dev_warn(&pdev->dev, "error parsing DT: %d\n", ret);
		goto free_host;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(host->regs)) {
		ret = PTR_ERR(host->regs);
		goto free_host;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(&pdev->dev, "failed to get interrupt resource.\n");
		ret = -EINVAL;
		goto free_host;
	}

	host->core_clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(host->core_clk)) {
		ret = PTR_ERR(host->core_clk);
		goto free_host;
	}

	ret = clk_prepare_enable(host->core_clk);
	if (ret)
		goto free_host;

	host->tp.core_phase = CLK_PHASE_180;
	host->tp.tx_phase = CLK_PHASE_0;
	host->tp.rx_phase = CLK_PHASE_0;

	ret = meson_mmc_clk_init(host);
	if (ret)
		goto err_core_clk;

	/* Stop execution */
	writel(0, host->regs + SD_EMMC_START);

	/* clear, ack, enable all interrupts */
	writel(0, host->regs + SD_EMMC_IRQ_EN);
	writel(IRQ_EN_MASK, host->regs + SD_EMMC_STATUS);
	writel(IRQ_EN_MASK, host->regs + SD_EMMC_IRQ_EN);

	/* set config to sane default */
	meson_mmc_cfg_init(host);

	ret = devm_request_threaded_irq(&pdev->dev, irq, meson_mmc_irq,
					meson_mmc_irq_thread, IRQF_SHARED,
					NULL, host);
	if (ret)
		goto err_div_clk;

	mmc->caps |= MMC_CAP_CMD23;
	mmc->max_blk_count = CMD_CFG_LENGTH_MASK;
	mmc->max_req_size = mmc->max_blk_count * mmc->max_blk_size;
	mmc->max_segs = SD_EMMC_DESC_BUF_LEN / sizeof(struct sd_emmc_desc);
	mmc->max_seg_size = mmc->max_req_size;

	/* data bounce buffer */
	host->bounce_buf_size = mmc->max_req_size;
	host->bounce_buf =
		dma_alloc_coherent(host->dev, host->bounce_buf_size,
				   &host->bounce_dma_addr, GFP_KERNEL);
	if (host->bounce_buf == NULL) {
		dev_err(host->dev, "Unable to allocate DMA bounce buffer.\n");
		ret = -ENOMEM;
		goto err_div_clk;
	}

	host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
					 &host->descs_dma_addr, GFP_KERNEL);
	if (!host->descs) {
		dev_err(host->dev, "Allocating descriptor DMA buffer failed\n");
		ret = -ENOMEM;
		goto err_bounce_buf;
	}

	mmc->ops = &meson_mmc_ops;
	mmc_add_host(mmc);

	return 0;

err_bounce_buf:
	dma_free_coherent(host->dev, host->bounce_buf_size,
			  host->bounce_buf, host->bounce_dma_addr);
err_div_clk:
	clk_disable_unprepare(host->cfg_div_clk);
err_core_clk:
	clk_disable_unprepare(host->core_clk);
free_host:
	mmc_free_host(mmc);
	return ret;
}

static int meson_mmc_remove(struct platform_device *pdev)
{
	struct meson_host *host = dev_get_drvdata(&pdev->dev);

	mmc_remove_host(host->mmc);

	/* disable interrupts */
	writel(0, host->regs + SD_EMMC_IRQ_EN);

	dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
			  host->descs, host->descs_dma_addr);
	dma_free_coherent(host->dev, host->bounce_buf_size,
			  host->bounce_buf, host->bounce_dma_addr);

	clk_disable_unprepare(host->cfg_div_clk);
	clk_disable_unprepare(host->core_clk);

	mmc_free_host(host->mmc);
	return 0;
}

static const struct of_device_id meson_mmc_of_match[] = {
	{ .compatible = "amlogic,meson-gx-mmc", },
	{ .compatible = "amlogic,meson-gxbb-mmc", },
	{ .compatible = "amlogic,meson-gxl-mmc", },
	{ .compatible = "amlogic,meson-gxm-mmc", },
	{}
};
MODULE_DEVICE_TABLE(of, meson_mmc_of_match);

static struct platform_driver meson_mmc_driver = {
	.probe		= meson_mmc_probe,
	.remove		= meson_mmc_remove,
	.driver		= {
		.name = DRIVER_NAME,
		.of_match_table = of_match_ptr(meson_mmc_of_match),
	},
};

module_platform_driver(meson_mmc_driver);

MODULE_DESCRIPTION("Amlogic S905*/GX* SD/eMMC driver");
MODULE_AUTHOR("Kevin Hilman <khilman@baylibre.com>");
MODULE_LICENSE("GPL v2");