// SPDX-License-Identifier: GPL-2.0
//
// Mediatek SPI NOR controller driver
//
// Copyright (C) 2020 Chuanhong Guo <gch981213@gmail.com>

#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/string.h>

#define DRIVER_NAME "mtk-spi-nor"

#define MTK_NOR_REG_CMD			0x00
#define MTK_NOR_CMD_WRITE		BIT(4)
#define MTK_NOR_CMD_PROGRAM		BIT(2)
#define MTK_NOR_CMD_READ		BIT(0)
#define MTK_NOR_CMD_MASK		GENMASK(5, 0)

#define MTK_NOR_REG_PRG_CNT		0x04
#define MTK_NOR_PRG_CNT_MAX		56
#define MTK_NOR_REG_RDATA		0x0c

#define MTK_NOR_REG_RADR0		0x10
#define MTK_NOR_REG_RADR(n)		(MTK_NOR_REG_RADR0 + 4 * (n))
#define MTK_NOR_REG_RADR3		0xc8

#define MTK_NOR_REG_WDATA		0x1c

#define MTK_NOR_REG_PRGDATA0		0x20
#define MTK_NOR_REG_PRGDATA(n)		(MTK_NOR_REG_PRGDATA0 + 4 * (n))
#define MTK_NOR_REG_PRGDATA_MAX		5

#define MTK_NOR_REG_SHIFT0		0x38
#define MTK_NOR_REG_SHIFT(n)		(MTK_NOR_REG_SHIFT0 + 4 * (n))
#define MTK_NOR_REG_SHIFT_MAX		9

#define MTK_NOR_REG_CFG1		0x60
#define MTK_NOR_FAST_READ		BIT(0)

#define MTK_NOR_REG_CFG2		0x64
#define MTK_NOR_WR_CUSTOM_OP_EN		BIT(4)
#define MTK_NOR_WR_BUF_EN		BIT(0)

#define MTK_NOR_REG_PP_DATA		0x98

#define MTK_NOR_REG_IRQ_STAT		0xa8
#define MTK_NOR_REG_IRQ_EN		0xac
#define MTK_NOR_IRQ_DMA			BIT(7)
#define MTK_NOR_IRQ_MASK		GENMASK(7, 0)

#define MTK_NOR_REG_CFG3		0xb4
#define MTK_NOR_DISABLE_WREN		BIT(7)
#define MTK_NOR_DISABLE_SR_POLL		BIT(5)

#define MTK_NOR_REG_WP			0xc4
#define MTK_NOR_ENABLE_SF_CMD		0x30

#define MTK_NOR_REG_BUSCFG		0xcc
#define MTK_NOR_4B_ADDR			BIT(4)
#define MTK_NOR_QUAD_ADDR		BIT(3)
#define MTK_NOR_QUAD_READ		BIT(2)
#define MTK_NOR_DUAL_ADDR		BIT(1)
#define MTK_NOR_DUAL_READ		BIT(0)
#define MTK_NOR_BUS_MODE_MASK		GENMASK(4, 0)

#define MTK_NOR_REG_DMA_CTL		0x718
#define MTK_NOR_DMA_START		BIT(0)

#define MTK_NOR_REG_DMA_FADR		0x71c
#define MTK_NOR_REG_DMA_DADR		0x720
#define MTK_NOR_REG_DMA_END_DADR	0x724
#define MTK_NOR_REG_CG_DIS		0x728
#define MTK_NOR_SFC_SW_RST		BIT(2)

#define MTK_NOR_REG_DMA_DADR_HB		0x738
#define MTK_NOR_REG_DMA_END_DADR_HB	0x73c

#define MTK_NOR_PRG_MAX_SIZE		6
// DMA src/dst addresses for reads have to be 16-byte aligned,
#define MTK_NOR_DMA_ALIGN		16
#define MTK_NOR_DMA_ALIGN_MASK		(MTK_NOR_DMA_ALIGN - 1)
// and we allocate a bounce buffer if the destination address isn't aligned.
#define MTK_NOR_BOUNCE_BUF_SIZE		PAGE_SIZE

// Buffered page program can do one 128-byte transfer
#define MTK_NOR_PP_SIZE			128
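// Convert a clock-cycle count into a microsecond bound for poll/timeout
// intervals, rounding up. As an illustration (26 MHz is just an example;
// the real rate is read with clk_get_rate() at probe time): at 26 MHz,
// 48 clock cycles give DIV_ROUND_UP(48, 26) = 2 us.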
#define CLK_TO_US(sp, clkcnt)		DIV_ROUND_UP(clkcnt, sp->spi_freq / 1000000)

struct mtk_nor_caps {
	u8 dma_bits;

	/* extra_dummy_bit is needed by the IP in some newer SoCs.
	 * Those SoCs changed the timing for fetching register values and
	 * NOR flash IDs, and need an extra dummy bit that adds more clock
	 * cycles for fetching data.
	 */
	u8 extra_dummy_bit;
};

struct mtk_nor {
	struct spi_controller *ctlr;
	struct device *dev;
	void __iomem *base;
	u8 *buffer;
	dma_addr_t buffer_dma;
	struct clk *spi_clk;
	struct clk *ctlr_clk;
	struct clk *axi_clk;
	struct clk *axi_s_clk;
	unsigned int spi_freq;
	bool wbuf_en;
	bool has_irq;
	bool high_dma;
	struct completion op_done;
	const struct mtk_nor_caps *caps;
};

static inline void mtk_nor_rmw(struct mtk_nor *sp, u32 reg, u32 set, u32 clr)
{
	u32 val = readl(sp->base + reg);

	val &= ~clr;
	val |= set;
	writel(val, sp->base + reg);
}

static inline int mtk_nor_cmd_exec(struct mtk_nor *sp, u32 cmd, ulong clk)
{
	ulong delay = CLK_TO_US(sp, clk);
	u32 reg;
	int ret;

	writel(cmd, sp->base + MTK_NOR_REG_CMD);
	ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CMD, reg, !(reg & cmd),
				 delay / 3, (delay + 1) * 200);
	if (ret < 0)
		dev_err(sp->dev, "command %u timeout.\n", cmd);
	return ret;
}

static void mtk_nor_reset(struct mtk_nor *sp)
{
	mtk_nor_rmw(sp, MTK_NOR_REG_CG_DIS, 0, MTK_NOR_SFC_SW_RST);
	mb(); /* flush previous writes */
	mtk_nor_rmw(sp, MTK_NOR_REG_CG_DIS, MTK_NOR_SFC_SW_RST, 0);
	mb(); /* flush previous writes */
	writel(MTK_NOR_ENABLE_SF_CMD, sp->base + MTK_NOR_REG_WP);
}

static void mtk_nor_set_addr(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	u32 addr = op->addr.val;
	int i;

	for (i = 0; i < 3; i++) {
		writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR(i));
		addr >>= 8;
	}
	if (op->addr.nbytes == 4) {
		writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR3);
		mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, MTK_NOR_4B_ADDR, 0);
	} else {
		mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, 0, MTK_NOR_4B_ADDR);
	}
}

static bool need_bounce(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	return ((uintptr_t)op->data.buf.in & MTK_NOR_DMA_ALIGN_MASK);
}
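// Check whether this is a read op the controller can execute directly.
// The driver only matches read modes with fixed dummy-cycle counts:
//  - dual/quad output with a 1-bit-wide address: 8 dummy cycles
//  - dual I/O (2-bit-wide address): 4 dummy cycles
//  - quad I/O (4-bit-wide address): 6 dummy cycles
//  - plain read (0x03): no dummy cycles; fast read (0x0b): 8 dummy cycles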
static bool mtk_nor_match_read(const struct spi_mem_op *op)
{
	int dummy = 0;

	if (op->dummy.nbytes)
		dummy = op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth;

	if ((op->data.buswidth == 2) || (op->data.buswidth == 4)) {
		if (op->addr.buswidth == 1)
			return dummy == 8;
		else if (op->addr.buswidth == 2)
			return dummy == 4;
		else if (op->addr.buswidth == 4)
			return dummy == 6;
	} else if ((op->addr.buswidth == 1) && (op->data.buswidth == 1)) {
		if (op->cmd.opcode == 0x03)
			return dummy == 0;
		else if (op->cmd.opcode == 0x0b)
			return dummy == 8;
	}
	return false;
}

static bool mtk_nor_match_prg(const struct spi_mem_op *op)
{
	int tx_len, rx_len, prg_len, prg_left;

	// prg mode is spi-only.
	if ((op->cmd.buswidth > 1) || (op->addr.buswidth > 1) ||
	    (op->dummy.buswidth > 1) || (op->data.buswidth > 1))
		return false;

	tx_len = op->cmd.nbytes + op->addr.nbytes;

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		// count dummy bytes only if we need to write data after them
		tx_len += op->dummy.nbytes;

		// leave at least one byte for data
		if (tx_len > MTK_NOR_REG_PRGDATA_MAX)
			return false;

		// if there's no address, adjust_op_size can't shrink the op,
		// so check the data length as well.
		if ((!op->addr.nbytes) &&
		    (tx_len + op->data.nbytes > MTK_NOR_REG_PRGDATA_MAX + 1))
			return false;
	} else if (op->data.dir == SPI_MEM_DATA_IN) {
		if (tx_len > MTK_NOR_REG_PRGDATA_MAX + 1)
			return false;

		rx_len = op->data.nbytes;
		prg_left = MTK_NOR_PRG_CNT_MAX / 8 - tx_len - op->dummy.nbytes;
		if (prg_left > MTK_NOR_REG_SHIFT_MAX + 1)
			prg_left = MTK_NOR_REG_SHIFT_MAX + 1;
		if (rx_len > prg_left) {
			if (!op->addr.nbytes)
				return false;
			rx_len = prg_left;
		}

		prg_len = tx_len + op->dummy.nbytes + rx_len;
		if (prg_len > MTK_NOR_PRG_CNT_MAX / 8)
			return false;
	} else {
		prg_len = tx_len + op->dummy.nbytes;
		if (prg_len > MTK_NOR_PRG_CNT_MAX / 8)
			return false;
	}
	return true;
}

static void mtk_nor_adj_prg_size(struct spi_mem_op *op)
{
	int tx_len, tx_left, prg_left;

	tx_len = op->cmd.nbytes + op->addr.nbytes;
	if (op->data.dir == SPI_MEM_DATA_OUT) {
		tx_len += op->dummy.nbytes;
		tx_left = MTK_NOR_REG_PRGDATA_MAX + 1 - tx_len;
		if (op->data.nbytes > tx_left)
			op->data.nbytes = tx_left;
	} else if (op->data.dir == SPI_MEM_DATA_IN) {
		prg_left = MTK_NOR_PRG_CNT_MAX / 8 - tx_len - op->dummy.nbytes;
		if (prg_left > MTK_NOR_REG_SHIFT_MAX + 1)
			prg_left = MTK_NOR_REG_SHIFT_MAX + 1;
		if (op->data.nbytes > prg_left)
			op->data.nbytes = prg_left;
	}
}
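// Clamp op sizes to what each hardware path can take:
//  - DMA reads are capped at 4 MiB so the timeout arithmetic can't
//    overflow; reads starting at an unaligned flash address (or shorter
//    than 16 bytes) degrade to a single-byte PIO read;
//  - direct DMA into the caller's buffer is trimmed to a multiple of
//    16 bytes, while bounce-buffered reads are capped at the bounce
//    buffer size (one page);
//  - page-program writes either fill the whole 128-byte buffer or fall
//    back to a single byte.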
static int mtk_nor_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->controller);

	if (!op->data.nbytes)
		return 0;

	if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
		if ((op->data.dir == SPI_MEM_DATA_IN) &&
		    mtk_nor_match_read(op)) {
			// limit size to prevent timeout calculation overflow
			if (op->data.nbytes > 0x400000)
				op->data.nbytes = 0x400000;

			if ((op->addr.val & MTK_NOR_DMA_ALIGN_MASK) ||
			    (op->data.nbytes < MTK_NOR_DMA_ALIGN))
				op->data.nbytes = 1;
			else if (!need_bounce(sp, op))
				op->data.nbytes &= ~MTK_NOR_DMA_ALIGN_MASK;
			else if (op->data.nbytes > MTK_NOR_BOUNCE_BUF_SIZE)
				op->data.nbytes = MTK_NOR_BOUNCE_BUF_SIZE;
			return 0;
		} else if (op->data.dir == SPI_MEM_DATA_OUT) {
			if (op->data.nbytes >= MTK_NOR_PP_SIZE)
				op->data.nbytes = MTK_NOR_PP_SIZE;
			else
				op->data.nbytes = 1;
			return 0;
		}
	}

	mtk_nor_adj_prg_size(op);
	return 0;
}

static bool mtk_nor_supports_op(struct spi_mem *mem,
				const struct spi_mem_op *op)
{
	if (!spi_mem_default_supports_op(mem, op))
		return false;

	if (op->cmd.buswidth != 1)
		return false;

	if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
		switch (op->data.dir) {
		case SPI_MEM_DATA_IN:
			if (mtk_nor_match_read(op))
				return true;
			break;
		case SPI_MEM_DATA_OUT:
			if ((op->addr.buswidth == 1) &&
			    (op->dummy.nbytes == 0) &&
			    (op->data.buswidth == 1))
				return true;
			break;
		default:
			break;
		}
	}

	return mtk_nor_match_prg(op);
}

static void mtk_nor_setup_bus(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	u32 reg = 0;

	if (op->addr.nbytes == 4)
		reg |= MTK_NOR_4B_ADDR;

	if (op->data.buswidth == 4) {
		reg |= MTK_NOR_QUAD_READ;
		writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(4));
		if (op->addr.buswidth == 4)
			reg |= MTK_NOR_QUAD_ADDR;
	} else if (op->data.buswidth == 2) {
		reg |= MTK_NOR_DUAL_READ;
		writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(3));
		if (op->addr.buswidth == 2)
			reg |= MTK_NOR_DUAL_ADDR;
	} else {
		if (op->cmd.opcode == 0x0b)
			mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, MTK_NOR_FAST_READ, 0);
		else
			mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, 0, MTK_NOR_FAST_READ);
	}
	mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, reg, MTK_NOR_BUS_MODE_MASK);
}

static int mtk_nor_dma_exec(struct mtk_nor *sp, u32 from, unsigned int length,
			    dma_addr_t dma_addr)
{
	int ret = 0;
	u32 delay, timeout;
	u32 reg;

	writel(from, sp->base + MTK_NOR_REG_DMA_FADR);
	writel(dma_addr, sp->base + MTK_NOR_REG_DMA_DADR);
	writel(dma_addr + length, sp->base + MTK_NOR_REG_DMA_END_DADR);

	if (sp->high_dma) {
		writel(upper_32_bits(dma_addr),
		       sp->base + MTK_NOR_REG_DMA_DADR_HB);
		writel(upper_32_bits(dma_addr + length),
		       sp->base + MTK_NOR_REG_DMA_END_DADR_HB);
	}

	if (sp->has_irq) {
		reinit_completion(&sp->op_done);
		mtk_nor_rmw(sp, MTK_NOR_REG_IRQ_EN, MTK_NOR_IRQ_DMA, 0);
	}

	mtk_nor_rmw(sp, MTK_NOR_REG_DMA_CTL, MTK_NOR_DMA_START, 0);

	delay = CLK_TO_US(sp, (length + 5) * BITS_PER_BYTE);
	timeout = (delay + 1) * 100;

	if (sp->has_irq) {
		if (!wait_for_completion_timeout(&sp->op_done,
						 usecs_to_jiffies(max(timeout, 10000U))))
			ret = -ETIMEDOUT;
	} else {
		ret = readl_poll_timeout(sp->base + MTK_NOR_REG_DMA_CTL, reg,
					 !(reg & MTK_NOR_DMA_START), delay / 3,
					 timeout);
	}

	if (ret < 0)
		dev_err(sp->dev, "dma read timeout.\n");

	return ret;
}

static int mtk_nor_read_bounce(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	unsigned int rdlen;
	int ret;

	if (op->data.nbytes & MTK_NOR_DMA_ALIGN_MASK)
		rdlen = (op->data.nbytes + MTK_NOR_DMA_ALIGN) & ~MTK_NOR_DMA_ALIGN_MASK;
	else
		rdlen = op->data.nbytes;

	ret = mtk_nor_dma_exec(sp, op->addr.val, rdlen, sp->buffer_dma);

	if (!ret)
		memcpy(op->data.buf.in, sp->buffer, op->data.nbytes);

	return ret;
}

static int mtk_nor_read_dma(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	int ret;
	dma_addr_t dma_addr;

	if (need_bounce(sp, op))
		return mtk_nor_read_bounce(sp, op);

	dma_addr = dma_map_single(sp->dev, op->data.buf.in,
				  op->data.nbytes, DMA_FROM_DEVICE);

	if (dma_mapping_error(sp->dev, dma_addr))
		return -EINVAL;

	ret = mtk_nor_dma_exec(sp, op->addr.val, op->data.nbytes, dma_addr);

	dma_unmap_single(sp->dev, dma_addr, op->data.nbytes, DMA_FROM_DEVICE);

	return ret;
}

static int mtk_nor_read_pio(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	u8 *buf = op->data.buf.in;
	int ret;

	ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_READ, 6 * BITS_PER_BYTE);
	if (!ret)
		buf[0] = readb(sp->base + MTK_NOR_REG_RDATA);
	return ret;
}

static int mtk_nor_setup_write_buffer(struct mtk_nor *sp, bool on)
{
	int ret;
	u32 val;

	if (!(sp->wbuf_en ^ on))
		return 0;

	val = readl(sp->base + MTK_NOR_REG_CFG2);
	if (on) {
		writel(val | MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
		ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
					 val & MTK_NOR_WR_BUF_EN, 0, 10000);
	} else {
		writel(val & ~MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
		ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
					 !(val & MTK_NOR_WR_BUF_EN), 0, 10000);
	}

	if (!ret)
		sp->wbuf_en = on;

	return ret;
}
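// Buffered page program: stream the payload into the controller's
// 128-byte program buffer through the PP_DATA register, packing four
// bytes per 32-bit write in little-endian order (buf[0] in the low byte).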
static int mtk_nor_pp_buffered(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	const u8 *buf = op->data.buf.out;
	u32 val;
	int ret, i;

	ret = mtk_nor_setup_write_buffer(sp, true);
	if (ret < 0)
		return ret;

	for (i = 0; i < op->data.nbytes; i += 4) {
		val = buf[i + 3] << 24 | buf[i + 2] << 16 | buf[i + 1] << 8 |
		      buf[i];
		writel(val, sp->base + MTK_NOR_REG_PP_DATA);
	}
	return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE,
				(op->data.nbytes + 5) * BITS_PER_BYTE);
}

static int mtk_nor_pp_unbuffered(struct mtk_nor *sp,
				 const struct spi_mem_op *op)
{
	const u8 *buf = op->data.buf.out;
	int ret;

	ret = mtk_nor_setup_write_buffer(sp, false);
	if (ret < 0)
		return ret;
	writeb(buf[0], sp->base + MTK_NOR_REG_WDATA);
	return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE, 6 * BITS_PER_BYTE);
}

static int mtk_nor_spi_mem_prg(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	int rx_len = 0;
	int reg_offset = MTK_NOR_REG_PRGDATA_MAX;
	int tx_len, prg_len;
	int i, ret;
	void __iomem *reg;
	u8 bufbyte;

	tx_len = op->cmd.nbytes + op->addr.nbytes;

	// count dummy bytes only if we need to write data after them
	if (op->data.dir == SPI_MEM_DATA_OUT)
		tx_len += op->dummy.nbytes + op->data.nbytes;
	else if (op->data.dir == SPI_MEM_DATA_IN)
		rx_len = op->data.nbytes;

	prg_len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes +
		  op->data.nbytes;

	// an invalid op may reach here if the caller calls exec_op without
	// adjust_op_size. return -EINVAL instead of -ENOTSUPP so that
	// spi-mem won't try this op again with generic spi transfers.
	if ((tx_len > MTK_NOR_REG_PRGDATA_MAX + 1) ||
	    (rx_len > MTK_NOR_REG_SHIFT_MAX + 1) ||
	    (prg_len > MTK_NOR_PRG_CNT_MAX / 8))
		return -EINVAL;

	// fill tx data
	for (i = op->cmd.nbytes; i > 0; i--, reg_offset--) {
		reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
		bufbyte = (op->cmd.opcode >> ((i - 1) * BITS_PER_BYTE)) & 0xff;
		writeb(bufbyte, reg);
	}

	for (i = op->addr.nbytes; i > 0; i--, reg_offset--) {
		reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
		bufbyte = (op->addr.val >> ((i - 1) * BITS_PER_BYTE)) & 0xff;
		writeb(bufbyte, reg);
	}

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		for (i = 0; i < op->dummy.nbytes; i++, reg_offset--) {
			reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
			writeb(0, reg);
		}

		for (i = 0; i < op->data.nbytes; i++, reg_offset--) {
			reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
			writeb(((const u8 *)(op->data.buf.out))[i], reg);
		}
	}

	for (; reg_offset >= 0; reg_offset--) {
		reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
		writeb(0, reg);
	}

	// trigger op
	if (rx_len)
		writel(prg_len * BITS_PER_BYTE + sp->caps->extra_dummy_bit,
		       sp->base + MTK_NOR_REG_PRG_CNT);
	else
		writel(prg_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT);

	ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM,
			       prg_len * BITS_PER_BYTE);
	if (ret)
		return ret;

	// fetch read data
	reg_offset = 0;
	if (op->data.dir == SPI_MEM_DATA_IN) {
		for (i = op->data.nbytes - 1; i >= 0; i--, reg_offset++) {
			reg = sp->base + MTK_NOR_REG_SHIFT(reg_offset);
			((u8 *)(op->data.buf.in))[i] = readb(reg);
		}
	}

	return 0;
}
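// Dispatch point for spi-mem ops: data ops carrying a 3- or 4-byte
// address go through the dedicated read (PIO/DMA) or page-program paths;
// everything else (status, ID and other register ops) is executed
// through the generic PRG shift register above.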
static int mtk_nor_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->controller);
	int ret;

	if ((op->data.nbytes == 0) ||
	    ((op->addr.nbytes != 3) && (op->addr.nbytes != 4)))
		return mtk_nor_spi_mem_prg(sp, op);

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		mtk_nor_set_addr(sp, op);
		writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA0);
		if (op->data.nbytes == MTK_NOR_PP_SIZE)
			return mtk_nor_pp_buffered(sp, op);
		return mtk_nor_pp_unbuffered(sp, op);
	}

	if ((op->data.dir == SPI_MEM_DATA_IN) && mtk_nor_match_read(op)) {
		ret = mtk_nor_setup_write_buffer(sp, false);
		if (ret < 0)
			return ret;
		mtk_nor_setup_bus(sp, op);
		if (op->data.nbytes == 1) {
			mtk_nor_set_addr(sp, op);
			return mtk_nor_read_pio(sp, op);
		} else {
			ret = mtk_nor_read_dma(sp, op);
			if (unlikely(ret)) {
				/* Handle rare bus glitch */
				mtk_nor_reset(sp);
				mtk_nor_setup_bus(sp, op);
				return mtk_nor_read_dma(sp, op);
			}

			return ret;
		}
	}

	return mtk_nor_spi_mem_prg(sp, op);
}

static int mtk_nor_setup(struct spi_device *spi)
{
	struct mtk_nor *sp = spi_controller_get_devdata(spi->controller);

	if (spi->max_speed_hz && (spi->max_speed_hz < sp->spi_freq)) {
		dev_err(&spi->dev, "spi clock should be %u Hz.\n",
			sp->spi_freq);
		return -EINVAL;
	}
	spi->max_speed_hz = sp->spi_freq;

	return 0;
}
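// Plain (non-spi-mem) transfers reuse the PRG shift register, so a whole
// message is limited to MTK_NOR_PRG_MAX_SIZE (6) bytes in total;
// mtk_max_msg_size() below advertises this limit to the SPI core.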
static int mtk_nor_transfer_one_message(struct spi_controller *host,
					struct spi_message *m)
{
	struct mtk_nor *sp = spi_controller_get_devdata(host);
	struct spi_transfer *t = NULL;
	unsigned long trx_len = 0;
	int stat = 0;
	int reg_offset = MTK_NOR_REG_PRGDATA_MAX;
	void __iomem *reg;
	const u8 *txbuf;
	u8 *rxbuf;
	int i;

	list_for_each_entry(t, &m->transfers, transfer_list) {
		txbuf = t->tx_buf;
		for (i = 0; i < t->len; i++, reg_offset--) {
			reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
			if (txbuf)
				writeb(txbuf[i], reg);
			else
				writeb(0, reg);
		}
		trx_len += t->len;
	}

	writel(trx_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT);

	stat = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM,
				trx_len * BITS_PER_BYTE);
	if (stat < 0)
		goto msg_done;

	reg_offset = trx_len - 1;
	list_for_each_entry(t, &m->transfers, transfer_list) {
		rxbuf = t->rx_buf;
		for (i = 0; i < t->len; i++, reg_offset--) {
			reg = sp->base + MTK_NOR_REG_SHIFT(reg_offset);
			if (rxbuf)
				rxbuf[i] = readb(reg);
		}
	}

	m->actual_length = trx_len;
msg_done:
	m->status = stat;
	spi_finalize_current_message(host);

	return 0;
}

static void mtk_nor_disable_clk(struct mtk_nor *sp)
{
	clk_disable_unprepare(sp->spi_clk);
	clk_disable_unprepare(sp->ctlr_clk);
	clk_disable_unprepare(sp->axi_clk);
	clk_disable_unprepare(sp->axi_s_clk);
}

static int mtk_nor_enable_clk(struct mtk_nor *sp)
{
	int ret;

	ret = clk_prepare_enable(sp->spi_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(sp->ctlr_clk);
	if (ret) {
		clk_disable_unprepare(sp->spi_clk);
		return ret;
	}

	ret = clk_prepare_enable(sp->axi_clk);
	if (ret) {
		clk_disable_unprepare(sp->spi_clk);
		clk_disable_unprepare(sp->ctlr_clk);
		return ret;
	}

	ret = clk_prepare_enable(sp->axi_s_clk);
	if (ret) {
		clk_disable_unprepare(sp->spi_clk);
		clk_disable_unprepare(sp->ctlr_clk);
		clk_disable_unprepare(sp->axi_clk);
		return ret;
	}

	return 0;
}

static void mtk_nor_init(struct mtk_nor *sp)
{
	writel(0, sp->base + MTK_NOR_REG_IRQ_EN);
	writel(MTK_NOR_IRQ_MASK, sp->base + MTK_NOR_REG_IRQ_STAT);

	writel(MTK_NOR_ENABLE_SF_CMD, sp->base + MTK_NOR_REG_WP);
	mtk_nor_rmw(sp, MTK_NOR_REG_CFG2, MTK_NOR_WR_CUSTOM_OP_EN, 0);
	mtk_nor_rmw(sp, MTK_NOR_REG_CFG3,
		    MTK_NOR_DISABLE_WREN | MTK_NOR_DISABLE_SR_POLL, 0);
}

static irqreturn_t mtk_nor_irq_handler(int irq, void *data)
{
	struct mtk_nor *sp = data;
	u32 irq_status, irq_enabled;

	irq_status = readl(sp->base + MTK_NOR_REG_IRQ_STAT);
	irq_enabled = readl(sp->base + MTK_NOR_REG_IRQ_EN);
	// write status back to clear interrupt
	writel(irq_status, sp->base + MTK_NOR_REG_IRQ_STAT);

	if (!(irq_status & irq_enabled))
		return IRQ_NONE;

	if (irq_status & MTK_NOR_IRQ_DMA) {
		complete(&sp->op_done);
		writel(0, sp->base + MTK_NOR_REG_IRQ_EN);
	}

	return IRQ_HANDLED;
}

static size_t mtk_max_msg_size(struct spi_device *spi)
{
	return MTK_NOR_PRG_MAX_SIZE;
}

static const struct spi_controller_mem_ops mtk_nor_mem_ops = {
	.adjust_op_size = mtk_nor_adjust_op_size,
	.supports_op = mtk_nor_supports_op,
	.exec_op = mtk_nor_exec_op
};
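// Per-SoC capabilities: MT8192 has a 36-bit DMA master whose high address
// bits are programmed through the *_HB registers (see high_dma in
// mtk_nor_dma_exec()), and MT8186 needs one extra dummy bit when fetching
// data (see the comment on struct mtk_nor_caps).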
static const struct mtk_nor_caps mtk_nor_caps_mt8173 = {
	.dma_bits = 32,
	.extra_dummy_bit = 0,
};

static const struct mtk_nor_caps mtk_nor_caps_mt8186 = {
	.dma_bits = 32,
	.extra_dummy_bit = 1,
};

static const struct mtk_nor_caps mtk_nor_caps_mt8192 = {
	.dma_bits = 36,
	.extra_dummy_bit = 0,
};

static const struct of_device_id mtk_nor_match[] = {
	{ .compatible = "mediatek,mt8173-nor", .data = &mtk_nor_caps_mt8173 },
	{ .compatible = "mediatek,mt8186-nor", .data = &mtk_nor_caps_mt8186 },
	{ .compatible = "mediatek,mt8192-nor", .data = &mtk_nor_caps_mt8192 },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mtk_nor_match);

static int mtk_nor_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	struct mtk_nor *sp;
	struct mtk_nor_caps *caps;
	void __iomem *base;
	struct clk *spi_clk, *ctlr_clk, *axi_clk, *axi_s_clk;
	int ret, irq;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	spi_clk = devm_clk_get(&pdev->dev, "spi");
	if (IS_ERR(spi_clk))
		return PTR_ERR(spi_clk);

	ctlr_clk = devm_clk_get(&pdev->dev, "sf");
	if (IS_ERR(ctlr_clk))
		return PTR_ERR(ctlr_clk);

	axi_clk = devm_clk_get_optional(&pdev->dev, "axi");
	if (IS_ERR(axi_clk))
		return PTR_ERR(axi_clk);

	axi_s_clk = devm_clk_get_optional(&pdev->dev, "axi_s");
	if (IS_ERR(axi_s_clk))
		return PTR_ERR(axi_s_clk);

	caps = (struct mtk_nor_caps *)of_device_get_match_data(&pdev->dev);

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(caps->dma_bits));
	if (ret) {
		dev_err(&pdev->dev, "failed to set dma mask(%u)\n", caps->dma_bits);
		return ret;
	}

	ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*sp));
	if (!ctlr) {
		dev_err(&pdev->dev, "failed to allocate spi controller\n");
		return -ENOMEM;
	}

	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
	ctlr->dev.of_node = pdev->dev.of_node;
	ctlr->max_message_size = mtk_max_msg_size;
	ctlr->mem_ops = &mtk_nor_mem_ops;
	ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
	ctlr->num_chipselect = 1;
	ctlr->setup = mtk_nor_setup;
	ctlr->transfer_one_message = mtk_nor_transfer_one_message;
	ctlr->auto_runtime_pm = true;

	dev_set_drvdata(&pdev->dev, ctlr);

	sp = spi_controller_get_devdata(ctlr);
	sp->base = base;
	sp->has_irq = false;
	sp->wbuf_en = false;
	sp->ctlr = ctlr;
	sp->dev = &pdev->dev;
	sp->spi_clk = spi_clk;
	sp->ctlr_clk = ctlr_clk;
	sp->axi_clk = axi_clk;
	sp->axi_s_clk = axi_s_clk;
	sp->caps = caps;
	sp->high_dma = caps->dma_bits > 32;
	sp->buffer = dmam_alloc_coherent(&pdev->dev,
				MTK_NOR_BOUNCE_BUF_SIZE + MTK_NOR_DMA_ALIGN,
				&sp->buffer_dma, GFP_KERNEL);
	if (!sp->buffer)
		return -ENOMEM;

	if ((uintptr_t)sp->buffer & MTK_NOR_DMA_ALIGN_MASK) {
		dev_err(sp->dev, "misaligned allocation of internal buffer.\n");
		return -ENOMEM;
	}

	ret = mtk_nor_enable_clk(sp);
	if (ret < 0)
		return ret;

	sp->spi_freq = clk_get_rate(sp->spi_clk);

	mtk_nor_init(sp);

	irq = platform_get_irq_optional(pdev, 0);

	if (irq < 0) {
		dev_warn(sp->dev, "IRQ not available.");
	} else {
		ret = devm_request_irq(sp->dev, irq, mtk_nor_irq_handler, 0,
				       pdev->name, sp);
		if (ret < 0) {
			dev_warn(sp->dev, "failed to request IRQ.");
		} else {
			init_completion(&sp->op_done);
			sp->has_irq = true;
		}
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, -1);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);

	ret = devm_spi_register_controller(&pdev->dev, ctlr);
	if (ret < 0)
		goto err_probe;

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	dev_info(&pdev->dev, "spi frequency: %u Hz\n", sp->spi_freq);

	return 0;

err_probe:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);

	mtk_nor_disable_clk(sp);

	return ret;
}

static void mtk_nor_remove(struct platform_device *pdev)
{
	struct spi_controller *ctlr = dev_get_drvdata(&pdev->dev);
	struct mtk_nor *sp = spi_controller_get_devdata(ctlr);

	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);

	mtk_nor_disable_clk(sp);
}

static int __maybe_unused mtk_nor_runtime_suspend(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_nor *sp = spi_controller_get_devdata(ctlr);

	mtk_nor_disable_clk(sp);

	return 0;
}

static int __maybe_unused mtk_nor_runtime_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_nor *sp = spi_controller_get_devdata(ctlr);

	return mtk_nor_enable_clk(sp);
}
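// System sleep is routed through the runtime-PM callbacks; the controller
// is re-initialized on resume, presumably because register state is not
// retained across a full suspend.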
static int __maybe_unused mtk_nor_suspend(struct device *dev)
{
	return pm_runtime_force_suspend(dev);
}

static int __maybe_unused mtk_nor_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_nor *sp = spi_controller_get_devdata(ctlr);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret)
		return ret;

	mtk_nor_init(sp);

	return 0;
}

static const struct dev_pm_ops mtk_nor_pm_ops = {
	SET_RUNTIME_PM_OPS(mtk_nor_runtime_suspend,
			   mtk_nor_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(mtk_nor_suspend, mtk_nor_resume)
};

static struct platform_driver mtk_nor_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = mtk_nor_match,
		.pm = &mtk_nor_pm_ops,
	},
	.probe = mtk_nor_probe,
	.remove_new = mtk_nor_remove,
};

module_platform_driver(mtk_nor_driver);

MODULE_DESCRIPTION("Mediatek SPI NOR controller driver");
MODULE_AUTHOR("Chuanhong Guo <gch981213@gmail.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);