1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * ASPEED FMC/SPI Memory Controller Driver 4 * 5 * Copyright (c) 2015-2022, IBM Corporation. 6 * Copyright (c) 2020, ASPEED Corporation. 7 */ 8 9 #include <linux/clk.h> 10 #include <linux/module.h> 11 #include <linux/of.h> 12 #include <linux/of_platform.h> 13 #include <linux/platform_device.h> 14 #include <linux/spi/spi.h> 15 #include <linux/spi/spi-mem.h> 16 17 #define DEVICE_NAME "spi-aspeed-smc" 18 19 /* Type setting Register */ 20 #define CONFIG_REG 0x0 21 #define CONFIG_TYPE_SPI 0x2 22 23 /* CE Control Register */ 24 #define CE_CTRL_REG 0x4 25 26 /* CEx Control Register */ 27 #define CE0_CTRL_REG 0x10 28 #define CTRL_IO_MODE_MASK GENMASK(30, 28) 29 #define CTRL_IO_SINGLE_DATA 0x0 30 #define CTRL_IO_DUAL_DATA BIT(29) 31 #define CTRL_IO_QUAD_DATA BIT(30) 32 #define CTRL_COMMAND_SHIFT 16 33 #define CTRL_IO_ADDRESS_4B BIT(13) /* AST2400 SPI only */ 34 #define CTRL_IO_DUMMY_SET(dummy) \ 35 (((((dummy) >> 2) & 0x1) << 14) | (((dummy) & 0x3) << 6)) 36 #define CTRL_FREQ_SEL_SHIFT 8 37 #define CTRL_FREQ_SEL_MASK GENMASK(11, CTRL_FREQ_SEL_SHIFT) 38 #define CTRL_CE_STOP_ACTIVE BIT(2) 39 #define CTRL_IO_MODE_CMD_MASK GENMASK(1, 0) 40 #define CTRL_IO_MODE_NORMAL 0x0 41 #define CTRL_IO_MODE_READ 0x1 42 #define CTRL_IO_MODE_WRITE 0x2 43 #define CTRL_IO_MODE_USER 0x3 44 45 #define CTRL_IO_CMD_MASK 0xf0ff40c3 46 47 /* CEx Address Decoding Range Register */ 48 #define CE0_SEGMENT_ADDR_REG 0x30 49 50 /* CEx Read timing compensation register */ 51 #define CE0_TIMING_COMPENSATION_REG 0x94 52 53 enum aspeed_spi_ctl_reg_value { 54 ASPEED_SPI_BASE, 55 ASPEED_SPI_READ, 56 ASPEED_SPI_WRITE, 57 ASPEED_SPI_MAX, 58 }; 59 60 struct aspeed_spi; 61 62 struct aspeed_spi_chip { 63 struct aspeed_spi *aspi; 64 u32 cs; 65 void __iomem *ctl; 66 void __iomem *ahb_base; 67 u32 ahb_window_size; 68 u32 ctl_val[ASPEED_SPI_MAX]; 69 u32 clk_freq; 70 }; 71 72 struct aspeed_spi_data { 73 u32 ctl0; 74 u32 max_cs; 75 bool hastype; 76 u32 mode_bits; 77 u32 we0; 78 u32 timing; 79 u32 hclk_mask; 80 u32 hdiv_max; 81 82 u32 (*segment_start)(struct aspeed_spi *aspi, u32 reg); 83 u32 (*segment_end)(struct aspeed_spi *aspi, u32 reg); 84 u32 (*segment_reg)(struct aspeed_spi *aspi, u32 start, u32 end); 85 int (*calibrate)(struct aspeed_spi_chip *chip, u32 hdiv, 86 const u8 *golden_buf, u8 *test_buf); 87 }; 88 89 #define ASPEED_SPI_MAX_NUM_CS 5 90 91 struct aspeed_spi { 92 const struct aspeed_spi_data *data; 93 94 void __iomem *regs; 95 void __iomem *ahb_base; 96 u32 ahb_base_phy; 97 u32 ahb_window_size; 98 struct device *dev; 99 100 struct clk *clk; 101 u32 clk_freq; 102 103 struct aspeed_spi_chip chips[ASPEED_SPI_MAX_NUM_CS]; 104 }; 105 106 static u32 aspeed_spi_get_io_mode(const struct spi_mem_op *op) 107 { 108 switch (op->data.buswidth) { 109 case 1: 110 return CTRL_IO_SINGLE_DATA; 111 case 2: 112 return CTRL_IO_DUAL_DATA; 113 case 4: 114 return CTRL_IO_QUAD_DATA; 115 default: 116 return CTRL_IO_SINGLE_DATA; 117 } 118 } 119 120 static void aspeed_spi_set_io_mode(struct aspeed_spi_chip *chip, u32 io_mode) 121 { 122 u32 ctl; 123 124 if (io_mode > 0) { 125 ctl = readl(chip->ctl) & ~CTRL_IO_MODE_MASK; 126 ctl |= io_mode; 127 writel(ctl, chip->ctl); 128 } 129 } 130 131 static void aspeed_spi_start_user(struct aspeed_spi_chip *chip) 132 { 133 u32 ctl = chip->ctl_val[ASPEED_SPI_BASE]; 134 135 ctl |= CTRL_IO_MODE_USER | CTRL_CE_STOP_ACTIVE; 136 writel(ctl, chip->ctl); 137 138 ctl &= ~CTRL_CE_STOP_ACTIVE; 139 writel(ctl, chip->ctl); 140 } 141 142 static void aspeed_spi_stop_user(struct aspeed_spi_chip *chip) 143 { 144 u32 ctl = chip->ctl_val[ASPEED_SPI_READ] | 145 CTRL_IO_MODE_USER | CTRL_CE_STOP_ACTIVE; 146 147 writel(ctl, chip->ctl); 148 149 /* Restore defaults */ 150 writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl); 151 } 152 153 static int aspeed_spi_read_from_ahb(void *buf, void __iomem *src, size_t len) 154 { 155 size_t offset = 0; 156 157 if (IS_ALIGNED((uintptr_t)src, sizeof(uintptr_t)) && 158 IS_ALIGNED((uintptr_t)buf, sizeof(uintptr_t))) { 159 ioread32_rep(src, buf, len >> 2); 160 offset = len & ~0x3; 161 len -= offset; 162 } 163 ioread8_rep(src, (u8 *)buf + offset, len); 164 return 0; 165 } 166 167 static int aspeed_spi_write_to_ahb(void __iomem *dst, const void *buf, size_t len) 168 { 169 size_t offset = 0; 170 171 if (IS_ALIGNED((uintptr_t)dst, sizeof(uintptr_t)) && 172 IS_ALIGNED((uintptr_t)buf, sizeof(uintptr_t))) { 173 iowrite32_rep(dst, buf, len >> 2); 174 offset = len & ~0x3; 175 len -= offset; 176 } 177 iowrite8_rep(dst, (const u8 *)buf + offset, len); 178 return 0; 179 } 180 181 static int aspeed_spi_send_cmd_addr(struct aspeed_spi_chip *chip, u8 addr_nbytes, 182 u64 offset, u32 opcode) 183 { 184 __be32 temp; 185 u32 cmdaddr; 186 187 switch (addr_nbytes) { 188 case 3: 189 cmdaddr = offset & 0xFFFFFF; 190 cmdaddr |= opcode << 24; 191 192 temp = cpu_to_be32(cmdaddr); 193 aspeed_spi_write_to_ahb(chip->ahb_base, &temp, 4); 194 break; 195 case 4: 196 temp = cpu_to_be32(offset); 197 aspeed_spi_write_to_ahb(chip->ahb_base, &opcode, 1); 198 aspeed_spi_write_to_ahb(chip->ahb_base, &temp, 4); 199 break; 200 default: 201 WARN_ONCE(1, "Unexpected address width %u", addr_nbytes); 202 return -EOPNOTSUPP; 203 } 204 return 0; 205 } 206 207 static int aspeed_spi_read_reg(struct aspeed_spi_chip *chip, 208 const struct spi_mem_op *op) 209 { 210 aspeed_spi_start_user(chip); 211 aspeed_spi_write_to_ahb(chip->ahb_base, &op->cmd.opcode, 1); 212 aspeed_spi_read_from_ahb(op->data.buf.in, 213 chip->ahb_base, op->data.nbytes); 214 aspeed_spi_stop_user(chip); 215 return 0; 216 } 217 218 static int aspeed_spi_write_reg(struct aspeed_spi_chip *chip, 219 const struct spi_mem_op *op) 220 { 221 aspeed_spi_start_user(chip); 222 aspeed_spi_write_to_ahb(chip->ahb_base, &op->cmd.opcode, 1); 223 aspeed_spi_write_to_ahb(chip->ahb_base, op->data.buf.out, 224 op->data.nbytes); 225 aspeed_spi_stop_user(chip); 226 return 0; 227 } 228 229 static ssize_t aspeed_spi_read_user(struct aspeed_spi_chip *chip, 230 const struct spi_mem_op *op, 231 u64 offset, size_t len, void *buf) 232 { 233 int io_mode = aspeed_spi_get_io_mode(op); 234 u8 dummy = 0xFF; 235 int i; 236 int ret; 237 238 aspeed_spi_start_user(chip); 239 240 ret = aspeed_spi_send_cmd_addr(chip, op->addr.nbytes, offset, op->cmd.opcode); 241 if (ret < 0) 242 return ret; 243 244 if (op->dummy.buswidth && op->dummy.nbytes) { 245 for (i = 0; i < op->dummy.nbytes / op->dummy.buswidth; i++) 246 aspeed_spi_write_to_ahb(chip->ahb_base, &dummy, sizeof(dummy)); 247 } 248 249 aspeed_spi_set_io_mode(chip, io_mode); 250 251 aspeed_spi_read_from_ahb(buf, chip->ahb_base, len); 252 aspeed_spi_stop_user(chip); 253 return 0; 254 } 255 256 static ssize_t aspeed_spi_write_user(struct aspeed_spi_chip *chip, 257 const struct spi_mem_op *op) 258 { 259 int ret; 260 261 aspeed_spi_start_user(chip); 262 ret = aspeed_spi_send_cmd_addr(chip, op->addr.nbytes, op->addr.val, op->cmd.opcode); 263 if (ret < 0) 264 return ret; 265 aspeed_spi_write_to_ahb(chip->ahb_base, op->data.buf.out, op->data.nbytes); 266 aspeed_spi_stop_user(chip); 267 return 0; 268 } 269 270 /* support for 1-1-1, 1-1-2 or 1-1-4 */ 271 static bool aspeed_spi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op) 272 { 273 if (op->cmd.buswidth > 1) 274 return false; 275 276 if (op->addr.nbytes != 0) { 277 if (op->addr.buswidth > 1) 278 return false; 279 if (op->addr.nbytes < 3 || op->addr.nbytes > 4) 280 return false; 281 } 282 283 if (op->dummy.nbytes != 0) { 284 if (op->dummy.buswidth > 1 || op->dummy.nbytes > 7) 285 return false; 286 } 287 288 if (op->data.nbytes != 0 && op->data.buswidth > 4) 289 return false; 290 291 return spi_mem_default_supports_op(mem, op); 292 } 293 294 static const struct aspeed_spi_data ast2400_spi_data; 295 296 static int do_aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) 297 { 298 struct aspeed_spi *aspi = spi_controller_get_devdata(mem->spi->master); 299 struct aspeed_spi_chip *chip = &aspi->chips[mem->spi->chip_select]; 300 u32 addr_mode, addr_mode_backup; 301 u32 ctl_val; 302 int ret = 0; 303 304 dev_dbg(aspi->dev, 305 "CE%d %s OP %#x mode:%d.%d.%d.%d naddr:%#x ndummies:%#x len:%#x", 306 chip->cs, op->data.dir == SPI_MEM_DATA_IN ? "read" : "write", 307 op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth, 308 op->dummy.buswidth, op->data.buswidth, 309 op->addr.nbytes, op->dummy.nbytes, op->data.nbytes); 310 311 addr_mode = readl(aspi->regs + CE_CTRL_REG); 312 addr_mode_backup = addr_mode; 313 314 ctl_val = chip->ctl_val[ASPEED_SPI_BASE]; 315 ctl_val &= ~CTRL_IO_CMD_MASK; 316 317 ctl_val |= op->cmd.opcode << CTRL_COMMAND_SHIFT; 318 319 /* 4BYTE address mode */ 320 if (op->addr.nbytes) { 321 if (op->addr.nbytes == 4) 322 addr_mode |= (0x11 << chip->cs); 323 else 324 addr_mode &= ~(0x11 << chip->cs); 325 326 if (op->addr.nbytes == 4 && chip->aspi->data == &ast2400_spi_data) 327 ctl_val |= CTRL_IO_ADDRESS_4B; 328 } 329 330 if (op->dummy.nbytes) 331 ctl_val |= CTRL_IO_DUMMY_SET(op->dummy.nbytes / op->dummy.buswidth); 332 333 if (op->data.nbytes) 334 ctl_val |= aspeed_spi_get_io_mode(op); 335 336 if (op->data.dir == SPI_MEM_DATA_OUT) 337 ctl_val |= CTRL_IO_MODE_WRITE; 338 else 339 ctl_val |= CTRL_IO_MODE_READ; 340 341 if (addr_mode != addr_mode_backup) 342 writel(addr_mode, aspi->regs + CE_CTRL_REG); 343 writel(ctl_val, chip->ctl); 344 345 if (op->data.dir == SPI_MEM_DATA_IN) { 346 if (!op->addr.nbytes) 347 ret = aspeed_spi_read_reg(chip, op); 348 else 349 ret = aspeed_spi_read_user(chip, op, op->addr.val, 350 op->data.nbytes, op->data.buf.in); 351 } else { 352 if (!op->addr.nbytes) 353 ret = aspeed_spi_write_reg(chip, op); 354 else 355 ret = aspeed_spi_write_user(chip, op); 356 } 357 358 /* Restore defaults */ 359 if (addr_mode != addr_mode_backup) 360 writel(addr_mode_backup, aspi->regs + CE_CTRL_REG); 361 writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl); 362 return ret; 363 } 364 365 static int aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) 366 { 367 int ret; 368 369 ret = do_aspeed_spi_exec_op(mem, op); 370 if (ret) 371 dev_err(&mem->spi->dev, "operation failed: %d\n", ret); 372 return ret; 373 } 374 375 static const char *aspeed_spi_get_name(struct spi_mem *mem) 376 { 377 struct aspeed_spi *aspi = spi_controller_get_devdata(mem->spi->master); 378 struct device *dev = aspi->dev; 379 380 return devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev), mem->spi->chip_select); 381 } 382 383 struct aspeed_spi_window { 384 u32 cs; 385 u32 offset; 386 u32 size; 387 }; 388 389 static void aspeed_spi_get_windows(struct aspeed_spi *aspi, 390 struct aspeed_spi_window windows[ASPEED_SPI_MAX_NUM_CS]) 391 { 392 const struct aspeed_spi_data *data = aspi->data; 393 u32 reg_val; 394 u32 cs; 395 396 for (cs = 0; cs < aspi->data->max_cs; cs++) { 397 reg_val = readl(aspi->regs + CE0_SEGMENT_ADDR_REG + cs * 4); 398 windows[cs].cs = cs; 399 windows[cs].size = data->segment_end(aspi, reg_val) - 400 data->segment_start(aspi, reg_val); 401 windows[cs].offset = cs ? windows[cs - 1].offset + windows[cs - 1].size : 0; 402 dev_vdbg(aspi->dev, "CE%d offset=0x%.8x size=0x%x\n", cs, 403 windows[cs].offset, windows[cs].size); 404 } 405 } 406 407 /* 408 * On the AST2600, some CE windows are closed by default at reset but 409 * U-Boot should open all. 410 */ 411 static int aspeed_spi_chip_set_default_window(struct aspeed_spi_chip *chip) 412 { 413 struct aspeed_spi *aspi = chip->aspi; 414 struct aspeed_spi_window windows[ASPEED_SPI_MAX_NUM_CS] = { 0 }; 415 struct aspeed_spi_window *win = &windows[chip->cs]; 416 417 /* No segment registers for the AST2400 SPI controller */ 418 if (aspi->data == &ast2400_spi_data) { 419 win->offset = 0; 420 win->size = aspi->ahb_window_size; 421 } else { 422 aspeed_spi_get_windows(aspi, windows); 423 } 424 425 chip->ahb_base = aspi->ahb_base + win->offset; 426 chip->ahb_window_size = win->size; 427 428 dev_dbg(aspi->dev, "CE%d default window [ 0x%.8x - 0x%.8x ] %dMB", 429 chip->cs, aspi->ahb_base_phy + win->offset, 430 aspi->ahb_base_phy + win->offset + win->size - 1, 431 win->size >> 20); 432 433 return chip->ahb_window_size ? 0 : -1; 434 } 435 436 static int aspeed_spi_set_window(struct aspeed_spi *aspi, 437 const struct aspeed_spi_window *win) 438 { 439 u32 start = aspi->ahb_base_phy + win->offset; 440 u32 end = start + win->size; 441 void __iomem *seg_reg = aspi->regs + CE0_SEGMENT_ADDR_REG + win->cs * 4; 442 u32 seg_val_backup = readl(seg_reg); 443 u32 seg_val = aspi->data->segment_reg(aspi, start, end); 444 445 if (seg_val == seg_val_backup) 446 return 0; 447 448 writel(seg_val, seg_reg); 449 450 /* 451 * Restore initial value if something goes wrong else we could 452 * loose access to the chip. 453 */ 454 if (seg_val != readl(seg_reg)) { 455 dev_err(aspi->dev, "CE%d invalid window [ 0x%.8x - 0x%.8x ] %dMB", 456 win->cs, start, end - 1, win->size >> 20); 457 writel(seg_val_backup, seg_reg); 458 return -EIO; 459 } 460 461 if (win->size) 462 dev_dbg(aspi->dev, "CE%d new window [ 0x%.8x - 0x%.8x ] %dMB", 463 win->cs, start, end - 1, win->size >> 20); 464 else 465 dev_dbg(aspi->dev, "CE%d window closed", win->cs); 466 467 return 0; 468 } 469 470 /* 471 * Yet to be done when possible : 472 * - Align mappings on flash size (we don't have the info) 473 * - ioremap each window, not strictly necessary since the overall window 474 * is correct. 475 */ 476 static const struct aspeed_spi_data ast2500_spi_data; 477 static const struct aspeed_spi_data ast2600_spi_data; 478 static const struct aspeed_spi_data ast2600_fmc_data; 479 480 static int aspeed_spi_chip_adjust_window(struct aspeed_spi_chip *chip, 481 u32 local_offset, u32 size) 482 { 483 struct aspeed_spi *aspi = chip->aspi; 484 struct aspeed_spi_window windows[ASPEED_SPI_MAX_NUM_CS] = { 0 }; 485 struct aspeed_spi_window *win = &windows[chip->cs]; 486 int ret; 487 488 /* No segment registers for the AST2400 SPI controller */ 489 if (aspi->data == &ast2400_spi_data) 490 return 0; 491 492 /* 493 * Due to an HW issue on the AST2500 SPI controller, the CE0 494 * window size should be smaller than the maximum 128MB. 495 */ 496 if (aspi->data == &ast2500_spi_data && chip->cs == 0 && size == SZ_128M) { 497 size = 120 << 20; 498 dev_info(aspi->dev, "CE%d window resized to %dMB (AST2500 HW quirk)", 499 chip->cs, size >> 20); 500 } 501 502 /* 503 * The decoding size of AST2600 SPI controller should set at 504 * least 2MB. 505 */ 506 if ((aspi->data == &ast2600_spi_data || aspi->data == &ast2600_fmc_data) && 507 size < SZ_2M) { 508 size = SZ_2M; 509 dev_info(aspi->dev, "CE%d window resized to %dMB (AST2600 Decoding)", 510 chip->cs, size >> 20); 511 } 512 513 aspeed_spi_get_windows(aspi, windows); 514 515 /* Adjust this chip window */ 516 win->offset += local_offset; 517 win->size = size; 518 519 if (win->offset + win->size > aspi->ahb_window_size) { 520 win->size = aspi->ahb_window_size - win->offset; 521 dev_warn(aspi->dev, "CE%d window resized to %dMB", chip->cs, win->size >> 20); 522 } 523 524 ret = aspeed_spi_set_window(aspi, win); 525 if (ret) 526 return ret; 527 528 /* Update chip mapping info */ 529 chip->ahb_base = aspi->ahb_base + win->offset; 530 chip->ahb_window_size = win->size; 531 532 /* 533 * Also adjust next chip window to make sure that it does not 534 * overlap with the current window. 535 */ 536 if (chip->cs < aspi->data->max_cs - 1) { 537 struct aspeed_spi_window *next = &windows[chip->cs + 1]; 538 539 /* Change offset and size to keep the same end address */ 540 if ((next->offset + next->size) > (win->offset + win->size)) 541 next->size = (next->offset + next->size) - (win->offset + win->size); 542 else 543 next->size = 0; 544 next->offset = win->offset + win->size; 545 546 aspeed_spi_set_window(aspi, next); 547 } 548 return 0; 549 } 550 551 static int aspeed_spi_do_calibration(struct aspeed_spi_chip *chip); 552 553 static int aspeed_spi_dirmap_create(struct spi_mem_dirmap_desc *desc) 554 { 555 struct aspeed_spi *aspi = spi_controller_get_devdata(desc->mem->spi->master); 556 struct aspeed_spi_chip *chip = &aspi->chips[desc->mem->spi->chip_select]; 557 struct spi_mem_op *op = &desc->info.op_tmpl; 558 u32 ctl_val; 559 int ret = 0; 560 561 dev_dbg(aspi->dev, 562 "CE%d %s dirmap [ 0x%.8llx - 0x%.8llx ] OP %#x mode:%d.%d.%d.%d naddr:%#x ndummies:%#x\n", 563 chip->cs, op->data.dir == SPI_MEM_DATA_IN ? "read" : "write", 564 desc->info.offset, desc->info.offset + desc->info.length, 565 op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth, 566 op->dummy.buswidth, op->data.buswidth, 567 op->addr.nbytes, op->dummy.nbytes); 568 569 chip->clk_freq = desc->mem->spi->max_speed_hz; 570 571 /* Only for reads */ 572 if (op->data.dir != SPI_MEM_DATA_IN) 573 return -EOPNOTSUPP; 574 575 aspeed_spi_chip_adjust_window(chip, desc->info.offset, desc->info.length); 576 577 if (desc->info.length > chip->ahb_window_size) 578 dev_warn(aspi->dev, "CE%d window (%dMB) too small for mapping", 579 chip->cs, chip->ahb_window_size >> 20); 580 581 /* Define the default IO read settings */ 582 ctl_val = readl(chip->ctl) & ~CTRL_IO_CMD_MASK; 583 ctl_val |= aspeed_spi_get_io_mode(op) | 584 op->cmd.opcode << CTRL_COMMAND_SHIFT | 585 CTRL_IO_MODE_READ; 586 587 if (op->dummy.nbytes) 588 ctl_val |= CTRL_IO_DUMMY_SET(op->dummy.nbytes / op->dummy.buswidth); 589 590 /* Tune 4BYTE address mode */ 591 if (op->addr.nbytes) { 592 u32 addr_mode = readl(aspi->regs + CE_CTRL_REG); 593 594 if (op->addr.nbytes == 4) 595 addr_mode |= (0x11 << chip->cs); 596 else 597 addr_mode &= ~(0x11 << chip->cs); 598 writel(addr_mode, aspi->regs + CE_CTRL_REG); 599 600 /* AST2400 SPI controller sets 4BYTE address mode in 601 * CE0 Control Register 602 */ 603 if (op->addr.nbytes == 4 && chip->aspi->data == &ast2400_spi_data) 604 ctl_val |= CTRL_IO_ADDRESS_4B; 605 } 606 607 /* READ mode is the controller default setting */ 608 chip->ctl_val[ASPEED_SPI_READ] = ctl_val; 609 writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl); 610 611 ret = aspeed_spi_do_calibration(chip); 612 613 dev_info(aspi->dev, "CE%d read buswidth:%d [0x%08x]\n", 614 chip->cs, op->data.buswidth, chip->ctl_val[ASPEED_SPI_READ]); 615 616 return ret; 617 } 618 619 static ssize_t aspeed_spi_dirmap_read(struct spi_mem_dirmap_desc *desc, 620 u64 offset, size_t len, void *buf) 621 { 622 struct aspeed_spi *aspi = spi_controller_get_devdata(desc->mem->spi->master); 623 struct aspeed_spi_chip *chip = &aspi->chips[desc->mem->spi->chip_select]; 624 625 /* Switch to USER command mode if mapping window is too small */ 626 if (chip->ahb_window_size < offset + len) { 627 int ret; 628 629 ret = aspeed_spi_read_user(chip, &desc->info.op_tmpl, offset, len, buf); 630 if (ret < 0) 631 return ret; 632 } else { 633 memcpy_fromio(buf, chip->ahb_base + offset, len); 634 } 635 636 return len; 637 } 638 639 static const struct spi_controller_mem_ops aspeed_spi_mem_ops = { 640 .supports_op = aspeed_spi_supports_op, 641 .exec_op = aspeed_spi_exec_op, 642 .get_name = aspeed_spi_get_name, 643 .dirmap_create = aspeed_spi_dirmap_create, 644 .dirmap_read = aspeed_spi_dirmap_read, 645 }; 646 647 static void aspeed_spi_chip_set_type(struct aspeed_spi *aspi, unsigned int cs, int type) 648 { 649 u32 reg; 650 651 reg = readl(aspi->regs + CONFIG_REG); 652 reg &= ~(0x3 << (cs * 2)); 653 reg |= type << (cs * 2); 654 writel(reg, aspi->regs + CONFIG_REG); 655 } 656 657 static void aspeed_spi_chip_enable(struct aspeed_spi *aspi, unsigned int cs, bool enable) 658 { 659 u32 we_bit = BIT(aspi->data->we0 + cs); 660 u32 reg = readl(aspi->regs + CONFIG_REG); 661 662 if (enable) 663 reg |= we_bit; 664 else 665 reg &= ~we_bit; 666 writel(reg, aspi->regs + CONFIG_REG); 667 } 668 669 static int aspeed_spi_setup(struct spi_device *spi) 670 { 671 struct aspeed_spi *aspi = spi_controller_get_devdata(spi->master); 672 const struct aspeed_spi_data *data = aspi->data; 673 unsigned int cs = spi->chip_select; 674 struct aspeed_spi_chip *chip = &aspi->chips[cs]; 675 676 chip->aspi = aspi; 677 chip->cs = cs; 678 chip->ctl = aspi->regs + data->ctl0 + cs * 4; 679 680 /* The driver only supports SPI type flash */ 681 if (data->hastype) 682 aspeed_spi_chip_set_type(aspi, cs, CONFIG_TYPE_SPI); 683 684 if (aspeed_spi_chip_set_default_window(chip) < 0) { 685 dev_warn(aspi->dev, "CE%d window invalid", cs); 686 return -EINVAL; 687 } 688 689 aspeed_spi_chip_enable(aspi, cs, true); 690 691 chip->ctl_val[ASPEED_SPI_BASE] = CTRL_CE_STOP_ACTIVE | CTRL_IO_MODE_USER; 692 693 dev_dbg(aspi->dev, "CE%d setup done\n", cs); 694 return 0; 695 } 696 697 static void aspeed_spi_cleanup(struct spi_device *spi) 698 { 699 struct aspeed_spi *aspi = spi_controller_get_devdata(spi->master); 700 unsigned int cs = spi->chip_select; 701 702 aspeed_spi_chip_enable(aspi, cs, false); 703 704 dev_dbg(aspi->dev, "CE%d cleanup done\n", cs); 705 } 706 707 static void aspeed_spi_enable(struct aspeed_spi *aspi, bool enable) 708 { 709 int cs; 710 711 for (cs = 0; cs < aspi->data->max_cs; cs++) 712 aspeed_spi_chip_enable(aspi, cs, enable); 713 } 714 715 static int aspeed_spi_probe(struct platform_device *pdev) 716 { 717 struct device *dev = &pdev->dev; 718 const struct aspeed_spi_data *data; 719 struct spi_controller *ctlr; 720 struct aspeed_spi *aspi; 721 struct resource *res; 722 int ret; 723 724 data = of_device_get_match_data(&pdev->dev); 725 if (!data) 726 return -ENODEV; 727 728 ctlr = devm_spi_alloc_master(dev, sizeof(*aspi)); 729 if (!ctlr) 730 return -ENOMEM; 731 732 aspi = spi_controller_get_devdata(ctlr); 733 platform_set_drvdata(pdev, aspi); 734 aspi->data = data; 735 aspi->dev = dev; 736 737 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 738 aspi->regs = devm_ioremap_resource(dev, res); 739 if (IS_ERR(aspi->regs)) { 740 dev_err(dev, "missing AHB register window\n"); 741 return PTR_ERR(aspi->regs); 742 } 743 744 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 745 aspi->ahb_base = devm_ioremap_resource(dev, res); 746 if (IS_ERR(aspi->ahb_base)) { 747 dev_err(dev, "missing AHB mapping window\n"); 748 return PTR_ERR(aspi->ahb_base); 749 } 750 751 aspi->ahb_window_size = resource_size(res); 752 aspi->ahb_base_phy = res->start; 753 754 aspi->clk = devm_clk_get(&pdev->dev, NULL); 755 if (IS_ERR(aspi->clk)) { 756 dev_err(dev, "missing clock\n"); 757 return PTR_ERR(aspi->clk); 758 } 759 760 aspi->clk_freq = clk_get_rate(aspi->clk); 761 if (!aspi->clk_freq) { 762 dev_err(dev, "invalid clock\n"); 763 return -EINVAL; 764 } 765 766 ret = clk_prepare_enable(aspi->clk); 767 if (ret) { 768 dev_err(dev, "can not enable the clock\n"); 769 return ret; 770 } 771 772 /* IRQ is for DMA, which the driver doesn't support yet */ 773 774 ctlr->mode_bits = SPI_RX_DUAL | SPI_TX_DUAL | data->mode_bits; 775 ctlr->bus_num = pdev->id; 776 ctlr->mem_ops = &aspeed_spi_mem_ops; 777 ctlr->setup = aspeed_spi_setup; 778 ctlr->cleanup = aspeed_spi_cleanup; 779 ctlr->num_chipselect = data->max_cs; 780 ctlr->dev.of_node = dev->of_node; 781 782 ret = devm_spi_register_controller(dev, ctlr); 783 if (ret) { 784 dev_err(&pdev->dev, "spi_register_controller failed\n"); 785 goto disable_clk; 786 } 787 return 0; 788 789 disable_clk: 790 clk_disable_unprepare(aspi->clk); 791 return ret; 792 } 793 794 static int aspeed_spi_remove(struct platform_device *pdev) 795 { 796 struct aspeed_spi *aspi = platform_get_drvdata(pdev); 797 798 aspeed_spi_enable(aspi, false); 799 clk_disable_unprepare(aspi->clk); 800 return 0; 801 } 802 803 /* 804 * AHB mappings 805 */ 806 807 /* 808 * The Segment Registers of the AST2400 and AST2500 use a 8MB unit. 809 * The address range is encoded with absolute addresses in the overall 810 * mapping window. 811 */ 812 static u32 aspeed_spi_segment_start(struct aspeed_spi *aspi, u32 reg) 813 { 814 return ((reg >> 16) & 0xFF) << 23; 815 } 816 817 static u32 aspeed_spi_segment_end(struct aspeed_spi *aspi, u32 reg) 818 { 819 return ((reg >> 24) & 0xFF) << 23; 820 } 821 822 static u32 aspeed_spi_segment_reg(struct aspeed_spi *aspi, u32 start, u32 end) 823 { 824 return (((start >> 23) & 0xFF) << 16) | (((end >> 23) & 0xFF) << 24); 825 } 826 827 /* 828 * The Segment Registers of the AST2600 use a 1MB unit. The address 829 * range is encoded with offsets in the overall mapping window. 830 */ 831 832 #define AST2600_SEG_ADDR_MASK 0x0ff00000 833 834 static u32 aspeed_spi_segment_ast2600_start(struct aspeed_spi *aspi, 835 u32 reg) 836 { 837 u32 start_offset = (reg << 16) & AST2600_SEG_ADDR_MASK; 838 839 return aspi->ahb_base_phy + start_offset; 840 } 841 842 static u32 aspeed_spi_segment_ast2600_end(struct aspeed_spi *aspi, 843 u32 reg) 844 { 845 u32 end_offset = reg & AST2600_SEG_ADDR_MASK; 846 847 /* segment is disabled */ 848 if (!end_offset) 849 return aspi->ahb_base_phy; 850 851 return aspi->ahb_base_phy + end_offset + 0x100000; 852 } 853 854 static u32 aspeed_spi_segment_ast2600_reg(struct aspeed_spi *aspi, 855 u32 start, u32 end) 856 { 857 /* disable zero size segments */ 858 if (start == end) 859 return 0; 860 861 return ((start & AST2600_SEG_ADDR_MASK) >> 16) | 862 ((end - 1) & AST2600_SEG_ADDR_MASK); 863 } 864 865 /* 866 * Read timing compensation sequences 867 */ 868 869 #define CALIBRATE_BUF_SIZE SZ_16K 870 871 static bool aspeed_spi_check_reads(struct aspeed_spi_chip *chip, 872 const u8 *golden_buf, u8 *test_buf) 873 { 874 int i; 875 876 for (i = 0; i < 10; i++) { 877 memcpy_fromio(test_buf, chip->ahb_base, CALIBRATE_BUF_SIZE); 878 if (memcmp(test_buf, golden_buf, CALIBRATE_BUF_SIZE) != 0) { 879 #if defined(VERBOSE_DEBUG) 880 print_hex_dump_bytes(DEVICE_NAME " fail: ", DUMP_PREFIX_NONE, 881 test_buf, 0x100); 882 #endif 883 return false; 884 } 885 } 886 return true; 887 } 888 889 #define FREAD_TPASS(i) (((i) / 2) | (((i) & 1) ? 0 : 8)) 890 891 /* 892 * The timing register is shared by all devices. Only update for CE0. 893 */ 894 static int aspeed_spi_calibrate(struct aspeed_spi_chip *chip, u32 hdiv, 895 const u8 *golden_buf, u8 *test_buf) 896 { 897 struct aspeed_spi *aspi = chip->aspi; 898 const struct aspeed_spi_data *data = aspi->data; 899 int i; 900 int good_pass = -1, pass_count = 0; 901 u32 shift = (hdiv - 1) << 2; 902 u32 mask = ~(0xfu << shift); 903 u32 fread_timing_val = 0; 904 905 /* Try HCLK delay 0..5, each one with/without delay and look for a 906 * good pair. 907 */ 908 for (i = 0; i < 12; i++) { 909 bool pass; 910 911 if (chip->cs == 0) { 912 fread_timing_val &= mask; 913 fread_timing_val |= FREAD_TPASS(i) << shift; 914 writel(fread_timing_val, aspi->regs + data->timing); 915 } 916 pass = aspeed_spi_check_reads(chip, golden_buf, test_buf); 917 dev_dbg(aspi->dev, 918 " * [%08x] %d HCLK delay, %dns DI delay : %s", 919 fread_timing_val, i / 2, (i & 1) ? 0 : 4, 920 pass ? "PASS" : "FAIL"); 921 if (pass) { 922 pass_count++; 923 if (pass_count == 3) { 924 good_pass = i - 1; 925 break; 926 } 927 } else { 928 pass_count = 0; 929 } 930 } 931 932 /* No good setting for this frequency */ 933 if (good_pass < 0) 934 return -1; 935 936 /* We have at least one pass of margin, let's use first pass */ 937 if (chip->cs == 0) { 938 fread_timing_val &= mask; 939 fread_timing_val |= FREAD_TPASS(good_pass) << shift; 940 writel(fread_timing_val, aspi->regs + data->timing); 941 } 942 dev_dbg(aspi->dev, " * -> good is pass %d [0x%08x]", 943 good_pass, fread_timing_val); 944 return 0; 945 } 946 947 static bool aspeed_spi_check_calib_data(const u8 *test_buf, u32 size) 948 { 949 const u32 *tb32 = (const u32 *)test_buf; 950 u32 i, cnt = 0; 951 952 /* We check if we have enough words that are neither all 0 953 * nor all 1's so the calibration can be considered valid. 954 * 955 * I use an arbitrary threshold for now of 64 956 */ 957 size >>= 2; 958 for (i = 0; i < size; i++) { 959 if (tb32[i] != 0 && tb32[i] != 0xffffffff) 960 cnt++; 961 } 962 return cnt >= 64; 963 } 964 965 static const u32 aspeed_spi_hclk_divs[] = { 966 0xf, /* HCLK */ 967 0x7, /* HCLK/2 */ 968 0xe, /* HCLK/3 */ 969 0x6, /* HCLK/4 */ 970 0xd, /* HCLK/5 */ 971 }; 972 973 #define ASPEED_SPI_HCLK_DIV(i) \ 974 (aspeed_spi_hclk_divs[(i) - 1] << CTRL_FREQ_SEL_SHIFT) 975 976 static int aspeed_spi_do_calibration(struct aspeed_spi_chip *chip) 977 { 978 struct aspeed_spi *aspi = chip->aspi; 979 const struct aspeed_spi_data *data = aspi->data; 980 u32 ahb_freq = aspi->clk_freq; 981 u32 max_freq = chip->clk_freq; 982 u32 ctl_val; 983 u8 *golden_buf = NULL; 984 u8 *test_buf = NULL; 985 int i, rc, best_div = -1; 986 987 dev_dbg(aspi->dev, "calculate timing compensation - AHB freq: %d MHz", 988 ahb_freq / 1000000); 989 990 /* 991 * use the related low frequency to get check calibration data 992 * and get golden data. 993 */ 994 ctl_val = chip->ctl_val[ASPEED_SPI_READ] & data->hclk_mask; 995 writel(ctl_val, chip->ctl); 996 997 test_buf = kzalloc(CALIBRATE_BUF_SIZE * 2, GFP_KERNEL); 998 if (!test_buf) 999 return -ENOMEM; 1000 1001 golden_buf = test_buf + CALIBRATE_BUF_SIZE; 1002 1003 memcpy_fromio(golden_buf, chip->ahb_base, CALIBRATE_BUF_SIZE); 1004 if (!aspeed_spi_check_calib_data(golden_buf, CALIBRATE_BUF_SIZE)) { 1005 dev_info(aspi->dev, "Calibration area too uniform, using low speed"); 1006 goto no_calib; 1007 } 1008 1009 #if defined(VERBOSE_DEBUG) 1010 print_hex_dump_bytes(DEVICE_NAME " good: ", DUMP_PREFIX_NONE, 1011 golden_buf, 0x100); 1012 #endif 1013 1014 /* Now we iterate the HCLK dividers until we find our breaking point */ 1015 for (i = ARRAY_SIZE(aspeed_spi_hclk_divs); i > data->hdiv_max - 1; i--) { 1016 u32 tv, freq; 1017 1018 freq = ahb_freq / i; 1019 if (freq > max_freq) 1020 continue; 1021 1022 /* Set the timing */ 1023 tv = chip->ctl_val[ASPEED_SPI_READ] | ASPEED_SPI_HCLK_DIV(i); 1024 writel(tv, chip->ctl); 1025 dev_dbg(aspi->dev, "Trying HCLK/%d [%08x] ...", i, tv); 1026 rc = data->calibrate(chip, i, golden_buf, test_buf); 1027 if (rc == 0) 1028 best_div = i; 1029 } 1030 1031 /* Nothing found ? */ 1032 if (best_div < 0) { 1033 dev_warn(aspi->dev, "No good frequency, using dumb slow"); 1034 } else { 1035 dev_dbg(aspi->dev, "Found good read timings at HCLK/%d", best_div); 1036 1037 /* Record the freq */ 1038 for (i = 0; i < ASPEED_SPI_MAX; i++) 1039 chip->ctl_val[i] = (chip->ctl_val[i] & data->hclk_mask) | 1040 ASPEED_SPI_HCLK_DIV(best_div); 1041 } 1042 1043 no_calib: 1044 writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl); 1045 kfree(test_buf); 1046 return 0; 1047 } 1048 1049 #define TIMING_DELAY_DI BIT(3) 1050 #define TIMING_DELAY_HCYCLE_MAX 5 1051 #define TIMING_REG_AST2600(chip) \ 1052 ((chip)->aspi->regs + (chip)->aspi->data->timing + \ 1053 (chip)->cs * 4) 1054 1055 static int aspeed_spi_ast2600_calibrate(struct aspeed_spi_chip *chip, u32 hdiv, 1056 const u8 *golden_buf, u8 *test_buf) 1057 { 1058 struct aspeed_spi *aspi = chip->aspi; 1059 int hcycle; 1060 u32 shift = (hdiv - 2) << 3; 1061 u32 mask = ~(0xfu << shift); 1062 u32 fread_timing_val = 0; 1063 1064 for (hcycle = 0; hcycle <= TIMING_DELAY_HCYCLE_MAX; hcycle++) { 1065 int delay_ns; 1066 bool pass = false; 1067 1068 fread_timing_val &= mask; 1069 fread_timing_val |= hcycle << shift; 1070 1071 /* no DI input delay first */ 1072 writel(fread_timing_val, TIMING_REG_AST2600(chip)); 1073 pass = aspeed_spi_check_reads(chip, golden_buf, test_buf); 1074 dev_dbg(aspi->dev, 1075 " * [%08x] %d HCLK delay, DI delay none : %s", 1076 fread_timing_val, hcycle, pass ? "PASS" : "FAIL"); 1077 if (pass) 1078 return 0; 1079 1080 /* Add DI input delays */ 1081 fread_timing_val &= mask; 1082 fread_timing_val |= (TIMING_DELAY_DI | hcycle) << shift; 1083 1084 for (delay_ns = 0; delay_ns < 0x10; delay_ns++) { 1085 fread_timing_val &= ~(0xf << (4 + shift)); 1086 fread_timing_val |= delay_ns << (4 + shift); 1087 1088 writel(fread_timing_val, TIMING_REG_AST2600(chip)); 1089 pass = aspeed_spi_check_reads(chip, golden_buf, test_buf); 1090 dev_dbg(aspi->dev, 1091 " * [%08x] %d HCLK delay, DI delay %d.%dns : %s", 1092 fread_timing_val, hcycle, (delay_ns + 1) / 2, 1093 (delay_ns + 1) & 1 ? 5 : 5, pass ? "PASS" : "FAIL"); 1094 /* 1095 * TODO: This is optimistic. We should look 1096 * for a working interval and save the middle 1097 * value in the read timing register. 1098 */ 1099 if (pass) 1100 return 0; 1101 } 1102 } 1103 1104 /* No good setting for this frequency */ 1105 return -1; 1106 } 1107 1108 /* 1109 * Platform definitions 1110 */ 1111 static const struct aspeed_spi_data ast2400_fmc_data = { 1112 .max_cs = 5, 1113 .hastype = true, 1114 .we0 = 16, 1115 .ctl0 = CE0_CTRL_REG, 1116 .timing = CE0_TIMING_COMPENSATION_REG, 1117 .hclk_mask = 0xfffff0ff, 1118 .hdiv_max = 1, 1119 .calibrate = aspeed_spi_calibrate, 1120 .segment_start = aspeed_spi_segment_start, 1121 .segment_end = aspeed_spi_segment_end, 1122 .segment_reg = aspeed_spi_segment_reg, 1123 }; 1124 1125 static const struct aspeed_spi_data ast2400_spi_data = { 1126 .max_cs = 1, 1127 .hastype = false, 1128 .we0 = 0, 1129 .ctl0 = 0x04, 1130 .timing = 0x14, 1131 .hclk_mask = 0xfffff0ff, 1132 .hdiv_max = 1, 1133 .calibrate = aspeed_spi_calibrate, 1134 /* No segment registers */ 1135 }; 1136 1137 static const struct aspeed_spi_data ast2500_fmc_data = { 1138 .max_cs = 3, 1139 .hastype = true, 1140 .we0 = 16, 1141 .ctl0 = CE0_CTRL_REG, 1142 .timing = CE0_TIMING_COMPENSATION_REG, 1143 .hclk_mask = 0xffffd0ff, 1144 .hdiv_max = 1, 1145 .calibrate = aspeed_spi_calibrate, 1146 .segment_start = aspeed_spi_segment_start, 1147 .segment_end = aspeed_spi_segment_end, 1148 .segment_reg = aspeed_spi_segment_reg, 1149 }; 1150 1151 static const struct aspeed_spi_data ast2500_spi_data = { 1152 .max_cs = 2, 1153 .hastype = false, 1154 .we0 = 16, 1155 .ctl0 = CE0_CTRL_REG, 1156 .timing = CE0_TIMING_COMPENSATION_REG, 1157 .hclk_mask = 0xffffd0ff, 1158 .hdiv_max = 1, 1159 .calibrate = aspeed_spi_calibrate, 1160 .segment_start = aspeed_spi_segment_start, 1161 .segment_end = aspeed_spi_segment_end, 1162 .segment_reg = aspeed_spi_segment_reg, 1163 }; 1164 1165 static const struct aspeed_spi_data ast2600_fmc_data = { 1166 .max_cs = 3, 1167 .hastype = false, 1168 .mode_bits = SPI_RX_QUAD | SPI_RX_QUAD, 1169 .we0 = 16, 1170 .ctl0 = CE0_CTRL_REG, 1171 .timing = CE0_TIMING_COMPENSATION_REG, 1172 .hclk_mask = 0xf0fff0ff, 1173 .hdiv_max = 2, 1174 .calibrate = aspeed_spi_ast2600_calibrate, 1175 .segment_start = aspeed_spi_segment_ast2600_start, 1176 .segment_end = aspeed_spi_segment_ast2600_end, 1177 .segment_reg = aspeed_spi_segment_ast2600_reg, 1178 }; 1179 1180 static const struct aspeed_spi_data ast2600_spi_data = { 1181 .max_cs = 2, 1182 .hastype = false, 1183 .mode_bits = SPI_RX_QUAD | SPI_RX_QUAD, 1184 .we0 = 16, 1185 .ctl0 = CE0_CTRL_REG, 1186 .timing = CE0_TIMING_COMPENSATION_REG, 1187 .hclk_mask = 0xf0fff0ff, 1188 .hdiv_max = 2, 1189 .calibrate = aspeed_spi_ast2600_calibrate, 1190 .segment_start = aspeed_spi_segment_ast2600_start, 1191 .segment_end = aspeed_spi_segment_ast2600_end, 1192 .segment_reg = aspeed_spi_segment_ast2600_reg, 1193 }; 1194 1195 static const struct of_device_id aspeed_spi_matches[] = { 1196 { .compatible = "aspeed,ast2400-fmc", .data = &ast2400_fmc_data }, 1197 { .compatible = "aspeed,ast2400-spi", .data = &ast2400_spi_data }, 1198 { .compatible = "aspeed,ast2500-fmc", .data = &ast2500_fmc_data }, 1199 { .compatible = "aspeed,ast2500-spi", .data = &ast2500_spi_data }, 1200 { .compatible = "aspeed,ast2600-fmc", .data = &ast2600_fmc_data }, 1201 { .compatible = "aspeed,ast2600-spi", .data = &ast2600_spi_data }, 1202 { } 1203 }; 1204 MODULE_DEVICE_TABLE(of, aspeed_spi_matches); 1205 1206 static struct platform_driver aspeed_spi_driver = { 1207 .probe = aspeed_spi_probe, 1208 .remove = aspeed_spi_remove, 1209 .driver = { 1210 .name = DEVICE_NAME, 1211 .of_match_table = aspeed_spi_matches, 1212 } 1213 }; 1214 1215 module_platform_driver(aspeed_spi_driver); 1216 1217 MODULE_DESCRIPTION("ASPEED Static Memory Controller Driver"); 1218 MODULE_AUTHOR("Chin-Ting Kuo <chin-ting_kuo@aspeedtech.com>"); 1219 MODULE_AUTHOR("Cedric Le Goater <clg@kaod.org>"); 1220 MODULE_LICENSE("GPL v2"); 1221