// SPDX-License-Identifier: GPL-2.0-only
/*
 * SPI-Engine SPI controller driver
 * Copyright 2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/fpga/adi-axi-common.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

#define SPI_ENGINE_REG_RESET			0x40

#define SPI_ENGINE_REG_INT_ENABLE		0x80
#define SPI_ENGINE_REG_INT_PENDING		0x84
#define SPI_ENGINE_REG_INT_SOURCE		0x88

#define SPI_ENGINE_REG_SYNC_ID			0xc0

#define SPI_ENGINE_REG_CMD_FIFO_ROOM		0xd0
#define SPI_ENGINE_REG_SDO_FIFO_ROOM		0xd4
#define SPI_ENGINE_REG_SDI_FIFO_LEVEL		0xd8

#define SPI_ENGINE_REG_CMD_FIFO			0xe0
#define SPI_ENGINE_REG_SDO_DATA_FIFO		0xe4
#define SPI_ENGINE_REG_SDI_DATA_FIFO		0xe8
#define SPI_ENGINE_REG_SDI_DATA_FIFO_PEEK	0xec

#define SPI_ENGINE_INT_CMD_ALMOST_EMPTY		BIT(0)
#define SPI_ENGINE_INT_SDO_ALMOST_EMPTY		BIT(1)
#define SPI_ENGINE_INT_SDI_ALMOST_FULL		BIT(2)
#define SPI_ENGINE_INT_SYNC			BIT(3)

#define SPI_ENGINE_CONFIG_CPHA			BIT(0)
#define SPI_ENGINE_CONFIG_CPOL			BIT(1)
#define SPI_ENGINE_CONFIG_3WIRE			BIT(2)

#define SPI_ENGINE_INST_TRANSFER		0x0
#define SPI_ENGINE_INST_ASSERT			0x1
#define SPI_ENGINE_INST_WRITE			0x2
#define SPI_ENGINE_INST_MISC			0x3

#define SPI_ENGINE_CMD_REG_CLK_DIV		0x0
#define SPI_ENGINE_CMD_REG_CONFIG		0x1
#define SPI_ENGINE_CMD_REG_XFER_BITS		0x2

#define SPI_ENGINE_MISC_SYNC			0x0
#define SPI_ENGINE_MISC_SLEEP			0x1

#define SPI_ENGINE_TRANSFER_WRITE		0x1
#define SPI_ENGINE_TRANSFER_READ		0x2

/* Arbitrary sync ID for use by host->cur_msg */
#define AXI_SPI_ENGINE_CUR_MSG_SYNC_ID		0x1

#define SPI_ENGINE_CMD(inst, arg1, arg2) \
	(((inst) << 12) | ((arg1) << 8) | (arg2))

#define SPI_ENGINE_CMD_TRANSFER(flags, n) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_TRANSFER, (flags), (n))
#define SPI_ENGINE_CMD_ASSERT(delay, cs) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_ASSERT, (delay), (cs))
#define SPI_ENGINE_CMD_WRITE(reg, val) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_WRITE, (reg), (val))
#define SPI_ENGINE_CMD_SLEEP(delay) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SLEEP, (delay))
#define SPI_ENGINE_CMD_SYNC(id) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SYNC, (id))
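/*
 * Worked example of the 16-bit instruction encoding above (illustrative):
 * SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG, 0x3) expands to
 * (0x2 << 12) | (0x1 << 8) | 0x3 = 0x2103, i.e. the instruction opcode in
 * bits [15:12], arg1 in bits [11:8] and arg2 in bits [7:0].
 */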
struct spi_engine_program {
	unsigned int length;
	uint16_t instructions[] __counted_by(length);
};

/**
 * struct spi_engine_message_state - SPI engine per-message state
 */
struct spi_engine_message_state {
	/** @cmd_length: Number of elements in cmd_buf array. */
	unsigned cmd_length;
	/** @cmd_buf: Array of commands not yet written to CMD FIFO. */
	const uint16_t *cmd_buf;
	/** @tx_xfer: Next xfer with tx_buf not yet fully written to TX FIFO. */
	struct spi_transfer *tx_xfer;
	/** @tx_length: Size of tx_buf in bytes. */
	unsigned int tx_length;
	/** @tx_buf: Bytes not yet written to TX FIFO. */
	const uint8_t *tx_buf;
	/** @rx_xfer: Next xfer with rx_buf not yet fully read from RX FIFO. */
	struct spi_transfer *rx_xfer;
	/** @rx_length: Size of rx_buf in bytes. */
	unsigned int rx_length;
	/** @rx_buf: Bytes not yet read from the RX FIFO. */
	uint8_t *rx_buf;
};

struct spi_engine {
	struct clk *clk;
	struct clk *ref_clk;

	spinlock_t lock;

	void __iomem *base;
	struct spi_engine_message_state msg_state;
	struct completion msg_complete;
	unsigned int int_enable;
};

static void spi_engine_program_add_cmd(struct spi_engine_program *p,
	bool dry, uint16_t cmd)
{
	p->length++;

	if (!dry)
		p->instructions[p->length - 1] = cmd;
}

static unsigned int spi_engine_get_config(struct spi_device *spi)
{
	unsigned int config = 0;

	if (spi->mode & SPI_CPOL)
		config |= SPI_ENGINE_CONFIG_CPOL;
	if (spi->mode & SPI_CPHA)
		config |= SPI_ENGINE_CONFIG_CPHA;
	if (spi->mode & SPI_3WIRE)
		config |= SPI_ENGINE_CONFIG_3WIRE;

	return config;
}

static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
	struct spi_transfer *xfer)
{
	unsigned int len;

	if (xfer->bits_per_word <= 8)
		len = xfer->len;
	else if (xfer->bits_per_word <= 16)
		len = xfer->len / 2;
	else
		len = xfer->len / 4;

	while (len) {
		unsigned int n = min(len, 256U);
		unsigned int flags = 0;

		if (xfer->tx_buf)
			flags |= SPI_ENGINE_TRANSFER_WRITE;
		if (xfer->rx_buf)
			flags |= SPI_ENGINE_TRANSFER_READ;

		spi_engine_program_add_cmd(p, dry,
			SPI_ENGINE_CMD_TRANSFER(flags, n - 1));
		len -= n;
	}
}

static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry,
				 int delay_ns, u32 sclk_hz)
{
	unsigned int t;

	/* negative delay indicates error, e.g. from spi_delay_to_ns() */
	if (delay_ns <= 0)
		return;

	/* rounding down since executing the instruction adds a couple of ticks delay */
	t = DIV_ROUND_DOWN_ULL((u64)delay_ns * sclk_hz, NSEC_PER_SEC);
	while (t) {
		unsigned int n = min(t, 256U);

		spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_SLEEP(n - 1));
		t -= n;
	}
}
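/*
 * Illustrative numbers for the sleep conversion above: with sclk_hz at
 * 100 MHz, a 1000 ns delay converts to t = 100 ticks and is emitted as a
 * single SPI_ENGINE_CMD_SLEEP(99); delays longer than 256 ticks are split
 * into multiple sleep instructions.
 */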
static void spi_engine_gen_cs(struct spi_engine_program *p, bool dry,
		struct spi_device *spi, bool assert)
{
	unsigned int mask = 0xff;

	if (assert)
		mask ^= BIT(spi_get_chipselect(spi, 0));

	spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_ASSERT(0, mask));
}

/*
 * Performs precompile steps on the message.
 *
 * The SPI core does most of the message/transfer validation and filling in
 * fields for us via __spi_validate(). This fixes up anything remaining not
 * done there.
 *
 * NB: This is separate from spi_engine_compile_message() because the latter
 * is called twice and would otherwise result in double-evaluation.
 */
static void spi_engine_precompile_message(struct spi_message *msg)
{
	unsigned int clk_div, max_hz = msg->spi->controller->max_speed_hz;
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		clk_div = DIV_ROUND_UP(max_hz, xfer->speed_hz);
		xfer->effective_speed_hz = max_hz / min(clk_div, 256U);
	}
}
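/*
 * Example of the divider calculation above (illustrative values): with
 * max_hz = 50 MHz and a requested xfer->speed_hz of 12 MHz,
 * clk_div = DIV_ROUND_UP(50, 12) = 5, so the transfer runs at an effective
 * 10 MHz; the divider saturates at 256.
 */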
static void spi_engine_compile_message(struct spi_message *msg, bool dry,
	struct spi_engine_program *p)
{
	struct spi_device *spi = msg->spi;
	struct spi_controller *host = spi->controller;
	struct spi_transfer *xfer;
	int clk_div, new_clk_div;
	bool keep_cs = false;
	u8 bits_per_word = 0;

	clk_div = 1;

	spi_engine_program_add_cmd(p, dry,
		SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
			spi_engine_get_config(spi)));

	xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
	spi_engine_gen_cs(p, dry, spi, !xfer->cs_off);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		new_clk_div = host->max_speed_hz / xfer->effective_speed_hz;
		if (new_clk_div != clk_div) {
			clk_div = new_clk_div;
			/* actual divider used is register value + 1 */
			spi_engine_program_add_cmd(p, dry,
				SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV,
					clk_div - 1));
		}

		if (bits_per_word != xfer->bits_per_word) {
			bits_per_word = xfer->bits_per_word;
			spi_engine_program_add_cmd(p, dry,
				SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_XFER_BITS,
					bits_per_word));
		}

		spi_engine_gen_xfer(p, dry, xfer);
		spi_engine_gen_sleep(p, dry, spi_delay_to_ns(&xfer->delay, xfer),
				     xfer->effective_speed_hz);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
				keep_cs = true;
			} else {
				if (!xfer->cs_off)
					spi_engine_gen_cs(p, dry, spi, false);

				spi_engine_gen_sleep(p, dry, spi_delay_to_ns(
					&xfer->cs_change_delay, xfer),
					xfer->effective_speed_hz);

				if (!list_next_entry(xfer, transfer_list)->cs_off)
					spi_engine_gen_cs(p, dry, spi, true);
			}
		} else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
			   xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
			spi_engine_gen_cs(p, dry, spi, xfer->cs_off);
		}
	}

	if (!keep_cs)
		spi_engine_gen_cs(p, dry, spi, false);

	/*
	 * Restore clockdiv to default so that future gen_sleep commands don't
	 * have to be aware of the current register state.
	 */
	if (clk_div != 1)
		spi_engine_program_add_cmd(p, dry,
			SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV, 0));
}
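/*
 * Illustration of the instruction stream compiled above for a simple,
 * hypothetical message: one full-duplex 8-bit transfer of len <= 256 bytes
 * on CS0, running at max_speed_hz, mode 0, no delays:
 *
 *	WRITE(REG_CONFIG, 0x00)		- CPOL/CPHA/3WIRE mode bits
 *	ASSERT(0, 0xfe)			- assert CS0 (mask 0xff ^ BIT(0))
 *	WRITE(REG_XFER_BITS, 8)		- word size
 *	TRANSFER(WRITE|READ, len - 1)	- shift the data words
 *	ASSERT(0, 0xff)			- deassert all chip selects
 *
 * No CLK_DIV write is emitted because the divider stays at its default of 1.
 * spi_engine_optimize_message() appends a final SYNC instruction so that
 * message completion can be detected from the sync interrupt.
 */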
static void spi_engine_xfer_next(struct spi_message *msg,
	struct spi_transfer **_xfer)
{
	struct spi_transfer *xfer = *_xfer;

	if (!xfer) {
		xfer = list_first_entry(&msg->transfers,
			struct spi_transfer, transfer_list);
	} else if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
		xfer = NULL;
	} else {
		xfer = list_next_entry(xfer, transfer_list);
	}

	*_xfer = xfer;
}

static void spi_engine_tx_next(struct spi_message *msg)
{
	struct spi_engine_message_state *st = msg->state;
	struct spi_transfer *xfer = st->tx_xfer;

	do {
		spi_engine_xfer_next(msg, &xfer);
	} while (xfer && !xfer->tx_buf);

	st->tx_xfer = xfer;
	if (xfer) {
		st->tx_length = xfer->len;
		st->tx_buf = xfer->tx_buf;
	} else {
		st->tx_buf = NULL;
	}
}

static void spi_engine_rx_next(struct spi_message *msg)
{
	struct spi_engine_message_state *st = msg->state;
	struct spi_transfer *xfer = st->rx_xfer;

	do {
		spi_engine_xfer_next(msg, &xfer);
	} while (xfer && !xfer->rx_buf);

	st->rx_xfer = xfer;
	if (xfer) {
		st->rx_length = xfer->len;
		st->rx_buf = xfer->rx_buf;
	} else {
		st->rx_buf = NULL;
	}
}

static bool spi_engine_write_cmd_fifo(struct spi_engine *spi_engine,
	struct spi_message *msg)
{
	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_CMD_FIFO;
	struct spi_engine_message_state *st = msg->state;
	unsigned int n, m, i;
	const uint16_t *buf;

	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_CMD_FIFO_ROOM);
	while (n && st->cmd_length) {
		m = min(n, st->cmd_length);
		buf = st->cmd_buf;
		for (i = 0; i < m; i++)
			writel_relaxed(buf[i], addr);
		st->cmd_buf += m;
		st->cmd_length -= m;
		n -= m;
	}

	return st->cmd_length != 0;
}

static bool spi_engine_write_tx_fifo(struct spi_engine *spi_engine,
	struct spi_message *msg)
{
	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDO_DATA_FIFO;
	struct spi_engine_message_state *st = msg->state;
	unsigned int n, m, i;

	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDO_FIFO_ROOM);
	while (n && st->tx_length) {
		if (st->tx_xfer->bits_per_word <= 8) {
			const u8 *buf = st->tx_buf;

			m = min(n, st->tx_length);
			for (i = 0; i < m; i++)
				writel_relaxed(buf[i], addr);
			st->tx_buf += m;
			st->tx_length -= m;
		} else if (st->tx_xfer->bits_per_word <= 16) {
			const u16 *buf = (const u16 *)st->tx_buf;

			m = min(n, st->tx_length / 2);
			for (i = 0; i < m; i++)
				writel_relaxed(buf[i], addr);
			st->tx_buf += m * 2;
			st->tx_length -= m * 2;
		} else {
			const u32 *buf = (const u32 *)st->tx_buf;

			m = min(n, st->tx_length / 4);
			for (i = 0; i < m; i++)
				writel_relaxed(buf[i], addr);
			st->tx_buf += m * 4;
			st->tx_length -= m * 4;
		}
		n -= m;
		if (st->tx_length == 0)
			spi_engine_tx_next(msg);
	}

	return st->tx_length != 0;
}

static bool spi_engine_read_rx_fifo(struct spi_engine *spi_engine,
	struct spi_message *msg)
{
	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDI_DATA_FIFO;
	struct spi_engine_message_state *st = msg->state;
	unsigned int n, m, i;

	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDI_FIFO_LEVEL);
	while (n && st->rx_length) {
		if (st->rx_xfer->bits_per_word <= 8) {
			u8 *buf = st->rx_buf;

			m = min(n, st->rx_length);
			for (i = 0; i < m; i++)
				buf[i] = readl_relaxed(addr);
			st->rx_buf += m;
			st->rx_length -= m;
		} else if (st->rx_xfer->bits_per_word <= 16) {
			u16 *buf = (u16 *)st->rx_buf;

			m = min(n, st->rx_length / 2);
			for (i = 0; i < m; i++)
				buf[i] = readl_relaxed(addr);
			st->rx_buf += m * 2;
			st->rx_length -= m * 2;
		} else {
			u32 *buf = (u32 *)st->rx_buf;

			m = min(n, st->rx_length / 4);
			for (i = 0; i < m; i++)
				buf[i] = readl_relaxed(addr);
			st->rx_buf += m * 4;
			st->rx_length -= m * 4;
		}
		n -= m;
		if (st->rx_length == 0)
			spi_engine_rx_next(msg);
	}

	return st->rx_length != 0;
}

static irqreturn_t spi_engine_irq(int irq, void *devid)
{
	struct spi_controller *host = devid;
	struct spi_message *msg = host->cur_msg;
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
	unsigned int disable_int = 0;
	unsigned int pending;
	int completed_id = -1;

	pending = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_INT_PENDING);

	if (pending & SPI_ENGINE_INT_SYNC) {
		writel_relaxed(SPI_ENGINE_INT_SYNC,
			spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
		completed_id = readl_relaxed(
			spi_engine->base + SPI_ENGINE_REG_SYNC_ID);
	}

	spin_lock(&spi_engine->lock);

	if (pending & SPI_ENGINE_INT_CMD_ALMOST_EMPTY) {
		if (!spi_engine_write_cmd_fifo(spi_engine, msg))
			disable_int |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
	}

	if (pending & SPI_ENGINE_INT_SDO_ALMOST_EMPTY) {
		if (!spi_engine_write_tx_fifo(spi_engine, msg))
			disable_int |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
	}

	if (pending & (SPI_ENGINE_INT_SDI_ALMOST_FULL | SPI_ENGINE_INT_SYNC)) {
		if (!spi_engine_read_rx_fifo(spi_engine, msg))
			disable_int |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
	}

	if (pending & SPI_ENGINE_INT_SYNC && msg) {
		if (completed_id == AXI_SPI_ENGINE_CUR_MSG_SYNC_ID) {
			msg->status = 0;
			msg->actual_length = msg->frame_length;
			complete(&spi_engine->msg_complete);
			disable_int |= SPI_ENGINE_INT_SYNC;
		}
	}

	if (disable_int) {
		spi_engine->int_enable &= ~disable_int;
		writel_relaxed(spi_engine->int_enable,
			spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	}

	spin_unlock(&spi_engine->lock);

	return IRQ_HANDLED;
}
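/*
 * Note on the two-pass compilation below: the first, "dry" pass only counts
 * instructions (spi_engine_program_add_cmd() skips the store when dry is
 * true), so the exact allocation size is known before the second pass fills
 * in the instruction buffer. One extra slot is reserved for the trailing
 * SYNC instruction.
 */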
static int spi_engine_optimize_message(struct spi_message *msg)
{
	struct spi_engine_program p_dry, *p;

	spi_engine_precompile_message(msg);

	p_dry.length = 0;
	spi_engine_compile_message(msg, true, &p_dry);

	p = kzalloc(struct_size(p, instructions, p_dry.length + 1), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	spi_engine_compile_message(msg, false, p);

	spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC(
		AXI_SPI_ENGINE_CUR_MSG_SYNC_ID));

	msg->opt_state = p;

	return 0;
}

static int spi_engine_unoptimize_message(struct spi_message *msg)
{
	kfree(msg->opt_state);

	return 0;
}

static int spi_engine_transfer_one_message(struct spi_controller *host,
	struct spi_message *msg)
{
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
	struct spi_engine_message_state *st = &spi_engine->msg_state;
	struct spi_engine_program *p = msg->opt_state;
	unsigned int int_enable = 0;
	unsigned long flags;

	/* reinitialize message state for this transfer */
	memset(st, 0, sizeof(*st));
	st->cmd_buf = p->instructions;
	st->cmd_length = p->length;
	msg->state = st;

	reinit_completion(&spi_engine->msg_complete);

	spin_lock_irqsave(&spi_engine->lock, flags);

	if (spi_engine_write_cmd_fifo(spi_engine, msg))
		int_enable |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;

	spi_engine_tx_next(msg);
	if (spi_engine_write_tx_fifo(spi_engine, msg))
		int_enable |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;

	spi_engine_rx_next(msg);
	if (st->rx_length != 0)
		int_enable |= SPI_ENGINE_INT_SDI_ALMOST_FULL;

	int_enable |= SPI_ENGINE_INT_SYNC;

	writel_relaxed(int_enable,
		spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	spi_engine->int_enable = int_enable;
	spin_unlock_irqrestore(&spi_engine->lock, flags);

	if (!wait_for_completion_timeout(&spi_engine->msg_complete,
					 msecs_to_jiffies(5000))) {
		dev_err(&host->dev,
			"Timeout occurred while waiting for transfer to complete. Hardware is probably broken.\n");
		msg->status = -ETIMEDOUT;
	}

	spi_finalize_current_message(host);

	return msg->status;
}

static void spi_engine_release_hw(void *p)
{
	struct spi_engine *spi_engine = p;

	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
}

static int spi_engine_probe(struct platform_device *pdev)
{
	struct spi_engine *spi_engine;
	struct spi_controller *host;
	unsigned int version;
	int irq;
	int ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	host = devm_spi_alloc_host(&pdev->dev, sizeof(*spi_engine));
	if (!host)
		return -ENOMEM;

	spi_engine = spi_controller_get_devdata(host);

	spin_lock_init(&spi_engine->lock);
	init_completion(&spi_engine->msg_complete);

	spi_engine->clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
	if (IS_ERR(spi_engine->clk))
		return PTR_ERR(spi_engine->clk);

	spi_engine->ref_clk = devm_clk_get_enabled(&pdev->dev, "spi_clk");
	if (IS_ERR(spi_engine->ref_clk))
		return PTR_ERR(spi_engine->ref_clk);

	spi_engine->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(spi_engine->base))
		return PTR_ERR(spi_engine->base);

	version = readl(spi_engine->base + ADI_AXI_REG_VERSION);
	if (ADI_AXI_PCORE_VER_MAJOR(version) != 1) {
		dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%c\n",
			ADI_AXI_PCORE_VER_MAJOR(version),
			ADI_AXI_PCORE_VER_MINOR(version),
			ADI_AXI_PCORE_VER_PATCH(version));
		return -ENODEV;
	}

	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);

	ret = devm_add_action_or_reset(&pdev->dev, spi_engine_release_hw,
				       spi_engine);
	if (ret)
		return ret;

	ret = devm_request_irq(&pdev->dev, irq, spi_engine_irq, 0, pdev->name,
			       host);
	if (ret)
		return ret;

	host->dev.of_node = pdev->dev.of_node;
	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
	host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
	host->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2;
	host->transfer_one_message = spi_engine_transfer_one_message;
	host->optimize_message = spi_engine_optimize_message;
	host->unoptimize_message = spi_engine_unoptimize_message;
	host->num_chipselect = 8;

	if (host->max_speed_hz == 0)
		return dev_err_probe(&pdev->dev, -EINVAL, "spi_clk rate is 0");

	ret = devm_spi_register_controller(&pdev->dev, host);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, host);

	return 0;
}
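/*
 * Illustrative devicetree node matching this driver (the address, interrupt
 * and clock specifiers below are placeholders, not taken from real
 * hardware; only the compatible string and clock names come from this
 * driver):
 *
 *	spi@44a00000 {
 *		compatible = "adi,axi-spi-engine-1.00.a";
 *		reg = <0x44a00000 0x1000>;
 *		interrupts = <0 56 4>;
 *		clocks = <&clkc 15>, <&clkc 15>;
 *		clock-names = "s_axi_aclk", "spi_clk";
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *	};
 */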
static const struct of_device_id spi_engine_match_table[] = {
	{ .compatible = "adi,axi-spi-engine-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, spi_engine_match_table);

static struct platform_driver spi_engine_driver = {
	.probe = spi_engine_probe,
	.driver = {
		.name = "spi-engine",
		.of_match_table = spi_engine_match_table,
	},
};
module_platform_driver(spi_engine_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Analog Devices SPI engine peripheral driver");
MODULE_LICENSE("GPL");