/*
 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
 * Copyright (C) 2013, Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/acpi.h>

#include "spi-pxa2xx.h"

MODULE_AUTHOR("Stephen Street");
MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-spi");

#define TIMOUT_DFLT		1000

/*
 * Mask for testing SSCR1 changes that require an SSP restart: basically
 * everything except the service and interrupt enables.  The PXA270 developer
 * manual says only SSCR1_SCFR, SSCR1_SPH and SSCR1_SPO need to be in this
 * list, but the PXA255 developer manual says all bits, without really meaning
 * the service and interrupt enables.
 */
#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
				| SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
				| SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
				| SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
				| SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
				| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)

#define QUARK_X1000_SSCR1_CHANGE_MASK (QUARK_X1000_SSCR1_STRF	\
				| QUARK_X1000_SSCR1_EFWR	\
				| QUARK_X1000_SSCR1_RFT		\
				| QUARK_X1000_SSCR1_TFT		\
				| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)

#define GENERAL_REG_RXTO_HOLDOFF_DISABLE BIT(24)
#define SPI_CS_CONTROL_SW_MODE	BIT(0)
#define SPI_CS_CONTROL_CS_HIGH	BIT(1)

struct lpss_config {
	/* LPSS offset from drv_data->ioaddr */
	unsigned offset;
	/* Register offsets from drv_data->lpss_base or -1 */
	int reg_general;
	int reg_ssp;
	int reg_cs_ctrl;
	/* FIFO thresholds */
	u32 rx_threshold;
	u32 tx_threshold_lo;
	u32 tx_threshold_hi;
};

/* Keep these sorted with enum pxa_ssp_type */
static const struct lpss_config lpss_platforms[] = {
	{	/* LPSS_LPT_SSP */
		.offset = 0x800,
		.reg_general = 0x08,
		.reg_ssp = 0x0c,
		.reg_cs_ctrl = 0x18,
		.rx_threshold = 64,
		.tx_threshold_lo = 160,
		.tx_threshold_hi = 224,
	},
	{	/* LPSS_BYT_SSP */
		.offset = 0x400,
		.reg_general = 0x08,
		.reg_ssp = 0x0c,
		.reg_cs_ctrl = 0x18,
		.rx_threshold = 64,
		.tx_threshold_lo = 160,
		.tx_threshold_hi = 224,
	},
};

static inline const struct lpss_config
*lpss_get_config(const struct driver_data *drv_data)
{
	return &lpss_platforms[drv_data->ssp_type - LPSS_LPT_SSP];
}

static bool is_lpss_ssp(const struct driver_data *drv_data)
{
	switch (drv_data->ssp_type) {
	case LPSS_LPT_SSP:
	case LPSS_BYT_SSP:
		return true;
	default:
		return false;
	}
}

static bool is_quark_x1000_ssp(const struct driver_data *drv_data)
{
	return drv_data->ssp_type == QUARK_X1000_SSP;
}

static u32 pxa2xx_spi_get_ssrc1_change_mask(const struct driver_data *drv_data)
{
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		return QUARK_X1000_SSCR1_CHANGE_MASK;
	default:
		return SSCR1_CHANGE_MASK;
	}
}

static u32
pxa2xx_spi_get_rx_default_thre(const struct driver_data *drv_data)
{
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		return RX_THRESH_QUARK_X1000_DFLT;
	default:
		return RX_THRESH_DFLT;
	}
}

static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data)
{
	u32 mask;

	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		mask = QUARK_X1000_SSSR_TFL_MASK;
		break;
	default:
		mask = SSSR_TFL_MASK;
		break;
	}

	return (pxa2xx_spi_read(drv_data, SSSR) & mask) == mask;
}

static void pxa2xx_spi_clear_rx_thre(const struct driver_data *drv_data,
				     u32 *sccr1_reg)
{
	u32 mask;

	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		mask = QUARK_X1000_SSCR1_RFT;
		break;
	default:
		mask = SSCR1_RFT;
		break;
	}
	*sccr1_reg &= ~mask;
}

static void pxa2xx_spi_set_rx_thre(const struct driver_data *drv_data,
				   u32 *sccr1_reg, u32 threshold)
{
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		*sccr1_reg |= QUARK_X1000_SSCR1_RxTresh(threshold);
		break;
	default:
		*sccr1_reg |= SSCR1_RxTresh(threshold);
		break;
	}
}

static u32 pxa2xx_configure_sscr0(const struct driver_data *drv_data,
				  u32 clk_div, u8 bits)
{
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		return clk_div
			| QUARK_X1000_SSCR0_Motorola
			| QUARK_X1000_SSCR0_DataSize(bits > 32 ? 8 : bits)
			| SSCR0_SSE;
	default:
		return clk_div
			| SSCR0_Motorola
			| SSCR0_DataSize(bits > 16 ? bits - 16 : bits)
			| SSCR0_SSE
			| (bits > 16 ? SSCR0_EDSS : 0);
	}
}

/*
 * Read and write LPSS SSP private registers. Caller must first check that
 * is_lpss_ssp() returns true before these can be called.
 */
static u32 __lpss_ssp_read_priv(struct driver_data *drv_data, unsigned offset)
{
	WARN_ON(!drv_data->lpss_base);
	return readl(drv_data->lpss_base + offset);
}

static void __lpss_ssp_write_priv(struct driver_data *drv_data,
				  unsigned offset, u32 value)
{
	WARN_ON(!drv_data->lpss_base);
	writel(value, drv_data->lpss_base + offset);
}

/*
 * lpss_ssp_setup - perform LPSS SSP specific setup
 * @drv_data: pointer to the driver private data
 *
 * Perform LPSS SSP specific setup. This function must be called first if
 * one is going to use LPSS SSP private registers.
 */
static void lpss_ssp_setup(struct driver_data *drv_data)
{
	const struct lpss_config *config;
	u32 value;

	config = lpss_get_config(drv_data);
	drv_data->lpss_base = drv_data->ioaddr + config->offset;

	/* Enable software chip select control */
	value = SPI_CS_CONTROL_SW_MODE | SPI_CS_CONTROL_CS_HIGH;
	__lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);

	/* Enable multiblock DMA transfers */
	if (drv_data->master_info->enable_dma) {
		__lpss_ssp_write_priv(drv_data, config->reg_ssp, 1);

		if (config->reg_general >= 0) {
			value = __lpss_ssp_read_priv(drv_data,
						     config->reg_general);
			value |= GENERAL_REG_RXTO_HOLDOFF_DISABLE;
			__lpss_ssp_write_priv(drv_data,
					      config->reg_general, value);
		}
	}
}

static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable)
{
	const struct lpss_config *config;
	u32 value;

	config = lpss_get_config(drv_data);

	value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
	if (enable)
		value &= ~SPI_CS_CONTROL_CS_HIGH;
	else
		value |= SPI_CS_CONTROL_CS_HIGH;
	__lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
}

static void cs_assert(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	if (drv_data->ssp_type == CE4100_SSP) {
		pxa2xx_spi_write(drv_data, SSSR, drv_data->cur_chip->frm);
		return;
	}

	if (chip->cs_control) {
		chip->cs_control(PXA2XX_CS_ASSERT);
		return;
	}

	if (gpio_is_valid(chip->gpio_cs)) {
		gpio_set_value(chip->gpio_cs, chip->gpio_cs_inverted);
		return;
	}

	if (is_lpss_ssp(drv_data))
		lpss_ssp_cs_control(drv_data, true);
}

static void cs_deassert(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	if (drv_data->ssp_type == CE4100_SSP)
		return;

	if (chip->cs_control) {
		chip->cs_control(PXA2XX_CS_DEASSERT);
		return;
	}

	if (gpio_is_valid(chip->gpio_cs)) {
		gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted);
		return;
	}

	if (is_lpss_ssp(drv_data))
		lpss_ssp_cs_control(drv_data, false);
}

int pxa2xx_spi_flush(struct driver_data *drv_data)
{
	unsigned long limit = loops_per_jiffy << 1;

	do {
		while (pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
			pxa2xx_spi_read(drv_data, SSDR);
	} while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit);
	write_SSSR_CS(drv_data, SSSR_ROR);

	return limit;
}

static int null_writer(struct driver_data *drv_data)
{
	u8 n_bytes = drv_data->n_bytes;

	if (pxa2xx_spi_txfifo_full(drv_data)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	pxa2xx_spi_write(drv_data, SSDR, 0);
	drv_data->tx += n_bytes;

	return 1;
}

static int null_reader(struct driver_data *drv_data)
{
	u8 n_bytes = drv_data->n_bytes;

	while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
	       && (drv_data->rx < drv_data->rx_end)) {
		pxa2xx_spi_read(drv_data, SSDR);
		drv_data->rx += n_bytes;
	}

	return drv_data->rx == drv_data->rx_end;
}

static int u8_writer(struct driver_data *drv_data)
{
	if (pxa2xx_spi_txfifo_full(drv_data)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	pxa2xx_spi_write(drv_data, SSDR, *(u8 *)(drv_data->tx));
	++drv_data->tx;

	return 1;
}

static int u8_reader(struct driver_data *drv_data)
{
	while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
	       && (drv_data->rx < drv_data->rx_end)) {
		*(u8 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
		++drv_data->rx;
	}

	return drv_data->rx == drv_data->rx_end;
}

static int u16_writer(struct driver_data *drv_data)
{
	if (pxa2xx_spi_txfifo_full(drv_data)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	pxa2xx_spi_write(drv_data, SSDR, *(u16 *)(drv_data->tx));
	drv_data->tx += 2;

	return 1;
}

static int u16_reader(struct driver_data *drv_data)
{
	while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
	       && (drv_data->rx < drv_data->rx_end)) {
		*(u16 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
		drv_data->rx += 2;
	}

	return drv_data->rx == drv_data->rx_end;
}

static int u32_writer(struct driver_data *drv_data)
{
	if (pxa2xx_spi_txfifo_full(drv_data)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	pxa2xx_spi_write(drv_data, SSDR, *(u32 *)(drv_data->tx));
	drv_data->tx += 4;

	return 1;
}

static int u32_reader(struct driver_data *drv_data)
{
	while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
	       && (drv_data->rx < drv_data->rx_end)) {
		*(u32 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
		drv_data->rx += 4;
	}

	return drv_data->rx == drv_data->rx_end;
}

void *pxa2xx_spi_next_transfer(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct spi_transfer *trans = drv_data->cur_transfer;

	/* Move to next transfer */
	if (trans->transfer_list.next != &msg->transfers) {
		drv_data->cur_transfer =
			list_entry(trans->transfer_list.next,
					struct spi_transfer,
					transfer_list);
		return RUNNING_STATE;
	} else
		return DONE_STATE;
}

/* caller already set message->status; dma and pio irqs are blocked */
static void giveback(struct driver_data *drv_data)
{
	struct spi_transfer *last_transfer;
	struct spi_message *msg;

	msg = drv_data->cur_msg;
	drv_data->cur_msg = NULL;
	drv_data->cur_transfer = NULL;

	last_transfer = list_last_entry(&msg->transfers, struct spi_transfer,
					transfer_list);

	/* Delay if requested before any change in chip select */
	if (last_transfer->delay_usecs)
		udelay(last_transfer->delay_usecs);

	/* Drop chip select UNLESS cs_change is true or we are returning
	 * a message with an error, or next message is for another chip
	 */
	if (!last_transfer->cs_change)
		cs_deassert(drv_data);
	else {
		struct spi_message *next_msg;

		/* Holding of cs was hinted, but we need to make sure
		 * the next message is for the same chip.  Don't waste
		 * time with the following tests unless this was hinted.
		 *
		 * We cannot postpone this until pump_messages, because
		 * after calling msg->complete (below) the driver that
		 * sent the current message could be unloaded, which
		 * could invalidate the cs_control() callback...
		 */

		/* get a pointer to the next message, if any */
		next_msg = spi_get_next_queued_message(drv_data->master);

		/* see if the next and current messages point
		 * to the same chip
		 */
		if (next_msg && next_msg->spi != msg->spi)
			next_msg = NULL;
		if (!next_msg || msg->state == ERROR_STATE)
			cs_deassert(drv_data);
	}

	drv_data->cur_chip = NULL;
	spi_finalize_current_message(drv_data->master);
}

static void reset_sccr1(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;
	u32 sccr1_reg;

	sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1;
	sccr1_reg &= ~SSCR1_RFT;
	sccr1_reg |= chip->threshold;
	pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
}

static void int_error_stop(struct driver_data *drv_data, const char *msg)
{
	/* Stop and reset SSP */
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	reset_sccr1(drv_data);
	if (!pxa25x_ssp_comp(drv_data))
		pxa2xx_spi_write(drv_data, SSTO, 0);
	pxa2xx_spi_flush(drv_data);
	pxa2xx_spi_write(drv_data, SSCR0,
			 pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);

	dev_err(&drv_data->pdev->dev, "%s\n", msg);

	drv_data->cur_msg->state = ERROR_STATE;
	tasklet_schedule(&drv_data->pump_transfers);
}

static void int_transfer_complete(struct driver_data *drv_data)
{
	/* Stop SSP */
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	reset_sccr1(drv_data);
	if (!pxa25x_ssp_comp(drv_data))
		pxa2xx_spi_write(drv_data, SSTO, 0);

	/* Update total bytes transferred; return count of actual bytes read */
	drv_data->cur_msg->actual_length += drv_data->len -
				(drv_data->rx_end - drv_data->rx);

	/* Transfer delays and chip select release are
	 * handled in pump_transfers or giveback
	 */

	/* Move to next transfer */
	drv_data->cur_msg->state = pxa2xx_spi_next_transfer(drv_data);

	/* Schedule transfer tasklet */
	tasklet_schedule(&drv_data->pump_transfers);
}

static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
{
	u32 irq_mask = (pxa2xx_spi_read(drv_data, SSCR1) & SSCR1_TIE) ?
		       drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;

	u32 irq_status = pxa2xx_spi_read(drv_data, SSSR) & irq_mask;

	if (irq_status & SSSR_ROR) {
		int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
		return IRQ_HANDLED;
	}

	if (irq_status & SSSR_TINT) {
		pxa2xx_spi_write(drv_data, SSSR, SSSR_TINT);
		if (drv_data->read(drv_data)) {
			int_transfer_complete(drv_data);
			return IRQ_HANDLED;
		}
	}

	/* Drain rx fifo, fill tx fifo and prevent overruns */
	do {
		if (drv_data->read(drv_data)) {
			int_transfer_complete(drv_data);
			return IRQ_HANDLED;
		}
	} while (drv_data->write(drv_data));

	if (drv_data->read(drv_data)) {
		int_transfer_complete(drv_data);
		return IRQ_HANDLED;
	}

	if (drv_data->tx == drv_data->tx_end) {
		u32 bytes_left;
		u32 sccr1_reg;

		sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1);
		sccr1_reg &= ~SSCR1_TIE;

		/*
		 * PXA25x_SSP has no timeout, set up rx threshold for the
		 * remaining RX bytes.
		 */
		if (pxa25x_ssp_comp(drv_data)) {
			u32 rx_thre;

			pxa2xx_spi_clear_rx_thre(drv_data, &sccr1_reg);

			bytes_left = drv_data->rx_end - drv_data->rx;
			switch (drv_data->n_bytes) {
			case 4:
				bytes_left >>= 1;
				/* fall through */
			case 2:
				bytes_left >>= 1;
			}

			rx_thre = pxa2xx_spi_get_rx_default_thre(drv_data);
			if (rx_thre > bytes_left)
				rx_thre = bytes_left;

			pxa2xx_spi_set_rx_thre(drv_data, &sccr1_reg, rx_thre);
		}
		pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
	}

	/* We did something */
	return IRQ_HANDLED;
}

static irqreturn_t ssp_int(int irq, void *dev_id)
{
	struct driver_data *drv_data = dev_id;
	u32 sccr1_reg;
	u32 mask = drv_data->mask_sr;
	u32 status;

	/*
	 * The IRQ might be shared with other peripherals, so we must first
	 * check whether we are RPM suspended or not. If we are, we assume
	 * that the IRQ was not for us (we shouldn't be RPM suspended when
	 * the interrupt is enabled).
	 */
	if (pm_runtime_suspended(&drv_data->pdev->dev))
		return IRQ_NONE;

	/*
	 * If the device is not yet in RPM suspended state and we get an
	 * interrupt that is meant for another device, check if status bits
	 * are all set to one. That means that the device is already
	 * powered off.
	 */
	status = pxa2xx_spi_read(drv_data, SSSR);
	if (status == ~0)
		return IRQ_NONE;

	sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1);

	/* Ignore possible writes if we don't need to write */
	if (!(sccr1_reg & SSCR1_TIE))
		mask &= ~SSSR_TFS;

	if (!(status & mask))
		return IRQ_NONE;

	if (!drv_data->cur_msg) {

		pxa2xx_spi_write(drv_data, SSCR0,
				 pxa2xx_spi_read(drv_data, SSCR0)
				 & ~SSCR0_SSE);
		pxa2xx_spi_write(drv_data, SSCR1,
				 pxa2xx_spi_read(drv_data, SSCR1)
				 & ~drv_data->int_cr1);
		if (!pxa25x_ssp_comp(drv_data))
			pxa2xx_spi_write(drv_data, SSTO, 0);
		write_SSSR_CS(drv_data, drv_data->clear_sr);

		dev_err(&drv_data->pdev->dev,
			"bad message state in interrupt handler\n");

		/* Never fail */
		return IRQ_HANDLED;
	}

	return drv_data->transfer_handler(drv_data);
}

/*
 * The Quark SPI has an additional 24 bit register (DDS_CLK_RATE) to multiply
 * the input frequency by fractions of 2^24. It also has a divider by 5.
 *
 * There are formulas to get the baud rate value for a given input frequency
 * and divider parameters, such as DDS_CLK_RATE and SCR:
 *
 * Fsys = 200MHz
 *
 * Fssp = Fsys * DDS_CLK_RATE / 2^24			(1)
 * Baud rate = Fsclk = Fssp / (2 * (SCR + 1))		(2)
 *
 * DDS_CLK_RATE is either 2^n or 2^n / 5.
 * SCR is in the range 0 .. 255
 *
 * Divisor = 5^i * 2^j * 2 * k
 *	i = [0, 1]	i = 1 iff j = 0 or j > 3
 *	j = [0, 23]	j = 0 iff i = 1
 *	k = [1, 256]
 * Special case: j = 0, i = 1: Divisor = 2 / 5
 *
 * According to the specification the recommended values for DDS_CLK_RATE
 * are:
 *	Case 1:		2^n, n = [0, 23]
 *	Case 2:		2^24 * 2 / 5 (0x666666)
 *	Case 3:		less than or equal to 2^24 / 5 / 16 (0x33333)
 *
 * In all cases the lowest possible value is better.
 *
 * The function calculates parameters for all cases and chooses the one closest
 * to the asked baud rate.
 */
static unsigned int quark_x1000_get_clk_div(int rate, u32 *dds)
{
	unsigned long xtal = 200000000;
	unsigned long fref = xtal / 2;		/* mandatory division by 2,
						   see (2) */
						/* case 3 */
	unsigned long fref1 = fref / 2;		/* case 1 */
	unsigned long fref2 = fref * 2 / 5;	/* case 2 */
	unsigned long scale;
	unsigned long q, q1, q2;
	long r, r1, r2;
	u32 mul;

	/* Case 1 */

	/* Set initial value for DDS_CLK_RATE */
	mul = (1 << 24) >> 1;

	/* Calculate initial quot */
	q1 = DIV_ROUND_CLOSEST(fref1, rate);

	/* Scale q1 if it's too big */
	if (q1 > 256) {
		/* Scale q1 to range [1, 512] */
		scale = fls_long(q1 - 1);
		if (scale > 9) {
			q1 >>= scale - 9;
			mul >>= scale - 9;
		}

		/* Round the result if we have a remainder */
		q1 += q1 & 1;
	}

	/* Decrease DDS_CLK_RATE as much as we can without loss in precision */
	scale = __ffs(q1);
	q1 >>= scale;
	mul >>= scale;

	/* Get the remainder */
	r1 = abs(fref1 / (1 << (24 - fls_long(mul))) / q1 - rate);

	/* Case 2 */

	q2 = DIV_ROUND_CLOSEST(fref2, rate);
	r2 = abs(fref2 / q2 - rate);

	/*
	 * Choose the better of the two: the smaller the remainder, the
	 * better. We can't go with case 2 if q2 is greater than 256, since
	 * the SCR register can hold only values 0 .. 255.
	 */
	if (r2 >= r1 || q2 > 256) {
		/* case 1 is better */
		r = r1;
		q = q1;
	} else {
		/* case 2 is better */
		r = r2;
		q = q2;
		mul = (1 << 24) * 2 / 5;
	}

	/* Check case 3 only if the divisor is big enough */
	if (fref / rate >= 80) {
		u64 fssp;
		u32 m;

		/* Calculate initial quot */
		q1 = DIV_ROUND_CLOSEST(fref, rate);
		m = (1 << 24) / q1;

		/* Get the remainder */
		fssp = (u64)fref * m;
		do_div(fssp, 1 << 24);
		r1 = abs(fssp - rate);

		/* Choose this one if it suits better */
		if (r1 < r) {
			/* case 3 is better */
			q = 1;
			mul = m;
		}
	}

	*dds = mul;
	return q - 1;
}

static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
{
	unsigned long ssp_clk = drv_data->max_clk_rate;
	const struct ssp_device *ssp = drv_data->ssp;

	rate = min_t(int, ssp_clk, rate);

	if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
		return (ssp_clk / (2 * rate) - 1) & 0xff;
	else
		return (ssp_clk / rate - 1) & 0xfff;
}

static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
					   struct chip_data *chip, int rate)
{
	unsigned int clk_div;

	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		clk_div = quark_x1000_get_clk_div(rate, &chip->dds_rate);
		break;
	default:
		clk_div = ssp_get_clk_div(drv_data, rate);
		break;
	}
	return clk_div << 8;
}

static void pump_transfers(unsigned long data)
{
	struct driver_data *drv_data = (struct driver_data *)data;
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;
	struct chip_data *chip = NULL;
	u32 clk_div = 0;
	u8 bits = 0;
	u32 speed = 0;
	u32 cr0;
	u32 cr1;
	u32 dma_thresh = drv_data->cur_chip->dma_threshold;
	u32 dma_burst = drv_data->cur_chip->dma_burst_size;
	u32 change_mask = pxa2xx_spi_get_ssrc1_change_mask(drv_data);

	/* Get current state information */
	message = drv_data->cur_msg;
	transfer = drv_data->cur_transfer;
	chip = drv_data->cur_chip;

	/* Handle for abort */
	if (message->state == ERROR_STATE) {
		message->status = -EIO;
		giveback(drv_data);
		return;
	}

	/* Handle end of message */
	if (message->state == DONE_STATE) {
		message->status = 0;
		giveback(drv_data);
		return;
	}

	/* Delay if requested at end of transfer before CS change */
	if (message->state == RUNNING_STATE) {
		previous = list_entry(transfer->transfer_list.prev,
					struct spi_transfer,
					transfer_list);
		if (previous->delay_usecs)
			udelay(previous->delay_usecs);

		/* Drop chip select only if cs_change is requested */
		if (previous->cs_change)
			cs_deassert(drv_data);
	}

	/* Check if we can DMA this transfer */
	if (!pxa2xx_spi_dma_is_possible(transfer->len) && chip->enable_dma) {

		/* reject already-mapped transfers; PIO won't always work */
		if (message->is_dma_mapped
				|| transfer->rx_dma || transfer->tx_dma) {
			dev_err(&drv_data->pdev->dev,
				"pump_transfers: mapped transfer length of "
				"%u is greater than %d\n",
				transfer->len, MAX_DMA_LEN);
			message->status = -EINVAL;
			giveback(drv_data);
			return;
		}

		/* warn ... we force this to PIO mode */
		dev_warn_ratelimited(&message->spi->dev,
				     "pump_transfers: DMA disabled for transfer length %ld "
				     "greater than %d\n",
				     (long)drv_data->len, MAX_DMA_LEN);
	}

	/* Setup the transfer state based on the type of transfer */
	if (pxa2xx_spi_flush(drv_data) == 0) {
		dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
		message->status = -EIO;
		giveback(drv_data);
		return;
	}
	drv_data->n_bytes = chip->n_bytes;
	drv_data->tx = (void *)transfer->tx_buf;
	drv_data->tx_end = drv_data->tx + transfer->len;
	drv_data->rx = transfer->rx_buf;
	drv_data->rx_end = drv_data->rx + transfer->len;
	drv_data->rx_dma = transfer->rx_dma;
	drv_data->tx_dma = transfer->tx_dma;
	drv_data->len = transfer->len;
	drv_data->write = drv_data->tx ? chip->write : null_writer;
	drv_data->read = drv_data->rx ? chip->read : null_reader;

	/* Change the speed and bits per word on a per-transfer basis */
	cr0 = chip->cr0;
	if (transfer->speed_hz || transfer->bits_per_word) {

		bits = chip->bits_per_word;
		speed = chip->speed_hz;

		if (transfer->speed_hz)
			speed = transfer->speed_hz;

		if (transfer->bits_per_word)
			bits = transfer->bits_per_word;

		clk_div = pxa2xx_ssp_get_clk_div(drv_data, chip, speed);

		if (bits <= 8) {
			drv_data->n_bytes = 1;
			drv_data->read = drv_data->read != null_reader ?
					u8_reader : null_reader;
			drv_data->write = drv_data->write != null_writer ?
					u8_writer : null_writer;
		} else if (bits <= 16) {
			drv_data->n_bytes = 2;
			drv_data->read = drv_data->read != null_reader ?
					u16_reader : null_reader;
			drv_data->write = drv_data->write != null_writer ?
					u16_writer : null_writer;
		} else if (bits <= 32) {
			drv_data->n_bytes = 4;
			drv_data->read = drv_data->read != null_reader ?
					u32_reader : null_reader;
			drv_data->write = drv_data->write != null_writer ?
					u32_writer : null_writer;
		}
		/* if bits/word is changed in dma mode, then must check the
		 * thresholds and burst also */
		if (chip->enable_dma) {
			if (pxa2xx_spi_set_dma_burst_and_threshold(chip,
							message->spi,
							bits, &dma_burst,
							&dma_thresh))
				dev_warn_ratelimited(&message->spi->dev,
					"pump_transfers: DMA burst size reduced to match bits_per_word\n");
		}

		cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits);
	}

	message->state = RUNNING_STATE;

	drv_data->dma_mapped = 0;
	if (pxa2xx_spi_dma_is_possible(drv_data->len))
		drv_data->dma_mapped = pxa2xx_spi_map_dma_buffers(drv_data);
	if (drv_data->dma_mapped) {

		/* Ensure we have the correct interrupt handler */
		drv_data->transfer_handler = pxa2xx_spi_dma_transfer;

		pxa2xx_spi_dma_prepare(drv_data, dma_burst);

		/* Clear status and start DMA engine */
		cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
		pxa2xx_spi_write(drv_data, SSSR, drv_data->clear_sr);

		pxa2xx_spi_dma_start(drv_data);
	} else {
		/* Ensure we have the correct interrupt handler */
		drv_data->transfer_handler = interrupt_transfer;

		/* Clear status */
		cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1;
		write_SSSR_CS(drv_data, drv_data->clear_sr);
	}

	if (is_lpss_ssp(drv_data)) {
		if ((pxa2xx_spi_read(drv_data, SSIRF) & 0xff)
		    != chip->lpss_rx_threshold)
			pxa2xx_spi_write(drv_data, SSIRF,
					 chip->lpss_rx_threshold);
		if ((pxa2xx_spi_read(drv_data, SSITF) & 0xffff)
		    != chip->lpss_tx_threshold)
			pxa2xx_spi_write(drv_data, SSITF,
					 chip->lpss_tx_threshold);
	}

	if (is_quark_x1000_ssp(drv_data) &&
	    (pxa2xx_spi_read(drv_data, DDS_RATE) != chip->dds_rate))
		pxa2xx_spi_write(drv_data, DDS_RATE, chip->dds_rate);

	/* see if we need to reload the config registers */
	if ((pxa2xx_spi_read(drv_data, SSCR0) != cr0)
	    || (pxa2xx_spi_read(drv_data, SSCR1) & change_mask)
	    != (cr1 & change_mask)) {
		/* stop the SSP, and update the other bits */
		pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE);
		if (!pxa25x_ssp_comp(drv_data))
			pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
		/* first set CR1 without interrupt and service enables */
		pxa2xx_spi_write(drv_data, SSCR1, cr1 & change_mask);
		/* restart the SSP */
		pxa2xx_spi_write(drv_data, SSCR0, cr0);

	} else {
		if (!pxa25x_ssp_comp(drv_data))
			pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
	}

	cs_assert(drv_data);

	/* after chip select, release the data by enabling service
	 * requests and interrupts, without changing any mode bits */
	pxa2xx_spi_write(drv_data, SSCR1, cr1);
}

static int pxa2xx_spi_transfer_one_message(struct spi_master *master,
					   struct spi_message *msg)
{
	struct driver_data *drv_data = spi_master_get_devdata(master);

	drv_data->cur_msg = msg;
	/* Initial message state */
	drv_data->cur_msg->state = START_STATE;
	drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
						struct spi_transfer,
						transfer_list);

	/* prepare to setup the SSP, in pump_transfers, using the per
	 * chip configuration */
	drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);

	/* Mark as busy and launch transfers */
	tasklet_schedule(&drv_data->pump_transfers);
	return 0;
}

static int pxa2xx_spi_unprepare_transfer(struct spi_master *master)
{
	struct driver_data *drv_data = spi_master_get_devdata(master);

	/* Disable the SSP now */
	pxa2xx_spi_write(drv_data, SSCR0,
			 pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);

	return 0;
}

static int setup_cs(struct spi_device *spi, struct chip_data *chip,
		    struct pxa2xx_spi_chip *chip_info)
{
	int err = 0;

	if (chip == NULL || chip_info == NULL)
		return 0;

	/* NOTE: setup() can be called multiple times, possibly with
	 * different chip_info, release previously requested GPIO
	 */
	if (gpio_is_valid(chip->gpio_cs))
		gpio_free(chip->gpio_cs);

	/* If (*cs_control) is provided, ignore GPIO chip select */
	if (chip_info->cs_control) {
		chip->cs_control = chip_info->cs_control;
		return 0;
	}

	if (gpio_is_valid(chip_info->gpio_cs)) {
		err = gpio_request(chip_info->gpio_cs, "SPI_CS");
		if (err) {
			dev_err(&spi->dev, "failed to request chip select GPIO%d\n",
				chip_info->gpio_cs);
			return err;
		}

		chip->gpio_cs = chip_info->gpio_cs;
		chip->gpio_cs_inverted = spi->mode & SPI_CS_HIGH;

		err = gpio_direction_output(chip->gpio_cs,
					!chip->gpio_cs_inverted);
	}

	return err;
}

static int setup(struct spi_device *spi)
{
	struct pxa2xx_spi_chip *chip_info = NULL;
	struct chip_data *chip;
	const struct lpss_config *config;
	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
	unsigned int clk_div;
	uint tx_thres, tx_hi_thres, rx_thres;

	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		tx_thres = TX_THRESH_QUARK_X1000_DFLT;
		tx_hi_thres = 0;
		rx_thres = RX_THRESH_QUARK_X1000_DFLT;
		break;
	case LPSS_LPT_SSP:
	case LPSS_BYT_SSP:
		config = lpss_get_config(drv_data);
		tx_thres = config->tx_threshold_lo;
		tx_hi_thres = config->tx_threshold_hi;
		rx_thres = config->rx_threshold;
		break;
	default:
		tx_thres = TX_THRESH_DFLT;
		tx_hi_thres = 0;
		rx_thres = RX_THRESH_DFLT;
		break;
	}

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (!chip) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		if (drv_data->ssp_type == CE4100_SSP) {
			if (spi->chip_select > 4) {
				dev_err(&spi->dev,
					"failed setup: cs number must not be > 4.\n");
				kfree(chip);
				return -EINVAL;
			}

			chip->frm = spi->chip_select;
		} else
			chip->gpio_cs = -1;
		chip->enable_dma = 0;
		chip->timeout = TIMOUT_DFLT;
	}

	/* protocol drivers may change the chip settings, so...
	 * if chip_info exists, use it */
	chip_info = spi->controller_data;

	/* chip_info isn't always needed */
	chip->cr1 = 0;
	if (chip_info) {
		if (chip_info->timeout)
			chip->timeout = chip_info->timeout;
		if (chip_info->tx_threshold)
			tx_thres = chip_info->tx_threshold;
		if (chip_info->tx_hi_threshold)
			tx_hi_thres = chip_info->tx_hi_threshold;
		if (chip_info->rx_threshold)
			rx_thres = chip_info->rx_threshold;
		chip->enable_dma = drv_data->master_info->enable_dma;
		chip->dma_threshold = 0;
		if (chip_info->enable_loopback)
			chip->cr1 = SSCR1_LBM;
	} else if (ACPI_HANDLE(&spi->dev)) {
		/*
		 * Slave devices enumerated from ACPI namespace don't
		 * usually have chip_info but we still might want to use
		 * DMA with them.
		 */
		chip->enable_dma = drv_data->master_info->enable_dma;
	}

	chip->lpss_rx_threshold = SSIRF_RxThresh(rx_thres);
	chip->lpss_tx_threshold = SSITF_TxLoThresh(tx_thres)
				  | SSITF_TxHiThresh(tx_hi_thres);

	/* set dma burst and threshold outside of chip_info path so that if
	 * chip_info goes away after setting chip->enable_dma, the
	 * burst and threshold can still respond to changes in bits_per_word */
	if (chip->enable_dma) {
		/* set up legal burst and threshold for dma */
		if (pxa2xx_spi_set_dma_burst_and_threshold(chip, spi,
						spi->bits_per_word,
						&chip->dma_burst_size,
						&chip->dma_threshold)) {
			dev_warn(&spi->dev,
				 "in setup: DMA burst size reduced to match bits_per_word\n");
		}
	}

	clk_div = pxa2xx_ssp_get_clk_div(drv_data, chip, spi->max_speed_hz);
	chip->speed_hz = spi->max_speed_hz;

	chip->cr0 = pxa2xx_configure_sscr0(drv_data, clk_div,
					   spi->bits_per_word);
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		chip->threshold = (QUARK_X1000_SSCR1_RxTresh(rx_thres)
				   & QUARK_X1000_SSCR1_RFT)
				   | (QUARK_X1000_SSCR1_TxTresh(tx_thres)
				   & QUARK_X1000_SSCR1_TFT);
		break;
	default:
		chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) |
			(SSCR1_TxTresh(tx_thres) & SSCR1_TFT);
		break;
	}

	chip->cr1 &= ~(SSCR1_SPO | SSCR1_SPH);
	chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0)
			| (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0);

	if (spi->mode & SPI_LOOP)
		chip->cr1 |= SSCR1_LBM;

	/* NOTE: PXA25x_SSP _could_ use external clocking ... */
	if (!pxa25x_ssp_comp(drv_data))
		dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
			drv_data->max_clk_rate
				/ (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)),
			chip->enable_dma ? "DMA" : "PIO");
	else
		dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
			drv_data->max_clk_rate / 2
				/ (1 + ((chip->cr0 & SSCR0_SCR(0x0ff)) >> 8)),
			chip->enable_dma ? "DMA" : "PIO");

	if (spi->bits_per_word <= 8) {
		chip->n_bytes = 1;
		chip->read = u8_reader;
		chip->write = u8_writer;
	} else if (spi->bits_per_word <= 16) {
		chip->n_bytes = 2;
		chip->read = u16_reader;
		chip->write = u16_writer;
	} else if (spi->bits_per_word <= 32) {
		if (!is_quark_x1000_ssp(drv_data))
			chip->cr0 |= SSCR0_EDSS;
		chip->n_bytes = 4;
		chip->read = u32_reader;
		chip->write = u32_writer;
	}
	chip->bits_per_word = spi->bits_per_word;

	spi_set_ctldata(spi, chip);

	if (drv_data->ssp_type == CE4100_SSP)
		return 0;

	return setup_cs(spi, chip, chip_info);
}

static void cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);
	struct driver_data *drv_data = spi_master_get_devdata(spi->master);

	if (!chip)
		return;

	if (drv_data->ssp_type != CE4100_SSP && gpio_is_valid(chip->gpio_cs))
		gpio_free(chip->gpio_cs);

	kfree(chip);
}

#ifdef CONFIG_ACPI

static const struct acpi_device_id pxa2xx_spi_acpi_match[] = {
	{ "INT33C0", LPSS_LPT_SSP },
	{ "INT33C1", LPSS_LPT_SSP },
	{ "INT3430", LPSS_LPT_SSP },
	{ "INT3431", LPSS_LPT_SSP },
	{ "80860F0E", LPSS_BYT_SSP },
	{ "8086228E", LPSS_BYT_SSP },
	{ },
};
MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);

static struct pxa2xx_spi_master *
pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
{
	struct pxa2xx_spi_master *pdata;
	struct acpi_device *adev;
	struct ssp_device *ssp;
	struct resource *res;
	const struct acpi_device_id *id;
	int devid, type;

	if (!ACPI_HANDLE(&pdev->dev) ||
	    acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
		return NULL;

	id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
	if (id)
		type = (int)id->driver_data;
	else
		return NULL;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return NULL;

	ssp = &pdata->ssp;

	ssp->phys_base = res->start;
	ssp->mmio_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ssp->mmio_base))
		return NULL;

	ssp->clk = devm_clk_get(&pdev->dev, NULL);
	ssp->irq = platform_get_irq(pdev, 0);
	ssp->type = type;
	ssp->pdev = pdev;

	ssp->port_id = -1;
	if (adev->pnp.unique_id && !kstrtoint(adev->pnp.unique_id, 0, &devid))
		ssp->port_id = devid;

	pdata->num_chipselect = 1;
	pdata->enable_dma = true;

	return pdata;
}

#else
static inline struct pxa2xx_spi_master *
pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
{
	return NULL;
}
#endif

static int pxa2xx_spi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pxa2xx_spi_master *platform_info;
	struct spi_master *master;
	struct driver_data *drv_data;
	struct ssp_device *ssp;
	int status;
	u32 tmp;

	platform_info = dev_get_platdata(dev);
	if (!platform_info) {
		platform_info = pxa2xx_spi_acpi_get_pdata(pdev);
		if (!platform_info) {
			dev_err(&pdev->dev, "missing platform data\n");
			return -ENODEV;
		}
	}

	ssp = pxa_ssp_request(pdev->id, pdev->name);
	if (!ssp)
		ssp = &platform_info->ssp;

	if (!ssp->mmio_base) {
		dev_err(&pdev->dev, "failed to get ssp\n");
		return -ENODEV;
	}

	/* Allocate master with space for drv_data and null dma buffer */
	master = spi_alloc_master(dev, sizeof(struct driver_data) + 16);
	if (!master) {
		dev_err(&pdev->dev, "cannot alloc spi_master\n");
		pxa_ssp_free(ssp);
		return -ENOMEM;
	}
	drv_data = spi_master_get_devdata(master);
	drv_data->master = master;
	drv_data->master_info = platform_info;
	drv_data->pdev = pdev;
	drv_data->ssp = ssp;

	master->dev.parent = &pdev->dev;
	master->dev.of_node = pdev->dev.of_node;
	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;

	master->bus_num = ssp->port_id;
	master->num_chipselect = platform_info->num_chipselect;
	master->dma_alignment = DMA_ALIGNMENT;
	master->cleanup = cleanup;
	master->setup = setup;
	master->transfer_one_message = pxa2xx_spi_transfer_one_message;
	master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;
	master->auto_runtime_pm = true;

	drv_data->ssp_type = ssp->type;
	drv_data->null_dma_buf = (u32 *)PTR_ALIGN(&drv_data[1], DMA_ALIGNMENT);

	drv_data->ioaddr = ssp->mmio_base;
	drv_data->ssdr_physical = ssp->phys_base + SSDR;
	if (pxa25x_ssp_comp(drv_data)) {
		switch (drv_data->ssp_type) {
		case QUARK_X1000_SSP:
			master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
			break;
		default:
			master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
			break;
		}

		drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE;
		drv_data->dma_cr1 = 0;
		drv_data->clear_sr = SSSR_ROR;
		drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR;
	} else {
		master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
		drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
		drv_data->dma_cr1 = DEFAULT_DMA_CR1;
		drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
		drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR;
	}

	status = request_irq(ssp->irq, ssp_int, IRQF_SHARED, dev_name(dev),
			     drv_data);
	if (status < 0) {
		dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq);
		goto out_error_master_alloc;
	}

	/* Setup DMA if requested */
	drv_data->tx_channel = -1;
	drv_data->rx_channel = -1;
	if (platform_info->enable_dma) {
		status = pxa2xx_spi_dma_setup(drv_data);
		if (status) {
			dev_dbg(dev, "no DMA channels available, using PIO\n");
			platform_info->enable_dma = false;
		}
	}

	/* Enable SOC clock */
	clk_prepare_enable(ssp->clk);

	drv_data->max_clk_rate = clk_get_rate(ssp->clk);

	/* Load default SSP configuration */
	pxa2xx_spi_write(drv_data, SSCR0, 0);
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		tmp = QUARK_X1000_SSCR1_RxTresh(RX_THRESH_QUARK_X1000_DFLT)
		      | QUARK_X1000_SSCR1_TxTresh(TX_THRESH_QUARK_X1000_DFLT);
		pxa2xx_spi_write(drv_data, SSCR1, tmp);

		/* use the Motorola SPI protocol and 8 bit frames */
		pxa2xx_spi_write(drv_data, SSCR0,
				 QUARK_X1000_SSCR0_Motorola
				 | QUARK_X1000_SSCR0_DataSize(8));
		break;
	default:
		tmp = SSCR1_RxTresh(RX_THRESH_DFLT) |
		      SSCR1_TxTresh(TX_THRESH_DFLT);
		pxa2xx_spi_write(drv_data, SSCR1, tmp);
		tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8);
		pxa2xx_spi_write(drv_data, SSCR0, tmp);
		break;
	}

	if (!pxa25x_ssp_comp(drv_data))
		pxa2xx_spi_write(drv_data, SSTO, 0);

	if (!is_quark_x1000_ssp(drv_data))
		pxa2xx_spi_write(drv_data, SSPSP, 0);

	if (is_lpss_ssp(drv_data))
		lpss_ssp_setup(drv_data);

	tasklet_init(&drv_data->pump_transfers, pump_transfers,
		     (unsigned long)drv_data);

	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	/* Register with the SPI framework */
	platform_set_drvdata(pdev, drv_data);
	status = devm_spi_register_master(&pdev->dev, master);
	if (status != 0) {
		dev_err(&pdev->dev, "problem registering spi master\n");
		goto out_error_clock_enabled;
	}

	return status;

out_error_clock_enabled:
	clk_disable_unprepare(ssp->clk);
	pxa2xx_spi_dma_release(drv_data);
	free_irq(ssp->irq, drv_data);

out_error_master_alloc:
	spi_master_put(master);
	pxa_ssp_free(ssp);
	return status;
}

static int pxa2xx_spi_remove(struct platform_device *pdev)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);
	struct ssp_device *ssp;

	if (!drv_data)
		return 0;
	ssp = drv_data->ssp;

	pm_runtime_get_sync(&pdev->dev);

	/* Disable the SSP at the peripheral and SOC level */
	pxa2xx_spi_write(drv_data, SSCR0, 0);
	clk_disable_unprepare(ssp->clk);

	/* Release DMA */
	if (drv_data->master_info->enable_dma)
		pxa2xx_spi_dma_release(drv_data);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	/* Release IRQ */
	free_irq(ssp->irq, drv_data);

	/* Release SSP */
	pxa_ssp_free(ssp);

	return 0;
}

static void pxa2xx_spi_shutdown(struct platform_device *pdev)
{
	int status = 0;

	if ((status = pxa2xx_spi_remove(pdev)) != 0)
		dev_err(&pdev->dev, "shutdown failed with %d\n", status);
}

#ifdef CONFIG_PM_SLEEP
static int pxa2xx_spi_suspend(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);
	struct ssp_device *ssp = drv_data->ssp;
	int status = 0;

	status = spi_master_suspend(drv_data->master);
	if (status != 0)
		return status;
	pxa2xx_spi_write(drv_data, SSCR0, 0);

	if (!pm_runtime_suspended(dev))
		clk_disable_unprepare(ssp->clk);

	return 0;
}

static int pxa2xx_spi_resume(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);
	struct ssp_device *ssp = drv_data->ssp;
	int status = 0;

	pxa2xx_spi_dma_resume(drv_data);

	/* Enable the SSP clock */
	if (!pm_runtime_suspended(dev))
		clk_prepare_enable(ssp->clk);

	/* Restore LPSS private register bits */
	if (is_lpss_ssp(drv_data))
		lpss_ssp_setup(drv_data);

	/* Start the queue running */
	status = spi_master_resume(drv_data->master);
	if (status != 0) {
		dev_err(dev, "problem starting queue (%d)\n", status);
		return status;
	}

	return 0;
}
#endif

#ifdef CONFIG_PM
static int pxa2xx_spi_runtime_suspend(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);

	clk_disable_unprepare(drv_data->ssp->clk);
	return 0;
}

static int pxa2xx_spi_runtime_resume(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);

	clk_prepare_enable(drv_data->ssp->clk);
	return 0;
}
#endif

static const struct dev_pm_ops pxa2xx_spi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pxa2xx_spi_suspend, pxa2xx_spi_resume)
	SET_RUNTIME_PM_OPS(pxa2xx_spi_runtime_suspend,
			   pxa2xx_spi_runtime_resume, NULL)
};

static struct platform_driver driver = {
	.driver = {
		.name	= "pxa2xx-spi",
		.pm	= &pxa2xx_spi_pm_ops,
		.acpi_match_table = ACPI_PTR(pxa2xx_spi_acpi_match),
	},
	.probe = pxa2xx_spi_probe,
	.remove = pxa2xx_spi_remove,
	.shutdown = pxa2xx_spi_shutdown,
};

static int __init pxa2xx_spi_init(void)
{
	return platform_driver_register(&driver);
}
subsys_initcall(pxa2xx_spi_init);

static void __exit pxa2xx_spi_exit(void)
{
	platform_driver_unregister(&driver);
}
module_exit(pxa2xx_spi_exit);
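
/*
 * Illustrative sketch (not part of this driver): one way board-support code
 * might describe a device on this controller through the interfaces used
 * above, i.e. a struct pxa2xx_spi_chip passed via spi_board_info.controller_data
 * and consumed by setup() / setup_cs().  The GPIO number, bus number, speed
 * and modalias below are hypothetical placeholders, not values taken from
 * this file.
 *
 *	static struct pxa2xx_spi_chip example_chip_info = {
 *		.gpio_cs	= 88,	// hypothetical chip-select GPIO
 *		.rx_threshold	= 8,	// FIFO thresholds, board specific
 *		.tx_threshold	= 8,
 *		.timeout	= 235,	// overrides TIMOUT_DFLT when non-zero
 *	};
 *
 *	static struct spi_board_info example_board_info[] __initdata = {
 *		{
 *			.modalias	 = "example-spi-dev",
 *			.max_speed_hz	 = 13000000,
 *			.bus_num	 = 1,
 *			.chip_select	 = 0,
 *			.mode		 = SPI_MODE_0,
 *			.controller_data = &example_chip_info,
 *		},
 *	};
 *
 *	// Registered once from the board's init code:
 *	// spi_register_board_info(example_board_info,
 *	//			   ARRAY_SIZE(example_board_info));
 */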