// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2009 - Maxim Levitsky
 * driver for Ricoh xD readers
 */

#define DRV_NAME "r852"
#define pr_fmt(fmt) DRV_NAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/sched.h>
#include "sm_common.h"
#include "r852.h"


static bool r852_enable_dma = true;
module_param(r852_enable_dma, bool, S_IRUGO);
MODULE_PARM_DESC(r852_enable_dma, "Enable usage of the DMA (default)");

static int debug;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level (0-2)");

/* read register */
static inline uint8_t r852_read_reg(struct r852_device *dev, int address)
{
	uint8_t reg = readb(dev->mmio + address);
	return reg;
}

/* write register */
static inline void r852_write_reg(struct r852_device *dev,
						int address, uint8_t value)
{
	writeb(value, dev->mmio + address);
}


/* read dword sized register */
static inline uint32_t r852_read_reg_dword(struct r852_device *dev, int address)
{
	uint32_t reg = le32_to_cpu(readl(dev->mmio + address));
	return reg;
}

/* write dword sized register */
static inline void r852_write_reg_dword(struct r852_device *dev,
						int address, uint32_t value)
{
	writel(cpu_to_le32(value), dev->mmio + address);
}

/* returns pointer to our private structure */
static inline struct r852_device *r852_get_dev(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	return nand_get_controller_data(chip);
}


/* check if controller supports dma */
static void r852_dma_test(struct r852_device *dev)
{
	dev->dma_usable = (r852_read_reg(dev, R852_DMA_CAP) &
		(R852_DMA1 | R852_DMA2)) == (R852_DMA1 | R852_DMA2);

	if (!dev->dma_usable)
		message("Non dma capable device detected, dma disabled");

	if (!r852_enable_dma) {
		message("disabling dma on user request");
		dev->dma_usable = 0;
	}
}
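
/*
 * DMA overview: every transfer runs in two stages.  On a read the
 * controller first fills its internal buffer from the card (DMA_INTERNAL)
 * and then copies that buffer to host memory (DMA_MEMORY); writes do the
 * same in the opposite order.  dev->dma_state holds the stage to program
 * next and dev->dma_stage counts progress (1 = first stage running,
 * 2 = second stage running, 3 = done); the interrupt handler advances
 * both and re-arms the engine for the second half.
 */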

/*
 * Enable DMA. Enables either the first or the second stage of the DMA,
 * expects dev->dma_dir and dev->dma_state to be set.
 */
static void r852_dma_enable(struct r852_device *dev)
{
	uint8_t dma_reg, dma_irq_reg;

	/* Set up dma settings */
	dma_reg = r852_read_reg_dword(dev, R852_DMA_SETTINGS);
	dma_reg &= ~(R852_DMA_READ | R852_DMA_INTERNAL | R852_DMA_MEMORY);

	if (dev->dma_dir)
		dma_reg |= R852_DMA_READ;

	if (dev->dma_state == DMA_INTERNAL) {
		dma_reg |= R852_DMA_INTERNAL;
		/* Precaution to make sure HW doesn't write
		   to random kernel memory */
		r852_write_reg_dword(dev, R852_DMA_ADDR,
			cpu_to_le32(dev->phys_bounce_buffer));
	} else {
		dma_reg |= R852_DMA_MEMORY;
		r852_write_reg_dword(dev, R852_DMA_ADDR,
			cpu_to_le32(dev->phys_dma_addr));
	}

	/* Precaution: make sure write reached the device */
	r852_read_reg_dword(dev, R852_DMA_ADDR);

	r852_write_reg_dword(dev, R852_DMA_SETTINGS, dma_reg);

	/* Set dma irq */
	dma_irq_reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
	r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
		dma_irq_reg |
		R852_DMA_IRQ_INTERNAL |
		R852_DMA_IRQ_ERROR |
		R852_DMA_IRQ_MEMORY);
}

/*
 * Disable DMA. Called from the interrupt handler, which reports success
 * of the operation via the 'error' argument.
 */
static void r852_dma_done(struct r852_device *dev, int error)
{
	WARN_ON(dev->dma_stage == 0);

	r852_write_reg_dword(dev, R852_DMA_IRQ_STA,
			r852_read_reg_dword(dev, R852_DMA_IRQ_STA));

	r852_write_reg_dword(dev, R852_DMA_SETTINGS, 0);
	r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, 0);

	/* Precaution to make sure HW doesn't write to random kernel memory */
	r852_write_reg_dword(dev, R852_DMA_ADDR,
		cpu_to_le32(dev->phys_bounce_buffer));
	r852_read_reg_dword(dev, R852_DMA_ADDR);

	dev->dma_error = error;
	dev->dma_stage = 0;

	if (dev->phys_dma_addr && dev->phys_dma_addr != dev->phys_bounce_buffer)
		dma_unmap_single(&dev->pci_dev->dev, dev->phys_dma_addr,
			R852_DMA_LEN,
			dev->dma_dir ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
}

/*
 * Wait till DMA is done, which includes both phases of it
 */
static int r852_dma_wait(struct r852_device *dev)
{
	long timeout = wait_for_completion_timeout(&dev->dma_done,
				msecs_to_jiffies(1000));

	if (!timeout) {
		dbg("timeout waiting for DMA interrupt");
		return -ETIMEDOUT;
	}

	return 0;
}
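
/*
 * Buffers that are not 512-byte aligned, or that fail dma_map_single(),
 * are staged through the coherent bounce buffer allocated at probe time;
 * everything else is mapped for streaming DMA directly.
 */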

/*
 * Read/Write one page using DMA. Only whole pages (512 bytes) can be
 * transferred this way.
 */
static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read)
{
	int bounce = 0;
	unsigned long flags;
	int error;

	dev->dma_error = 0;

	/* Set dma direction */
	dev->dma_dir = do_read;
	dev->dma_stage = 1;
	reinit_completion(&dev->dma_done);

	dbg_verbose("doing dma %s ", do_read ? "read" : "write");

	/* Set initial dma state: for a read, first fill the on-board buffer
	   from the device; for a write, first fill the buffer from memory */
	dev->dma_state = do_read ? DMA_INTERNAL : DMA_MEMORY;

	/* if the incoming buffer is not page aligned, we should do bounce */
	if ((unsigned long)buf & (R852_DMA_LEN-1))
		bounce = 1;

	if (!bounce) {
		dev->phys_dma_addr = dma_map_single(&dev->pci_dev->dev, buf,
			R852_DMA_LEN,
			do_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
		if (dma_mapping_error(&dev->pci_dev->dev, dev->phys_dma_addr))
			bounce = 1;
	}

	if (bounce) {
		dbg_verbose("dma: using bounce buffer");
		dev->phys_dma_addr = dev->phys_bounce_buffer;
		if (!do_read)
			memcpy(dev->bounce_buffer, buf, R852_DMA_LEN);
	}

	/* Enable DMA */
	spin_lock_irqsave(&dev->irqlock, flags);
	r852_dma_enable(dev);
	spin_unlock_irqrestore(&dev->irqlock, flags);

	/* Wait till complete */
	error = r852_dma_wait(dev);

	if (error) {
		r852_dma_done(dev, error);
		return;
	}

	if (do_read && bounce)
		memcpy((void *)buf, dev->bounce_buffer, R852_DMA_LEN);
}

/*
 * Program data lines of the nand chip to send data to it
 */
static void r852_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
{
	struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
	uint32_t reg;

	/* Don't allow any access to hardware if we suspect card removal */
	if (dev->card_unstable)
		return;

	/* Special case for whole sector write */
	if (len == R852_DMA_LEN && dev->dma_usable) {
		r852_do_dma(dev, (uint8_t *)buf, 0);
		return;
	}

	/* write DWORD sized chunks - faster */
	while (len >= 4) {
		reg = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24;
		r852_write_reg_dword(dev, R852_DATALINE, reg);
		buf += 4;
		len -= 4;
	}

	/* write the rest */
	while (len > 0) {
		r852_write_reg(dev, R852_DATALINE, *buf++);
		len--;
	}
}

/*
 * Read data lines of the nand chip to retrieve data
 */
static void r852_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
	struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
	uint32_t reg;

	if (dev->card_unstable) {
		/* since we can't signal an error here, at least return a
		   predictable buffer */
		memset(buf, 0, len);
		return;
	}

	/* special case for whole sector read */
	if (len == R852_DMA_LEN && dev->dma_usable) {
		r852_do_dma(dev, buf, 1);
		return;
	}

	/* read in dword sized chunks */
	while (len >= 4) {

		reg = r852_read_reg_dword(dev, R852_DATALINE);
		*buf++ = reg & 0xFF;
		*buf++ = (reg >> 8) & 0xFF;
		*buf++ = (reg >> 16) & 0xFF;
		*buf++ = (reg >> 24) & 0xFF;
		len -= 4;
	}

	/* read the rest byte by byte */
	while (len--)
		*buf++ = r852_read_reg(dev, R852_DATALINE);
}
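
/*
 * Legacy NAND control hooks follow: single byte data reads,
 * command/address latching via R852_CTL and ready/busy polling.  Like the
 * buffer routines above, most of them refuse to touch the hardware while
 * dev->card_unstable is set, i.e. while a card change is suspected.
 */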

/*
 * Read one byte from the nand chip
 */
static uint8_t r852_read_byte(struct nand_chip *chip)
{
	struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));

	/* Same problem as in r852_read_buf: can't report an error here,
	   so return 0 */
	if (dev->card_unstable)
		return 0;

	return r852_read_reg(dev, R852_DATALINE);
}

/*
 * Control several chip lines & send commands
 */
static void r852_cmdctl(struct nand_chip *chip, int dat, unsigned int ctrl)
{
	struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));

	if (dev->card_unstable)
		return;

	if (ctrl & NAND_CTRL_CHANGE) {

		dev->ctlreg &= ~(R852_CTL_DATA | R852_CTL_COMMAND |
				 R852_CTL_ON | R852_CTL_CARDENABLE);

		if (ctrl & NAND_ALE)
			dev->ctlreg |= R852_CTL_DATA;

		if (ctrl & NAND_CLE)
			dev->ctlreg |= R852_CTL_COMMAND;

		if (ctrl & NAND_NCE)
			dev->ctlreg |= (R852_CTL_CARDENABLE | R852_CTL_ON);
		else
			dev->ctlreg &= ~R852_CTL_WRITE;

		/* when a write is started, enable write access */
		if (dat == NAND_CMD_ERASE1)
			dev->ctlreg |= R852_CTL_WRITE;

		r852_write_reg(dev, R852_CTL, dev->ctlreg);
	}

	/* HACK: NAND_CMD_SEQIN is called without NAND_CTRL_CHANGE, but we
	   need to set write mode */
	if (dat == NAND_CMD_SEQIN && (dev->ctlreg & R852_CTL_COMMAND)) {
		dev->ctlreg |= R852_CTL_WRITE;
		r852_write_reg(dev, R852_CTL, dev->ctlreg);
	}

	if (dat != NAND_CMD_NONE)
		r852_write_reg(dev, R852_DATALINE, dat);
}

/*
 * Wait till card is ready.
 * based on nand_wait, but returns errors on DMA error
 */
static int r852_wait(struct nand_chip *chip)
{
	struct r852_device *dev = nand_get_controller_data(chip);

	unsigned long timeout;
	u8 status;

	timeout = jiffies + msecs_to_jiffies(400);

	while (time_before(jiffies, timeout))
		if (chip->legacy.dev_ready(chip))
			break;

	nand_status_op(chip, &status);

	/* Unfortunately, there is no way to report a detailed error status
	   here... */
	if (dev->dma_error) {
		status |= NAND_STATUS_FAIL;
		dev->dma_error = 0;
	}
	return status;
}

/*
 * Check if card is ready
 */

static int r852_ready(struct nand_chip *chip)
{
	struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
	return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY);
}
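
/*
 * ECC: the controller works on 512-byte sectors split into two 256-byte
 * halves.  It produces a 3-byte code per half (stored as ecc1/ecc2 in the
 * SmartMedia OOB layout) and reports at most one correctable single-bit
 * error per half; anything worse comes back as uncorrectable.
 */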

/*
 * Set ECC engine mode
 */

static void r852_ecc_hwctl(struct nand_chip *chip, int mode)
{
	struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));

	if (dev->card_unstable)
		return;

	switch (mode) {
	case NAND_ECC_READ:
	case NAND_ECC_WRITE:
		/* enable ecc generation/check */
		dev->ctlreg |= R852_CTL_ECC_ENABLE;

		/* flush ecc buffer */
		r852_write_reg(dev, R852_CTL,
			dev->ctlreg | R852_CTL_ECC_ACCESS);

		r852_read_reg_dword(dev, R852_DATALINE);
		r852_write_reg(dev, R852_CTL, dev->ctlreg);
		return;

	case NAND_ECC_READSYN:
		/* disable ecc generation */
		dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
		r852_write_reg(dev, R852_CTL, dev->ctlreg);
	}
}

/*
 * Calculate ECC, only used for writes
 */

static int r852_ecc_calculate(struct nand_chip *chip, const uint8_t *dat,
			      uint8_t *ecc_code)
{
	struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
	struct sm_oob *oob = (struct sm_oob *)ecc_code;
	uint32_t ecc1, ecc2;

	if (dev->card_unstable)
		return 0;

	dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
	r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);

	ecc1 = r852_read_reg_dword(dev, R852_DATALINE);
	ecc2 = r852_read_reg_dword(dev, R852_DATALINE);

	oob->ecc1[0] = (ecc1) & 0xFF;
	oob->ecc1[1] = (ecc1 >> 8) & 0xFF;
	oob->ecc1[2] = (ecc1 >> 16) & 0xFF;

	oob->ecc2[0] = (ecc2) & 0xFF;
	oob->ecc2[1] = (ecc2 >> 8) & 0xFF;
	oob->ecc2[2] = (ecc2 >> 16) & 0xFF;

	r852_write_reg(dev, R852_CTL, dev->ctlreg);
	return 0;
}

/*
 * Correct the data using ECC, hw did almost everything for us
 */

static int r852_ecc_correct(struct nand_chip *chip, uint8_t *dat,
			    uint8_t *read_ecc, uint8_t *calc_ecc)
{
	uint32_t ecc_reg;
	uint8_t ecc_status, err_byte;
	int i, error = 0;

	struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));

	if (dev->card_unstable)
		return 0;

	if (dev->dma_error) {
		dev->dma_error = 0;
		return -EIO;
	}

	r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
	ecc_reg = r852_read_reg_dword(dev, R852_DATALINE);
	r852_write_reg(dev, R852_CTL, dev->ctlreg);

	for (i = 0 ; i <= 1 ; i++) {

		ecc_status = (ecc_reg >> 8) & 0xFF;

		/* ecc uncorrectable error */
		if (ecc_status & R852_ECC_FAIL) {
			dbg("ecc: unrecoverable error, in half %d", i);
			error = -EBADMSG;
			goto exit;
		}

		/* correctable error */
		if (ecc_status & R852_ECC_CORRECTABLE) {

			err_byte = ecc_reg & 0xFF;
			dbg("ecc: recoverable error, "
				"in half %d, byte %d, bit %d", i,
				err_byte, ecc_status & R852_ECC_ERR_BIT_MSK);

			dat[err_byte] ^=
				1 << (ecc_status & R852_ECC_ERR_BIT_MSK);
			error++;
		}

		dat += 256;
		ecc_reg >>= 16;
	}
exit:
	return error;
}

/*
 * This is a copy of nand_read_oob_std;
 * nand_read_oob_syndrome assumes we can send a column address - we can't
 */
static int r852_read_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
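
/*
 * What follows deals with the card and the NAND engine as a whole: the
 * engine is started when a card is registered and stopped when it goes
 * away, and the MTD device itself is created from the card-detect work
 * on insertion rather than at PCI probe time.
 */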

/*
 * Start the nand engine
 */

static void r852_engine_enable(struct r852_device *dev)
{
	if (r852_read_reg_dword(dev, R852_HW) & R852_HW_UNKNOWN) {
		r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
		r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
	} else {
		r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
		r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
	}
	msleep(300);
	r852_write_reg(dev, R852_CTL, 0);
}


/*
 * Stop the nand engine
 */

static void r852_engine_disable(struct r852_device *dev)
{
	r852_write_reg_dword(dev, R852_HW, 0);
	r852_write_reg(dev, R852_CTL, R852_CTL_RESET);
}

/*
 * Test if card is present
 */

static void r852_card_update_present(struct r852_device *dev)
{
	unsigned long flags;
	uint8_t reg;

	spin_lock_irqsave(&dev->irqlock, flags);
	reg = r852_read_reg(dev, R852_CARD_STA);
	dev->card_detected = !!(reg & R852_CARD_STA_PRESENT);
	spin_unlock_irqrestore(&dev->irqlock, flags);
}

/*
 * Update card detection IRQ state according to current card state,
 * which is read in r852_card_update_present
 */
static void r852_update_card_detect(struct r852_device *dev)
{
	int card_detect_reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
	dev->card_unstable = 0;

	card_detect_reg &= ~(R852_CARD_IRQ_REMOVE | R852_CARD_IRQ_INSERT);
	card_detect_reg |= R852_CARD_IRQ_GENABLE;

	card_detect_reg |= dev->card_detected ?
		R852_CARD_IRQ_REMOVE : R852_CARD_IRQ_INSERT;

	r852_write_reg(dev, R852_CARD_IRQ_ENABLE, card_detect_reg);
}

static ssize_t r852_media_type_show(struct device *sys_dev,
			struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = container_of(sys_dev, struct mtd_info, dev);
	struct r852_device *dev = r852_get_dev(mtd);
	char *data = dev->sm ? "smartmedia" : "xd";

	strcpy(buf, data);
	return strlen(data);
}

static DEVICE_ATTR(media_type, S_IRUGO, r852_media_type_show, NULL);


/* Detect properties of the card in the slot */
static void r852_update_media_status(struct r852_device *dev)
{
	uint8_t reg;
	unsigned long flags;
	int readonly;

	spin_lock_irqsave(&dev->irqlock, flags);
	if (!dev->card_detected) {
		message("card removed");
		spin_unlock_irqrestore(&dev->irqlock, flags);
		return;
	}

	readonly = r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_RO;
	reg = r852_read_reg(dev, R852_DMA_CAP);
	dev->sm = (reg & (R852_DMA1 | R852_DMA2)) && (reg & R852_SMBIT);

	message("detected %s %s card in slot",
		dev->sm ? "SmartMedia" : "xD",
		readonly ? "readonly" : "writeable");

	dev->readonly = readonly;
	spin_unlock_irqrestore(&dev->irqlock, flags);
}

/*
 * Register the nand device.
 * Called from the card-detect work when a card is detected.
 */
static int r852_register_nand_device(struct r852_device *dev)
{
	struct mtd_info *mtd = nand_to_mtd(dev->chip);

	WARN_ON(dev->card_registered);

	mtd->dev.parent = &dev->pci_dev->dev;

	if (dev->readonly)
		dev->chip->options |= NAND_ROM;

	r852_engine_enable(dev);

	if (sm_register_device(mtd, dev->sm))
		goto error1;

	if (device_create_file(&mtd->dev, &dev_attr_media_type)) {
		message("can't create media type sysfs attribute");
		goto error3;
	}

	dev->card_registered = 1;
	return 0;
error3:
	nand_release(dev->chip);
error1:
	/* Force card redetect */
	dev->card_detected = 0;
	return -1;
}

/*
 * Unregister the card
 */

static void r852_unregister_nand_device(struct r852_device *dev)
{
	struct mtd_info *mtd = nand_to_mtd(dev->chip);

	if (!dev->card_registered)
		return;

	device_remove_file(&mtd->dev, &dev_attr_media_type);
	nand_release(dev->chip);
	r852_engine_disable(dev);
	dev->card_registered = 0;
}

/* Card state updater */
static void r852_card_detect_work(struct work_struct *work)
{
	struct r852_device *dev =
		container_of(work, struct r852_device, card_detect_work.work);

	r852_card_update_present(dev);
	r852_update_card_detect(dev);
	dev->card_unstable = 0;

	/* False alarm */
	if (dev->card_detected == dev->card_registered)
		goto exit;

	/* Read media properties */
	r852_update_media_status(dev);

	/* Register the card */
	if (dev->card_detected)
		r852_register_nand_device(dev);
	else
		r852_unregister_nand_device(dev);
exit:
	r852_update_card_detect(dev);
}

/* Ack + disable IRQ generation */
static void r852_disable_irqs(struct r852_device *dev)
{
	uint8_t reg;

	reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
	r852_write_reg(dev, R852_CARD_IRQ_ENABLE, reg & ~R852_CARD_IRQ_MASK);

	reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
	r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
					reg & ~R852_DMA_IRQ_MASK);

	r852_write_reg(dev, R852_CARD_IRQ_STA, R852_CARD_IRQ_MASK);
	r852_write_reg_dword(dev, R852_DMA_IRQ_STA, R852_DMA_IRQ_MASK);
}

/*
 * Interrupt handler: card insert/remove interrupts mark the card as
 * unstable, mask further interrupts and schedule the delayed card-detect
 * work; DMA interrupts advance the two-stage DMA state machine and
 * complete dev->dma_done once both stages (or an error) have been seen.
 */
static irqreturn_t r852_irq(int irq, void *data)
{
	struct r852_device *dev = (struct r852_device *)data;

	uint8_t card_status, dma_status;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	spin_lock_irqsave(&dev->irqlock, flags);

	/* handle card detection interrupts first */
	card_status = r852_read_reg(dev, R852_CARD_IRQ_STA);
	r852_write_reg(dev, R852_CARD_IRQ_STA, card_status);

	if (card_status & (R852_CARD_IRQ_INSERT|R852_CARD_IRQ_REMOVE)) {

		ret = IRQ_HANDLED;
		dev->card_detected = !!(card_status & R852_CARD_IRQ_INSERT);

		/* we shouldn't receive any interrupts while we wait for the
		   card to settle */
		WARN_ON(dev->card_unstable);

		/* disable irqs while the card is unstable; this will time out
		   an active DMA, but that is better than garbage */
		r852_disable_irqs(dev);

		if (dev->card_unstable)
			goto out;

		/* let the card state settle a bit, and then do the work */
		dev->card_unstable = 1;
		queue_delayed_work(dev->card_workqueue,
			&dev->card_detect_work, msecs_to_jiffies(100));
		goto out;
	}


	/* Handle dma interrupts */
	dma_status = r852_read_reg_dword(dev, R852_DMA_IRQ_STA);
	r852_write_reg_dword(dev, R852_DMA_IRQ_STA, dma_status);

	if (dma_status & R852_DMA_IRQ_MASK) {

		ret = IRQ_HANDLED;

		if (dma_status & R852_DMA_IRQ_ERROR) {
			dbg("received dma error IRQ");
			r852_dma_done(dev, -EIO);
			complete(&dev->dma_done);
			goto out;
		}

		/* received a DMA interrupt out of nowhere? */
		WARN_ON_ONCE(dev->dma_stage == 0);

		if (dev->dma_stage == 0)
			goto out;

		/* done device access */
		if (dev->dma_state == DMA_INTERNAL &&
				(dma_status & R852_DMA_IRQ_INTERNAL)) {

			dev->dma_state = DMA_MEMORY;
			dev->dma_stage++;
		}

		/* done memory DMA */
		if (dev->dma_state == DMA_MEMORY &&
				(dma_status & R852_DMA_IRQ_MEMORY)) {
			dev->dma_state = DMA_INTERNAL;
			dev->dma_stage++;
		}

		/* Enable 2nd half of dma dance */
		if (dev->dma_stage == 2)
			r852_dma_enable(dev);

		/* Operation done */
		if (dev->dma_stage == 3) {
			r852_dma_done(dev, 0);
			complete(&dev->dma_done);
		}
		goto out;
	}

	/* Handle unknown interrupts */
	if (dma_status)
		dbg("bad dma IRQ status = %x", dma_status);

	if (card_status & ~R852_CARD_STA_CD)
		dbg("strange card status = %x", card_status);

out:
	spin_unlock_irqrestore(&dev->irqlock, flags);
	return ret;
}

static int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	int error;
	struct nand_chip *chip;
	struct r852_device *dev;

	/* pci initialization */
	error = pci_enable_device(pci_dev);

	if (error)
		goto error1;

	pci_set_master(pci_dev);

	error = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
	if (error)
		goto error2;

	error = pci_request_regions(pci_dev, DRV_NAME);

	if (error)
		goto error3;

	error = -ENOMEM;

	/* init the nand chip, but register it only on card insert */
	chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);

	if (!chip)
		goto error4;

	/* commands */
	chip->legacy.cmd_ctrl = r852_cmdctl;
	chip->legacy.waitfunc = r852_wait;
	chip->legacy.dev_ready = r852_ready;

	/* I/O */
	chip->legacy.read_byte = r852_read_byte;
	chip->legacy.read_buf = r852_read_buf;
	chip->legacy.write_buf = r852_write_buf;

	/* ecc */
	chip->ecc.mode = NAND_ECC_HW_SYNDROME;
	chip->ecc.size = R852_DMA_LEN;
	chip->ecc.bytes = SM_OOB_SIZE;
	chip->ecc.strength = 2;
	chip->ecc.hwctl = r852_ecc_hwctl;
	chip->ecc.calculate = r852_ecc_calculate;
	chip->ecc.correct = r852_ecc_correct;

	/* TODO: hack */
	chip->ecc.read_oob = r852_read_oob;

	/* init our device structure */
	dev = kzalloc(sizeof(struct r852_device), GFP_KERNEL);

	if (!dev)
		goto error5;

	nand_set_controller_data(chip, dev);
	dev->chip = chip;
	dev->pci_dev = pci_dev;
	pci_set_drvdata(pci_dev, dev);

	dev->bounce_buffer = dma_alloc_coherent(&pci_dev->dev, R852_DMA_LEN,
		&dev->phys_bounce_buffer, GFP_KERNEL);

	if (!dev->bounce_buffer)
		goto error6;


	error = -ENODEV;
	dev->mmio = pci_ioremap_bar(pci_dev, 0);

	if (!dev->mmio)
		goto error7;

	error = -ENOMEM;
	dev->tmp_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);

	if (!dev->tmp_buffer)
		goto error8;

	init_completion(&dev->dma_done);

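	/*
	 * The card-detect work runs from a freezable workqueue, so it is
	 * frozen during system suspend instead of racing with it.
	 */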
	dev->card_workqueue = create_freezable_workqueue(DRV_NAME);

	if (!dev->card_workqueue)
		goto error9;

	INIT_DELAYED_WORK(&dev->card_detect_work, r852_card_detect_work);

	/* shut down everything - precaution */
	r852_engine_disable(dev);
	r852_disable_irqs(dev);

	r852_dma_test(dev);

	dev->irq = pci_dev->irq;
	spin_lock_init(&dev->irqlock);

	dev->card_detected = 0;
	r852_card_update_present(dev);

	/* register the irq handler */
	error = -ENODEV;
	if (request_irq(pci_dev->irq, &r852_irq, IRQF_SHARED,
			  DRV_NAME, dev))
		goto error10;

	/* kick off the initial present test */
	queue_delayed_work(dev->card_workqueue,
		&dev->card_detect_work, 0);


	pr_notice("driver loaded successfully\n");
	return 0;

error10:
	destroy_workqueue(dev->card_workqueue);
error9:
	kfree(dev->tmp_buffer);
error8:
	pci_iounmap(pci_dev, dev->mmio);
error7:
	dma_free_coherent(&pci_dev->dev, R852_DMA_LEN, dev->bounce_buffer,
			  dev->phys_bounce_buffer);
error6:
	kfree(dev);
error5:
	kfree(chip);
error4:
	pci_release_regions(pci_dev);
error3:
error2:
	pci_disable_device(pci_dev);
error1:
	return error;
}

static void r852_remove(struct pci_dev *pci_dev)
{
	struct r852_device *dev = pci_get_drvdata(pci_dev);

	/* Stop the detect workqueue -
	   we are going to unregister the device anyway */
	cancel_delayed_work_sync(&dev->card_detect_work);
	destroy_workqueue(dev->card_workqueue);

	/* Unregister the device, this might do more I/O */
	r852_unregister_nand_device(dev);

	/* Stop interrupts */
	r852_disable_irqs(dev);
	free_irq(dev->irq, dev);

	/* Cleanup */
	kfree(dev->tmp_buffer);
	pci_iounmap(pci_dev, dev->mmio);
	dma_free_coherent(&pci_dev->dev, R852_DMA_LEN, dev->bounce_buffer,
			  dev->phys_bounce_buffer);

	kfree(dev->chip);
	kfree(dev);

	/* Shutdown the PCI device */
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
}

static void r852_shutdown(struct pci_dev *pci_dev)
{
	struct r852_device *dev = pci_get_drvdata(pci_dev);

	cancel_delayed_work_sync(&dev->card_detect_work);
	r852_disable_irqs(dev);
	synchronize_irq(dev->irq);
	pci_disable_device(pci_dev);
}

#ifdef CONFIG_PM_SLEEP
static int r852_suspend(struct device *device)
{
	struct r852_device *dev = dev_get_drvdata(device);

	if (dev->ctlreg & R852_CTL_CARDENABLE)
		return -EBUSY;

	/* First make sure the detect work is gone */
	cancel_delayed_work_sync(&dev->card_detect_work);

	/* Turn off the interrupts and stop the device */
	r852_disable_irqs(dev);
	r852_engine_disable(dev);

	/* If the card was pulled out just during the suspend, which is very
	   unlikely, we will remove it on resume, it is too late now
	   anyway... */
	dev->card_unstable = 0;
	return 0;
}

static int r852_resume(struct device *device)
{
	struct r852_device *dev = dev_get_drvdata(device);

	r852_disable_irqs(dev);
	r852_card_update_present(dev);
	r852_engine_disable(dev);


	/* If the card status changed, just do the work */
	if (dev->card_detected != dev->card_registered) {
		dbg("card was %s during low power state",
			dev->card_detected ? "added" : "removed");

		queue_delayed_work(dev->card_workqueue,
			&dev->card_detect_work, msecs_to_jiffies(1000));
		return 0;
	}

	/* Otherwise, reinitialize the card */
	if (dev->card_registered) {
		r852_engine_enable(dev);
		nand_select_target(dev->chip, 0);
		nand_reset_op(dev->chip);
		nand_deselect_target(dev->chip);
	}

	/* Program the card detection IRQ */
	r852_update_card_detect(dev);
	return 0;
}
#endif

static const struct pci_device_id r852_pci_id_tbl[] = {

	{ PCI_VDEVICE(RICOH, 0x0852), },
	{ },
};

MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl);

static SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);

static struct pci_driver r852_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= r852_pci_id_tbl,
	.probe		= r852_probe,
	.remove		= r852_remove,
	.shutdown	= r852_shutdown,
	.driver.pm	= &r852_pm_ops,
};

module_pci_driver(r852_pci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
MODULE_DESCRIPTION("Ricoh 85xx xD/smartmedia card reader driver");