/*
 * LPDDR flash memory device operations. This module provides read, write,
 * erase, lock/unlock support for LPDDR flash memories
 * (C) 2008 Korolev Alexey <akorolev@infradead.org>
 * (C) 2008 Vasiliy Leonenko <vasiliy.leonenko@gmail.com>
 * Many thanks to Roman Borisov for initial enabling
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 * TODO:
 * Implement VPP management
 * Implement XIP support
 * Implement OTP support
 */
#include <linux/mtd/pfow.h>
#include <linux/mtd/qinfo.h>
#include <linux/slab.h>

static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
			size_t *retlen, u_char *buf);
static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to,
			size_t len, size_t *retlen, const u_char *buf);
static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
			unsigned long count, loff_t to, size_t *retlen);
static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr);
static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
			size_t *retlen, void **mtdbuf, resource_size_t *phys);
static void lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len);
static int get_chip(struct map_info *map, struct flchip *chip, int mode);
static int chip_ready(struct map_info *map, struct flchip *chip, int mode);
static void put_chip(struct map_info *map, struct flchip *chip);

struct mtd_info *lpddr_cmdset(struct map_info *map)
{
	struct lpddr_private *lpddr = map->fldrv_priv;
	struct flchip_shared *shared;
	struct flchip *chip;
	struct mtd_info *mtd;
	int numchips;
	int i, j;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->read = lpddr_read;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->flags &= ~MTD_BIT_WRITEABLE;
	mtd->erase = lpddr_erase;
	mtd->write = lpddr_write_buffers;
	mtd->writev = lpddr_writev;
	mtd->read_oob = NULL;
	mtd->write_oob = NULL;
	mtd->sync = NULL;
	mtd->lock = lpddr_lock;
	mtd->unlock = lpddr_unlock;
	mtd->suspend = NULL;
	mtd->resume = NULL;
	if (map_is_linear(map)) {
		mtd->point = lpddr_point;
		mtd->unpoint = lpddr_unpoint;
	}
	mtd->block_isbad = NULL;
	mtd->block_markbad = NULL;
	mtd->size = 1 << lpddr->qinfo->DevSizeShift;
	mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
	mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;

	shared = kmalloc(sizeof(struct flchip_shared) * lpddr->numchips,
						GFP_KERNEL);
	if (!shared) {
		kfree(lpddr);
		kfree(mtd);
		return NULL;
	}

	chip = &lpddr->chips[0];
	numchips = lpddr->numchips / lpddr->qinfo->HWPartsNum;
	for (i = 0; i < numchips; i++) {
		shared[i].writing = shared[i].erasing = NULL;
		spin_lock_init(&shared[i].lock);
		for (j = 0; j < lpddr->qinfo->HWPartsNum; j++) {
			*chip = lpddr->chips[i];
			chip->start += j << lpddr->chipshift;
			chip->oldstate = chip->state = FL_READY;
			chip->priv = &shared[i];
			/* those should be reset too since
			   they create memory references. */
			init_waitqueue_head(&chip->wq);
			spin_lock_init(&chip->_spinlock);
			chip->mutex = &chip->_spinlock;
			chip++;
		}
	}

	return mtd;
}
EXPORT_SYMBOL(lpddr_cmdset);

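/*
 * Poll the PFOW Device Status Register (DSR) until the chip reports ready
 * or the timeout expires (eight times the expected operation time, with a
 * default when no expected time is given).  chip->mutex is dropped while
 * waiting, and a suspend of the running operation resets the timeout.
 */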
static int wait_for_ready(struct map_info *map, struct flchip *chip,
		unsigned int chip_op_time)
{
	unsigned int timeo, reset_timeo, sleep_time;
	unsigned int dsr;
	flstate_t chip_state = chip->state;
	int ret = 0;

	/* set our timeout to 8 times the expected delay */
	timeo = chip_op_time * 8;
	if (!timeo)
		timeo = 500000;
	reset_timeo = timeo;
	sleep_time = chip_op_time / 2;

	for (;;) {
		dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
		if (dsr & DSR_READY_STATUS)
			break;
		if (!timeo) {
			printk(KERN_ERR "%s: Flash timeout error state %d\n",
							map->name, chip_state);
			ret = -ETIME;
			break;
		}

		/* OK, still waiting. Drop the lock, wait a while and retry. */
		spin_unlock(chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Half of the normal delay still remaining
			 * can be performed with a sleeping delay instead
			 * of busy waiting.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			udelay(1);
			cond_resched();
			timeo--;
		}
		spin_lock(chip->mutex);

		while (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
		}
		if (chip->erase_suspended || chip->write_suspended) {
			/* Suspend has occurred while sleeping: reset timeout */
			timeo = reset_timeo;
			chip->erase_suspended = chip->write_suspended = 0;
		}
	}
	/* check status for errors */
	if (dsr & DSR_ERR) {
		/* Clear DSR */
		map_write(map, CMD(~(DSR_ERR)), map->pfow_base + PFOW_DSR);
		printk(KERN_WARNING "%s: Bad status on wait: 0x%x\n",
				map->name, dsr);
		print_drs_error(dsr);
		ret = -EIO;
	}
	chip->state = FL_READY;
	return ret;
}

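/*
 * Acquire the chip (or hardware partition) for an operation of type @mode.
 * Write and erase operations are arbitrated through the shared flchip_shared
 * structure so that only one partition at a time drives the real chip; the
 * caller may be put to sleep and asked to retry until it owns the operation.
 * Called with chip->mutex held.
 */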
static int get_chip(struct map_info *map, struct flchip *chip, int mode)
{
	int ret;
	DECLARE_WAITQUEUE(wait, current);

 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)
		&& chip->state != FL_SYNCING) {
		/*
		 * OK. We have the possibility of contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition. So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it. If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep. In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = chip_ready(map, contender, mode);
			spin_lock(chip->mutex);

			if (ret == -EAGAIN) {
				spin_unlock(contender->mutex);
				goto retry;
			}
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			spin_lock(&shared->lock);

			/* We should not own chip if it is already in
			 * FL_SYNCING state. Put contender and retry. */
			if (chip->state == FL_SYNCING) {
				put_chip(map, contender);
				spin_unlock(contender->mutex);
				goto retry;
			}
			spin_unlock(contender->mutex);
		}

		/* Check if we have suspended erase on this chip.
		   Must sleep in such a case. */
		if (mode == FL_ERASING && shared->erasing
			&& shared->erasing->oldstate == FL_ERASING) {
			spin_unlock(&shared->lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			goto retry;
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		spin_unlock(&shared->lock);
	}

	ret = chip_ready(map, chip, mode);
	if (ret == -EAGAIN)
		goto retry;

	return ret;
}

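/*
 * Check whether the chip can accept an operation of type @mode in its
 * current state.  Returns 0 when the caller may proceed, -EAGAIN after
 * sleeping when it should retry, and may suspend an erase in progress if
 * the chip supports erase suspend and the caller only wants to read or
 * point.
 */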
static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
{
	struct lpddr_private *lpddr = map->fldrv_priv;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	/* Prevent setting state FL_SYNCING for chip in suspended state. */
	if (FL_SYNCING == mode && FL_READY != chip->oldstate)
		goto sleep;

	switch (chip->state) {
	case FL_READY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!lpddr->qinfo->SuspEraseSupp ||
			!(mode == FL_READY || mode == FL_POINT))
			goto sleep;

		map_write(map, CMD(LPDDR_SUSPEND),
			map->pfow_base + PFOW_PROGRAM_ERASE_SUSPEND);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		ret = wait_for_ready(map, chip, 0);
		if (ret) {
			/* Oops. Something went wrong. */
			/* Resume and pretend we weren't here. */
			map_write(map, CMD(LPDDR_RESUME),
				map->pfow_base + PFOW_COMMAND_CODE);
			map_write(map, CMD(LPDDR_START_EXECUTION),
				map->pfow_base + PFOW_COMMAND_EXECUTE);
			chip->state = FL_ERASING;
			chip->oldstate = FL_READY;
			printk(KERN_ERR "%s: suspend operation failed. "
					"State may be wrong\n", map->name);
			return -EIO;
		}
		chip->erase_suspended = 1;
		chip->state = FL_READY;
		return 0;
		/* Erase suspend */
	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		return -EAGAIN;
	}
}

/* Release ownership taken by get_chip() and resume a suspended erase, if any. */
static void put_chip(struct map_info *map, struct flchip *chip)
{
	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back the ownership */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch (chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		map_write(map, CMD(LPDDR_RESUME),
				map->pfow_base + PFOW_COMMAND_CODE);
		map_write(map, CMD(LPDDR_START_EXECUTION),
				map->pfow_base + PFOW_COMMAND_EXECUTE);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;
	case FL_READY:
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!\n",
				map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}

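/*
 * Program one write buffer's worth of data at @adr from the kvec array
 * described by *pvec and *pvec_seek, advancing both as data is consumed.
 * Partial words at the edges of the range are padded with all-ones before
 * being loaded into the program buffer.
 */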
int do_write_buffer(struct map_info *map, struct flchip *chip,
			unsigned long adr, const struct kvec **pvec,
			unsigned long *pvec_seek, int len)
{
	struct lpddr_private *lpddr = map->fldrv_priv;
	map_word datum;
	int ret, wbufsize, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;
	unsigned long prog_buf_ofs;

	wbufsize = 1 << lpddr->qinfo->BufSizeShift;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}
	/* Figure out the number of words to write */
	word_gap = (-adr & (map_bankwidth(map)-1));
	words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
	if (!word_gap) {
		words--;
	} else {
		word_gap = map_bankwidth(map) - word_gap;
		adr -= word_gap;
		datum = map_word_ff(map);
	}
	/* Write data */
	/* Get the program buffer offset from PFOW register data first */
	prog_buf_ofs = map->pfow_base + CMDVAL(map_read(map,
				map->pfow_base + PFOW_PROGRAM_BUFFER_OFFSET));
	vec = *pvec;
	vec_seek = *pvec_seek;
	do {
		int n = map_bankwidth(map) - word_gap;

		if (n > vec->iov_len - vec_seek)
			n = vec->iov_len - vec_seek;
		if (n > len)
			n = len;

		if (!word_gap && (len < map_bankwidth(map)))
			datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum,
				vec->iov_base + vec_seek, word_gap, n);

		len -= n;
		word_gap += n;
		if (!len || word_gap == map_bankwidth(map)) {
			map_write(map, datum, prog_buf_ofs);
			prog_buf_ofs += map_bankwidth(map);
			word_gap = 0;
		}

		vec_seek += n;
		if (vec_seek == vec->iov_len) {
			vec++;
			vec_seek = 0;
		}
	} while (len);
	*pvec = vec;
	*pvec_seek = vec_seek;

	/* GO GO GO */
	send_pfow_command(map, LPDDR_BUFF_PROGRAM, adr, wbufsize, NULL);
	chip->state = FL_WRITING;
	ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->ProgBufferTime));
	if (ret) {
		printk(KERN_WARNING "%s: Buffer program error %d at %lx\n",
			map->name, ret, adr);
		goto out;
	}

 out:	put_chip(map, chip);
	spin_unlock(chip->mutex);
	return ret;
}

int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	struct flchip *chip = &lpddr->chips[chipnum];
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}
	send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);
	chip->state = FL_ERASING;
	ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->BlockEraseTime)*1000);
	if (ret) {
		printk(KERN_WARNING "%s: Erase block error %d at %llx\n",
			map->name, ret, adr);
		goto out;
	}
 out:	put_chip(map, chip);
	spin_unlock(chip->mutex);
	return ret;
}

static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
			size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	struct flchip *chip = &lpddr->chips[chipnum];
	int ret = 0;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	map_copy_from(map, buf, adr, len);
	*retlen = len;

	put_chip(map, chip);
	spin_unlock(chip->mutex);
	return ret;
}

static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
		size_t *retlen, void **mtdbuf, resource_size_t *phys)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	unsigned long ofs, last_end = 0;
	struct flchip *chip = &lpddr->chips[chipnum];
	int ret = 0;

	if (!map->virt || (adr + len > mtd->size))
		return -EINVAL;

	/* ofs: offset within the first chip that the first read should start */
	ofs = adr - (chipnum << lpddr->chipshift);

	*mtdbuf = (void *)map->virt + chip->start + ofs;
	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= lpddr->numchips)
			break;

		/* We cannot point across chips that are virtually disjoint */
		if (!last_end)
			last_end = chip->start;
		else if (chip->start != last_end)
			break;

		if ((len + ofs - 1) >> lpddr->chipshift)
			thislen = (1<<lpddr->chipshift) - ofs;
		else
			thislen = len;
		/* get the chip */
		spin_lock(chip->mutex);
		ret = get_chip(map, chip, FL_POINT);
		spin_unlock(chip->mutex);
		if (ret)
			break;

		chip->state = FL_POINT;
		chip->ref_point_counter++;
		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		last_end += 1 << lpddr->chipshift;
		chipnum++;
		chip = &lpddr->chips[chipnum];
	}
	return 0;
}

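/*
 * Undo a previous lpddr_point(): drop the point reference taken on each
 * chip in the range and return a chip to FL_READY once its last reference
 * is gone.
 */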
static void lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	unsigned long ofs;

	/* ofs: offset within the first chip that the first read should start */
	ofs = adr - (chipnum << lpddr->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		if (chipnum >= lpddr->numchips)
			break;
		chip = &lpddr->chips[chipnum];

		if ((len + ofs - 1) >> lpddr->chipshift)
			thislen = (1<<lpddr->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if (chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_WARNING "%s: Warning: unpoint called on non-"
					"pointed region\n", map->name);

		put_chip(map, chip);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}

static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				size_t *retlen, const u_char *buf)
{
	struct kvec vec;

	vec.iov_base = (void *) buf;
	vec.iov_len = len;

	return lpddr_writev(mtd, &vec, 1, to, retlen);
}

static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
			unsigned long count, loff_t to, size_t *retlen)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, vec_seek, i;
	int wbufsize = 1 << lpddr->qinfo->BufSizeShift;
	size_t len = 0;

	for (i = 0; i < count; i++)
		len += vecs[i].iov_len;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> lpddr->chipshift;

	ofs = to;
	vec_seek = 0;

	do {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;

		ret = do_write_buffer(map, &lpddr->chips[chipnum],
					ofs, &vecs, &vec_seek, size);
		if (ret)
			return ret;

		ofs += size;
		(*retlen) += size;
		len -= size;

		/* Be nice and reschedule with the chip in a usable
		 * state for other processes */
		cond_resched();

	} while (len);

	return 0;
}

static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int size = 1 << lpddr->qinfo->UniformBlockSizeShift;

	ofs = instr->addr;
	len = instr->len;

	if (ofs > mtd->size || (len + ofs) > mtd->size)
		return -EINVAL;

	while (len > 0) {
		ret = do_erase_oneblock(mtd, ofs);
		if (ret)
			return ret;
		ofs += size;
		len -= size;
	}
	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

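/*
 * Common helper for lpddr_lock() and lpddr_unlock(): issue the PFOW block
 * lock or unlock command for the range starting at @adr and wait for the
 * chip to become ready again.
 */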
#define DO_XXLOCK_LOCK		1
#define DO_XXLOCK_UNLOCK	2
int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
{
	int ret = 0;
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	struct flchip *chip = &lpddr->chips[chipnum];

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, FL_LOCKING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (thunk == DO_XXLOCK_LOCK) {
		send_pfow_command(map, LPDDR_LOCK_BLOCK, adr, adr + len, NULL);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_UNLOCK) {
		send_pfow_command(map, LPDDR_UNLOCK_BLOCK, adr, adr + len, NULL);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	ret = wait_for_ready(map, chip, 1);
	if (ret) {
		printk(KERN_ERR "%s: block lock/unlock error status %d\n",
				map->name, ret);
		goto out;
	}
 out:	put_chip(map, chip);
	spin_unlock(chip->mutex);
	return ret;
}

static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return do_xxlock(mtd, ofs, len, DO_XXLOCK_LOCK);
}

static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return do_xxlock(mtd, ofs, len, DO_XXLOCK_UNLOCK);
}

int word_program(struct map_info *map, loff_t adr, uint32_t curval)
{
	int ret;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	struct flchip *chip = &lpddr->chips[chipnum];

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	send_pfow_command(map, LPDDR_WORD_PROGRAM, adr, 0x00, (map_word *)&curval);

	ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->SingleWordProgTime));
	if (ret) {
		printk(KERN_WARNING "%s: word_program error at %llx, val %x\n",
			map->name, adr, curval);
		goto out;
	}

 out:	put_chip(map, chip);
	spin_unlock(chip->mutex);
	return ret;
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexey Korolev <akorolev@infradead.org>");
MODULE_DESCRIPTION("MTD driver for LPDDR flash chips");