1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * LPDDR flash memory device operations. This module provides read, write,
4 * erase, lock/unlock support for LPDDR flash memories
5 * (C) 2008 Korolev Alexey <akorolev@infradead.org>
6 * (C) 2008 Vasiliy Leonenko <vasiliy.leonenko@gmail.com>
7 * Many thanks to Roman Borisov for initial enabling
8 *
9 * TODO:
10 * Implement VPP management
11 * Implement XIP support
12 * Implement OTP support
13 */
14 #include <linux/mtd/pfow.h>
15 #include <linux/mtd/qinfo.h>
16 #include <linux/slab.h>
17 #include <linux/module.h>
18
19 static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
20 size_t *retlen, u_char *buf);
21 static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to,
22 size_t len, size_t *retlen, const u_char *buf);
23 static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
24 unsigned long count, loff_t to, size_t *retlen);
25 static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr);
26 static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
27 static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
28 static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
29 size_t *retlen, void **mtdbuf, resource_size_t *phys);
30 static int lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len);
31 static int get_chip(struct map_info *map, struct flchip *chip, int mode);
32 static int chip_ready(struct map_info *map, struct flchip *chip, int mode);
33 static void put_chip(struct map_info *map, struct flchip *chip);
34
lpddr_cmdset(struct map_info * map)35 struct mtd_info *lpddr_cmdset(struct map_info *map)
36 {
37 struct lpddr_private *lpddr = map->fldrv_priv;
38 struct flchip_shared *shared;
39 struct flchip *chip;
40 struct mtd_info *mtd;
41 int numchips;
42 int i, j;
43
44 mtd = kzalloc_obj(*mtd);
45 if (!mtd)
46 return NULL;
47 mtd->priv = map;
48 mtd->type = MTD_NORFLASH;
49
50 /* Fill in the default mtd operations */
51 mtd->_read = lpddr_read;
52 mtd->type = MTD_NORFLASH;
53 mtd->flags = MTD_CAP_NORFLASH;
54 mtd->flags &= ~MTD_BIT_WRITEABLE;
55 mtd->_erase = lpddr_erase;
56 mtd->_write = lpddr_write_buffers;
57 mtd->_writev = lpddr_writev;
58 mtd->_lock = lpddr_lock;
59 mtd->_unlock = lpddr_unlock;
60 if (map_is_linear(map)) {
61 mtd->_point = lpddr_point;
62 mtd->_unpoint = lpddr_unpoint;
63 }
64 mtd->size = 1ULL << lpddr->qinfo->DevSizeShift;
65 mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
66 mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;
67
68 shared = kmalloc_objs(struct flchip_shared, lpddr->numchips);
69 if (!shared) {
70 kfree(mtd);
71 return NULL;
72 }
73
74 chip = &lpddr->chips[0];
75 numchips = lpddr->numchips / lpddr->qinfo->HWPartsNum;
76 for (i = 0; i < numchips; i++) {
77 shared[i].writing = shared[i].erasing = NULL;
78 mutex_init(&shared[i].lock);
79 for (j = 0; j < lpddr->qinfo->HWPartsNum; j++) {
80 *chip = lpddr->chips[i];
81 chip->start += (unsigned long)j << lpddr->chipshift;
82 chip->oldstate = chip->state = FL_READY;
83 chip->priv = &shared[i];
84 /* those should be reset too since
85 they create memory references. */
86 init_waitqueue_head(&chip->wq);
87 mutex_init(&chip->mutex);
88 chip++;
89 }
90 }
91
92 return mtd;
93 }
94 EXPORT_SYMBOL(lpddr_cmdset);
95
print_drs_error(unsigned int dsr)96 static void print_drs_error(unsigned int dsr)
97 {
98 int prog_status = (dsr & DSR_RPS) >> 8;
99
100 if (!(dsr & DSR_AVAILABLE))
101 pr_notice("DSR.15: (0) Device not Available\n");
102 if ((prog_status & 0x03) == 0x03)
103 pr_notice("DSR.9,8: (11) Attempt to program invalid half with 41h command\n");
104 else if (prog_status & 0x02)
105 pr_notice("DSR.9,8: (10) Object Mode Program attempt in region with Control Mode data\n");
106 else if (prog_status & 0x01)
107 pr_notice("DSR.9,8: (01) Program attempt in region with Object Mode data\n");
108 if (!(dsr & DSR_READY_STATUS))
109 pr_notice("DSR.7: (0) Device is Busy\n");
110 if (dsr & DSR_ESS)
111 pr_notice("DSR.6: (1) Erase Suspended\n");
112 if (dsr & DSR_ERASE_STATUS)
113 pr_notice("DSR.5: (1) Erase/Blank check error\n");
114 if (dsr & DSR_PROGRAM_STATUS)
115 pr_notice("DSR.4: (1) Program Error\n");
116 if (dsr & DSR_VPPS)
117 pr_notice("DSR.3: (1) Vpp low detect, operation aborted\n");
118 if (dsr & DSR_PSS)
119 pr_notice("DSR.2: (1) Program suspended\n");
120 if (dsr & DSR_DPS)
121 pr_notice("DSR.1: (1) Aborted Erase/Program attempt on locked block\n");
122 }
123
/*
 * Poll the PFOW Device Status Register until the in-flight operation
 * completes, sleeping between polls.
 *
 * @chip_op_time: expected operation time in microseconds; the timeout is
 *                eight times this value, or 500ms when 0.
 *
 * Must be called with chip->mutex held; the mutex is dropped while waiting
 * and re-acquired before every state check and before returning.
 *
 * Returns 0 on success, -ETIME on timeout, -EIO when DSR reports an error
 * (the error bits are cleared and decoded via print_drs_error()).  On exit
 * chip->state is set to FL_READY regardless of outcome.
 */
static int wait_for_ready(struct map_info *map, struct flchip *chip,
		unsigned int chip_op_time)
{
	unsigned int timeo, reset_timeo, sleep_time;
	unsigned int dsr;
	flstate_t chip_state = chip->state;
	int ret = 0;

	/* set our timeout to 8 times the expected delay */
	timeo = chip_op_time * 8;
	if (!timeo)
		timeo = 500000;
	reset_timeo = timeo;
	sleep_time = chip_op_time / 2;

	for (;;) {
		dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
		if (dsr & DSR_READY_STATUS)
			break;
		if (!timeo) {
			printk(KERN_ERR "%s: Flash timeout error state %d\n",
							map->name, chip_state);
			ret = -ETIME;
			break;
		}

		/* OK Still waiting. Drop the lock, wait a while and retry. */
		mutex_unlock(&chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Half of the normal delay still remaining
			 * can be performed with a sleeping delay instead
			 * of busy waiting.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			/* Remaining wait is shorter than a jiffy: busy-wait
			 * in 1us steps but still allow preemption. */
			udelay(1);
			cond_resched();
			timeo--;
		}
		mutex_lock(&chip->mutex);

		while (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
		}
		if (chip->erase_suspended || chip->write_suspended) {
			/* Suspend has occurred while sleep: reset timeout */
			timeo = reset_timeo;
			chip->erase_suspended = chip->write_suspended = 0;
		}
	}
	/* check status for errors */
	if (dsr & DSR_ERR) {
		/* Clear DSR*/
		map_write(map, CMD(~(DSR_ERR)), map->pfow_base + PFOW_DSR);
		printk(KERN_WARNING"%s: Bad status on wait: 0x%x\n",
				map->name, dsr);
		print_drs_error(dsr);
		ret = -EIO;
	}
	chip->state = FL_READY;
	return ret;
}
196
/*
 * Acquire the chip (hardware partition) for an operation of type @mode.
 *
 * Must be called with chip->mutex held; may drop and re-take it while
 * arbitrating with other partitions or sleeping.  Returns 0 once the
 * caller is clear to issue the operation, or a negative error code
 * propagated from chip_ready().  Pair with put_chip() on success.
 */
static int get_chip(struct map_info *map, struct flchip *chip, int mode)
{
	int ret;
	DECLARE_WAITQUEUE(wait, current);

 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)
		&& chip->state != FL_SYNCING) {
		/*
		 * OK. We have possibility for contension on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contension arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		mutex_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			/* trylock avoids an AB-BA deadlock against a thread
			 * that holds contender->mutex and wants ours. */
			ret = mutex_trylock(&contender->mutex);
			mutex_unlock(&shared->lock);
			if (!ret)
				goto retry;
			mutex_unlock(&chip->mutex);
			ret = chip_ready(map, contender, mode);
			mutex_lock(&chip->mutex);

			if (ret == -EAGAIN) {
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			if (ret) {
				mutex_unlock(&contender->mutex);
				return ret;
			}
			mutex_lock(&shared->lock);

			/* We should not own chip if it is already in FL_SYNCING
			 * state. Put contender and retry. */
			if (chip->state == FL_SYNCING) {
				put_chip(map, contender);
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			mutex_unlock(&contender->mutex);
		}

		/* Check if we have suspended erase on this chip.
		   Must sleep in such a case. */
		if (mode == FL_ERASING && shared->erasing
		    && shared->erasing->oldstate == FL_ERASING) {
			mutex_unlock(&shared->lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			goto retry;
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		mutex_unlock(&shared->lock);
	}

	ret = chip_ready(map, chip, mode);
	if (ret == -EAGAIN)
		goto retry;

	return ret;
}
292
/*
 * Bring the chip into a state compatible with the requested @mode.
 *
 * Must be called with chip->mutex held.  Returns 0 when the chip is ready
 * for the operation, -EAGAIN after having slept (caller must re-evaluate
 * and retry), or -EIO if suspending an in-progress erase failed.
 *
 * If an erase is running and the hardware supports erase suspend
 * (qinfo->SuspEraseSupp) and the caller only wants to read/point, the
 * erase is suspended rather than waited for.
 */
static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
{
	struct lpddr_private *lpddr = map->fldrv_priv;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	/* Prevent setting state FL_SYNCING for chip in suspended state. */
	if (FL_SYNCING == mode && FL_READY != chip->oldstate)
		goto sleep;

	switch (chip->state) {
	case FL_READY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!lpddr->qinfo->SuspEraseSupp ||
			!(mode == FL_READY || mode == FL_POINT))
			goto sleep;

		map_write(map, CMD(LPDDR_SUSPEND),
			map->pfow_base + PFOW_PROGRAM_ERASE_SUSPEND);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		ret = wait_for_ready(map, chip, 0);
		if (ret) {
			/* Oops. something got wrong. */
			/* Resume and pretend we weren't here. */
			put_chip(map, chip);
			printk(KERN_ERR "%s: suspend operation failed."
					"State may be wrong \n", map->name);
			return -EIO;
		}
		chip->erase_suspended = 1;
		chip->state = FL_READY;
		return 0;
		/* Erase suspend */
	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		fallthrough;
	default:
sleep:
		/* Chip is busy with an incompatible operation: wait on the
		 * chip's queue until woken, then tell the caller to retry. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		return -EAGAIN;
	}
}
346
/*
 * Release the chip acquired by get_chip() and resume any operation that
 * was suspended on our behalf.
 *
 * Must be called with chip->mutex held.  Hands shared->writing ownership
 * back to a suspended eraser (recursing into put_chip() on that partition
 * so its erase is resumed in its own context), or resumes a suspended
 * erase on this chip directly.  Always wakes waiters on chip->wq.
 */
static void put_chip(struct map_info *map, struct flchip *chip)
{
	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		mutex_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back the ownership */
				struct flchip *loaner = shared->writing;
				mutex_lock(&loaner->mutex);
				mutex_unlock(&shared->lock);
				mutex_unlock(&chip->mutex);
				/* let the suspended erase resume in the
				 * loaner's own context */
				put_chip(map, loaner);
				mutex_lock(&chip->mutex);
				mutex_unlock(&loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			mutex_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		mutex_unlock(&shared->lock);
	}

	switch (chip->oldstate) {
	case FL_ERASING:
		/* Resume the erase we suspended in chip_ready(). */
		map_write(map, CMD(LPDDR_RESUME),
				map->pfow_base + PFOW_COMMAND_CODE);
		map_write(map, CMD(LPDDR_START_EXECUTION),
				map->pfow_base + PFOW_COMMAND_EXECUTE);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;
	case FL_READY:
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!\n",
				map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}
401
/*
 * Program up to one write buffer's worth of data gathered from an iovec.
 *
 * @adr:       absolute flash address to program (may be unaligned; it is
 *             rounded down to a bankwidth boundary and the gap padded
 *             with 0xFF so existing data is preserved)
 * @pvec:      in/out — current kvec; advanced as vectors are consumed
 * @pvec_seek: in/out — byte offset within *pvec
 * @len:       number of payload bytes to program (caller guarantees the
 *             range does not cross a write-buffer boundary)
 *
 * Takes chip->mutex itself.  Returns 0 on success or a negative error
 * from get_chip()/wait_for_ready().
 */
static int do_write_buffer(struct map_info *map, struct flchip *chip,
			unsigned long adr, const struct kvec **pvec,
			unsigned long *pvec_seek, int len)
{
	struct lpddr_private *lpddr = map->fldrv_priv;
	map_word datum;
	int ret, wbufsize, word_gap;
	const struct kvec *vec;
	unsigned long vec_seek;
	unsigned long prog_buf_ofs;

	wbufsize = 1 << lpddr->qinfo->BufSizeShift;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}
	/* Figure out the number of words to write */
	word_gap = (-adr & (map_bankwidth(map)-1));
	if (word_gap) {
		/* Unaligned start: back adr up to the word boundary and
		 * pre-fill the leading gap with all-ones (no-op for flash). */
		word_gap = map_bankwidth(map) - word_gap;
		adr -= word_gap;
		datum = map_word_ff(map);
	}
	/* Write data */
	/* Get the program buffer offset from PFOW register data first*/
	prog_buf_ofs = map->pfow_base + CMDVAL(map_read(map,
				map->pfow_base + PFOW_PROGRAM_BUFFER_OFFSET));
	vec = *pvec;
	vec_seek = *pvec_seek;
	do {
		/* n = bytes of payload that fit in the current map word,
		 * bounded by the current iovec segment and remaining len. */
		int n = map_bankwidth(map) - word_gap;

		if (n > vec->iov_len - vec_seek)
			n = vec->iov_len - vec_seek;
		if (n > len)
			n = len;

		if (!word_gap && (len < map_bankwidth(map)))
			/* Short tail word: pad unused trailing bytes with FF */
			datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum,
				vec->iov_base + vec_seek, word_gap, n);

		len -= n;
		word_gap += n;
		if (!len || word_gap == map_bankwidth(map)) {
			/* Word complete (or data exhausted): flush it to the
			 * on-chip program buffer. */
			map_write(map, datum, prog_buf_ofs);
			prog_buf_ofs += map_bankwidth(map);
			word_gap = 0;
		}

		vec_seek += n;
		if (vec_seek == vec->iov_len) {
			/* Current iovec consumed: move to the next one. */
			vec++;
			vec_seek = 0;
		}
	} while (len);
	*pvec = vec;
	*pvec_seek = vec_seek;

	/* GO GO GO */
	send_pfow_command(map, LPDDR_BUFF_PROGRAM, adr, wbufsize, NULL);
	chip->state = FL_WRITING;
	ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->ProgBufferTime));
	if (ret) {
		printk(KERN_WARNING"%s Buffer program error: %d at %lx\n",
			map->name, ret, adr);
		goto out;
	}

 out:	put_chip(map, chip);
	mutex_unlock(&chip->mutex);
	return ret;
}
479
/*
 * Erase the single erase block containing absolute address @adr.
 * Acquires the owning chip, issues LPDDR_BLOCK_ERASE and waits for the
 * expected block erase time.  Returns 0 on success or a negative errno.
 */
static int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	struct flchip *chip = &lpddr->chips[adr >> lpddr->chipshift];
	int err;

	mutex_lock(&chip->mutex);
	err = get_chip(map, chip, FL_ERASING);
	if (!err) {
		send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);
		chip->state = FL_ERASING;
		/* BlockEraseTime is encoded as a power of two, in ms. */
		err = wait_for_ready(map, chip,
				     (1 << lpddr->qinfo->BlockEraseTime) * 1000);
		if (err)
			printk(KERN_WARNING"%s Erase block error %d at : %llx\n",
			       map->name, err, adr);
		put_chip(map, chip);
	}
	mutex_unlock(&chip->mutex);
	return err;
}
506
/*
 * mtd ->_read handler: copy @len bytes starting at @adr into @buf.
 * Acquires the owning chip in FL_READY mode (suspending an erase if the
 * hardware allows it) before copying straight from the mapped window.
 */
static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
			size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	struct flchip *chip = &lpddr->chips[adr >> lpddr->chipshift];
	int err;

	mutex_lock(&chip->mutex);
	err = get_chip(map, chip, FL_READY);
	if (!err) {
		map_copy_from(map, buf, adr, len);
		*retlen = len;
		put_chip(map, chip);
	}
	mutex_unlock(&chip->mutex);
	return err;
}
530
/*
 * mtd ->_point handler: give the caller a direct pointer into the mapped
 * flash window instead of copying.
 *
 * Walks consecutive chips, marking each FL_POINT and bumping its
 * ref_point_counter; lpddr_unpoint() undoes this.  Stops early (without
 * error) if chips are not virtually contiguous or chipnum runs past
 * numchips; *retlen reports how much was actually pointed.
 *
 * NOTE(review): *mtdbuf is computed from the first chip before the
 * chipnum-bounds check in the loop — presumably adr is always validated
 * by the mtd core before this is called; confirm against callers.
 */
static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
			size_t *retlen, void **mtdbuf, resource_size_t *phys)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	unsigned long ofs, last_end = 0;
	struct flchip *chip = &lpddr->chips[chipnum];
	int ret = 0;

	if (!map->virt)
		return -EINVAL;

	/* ofs: offset within the first chip that the first read should start */
	ofs = adr - (chipnum << lpddr->chipshift);
	*mtdbuf = (void *)map->virt + chip->start + ofs;

	while (len) {
		unsigned long thislen;

		if (chipnum >= lpddr->numchips)
			break;

		/* We cannot point across chips that are virtually disjoint */
		if (!last_end)
			last_end = chip->start;
		else if (chip->start != last_end)
			break;

		/* thislen: bytes available on this chip (clip at chip end) */
		if ((len + ofs - 1) >> lpddr->chipshift)
			thislen = (1UL << lpddr->chipshift) - ofs;
		else
			thislen = len;
		/* get the chip */
		mutex_lock(&chip->mutex);
		ret = get_chip(map, chip, FL_POINT);
		mutex_unlock(&chip->mutex);
		if (ret)
			break;

		chip->state = FL_POINT;
		chip->ref_point_counter++;
		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		last_end += 1UL << lpddr->chipshift;
		chipnum++;
		chip = &lpddr->chips[chipnum];
	}
	return 0;
}
583
/*
 * mtd ->_unpoint handler: release a region previously handed out by
 * lpddr_point().
 *
 * Walks the same chip range, decrementing each chip's ref_point_counter
 * and returning it to FL_READY when the count reaches zero.  Returns
 * -EINVAL if any chip in the range was not in FL_POINT state.
 */
static int lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift, err = 0;
	unsigned long ofs;

	/* ofs: offset within the first chip that the first read should start */
	ofs = adr - (chipnum << lpddr->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		/* Check the bound before forming the pointer: computing
		 * &chips[chipnum] for an out-of-range index is undefined
		 * behaviour even if it is never dereferenced. */
		if (chipnum >= lpddr->numchips)
			break;
		chip = &lpddr->chips[chipnum];

		/* thislen: bytes covered on this chip (clip at chip end) */
		if ((len + ofs - 1) >> lpddr->chipshift)
			thislen = (1UL << lpddr->chipshift) - ofs;
		else
			thislen = len;

		mutex_lock(&chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if (chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else {
			printk(KERN_WARNING "%s: Warning: unpoint called on non"
					"pointed region\n", map->name);
			err = -EINVAL;
		}

		put_chip(map, chip);
		mutex_unlock(&chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}

	return err;
}
628
/*
 * mtd ->_write handler: wrap the flat buffer in a single-element kvec and
 * delegate to the scatter/gather path.
 */
static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				size_t *retlen, const u_char *buf)
{
	struct kvec vec = {
		.iov_base = (void *)buf,
		.iov_len = len,
	};

	return lpddr_writev(mtd, &vec, 1, to, retlen);
}
639
640
/*
 * mtd ->_writev handler: program a scatter/gather list, splitting the work
 * into chunks that never cross a write-buffer boundary.  do_write_buffer()
 * consumes the iovec incrementally via &vecs/&vec_seek.
 */
static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
				unsigned long count, loff_t to, size_t *retlen)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int wbufsize = 1 << lpddr->qinfo->BufSizeShift;
	unsigned long ofs, vec_seek, i;
	size_t remaining = 0;
	int chipnum, err;

	/* Total payload across all vectors. */
	for (i = 0; i < count; i++)
		remaining += vecs[i].iov_len;

	if (!remaining)
		return 0;

	chipnum = to >> lpddr->chipshift;
	ofs = to;
	vec_seek = 0;

	while (remaining) {
		/* We must not cross write block boundaries */
		int thislen = wbufsize - (ofs & (wbufsize - 1));

		if ((size_t)thislen > remaining)
			thislen = remaining;

		err = do_write_buffer(map, &lpddr->chips[chipnum],
				      ofs, &vecs, &vec_seek, thislen);
		if (err)
			return err;

		ofs += thislen;
		(*retlen) += thislen;
		remaining -= thislen;

		/* Be nice and reschedule with the chip in a usable
		 * state for other processes */
		cond_resched();
	}

	return 0;
}
687
lpddr_erase(struct mtd_info * mtd,struct erase_info * instr)688 static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr)
689 {
690 unsigned long ofs, len;
691 int ret;
692 struct map_info *map = mtd->priv;
693 struct lpddr_private *lpddr = map->fldrv_priv;
694 int size = 1 << lpddr->qinfo->UniformBlockSizeShift;
695
696 ofs = instr->addr;
697 len = instr->len;
698
699 while (len > 0) {
700 ret = do_erase_oneblock(mtd, ofs);
701 if (ret)
702 return ret;
703 ofs += size;
704 len -= size;
705 }
706
707 return 0;
708 }
709
710 #define DO_XXLOCK_LOCK 1
711 #define DO_XXLOCK_UNLOCK 2
/*
 * Common implementation for block lock/unlock.  @thunk selects the PFOW
 * command (DO_XXLOCK_LOCK or DO_XXLOCK_UNLOCK); anything else is a driver
 * bug.  Returns 0 on success or a negative errno.
 */
static int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	struct flchip *chip = &lpddr->chips[adr >> lpddr->chipshift];
	int err;

	mutex_lock(&chip->mutex);
	err = get_chip(map, chip, FL_LOCKING);
	if (err) {
		mutex_unlock(&chip->mutex);
		return err;
	}

	switch (thunk) {
	case DO_XXLOCK_LOCK:
		send_pfow_command(map, LPDDR_LOCK_BLOCK, adr, adr + len, NULL);
		chip->state = FL_LOCKING;
		break;
	case DO_XXLOCK_UNLOCK:
		send_pfow_command(map, LPDDR_UNLOCK_BLOCK, adr, adr + len, NULL);
		chip->state = FL_UNLOCKING;
		break;
	default:
		BUG();
	}

	err = wait_for_ready(map, chip, 1);
	if (err)
		printk(KERN_ERR "%s: block unlock error status %d\n",
			map->name, err);

	put_chip(map, chip);
	mutex_unlock(&chip->mutex);
	return err;
}
746
/* mtd ->_lock handler: lock @len bytes of blocks starting at @ofs. */
static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return do_xxlock(mtd, ofs, len, DO_XXLOCK_LOCK);
}
751
/* mtd ->_unlock handler: unlock @len bytes of blocks starting at @ofs. */
static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return do_xxlock(mtd, ofs, len, DO_XXLOCK_UNLOCK);
}
756
757 MODULE_LICENSE("GPL");
758 MODULE_AUTHOR("Alexey Korolev <akorolev@infradead.org>");
759 MODULE_DESCRIPTION("MTD driver for LPDDR flash chips");
760