xref: /linux/drivers/mtd/spi-nor/core.c (revision 2c1ed907520c50326b8f604907a8478b27881a2e)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
4  * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
5  *
6  * Copyright (C) 2005, Intec Automation Inc.
7  * Copyright (C) 2014, Freescale Semiconductor, Inc.
8  */
9 
10 #include <linux/err.h>
11 #include <linux/errno.h>
12 #include <linux/delay.h>
13 #include <linux/device.h>
14 #include <linux/math64.h>
15 #include <linux/module.h>
16 #include <linux/mtd/mtd.h>
17 #include <linux/mtd/spi-nor.h>
18 #include <linux/mutex.h>
19 #include <linux/of_platform.h>
20 #include <linux/regulator/consumer.h>
21 #include <linux/sched/task_stack.h>
22 #include <linux/sizes.h>
23 #include <linux/slab.h>
24 #include <linux/spi/flash.h>
25 
26 #include "core.h"
27 
/* Define max times to check status register before we give up. */

/*
 * For everything but full-chip erase; probably could be much smaller, but kept
 * around for safety for now
 */
#define DEFAULT_READY_WAIT_JIFFIES		(40UL * HZ)

/*
 * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
 * for larger flash
 */
#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES	(40UL * HZ)

/* Maximum number of address bytes used by any SPI NOR operation. */
#define SPI_NOR_MAX_ADDR_NBYTES	4

/*
 * Sleep range bounds while waiting for the flash to recover from a software
 * reset (presumably microseconds — confirm at the usleep_range() call site,
 * which is outside this chunk).
 */
#define SPI_NOR_SRST_SLEEP_MIN 200
#define SPI_NOR_SRST_SLEEP_MAX 400
46 
47 /**
48  * spi_nor_get_cmd_ext() - Get the command opcode extension based on the
49  *			   extension type.
50  * @nor:		pointer to a 'struct spi_nor'
51  * @op:			pointer to the 'struct spi_mem_op' whose properties
52  *			need to be initialized.
53  *
54  * Right now, only "repeat" and "invert" are supported.
55  *
56  * Return: The opcode extension.
57  */
spi_nor_get_cmd_ext(const struct spi_nor * nor,const struct spi_mem_op * op)58 static u8 spi_nor_get_cmd_ext(const struct spi_nor *nor,
59 			      const struct spi_mem_op *op)
60 {
61 	switch (nor->cmd_ext_type) {
62 	case SPI_NOR_EXT_INVERT:
63 		return ~op->cmd.opcode;
64 
65 	case SPI_NOR_EXT_REPEAT:
66 		return op->cmd.opcode;
67 
68 	default:
69 		dev_err(nor->dev, "Unknown command extension type\n");
70 		return 0;
71 	}
72 }
73 
74 /**
75  * spi_nor_spimem_setup_op() - Set up common properties of a spi-mem op.
76  * @nor:		pointer to a 'struct spi_nor'
77  * @op:			pointer to the 'struct spi_mem_op' whose properties
78  *			need to be initialized.
79  * @proto:		the protocol from which the properties need to be set.
80  */
spi_nor_spimem_setup_op(const struct spi_nor * nor,struct spi_mem_op * op,const enum spi_nor_protocol proto)81 void spi_nor_spimem_setup_op(const struct spi_nor *nor,
82 			     struct spi_mem_op *op,
83 			     const enum spi_nor_protocol proto)
84 {
85 	u8 ext;
86 
87 	op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(proto);
88 
89 	if (op->addr.nbytes)
90 		op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto);
91 
92 	if (op->dummy.nbytes)
93 		op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto);
94 
95 	if (op->data.nbytes)
96 		op->data.buswidth = spi_nor_get_protocol_data_nbits(proto);
97 
98 	if (spi_nor_protocol_is_dtr(proto)) {
99 		/*
100 		 * SPIMEM supports mixed DTR modes, but right now we can only
101 		 * have all phases either DTR or STR. IOW, SPIMEM can have
102 		 * something like 4S-4D-4D, but SPI NOR can't. So, set all 4
103 		 * phases to either DTR or STR.
104 		 */
105 		op->cmd.dtr = true;
106 		op->addr.dtr = true;
107 		op->dummy.dtr = true;
108 		op->data.dtr = true;
109 
110 		/* 2 bytes per clock cycle in DTR mode. */
111 		op->dummy.nbytes *= 2;
112 
113 		ext = spi_nor_get_cmd_ext(nor, op);
114 		op->cmd.opcode = (op->cmd.opcode << 8) | ext;
115 		op->cmd.nbytes = 2;
116 	}
117 
118 	if (proto == SNOR_PROTO_8_8_8_DTR && nor->flags & SNOR_F_SWAP16)
119 		op->data.swap16 = true;
120 }
121 
122 /**
123  * spi_nor_spimem_bounce() - check if a bounce buffer is needed for the data
124  *                           transfer
125  * @nor:        pointer to 'struct spi_nor'
126  * @op:         pointer to 'struct spi_mem_op' template for transfer
127  *
128  * If we have to use the bounce buffer, the data field in @op will be updated.
129  *
130  * Return: true if the bounce buffer is needed, false if not
131  */
spi_nor_spimem_bounce(struct spi_nor * nor,struct spi_mem_op * op)132 static bool spi_nor_spimem_bounce(struct spi_nor *nor, struct spi_mem_op *op)
133 {
134 	/* op->data.buf.in occupies the same memory as op->data.buf.out */
135 	if (object_is_on_stack(op->data.buf.in) ||
136 	    !virt_addr_valid(op->data.buf.in)) {
137 		if (op->data.nbytes > nor->bouncebuf_size)
138 			op->data.nbytes = nor->bouncebuf_size;
139 		op->data.buf.in = nor->bouncebuf;
140 		return true;
141 	}
142 
143 	return false;
144 }
145 
146 /**
147  * spi_nor_spimem_exec_op() - execute a memory operation
148  * @nor:        pointer to 'struct spi_nor'
149  * @op:         pointer to 'struct spi_mem_op' template for transfer
150  *
151  * Return: 0 on success, -error otherwise.
152  */
spi_nor_spimem_exec_op(struct spi_nor * nor,struct spi_mem_op * op)153 static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op)
154 {
155 	int error;
156 
157 	error = spi_mem_adjust_op_size(nor->spimem, op);
158 	if (error)
159 		return error;
160 
161 	return spi_mem_exec_op(nor->spimem, op);
162 }
163 
/* Register read through the legacy controller_ops; DTR is not supported there. */
int spi_nor_controller_ops_read_reg(struct spi_nor *nor, u8 opcode,
				    u8 *buf, size_t len)
{
	if (spi_nor_protocol_is_dtr(nor->reg_proto))
		return -EOPNOTSUPP;

	return nor->controller_ops->read_reg(nor, opcode, buf, len);
}
172 
/* Register write through the legacy controller_ops; DTR is not supported there. */
int spi_nor_controller_ops_write_reg(struct spi_nor *nor, u8 opcode,
				     const u8 *buf, size_t len)
{
	if (spi_nor_protocol_is_dtr(nor->reg_proto))
		return -EOPNOTSUPP;

	return nor->controller_ops->write_reg(nor, opcode, buf, len);
}
181 
/* Sector erase through the legacy controller_ops; DTR is not supported there. */
static int spi_nor_controller_ops_erase(struct spi_nor *nor, loff_t offs)
{
	if (spi_nor_protocol_is_dtr(nor->reg_proto))
		return -EOPNOTSUPP;

	return nor->controller_ops->erase(nor, offs);
}
189 
190 /**
191  * spi_nor_spimem_read_data() - read data from flash's memory region via
192  *                              spi-mem
193  * @nor:        pointer to 'struct spi_nor'
194  * @from:       offset to read from
195  * @len:        number of bytes to read
196  * @buf:        pointer to dst buffer
197  *
198  * Return: number of bytes read successfully, -errno otherwise
199  */
spi_nor_spimem_read_data(struct spi_nor * nor,loff_t from,size_t len,u8 * buf)200 static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
201 					size_t len, u8 *buf)
202 {
203 	struct spi_mem_op op =
204 		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
205 			   SPI_MEM_OP_ADDR(nor->addr_nbytes, from, 0),
206 			   SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
207 			   SPI_MEM_OP_DATA_IN(len, buf, 0));
208 	bool usebouncebuf;
209 	ssize_t nbytes;
210 	int error;
211 
212 	spi_nor_spimem_setup_op(nor, &op, nor->read_proto);
213 
214 	/* convert the dummy cycles to the number of bytes */
215 	op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
216 	if (spi_nor_protocol_is_dtr(nor->read_proto))
217 		op.dummy.nbytes *= 2;
218 
219 	usebouncebuf = spi_nor_spimem_bounce(nor, &op);
220 
221 	if (nor->dirmap.rdesc) {
222 		nbytes = spi_mem_dirmap_read(nor->dirmap.rdesc, op.addr.val,
223 					     op.data.nbytes, op.data.buf.in);
224 	} else {
225 		error = spi_nor_spimem_exec_op(nor, &op);
226 		if (error)
227 			return error;
228 		nbytes = op.data.nbytes;
229 	}
230 
231 	if (usebouncebuf && nbytes > 0)
232 		memcpy(buf, op.data.buf.in, nbytes);
233 
234 	return nbytes;
235 }
236 
237 /**
238  * spi_nor_read_data() - read data from flash memory
239  * @nor:        pointer to 'struct spi_nor'
240  * @from:       offset to read from
241  * @len:        number of bytes to read
242  * @buf:        pointer to dst buffer
243  *
244  * Return: number of bytes read successfully, -errno otherwise
245  */
spi_nor_read_data(struct spi_nor * nor,loff_t from,size_t len,u8 * buf)246 ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, u8 *buf)
247 {
248 	if (nor->spimem)
249 		return spi_nor_spimem_read_data(nor, from, len, buf);
250 
251 	return nor->controller_ops->read(nor, from, len, buf);
252 }
253 
254 /**
255  * spi_nor_spimem_write_data() - write data to flash memory via
256  *                               spi-mem
257  * @nor:        pointer to 'struct spi_nor'
258  * @to:         offset to write to
259  * @len:        number of bytes to write
260  * @buf:        pointer to src buffer
261  *
262  * Return: number of bytes written successfully, -errno otherwise
263  */
spi_nor_spimem_write_data(struct spi_nor * nor,loff_t to,size_t len,const u8 * buf)264 static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
265 					 size_t len, const u8 *buf)
266 {
267 	struct spi_mem_op op =
268 		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
269 			   SPI_MEM_OP_ADDR(nor->addr_nbytes, to, 0),
270 			   SPI_MEM_OP_NO_DUMMY,
271 			   SPI_MEM_OP_DATA_OUT(len, buf, 0));
272 	ssize_t nbytes;
273 	int error;
274 
275 	if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
276 		op.addr.nbytes = 0;
277 
278 	spi_nor_spimem_setup_op(nor, &op, nor->write_proto);
279 
280 	if (spi_nor_spimem_bounce(nor, &op))
281 		memcpy(nor->bouncebuf, buf, op.data.nbytes);
282 
283 	if (nor->dirmap.wdesc) {
284 		nbytes = spi_mem_dirmap_write(nor->dirmap.wdesc, op.addr.val,
285 					      op.data.nbytes, op.data.buf.out);
286 	} else {
287 		error = spi_nor_spimem_exec_op(nor, &op);
288 		if (error)
289 			return error;
290 		nbytes = op.data.nbytes;
291 	}
292 
293 	return nbytes;
294 }
295 
296 /**
297  * spi_nor_write_data() - write data to flash memory
298  * @nor:        pointer to 'struct spi_nor'
299  * @to:         offset to write to
300  * @len:        number of bytes to write
301  * @buf:        pointer to src buffer
302  *
303  * Return: number of bytes written successfully, -errno otherwise
304  */
spi_nor_write_data(struct spi_nor * nor,loff_t to,size_t len,const u8 * buf)305 ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
306 			   const u8 *buf)
307 {
308 	if (nor->spimem)
309 		return spi_nor_spimem_write_data(nor, to, len, buf);
310 
311 	return nor->controller_ops->write(nor, to, len, buf);
312 }
313 
314 /**
315  * spi_nor_read_any_reg() - read any register from flash memory, nonvolatile or
316  * volatile.
317  * @nor:        pointer to 'struct spi_nor'.
318  * @op:		SPI memory operation. op->data.buf must be DMA-able.
319  * @proto:	SPI protocol to use for the register operation.
320  *
321  * Return: zero on success, -errno otherwise
322  */
spi_nor_read_any_reg(struct spi_nor * nor,struct spi_mem_op * op,enum spi_nor_protocol proto)323 int spi_nor_read_any_reg(struct spi_nor *nor, struct spi_mem_op *op,
324 			 enum spi_nor_protocol proto)
325 {
326 	if (!nor->spimem)
327 		return -EOPNOTSUPP;
328 
329 	spi_nor_spimem_setup_op(nor, op, proto);
330 	return spi_nor_spimem_exec_op(nor, op);
331 }
332 
333 /**
334  * spi_nor_write_any_volatile_reg() - write any volatile register to flash
335  * memory.
336  * @nor:        pointer to 'struct spi_nor'
337  * @op:		SPI memory operation. op->data.buf must be DMA-able.
338  * @proto:	SPI protocol to use for the register operation.
339  *
340  * Writing volatile registers are instant according to some manufacturers
341  * (Cypress, Micron) and do not need any status polling.
342  *
343  * Return: zero on success, -errno otherwise
344  */
spi_nor_write_any_volatile_reg(struct spi_nor * nor,struct spi_mem_op * op,enum spi_nor_protocol proto)345 int spi_nor_write_any_volatile_reg(struct spi_nor *nor, struct spi_mem_op *op,
346 				   enum spi_nor_protocol proto)
347 {
348 	int ret;
349 
350 	if (!nor->spimem)
351 		return -EOPNOTSUPP;
352 
353 	ret = spi_nor_write_enable(nor);
354 	if (ret)
355 		return ret;
356 	spi_nor_spimem_setup_op(nor, op, proto);
357 	return spi_nor_spimem_exec_op(nor, op);
358 }
359 
360 /**
361  * spi_nor_write_enable() - Set write enable latch with Write Enable command.
362  * @nor:	pointer to 'struct spi_nor'.
363  *
364  * Return: 0 on success, -errno otherwise.
365  */
spi_nor_write_enable(struct spi_nor * nor)366 int spi_nor_write_enable(struct spi_nor *nor)
367 {
368 	int ret;
369 
370 	if (nor->spimem) {
371 		struct spi_mem_op op = SPI_NOR_WREN_OP;
372 
373 		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
374 
375 		ret = spi_mem_exec_op(nor->spimem, &op);
376 	} else {
377 		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WREN,
378 						       NULL, 0);
379 	}
380 
381 	if (ret)
382 		dev_dbg(nor->dev, "error %d on Write Enable\n", ret);
383 
384 	return ret;
385 }
386 
387 /**
388  * spi_nor_write_disable() - Send Write Disable instruction to the chip.
389  * @nor:	pointer to 'struct spi_nor'.
390  *
391  * Return: 0 on success, -errno otherwise.
392  */
spi_nor_write_disable(struct spi_nor * nor)393 int spi_nor_write_disable(struct spi_nor *nor)
394 {
395 	int ret;
396 
397 	if (nor->spimem) {
398 		struct spi_mem_op op = SPI_NOR_WRDI_OP;
399 
400 		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
401 
402 		ret = spi_mem_exec_op(nor->spimem, &op);
403 	} else {
404 		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRDI,
405 						       NULL, 0);
406 	}
407 
408 	if (ret)
409 		dev_dbg(nor->dev, "error %d on Write Disable\n", ret);
410 
411 	return ret;
412 }
413 
414 /**
415  * spi_nor_read_id() - Read the JEDEC ID.
416  * @nor:	pointer to 'struct spi_nor'.
417  * @naddr:	number of address bytes to send. Can be zero if the operation
418  *		does not need to send an address.
419  * @ndummy:	number of dummy bytes to send after an opcode or address. Can
420  *		be zero if the operation does not require dummy bytes.
421  * @id:		pointer to a DMA-able buffer where the value of the JEDEC ID
422  *		will be written.
423  * @proto:	the SPI protocol for register operation.
424  *
425  * Return: 0 on success, -errno otherwise.
426  */
spi_nor_read_id(struct spi_nor * nor,u8 naddr,u8 ndummy,u8 * id,enum spi_nor_protocol proto)427 int spi_nor_read_id(struct spi_nor *nor, u8 naddr, u8 ndummy, u8 *id,
428 		    enum spi_nor_protocol proto)
429 {
430 	int ret;
431 
432 	if (nor->spimem) {
433 		struct spi_mem_op op =
434 			SPI_NOR_READID_OP(naddr, ndummy, id, SPI_NOR_MAX_ID_LEN);
435 
436 		spi_nor_spimem_setup_op(nor, &op, proto);
437 		ret = spi_mem_exec_op(nor->spimem, &op);
438 	} else {
439 		ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
440 						    SPI_NOR_MAX_ID_LEN);
441 	}
442 	return ret;
443 }
444 
445 /**
446  * spi_nor_read_sr() - Read the Status Register.
447  * @nor:	pointer to 'struct spi_nor'.
448  * @sr:		pointer to a DMA-able buffer where the value of the
449  *              Status Register will be written. Should be at least 2 bytes.
450  *
451  * Return: 0 on success, -errno otherwise.
452  */
spi_nor_read_sr(struct spi_nor * nor,u8 * sr)453 int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
454 {
455 	int ret;
456 
457 	if (nor->spimem) {
458 		struct spi_mem_op op = SPI_NOR_RDSR_OP(sr);
459 
460 		if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
461 			op.addr.nbytes = nor->params->rdsr_addr_nbytes;
462 			op.dummy.nbytes = nor->params->rdsr_dummy;
463 			/*
464 			 * We don't want to read only one byte in DTR mode. So,
465 			 * read 2 and then discard the second byte.
466 			 */
467 			op.data.nbytes = 2;
468 		}
469 
470 		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
471 
472 		ret = spi_mem_exec_op(nor->spimem, &op);
473 	} else {
474 		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR, sr,
475 						      1);
476 	}
477 
478 	if (ret)
479 		dev_dbg(nor->dev, "error %d reading SR\n", ret);
480 
481 	return ret;
482 }
483 
484 /**
485  * spi_nor_read_cr() - Read the Configuration Register using the
486  * SPINOR_OP_RDCR (35h) command.
487  * @nor:	pointer to 'struct spi_nor'
488  * @cr:		pointer to a DMA-able buffer where the value of the
489  *              Configuration Register will be written.
490  *
491  * Return: 0 on success, -errno otherwise.
492  */
spi_nor_read_cr(struct spi_nor * nor,u8 * cr)493 int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
494 {
495 	int ret;
496 
497 	if (nor->spimem) {
498 		struct spi_mem_op op = SPI_NOR_RDCR_OP(cr);
499 
500 		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
501 
502 		ret = spi_mem_exec_op(nor->spimem, &op);
503 	} else {
504 		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDCR, cr,
505 						      1);
506 	}
507 
508 	if (ret)
509 		dev_dbg(nor->dev, "error %d reading CR\n", ret);
510 
511 	return ret;
512 }
513 
514 /**
515  * spi_nor_set_4byte_addr_mode_en4b_ex4b() - Enter/Exit 4-byte address mode
516  *			using SPINOR_OP_EN4B/SPINOR_OP_EX4B. Typically used by
517  *			Winbond and Macronix.
518  * @nor:	pointer to 'struct spi_nor'.
519  * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
520  *		address mode.
521  *
522  * Return: 0 on success, -errno otherwise.
523  */
spi_nor_set_4byte_addr_mode_en4b_ex4b(struct spi_nor * nor,bool enable)524 int spi_nor_set_4byte_addr_mode_en4b_ex4b(struct spi_nor *nor, bool enable)
525 {
526 	int ret;
527 
528 	if (nor->spimem) {
529 		struct spi_mem_op op = SPI_NOR_EN4B_EX4B_OP(enable);
530 
531 		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
532 
533 		ret = spi_mem_exec_op(nor->spimem, &op);
534 	} else {
535 		ret = spi_nor_controller_ops_write_reg(nor,
536 						       enable ? SPINOR_OP_EN4B :
537 								SPINOR_OP_EX4B,
538 						       NULL, 0);
539 	}
540 
541 	if (ret)
542 		dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
543 
544 	return ret;
545 }
546 
547 /**
548  * spi_nor_set_4byte_addr_mode_wren_en4b_ex4b() - Set 4-byte address mode using
549  * SPINOR_OP_WREN followed by SPINOR_OP_EN4B or SPINOR_OP_EX4B. Typically used
550  * by ST and Micron flashes.
551  * @nor:	pointer to 'struct spi_nor'.
552  * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
553  *		address mode.
554  *
555  * Return: 0 on success, -errno otherwise.
556  */
spi_nor_set_4byte_addr_mode_wren_en4b_ex4b(struct spi_nor * nor,bool enable)557 int spi_nor_set_4byte_addr_mode_wren_en4b_ex4b(struct spi_nor *nor, bool enable)
558 {
559 	int ret;
560 
561 	ret = spi_nor_write_enable(nor);
562 	if (ret)
563 		return ret;
564 
565 	ret = spi_nor_set_4byte_addr_mode_en4b_ex4b(nor, enable);
566 	if (ret)
567 		return ret;
568 
569 	return spi_nor_write_disable(nor);
570 }
571 
572 /**
573  * spi_nor_set_4byte_addr_mode_brwr() - Set 4-byte address mode using
574  *			SPINOR_OP_BRWR. Typically used by Spansion flashes.
575  * @nor:	pointer to 'struct spi_nor'.
576  * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
577  *		address mode.
578  *
579  * 8-bit volatile bank register used to define A[30:A24] bits. MSB (bit[7]) is
580  * used to enable/disable 4-byte address mode. When MSB is set to ‘1’, 4-byte
581  * address mode is active and A[30:24] bits are don’t care. Write instruction is
582  * SPINOR_OP_BRWR(17h) with 1 byte of data.
583  *
584  * Return: 0 on success, -errno otherwise.
585  */
spi_nor_set_4byte_addr_mode_brwr(struct spi_nor * nor,bool enable)586 int spi_nor_set_4byte_addr_mode_brwr(struct spi_nor *nor, bool enable)
587 {
588 	int ret;
589 
590 	nor->bouncebuf[0] = enable << 7;
591 
592 	if (nor->spimem) {
593 		struct spi_mem_op op = SPI_NOR_BRWR_OP(nor->bouncebuf);
594 
595 		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
596 
597 		ret = spi_mem_exec_op(nor->spimem, &op);
598 	} else {
599 		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_BRWR,
600 						       nor->bouncebuf, 1);
601 	}
602 
603 	if (ret)
604 		dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
605 
606 	return ret;
607 }
608 
609 /**
610  * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready
611  * for new commands.
612  * @nor:	pointer to 'struct spi_nor'.
613  *
614  * Return: 1 if ready, 0 if not ready, -errno on errors.
615  */
spi_nor_sr_ready(struct spi_nor * nor)616 int spi_nor_sr_ready(struct spi_nor *nor)
617 {
618 	int ret;
619 
620 	ret = spi_nor_read_sr(nor, nor->bouncebuf);
621 	if (ret)
622 		return ret;
623 
624 	return !(nor->bouncebuf[0] & SR_WIP);
625 }
626 
627 /**
628  * spi_nor_use_parallel_locking() - Checks if RWW locking scheme shall be used
629  * @nor:	pointer to 'struct spi_nor'.
630  *
631  * Return: true if parallel locking is enabled, false otherwise.
632  */
spi_nor_use_parallel_locking(struct spi_nor * nor)633 static bool spi_nor_use_parallel_locking(struct spi_nor *nor)
634 {
635 	return nor->flags & SNOR_F_RWW;
636 }
637 
638 /* Locking helpers for status read operations */
spi_nor_rww_start_rdst(struct spi_nor * nor)639 static int spi_nor_rww_start_rdst(struct spi_nor *nor)
640 {
641 	struct spi_nor_rww *rww = &nor->rww;
642 	int ret = -EAGAIN;
643 
644 	mutex_lock(&nor->lock);
645 
646 	if (rww->ongoing_io || rww->ongoing_rd)
647 		goto busy;
648 
649 	rww->ongoing_io = true;
650 	rww->ongoing_rd = true;
651 	ret = 0;
652 
653 busy:
654 	mutex_unlock(&nor->lock);
655 	return ret;
656 }
657 
spi_nor_rww_end_rdst(struct spi_nor * nor)658 static void spi_nor_rww_end_rdst(struct spi_nor *nor)
659 {
660 	struct spi_nor_rww *rww = &nor->rww;
661 
662 	mutex_lock(&nor->lock);
663 
664 	rww->ongoing_io = false;
665 	rww->ongoing_rd = false;
666 
667 	mutex_unlock(&nor->lock);
668 }
669 
/* Take the RWW status-read lock when parallel locking is in use; else no-op. */
static int spi_nor_lock_rdst(struct spi_nor *nor)
{
	if (!spi_nor_use_parallel_locking(nor))
		return 0;

	return spi_nor_rww_start_rdst(nor);
}
677 
spi_nor_unlock_rdst(struct spi_nor * nor)678 static void spi_nor_unlock_rdst(struct spi_nor *nor)
679 {
680 	if (spi_nor_use_parallel_locking(nor)) {
681 		spi_nor_rww_end_rdst(nor);
682 		wake_up(&nor->rww.wait);
683 	}
684 }
685 
686 /**
687  * spi_nor_ready() - Query the flash to see if it is ready for new commands.
688  * @nor:	pointer to 'struct spi_nor'.
689  *
690  * Return: 1 if ready, 0 if not ready, -errno on errors.
691  */
spi_nor_ready(struct spi_nor * nor)692 static int spi_nor_ready(struct spi_nor *nor)
693 {
694 	int ret;
695 
696 	ret = spi_nor_lock_rdst(nor);
697 	if (ret)
698 		return 0;
699 
700 	/* Flashes might override the standard routine. */
701 	if (nor->params->ready)
702 		ret = nor->params->ready(nor);
703 	else
704 		ret = spi_nor_sr_ready(nor);
705 
706 	spi_nor_unlock_rdst(nor);
707 
708 	return ret;
709 }
710 
711 /**
712  * spi_nor_wait_till_ready_with_timeout() - Service routine to read the
713  * Status Register until ready, or timeout occurs.
714  * @nor:		pointer to "struct spi_nor".
715  * @timeout_jiffies:	jiffies to wait until timeout.
716  *
717  * Return: 0 on success, -errno otherwise.
718  */
spi_nor_wait_till_ready_with_timeout(struct spi_nor * nor,unsigned long timeout_jiffies)719 static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
720 						unsigned long timeout_jiffies)
721 {
722 	unsigned long deadline;
723 	int timeout = 0, ret;
724 
725 	deadline = jiffies + timeout_jiffies;
726 
727 	while (!timeout) {
728 		if (time_after_eq(jiffies, deadline))
729 			timeout = 1;
730 
731 		ret = spi_nor_ready(nor);
732 		if (ret < 0)
733 			return ret;
734 		if (ret)
735 			return 0;
736 
737 		cond_resched();
738 	}
739 
740 	dev_dbg(nor->dev, "flash operation timed out\n");
741 
742 	return -ETIMEDOUT;
743 }
744 
745 /**
746  * spi_nor_wait_till_ready() - Wait for a predefined amount of time for the
747  * flash to be ready, or timeout occurs.
748  * @nor:	pointer to "struct spi_nor".
749  *
750  * Return: 0 on success, -errno otherwise.
751  */
spi_nor_wait_till_ready(struct spi_nor * nor)752 int spi_nor_wait_till_ready(struct spi_nor *nor)
753 {
754 	return spi_nor_wait_till_ready_with_timeout(nor,
755 						    DEFAULT_READY_WAIT_JIFFIES);
756 }
757 
758 /**
759  * spi_nor_global_block_unlock() - Unlock Global Block Protection.
760  * @nor:	pointer to 'struct spi_nor'.
761  *
762  * Return: 0 on success, -errno otherwise.
763  */
spi_nor_global_block_unlock(struct spi_nor * nor)764 int spi_nor_global_block_unlock(struct spi_nor *nor)
765 {
766 	int ret;
767 
768 	ret = spi_nor_write_enable(nor);
769 	if (ret)
770 		return ret;
771 
772 	if (nor->spimem) {
773 		struct spi_mem_op op = SPI_NOR_GBULK_OP;
774 
775 		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
776 
777 		ret = spi_mem_exec_op(nor->spimem, &op);
778 	} else {
779 		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_GBULK,
780 						       NULL, 0);
781 	}
782 
783 	if (ret) {
784 		dev_dbg(nor->dev, "error %d on Global Block Unlock\n", ret);
785 		return ret;
786 	}
787 
788 	return spi_nor_wait_till_ready(nor);
789 }
790 
791 /**
792  * spi_nor_write_sr() - Write the Status Register.
793  * @nor:	pointer to 'struct spi_nor'.
794  * @sr:		pointer to DMA-able buffer to write to the Status Register.
795  * @len:	number of bytes to write to the Status Register.
796  *
797  * Return: 0 on success, -errno otherwise.
798  */
spi_nor_write_sr(struct spi_nor * nor,const u8 * sr,size_t len)799 int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
800 {
801 	int ret;
802 
803 	ret = spi_nor_write_enable(nor);
804 	if (ret)
805 		return ret;
806 
807 	if (nor->spimem) {
808 		struct spi_mem_op op = SPI_NOR_WRSR_OP(sr, len);
809 
810 		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
811 
812 		ret = spi_mem_exec_op(nor->spimem, &op);
813 	} else {
814 		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR, sr,
815 						       len);
816 	}
817 
818 	if (ret) {
819 		dev_dbg(nor->dev, "error %d writing SR\n", ret);
820 		return ret;
821 	}
822 
823 	return spi_nor_wait_till_ready(nor);
824 }
825 
826 /**
827  * spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and
828  * ensure that the byte written match the received value.
829  * @nor:	pointer to a 'struct spi_nor'.
830  * @sr1:	byte value to be written to the Status Register.
831  *
832  * Return: 0 on success, -errno otherwise.
833  */
spi_nor_write_sr1_and_check(struct spi_nor * nor,u8 sr1)834 static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1)
835 {
836 	int ret;
837 
838 	nor->bouncebuf[0] = sr1;
839 
840 	ret = spi_nor_write_sr(nor, nor->bouncebuf, 1);
841 	if (ret)
842 		return ret;
843 
844 	ret = spi_nor_read_sr(nor, nor->bouncebuf);
845 	if (ret)
846 		return ret;
847 
848 	if (nor->bouncebuf[0] != sr1) {
849 		dev_dbg(nor->dev, "SR1: read back test failed\n");
850 		return -EIO;
851 	}
852 
853 	return 0;
854 }
855 
856 /**
857  * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the
858  * Status Register 2 in one shot. Ensure that the byte written in the Status
859  * Register 1 match the received value, and that the 16-bit Write did not
860  * affect what was already in the Status Register 2.
861  * @nor:	pointer to a 'struct spi_nor'.
862  * @sr1:	byte value to be written to the Status Register 1.
863  *
864  * Return: 0 on success, -errno otherwise.
865  */
spi_nor_write_16bit_sr_and_check(struct spi_nor * nor,u8 sr1)866 static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
867 {
868 	int ret;
869 	u8 *sr_cr = nor->bouncebuf;
870 	u8 cr_written;
871 
872 	/* Make sure we don't overwrite the contents of Status Register 2. */
873 	if (!(nor->flags & SNOR_F_NO_READ_CR)) {
874 		ret = spi_nor_read_cr(nor, &sr_cr[1]);
875 		if (ret)
876 			return ret;
877 	} else if (spi_nor_get_protocol_width(nor->read_proto) == 4 &&
878 		   spi_nor_get_protocol_width(nor->write_proto) == 4 &&
879 		   nor->params->quad_enable) {
880 		/*
881 		 * If the Status Register 2 Read command (35h) is not
882 		 * supported, we should at least be sure we don't
883 		 * change the value of the SR2 Quad Enable bit.
884 		 *
885 		 * When the Quad Enable method is set and the buswidth is 4, we
886 		 * can safely assume that the value of the QE bit is one, as a
887 		 * consequence of the nor->params->quad_enable() call.
888 		 *
889 		 * According to the JESD216 revB standard, BFPT DWORDS[15],
890 		 * bits 22:20, the 16-bit Write Status (01h) command is
891 		 * available just for the cases in which the QE bit is
892 		 * described in SR2 at BIT(1).
893 		 */
894 		sr_cr[1] = SR2_QUAD_EN_BIT1;
895 	} else {
896 		sr_cr[1] = 0;
897 	}
898 
899 	sr_cr[0] = sr1;
900 
901 	ret = spi_nor_write_sr(nor, sr_cr, 2);
902 	if (ret)
903 		return ret;
904 
905 	ret = spi_nor_read_sr(nor, sr_cr);
906 	if (ret)
907 		return ret;
908 
909 	if (sr1 != sr_cr[0]) {
910 		dev_dbg(nor->dev, "SR: Read back test failed\n");
911 		return -EIO;
912 	}
913 
914 	if (nor->flags & SNOR_F_NO_READ_CR)
915 		return 0;
916 
917 	cr_written = sr_cr[1];
918 
919 	ret = spi_nor_read_cr(nor, &sr_cr[1]);
920 	if (ret)
921 		return ret;
922 
923 	if (cr_written != sr_cr[1]) {
924 		dev_dbg(nor->dev, "CR: read back test failed\n");
925 		return -EIO;
926 	}
927 
928 	return 0;
929 }
930 
931 /**
932  * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the
933  * Configuration Register in one shot. Ensure that the byte written in the
934  * Configuration Register match the received value, and that the 16-bit Write
935  * did not affect what was already in the Status Register 1.
936  * @nor:	pointer to a 'struct spi_nor'.
937  * @cr:		byte value to be written to the Configuration Register.
938  *
939  * Return: 0 on success, -errno otherwise.
940  */
spi_nor_write_16bit_cr_and_check(struct spi_nor * nor,u8 cr)941 int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
942 {
943 	int ret;
944 	u8 *sr_cr = nor->bouncebuf;
945 	u8 sr_written;
946 
947 	/* Keep the current value of the Status Register 1. */
948 	ret = spi_nor_read_sr(nor, sr_cr);
949 	if (ret)
950 		return ret;
951 
952 	sr_cr[1] = cr;
953 
954 	ret = spi_nor_write_sr(nor, sr_cr, 2);
955 	if (ret)
956 		return ret;
957 
958 	sr_written = sr_cr[0];
959 
960 	ret = spi_nor_read_sr(nor, sr_cr);
961 	if (ret)
962 		return ret;
963 
964 	if (sr_written != sr_cr[0]) {
965 		dev_dbg(nor->dev, "SR: Read back test failed\n");
966 		return -EIO;
967 	}
968 
969 	if (nor->flags & SNOR_F_NO_READ_CR)
970 		return 0;
971 
972 	ret = spi_nor_read_cr(nor, &sr_cr[1]);
973 	if (ret)
974 		return ret;
975 
976 	if (cr != sr_cr[1]) {
977 		dev_dbg(nor->dev, "CR: read back test failed\n");
978 		return -EIO;
979 	}
980 
981 	return 0;
982 }
983 
984 /**
985  * spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that
986  * the byte written match the received value without affecting other bits in the
987  * Status Register 1 and 2.
988  * @nor:	pointer to a 'struct spi_nor'.
989  * @sr1:	byte value to be written to the Status Register.
990  *
991  * Return: 0 on success, -errno otherwise.
992  */
spi_nor_write_sr_and_check(struct spi_nor * nor,u8 sr1)993 int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
994 {
995 	if (nor->flags & SNOR_F_HAS_16BIT_SR)
996 		return spi_nor_write_16bit_sr_and_check(nor, sr1);
997 
998 	return spi_nor_write_sr1_and_check(nor, sr1);
999 }
1000 
1001 /**
1002  * spi_nor_write_sr2() - Write the Status Register 2 using the
1003  * SPINOR_OP_WRSR2 (3eh) command.
1004  * @nor:	pointer to 'struct spi_nor'.
1005  * @sr2:	pointer to DMA-able buffer to write to the Status Register 2.
1006  *
1007  * Return: 0 on success, -errno otherwise.
1008  */
static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2)
{
	int ret;

	/* WRSR2 must be preceded by a Write Enable. */
	ret = spi_nor_write_enable(nor);
	if (ret)
		return ret;

	if (nor->spimem) {
		struct spi_mem_op op = SPI_NOR_WRSR2_OP(sr2);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		/* Legacy controller_ops path: single-byte register write. */
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR2,
						       sr2, 1);
	}

	if (ret) {
		dev_dbg(nor->dev, "error %d writing SR2\n", ret);
		return ret;
	}

	/* Writing SR2 starts an internal update cycle; wait until it ends. */
	return spi_nor_wait_till_ready(nor);
}
1035 
1036 /**
1037  * spi_nor_read_sr2() - Read the Status Register 2 using the
1038  * SPINOR_OP_RDSR2 (3fh) command.
1039  * @nor:	pointer to 'struct spi_nor'.
1040  * @sr2:	pointer to DMA-able buffer where the value of the
1041  *		Status Register 2 will be written.
1042  *
1043  * Return: 0 on success, -errno otherwise.
1044  */
static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op = SPI_NOR_RDSR2_OP(sr2);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		/* Legacy controller_ops path: single-byte register read. */
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR2, sr2,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading SR2\n", ret);

	return ret;
}
1065 
1066 /**
1067  * spi_nor_erase_die() - Erase the entire die.
1068  * @nor:	pointer to 'struct spi_nor'.
1069  * @addr:	address of the die.
1070  * @die_size:	size of the die.
1071  *
1072  * Return: 0 on success, -errno otherwise.
1073  */
static int spi_nor_erase_die(struct spi_nor *nor, loff_t addr, size_t die_size)
{
	/* A die smaller than the whole device means a multi-die flash. */
	bool multi_die = nor->mtd.size != die_size;
	int ret;

	dev_dbg(nor->dev, " %lldKiB\n", (long long)(die_size >> 10));

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_NOR_DIE_ERASE_OP(nor->params->die_erase_opcode,
					     nor->addr_nbytes, addr, multi_die);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		/*
		 * The legacy controller_ops path can only issue a plain
		 * chip erase: no address phase, hence no per-die erase.
		 */
		if (multi_die)
			return -EOPNOTSUPP;

		ret = spi_nor_controller_ops_write_reg(nor,
						       SPINOR_OP_CHIP_ERASE,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d erasing chip\n", ret);

	return ret;
}
1103 
/*
 * Look up @opcode in @table of {from, to} pairs and return the mapped
 * opcode. An opcode without a table entry is returned unchanged.
 */
static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
{
	size_t idx;

	for (idx = 0; idx < size; idx++) {
		if (table[idx][0] == opcode)
			return table[idx][1];
	}

	/* No conversion found, keep input op code. */
	return opcode;
}
1115 
u8 spi_nor_convert_3to4_read(u8 opcode)
{
	/* Pairs of {3-byte address opcode, 4-byte address opcode}. */
	static const u8 spi_nor_3to4_read[][2] = {
		{ SPINOR_OP_READ,	SPINOR_OP_READ_4B },
		{ SPINOR_OP_READ_FAST,	SPINOR_OP_READ_FAST_4B },
		{ SPINOR_OP_READ_1_1_2,	SPINOR_OP_READ_1_1_2_4B },
		{ SPINOR_OP_READ_1_2_2,	SPINOR_OP_READ_1_2_2_4B },
		{ SPINOR_OP_READ_1_1_4,	SPINOR_OP_READ_1_1_4_4B },
		{ SPINOR_OP_READ_1_4_4,	SPINOR_OP_READ_1_4_4_4B },
		{ SPINOR_OP_READ_1_1_8,	SPINOR_OP_READ_1_1_8_4B },
		{ SPINOR_OP_READ_1_8_8,	SPINOR_OP_READ_1_8_8_4B },

		{ SPINOR_OP_READ_1_1_1_DTR,	SPINOR_OP_READ_1_1_1_DTR_4B },
		{ SPINOR_OP_READ_1_2_2_DTR,	SPINOR_OP_READ_1_2_2_DTR_4B },
		{ SPINOR_OP_READ_1_4_4_DTR,	SPINOR_OP_READ_1_4_4_DTR_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
				      ARRAY_SIZE(spi_nor_3to4_read));
}
1136 
/* Map a 3-byte address page program opcode to its 4-byte address variant. */
static u8 spi_nor_convert_3to4_program(u8 opcode)
{
	/* Pairs of {3-byte address opcode, 4-byte address opcode}. */
	static const u8 spi_nor_3to4_program[][2] = {
		{ SPINOR_OP_PP,		SPINOR_OP_PP_4B },
		{ SPINOR_OP_PP_1_1_4,	SPINOR_OP_PP_1_1_4_4B },
		{ SPINOR_OP_PP_1_4_4,	SPINOR_OP_PP_1_4_4_4B },
		{ SPINOR_OP_PP_1_1_8,	SPINOR_OP_PP_1_1_8_4B },
		{ SPINOR_OP_PP_1_8_8,	SPINOR_OP_PP_1_8_8_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
				      ARRAY_SIZE(spi_nor_3to4_program));
}
1150 
/* Map a 3-byte address erase opcode to its 4-byte address variant. */
static u8 spi_nor_convert_3to4_erase(u8 opcode)
{
	/* Pairs of {3-byte address opcode, 4-byte address opcode}. */
	static const u8 spi_nor_3to4_erase[][2] = {
		{ SPINOR_OP_BE_4K,	SPINOR_OP_BE_4K_4B },
		{ SPINOR_OP_BE_32K,	SPINOR_OP_BE_32K_4B },
		{ SPINOR_OP_SE,		SPINOR_OP_SE_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
				      ARRAY_SIZE(spi_nor_3to4_erase));
}
1162 
spi_nor_has_uniform_erase(const struct spi_nor * nor)1163 static bool spi_nor_has_uniform_erase(const struct spi_nor *nor)
1164 {
1165 	return !!nor->params->erase_map.uniform_region.erase_mask;
1166 }
1167 
spi_nor_set_4byte_opcodes(struct spi_nor * nor)1168 static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
1169 {
1170 	nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
1171 	nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
1172 	nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
1173 
1174 	if (!spi_nor_has_uniform_erase(nor)) {
1175 		struct spi_nor_erase_map *map = &nor->params->erase_map;
1176 		struct spi_nor_erase_type *erase;
1177 		int i;
1178 
1179 		for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
1180 			erase = &map->erase_type[i];
1181 			erase->opcode =
1182 				spi_nor_convert_3to4_erase(erase->opcode);
1183 		}
1184 	}
1185 }
1186 
spi_nor_prep(struct spi_nor * nor)1187 static int spi_nor_prep(struct spi_nor *nor)
1188 {
1189 	int ret = 0;
1190 
1191 	if (nor->controller_ops && nor->controller_ops->prepare)
1192 		ret = nor->controller_ops->prepare(nor);
1193 
1194 	return ret;
1195 }
1196 
spi_nor_unprep(struct spi_nor * nor)1197 static void spi_nor_unprep(struct spi_nor *nor)
1198 {
1199 	if (nor->controller_ops && nor->controller_ops->unprepare)
1200 		nor->controller_ops->unprepare(nor);
1201 }
1202 
/* Map the byte range [start, start + len) onto its first and last bank. */
static void spi_nor_offset_to_banks(u64 bank_size, loff_t start, size_t len,
				    u8 *first, u8 *last)
{
	u64 last_byte = start + len - 1;

	/* This is currently safe, the number of banks being very small */
	*first = DIV_ROUND_DOWN_ULL(start, bank_size);
	*last = DIV_ROUND_DOWN_ULL(last_byte, bank_size);
}
1210 
1211 /* Generic helpers for internal locking and serialization */
spi_nor_rww_start_io(struct spi_nor * nor)1212 static bool spi_nor_rww_start_io(struct spi_nor *nor)
1213 {
1214 	struct spi_nor_rww *rww = &nor->rww;
1215 	bool start = false;
1216 
1217 	mutex_lock(&nor->lock);
1218 
1219 	if (rww->ongoing_io)
1220 		goto busy;
1221 
1222 	rww->ongoing_io = true;
1223 	start = true;
1224 
1225 busy:
1226 	mutex_unlock(&nor->lock);
1227 	return start;
1228 }
1229 
spi_nor_rww_end_io(struct spi_nor * nor)1230 static void spi_nor_rww_end_io(struct spi_nor *nor)
1231 {
1232 	mutex_lock(&nor->lock);
1233 	nor->rww.ongoing_io = false;
1234 	mutex_unlock(&nor->lock);
1235 }
1236 
spi_nor_lock_device(struct spi_nor * nor)1237 static int spi_nor_lock_device(struct spi_nor *nor)
1238 {
1239 	if (!spi_nor_use_parallel_locking(nor))
1240 		return 0;
1241 
1242 	return wait_event_killable(nor->rww.wait, spi_nor_rww_start_io(nor));
1243 }
1244 
spi_nor_unlock_device(struct spi_nor * nor)1245 static void spi_nor_unlock_device(struct spi_nor *nor)
1246 {
1247 	if (spi_nor_use_parallel_locking(nor)) {
1248 		spi_nor_rww_end_io(nor);
1249 		wake_up(&nor->rww.wait);
1250 	}
1251 }
1252 
1253 /* Generic helpers for internal locking and serialization */
spi_nor_rww_start_exclusive(struct spi_nor * nor)1254 static bool spi_nor_rww_start_exclusive(struct spi_nor *nor)
1255 {
1256 	struct spi_nor_rww *rww = &nor->rww;
1257 	bool start = false;
1258 
1259 	mutex_lock(&nor->lock);
1260 
1261 	if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe)
1262 		goto busy;
1263 
1264 	rww->ongoing_io = true;
1265 	rww->ongoing_rd = true;
1266 	rww->ongoing_pe = true;
1267 	start = true;
1268 
1269 busy:
1270 	mutex_unlock(&nor->lock);
1271 	return start;
1272 }
1273 
spi_nor_rww_end_exclusive(struct spi_nor * nor)1274 static void spi_nor_rww_end_exclusive(struct spi_nor *nor)
1275 {
1276 	struct spi_nor_rww *rww = &nor->rww;
1277 
1278 	mutex_lock(&nor->lock);
1279 	rww->ongoing_io = false;
1280 	rww->ongoing_rd = false;
1281 	rww->ongoing_pe = false;
1282 	mutex_unlock(&nor->lock);
1283 }
1284 
spi_nor_prep_and_lock(struct spi_nor * nor)1285 int spi_nor_prep_and_lock(struct spi_nor *nor)
1286 {
1287 	int ret;
1288 
1289 	ret = spi_nor_prep(nor);
1290 	if (ret)
1291 		return ret;
1292 
1293 	if (!spi_nor_use_parallel_locking(nor))
1294 		mutex_lock(&nor->lock);
1295 	else
1296 		ret = wait_event_killable(nor->rww.wait,
1297 					  spi_nor_rww_start_exclusive(nor));
1298 
1299 	return ret;
1300 }
1301 
spi_nor_unlock_and_unprep(struct spi_nor * nor)1302 void spi_nor_unlock_and_unprep(struct spi_nor *nor)
1303 {
1304 	if (!spi_nor_use_parallel_locking(nor)) {
1305 		mutex_unlock(&nor->lock);
1306 	} else {
1307 		spi_nor_rww_end_exclusive(nor);
1308 		wake_up(&nor->rww.wait);
1309 	}
1310 
1311 	spi_nor_unprep(nor);
1312 }
1313 
1314 /* Internal locking helpers for program and erase operations */
static bool spi_nor_rww_start_pe(struct spi_nor *nor, loff_t start, size_t len)
{
	struct spi_nor_rww *rww = &nor->rww;
	unsigned int used_banks = 0;
	bool started = false;
	u8 first, last;
	int bank;

	mutex_lock(&nor->lock);

	/* Back off while any exclusive, read or program/erase op is running. */
	if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe)
		goto busy;

	/* Claim every bank touched by [start, start + len); fail if any is taken. */
	spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
	for (bank = first; bank <= last; bank++) {
		if (rww->used_banks & BIT(bank))
			goto busy;

		used_banks |= BIT(bank);
	}

	rww->used_banks |= used_banks;
	rww->ongoing_pe = true;
	started = true;

busy:
	mutex_unlock(&nor->lock);
	return started;
}
1344 
/* Release the banks and program/erase state taken by spi_nor_rww_start_pe(). */
static void spi_nor_rww_end_pe(struct spi_nor *nor, loff_t start, size_t len)
{
	struct spi_nor_rww *rww = &nor->rww;
	u8 first, last;
	int bank;

	/* Bank computation only depends on the arguments, do it unlocked. */
	spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);

	mutex_lock(&nor->lock);

	for (bank = first; bank <= last; bank++)
		rww->used_banks &= ~BIT(bank);
	rww->ongoing_pe = false;

	mutex_unlock(&nor->lock);
}
1361 
/* Prepare the controller and lock the range for a program/erase operation. */
static int spi_nor_prep_and_lock_pe(struct spi_nor *nor, loff_t start, size_t len)
{
	int ret;

	ret = spi_nor_prep(nor);
	if (ret)
		return ret;

	if (spi_nor_use_parallel_locking(nor))
		return wait_event_killable(nor->rww.wait,
					   spi_nor_rww_start_pe(nor, start, len));

	mutex_lock(&nor->lock);
	return 0;
}
1378 
/* Counterpart of spi_nor_prep_and_lock_pe(). */
static void spi_nor_unlock_and_unprep_pe(struct spi_nor *nor, loff_t start, size_t len)
{
	if (spi_nor_use_parallel_locking(nor)) {
		spi_nor_rww_end_pe(nor, start, len);
		wake_up(&nor->rww.wait);
	} else {
		mutex_unlock(&nor->lock);
	}

	spi_nor_unprep(nor);
}
1390 
1391 /* Internal locking helpers for read operations */
static bool spi_nor_rww_start_rd(struct spi_nor *nor, loff_t start, size_t len)
{
	struct spi_nor_rww *rww = &nor->rww;
	unsigned int used_banks = 0;
	bool started = false;
	u8 first, last;
	int bank;

	mutex_lock(&nor->lock);

	/* A read may run next to a program/erase, but not another I/O or read. */
	if (rww->ongoing_io || rww->ongoing_rd)
		goto busy;

	/* Claim every bank touched by [start, start + len); fail if any is taken. */
	spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
	for (bank = first; bank <= last; bank++) {
		if (rww->used_banks & BIT(bank))
			goto busy;

		used_banks |= BIT(bank);
	}

	rww->used_banks |= used_banks;
	rww->ongoing_io = true;
	rww->ongoing_rd = true;
	started = true;

busy:
	mutex_unlock(&nor->lock);
	return started;
}
1422 
/* Release the banks and read state taken by spi_nor_rww_start_rd(). */
static void spi_nor_rww_end_rd(struct spi_nor *nor, loff_t start, size_t len)
{
	struct spi_nor_rww *rww = &nor->rww;
	u8 first, last;
	int bank;

	mutex_lock(&nor->lock);

	spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
	for (bank = first; bank <= last; bank++)
		rww->used_banks &= ~BIT(bank);	/* use the local, like the sibling rww helpers */

	rww->ongoing_io = false;
	rww->ongoing_rd = false;

	mutex_unlock(&nor->lock);
}
1440 
/* Prepare the controller and lock the range for a read operation. */
static int spi_nor_prep_and_lock_rd(struct spi_nor *nor, loff_t start, size_t len)
{
	int ret;

	ret = spi_nor_prep(nor);
	if (ret)
		return ret;

	if (spi_nor_use_parallel_locking(nor))
		return wait_event_killable(nor->rww.wait,
					   spi_nor_rww_start_rd(nor, start, len));

	mutex_lock(&nor->lock);
	return 0;
}
1457 
/* Counterpart of spi_nor_prep_and_lock_rd(). */
static void spi_nor_unlock_and_unprep_rd(struct spi_nor *nor, loff_t start, size_t len)
{
	if (spi_nor_use_parallel_locking(nor)) {
		spi_nor_rww_end_rd(nor, start, len);
		wake_up(&nor->rww.wait);
	} else {
		mutex_unlock(&nor->lock);
	}

	spi_nor_unprep(nor);
}
1469 
1470 /*
1471  * Initiate the erasure of a single sector
1472  */
int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
{
	int i;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_NOR_SECTOR_ERASE_OP(nor->erase_opcode,
						nor->addr_nbytes, addr);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		return spi_mem_exec_op(nor->spimem, &op);
	} else if (nor->controller_ops->erase) {
		return spi_nor_controller_ops_erase(nor, addr);
	}

	/*
	 * Default implementation, if driver doesn't have a specialized HW
	 * control
	 */
	/* Pack the address MSB-first (big-endian) into the bounce buffer. */
	for (i = nor->addr_nbytes - 1; i >= 0; i--) {
		nor->bouncebuf[i] = addr & 0xff;
		addr >>= 8;
	}

	/* Send the erase opcode with the address bytes as its payload. */
	return spi_nor_controller_ops_write_reg(nor, nor->erase_opcode,
						nor->bouncebuf, nor->addr_nbytes);
}
1501 
1502 /**
1503  * spi_nor_div_by_erase_size() - calculate remainder and update new dividend
1504  * @erase:	pointer to a structure that describes a SPI NOR erase type
1505  * @dividend:	dividend value
1506  * @remainder:	pointer to u32 remainder (will be updated)
1507  *
1508  * Return: the result of the division
1509  */
static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
				     u64 dividend, u32 *remainder)
{
	/* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
	/* Power-of-2 sizes let mask/shift replace a costly 64-bit division. */
	*remainder = (u32)dividend & erase->size_mask;
	return dividend >> erase->size_shift;
}
1517 
1518 /**
1519  * spi_nor_find_best_erase_type() - find the best erase type for the given
1520  *				    offset in the serial flash memory and the
1521  *				    number of bytes to erase. The region in
1522  *				    which the address fits is expected to be
1523  *				    provided.
1524  * @map:	the erase map of the SPI NOR
1525  * @region:	pointer to a structure that describes a SPI NOR erase region
1526  * @addr:	offset in the serial flash memory
1527  * @len:	number of bytes to erase
1528  *
1529  * Return: a pointer to the best fitted erase type, NULL otherwise.
1530  */
static const struct spi_nor_erase_type *
spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
			     const struct spi_nor_erase_region *region,
			     u64 addr, u32 len)
{
	const struct spi_nor_erase_type *erase;
	u32 rem;
	int i;

	/*
	 * Erase types are ordered by size, with the smallest erase type at
	 * index 0.
	 */
	/* Walk from the largest type down, so the first fit is the best fit. */
	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		/* Does the erase region support the tested erase type? */
		if (!(region->erase_mask & BIT(i)))
			continue;

		erase = &map->erase_type[i];
		if (!erase->size)
			continue;

		/* Alignment is not mandatory for overlaid regions */
		if (region->overlaid && region->size <= len)
			return erase;

		/* Don't erase more than what the user has asked for. */
		if (erase->size > len)
			continue;

		/* The erase type must start on an erase-size boundary. */
		spi_nor_div_by_erase_size(erase, addr, &rem);
		if (!rem)
			return erase;
	}

	return NULL;
}
1568 
1569 /**
1570  * spi_nor_init_erase_cmd() - initialize an erase command
1571  * @region:	pointer to a structure that describes a SPI NOR erase region
1572  * @erase:	pointer to a structure that describes a SPI NOR erase type
1573  *
1574  * Return: the pointer to the allocated erase command, ERR_PTR(-errno)
1575  *	   otherwise.
1576  */
1577 static struct spi_nor_erase_command *
spi_nor_init_erase_cmd(const struct spi_nor_erase_region * region,const struct spi_nor_erase_type * erase)1578 spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
1579 		       const struct spi_nor_erase_type *erase)
1580 {
1581 	struct spi_nor_erase_command *cmd;
1582 
1583 	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
1584 	if (!cmd)
1585 		return ERR_PTR(-ENOMEM);
1586 
1587 	INIT_LIST_HEAD(&cmd->list);
1588 	cmd->opcode = erase->opcode;
1589 	cmd->count = 1;
1590 
1591 	if (region->overlaid)
1592 		cmd->size = region->size;
1593 	else
1594 		cmd->size = erase->size;
1595 
1596 	return cmd;
1597 }
1598 
1599 /**
1600  * spi_nor_destroy_erase_cmd_list() - destroy erase command list
1601  * @erase_list:	list of erase commands
1602  */
spi_nor_destroy_erase_cmd_list(struct list_head * erase_list)1603 static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
1604 {
1605 	struct spi_nor_erase_command *cmd, *next;
1606 
1607 	list_for_each_entry_safe(cmd, next, erase_list, list) {
1608 		list_del(&cmd->list);
1609 		kfree(cmd);
1610 	}
1611 }
1612 
1613 /**
1614  * spi_nor_init_erase_cmd_list() - initialize erase command list
1615  * @nor:	pointer to a 'struct spi_nor'
1616  * @erase_list:	list of erase commands to be executed once we validate that the
1617  *		erase can be performed
1618  * @addr:	offset in the serial flash memory
1619  * @len:	number of bytes to erase
1620  *
1621  * Builds the list of best fitted erase commands and verifies if the erase can
1622  * be performed.
1623  *
1624  * Return: 0 on success, -errno otherwise.
1625  */
static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
				       struct list_head *erase_list,
				       u64 addr, u32 len)
{
	const struct spi_nor_erase_map *map = &nor->params->erase_map;
	const struct spi_nor_erase_type *erase, *prev_erase = NULL;
	struct spi_nor_erase_region *region;
	struct spi_nor_erase_command *cmd = NULL;
	u64 region_end;
	unsigned int i;
	int ret = -EINVAL;

	for (i = 0; i < map->n_regions && len; i++) {
		region = &map->regions[i];
		region_end = region->offset + region->size;

		/* Consume the requested range while it stays in this region. */
		while (len && addr >= region->offset && addr < region_end) {
			erase = spi_nor_find_best_erase_type(map, region, addr,
							     len);
			if (!erase)
				goto destroy_erase_cmd_list;

			/*
			 * On the first iteration cmd is still NULL, but
			 * prev_erase != erase short-circuits before
			 * cmd->size is dereferenced.
			 */
			if (prev_erase != erase || erase->size != cmd->size ||
			    region->overlaid) {
				cmd = spi_nor_init_erase_cmd(region, erase);
				if (IS_ERR(cmd)) {
					ret = PTR_ERR(cmd);
					goto destroy_erase_cmd_list;
				}

				list_add_tail(&cmd->list, erase_list);
			} else {
				/* Same erase type as before: batch it. */
				cmd->count++;
			}

			len -= cmd->size;
			addr += cmd->size;
			prev_erase = erase;
		}
	}

	return 0;

destroy_erase_cmd_list:
	spi_nor_destroy_erase_cmd_list(erase_list);
	return ret;
}
1673 
1674 /**
1675  * spi_nor_erase_multi_sectors() - perform a non-uniform erase
1676  * @nor:	pointer to a 'struct spi_nor'
1677  * @addr:	offset in the serial flash memory
1678  * @len:	number of bytes to erase
1679  *
1680  * Build a list of best fitted erase commands and execute it once we validate
1681  * that the erase can be performed.
1682  *
1683  * Return: 0 on success, -errno otherwise.
1684  */
static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
{
	LIST_HEAD(erase_list);
	struct spi_nor_erase_command *cmd, *next;
	int ret;

	/* Validate the whole range and build the command list up front. */
	ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
	if (ret)
		return ret;

	list_for_each_entry_safe(cmd, next, &erase_list, list) {
		/* spi_nor_erase_sector() issues nor->erase_opcode. */
		nor->erase_opcode = cmd->opcode;
		while (cmd->count) {
			dev_vdbg(nor->dev, "erase_cmd->size = 0x%08x, erase_cmd->opcode = 0x%02x, erase_cmd->count = %u\n",
				 cmd->size, cmd->opcode, cmd->count);

			ret = spi_nor_lock_device(nor);
			if (ret)
				goto destroy_erase_cmd_list;

			ret = spi_nor_write_enable(nor);
			if (ret) {
				spi_nor_unlock_device(nor);
				goto destroy_erase_cmd_list;
			}

			ret = spi_nor_erase_sector(nor, addr);
			spi_nor_unlock_device(nor);
			if (ret)
				goto destroy_erase_cmd_list;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto destroy_erase_cmd_list;

			addr += cmd->size;
			cmd->count--;
		}
		/* Command fully executed: retire it from the list. */
		list_del(&cmd->list);
		kfree(cmd);
	}

	return 0;

destroy_erase_cmd_list:
	spi_nor_destroy_erase_cmd_list(&erase_list);
	return ret;
}
1733 
/*
 * Erase len bytes, one die at a time, starting at addr. len and addr are
 * expected to be die-aligned by the caller.
 */
static int spi_nor_erase_dice(struct spi_nor *nor, loff_t addr,
			      size_t len, size_t die_size)
{
	unsigned long timeout;
	int ret;

	/*
	 * Scale the timeout linearly with the size of the flash, with
	 * a minimum calibrated to an old 2MB flash. We could try to
	 * pull these from CFI/SFDP, but these values should be good
	 * enough for now.
	 */
	timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
		      CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
		      (unsigned long)(nor->mtd.size / SZ_2M));

	do {
		ret = spi_nor_lock_device(nor);
		if (ret)
			return ret;

		/* Each die erase needs its own Write Enable. */
		ret = spi_nor_write_enable(nor);
		if (ret) {
			spi_nor_unlock_device(nor);
			return ret;
		}

		ret = spi_nor_erase_die(nor, addr, die_size);

		spi_nor_unlock_device(nor);
		if (ret)
			return ret;

		/* Die erase is slow; use the scaled timeout from above. */
		ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
		if (ret)
			return ret;

		addr += die_size;
		len -= die_size;

	} while (len);

	return 0;
}
1778 
1779 /*
1780  * Erase an address range on the nor chip.  The address range may extend
1781  * one or more erase sectors. Return an error if there is a problem erasing.
1782  */
static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	u8 n_dice = nor->params->n_dice;
	bool multi_die_erase = false;
	u32 addr, len, rem;
	size_t die_size;
	int ret;

	dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
			(long long)instr->len);

	/* Uniform-erase flashes only accept erasesize-multiple lengths. */
	if (spi_nor_has_uniform_erase(nor)) {
		div_u64_rem(instr->len, mtd->erasesize, &rem);
		if (rem)
			return -EINVAL;
	}

	addr = instr->addr;
	len = instr->len;

	if (n_dice) {
		die_size = div_u64(mtd->size, n_dice);
		/*
		 * Die-granular erase is usable when the range is die-aligned.
		 * NOTE(review): the mask test assumes die_size is a power of
		 * two — confirm for flashes with a non-power-of-2 die count.
		 */
		if (!(len & (die_size - 1)) && !(addr & (die_size - 1)))
			multi_die_erase = true;
	} else {
		die_size = mtd->size;
	}

	ret = spi_nor_prep_and_lock_pe(nor, instr->addr, instr->len);
	if (ret)
		return ret;

	/* chip (die) erase? */
	if ((len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) ||
	    multi_die_erase) {
		ret = spi_nor_erase_dice(nor, addr, len, die_size);
		if (ret)
			goto erase_err;

	/* REVISIT in some cases we could speed up erasing large regions
	 * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K.  We may have set up
	 * to use "small sector erase", but that's not always optimal.
	 */

	/* "sector"-at-a-time erase */
	} else if (spi_nor_has_uniform_erase(nor)) {
		while (len) {
			ret = spi_nor_lock_device(nor);
			if (ret)
				goto erase_err;

			/* Each sector erase needs its own Write Enable. */
			ret = spi_nor_write_enable(nor);
			if (ret) {
				spi_nor_unlock_device(nor);
				goto erase_err;
			}

			ret = spi_nor_erase_sector(nor, addr);
			spi_nor_unlock_device(nor);
			if (ret)
				goto erase_err;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto erase_err;

			addr += mtd->erasesize;
			len -= mtd->erasesize;
		}

	/* erase multiple sectors */
	} else {
		ret = spi_nor_erase_multi_sectors(nor, addr, len);
		if (ret)
			goto erase_err;
	}

	ret = spi_nor_write_disable(nor);

erase_err:
	spi_nor_unlock_and_unprep_pe(nor, instr->addr, instr->len);

	return ret;
}
1868 
1869 /**
1870  * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status
1871  * Register 1.
1872  * @nor:	pointer to a 'struct spi_nor'
1873  *
1874  * Bit 6 of the Status Register 1 is the QE bit for Macronix like QSPI memories.
1875  *
1876  * Return: 0 on success, -errno otherwise.
1877  */
spi_nor_sr1_bit6_quad_enable(struct spi_nor * nor)1878 int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
1879 {
1880 	int ret;
1881 
1882 	ret = spi_nor_read_sr(nor, nor->bouncebuf);
1883 	if (ret)
1884 		return ret;
1885 
1886 	if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
1887 		return 0;
1888 
1889 	nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;
1890 
1891 	return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
1892 }
1893 
1894 /**
1895  * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status
1896  * Register 2.
1897  * @nor:       pointer to a 'struct spi_nor'.
1898  *
1899  * Bit 1 of the Status Register 2 is the QE bit for Spansion like QSPI memories.
1900  *
1901  * Return: 0 on success, -errno otherwise.
1902  */
int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
{
	int ret;

	/*
	 * If CR cannot be read back, blindly write the QE bit; the other CR
	 * bits are unknown and left cleared.
	 */
	if (nor->flags & SNOR_F_NO_READ_CR)
		return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);

	ret = spi_nor_read_cr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	/* Quad Enable already set: nothing to do. */
	if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
		return 0;

	nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;

	return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
}
1921 
1922 /**
1923  * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2.
1924  * @nor:	pointer to a 'struct spi_nor'
1925  *
1926  * Set the Quad Enable (QE) bit in the Status Register 2.
1927  *
1928  * This is one of the procedures to set the QE bit described in the SFDP
1929  * (JESD216 rev B) specification but no manufacturer using this procedure has
1930  * been identified yet, hence the name of the function.
1931  *
1932  * Return: 0 on success, -errno otherwise.
1933  */
int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
{
	u8 *sr2 = nor->bouncebuf;
	int ret;
	u8 sr2_written;

	/* Check current Quad Enable bit value. */
	ret = spi_nor_read_sr2(nor, sr2);
	if (ret)
		return ret;
	if (*sr2 & SR2_QUAD_EN_BIT7)
		return 0;

	/* Update the Quad Enable bit. */
	*sr2 |= SR2_QUAD_EN_BIT7;

	ret = spi_nor_write_sr2(nor, sr2);
	if (ret)
		return ret;

	/* Remember what we wrote; the bounce buffer is reused below. */
	sr2_written = *sr2;

	/* Read back and check it. */
	ret = spi_nor_read_sr2(nor, sr2);
	if (ret)
		return ret;

	if (*sr2 != sr2_written) {
		dev_dbg(nor->dev, "SR2: Read back test failed\n");
		return -EIO;
	}

	return 0;
}
1968 
/* All supported manufacturers, scanned in order during JEDEC ID matching. */
static const struct spi_nor_manufacturer *manufacturers[] = {
	&spi_nor_atmel,
	&spi_nor_eon,
	&spi_nor_esmt,
	&spi_nor_everspin,
	&spi_nor_gigadevice,
	&spi_nor_intel,
	&spi_nor_issi,
	&spi_nor_macronix,
	&spi_nor_micron,
	&spi_nor_st,
	&spi_nor_spansion,
	&spi_nor_sst,
	&spi_nor_winbond,
	&spi_nor_xmc,
};
1985 
/* Fallback entry for flashes fully described by their SFDP tables. */
static const struct flash_info spi_nor_generic_flash = {
	.name = "spi-nor-generic",
};
1989 
spi_nor_match_id(struct spi_nor * nor,const u8 * id)1990 static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
1991 						 const u8 *id)
1992 {
1993 	const struct flash_info *part;
1994 	unsigned int i, j;
1995 
1996 	for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
1997 		for (j = 0; j < manufacturers[i]->nparts; j++) {
1998 			part = &manufacturers[i]->parts[j];
1999 			if (part->id &&
2000 			    !memcmp(part->id->bytes, id, part->id->len)) {
2001 				nor->manufacturer = manufacturers[i];
2002 				return part;
2003 			}
2004 		}
2005 	}
2006 
2007 	return NULL;
2008 }
2009 
static const struct flash_info *spi_nor_detect(struct spi_nor *nor)
{
	const struct flash_info *info;
	u8 *id = nor->bouncebuf;
	int ret;

	ret = spi_nor_read_id(nor, 0, 0, id, nor->reg_proto);
	if (ret) {
		dev_dbg(nor->dev, "error %d reading JEDEC ID\n", ret);
		return ERR_PTR(ret);
	}

	/* Cache the complete flash ID. */
	nor->id = devm_kmemdup(nor->dev, id, SPI_NOR_MAX_ID_LEN, GFP_KERNEL);
	if (!nor->id)
		return ERR_PTR(-ENOMEM);

	info = spi_nor_match_id(nor, id);

	/* Fallback to a generic flash described only by its SFDP data. */
	if (!info) {
		ret = spi_nor_check_sfdp_signature(nor);
		if (!ret)
			info = &spi_nor_generic_flash;
	}

	/* Neither a known part nor valid SFDP: give up. */
	if (!info) {
		dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
			SPI_NOR_MAX_ID_LEN, id);
		return ERR_PTR(-ENODEV);
	}
	return info;
}
2043 
static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	/* The loop below mutates from/len; keep originals for the unlock. */
	loff_t from_lock = from;
	size_t len_lock = len;
	ssize_t ret;

	dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);

	ret = spi_nor_prep_and_lock_rd(nor, from_lock, len_lock);
	if (ret)
		return ret;

	/* Reads may be partial: loop until the whole range is covered. */
	while (len) {
		loff_t addr = from;

		ret = spi_nor_read_data(nor, addr, len, buf);
		if (ret == 0) {
			/* We shouldn't see 0-length reads */
			ret = -EIO;
			goto read_err;
		}
		if (ret < 0)
			goto read_err;

		WARN_ON(ret > len);
		*retlen += ret;
		buf += ret;
		from += ret;
		len -= ret;
	}
	ret = 0;

read_err:
	spi_nor_unlock_and_unprep_rd(nor, from_lock, len_lock);

	return ret;
}
2083 
2084 /*
2085  * Write an address range to the nor chip.  Data must be written in
2086  * FLASH_PAGESIZE chunks.  The address range may be any size provided
2087  * it is within the physical boundaries.
2088  */
static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
	size_t *retlen, const u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	size_t i;
	ssize_t ret;
	u32 page_size = nor->params->page_size;

	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);

	ret = spi_nor_prep_and_lock_pe(nor, to, len);
	if (ret)
		return ret;

	for (i = 0; i < len; ) {
		ssize_t written;
		loff_t addr = to + i;
		/*
		 * Offset of this chunk within its flash page; assumes
		 * page_size is a power of two — TODO confirm for all parts.
		 */
		size_t page_offset = addr & (page_size - 1);
		/* the size of data remaining on the first page */
		size_t page_remain = min_t(size_t, page_size - page_offset, len - i);

		ret = spi_nor_lock_device(nor);
		if (ret)
			goto write_err;

		/* Every page program must be preceded by a Write Enable. */
		ret = spi_nor_write_enable(nor);
		if (ret) {
			spi_nor_unlock_device(nor);
			goto write_err;
		}

		/* The device lock is dropped before polling for completion. */
		ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
		spi_nor_unlock_device(nor);
		if (ret < 0)
			goto write_err;
		written = ret;

		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			goto write_err;
		*retlen += written;
		i += written;
	}

	/*
	 * On the success path ret is 0 here: either from the last
	 * spi_nor_wait_till_ready(), or (for len == 0) from
	 * spi_nor_prep_and_lock_pe().
	 */
write_err:
	spi_nor_unlock_and_unprep_pe(nor, to, len);

	return ret;
}
2138 
spi_nor_check(struct spi_nor * nor)2139 static int spi_nor_check(struct spi_nor *nor)
2140 {
2141 	if (!nor->dev ||
2142 	    (!nor->spimem && !nor->controller_ops) ||
2143 	    (!nor->spimem && nor->controller_ops &&
2144 	    (!nor->controller_ops->read ||
2145 	     !nor->controller_ops->write ||
2146 	     !nor->controller_ops->read_reg ||
2147 	     !nor->controller_ops->write_reg))) {
2148 		pr_err("spi-nor: please fill all the necessary fields!\n");
2149 		return -EINVAL;
2150 	}
2151 
2152 	if (nor->spimem && nor->controller_ops) {
2153 		dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n");
2154 		return -EINVAL;
2155 	}
2156 
2157 	return 0;
2158 }
2159 
2160 void
spi_nor_set_read_settings(struct spi_nor_read_command * read,u8 num_mode_clocks,u8 num_wait_states,u8 opcode,enum spi_nor_protocol proto)2161 spi_nor_set_read_settings(struct spi_nor_read_command *read,
2162 			  u8 num_mode_clocks,
2163 			  u8 num_wait_states,
2164 			  u8 opcode,
2165 			  enum spi_nor_protocol proto)
2166 {
2167 	read->num_mode_clocks = num_mode_clocks;
2168 	read->num_wait_states = num_wait_states;
2169 	read->opcode = opcode;
2170 	read->proto = proto;
2171 }
2172 
/* Fill a page program command descriptor: opcode and protocol. */
void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
			     enum spi_nor_protocol proto)
{
	pp->proto = proto;
	pp->opcode = opcode;
}
2179 
/*
 * Linear lookup of @hwcaps in a { hwcaps, cmd } pair table.
 * Returns the command index, or -EINVAL when the capability is unknown.
 */
static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
{
	size_t idx = 0;

	while (idx < size) {
		if (table[idx][0] == (int)hwcaps)
			return table[idx][1];
		idx++;
	}

	return -EINVAL;
}
2190 
/*
 * Map a single SNOR_HWCAPS_READ* capability bit to its SNOR_CMD_READ* index
 * in the params->reads[] array. Returns -EINVAL for unknown capabilities.
 */
int spi_nor_hwcaps_read2cmd(u32 hwcaps)
{
	static const int hwcaps_read2cmd[][2] = {
		{ SNOR_HWCAPS_READ,		SNOR_CMD_READ },
		{ SNOR_HWCAPS_READ_FAST,	SNOR_CMD_READ_FAST },
		{ SNOR_HWCAPS_READ_1_1_1_DTR,	SNOR_CMD_READ_1_1_1_DTR },
		{ SNOR_HWCAPS_READ_1_1_2,	SNOR_CMD_READ_1_1_2 },
		{ SNOR_HWCAPS_READ_1_2_2,	SNOR_CMD_READ_1_2_2 },
		{ SNOR_HWCAPS_READ_2_2_2,	SNOR_CMD_READ_2_2_2 },
		{ SNOR_HWCAPS_READ_1_2_2_DTR,	SNOR_CMD_READ_1_2_2_DTR },
		{ SNOR_HWCAPS_READ_1_1_4,	SNOR_CMD_READ_1_1_4 },
		{ SNOR_HWCAPS_READ_1_4_4,	SNOR_CMD_READ_1_4_4 },
		{ SNOR_HWCAPS_READ_4_4_4,	SNOR_CMD_READ_4_4_4 },
		{ SNOR_HWCAPS_READ_1_4_4_DTR,	SNOR_CMD_READ_1_4_4_DTR },
		{ SNOR_HWCAPS_READ_1_1_8,	SNOR_CMD_READ_1_1_8 },
		{ SNOR_HWCAPS_READ_1_8_8,	SNOR_CMD_READ_1_8_8 },
		{ SNOR_HWCAPS_READ_8_8_8,	SNOR_CMD_READ_8_8_8 },
		{ SNOR_HWCAPS_READ_1_8_8_DTR,	SNOR_CMD_READ_1_8_8_DTR },
		{ SNOR_HWCAPS_READ_8_8_8_DTR,	SNOR_CMD_READ_8_8_8_DTR },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
				  ARRAY_SIZE(hwcaps_read2cmd));
}
2215 
/*
 * Map a single SNOR_HWCAPS_PP* capability bit to its SNOR_CMD_PP* index in
 * the params->page_programs[] array. Returns -EINVAL for unknown capabilities.
 */
int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
{
	static const int hwcaps_pp2cmd[][2] = {
		{ SNOR_HWCAPS_PP,		SNOR_CMD_PP },
		{ SNOR_HWCAPS_PP_1_1_4,		SNOR_CMD_PP_1_1_4 },
		{ SNOR_HWCAPS_PP_1_4_4,		SNOR_CMD_PP_1_4_4 },
		{ SNOR_HWCAPS_PP_4_4_4,		SNOR_CMD_PP_4_4_4 },
		{ SNOR_HWCAPS_PP_1_1_8,		SNOR_CMD_PP_1_1_8 },
		{ SNOR_HWCAPS_PP_1_8_8,		SNOR_CMD_PP_1_8_8 },
		{ SNOR_HWCAPS_PP_8_8_8,		SNOR_CMD_PP_8_8_8 },
		{ SNOR_HWCAPS_PP_8_8_8_DTR,	SNOR_CMD_PP_8_8_8_DTR },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
				  ARRAY_SIZE(hwcaps_pp2cmd));
}
2232 
2233 /**
2234  * spi_nor_spimem_check_op - check if the operation is supported
2235  *                           by controller
2236  *@nor:        pointer to a 'struct spi_nor'
2237  *@op:         pointer to op template to be checked
2238  *
2239  * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
2240  */
spi_nor_spimem_check_op(struct spi_nor * nor,struct spi_mem_op * op)2241 static int spi_nor_spimem_check_op(struct spi_nor *nor,
2242 				   struct spi_mem_op *op)
2243 {
2244 	/*
2245 	 * First test with 4 address bytes. The opcode itself might
2246 	 * be a 3B addressing opcode but we don't care, because
2247 	 * SPI controller implementation should not check the opcode,
2248 	 * but just the sequence.
2249 	 */
2250 	op->addr.nbytes = 4;
2251 	if (!spi_mem_supports_op(nor->spimem, op)) {
2252 		if (nor->params->size > SZ_16M)
2253 			return -EOPNOTSUPP;
2254 
2255 		/* If flash size <= 16MB, 3 address bytes are sufficient */
2256 		op->addr.nbytes = 3;
2257 		if (!spi_mem_supports_op(nor->spimem, op))
2258 			return -EOPNOTSUPP;
2259 	}
2260 
2261 	return 0;
2262 }
2263 
2264 /**
2265  * spi_nor_spimem_check_readop - check if the read op is supported
2266  *                               by controller
2267  *@nor:         pointer to a 'struct spi_nor'
2268  *@read:        pointer to op template to be checked
2269  *
2270  * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
2271  */
static int spi_nor_spimem_check_readop(struct spi_nor *nor,
				       const struct spi_nor_read_command *read)
{
	struct spi_mem_op op = SPI_NOR_READ_OP(read->opcode);

	spi_nor_spimem_setup_op(nor, &op, read->proto);

	/* convert the dummy cycles to the number of bytes */
	op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) *
			  op.dummy.buswidth / 8;
	/*
	 * NOTE(review): the DTR doubling keys off nor->read_proto (the
	 * currently selected protocol) rather than read->proto (the
	 * candidate being checked) — confirm this asymmetry is intended.
	 */
	if (spi_nor_protocol_is_dtr(nor->read_proto))
		op.dummy.nbytes *= 2;

	return spi_nor_spimem_check_op(nor, &op);
}
2287 
2288 /**
2289  * spi_nor_spimem_check_pp - check if the page program op is supported
2290  *                           by controller
2291  *@nor:         pointer to a 'struct spi_nor'
2292  *@pp:          pointer to op template to be checked
2293  *
2294  * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
2295  */
spi_nor_spimem_check_pp(struct spi_nor * nor,const struct spi_nor_pp_command * pp)2296 static int spi_nor_spimem_check_pp(struct spi_nor *nor,
2297 				   const struct spi_nor_pp_command *pp)
2298 {
2299 	struct spi_mem_op op = SPI_NOR_PP_OP(pp->opcode);
2300 
2301 	spi_nor_spimem_setup_op(nor, &op, pp->proto);
2302 
2303 	return spi_nor_spimem_check_op(nor, &op);
2304 }
2305 
2306 /**
2307  * spi_nor_spimem_adjust_hwcaps - Find optimal Read/Write protocol
2308  *                                based on SPI controller capabilities
2309  * @nor:        pointer to a 'struct spi_nor'
2310  * @hwcaps:     pointer to resulting capabilities after adjusting
2311  *              according to controller and flash's capability
2312  */
2313 static void
spi_nor_spimem_adjust_hwcaps(struct spi_nor * nor,u32 * hwcaps)2314 spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
2315 {
2316 	struct spi_nor_flash_parameter *params = nor->params;
2317 	unsigned int cap;
2318 
2319 	/* X-X-X modes are not supported yet, mask them all. */
2320 	*hwcaps &= ~SNOR_HWCAPS_X_X_X;
2321 
2322 	/*
2323 	 * If the reset line is broken, we do not want to enter a stateful
2324 	 * mode.
2325 	 */
2326 	if (nor->flags & SNOR_F_BROKEN_RESET)
2327 		*hwcaps &= ~(SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR);
2328 
2329 	for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
2330 		int rdidx, ppidx;
2331 
2332 		if (!(*hwcaps & BIT(cap)))
2333 			continue;
2334 
2335 		rdidx = spi_nor_hwcaps_read2cmd(BIT(cap));
2336 		if (rdidx >= 0 &&
2337 		    spi_nor_spimem_check_readop(nor, &params->reads[rdidx]))
2338 			*hwcaps &= ~BIT(cap);
2339 
2340 		ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap));
2341 		if (ppidx < 0)
2342 			continue;
2343 
2344 		if (spi_nor_spimem_check_pp(nor,
2345 					    &params->page_programs[ppidx]))
2346 			*hwcaps &= ~BIT(cap);
2347 	}
2348 }
2349 
2350 /**
2351  * spi_nor_set_erase_type() - set a SPI NOR erase type
2352  * @erase:	pointer to a structure that describes a SPI NOR erase type
2353  * @size:	the size of the sector/block erased by the erase type
2354  * @opcode:	the SPI command op code to erase the sector/block
2355  */
void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
			    u8 opcode)
{
	erase->opcode = opcode;
	erase->size = size;
	/* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
	erase->size_shift = ffs(size) - 1;
	erase->size_mask = BIT(erase->size_shift) - 1;
}
2365 
2366 /**
2367  * spi_nor_mask_erase_type() - mask out a SPI NOR erase type
2368  * @erase:	pointer to a structure that describes a SPI NOR erase type
2369  */
void spi_nor_mask_erase_type(struct spi_nor_erase_type *erase)
{
	/*
	 * A zero size marks the erase type as unused; e.g.
	 * spi_nor_select_uniform_erase() skips entries with !size.
	 */
	erase->size = 0;
}
2374 
2375 /**
2376  * spi_nor_init_uniform_erase_map() - Initialize uniform erase map
2377  * @map:		the erase map of the SPI NOR
2378  * @erase_mask:		bitmask encoding erase types that can erase the entire
2379  *			flash memory
2380  * @flash_size:		the spi nor flash memory size
2381  */
void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
				    u8 erase_mask, u64 flash_size)
{
	/* A single region spanning the whole flash describes a uniform map. */
	struct spi_nor_erase_region *region = &map->uniform_region;

	region->offset = 0;
	region->size = flash_size;
	region->erase_mask = erase_mask;

	map->regions = region;
	map->n_regions = 1;
}
2391 
spi_nor_post_bfpt_fixups(struct spi_nor * nor,const struct sfdp_parameter_header * bfpt_header,const struct sfdp_bfpt * bfpt)2392 int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
2393 			     const struct sfdp_parameter_header *bfpt_header,
2394 			     const struct sfdp_bfpt *bfpt)
2395 {
2396 	int ret;
2397 
2398 	if (nor->manufacturer && nor->manufacturer->fixups &&
2399 	    nor->manufacturer->fixups->post_bfpt) {
2400 		ret = nor->manufacturer->fixups->post_bfpt(nor, bfpt_header,
2401 							   bfpt);
2402 		if (ret)
2403 			return ret;
2404 	}
2405 
2406 	if (nor->info->fixups && nor->info->fixups->post_bfpt)
2407 		return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt);
2408 
2409 	return 0;
2410 }
2411 
static int spi_nor_select_read(struct spi_nor *nor,
			       u32 shared_hwcaps)
{
	const struct spi_nor_read_command *read;
	int cmd, best_match;

	/* The highest set bit of the read mask is the preferred mode. */
	best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
	if (best_match < 0)
		return -EINVAL;

	cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
	if (cmd < 0)
		return -EINVAL;

	read = &nor->params->reads[cmd];
	nor->read_opcode = read->opcode;
	nor->read_proto = read->proto;

	/*
	 * The SPI NOR framework does not need to distinguish mode clock
	 * cycles from wait state clock cycles. The mode cycles only tell a
	 * QSPI flash whether to enter or leave its 0-4-4 (Continuous Read /
	 * XIP) mode, and eXecution In Place is out of scope for the mtd
	 * sub-system. Both contributions are therefore merged into a single
	 * dummy clock cycle count.
	 */
	nor->read_dummy = read->num_mode_clocks + read->num_wait_states;

	return 0;
}
2442 
static int spi_nor_select_pp(struct spi_nor *nor,
			     u32 shared_hwcaps)
{
	const struct spi_nor_pp_command *pp;
	int cmd, best_match;

	/* The highest set bit of the page program mask is the preferred mode. */
	best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
	if (best_match < 0)
		return -EINVAL;

	cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
	if (cmd < 0)
		return -EINVAL;

	pp = &nor->params->page_programs[cmd];
	nor->program_opcode = pp->opcode;
	nor->write_proto = pp->proto;

	return 0;
}
2461 
2462 /**
2463  * spi_nor_select_uniform_erase() - select optimum uniform erase type
2464  * @map:		the erase map of the SPI NOR
2465  *
2466  * Once the optimum uniform sector erase command is found, disable all the
2467  * other.
2468  *
2469  * Return: pointer to erase type on success, NULL otherwise.
2470  */
2471 static const struct spi_nor_erase_type *
spi_nor_select_uniform_erase(struct spi_nor_erase_map * map)2472 spi_nor_select_uniform_erase(struct spi_nor_erase_map *map)
2473 {
2474 	const struct spi_nor_erase_type *tested_erase, *erase = NULL;
2475 	int i;
2476 	u8 uniform_erase_type = map->uniform_region.erase_mask;
2477 
2478 	/*
2479 	 * Search for the biggest erase size, except for when compiled
2480 	 * to use 4k erases.
2481 	 */
2482 	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
2483 		if (!(uniform_erase_type & BIT(i)))
2484 			continue;
2485 
2486 		tested_erase = &map->erase_type[i];
2487 
2488 		/* Skip masked erase types. */
2489 		if (!tested_erase->size)
2490 			continue;
2491 
2492 		/*
2493 		 * If the current erase size is the 4k one, stop here,
2494 		 * we have found the right uniform Sector Erase command.
2495 		 */
2496 		if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS) &&
2497 		    tested_erase->size == SZ_4K) {
2498 			erase = tested_erase;
2499 			break;
2500 		}
2501 
2502 		/*
2503 		 * Otherwise, the current erase size is still a valid candidate.
2504 		 * Select the biggest valid candidate.
2505 		 */
2506 		if (!erase && tested_erase->size)
2507 			erase = tested_erase;
2508 			/* keep iterating to find the wanted_size */
2509 	}
2510 
2511 	if (!erase)
2512 		return NULL;
2513 
2514 	/* Disable all other Sector Erase commands. */
2515 	map->uniform_region.erase_mask = BIT(erase - map->erase_type);
2516 	return erase;
2517 }
2518 
spi_nor_select_erase(struct spi_nor * nor)2519 static int spi_nor_select_erase(struct spi_nor *nor)
2520 {
2521 	struct spi_nor_erase_map *map = &nor->params->erase_map;
2522 	const struct spi_nor_erase_type *erase = NULL;
2523 	struct mtd_info *mtd = &nor->mtd;
2524 	int i;
2525 
2526 	/*
2527 	 * The previous implementation handling Sector Erase commands assumed
2528 	 * that the SPI flash memory has an uniform layout then used only one
2529 	 * of the supported erase sizes for all Sector Erase commands.
2530 	 * So to be backward compatible, the new implementation also tries to
2531 	 * manage the SPI flash memory as uniform with a single erase sector
2532 	 * size, when possible.
2533 	 */
2534 	if (spi_nor_has_uniform_erase(nor)) {
2535 		erase = spi_nor_select_uniform_erase(map);
2536 		if (!erase)
2537 			return -EINVAL;
2538 		nor->erase_opcode = erase->opcode;
2539 		mtd->erasesize = erase->size;
2540 		return 0;
2541 	}
2542 
2543 	/*
2544 	 * For non-uniform SPI flash memory, set mtd->erasesize to the
2545 	 * maximum erase sector size. No need to set nor->erase_opcode.
2546 	 */
2547 	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
2548 		if (map->erase_type[i].size) {
2549 			erase = &map->erase_type[i];
2550 			break;
2551 		}
2552 	}
2553 
2554 	if (!erase)
2555 		return -EINVAL;
2556 
2557 	mtd->erasesize = erase->size;
2558 	return 0;
2559 }
2560 
/*
 * Decide how many address bytes to use. Precedence: explicit params value,
 * then the 8D-8D-8D constraint, then the flash_info value, then 3 bytes;
 * bumped to 4 when the flash exceeds 16MiB.
 */
static int spi_nor_set_addr_nbytes(struct spi_nor *nor)
{
	if (nor->params->addr_nbytes) {
		nor->addr_nbytes = nor->params->addr_nbytes;
	} else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) {
		/*
		 * In 8D-8D-8D mode, one byte takes half a cycle to transfer. So
		 * in this protocol an odd addr_nbytes cannot be used because
		 * then the address phase would only span a cycle and a half.
		 * Half a cycle would be left over. We would then have to start
		 * the dummy phase in the middle of a cycle and so too the data
		 * phase, and we will end the transaction with half a cycle left
		 * over.
		 *
		 * Force all 8D-8D-8D flashes to use an addr_nbytes of 4 to
		 * avoid this situation.
		 */
		nor->addr_nbytes = 4;
	} else if (nor->info->addr_nbytes) {
		nor->addr_nbytes = nor->info->addr_nbytes;
	} else {
		nor->addr_nbytes = 3;
	}

	if (nor->addr_nbytes == 3 && nor->params->size > 0x1000000) {
		/* enable 4-byte addressing if the device exceeds 16MiB */
		nor->addr_nbytes = 4;
	}

	if (nor->addr_nbytes > SPI_NOR_MAX_ADDR_NBYTES) {
		dev_dbg(nor->dev, "The number of address bytes is too large: %u\n",
			nor->addr_nbytes);
		return -EINVAL;
	}

	/* Set 4byte opcodes when possible. */
	if (nor->addr_nbytes == 4 && nor->flags & SNOR_F_4B_OPCODES &&
	    !(nor->flags & SNOR_F_HAS_4BAIT))
		spi_nor_set_4byte_opcodes(nor);

	return 0;
}
2603 
/*
 * Select the read, page program and erase settings and the number of address
 * bytes, restricted to the capabilities shared by the SPI controller and the
 * flash (@hwcaps from the controller, params->hwcaps from the flash).
 */
static int spi_nor_setup(struct spi_nor *nor,
			 const struct spi_nor_hwcaps *hwcaps)
{
	struct spi_nor_flash_parameter *params = nor->params;
	u32 ignored_mask, shared_mask;
	int err;

	/*
	 * Keep only the hardware capabilities supported by both the SPI
	 * controller and the SPI flash memory.
	 */
	shared_mask = hwcaps->mask & params->hwcaps.mask;

	if (nor->spimem) {
		/*
		 * When called from spi_nor_probe(), all caps are set and we
		 * need to discard some of them based on what the SPI
		 * controller actually supports (using spi_mem_supports_op()).
		 */
		spi_nor_spimem_adjust_hwcaps(nor, &shared_mask);
	} else {
		/*
		 * SPI n-n-n protocols are not supported when the SPI
		 * controller directly implements the spi_nor interface.
		 * Yet another reason to switch to spi-mem.
		 */
		ignored_mask = SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR;
		if (shared_mask & ignored_mask) {
			dev_dbg(nor->dev,
				"SPI n-n-n protocols are not supported.\n");
			shared_mask &= ~ignored_mask;
		}
	}

	/* Select the (Fast) Read command. */
	err = spi_nor_select_read(nor, shared_mask);
	if (err) {
		dev_dbg(nor->dev,
			"can't select read settings supported by both the SPI controller and memory.\n");
		return err;
	}

	/* Select the Page Program command. */
	err = spi_nor_select_pp(nor, shared_mask);
	if (err) {
		dev_dbg(nor->dev,
			"can't select write settings supported by both the SPI controller and memory.\n");
		return err;
	}

	/* Select the Sector Erase command. */
	err = spi_nor_select_erase(nor);
	if (err) {
		dev_dbg(nor->dev,
			"can't select erase settings supported by both the SPI controller and memory.\n");
		return err;
	}

	return spi_nor_set_addr_nbytes(nor);
}
2664 
2665 /**
2666  * spi_nor_manufacturer_init_params() - Initialize the flash's parameters and
2667  * settings based on MFR register and ->default_init() hook.
2668  * @nor:	pointer to a 'struct spi_nor'.
2669  */
spi_nor_manufacturer_init_params(struct spi_nor * nor)2670 static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
2671 {
2672 	if (nor->manufacturer && nor->manufacturer->fixups &&
2673 	    nor->manufacturer->fixups->default_init)
2674 		nor->manufacturer->fixups->default_init(nor);
2675 
2676 	if (nor->info->fixups && nor->info->fixups->default_init)
2677 		nor->info->fixups->default_init(nor);
2678 }
2679 
2680 /**
2681  * spi_nor_no_sfdp_init_params() - Initialize the flash's parameters and
2682  * settings based on nor->info->sfdp_flags. This method should be called only by
2683  * flashes that do not define SFDP tables. If the flash supports SFDP but the
2684  * information is wrong and the settings from this function can not be retrieved
2685  * by parsing SFDP, one should instead use the fixup hooks and update the wrong
2686  * bits.
2687  * @nor:	pointer to a 'struct spi_nor'.
2688  */
static void spi_nor_no_sfdp_init_params(struct spi_nor *nor)
{
	struct spi_nor_flash_parameter *params = nor->params;
	struct spi_nor_erase_map *map = &params->erase_map;
	const struct flash_info *info = nor->info;
	const u8 no_sfdp_flags = info->no_sfdp_flags;
	u8 i, erase_mask;

	/* Each fast read variant uses 0 mode clocks and 8 dummy cycles. */
	if (no_sfdp_flags & SPI_NOR_DUAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
					  0, 8, SPINOR_OP_READ_1_1_2,
					  SNOR_PROTO_1_1_2);
	}

	if (no_sfdp_flags & SPI_NOR_QUAD_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
					  0, 8, SPINOR_OP_READ_1_1_4,
					  SNOR_PROTO_1_1_4);
	}

	if (no_sfdp_flags & SPI_NOR_OCTAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
					  0, 8, SPINOR_OP_READ_1_1_8,
					  SNOR_PROTO_1_1_8);
	}

	/* 8D-8D-8D read: 20 dummy cycles, reusing the legacy Fast Read opcode. */
	if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_8_8_8_DTR],
					  0, 20, SPINOR_OP_READ_FAST,
					  SNOR_PROTO_8_8_8_DTR);
	}

	if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_PP) {
		params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR;
		/*
		 * Since xSPI Page Program opcode is backward compatible with
		 * Legacy SPI, use Legacy SPI opcode there as well.
		 */
		spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_8_8_8_DTR],
					SPINOR_OP_PP, SNOR_PROTO_8_8_8_DTR);
	}

	/*
	 * Sector Erase settings. Sort Erase Types in ascending order, with the
	 * smallest erase size starting at BIT(0).
	 */
	erase_mask = 0;
	i = 0;
	if (no_sfdp_flags & SECT_4K) {
		erase_mask |= BIT(i);
		spi_nor_set_erase_type(&map->erase_type[i], 4096u,
				       SPINOR_OP_BE_4K);
		i++;
	}
	erase_mask |= BIT(i);
	spi_nor_set_erase_type(&map->erase_type[i],
			       info->sector_size ?: SPI_NOR_DEFAULT_SECTOR_SIZE,
			       SPINOR_OP_SE);
	spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
}
2753 
2754 /**
2755  * spi_nor_init_flags() - Initialize NOR flags for settings that are not defined
2756  * in the JESD216 SFDP standard, thus can not be retrieved when parsing SFDP.
2757  * @nor:	pointer to a 'struct spi_nor'
2758  */
static void spi_nor_init_flags(struct spi_nor *nor)
{
	struct device_node *np = spi_nor_get_flash_node(nor);
	const u16 flags = nor->info->flags;

	/* Device-tree provided quirks. */
	if (of_property_read_bool(np, "broken-flash-reset"))
		nor->flags |= SNOR_F_BROKEN_RESET;

	if (of_property_read_bool(np, "no-wp"))
		nor->flags |= SNOR_F_NO_WP;

	/* flash_info provided quirks. */
	if (flags & SPI_NOR_SWP_IS_VOLATILE)
		nor->flags |= SNOR_F_SWP_IS_VOLATILE;

	if (flags & SPI_NOR_HAS_LOCK)
		nor->flags |= SNOR_F_HAS_LOCK;

	if (flags & SPI_NOR_HAS_TB) {
		nor->flags |= SNOR_F_HAS_SR_TB;
		if (flags & SPI_NOR_TB_SR_BIT6)
			nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
	}

	if (flags & SPI_NOR_4BIT_BP) {
		nor->flags |= SNOR_F_HAS_4BIT_BP;
		if (flags & SPI_NOR_BP3_SR_BIT6)
			nor->flags |= SNOR_F_HAS_SR_BP3_BIT6;
	}

	/*
	 * Read-While-Write only when the part advertises it, has more than
	 * one bank, and the spi-mem path is used (no controller_ops).
	 */
	if (flags & SPI_NOR_RWW && nor->params->n_banks > 1 &&
	    !nor->controller_ops)
		nor->flags |= SNOR_F_RWW;
}
2792 
2793 /**
2794  * spi_nor_init_fixup_flags() - Initialize NOR flags for settings that can not
2795  * be discovered by SFDP for this particular flash because the SFDP table that
2796  * indicates this support is not defined in the flash. In case the table for
2797  * this support is defined but has wrong values, one should instead use a
2798  * post_sfdp() hook to set the SNOR_F equivalent flag.
2799  * @nor:       pointer to a 'struct spi_nor'
2800  */
spi_nor_init_fixup_flags(struct spi_nor * nor)2801 static void spi_nor_init_fixup_flags(struct spi_nor *nor)
2802 {
2803 	const u8 fixup_flags = nor->info->fixup_flags;
2804 
2805 	if (fixup_flags & SPI_NOR_4B_OPCODES)
2806 		nor->flags |= SNOR_F_4B_OPCODES;
2807 
2808 	if (fixup_flags & SPI_NOR_IO_MODE_EN_VOLATILE)
2809 		nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;
2810 }
2811 
2812 /**
2813  * spi_nor_late_init_params() - Late initialization of default flash parameters.
2814  * @nor:	pointer to a 'struct spi_nor'
2815  *
2816  * Used to initialize flash parameters that are not declared in the JESD216
2817  * SFDP standard, or where SFDP tables are not defined at all.
2818  * Will replace the spi_nor_manufacturer_init_params() method.
2819  */
static int spi_nor_late_init_params(struct spi_nor *nor)
{
	struct spi_nor_flash_parameter *params = nor->params;
	int ret;

	/* Manufacturer-level late_init hook runs first. */
	if (nor->manufacturer && nor->manufacturer->fixups &&
	    nor->manufacturer->fixups->late_init) {
		ret = nor->manufacturer->fixups->late_init(nor);
		if (ret)
			return ret;
	}

	/* Needed by some flashes late_init hooks. */
	spi_nor_init_flags(nor);

	if (nor->info->fixups && nor->info->fixups->late_init) {
		ret = nor->info->fixups->late_init(nor);
		if (ret)
			return ret;
	}

	/* Fall back to full chip erase when no die erase opcode was set. */
	if (!nor->params->die_erase_opcode)
		nor->params->die_erase_opcode = SPINOR_OP_CHIP_ERASE;

	/* Default method kept for backward compatibility. */
	if (!params->set_4byte_addr_mode)
		params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_brwr;

	spi_nor_init_fixup_flags(nor);

	/*
	 * NOR protection support. When locking_ops are not provided, we pick
	 * the default ones.
	 */
	if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops)
		spi_nor_init_default_locking_ops(nor);

	if (params->n_banks > 1)
		params->bank_size = div_u64(params->size, params->n_banks);

	return 0;
}
2862 
2863 /**
2864  * spi_nor_sfdp_init_params_deprecated() - Deprecated way of initializing flash
2865  * parameters and settings based on JESD216 SFDP standard.
2866  * @nor:	pointer to a 'struct spi_nor'.
2867  *
2868  * The method has a roll-back mechanism: in case the SFDP parsing fails, the
2869  * legacy flash parameters and settings will be restored.
2870  */
spi_nor_sfdp_init_params_deprecated(struct spi_nor * nor)2871 static void spi_nor_sfdp_init_params_deprecated(struct spi_nor *nor)
2872 {
2873 	struct spi_nor_flash_parameter sfdp_params;
2874 
2875 	memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));
2876 
2877 	if (spi_nor_parse_sfdp(nor)) {
2878 		memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
2879 		nor->flags &= ~SNOR_F_4B_OPCODES;
2880 	}
2881 }
2882 
2883 /**
2884  * spi_nor_init_params_deprecated() - Deprecated way of initializing flash
2885  * parameters and settings.
2886  * @nor:	pointer to a 'struct spi_nor'.
2887  *
2888  * The method assumes that flash doesn't support SFDP so it initializes flash
2889  * parameters in spi_nor_no_sfdp_init_params() which later on can be overwritten
2890  * when parsing SFDP, if supported.
2891  */
spi_nor_init_params_deprecated(struct spi_nor * nor)2892 static void spi_nor_init_params_deprecated(struct spi_nor *nor)
2893 {
2894 	spi_nor_no_sfdp_init_params(nor);
2895 
2896 	spi_nor_manufacturer_init_params(nor);
2897 
2898 	if (nor->info->no_sfdp_flags & (SPI_NOR_DUAL_READ |
2899 					SPI_NOR_QUAD_READ |
2900 					SPI_NOR_OCTAL_READ |
2901 					SPI_NOR_OCTAL_DTR_READ))
2902 		spi_nor_sfdp_init_params_deprecated(nor);
2903 }
2904 
2905 /**
2906  * spi_nor_init_default_params() - Default initialization of flash parameters
 * and settings. Done for all flashes, regardless of whether they define SFDP tables
2908  * or not.
2909  * @nor:	pointer to a 'struct spi_nor'.
2910  */
static void spi_nor_init_default_params(struct spi_nor *nor)
{
	struct spi_nor_flash_parameter *params = nor->params;
	const struct flash_info *info = nor->info;
	struct device_node *np = spi_nor_get_flash_node(nor);

	/* Default Quad Enable method: SR2 bit 1 (see its definition). */
	params->quad_enable = spi_nor_sr2_bit1_quad_enable;
	params->otp.org = info->otp;

	/* Default to 16-bit Write Status (01h) Command */
	nor->flags |= SNOR_F_HAS_16BIT_SR;

	/* Set SPI NOR sizes. */
	params->writesize = 1;
	params->size = info->size;
	params->bank_size = params->size;
	params->page_size = info->page_size ?: SPI_NOR_DEFAULT_PAGE_SIZE;
	params->n_banks = info->n_banks ?: SPI_NOR_DEFAULT_N_BANKS;

	/* Default to Fast Read for non-DT and enable it if requested by DT. */
	if (!np || of_property_read_bool(np, "m25p,fast-read"))
		params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;

	/* (Fast) Read settings. */
	params->hwcaps.mask |= SNOR_HWCAPS_READ;
	spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
				  0, 0, SPINOR_OP_READ,
				  SNOR_PROTO_1_1_1);

	if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
					  0, 8, SPINOR_OP_READ_FAST,
					  SNOR_PROTO_1_1_1);
	/* Page Program settings. */
	params->hwcaps.mask |= SNOR_HWCAPS_PP;
	spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
				SPINOR_OP_PP, SNOR_PROTO_1_1_1);

	if (info->flags & SPI_NOR_QUAD_PP) {
		params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4;
		spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_1_4],
					SPINOR_OP_PP_1_1_4, SNOR_PROTO_1_1_4);
	}
}
2955 
2956 /**
2957  * spi_nor_init_params() - Initialize the flash's parameters and settings.
2958  * @nor:	pointer to a 'struct spi_nor'.
2959  *
2960  * The flash parameters and settings are initialized based on a sequence of
2961  * calls that are ordered by priority:
2962  *
2963  * 1/ Default flash parameters initialization. The initializations are done
2964  *    based on nor->info data:
2965  *		spi_nor_info_init_params()
2966  *
2967  * which can be overwritten by:
2968  * 2/ Manufacturer flash parameters initialization. The initializations are
2969  *    done based on MFR register, or when the decisions can not be done solely
2970  *    based on MFR, by using specific flash_info tweeks, ->default_init():
2971  *		spi_nor_manufacturer_init_params()
2972  *
2973  * which can be overwritten by:
2974  * 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and
2975  *    should be more accurate that the above.
2976  *		spi_nor_parse_sfdp() or spi_nor_no_sfdp_init_params()
2977  *
2978  *    Please note that there is a ->post_bfpt() fixup hook that can overwrite
2979  *    the flash parameters and settings immediately after parsing the Basic
2980  *    Flash Parameter Table.
2981  *    spi_nor_post_sfdp_fixups() is called after the SFDP tables are parsed.
2982  *    It is used to tweak various flash parameters when information provided
2983  *    by the SFDP tables are wrong.
2984  *
2985  * which can be overwritten by:
2986  * 4/ Late flash parameters initialization, used to initialize flash
2987  * parameters that are not declared in the JESD216 SFDP standard, or where SFDP
2988  * tables are not defined at all.
2989  *		spi_nor_late_init_params()
2990  *
2991  * Return: 0 on success, -errno otherwise.
2992  */
spi_nor_init_params(struct spi_nor * nor)2993 static int spi_nor_init_params(struct spi_nor *nor)
2994 {
2995 	int ret;
2996 
2997 	nor->params = devm_kzalloc(nor->dev, sizeof(*nor->params), GFP_KERNEL);
2998 	if (!nor->params)
2999 		return -ENOMEM;
3000 
3001 	spi_nor_init_default_params(nor);
3002 
3003 	if (spi_nor_needs_sfdp(nor)) {
3004 		ret = spi_nor_parse_sfdp(nor);
3005 		if (ret) {
3006 			dev_err(nor->dev, "BFPT parsing failed. Please consider using SPI_NOR_SKIP_SFDP when declaring the flash\n");
3007 			return ret;
3008 		}
3009 	} else if (nor->info->no_sfdp_flags & SPI_NOR_SKIP_SFDP) {
3010 		spi_nor_no_sfdp_init_params(nor);
3011 	} else {
3012 		spi_nor_init_params_deprecated(nor);
3013 	}
3014 
3015 	ret = spi_nor_late_init_params(nor);
3016 	if (ret)
3017 		return ret;
3018 
3019 	if (WARN_ON(!is_power_of_2(nor->params->page_size)))
3020 		return -EINVAL;
3021 
3022 	return 0;
3023 }
3024 
3025 /** spi_nor_set_octal_dtr() - enable or disable Octal DTR I/O.
3026  * @nor:                 pointer to a 'struct spi_nor'
3027  * @enable:              whether to enable or disable Octal DTR
3028  *
3029  * Return: 0 on success, -errno otherwise.
3030  */
spi_nor_set_octal_dtr(struct spi_nor * nor,bool enable)3031 static int spi_nor_set_octal_dtr(struct spi_nor *nor, bool enable)
3032 {
3033 	int ret;
3034 
3035 	if (!nor->params->set_octal_dtr)
3036 		return 0;
3037 
3038 	if (!(nor->read_proto == SNOR_PROTO_8_8_8_DTR &&
3039 	      nor->write_proto == SNOR_PROTO_8_8_8_DTR))
3040 		return 0;
3041 
3042 	if (!(nor->flags & SNOR_F_IO_MODE_EN_VOLATILE))
3043 		return 0;
3044 
3045 	ret = nor->params->set_octal_dtr(nor, enable);
3046 	if (ret)
3047 		return ret;
3048 
3049 	if (enable)
3050 		nor->reg_proto = SNOR_PROTO_8_8_8_DTR;
3051 	else
3052 		nor->reg_proto = SNOR_PROTO_1_1_1;
3053 
3054 	return 0;
3055 }
3056 
3057 /**
3058  * spi_nor_quad_enable() - enable Quad I/O if needed.
3059  * @nor:                pointer to a 'struct spi_nor'
3060  *
3061  * Return: 0 on success, -errno otherwise.
3062  */
spi_nor_quad_enable(struct spi_nor * nor)3063 static int spi_nor_quad_enable(struct spi_nor *nor)
3064 {
3065 	if (!nor->params->quad_enable)
3066 		return 0;
3067 
3068 	if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 ||
3069 	      spi_nor_get_protocol_width(nor->write_proto) == 4))
3070 		return 0;
3071 
3072 	return nor->params->quad_enable(nor);
3073 }
3074 
3075 /**
3076  * spi_nor_set_4byte_addr_mode() - Set address mode.
3077  * @nor:                pointer to a 'struct spi_nor'.
3078  * @enable:             enable/disable 4 byte address mode.
3079  *
3080  * Return: 0 on success, -errno otherwise.
3081  */
spi_nor_set_4byte_addr_mode(struct spi_nor * nor,bool enable)3082 int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
3083 {
3084 	struct spi_nor_flash_parameter *params = nor->params;
3085 	int ret;
3086 
3087 	if (enable) {
3088 		/*
3089 		 * If the RESET# pin isn't hooked up properly, or the system
3090 		 * otherwise doesn't perform a reset command in the boot
3091 		 * sequence, it's impossible to 100% protect against unexpected
3092 		 * reboots (e.g., crashes). Warn the user (or hopefully, system
3093 		 * designer) that this is bad.
3094 		 */
3095 		WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
3096 			  "enabling reset hack; may not recover from unexpected reboots\n");
3097 	}
3098 
3099 	ret = params->set_4byte_addr_mode(nor, enable);
3100 	if (ret && ret != -EOPNOTSUPP)
3101 		return ret;
3102 
3103 	if (enable) {
3104 		params->addr_nbytes = 4;
3105 		params->addr_mode_nbytes = 4;
3106 	} else {
3107 		params->addr_nbytes = 3;
3108 		params->addr_mode_nbytes = 3;
3109 	}
3110 
3111 	return 0;
3112 }
3113 
spi_nor_init(struct spi_nor * nor)3114 static int spi_nor_init(struct spi_nor *nor)
3115 {
3116 	int err;
3117 
3118 	err = spi_nor_set_octal_dtr(nor, true);
3119 	if (err) {
3120 		dev_dbg(nor->dev, "octal mode not supported\n");
3121 		return err;
3122 	}
3123 
3124 	err = spi_nor_quad_enable(nor);
3125 	if (err) {
3126 		dev_dbg(nor->dev, "quad mode not supported\n");
3127 		return err;
3128 	}
3129 
3130 	/*
3131 	 * Some SPI NOR flashes are write protected by default after a power-on
3132 	 * reset cycle, in order to avoid inadvertent writes during power-up.
3133 	 * Backward compatibility imposes to unlock the entire flash memory
3134 	 * array at power-up by default. Depending on the kernel configuration
3135 	 * (1) do nothing, (2) always unlock the entire flash array or (3)
3136 	 * unlock the entire flash array only when the software write
3137 	 * protection bits are volatile. The latter is indicated by
3138 	 * SNOR_F_SWP_IS_VOLATILE.
3139 	 */
3140 	if (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE) ||
3141 	    (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE) &&
3142 	     nor->flags & SNOR_F_SWP_IS_VOLATILE))
3143 		spi_nor_try_unlock_all(nor);
3144 
3145 	if (nor->addr_nbytes == 4 &&
3146 	    nor->read_proto != SNOR_PROTO_8_8_8_DTR &&
3147 	    !(nor->flags & SNOR_F_4B_OPCODES))
3148 		return spi_nor_set_4byte_addr_mode(nor, true);
3149 
3150 	return 0;
3151 }
3152 
/**
 * spi_nor_soft_reset() - Perform a software reset
 * @nor:	pointer to 'struct spi_nor'
 *
 * Performs a "Soft Reset and Enter Default Protocol Mode" sequence which resets
 * the device to its power-on-reset state. This is useful when the software has
 * made some changes to device (volatile) registers and needs to reset it before
 * shutting down, for example.
 *
 * Not every flash supports this sequence. The same set of opcodes might be used
 * for some other operation on a flash that does not support this. Support for
 * this sequence can be discovered via SFDP in the BFPT table.
 *
 * Errors are logged (except -EOPNOTSUPP on the enable step) but not returned.
 */
static void spi_nor_soft_reset(struct spi_nor *nor)
{
	struct spi_mem_op op;
	int ret;

	/* The Software Reset Enable command must precede the actual reset. */
	op = (struct spi_mem_op)SPINOR_SRSTEN_OP;

	spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

	ret = spi_mem_exec_op(nor->spimem, &op);
	if (ret) {
		/* Stay silent when the controller can't do the op at all. */
		if (ret != -EOPNOTSUPP)
			dev_warn(nor->dev, "Software reset failed: %d\n", ret);
		return;
	}

	op = (struct spi_mem_op)SPINOR_SRST_OP;

	spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

	ret = spi_mem_exec_op(nor->spimem, &op);
	if (ret) {
		dev_warn(nor->dev, "Software reset failed: %d\n", ret);
		return;
	}

	/*
	 * Software Reset is not instant, and the delay varies from flash to
	 * flash. Looking at a few flashes, most range somewhere below 100
	 * microseconds. So, sleep for a range of 200-400 us.
	 */
	usleep_range(SPI_NOR_SRST_SLEEP_MIN, SPI_NOR_SRST_SLEEP_MAX);
}
3201 
3202 /* mtd suspend handler */
spi_nor_suspend(struct mtd_info * mtd)3203 static int spi_nor_suspend(struct mtd_info *mtd)
3204 {
3205 	struct spi_nor *nor = mtd_to_spi_nor(mtd);
3206 	int ret;
3207 
3208 	/* Disable octal DTR mode if we enabled it. */
3209 	ret = spi_nor_set_octal_dtr(nor, false);
3210 	if (ret)
3211 		dev_err(nor->dev, "suspend() failed\n");
3212 
3213 	return ret;
3214 }
3215 
3216 /* mtd resume handler */
spi_nor_resume(struct mtd_info * mtd)3217 static void spi_nor_resume(struct mtd_info *mtd)
3218 {
3219 	struct spi_nor *nor = mtd_to_spi_nor(mtd);
3220 	struct device *dev = nor->dev;
3221 	int ret;
3222 
3223 	/* re-initialize the nor chip */
3224 	ret = spi_nor_init(nor);
3225 	if (ret)
3226 		dev_err(dev, "resume() failed\n");
3227 }
3228 
spi_nor_get_device(struct mtd_info * mtd)3229 static int spi_nor_get_device(struct mtd_info *mtd)
3230 {
3231 	struct mtd_info *master = mtd_get_master(mtd);
3232 	struct spi_nor *nor = mtd_to_spi_nor(master);
3233 	struct device *dev;
3234 
3235 	if (nor->spimem)
3236 		dev = nor->spimem->spi->controller->dev.parent;
3237 	else
3238 		dev = nor->dev;
3239 
3240 	if (!try_module_get(dev->driver->owner))
3241 		return -ENODEV;
3242 
3243 	return 0;
3244 }
3245 
spi_nor_put_device(struct mtd_info * mtd)3246 static void spi_nor_put_device(struct mtd_info *mtd)
3247 {
3248 	struct mtd_info *master = mtd_get_master(mtd);
3249 	struct spi_nor *nor = mtd_to_spi_nor(master);
3250 	struct device *dev;
3251 
3252 	if (nor->spimem)
3253 		dev = nor->spimem->spi->controller->dev.parent;
3254 	else
3255 		dev = nor->dev;
3256 
3257 	module_put(dev->driver->owner);
3258 }
3259 
spi_nor_restore(struct spi_nor * nor)3260 static void spi_nor_restore(struct spi_nor *nor)
3261 {
3262 	int ret;
3263 
3264 	/* restore the addressing mode */
3265 	if (nor->addr_nbytes == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
3266 	    nor->flags & SNOR_F_BROKEN_RESET) {
3267 		ret = spi_nor_set_4byte_addr_mode(nor, false);
3268 		if (ret)
3269 			/*
3270 			 * Do not stop the execution in the hope that the flash
3271 			 * will default to the 3-byte address mode after the
3272 			 * software reset.
3273 			 */
3274 			dev_err(nor->dev, "Failed to exit 4-byte address mode, err = %d\n", ret);
3275 	}
3276 
3277 	if (nor->flags & SNOR_F_SOFT_RESET)
3278 		spi_nor_soft_reset(nor);
3279 }
3280 
spi_nor_match_name(struct spi_nor * nor,const char * name)3281 static const struct flash_info *spi_nor_match_name(struct spi_nor *nor,
3282 						   const char *name)
3283 {
3284 	unsigned int i, j;
3285 
3286 	for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
3287 		for (j = 0; j < manufacturers[i]->nparts; j++) {
3288 			if (manufacturers[i]->parts[j].name &&
3289 			    !strcmp(name, manufacturers[i]->parts[j].name)) {
3290 				nor->manufacturer = manufacturers[i];
3291 				return &manufacturers[i]->parts[j];
3292 			}
3293 		}
3294 	}
3295 
3296 	return NULL;
3297 }
3298 
spi_nor_get_flash_info(struct spi_nor * nor,const char * name)3299 static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
3300 						       const char *name)
3301 {
3302 	const struct flash_info *info = NULL;
3303 
3304 	if (name)
3305 		info = spi_nor_match_name(nor, name);
3306 	/*
3307 	 * Auto-detect if chip name wasn't specified or not found, or the chip
3308 	 * has an ID. If the chip supposedly has an ID, we also do an
3309 	 * auto-detection to compare it later.
3310 	 */
3311 	if (!info || info->id) {
3312 		const struct flash_info *jinfo;
3313 
3314 		jinfo = spi_nor_detect(nor);
3315 		if (IS_ERR(jinfo))
3316 			return jinfo;
3317 
3318 		/*
3319 		 * If caller has specified name of flash model that can normally
3320 		 * be detected using JEDEC, let's verify it.
3321 		 */
3322 		if (info && jinfo != info)
3323 			dev_warn(nor->dev, "found %s, expected %s\n",
3324 				 jinfo->name, info->name);
3325 
3326 		/* If info was set before, JEDEC knows better. */
3327 		info = jinfo;
3328 	}
3329 
3330 	return info;
3331 }
3332 
3333 static u32
spi_nor_get_region_erasesize(const struct spi_nor_erase_region * region,const struct spi_nor_erase_type * erase_type)3334 spi_nor_get_region_erasesize(const struct spi_nor_erase_region *region,
3335 			     const struct spi_nor_erase_type *erase_type)
3336 {
3337 	int i;
3338 
3339 	if (region->overlaid)
3340 		return region->size;
3341 
3342 	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
3343 		if (region->erase_mask & BIT(i))
3344 			return erase_type[i].size;
3345 	}
3346 
3347 	return 0;
3348 }
3349 
spi_nor_set_mtd_eraseregions(struct spi_nor * nor)3350 static int spi_nor_set_mtd_eraseregions(struct spi_nor *nor)
3351 {
3352 	const struct spi_nor_erase_map *map = &nor->params->erase_map;
3353 	const struct spi_nor_erase_region *region = map->regions;
3354 	struct mtd_erase_region_info *mtd_region;
3355 	struct mtd_info *mtd = &nor->mtd;
3356 	u32 erasesize, i;
3357 
3358 	mtd_region = devm_kcalloc(nor->dev, map->n_regions, sizeof(*mtd_region),
3359 				  GFP_KERNEL);
3360 	if (!mtd_region)
3361 		return -ENOMEM;
3362 
3363 	for (i = 0; i < map->n_regions; i++) {
3364 		erasesize = spi_nor_get_region_erasesize(&region[i],
3365 							 map->erase_type);
3366 		if (!erasesize)
3367 			return -EINVAL;
3368 
3369 		mtd_region[i].erasesize = erasesize;
3370 		mtd_region[i].numblocks = div_u64(region[i].size, erasesize);
3371 		mtd_region[i].offset = region[i].offset;
3372 	}
3373 
3374 	mtd->numeraseregions = map->n_regions;
3375 	mtd->eraseregions = mtd_region;
3376 
3377 	return 0;
3378 }
3379 
spi_nor_set_mtd_info(struct spi_nor * nor)3380 static int spi_nor_set_mtd_info(struct spi_nor *nor)
3381 {
3382 	struct mtd_info *mtd = &nor->mtd;
3383 	struct device *dev = nor->dev;
3384 
3385 	spi_nor_set_mtd_locking_ops(nor);
3386 	spi_nor_set_mtd_otp_ops(nor);
3387 
3388 	mtd->dev.parent = dev;
3389 	if (!mtd->name)
3390 		mtd->name = dev_name(dev);
3391 	mtd->type = MTD_NORFLASH;
3392 	mtd->flags = MTD_CAP_NORFLASH;
3393 	/* Unset BIT_WRITEABLE to enable JFFS2 write buffer for ECC'd NOR */
3394 	if (nor->flags & SNOR_F_ECC)
3395 		mtd->flags &= ~MTD_BIT_WRITEABLE;
3396 	if (nor->info->flags & SPI_NOR_NO_ERASE)
3397 		mtd->flags |= MTD_NO_ERASE;
3398 	else
3399 		mtd->_erase = spi_nor_erase;
3400 	mtd->writesize = nor->params->writesize;
3401 	mtd->writebufsize = nor->params->page_size;
3402 	mtd->size = nor->params->size;
3403 	mtd->_read = spi_nor_read;
3404 	/* Might be already set by some SST flashes. */
3405 	if (!mtd->_write)
3406 		mtd->_write = spi_nor_write;
3407 	mtd->_suspend = spi_nor_suspend;
3408 	mtd->_resume = spi_nor_resume;
3409 	mtd->_get_device = spi_nor_get_device;
3410 	mtd->_put_device = spi_nor_put_device;
3411 
3412 	if (!spi_nor_has_uniform_erase(nor))
3413 		return spi_nor_set_mtd_eraseregions(nor);
3414 
3415 	return 0;
3416 }
3417 
/* Pulse the optional "reset" GPIO to put the flash into a known state. */
static int spi_nor_hw_reset(struct spi_nor *nor)
{
	struct gpio_desc *reset;

	/* The reset GPIO is optional; its absence is not an error. */
	reset = devm_gpiod_get_optional(nor->dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR_OR_NULL(reset))
		return PTR_ERR_OR_ZERO(reset);

	/*
	 * Experimental delay values by looking at different flash device
	 * vendors datasheets.
	 */
	usleep_range(1, 5);
	/* Assert reset for 100-150 us, release, then let the flash recover. */
	gpiod_set_value_cansleep(reset, 1);
	usleep_range(100, 150);
	gpiod_set_value_cansleep(reset, 0);
	usleep_range(1000, 1200);

	return 0;
}
3438 
/**
 * spi_nor_scan() - identify the flash, initialize it and describe it to the
 *		    MTD layer
 * @nor:	pointer to a 'struct spi_nor'; nor->dev and nor->spimem are
 *		expected to be set by the caller (validated by spi_nor_check())
 * @name:	flash model name to match against the part tables, or NULL to
 *		rely purely on JEDEC ID auto-detection
 * @hwcaps:	hardware capabilities supported by the controller driver, used
 *		to select the read/program settings
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_scan(struct spi_nor *nor, const char *name,
		 const struct spi_nor_hwcaps *hwcaps)
{
	const struct flash_info *info;
	struct device *dev = nor->dev;
	int ret;

	ret = spi_nor_check(nor);
	if (ret)
		return ret;

	/* Reset SPI protocol for all commands. */
	nor->reg_proto = SNOR_PROTO_1_1_1;
	nor->read_proto = SNOR_PROTO_1_1_1;
	nor->write_proto = SNOR_PROTO_1_1_1;

	/*
	 * We need the bounce buffer early to read/write registers when going
	 * through the spi-mem layer (buffers have to be DMA-able).
	 * For spi-mem drivers, we'll reallocate a new buffer if
	 * nor->params->page_size turns out to be greater than PAGE_SIZE (which
	 * shouldn't happen before long since NOR pages are usually less
	 * than 1KB) after spi_nor_scan() returns.
	 */
	nor->bouncebuf_size = PAGE_SIZE;
	nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
				      GFP_KERNEL);
	if (!nor->bouncebuf)
		return -ENOMEM;

	/* Optional GPIO hard reset, so the ID read sees a clean device. */
	ret = spi_nor_hw_reset(nor);
	if (ret)
		return ret;

	info = spi_nor_get_flash_info(nor, name);
	if (IS_ERR(info))
		return PTR_ERR(info);

	nor->info = info;

	mutex_init(&nor->lock);

	/* Init flash parameters based on flash_info struct and SFDP */
	ret = spi_nor_init_params(nor);
	if (ret)
		return ret;

	if (spi_nor_use_parallel_locking(nor))
		init_waitqueue_head(&nor->rww.wait);

	/*
	 * Configure the SPI memory:
	 * - select op codes for (Fast) Read, Page Program and Sector Erase.
	 * - set the number of dummy cycles (mode cycles + wait states).
	 * - set the SPI protocols for register and memory accesses.
	 * - set the number of address bytes.
	 */
	ret = spi_nor_setup(nor, hwcaps);
	if (ret)
		return ret;

	/* Send all the required SPI flash commands to initialize device */
	ret = spi_nor_init(nor);
	if (ret)
		return ret;

	/* No mtd_info fields should be used up to this point. */
	ret = spi_nor_set_mtd_info(nor);
	if (ret)
		return ret;

	dev_dbg(dev, "Manufacturer and device ID: %*phN\n",
		SPI_NOR_MAX_ID_LEN, nor->id);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_nor_scan);
3516 
spi_nor_create_read_dirmap(struct spi_nor * nor)3517 static int spi_nor_create_read_dirmap(struct spi_nor *nor)
3518 {
3519 	struct spi_mem_dirmap_info info = {
3520 		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
3521 				      SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0),
3522 				      SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
3523 				      SPI_MEM_OP_DATA_IN(0, NULL, 0)),
3524 		.offset = 0,
3525 		.length = nor->params->size,
3526 	};
3527 	struct spi_mem_op *op = &info.op_tmpl;
3528 
3529 	spi_nor_spimem_setup_op(nor, op, nor->read_proto);
3530 
3531 	/* convert the dummy cycles to the number of bytes */
3532 	op->dummy.nbytes = (nor->read_dummy * op->dummy.buswidth) / 8;
3533 	if (spi_nor_protocol_is_dtr(nor->read_proto))
3534 		op->dummy.nbytes *= 2;
3535 
3536 	/*
3537 	 * Since spi_nor_spimem_setup_op() only sets buswidth when the number
3538 	 * of data bytes is non-zero, the data buswidth won't be set here. So,
3539 	 * do it explicitly.
3540 	 */
3541 	op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
3542 
3543 	nor->dirmap.rdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
3544 						       &info);
3545 	return PTR_ERR_OR_ZERO(nor->dirmap.rdesc);
3546 }
3547 
spi_nor_create_write_dirmap(struct spi_nor * nor)3548 static int spi_nor_create_write_dirmap(struct spi_nor *nor)
3549 {
3550 	struct spi_mem_dirmap_info info = {
3551 		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
3552 				      SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0),
3553 				      SPI_MEM_OP_NO_DUMMY,
3554 				      SPI_MEM_OP_DATA_OUT(0, NULL, 0)),
3555 		.offset = 0,
3556 		.length = nor->params->size,
3557 	};
3558 	struct spi_mem_op *op = &info.op_tmpl;
3559 
3560 	if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
3561 		op->addr.nbytes = 0;
3562 
3563 	spi_nor_spimem_setup_op(nor, op, nor->write_proto);
3564 
3565 	/*
3566 	 * Since spi_nor_spimem_setup_op() only sets buswidth when the number
3567 	 * of data bytes is non-zero, the data buswidth won't be set here. So,
3568 	 * do it explicitly.
3569 	 */
3570 	op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
3571 
3572 	nor->dirmap.wdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
3573 						       &info);
3574 	return PTR_ERR_OR_ZERO(nor->dirmap.wdesc);
3575 }
3576 
/* spi-mem probe: identify the flash and register it with the MTD core. */
static int spi_nor_probe(struct spi_mem *spimem)
{
	struct spi_device *spi = spimem->spi;
	struct device *dev = &spi->dev;
	struct flash_platform_data *data = dev_get_platdata(dev);
	struct spi_nor *nor;
	/*
	 * Enable all caps by default. The core will mask them after
	 * checking what's really supported using spi_mem_supports_op().
	 */
	const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
	char *flash_name;
	int ret;

	/* Enable the flash supply; devm-managed, no explicit cleanup here. */
	ret = devm_regulator_get_enable(dev, "vcc");
	if (ret)
		return ret;

	nor = devm_kzalloc(dev, sizeof(*nor), GFP_KERNEL);
	if (!nor)
		return -ENOMEM;

	nor->spimem = spimem;
	nor->dev = dev;
	spi_nor_set_flash_node(nor, dev->of_node);

	spi_mem_set_drvdata(spimem, nor);

	/* Platform-data name takes precedence, then the spi-mem name. */
	if (data && data->name)
		nor->mtd.name = data->name;

	if (!nor->mtd.name)
		nor->mtd.name = spi_mem_get_name(spimem);

	/*
	 * For some (historical?) reason many platforms provide two different
	 * names in flash_platform_data: "name" and "type". Quite often name is
	 * set to "m25p80" and then "type" provides a real chip name.
	 * If that's the case, respect "type" and ignore a "name".
	 */
	if (data && data->type)
		flash_name = data->type;
	else if (!strcmp(spi->modalias, "spi-nor"))
		flash_name = NULL; /* auto-detect */
	else
		flash_name = spi->modalias;

	ret = spi_nor_scan(nor, flash_name, &hwcaps);
	if (ret)
		return ret;

	spi_nor_debugfs_register(nor);

	/*
	 * None of the existing parts have > 512B pages, but let's play safe
	 * and add this logic so that if anyone ever adds support for such
	 * a NOR we don't end up with buffer overflows.
	 */
	if (nor->params->page_size > PAGE_SIZE) {
		nor->bouncebuf_size = nor->params->page_size;
		devm_kfree(dev, nor->bouncebuf);
		nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
					      GFP_KERNEL);
		if (!nor->bouncebuf)
			return -ENOMEM;
	}

	ret = spi_nor_create_read_dirmap(nor);
	if (ret)
		return ret;

	ret = spi_nor_create_write_dirmap(nor);
	if (ret)
		return ret;

	return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
				   data ? data->nr_parts : 0);
}
3655 
static int spi_nor_remove(struct spi_mem *spimem)
{
	struct spi_nor *nor = spi_mem_get_drvdata(spimem);

	/* Restore the addressing mode and soft-reset the flash if needed. */
	spi_nor_restore(nor);

	/* Clean up MTD stuff. */
	return mtd_device_unregister(&nor->mtd);
}
3665 
static void spi_nor_shutdown(struct spi_mem *spimem)
{
	struct spi_nor *nor = spi_mem_get_drvdata(spimem);

	/* Same as remove, minus MTD teardown: leave the flash in a sane state. */
	spi_nor_restore(nor);
}
3672 
/*
 * Do NOT add to this array without reading the following:
 *
 * Historically, many flash devices are bound to this driver by their name. But
 * since most of these flash are compatible to some extent, and their
 * differences can often be differentiated by the JEDEC read-ID command, we
 * encourage new users to add support to the spi-nor library, and simply bind
 * against a generic string here (e.g., "jedec,spi-nor").
 *
 * Many flash names are kept here in this list to keep them available
 * as module aliases for existing platforms.
 */
static const struct spi_device_id spi_nor_dev_ids[] = {
	/*
	 * Allow non-DT platform devices to bind to the "spi-nor" modalias, and
	 * hack around the fact that the SPI core does not provide uevent
	 * matching for .of_match_table
	 */
	{"spi-nor"},

	/*
	 * Entries not used in DTs that should be safe to drop after replacing
	 * them with "spi-nor" in platform data.
	 */
	{"s25sl064a"},	{"w25x16"},	{"m25p10"},	{"m25px64"},

	/*
	 * Entries that were used in DTs without "jedec,spi-nor" fallback and
	 * should be kept for backward compatibility.
	 */
	{"at25df321a"},	{"at25df641"},	{"at26df081a"},
	{"mx25l4005a"},	{"mx25l1606e"},	{"mx25l6405d"},	{"mx25l12805d"},
	{"mx25l25635e"},{"mx66l51235l"},
	{"n25q064"},	{"n25q128a11"},	{"n25q128a13"},	{"n25q512a"},
	{"s25fl256s1"},	{"s25fl512s"},	{"s25sl12801"},	{"s25fl008k"},
	{"s25fl064k"},
	{"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"},
	{"m25p40"},	{"m25p80"},	{"m25p16"},	{"m25p32"},
	{"m25p64"},	{"m25p128"},
	{"w25x80"},	{"w25x32"},	{"w25q32"},	{"w25q32dw"},
	{"w25q80bl"},	{"w25q128"},	{"w25q256"},

	/* Flashes that can't be detected using JEDEC */
	{"m25p05-nonjedec"},	{"m25p10-nonjedec"},	{"m25p20-nonjedec"},
	{"m25p40-nonjedec"},	{"m25p80-nonjedec"},	{"m25p16-nonjedec"},
	{"m25p32-nonjedec"},	{"m25p64-nonjedec"},	{"m25p128-nonjedec"},

	/* Everspin MRAMs (non-JEDEC) */
	{ "mr25h128" }, /* 128 Kib, 40 MHz */
	{ "mr25h256" }, /* 256 Kib, 40 MHz */
	{ "mr25h10" },  /*   1 Mib, 40 MHz */
	{ "mr25h40" },  /*   4 Mib, 40 MHz */

	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids);
3729 
static const struct of_device_id spi_nor_of_table[] = {
	/*
	 * Generic compatibility for SPI NOR that can be identified by the
	 * JEDEC READ ID opcode (0x9F). Use this, if possible.
	 */
	{ .compatible = "jedec,spi-nor" },
	{ /* sentinel */ },
};
/* Export the OF match table so module autoloading works for DT devices. */
MODULE_DEVICE_TABLE(of, spi_nor_of_table);
3739 
/*
 * REVISIT: many of these chips have deep power-down modes, which
 * should clearly be entered on suspend() to minimize power use.
 * And also when they're otherwise idle...
 */
static struct spi_mem_driver spi_nor_driver = {
	.spidrv = {
		.driver = {
			.name = "spi-nor",
			.of_match_table = spi_nor_of_table,
			.dev_groups = spi_nor_sysfs_groups,
		},
		/* Legacy name-based matching; see spi_nor_dev_ids above. */
		.id_table = spi_nor_dev_ids,
	},
	.probe = spi_nor_probe,
	.remove = spi_nor_remove,
	.shutdown = spi_nor_shutdown,
};
3758 
/* Register the spi-mem driver at module load. */
static int __init spi_nor_module_init(void)
{
	return spi_mem_driver_register(&spi_nor_driver);
}
module_init(spi_nor_module_init);
3764 
static void __exit spi_nor_module_exit(void)
{
	spi_mem_driver_unregister(&spi_nor_driver);
	/* Driver-wide debugfs teardown, done once at module exit. */
	spi_nor_debugfs_shutdown();
}
module_exit(spi_nor_module_exit);
3771 
3772 MODULE_LICENSE("GPL v2");
3773 MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
3774 MODULE_AUTHOR("Mike Lavender");
3775 MODULE_DESCRIPTION("framework for SPI NOR");
3776