xref: /linux/drivers/mtd/spi-nor/core.c (revision 68a052239fc4b351e961f698b824f7654a346091)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
4  * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
5  *
6  * Copyright (C) 2005, Intec Automation Inc.
7  * Copyright (C) 2014, Freescale Semiconductor, Inc.
8  */
9 
10 #include <linux/cleanup.h>
11 #include <linux/delay.h>
12 #include <linux/device.h>
13 #include <linux/err.h>
14 #include <linux/errno.h>
15 #include <linux/math64.h>
16 #include <linux/module.h>
17 #include <linux/mtd/mtd.h>
18 #include <linux/mtd/spi-nor.h>
19 #include <linux/mutex.h>
20 #include <linux/of.h>
21 #include <linux/regulator/consumer.h>
22 #include <linux/sched/task_stack.h>
23 #include <linux/sizes.h>
24 #include <linux/slab.h>
25 #include <linux/spi/flash.h>
26 
27 #include "core.h"
28 
29 /* Define max times to check status register before we give up. */
30 
31 /*
32  * For everything but full-chip erase; probably could be much smaller, but kept
33  * around for safety for now
34  */
35 #define DEFAULT_READY_WAIT_JIFFIES		(40UL * HZ)
36 
37 /*
38  * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
39  * for larger flash
40  */
41 #define CHIP_ERASE_2MB_READY_WAIT_JIFFIES	(40UL * HZ)
42 
43 #define SPI_NOR_MAX_ADDR_NBYTES	4
44 
45 #define SPI_NOR_SRST_SLEEP_MIN 200
46 #define SPI_NOR_SRST_SLEEP_MAX 400
47 
48 /**
49  * spi_nor_get_cmd_ext() - Get the command opcode extension based on the
50  *			   extension type.
51  * @nor:		pointer to a 'struct spi_nor'
52  * @op:			pointer to the 'struct spi_mem_op' whose properties
53  *			need to be initialized.
54  *
55  * Right now, only "repeat" and "invert" are supported.
56  *
57  * Return: The opcode extension.
58  */
59 static u8 spi_nor_get_cmd_ext(const struct spi_nor *nor,
60 			      const struct spi_mem_op *op)
61 {
62 	switch (nor->cmd_ext_type) {
63 	case SPI_NOR_EXT_INVERT:
64 		return ~op->cmd.opcode;
65 
66 	case SPI_NOR_EXT_REPEAT:
67 		return op->cmd.opcode;
68 
69 	default:
70 		dev_err(nor->dev, "Unknown command extension type\n");
71 		return 0;
72 	}
73 }
74 
75 /**
76  * spi_nor_spimem_setup_op() - Set up common properties of a spi-mem op.
77  * @nor:		pointer to a 'struct spi_nor'
78  * @op:			pointer to the 'struct spi_mem_op' whose properties
79  *			need to be initialized.
80  * @proto:		the protocol from which the properties need to be set.
81  */
void spi_nor_spimem_setup_op(const struct spi_nor *nor,
			     struct spi_mem_op *op,
			     const enum spi_nor_protocol proto)
{
	u8 ext;

	op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(proto);

	if (op->addr.nbytes)
		op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto);

	/* Dummy cycles are clocked on the same bus width as the address. */
	if (op->dummy.nbytes)
		op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto);

	if (op->data.nbytes)
		op->data.buswidth = spi_nor_get_protocol_data_nbits(proto);

	if (spi_nor_protocol_is_dtr(proto)) {
		/*
		 * SPIMEM supports mixed DTR modes, but right now we can only
		 * have all phases either DTR or STR. IOW, SPIMEM can have
		 * something like 4S-4D-4D, but SPI NOR can't. So, set all 4
		 * phases to either DTR or STR.
		 */
		op->cmd.dtr = true;
		op->addr.dtr = true;
		op->dummy.dtr = true;
		op->data.dtr = true;

		/* 2 bytes per clock cycle in DTR mode. */
		op->dummy.nbytes *= 2;

		/* DTR opcodes are two bytes: the opcode plus its extension. */
		ext = spi_nor_get_cmd_ext(nor, op);
		op->cmd.opcode = (op->cmd.opcode << 8) | ext;
		op->cmd.nbytes = 2;
	}

	/* Some octal DTR flashes need byte pairs swapped on the data phase. */
	if (proto == SNOR_PROTO_8_8_8_DTR && nor->flags & SNOR_F_SWAP16)
		op->data.swap16 = true;
}
122 
123 /**
124  * spi_nor_spimem_bounce() - check if a bounce buffer is needed for the data
125  *                           transfer
126  * @nor:        pointer to 'struct spi_nor'
127  * @op:         pointer to 'struct spi_mem_op' template for transfer
128  *
129  * If we have to use the bounce buffer, the data field in @op will be updated.
130  *
131  * Return: true if the bounce buffer is needed, false if not
132  */
133 static bool spi_nor_spimem_bounce(struct spi_nor *nor, struct spi_mem_op *op)
134 {
135 	/* op->data.buf.in occupies the same memory as op->data.buf.out */
136 	if (object_is_on_stack(op->data.buf.in) ||
137 	    !virt_addr_valid(op->data.buf.in)) {
138 		if (op->data.nbytes > nor->bouncebuf_size)
139 			op->data.nbytes = nor->bouncebuf_size;
140 		op->data.buf.in = nor->bouncebuf;
141 		return true;
142 	}
143 
144 	return false;
145 }
146 
147 /**
148  * spi_nor_spimem_exec_op() - execute a memory operation
149  * @nor:        pointer to 'struct spi_nor'
150  * @op:         pointer to 'struct spi_mem_op' template for transfer
151  *
152  * Return: 0 on success, -error otherwise.
153  */
154 static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op)
155 {
156 	int error;
157 
158 	error = spi_mem_adjust_op_size(nor->spimem, op);
159 	if (error)
160 		return error;
161 
162 	return spi_mem_exec_op(nor->spimem, op);
163 }
164 
165 int spi_nor_controller_ops_read_reg(struct spi_nor *nor, u8 opcode,
166 				    u8 *buf, size_t len)
167 {
168 	if (spi_nor_protocol_is_dtr(nor->reg_proto))
169 		return -EOPNOTSUPP;
170 
171 	return nor->controller_ops->read_reg(nor, opcode, buf, len);
172 }
173 
174 int spi_nor_controller_ops_write_reg(struct spi_nor *nor, u8 opcode,
175 				     const u8 *buf, size_t len)
176 {
177 	if (spi_nor_protocol_is_dtr(nor->reg_proto))
178 		return -EOPNOTSUPP;
179 
180 	return nor->controller_ops->write_reg(nor, opcode, buf, len);
181 }
182 
183 static int spi_nor_controller_ops_erase(struct spi_nor *nor, loff_t offs)
184 {
185 	if (spi_nor_protocol_is_dtr(nor->reg_proto))
186 		return -EOPNOTSUPP;
187 
188 	return nor->controller_ops->erase(nor, offs);
189 }
190 
191 /**
192  * spi_nor_spimem_read_data() - read data from flash's memory region via
193  *                              spi-mem
194  * @nor:        pointer to 'struct spi_nor'
195  * @from:       offset to read from
196  * @len:        number of bytes to read
197  * @buf:        pointer to dst buffer
198  *
199  * Return: number of bytes read successfully, -errno otherwise
200  */
static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
					size_t len, u8 *buf)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
			   SPI_MEM_OP_ADDR(nor->addr_nbytes, from, 0),
			   SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
			   SPI_MEM_OP_DATA_IN(len, buf, 0));
	bool usebouncebuf;
	ssize_t nbytes;
	int error;

	spi_nor_spimem_setup_op(nor, &op, nor->read_proto);

	/* convert the dummy cycles to the number of bytes */
	/* nor->read_dummy is in clock cycles; override what setup_op set. */
	op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
	/* Two bytes are transferred per dummy cycle in DTR mode. */
	if (spi_nor_protocol_is_dtr(nor->read_proto))
		op.dummy.nbytes *= 2;

	/*
	 * May clamp op.data.nbytes and redirect the data buffer to
	 * nor->bouncebuf if @buf is not DMA-able.
	 */
	usebouncebuf = spi_nor_spimem_bounce(nor, &op);

	if (nor->dirmap.rdesc) {
		nbytes = spi_mem_dirmap_read(nor->dirmap.rdesc, op.addr.val,
					     op.data.nbytes, op.data.buf.in);
	} else {
		error = spi_nor_spimem_exec_op(nor, &op);
		if (error)
			return error;
		nbytes = op.data.nbytes;
	}

	/* Copy what landed in the bounce buffer back to the caller. */
	if (usebouncebuf && nbytes > 0)
		memcpy(buf, op.data.buf.in, nbytes);

	return nbytes;
}
237 
238 /**
239  * spi_nor_read_data() - read data from flash memory
240  * @nor:        pointer to 'struct spi_nor'
241  * @from:       offset to read from
242  * @len:        number of bytes to read
243  * @buf:        pointer to dst buffer
244  *
245  * Return: number of bytes read successfully, -errno otherwise
246  */
247 ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, u8 *buf)
248 {
249 	if (nor->spimem)
250 		return spi_nor_spimem_read_data(nor, from, len, buf);
251 
252 	return nor->controller_ops->read(nor, from, len, buf);
253 }
254 
255 /**
256  * spi_nor_spimem_write_data() - write data to flash memory via
257  *                               spi-mem
258  * @nor:        pointer to 'struct spi_nor'
259  * @to:         offset to write to
260  * @len:        number of bytes to write
261  * @buf:        pointer to src buffer
262  *
263  * Return: number of bytes written successfully, -errno otherwise
264  */
static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
					 size_t len, const u8 *buf)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
			   SPI_MEM_OP_ADDR(nor->addr_nbytes, to, 0),
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_OUT(len, buf, 0));
	ssize_t nbytes;
	int error;

	/* SST AAI word program sends the address only on the first command. */
	if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
		op.addr.nbytes = 0;

	spi_nor_spimem_setup_op(nor, &op, nor->write_proto);

	/* Stage the payload in the bounce buffer if @buf is not DMA-able. */
	if (spi_nor_spimem_bounce(nor, &op))
		memcpy(nor->bouncebuf, buf, op.data.nbytes);

	if (nor->dirmap.wdesc) {
		nbytes = spi_mem_dirmap_write(nor->dirmap.wdesc, op.addr.val,
					      op.data.nbytes, op.data.buf.out);
	} else {
		error = spi_nor_spimem_exec_op(nor, &op);
		if (error)
			return error;
		/* The op may have been shrunk; report what was written. */
		nbytes = op.data.nbytes;
	}

	return nbytes;
}
296 
297 /**
298  * spi_nor_write_data() - write data to flash memory
299  * @nor:        pointer to 'struct spi_nor'
300  * @to:         offset to write to
301  * @len:        number of bytes to write
302  * @buf:        pointer to src buffer
303  *
304  * Return: number of bytes written successfully, -errno otherwise
305  */
306 ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
307 			   const u8 *buf)
308 {
309 	if (nor->spimem)
310 		return spi_nor_spimem_write_data(nor, to, len, buf);
311 
312 	return nor->controller_ops->write(nor, to, len, buf);
313 }
314 
315 /**
316  * spi_nor_read_any_reg() - read any register from flash memory, nonvolatile or
317  * volatile.
318  * @nor:        pointer to 'struct spi_nor'.
319  * @op:		SPI memory operation. op->data.buf must be DMA-able.
320  * @proto:	SPI protocol to use for the register operation.
321  *
322  * Return: zero on success, -errno otherwise
323  */
324 int spi_nor_read_any_reg(struct spi_nor *nor, struct spi_mem_op *op,
325 			 enum spi_nor_protocol proto)
326 {
327 	if (!nor->spimem)
328 		return -EOPNOTSUPP;
329 
330 	spi_nor_spimem_setup_op(nor, op, proto);
331 	return spi_nor_spimem_exec_op(nor, op);
332 }
333 
334 /**
335  * spi_nor_write_any_volatile_reg() - write any volatile register to flash
336  * memory.
337  * @nor:        pointer to 'struct spi_nor'
338  * @op:		SPI memory operation. op->data.buf must be DMA-able.
339  * @proto:	SPI protocol to use for the register operation.
340  *
341  * Writing volatile registers are instant according to some manufacturers
342  * (Cypress, Micron) and do not need any status polling.
343  *
344  * Return: zero on success, -errno otherwise
345  */
346 int spi_nor_write_any_volatile_reg(struct spi_nor *nor, struct spi_mem_op *op,
347 				   enum spi_nor_protocol proto)
348 {
349 	int ret;
350 
351 	if (!nor->spimem)
352 		return -EOPNOTSUPP;
353 
354 	ret = spi_nor_write_enable(nor);
355 	if (ret)
356 		return ret;
357 	spi_nor_spimem_setup_op(nor, op, proto);
358 	return spi_nor_spimem_exec_op(nor, op);
359 }
360 
361 /**
362  * spi_nor_write_enable() - Set write enable latch with Write Enable command.
363  * @nor:	pointer to 'struct spi_nor'.
364  *
365  * Return: 0 on success, -errno otherwise.
366  */
367 int spi_nor_write_enable(struct spi_nor *nor)
368 {
369 	int ret;
370 
371 	if (nor->spimem) {
372 		struct spi_mem_op op = SPI_NOR_WREN_OP;
373 
374 		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
375 
376 		ret = spi_mem_exec_op(nor->spimem, &op);
377 	} else {
378 		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WREN,
379 						       NULL, 0);
380 	}
381 
382 	if (ret)
383 		dev_dbg(nor->dev, "error %d on Write Enable\n", ret);
384 
385 	return ret;
386 }
387 
388 /**
389  * spi_nor_write_disable() - Send Write Disable instruction to the chip.
390  * @nor:	pointer to 'struct spi_nor'.
391  *
392  * Return: 0 on success, -errno otherwise.
393  */
394 int spi_nor_write_disable(struct spi_nor *nor)
395 {
396 	int ret;
397 
398 	if (nor->spimem) {
399 		struct spi_mem_op op = SPI_NOR_WRDI_OP;
400 
401 		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
402 
403 		ret = spi_mem_exec_op(nor->spimem, &op);
404 	} else {
405 		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRDI,
406 						       NULL, 0);
407 	}
408 
409 	if (ret)
410 		dev_dbg(nor->dev, "error %d on Write Disable\n", ret);
411 
412 	return ret;
413 }
414 
415 /**
416  * spi_nor_read_id() - Read the JEDEC ID.
417  * @nor:	pointer to 'struct spi_nor'.
418  * @naddr:	number of address bytes to send. Can be zero if the operation
419  *		does not need to send an address.
420  * @ndummy:	number of dummy bytes to send after an opcode or address. Can
421  *		be zero if the operation does not require dummy bytes.
422  * @id:		pointer to a DMA-able buffer where the value of the JEDEC ID
423  *		will be written.
424  * @proto:	the SPI protocol for register operation.
425  *
426  * Return: 0 on success, -errno otherwise.
427  */
428 int spi_nor_read_id(struct spi_nor *nor, u8 naddr, u8 ndummy, u8 *id,
429 		    enum spi_nor_protocol proto)
430 {
431 	int ret;
432 
433 	if (nor->spimem) {
434 		struct spi_mem_op op =
435 			SPI_NOR_READID_OP(naddr, ndummy, id, SPI_NOR_MAX_ID_LEN);
436 
437 		spi_nor_spimem_setup_op(nor, &op, proto);
438 		ret = spi_mem_exec_op(nor->spimem, &op);
439 	} else {
440 		ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
441 						    SPI_NOR_MAX_ID_LEN);
442 	}
443 	return ret;
444 }
445 
446 /**
447  * spi_nor_read_sr() - Read the Status Register.
448  * @nor:	pointer to 'struct spi_nor'.
449  * @sr:		pointer to a DMA-able buffer where the value of the
450  *              Status Register will be written. Should be at least 2 bytes.
451  *
452  * Return: 0 on success, -errno otherwise.
453  */
int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op = SPI_NOR_RDSR_OP(sr);

		/* Octal DTR RDSR needs flash-specific address/dummy cycles. */
		if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
			op.addr.nbytes = nor->params->rdsr_addr_nbytes;
			op.dummy.nbytes = nor->params->rdsr_dummy;
			/*
			 * We don't want to read only one byte in DTR mode. So,
			 * read 2 and then discard the second byte.
			 */
			op.data.nbytes = 2;
		}

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR, sr,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading SR\n", ret);

	return ret;
}
484 
485 /**
486  * spi_nor_read_cr() - Read the Configuration Register using the
487  * SPINOR_OP_RDCR (35h) command.
488  * @nor:	pointer to 'struct spi_nor'
489  * @cr:		pointer to a DMA-able buffer where the value of the
490  *              Configuration Register will be written.
491  *
492  * Return: 0 on success, -errno otherwise.
493  */
494 int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
495 {
496 	int ret;
497 
498 	if (nor->spimem) {
499 		struct spi_mem_op op = SPI_NOR_RDCR_OP(cr);
500 
501 		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
502 
503 		ret = spi_mem_exec_op(nor->spimem, &op);
504 	} else {
505 		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDCR, cr,
506 						      1);
507 	}
508 
509 	if (ret)
510 		dev_dbg(nor->dev, "error %d reading CR\n", ret);
511 
512 	return ret;
513 }
514 
515 /**
516  * spi_nor_set_4byte_addr_mode_en4b_ex4b() - Enter/Exit 4-byte address mode
517  *			using SPINOR_OP_EN4B/SPINOR_OP_EX4B. Typically used by
518  *			Winbond and Macronix.
519  * @nor:	pointer to 'struct spi_nor'.
520  * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
521  *		address mode.
522  *
523  * Return: 0 on success, -errno otherwise.
524  */
525 int spi_nor_set_4byte_addr_mode_en4b_ex4b(struct spi_nor *nor, bool enable)
526 {
527 	int ret;
528 
529 	if (nor->spimem) {
530 		struct spi_mem_op op = SPI_NOR_EN4B_EX4B_OP(enable);
531 
532 		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
533 
534 		ret = spi_mem_exec_op(nor->spimem, &op);
535 	} else {
536 		ret = spi_nor_controller_ops_write_reg(nor,
537 						       enable ? SPINOR_OP_EN4B :
538 								SPINOR_OP_EX4B,
539 						       NULL, 0);
540 	}
541 
542 	if (ret)
543 		dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
544 
545 	return ret;
546 }
547 
548 /**
549  * spi_nor_set_4byte_addr_mode_wren_en4b_ex4b() - Set 4-byte address mode using
550  * SPINOR_OP_WREN followed by SPINOR_OP_EN4B or SPINOR_OP_EX4B. Typically used
551  * by ST and Micron flashes.
552  * @nor:	pointer to 'struct spi_nor'.
553  * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
554  *		address mode.
555  *
556  * Return: 0 on success, -errno otherwise.
557  */
558 int spi_nor_set_4byte_addr_mode_wren_en4b_ex4b(struct spi_nor *nor, bool enable)
559 {
560 	int ret;
561 
562 	ret = spi_nor_write_enable(nor);
563 	if (ret)
564 		return ret;
565 
566 	ret = spi_nor_set_4byte_addr_mode_en4b_ex4b(nor, enable);
567 	if (ret)
568 		return ret;
569 
570 	return spi_nor_write_disable(nor);
571 }
572 
573 /**
574  * spi_nor_set_4byte_addr_mode_brwr() - Set 4-byte address mode using
575  *			SPINOR_OP_BRWR. Typically used by Spansion flashes.
576  * @nor:	pointer to 'struct spi_nor'.
577  * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
578  *		address mode.
579  *
580  * 8-bit volatile bank register used to define A[30:A24] bits. MSB (bit[7]) is
 * used to enable/disable 4-byte address mode. When MSB is set to '1', 4-byte
 * address mode is active and A[30:24] bits are don't care. Write instruction is
583  * SPINOR_OP_BRWR(17h) with 1 byte of data.
584  *
585  * Return: 0 on success, -errno otherwise.
586  */
587 int spi_nor_set_4byte_addr_mode_brwr(struct spi_nor *nor, bool enable)
588 {
589 	int ret;
590 
591 	nor->bouncebuf[0] = enable << 7;
592 
593 	if (nor->spimem) {
594 		struct spi_mem_op op = SPI_NOR_BRWR_OP(nor->bouncebuf);
595 
596 		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
597 
598 		ret = spi_mem_exec_op(nor->spimem, &op);
599 	} else {
600 		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_BRWR,
601 						       nor->bouncebuf, 1);
602 	}
603 
604 	if (ret)
605 		dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
606 
607 	return ret;
608 }
609 
610 /**
611  * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready
612  * for new commands.
613  * @nor:	pointer to 'struct spi_nor'.
614  *
615  * Return: 1 if ready, 0 if not ready, -errno on errors.
616  */
617 int spi_nor_sr_ready(struct spi_nor *nor)
618 {
619 	int ret;
620 
621 	ret = spi_nor_read_sr(nor, nor->bouncebuf);
622 	if (ret)
623 		return ret;
624 
625 	return !(nor->bouncebuf[0] & SR_WIP);
626 }
627 
628 /**
629  * spi_nor_use_parallel_locking() - Checks if RWW locking scheme shall be used
630  * @nor:	pointer to 'struct spi_nor'.
631  *
632  * Return: true if parallel locking is enabled, false otherwise.
633  */
634 static bool spi_nor_use_parallel_locking(struct spi_nor *nor)
635 {
636 	return nor->flags & SNOR_F_RWW;
637 }
638 
639 /* Locking helpers for status read operations */
static int spi_nor_rww_start_rdst(struct spi_nor *nor)
{
	struct spi_nor_rww *rww = &nor->rww;

	/* Scope-based lock: released automatically on every return path. */
	guard(mutex)(&nor->lock);

	/* A status read conflicts with any other I/O or read in flight. */
	if (rww->ongoing_io || rww->ongoing_rd)
		return -EAGAIN;

	/* Claim both the I/O and the read slots for the status read. */
	rww->ongoing_io = true;
	rww->ongoing_rd = true;

	return 0;
}
654 
static void spi_nor_rww_end_rdst(struct spi_nor *nor)
{
	struct spi_nor_rww *rww = &nor->rww;

	/* Scope-based lock: released automatically when this returns. */
	guard(mutex)(&nor->lock);

	/* Drop both claims taken for the status read. */
	rww->ongoing_io = false;
	rww->ongoing_rd = false;
}
664 
static int spi_nor_lock_rdst(struct spi_nor *nor)
{
	/* Only RWW-capable flashes need locking around status reads. */
	if (!spi_nor_use_parallel_locking(nor))
		return 0;

	return spi_nor_rww_start_rdst(nor);
}
672 
673 static void spi_nor_unlock_rdst(struct spi_nor *nor)
674 {
675 	if (spi_nor_use_parallel_locking(nor)) {
676 		spi_nor_rww_end_rdst(nor);
677 		wake_up(&nor->rww.wait);
678 	}
679 }
680 
681 /**
682  * spi_nor_ready() - Query the flash to see if it is ready for new commands.
683  * @nor:	pointer to 'struct spi_nor'.
684  *
685  * Return: 1 if ready, 0 if not ready, -errno on errors.
686  */
static int spi_nor_ready(struct spi_nor *nor)
{
	int ret;

	/*
	 * If the RWW lock is contended (-EAGAIN), report "not ready" so the
	 * caller polls again later instead of failing the whole operation.
	 */
	ret = spi_nor_lock_rdst(nor);
	if (ret)
		return 0;

	/* Flashes might override the standard routine. */
	if (nor->params->ready)
		ret = nor->params->ready(nor);
	else
		ret = spi_nor_sr_ready(nor);

	spi_nor_unlock_rdst(nor);

	return ret;
}
705 
706 /**
707  * spi_nor_wait_till_ready_with_timeout() - Service routine to read the
708  * Status Register until ready, or timeout occurs.
709  * @nor:		pointer to "struct spi_nor".
710  * @timeout_jiffies:	jiffies to wait until timeout.
711  *
712  * Return: 0 on success, -errno otherwise.
713  */
714 static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
715 						unsigned long timeout_jiffies)
716 {
717 	unsigned long deadline;
718 	int timeout = 0, ret;
719 
720 	deadline = jiffies + timeout_jiffies;
721 
722 	while (!timeout) {
723 		if (time_after_eq(jiffies, deadline))
724 			timeout = 1;
725 
726 		ret = spi_nor_ready(nor);
727 		if (ret < 0)
728 			return ret;
729 		if (ret)
730 			return 0;
731 
732 		cond_resched();
733 	}
734 
735 	dev_dbg(nor->dev, "flash operation timed out\n");
736 
737 	return -ETIMEDOUT;
738 }
739 
740 /**
741  * spi_nor_wait_till_ready() - Wait for a predefined amount of time for the
742  * flash to be ready, or timeout occurs.
743  * @nor:	pointer to "struct spi_nor".
744  *
745  * Return: 0 on success, -errno otherwise.
746  */
747 int spi_nor_wait_till_ready(struct spi_nor *nor)
748 {
749 	return spi_nor_wait_till_ready_with_timeout(nor,
750 						    DEFAULT_READY_WAIT_JIFFIES);
751 }
752 
753 /**
754  * spi_nor_global_block_unlock() - Unlock Global Block Protection.
755  * @nor:	pointer to 'struct spi_nor'.
756  *
757  * Return: 0 on success, -errno otherwise.
758  */
759 int spi_nor_global_block_unlock(struct spi_nor *nor)
760 {
761 	int ret;
762 
763 	ret = spi_nor_write_enable(nor);
764 	if (ret)
765 		return ret;
766 
767 	if (nor->spimem) {
768 		struct spi_mem_op op = SPI_NOR_GBULK_OP;
769 
770 		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
771 
772 		ret = spi_mem_exec_op(nor->spimem, &op);
773 	} else {
774 		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_GBULK,
775 						       NULL, 0);
776 	}
777 
778 	if (ret) {
779 		dev_dbg(nor->dev, "error %d on Global Block Unlock\n", ret);
780 		return ret;
781 	}
782 
783 	return spi_nor_wait_till_ready(nor);
784 }
785 
786 /**
787  * spi_nor_write_sr() - Write the Status Register.
788  * @nor:	pointer to 'struct spi_nor'.
789  * @sr:		pointer to DMA-able buffer to write to the Status Register.
790  * @len:	number of bytes to write to the Status Register.
791  *
792  * Return: 0 on success, -errno otherwise.
793  */
794 int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
795 {
796 	int ret;
797 
798 	ret = spi_nor_write_enable(nor);
799 	if (ret)
800 		return ret;
801 
802 	if (nor->spimem) {
803 		struct spi_mem_op op = SPI_NOR_WRSR_OP(sr, len);
804 
805 		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
806 
807 		ret = spi_mem_exec_op(nor->spimem, &op);
808 	} else {
809 		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR, sr,
810 						       len);
811 	}
812 
813 	if (ret) {
814 		dev_dbg(nor->dev, "error %d writing SR\n", ret);
815 		return ret;
816 	}
817 
818 	return spi_nor_wait_till_ready(nor);
819 }
820 
821 /**
822  * spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and
823  * ensure that the byte written match the received value.
824  * @nor:	pointer to a 'struct spi_nor'.
825  * @sr1:	byte value to be written to the Status Register.
826  *
827  * Return: 0 on success, -errno otherwise.
828  */
829 static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1)
830 {
831 	int ret;
832 
833 	nor->bouncebuf[0] = sr1;
834 
835 	ret = spi_nor_write_sr(nor, nor->bouncebuf, 1);
836 	if (ret)
837 		return ret;
838 
839 	ret = spi_nor_read_sr(nor, nor->bouncebuf);
840 	if (ret)
841 		return ret;
842 
843 	if (nor->bouncebuf[0] != sr1) {
844 		dev_dbg(nor->dev, "SR1: read back test failed\n");
845 		return -EIO;
846 	}
847 
848 	return 0;
849 }
850 
851 /**
852  * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the
853  * Status Register 2 in one shot. Ensure that the byte written in the Status
854  * Register 1 match the received value, and that the 16-bit Write did not
855  * affect what was already in the Status Register 2.
856  * @nor:	pointer to a 'struct spi_nor'.
857  * @sr1:	byte value to be written to the Status Register 1.
858  *
859  * Return: 0 on success, -errno otherwise.
860  */
static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
{
	int ret;
	u8 *sr_cr = nor->bouncebuf;	/* sr_cr[0] = SR1, sr_cr[1] = CR/SR2 */
	u8 cr_written;

	/* Make sure we don't overwrite the contents of Status Register 2. */
	if (!(nor->flags & SNOR_F_NO_READ_CR)) {
		ret = spi_nor_read_cr(nor, &sr_cr[1]);
		if (ret)
			return ret;
	} else if (spi_nor_get_protocol_width(nor->read_proto) == 4 &&
		   spi_nor_get_protocol_width(nor->write_proto) == 4 &&
		   nor->params->quad_enable) {
		/*
		 * If the Status Register 2 Read command (35h) is not
		 * supported, we should at least be sure we don't
		 * change the value of the SR2 Quad Enable bit.
		 *
		 * When the Quad Enable method is set and the buswidth is 4, we
		 * can safely assume that the value of the QE bit is one, as a
		 * consequence of the nor->params->quad_enable() call.
		 *
		 * According to the JESD216 revB standard, BFPT DWORDS[15],
		 * bits 22:20, the 16-bit Write Status (01h) command is
		 * available just for the cases in which the QE bit is
		 * described in SR2 at BIT(1).
		 */
		sr_cr[1] = SR2_QUAD_EN_BIT1;
	} else {
		/* CR is unreadable and QE is not in use: write zeroes. */
		sr_cr[1] = 0;
	}

	sr_cr[0] = sr1;

	ret = spi_nor_write_sr(nor, sr_cr, 2);
	if (ret)
		return ret;

	/* Read SR1 back and verify the write took effect. */
	ret = spi_nor_read_sr(nor, sr_cr);
	if (ret)
		return ret;

	if (sr1 != sr_cr[0]) {
		dev_dbg(nor->dev, "SR: Read back test failed\n");
		return -EIO;
	}

	/* Without a readable CR there is nothing more to verify. */
	if (nor->flags & SNOR_F_NO_READ_CR)
		return 0;

	cr_written = sr_cr[1];

	/* Check the 16-bit write did not corrupt the Configuration Register. */
	ret = spi_nor_read_cr(nor, &sr_cr[1]);
	if (ret)
		return ret;

	if (cr_written != sr_cr[1]) {
		dev_dbg(nor->dev, "CR: read back test failed\n");
		return -EIO;
	}

	return 0;
}
925 
926 /**
927  * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the
928  * Configuration Register in one shot. Ensure that the byte written in the
929  * Configuration Register match the received value, and that the 16-bit Write
930  * did not affect what was already in the Status Register 1.
931  * @nor:	pointer to a 'struct spi_nor'.
932  * @cr:		byte value to be written to the Configuration Register.
933  *
934  * Return: 0 on success, -errno otherwise.
935  */
int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
{
	int ret;
	u8 *sr_cr = nor->bouncebuf;	/* sr_cr[0] = SR1, sr_cr[1] = CR */
	u8 sr_written;

	/* Keep the current value of the Status Register 1. */
	ret = spi_nor_read_sr(nor, sr_cr);
	if (ret)
		return ret;

	sr_cr[1] = cr;

	ret = spi_nor_write_sr(nor, sr_cr, 2);
	if (ret)
		return ret;

	sr_written = sr_cr[0];

	/* Check the 16-bit write did not corrupt the Status Register 1. */
	ret = spi_nor_read_sr(nor, sr_cr);
	if (ret)
		return ret;

	if (sr_written != sr_cr[0]) {
		dev_dbg(nor->dev, "SR: Read back test failed\n");
		return -EIO;
	}

	/* Without a readable CR there is nothing more to verify. */
	if (nor->flags & SNOR_F_NO_READ_CR)
		return 0;

	/* Read CR back and verify the write took effect. */
	ret = spi_nor_read_cr(nor, &sr_cr[1]);
	if (ret)
		return ret;

	if (cr != sr_cr[1]) {
		dev_dbg(nor->dev, "CR: read back test failed\n");
		return -EIO;
	}

	return 0;
}
978 
979 /**
980  * spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that
981  * the byte written match the received value without affecting other bits in the
982  * Status Register 1 and 2.
983  * @nor:	pointer to a 'struct spi_nor'.
984  * @sr1:	byte value to be written to the Status Register.
985  *
986  * Return: 0 on success, -errno otherwise.
987  */
988 int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
989 {
990 	if (nor->flags & SNOR_F_HAS_16BIT_SR)
991 		return spi_nor_write_16bit_sr_and_check(nor, sr1);
992 
993 	return spi_nor_write_sr1_and_check(nor, sr1);
994 }
995 
996 /**
997  * spi_nor_write_sr2() - Write the Status Register 2 using the
998  * SPINOR_OP_WRSR2 (3eh) command.
999  * @nor:	pointer to 'struct spi_nor'.
1000  * @sr2:	pointer to DMA-able buffer to write to the Status Register 2.
1001  *
1002  * Return: 0 on success, -errno otherwise.
1003  */
1004 static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2)
1005 {
1006 	int ret;
1007 
1008 	ret = spi_nor_write_enable(nor);
1009 	if (ret)
1010 		return ret;
1011 
1012 	if (nor->spimem) {
1013 		struct spi_mem_op op = SPI_NOR_WRSR2_OP(sr2);
1014 
1015 		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
1016 
1017 		ret = spi_mem_exec_op(nor->spimem, &op);
1018 	} else {
1019 		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR2,
1020 						       sr2, 1);
1021 	}
1022 
1023 	if (ret) {
1024 		dev_dbg(nor->dev, "error %d writing SR2\n", ret);
1025 		return ret;
1026 	}
1027 
1028 	return spi_nor_wait_till_ready(nor);
1029 }
1030 
1031 /**
1032  * spi_nor_read_sr2() - Read the Status Register 2 using the
1033  * SPINOR_OP_RDSR2 (3fh) command.
1034  * @nor:	pointer to 'struct spi_nor'.
1035  * @sr2:	pointer to DMA-able buffer where the value of the
1036  *		Status Register 2 will be written.
1037  *
1038  * Return: 0 on success, -errno otherwise.
1039  */
1040 static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
1041 {
1042 	int ret;
1043 
1044 	if (nor->spimem) {
1045 		struct spi_mem_op op = SPI_NOR_RDSR2_OP(sr2);
1046 
1047 		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
1048 
1049 		ret = spi_mem_exec_op(nor->spimem, &op);
1050 	} else {
1051 		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR2, sr2,
1052 						      1);
1053 	}
1054 
1055 	if (ret)
1056 		dev_dbg(nor->dev, "error %d reading SR2\n", ret);
1057 
1058 	return ret;
1059 }
1060 
1061 /**
1062  * spi_nor_erase_die() - Erase the entire die.
1063  * @nor:	pointer to 'struct spi_nor'.
1064  * @addr:	address of the die.
1065  * @die_size:	size of the die.
1066  *
1067  * Return: 0 on success, -errno otherwise.
1068  */
static int spi_nor_erase_die(struct spi_nor *nor, loff_t addr, size_t die_size)
{
	/* A die smaller than the whole device means a multi-die part. */
	bool multi_die = nor->mtd.size != die_size;
	int ret;

	dev_dbg(nor->dev, " %lldKiB\n", (long long)(die_size >> 10));

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_NOR_DIE_ERASE_OP(nor->params->die_erase_opcode,
					     nor->addr_nbytes, addr, multi_die);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		/* Legacy controller ops cannot address individual dies. */
		if (multi_die)
			return -EOPNOTSUPP;

		ret = spi_nor_controller_ops_write_reg(nor,
						       SPINOR_OP_CHIP_ERASE,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d erasing chip\n", ret);

	return ret;
}
1098 
1099 static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
1100 {
1101 	size_t i;
1102 
1103 	for (i = 0; i < size; i++)
1104 		if (table[i][0] == opcode)
1105 			return table[i][1];
1106 
1107 	/* No conversion found, keep input op code. */
1108 	return opcode;
1109 }
1110 
/*
 * Map a 3-byte-address read opcode (including DTR variants) to its
 * dedicated 4-byte-address equivalent. Unknown opcodes pass through
 * unchanged.
 */
u8 spi_nor_convert_3to4_read(u8 opcode)
{
	static const u8 spi_nor_3to4_read[][2] = {
		{ SPINOR_OP_READ,	SPINOR_OP_READ_4B },
		{ SPINOR_OP_READ_FAST,	SPINOR_OP_READ_FAST_4B },
		{ SPINOR_OP_READ_1_1_2,	SPINOR_OP_READ_1_1_2_4B },
		{ SPINOR_OP_READ_1_2_2,	SPINOR_OP_READ_1_2_2_4B },
		{ SPINOR_OP_READ_1_1_4,	SPINOR_OP_READ_1_1_4_4B },
		{ SPINOR_OP_READ_1_4_4,	SPINOR_OP_READ_1_4_4_4B },
		{ SPINOR_OP_READ_1_1_8,	SPINOR_OP_READ_1_1_8_4B },
		{ SPINOR_OP_READ_1_8_8,	SPINOR_OP_READ_1_8_8_4B },

		{ SPINOR_OP_READ_1_1_1_DTR,	SPINOR_OP_READ_1_1_1_DTR_4B },
		{ SPINOR_OP_READ_1_2_2_DTR,	SPINOR_OP_READ_1_2_2_DTR_4B },
		{ SPINOR_OP_READ_1_4_4_DTR,	SPINOR_OP_READ_1_4_4_DTR_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
				      ARRAY_SIZE(spi_nor_3to4_read));
}
1131 
/*
 * Map a 3-byte-address page program opcode to its 4-byte-address
 * equivalent. Unknown opcodes pass through unchanged.
 */
static u8 spi_nor_convert_3to4_program(u8 opcode)
{
	static const u8 spi_nor_3to4_program[][2] = {
		{ SPINOR_OP_PP,		SPINOR_OP_PP_4B },
		{ SPINOR_OP_PP_1_1_4,	SPINOR_OP_PP_1_1_4_4B },
		{ SPINOR_OP_PP_1_4_4,	SPINOR_OP_PP_1_4_4_4B },
		{ SPINOR_OP_PP_1_1_8,	SPINOR_OP_PP_1_1_8_4B },
		{ SPINOR_OP_PP_1_8_8,	SPINOR_OP_PP_1_8_8_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
				      ARRAY_SIZE(spi_nor_3to4_program));
}
1145 
/*
 * Map a 3-byte-address erase opcode to its 4-byte-address equivalent.
 * Unknown opcodes pass through unchanged.
 */
static u8 spi_nor_convert_3to4_erase(u8 opcode)
{
	static const u8 spi_nor_3to4_erase[][2] = {
		{ SPINOR_OP_BE_4K,	SPINOR_OP_BE_4K_4B },
		{ SPINOR_OP_BE_32K,	SPINOR_OP_BE_32K_4B },
		{ SPINOR_OP_SE,		SPINOR_OP_SE_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
				      ARRAY_SIZE(spi_nor_3to4_erase));
}
1157 
/* True when the erase map describes a single uniform erase region. */
static bool spi_nor_has_uniform_erase(const struct spi_nor *nor)
{
	return !!nor->params->erase_map.uniform_region.erase_mask;
}
1162 
1163 static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
1164 {
1165 	nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
1166 	nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
1167 	nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
1168 
1169 	if (!spi_nor_has_uniform_erase(nor)) {
1170 		struct spi_nor_erase_map *map = &nor->params->erase_map;
1171 		struct spi_nor_erase_type *erase;
1172 		int i;
1173 
1174 		for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
1175 			erase = &map->erase_type[i];
1176 			erase->opcode =
1177 				spi_nor_convert_3to4_erase(erase->opcode);
1178 		}
1179 	}
1180 }
1181 
1182 static int spi_nor_prep(struct spi_nor *nor)
1183 {
1184 	int ret = 0;
1185 
1186 	if (nor->controller_ops && nor->controller_ops->prepare)
1187 		ret = nor->controller_ops->prepare(nor);
1188 
1189 	return ret;
1190 }
1191 
1192 static void spi_nor_unprep(struct spi_nor *nor)
1193 {
1194 	if (nor->controller_ops && nor->controller_ops->unprepare)
1195 		nor->controller_ops->unprepare(nor);
1196 }
1197 
/*
 * Translate the address range [start, start + len) into the first and
 * last flash bank it touches. Used by the read-while-write locking to
 * track per-bank ownership.
 */
static void spi_nor_offset_to_banks(u64 bank_size, loff_t start, size_t len,
				    u8 *first, u8 *last)
{
	/* This is currently safe, the number of banks being very small */
	*first = DIV_ROUND_DOWN_ULL(start, bank_size);
	*last = DIV_ROUND_DOWN_ULL(start + len - 1, bank_size);
}
1205 
/* Generic helpers for internal locking and serialization */

/*
 * Try to claim the flash for a register/IO access. Returns false when
 * another IO is already in flight, in which case the caller waits on
 * nor->rww.wait and retries.
 */
static bool spi_nor_rww_start_io(struct spi_nor *nor)
{
	struct spi_nor_rww *rww = &nor->rww;

	guard(mutex)(&nor->lock);

	if (rww->ongoing_io)
		return false;

	rww->ongoing_io = true;

	return true;
}
1220 
/* Release the IO claim taken by spi_nor_rww_start_io(). */
static void spi_nor_rww_end_io(struct spi_nor *nor)
{
	guard(mutex)(&nor->lock);
	nor->rww.ongoing_io = false;
}
1226 
/*
 * Serialize device access when read-while-write locking is in use;
 * a no-op otherwise (callers then rely on the surrounding nor->lock).
 * May fail if the wait is interrupted by a fatal signal.
 */
static int spi_nor_lock_device(struct spi_nor *nor)
{
	if (!spi_nor_use_parallel_locking(nor))
		return 0;

	return wait_event_killable(nor->rww.wait, spi_nor_rww_start_io(nor));
}
1234 
/*
 * Counterpart of spi_nor_lock_device(): release the IO claim and wake
 * any waiters blocked on the rww wait queue.
 */
static void spi_nor_unlock_device(struct spi_nor *nor)
{
	if (spi_nor_use_parallel_locking(nor)) {
		spi_nor_rww_end_io(nor);
		wake_up(&nor->rww.wait);
	}
}
1242 
1243 /* Generic helpers for internal locking and serialization */
1244 static bool spi_nor_rww_start_exclusive(struct spi_nor *nor)
1245 {
1246 	struct spi_nor_rww *rww = &nor->rww;
1247 
1248 	mutex_lock(&nor->lock);
1249 
1250 	if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe)
1251 		return false;
1252 
1253 	rww->ongoing_io = true;
1254 	rww->ongoing_rd = true;
1255 	rww->ongoing_pe = true;
1256 
1257 	return true;
1258 }
1259 
/* Release the exclusive claim taken by spi_nor_rww_start_exclusive(). */
static void spi_nor_rww_end_exclusive(struct spi_nor *nor)
{
	struct spi_nor_rww *rww = &nor->rww;

	guard(mutex)(&nor->lock);
	rww->ongoing_io = false;
	rww->ongoing_rd = false;
	rww->ongoing_pe = false;
}
1269 
/*
 * Prepare the controller and take exclusive access to the flash.
 * Counterpart of spi_nor_unlock_and_unprep(). Returns 0 on success or a
 * negative errno (e.g. when the killable wait is interrupted).
 */
int spi_nor_prep_and_lock(struct spi_nor *nor)
{
	int ret;

	ret = spi_nor_prep(nor);
	if (ret)
		return ret;

	if (!spi_nor_use_parallel_locking(nor))
		mutex_lock(&nor->lock);
	else
		ret = wait_event_killable(nor->rww.wait,
					  spi_nor_rww_start_exclusive(nor));

	return ret;
}
1286 
/* Counterpart of spi_nor_prep_and_lock(): drop exclusive access, unprep. */
void spi_nor_unlock_and_unprep(struct spi_nor *nor)
{
	if (!spi_nor_use_parallel_locking(nor)) {
		mutex_unlock(&nor->lock);
	} else {
		spi_nor_rww_end_exclusive(nor);
		wake_up(&nor->rww.wait);
	}

	spi_nor_unprep(nor);
}
1298 
/* Internal locking helpers for program and erase operations */

/*
 * Try to claim the banks covered by [start, start + len) for a program
 * or erase operation. Fails when any other operation is ongoing or when
 * one of the needed banks is already in use.
 */
static bool spi_nor_rww_start_pe(struct spi_nor *nor, loff_t start, size_t len)
{
	struct spi_nor_rww *rww = &nor->rww;
	unsigned int used_banks = 0;
	u8 first, last;
	int bank;

	guard(mutex)(&nor->lock);

	if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe)
		return false;

	/* Collect the bank mask first; commit it only if all banks are free. */
	spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
	for (bank = first; bank <= last; bank++) {
		if (rww->used_banks & BIT(bank))
			return false;

		used_banks |= BIT(bank);
	}

	rww->used_banks |= used_banks;
	rww->ongoing_pe = true;

	return true;
}
1325 
/*
 * Release the banks claimed by spi_nor_rww_start_pe() for the same
 * [start, start + len) range and clear the program/erase flag.
 */
static void spi_nor_rww_end_pe(struct spi_nor *nor, loff_t start, size_t len)
{
	struct spi_nor_rww *rww = &nor->rww;
	u8 first, last;
	int bank;

	guard(mutex)(&nor->lock);

	spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
	for (bank = first; bank <= last; bank++)
		rww->used_banks &= ~BIT(bank);

	rww->ongoing_pe = false;
}
1340 
/*
 * Prepare the controller and lock the range for a program/erase
 * operation. Counterpart of spi_nor_unlock_and_unprep_pe().
 */
static int spi_nor_prep_and_lock_pe(struct spi_nor *nor, loff_t start, size_t len)
{
	int ret;

	ret = spi_nor_prep(nor);
	if (ret)
		return ret;

	if (!spi_nor_use_parallel_locking(nor))
		mutex_lock(&nor->lock);
	else
		ret = wait_event_killable(nor->rww.wait,
					  spi_nor_rww_start_pe(nor, start, len));

	return ret;
}
1357 
/* Counterpart of spi_nor_prep_and_lock_pe() for the same range. */
static void spi_nor_unlock_and_unprep_pe(struct spi_nor *nor, loff_t start, size_t len)
{
	if (!spi_nor_use_parallel_locking(nor)) {
		mutex_unlock(&nor->lock);
	} else {
		spi_nor_rww_end_pe(nor, start, len);
		wake_up(&nor->rww.wait);
	}

	spi_nor_unprep(nor);
}
1369 
/* Internal locking helpers for read operations */

/*
 * Try to claim the banks covered by [start, start + len) for a read.
 * A read may run concurrently with a program/erase on OTHER banks, but
 * not with another IO or read.
 */
static bool spi_nor_rww_start_rd(struct spi_nor *nor, loff_t start, size_t len)
{
	struct spi_nor_rww *rww = &nor->rww;
	unsigned int used_banks = 0;
	u8 first, last;
	int bank;

	guard(mutex)(&nor->lock);

	if (rww->ongoing_io || rww->ongoing_rd)
		return false;

	/* Collect the bank mask first; commit it only if all banks are free. */
	spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
	for (bank = first; bank <= last; bank++) {
		if (rww->used_banks & BIT(bank))
			return false;

		used_banks |= BIT(bank);
	}

	rww->used_banks |= used_banks;
	rww->ongoing_io = true;
	rww->ongoing_rd = true;

	return true;
}
1397 
1398 static void spi_nor_rww_end_rd(struct spi_nor *nor, loff_t start, size_t len)
1399 {
1400 	struct spi_nor_rww *rww = &nor->rww;
1401 	u8 first, last;
1402 	int bank;
1403 
1404 	guard(mutex)(&nor->lock);
1405 
1406 	spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
1407 	for (bank = first; bank <= last; bank++)
1408 		nor->rww.used_banks &= ~BIT(bank);
1409 
1410 	rww->ongoing_io = false;
1411 	rww->ongoing_rd = false;
1412 }
1413 
/*
 * Prepare the controller and lock the range for a read operation.
 * Counterpart of spi_nor_unlock_and_unprep_rd().
 */
static int spi_nor_prep_and_lock_rd(struct spi_nor *nor, loff_t start, size_t len)
{
	int ret;

	ret = spi_nor_prep(nor);
	if (ret)
		return ret;

	if (!spi_nor_use_parallel_locking(nor))
		mutex_lock(&nor->lock);
	else
		ret = wait_event_killable(nor->rww.wait,
					  spi_nor_rww_start_rd(nor, start, len));

	return ret;
}
1430 
/* Counterpart of spi_nor_prep_and_lock_rd() for the same range. */
static void spi_nor_unlock_and_unprep_rd(struct spi_nor *nor, loff_t start, size_t len)
{
	if (!spi_nor_use_parallel_locking(nor)) {
		mutex_unlock(&nor->lock);
	} else {
		spi_nor_rww_end_rd(nor, start, len);
		wake_up(&nor->rww.wait);
	}

	spi_nor_unprep(nor);
}
1442 
1443 /*
1444  * Initiate the erasure of a single sector
1445  */
1446 int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
1447 {
1448 	int i;
1449 
1450 	if (nor->spimem) {
1451 		struct spi_mem_op op =
1452 			SPI_NOR_SECTOR_ERASE_OP(nor->erase_opcode,
1453 						nor->addr_nbytes, addr);
1454 
1455 		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
1456 
1457 		return spi_mem_exec_op(nor->spimem, &op);
1458 	} else if (nor->controller_ops->erase) {
1459 		return spi_nor_controller_ops_erase(nor, addr);
1460 	}
1461 
1462 	/*
1463 	 * Default implementation, if driver doesn't have a specialized HW
1464 	 * control
1465 	 */
1466 	for (i = nor->addr_nbytes - 1; i >= 0; i--) {
1467 		nor->bouncebuf[i] = addr & 0xff;
1468 		addr >>= 8;
1469 	}
1470 
1471 	return spi_nor_controller_ops_write_reg(nor, nor->erase_opcode,
1472 						nor->bouncebuf, nor->addr_nbytes);
1473 }
1474 
1475 /**
1476  * spi_nor_div_by_erase_size() - calculate remainder and update new dividend
1477  * @erase:	pointer to a structure that describes a SPI NOR erase type
1478  * @dividend:	dividend value
1479  * @remainder:	pointer to u32 remainder (will be updated)
1480  *
1481  * Return: the result of the division
1482  */
1483 static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
1484 				     u64 dividend, u32 *remainder)
1485 {
1486 	/* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
1487 	*remainder = (u32)dividend & erase->size_mask;
1488 	return dividend >> erase->size_shift;
1489 }
1490 
1491 /**
1492  * spi_nor_find_best_erase_type() - find the best erase type for the given
1493  *				    offset in the serial flash memory and the
1494  *				    number of bytes to erase. The region in
1495  *				    which the address fits is expected to be
1496  *				    provided.
1497  * @map:	the erase map of the SPI NOR
1498  * @region:	pointer to a structure that describes a SPI NOR erase region
1499  * @addr:	offset in the serial flash memory
1500  * @len:	number of bytes to erase
1501  *
1502  * Return: a pointer to the best fitted erase type, NULL otherwise.
1503  */
1504 static const struct spi_nor_erase_type *
1505 spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
1506 			     const struct spi_nor_erase_region *region,
1507 			     u64 addr, u32 len)
1508 {
1509 	const struct spi_nor_erase_type *erase;
1510 	u32 rem;
1511 	int i;
1512 
1513 	/*
1514 	 * Erase types are ordered by size, with the smallest erase type at
1515 	 * index 0.
1516 	 */
1517 	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
1518 		/* Does the erase region support the tested erase type? */
1519 		if (!(region->erase_mask & BIT(i)))
1520 			continue;
1521 
1522 		erase = &map->erase_type[i];
1523 		if (!erase->size)
1524 			continue;
1525 
1526 		/* Alignment is not mandatory for overlaid regions */
1527 		if (region->overlaid && region->size <= len)
1528 			return erase;
1529 
1530 		/* Don't erase more than what the user has asked for. */
1531 		if (erase->size > len)
1532 			continue;
1533 
1534 		spi_nor_div_by_erase_size(erase, addr, &rem);
1535 		if (!rem)
1536 			return erase;
1537 	}
1538 
1539 	return NULL;
1540 }
1541 
1542 /**
1543  * spi_nor_init_erase_cmd() - initialize an erase command
1544  * @region:	pointer to a structure that describes a SPI NOR erase region
1545  * @erase:	pointer to a structure that describes a SPI NOR erase type
1546  *
1547  * Return: the pointer to the allocated erase command, ERR_PTR(-errno)
1548  *	   otherwise.
1549  */
1550 static struct spi_nor_erase_command *
1551 spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
1552 		       const struct spi_nor_erase_type *erase)
1553 {
1554 	struct spi_nor_erase_command *cmd;
1555 
1556 	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
1557 	if (!cmd)
1558 		return ERR_PTR(-ENOMEM);
1559 
1560 	INIT_LIST_HEAD(&cmd->list);
1561 	cmd->opcode = erase->opcode;
1562 	cmd->count = 1;
1563 
1564 	if (region->overlaid)
1565 		cmd->size = region->size;
1566 	else
1567 		cmd->size = erase->size;
1568 
1569 	return cmd;
1570 }
1571 
1572 /**
1573  * spi_nor_destroy_erase_cmd_list() - destroy erase command list
1574  * @erase_list:	list of erase commands
1575  */
1576 static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
1577 {
1578 	struct spi_nor_erase_command *cmd, *next;
1579 
1580 	list_for_each_entry_safe(cmd, next, erase_list, list) {
1581 		list_del(&cmd->list);
1582 		kfree(cmd);
1583 	}
1584 }
1585 
1586 /**
1587  * spi_nor_init_erase_cmd_list() - initialize erase command list
1588  * @nor:	pointer to a 'struct spi_nor'
1589  * @erase_list:	list of erase commands to be executed once we validate that the
1590  *		erase can be performed
1591  * @addr:	offset in the serial flash memory
1592  * @len:	number of bytes to erase
1593  *
1594  * Builds the list of best fitted erase commands and verifies if the erase can
1595  * be performed.
1596  *
1597  * Return: 0 on success, -errno otherwise.
1598  */
1599 static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
1600 				       struct list_head *erase_list,
1601 				       u64 addr, u32 len)
1602 {
1603 	const struct spi_nor_erase_map *map = &nor->params->erase_map;
1604 	const struct spi_nor_erase_type *erase, *prev_erase = NULL;
1605 	struct spi_nor_erase_region *region;
1606 	struct spi_nor_erase_command *cmd = NULL;
1607 	u64 region_end;
1608 	unsigned int i;
1609 	int ret = -EINVAL;
1610 
1611 	for (i = 0; i < map->n_regions && len; i++) {
1612 		region = &map->regions[i];
1613 		region_end = region->offset + region->size;
1614 
1615 		while (len && addr >= region->offset && addr < region_end) {
1616 			erase = spi_nor_find_best_erase_type(map, region, addr,
1617 							     len);
1618 			if (!erase)
1619 				goto destroy_erase_cmd_list;
1620 
1621 			if (prev_erase != erase || erase->size != cmd->size ||
1622 			    region->overlaid) {
1623 				cmd = spi_nor_init_erase_cmd(region, erase);
1624 				if (IS_ERR(cmd)) {
1625 					ret = PTR_ERR(cmd);
1626 					goto destroy_erase_cmd_list;
1627 				}
1628 
1629 				list_add_tail(&cmd->list, erase_list);
1630 			} else {
1631 				cmd->count++;
1632 			}
1633 
1634 			len -= cmd->size;
1635 			addr += cmd->size;
1636 			prev_erase = erase;
1637 		}
1638 	}
1639 
1640 	return 0;
1641 
1642 destroy_erase_cmd_list:
1643 	spi_nor_destroy_erase_cmd_list(erase_list);
1644 	return ret;
1645 }
1646 
1647 /**
1648  * spi_nor_erase_multi_sectors() - perform a non-uniform erase
1649  * @nor:	pointer to a 'struct spi_nor'
1650  * @addr:	offset in the serial flash memory
1651  * @len:	number of bytes to erase
1652  *
1653  * Build a list of best fitted erase commands and execute it once we validate
1654  * that the erase can be performed.
1655  *
1656  * Return: 0 on success, -errno otherwise.
1657  */
1658 static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
1659 {
1660 	LIST_HEAD(erase_list);
1661 	struct spi_nor_erase_command *cmd, *next;
1662 	int ret;
1663 
1664 	ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
1665 	if (ret)
1666 		return ret;
1667 
1668 	list_for_each_entry_safe(cmd, next, &erase_list, list) {
1669 		nor->erase_opcode = cmd->opcode;
1670 		while (cmd->count) {
1671 			dev_vdbg(nor->dev, "erase_cmd->size = 0x%08x, erase_cmd->opcode = 0x%02x, erase_cmd->count = %u\n",
1672 				 cmd->size, cmd->opcode, cmd->count);
1673 
1674 			ret = spi_nor_lock_device(nor);
1675 			if (ret)
1676 				goto destroy_erase_cmd_list;
1677 
1678 			ret = spi_nor_write_enable(nor);
1679 			if (ret) {
1680 				spi_nor_unlock_device(nor);
1681 				goto destroy_erase_cmd_list;
1682 			}
1683 
1684 			ret = spi_nor_erase_sector(nor, addr);
1685 			spi_nor_unlock_device(nor);
1686 			if (ret)
1687 				goto destroy_erase_cmd_list;
1688 
1689 			ret = spi_nor_wait_till_ready(nor);
1690 			if (ret)
1691 				goto destroy_erase_cmd_list;
1692 
1693 			addr += cmd->size;
1694 			cmd->count--;
1695 		}
1696 		list_del(&cmd->list);
1697 		kfree(cmd);
1698 	}
1699 
1700 	return 0;
1701 
1702 destroy_erase_cmd_list:
1703 	spi_nor_destroy_erase_cmd_list(&erase_list);
1704 	return ret;
1705 }
1706 
/*
 * Erase @len bytes starting at @addr, one die (of @die_size bytes) at a
 * time. The caller guarantees @addr and @len are die-aligned.
 */
static int spi_nor_erase_dice(struct spi_nor *nor, loff_t addr,
			      size_t len, size_t die_size)
{
	unsigned long timeout;
	int ret;

	/*
	 * Scale the timeout linearly with the size of the flash, with
	 * a minimum calibrated to an old 2MB flash. We could try to
	 * pull these from CFI/SFDP, but these values should be good
	 * enough for now.
	 */
	timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
		      CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
		      (unsigned long)(nor->mtd.size / SZ_2M));

	do {
		/* Per die: lock, WREN, erase, poll ready with long timeout. */
		ret = spi_nor_lock_device(nor);
		if (ret)
			return ret;

		ret = spi_nor_write_enable(nor);
		if (ret) {
			spi_nor_unlock_device(nor);
			return ret;
		}

		ret = spi_nor_erase_die(nor, addr, die_size);

		spi_nor_unlock_device(nor);
		if (ret)
			return ret;

		ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
		if (ret)
			return ret;

		addr += die_size;
		len -= die_size;

	} while (len);

	return 0;
}
1751 
1752 /*
1753  * Erase an address range on the nor chip.  The address range may extend
1754  * one or more erase sectors. Return an error if there is a problem erasing.
1755  */
1756 static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
1757 {
1758 	struct spi_nor *nor = mtd_to_spi_nor(mtd);
1759 	u8 n_dice = nor->params->n_dice;
1760 	bool multi_die_erase = false;
1761 	u32 addr, len, rem;
1762 	size_t die_size;
1763 	int ret;
1764 
1765 	dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
1766 			(long long)instr->len);
1767 
1768 	if (spi_nor_has_uniform_erase(nor)) {
1769 		div_u64_rem(instr->len, mtd->erasesize, &rem);
1770 		if (rem)
1771 			return -EINVAL;
1772 	}
1773 
1774 	addr = instr->addr;
1775 	len = instr->len;
1776 
1777 	if (n_dice) {
1778 		die_size = div_u64(mtd->size, n_dice);
1779 		if (!(len & (die_size - 1)) && !(addr & (die_size - 1)))
1780 			multi_die_erase = true;
1781 	} else {
1782 		die_size = mtd->size;
1783 	}
1784 
1785 	ret = spi_nor_prep_and_lock_pe(nor, instr->addr, instr->len);
1786 	if (ret)
1787 		return ret;
1788 
1789 	/* chip (die) erase? */
1790 	if ((len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) ||
1791 	    multi_die_erase) {
1792 		ret = spi_nor_erase_dice(nor, addr, len, die_size);
1793 		if (ret)
1794 			goto erase_err;
1795 
1796 	/* REVISIT in some cases we could speed up erasing large regions
1797 	 * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K.  We may have set up
1798 	 * to use "small sector erase", but that's not always optimal.
1799 	 */
1800 
1801 	/* "sector"-at-a-time erase */
1802 	} else if (spi_nor_has_uniform_erase(nor)) {
1803 		while (len) {
1804 			ret = spi_nor_lock_device(nor);
1805 			if (ret)
1806 				goto erase_err;
1807 
1808 			ret = spi_nor_write_enable(nor);
1809 			if (ret) {
1810 				spi_nor_unlock_device(nor);
1811 				goto erase_err;
1812 			}
1813 
1814 			ret = spi_nor_erase_sector(nor, addr);
1815 			spi_nor_unlock_device(nor);
1816 			if (ret)
1817 				goto erase_err;
1818 
1819 			ret = spi_nor_wait_till_ready(nor);
1820 			if (ret)
1821 				goto erase_err;
1822 
1823 			addr += mtd->erasesize;
1824 			len -= mtd->erasesize;
1825 		}
1826 
1827 	/* erase multiple sectors */
1828 	} else {
1829 		ret = spi_nor_erase_multi_sectors(nor, addr, len);
1830 		if (ret)
1831 			goto erase_err;
1832 	}
1833 
1834 	ret = spi_nor_write_disable(nor);
1835 
1836 erase_err:
1837 	spi_nor_unlock_and_unprep_pe(nor, instr->addr, instr->len);
1838 
1839 	return ret;
1840 }
1841 
1842 /**
1843  * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status
1844  * Register 1.
1845  * @nor:	pointer to a 'struct spi_nor'
1846  *
1847  * Bit 6 of the Status Register 1 is the QE bit for Macronix like QSPI memories.
1848  *
1849  * Return: 0 on success, -errno otherwise.
1850  */
1851 int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
1852 {
1853 	int ret;
1854 
1855 	ret = spi_nor_read_sr(nor, nor->bouncebuf);
1856 	if (ret)
1857 		return ret;
1858 
1859 	if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
1860 		return 0;
1861 
1862 	nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;
1863 
1864 	return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
1865 }
1866 
1867 /**
1868  * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status
1869  * Register 2.
1870  * @nor:       pointer to a 'struct spi_nor'.
1871  *
1872  * Bit 1 of the Status Register 2 is the QE bit for Spansion like QSPI memories.
1873  *
1874  * Return: 0 on success, -errno otherwise.
1875  */
1876 int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
1877 {
1878 	int ret;
1879 
1880 	if (nor->flags & SNOR_F_NO_READ_CR)
1881 		return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);
1882 
1883 	ret = spi_nor_read_cr(nor, nor->bouncebuf);
1884 	if (ret)
1885 		return ret;
1886 
1887 	if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
1888 		return 0;
1889 
1890 	nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
1891 
1892 	return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
1893 }
1894 
1895 /**
1896  * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2.
1897  * @nor:	pointer to a 'struct spi_nor'
1898  *
1899  * Set the Quad Enable (QE) bit in the Status Register 2.
1900  *
1901  * This is one of the procedures to set the QE bit described in the SFDP
1902  * (JESD216 rev B) specification but no manufacturer using this procedure has
1903  * been identified yet, hence the name of the function.
1904  *
1905  * Return: 0 on success, -errno otherwise.
1906  */
1907 int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
1908 {
1909 	u8 *sr2 = nor->bouncebuf;
1910 	int ret;
1911 	u8 sr2_written;
1912 
1913 	/* Check current Quad Enable bit value. */
1914 	ret = spi_nor_read_sr2(nor, sr2);
1915 	if (ret)
1916 		return ret;
1917 	if (*sr2 & SR2_QUAD_EN_BIT7)
1918 		return 0;
1919 
1920 	/* Update the Quad Enable bit. */
1921 	*sr2 |= SR2_QUAD_EN_BIT7;
1922 
1923 	ret = spi_nor_write_sr2(nor, sr2);
1924 	if (ret)
1925 		return ret;
1926 
1927 	sr2_written = *sr2;
1928 
1929 	/* Read back and check it. */
1930 	ret = spi_nor_read_sr2(nor, sr2);
1931 	if (ret)
1932 		return ret;
1933 
1934 	if (*sr2 != sr2_written) {
1935 		dev_dbg(nor->dev, "SR2: Read back test failed\n");
1936 		return -EIO;
1937 	}
1938 
1939 	return 0;
1940 }
1941 
/* Manufacturer drivers consulted, in order, by spi_nor_match_id(). */
static const struct spi_nor_manufacturer *manufacturers[] = {
	&spi_nor_atmel,
	&spi_nor_eon,
	&spi_nor_esmt,
	&spi_nor_everspin,
	&spi_nor_gigadevice,
	&spi_nor_intel,
	&spi_nor_issi,
	&spi_nor_macronix,
	&spi_nor_micron,
	&spi_nor_st,
	&spi_nor_spansion,
	&spi_nor_sst,
	&spi_nor_winbond,
	&spi_nor_xmc,
};
1958 
/* Catch-all entry for flashes described only by their SFDP tables. */
static const struct flash_info spi_nor_generic_flash = {
	.name = "spi-nor-generic",
};
1962 
1963 static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
1964 						 const u8 *id)
1965 {
1966 	const struct flash_info *part;
1967 	unsigned int i, j;
1968 
1969 	for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
1970 		for (j = 0; j < manufacturers[i]->nparts; j++) {
1971 			part = &manufacturers[i]->parts[j];
1972 			if (part->id &&
1973 			    !memcmp(part->id->bytes, id, part->id->len)) {
1974 				nor->manufacturer = manufacturers[i];
1975 				return part;
1976 			}
1977 		}
1978 	}
1979 
1980 	return NULL;
1981 }
1982 
/*
 * Read the JEDEC ID and resolve it to a flash_info entry, falling back
 * to a generic SFDP-described flash when the ID is unknown but a valid
 * SFDP signature is present.
 */
static const struct flash_info *spi_nor_detect(struct spi_nor *nor)
{
	const struct flash_info *info;
	u8 *id = nor->bouncebuf;
	int ret;

	ret = spi_nor_read_id(nor, 0, 0, id, nor->reg_proto);
	if (ret) {
		dev_dbg(nor->dev, "error %d reading JEDEC ID\n", ret);
		return ERR_PTR(ret);
	}

	/* Cache the complete flash ID. */
	nor->id = devm_kmemdup(nor->dev, id, SPI_NOR_MAX_ID_LEN, GFP_KERNEL);
	if (!nor->id)
		return ERR_PTR(-ENOMEM);

	info = spi_nor_match_id(nor, id);

	/* Fallback to a generic flash described only by its SFDP data. */
	if (!info) {
		ret = spi_nor_check_sfdp_signature(nor);
		if (!ret)
			info = &spi_nor_generic_flash;
	}

	if (!info) {
		dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
			SPI_NOR_MAX_ID_LEN, id);
		return ERR_PTR(-ENODEV);
	}
	return info;
}
2016 
2017 /*
2018  * On Octal DTR capable flashes, reads cannot start or end at an odd
2019  * address in Octal DTR mode. Extra bytes need to be read at the start
2020  * or end to make sure both the start address and length remain even.
2021  */
2022 static int spi_nor_octal_dtr_read(struct spi_nor *nor, loff_t from, size_t len,
2023 				  u_char *buf)
2024 {
2025 	u_char *tmp_buf;
2026 	size_t tmp_len;
2027 	loff_t start, end;
2028 	int ret, bytes_read;
2029 
2030 	if (IS_ALIGNED(from, 2) && IS_ALIGNED(len, 2))
2031 		return spi_nor_read_data(nor, from, len, buf);
2032 	else if (IS_ALIGNED(from, 2) && len > PAGE_SIZE)
2033 		return spi_nor_read_data(nor, from, round_down(len, PAGE_SIZE),
2034 					 buf);
2035 
2036 	tmp_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2037 	if (!tmp_buf)
2038 		return -ENOMEM;
2039 
2040 	start = round_down(from, 2);
2041 	end = round_up(from + len, 2);
2042 
2043 	/*
2044 	 * Avoid allocating too much memory. The requested read length might be
2045 	 * quite large. Allocating a buffer just as large (slightly bigger, in
2046 	 * fact) would put unnecessary memory pressure on the system.
2047 	 *
2048 	 * For example if the read is from 3 to 1M, then this will read from 2
2049 	 * to 4098. The reads from 4098 to 1M will then not need a temporary
2050 	 * buffer so they can proceed as normal.
2051 	 */
2052 	tmp_len = min_t(size_t, end - start, PAGE_SIZE);
2053 
2054 	ret = spi_nor_read_data(nor, start, tmp_len, tmp_buf);
2055 	if (ret == 0) {
2056 		ret = -EIO;
2057 		goto out;
2058 	}
2059 	if (ret < 0)
2060 		goto out;
2061 
2062 	/*
2063 	 * More bytes are read than actually requested, but that number can't be
2064 	 * reported to the calling function or it will confuse its calculations.
2065 	 * Calculate how many of the _requested_ bytes were read.
2066 	 */
2067 	bytes_read = ret;
2068 
2069 	if (from != start)
2070 		ret -= from - start;
2071 
2072 	/*
2073 	 * Only account for extra bytes at the end if they were actually read.
2074 	 * For example, if the total length was truncated because of temporary
2075 	 * buffer size limit then the adjustment for the extra bytes at the end
2076 	 * is not needed.
2077 	 */
2078 	if (start + bytes_read == end)
2079 		ret -= end - (from + len);
2080 
2081 	memcpy(buf, tmp_buf + (from - start), ret);
2082 out:
2083 	kfree(tmp_buf);
2084 	return ret;
2085 }
2086 
/*
 * MTD ->_read() handler: read @len bytes at @from into @buf, looping on
 * short reads until the whole range is done. *@retlen accumulates the
 * bytes actually read.
 */
static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	/* Keep the original range: the rww unlock must match the lock. */
	loff_t from_lock = from;
	size_t len_lock = len;
	ssize_t ret;

	dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);

	ret = spi_nor_prep_and_lock_rd(nor, from_lock, len_lock);
	if (ret)
		return ret;

	while (len) {
		loff_t addr = from;

		if (nor->read_proto == SNOR_PROTO_8_8_8_DTR)
			ret = spi_nor_octal_dtr_read(nor, addr, len, buf);
		else
			ret = spi_nor_read_data(nor, addr, len, buf);

		if (ret == 0) {
			/* We shouldn't see 0-length reads */
			ret = -EIO;
			goto read_err;
		}
		if (ret < 0)
			goto read_err;

		WARN_ON(ret > len);
		*retlen += ret;
		buf += ret;
		from += ret;
		len -= ret;
	}
	ret = 0;

read_err:
	spi_nor_unlock_and_unprep_rd(nor, from_lock, len_lock);

	return ret;
}
2130 
2131 /*
2132  * On Octal DTR capable flashes, writes cannot start or end at an odd address
2133  * in Octal DTR mode. Extra 0xff bytes need to be appended or prepended to
2134  * make sure the start address and end address are even. 0xff is used because
2135  * on NOR flashes a program operation can only flip bits from 1 to 0, not the
2136  * other way round. 0 to 1 flip needs to happen via erases.
2137  */
2138 static int spi_nor_octal_dtr_write(struct spi_nor *nor, loff_t to, size_t len,
2139 				   const u8 *buf)
2140 {
2141 	u8 *tmp_buf;
2142 	size_t bytes_written;
2143 	loff_t start, end;
2144 	int ret;
2145 
2146 	if (IS_ALIGNED(to, 2) && IS_ALIGNED(len, 2))
2147 		return spi_nor_write_data(nor, to, len, buf);
2148 
2149 	tmp_buf = kmalloc(nor->params->page_size, GFP_KERNEL);
2150 	if (!tmp_buf)
2151 		return -ENOMEM;
2152 
2153 	memset(tmp_buf, 0xff, nor->params->page_size);
2154 
2155 	start = round_down(to, 2);
2156 	end = round_up(to + len, 2);
2157 
2158 	memcpy(tmp_buf + (to - start), buf, len);
2159 
2160 	ret = spi_nor_write_data(nor, start, end - start, tmp_buf);
2161 	if (ret == 0) {
2162 		ret = -EIO;
2163 		goto out;
2164 	}
2165 	if (ret < 0)
2166 		goto out;
2167 
2168 	/*
2169 	 * More bytes are written than actually requested, but that number can't
2170 	 * be reported to the calling function or it will confuse its
2171 	 * calculations. Calculate how many of the _requested_ bytes were
2172 	 * written.
2173 	 */
2174 	bytes_written = ret;
2175 
2176 	if (to != start)
2177 		ret -= to - start;
2178 
2179 	/*
2180 	 * Only account for extra bytes at the end if they were actually
2181 	 * written. For example, if for some reason the controller could only
2182 	 * complete a partial write then the adjustment for the extra bytes at
2183 	 * the end is not needed.
2184 	 */
2185 	if (start + bytes_written == end)
2186 		ret -= end - (to + len);
2187 
2188 out:
2189 	kfree(tmp_buf);
2190 	return ret;
2191 }
2192 
2193 /*
2194  * Write an address range to the nor chip.  Data must be written in
2195  * FLASH_PAGESIZE chunks.  The address range may be any size provided
2196  * it is within the physical boundaries.
2197  */
2198 static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
2199 	size_t *retlen, const u_char *buf)
2200 {
2201 	struct spi_nor *nor = mtd_to_spi_nor(mtd);
2202 	size_t i;
2203 	ssize_t ret;
2204 	u32 page_size = nor->params->page_size;
2205 
2206 	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
2207 
2208 	ret = spi_nor_prep_and_lock_pe(nor, to, len);
2209 	if (ret)
2210 		return ret;
2211 
2212 	for (i = 0; i < len; ) {
2213 		ssize_t written;
2214 		loff_t addr = to + i;
2215 		size_t page_offset = addr & (page_size - 1);
2216 		/* the size of data remaining on the first page */
2217 		size_t page_remain = min_t(size_t, page_size - page_offset, len - i);
2218 
2219 		ret = spi_nor_lock_device(nor);
2220 		if (ret)
2221 			goto write_err;
2222 
2223 		ret = spi_nor_write_enable(nor);
2224 		if (ret) {
2225 			spi_nor_unlock_device(nor);
2226 			goto write_err;
2227 		}
2228 
2229 		if (nor->write_proto == SNOR_PROTO_8_8_8_DTR)
2230 			ret = spi_nor_octal_dtr_write(nor, addr, page_remain,
2231 						      buf + i);
2232 		else
2233 			ret = spi_nor_write_data(nor, addr, page_remain,
2234 						 buf + i);
2235 		spi_nor_unlock_device(nor);
2236 		if (ret < 0)
2237 			goto write_err;
2238 		written = ret;
2239 
2240 		ret = spi_nor_wait_till_ready(nor);
2241 		if (ret)
2242 			goto write_err;
2243 		*retlen += written;
2244 		i += written;
2245 	}
2246 
2247 write_err:
2248 	spi_nor_unlock_and_unprep_pe(nor, to, len);
2249 
2250 	return ret;
2251 }
2252 
2253 static int spi_nor_check(struct spi_nor *nor)
2254 {
2255 	if (!nor->dev ||
2256 	    (!nor->spimem && !nor->controller_ops) ||
2257 	    (!nor->spimem && nor->controller_ops &&
2258 	    (!nor->controller_ops->read ||
2259 	     !nor->controller_ops->write ||
2260 	     !nor->controller_ops->read_reg ||
2261 	     !nor->controller_ops->write_reg))) {
2262 		pr_err("spi-nor: please fill all the necessary fields!\n");
2263 		return -EINVAL;
2264 	}
2265 
2266 	if (nor->spimem && nor->controller_ops) {
2267 		dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n");
2268 		return -EINVAL;
2269 	}
2270 
2271 	return 0;
2272 }
2273 
2274 void
2275 spi_nor_set_read_settings(struct spi_nor_read_command *read,
2276 			  u8 num_mode_clocks,
2277 			  u8 num_wait_states,
2278 			  u8 opcode,
2279 			  enum spi_nor_protocol proto)
2280 {
2281 	read->num_mode_clocks = num_mode_clocks;
2282 	read->num_wait_states = num_wait_states;
2283 	read->opcode = opcode;
2284 	read->proto = proto;
2285 }
2286 
2287 void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
2288 			     enum spi_nor_protocol proto)
2289 {
2290 	pp->opcode = opcode;
2291 	pp->proto = proto;
2292 }
2293 
2294 static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
2295 {
2296 	size_t i;
2297 
2298 	for (i = 0; i < size; i++)
2299 		if (table[i][0] == (int)hwcaps)
2300 			return table[i][1];
2301 
2302 	return -EINVAL;
2303 }
2304 
2305 int spi_nor_hwcaps_read2cmd(u32 hwcaps)
2306 {
2307 	static const int hwcaps_read2cmd[][2] = {
2308 		{ SNOR_HWCAPS_READ,		SNOR_CMD_READ },
2309 		{ SNOR_HWCAPS_READ_FAST,	SNOR_CMD_READ_FAST },
2310 		{ SNOR_HWCAPS_READ_1_1_1_DTR,	SNOR_CMD_READ_1_1_1_DTR },
2311 		{ SNOR_HWCAPS_READ_1_1_2,	SNOR_CMD_READ_1_1_2 },
2312 		{ SNOR_HWCAPS_READ_1_2_2,	SNOR_CMD_READ_1_2_2 },
2313 		{ SNOR_HWCAPS_READ_2_2_2,	SNOR_CMD_READ_2_2_2 },
2314 		{ SNOR_HWCAPS_READ_1_2_2_DTR,	SNOR_CMD_READ_1_2_2_DTR },
2315 		{ SNOR_HWCAPS_READ_1_1_4,	SNOR_CMD_READ_1_1_4 },
2316 		{ SNOR_HWCAPS_READ_1_4_4,	SNOR_CMD_READ_1_4_4 },
2317 		{ SNOR_HWCAPS_READ_4_4_4,	SNOR_CMD_READ_4_4_4 },
2318 		{ SNOR_HWCAPS_READ_1_4_4_DTR,	SNOR_CMD_READ_1_4_4_DTR },
2319 		{ SNOR_HWCAPS_READ_1_1_8,	SNOR_CMD_READ_1_1_8 },
2320 		{ SNOR_HWCAPS_READ_1_8_8,	SNOR_CMD_READ_1_8_8 },
2321 		{ SNOR_HWCAPS_READ_8_8_8,	SNOR_CMD_READ_8_8_8 },
2322 		{ SNOR_HWCAPS_READ_1_8_8_DTR,	SNOR_CMD_READ_1_8_8_DTR },
2323 		{ SNOR_HWCAPS_READ_8_8_8_DTR,	SNOR_CMD_READ_8_8_8_DTR },
2324 	};
2325 
2326 	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
2327 				  ARRAY_SIZE(hwcaps_read2cmd));
2328 }
2329 
2330 int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
2331 {
2332 	static const int hwcaps_pp2cmd[][2] = {
2333 		{ SNOR_HWCAPS_PP,		SNOR_CMD_PP },
2334 		{ SNOR_HWCAPS_PP_1_1_4,		SNOR_CMD_PP_1_1_4 },
2335 		{ SNOR_HWCAPS_PP_1_4_4,		SNOR_CMD_PP_1_4_4 },
2336 		{ SNOR_HWCAPS_PP_4_4_4,		SNOR_CMD_PP_4_4_4 },
2337 		{ SNOR_HWCAPS_PP_1_1_8,		SNOR_CMD_PP_1_1_8 },
2338 		{ SNOR_HWCAPS_PP_1_8_8,		SNOR_CMD_PP_1_8_8 },
2339 		{ SNOR_HWCAPS_PP_8_8_8,		SNOR_CMD_PP_8_8_8 },
2340 		{ SNOR_HWCAPS_PP_8_8_8_DTR,	SNOR_CMD_PP_8_8_8_DTR },
2341 	};
2342 
2343 	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
2344 				  ARRAY_SIZE(hwcaps_pp2cmd));
2345 }
2346 
2347 /**
2348  * spi_nor_spimem_check_op - check if the operation is supported
2349  *                           by controller
2350  *@nor:        pointer to a 'struct spi_nor'
2351  *@op:         pointer to op template to be checked
2352  *
2353  * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
2354  */
2355 static int spi_nor_spimem_check_op(struct spi_nor *nor,
2356 				   struct spi_mem_op *op)
2357 {
2358 	/*
2359 	 * First test with 4 address bytes. The opcode itself might
2360 	 * be a 3B addressing opcode but we don't care, because
2361 	 * SPI controller implementation should not check the opcode,
2362 	 * but just the sequence.
2363 	 */
2364 	op->addr.nbytes = 4;
2365 	if (!spi_mem_supports_op(nor->spimem, op)) {
2366 		if (nor->params->size > SZ_16M)
2367 			return -EOPNOTSUPP;
2368 
2369 		/* If flash size <= 16MB, 3 address bytes are sufficient */
2370 		op->addr.nbytes = 3;
2371 		if (!spi_mem_supports_op(nor->spimem, op))
2372 			return -EOPNOTSUPP;
2373 	}
2374 
2375 	return 0;
2376 }
2377 
/**
 * spi_nor_spimem_check_readop - check if the read op is supported
 *                               by controller
 *@nor:         pointer to a 'struct spi_nor'
 *@read:        pointer to op template to be checked
 *
 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
 */
static int spi_nor_spimem_check_readop(struct spi_nor *nor,
				       const struct spi_nor_read_command *read)
{
	struct spi_mem_op op = SPI_NOR_READ_OP(read->opcode);

	spi_nor_spimem_setup_op(nor, &op, read->proto);

	/* convert the dummy cycles to the number of bytes */
	op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) *
			  op.dummy.buswidth / 8;
	/*
	 * NOTE(review): the DTR doubling below keys off nor->read_proto while
	 * the op itself is built from read->proto — confirm this is intended
	 * when this helper probes read settings that are not yet selected.
	 */
	if (spi_nor_protocol_is_dtr(nor->read_proto))
		op.dummy.nbytes *= 2;

	return spi_nor_spimem_check_op(nor, &op);
}
2401 
2402 /**
2403  * spi_nor_spimem_check_pp - check if the page program op is supported
2404  *                           by controller
2405  *@nor:         pointer to a 'struct spi_nor'
2406  *@pp:          pointer to op template to be checked
2407  *
2408  * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
2409  */
2410 static int spi_nor_spimem_check_pp(struct spi_nor *nor,
2411 				   const struct spi_nor_pp_command *pp)
2412 {
2413 	struct spi_mem_op op = SPI_NOR_PP_OP(pp->opcode);
2414 
2415 	spi_nor_spimem_setup_op(nor, &op, pp->proto);
2416 
2417 	return spi_nor_spimem_check_op(nor, &op);
2418 }
2419 
2420 /**
2421  * spi_nor_spimem_adjust_hwcaps - Find optimal Read/Write protocol
2422  *                                based on SPI controller capabilities
2423  * @nor:        pointer to a 'struct spi_nor'
2424  * @hwcaps:     pointer to resulting capabilities after adjusting
2425  *              according to controller and flash's capability
2426  */
2427 static void
2428 spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
2429 {
2430 	struct spi_nor_flash_parameter *params = nor->params;
2431 	unsigned int cap;
2432 
2433 	/* X-X-X modes are not supported yet, mask them all. */
2434 	*hwcaps &= ~SNOR_HWCAPS_X_X_X;
2435 
2436 	/*
2437 	 * If the reset line is broken, we do not want to enter a stateful
2438 	 * mode.
2439 	 */
2440 	if (nor->flags & SNOR_F_BROKEN_RESET)
2441 		*hwcaps &= ~(SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR);
2442 
2443 	for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
2444 		int rdidx, ppidx;
2445 
2446 		if (!(*hwcaps & BIT(cap)))
2447 			continue;
2448 
2449 		rdidx = spi_nor_hwcaps_read2cmd(BIT(cap));
2450 		if (rdidx >= 0 &&
2451 		    spi_nor_spimem_check_readop(nor, &params->reads[rdidx]))
2452 			*hwcaps &= ~BIT(cap);
2453 
2454 		ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap));
2455 		if (ppidx < 0)
2456 			continue;
2457 
2458 		if (spi_nor_spimem_check_pp(nor,
2459 					    &params->page_programs[ppidx]))
2460 			*hwcaps &= ~BIT(cap);
2461 	}
2462 }
2463 
2464 /**
2465  * spi_nor_set_erase_type() - set a SPI NOR erase type
2466  * @erase:	pointer to a structure that describes a SPI NOR erase type
2467  * @size:	the size of the sector/block erased by the erase type
2468  * @opcode:	the SPI command op code to erase the sector/block
2469  */
2470 void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
2471 			    u8 opcode)
2472 {
2473 	erase->size = size;
2474 	erase->opcode = opcode;
2475 	/* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
2476 	erase->size_shift = ffs(erase->size) - 1;
2477 	erase->size_mask = (1 << erase->size_shift) - 1;
2478 }
2479 
/**
 * spi_nor_mask_erase_type() - mask out a SPI NOR erase type
 * @erase:	pointer to a structure that describes a SPI NOR erase type
 *
 * A zero size marks the erase type as unused; lookups such as
 * spi_nor_select_uniform_erase() skip entries whose size is zero.
 */
void spi_nor_mask_erase_type(struct spi_nor_erase_type *erase)
{
	erase->size = 0;
}
2488 
2489 /**
2490  * spi_nor_init_uniform_erase_map() - Initialize uniform erase map
2491  * @map:		the erase map of the SPI NOR
2492  * @erase_mask:		bitmask encoding erase types that can erase the entire
2493  *			flash memory
2494  * @flash_size:		the spi nor flash memory size
2495  */
2496 void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
2497 				    u8 erase_mask, u64 flash_size)
2498 {
2499 	map->uniform_region.offset = 0;
2500 	map->uniform_region.size = flash_size;
2501 	map->uniform_region.erase_mask = erase_mask;
2502 	map->regions = &map->uniform_region;
2503 	map->n_regions = 1;
2504 }
2505 
2506 int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
2507 			     const struct sfdp_parameter_header *bfpt_header,
2508 			     const struct sfdp_bfpt *bfpt)
2509 {
2510 	int ret;
2511 
2512 	if (nor->manufacturer && nor->manufacturer->fixups &&
2513 	    nor->manufacturer->fixups->post_bfpt) {
2514 		ret = nor->manufacturer->fixups->post_bfpt(nor, bfpt_header,
2515 							   bfpt);
2516 		if (ret)
2517 			return ret;
2518 	}
2519 
2520 	if (nor->info->fixups && nor->info->fixups->post_bfpt)
2521 		return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt);
2522 
2523 	return 0;
2524 }
2525 
2526 static int spi_nor_select_read(struct spi_nor *nor,
2527 			       u32 shared_hwcaps)
2528 {
2529 	int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
2530 	const struct spi_nor_read_command *read;
2531 
2532 	if (best_match < 0)
2533 		return -EINVAL;
2534 
2535 	cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
2536 	if (cmd < 0)
2537 		return -EINVAL;
2538 
2539 	read = &nor->params->reads[cmd];
2540 	nor->read_opcode = read->opcode;
2541 	nor->read_proto = read->proto;
2542 
2543 	/*
2544 	 * In the SPI NOR framework, we don't need to make the difference
2545 	 * between mode clock cycles and wait state clock cycles.
2546 	 * Indeed, the value of the mode clock cycles is used by a QSPI
2547 	 * flash memory to know whether it should enter or leave its 0-4-4
2548 	 * (Continuous Read / XIP) mode.
2549 	 * eXecution In Place is out of the scope of the mtd sub-system.
2550 	 * Hence we choose to merge both mode and wait state clock cycles
2551 	 * into the so called dummy clock cycles.
2552 	 */
2553 	nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
2554 	return 0;
2555 }
2556 
2557 static int spi_nor_select_pp(struct spi_nor *nor,
2558 			     u32 shared_hwcaps)
2559 {
2560 	int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
2561 	const struct spi_nor_pp_command *pp;
2562 
2563 	if (best_match < 0)
2564 		return -EINVAL;
2565 
2566 	cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
2567 	if (cmd < 0)
2568 		return -EINVAL;
2569 
2570 	pp = &nor->params->page_programs[cmd];
2571 	nor->program_opcode = pp->opcode;
2572 	nor->write_proto = pp->proto;
2573 	return 0;
2574 }
2575 
2576 /**
2577  * spi_nor_select_uniform_erase() - select optimum uniform erase type
2578  * @map:		the erase map of the SPI NOR
2579  *
2580  * Once the optimum uniform sector erase command is found, disable all the
2581  * other.
2582  *
2583  * Return: pointer to erase type on success, NULL otherwise.
2584  */
2585 static const struct spi_nor_erase_type *
2586 spi_nor_select_uniform_erase(struct spi_nor_erase_map *map)
2587 {
2588 	const struct spi_nor_erase_type *tested_erase, *erase = NULL;
2589 	int i;
2590 	u8 uniform_erase_type = map->uniform_region.erase_mask;
2591 
2592 	/*
2593 	 * Search for the biggest erase size, except for when compiled
2594 	 * to use 4k erases.
2595 	 */
2596 	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
2597 		if (!(uniform_erase_type & BIT(i)))
2598 			continue;
2599 
2600 		tested_erase = &map->erase_type[i];
2601 
2602 		/* Skip masked erase types. */
2603 		if (!tested_erase->size)
2604 			continue;
2605 
2606 		/*
2607 		 * If the current erase size is the 4k one, stop here,
2608 		 * we have found the right uniform Sector Erase command.
2609 		 */
2610 		if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS) &&
2611 		    tested_erase->size == SZ_4K) {
2612 			erase = tested_erase;
2613 			break;
2614 		}
2615 
2616 		/*
2617 		 * Otherwise, the current erase size is still a valid candidate.
2618 		 * Select the biggest valid candidate.
2619 		 */
2620 		if (!erase && tested_erase->size)
2621 			erase = tested_erase;
2622 			/* keep iterating to find the wanted_size */
2623 	}
2624 
2625 	if (!erase)
2626 		return NULL;
2627 
2628 	/* Disable all other Sector Erase commands. */
2629 	map->uniform_region.erase_mask = BIT(erase - map->erase_type);
2630 	return erase;
2631 }
2632 
2633 static int spi_nor_select_erase(struct spi_nor *nor)
2634 {
2635 	struct spi_nor_erase_map *map = &nor->params->erase_map;
2636 	const struct spi_nor_erase_type *erase = NULL;
2637 	struct mtd_info *mtd = &nor->mtd;
2638 	int i;
2639 
2640 	/*
2641 	 * The previous implementation handling Sector Erase commands assumed
2642 	 * that the SPI flash memory has an uniform layout then used only one
2643 	 * of the supported erase sizes for all Sector Erase commands.
2644 	 * So to be backward compatible, the new implementation also tries to
2645 	 * manage the SPI flash memory as uniform with a single erase sector
2646 	 * size, when possible.
2647 	 */
2648 	if (spi_nor_has_uniform_erase(nor)) {
2649 		erase = spi_nor_select_uniform_erase(map);
2650 		if (!erase)
2651 			return -EINVAL;
2652 		nor->erase_opcode = erase->opcode;
2653 		mtd->erasesize = erase->size;
2654 		return 0;
2655 	}
2656 
2657 	/*
2658 	 * For non-uniform SPI flash memory, set mtd->erasesize to the
2659 	 * maximum erase sector size. No need to set nor->erase_opcode.
2660 	 */
2661 	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
2662 		if (map->erase_type[i].size) {
2663 			erase = &map->erase_type[i];
2664 			break;
2665 		}
2666 	}
2667 
2668 	if (!erase)
2669 		return -EINVAL;
2670 
2671 	mtd->erasesize = erase->size;
2672 	return 0;
2673 }
2674 
2675 static int spi_nor_set_addr_nbytes(struct spi_nor *nor)
2676 {
2677 	if (nor->params->addr_nbytes) {
2678 		nor->addr_nbytes = nor->params->addr_nbytes;
2679 	} else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) {
2680 		/*
2681 		 * In 8D-8D-8D mode, one byte takes half a cycle to transfer. So
2682 		 * in this protocol an odd addr_nbytes cannot be used because
2683 		 * then the address phase would only span a cycle and a half.
2684 		 * Half a cycle would be left over. We would then have to start
2685 		 * the dummy phase in the middle of a cycle and so too the data
2686 		 * phase, and we will end the transaction with half a cycle left
2687 		 * over.
2688 		 *
2689 		 * Force all 8D-8D-8D flashes to use an addr_nbytes of 4 to
2690 		 * avoid this situation.
2691 		 */
2692 		nor->addr_nbytes = 4;
2693 	} else if (nor->info->addr_nbytes) {
2694 		nor->addr_nbytes = nor->info->addr_nbytes;
2695 	} else {
2696 		nor->addr_nbytes = 3;
2697 	}
2698 
2699 	if (nor->addr_nbytes == 3 && nor->params->size > 0x1000000) {
2700 		/* enable 4-byte addressing if the device exceeds 16MiB */
2701 		nor->addr_nbytes = 4;
2702 	}
2703 
2704 	if (nor->addr_nbytes > SPI_NOR_MAX_ADDR_NBYTES) {
2705 		dev_dbg(nor->dev, "The number of address bytes is too large: %u\n",
2706 			nor->addr_nbytes);
2707 		return -EINVAL;
2708 	}
2709 
2710 	/* Set 4byte opcodes when possible. */
2711 	if (nor->addr_nbytes == 4 && nor->flags & SNOR_F_4B_OPCODES &&
2712 	    !(nor->flags & SNOR_F_HAS_4BAIT))
2713 		spi_nor_set_4byte_opcodes(nor);
2714 
2715 	return 0;
2716 }
2717 
2718 static int spi_nor_setup(struct spi_nor *nor,
2719 			 const struct spi_nor_hwcaps *hwcaps)
2720 {
2721 	struct spi_nor_flash_parameter *params = nor->params;
2722 	u32 ignored_mask, shared_mask;
2723 	int err;
2724 
2725 	/*
2726 	 * Keep only the hardware capabilities supported by both the SPI
2727 	 * controller and the SPI flash memory.
2728 	 */
2729 	shared_mask = hwcaps->mask & params->hwcaps.mask;
2730 
2731 	if (nor->spimem) {
2732 		/*
2733 		 * When called from spi_nor_probe(), all caps are set and we
2734 		 * need to discard some of them based on what the SPI
2735 		 * controller actually supports (using spi_mem_supports_op()).
2736 		 */
2737 		spi_nor_spimem_adjust_hwcaps(nor, &shared_mask);
2738 	} else {
2739 		/*
2740 		 * SPI n-n-n protocols are not supported when the SPI
2741 		 * controller directly implements the spi_nor interface.
2742 		 * Yet another reason to switch to spi-mem.
2743 		 */
2744 		ignored_mask = SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR;
2745 		if (shared_mask & ignored_mask) {
2746 			dev_dbg(nor->dev,
2747 				"SPI n-n-n protocols are not supported.\n");
2748 			shared_mask &= ~ignored_mask;
2749 		}
2750 	}
2751 
2752 	/* Select the (Fast) Read command. */
2753 	err = spi_nor_select_read(nor, shared_mask);
2754 	if (err) {
2755 		dev_dbg(nor->dev,
2756 			"can't select read settings supported by both the SPI controller and memory.\n");
2757 		return err;
2758 	}
2759 
2760 	/* Select the Page Program command. */
2761 	err = spi_nor_select_pp(nor, shared_mask);
2762 	if (err) {
2763 		dev_dbg(nor->dev,
2764 			"can't select write settings supported by both the SPI controller and memory.\n");
2765 		return err;
2766 	}
2767 
2768 	/* Select the Sector Erase command. */
2769 	err = spi_nor_select_erase(nor);
2770 	if (err) {
2771 		dev_dbg(nor->dev,
2772 			"can't select erase settings supported by both the SPI controller and memory.\n");
2773 		return err;
2774 	}
2775 
2776 	return spi_nor_set_addr_nbytes(nor);
2777 }
2778 
2779 /**
2780  * spi_nor_manufacturer_init_params() - Initialize the flash's parameters and
2781  * settings based on MFR register and ->default_init() hook.
2782  * @nor:	pointer to a 'struct spi_nor'.
2783  */
2784 static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
2785 {
2786 	if (nor->manufacturer && nor->manufacturer->fixups &&
2787 	    nor->manufacturer->fixups->default_init)
2788 		nor->manufacturer->fixups->default_init(nor);
2789 
2790 	if (nor->info->fixups && nor->info->fixups->default_init)
2791 		nor->info->fixups->default_init(nor);
2792 }
2793 
2794 /**
2795  * spi_nor_no_sfdp_init_params() - Initialize the flash's parameters and
2796  * settings based on nor->info->sfdp_flags. This method should be called only by
2797  * flashes that do not define SFDP tables. If the flash supports SFDP but the
2798  * information is wrong and the settings from this function can not be retrieved
2799  * by parsing SFDP, one should instead use the fixup hooks and update the wrong
2800  * bits.
2801  * @nor:	pointer to a 'struct spi_nor'.
2802  */
2803 static void spi_nor_no_sfdp_init_params(struct spi_nor *nor)
2804 {
2805 	struct spi_nor_flash_parameter *params = nor->params;
2806 	struct spi_nor_erase_map *map = &params->erase_map;
2807 	const struct flash_info *info = nor->info;
2808 	const u8 no_sfdp_flags = info->no_sfdp_flags;
2809 	u8 i, erase_mask;
2810 
2811 	if (no_sfdp_flags & SPI_NOR_DUAL_READ) {
2812 		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
2813 		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
2814 					  0, 8, SPINOR_OP_READ_1_1_2,
2815 					  SNOR_PROTO_1_1_2);
2816 	}
2817 
2818 	if (no_sfdp_flags & SPI_NOR_QUAD_READ) {
2819 		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
2820 		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
2821 					  0, 8, SPINOR_OP_READ_1_1_4,
2822 					  SNOR_PROTO_1_1_4);
2823 	}
2824 
2825 	if (no_sfdp_flags & SPI_NOR_OCTAL_READ) {
2826 		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
2827 		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
2828 					  0, 8, SPINOR_OP_READ_1_1_8,
2829 					  SNOR_PROTO_1_1_8);
2830 	}
2831 
2832 	if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_READ) {
2833 		params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR;
2834 		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_8_8_8_DTR],
2835 					  0, 20, SPINOR_OP_READ_FAST,
2836 					  SNOR_PROTO_8_8_8_DTR);
2837 	}
2838 
2839 	if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_PP) {
2840 		params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR;
2841 		/*
2842 		 * Since xSPI Page Program opcode is backward compatible with
2843 		 * Legacy SPI, use Legacy SPI opcode there as well.
2844 		 */
2845 		spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_8_8_8_DTR],
2846 					SPINOR_OP_PP, SNOR_PROTO_8_8_8_DTR);
2847 	}
2848 
2849 	/*
2850 	 * Sector Erase settings. Sort Erase Types in ascending order, with the
2851 	 * smallest erase size starting at BIT(0).
2852 	 */
2853 	erase_mask = 0;
2854 	i = 0;
2855 	if (no_sfdp_flags & SECT_4K) {
2856 		erase_mask |= BIT(i);
2857 		spi_nor_set_erase_type(&map->erase_type[i], 4096u,
2858 				       SPINOR_OP_BE_4K);
2859 		i++;
2860 	}
2861 	erase_mask |= BIT(i);
2862 	spi_nor_set_erase_type(&map->erase_type[i],
2863 			       info->sector_size ?: SPI_NOR_DEFAULT_SECTOR_SIZE,
2864 			       SPINOR_OP_SE);
2865 	spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
2866 }
2867 
2868 /**
2869  * spi_nor_init_flags() - Initialize NOR flags for settings that are not defined
2870  * in the JESD216 SFDP standard, thus can not be retrieved when parsing SFDP.
2871  * @nor:	pointer to a 'struct spi_nor'
2872  */
2873 static void spi_nor_init_flags(struct spi_nor *nor)
2874 {
2875 	struct device_node *np = spi_nor_get_flash_node(nor);
2876 	const u16 flags = nor->info->flags;
2877 
2878 	if (of_property_read_bool(np, "broken-flash-reset"))
2879 		nor->flags |= SNOR_F_BROKEN_RESET;
2880 
2881 	if (of_property_read_bool(np, "no-wp"))
2882 		nor->flags |= SNOR_F_NO_WP;
2883 
2884 	if (flags & SPI_NOR_SWP_IS_VOLATILE)
2885 		nor->flags |= SNOR_F_SWP_IS_VOLATILE;
2886 
2887 	if (flags & SPI_NOR_HAS_LOCK)
2888 		nor->flags |= SNOR_F_HAS_LOCK;
2889 
2890 	if (flags & SPI_NOR_HAS_TB) {
2891 		nor->flags |= SNOR_F_HAS_SR_TB;
2892 		if (flags & SPI_NOR_TB_SR_BIT6)
2893 			nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
2894 	}
2895 
2896 	if (flags & SPI_NOR_4BIT_BP) {
2897 		nor->flags |= SNOR_F_HAS_4BIT_BP;
2898 		if (flags & SPI_NOR_BP3_SR_BIT6)
2899 			nor->flags |= SNOR_F_HAS_SR_BP3_BIT6;
2900 	}
2901 
2902 	if (flags & SPI_NOR_RWW && nor->params->n_banks > 1 &&
2903 	    !nor->controller_ops)
2904 		nor->flags |= SNOR_F_RWW;
2905 }
2906 
2907 /**
2908  * spi_nor_init_fixup_flags() - Initialize NOR flags for settings that can not
2909  * be discovered by SFDP for this particular flash because the SFDP table that
2910  * indicates this support is not defined in the flash. In case the table for
2911  * this support is defined but has wrong values, one should instead use a
2912  * post_sfdp() hook to set the SNOR_F equivalent flag.
2913  * @nor:       pointer to a 'struct spi_nor'
2914  */
2915 static void spi_nor_init_fixup_flags(struct spi_nor *nor)
2916 {
2917 	const u8 fixup_flags = nor->info->fixup_flags;
2918 
2919 	if (fixup_flags & SPI_NOR_4B_OPCODES)
2920 		nor->flags |= SNOR_F_4B_OPCODES;
2921 
2922 	if (fixup_flags & SPI_NOR_IO_MODE_EN_VOLATILE)
2923 		nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;
2924 }
2925 
/**
 * spi_nor_late_init_params() - Late initialization of default flash parameters.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Used to initialize flash parameters that are not declared in the JESD216
 * SFDP standard, or where SFDP tables are not defined at all.
 * Will replace the spi_nor_manufacturer_init_params() method.
 *
 * Note the ordering below is deliberate: spi_nor_init_flags() must run after
 * the manufacturer late_init hook but before the flash_info late_init hook,
 * because some flashes' late_init hooks depend on the flags it sets.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_late_init_params(struct spi_nor *nor)
{
	struct spi_nor_flash_parameter *params = nor->params;
	int ret;

	if (nor->manufacturer && nor->manufacturer->fixups &&
	    nor->manufacturer->fixups->late_init) {
		ret = nor->manufacturer->fixups->late_init(nor);
		if (ret)
			return ret;
	}

	/* Needed by some flashes late_init hooks. */
	spi_nor_init_flags(nor);

	if (nor->info->fixups && nor->info->fixups->late_init) {
		ret = nor->info->fixups->late_init(nor);
		if (ret)
			return ret;
	}

	/* Fall back to full chip erase when no die erase opcode was set. */
	if (!nor->params->die_erase_opcode)
		nor->params->die_erase_opcode = SPINOR_OP_CHIP_ERASE;

	/* Default method kept for backward compatibility. */
	if (!params->set_4byte_addr_mode)
		params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_brwr;

	spi_nor_init_fixup_flags(nor);

	/*
	 * NOR protection support. When locking_ops are not provided, we pick
	 * the default ones.
	 */
	if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops)
		spi_nor_init_default_locking_ops(nor);

	/* Derive the per-bank size once the final n_banks is known. */
	if (params->n_banks > 1)
		params->bank_size = div_u64(params->size, params->n_banks);

	return 0;
}
2976 
2977 /**
2978  * spi_nor_sfdp_init_params_deprecated() - Deprecated way of initializing flash
2979  * parameters and settings based on JESD216 SFDP standard.
2980  * @nor:	pointer to a 'struct spi_nor'.
2981  *
2982  * The method has a roll-back mechanism: in case the SFDP parsing fails, the
2983  * legacy flash parameters and settings will be restored.
2984  */
2985 static void spi_nor_sfdp_init_params_deprecated(struct spi_nor *nor)
2986 {
2987 	struct spi_nor_flash_parameter sfdp_params;
2988 
2989 	memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));
2990 
2991 	if (spi_nor_parse_sfdp(nor)) {
2992 		memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
2993 		nor->flags &= ~SNOR_F_4B_OPCODES;
2994 	}
2995 }
2996 
2997 /**
2998  * spi_nor_init_params_deprecated() - Deprecated way of initializing flash
2999  * parameters and settings.
3000  * @nor:	pointer to a 'struct spi_nor'.
3001  *
3002  * The method assumes that flash doesn't support SFDP so it initializes flash
3003  * parameters in spi_nor_no_sfdp_init_params() which later on can be overwritten
3004  * when parsing SFDP, if supported.
3005  */
3006 static void spi_nor_init_params_deprecated(struct spi_nor *nor)
3007 {
3008 	spi_nor_no_sfdp_init_params(nor);
3009 
3010 	spi_nor_manufacturer_init_params(nor);
3011 
3012 	if (nor->info->no_sfdp_flags & (SPI_NOR_DUAL_READ |
3013 					SPI_NOR_QUAD_READ |
3014 					SPI_NOR_OCTAL_READ |
3015 					SPI_NOR_OCTAL_DTR_READ))
3016 		spi_nor_sfdp_init_params_deprecated(nor);
3017 }
3018 
3019 /**
3020  * spi_nor_init_default_params() - Default initialization of flash parameters
3021  * and settings. Done for all flashes, regardless is they define SFDP tables
3022  * or not.
3023  * @nor:	pointer to a 'struct spi_nor'.
3024  */
3025 static void spi_nor_init_default_params(struct spi_nor *nor)
3026 {
3027 	struct spi_nor_flash_parameter *params = nor->params;
3028 	const struct flash_info *info = nor->info;
3029 	struct device_node *np = spi_nor_get_flash_node(nor);
3030 
3031 	params->quad_enable = spi_nor_sr2_bit1_quad_enable;
3032 	params->otp.org = info->otp;
3033 
3034 	/* Default to 16-bit Write Status (01h) Command */
3035 	nor->flags |= SNOR_F_HAS_16BIT_SR;
3036 
3037 	/* Set SPI NOR sizes. */
3038 	params->writesize = 1;
3039 	params->size = info->size;
3040 	params->bank_size = params->size;
3041 	params->page_size = info->page_size ?: SPI_NOR_DEFAULT_PAGE_SIZE;
3042 	params->n_banks = info->n_banks ?: SPI_NOR_DEFAULT_N_BANKS;
3043 
3044 	/* Default to Fast Read for non-DT and enable it if requested by DT. */
3045 	if (!np || of_property_read_bool(np, "m25p,fast-read"))
3046 		params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
3047 
3048 	/* (Fast) Read settings. */
3049 	params->hwcaps.mask |= SNOR_HWCAPS_READ;
3050 	spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
3051 				  0, 0, SPINOR_OP_READ,
3052 				  SNOR_PROTO_1_1_1);
3053 
3054 	if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
3055 		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
3056 					  0, 8, SPINOR_OP_READ_FAST,
3057 					  SNOR_PROTO_1_1_1);
3058 	/* Page Program settings. */
3059 	params->hwcaps.mask |= SNOR_HWCAPS_PP;
3060 	spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
3061 				SPINOR_OP_PP, SNOR_PROTO_1_1_1);
3062 
3063 	if (info->flags & SPI_NOR_QUAD_PP) {
3064 		params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4;
3065 		spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_1_4],
3066 					SPINOR_OP_PP_1_1_4, SNOR_PROTO_1_1_4);
3067 	}
3068 }
3069 
3070 /**
3071  * spi_nor_init_params() - Initialize the flash's parameters and settings.
3072  * @nor:	pointer to a 'struct spi_nor'.
3073  *
3074  * The flash parameters and settings are initialized based on a sequence of
3075  * calls that are ordered by priority:
3076  *
3077  * 1/ Default flash parameters initialization. The initializations are done
3078  *    based on nor->info data:
3079  *		spi_nor_info_init_params()
3080  *
3081  * which can be overwritten by:
3082  * 2/ Manufacturer flash parameters initialization. The initializations are
3083  *    done based on MFR register, or when the decisions can not be done solely
3084  *    based on MFR, by using specific flash_info tweeks, ->default_init():
3085  *		spi_nor_manufacturer_init_params()
3086  *
3087  * which can be overwritten by:
3088  * 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and
3089  *    should be more accurate that the above.
3090  *		spi_nor_parse_sfdp() or spi_nor_no_sfdp_init_params()
3091  *
3092  *    Please note that there is a ->post_bfpt() fixup hook that can overwrite
3093  *    the flash parameters and settings immediately after parsing the Basic
3094  *    Flash Parameter Table.
3095  *    spi_nor_post_sfdp_fixups() is called after the SFDP tables are parsed.
3096  *    It is used to tweak various flash parameters when information provided
3097  *    by the SFDP tables are wrong.
3098  *
3099  * which can be overwritten by:
3100  * 4/ Late flash parameters initialization, used to initialize flash
3101  * parameters that are not declared in the JESD216 SFDP standard, or where SFDP
3102  * tables are not defined at all.
3103  *		spi_nor_late_init_params()
3104  *
3105  * Return: 0 on success, -errno otherwise.
3106  */
3107 static int spi_nor_init_params(struct spi_nor *nor)
3108 {
3109 	int ret;
3110 
3111 	nor->params = devm_kzalloc(nor->dev, sizeof(*nor->params), GFP_KERNEL);
3112 	if (!nor->params)
3113 		return -ENOMEM;
3114 
3115 	spi_nor_init_default_params(nor);
3116 
3117 	if (spi_nor_needs_sfdp(nor)) {
3118 		ret = spi_nor_parse_sfdp(nor);
3119 		if (ret) {
3120 			dev_err(nor->dev, "BFPT parsing failed. Please consider using SPI_NOR_SKIP_SFDP when declaring the flash\n");
3121 			return ret;
3122 		}
3123 	} else if (nor->info->no_sfdp_flags & SPI_NOR_SKIP_SFDP) {
3124 		spi_nor_no_sfdp_init_params(nor);
3125 	} else {
3126 		spi_nor_init_params_deprecated(nor);
3127 	}
3128 
3129 	ret = spi_nor_late_init_params(nor);
3130 	if (ret)
3131 		return ret;
3132 
3133 	if (WARN_ON(!is_power_of_2(nor->params->page_size)))
3134 		return -EINVAL;
3135 
3136 	return 0;
3137 }
3138 
/**
 * spi_nor_set_octal_dtr() - enable or disable Octal DTR I/O.
 * @nor:		pointer to a 'struct spi_nor'
 * @enable:		whether to enable or disable Octal DTR
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_set_octal_dtr(struct spi_nor *nor, bool enable)
{
	int ret;

	/* Nothing to do if the flash provides no mode-switch hook. */
	if (!nor->params->set_octal_dtr)
		return 0;

	/* Only switch when both read and write were set up for 8D-8D-8D. */
	if (!(nor->read_proto == SNOR_PROTO_8_8_8_DTR &&
	      nor->write_proto == SNOR_PROTO_8_8_8_DTR))
		return 0;

	/* Only volatile I/O mode switching is supported here. */
	if (!(nor->flags & SNOR_F_IO_MODE_EN_VOLATILE))
		return 0;

	ret = nor->params->set_octal_dtr(nor, enable);
	if (ret)
		return ret;

	/* Register accesses now follow the newly selected protocol. */
	if (enable)
		nor->reg_proto = SNOR_PROTO_8_8_8_DTR;
	else
		nor->reg_proto = SNOR_PROTO_1_1_1;

	return 0;
}
3170 
3171 /**
3172  * spi_nor_quad_enable() - enable Quad I/O if needed.
3173  * @nor:                pointer to a 'struct spi_nor'
3174  *
3175  * Return: 0 on success, -errno otherwise.
3176  */
3177 static int spi_nor_quad_enable(struct spi_nor *nor)
3178 {
3179 	if (!nor->params->quad_enable)
3180 		return 0;
3181 
3182 	if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 ||
3183 	      spi_nor_get_protocol_width(nor->write_proto) == 4))
3184 		return 0;
3185 
3186 	return nor->params->quad_enable(nor);
3187 }
3188 
3189 /**
3190  * spi_nor_set_4byte_addr_mode() - Set address mode.
3191  * @nor:                pointer to a 'struct spi_nor'.
3192  * @enable:             enable/disable 4 byte address mode.
3193  *
3194  * Return: 0 on success, -errno otherwise.
3195  */
3196 int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
3197 {
3198 	struct spi_nor_flash_parameter *params = nor->params;
3199 	int ret;
3200 
3201 	if (enable) {
3202 		/*
3203 		 * If the RESET# pin isn't hooked up properly, or the system
3204 		 * otherwise doesn't perform a reset command in the boot
3205 		 * sequence, it's impossible to 100% protect against unexpected
3206 		 * reboots (e.g., crashes). Warn the user (or hopefully, system
3207 		 * designer) that this is bad.
3208 		 */
3209 		WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
3210 			  "enabling reset hack; may not recover from unexpected reboots\n");
3211 	}
3212 
3213 	ret = params->set_4byte_addr_mode(nor, enable);
3214 	if (ret && ret != -EOPNOTSUPP)
3215 		return ret;
3216 
3217 	if (enable) {
3218 		params->addr_nbytes = 4;
3219 		params->addr_mode_nbytes = 4;
3220 	} else {
3221 		params->addr_nbytes = 3;
3222 		params->addr_mode_nbytes = 3;
3223 	}
3224 
3225 	return 0;
3226 }
3227 
/* Bring the flash into the operating state the driver was configured for. */
static int spi_nor_init(struct spi_nor *nor)
{
	int err;

	err = spi_nor_set_octal_dtr(nor, true);
	if (err) {
		dev_dbg(nor->dev, "octal mode not supported\n");
		return err;
	}

	err = spi_nor_quad_enable(nor);
	if (err) {
		dev_dbg(nor->dev, "quad mode not supported\n");
		return err;
	}

	/*
	 * Some SPI NOR flashes are write protected by default after a power-on
	 * reset cycle, in order to avoid inadvertent writes during power-up.
	 * Backward compatibility imposes to unlock the entire flash memory
	 * array at power-up by default. Depending on the kernel configuration
	 * (1) do nothing, (2) always unlock the entire flash array or (3)
	 * unlock the entire flash array only when the software write
	 * protection bits are volatile. The latter is indicated by
	 * SNOR_F_SWP_IS_VOLATILE.
	 */
	if (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE) ||
	    (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE) &&
	     nor->flags & SNOR_F_SWP_IS_VOLATILE))
		spi_nor_try_unlock_all(nor);

	/*
	 * Enter stateful 4-byte address mode only when 4-byte addressing is
	 * needed and neither dedicated 4B opcodes nor 8D-8D-8D mode cover it.
	 */
	if (nor->addr_nbytes == 4 &&
	    nor->read_proto != SNOR_PROTO_8_8_8_DTR &&
	    !(nor->flags & SNOR_F_4B_OPCODES))
		return spi_nor_set_4byte_addr_mode(nor, true);

	return 0;
}
3266 
3267 /**
3268  * spi_nor_soft_reset() - Perform a software reset
3269  * @nor:	pointer to 'struct spi_nor'
3270  *
3271  * Performs a "Soft Reset and Enter Default Protocol Mode" sequence which resets
3272  * the device to its power-on-reset state. This is useful when the software has
3273  * made some changes to device (volatile) registers and needs to reset it before
3274  * shutting down, for example.
3275  *
3276  * Not every flash supports this sequence. The same set of opcodes might be used
3277  * for some other operation on a flash that does not support this. Support for
3278  * this sequence can be discovered via SFDP in the BFPT table.
3279  *
3280  * Return: 0 on success, -errno otherwise.
3281  */
3282 static void spi_nor_soft_reset(struct spi_nor *nor)
3283 {
3284 	struct spi_mem_op op;
3285 	int ret;
3286 
3287 	op = (struct spi_mem_op)SPINOR_SRSTEN_OP;
3288 
3289 	spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
3290 
3291 	ret = spi_mem_exec_op(nor->spimem, &op);
3292 	if (ret) {
3293 		if (ret != -EOPNOTSUPP)
3294 			dev_warn(nor->dev, "Software reset failed: %d\n", ret);
3295 		return;
3296 	}
3297 
3298 	op = (struct spi_mem_op)SPINOR_SRST_OP;
3299 
3300 	spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
3301 
3302 	ret = spi_mem_exec_op(nor->spimem, &op);
3303 	if (ret) {
3304 		dev_warn(nor->dev, "Software reset failed: %d\n", ret);
3305 		return;
3306 	}
3307 
3308 	/*
3309 	 * Software Reset is not instant, and the delay varies from flash to
3310 	 * flash. Looking at a few flashes, most range somewhere below 100
3311 	 * microseconds. So, sleep for a range of 200-400 us.
3312 	 */
3313 	usleep_range(SPI_NOR_SRST_SLEEP_MIN, SPI_NOR_SRST_SLEEP_MAX);
3314 }
3315 
3316 /* mtd suspend handler */
3317 static int spi_nor_suspend(struct mtd_info *mtd)
3318 {
3319 	struct spi_nor *nor = mtd_to_spi_nor(mtd);
3320 	int ret;
3321 
3322 	/* Disable octal DTR mode if we enabled it. */
3323 	ret = spi_nor_set_octal_dtr(nor, false);
3324 	if (ret)
3325 		dev_err(nor->dev, "suspend() failed\n");
3326 
3327 	return ret;
3328 }
3329 
3330 /* mtd resume handler */
3331 static void spi_nor_resume(struct mtd_info *mtd)
3332 {
3333 	struct spi_nor *nor = mtd_to_spi_nor(mtd);
3334 	struct device *dev = nor->dev;
3335 	int ret;
3336 
3337 	/* re-initialize the nor chip */
3338 	ret = spi_nor_init(nor);
3339 	if (ret)
3340 		dev_err(dev, "resume() failed\n");
3341 }
3342 
3343 static int spi_nor_get_device(struct mtd_info *mtd)
3344 {
3345 	struct mtd_info *master = mtd_get_master(mtd);
3346 	struct spi_nor *nor = mtd_to_spi_nor(master);
3347 	struct device *dev;
3348 
3349 	if (nor->spimem)
3350 		dev = nor->spimem->spi->controller->dev.parent;
3351 	else
3352 		dev = nor->dev;
3353 
3354 	if (!try_module_get(dev->driver->owner))
3355 		return -ENODEV;
3356 
3357 	return 0;
3358 }
3359 
3360 static void spi_nor_put_device(struct mtd_info *mtd)
3361 {
3362 	struct mtd_info *master = mtd_get_master(mtd);
3363 	struct spi_nor *nor = mtd_to_spi_nor(master);
3364 	struct device *dev;
3365 
3366 	if (nor->spimem)
3367 		dev = nor->spimem->spi->controller->dev.parent;
3368 	else
3369 		dev = nor->dev;
3370 
3371 	module_put(dev->driver->owner);
3372 }
3373 
/*
 * Put the flash back into a state a subsequent boot stage can cope with.
 * Called from the remove() and shutdown() paths.
 */
static void spi_nor_restore(struct spi_nor *nor)
{
	int ret;

	/* restore the addressing mode */
	if (nor->addr_nbytes == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
	    nor->flags & SNOR_F_BROKEN_RESET) {
		ret = spi_nor_set_4byte_addr_mode(nor, false);
		if (ret)
			/*
			 * Do not stop the execution in the hope that the flash
			 * will default to the 3-byte address mode after the
			 * software reset.
			 */
			dev_err(nor->dev, "Failed to exit 4-byte address mode, err = %d\n", ret);
	}

	/* Issue the soft reset sequence only when the flash supports it. */
	if (nor->flags & SNOR_F_SOFT_RESET)
		spi_nor_soft_reset(nor);
}
3394 
3395 static const struct flash_info *spi_nor_match_name(struct spi_nor *nor,
3396 						   const char *name)
3397 {
3398 	unsigned int i, j;
3399 
3400 	for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
3401 		for (j = 0; j < manufacturers[i]->nparts; j++) {
3402 			if (manufacturers[i]->parts[j].name &&
3403 			    !strcmp(name, manufacturers[i]->parts[j].name)) {
3404 				nor->manufacturer = manufacturers[i];
3405 				return &manufacturers[i]->parts[j];
3406 			}
3407 		}
3408 	}
3409 
3410 	return NULL;
3411 }
3412 
/*
 * Resolve the flash_info entry for this flash, by name lookup and/or JEDEC
 * auto-detection. Returns an ERR_PTR() when detection fails.
 */
static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
						       const char *name)
{
	const struct flash_info *info = NULL;

	if (name)
		info = spi_nor_match_name(nor, name);
	/*
	 * Auto-detect if chip name wasn't specified or not found, or the chip
	 * has an ID. If the chip supposedly has an ID, we also do an
	 * auto-detection to compare it later.
	 */
	if (!info || info->id) {
		const struct flash_info *jinfo;

		jinfo = spi_nor_detect(nor);
		if (IS_ERR(jinfo))
			return jinfo;

		/*
		 * If caller has specified name of flash model that can normally
		 * be detected using JEDEC, let's verify it.
		 */
		if (info && jinfo != info)
			dev_warn(nor->dev, "found %s, expected %s\n",
				 jinfo->name, info->name);

		/* If info was set before, JEDEC knows better. */
		info = jinfo;
	}

	return info;
}
3446 
3447 static u32
3448 spi_nor_get_region_erasesize(const struct spi_nor_erase_region *region,
3449 			     const struct spi_nor_erase_type *erase_type)
3450 {
3451 	int i;
3452 
3453 	if (region->overlaid)
3454 		return region->size;
3455 
3456 	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
3457 		if (region->erase_mask & BIT(i))
3458 			return erase_type[i].size;
3459 	}
3460 
3461 	return 0;
3462 }
3463 
/*
 * Translate the SPI NOR erase map into the mtd_erase_region_info array that
 * the MTD layer expects for non-uniform flashes.
 */
static int spi_nor_set_mtd_eraseregions(struct spi_nor *nor)
{
	const struct spi_nor_erase_map *map = &nor->params->erase_map;
	const struct spi_nor_erase_region *region = map->regions;
	struct mtd_erase_region_info *mtd_region;
	struct mtd_info *mtd = &nor->mtd;
	u32 erasesize, i;

	mtd_region = devm_kcalloc(nor->dev, map->n_regions, sizeof(*mtd_region),
				  GFP_KERNEL);
	if (!mtd_region)
		return -ENOMEM;

	for (i = 0; i < map->n_regions; i++) {
		erasesize = spi_nor_get_region_erasesize(&region[i],
							 map->erase_type);
		/* A region with no usable erase type is a broken map. */
		if (!erasesize)
			return -EINVAL;

		mtd_region[i].erasesize = erasesize;
		mtd_region[i].numblocks = div_u64(region[i].size, erasesize);
		mtd_region[i].offset = region[i].offset;
	}

	mtd->numeraseregions = map->n_regions;
	mtd->eraseregions = mtd_region;

	return 0;
}
3493 
/* Fill in the mtd_info callbacks and geometry from the NOR parameters. */
static int spi_nor_set_mtd_info(struct spi_nor *nor)
{
	struct mtd_info *mtd = &nor->mtd;
	struct device *dev = nor->dev;

	spi_nor_set_mtd_locking_ops(nor);
	spi_nor_set_mtd_otp_ops(nor);

	mtd->dev.parent = dev;
	if (!mtd->name)
		mtd->name = dev_name(dev);
	mtd->type = MTD_NORFLASH;
	mtd->flags = MTD_CAP_NORFLASH;
	/* Unset BIT_WRITEABLE to enable JFFS2 write buffer for ECC'd NOR */
	if (nor->flags & SNOR_F_ECC)
		mtd->flags &= ~MTD_BIT_WRITEABLE;
	if (nor->info->flags & SPI_NOR_NO_ERASE)
		mtd->flags |= MTD_NO_ERASE;
	else
		mtd->_erase = spi_nor_erase;
	mtd->writesize = nor->params->writesize;
	mtd->writebufsize = nor->params->page_size;
	mtd->size = nor->params->size;
	mtd->_read = spi_nor_read;
	/* Might be already set by some SST flashes. */
	if (!mtd->_write)
		mtd->_write = spi_nor_write;
	mtd->_suspend = spi_nor_suspend;
	mtd->_resume = spi_nor_resume;
	mtd->_get_device = spi_nor_get_device;
	mtd->_put_device = spi_nor_put_device;

	/* Non-uniform flashes also need per-region erase info. */
	if (!spi_nor_has_uniform_erase(nor))
		return spi_nor_set_mtd_eraseregions(nor);

	return 0;
}
3531 
/*
 * Pulse the optional "reset" GPIO to hardware-reset the flash. A missing
 * GPIO is not an error; only a lookup failure is propagated.
 */
static int spi_nor_hw_reset(struct spi_nor *nor)
{
	struct gpio_desc *reset;

	reset = devm_gpiod_get_optional(nor->dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR_OR_NULL(reset))
		return PTR_ERR_OR_ZERO(reset);

	/*
	 * Experimental delay values by looking at different flash device
	 * vendors datasheets.
	 */
	usleep_range(1, 5);
	gpiod_set_value_cansleep(reset, 1);
	usleep_range(100, 150);
	gpiod_set_value_cansleep(reset, 0);
	usleep_range(1000, 1200);

	return 0;
}
3552 
/**
 * spi_nor_scan() - Detect, identify and initialize a SPI NOR flash.
 * @nor:	pointer to a 'struct spi_nor'
 * @name:	flash model name, or NULL to rely on JEDEC auto-detection
 * @hwcaps:	hardware capabilities supported by the host controller
 *
 * Probes the flash, initializes its parameters (flash_info/SFDP), configures
 * the SPI protocols and opcodes, runs the flash init sequence and fills in
 * the mtd_info. No mtd_info fields may be used before this returns.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_scan(struct spi_nor *nor, const char *name,
		 const struct spi_nor_hwcaps *hwcaps)
{
	const struct flash_info *info;
	struct device *dev = nor->dev;
	int ret;

	ret = spi_nor_check(nor);
	if (ret)
		return ret;

	/* Reset SPI protocol for all commands. */
	nor->reg_proto = SNOR_PROTO_1_1_1;
	nor->read_proto = SNOR_PROTO_1_1_1;
	nor->write_proto = SNOR_PROTO_1_1_1;

	/*
	 * We need the bounce buffer early to read/write registers when going
	 * through the spi-mem layer (buffers have to be DMA-able).
	 * For spi-mem drivers, we'll reallocate a new buffer if
	 * nor->params->page_size turns out to be greater than PAGE_SIZE (which
	 * shouldn't happen before long since NOR pages are usually less
	 * than 1KB) after spi_nor_scan() returns.
	 */
	nor->bouncebuf_size = PAGE_SIZE;
	nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
				      GFP_KERNEL);
	if (!nor->bouncebuf)
		return -ENOMEM;

	ret = spi_nor_hw_reset(nor);
	if (ret)
		return ret;

	info = spi_nor_get_flash_info(nor, name);
	if (IS_ERR(info))
		return PTR_ERR(info);

	nor->info = info;

	mutex_init(&nor->lock);

	/* Init flash parameters based on flash_info struct and SFDP */
	ret = spi_nor_init_params(nor);
	if (ret)
		return ret;

	if (spi_nor_use_parallel_locking(nor))
		init_waitqueue_head(&nor->rww.wait);

	/*
	 * Configure the SPI memory:
	 * - select op codes for (Fast) Read, Page Program and Sector Erase.
	 * - set the number of dummy cycles (mode cycles + wait states).
	 * - set the SPI protocols for register and memory accesses.
	 * - set the number of address bytes.
	 */
	ret = spi_nor_setup(nor, hwcaps);
	if (ret)
		return ret;

	/* Send all the required SPI flash commands to initialize device */
	ret = spi_nor_init(nor);
	if (ret)
		return ret;

	/* No mtd_info fields should be used up to this point. */
	ret = spi_nor_set_mtd_info(nor);
	if (ret)
		return ret;

	dev_dbg(dev, "Manufacturer and device ID: %*phN\n",
		SPI_NOR_MAX_ID_LEN, nor->id);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_nor_scan);
3630 
/* Create the direct-mapping descriptor covering the whole flash for reads. */
static int spi_nor_create_read_dirmap(struct spi_nor *nor)
{
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
				      SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0),
				      SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
				      SPI_MEM_OP_DATA_IN(0, NULL, 0)),
		.offset = 0,
		.length = nor->params->size,
	};
	struct spi_mem_op *op = &info.op_tmpl;

	spi_nor_spimem_setup_op(nor, op, nor->read_proto);

	/* convert the dummy cycles to the number of bytes */
	op->dummy.nbytes = (nor->read_dummy * op->dummy.buswidth) / 8;
	/* DTR transfers clock data on both edges, doubling bytes per cycle. */
	if (spi_nor_protocol_is_dtr(nor->read_proto))
		op->dummy.nbytes *= 2;

	/*
	 * Since spi_nor_spimem_setup_op() only sets buswidth when the number
	 * of data bytes is non-zero, the data buswidth won't be set here. So,
	 * do it explicitly.
	 */
	op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);

	nor->dirmap.rdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
						       &info);
	return PTR_ERR_OR_ZERO(nor->dirmap.rdesc);
}
3661 
/* Create the direct-mapping descriptor covering the whole flash for writes. */
static int spi_nor_create_write_dirmap(struct spi_nor *nor)
{
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
				      SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0),
				      SPI_MEM_OP_NO_DUMMY,
				      SPI_MEM_OP_DATA_OUT(0, NULL, 0)),
		.offset = 0,
		.length = nor->params->size,
	};
	struct spi_mem_op *op = &info.op_tmpl;

	/* SST AAI: after the first write, subsequent ops carry no address. */
	if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
		op->addr.nbytes = 0;

	spi_nor_spimem_setup_op(nor, op, nor->write_proto);

	/*
	 * Since spi_nor_spimem_setup_op() only sets buswidth when the number
	 * of data bytes is non-zero, the data buswidth won't be set here. So,
	 * do it explicitly.
	 */
	op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);

	nor->dirmap.wdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
						       &info);
	return PTR_ERR_OR_ZERO(nor->dirmap.wdesc);
}
3690 
/* spi-mem probe: scan the flash and register it with the MTD subsystem. */
static int spi_nor_probe(struct spi_mem *spimem)
{
	struct spi_device *spi = spimem->spi;
	struct device *dev = &spi->dev;
	struct flash_platform_data *data = dev_get_platdata(dev);
	struct spi_nor *nor;
	/*
	 * Enable all caps by default. The core will mask them after
	 * checking what's really supported using spi_mem_supports_op().
	 */
	const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
	char *flash_name;
	int ret;

	/* Power the flash; the regulator is released automatically on unbind. */
	ret = devm_regulator_get_enable(dev, "vcc");
	if (ret)
		return ret;

	nor = devm_kzalloc(dev, sizeof(*nor), GFP_KERNEL);
	if (!nor)
		return -ENOMEM;

	nor->spimem = spimem;
	nor->dev = dev;
	spi_nor_set_flash_node(nor, dev->of_node);

	spi_mem_set_drvdata(spimem, nor);

	if (data && data->name)
		nor->mtd.name = data->name;

	if (!nor->mtd.name)
		nor->mtd.name = spi_mem_get_name(spimem);

	/*
	 * For some (historical?) reason many platforms provide two different
	 * names in flash_platform_data: "name" and "type". Quite often name is
	 * set to "m25p80" and then "type" provides a real chip name.
	 * If that's the case, respect "type" and ignore a "name".
	 */
	if (data && data->type)
		flash_name = data->type;
	else if (!strcmp(spi->modalias, "spi-nor"))
		flash_name = NULL; /* auto-detect */
	else
		flash_name = spi->modalias;

	ret = spi_nor_scan(nor, flash_name, &hwcaps);
	if (ret)
		return ret;

	spi_nor_debugfs_register(nor);

	/*
	 * None of the existing parts have > 512B pages, but let's play safe
	 * and add this logic so that if anyone ever adds support for such
	 * a NOR we don't end up with buffer overflows.
	 */
	if (nor->params->page_size > PAGE_SIZE) {
		nor->bouncebuf_size = nor->params->page_size;
		devm_kfree(dev, nor->bouncebuf);
		nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
					      GFP_KERNEL);
		if (!nor->bouncebuf)
			return -ENOMEM;
	}

	ret = spi_nor_create_read_dirmap(nor);
	if (ret)
		return ret;

	ret = spi_nor_create_write_dirmap(nor);
	if (ret)
		return ret;

	return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
				   data ? data->nr_parts : 0);
}
3769 
/* spi-mem remove: restore the flash state, then unregister the mtd. */
static int spi_nor_remove(struct spi_mem *spimem)
{
	struct spi_nor *nor = spi_mem_get_drvdata(spimem);

	spi_nor_restore(nor);

	/* Clean up MTD stuff. */
	return mtd_device_unregister(&nor->mtd);
}
3779 
/* spi-mem shutdown: leave the flash in a state the next boot can handle. */
static void spi_nor_shutdown(struct spi_mem *spimem)
{
	struct spi_nor *nor = spi_mem_get_drvdata(spimem);

	spi_nor_restore(nor);
}
3786 
3787 /*
3788  * Do NOT add to this array without reading the following:
3789  *
3790  * Historically, many flash devices are bound to this driver by their name. But
3791  * since most of these flash are compatible to some extent, and their
3792  * differences can often be differentiated by the JEDEC read-ID command, we
3793  * encourage new users to add support to the spi-nor library, and simply bind
3794  * against a generic string here (e.g., "jedec,spi-nor").
3795  *
3796  * Many flash names are kept here in this list to keep them available
3797  * as module aliases for existing platforms.
3798  */
3799 static const struct spi_device_id spi_nor_dev_ids[] = {
3800 	/*
3801 	 * Allow non-DT platform devices to bind to the "spi-nor" modalias, and
3802 	 * hack around the fact that the SPI core does not provide uevent
3803 	 * matching for .of_match_table
3804 	 */
3805 	{"spi-nor"},
3806 
3807 	/*
3808 	 * Entries not used in DTs that should be safe to drop after replacing
3809 	 * them with "spi-nor" in platform data.
3810 	 */
3811 	{"s25sl064a"},	{"w25x16"},	{"m25p10"},	{"m25px64"},
3812 
3813 	/*
3814 	 * Entries that were used in DTs without "jedec,spi-nor" fallback and
3815 	 * should be kept for backward compatibility.
3816 	 */
3817 	{"at25df321a"},	{"at25df641"},	{"at26df081a"},
3818 	{"mx25l4005a"},	{"mx25l1606e"},	{"mx25l6405d"},	{"mx25l12805d"},
3819 	{"mx25l25635e"},{"mx66l51235l"},
3820 	{"n25q064"},	{"n25q128a11"},	{"n25q128a13"},	{"n25q512a"},
3821 	{"s25fl256s1"},	{"s25fl512s"},	{"s25sl12801"},	{"s25fl008k"},
3822 	{"s25fl064k"},
3823 	{"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"},
3824 	{"m25p40"},	{"m25p80"},	{"m25p16"},	{"m25p32"},
3825 	{"m25p64"},	{"m25p128"},
3826 	{"w25x80"},	{"w25x32"},	{"w25q32"},	{"w25q32dw"},
3827 	{"w25q80bl"},	{"w25q128"},	{"w25q256"},
3828 
3829 	/* Flashes that can't be detected using JEDEC */
3830 	{"m25p05-nonjedec"},	{"m25p10-nonjedec"},	{"m25p20-nonjedec"},
3831 	{"m25p40-nonjedec"},	{"m25p80-nonjedec"},	{"m25p16-nonjedec"},
3832 	{"m25p32-nonjedec"},	{"m25p64-nonjedec"},	{"m25p128-nonjedec"},
3833 
3834 	/* Everspin MRAMs (non-JEDEC) */
3835 	{ "mr25h128" }, /* 128 Kib, 40 MHz */
3836 	{ "mr25h256" }, /* 256 Kib, 40 MHz */
3837 	{ "mr25h10" },  /*   1 Mib, 40 MHz */
3838 	{ "mr25h40" },  /*   4 Mib, 40 MHz */
3839 
3840 	{ },
3841 };
3842 MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids);
3843 
static const struct of_device_id spi_nor_of_table[] = {
	/*
	 * Generic compatibility for SPI NOR that can be identified by the
	 * JEDEC READ ID opcode (0x9F). Use this, if possible.
	 */
	{ .compatible = "jedec,spi-nor" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, spi_nor_of_table);
3853 
3854 /*
3855  * REVISIT: many of these chips have deep power-down modes, which
3856  * should clearly be entered on suspend() to minimize power use.
3857  * And also when they're otherwise idle...
3858  */
3859 static struct spi_mem_driver spi_nor_driver = {
3860 	.spidrv = {
3861 		.driver = {
3862 			.name = "spi-nor",
3863 			.of_match_table = spi_nor_of_table,
3864 			.dev_groups = spi_nor_sysfs_groups,
3865 		},
3866 		.id_table = spi_nor_dev_ids,
3867 	},
3868 	.probe = spi_nor_probe,
3869 	.remove = spi_nor_remove,
3870 	.shutdown = spi_nor_shutdown,
3871 };
3872 
/* Register the spi-mem driver on module load. */
static int __init spi_nor_module_init(void)
{
	return spi_mem_driver_register(&spi_nor_driver);
}
module_init(spi_nor_module_init);
3878 
/* Unregister the driver, then tear down the module-wide debugfs state. */
static void __exit spi_nor_module_exit(void)
{
	spi_mem_driver_unregister(&spi_nor_driver);
	spi_nor_debugfs_shutdown();
}
module_exit(spi_nor_module_exit);
3885 
3886 MODULE_LICENSE("GPL v2");
3887 MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
3888 MODULE_AUTHOR("Mike Lavender");
3889 MODULE_DESCRIPTION("framework for SPI NOR");
3890