xref: /linux/drivers/spi/spi-intel.c (revision 09b1704f5b02c18dd02b21343530463fcfc92c54)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Intel PCH/PCU SPI flash driver.
4  *
5  * Copyright (C) 2016 - 2022, Intel Corporation
6  * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
7  */
8 
9 #include <linux/iopoll.h>
10 #include <linux/module.h>
11 
12 #include <linux/mtd/partitions.h>
13 #include <linux/mtd/spi-nor.h>
14 
15 #include <linux/spi/flash.h>
16 #include <linux/spi/spi.h>
17 #include <linux/spi/spi-mem.h>
18 
19 #include "spi-intel.h"
20 
21 /* Offsets are from @ispi->base */
22 #define BFPREG				0x00
23 
24 #define HSFSTS_CTL			0x04
25 #define HSFSTS_CTL_FSMIE		BIT(31)
26 #define HSFSTS_CTL_FDBC_SHIFT		24
27 #define HSFSTS_CTL_FDBC_MASK		(0x3f << HSFSTS_CTL_FDBC_SHIFT)
28 
29 #define HSFSTS_CTL_FCYCLE_SHIFT		17
30 #define HSFSTS_CTL_FCYCLE_MASK		(0x0f << HSFSTS_CTL_FCYCLE_SHIFT)
31 /* HW sequencer opcodes */
32 #define HSFSTS_CTL_FCYCLE_READ		(0x00 << HSFSTS_CTL_FCYCLE_SHIFT)
33 #define HSFSTS_CTL_FCYCLE_WRITE		(0x02 << HSFSTS_CTL_FCYCLE_SHIFT)
34 #define HSFSTS_CTL_FCYCLE_ERASE		(0x03 << HSFSTS_CTL_FCYCLE_SHIFT)
35 #define HSFSTS_CTL_FCYCLE_ERASE_64K	(0x04 << HSFSTS_CTL_FCYCLE_SHIFT)
36 #define HSFSTS_CTL_FCYCLE_RDSFDP	(0x05 << HSFSTS_CTL_FCYCLE_SHIFT)
37 #define HSFSTS_CTL_FCYCLE_RDID		(0x06 << HSFSTS_CTL_FCYCLE_SHIFT)
38 #define HSFSTS_CTL_FCYCLE_WRSR		(0x07 << HSFSTS_CTL_FCYCLE_SHIFT)
39 #define HSFSTS_CTL_FCYCLE_RDSR		(0x08 << HSFSTS_CTL_FCYCLE_SHIFT)
40 
41 #define HSFSTS_CTL_FGO			BIT(16)
42 #define HSFSTS_CTL_FLOCKDN		BIT(15)
43 #define HSFSTS_CTL_FDV			BIT(14)
44 #define HSFSTS_CTL_SCIP			BIT(5)
45 #define HSFSTS_CTL_AEL			BIT(2)
46 #define HSFSTS_CTL_FCERR		BIT(1)
47 #define HSFSTS_CTL_FDONE		BIT(0)
48 
49 #define FADDR				0x08
50 #define DLOCK				0x0c
51 #define FDATA(n)			(0x10 + ((n) * 4))
52 
53 #define FRACC				0x50
54 
55 #define FREG(n)				(0x54 + ((n) * 4))
56 #define FREG_BASE_MASK			GENMASK(14, 0)
57 #define FREG_LIMIT_SHIFT		16
58 #define FREG_LIMIT_MASK			GENMASK(30, 16)
59 
60 /* Offset is from @ispi->pregs */
61 #define PR(n)				((n) * 4)
62 #define PR_WPE				BIT(31)
63 #define PR_LIMIT_SHIFT			16
64 #define PR_LIMIT_MASK			GENMASK(30, 16)
65 #define PR_RPE				BIT(15)
66 #define PR_BASE_MASK			GENMASK(14, 0)
67 
68 /* Offsets are from @ispi->sregs */
69 #define SSFSTS_CTL			0x00
70 #define SSFSTS_CTL_FSMIE		BIT(23)
71 #define SSFSTS_CTL_DS			BIT(22)
72 #define SSFSTS_CTL_DBC_SHIFT		16
73 #define SSFSTS_CTL_SPOP			BIT(11)
74 #define SSFSTS_CTL_ACS			BIT(10)
75 #define SSFSTS_CTL_SCGO			BIT(9)
76 #define SSFSTS_CTL_COP_SHIFT		12
77 #define SSFSTS_CTL_FRS			BIT(7)
78 #define SSFSTS_CTL_DOFRS		BIT(6)
79 #define SSFSTS_CTL_AEL			BIT(4)
80 #define SSFSTS_CTL_FCERR		BIT(3)
81 #define SSFSTS_CTL_FDONE		BIT(2)
82 #define SSFSTS_CTL_SCIP			BIT(0)
83 
84 #define PREOP_OPTYPE			0x04
85 #define OPMENU0				0x08
86 #define OPMENU1				0x0c
87 
88 #define OPTYPE_READ_NO_ADDR		0
89 #define OPTYPE_WRITE_NO_ADDR		1
90 #define OPTYPE_READ_WITH_ADDR		2
91 #define OPTYPE_WRITE_WITH_ADDR		3
92 
93 /* CPU specifics */
94 #define BYT_PR				0x74
95 #define BYT_SSFSTS_CTL			0x90
96 #define BYT_FREG_NUM			5
97 #define BYT_PR_NUM			5
98 
99 #define LPT_PR				0x74
100 #define LPT_SSFSTS_CTL			0x90
101 #define LPT_FREG_NUM			5
102 #define LPT_PR_NUM			5
103 
104 #define BXT_PR				0x84
105 #define BXT_SSFSTS_CTL			0xa0
106 #define BXT_FREG_NUM			12
107 #define BXT_PR_NUM			5
108 
109 #define CNL_PR				0x84
110 #define CNL_FREG_NUM			6
111 #define CNL_PR_NUM			5
112 
113 #define LVSCC				0xc4
114 #define UVSCC				0xc8
115 #define ERASE_OPCODE_SHIFT		8
116 #define ERASE_OPCODE_MASK		(0xff << ERASE_OPCODE_SHIFT)
117 #define ERASE_64K_OPCODE_SHIFT		16
118 #define ERASE_64K_OPCODE_MASK		(0xff << ERASE_64K_OPCODE_SHIFT)
119 
120 /* Flash descriptor fields */
121 #define FLVALSIG_MAGIC			0x0ff0a55a
122 #define FLMAP0_NC_MASK			GENMASK(9, 8)
123 #define FLMAP0_NC_SHIFT			8
124 #define FLMAP0_FCBA_MASK		GENMASK(7, 0)
125 
126 #define FLCOMP_C0DEN_MASK		GENMASK(3, 0)
127 #define FLCOMP_C0DEN_512K		0x00
128 #define FLCOMP_C0DEN_1M			0x01
129 #define FLCOMP_C0DEN_2M			0x02
130 #define FLCOMP_C0DEN_4M			0x03
131 #define FLCOMP_C0DEN_8M			0x04
132 #define FLCOMP_C0DEN_16M		0x05
133 #define FLCOMP_C0DEN_32M		0x06
134 #define FLCOMP_C0DEN_64M		0x07
135 #define FLCOMP_C0DEN_128M		0x08
136 
137 #define INTEL_SPI_TIMEOUT		5000 /* ms */
138 #define INTEL_SPI_FIFO_SZ		64
139 
/**
 * struct intel_spi - Driver private data
 * @dev: Device pointer
 * @info: Pointer to board specific info
 * @base: Beginning of MMIO space
 * @pregs: Start of protection registers
 * @sregs: Start of software sequencer registers
 * @host: Pointer to the SPI controller structure
 * @nregions: Maximum number of regions
 * @pr_num: Maximum number of protected range registers
 * @chip0_size: Size of the first flash chip in bytes
 * @locked: Is SPI setting locked
 * @protected: Whether the regions are write protected
 * @bios_locked: Is BIOS region locked
 * @swseq_reg: Use SW sequencer in register reads/writes
 * @swseq_erase: Use SW sequencer in erase operation
 * @atomic_preopcode: Holds preopcode when atomic sequence is requested
 * @opcodes: Opcodes which are supported. These are programmed by BIOS
 *           before it locks down the controller.
 * @mem_ops: Pointer to SPI MEM ops supported by the controller
 */
struct intel_spi {
	struct device *dev;
	const struct intel_spi_boardinfo *info;
	void __iomem *base;
	void __iomem *pregs;
	void __iomem *sregs;
	struct spi_controller *host;
	size_t nregions;
	size_t pr_num;
	size_t chip0_size;
	bool locked;
	bool protected;
	bool bios_locked;
	bool swseq_reg;
	bool swseq_erase;
	u8 atomic_preopcode;
	u8 opcodes[8];
	const struct intel_spi_mem_op *mem_ops;
};
180 
/**
 * struct intel_spi_mem_op - SPI MEM operation supported by the controller
 * @mem_op: The SPI MEM operation template this entry matches against
 * @replacement_op: HSFSTS_CTL_FCYCLE_* encoding used when the HW
 *		    sequencer replaces the opcode (0 if not applicable)
 * @exec_op: Callback that actually executes the operation
 */
struct intel_spi_mem_op {
	struct spi_mem_op mem_op;
	u32 replacement_op;
	int (*exec_op)(struct intel_spi *ispi,
		       const struct spi_mem *mem,
		       const struct intel_spi_mem_op *iop,
		       const struct spi_mem_op *op);
};
189 
/* If set, the BIOS write protection is lifted (see intel_spi_set_writeable()) */
static bool writeable;
module_param(writeable, bool, 0);
MODULE_PARM_DESC(writeable, "Enable write access to SPI flash chip (default=0)");
/* If set, writes proceed even when PR registers report write protection */
static bool ignore_protection_status;
module_param(ignore_protection_status, bool, 0);
MODULE_PARM_DESC(
	ignore_protection_status,
	"Do not block SPI flash chip write access even if it is write-protected (default=0)");
198 
/*
 * intel_spi_dump_regs() - Dump controller registers to the debug log
 * @ispi: Driver private data
 *
 * Reads and prints (dev_dbg) the HW sequencer registers, the FDATA FIFO,
 * the flash region registers, the protected range registers and, when the
 * controller has one, the SW sequencer registers. Also decodes the
 * protected ranges and flash regions into base/limit byte addresses.
 */
static void intel_spi_dump_regs(struct intel_spi *ispi)
{
	u32 value;
	int i;

	dev_dbg(ispi->dev, "BFPREG=0x%08x\n", readl(ispi->base + BFPREG));

	value = readl(ispi->base + HSFSTS_CTL);
	dev_dbg(ispi->dev, "HSFSTS_CTL=0x%08x\n", value);
	if (value & HSFSTS_CTL_FLOCKDN)
		dev_dbg(ispi->dev, "-> Locked\n");

	dev_dbg(ispi->dev, "FADDR=0x%08x\n", readl(ispi->base + FADDR));
	dev_dbg(ispi->dev, "DLOCK=0x%08x\n", readl(ispi->base + DLOCK));

	for (i = 0; i < 16; i++)
		dev_dbg(ispi->dev, "FDATA(%d)=0x%08x\n",
			i, readl(ispi->base + FDATA(i)));

	dev_dbg(ispi->dev, "FRACC=0x%08x\n", readl(ispi->base + FRACC));

	for (i = 0; i < ispi->nregions; i++)
		dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i,
			readl(ispi->base + FREG(i)));
	for (i = 0; i < ispi->pr_num; i++)
		dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i,
			readl(ispi->pregs + PR(i)));

	/* SW sequencer registers only exist on controllers that have them */
	if (ispi->sregs) {
		value = readl(ispi->sregs + SSFSTS_CTL);
		dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value);
		dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n",
			readl(ispi->sregs + PREOP_OPTYPE));
		dev_dbg(ispi->dev, "OPMENU0=0x%08x\n",
			readl(ispi->sregs + OPMENU0));
		dev_dbg(ispi->dev, "OPMENU1=0x%08x\n",
			readl(ispi->sregs + OPMENU1));
	}

	dev_dbg(ispi->dev, "LVSCC=0x%08x\n", readl(ispi->base + LVSCC));
	dev_dbg(ispi->dev, "UVSCC=0x%08x\n", readl(ispi->base + UVSCC));

	dev_dbg(ispi->dev, "Protected regions:\n");
	for (i = 0; i < ispi->pr_num; i++) {
		u32 base, limit;

		value = readl(ispi->pregs + PR(i));
		/* Skip ranges where neither write nor read protection is set */
		if (!(value & (PR_WPE | PR_RPE)))
			continue;

		limit = (value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
		base = value & PR_BASE_MASK;

		/* base/limit fields are in 4 KiB units, hence << 12 */
		dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x [%c%c]\n",
			i, base << 12, (limit << 12) | 0xfff,
			value & PR_WPE ? 'W' : '.', value & PR_RPE ? 'R' : '.');
	}

	dev_dbg(ispi->dev, "Flash regions:\n");
	for (i = 0; i < ispi->nregions; i++) {
		u32 region, base, limit;

		region = readl(ispi->base + FREG(i));
		base = region & FREG_BASE_MASK;
		limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;

		if (base >= limit || (i > 0 && limit == 0))
			dev_dbg(ispi->dev, " %02d disabled\n", i);
		else
			dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x\n",
				i, base << 12, (limit << 12) | 0xfff);
	}

	dev_dbg(ispi->dev, "Using %cW sequencer for register access\n",
		ispi->swseq_reg ? 'S' : 'H');
	dev_dbg(ispi->dev, "Using %cW sequencer for erase operation\n",
		ispi->swseq_erase ? 'S' : 'H');
}
277 
278 /* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */
279 static int intel_spi_read_block(struct intel_spi *ispi, void *buf, size_t size)
280 {
281 	size_t bytes;
282 	int i = 0;
283 
284 	if (size > INTEL_SPI_FIFO_SZ)
285 		return -EINVAL;
286 
287 	while (size > 0) {
288 		bytes = min_t(size_t, size, 4);
289 		memcpy_fromio(buf, ispi->base + FDATA(i), bytes);
290 		size -= bytes;
291 		buf += bytes;
292 		i++;
293 	}
294 
295 	return 0;
296 }
297 
298 /* Writes max INTEL_SPI_FIFO_SZ bytes to the device fifo */
299 static int intel_spi_write_block(struct intel_spi *ispi, const void *buf,
300 				 size_t size)
301 {
302 	size_t bytes;
303 	int i = 0;
304 
305 	if (size > INTEL_SPI_FIFO_SZ)
306 		return -EINVAL;
307 
308 	while (size > 0) {
309 		bytes = min_t(size_t, size, 4);
310 		memcpy_toio(ispi->base + FDATA(i), buf, bytes);
311 		size -= bytes;
312 		buf += bytes;
313 		i++;
314 	}
315 
316 	return 0;
317 }
318 
/*
 * Poll until the HW sequencer "cycle in progress" (SCIP) bit clears.
 * Returns 0 when idle or -ETIMEDOUT after INTEL_SPI_TIMEOUT ms
 * (readl_poll_timeout takes its timeout in microseconds, hence * 1000).
 */
static int intel_spi_wait_hw_busy(struct intel_spi *ispi)
{
	u32 val;

	return readl_poll_timeout(ispi->base + HSFSTS_CTL, val,
				  !(val & HSFSTS_CTL_SCIP), 0,
				  INTEL_SPI_TIMEOUT * 1000);
}
327 
/*
 * Poll until the SW sequencer "cycle in progress" (SCIP) bit clears.
 * Returns 0 when idle or -ETIMEDOUT after INTEL_SPI_TIMEOUT ms.
 */
static int intel_spi_wait_sw_busy(struct intel_spi *ispi)
{
	u32 val;

	return readl_poll_timeout(ispi->sregs + SSFSTS_CTL, val,
				  !(val & SSFSTS_CTL_SCIP), 0,
				  INTEL_SPI_TIMEOUT * 1000);
}
336 
337 static bool intel_spi_set_writeable(struct intel_spi *ispi)
338 {
339 	if (!ispi->info->set_writeable)
340 		return false;
341 
342 	return ispi->info->set_writeable(ispi->base, ispi->info->data);
343 }
344 
/*
 * intel_spi_opcode_index() - Find or program an OPMENU slot for @opcode
 * @ispi: Driver private data
 * @opcode: SPI flash opcode to run with the SW sequencer
 * @optype: One of the OPTYPE_* values describing the transfer
 *
 * On a locked controller only the BIOS-programmed opcodes can be used, so
 * the opcode is looked up in the cached @ispi->opcodes table. On an
 * unlocked controller slot 0 of OPMENU is (re)programmed on the fly.
 *
 * Returns the OPMENU slot index (0-7) or -EINVAL if the opcode is not
 * available on a locked controller.
 */
static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype)
{
	int i;
	int preop;

	if (ispi->locked) {
		for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++)
			if (ispi->opcodes[i] == opcode)
				return i;

		return -EINVAL;
	}

	/* The lock is off, so just use index 0 */
	writel(opcode, ispi->sregs + OPMENU0);
	/* Keep the preopcode byte, update only the optype half */
	preop = readw(ispi->sregs + PREOP_OPTYPE);
	writel(optype << 16 | preop, ispi->sregs + PREOP_OPTYPE);

	return 0;
}
365 
/*
 * intel_spi_hw_cycle() - Run a single HW sequencer cycle
 * @ispi: Driver private data
 * @iop: Matched operation; @iop->replacement_op carries the
 *	 HSFSTS_CTL_FCYCLE_* encoding to program
 * @len: Number of data bytes to transfer (assumes len >= 1; FDBC is
 *	 programmed as len - 1 — TODO confirm callers never pass 0)
 *
 * The caller must have programmed FADDR already. Returns 0 on success,
 * -EINVAL if the operation has no HW replacement, -EIO on a flash cycle
 * error or -EACCES on an access error.
 */
static int intel_spi_hw_cycle(struct intel_spi *ispi,
			      const struct intel_spi_mem_op *iop, size_t len)
{
	u32 val, status;
	int ret;

	if (!iop->replacement_op)
		return -EINVAL;

	val = readl(ispi->base + HSFSTS_CTL);
	val &= ~(HSFSTS_CTL_FCYCLE_MASK | HSFSTS_CTL_FDBC_MASK);
	val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT;
	/* Writing FCERR/FDONE clears any stale status before starting */
	val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
	val |= HSFSTS_CTL_FGO;
	val |= iop->replacement_op;
	writel(val, ispi->base + HSFSTS_CTL);

	ret = intel_spi_wait_hw_busy(ispi);
	if (ret)
		return ret;

	status = readl(ispi->base + HSFSTS_CTL);
	if (status & HSFSTS_CTL_FCERR)
		return -EIO;
	else if (status & HSFSTS_CTL_AEL)
		return -EACCES;

	return 0;
}
395 
/*
 * intel_spi_sw_cycle() - Run a single SW sequencer cycle
 * @ispi: Driver private data
 * @opcode: SPI flash opcode to execute
 * @len: Number of data bytes to transfer (0 means no data phase)
 * @optype: One of the OPTYPE_* values describing the transfer
 *
 * Looks up (or programs) the OPMENU slot for @opcode, then starts the
 * cycle. If a preopcode was armed via @ispi->atomic_preopcode (WREN),
 * the cycle is run as an atomic preop + op sequence; this is only valid
 * for write optypes. The caller must have programmed FADDR already.
 *
 * Returns 0 on success, -EINVAL on opcode/preopcode mismatch, -EIO on a
 * flash cycle error or -EACCES on an access error.
 */
static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, size_t len,
			      int optype)
{
	u32 val = 0, status;
	u8 atomic_preopcode;
	int ret;

	ret = intel_spi_opcode_index(ispi, opcode, optype);
	if (ret < 0)
		return ret;

	/*
	 * Always clear it after each SW sequencer operation regardless
	 * of whether it is successful or not.
	 */
	atomic_preopcode = ispi->atomic_preopcode;
	ispi->atomic_preopcode = 0;

	/* Only mark 'Data Cycle' bit when there is data to be transferred */
	if (len > 0)
		val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
	/* ret holds the OPMENU slot index selected above */
	val |= ret << SSFSTS_CTL_COP_SHIFT;
	/* Writing FCERR/FDONE clears any stale status before starting */
	val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE;
	val |= SSFSTS_CTL_SCGO;
	if (atomic_preopcode) {
		u16 preop;

		switch (optype) {
		case OPTYPE_WRITE_NO_ADDR:
		case OPTYPE_WRITE_WITH_ADDR:
			/* Pick matching preopcode for the atomic sequence */
			preop = readw(ispi->sregs + PREOP_OPTYPE);
			if ((preop & 0xff) == atomic_preopcode)
				; /* Do nothing */
			else if ((preop >> 8) == atomic_preopcode)
				val |= SSFSTS_CTL_SPOP;
			else
				return -EINVAL;

			/* Enable atomic sequence */
			val |= SSFSTS_CTL_ACS;
			break;

		default:
			return -EINVAL;
		}
	}
	writel(val, ispi->sregs + SSFSTS_CTL);

	ret = intel_spi_wait_sw_busy(ispi);
	if (ret)
		return ret;

	status = readl(ispi->sregs + SSFSTS_CTL);
	if (status & SSFSTS_CTL_FCERR)
		return -EIO;
	else if (status & SSFSTS_CTL_AEL)
		return -EACCES;

	return 0;
}
457 
458 static u32 intel_spi_chip_addr(const struct intel_spi *ispi,
459 			       const struct spi_mem *mem)
460 {
461 	/* Pick up the correct start address */
462 	if (!mem)
463 		return 0;
464 	return (spi_get_chipselect(mem->spi, 0) == 1) ? ispi->chip0_size : 0;
465 }
466 
/*
 * intel_spi_read_reg() - Execute a register-style read (RDID, RDSR, ...)
 * @ispi: Driver private data
 * @mem: SPI memory (may be NULL for offset 0)
 * @iop: Matched operation (provides the HW replacement encoding)
 * @op: The SPI MEM operation to execute
 *
 * Runs the cycle with the SW sequencer when @ispi->swseq_reg is set,
 * otherwise with the HW sequencer, then copies the result out of the
 * FIFO into @op->data.buf.in. Returns 0 on success or a negative errno.
 */
static int intel_spi_read_reg(struct intel_spi *ispi, const struct spi_mem *mem,
			      const struct intel_spi_mem_op *iop,
			      const struct spi_mem_op *op)
{
	u32 addr = intel_spi_chip_addr(ispi, mem) + op->addr.val;
	size_t nbytes = op->data.nbytes;
	u8 opcode = op->cmd.opcode;
	int ret;

	writel(addr, ispi->base + FADDR);

	if (ispi->swseq_reg)
		ret = intel_spi_sw_cycle(ispi, opcode, nbytes,
					 OPTYPE_READ_NO_ADDR);
	else
		ret = intel_spi_hw_cycle(ispi, iop, nbytes);

	if (ret)
		return ret;

	return intel_spi_read_block(ispi, op->data.buf.in, nbytes);
}
489 
/*
 * intel_spi_write_reg() - Execute a register-style write (WRSR, WREN, ...)
 * @ispi: Driver private data
 * @mem: SPI memory (may be NULL for offset 0)
 * @iop: Matched operation (provides the HW replacement encoding)
 * @op: The SPI MEM operation to execute
 *
 * WREN and WRDI are handled specially (see comments below); everything
 * else loads the FIFO and runs a SW or HW sequencer cycle. Returns 0 on
 * success or a negative errno.
 */
static int intel_spi_write_reg(struct intel_spi *ispi, const struct spi_mem *mem,
			       const struct intel_spi_mem_op *iop,
			       const struct spi_mem_op *op)
{
	u32 addr = intel_spi_chip_addr(ispi, mem) + op->addr.val;
	size_t nbytes = op->data.nbytes;
	u8 opcode = op->cmd.opcode;
	int ret;

	/*
	 * This is handled with atomic operation and preop code in Intel
	 * controller so we only verify that it is available. If the
	 * controller is not locked, program the opcode to the PREOP
	 * register for later use.
	 *
	 * When hardware sequencer is used there is no need to program
	 * any opcodes (it handles them automatically as part of a command).
	 */
	if (opcode == SPINOR_OP_WREN) {
		u16 preop;

		if (!ispi->swseq_reg)
			return 0;

		preop = readw(ispi->sregs + PREOP_OPTYPE);
		if ((preop & 0xff) != opcode && (preop >> 8) != opcode) {
			if (ispi->locked)
				return -EINVAL;
			writel(opcode, ispi->sregs + PREOP_OPTYPE);
		}

		/*
		 * This enables atomic sequence on next SW cycle. Will
		 * be cleared after next operation.
		 */
		ispi->atomic_preopcode = opcode;
		return 0;
	}

	/*
	 * We hope that HW sequencer will do the right thing automatically and
	 * with the SW sequencer we cannot use preopcode anyway, so just ignore
	 * the Write Disable operation and pretend it was completed
	 * successfully.
	 */
	if (opcode == SPINOR_OP_WRDI)
		return 0;

	writel(addr, ispi->base + FADDR);

	/* Write the value beforehand */
	ret = intel_spi_write_block(ispi, op->data.buf.out, nbytes);
	if (ret)
		return ret;

	if (ispi->swseq_reg)
		return intel_spi_sw_cycle(ispi, opcode, nbytes,
					  OPTYPE_WRITE_NO_ADDR);
	return intel_spi_hw_cycle(ispi, iop, nbytes);
}
550 
/*
 * intel_spi_read() - Read flash contents using the HW sequencer
 * @ispi: Driver private data
 * @mem: SPI memory (may be NULL for offset 0)
 * @iop: Matched operation (unused here; the read cycle is fixed)
 * @op: The SPI MEM operation carrying address, length and buffer
 *
 * Splits the transfer into FIFO-sized chunks that never cross a 4K
 * boundary, running one HW sequencer read cycle per chunk. Returns 0 on
 * success or a negative errno.
 */
static int intel_spi_read(struct intel_spi *ispi, const struct spi_mem *mem,
			  const struct intel_spi_mem_op *iop,
			  const struct spi_mem_op *op)
{
	u32 addr = intel_spi_chip_addr(ispi, mem) + op->addr.val;
	size_t block_size, nbytes = op->data.nbytes;
	void *read_buf = op->data.buf.in;
	u32 val, status;
	int ret;

	/*
	 * Atomic sequence is not expected with HW sequencer reads. Make
	 * sure it is cleared regardless.
	 */
	if (WARN_ON_ONCE(ispi->atomic_preopcode))
		ispi->atomic_preopcode = 0;

	while (nbytes > 0) {
		block_size = min_t(size_t, nbytes, INTEL_SPI_FIFO_SZ);

		/* Read cannot cross 4K boundary */
		block_size = min_t(loff_t, addr + block_size,
				   round_up(addr + 1, SZ_4K)) - addr;

		writel(addr, ispi->base + FADDR);

		val = readl(ispi->base + HSFSTS_CTL);
		val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
		/* Writing AEL/FCERR/FDONE clears stale status bits */
		val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
		val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
		val |= HSFSTS_CTL_FCYCLE_READ;
		val |= HSFSTS_CTL_FGO;
		writel(val, ispi->base + HSFSTS_CTL);

		ret = intel_spi_wait_hw_busy(ispi);
		if (ret)
			return ret;

		status = readl(ispi->base + HSFSTS_CTL);
		if (status & HSFSTS_CTL_FCERR)
			ret = -EIO;
		else if (status & HSFSTS_CTL_AEL)
			ret = -EACCES;

		if (ret < 0) {
			dev_err(ispi->dev, "read error: %x: %#x\n", addr, status);
			return ret;
		}

		/* Drain the FIFO into the caller's buffer */
		ret = intel_spi_read_block(ispi, read_buf, block_size);
		if (ret)
			return ret;

		nbytes -= block_size;
		addr += block_size;
		read_buf += block_size;
	}

	return 0;
}
611 
/*
 * intel_spi_write() - Write flash contents using the HW sequencer
 * @ispi: Driver private data
 * @mem: SPI memory (may be NULL for offset 0)
 * @iop: Matched operation (unused here; the write cycle is fixed)
 * @op: The SPI MEM operation carrying address, length and buffer
 *
 * Splits the transfer into FIFO-sized chunks that never cross a 4K
 * boundary. The FIFO is loaded before FGO starts each cycle. Returns 0
 * on success or a negative errno.
 */
static int intel_spi_write(struct intel_spi *ispi, const struct spi_mem *mem,
			   const struct intel_spi_mem_op *iop,
			   const struct spi_mem_op *op)
{
	u32 addr = intel_spi_chip_addr(ispi, mem) + op->addr.val;
	size_t block_size, nbytes = op->data.nbytes;
	const void *write_buf = op->data.buf.out;
	u32 val, status;
	int ret;

	/* Not needed with HW sequencer write, make sure it is cleared */
	ispi->atomic_preopcode = 0;

	while (nbytes > 0) {
		block_size = min_t(size_t, nbytes, INTEL_SPI_FIFO_SZ);

		/* Write cannot cross 4K boundary */
		block_size = min_t(loff_t, addr + block_size,
				   round_up(addr + 1, SZ_4K)) - addr;

		writel(addr, ispi->base + FADDR);

		val = readl(ispi->base + HSFSTS_CTL);
		val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
		/* Writing AEL/FCERR/FDONE clears stale status bits */
		val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
		val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
		val |= HSFSTS_CTL_FCYCLE_WRITE;

		/* The FIFO must hold the data before the cycle starts */
		ret = intel_spi_write_block(ispi, write_buf, block_size);
		if (ret) {
			dev_err(ispi->dev, "failed to write block\n");
			return ret;
		}

		/* Start the write now */
		val |= HSFSTS_CTL_FGO;
		writel(val, ispi->base + HSFSTS_CTL);

		ret = intel_spi_wait_hw_busy(ispi);
		if (ret) {
			dev_err(ispi->dev, "timeout\n");
			return ret;
		}

		status = readl(ispi->base + HSFSTS_CTL);
		if (status & HSFSTS_CTL_FCERR)
			ret = -EIO;
		else if (status & HSFSTS_CTL_AEL)
			ret = -EACCES;

		if (ret < 0) {
			dev_err(ispi->dev, "write error: %x: %#x\n", addr, status);
			return ret;
		}

		nbytes -= block_size;
		addr += block_size;
		write_buf += block_size;
	}

	return 0;
}
674 
/*
 * intel_spi_erase() - Erase a flash sector
 * @ispi: Driver private data
 * @mem: SPI memory (may be NULL for offset 0)
 * @iop: Matched operation; @iop->replacement_op carries the erase
 *	 HSFSTS_CTL_FCYCLE_* encoding for the HW sequencer path
 * @op: The SPI MEM operation carrying the opcode and address
 *
 * Uses the SW sequencer when @ispi->swseq_erase is set, otherwise runs a
 * HW sequencer erase cycle. Returns 0 on success or a negative errno.
 */
static int intel_spi_erase(struct intel_spi *ispi, const struct spi_mem *mem,
			   const struct intel_spi_mem_op *iop,
			   const struct spi_mem_op *op)
{
	u32 addr = intel_spi_chip_addr(ispi, mem) + op->addr.val;
	u8 opcode = op->cmd.opcode;
	u32 val, status;
	int ret;

	writel(addr, ispi->base + FADDR);

	if (ispi->swseq_erase)
		return intel_spi_sw_cycle(ispi, opcode, 0,
					  OPTYPE_WRITE_WITH_ADDR);

	/* Not needed with HW sequencer erase, make sure it is cleared */
	ispi->atomic_preopcode = 0;

	val = readl(ispi->base + HSFSTS_CTL);
	val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
	/* Writing AEL/FCERR/FDONE clears stale status bits */
	val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
	val |= HSFSTS_CTL_FGO;
	val |= iop->replacement_op;
	writel(val, ispi->base + HSFSTS_CTL);

	ret = intel_spi_wait_hw_busy(ispi);
	if (ret)
		return ret;

	status = readl(ispi->base + HSFSTS_CTL);
	if (status & HSFSTS_CTL_FCERR)
		return -EIO;
	if (status & HSFSTS_CTL_AEL)
		return -EACCES;

	return 0;
}
712 
713 static int intel_spi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
714 {
715 	op->data.nbytes = clamp_val(op->data.nbytes, 0, INTEL_SPI_FIFO_SZ);
716 	return 0;
717 }
718 
719 static bool intel_spi_cmp_mem_op(const struct intel_spi_mem_op *iop,
720 				 const struct spi_mem_op *op)
721 {
722 	if (iop->mem_op.cmd.nbytes != op->cmd.nbytes ||
723 	    iop->mem_op.cmd.buswidth != op->cmd.buswidth ||
724 	    iop->mem_op.cmd.dtr != op->cmd.dtr)
725 		return false;
726 
727 	if (iop->mem_op.addr.nbytes != op->addr.nbytes ||
728 	    iop->mem_op.addr.dtr != op->addr.dtr)
729 		return false;
730 
731 	if (iop->mem_op.data.dir != op->data.dir ||
732 	    iop->mem_op.data.dtr != op->data.dtr)
733 		return false;
734 
735 	if (iop->mem_op.data.dir != SPI_MEM_NO_DATA) {
736 		if (iop->mem_op.data.buswidth != op->data.buswidth)
737 			return false;
738 	}
739 
740 	return true;
741 }
742 
743 static const struct intel_spi_mem_op *
744 intel_spi_match_mem_op(struct intel_spi *ispi, const struct spi_mem_op *op)
745 {
746 	const struct intel_spi_mem_op *iop;
747 
748 	for (iop = ispi->mem_ops; iop->mem_op.cmd.opcode; iop++) {
749 		if (iop->mem_op.cmd.opcode == op->cmd.opcode &&
750 		    intel_spi_cmp_mem_op(iop, op))
751 			return iop;
752 	}
753 
754 	return NULL;
755 }
756 
757 static bool intel_spi_supports_mem_op(struct spi_mem *mem,
758 				      const struct spi_mem_op *op)
759 {
760 	struct intel_spi *ispi = spi_controller_get_devdata(mem->spi->controller);
761 	const struct intel_spi_mem_op *iop;
762 
763 	iop = intel_spi_match_mem_op(ispi, op);
764 	if (!iop) {
765 		dev_dbg(ispi->dev, "%#x not supported\n", op->cmd.opcode);
766 		return false;
767 	}
768 
769 	/*
770 	 * For software sequencer check that the opcode is actually
771 	 * present in the opmenu if it is locked.
772 	 */
773 	if (ispi->swseq_reg && ispi->locked) {
774 		int i;
775 
776 		/* Check if it is in the locked opcodes list */
777 		for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++) {
778 			if (ispi->opcodes[i] == op->cmd.opcode)
779 				return true;
780 		}
781 
782 		dev_dbg(ispi->dev, "%#x not supported\n", op->cmd.opcode);
783 		return false;
784 	}
785 
786 	return true;
787 }
788 
789 static int intel_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
790 {
791 	struct intel_spi *ispi = spi_controller_get_devdata(mem->spi->controller);
792 	const struct intel_spi_mem_op *iop;
793 
794 	iop = intel_spi_match_mem_op(ispi, op);
795 	if (!iop)
796 		return -EOPNOTSUPP;
797 
798 	return iop->exec_op(ispi, mem, iop, op);
799 }
800 
/* spi-mem ->get_name() callback */
static const char *intel_spi_get_name(struct spi_mem *mem)
{
	const struct intel_spi *ispi = spi_controller_get_devdata(mem->spi->controller);

	/*
	 * Return name of the flash controller device to be compatible
	 * with the MTD version.
	 */
	return dev_name(ispi->dev);
}
811 
812 static int intel_spi_dirmap_create(struct spi_mem_dirmap_desc *desc)
813 {
814 	struct intel_spi *ispi = spi_controller_get_devdata(desc->mem->spi->controller);
815 	const struct intel_spi_mem_op *iop;
816 
817 	iop = intel_spi_match_mem_op(ispi, &desc->info.op_tmpl);
818 	if (!iop)
819 		return -EOPNOTSUPP;
820 
821 	desc->priv = (void *)iop;
822 	return 0;
823 }
824 
825 static ssize_t intel_spi_dirmap_read(struct spi_mem_dirmap_desc *desc, u64 offs,
826 				     size_t len, void *buf)
827 {
828 	struct intel_spi *ispi = spi_controller_get_devdata(desc->mem->spi->controller);
829 	const struct intel_spi_mem_op *iop = desc->priv;
830 	struct spi_mem_op op = desc->info.op_tmpl;
831 	int ret;
832 
833 	/* Fill in the gaps */
834 	op.addr.val = offs;
835 	op.data.nbytes = len;
836 	op.data.buf.in = buf;
837 
838 	ret = iop->exec_op(ispi, desc->mem, iop, &op);
839 	return ret ? ret : len;
840 }
841 
842 static ssize_t intel_spi_dirmap_write(struct spi_mem_dirmap_desc *desc, u64 offs,
843 				      size_t len, const void *buf)
844 {
845 	struct intel_spi *ispi = spi_controller_get_devdata(desc->mem->spi->controller);
846 	const struct intel_spi_mem_op *iop = desc->priv;
847 	struct spi_mem_op op = desc->info.op_tmpl;
848 	int ret;
849 
850 	op.addr.val = offs;
851 	op.data.nbytes = len;
852 	op.data.buf.out = buf;
853 
854 	ret = iop->exec_op(ispi, desc->mem, iop, &op);
855 	return ret ? ret : len;
856 }
857 
/* spi-mem operations implemented by this controller driver */
static const struct spi_controller_mem_ops intel_spi_mem_ops = {
	.adjust_op_size = intel_spi_adjust_op_size,
	.supports_op = intel_spi_supports_mem_op,
	.exec_op = intel_spi_exec_mem_op,
	.get_name = intel_spi_get_name,
	.dirmap_create = intel_spi_dirmap_create,
	.dirmap_read = intel_spi_dirmap_read,
	.dirmap_write = intel_spi_dirmap_write,
};
867 
/* Address phase descriptor with the given number of address bytes */
#define INTEL_SPI_OP_ADDR(__nbytes)					\
	{								\
		.nbytes = __nbytes,					\
	}

/* Data phase descriptor for operations that transfer no data */
#define INTEL_SPI_OP_NO_DATA						\
	{								\
		.dir = SPI_MEM_NO_DATA,					\
	}

/* Data-in phase descriptor with the given buswidth */
#define INTEL_SPI_OP_DATA_IN(__buswidth)				\
	{								\
		.dir = SPI_MEM_DATA_IN,					\
		.buswidth = __buswidth,					\
	}

/* Data-out phase descriptor with the given buswidth */
#define INTEL_SPI_OP_DATA_OUT(__buswidth)				\
	{								\
		.dir = SPI_MEM_DATA_OUT,				\
		.buswidth = __buswidth,					\
	}

/* Table entry for an operation executed by @__exec_op */
#define INTEL_SPI_MEM_OP(__cmd, __addr, __data, __exec_op)		\
	{								\
		.mem_op = {						\
			.cmd = __cmd,					\
			.addr = __addr,					\
			.data = __data,					\
		},							\
		.exec_op = __exec_op,					\
	}

/* Like INTEL_SPI_MEM_OP but with a HW sequencer replacement encoding */
#define INTEL_SPI_MEM_OP_REPL(__cmd, __addr, __data, __exec_op, __repl)	\
	{								\
		.mem_op = {						\
			.cmd = __cmd,					\
			.addr = __addr,					\
			.data = __data,					\
		},							\
		.exec_op = __exec_op,					\
		.replacement_op = __repl,				\
	}
910 
911 /*
912  * The controller handles pretty much everything internally based on the
913  * SFDP data but we want to make sure we only support the operations
914  * actually possible. Only check buswidth and transfer direction, the
915  * core validates data.
916  */
917 #define INTEL_SPI_GENERIC_OPS						\
918 	/* Status register operations */				\
919 	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1),	\
920 			      SPI_MEM_OP_NO_ADDR,			\
921 			      INTEL_SPI_OP_DATA_IN(1),			\
922 			      intel_spi_read_reg,			\
923 			      HSFSTS_CTL_FCYCLE_RDID),			\
924 	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1),	\
925 			      SPI_MEM_OP_NO_ADDR,			\
926 			      INTEL_SPI_OP_DATA_IN(1),			\
927 			      intel_spi_read_reg,			\
928 			      HSFSTS_CTL_FCYCLE_RDSR),			\
929 	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),	\
930 			      SPI_MEM_OP_NO_ADDR,			\
931 			      INTEL_SPI_OP_DATA_OUT(1),			\
932 			      intel_spi_write_reg,			\
933 			      HSFSTS_CTL_FCYCLE_WRSR),			\
934 	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_RDSFDP, 1),	\
935 			      INTEL_SPI_OP_ADDR(3),			\
936 			      INTEL_SPI_OP_DATA_IN(1),			\
937 			      intel_spi_read_reg,			\
938 			      HSFSTS_CTL_FCYCLE_RDSFDP),		\
939 	/* Normal read */						\
940 	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1),		\
941 			 INTEL_SPI_OP_ADDR(3),				\
942 			 INTEL_SPI_OP_DATA_IN(1),			\
943 			 intel_spi_read),				\
944 	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1),		\
945 			 INTEL_SPI_OP_ADDR(3),				\
946 			 INTEL_SPI_OP_DATA_IN(2),			\
947 			 intel_spi_read),				\
948 	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1),		\
949 			 INTEL_SPI_OP_ADDR(3),				\
950 			 INTEL_SPI_OP_DATA_IN(4),			\
951 			 intel_spi_read),				\
952 	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1),		\
953 			 INTEL_SPI_OP_ADDR(4),				\
954 			 INTEL_SPI_OP_DATA_IN(1),			\
955 			 intel_spi_read),				\
956 	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1),		\
957 			 INTEL_SPI_OP_ADDR(4),				\
958 			 INTEL_SPI_OP_DATA_IN(2),			\
959 			 intel_spi_read),				\
960 	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1),		\
961 			 INTEL_SPI_OP_ADDR(4),				\
962 			 INTEL_SPI_OP_DATA_IN(4),			\
963 			 intel_spi_read),				\
964 	/* Fast read */							\
965 	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1),	\
966 			 INTEL_SPI_OP_ADDR(3),				\
967 			 INTEL_SPI_OP_DATA_IN(1),			\
968 			 intel_spi_read),				\
969 	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1),	\
970 			 INTEL_SPI_OP_ADDR(3),				\
971 			 INTEL_SPI_OP_DATA_IN(2),			\
972 			 intel_spi_read),				\
973 	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1),	\
974 			 INTEL_SPI_OP_ADDR(3),				\
975 			 INTEL_SPI_OP_DATA_IN(4),			\
976 			 intel_spi_read),				\
977 	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1),	\
978 			 INTEL_SPI_OP_ADDR(4),				\
979 			 INTEL_SPI_OP_DATA_IN(1),			\
980 			 intel_spi_read),				\
981 	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1),	\
982 			 INTEL_SPI_OP_ADDR(4),				\
983 			 INTEL_SPI_OP_DATA_IN(2),			\
984 			 intel_spi_read),				\
985 	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1),	\
986 			 INTEL_SPI_OP_ADDR(4),				\
987 			 INTEL_SPI_OP_DATA_IN(4),			\
988 			 intel_spi_read),				\
989 	/* Read with 4-byte address opcode */				\
990 	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1),		\
991 			 INTEL_SPI_OP_ADDR(4),				\
992 			 INTEL_SPI_OP_DATA_IN(1),			\
993 			 intel_spi_read),				\
994 	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1),		\
995 			 INTEL_SPI_OP_ADDR(4),				\
996 			 INTEL_SPI_OP_DATA_IN(2),			\
997 			 intel_spi_read),				\
998 	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1),		\
999 			 INTEL_SPI_OP_ADDR(4),				\
1000 			 INTEL_SPI_OP_DATA_IN(4),			\
1001 			 intel_spi_read),				\
1002 	/* Fast read with 4-byte address opcode */			\
1003 	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1),	\
1004 			 INTEL_SPI_OP_ADDR(4),				\
1005 			 INTEL_SPI_OP_DATA_IN(1),			\
1006 			 intel_spi_read),				\
1007 	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1),	\
1008 			 INTEL_SPI_OP_ADDR(4),				\
1009 			 INTEL_SPI_OP_DATA_IN(2),			\
1010 			 intel_spi_read),				\
1011 	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1),	\
1012 			 INTEL_SPI_OP_ADDR(4),				\
1013 			 INTEL_SPI_OP_DATA_IN(4),			\
1014 			 intel_spi_read),				\
1015 	/* Write operations */						\
1016 	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP, 1),		\
1017 			 INTEL_SPI_OP_ADDR(3),				\
1018 			 INTEL_SPI_OP_DATA_OUT(1),			\
1019 			 intel_spi_write),				\
1020 	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP, 1),		\
1021 			 INTEL_SPI_OP_ADDR(4),				\
1022 			 INTEL_SPI_OP_DATA_OUT(1),			\
1023 			 intel_spi_write),				\
1024 	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP_4B, 1),		\
1025 			 INTEL_SPI_OP_ADDR(4),				\
1026 			 INTEL_SPI_OP_DATA_OUT(1),			\
1027 			 intel_spi_write),				\
1028 	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1),		\
1029 			 SPI_MEM_OP_NO_ADDR,				\
1030 			 SPI_MEM_OP_NO_DATA,				\
1031 			 intel_spi_write_reg),				\
1032 	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1),		\
1033 			 SPI_MEM_OP_NO_ADDR,				\
1034 			 SPI_MEM_OP_NO_DATA,				\
1035 			 intel_spi_write_reg),				\
1036 	/* Erase operations */						\
1037 	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K, 1),	\
1038 			      INTEL_SPI_OP_ADDR(3),			\
1039 			      SPI_MEM_OP_NO_DATA,			\
1040 			      intel_spi_erase,				\
1041 			      HSFSTS_CTL_FCYCLE_ERASE),			\
1042 	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K, 1),	\
1043 			      INTEL_SPI_OP_ADDR(4),			\
1044 			      SPI_MEM_OP_NO_DATA,			\
1045 			      intel_spi_erase,				\
1046 			      HSFSTS_CTL_FCYCLE_ERASE),			\
1047 	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K_4B, 1),	\
1048 			      INTEL_SPI_OP_ADDR(4),			\
1049 			      SPI_MEM_OP_NO_DATA,			\
1050 			      intel_spi_erase,				\
1051 			      HSFSTS_CTL_FCYCLE_ERASE)			\
1052 
/*
 * Memory operations supported on every controller generation.  The table
 * is expanded from INTEL_SPI_GENERIC_OPS and terminated with an empty
 * sentinel entry.
 */
static const struct intel_spi_mem_op generic_mem_ops[] = {
	INTEL_SPI_GENERIC_OPS,
	{ },
};
1057 
/*
 * Same as generic_mem_ops but additionally advertises the 64k sector
 * erase cycle (HSFSTS_CTL_FCYCLE_ERASE_64K) for controllers that
 * support it (BXT and newer, see intel_spi_init()).
 */
static const struct intel_spi_mem_op erase_64k_mem_ops[] = {
	INTEL_SPI_GENERIC_OPS,
	/* 64k sector erase operations */
	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE, 1),
			      INTEL_SPI_OP_ADDR(3),
			      SPI_MEM_OP_NO_DATA,
			      intel_spi_erase,
			      HSFSTS_CTL_FCYCLE_ERASE_64K),
	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE, 1),
			      INTEL_SPI_OP_ADDR(4),
			      SPI_MEM_OP_NO_DATA,
			      intel_spi_erase,
			      HSFSTS_CTL_FCYCLE_ERASE_64K),
	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE_4B, 1),
			      INTEL_SPI_OP_ADDR(4),
			      SPI_MEM_OP_NO_DATA,
			      intel_spi_erase,
			      HSFSTS_CTL_FCYCLE_ERASE_64K),
	{ },
};
1078 
1079 static int intel_spi_init(struct intel_spi *ispi)
1080 {
1081 	u32 opmenu0, opmenu1, lvscc, uvscc, val;
1082 	bool erase_64k = false;
1083 	int i;
1084 
1085 	switch (ispi->info->type) {
1086 	case INTEL_SPI_BYT:
1087 		ispi->sregs = ispi->base + BYT_SSFSTS_CTL;
1088 		ispi->pregs = ispi->base + BYT_PR;
1089 		ispi->nregions = BYT_FREG_NUM;
1090 		ispi->pr_num = BYT_PR_NUM;
1091 		ispi->swseq_reg = true;
1092 		break;
1093 
1094 	case INTEL_SPI_LPT:
1095 		ispi->sregs = ispi->base + LPT_SSFSTS_CTL;
1096 		ispi->pregs = ispi->base + LPT_PR;
1097 		ispi->nregions = LPT_FREG_NUM;
1098 		ispi->pr_num = LPT_PR_NUM;
1099 		ispi->swseq_reg = true;
1100 		break;
1101 
1102 	case INTEL_SPI_BXT:
1103 		ispi->sregs = ispi->base + BXT_SSFSTS_CTL;
1104 		ispi->pregs = ispi->base + BXT_PR;
1105 		ispi->nregions = BXT_FREG_NUM;
1106 		ispi->pr_num = BXT_PR_NUM;
1107 		erase_64k = true;
1108 		break;
1109 
1110 	case INTEL_SPI_CNL:
1111 		ispi->sregs = NULL;
1112 		ispi->pregs = ispi->base + CNL_PR;
1113 		ispi->nregions = CNL_FREG_NUM;
1114 		ispi->pr_num = CNL_PR_NUM;
1115 		erase_64k = true;
1116 		break;
1117 
1118 	default:
1119 		return -EINVAL;
1120 	}
1121 
1122 	ispi->bios_locked = true;
1123 	/* Try to disable BIOS write protection if user asked to do so */
1124 	if (writeable) {
1125 		if (intel_spi_set_writeable(ispi))
1126 			ispi->bios_locked = false;
1127 		else
1128 			dev_warn(ispi->dev, "can't disable chip write protection\n");
1129 	}
1130 
1131 	/* Disable #SMI generation from HW sequencer */
1132 	val = readl(ispi->base + HSFSTS_CTL);
1133 	val &= ~HSFSTS_CTL_FSMIE;
1134 	writel(val, ispi->base + HSFSTS_CTL);
1135 
1136 	/*
1137 	 * Determine whether erase operation should use HW or SW sequencer.
1138 	 *
1139 	 * The HW sequencer has a predefined list of opcodes, with only the
1140 	 * erase opcode being programmable in LVSCC and UVSCC registers.
1141 	 * If these registers don't contain a valid erase opcode, erase
1142 	 * cannot be done using HW sequencer.
1143 	 */
1144 	lvscc = readl(ispi->base + LVSCC);
1145 	uvscc = readl(ispi->base + UVSCC);
1146 	if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK))
1147 		ispi->swseq_erase = true;
1148 	/* SPI controller on Intel BXT supports 64K erase opcode */
1149 	if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase)
1150 		if (!(lvscc & ERASE_64K_OPCODE_MASK) ||
1151 		    !(uvscc & ERASE_64K_OPCODE_MASK))
1152 			erase_64k = false;
1153 
1154 	if (!ispi->sregs && (ispi->swseq_reg || ispi->swseq_erase)) {
1155 		dev_err(ispi->dev, "software sequencer not supported, but required\n");
1156 		return -EINVAL;
1157 	}
1158 
1159 	/*
1160 	 * Some controllers can only do basic operations using hardware
1161 	 * sequencer. All other operations are supposed to be carried out
1162 	 * using software sequencer.
1163 	 */
1164 	if (ispi->swseq_reg) {
1165 		/* Disable #SMI generation from SW sequencer */
1166 		val = readl(ispi->sregs + SSFSTS_CTL);
1167 		val &= ~SSFSTS_CTL_FSMIE;
1168 		writel(val, ispi->sregs + SSFSTS_CTL);
1169 	}
1170 
1171 	/* Check controller's lock status */
1172 	val = readl(ispi->base + HSFSTS_CTL);
1173 	ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN);
1174 
1175 	if (ispi->locked && ispi->sregs) {
1176 		/*
1177 		 * BIOS programs allowed opcodes and then locks down the
1178 		 * register. So read back what opcodes it decided to support.
1179 		 * That's the set we are going to support as well.
1180 		 */
1181 		opmenu0 = readl(ispi->sregs + OPMENU0);
1182 		opmenu1 = readl(ispi->sregs + OPMENU1);
1183 
1184 		if (opmenu0 && opmenu1) {
1185 			for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) {
1186 				ispi->opcodes[i] = opmenu0 >> i * 8;
1187 				ispi->opcodes[i + 4] = opmenu1 >> i * 8;
1188 			}
1189 		}
1190 	}
1191 
1192 	if (erase_64k) {
1193 		dev_dbg(ispi->dev, "Using erase_64k memory operations");
1194 		ispi->mem_ops = erase_64k_mem_ops;
1195 	} else {
1196 		dev_dbg(ispi->dev, "Using generic memory operations");
1197 		ispi->mem_ops = generic_mem_ops;
1198 	}
1199 
1200 	intel_spi_dump_regs(ispi);
1201 	return 0;
1202 }
1203 
1204 static bool intel_spi_is_protected(const struct intel_spi *ispi,
1205 				   unsigned int base, unsigned int limit)
1206 {
1207 	int i;
1208 
1209 	for (i = 0; i < ispi->pr_num; i++) {
1210 		u32 pr_base, pr_limit, pr_value;
1211 
1212 		pr_value = readl(ispi->pregs + PR(i));
1213 		if (!(pr_value & (PR_WPE | PR_RPE)))
1214 			continue;
1215 
1216 		pr_limit = (pr_value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
1217 		pr_base = pr_value & PR_BASE_MASK;
1218 
1219 		if (pr_base >= base && pr_limit <= limit)
1220 			return true;
1221 	}
1222 
1223 	return false;
1224 }
1225 
1226 /*
1227  * There will be a single partition holding all enabled flash regions. We
1228  * call this "BIOS".
1229  */
1230 static void intel_spi_fill_partition(struct intel_spi *ispi,
1231 				     struct mtd_partition *part)
1232 {
1233 	u64 end;
1234 	int i;
1235 
1236 	memset(part, 0, sizeof(*part));
1237 
1238 	/* Start from the mandatory descriptor region */
1239 	part->size = 4096;
1240 	part->name = "BIOS";
1241 
1242 	/*
1243 	 * Now try to find where this partition ends based on the flash
1244 	 * region registers.
1245 	 */
1246 	for (i = 1; i < ispi->nregions; i++) {
1247 		u32 region, base, limit;
1248 
1249 		region = readl(ispi->base + FREG(i));
1250 		base = region & FREG_BASE_MASK;
1251 		limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;
1252 
1253 		if (base >= limit || limit == 0)
1254 			continue;
1255 
1256 		/*
1257 		 * If any of the regions have protection bits set and
1258 		 * the ignore protection status parameter is not set,
1259 		 * make the whole partition read-only to be on the safe side.
1260 		 *
1261 		 * Also if the user did not ask the chip to be writeable
1262 		 * mask the bit too.
1263 		 */
1264 		if (!writeable || (!ignore_protection_status &&
1265 				   intel_spi_is_protected(ispi, base, limit))) {
1266 			part->mask_flags |= MTD_WRITEABLE;
1267 			ispi->protected = true;
1268 		}
1269 
1270 		end = (limit << 12) + 4096;
1271 		if (end > part->size)
1272 			part->size = end;
1273 	}
1274 
1275 	/*
1276 	 * Regions can refer to the second chip too so in this case we
1277 	 * just make the BIOS partition to occupy the whole chip.
1278 	 */
1279 	if (ispi->chip0_size && part->size > ispi->chip0_size)
1280 		part->size = MTDPART_SIZ_FULL;
1281 }
1282 
/*
 * Read the flash descriptor to determine the size of flash component 0
 * (stored in @ispi->chip0_size) and the number of flash components
 * (stored in @ispi->host->num_chipselect).  Returns 0 on success and
 * negative errno if the descriptor is missing or unsupported.
 */
static int intel_spi_read_desc(struct intel_spi *ispi)
{
	/*
	 * Template read op for intel_spi_read(); address and data buffer
	 * are filled in below before each call.
	 */
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 0),
			   SPI_MEM_OP_ADDR(3, 0, 0),
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_IN(0, NULL, 0));
	u32 buf[2], nc, fcba, flcomp;
	ssize_t ret;

	/* FLVALSIG and FLMAP0 live at flash offset 0x10 */
	op.addr.val = 0x10;
	op.data.buf.in = buf;
	op.data.nbytes = sizeof(buf);

	ret = intel_spi_read(ispi, NULL, NULL, &op);
	if (ret) {
		dev_warn(ispi->dev, "failed to read descriptor\n");
		return ret;
	}

	dev_dbg(ispi->dev, "FLVALSIG=0x%08x\n", buf[0]);
	dev_dbg(ispi->dev, "FLMAP0=0x%08x\n", buf[1]);

	/* Without the magic signature there is no valid descriptor */
	if (buf[0] != FLVALSIG_MAGIC) {
		dev_warn(ispi->dev, "descriptor signature not valid\n");
		return -ENODEV;
	}

	/* Flash component base address, stored in FLMAP0 in 16-byte units */
	fcba = (buf[1] & FLMAP0_FCBA_MASK) << 4;
	dev_dbg(ispi->dev, "FCBA=%#x\n", fcba);

	op.addr.val = fcba;
	op.data.buf.in = &flcomp;
	op.data.nbytes = sizeof(flcomp);

	ret = intel_spi_read(ispi, NULL, NULL, &op);
	if (ret) {
		dev_warn(ispi->dev, "failed to read FLCOMP\n");
		return -ENODEV;
	}

	dev_dbg(ispi->dev, "FLCOMP=0x%08x\n", flcomp);

	/* Map the component 0 density field to a size in bytes */
	switch (flcomp & FLCOMP_C0DEN_MASK) {
	case FLCOMP_C0DEN_512K:
		ispi->chip0_size = SZ_512K;
		break;
	case FLCOMP_C0DEN_1M:
		ispi->chip0_size = SZ_1M;
		break;
	case FLCOMP_C0DEN_2M:
		ispi->chip0_size = SZ_2M;
		break;
	case FLCOMP_C0DEN_4M:
		ispi->chip0_size = SZ_4M;
		break;
	case FLCOMP_C0DEN_8M:
		ispi->chip0_size = SZ_8M;
		break;
	case FLCOMP_C0DEN_16M:
		ispi->chip0_size = SZ_16M;
		break;
	case FLCOMP_C0DEN_32M:
		ispi->chip0_size = SZ_32M;
		break;
	case FLCOMP_C0DEN_64M:
		ispi->chip0_size = SZ_64M;
		break;
	case FLCOMP_C0DEN_128M:
		ispi->chip0_size = SZ_128M;
		break;
	default:
		dev_warn(ispi->dev, "unsupported C0DEN: %#lx\n",
			 flcomp & FLCOMP_C0DEN_MASK);
		return -EINVAL;
	}

	dev_dbg(ispi->dev, "chip0 size %zd KB\n", ispi->chip0_size / SZ_1K);

	/* NC field is the number of components minus one */
	nc = (buf[1] & FLMAP0_NC_MASK) >> FLMAP0_NC_SHIFT;
	if (!nc)
		ispi->host->num_chipselect = 1;
	else if (nc == 1)
		ispi->host->num_chipselect = 2;
	else
		return -EINVAL;

	dev_dbg(ispi->dev, "%u flash components found\n",
		ispi->host->num_chipselect);
	return 0;
}
1374 
1375 static int intel_spi_populate_chip(struct intel_spi *ispi)
1376 {
1377 	struct flash_platform_data *pdata;
1378 	struct mtd_partition *parts;
1379 	struct spi_board_info chip;
1380 	int ret;
1381 
1382 	ret = intel_spi_read_desc(ispi);
1383 	if (ret)
1384 		return ret;
1385 
1386 	pdata = devm_kzalloc(ispi->dev, sizeof(*pdata), GFP_KERNEL);
1387 	if (!pdata)
1388 		return -ENOMEM;
1389 
1390 	pdata->nr_parts = 1;
1391 	pdata->parts = devm_kcalloc(ispi->dev, pdata->nr_parts,
1392 				    sizeof(*pdata->parts), GFP_KERNEL);
1393 	if (!pdata->parts)
1394 		return -ENOMEM;
1395 
1396 	intel_spi_fill_partition(ispi, pdata->parts);
1397 
1398 	memset(&chip, 0, sizeof(chip));
1399 	snprintf(chip.modalias, 8, "spi-nor");
1400 	chip.platform_data = pdata;
1401 
1402 	if (!spi_new_device(ispi->host, &chip))
1403 		return -ENODEV;
1404 
1405 	/* Add the second chip if present */
1406 	if (ispi->host->num_chipselect < 2)
1407 		return 0;
1408 
1409 	pdata = devm_kzalloc(ispi->dev, sizeof(*pdata), GFP_KERNEL);
1410 	if (!pdata)
1411 		return -ENOMEM;
1412 
1413 	pdata->name = devm_kasprintf(ispi->dev, GFP_KERNEL, "%s-chip1",
1414 				     dev_name(ispi->dev));
1415 	if (!pdata->name)
1416 		return -ENOMEM;
1417 
1418 	pdata->nr_parts = 1;
1419 	parts = devm_kcalloc(ispi->dev, pdata->nr_parts, sizeof(*parts),
1420 			     GFP_KERNEL);
1421 	if (!parts)
1422 		return -ENOMEM;
1423 
1424 	parts[0].size = MTDPART_SIZ_FULL;
1425 	parts[0].name = "BIOS1";
1426 	pdata->parts = parts;
1427 
1428 	chip.platform_data = pdata;
1429 	chip.chip_select = 1;
1430 
1431 	if (!spi_new_device(ispi->host, &chip))
1432 		return -ENODEV;
1433 	return 0;
1434 }
1435 
1436 static ssize_t intel_spi_protected_show(struct device *dev,
1437 					struct device_attribute *attr, char *buf)
1438 {
1439 	struct intel_spi *ispi = dev_get_drvdata(dev);
1440 
1441 	return sysfs_emit(buf, "%d\n", ispi->protected);
1442 }
1443 static DEVICE_ATTR_ADMIN_RO(intel_spi_protected);
1444 
1445 static ssize_t intel_spi_locked_show(struct device *dev,
1446 				     struct device_attribute *attr, char *buf)
1447 {
1448 	struct intel_spi *ispi = dev_get_drvdata(dev);
1449 
1450 	return sysfs_emit(buf, "%d\n", ispi->locked);
1451 }
1452 static DEVICE_ATTR_ADMIN_RO(intel_spi_locked);
1453 
1454 static ssize_t intel_spi_bios_locked_show(struct device *dev,
1455 					  struct device_attribute *attr, char *buf)
1456 {
1457 	struct intel_spi *ispi = dev_get_drvdata(dev);
1458 
1459 	return sysfs_emit(buf, "%d\n", ispi->bios_locked);
1460 }
1461 static DEVICE_ATTR_ADMIN_RO(intel_spi_bios_locked);
1462 
/* Sysfs attributes exposing the lock/protection status above */
static struct attribute *intel_spi_attrs[] = {
	&dev_attr_intel_spi_protected.attr,
	&dev_attr_intel_spi_locked.attr,
	&dev_attr_intel_spi_bios_locked.attr,
	NULL
};

static const struct attribute_group intel_spi_attr_group = {
	.attrs = intel_spi_attrs,
};

/* Exported for the bus-specific glue drivers to attach to their device */
const struct attribute_group *intel_spi_groups[] = {
	&intel_spi_attr_group,
	NULL
};
EXPORT_SYMBOL_GPL(intel_spi_groups);
1479 
1480 /**
1481  * intel_spi_probe() - Probe the Intel SPI flash controller
1482  * @dev: Pointer to the parent device
1483  * @base: iomapped MMIO resource
1484  * @info: Platform specific information
1485  *
1486  * Probes Intel SPI flash controller and creates the flash chip device.
1487  * Returns %0 on success and negative errno in case of failure.
1488  */
1489 int intel_spi_probe(struct device *dev, void __iomem *base,
1490 		    const struct intel_spi_boardinfo *info)
1491 {
1492 	struct spi_controller *host;
1493 	struct intel_spi *ispi;
1494 	int ret;
1495 
1496 	host = devm_spi_alloc_host(dev, sizeof(*ispi));
1497 	if (!host)
1498 		return -ENOMEM;
1499 
1500 	host->mem_ops = &intel_spi_mem_ops;
1501 
1502 	ispi = spi_controller_get_devdata(host);
1503 
1504 	ispi->base = base;
1505 	ispi->dev = dev;
1506 	ispi->host = host;
1507 	ispi->info = info;
1508 
1509 	ret = intel_spi_init(ispi);
1510 	if (ret)
1511 		return ret;
1512 
1513 	ret = devm_spi_register_controller(dev, host);
1514 	if (ret)
1515 		return ret;
1516 
1517 	dev_set_drvdata(dev, ispi);
1518 	return intel_spi_populate_chip(ispi);
1519 }
1520 EXPORT_SYMBOL_GPL(intel_spi_probe);
1521 
/* Module metadata */
MODULE_DESCRIPTION("Intel PCH/PCU SPI flash core driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_LICENSE("GPL v2");
1525