// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel PCH/PCU SPI flash driver.
 *
 * Copyright (C) 2016 - 2022, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/iopoll.h>
#include <linux/module.h>

#include <linux/mtd/partitions.h>
#include <linux/mtd/spi-nor.h>

#include <linux/spi/flash.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

#include "spi-intel.h"

/* Offsets are from @ispi->base */
#define BFPREG				0x00

#define HSFSTS_CTL			0x04
#define HSFSTS_CTL_FSMIE		BIT(31)
#define HSFSTS_CTL_FDBC_SHIFT		24
#define HSFSTS_CTL_FDBC_MASK		(0x3f << HSFSTS_CTL_FDBC_SHIFT)

#define HSFSTS_CTL_FCYCLE_SHIFT		17
#define HSFSTS_CTL_FCYCLE_MASK		(0x0f << HSFSTS_CTL_FCYCLE_SHIFT)
/* HW sequencer opcodes */
#define HSFSTS_CTL_FCYCLE_READ		(0x00 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_WRITE		(0x02 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_ERASE		(0x03 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_ERASE_64K	(0x04 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_RDSFDP	(0x05 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_RDID		(0x06 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_WRSR		(0x07 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_RDSR		(0x08 << HSFSTS_CTL_FCYCLE_SHIFT)

#define HSFSTS_CTL_FGO			BIT(16)
#define HSFSTS_CTL_FLOCKDN		BIT(15)
#define HSFSTS_CTL_FDV			BIT(14)
#define HSFSTS_CTL_SCIP			BIT(5)
#define HSFSTS_CTL_AEL			BIT(2)
#define HSFSTS_CTL_FCERR		BIT(1)
#define HSFSTS_CTL_FDONE		BIT(0)
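/*
 * Note: FCERR and FDONE are status bits that the cycle helpers below OR
 * into the value written back to HSFSTS_CTL before setting FGO, clearing
 * any stale status left over from a previous cycle.
 */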

#define FADDR				0x08
#define DLOCK				0x0c
#define FDATA(n)			(0x10 + ((n) * 4))

#define FRACC				0x50

#define FREG(n)				(0x54 + ((n) * 4))
#define FREG_BASE_MASK			GENMASK(14, 0)
#define FREG_LIMIT_SHIFT		16
#define FREG_LIMIT_MASK			GENMASK(30, 16)

/* Offset is from @ispi->pregs */
#define PR(n)				((n) * 4)
#define PR_WPE				BIT(31)
#define PR_LIMIT_SHIFT			16
#define PR_LIMIT_MASK			GENMASK(30, 16)
#define PR_RPE				BIT(15)
#define PR_BASE_MASK			GENMASK(14, 0)
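/*
 * The protected range and flash region base/limit fields above are in
 * 4 KiB units, hence the << 12 shifts when these registers are decoded
 * below.
 */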

/* Offsets are from @ispi->sregs */
#define SSFSTS_CTL			0x00
#define SSFSTS_CTL_FSMIE		BIT(23)
#define SSFSTS_CTL_DS			BIT(22)
#define SSFSTS_CTL_DBC_SHIFT		16
#define SSFSTS_CTL_SPOP			BIT(11)
#define SSFSTS_CTL_ACS			BIT(10)
#define SSFSTS_CTL_SCGO			BIT(9)
#define SSFSTS_CTL_COP_SHIFT		12
#define SSFSTS_CTL_FRS			BIT(7)
#define SSFSTS_CTL_DOFRS		BIT(6)
#define SSFSTS_CTL_AEL			BIT(4)
#define SSFSTS_CTL_FCERR		BIT(3)
#define SSFSTS_CTL_FDONE		BIT(2)
#define SSFSTS_CTL_SCIP			BIT(0)

#define PREOP_OPTYPE			0x04
#define OPMENU0				0x08
#define OPMENU1				0x0c

#define OPTYPE_READ_NO_ADDR		0
#define OPTYPE_WRITE_NO_ADDR		1
#define OPTYPE_READ_WITH_ADDR		2
#define OPTYPE_WRITE_WITH_ADDR		3

/* CPU specifics */
#define BYT_PR				0x74
#define BYT_SSFSTS_CTL			0x90
#define BYT_FREG_NUM			5
#define BYT_PR_NUM			5

#define LPT_PR				0x74
#define LPT_SSFSTS_CTL			0x90
#define LPT_FREG_NUM			5
#define LPT_PR_NUM			5

#define BXT_PR				0x84
#define BXT_SSFSTS_CTL			0xa0
#define BXT_FREG_NUM			12
#define BXT_PR_NUM			5

#define CNL_PR				0x84
#define CNL_FREG_NUM			6
#define CNL_PR_NUM			5

#define LVSCC				0xc4
#define UVSCC				0xc8
#define ERASE_OPCODE_SHIFT		8
#define ERASE_OPCODE_MASK		(0xff << ERASE_OPCODE_SHIFT)
#define ERASE_64K_OPCODE_SHIFT		16
#define ERASE_64K_OPCODE_MASK		(0xff << ERASE_64K_OPCODE_SHIFT)

/* Flash descriptor fields */
#define FLVALSIG_MAGIC			0x0ff0a55a
#define FLMAP0_NC_MASK			GENMASK(9, 8)
#define FLMAP0_NC_SHIFT			8
#define FLMAP0_FCBA_MASK		GENMASK(7, 0)

#define FLCOMP_C0DEN_MASK		GENMASK(3, 0)
#define FLCOMP_C0DEN_512K		0x00
#define FLCOMP_C0DEN_1M			0x01
#define FLCOMP_C0DEN_2M			0x02
#define FLCOMP_C0DEN_4M			0x03
#define FLCOMP_C0DEN_8M			0x04
#define FLCOMP_C0DEN_16M		0x05
#define FLCOMP_C0DEN_32M		0x06
#define FLCOMP_C0DEN_64M		0x07

#define INTEL_SPI_TIMEOUT		5000 /* ms */
#define INTEL_SPI_FIFO_SZ		64

/**
 * struct intel_spi - Driver private data
 * @dev: Device pointer
 * @info: Pointer to board specific info
 * @base: Beginning of MMIO space
 * @pregs: Start of protection registers
 * @sregs: Start of software sequencer registers
 * @host: Pointer to the SPI controller structure
 * @nregions: Maximum number of regions
 * @pr_num: Maximum number of protected range registers
 * @chip0_size: Size of the first flash chip in bytes
 * @locked: Is SPI setting locked
 * @protected: Whether the regions are write protected
 * @bios_locked: Is BIOS region locked
 * @swseq_reg: Use SW sequencer in register reads/writes
 * @swseq_erase: Use SW sequencer in erase operation
 * @atomic_preopcode: Holds preopcode when atomic sequence is requested
 * @opcodes: Opcodes which are supported. These are programmed by the BIOS
 *           before it locks down the controller.
 * @mem_ops: Pointer to SPI MEM ops supported by the controller
 */
struct intel_spi {
	struct device *dev;
	const struct intel_spi_boardinfo *info;
	void __iomem *base;
	void __iomem *pregs;
	void __iomem *sregs;
	struct spi_controller *host;
	size_t nregions;
	size_t pr_num;
	size_t chip0_size;
	bool locked;
	bool protected;
	bool bios_locked;
	bool swseq_reg;
	bool swseq_erase;
	u8 atomic_preopcode;
	u8 opcodes[8];
	const struct intel_spi_mem_op *mem_ops;
};

struct intel_spi_mem_op {
	struct spi_mem_op mem_op;
	u32 replacement_op;
	int (*exec_op)(struct intel_spi *ispi,
		       const struct spi_mem *mem,
		       const struct intel_spi_mem_op *iop,
		       const struct spi_mem_op *op);
};

static bool writeable;
module_param(writeable, bool, 0);
MODULE_PARM_DESC(writeable, "Enable write access to SPI flash chip (default=0)");
static bool ignore_protection_status;
module_param(ignore_protection_status, bool, 0);
MODULE_PARM_DESC(
	ignore_protection_status,
	"Do not block SPI flash chip write access even if it is write-protected (default=0)");

static void intel_spi_dump_regs(struct intel_spi *ispi)
{
	u32 value;
	int i;

	dev_dbg(ispi->dev, "BFPREG=0x%08x\n", readl(ispi->base + BFPREG));

	value = readl(ispi->base + HSFSTS_CTL);
	dev_dbg(ispi->dev, "HSFSTS_CTL=0x%08x\n", value);
	if (value & HSFSTS_CTL_FLOCKDN)
		dev_dbg(ispi->dev, "-> Locked\n");

	dev_dbg(ispi->dev, "FADDR=0x%08x\n", readl(ispi->base + FADDR));
	dev_dbg(ispi->dev, "DLOCK=0x%08x\n", readl(ispi->base + DLOCK));

	for (i = 0; i < 16; i++)
		dev_dbg(ispi->dev, "FDATA(%d)=0x%08x\n",
			i, readl(ispi->base + FDATA(i)));

	dev_dbg(ispi->dev, "FRACC=0x%08x\n", readl(ispi->base + FRACC));

	for (i = 0; i < ispi->nregions; i++)
		dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i,
			readl(ispi->base + FREG(i)));
	for (i = 0; i < ispi->pr_num; i++)
		dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i,
			readl(ispi->pregs + PR(i)));

	if (ispi->sregs) {
		value = readl(ispi->sregs + SSFSTS_CTL);
		dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value);
		dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n",
			readl(ispi->sregs + PREOP_OPTYPE));
		dev_dbg(ispi->dev, "OPMENU0=0x%08x\n",
			readl(ispi->sregs + OPMENU0));
		dev_dbg(ispi->dev, "OPMENU1=0x%08x\n",
			readl(ispi->sregs + OPMENU1));
	}

	dev_dbg(ispi->dev, "LVSCC=0x%08x\n", readl(ispi->base + LVSCC));
	dev_dbg(ispi->dev, "UVSCC=0x%08x\n", readl(ispi->base + UVSCC));

	dev_dbg(ispi->dev, "Protected regions:\n");
	for (i = 0; i < ispi->pr_num; i++) {
		u32 base, limit;

		value = readl(ispi->pregs + PR(i));
		if (!(value & (PR_WPE | PR_RPE)))
			continue;

		limit = (value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
		base = value & PR_BASE_MASK;

		dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x [%c%c]\n",
			i, base << 12, (limit << 12) | 0xfff,
			value & PR_WPE ? 'W' : '.', value & PR_RPE ? 'R' : '.');
	}

	dev_dbg(ispi->dev, "Flash regions:\n");
	for (i = 0; i < ispi->nregions; i++) {
		u32 region, base, limit;

		region = readl(ispi->base + FREG(i));
		base = region & FREG_BASE_MASK;
		limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;

		if (base >= limit || (i > 0 && limit == 0))
			dev_dbg(ispi->dev, " %02d disabled\n", i);
		else
			dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x\n",
				i, base << 12, (limit << 12) | 0xfff);
	}

	dev_dbg(ispi->dev, "Using %cW sequencer for register access\n",
		ispi->swseq_reg ? 'S' : 'H');
	dev_dbg(ispi->dev, "Using %cW sequencer for erase operation\n",
		ispi->swseq_erase ? 'S' : 'H');
}

/* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */
static int intel_spi_read_block(struct intel_spi *ispi, void *buf, size_t size)
{
	size_t bytes;
	int i = 0;

	if (size > INTEL_SPI_FIFO_SZ)
		return -EINVAL;

	while (size > 0) {
		bytes = min_t(size_t, size, 4);
		memcpy_fromio(buf, ispi->base + FDATA(i), bytes);
		size -= bytes;
		buf += bytes;
		i++;
	}

	return 0;
}

/* Writes max INTEL_SPI_FIFO_SZ bytes to the device fifo */
static int intel_spi_write_block(struct intel_spi *ispi, const void *buf,
				 size_t size)
{
	size_t bytes;
	int i = 0;

	if (size > INTEL_SPI_FIFO_SZ)
		return -EINVAL;

	while (size > 0) {
		bytes = min_t(size_t, size, 4);
		memcpy_toio(ispi->base + FDATA(i), buf, bytes);
		size -= bytes;
		buf += bytes;
		i++;
	}

	return 0;
}

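/*
 * Both wait helpers below poll the "SPI cycle in progress" (SCIP) bit of
 * the respective sequencer, giving up after INTEL_SPI_TIMEOUT ms.
 */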
static int intel_spi_wait_hw_busy(struct intel_spi *ispi)
{
	u32 val;

	return readl_poll_timeout(ispi->base + HSFSTS_CTL, val,
				  !(val & HSFSTS_CTL_SCIP), 0,
				  INTEL_SPI_TIMEOUT * 1000);
}

static int intel_spi_wait_sw_busy(struct intel_spi *ispi)
{
	u32 val;

	return readl_poll_timeout(ispi->sregs + SSFSTS_CTL, val,
				  !(val & SSFSTS_CTL_SCIP), 0,
				  INTEL_SPI_TIMEOUT * 1000);
}

static bool intel_spi_set_writeable(struct intel_spi *ispi)
{
	if (!ispi->info->set_writeable)
		return false;

	return ispi->info->set_writeable(ispi->base, ispi->info->data);
}

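/*
 * Looks up the slot of @opcode in the OPMENU registers. If the controller
 * is not locked down, the opcode is programmed into slot 0 on the fly
 * together with its operation type.
 */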
static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype)
{
	int i;
	int preop;

	if (ispi->locked) {
		for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++)
			if (ispi->opcodes[i] == opcode)
				return i;

		return -EINVAL;
	}

	/* The lock is off, so just use index 0 */
	writel(opcode, ispi->sregs + OPMENU0);
	preop = readw(ispi->sregs + PREOP_OPTYPE);
	writel(optype << 16 | preop, ispi->sregs + PREOP_OPTYPE);

	return 0;
}

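/*
 * Runs a single hardware sequencer cycle: the SPI NOR opcode has already
 * been mapped to one of the fixed HSFSTS_CTL_FCYCLE_* cycles stored in
 * @iop->replacement_op.
 */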
static int intel_spi_hw_cycle(struct intel_spi *ispi,
			      const struct intel_spi_mem_op *iop, size_t len)
{
	u32 val, status;
	int ret;

	if (!iop->replacement_op)
		return -EINVAL;

	val = readl(ispi->base + HSFSTS_CTL);
	val &= ~(HSFSTS_CTL_FCYCLE_MASK | HSFSTS_CTL_FDBC_MASK);
	val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT;
	val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
	val |= HSFSTS_CTL_FGO;
	val |= iop->replacement_op;
	writel(val, ispi->base + HSFSTS_CTL);

	ret = intel_spi_wait_hw_busy(ispi);
	if (ret)
		return ret;

	status = readl(ispi->base + HSFSTS_CTL);
	if (status & HSFSTS_CTL_FCERR)
		return -EIO;
	else if (status & HSFSTS_CTL_AEL)
		return -EACCES;

	return 0;
}

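/*
 * Runs a single software sequencer cycle. When an atomic preopcode (such
 * as Write Enable) has been latched by intel_spi_write_reg(), the matching
 * PREOP slot is selected and the atomic sequence bit is set so that the
 * controller sends the preopcode and @opcode back to back.
 */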
static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, size_t len,
			      int optype)
{
	u32 val = 0, status;
	u8 atomic_preopcode;
	int ret;

	ret = intel_spi_opcode_index(ispi, opcode, optype);
	if (ret < 0)
		return ret;

	/*
	 * Always clear it after each SW sequencer operation regardless
	 * of whether it is successful or not.
	 */
	atomic_preopcode = ispi->atomic_preopcode;
	ispi->atomic_preopcode = 0;

	/* Only mark 'Data Cycle' bit when there is data to be transferred */
	if (len > 0)
		val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
	val |= ret << SSFSTS_CTL_COP_SHIFT;
	val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE;
	val |= SSFSTS_CTL_SCGO;
	if (atomic_preopcode) {
		u16 preop;

		switch (optype) {
		case OPTYPE_WRITE_NO_ADDR:
		case OPTYPE_WRITE_WITH_ADDR:
			/* Pick matching preopcode for the atomic sequence */
			preop = readw(ispi->sregs + PREOP_OPTYPE);
			if ((preop & 0xff) == atomic_preopcode)
				; /* Do nothing */
			else if ((preop >> 8) == atomic_preopcode)
				val |= SSFSTS_CTL_SPOP;
			else
				return -EINVAL;

			/* Enable atomic sequence */
			val |= SSFSTS_CTL_ACS;
			break;

		default:
			return -EINVAL;
		}
	}
	writel(val, ispi->sregs + SSFSTS_CTL);

	ret = intel_spi_wait_sw_busy(ispi);
	if (ret)
		return ret;

	status = readl(ispi->sregs + SSFSTS_CTL);
	if (status & SSFSTS_CTL_FCERR)
		return -EIO;
	else if (status & SSFSTS_CTL_AEL)
		return -EACCES;

	return 0;
}

static u32 intel_spi_chip_addr(const struct intel_spi *ispi,
			       const struct spi_mem *mem)
{
	/* Pick up the correct start address */
	if (!mem)
		return 0;
	return (spi_get_chipselect(mem->spi, 0) == 1) ? ispi->chip0_size : 0;
}

static int intel_spi_read_reg(struct intel_spi *ispi, const struct spi_mem *mem,
			      const struct intel_spi_mem_op *iop,
			      const struct spi_mem_op *op)
{
	u32 addr = intel_spi_chip_addr(ispi, mem) + op->addr.val;
	size_t nbytes = op->data.nbytes;
	u8 opcode = op->cmd.opcode;
	int ret;

	writel(addr, ispi->base + FADDR);

	if (ispi->swseq_reg)
		ret = intel_spi_sw_cycle(ispi, opcode, nbytes,
					 OPTYPE_READ_NO_ADDR);
	else
		ret = intel_spi_hw_cycle(ispi, iop, nbytes);

	if (ret)
		return ret;

	return intel_spi_read_block(ispi, op->data.buf.in, nbytes);
}

static int intel_spi_write_reg(struct intel_spi *ispi, const struct spi_mem *mem,
			       const struct intel_spi_mem_op *iop,
			       const struct spi_mem_op *op)
{
	u32 addr = intel_spi_chip_addr(ispi, mem) + op->addr.val;
	size_t nbytes = op->data.nbytes;
	u8 opcode = op->cmd.opcode;
	int ret;

	/*
	 * This is handled with atomic operation and preop code in Intel
	 * controller so we only verify that it is available. If the
	 * controller is not locked, program the opcode to the PREOP
	 * register for later use.
	 *
	 * When hardware sequencer is used there is no need to program
	 * any opcodes (it handles them automatically as part of a command).
	 */
	if (opcode == SPINOR_OP_WREN) {
		u16 preop;

		if (!ispi->swseq_reg)
			return 0;

		preop = readw(ispi->sregs + PREOP_OPTYPE);
		if ((preop & 0xff) != opcode && (preop >> 8) != opcode) {
			if (ispi->locked)
				return -EINVAL;
			writel(opcode, ispi->sregs + PREOP_OPTYPE);
		}

		/*
		 * This enables atomic sequence on next SW cycle. Will
		 * be cleared after next operation.
		 */
		ispi->atomic_preopcode = opcode;
		return 0;
	}

	/*
	 * We hope that HW sequencer will do the right thing automatically and
	 * with the SW sequencer we cannot use preopcode anyway, so just ignore
	 * the Write Disable operation and pretend it was completed
	 * successfully.
	 */
	if (opcode == SPINOR_OP_WRDI)
		return 0;

	writel(addr, ispi->base + FADDR);

	/* Write the value beforehand */
	ret = intel_spi_write_block(ispi, op->data.buf.out, nbytes);
	if (ret)
		return ret;

	if (ispi->swseq_reg)
		return intel_spi_sw_cycle(ispi, opcode, nbytes,
					  OPTYPE_WRITE_NO_ADDR);
	return intel_spi_hw_cycle(ispi, iop, nbytes);
}

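/*
 * Hardware sequencer read: the transfer is split into chunks of at most
 * INTEL_SPI_FIFO_SZ bytes, each further trimmed so that it never crosses
 * a 4K boundary as the controller requires.
 */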
static int intel_spi_read(struct intel_spi *ispi, const struct spi_mem *mem,
			  const struct intel_spi_mem_op *iop,
			  const struct spi_mem_op *op)
{
	u32 addr = intel_spi_chip_addr(ispi, mem) + op->addr.val;
	size_t block_size, nbytes = op->data.nbytes;
	void *read_buf = op->data.buf.in;
	u32 val, status;
	int ret;

	/*
	 * Atomic sequence is not expected with HW sequencer reads. Make
	 * sure it is cleared regardless.
	 */
	if (WARN_ON_ONCE(ispi->atomic_preopcode))
		ispi->atomic_preopcode = 0;

	while (nbytes > 0) {
		block_size = min_t(size_t, nbytes, INTEL_SPI_FIFO_SZ);

		/* Read cannot cross 4K boundary */
		block_size = min_t(loff_t, addr + block_size,
				   round_up(addr + 1, SZ_4K)) - addr;

		writel(addr, ispi->base + FADDR);

		val = readl(ispi->base + HSFSTS_CTL);
		val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
		val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
		val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
		val |= HSFSTS_CTL_FCYCLE_READ;
		val |= HSFSTS_CTL_FGO;
		writel(val, ispi->base + HSFSTS_CTL);

		ret = intel_spi_wait_hw_busy(ispi);
		if (ret)
			return ret;

		status = readl(ispi->base + HSFSTS_CTL);
		if (status & HSFSTS_CTL_FCERR)
			ret = -EIO;
		else if (status & HSFSTS_CTL_AEL)
			ret = -EACCES;

		if (ret < 0) {
			dev_err(ispi->dev, "read error: %x: %#x\n", addr, status);
			return ret;
		}

		ret = intel_spi_read_block(ispi, read_buf, block_size);
		if (ret)
			return ret;

		nbytes -= block_size;
		addr += block_size;
		read_buf += block_size;
	}

	return 0;
}

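/*
 * Hardware sequencer write: mirrors the chunking in intel_spi_read(),
 * except that the FIFO is filled before the cycle is started with FGO.
 */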
static int intel_spi_write(struct intel_spi *ispi, const struct spi_mem *mem,
			   const struct intel_spi_mem_op *iop,
			   const struct spi_mem_op *op)
{
	u32 addr = intel_spi_chip_addr(ispi, mem) + op->addr.val;
	size_t block_size, nbytes = op->data.nbytes;
	const void *write_buf = op->data.buf.out;
	u32 val, status;
	int ret;

	/* Not needed with HW sequencer write, make sure it is cleared */
	ispi->atomic_preopcode = 0;

	while (nbytes > 0) {
		block_size = min_t(size_t, nbytes, INTEL_SPI_FIFO_SZ);

		/* Write cannot cross 4K boundary */
		block_size = min_t(loff_t, addr + block_size,
				   round_up(addr + 1, SZ_4K)) - addr;

		writel(addr, ispi->base + FADDR);

		val = readl(ispi->base + HSFSTS_CTL);
		val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
		val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
		val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
		val |= HSFSTS_CTL_FCYCLE_WRITE;

		ret = intel_spi_write_block(ispi, write_buf, block_size);
		if (ret) {
			dev_err(ispi->dev, "failed to write block\n");
			return ret;
		}

		/* Start the write now */
		val |= HSFSTS_CTL_FGO;
		writel(val, ispi->base + HSFSTS_CTL);

		ret = intel_spi_wait_hw_busy(ispi);
		if (ret) {
			dev_err(ispi->dev, "timeout\n");
			return ret;
		}

		status = readl(ispi->base + HSFSTS_CTL);
		if (status & HSFSTS_CTL_FCERR)
			ret = -EIO;
		else if (status & HSFSTS_CTL_AEL)
			ret = -EACCES;

		if (ret < 0) {
			dev_err(ispi->dev, "write error: %x: %#x\n", addr, status);
			return ret;
		}

		nbytes -= block_size;
		addr += block_size;
		write_buf += block_size;
	}

	return 0;
}

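/*
 * Erase dispatches to the SW sequencer when intel_spi_init() found no
 * valid erase opcode in the LVSCC/UVSCC registers; otherwise a HW
 * sequencer erase cycle is run with the replacement opcode from @iop.
 */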
static int intel_spi_erase(struct intel_spi *ispi, const struct spi_mem *mem,
			   const struct intel_spi_mem_op *iop,
			   const struct spi_mem_op *op)
{
	u32 addr = intel_spi_chip_addr(ispi, mem) + op->addr.val;
	u8 opcode = op->cmd.opcode;
	u32 val, status;
	int ret;

	writel(addr, ispi->base + FADDR);

	if (ispi->swseq_erase)
		return intel_spi_sw_cycle(ispi, opcode, 0,
					  OPTYPE_WRITE_WITH_ADDR);

	/* Not needed with HW sequencer erase, make sure it is cleared */
	ispi->atomic_preopcode = 0;

	val = readl(ispi->base + HSFSTS_CTL);
	val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
	val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
	val |= HSFSTS_CTL_FGO;
	val |= iop->replacement_op;
	writel(val, ispi->base + HSFSTS_CTL);

	ret = intel_spi_wait_hw_busy(ispi);
	if (ret)
		return ret;

	status = readl(ispi->base + HSFSTS_CTL);
	if (status & HSFSTS_CTL_FCERR)
		return -EIO;
	if (status & HSFSTS_CTL_AEL)
		return -EACCES;

	return 0;
}

static int intel_spi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	op->data.nbytes = clamp_val(op->data.nbytes, 0, INTEL_SPI_FIFO_SZ);
	return 0;
}

static bool intel_spi_cmp_mem_op(const struct intel_spi_mem_op *iop,
				 const struct spi_mem_op *op)
{
	if (iop->mem_op.cmd.nbytes != op->cmd.nbytes ||
	    iop->mem_op.cmd.buswidth != op->cmd.buswidth ||
	    iop->mem_op.cmd.dtr != op->cmd.dtr)
		return false;

	if (iop->mem_op.addr.nbytes != op->addr.nbytes ||
	    iop->mem_op.addr.dtr != op->addr.dtr)
		return false;

	if (iop->mem_op.data.dir != op->data.dir ||
	    iop->mem_op.data.dtr != op->data.dtr)
		return false;

	if (iop->mem_op.data.dir != SPI_MEM_NO_DATA) {
		if (iop->mem_op.data.buswidth != op->data.buswidth)
			return false;
	}

	return true;
}

static const struct intel_spi_mem_op *
intel_spi_match_mem_op(struct intel_spi *ispi, const struct spi_mem_op *op)
{
	const struct intel_spi_mem_op *iop;

	for (iop = ispi->mem_ops; iop->mem_op.cmd.opcode; iop++) {
		if (iop->mem_op.cmd.opcode == op->cmd.opcode &&
		    intel_spi_cmp_mem_op(iop, op))
			return iop;
	}

	return NULL;
}

static bool intel_spi_supports_mem_op(struct spi_mem *mem,
				      const struct spi_mem_op *op)
{
	struct intel_spi *ispi = spi_controller_get_devdata(mem->spi->controller);
	const struct intel_spi_mem_op *iop;

	iop = intel_spi_match_mem_op(ispi, op);
	if (!iop) {
		dev_dbg(ispi->dev, "%#x not supported\n", op->cmd.opcode);
		return false;
	}

	/*
	 * For software sequencer check that the opcode is actually
	 * present in the opmenu if it is locked.
	 */
	if (ispi->swseq_reg && ispi->locked) {
		int i;

		/* Check if it is in the locked opcodes list */
		for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++) {
			if (ispi->opcodes[i] == op->cmd.opcode)
				return true;
		}

		dev_dbg(ispi->dev, "%#x not supported\n", op->cmd.opcode);
		return false;
	}

	return true;
}

static int intel_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct intel_spi *ispi = spi_controller_get_devdata(mem->spi->controller);
	const struct intel_spi_mem_op *iop;

	iop = intel_spi_match_mem_op(ispi, op);
	if (!iop)
		return -EOPNOTSUPP;

	return iop->exec_op(ispi, mem, iop, op);
}

static const char *intel_spi_get_name(struct spi_mem *mem)
{
	const struct intel_spi *ispi = spi_controller_get_devdata(mem->spi->controller);

	/*
	 * Return name of the flash controller device to be compatible
	 * with the MTD version.
	 */
	return dev_name(ispi->dev);
}

static int intel_spi_dirmap_create(struct spi_mem_dirmap_desc *desc)
{
	struct intel_spi *ispi = spi_controller_get_devdata(desc->mem->spi->controller);
	const struct intel_spi_mem_op *iop;

	iop = intel_spi_match_mem_op(ispi, &desc->info.op_tmpl);
	if (!iop)
		return -EOPNOTSUPP;

	desc->priv = (void *)iop;
	return 0;
}

static ssize_t intel_spi_dirmap_read(struct spi_mem_dirmap_desc *desc, u64 offs,
				     size_t len, void *buf)
{
	struct intel_spi *ispi = spi_controller_get_devdata(desc->mem->spi->controller);
	const struct intel_spi_mem_op *iop = desc->priv;
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	/* Fill in the gaps */
	op.addr.val = offs;
	op.data.nbytes = len;
	op.data.buf.in = buf;

	ret = iop->exec_op(ispi, desc->mem, iop, &op);
	return ret ? ret : len;
}

static ssize_t intel_spi_dirmap_write(struct spi_mem_dirmap_desc *desc, u64 offs,
				      size_t len, const void *buf)
{
	struct intel_spi *ispi = spi_controller_get_devdata(desc->mem->spi->controller);
	const struct intel_spi_mem_op *iop = desc->priv;
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = offs;
	op.data.nbytes = len;
	op.data.buf.out = buf;

	ret = iop->exec_op(ispi, desc->mem, iop, &op);
	return ret ? ret : len;
}

static const struct spi_controller_mem_ops intel_spi_mem_ops = {
	.adjust_op_size = intel_spi_adjust_op_size,
	.supports_op = intel_spi_supports_mem_op,
	.exec_op = intel_spi_exec_mem_op,
	.get_name = intel_spi_get_name,
	.dirmap_create = intel_spi_dirmap_create,
	.dirmap_read = intel_spi_dirmap_read,
	.dirmap_write = intel_spi_dirmap_write,
};

#define INTEL_SPI_OP_ADDR(__nbytes)					\
	{								\
		.nbytes = __nbytes,					\
	}

#define INTEL_SPI_OP_NO_DATA						\
	{								\
		.dir = SPI_MEM_NO_DATA,					\
	}

#define INTEL_SPI_OP_DATA_IN(__buswidth)				\
	{								\
		.dir = SPI_MEM_DATA_IN,					\
		.buswidth = __buswidth,					\
	}

#define INTEL_SPI_OP_DATA_OUT(__buswidth)				\
	{								\
		.dir = SPI_MEM_DATA_OUT,				\
		.buswidth = __buswidth,					\
	}

#define INTEL_SPI_MEM_OP(__cmd, __addr, __data, __exec_op)		\
	{								\
		.mem_op = {						\
			.cmd = __cmd,					\
			.addr = __addr,					\
			.data = __data,					\
		},							\
		.exec_op = __exec_op,					\
	}

#define INTEL_SPI_MEM_OP_REPL(__cmd, __addr, __data, __exec_op, __repl)	\
	{								\
		.mem_op = {						\
			.cmd = __cmd,					\
			.addr = __addr,					\
			.data = __data,					\
		},							\
		.exec_op = __exec_op,					\
		.replacement_op = __repl,				\
	}

/*
 * The controller handles pretty much everything internally based on the
 * SFDP data but we want to make sure we only support the operations
 * actually possible. Only check buswidth and transfer direction, the
 * core validates data.
 */
#define INTEL_SPI_GENERIC_OPS						\
	/* Status register operations */				\
	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1),	\
			      SPI_MEM_OP_NO_ADDR,			\
			      INTEL_SPI_OP_DATA_IN(1),			\
			      intel_spi_read_reg,			\
			      HSFSTS_CTL_FCYCLE_RDID),			\
	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1),	\
			      SPI_MEM_OP_NO_ADDR,			\
			      INTEL_SPI_OP_DATA_IN(1),			\
			      intel_spi_read_reg,			\
			      HSFSTS_CTL_FCYCLE_RDSR),			\
	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),	\
			      SPI_MEM_OP_NO_ADDR,			\
			      INTEL_SPI_OP_DATA_OUT(1),			\
			      intel_spi_write_reg,			\
			      HSFSTS_CTL_FCYCLE_WRSR),			\
	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_RDSFDP, 1),	\
			      INTEL_SPI_OP_ADDR(3),			\
			      INTEL_SPI_OP_DATA_IN(1),			\
			      intel_spi_read_reg,			\
			      HSFSTS_CTL_FCYCLE_RDSFDP),		\
	/* Normal read */						\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1),		\
			 INTEL_SPI_OP_ADDR(3),				\
			 INTEL_SPI_OP_DATA_IN(1),			\
			 intel_spi_read),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1),		\
			 INTEL_SPI_OP_ADDR(3),				\
			 INTEL_SPI_OP_DATA_IN(2),			\
			 intel_spi_read),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1),		\
			 INTEL_SPI_OP_ADDR(3),				\
			 INTEL_SPI_OP_DATA_IN(4),			\
			 intel_spi_read),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1),		\
			 INTEL_SPI_OP_ADDR(4),				\
			 INTEL_SPI_OP_DATA_IN(1),			\
			 intel_spi_read),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1),		\
			 INTEL_SPI_OP_ADDR(4),				\
			 INTEL_SPI_OP_DATA_IN(2),			\
			 intel_spi_read),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1),		\
			 INTEL_SPI_OP_ADDR(4),				\
			 INTEL_SPI_OP_DATA_IN(4),			\
			 intel_spi_read),				\
	/* Fast read */							\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1),	\
			 INTEL_SPI_OP_ADDR(3),				\
			 INTEL_SPI_OP_DATA_IN(1),			\
			 intel_spi_read),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1),	\
			 INTEL_SPI_OP_ADDR(3),				\
			 INTEL_SPI_OP_DATA_IN(2),			\
			 intel_spi_read),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1),	\
			 INTEL_SPI_OP_ADDR(3),				\
			 INTEL_SPI_OP_DATA_IN(4),			\
			 intel_spi_read),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1),	\
			 INTEL_SPI_OP_ADDR(4),				\
			 INTEL_SPI_OP_DATA_IN(1),			\
			 intel_spi_read),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1),	\
			 INTEL_SPI_OP_ADDR(4),				\
			 INTEL_SPI_OP_DATA_IN(2),			\
			 intel_spi_read),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1),	\
			 INTEL_SPI_OP_ADDR(4),				\
			 INTEL_SPI_OP_DATA_IN(4),			\
			 intel_spi_read),				\
	/* Read with 4-byte address opcode */				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1),		\
			 INTEL_SPI_OP_ADDR(4),				\
			 INTEL_SPI_OP_DATA_IN(1),			\
			 intel_spi_read),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1),		\
			 INTEL_SPI_OP_ADDR(4),				\
			 INTEL_SPI_OP_DATA_IN(2),			\
			 intel_spi_read),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1),		\
			 INTEL_SPI_OP_ADDR(4),				\
			 INTEL_SPI_OP_DATA_IN(4),			\
			 intel_spi_read),				\
	/* Fast read with 4-byte address opcode */			\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1),	\
			 INTEL_SPI_OP_ADDR(4),				\
			 INTEL_SPI_OP_DATA_IN(1),			\
			 intel_spi_read),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1),	\
			 INTEL_SPI_OP_ADDR(4),				\
			 INTEL_SPI_OP_DATA_IN(2),			\
			 intel_spi_read),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1),	\
			 INTEL_SPI_OP_ADDR(4),				\
			 INTEL_SPI_OP_DATA_IN(4),			\
			 intel_spi_read),				\
	/* Write operations */						\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP, 1),		\
			 INTEL_SPI_OP_ADDR(3),				\
			 INTEL_SPI_OP_DATA_OUT(1),			\
			 intel_spi_write),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP, 1),		\
			 INTEL_SPI_OP_ADDR(4),				\
			 INTEL_SPI_OP_DATA_OUT(1),			\
			 intel_spi_write),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP_4B, 1),		\
			 INTEL_SPI_OP_ADDR(4),				\
			 INTEL_SPI_OP_DATA_OUT(1),			\
			 intel_spi_write),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1),		\
			 SPI_MEM_OP_NO_ADDR,				\
			 SPI_MEM_OP_NO_DATA,				\
			 intel_spi_write_reg),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1),		\
			 SPI_MEM_OP_NO_ADDR,				\
			 SPI_MEM_OP_NO_DATA,				\
			 intel_spi_write_reg),				\
	/* Erase operations */						\
	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K, 1),	\
			      INTEL_SPI_OP_ADDR(3),			\
			      SPI_MEM_OP_NO_DATA,			\
			      intel_spi_erase,				\
			      HSFSTS_CTL_FCYCLE_ERASE),			\
	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K, 1),	\
			      INTEL_SPI_OP_ADDR(4),			\
			      SPI_MEM_OP_NO_DATA,			\
			      intel_spi_erase,				\
			      HSFSTS_CTL_FCYCLE_ERASE),			\
	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K_4B, 1),	\
			      INTEL_SPI_OP_ADDR(4),			\
			      SPI_MEM_OP_NO_DATA,			\
			      intel_spi_erase,				\
			      HSFSTS_CTL_FCYCLE_ERASE)			\

static const struct intel_spi_mem_op generic_mem_ops[] = {
	INTEL_SPI_GENERIC_OPS,
	{ },
};

static const struct intel_spi_mem_op erase_64k_mem_ops[] = {
	INTEL_SPI_GENERIC_OPS,
	/* 64k sector erase operations */
	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE, 1),
			      INTEL_SPI_OP_ADDR(3),
			      SPI_MEM_OP_NO_DATA,
			      intel_spi_erase,
			      HSFSTS_CTL_FCYCLE_ERASE_64K),
	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE, 1),
			      INTEL_SPI_OP_ADDR(4),
			      SPI_MEM_OP_NO_DATA,
			      intel_spi_erase,
			      HSFSTS_CTL_FCYCLE_ERASE_64K),
	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE_4B, 1),
			      INTEL_SPI_OP_ADDR(4),
			      SPI_MEM_OP_NO_DATA,
			      intel_spi_erase,
			      HSFSTS_CTL_FCYCLE_ERASE_64K),
	{ },
};

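/*
 * Detects the controller generation, figures out which sequencers can be
 * used for register, erase and 64k erase operations and snapshots the
 * BIOS-programmed opcodes if the configuration is already locked down.
 */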
static int intel_spi_init(struct intel_spi *ispi)
{
	u32 opmenu0, opmenu1, lvscc, uvscc, val;
	bool erase_64k = false;
	int i;

	switch (ispi->info->type) {
	case INTEL_SPI_BYT:
		ispi->sregs = ispi->base + BYT_SSFSTS_CTL;
		ispi->pregs = ispi->base + BYT_PR;
		ispi->nregions = BYT_FREG_NUM;
		ispi->pr_num = BYT_PR_NUM;
		ispi->swseq_reg = true;
		break;

	case INTEL_SPI_LPT:
		ispi->sregs = ispi->base + LPT_SSFSTS_CTL;
		ispi->pregs = ispi->base + LPT_PR;
		ispi->nregions = LPT_FREG_NUM;
		ispi->pr_num = LPT_PR_NUM;
		ispi->swseq_reg = true;
		break;

	case INTEL_SPI_BXT:
		ispi->sregs = ispi->base + BXT_SSFSTS_CTL;
		ispi->pregs = ispi->base + BXT_PR;
		ispi->nregions = BXT_FREG_NUM;
		ispi->pr_num = BXT_PR_NUM;
		erase_64k = true;
		break;

	case INTEL_SPI_CNL:
		ispi->sregs = NULL;
		ispi->pregs = ispi->base + CNL_PR;
		ispi->nregions = CNL_FREG_NUM;
		ispi->pr_num = CNL_PR_NUM;
		erase_64k = true;
		break;

	default:
		return -EINVAL;
	}

	ispi->bios_locked = true;
	/* Try to disable BIOS write protection if user asked to do so */
	if (writeable) {
		if (intel_spi_set_writeable(ispi))
			ispi->bios_locked = false;
		else
			dev_warn(ispi->dev, "can't disable chip write protection\n");
	}

	/* Disable #SMI generation from HW sequencer */
	val = readl(ispi->base + HSFSTS_CTL);
	val &= ~HSFSTS_CTL_FSMIE;
	writel(val, ispi->base + HSFSTS_CTL);

	/*
	 * Determine whether erase operation should use HW or SW sequencer.
	 *
	 * The HW sequencer has a predefined list of opcodes, with only the
	 * erase opcode being programmable in LVSCC and UVSCC registers.
	 * If these registers don't contain a valid erase opcode, erase
	 * cannot be done using HW sequencer.
	 */
	lvscc = readl(ispi->base + LVSCC);
	uvscc = readl(ispi->base + UVSCC);
	if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK))
		ispi->swseq_erase = true;
	/* SPI controller on Intel BXT supports 64K erase opcode */
	if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase)
		if (!(lvscc & ERASE_64K_OPCODE_MASK) ||
		    !(uvscc & ERASE_64K_OPCODE_MASK))
			erase_64k = false;

	if (!ispi->sregs && (ispi->swseq_reg || ispi->swseq_erase)) {
		dev_err(ispi->dev, "software sequencer not supported, but required\n");
		return -EINVAL;
	}

	/*
	 * Some controllers can only do basic operations using hardware
	 * sequencer. All other operations are supposed to be carried out
	 * using software sequencer.
	 */
	if (ispi->swseq_reg) {
		/* Disable #SMI generation from SW sequencer */
		val = readl(ispi->sregs + SSFSTS_CTL);
		val &= ~SSFSTS_CTL_FSMIE;
		writel(val, ispi->sregs + SSFSTS_CTL);
	}

	/* Check controller's lock status */
	val = readl(ispi->base + HSFSTS_CTL);
	ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN);

	if (ispi->locked && ispi->sregs) {
		/*
		 * BIOS programs allowed opcodes and then locks down the
		 * register. So read back what opcodes it decided to support.
		 * That's the set we are going to support as well.
		 */
		opmenu0 = readl(ispi->sregs + OPMENU0);
		opmenu1 = readl(ispi->sregs + OPMENU1);

		if (opmenu0 && opmenu1) {
			for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) {
				ispi->opcodes[i] = opmenu0 >> i * 8;
				ispi->opcodes[i + 4] = opmenu1 >> i * 8;
			}
		}
	}

	if (erase_64k) {
		dev_dbg(ispi->dev, "Using erase_64k memory operations");
		ispi->mem_ops = erase_64k_mem_ops;
	} else {
		dev_dbg(ispi->dev, "Using generic memory operations");
		ispi->mem_ops = generic_mem_ops;
	}

	intel_spi_dump_regs(ispi);
	return 0;
}

static bool intel_spi_is_protected(const struct intel_spi *ispi,
				   unsigned int base, unsigned int limit)
{
	int i;

	for (i = 0; i < ispi->pr_num; i++) {
		u32 pr_base, pr_limit, pr_value;

		pr_value = readl(ispi->pregs + PR(i));
		if (!(pr_value & (PR_WPE | PR_RPE)))
			continue;

		pr_limit = (pr_value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
		pr_base = pr_value & PR_BASE_MASK;

		if (pr_base >= base && pr_limit <= limit)
			return true;
	}

	return false;
}

/*
 * There will be a single partition holding all enabled flash regions. We
 * call this "BIOS".
 */
static void intel_spi_fill_partition(struct intel_spi *ispi,
				     struct mtd_partition *part)
{
	u64 end;
	int i;

	memset(part, 0, sizeof(*part));

	/* Start from the mandatory descriptor region */
	part->size = 4096;
	part->name = "BIOS";

	/*
	 * Now try to find where this partition ends based on the flash
	 * region registers.
	 */
	for (i = 1; i < ispi->nregions; i++) {
		u32 region, base, limit;

		region = readl(ispi->base + FREG(i));
		base = region & FREG_BASE_MASK;
		limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;

		if (base >= limit || limit == 0)
			continue;

		/*
		 * If any of the regions have protection bits set and
		 * the ignore protection status parameter is not set,
		 * make the whole partition read-only to be on the safe side.
		 *
		 * Also if the user did not ask the chip to be writeable
		 * mask the bit too.
		 */
		if (!writeable || (!ignore_protection_status &&
		    intel_spi_is_protected(ispi, base, limit))) {
			part->mask_flags |= MTD_WRITEABLE;
			ispi->protected = true;
		}

		end = (limit << 12) + 4096;
		if (end > part->size)
			part->size = end;
	}

	/*
	 * Regions can refer to the second chip too so in this case we
	 * just make the BIOS partition to occupy the whole chip.
	 */
	if (ispi->chip0_size && part->size > ispi->chip0_size)
		part->size = MTDPART_SIZ_FULL;
}

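/*
 * Reads the flash descriptor starting at offset 0x10 (FLVALSIG/FLMAP0),
 * validates the signature and uses the component section to work out the
 * size of the first flash chip and the number of chip selects.
 */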
static int intel_spi_read_desc(struct intel_spi *ispi)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 0),
			   SPI_MEM_OP_ADDR(3, 0, 0),
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_IN(0, NULL, 0));
	u32 buf[2], nc, fcba, flcomp;
	ssize_t ret;

	op.addr.val = 0x10;
	op.data.buf.in = buf;
	op.data.nbytes = sizeof(buf);

	ret = intel_spi_read(ispi, NULL, NULL, &op);
	if (ret) {
		dev_warn(ispi->dev, "failed to read descriptor\n");
		return ret;
	}

	dev_dbg(ispi->dev, "FLVALSIG=0x%08x\n", buf[0]);
	dev_dbg(ispi->dev, "FLMAP0=0x%08x\n", buf[1]);

	if (buf[0] != FLVALSIG_MAGIC) {
		dev_warn(ispi->dev, "descriptor signature not valid\n");
		return -ENODEV;
	}

	fcba = (buf[1] & FLMAP0_FCBA_MASK) << 4;
	dev_dbg(ispi->dev, "FCBA=%#x\n", fcba);

	op.addr.val = fcba;
	op.data.buf.in = &flcomp;
	op.data.nbytes = sizeof(flcomp);

	ret = intel_spi_read(ispi, NULL, NULL, &op);
	if (ret) {
		dev_warn(ispi->dev, "failed to read FLCOMP\n");
		return -ENODEV;
	}

	dev_dbg(ispi->dev, "FLCOMP=0x%08x\n", flcomp);

	switch (flcomp & FLCOMP_C0DEN_MASK) {
	case FLCOMP_C0DEN_512K:
		ispi->chip0_size = SZ_512K;
		break;
	case FLCOMP_C0DEN_1M:
		ispi->chip0_size = SZ_1M;
		break;
	case FLCOMP_C0DEN_2M:
		ispi->chip0_size = SZ_2M;
		break;
	case FLCOMP_C0DEN_4M:
		ispi->chip0_size = SZ_4M;
		break;
	case FLCOMP_C0DEN_8M:
		ispi->chip0_size = SZ_8M;
		break;
	case FLCOMP_C0DEN_16M:
		ispi->chip0_size = SZ_16M;
		break;
	case FLCOMP_C0DEN_32M:
		ispi->chip0_size = SZ_32M;
		break;
	case FLCOMP_C0DEN_64M:
		ispi->chip0_size = SZ_64M;
		break;
	default:
		return -EINVAL;
	}

	dev_dbg(ispi->dev, "chip0 size %zd KB\n", ispi->chip0_size / SZ_1K);

	nc = (buf[1] & FLMAP0_NC_MASK) >> FLMAP0_NC_SHIFT;
	if (!nc)
		ispi->host->num_chipselect = 1;
	else if (nc == 1)
		ispi->host->num_chipselect = 2;
	else
		return -EINVAL;

	dev_dbg(ispi->dev, "%u flash components found\n",
		ispi->host->num_chipselect);
	return 0;
}

static int intel_spi_populate_chip(struct intel_spi *ispi)
{
	struct flash_platform_data *pdata;
	struct mtd_partition *parts;
	struct spi_board_info chip;
	int ret;

	ret = intel_spi_read_desc(ispi);
	if (ret)
		return ret;

	pdata = devm_kzalloc(ispi->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	pdata->nr_parts = 1;
	pdata->parts = devm_kcalloc(ispi->dev, pdata->nr_parts,
				    sizeof(*pdata->parts), GFP_KERNEL);
	if (!pdata->parts)
		return -ENOMEM;

	intel_spi_fill_partition(ispi, pdata->parts);

	memset(&chip, 0, sizeof(chip));
	snprintf(chip.modalias, 8, "spi-nor");
	chip.platform_data = pdata;

	if (!spi_new_device(ispi->host, &chip))
		return -ENODEV;

	/* Add the second chip if present */
	if (ispi->host->num_chipselect < 2)
		return 0;

	pdata = devm_kzalloc(ispi->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	pdata->name = devm_kasprintf(ispi->dev, GFP_KERNEL, "%s-chip1",
				     dev_name(ispi->dev));
	if (!pdata->name)
		return -ENOMEM;

	pdata->nr_parts = 1;
	parts = devm_kcalloc(ispi->dev, pdata->nr_parts, sizeof(*parts),
			     GFP_KERNEL);
	if (!parts)
		return -ENOMEM;

	parts[0].size = MTDPART_SIZ_FULL;
	parts[0].name = "BIOS1";
	pdata->parts = parts;

	chip.platform_data = pdata;
	chip.chip_select = 1;

	if (!spi_new_device(ispi->host, &chip))
		return -ENODEV;
	return 0;
}

static ssize_t intel_spi_protected_show(struct device *dev,
					struct device_attribute *attr, char *buf)
{
	struct intel_spi *ispi = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", ispi->protected);
}
static DEVICE_ATTR_ADMIN_RO(intel_spi_protected);

static ssize_t intel_spi_locked_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct intel_spi *ispi = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", ispi->locked);
}
static DEVICE_ATTR_ADMIN_RO(intel_spi_locked);

static ssize_t intel_spi_bios_locked_show(struct device *dev,
					  struct device_attribute *attr, char *buf)
{
	struct intel_spi *ispi = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", ispi->bios_locked);
}
static DEVICE_ATTR_ADMIN_RO(intel_spi_bios_locked);

static struct attribute *intel_spi_attrs[] = {
	&dev_attr_intel_spi_protected.attr,
	&dev_attr_intel_spi_locked.attr,
	&dev_attr_intel_spi_bios_locked.attr,
	NULL
};

static const struct attribute_group intel_spi_attr_group = {
	.attrs = intel_spi_attrs,
};

const struct attribute_group *intel_spi_groups[] = {
	&intel_spi_attr_group,
	NULL
};
EXPORT_SYMBOL_GPL(intel_spi_groups);
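/*
 * With these attributes exposed, the lock state can be inspected from
 * userspace, e.g. (hypothetical sysfs path, the real one depends on the
 * parent device):
 *
 *	cat /sys/bus/pci/devices/0000:00:1f.5/intel_spi_bios_locked
 */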

/**
 * intel_spi_probe() - Probe the Intel SPI flash controller
 * @dev: Pointer to the parent device
 * @base: iomapped MMIO resource
 * @info: Platform specific information
 *
 * Probes Intel SPI flash controller and creates the flash chip device.
 * Returns %0 on success and negative errno in case of failure.
 */
int intel_spi_probe(struct device *dev, void __iomem *base,
		    const struct intel_spi_boardinfo *info)
{
	struct spi_controller *host;
	struct intel_spi *ispi;
	int ret;

	host = devm_spi_alloc_host(dev, sizeof(*ispi));
	if (!host)
		return -ENOMEM;

	host->mem_ops = &intel_spi_mem_ops;

	ispi = spi_controller_get_devdata(host);

	ispi->base = base;
	ispi->dev = dev;
	ispi->host = host;
	ispi->info = info;

	ret = intel_spi_init(ispi);
	if (ret)
		return ret;

	ret = devm_spi_register_controller(dev, host);
	if (ret)
		return ret;

	dev_set_drvdata(dev, ispi);
	return intel_spi_populate_chip(ispi);
}
EXPORT_SYMBOL_GPL(intel_spi_probe);
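/*
 * A minimal caller sketch (modelled on a PCI glue driver such as
 * spi-intel-pci; the variable names here are illustrative):
 *
 *	void __iomem *base = pcim_iomap_table(pdev)[0];
 *
 *	return intel_spi_probe(&pdev->dev, base, info);
 */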

MODULE_DESCRIPTION("Intel PCH/PCU SPI flash core driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_LICENSE("GPL v2");