// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(c) 2019-2025, Intel Corporation. All rights reserved.
 */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/intel_dg_nvm_aux.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/pm_runtime.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sizes.h>
#include <linux/types.h>

#define INTEL_DG_NVM_RPM_TIMEOUT_MS 500

struct intel_dg_nvm {
        struct kref refcnt;
        struct mtd_info mtd;
        struct device *dev;
        struct mutex lock; /* region access lock */
        void __iomem *base;
        void __iomem *base2;
        bool non_posted_erase;

        size_t size;
        unsigned int nregions;
        struct {
                const char *name;
                u8 id;
                u64 offset;
                u64 size;
                unsigned int is_readable:1;
                unsigned int is_writable:1;
        } regions[] __counted_by(nregions);
};

#define NVM_TRIGGER_REG       0x00000000
#define NVM_VALSIG_REG        0x00000010
#define NVM_ADDRESS_REG       0x00000040
#define NVM_REGION_ID_REG     0x00000044
#define NVM_DEBUG_REG         0x00000000
/*
 * [15:0]-Erase size = 0x0010 4K 0x0080 32K 0x0100 64K
 * [23:16]-Reserved
 * [31:24]-Erase MEM RegionID
 */
#define NVM_ERASE_REG         0x00000048
#define NVM_ACCESS_ERROR_REG  0x00000070
#define NVM_ADDRESS_ERROR_REG 0x00000074

/* Flash Valid Signature */
#define NVM_FLVALSIG          0x0FF0A55A

#define NVM_MAP_ADDR_MASK     GENMASK(7, 0)
#define NVM_MAP_ADDR_SHIFT    0x00000004

#define NVM_REGION_ID_DESCRIPTOR 0
/* Flash Region Base Address */
#define NVM_FRBA              0x40
/* Flash Region __n - Flash Descriptor Record */
#define NVM_FLREG(__n)        (NVM_FRBA + ((__n) * 4))
/* Flash Map 1 Register */
#define NVM_FLMAP1_REG        0x18
#define NVM_FLMSTR4_OFFSET    0x00C

#define NVM_ACCESS_ERROR_PCIE_MASK 0x7

#define NVM_FREG_BASE_MASK GENMASK(15, 0)
#define NVM_FREG_ADDR_MASK GENMASK(31, 16)
#define NVM_FREG_ADDR_SHIFT 12
#define NVM_FREG_MIN_REGION_SIZE 0xFFF

#define NVM_NON_POSTED_ERASE_DONE BIT(23)
#define NVM_NON_POSTED_ERASE_DONE_ITER 3000

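/*
 * Flash content is accessed indirectly: the target region is selected via
 * NVM_REGION_ID_REG, the flash offset is programmed into NVM_ADDRESS_REG and
 * the data is then transferred 32 or 64 bits at a time through
 * NVM_TRIGGER_REG. NVM_ACCESS_ERROR_REG accumulates error bits that are
 * cleared by writing them back.
 */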
static inline void idg_nvm_set_region_id(struct intel_dg_nvm *nvm, u8 region)
{
        iowrite32((u32)region, nvm->base + NVM_REGION_ID_REG);
}

static inline u32 idg_nvm_error(struct intel_dg_nvm *nvm)
{
        void __iomem *base = nvm->base;

        u32 reg = ioread32(base + NVM_ACCESS_ERROR_REG) & NVM_ACCESS_ERROR_PCIE_MASK;

        /* reset error bits */
        if (reg)
                iowrite32(reg, base + NVM_ACCESS_ERROR_REG);

        return reg;
}

static inline u32 idg_nvm_read32(struct intel_dg_nvm *nvm, u32 address)
{
        void __iomem *base = nvm->base;

        iowrite32(address, base + NVM_ADDRESS_REG);

        return ioread32(base + NVM_TRIGGER_REG);
}

static inline u64 idg_nvm_read64(struct intel_dg_nvm *nvm, u32 address)
{
        void __iomem *base = nvm->base;

        iowrite32(address, base + NVM_ADDRESS_REG);

        return readq(base + NVM_TRIGGER_REG);
}

static void idg_nvm_write32(struct intel_dg_nvm *nvm, u32 address, u32 data)
{
        void __iomem *base = nvm->base;

        iowrite32(address, base + NVM_ADDRESS_REG);

        iowrite32(data, base + NVM_TRIGGER_REG);
}

static void idg_nvm_write64(struct intel_dg_nvm *nvm, u32 address, u64 data)
{
        void __iomem *base = nvm->base;

        iowrite32(address, base + NVM_ADDRESS_REG);

        writeq(data, base + NVM_TRIGGER_REG);
}

static int idg_nvm_get_access_map(struct intel_dg_nvm *nvm, u32 *access_map)
{
        u32 fmstr4_addr;
        u32 fmstr4;
        u32 flmap1;
        u32 fmba;

        idg_nvm_set_region_id(nvm, NVM_REGION_ID_DESCRIPTOR);

        flmap1 = idg_nvm_read32(nvm, NVM_FLMAP1_REG);
        if (idg_nvm_error(nvm))
                return -EIO;
        /* Get Flash Master Base Address (FMBA) */
        fmba = (FIELD_GET(NVM_MAP_ADDR_MASK, flmap1) << NVM_MAP_ADDR_SHIFT);
        fmstr4_addr = fmba + NVM_FLMSTR4_OFFSET;

        fmstr4 = idg_nvm_read32(nvm, fmstr4_addr);
        if (idg_nvm_error(nvm))
                return -EIO;

        *access_map = fmstr4;
        return 0;
}

/*
 * Region read/write access encoded in the access map
 * in the following order from the lower bit:
 * [3:0] regions 12-15 read state
 * [7:4] regions 12-15 write state
 * [19:8] regions 0-11 read state
 * [31:20] regions 0-11 write state
 */
static bool idg_nvm_region_readable(u32 access_map, u8 region)
{
        if (region < 12)
                return access_map & BIT(region + 8); /* [19:8] */
        else
                return access_map & BIT(region - 12); /* [3:0] */
}

static bool idg_nvm_region_writable(u32 access_map, u8 region)
{
        if (region < 12)
                return access_map & BIT(region + 20); /* [31:20] */
        else
                return access_map & BIT(region - 8); /* [7:4] */
}

static int idg_nvm_is_valid(struct intel_dg_nvm *nvm)
{
        u32 is_valid;

        idg_nvm_set_region_id(nvm, NVM_REGION_ID_DESCRIPTOR);

        is_valid = idg_nvm_read32(nvm, NVM_VALSIG_REG);
        if (idg_nvm_error(nvm))
                return -EIO;

        if (is_valid != NVM_FLVALSIG)
                return -ENODEV;

        return 0;
}

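/*
 * Find the region that contains @from. Returns nvm->nregions when no region
 * matches, so callers must range-check the returned index.
 */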
static unsigned int idg_nvm_get_region(const struct intel_dg_nvm *nvm, loff_t from)
{
        unsigned int i;

        for (i = 0; i < nvm->nregions; i++) {
                if ((nvm->regions[i].offset + nvm->regions[i].size - 1) >= from &&
                    nvm->regions[i].offset <= from &&
                    nvm->regions[i].size != 0)
                        break;
        }

        return i;
}

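/*
 * Read-modify-write of a single 32-bit word, used for the unaligned head and
 * tail of a buffer: the word at @to is read, @len bytes of @newdata are merged
 * in at @offset, and the word is written back.
 */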
static ssize_t idg_nvm_rewrite_partial(struct intel_dg_nvm *nvm, loff_t to,
                                       loff_t offset, size_t len, const u32 *newdata)
{
        u32 data = idg_nvm_read32(nvm, to);

        if (idg_nvm_error(nvm))
                return -EIO;

        memcpy((u8 *)&data + offset, newdata, len);

        idg_nvm_write32(nvm, to, data);
        if (idg_nvm_error(nvm))
                return -EIO;

        return len;
}

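/*
 * Write path: an unaligned head is merged via idg_nvm_rewrite_partial(), the
 * bulk is written as 64-bit words, and the tail as a 32-bit word followed by
 * another partial rewrite for any remaining bytes.
 */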
static ssize_t idg_write(struct intel_dg_nvm *nvm, u8 region,
                         loff_t to, size_t len, const unsigned char *buf)
{
        size_t len_s = len;
        size_t to_shift;
        size_t len8;
        size_t len4;
        ssize_t ret;
        size_t to4;
        size_t i;

        idg_nvm_set_region_id(nvm, region);

        to4 = ALIGN_DOWN(to, sizeof(u32));
        to_shift = min(sizeof(u32) - ((size_t)to - to4), len);
        if (to - to4) {
                ret = idg_nvm_rewrite_partial(nvm, to4, to - to4, to_shift, (u32 *)&buf[0]);
                if (ret < 0)
                        return ret;

                buf += to_shift;
                to += to_shift;
                len_s -= to_shift;
        }

        if (!IS_ALIGNED(to, sizeof(u64)) &&
            ((to ^ (to + len_s)) & GENMASK(31, 10))) {
                /*
                 * Workaround reads/writes across 1k-aligned addresses
                 * (start u32 before 1k, end u32 after)
                 * as this fails on hardware.
                 */
                u32 data;

                memcpy(&data, &buf[0], sizeof(u32));
                idg_nvm_write32(nvm, to, data);
                if (idg_nvm_error(nvm))
                        return -EIO;
                buf += sizeof(u32);
                to += sizeof(u32);
                len_s -= sizeof(u32);
        }

        len8 = ALIGN_DOWN(len_s, sizeof(u64));
        for (i = 0; i < len8; i += sizeof(u64)) {
                u64 data;

                memcpy(&data, &buf[i], sizeof(u64));
                idg_nvm_write64(nvm, to + i, data);
                if (idg_nvm_error(nvm))
                        return -EIO;
        }

        len4 = len_s - len8;
        if (len4 >= sizeof(u32)) {
                u32 data;

                memcpy(&data, &buf[i], sizeof(u32));
                idg_nvm_write32(nvm, to + i, data);
                if (idg_nvm_error(nvm))
                        return -EIO;
                i += sizeof(u32);
                len4 -= sizeof(u32);
        }

        if (len4 > 0) {
                ret = idg_nvm_rewrite_partial(nvm, to + i, 0, len4, (u32 *)&buf[i]);
                if (ret < 0)
                        return ret;
        }

        return len;
}

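/*
 * Read path: mirrors idg_write(), handling an unaligned head, a 64-bit bulk
 * transfer, then a 32-bit word and a final partial word copied into the
 * caller's buffer.
 */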
static ssize_t idg_read(struct intel_dg_nvm *nvm, u8 region,
                        loff_t from, size_t len, unsigned char *buf)
{
        size_t len_s = len;
        size_t from_shift;
        size_t from4;
        size_t len8;
        size_t len4;
        size_t i;

        idg_nvm_set_region_id(nvm, region);

        from4 = ALIGN_DOWN(from, sizeof(u32));
        from_shift = min(sizeof(u32) - ((size_t)from - from4), len);

        if (from - from4) {
                u32 data = idg_nvm_read32(nvm, from4);

                if (idg_nvm_error(nvm))
                        return -EIO;
                memcpy(&buf[0], (u8 *)&data + (from - from4), from_shift);
                len_s -= from_shift;
                buf += from_shift;
                from += from_shift;
        }

        if (!IS_ALIGNED(from, sizeof(u64)) &&
            ((from ^ (from + len_s)) & GENMASK(31, 10))) {
                /*
                 * Workaround reads/writes across 1k-aligned addresses
                 * (start u32 before 1k, end u32 after)
                 * as this fails on hardware.
                 */
                u32 data = idg_nvm_read32(nvm, from);

                if (idg_nvm_error(nvm))
                        return -EIO;
                memcpy(&buf[0], &data, sizeof(data));
                len_s -= sizeof(u32);
                buf += sizeof(u32);
                from += sizeof(u32);
        }

        len8 = ALIGN_DOWN(len_s, sizeof(u64));
        for (i = 0; i < len8; i += sizeof(u64)) {
                u64 data = idg_nvm_read64(nvm, from + i);

                if (idg_nvm_error(nvm))
                        return -EIO;

                memcpy(&buf[i], &data, sizeof(data));
        }

        len4 = len_s - len8;
        if (len4 >= sizeof(u32)) {
                u32 data = idg_nvm_read32(nvm, from + i);

                if (idg_nvm_error(nvm))
                        return -EIO;
                memcpy(&buf[i], &data, sizeof(data));
                i += sizeof(u32);
                len4 -= sizeof(u32);
        }

        if (len4 > 0) {
                u32 data = idg_nvm_read32(nvm, from + i);

                if (idg_nvm_error(nvm))
                        return -EIO;
                memcpy(&buf[i], &data, len4);
        }

        return len;
}

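/*
 * Erase @len bytes in 4K blocks: each iteration programs the block address and
 * region id into NVM_ERASE_REG. In non-posted mode the debug register is
 * polled for the erase-done bit before moving to the next block.
 */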
static ssize_t
idg_erase(struct intel_dg_nvm *nvm, u8 region, loff_t from, u64 len, u64 *fail_addr)
{
        void __iomem *base2 = nvm->base2;
        void __iomem *base = nvm->base;
        const u32 block = 0x10;
        u32 iter = 0;
        u32 reg;
        u64 i;

        for (i = 0; i < len; i += SZ_4K) {
                iowrite32(from + i, base + NVM_ADDRESS_REG);
                iowrite32(region << 24 | block, base + NVM_ERASE_REG);
                if (nvm->non_posted_erase) {
                        /* Wait for Erase Done */
                        reg = ioread32(base2 + NVM_DEBUG_REG);
                        while (!(reg & NVM_NON_POSTED_ERASE_DONE) &&
                               ++iter < NVM_NON_POSTED_ERASE_DONE_ITER) {
                                msleep(10);
                                reg = ioread32(base2 + NVM_DEBUG_REG);
                        }
                        if (reg & NVM_NON_POSTED_ERASE_DONE) {
                                /* Clear Erase Done */
                                iowrite32(reg, base2 + NVM_DEBUG_REG);
                        } else {
                                *fail_addr = from + i;
                                return -ETIME;
                        }
                }
                /*
                 * Since the writes are via sgunit
                 * we cannot do back to back erases.
                 */
                msleep(50);
        }
        return len;
}

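/*
 * Validate the flash descriptor signature, read the master access map and
 * walk the FLREG records to discover each region's base and limit.
 * Returns the number of readable regions or a negative error code.
 */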
static int intel_dg_nvm_init(struct intel_dg_nvm *nvm, struct device *device,
                             bool non_posted_erase)
{
        u32 access_map = 0;
        unsigned int i, n;
        int ret;

        nvm->dev = device;

        /* clean error register, previous errors are ignored */
        idg_nvm_error(nvm);

        ret = idg_nvm_is_valid(nvm);
        if (ret) {
                dev_err(device, "The MEM is not valid %d\n", ret);
                return ret;
        }

        if (idg_nvm_get_access_map(nvm, &access_map))
                return -EIO;

        for (i = 0, n = 0; i < nvm->nregions; i++) {
                u32 address, base, limit, region;
                u8 id = nvm->regions[i].id;

                address = NVM_FLREG(id);
                region = idg_nvm_read32(nvm, address);

                base = FIELD_GET(NVM_FREG_BASE_MASK, region) << NVM_FREG_ADDR_SHIFT;
                limit = (FIELD_GET(NVM_FREG_ADDR_MASK, region) << NVM_FREG_ADDR_SHIFT) |
                        NVM_FREG_MIN_REGION_SIZE;

                dev_dbg(device, "[%d] %s: region: 0x%08X base: 0x%08x limit: 0x%08x\n",
                        id, nvm->regions[i].name, region, base, limit);

                if (base >= limit || (i > 0 && limit == 0)) {
                        dev_dbg(device, "[%d] %s: disabled\n",
                                id, nvm->regions[i].name);
                        nvm->regions[i].is_readable = 0;
                        continue;
                }

                if (nvm->size < limit)
                        nvm->size = limit;

                nvm->regions[i].offset = base;
                nvm->regions[i].size = limit - base + 1;
                /* No write access to descriptor; mask it out */
                nvm->regions[i].is_writable = idg_nvm_region_writable(access_map, id);

                nvm->regions[i].is_readable = idg_nvm_region_readable(access_map, id);
                dev_dbg(device, "Registered, %s id=%d offset=%lld size=%lld rd=%d wr=%d\n",
                        nvm->regions[i].name,
                        nvm->regions[i].id,
                        nvm->regions[i].offset,
                        nvm->regions[i].size,
                        nvm->regions[i].is_readable,
                        nvm->regions[i].is_writable);

                if (nvm->regions[i].is_readable)
                        n++;
        }

        nvm->non_posted_erase = non_posted_erase;

        dev_dbg(device, "Registered %d regions\n", n);
        dev_dbg(device, "Non posted erase %d\n", nvm->non_posted_erase);

        /*
         * Need to add 1 to the amount of memory
         * so it is reported as an even block
         */
        nvm->size += 1;

        return n;
}

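/*
 * An erase request may span several regions; it is split per region and the
 * region-relative offset is passed to idg_erase(). fail_addr is reported as
 * an absolute mtd offset.
 */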
static int intel_dg_mtd_erase(struct mtd_info *mtd, struct erase_info *info)
{
        struct intel_dg_nvm *nvm = mtd->priv;
        size_t total_len;
        unsigned int idx;
        ssize_t bytes;
        loff_t from;
        size_t len;
        u8 region;
        u64 addr;
        int ret;

        if (WARN_ON(!nvm))
                return -EINVAL;

        if (!IS_ALIGNED(info->addr, SZ_4K) || !IS_ALIGNED(info->len, SZ_4K)) {
                dev_err(&mtd->dev, "unaligned erase %llx %llx\n",
                        info->addr, info->len);
                info->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
                return -EINVAL;
        }

        total_len = info->len;
        addr = info->addr;

        ret = pm_runtime_resume_and_get(nvm->dev);
        if (ret < 0) {
                dev_err(&mtd->dev, "rpm: get failed %d\n", ret);
                return ret;
        }

        ret = 0;
        guard(mutex)(&nvm->lock);

        while (total_len > 0) {
                if (!IS_ALIGNED(addr, SZ_4K) || !IS_ALIGNED(total_len, SZ_4K)) {
                        dev_err(&mtd->dev, "unaligned erase %llx %zx\n", addr, total_len);
                        info->fail_addr = addr;
                        ret = -ERANGE;
                        break;
                }

                idx = idg_nvm_get_region(nvm, addr);
                if (idx >= nvm->nregions) {
                        dev_err(&mtd->dev, "out of range");
                        info->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
                        ret = -ERANGE;
                        break;
                }

                from = addr - nvm->regions[idx].offset;
                region = nvm->regions[idx].id;
                len = total_len;
                if (len > nvm->regions[idx].size - from)
                        len = nvm->regions[idx].size - from;

                dev_dbg(&mtd->dev, "erasing region[%d] %s from %llx len %zx\n",
                        region, nvm->regions[idx].name, from, len);

                bytes = idg_erase(nvm, region, from, len, &info->fail_addr);
                if (bytes < 0) {
                        dev_dbg(&mtd->dev, "erase failed with %zd\n", bytes);
                        info->fail_addr += nvm->regions[idx].offset;
                        ret = bytes;
                        break;
                }

                addr += len;
                total_len -= len;
        }

        pm_runtime_put_autosuspend(nvm->dev);
        return ret;
}

static int intel_dg_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
                             size_t *retlen, u_char *buf)
{
        struct intel_dg_nvm *nvm = mtd->priv;
        unsigned int idx;
        ssize_t ret;
        u8 region;

        if (WARN_ON(!nvm))
                return -EINVAL;

        idx = idg_nvm_get_region(nvm, from);
        if (idx >= nvm->nregions) {
                dev_err(&mtd->dev, "out of range");
                return -ERANGE;
        }

        dev_dbg(&mtd->dev, "reading region[%d] %s from %lld len %zd\n",
                nvm->regions[idx].id, nvm->regions[idx].name, from, len);

        from -= nvm->regions[idx].offset;
        region = nvm->regions[idx].id;
        if (len > nvm->regions[idx].size - from)
                len = nvm->regions[idx].size - from;

        ret = pm_runtime_resume_and_get(nvm->dev);
        if (ret < 0) {
                dev_err(&mtd->dev, "rpm: get failed %zd\n", ret);
                return ret;
        }

        guard(mutex)(&nvm->lock);

        ret = idg_read(nvm, region, from, len, buf);
        if (ret < 0) {
                dev_dbg(&mtd->dev, "read failed with %zd\n", ret);
        } else {
                *retlen = ret;
                ret = 0;
        }

        pm_runtime_put_autosuspend(nvm->dev);
        return ret;
}

static int intel_dg_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
                              size_t *retlen, const u_char *buf)
{
        struct intel_dg_nvm *nvm = mtd->priv;
        unsigned int idx;
        ssize_t ret;
        u8 region;

        if (WARN_ON(!nvm))
                return -EINVAL;

        idx = idg_nvm_get_region(nvm, to);
        if (idx >= nvm->nregions) {
                dev_err(&mtd->dev, "out of range");
                return -ERANGE;
        }

        dev_dbg(&mtd->dev, "writing region[%d] %s to %lld len %zd\n",
                nvm->regions[idx].id, nvm->regions[idx].name, to, len);

        to -= nvm->regions[idx].offset;
        region = nvm->regions[idx].id;
        if (len > nvm->regions[idx].size - to)
                len = nvm->regions[idx].size - to;

        ret = pm_runtime_resume_and_get(nvm->dev);
        if (ret < 0) {
                dev_err(&mtd->dev, "rpm: get failed %zd\n", ret);
                return ret;
        }

        guard(mutex)(&nvm->lock);

        ret = idg_write(nvm, region, to, len, buf);
        if (ret < 0) {
                dev_dbg(&mtd->dev, "write failed with %zd\n", ret);
        } else {
                *retlen = ret;
                ret = 0;
        }

        pm_runtime_put_autosuspend(nvm->dev);
        return ret;
}

static void intel_dg_nvm_release(struct kref *kref)
{
        struct intel_dg_nvm *nvm = container_of(kref, struct intel_dg_nvm, refcnt);
        int i;

        pr_debug("freeing intel_dg nvm\n");
        for (i = 0; i < nvm->nregions; i++)
                kfree(nvm->regions[i].name);
        mutex_destroy(&nvm->lock);
        kfree(nvm);
}

static int intel_dg_mtd_get_device(struct mtd_info *mtd)
{
        struct mtd_info *master = mtd_get_master(mtd);
        struct intel_dg_nvm *nvm = master->priv;

        if (WARN_ON(!nvm))
                return -EINVAL;
        pr_debug("get mtd %s %d\n", mtd->name, kref_read(&nvm->refcnt));
        kref_get(&nvm->refcnt);

        return 0;
}

static void intel_dg_mtd_put_device(struct mtd_info *mtd)
{
        struct mtd_info *master = mtd_get_master(mtd);
        struct intel_dg_nvm *nvm = master->priv;

        if (WARN_ON(!nvm))
                return;
        pr_debug("put mtd %s %d\n", mtd->name, kref_read(&nvm->refcnt));
        kref_put(&nvm->refcnt, intel_dg_nvm_release);
}

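/*
 * Expose each readable region as an mtd partition; regions without write
 * access are registered read-only unless writable_override is set.
 */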
static int intel_dg_nvm_init_mtd(struct intel_dg_nvm *nvm, struct device *device,
                                 unsigned int nparts, bool writable_override)
{
        struct mtd_partition *parts = NULL;
        unsigned int i, n;
        int ret;

        dev_dbg(device, "registering with mtd\n");

        nvm->mtd.owner = THIS_MODULE;
        nvm->mtd.dev.parent = device;
        nvm->mtd.flags = MTD_CAP_NORFLASH;
        nvm->mtd.type = MTD_DATAFLASH;
        nvm->mtd.priv = nvm;
        nvm->mtd._write = intel_dg_mtd_write;
        nvm->mtd._read = intel_dg_mtd_read;
        nvm->mtd._erase = intel_dg_mtd_erase;
        nvm->mtd._get_device = intel_dg_mtd_get_device;
        nvm->mtd._put_device = intel_dg_mtd_put_device;
        nvm->mtd.writesize = SZ_1; /* 1 byte granularity */
        nvm->mtd.erasesize = SZ_4K; /* 4K bytes granularity */
        nvm->mtd.size = nvm->size;

        parts = kzalloc_objs(*parts, nvm->nregions);
        if (!parts)
                return -ENOMEM;

        for (i = 0, n = 0; i < nvm->nregions && n < nparts; i++) {
                if (!nvm->regions[i].is_readable)
                        continue;
                parts[n].name = nvm->regions[i].name;
                parts[n].offset = nvm->regions[i].offset;
                parts[n].size = nvm->regions[i].size;
                if (!nvm->regions[i].is_writable && !writable_override)
                        parts[n].mask_flags = MTD_WRITEABLE;
                n++;
        }

        ret = mtd_device_register(&nvm->mtd, parts, n);

        kfree(parts);
        return ret;
}

static int intel_dg_mtd_probe(struct auxiliary_device *aux_dev,
                              const struct auxiliary_device_id *aux_dev_id)
{
        struct intel_dg_nvm_dev *invm = auxiliary_dev_to_intel_dg_nvm_dev(aux_dev);
        struct intel_dg_nvm *nvm;
        struct device *device;
        unsigned int nregions;
        unsigned int i, n;
        int ret;

        device = &aux_dev->dev;

        /* count available regions */
        for (nregions = 0, i = 0; i < INTEL_DG_NVM_REGIONS; i++) {
                if (invm->regions[i].name)
                        nregions++;
        }

        if (!nregions) {
                dev_err(device, "no regions defined\n");
                return -ENODEV;
        }

        nvm = kzalloc_flex(*nvm, regions, nregions);
        if (!nvm)
                return -ENOMEM;

        kref_init(&nvm->refcnt);
        mutex_init(&nvm->lock);
        nvm->nregions = nregions;

        for (n = 0, i = 0; i < INTEL_DG_NVM_REGIONS; i++) {
                if (!invm->regions[i].name)
                        continue;

                char *name = kasprintf(GFP_KERNEL, "%s.%s",
                                       dev_name(&aux_dev->dev), invm->regions[i].name);
                if (!name) {
                        ret = -ENOMEM;
                        goto err;
                }

                nvm->regions[n].name = name;
                nvm->regions[n].id = i;
                n++;
        }

        ret = devm_pm_runtime_enable(device);
        if (ret < 0) {
                dev_err(device, "rpm: enable failed %d\n", ret);
                goto err_norpm;
        }

        pm_runtime_set_autosuspend_delay(device, INTEL_DG_NVM_RPM_TIMEOUT_MS);
        pm_runtime_use_autosuspend(device);

        ret = pm_runtime_resume_and_get(device);
        if (ret < 0) {
                dev_err(device, "rpm: get failed %d\n", ret);
                goto err_norpm;
        }

        nvm->base = devm_ioremap_resource(device, &invm->bar);
        if (IS_ERR(nvm->base)) {
                ret = PTR_ERR(nvm->base);
                goto err;
        }

        if (invm->non_posted_erase) {
                nvm->base2 = devm_ioremap_resource(device, &invm->bar2);
                if (IS_ERR(nvm->base2)) {
                        ret = PTR_ERR(nvm->base2);
                        goto err;
                }
        }

        ret = intel_dg_nvm_init(nvm, device, invm->non_posted_erase);
        if (ret < 0) {
                dev_err(device, "cannot initialize nvm %d\n", ret);
                goto err;
        }

        ret = intel_dg_nvm_init_mtd(nvm, device, ret, invm->writable_override);
        if (ret) {
                dev_err(device, "failed init mtd %d\n", ret);
                goto err;
        }

        dev_set_drvdata(&aux_dev->dev, nvm);

        pm_runtime_put(device);
        return 0;

err:
        pm_runtime_put(device);
err_norpm:
        kref_put(&nvm->refcnt, intel_dg_nvm_release);
        return ret;
}

static void intel_dg_mtd_remove(struct auxiliary_device *aux_dev)
{
        struct intel_dg_nvm *nvm = dev_get_drvdata(&aux_dev->dev);

        if (!nvm)
                return;

        mtd_device_unregister(&nvm->mtd);

        dev_set_drvdata(&aux_dev->dev, NULL);

        kref_put(&nvm->refcnt, intel_dg_nvm_release);
}

static const struct auxiliary_device_id intel_dg_mtd_id_table[] = {
        {
                .name = "i915.nvm",
        },
        {
                .name = "xe.nvm",
        },
        {
                /* sentinel */
        }
};
MODULE_DEVICE_TABLE(auxiliary, intel_dg_mtd_id_table);

static struct auxiliary_driver intel_dg_mtd_driver = {
        .probe = intel_dg_mtd_probe,
        .remove = intel_dg_mtd_remove,
        .driver = {
                /* auxiliary_driver_register() sets .name to be the modname */
        },
        .id_table = intel_dg_mtd_id_table
};
module_auxiliary_driver(intel_dg_mtd_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel DGFX MTD driver");