// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(c) 2019-2025, Intel Corporation. All rights reserved.
 */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/intel_dg_nvm_aux.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sizes.h>
#include <linux/types.h>

struct intel_dg_nvm {
	struct kref refcnt;
	void __iomem *base;
	size_t size;
	unsigned int nregions;
	struct {
		const char *name;
		u8 id;
		u64 offset;
		u64 size;
		unsigned int is_readable:1;
		unsigned int is_writable:1;
	} regions[] __counted_by(nregions);
};

#define NVM_TRIGGER_REG       0x00000000
#define NVM_VALSIG_REG        0x00000010
#define NVM_ADDRESS_REG       0x00000040
#define NVM_REGION_ID_REG     0x00000044
/*
 * [15:0]-Erase size = 0x0010 4K 0x0080 32K 0x0100 64K
 * [23:16]-Reserved
 * [31:24]-Erase MEM RegionID
 */
#define NVM_ERASE_REG         0x00000048
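/*
 * Worked example of composing an erase command word from the field
 * layout above (as done in idg_erase() below): a 4K erase of region 2
 * writes (2 << 24) | 0x0010 = 0x02000010 to NVM_ERASE_REG after the
 * target flash address has been placed in NVM_ADDRESS_REG.
 */
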
#define NVM_ACCESS_ERROR_REG  0x00000070
#define NVM_ADDRESS_ERROR_REG 0x00000074

/* Flash Valid Signature */
#define NVM_FLVALSIG          0x0FF0A55A

#define NVM_MAP_ADDR_MASK     GENMASK(7, 0)
#define NVM_MAP_ADDR_SHIFT    0x00000004

#define NVM_REGION_ID_DESCRIPTOR  0
/* Flash Region Base Address */
#define NVM_FRBA      0x40
/* Flash Region __n - Flash Descriptor Record */
#define NVM_FLREG(__n) (NVM_FRBA + ((__n) * 4))
/* Flash Map 1 Register */
#define NVM_FLMAP1_REG  0x18
#define NVM_FLMSTR4_OFFSET 0x00C

#define NVM_ACCESS_ERROR_PCIE_MASK 0x7

#define NVM_FREG_BASE_MASK GENMASK(15, 0)
#define NVM_FREG_ADDR_MASK GENMASK(31, 16)
#define NVM_FREG_ADDR_SHIFT 12
#define NVM_FREG_MIN_REGION_SIZE 0xFFF
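
/*
 * Worked example of decoding a Flash Region (FLREG) word with the
 * masks above, as intel_dg_nvm_init() does: a region word of
 * 0x00040003 yields base = 0x0003 << 12 = 0x3000 and
 * limit = (0x0004 << 12) | 0xFFF = 0x4FFF, i.e. an 8K region
 * spanning [0x3000, 0x4FFF].
 */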

static inline void idg_nvm_set_region_id(struct intel_dg_nvm *nvm, u8 region)
{
	iowrite32((u32)region, nvm->base + NVM_REGION_ID_REG);
}

static inline u32 idg_nvm_error(struct intel_dg_nvm *nvm)
{
	void __iomem *base = nvm->base;

	u32 reg = ioread32(base + NVM_ACCESS_ERROR_REG) & NVM_ACCESS_ERROR_PCIE_MASK;

	/* reset error bits */
	if (reg)
		iowrite32(reg, base + NVM_ACCESS_ERROR_REG);

	return reg;
}

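/*
 * Register access helpers: the NVM is reached through an indirect
 * window - the flash address is programmed into NVM_ADDRESS_REG and
 * the data is then transferred by reading or writing NVM_TRIGGER_REG,
 * 32 or 64 bits at a time.
 */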
static inline u32 idg_nvm_read32(struct intel_dg_nvm *nvm, u32 address)
{
	void __iomem *base = nvm->base;

	iowrite32(address, base + NVM_ADDRESS_REG);

	return ioread32(base + NVM_TRIGGER_REG);
}

static inline u64 idg_nvm_read64(struct intel_dg_nvm *nvm, u32 address)
{
	void __iomem *base = nvm->base;

	iowrite32(address, base + NVM_ADDRESS_REG);

	return readq(base + NVM_TRIGGER_REG);
}

static void idg_nvm_write32(struct intel_dg_nvm *nvm, u32 address, u32 data)
{
	void __iomem *base = nvm->base;

	iowrite32(address, base + NVM_ADDRESS_REG);

	iowrite32(data, base + NVM_TRIGGER_REG);
}

static void idg_nvm_write64(struct intel_dg_nvm *nvm, u32 address, u64 data)
{
	void __iomem *base = nvm->base;

	iowrite32(address, base + NVM_ADDRESS_REG);

	writeq(data, base + NVM_TRIGGER_REG);
}

static int idg_nvm_get_access_map(struct intel_dg_nvm *nvm, u32 *access_map)
{
	u32 fmstr4_addr;
	u32 fmstr4;
	u32 flmap1;
	u32 fmba;

	idg_nvm_set_region_id(nvm, NVM_REGION_ID_DESCRIPTOR);

	flmap1 = idg_nvm_read32(nvm, NVM_FLMAP1_REG);
	if (idg_nvm_error(nvm))
		return -EIO;
	/* Get Flash Master Base Address (FMBA) */
	fmba = (FIELD_GET(NVM_MAP_ADDR_MASK, flmap1) << NVM_MAP_ADDR_SHIFT);
	fmstr4_addr = fmba + NVM_FLMSTR4_OFFSET;

	fmstr4 = idg_nvm_read32(nvm, fmstr4_addr);
	if (idg_nvm_error(nvm))
		return -EIO;

	*access_map = fmstr4;
	return 0;
}
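
/*
 * Worked example for idg_nvm_get_access_map(): with FLMAP1 bits [7:0]
 * reading 0x50, FMBA = 0x50 << 4 = 0x500, so the FLMSTR4 access map
 * is fetched from descriptor offset 0x500 + 0xC = 0x50C.
 */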

/*
 * Region read/write access encoded in the access map
 * in the following order from the lower bit:
 * [3:0] regions 12-15 read state
 * [7:4] regions 12-15 write state
 * [19:8] regions 0-11 read state
 * [31:20] regions 0-11 write state
 */
static bool idg_nvm_region_readable(u32 access_map, u8 region)
{
	if (region < 12)
		return access_map & BIT(region + 8); /* [19:8] */
	else
		return access_map & BIT(region - 12); /* [3:0] */
}

static bool idg_nvm_region_writable(u32 access_map, u8 region)
{
	if (region < 12)
		return access_map & BIT(region + 20); /* [31:20] */
	else
		return access_map & BIT(region - 8); /* [7:4] */
}
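
/*
 * Example of the access-map lookup above: region 1 is readable when
 * bit 9 is set and writable when bit 21 is set, while region 13 is
 * readable when bit 1 is set and writable when bit 5 is set.
 */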

static int idg_nvm_is_valid(struct intel_dg_nvm *nvm)
{
	u32 is_valid;

	idg_nvm_set_region_id(nvm, NVM_REGION_ID_DESCRIPTOR);

	is_valid = idg_nvm_read32(nvm, NVM_VALSIG_REG);
	if (idg_nvm_error(nvm))
		return -EIO;

	if (is_valid != NVM_FLVALSIG)
		return -ENODEV;

	return 0;
}

__maybe_unused
static unsigned int idg_nvm_get_region(const struct intel_dg_nvm *nvm, loff_t from)
{
	unsigned int i;

	for (i = 0; i < nvm->nregions; i++) {
		if ((nvm->regions[i].offset + nvm->regions[i].size - 1) >= from &&
		    nvm->regions[i].offset <= from &&
		    nvm->regions[i].size != 0)
			break;
	}

	return i;
}
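
/*
 * Note on idg_nvm_get_region(): the returned index equals
 * nvm->nregions when no enabled region covers 'from'. For example,
 * with a single region at offset 0x1000 and size 0x2000, an offset of
 * 0x1800 returns index 0, while 0x4000 returns nvm->nregions.
 */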

static ssize_t idg_nvm_rewrite_partial(struct intel_dg_nvm *nvm, loff_t to,
				       loff_t offset, size_t len, const u32 *newdata)
{
	u32 data = idg_nvm_read32(nvm, to);

	if (idg_nvm_error(nvm))
		return -EIO;

	memcpy((u8 *)&data + offset, newdata, len);

	idg_nvm_write32(nvm, to, data);
	if (idg_nvm_error(nvm))
		return -EIO;

	return len;
}
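
/*
 * idg_nvm_rewrite_partial() handles sub-dword writes as
 * read-modify-write: e.g. a 2-byte write at byte offset 1 of an
 * aligned dword reads the dword, patches bytes 1-2 and writes the
 * whole dword back.
 */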

__maybe_unused
static ssize_t idg_write(struct intel_dg_nvm *nvm, u8 region,
			 loff_t to, size_t len, const unsigned char *buf)
{
	size_t len_s = len;
	size_t to_shift;
	size_t len8;
	size_t len4;
	ssize_t ret;
	size_t to4;
	size_t i;

	idg_nvm_set_region_id(nvm, region);

	to4 = ALIGN_DOWN(to, sizeof(u32));
	to_shift = min(sizeof(u32) - ((size_t)to - to4), len);
	if (to - to4) {
		ret = idg_nvm_rewrite_partial(nvm, to4, to - to4, to_shift, (u32 *)&buf[0]);
		if (ret < 0)
			return ret;

		buf += to_shift;
		to += to_shift;
		len_s -= to_shift;
	}

	len8 = ALIGN_DOWN(len_s, sizeof(u64));
	for (i = 0; i < len8; i += sizeof(u64)) {
		u64 data;

		memcpy(&data, &buf[i], sizeof(u64));
		idg_nvm_write64(nvm, to + i, data);
		if (idg_nvm_error(nvm))
			return -EIO;
	}

	len4 = len_s - len8;
	if (len4 >= sizeof(u32)) {
		u32 data;

		memcpy(&data, &buf[i], sizeof(u32));
		idg_nvm_write32(nvm, to + i, data);
		if (idg_nvm_error(nvm))
			return -EIO;
		i += sizeof(u32);
		len4 -= sizeof(u32);
	}

	if (len4 > 0) {
		ret = idg_nvm_rewrite_partial(nvm, to + i, 0, len4, (u32 *)&buf[i]);
		if (ret < 0)
			return ret;
	}

	return len;
}
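
/*
 * Worked example of the splitting in idg_write(): a 13-byte write to
 * flash offset 0x1002 becomes a 2-byte read-modify-write of the dword
 * at 0x1000, one 8-byte write at 0x1004 and a 3-byte read-modify-write
 * of the dword at 0x100C.
 */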

__maybe_unused
static ssize_t idg_read(struct intel_dg_nvm *nvm, u8 region,
			loff_t from, size_t len, unsigned char *buf)
{
	size_t len_s = len;
	size_t from_shift;
	size_t from4;
	size_t len8;
	size_t len4;
	size_t i;

	idg_nvm_set_region_id(nvm, region);

	from4 = ALIGN_DOWN(from, sizeof(u32));
	from_shift = min(sizeof(u32) - ((size_t)from - from4), len);

	if (from - from4) {
		u32 data = idg_nvm_read32(nvm, from4);

		if (idg_nvm_error(nvm))
			return -EIO;
		memcpy(&buf[0], (u8 *)&data + (from - from4), from_shift);
		len_s -= from_shift;
		buf += from_shift;
		from += from_shift;
	}

	len8 = ALIGN_DOWN(len_s, sizeof(u64));
	for (i = 0; i < len8; i += sizeof(u64)) {
		u64 data = idg_nvm_read64(nvm, from + i);

		if (idg_nvm_error(nvm))
			return -EIO;

		memcpy(&buf[i], &data, sizeof(data));
	}

	len4 = len_s - len8;
	if (len4 >= sizeof(u32)) {
		u32 data = idg_nvm_read32(nvm, from + i);

		if (idg_nvm_error(nvm))
			return -EIO;
		memcpy(&buf[i], &data, sizeof(data));
		i += sizeof(u32);
		len4 -= sizeof(u32);
	}

	if (len4 > 0) {
		u32 data = idg_nvm_read32(nvm, from + i);

		if (idg_nvm_error(nvm))
			return -EIO;
		memcpy(&buf[i], &data, len4);
	}

	return len;
}
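
/*
 * idg_read() mirrors the idg_write() splitting: an unaligned head is
 * extracted from a dword read, the aligned middle moves in 64-bit then
 * 32-bit chunks, and a 1-3 byte tail comes from a final dword read.
 * Hypothetical usage sketch (the helpers are still __maybe_unused):
 *
 *	ssize_t ret = idg_read(nvm, region, from, len, buf);
 *	if (ret < 0)
 *		return ret;
 */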

__maybe_unused
static ssize_t
idg_erase(struct intel_dg_nvm *nvm, u8 region, loff_t from, u64 len, u64 *fail_addr)
{
	void __iomem *base = nvm->base;
	const u32 block = 0x10;
	u64 i;

	for (i = 0; i < len; i += SZ_4K) {
		iowrite32(from + i, base + NVM_ADDRESS_REG);
		iowrite32(region << 24 | block, base + NVM_ERASE_REG);
		/* Since the writes are via sgunit
		 * we cannot do back to back erases.
		 */
		msleep(50);
	}
	return len;
}
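
/*
 * idg_erase() works in fixed 4K blocks (block code 0x10, see the
 * NVM_ERASE_REG layout above): erasing 8K from offset 0 of region 2
 * issues the command word 0x02000010 twice, for addresses 0x0 and
 * 0x1000, with a 50 ms pause after each.
 */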

static int intel_dg_nvm_init(struct intel_dg_nvm *nvm, struct device *device)
{
	u32 access_map = 0;
	unsigned int i, n;
	int ret;

	/* clean error register, previous errors are ignored */
	idg_nvm_error(nvm);

	ret = idg_nvm_is_valid(nvm);
	if (ret) {
		dev_err(device, "The MEM is not valid %d\n", ret);
		return ret;
	}

	if (idg_nvm_get_access_map(nvm, &access_map))
		return -EIO;

	for (i = 0, n = 0; i < nvm->nregions; i++) {
		u32 address, base, limit, region;
		u8 id = nvm->regions[i].id;

		address = NVM_FLREG(id);
		region = idg_nvm_read32(nvm, address);

		base = FIELD_GET(NVM_FREG_BASE_MASK, region) << NVM_FREG_ADDR_SHIFT;
		limit = (FIELD_GET(NVM_FREG_ADDR_MASK, region) << NVM_FREG_ADDR_SHIFT) |
			NVM_FREG_MIN_REGION_SIZE;

		dev_dbg(device, "[%d] %s: region: 0x%08X base: 0x%08x limit: 0x%08x\n",
			id, nvm->regions[i].name, region, base, limit);

		if (base >= limit || (i > 0 && limit == 0)) {
			dev_dbg(device, "[%d] %s: disabled\n",
				id, nvm->regions[i].name);
			nvm->regions[i].is_readable = 0;
			continue;
		}

		if (nvm->size < limit)
			nvm->size = limit;

		nvm->regions[i].offset = base;
		nvm->regions[i].size = limit - base + 1;
		/* No write access to descriptor; mask it out */
		nvm->regions[i].is_writable = idg_nvm_region_writable(access_map, id);

		nvm->regions[i].is_readable = idg_nvm_region_readable(access_map, id);
		dev_dbg(device, "Registered, %s id=%d offset=%lld size=%lld rd=%d wr=%d\n",
			nvm->regions[i].name,
			nvm->regions[i].id,
			nvm->regions[i].offset,
			nvm->regions[i].size,
			nvm->regions[i].is_readable,
			nvm->regions[i].is_writable);

		if (nvm->regions[i].is_readable)
			n++;
	}

	dev_dbg(device, "Registered %d regions\n", n);

	/* Need to add 1 to the amount of memory
	 * so it is reported as an even block
	 */
	nvm->size += 1;

	return n;
}
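
/*
 * intel_dg_nvm_init() returns a negative errno on failure, otherwise
 * the number of readable regions found in the flash descriptor; the
 * probe path below treats only negative values as fatal.
 */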

static void intel_dg_nvm_release(struct kref *kref)
{
	struct intel_dg_nvm *nvm = container_of(kref, struct intel_dg_nvm, refcnt);
	int i;

	pr_debug("freeing intel_dg nvm\n");
	for (i = 0; i < nvm->nregions; i++)
		kfree(nvm->regions[i].name);
	kfree(nvm);
}

static int intel_dg_mtd_probe(struct auxiliary_device *aux_dev,
			      const struct auxiliary_device_id *aux_dev_id)
{
	struct intel_dg_nvm_dev *invm = auxiliary_dev_to_intel_dg_nvm_dev(aux_dev);
	struct intel_dg_nvm *nvm;
	struct device *device;
	unsigned int nregions;
	unsigned int i, n;
	int ret;

	device = &aux_dev->dev;

	/* count available regions */
	for (nregions = 0, i = 0; i < INTEL_DG_NVM_REGIONS; i++) {
		if (invm->regions[i].name)
			nregions++;
	}

	if (!nregions) {
		dev_err(device, "no regions defined\n");
		return -ENODEV;
	}

	nvm = kzalloc(struct_size(nvm, regions, nregions), GFP_KERNEL);
	if (!nvm)
		return -ENOMEM;

	kref_init(&nvm->refcnt);

	for (n = 0, i = 0; i < INTEL_DG_NVM_REGIONS; i++) {
		if (!invm->regions[i].name)
			continue;

		char *name = kasprintf(GFP_KERNEL, "%s.%s",
				       dev_name(&aux_dev->dev), invm->regions[i].name);
		if (!name)
			continue;
		nvm->regions[n].name = name;
		nvm->regions[n].id = i;
		n++;
	}
	nvm->nregions = n; /* may be less than nregions if kasprintf() failed */

	nvm->base = devm_ioremap_resource(device, &invm->bar);
	if (IS_ERR(nvm->base)) {
		ret = PTR_ERR(nvm->base);
		goto err;
	}

	ret = intel_dg_nvm_init(nvm, device);
	if (ret < 0) {
		dev_err(device, "cannot initialize nvm %d\n", ret);
		goto err;
	}

	dev_set_drvdata(&aux_dev->dev, nvm);

	return 0;

err:
	kref_put(&nvm->refcnt, intel_dg_nvm_release);
	return ret;
}

static void intel_dg_mtd_remove(struct auxiliary_device *aux_dev)
{
	struct intel_dg_nvm *nvm = dev_get_drvdata(&aux_dev->dev);

	if (!nvm)
		return;

	dev_set_drvdata(&aux_dev->dev, NULL);

	kref_put(&nvm->refcnt, intel_dg_nvm_release);
}

static const struct auxiliary_device_id intel_dg_mtd_id_table[] = {
	{
		.name = "i915.nvm",
	},
	{
		.name = "xe.nvm",
	},
	{
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(auxiliary, intel_dg_mtd_id_table);

static struct auxiliary_driver intel_dg_mtd_driver = {
	.probe  = intel_dg_mtd_probe,
	.remove = intel_dg_mtd_remove,
	.driver = {
		/* auxiliary_driver_register() sets .name to be the modname */
	},
	.id_table = intel_dg_mtd_id_table
};
module_auxiliary_driver(intel_dg_mtd_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel DGFX MTD driver");