// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(c) 2019-2025, Intel Corporation. All rights reserved.
 */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/intel_dg_nvm_aux.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sizes.h>
#include <linux/types.h>

struct intel_dg_nvm {
	struct kref refcnt;
	struct mtd_info mtd;
	struct mutex lock; /* region access lock */
	void __iomem *base;
	size_t size;
	unsigned int nregions;
	struct {
		const char *name;
		u8 id;
		u64 offset;
		u64 size;
		unsigned int is_readable:1;
		unsigned int is_writable:1;
	} regions[] __counted_by(nregions);
};

#define NVM_TRIGGER_REG       0x00000000
#define NVM_VALSIG_REG        0x00000010
#define NVM_ADDRESS_REG       0x00000040
#define NVM_REGION_ID_REG     0x00000044
/*
 * [15:0]-Erase size = 0x0010 4K 0x0080 32K 0x0100 64K
 * [23:16]-Reserved
 * [31:24]-Erase MEM RegionID
 */
#define NVM_ERASE_REG         0x00000048
#define NVM_ACCESS_ERROR_REG  0x00000070
#define NVM_ADDRESS_ERROR_REG 0x00000074

/* Flash Valid Signature */
#define NVM_FLVALSIG          0x0FF0A55A

#define NVM_MAP_ADDR_MASK     GENMASK(7, 0)
#define NVM_MAP_ADDR_SHIFT    0x00000004

#define NVM_REGION_ID_DESCRIPTOR  0
/* Flash Region Base Address */
#define NVM_FRBA      0x40
/* Flash Region __n - Flash Descriptor Record */
#define NVM_FLREG(__n) (NVM_FRBA + ((__n) * 4))
/* Flash Map 1 Register */
#define NVM_FLMAP1_REG  0x18
#define NVM_FLMSTR4_OFFSET 0x00C

#define NVM_ACCESS_ERROR_PCIE_MASK 0x7

#define NVM_FREG_BASE_MASK GENMASK(15, 0)
#define NVM_FREG_ADDR_MASK GENMASK(31, 16)
#define NVM_FREG_ADDR_SHIFT 12
#define NVM_FREG_MIN_REGION_SIZE 0xFFF

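/* Select which flash region the windowed register accesses below target. */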
static inline void idg_nvm_set_region_id(struct intel_dg_nvm *nvm, u8 region)
{
	iowrite32((u32)region, nvm->base + NVM_REGION_ID_REG);
}

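/*
 * Fetch the PCIe access error bits and clear them by writing the value back,
 * so a later check reflects only new errors.
 */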
static inline u32 idg_nvm_error(struct intel_dg_nvm *nvm)
{
	void __iomem *base = nvm->base;

	u32 reg = ioread32(base + NVM_ACCESS_ERROR_REG) & NVM_ACCESS_ERROR_PCIE_MASK;

	/* reset error bits */
	if (reg)
		iowrite32(reg, base + NVM_ACCESS_ERROR_REG);

	return reg;
}

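/*
 * Register access pattern shared by the helpers below: the flash offset is
 * programmed into NVM_ADDRESS_REG, then the data is moved through
 * NVM_TRIGGER_REG as a 32- or 64-bit access.
 */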
static inline u32 idg_nvm_read32(struct intel_dg_nvm *nvm, u32 address)
{
	void __iomem *base = nvm->base;

	iowrite32(address, base + NVM_ADDRESS_REG);

	return ioread32(base + NVM_TRIGGER_REG);
}

static inline u64 idg_nvm_read64(struct intel_dg_nvm *nvm, u32 address)
{
	void __iomem *base = nvm->base;

	iowrite32(address, base + NVM_ADDRESS_REG);

	return readq(base + NVM_TRIGGER_REG);
}

static void idg_nvm_write32(struct intel_dg_nvm *nvm, u32 address, u32 data)
{
	void __iomem *base = nvm->base;

	iowrite32(address, base + NVM_ADDRESS_REG);

	iowrite32(data, base + NVM_TRIGGER_REG);
}

static void idg_nvm_write64(struct intel_dg_nvm *nvm, u32 address, u64 data)
{
	void __iomem *base = nvm->base;

	iowrite32(address, base + NVM_ADDRESS_REG);

	writeq(data, base + NVM_TRIGGER_REG);
}

static int idg_nvm_get_access_map(struct intel_dg_nvm *nvm, u32 *access_map)
{
	u32 fmstr4_addr;
	u32 fmstr4;
	u32 flmap1;
	u32 fmba;

	idg_nvm_set_region_id(nvm, NVM_REGION_ID_DESCRIPTOR);

	flmap1 = idg_nvm_read32(nvm, NVM_FLMAP1_REG);
	if (idg_nvm_error(nvm))
		return -EIO;
	/* Get Flash Master Base Address (FMBA) */
	fmba = (FIELD_GET(NVM_MAP_ADDR_MASK, flmap1) << NVM_MAP_ADDR_SHIFT);
	fmstr4_addr = fmba + NVM_FLMSTR4_OFFSET;

	fmstr4 = idg_nvm_read32(nvm, fmstr4_addr);
	if (idg_nvm_error(nvm))
		return -EIO;

	*access_map = fmstr4;
	return 0;
}

/*
 * Region read/write access encoded in the access map
 * in the following order from the lower bit:
 * [3:0] regions 12-15 read state
 * [7:4] regions 12-15 write state
 * [19:8] regions 0-11 read state
 * [31:20] regions 0-11 write state
 */
static bool idg_nvm_region_readable(u32 access_map, u8 region)
{
	if (region < 12)
		return access_map & BIT(region + 8); /* [19:8] */
	else
		return access_map & BIT(region - 12); /* [3:0] */
}

static bool idg_nvm_region_writable(u32 access_map, u8 region)
{
	if (region < 12)
		return access_map & BIT(region + 20); /* [31:20] */
	else
		return access_map & BIT(region - 8); /* [7:4] */
}

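/* Check for the flash valid signature in the descriptor region. */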
static int idg_nvm_is_valid(struct intel_dg_nvm *nvm)
{
	u32 is_valid;

	idg_nvm_set_region_id(nvm, NVM_REGION_ID_DESCRIPTOR);

	is_valid = idg_nvm_read32(nvm, NVM_VALSIG_REG);
	if (idg_nvm_error(nvm))
		return -EIO;

	if (is_valid != NVM_FLVALSIG)
		return -ENODEV;

	return 0;
}

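/*
 * Map an absolute flash offset to the region that contains it.
 * Returns nvm->nregions if the offset does not fall into any region.
 */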
static unsigned int idg_nvm_get_region(const struct intel_dg_nvm *nvm, loff_t from)
{
	unsigned int i;

	for (i = 0; i < nvm->nregions; i++) {
		if ((nvm->regions[i].offset + nvm->regions[i].size - 1) >= from &&
		    nvm->regions[i].offset <= from &&
		    nvm->regions[i].size != 0)
			break;
	}

	return i;
}

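/*
 * Read-modify-write a single u32: load the word at @to, overwrite @len bytes
 * starting at byte @offset with @newdata, and write the word back.
 * Used for the unaligned head and tail of a write.
 */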
static ssize_t idg_nvm_rewrite_partial(struct intel_dg_nvm *nvm, loff_t to,
				       loff_t offset, size_t len, const u32 *newdata)
{
	u32 data = idg_nvm_read32(nvm, to);

	if (idg_nvm_error(nvm))
		return -EIO;

	memcpy((u8 *)&data + offset, newdata, len);

	idg_nvm_write32(nvm, to, data);
	if (idg_nvm_error(nvm))
		return -EIO;

	return len;
}

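/*
 * Write @len bytes at offset @to within @region: unaligned head and tail
 * bytes go through idg_nvm_rewrite_partial(), the aligned middle is written
 * in u64 chunks with a u32 remainder.
 */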
static ssize_t idg_write(struct intel_dg_nvm *nvm, u8 region,
			 loff_t to, size_t len, const unsigned char *buf)
{
	size_t len_s = len;
	size_t to_shift;
	size_t len8;
	size_t len4;
	ssize_t ret;
	size_t to4;
	size_t i;

	idg_nvm_set_region_id(nvm, region);

	to4 = ALIGN_DOWN(to, sizeof(u32));
	to_shift = min(sizeof(u32) - ((size_t)to - to4), len);
	if (to - to4) {
		ret = idg_nvm_rewrite_partial(nvm, to4, to - to4, to_shift, (u32 *)&buf[0]);
		if (ret < 0)
			return ret;

		buf += to_shift;
		to += to_shift;
		len_s -= to_shift;
	}

	if (!IS_ALIGNED(to, sizeof(u64)) &&
	    ((to ^ (to + len_s)) & GENMASK(31, 10))) {
		/*
		 * Workaround reads/writes across 1k-aligned addresses
		 * (start u32 before 1k, end u32 after)
		 * as this fails on hardware.
		 */
		u32 data;

		memcpy(&data, &buf[0], sizeof(u32));
		idg_nvm_write32(nvm, to, data);
		if (idg_nvm_error(nvm))
			return -EIO;
		buf += sizeof(u32);
		to += sizeof(u32);
		len_s -= sizeof(u32);
	}

	len8 = ALIGN_DOWN(len_s, sizeof(u64));
	for (i = 0; i < len8; i += sizeof(u64)) {
		u64 data;

		memcpy(&data, &buf[i], sizeof(u64));
		idg_nvm_write64(nvm, to + i, data);
		if (idg_nvm_error(nvm))
			return -EIO;
	}

	len4 = len_s - len8;
	if (len4 >= sizeof(u32)) {
		u32 data;

		memcpy(&data, &buf[i], sizeof(u32));
		idg_nvm_write32(nvm, to + i, data);
		if (idg_nvm_error(nvm))
			return -EIO;
		i += sizeof(u32);
		len4 -= sizeof(u32);
	}

	if (len4 > 0) {
		ret = idg_nvm_rewrite_partial(nvm, to + i, 0, len4, (u32 *)&buf[i]);
		if (ret < 0)
			return ret;
	}

	return len;
}

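/*
 * Read @len bytes at offset @from within @region, mirroring idg_write():
 * unaligned head and tail bytes are copied out of a u32 bounce word, the
 * aligned middle is read in u64 chunks with a u32 remainder.
 */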
static ssize_t idg_read(struct intel_dg_nvm *nvm, u8 region,
			loff_t from, size_t len, unsigned char *buf)
{
	size_t len_s = len;
	size_t from_shift;
	size_t from4;
	size_t len8;
	size_t len4;
	size_t i;

	idg_nvm_set_region_id(nvm, region);

	from4 = ALIGN_DOWN(from, sizeof(u32));
	from_shift = min(sizeof(u32) - ((size_t)from - from4), len);

	if (from - from4) {
		u32 data = idg_nvm_read32(nvm, from4);

		if (idg_nvm_error(nvm))
			return -EIO;
		memcpy(&buf[0], (u8 *)&data + (from - from4), from_shift);
		len_s -= from_shift;
		buf += from_shift;
		from += from_shift;
	}

	if (!IS_ALIGNED(from, sizeof(u64)) &&
	    ((from ^ (from + len_s)) & GENMASK(31, 10))) {
		/*
		 * Workaround reads/writes across 1k-aligned addresses
		 * (start u32 before 1k, end u32 after)
		 * as this fails on hardware.
		 */
		u32 data = idg_nvm_read32(nvm, from);

		if (idg_nvm_error(nvm))
			return -EIO;
		memcpy(&buf[0], &data, sizeof(data));
		len_s -= sizeof(u32);
		buf += sizeof(u32);
		from += sizeof(u32);
	}

	len8 = ALIGN_DOWN(len_s, sizeof(u64));
	for (i = 0; i < len8; i += sizeof(u64)) {
		u64 data = idg_nvm_read64(nvm, from + i);

		if (idg_nvm_error(nvm))
			return -EIO;

		memcpy(&buf[i], &data, sizeof(data));
	}

	len4 = len_s - len8;
	if (len4 >= sizeof(u32)) {
		u32 data = idg_nvm_read32(nvm, from + i);

		if (idg_nvm_error(nvm))
			return -EIO;
		memcpy(&buf[i], &data, sizeof(data));
		i += sizeof(u32);
		len4 -= sizeof(u32);
	}

	if (len4 > 0) {
		u32 data = idg_nvm_read32(nvm, from + i);

		if (idg_nvm_error(nvm))
			return -EIO;
		memcpy(&buf[i], &data, len4);
	}

	return len;
}

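/*
 * Erase @len bytes starting at @from in 4K blocks via NVM_ERASE_REG.
 * Note that erase errors are not detected here: @fail_addr is left
 * untouched and @len is always returned.
 */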
static ssize_t
idg_erase(struct intel_dg_nvm *nvm, u8 region, loff_t from, u64 len, u64 *fail_addr)
{
	void __iomem *base = nvm->base;
	const u32 block = 0x10;
	u64 i;

	for (i = 0; i < len; i += SZ_4K) {
		iowrite32(from + i, base + NVM_ADDRESS_REG);
		iowrite32(region << 24 | block, base + NVM_ERASE_REG);
		/*
		 * Since the writes are via sgunit
		 * we cannot do back to back erases.
		 */
		msleep(50);
	}
	return len;
}

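/*
 * Parse the flash descriptor: check the valid signature, read the master
 * access map, and fill in offset, size and read/write permissions for each
 * named region. Returns the number of readable regions or a negative errno.
 */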
static int intel_dg_nvm_init(struct intel_dg_nvm *nvm, struct device *device)
{
	u32 access_map = 0;
	unsigned int i, n;
	int ret;

	/* clean error register, previous errors are ignored */
	idg_nvm_error(nvm);

	ret = idg_nvm_is_valid(nvm);
	if (ret) {
		dev_err(device, "The MEM is not valid %d\n", ret);
		return ret;
	}

	if (idg_nvm_get_access_map(nvm, &access_map))
		return -EIO;

	for (i = 0, n = 0; i < nvm->nregions; i++) {
		u32 address, base, limit, region;
		u8 id = nvm->regions[i].id;

		address = NVM_FLREG(id);
		region = idg_nvm_read32(nvm, address);

		base = FIELD_GET(NVM_FREG_BASE_MASK, region) << NVM_FREG_ADDR_SHIFT;
		limit = (FIELD_GET(NVM_FREG_ADDR_MASK, region) << NVM_FREG_ADDR_SHIFT) |
			NVM_FREG_MIN_REGION_SIZE;

		dev_dbg(device, "[%d] %s: region: 0x%08X base: 0x%08x limit: 0x%08x\n",
			id, nvm->regions[i].name, region, base, limit);

		if (base >= limit || (i > 0 && limit == 0)) {
			dev_dbg(device, "[%d] %s: disabled\n",
				id, nvm->regions[i].name);
			nvm->regions[i].is_readable = 0;
			continue;
		}

		if (nvm->size < limit)
			nvm->size = limit;

		nvm->regions[i].offset = base;
		nvm->regions[i].size = limit - base + 1;
		/* No write access to descriptor; mask it out */
		nvm->regions[i].is_writable = idg_nvm_region_writable(access_map, id);

		nvm->regions[i].is_readable = idg_nvm_region_readable(access_map, id);
		dev_dbg(device, "Registered, %s id=%d offset=%lld size=%lld rd=%d wr=%d\n",
			nvm->regions[i].name,
			nvm->regions[i].id,
			nvm->regions[i].offset,
			nvm->regions[i].size,
			nvm->regions[i].is_readable,
			nvm->regions[i].is_writable);

		if (nvm->regions[i].is_readable)
			n++;
	}

	dev_dbg(device, "Registered %d regions\n", n);

	/*
	 * Need to add 1 to the amount of memory
	 * so it is reported as an even block
	 */
	nvm->size += 1;

	return n;
}

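/*
 * MTD callbacks: each request is translated from the absolute MTD address
 * into a region id plus a region-relative offset before it is handed to the
 * idg_* helpers, with nvm->lock serializing access to the register window.
 */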
static int intel_dg_mtd_erase(struct mtd_info *mtd, struct erase_info *info)
{
	struct intel_dg_nvm *nvm = mtd->priv;
	size_t total_len;
	unsigned int idx;
	ssize_t bytes;
	loff_t from;
	size_t len;
	u8 region;
	u64 addr;

	if (WARN_ON(!nvm))
		return -EINVAL;

	if (!IS_ALIGNED(info->addr, SZ_4K) || !IS_ALIGNED(info->len, SZ_4K)) {
		dev_err(&mtd->dev, "unaligned erase %llx %llx\n",
			info->addr, info->len);
		info->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
		return -EINVAL;
	}

	total_len = info->len;
	addr = info->addr;

	guard(mutex)(&nvm->lock);

	while (total_len > 0) {
		if (!IS_ALIGNED(addr, SZ_4K) || !IS_ALIGNED(total_len, SZ_4K)) {
			dev_err(&mtd->dev, "unaligned erase %llx %zx\n", addr, total_len);
			info->fail_addr = addr;
			return -ERANGE;
		}

		idx = idg_nvm_get_region(nvm, addr);
		if (idx >= nvm->nregions) {
			dev_err(&mtd->dev, "out of range");
			info->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
			return -ERANGE;
		}

		from = addr - nvm->regions[idx].offset;
		region = nvm->regions[idx].id;
		len = total_len;
		if (len > nvm->regions[idx].size - from)
			len = nvm->regions[idx].size - from;

		dev_dbg(&mtd->dev, "erasing region[%d] %s from %llx len %zx\n",
			region, nvm->regions[idx].name, from, len);

		bytes = idg_erase(nvm, region, from, len, &info->fail_addr);
		if (bytes < 0) {
			dev_dbg(&mtd->dev, "erase failed with %zd\n", bytes);
			info->fail_addr += nvm->regions[idx].offset;
			return bytes;
		}

		addr += len;
		total_len -= len;
	}

	return 0;
}

static int intel_dg_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
			     size_t *retlen, u_char *buf)
{
	struct intel_dg_nvm *nvm = mtd->priv;
	unsigned int idx;
	ssize_t ret;
	u8 region;

	if (WARN_ON(!nvm))
		return -EINVAL;

	idx = idg_nvm_get_region(nvm, from);
	if (idx >= nvm->nregions) {
		dev_err(&mtd->dev, "out of range");
		return -ERANGE;
	}

	dev_dbg(&mtd->dev, "reading region[%d] %s from %lld len %zd\n",
		nvm->regions[idx].id, nvm->regions[idx].name, from, len);

	from -= nvm->regions[idx].offset;
	region = nvm->regions[idx].id;
	if (len > nvm->regions[idx].size - from)
		len = nvm->regions[idx].size - from;

	guard(mutex)(&nvm->lock);

	ret = idg_read(nvm, region, from, len, buf);
	if (ret < 0) {
		dev_dbg(&mtd->dev, "read failed with %zd\n", ret);
		return ret;
	}

	*retlen = ret;

	return 0;
}

static int intel_dg_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
			      size_t *retlen, const u_char *buf)
{
	struct intel_dg_nvm *nvm = mtd->priv;
	unsigned int idx;
	ssize_t ret;
	u8 region;

	if (WARN_ON(!nvm))
		return -EINVAL;

	idx = idg_nvm_get_region(nvm, to);
	if (idx >= nvm->nregions) {
		dev_err(&mtd->dev, "out of range");
		return -ERANGE;
	}

	dev_dbg(&mtd->dev, "writing region[%d] %s to %lld len %zd\n",
		nvm->regions[idx].id, nvm->regions[idx].name, to, len);

	to -= nvm->regions[idx].offset;
	region = nvm->regions[idx].id;
	if (len > nvm->regions[idx].size - to)
		len = nvm->regions[idx].size - to;

	guard(mutex)(&nvm->lock);

	ret = idg_write(nvm, region, to, len, buf);
	if (ret < 0) {
		dev_dbg(&mtd->dev, "write failed with %zd\n", ret);
		return ret;
	}

	*retlen = ret;

	return 0;
}

static void intel_dg_nvm_release(struct kref *kref)
{
	struct intel_dg_nvm *nvm = container_of(kref, struct intel_dg_nvm, refcnt);
	int i;

	pr_debug("freeing intel_dg nvm\n");
	for (i = 0; i < nvm->nregions; i++)
		kfree(nvm->regions[i].name);
	mutex_destroy(&nvm->lock);
	kfree(nvm);
}

static int intel_dg_mtd_get_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct intel_dg_nvm *nvm = master->priv;

	if (WARN_ON(!nvm))
		return -EINVAL;
	pr_debug("get mtd %s %d\n", mtd->name, kref_read(&nvm->refcnt));
	kref_get(&nvm->refcnt);

	return 0;
}

static void intel_dg_mtd_put_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct intel_dg_nvm *nvm = master->priv;

	if (WARN_ON(!nvm))
		return;
	pr_debug("put mtd %s %d\n", mtd->name, kref_read(&nvm->refcnt));
	kref_put(&nvm->refcnt, intel_dg_nvm_release);
}

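/*
 * Register one MTD master device spanning the whole NVM, with one partition
 * per readable region. Regions that are not writable are registered
 * read-only unless writable_override is set.
 */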
static int intel_dg_nvm_init_mtd(struct intel_dg_nvm *nvm, struct device *device,
				 unsigned int nparts, bool writable_override)
{
	struct mtd_partition *parts = NULL;
	unsigned int i, n;
	int ret;

	dev_dbg(device, "registering with mtd\n");

	nvm->mtd.owner = THIS_MODULE;
	nvm->mtd.dev.parent = device;
	nvm->mtd.flags = MTD_CAP_NORFLASH;
	nvm->mtd.type = MTD_DATAFLASH;
	nvm->mtd.priv = nvm;
	nvm->mtd._write = intel_dg_mtd_write;
	nvm->mtd._read = intel_dg_mtd_read;
	nvm->mtd._erase = intel_dg_mtd_erase;
	nvm->mtd._get_device = intel_dg_mtd_get_device;
	nvm->mtd._put_device = intel_dg_mtd_put_device;
	nvm->mtd.writesize = SZ_1; /* 1 byte granularity */
	nvm->mtd.erasesize = SZ_4K; /* 4K bytes granularity */
	nvm->mtd.size = nvm->size;

	parts = kcalloc(nvm->nregions, sizeof(*parts), GFP_KERNEL);
	if (!parts)
		return -ENOMEM;

	for (i = 0, n = 0; i < nvm->nregions && n < nparts; i++) {
		if (!nvm->regions[i].is_readable)
			continue;
		parts[n].name = nvm->regions[i].name;
		parts[n].offset = nvm->regions[i].offset;
		parts[n].size = nvm->regions[i].size;
		if (!nvm->regions[i].is_writable && !writable_override)
			parts[n].mask_flags = MTD_WRITEABLE;
		n++;
	}

	ret = mtd_device_register(&nvm->mtd, parts, n);

	kfree(parts);
	return ret;
}

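/*
 * Probe: map the NVM BAR provided by the parent GPU driver, build the region
 * table from the names the parent passed in, parse the flash descriptor and
 * register the resulting MTD partitions.
 */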
static int intel_dg_mtd_probe(struct auxiliary_device *aux_dev,
			      const struct auxiliary_device_id *aux_dev_id)
{
	struct intel_dg_nvm_dev *invm = auxiliary_dev_to_intel_dg_nvm_dev(aux_dev);
	struct intel_dg_nvm *nvm;
	struct device *device;
	unsigned int nregions;
	unsigned int i, n;
	int ret;

	device = &aux_dev->dev;

	/* count available regions */
	for (nregions = 0, i = 0; i < INTEL_DG_NVM_REGIONS; i++) {
		if (invm->regions[i].name)
			nregions++;
	}

	if (!nregions) {
		dev_err(device, "no regions defined\n");
		return -ENODEV;
	}

	nvm = kzalloc(struct_size(nvm, regions, nregions), GFP_KERNEL);
	if (!nvm)
		return -ENOMEM;

	kref_init(&nvm->refcnt);
	mutex_init(&nvm->lock);

	for (n = 0, i = 0; i < INTEL_DG_NVM_REGIONS; i++) {
		if (!invm->regions[i].name)
			continue;

		char *name = kasprintf(GFP_KERNEL, "%s.%s",
				       dev_name(&aux_dev->dev), invm->regions[i].name);
		if (!name)
			continue;
		nvm->regions[n].name = name;
		nvm->regions[n].id = i;
		n++;
	}
	nvm->nregions = n; /* in case kasprintf failed for some names */

	nvm->base = devm_ioremap_resource(device, &invm->bar);
	if (IS_ERR(nvm->base)) {
		ret = PTR_ERR(nvm->base);
		goto err;
	}

	ret = intel_dg_nvm_init(nvm, device);
	if (ret < 0) {
		dev_err(device, "cannot initialize nvm %d\n", ret);
		goto err;
	}

	ret = intel_dg_nvm_init_mtd(nvm, device, ret, invm->writable_override);
	if (ret) {
		dev_err(device, "failed init mtd %d\n", ret);
		goto err;
	}

	dev_set_drvdata(&aux_dev->dev, nvm);

	return 0;

err:
	kref_put(&nvm->refcnt, intel_dg_nvm_release);
	return ret;
}

static void intel_dg_mtd_remove(struct auxiliary_device *aux_dev)
{
	struct intel_dg_nvm *nvm = dev_get_drvdata(&aux_dev->dev);

	if (!nvm)
		return;

	mtd_device_unregister(&nvm->mtd);

	dev_set_drvdata(&aux_dev->dev, NULL);

	kref_put(&nvm->refcnt, intel_dg_nvm_release);
}

static const struct auxiliary_device_id intel_dg_mtd_id_table[] = {
	{
		.name = "i915.nvm",
	},
	{
		.name = "xe.nvm",
	},
	{
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(auxiliary, intel_dg_mtd_id_table);

static struct auxiliary_driver intel_dg_mtd_driver = {
	.probe  = intel_dg_mtd_probe,
	.remove = intel_dg_mtd_remove,
	.driver = {
		/* auxiliary_driver_register() sets .name to be the modname */
	},
	.id_table = intel_dg_mtd_id_table
};
module_auxiliary_driver(intel_dg_mtd_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel DGFX MTD driver");