// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(c) 2019-2025, Intel Corporation. All rights reserved.
 */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/intel_dg_nvm_aux.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sizes.h>
#include <linux/types.h>

struct intel_dg_nvm {
	struct kref refcnt;
	struct mtd_info mtd;
	struct mutex lock; /* region access lock */
	void __iomem *base;
	size_t size;
	unsigned int nregions;
	struct {
		const char *name;
		u8 id;
		u64 offset;
		u64 size;
		unsigned int is_readable:1;
		unsigned int is_writable:1;
	} regions[] __counted_by(nregions);
};

#define NVM_TRIGGER_REG       0x00000000
#define NVM_VALSIG_REG        0x00000010
#define NVM_ADDRESS_REG       0x00000040
#define NVM_REGION_ID_REG     0x00000044
/*
 * [15:0]-Erase size = 0x0010 4K 0x0080 32K 0x0100 64K
 * [23:16]-Reserved
 * [31:24]-Erase MEM RegionID
 */
#define NVM_ERASE_REG         0x00000048
#define NVM_ACCESS_ERROR_REG  0x00000070
#define NVM_ADDRESS_ERROR_REG 0x00000074

/* Flash Valid Signature */
#define NVM_FLVALSIG          0x0FF0A55A

#define NVM_MAP_ADDR_MASK     GENMASK(7, 0)
#define NVM_MAP_ADDR_SHIFT    0x00000004

#define NVM_REGION_ID_DESCRIPTOR  0
/* Flash Region Base Address */
#define NVM_FRBA              0x40
/* Flash Region __n - Flash Descriptor Record */
#define NVM_FLREG(__n)        (NVM_FRBA + ((__n) * 4))
/* Flash Map 1 Register */
#define NVM_FLMAP1_REG        0x18
#define NVM_FLMSTR4_OFFSET    0x00C
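
/*
 * Flash is reached through an indirect register window: the access helpers
 * below select the target region via NVM_REGION_ID_REG, latch the flash
 * linear address in NVM_ADDRESS_REG, and then move data through
 * NVM_TRIGGER_REG. Access faults are latched in NVM_ACCESS_ERROR_REG and
 * cleared by writing the error bits back.
 */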

#define NVM_ACCESS_ERROR_PCIE_MASK 0x7

#define NVM_FREG_BASE_MASK       GENMASK(15, 0)
#define NVM_FREG_ADDR_MASK       GENMASK(31, 16)
#define NVM_FREG_ADDR_SHIFT      12
#define NVM_FREG_MIN_REGION_SIZE 0xFFF

static inline void idg_nvm_set_region_id(struct intel_dg_nvm *nvm, u8 region)
{
	iowrite32((u32)region, nvm->base + NVM_REGION_ID_REG);
}

static inline u32 idg_nvm_error(struct intel_dg_nvm *nvm)
{
	void __iomem *base = nvm->base;

	u32 reg = ioread32(base + NVM_ACCESS_ERROR_REG) & NVM_ACCESS_ERROR_PCIE_MASK;

	/* reset error bits */
	if (reg)
		iowrite32(reg, base + NVM_ACCESS_ERROR_REG);

	return reg;
}

static inline u32 idg_nvm_read32(struct intel_dg_nvm *nvm, u32 address)
{
	void __iomem *base = nvm->base;

	iowrite32(address, base + NVM_ADDRESS_REG);

	return ioread32(base + NVM_TRIGGER_REG);
}

static inline u64 idg_nvm_read64(struct intel_dg_nvm *nvm, u32 address)
{
	void __iomem *base = nvm->base;

	iowrite32(address, base + NVM_ADDRESS_REG);

	return readq(base + NVM_TRIGGER_REG);
}

static void idg_nvm_write32(struct intel_dg_nvm *nvm, u32 address, u32 data)
{
	void __iomem *base = nvm->base;

	iowrite32(address, base + NVM_ADDRESS_REG);

	iowrite32(data, base + NVM_TRIGGER_REG);
}

static void idg_nvm_write64(struct intel_dg_nvm *nvm, u32 address, u64 data)
{
	void __iomem *base = nvm->base;

	iowrite32(address, base + NVM_ADDRESS_REG);

	writeq(data, base + NVM_TRIGGER_REG);
}

static int idg_nvm_get_access_map(struct intel_dg_nvm *nvm, u32 *access_map)
{
	u32 fmstr4_addr;
	u32 fmstr4;
	u32 flmap1;
	u32 fmba;

	idg_nvm_set_region_id(nvm, NVM_REGION_ID_DESCRIPTOR);

	flmap1 = idg_nvm_read32(nvm, NVM_FLMAP1_REG);
	if (idg_nvm_error(nvm))
		return -EIO;
	/* Get Flash Master Base Address (FMBA) */
	fmba = (FIELD_GET(NVM_MAP_ADDR_MASK, flmap1) << NVM_MAP_ADDR_SHIFT);
	fmstr4_addr = fmba + NVM_FLMSTR4_OFFSET;

	fmstr4 = idg_nvm_read32(nvm, fmstr4_addr);
	if (idg_nvm_error(nvm))
		return -EIO;

	*access_map = fmstr4;
	return 0;
}

/*
 * Region read/write access encoded in the access map
 * in the following order from the lower bit:
 * [3:0] regions 12-15 read state
 * [7:4] regions 12-15 write state
 * [19:8] regions 0-11 read state
 * [31:20] regions 0-11 write state
 */
static bool idg_nvm_region_readable(u32 access_map, u8 region)
{
	if (region < 12)
		return access_map & BIT(region + 8); /* [19:8] */
	else
		return access_map & BIT(region - 12); /* [3:0] */
}

static bool idg_nvm_region_writable(u32 access_map, u8 region)
{
	if (region < 12)
		return access_map & BIT(region + 20); /* [31:20] */
	else
		return access_map & BIT(region - 8); /* [7:4] */
}

static int idg_nvm_is_valid(struct intel_dg_nvm *nvm)
{
	u32 is_valid;

	idg_nvm_set_region_id(nvm, NVM_REGION_ID_DESCRIPTOR);

	is_valid = idg_nvm_read32(nvm, NVM_VALSIG_REG);
	if (idg_nvm_error(nvm))
		return -EIO;

	if (is_valid != NVM_FLVALSIG)
		return -ENODEV;

	return 0;
}

static unsigned int idg_nvm_get_region(const struct intel_dg_nvm *nvm, loff_t from)
{
	unsigned int i;

	for (i = 0; i < nvm->nregions; i++) {
		if ((nvm->regions[i].offset + nvm->regions[i].size - 1) >= from &&
		    nvm->regions[i].offset <= from &&
		    nvm->regions[i].size != 0)
			break;
	}

	return i;
}

static ssize_t idg_nvm_rewrite_partial(struct intel_dg_nvm *nvm, loff_t to,
				       loff_t offset, size_t len, const u32 *newdata)
{
	u32 data = idg_nvm_read32(nvm, to);

	if (idg_nvm_error(nvm))
		return -EIO;

	memcpy((u8 *)&data + offset, newdata, len);

	idg_nvm_write32(nvm, to, data);
	if (idg_nvm_error(nvm))
		return -EIO;

	return len;
}

static ssize_t idg_write(struct intel_dg_nvm *nvm, u8 region,
			 loff_t to, size_t len, const unsigned char *buf)
{
	size_t len_s = len;
	size_t to_shift;
	size_t len8;
	size_t len4;
	ssize_t ret;
	size_t to4;
	size_t i;

	idg_nvm_set_region_id(nvm, region);

	to4 = ALIGN_DOWN(to, sizeof(u32));
	to_shift = min(sizeof(u32) - ((size_t)to - to4), len);
	if (to - to4) {
		ret = idg_nvm_rewrite_partial(nvm, to4, to - to4, to_shift, (u32 *)&buf[0]);
		if (ret < 0)
			return ret;

		buf += to_shift;
		to += to_shift;
		len_s -= to_shift;
	}

	len8 = ALIGN_DOWN(len_s, sizeof(u64));
	for (i = 0; i < len8; i += sizeof(u64)) {
		u64 data;

		memcpy(&data, &buf[i], sizeof(u64));
		idg_nvm_write64(nvm, to + i, data);
		if (idg_nvm_error(nvm))
			return -EIO;
	}

	len4 = len_s - len8;
	if (len4 >= sizeof(u32)) {
		u32 data;

		memcpy(&data, &buf[i], sizeof(u32));
		idg_nvm_write32(nvm, to + i, data);
		if (idg_nvm_error(nvm))
			return -EIO;
		i += sizeof(u32);
		len4 -= sizeof(u32);
	}

	if (len4 > 0) {
		ret = idg_nvm_rewrite_partial(nvm, to + i, 0, len4, (u32 *)&buf[i]);
		if (ret < 0)
			return ret;
	}

	return len;
}

static ssize_t idg_read(struct intel_dg_nvm *nvm, u8 region,
			loff_t from, size_t len, unsigned char *buf)
{
	size_t len_s = len;
	size_t from_shift;
	size_t from4;
	size_t len8;
	size_t len4;
	size_t i;

	idg_nvm_set_region_id(nvm, region);

	from4 = ALIGN_DOWN(from, sizeof(u32));
	from_shift = min(sizeof(u32) - ((size_t)from - from4), len);

	if (from - from4) {
		u32 data = idg_nvm_read32(nvm, from4);

		if (idg_nvm_error(nvm))
			return -EIO;
		memcpy(&buf[0], (u8 *)&data + (from - from4), from_shift);
		len_s -= from_shift;
		buf += from_shift;
		from += from_shift;
	}

	len8 = ALIGN_DOWN(len_s, sizeof(u64));
	for (i = 0; i < len8; i += sizeof(u64)) {
		u64 data = idg_nvm_read64(nvm, from + i);

		if (idg_nvm_error(nvm))
			return -EIO;

		memcpy(&buf[i], &data, sizeof(data));
	}

	len4 = len_s - len8;
	if (len4 >= sizeof(u32)) {
		u32 data = idg_nvm_read32(nvm, from + i);

		if (idg_nvm_error(nvm))
			return -EIO;
		memcpy(&buf[i], &data, sizeof(data));
		i += sizeof(u32);
		len4 -= sizeof(u32);
	}

	if (len4 > 0) {
		u32 data = idg_nvm_read32(nvm, from + i);

		if (idg_nvm_error(nvm))
			return -EIO;
		memcpy(&buf[i], &data, len4);
	}

	return len;
}

static ssize_t
idg_erase(struct intel_dg_nvm *nvm, u8 region, loff_t from, u64 len, u64 *fail_addr)
{
	void __iomem *base = nvm->base;
	const u32 block = 0x10;
	u64 i;

	for (i = 0; i < len; i += SZ_4K) {
		iowrite32(from + i, base + NVM_ADDRESS_REG);
		iowrite32(region << 24 | block, base + NVM_ERASE_REG);
		/* Since the writes are via sgunit
		 * we cannot do back to back erases.
		 */
		msleep(50);
	}
	return len;
}

static int intel_dg_nvm_init(struct intel_dg_nvm *nvm, struct device *device)
{
	u32 access_map = 0;
	unsigned int i, n;
	int ret;

	/* clean error register, previous errors are ignored */
	idg_nvm_error(nvm);

	ret = idg_nvm_is_valid(nvm);
	if (ret) {
		dev_err(device, "The MEM is not valid %d\n", ret);
		return ret;
	}

	if (idg_nvm_get_access_map(nvm, &access_map))
		return -EIO;

	for (i = 0, n = 0; i < nvm->nregions; i++) {
		u32 address, base, limit, region;
		u8 id = nvm->regions[i].id;

		address = NVM_FLREG(id);
		region = idg_nvm_read32(nvm, address);

		base = FIELD_GET(NVM_FREG_BASE_MASK, region) << NVM_FREG_ADDR_SHIFT;
		limit = (FIELD_GET(NVM_FREG_ADDR_MASK, region) << NVM_FREG_ADDR_SHIFT) |
			NVM_FREG_MIN_REGION_SIZE;

		dev_dbg(device, "[%d] %s: region: 0x%08X base: 0x%08x limit: 0x%08x\n",
			id, nvm->regions[i].name, region, base, limit);

		if (base >= limit || (i > 0 && limit == 0)) {
			dev_dbg(device, "[%d] %s: disabled\n",
				id, nvm->regions[i].name);
			nvm->regions[i].is_readable = 0;
			continue;
		}

		if (nvm->size < limit)
			nvm->size = limit;

		nvm->regions[i].offset = base;
		nvm->regions[i].size = limit - base + 1;
		/* No write access to descriptor; mask it out */
		nvm->regions[i].is_writable = idg_nvm_region_writable(access_map, id);

		nvm->regions[i].is_readable = idg_nvm_region_readable(access_map, id);
		dev_dbg(device, "Registered, %s id=%d offset=%lld size=%lld rd=%d wr=%d\n",
			nvm->regions[i].name,
			nvm->regions[i].id,
			nvm->regions[i].offset,
			nvm->regions[i].size,
			nvm->regions[i].is_readable,
			nvm->regions[i].is_writable);

		if (nvm->regions[i].is_readable)
			n++;
	}

	dev_dbg(device, "Registered %d regions\n", n);

	/* Need to add 1 to the amount of memory
	 * so it is reported as an even block
	 */
	nvm->size += 1;

	return n;
}

static int intel_dg_mtd_erase(struct mtd_info *mtd, struct erase_info *info)
{
	struct intel_dg_nvm *nvm = mtd->priv;
	size_t total_len;
	unsigned int idx;
	ssize_t bytes;
	loff_t from;
	size_t len;
	u8 region;
	u64 addr;

	if (WARN_ON(!nvm))
		return -EINVAL;

	if (!IS_ALIGNED(info->addr, SZ_4K) || !IS_ALIGNED(info->len, SZ_4K)) {
		dev_err(&mtd->dev, "unaligned erase %llx %llx\n",
			info->addr, info->len);
		info->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
		return -EINVAL;
	}

	total_len = info->len;
	addr = info->addr;

	guard(mutex)(&nvm->lock);

	while (total_len > 0) {
		if (!IS_ALIGNED(addr, SZ_4K) || !IS_ALIGNED(total_len, SZ_4K)) {
			dev_err(&mtd->dev, "unaligned erase %llx %zx\n", addr, total_len);
			info->fail_addr = addr;
			return -ERANGE;
		}

		idx = idg_nvm_get_region(nvm, addr);
		if (idx >= nvm->nregions) {
			dev_err(&mtd->dev, "out of range");
			info->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
			return -ERANGE;
		}

		from = addr - nvm->regions[idx].offset;
		region = nvm->regions[idx].id;
		len = total_len;
		if (len > nvm->regions[idx].size - from)
			len = nvm->regions[idx].size - from;

		dev_dbg(&mtd->dev, "erasing region[%d] %s from %llx len %zx\n",
			region, nvm->regions[idx].name, from, len);

		bytes = idg_erase(nvm, region, from, len, &info->fail_addr);
		if (bytes < 0) {
			dev_dbg(&mtd->dev, "erase failed with %zd\n", bytes);
			info->fail_addr += nvm->regions[idx].offset;
			return bytes;
		}

		addr += len;
		total_len -= len;
	}

	return 0;
}

static int intel_dg_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
			     size_t *retlen, u_char *buf)
{
	struct intel_dg_nvm *nvm = mtd->priv;
	unsigned int idx;
	ssize_t ret;
	u8 region;

	if (WARN_ON(!nvm))
		return -EINVAL;

	idx = idg_nvm_get_region(nvm, from);
	if (idx >= nvm->nregions) {
		dev_err(&mtd->dev, "out of range");
		return -ERANGE;
	}

	dev_dbg(&mtd->dev, "reading region[%d] %s from %lld len %zd\n",
		nvm->regions[idx].id, nvm->regions[idx].name, from, len);

	from -= nvm->regions[idx].offset;
	region = nvm->regions[idx].id;
	if (len > nvm->regions[idx].size - from)
		len = nvm->regions[idx].size - from;

	guard(mutex)(&nvm->lock);

	ret = idg_read(nvm, region, from, len, buf);
	if (ret < 0) {
		dev_dbg(&mtd->dev, "read failed with %zd\n", ret);
		return ret;
	}

	*retlen = ret;

	return 0;
}

static int intel_dg_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
			      size_t *retlen, const u_char *buf)
{
	struct intel_dg_nvm *nvm = mtd->priv;
	unsigned int idx;
	ssize_t ret;
	u8 region;

	if (WARN_ON(!nvm))
		return -EINVAL;

	idx = idg_nvm_get_region(nvm, to);
	if (idx >= nvm->nregions) {
		dev_err(&mtd->dev, "out of range");
		return -ERANGE;
	}

	dev_dbg(&mtd->dev, "writing region[%d] %s to %lld len %zd\n",
		nvm->regions[idx].id, nvm->regions[idx].name, to, len);

	to -= nvm->regions[idx].offset;
	region = nvm->regions[idx].id;
	if (len > nvm->regions[idx].size - to)
		len = nvm->regions[idx].size - to;

	guard(mutex)(&nvm->lock);

	ret = idg_write(nvm, region, to, len, buf);
	if (ret < 0) {
		dev_dbg(&mtd->dev, "write failed with %zd\n", ret);
		return ret;
	}

	*retlen = ret;

	return 0;
}

static void intel_dg_nvm_release(struct kref *kref)
{
	struct intel_dg_nvm *nvm = container_of(kref, struct intel_dg_nvm, refcnt);
	int i;

	pr_debug("freeing intel_dg nvm\n");
	for (i = 0; i < nvm->nregions; i++)
		kfree(nvm->regions[i].name);
	mutex_destroy(&nvm->lock);
	kfree(nvm);
}

static int intel_dg_mtd_get_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct intel_dg_nvm *nvm = master->priv;

	if (WARN_ON(!nvm))
		return -EINVAL;
	pr_debug("get mtd %s %d\n", mtd->name, kref_read(&nvm->refcnt));
	kref_get(&nvm->refcnt);

	return 0;
}

static void intel_dg_mtd_put_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct intel_dg_nvm *nvm = master->priv;

	if (WARN_ON(!nvm))
		return;
	pr_debug("put mtd %s %d\n", mtd->name, kref_read(&nvm->refcnt));
	kref_put(&nvm->refcnt, intel_dg_nvm_release);
}

static int intel_dg_nvm_init_mtd(struct intel_dg_nvm *nvm, struct device *device,
				 unsigned int nparts, bool writable_override)
{
	struct mtd_partition *parts = NULL;
	unsigned int i, n;
	int ret;

	dev_dbg(device, "registering with mtd\n");

	nvm->mtd.owner = THIS_MODULE;
	nvm->mtd.dev.parent = device;
	nvm->mtd.flags = MTD_CAP_NORFLASH;
	nvm->mtd.type = MTD_DATAFLASH;
	nvm->mtd.priv = nvm;
	nvm->mtd._write = intel_dg_mtd_write;
	nvm->mtd._read = intel_dg_mtd_read;
	nvm->mtd._erase = intel_dg_mtd_erase;
	nvm->mtd._get_device = intel_dg_mtd_get_device;
	nvm->mtd._put_device = intel_dg_mtd_put_device;
	nvm->mtd.writesize = SZ_1; /* 1 byte granularity */
	nvm->mtd.erasesize = SZ_4K; /* 4K bytes granularity */
	nvm->mtd.size = nvm->size;

	parts = kcalloc(nvm->nregions, sizeof(*parts), GFP_KERNEL);
	if (!parts)
		return -ENOMEM;

	for (i = 0, n = 0; i < nvm->nregions && n < nparts; i++) {
		if (!nvm->regions[i].is_readable)
			continue;
		parts[n].name = nvm->regions[i].name;
		parts[n].offset = nvm->regions[i].offset;
		parts[n].size = nvm->regions[i].size;
		if (!nvm->regions[i].is_writable && !writable_override)
			parts[n].mask_flags = MTD_WRITEABLE;
		n++;
	}

	ret = mtd_device_register(&nvm->mtd, parts, n);

	kfree(parts);
	return ret;
}

static int intel_dg_mtd_probe(struct auxiliary_device *aux_dev,
			      const struct auxiliary_device_id *aux_dev_id)
{
	struct intel_dg_nvm_dev *invm = auxiliary_dev_to_intel_dg_nvm_dev(aux_dev);
	struct intel_dg_nvm *nvm;
	struct device *device;
	unsigned int nregions;
	unsigned int i, n;
	int ret;

	device = &aux_dev->dev;

	/* count available regions */
	for (nregions = 0, i = 0; i < INTEL_DG_NVM_REGIONS; i++) {
		if (invm->regions[i].name)
			nregions++;
	}

	if (!nregions) {
		dev_err(device, "no regions defined\n");
		return -ENODEV;
	}

	nvm = kzalloc(struct_size(nvm, regions, nregions), GFP_KERNEL);
	if (!nvm)
		return -ENOMEM;

	kref_init(&nvm->refcnt);
	mutex_init(&nvm->lock);

	for (n = 0, i = 0; i < INTEL_DG_NVM_REGIONS; i++) {
		if (!invm->regions[i].name)
			continue;

		char *name = kasprintf(GFP_KERNEL, "%s.%s",
				       dev_name(&aux_dev->dev), invm->regions[i].name);
		if (!name)
			continue;
		nvm->regions[n].name = name;
		nvm->regions[n].id = i;
		n++;
	}
	nvm->nregions = n; /* in case kasprintf failed for some region */

	nvm->base = devm_ioremap_resource(device, &invm->bar);
	if (IS_ERR(nvm->base)) {
		ret = PTR_ERR(nvm->base);
		goto err;
	}

	ret = intel_dg_nvm_init(nvm, device);
	if (ret < 0) {
		dev_err(device, "cannot initialize nvm %d\n", ret);
		goto err;
	}

	ret = intel_dg_nvm_init_mtd(nvm, device, ret, invm->writable_override);
	if (ret) {
		dev_err(device, "failed to init mtd %d\n", ret);
		goto err;
	}

	dev_set_drvdata(&aux_dev->dev, nvm);

	return 0;

err:
	kref_put(&nvm->refcnt, intel_dg_nvm_release);
	return ret;
}

static void intel_dg_mtd_remove(struct auxiliary_device *aux_dev)
{
	struct intel_dg_nvm *nvm = dev_get_drvdata(&aux_dev->dev);

	if (!nvm)
		return;

	mtd_device_unregister(&nvm->mtd);

	dev_set_drvdata(&aux_dev->dev, NULL);

	kref_put(&nvm->refcnt, intel_dg_nvm_release);
}

static const struct auxiliary_device_id intel_dg_mtd_id_table[] = {
	{
		.name = "i915.nvm",
	},
	{
		.name = "xe.nvm",
	},
	{
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(auxiliary, intel_dg_mtd_id_table);

static struct auxiliary_driver intel_dg_mtd_driver = {
	.probe = intel_dg_mtd_probe,
	.remove = intel_dg_mtd_remove,
	.driver = {
		/* auxiliary_driver_register() sets .name to be the modname */
	},
	.id_table = intel_dg_mtd_id_table
};
module_auxiliary_driver(intel_dg_mtd_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel DGFX MTD driver");