1ceb5ab3cSAlexander Usyskin // SPDX-License-Identifier: GPL-2.0 2ceb5ab3cSAlexander Usyskin /* 3ceb5ab3cSAlexander Usyskin * Copyright(c) 2019-2025, Intel Corporation. All rights reserved. 4ceb5ab3cSAlexander Usyskin */ 5ceb5ab3cSAlexander Usyskin 67234b321SAlexander Usyskin #include <linux/bitfield.h> 77234b321SAlexander Usyskin #include <linux/bits.h> 8044c8be0SAlexander Usyskin #include <linux/cleanup.h> 99fe53abfSAlexander Usyskin #include <linux/delay.h> 10ceb5ab3cSAlexander Usyskin #include <linux/device.h> 11ceb5ab3cSAlexander Usyskin #include <linux/intel_dg_nvm_aux.h> 12ceb5ab3cSAlexander Usyskin #include <linux/io.h> 139fe53abfSAlexander Usyskin #include <linux/io-64-nonatomic-lo-hi.h> 14ceb5ab3cSAlexander Usyskin #include <linux/kernel.h> 15ceb5ab3cSAlexander Usyskin #include <linux/module.h> 16044c8be0SAlexander Usyskin #include <linux/mtd/mtd.h> 17044c8be0SAlexander Usyskin #include <linux/mtd/partitions.h> 18ceb5ab3cSAlexander Usyskin #include <linux/string.h> 19ceb5ab3cSAlexander Usyskin #include <linux/slab.h> 209fe53abfSAlexander Usyskin #include <linux/sizes.h> 21ceb5ab3cSAlexander Usyskin #include <linux/types.h> 22ceb5ab3cSAlexander Usyskin 23ceb5ab3cSAlexander Usyskin struct intel_dg_nvm { 24ceb5ab3cSAlexander Usyskin struct kref refcnt; 25044c8be0SAlexander Usyskin struct mtd_info mtd; 26044c8be0SAlexander Usyskin struct mutex lock; /* region access lock */ 27ceb5ab3cSAlexander Usyskin void __iomem *base; 28*a1c940cbSReuven Abliyev void __iomem *base2; 29*a1c940cbSReuven Abliyev bool non_posted_erase; 30*a1c940cbSReuven Abliyev 31ceb5ab3cSAlexander Usyskin size_t size; 32ceb5ab3cSAlexander Usyskin unsigned int nregions; 33ceb5ab3cSAlexander Usyskin struct { 34ceb5ab3cSAlexander Usyskin const char *name; 35ceb5ab3cSAlexander Usyskin u8 id; 36ceb5ab3cSAlexander Usyskin u64 offset; 37ceb5ab3cSAlexander Usyskin u64 size; 387234b321SAlexander Usyskin unsigned int is_readable:1; 397234b321SAlexander Usyskin unsigned int is_writable:1; 40ceb5ab3cSAlexander Usyskin } regions[] __counted_by(nregions); 41ceb5ab3cSAlexander Usyskin }; 42ceb5ab3cSAlexander Usyskin 437234b321SAlexander Usyskin #define NVM_TRIGGER_REG 0x00000000 447234b321SAlexander Usyskin #define NVM_VALSIG_REG 0x00000010 457234b321SAlexander Usyskin #define NVM_ADDRESS_REG 0x00000040 467234b321SAlexander Usyskin #define NVM_REGION_ID_REG 0x00000044 47*a1c940cbSReuven Abliyev #define NVM_DEBUG_REG 0x00000000 487234b321SAlexander Usyskin /* 497234b321SAlexander Usyskin * [15:0]-Erase size = 0x0010 4K 0x0080 32K 0x0100 64K 507234b321SAlexander Usyskin * [23:16]-Reserved 517234b321SAlexander Usyskin * [31:24]-Erase MEM RegionID 527234b321SAlexander Usyskin */ 537234b321SAlexander Usyskin #define NVM_ERASE_REG 0x00000048 547234b321SAlexander Usyskin #define NVM_ACCESS_ERROR_REG 0x00000070 557234b321SAlexander Usyskin #define NVM_ADDRESS_ERROR_REG 0x00000074 567234b321SAlexander Usyskin 577234b321SAlexander Usyskin /* Flash Valid Signature */ 587234b321SAlexander Usyskin #define NVM_FLVALSIG 0x0FF0A55A 597234b321SAlexander Usyskin 607234b321SAlexander Usyskin #define NVM_MAP_ADDR_MASK GENMASK(7, 0) 617234b321SAlexander Usyskin #define NVM_MAP_ADDR_SHIFT 0x00000004 627234b321SAlexander Usyskin 637234b321SAlexander Usyskin #define NVM_REGION_ID_DESCRIPTOR 0 647234b321SAlexander Usyskin /* Flash Region Base Address */ 657234b321SAlexander Usyskin #define NVM_FRBA 0x40 667234b321SAlexander Usyskin /* Flash Region __n - Flash Descriptor Record */ 677234b321SAlexander Usyskin #define NVM_FLREG(__n) (NVM_FRBA + ((__n) * 4)) 687234b321SAlexander Usyskin /* Flash Map 1 Register */ 697234b321SAlexander Usyskin #define NVM_FLMAP1_REG 0x18 707234b321SAlexander Usyskin #define NVM_FLMSTR4_OFFSET 0x00C 717234b321SAlexander Usyskin 727234b321SAlexander Usyskin #define NVM_ACCESS_ERROR_PCIE_MASK 0x7 737234b321SAlexander Usyskin 747234b321SAlexander Usyskin #define NVM_FREG_BASE_MASK GENMASK(15, 0) 757234b321SAlexander Usyskin #define NVM_FREG_ADDR_MASK GENMASK(31, 16) 767234b321SAlexander Usyskin #define NVM_FREG_ADDR_SHIFT 12 777234b321SAlexander Usyskin #define NVM_FREG_MIN_REGION_SIZE 0xFFF 787234b321SAlexander Usyskin 79*a1c940cbSReuven Abliyev #define NVM_NON_POSTED_ERASE_DONE BIT(23) 80*a1c940cbSReuven Abliyev #define NVM_NON_POSTED_ERASE_DONE_ITER 3000 81*a1c940cbSReuven Abliyev 827234b321SAlexander Usyskin static inline void idg_nvm_set_region_id(struct intel_dg_nvm *nvm, u8 region) 837234b321SAlexander Usyskin { 847234b321SAlexander Usyskin iowrite32((u32)region, nvm->base + NVM_REGION_ID_REG); 857234b321SAlexander Usyskin } 867234b321SAlexander Usyskin 877234b321SAlexander Usyskin static inline u32 idg_nvm_error(struct intel_dg_nvm *nvm) 887234b321SAlexander Usyskin { 897234b321SAlexander Usyskin void __iomem *base = nvm->base; 907234b321SAlexander Usyskin 917234b321SAlexander Usyskin u32 reg = ioread32(base + NVM_ACCESS_ERROR_REG) & NVM_ACCESS_ERROR_PCIE_MASK; 927234b321SAlexander Usyskin 937234b321SAlexander Usyskin /* reset error bits */ 947234b321SAlexander Usyskin if (reg) 957234b321SAlexander Usyskin iowrite32(reg, base + NVM_ACCESS_ERROR_REG); 967234b321SAlexander Usyskin 977234b321SAlexander Usyskin return reg; 987234b321SAlexander Usyskin } 997234b321SAlexander Usyskin 1007234b321SAlexander Usyskin static inline u32 idg_nvm_read32(struct intel_dg_nvm *nvm, u32 address) 1017234b321SAlexander Usyskin { 1027234b321SAlexander Usyskin void __iomem *base = nvm->base; 1037234b321SAlexander Usyskin 1047234b321SAlexander Usyskin iowrite32(address, base + NVM_ADDRESS_REG); 1057234b321SAlexander Usyskin 1067234b321SAlexander Usyskin return ioread32(base + NVM_TRIGGER_REG); 1077234b321SAlexander Usyskin } 1087234b321SAlexander Usyskin 1099fe53abfSAlexander Usyskin static inline u64 idg_nvm_read64(struct intel_dg_nvm *nvm, u32 address) 1109fe53abfSAlexander Usyskin { 1119fe53abfSAlexander Usyskin void __iomem *base = nvm->base; 1129fe53abfSAlexander Usyskin 1139fe53abfSAlexander Usyskin iowrite32(address, base + NVM_ADDRESS_REG); 1149fe53abfSAlexander Usyskin 1159fe53abfSAlexander Usyskin return readq(base + NVM_TRIGGER_REG); 1169fe53abfSAlexander Usyskin } 1179fe53abfSAlexander Usyskin 1189fe53abfSAlexander Usyskin static void idg_nvm_write32(struct intel_dg_nvm *nvm, u32 address, u32 data) 1199fe53abfSAlexander Usyskin { 1209fe53abfSAlexander Usyskin void __iomem *base = nvm->base; 1219fe53abfSAlexander Usyskin 1229fe53abfSAlexander Usyskin iowrite32(address, base + NVM_ADDRESS_REG); 1239fe53abfSAlexander Usyskin 1249fe53abfSAlexander Usyskin iowrite32(data, base + NVM_TRIGGER_REG); 1259fe53abfSAlexander Usyskin } 1269fe53abfSAlexander Usyskin 1279fe53abfSAlexander Usyskin static void idg_nvm_write64(struct intel_dg_nvm *nvm, u32 address, u64 data) 1289fe53abfSAlexander Usyskin { 1299fe53abfSAlexander Usyskin void __iomem *base = nvm->base; 1309fe53abfSAlexander Usyskin 1319fe53abfSAlexander Usyskin iowrite32(address, base + NVM_ADDRESS_REG); 1329fe53abfSAlexander Usyskin 1339fe53abfSAlexander Usyskin writeq(data, base + NVM_TRIGGER_REG); 1349fe53abfSAlexander Usyskin } 1359fe53abfSAlexander Usyskin 1367234b321SAlexander Usyskin static int idg_nvm_get_access_map(struct intel_dg_nvm *nvm, u32 *access_map) 1377234b321SAlexander Usyskin { 1387234b321SAlexander Usyskin u32 fmstr4_addr; 1397234b321SAlexander Usyskin u32 fmstr4; 1407234b321SAlexander Usyskin u32 flmap1; 1417234b321SAlexander Usyskin u32 fmba; 1427234b321SAlexander Usyskin 1437234b321SAlexander Usyskin idg_nvm_set_region_id(nvm, NVM_REGION_ID_DESCRIPTOR); 1447234b321SAlexander Usyskin 1457234b321SAlexander Usyskin flmap1 = idg_nvm_read32(nvm, NVM_FLMAP1_REG); 1467234b321SAlexander Usyskin if (idg_nvm_error(nvm)) 1477234b321SAlexander Usyskin return -EIO; 1487234b321SAlexander Usyskin /* Get Flash Master Baser Address (FMBA) */ 1497234b321SAlexander Usyskin fmba = (FIELD_GET(NVM_MAP_ADDR_MASK, flmap1) << NVM_MAP_ADDR_SHIFT); 1507234b321SAlexander Usyskin fmstr4_addr = fmba + NVM_FLMSTR4_OFFSET; 1517234b321SAlexander Usyskin 1527234b321SAlexander Usyskin fmstr4 = idg_nvm_read32(nvm, fmstr4_addr); 1537234b321SAlexander Usyskin if (idg_nvm_error(nvm)) 1547234b321SAlexander Usyskin return -EIO; 1557234b321SAlexander Usyskin 1567234b321SAlexander Usyskin *access_map = fmstr4; 1577234b321SAlexander Usyskin return 0; 1587234b321SAlexander Usyskin } 1597234b321SAlexander Usyskin 1607234b321SAlexander Usyskin /* 1617234b321SAlexander Usyskin * Region read/write access encoded in the access map 1627234b321SAlexander Usyskin * in the following order from the lower bit: 1637234b321SAlexander Usyskin * [3:0] regions 12-15 read state 1647234b321SAlexander Usyskin * [7:4] regions 12-15 write state 1657234b321SAlexander Usyskin * [19:8] regions 0-11 read state 1667234b321SAlexander Usyskin * [31:20] regions 0-11 write state 1677234b321SAlexander Usyskin */ 1687234b321SAlexander Usyskin static bool idg_nvm_region_readable(u32 access_map, u8 region) 1697234b321SAlexander Usyskin { 1707234b321SAlexander Usyskin if (region < 12) 1717234b321SAlexander Usyskin return access_map & BIT(region + 8); /* [19:8] */ 1727234b321SAlexander Usyskin else 1737234b321SAlexander Usyskin return access_map & BIT(region - 12); /* [3:0] */ 1747234b321SAlexander Usyskin } 1757234b321SAlexander Usyskin 1767234b321SAlexander Usyskin static bool idg_nvm_region_writable(u32 access_map, u8 region) 1777234b321SAlexander Usyskin { 1787234b321SAlexander Usyskin if (region < 12) 1797234b321SAlexander Usyskin return access_map & BIT(region + 20); /* [31:20] */ 1807234b321SAlexander Usyskin else 1817234b321SAlexander Usyskin return access_map & BIT(region - 8); /* [7:4] */ 1827234b321SAlexander Usyskin } 1837234b321SAlexander Usyskin 1847234b321SAlexander Usyskin static int idg_nvm_is_valid(struct intel_dg_nvm *nvm) 1857234b321SAlexander Usyskin { 1867234b321SAlexander Usyskin u32 is_valid; 1877234b321SAlexander Usyskin 1887234b321SAlexander Usyskin idg_nvm_set_region_id(nvm, NVM_REGION_ID_DESCRIPTOR); 1897234b321SAlexander Usyskin 1907234b321SAlexander Usyskin is_valid = idg_nvm_read32(nvm, NVM_VALSIG_REG); 1917234b321SAlexander Usyskin if (idg_nvm_error(nvm)) 1927234b321SAlexander Usyskin return -EIO; 1937234b321SAlexander Usyskin 1947234b321SAlexander Usyskin if (is_valid != NVM_FLVALSIG) 1957234b321SAlexander Usyskin return -ENODEV; 1967234b321SAlexander Usyskin 1977234b321SAlexander Usyskin return 0; 1987234b321SAlexander Usyskin } 1997234b321SAlexander Usyskin 2009fe53abfSAlexander Usyskin static unsigned int idg_nvm_get_region(const struct intel_dg_nvm *nvm, loff_t from) 2019fe53abfSAlexander Usyskin { 2029fe53abfSAlexander Usyskin unsigned int i; 2039fe53abfSAlexander Usyskin 2049fe53abfSAlexander Usyskin for (i = 0; i < nvm->nregions; i++) { 2059fe53abfSAlexander Usyskin if ((nvm->regions[i].offset + nvm->regions[i].size - 1) >= from && 2069fe53abfSAlexander Usyskin nvm->regions[i].offset <= from && 2079fe53abfSAlexander Usyskin nvm->regions[i].size != 0) 2089fe53abfSAlexander Usyskin break; 2099fe53abfSAlexander Usyskin } 2109fe53abfSAlexander Usyskin 2119fe53abfSAlexander Usyskin return i; 2129fe53abfSAlexander Usyskin } 2139fe53abfSAlexander Usyskin 2149fe53abfSAlexander Usyskin static ssize_t idg_nvm_rewrite_partial(struct intel_dg_nvm *nvm, loff_t to, 2159fe53abfSAlexander Usyskin loff_t offset, size_t len, const u32 *newdata) 2169fe53abfSAlexander Usyskin { 2179fe53abfSAlexander Usyskin u32 data = idg_nvm_read32(nvm, to); 2189fe53abfSAlexander Usyskin 2199fe53abfSAlexander Usyskin if (idg_nvm_error(nvm)) 2209fe53abfSAlexander Usyskin return -EIO; 2219fe53abfSAlexander Usyskin 2229fe53abfSAlexander Usyskin memcpy((u8 *)&data + offset, newdata, len); 2239fe53abfSAlexander Usyskin 2249fe53abfSAlexander Usyskin idg_nvm_write32(nvm, to, data); 2259fe53abfSAlexander Usyskin if (idg_nvm_error(nvm)) 2269fe53abfSAlexander Usyskin return -EIO; 2279fe53abfSAlexander Usyskin 2289fe53abfSAlexander Usyskin return len; 2299fe53abfSAlexander Usyskin } 2309fe53abfSAlexander Usyskin 2319fe53abfSAlexander Usyskin static ssize_t idg_write(struct intel_dg_nvm *nvm, u8 region, 2329fe53abfSAlexander Usyskin loff_t to, size_t len, const unsigned char *buf) 2339fe53abfSAlexander Usyskin { 2349fe53abfSAlexander Usyskin size_t len_s = len; 2359fe53abfSAlexander Usyskin size_t to_shift; 2369fe53abfSAlexander Usyskin size_t len8; 2379fe53abfSAlexander Usyskin size_t len4; 2389fe53abfSAlexander Usyskin ssize_t ret; 2399fe53abfSAlexander Usyskin size_t to4; 2409fe53abfSAlexander Usyskin size_t i; 2419fe53abfSAlexander Usyskin 2429fe53abfSAlexander Usyskin idg_nvm_set_region_id(nvm, region); 2439fe53abfSAlexander Usyskin 2449fe53abfSAlexander Usyskin to4 = ALIGN_DOWN(to, sizeof(u32)); 2459fe53abfSAlexander Usyskin to_shift = min(sizeof(u32) - ((size_t)to - to4), len); 2469fe53abfSAlexander Usyskin if (to - to4) { 2479fe53abfSAlexander Usyskin ret = idg_nvm_rewrite_partial(nvm, to4, to - to4, to_shift, (u32 *)&buf[0]); 2489fe53abfSAlexander Usyskin if (ret < 0) 2499fe53abfSAlexander Usyskin return ret; 2509fe53abfSAlexander Usyskin 2519fe53abfSAlexander Usyskin buf += to_shift; 2529fe53abfSAlexander Usyskin to += to_shift; 2539fe53abfSAlexander Usyskin len_s -= to_shift; 2549fe53abfSAlexander Usyskin } 2559fe53abfSAlexander Usyskin 2564b93f5fcSAlexander Usyskin if (!IS_ALIGNED(to, sizeof(u64)) && 2574b93f5fcSAlexander Usyskin ((to ^ (to + len_s)) & GENMASK(31, 10))) { 2584b93f5fcSAlexander Usyskin /* 2594b93f5fcSAlexander Usyskin * Workaround reads/writes across 1k-aligned addresses 2604b93f5fcSAlexander Usyskin * (start u32 before 1k, end u32 after) 2614b93f5fcSAlexander Usyskin * as this fails on hardware. 2624b93f5fcSAlexander Usyskin */ 2634b93f5fcSAlexander Usyskin u32 data; 2644b93f5fcSAlexander Usyskin 2654b93f5fcSAlexander Usyskin memcpy(&data, &buf[0], sizeof(u32)); 2664b93f5fcSAlexander Usyskin idg_nvm_write32(nvm, to, data); 2674b93f5fcSAlexander Usyskin if (idg_nvm_error(nvm)) 2684b93f5fcSAlexander Usyskin return -EIO; 2694b93f5fcSAlexander Usyskin buf += sizeof(u32); 2704b93f5fcSAlexander Usyskin to += sizeof(u32); 2714b93f5fcSAlexander Usyskin len_s -= sizeof(u32); 2724b93f5fcSAlexander Usyskin } 2734b93f5fcSAlexander Usyskin 2749fe53abfSAlexander Usyskin len8 = ALIGN_DOWN(len_s, sizeof(u64)); 2759fe53abfSAlexander Usyskin for (i = 0; i < len8; i += sizeof(u64)) { 2769fe53abfSAlexander Usyskin u64 data; 2779fe53abfSAlexander Usyskin 2789fe53abfSAlexander Usyskin memcpy(&data, &buf[i], sizeof(u64)); 2799fe53abfSAlexander Usyskin idg_nvm_write64(nvm, to + i, data); 2809fe53abfSAlexander Usyskin if (idg_nvm_error(nvm)) 2819fe53abfSAlexander Usyskin return -EIO; 2829fe53abfSAlexander Usyskin } 2839fe53abfSAlexander Usyskin 2849fe53abfSAlexander Usyskin len4 = len_s - len8; 2859fe53abfSAlexander Usyskin if (len4 >= sizeof(u32)) { 2869fe53abfSAlexander Usyskin u32 data; 2879fe53abfSAlexander Usyskin 2889fe53abfSAlexander Usyskin memcpy(&data, &buf[i], sizeof(u32)); 2899fe53abfSAlexander Usyskin idg_nvm_write32(nvm, to + i, data); 2909fe53abfSAlexander Usyskin if (idg_nvm_error(nvm)) 2919fe53abfSAlexander Usyskin return -EIO; 2929fe53abfSAlexander Usyskin i += sizeof(u32); 2939fe53abfSAlexander Usyskin len4 -= sizeof(u32); 2949fe53abfSAlexander Usyskin } 2959fe53abfSAlexander Usyskin 2969fe53abfSAlexander Usyskin if (len4 > 0) { 2979fe53abfSAlexander Usyskin ret = idg_nvm_rewrite_partial(nvm, to + i, 0, len4, (u32 *)&buf[i]); 2989fe53abfSAlexander Usyskin if (ret < 0) 2999fe53abfSAlexander Usyskin return ret; 3009fe53abfSAlexander Usyskin } 3019fe53abfSAlexander Usyskin 3029fe53abfSAlexander Usyskin return len; 3039fe53abfSAlexander Usyskin } 3049fe53abfSAlexander Usyskin 3059fe53abfSAlexander Usyskin static ssize_t idg_read(struct intel_dg_nvm *nvm, u8 region, 3069fe53abfSAlexander Usyskin loff_t from, size_t len, unsigned char *buf) 3079fe53abfSAlexander Usyskin { 3089fe53abfSAlexander Usyskin size_t len_s = len; 3099fe53abfSAlexander Usyskin size_t from_shift; 3109fe53abfSAlexander Usyskin size_t from4; 3119fe53abfSAlexander Usyskin size_t len8; 3129fe53abfSAlexander Usyskin size_t len4; 3139fe53abfSAlexander Usyskin size_t i; 3149fe53abfSAlexander Usyskin 3159fe53abfSAlexander Usyskin idg_nvm_set_region_id(nvm, region); 3169fe53abfSAlexander Usyskin 3179fe53abfSAlexander Usyskin from4 = ALIGN_DOWN(from, sizeof(u32)); 3189fe53abfSAlexander Usyskin from_shift = min(sizeof(u32) - ((size_t)from - from4), len); 3199fe53abfSAlexander Usyskin 3209fe53abfSAlexander Usyskin if (from - from4) { 3219fe53abfSAlexander Usyskin u32 data = idg_nvm_read32(nvm, from4); 3229fe53abfSAlexander Usyskin 3239fe53abfSAlexander Usyskin if (idg_nvm_error(nvm)) 3249fe53abfSAlexander Usyskin return -EIO; 3259fe53abfSAlexander Usyskin memcpy(&buf[0], (u8 *)&data + (from - from4), from_shift); 3269fe53abfSAlexander Usyskin len_s -= from_shift; 3279fe53abfSAlexander Usyskin buf += from_shift; 3289fe53abfSAlexander Usyskin from += from_shift; 3299fe53abfSAlexander Usyskin } 3309fe53abfSAlexander Usyskin 3314b93f5fcSAlexander Usyskin if (!IS_ALIGNED(from, sizeof(u64)) && 3324b93f5fcSAlexander Usyskin ((from ^ (from + len_s)) & GENMASK(31, 10))) { 3334b93f5fcSAlexander Usyskin /* 3344b93f5fcSAlexander Usyskin * Workaround reads/writes across 1k-aligned addresses 3354b93f5fcSAlexander Usyskin * (start u32 before 1k, end u32 after) 3364b93f5fcSAlexander Usyskin * as this fails on hardware. 3374b93f5fcSAlexander Usyskin */ 3384b93f5fcSAlexander Usyskin u32 data = idg_nvm_read32(nvm, from); 3394b93f5fcSAlexander Usyskin 3404b93f5fcSAlexander Usyskin if (idg_nvm_error(nvm)) 3414b93f5fcSAlexander Usyskin return -EIO; 3424b93f5fcSAlexander Usyskin memcpy(&buf[0], &data, sizeof(data)); 3434b93f5fcSAlexander Usyskin len_s -= sizeof(u32); 3444b93f5fcSAlexander Usyskin buf += sizeof(u32); 3454b93f5fcSAlexander Usyskin from += sizeof(u32); 3464b93f5fcSAlexander Usyskin } 3474b93f5fcSAlexander Usyskin 3489fe53abfSAlexander Usyskin len8 = ALIGN_DOWN(len_s, sizeof(u64)); 3499fe53abfSAlexander Usyskin for (i = 0; i < len8; i += sizeof(u64)) { 3509fe53abfSAlexander Usyskin u64 data = idg_nvm_read64(nvm, from + i); 3519fe53abfSAlexander Usyskin 3529fe53abfSAlexander Usyskin if (idg_nvm_error(nvm)) 3539fe53abfSAlexander Usyskin return -EIO; 3549fe53abfSAlexander Usyskin 3559fe53abfSAlexander Usyskin memcpy(&buf[i], &data, sizeof(data)); 3569fe53abfSAlexander Usyskin } 3579fe53abfSAlexander Usyskin 3589fe53abfSAlexander Usyskin len4 = len_s - len8; 3599fe53abfSAlexander Usyskin if (len4 >= sizeof(u32)) { 3609fe53abfSAlexander Usyskin u32 data = idg_nvm_read32(nvm, from + i); 3619fe53abfSAlexander Usyskin 3629fe53abfSAlexander Usyskin if (idg_nvm_error(nvm)) 3639fe53abfSAlexander Usyskin return -EIO; 3649fe53abfSAlexander Usyskin memcpy(&buf[i], &data, sizeof(data)); 3659fe53abfSAlexander Usyskin i += sizeof(u32); 3669fe53abfSAlexander Usyskin len4 -= sizeof(u32); 3679fe53abfSAlexander Usyskin } 3689fe53abfSAlexander Usyskin 3699fe53abfSAlexander Usyskin if (len4 > 0) { 3709fe53abfSAlexander Usyskin u32 data = idg_nvm_read32(nvm, from + i); 3719fe53abfSAlexander Usyskin 3729fe53abfSAlexander Usyskin if (idg_nvm_error(nvm)) 3739fe53abfSAlexander Usyskin return -EIO; 3749fe53abfSAlexander Usyskin memcpy(&buf[i], &data, len4); 3759fe53abfSAlexander Usyskin } 3769fe53abfSAlexander Usyskin 3779fe53abfSAlexander Usyskin return len; 3789fe53abfSAlexander Usyskin } 3799fe53abfSAlexander Usyskin 3809fe53abfSAlexander Usyskin static ssize_t 3819fe53abfSAlexander Usyskin idg_erase(struct intel_dg_nvm *nvm, u8 region, loff_t from, u64 len, u64 *fail_addr) 3829fe53abfSAlexander Usyskin { 383*a1c940cbSReuven Abliyev void __iomem *base2 = nvm->base2; 3849fe53abfSAlexander Usyskin void __iomem *base = nvm->base; 3859fe53abfSAlexander Usyskin const u32 block = 0x10; 386*a1c940cbSReuven Abliyev u32 iter = 0; 387*a1c940cbSReuven Abliyev u32 reg; 3889fe53abfSAlexander Usyskin u64 i; 3899fe53abfSAlexander Usyskin 3909fe53abfSAlexander Usyskin for (i = 0; i < len; i += SZ_4K) { 3919fe53abfSAlexander Usyskin iowrite32(from + i, base + NVM_ADDRESS_REG); 3929fe53abfSAlexander Usyskin iowrite32(region << 24 | block, base + NVM_ERASE_REG); 393*a1c940cbSReuven Abliyev if (nvm->non_posted_erase) { 394*a1c940cbSReuven Abliyev /* Wait for Erase Done */ 395*a1c940cbSReuven Abliyev reg = ioread32(base2 + NVM_DEBUG_REG); 396*a1c940cbSReuven Abliyev while (!(reg & NVM_NON_POSTED_ERASE_DONE) && 397*a1c940cbSReuven Abliyev ++iter < NVM_NON_POSTED_ERASE_DONE_ITER) { 398*a1c940cbSReuven Abliyev msleep(10); 399*a1c940cbSReuven Abliyev reg = ioread32(base2 + NVM_DEBUG_REG); 400*a1c940cbSReuven Abliyev } 401*a1c940cbSReuven Abliyev if (reg & NVM_NON_POSTED_ERASE_DONE) { 402*a1c940cbSReuven Abliyev /* Clear Erase Done */ 403*a1c940cbSReuven Abliyev iowrite32(reg, base2 + NVM_DEBUG_REG); 404*a1c940cbSReuven Abliyev } else { 405*a1c940cbSReuven Abliyev *fail_addr = from + i; 406*a1c940cbSReuven Abliyev return -ETIME; 407*a1c940cbSReuven Abliyev } 408*a1c940cbSReuven Abliyev } 4099fe53abfSAlexander Usyskin /* Since the writes are via sgunit 4109fe53abfSAlexander Usyskin * we cannot do back to back erases. 4119fe53abfSAlexander Usyskin */ 4129fe53abfSAlexander Usyskin msleep(50); 4139fe53abfSAlexander Usyskin } 4149fe53abfSAlexander Usyskin return len; 4159fe53abfSAlexander Usyskin } 4169fe53abfSAlexander Usyskin 417*a1c940cbSReuven Abliyev static int intel_dg_nvm_init(struct intel_dg_nvm *nvm, struct device *device, 418*a1c940cbSReuven Abliyev bool non_posted_erase) 4197234b321SAlexander Usyskin { 4207234b321SAlexander Usyskin u32 access_map = 0; 4217234b321SAlexander Usyskin unsigned int i, n; 4227234b321SAlexander Usyskin int ret; 4237234b321SAlexander Usyskin 4247234b321SAlexander Usyskin /* clean error register, previous errors are ignored */ 4257234b321SAlexander Usyskin idg_nvm_error(nvm); 4267234b321SAlexander Usyskin 4277234b321SAlexander Usyskin ret = idg_nvm_is_valid(nvm); 4287234b321SAlexander Usyskin if (ret) { 4297234b321SAlexander Usyskin dev_err(device, "The MEM is not valid %d\n", ret); 4307234b321SAlexander Usyskin return ret; 4317234b321SAlexander Usyskin } 4327234b321SAlexander Usyskin 4337234b321SAlexander Usyskin if (idg_nvm_get_access_map(nvm, &access_map)) 4347234b321SAlexander Usyskin return -EIO; 4357234b321SAlexander Usyskin 4367234b321SAlexander Usyskin for (i = 0, n = 0; i < nvm->nregions; i++) { 4377234b321SAlexander Usyskin u32 address, base, limit, region; 4387234b321SAlexander Usyskin u8 id = nvm->regions[i].id; 4397234b321SAlexander Usyskin 4407234b321SAlexander Usyskin address = NVM_FLREG(id); 4417234b321SAlexander Usyskin region = idg_nvm_read32(nvm, address); 4427234b321SAlexander Usyskin 4437234b321SAlexander Usyskin base = FIELD_GET(NVM_FREG_BASE_MASK, region) << NVM_FREG_ADDR_SHIFT; 4447234b321SAlexander Usyskin limit = (FIELD_GET(NVM_FREG_ADDR_MASK, region) << NVM_FREG_ADDR_SHIFT) | 4457234b321SAlexander Usyskin NVM_FREG_MIN_REGION_SIZE; 4467234b321SAlexander Usyskin 4477234b321SAlexander Usyskin dev_dbg(device, "[%d] %s: region: 0x%08X base: 0x%08x limit: 0x%08x\n", 4487234b321SAlexander Usyskin id, nvm->regions[i].name, region, base, limit); 4497234b321SAlexander Usyskin 4507234b321SAlexander Usyskin if (base >= limit || (i > 0 && limit == 0)) { 4517234b321SAlexander Usyskin dev_dbg(device, "[%d] %s: disabled\n", 4527234b321SAlexander Usyskin id, nvm->regions[i].name); 4537234b321SAlexander Usyskin nvm->regions[i].is_readable = 0; 4547234b321SAlexander Usyskin continue; 4557234b321SAlexander Usyskin } 4567234b321SAlexander Usyskin 4577234b321SAlexander Usyskin if (nvm->size < limit) 4587234b321SAlexander Usyskin nvm->size = limit; 4597234b321SAlexander Usyskin 4607234b321SAlexander Usyskin nvm->regions[i].offset = base; 4617234b321SAlexander Usyskin nvm->regions[i].size = limit - base + 1; 4627234b321SAlexander Usyskin /* No write access to descriptor; mask it out*/ 4637234b321SAlexander Usyskin nvm->regions[i].is_writable = idg_nvm_region_writable(access_map, id); 4647234b321SAlexander Usyskin 4657234b321SAlexander Usyskin nvm->regions[i].is_readable = idg_nvm_region_readable(access_map, id); 4667234b321SAlexander Usyskin dev_dbg(device, "Registered, %s id=%d offset=%lld size=%lld rd=%d wr=%d\n", 4677234b321SAlexander Usyskin nvm->regions[i].name, 4687234b321SAlexander Usyskin nvm->regions[i].id, 4697234b321SAlexander Usyskin nvm->regions[i].offset, 4707234b321SAlexander Usyskin nvm->regions[i].size, 4717234b321SAlexander Usyskin nvm->regions[i].is_readable, 4727234b321SAlexander Usyskin nvm->regions[i].is_writable); 4737234b321SAlexander Usyskin 4747234b321SAlexander Usyskin if (nvm->regions[i].is_readable) 4757234b321SAlexander Usyskin n++; 4767234b321SAlexander Usyskin } 4777234b321SAlexander Usyskin 478*a1c940cbSReuven Abliyev nvm->non_posted_erase = non_posted_erase; 479*a1c940cbSReuven Abliyev 4807234b321SAlexander Usyskin dev_dbg(device, "Registered %d regions\n", n); 481*a1c940cbSReuven Abliyev dev_dbg(device, "Non posted erase %d\n", nvm->non_posted_erase); 4827234b321SAlexander Usyskin 4837234b321SAlexander Usyskin /* Need to add 1 to the amount of memory 4847234b321SAlexander Usyskin * so it is reported as an even block 4857234b321SAlexander Usyskin */ 4867234b321SAlexander Usyskin nvm->size += 1; 4877234b321SAlexander Usyskin 4887234b321SAlexander Usyskin return n; 4897234b321SAlexander Usyskin } 4907234b321SAlexander Usyskin 491044c8be0SAlexander Usyskin static int intel_dg_mtd_erase(struct mtd_info *mtd, struct erase_info *info) 492044c8be0SAlexander Usyskin { 493044c8be0SAlexander Usyskin struct intel_dg_nvm *nvm = mtd->priv; 494044c8be0SAlexander Usyskin size_t total_len; 495044c8be0SAlexander Usyskin unsigned int idx; 496044c8be0SAlexander Usyskin ssize_t bytes; 497044c8be0SAlexander Usyskin loff_t from; 498044c8be0SAlexander Usyskin size_t len; 499044c8be0SAlexander Usyskin u8 region; 500044c8be0SAlexander Usyskin u64 addr; 501044c8be0SAlexander Usyskin 502044c8be0SAlexander Usyskin if (WARN_ON(!nvm)) 503044c8be0SAlexander Usyskin return -EINVAL; 504044c8be0SAlexander Usyskin 505044c8be0SAlexander Usyskin if (!IS_ALIGNED(info->addr, SZ_4K) || !IS_ALIGNED(info->len, SZ_4K)) { 506044c8be0SAlexander Usyskin dev_err(&mtd->dev, "unaligned erase %llx %llx\n", 507044c8be0SAlexander Usyskin info->addr, info->len); 508044c8be0SAlexander Usyskin info->fail_addr = MTD_FAIL_ADDR_UNKNOWN; 509044c8be0SAlexander Usyskin return -EINVAL; 510044c8be0SAlexander Usyskin } 511044c8be0SAlexander Usyskin 512044c8be0SAlexander Usyskin total_len = info->len; 513044c8be0SAlexander Usyskin addr = info->addr; 514044c8be0SAlexander Usyskin 515044c8be0SAlexander Usyskin guard(mutex)(&nvm->lock); 516044c8be0SAlexander Usyskin 517044c8be0SAlexander Usyskin while (total_len > 0) { 518044c8be0SAlexander Usyskin if (!IS_ALIGNED(addr, SZ_4K) || !IS_ALIGNED(total_len, SZ_4K)) { 519044c8be0SAlexander Usyskin dev_err(&mtd->dev, "unaligned erase %llx %zx\n", addr, total_len); 520044c8be0SAlexander Usyskin info->fail_addr = addr; 521044c8be0SAlexander Usyskin return -ERANGE; 522044c8be0SAlexander Usyskin } 523044c8be0SAlexander Usyskin 524044c8be0SAlexander Usyskin idx = idg_nvm_get_region(nvm, addr); 525044c8be0SAlexander Usyskin if (idx >= nvm->nregions) { 526044c8be0SAlexander Usyskin dev_err(&mtd->dev, "out of range"); 527044c8be0SAlexander Usyskin info->fail_addr = MTD_FAIL_ADDR_UNKNOWN; 528044c8be0SAlexander Usyskin return -ERANGE; 529044c8be0SAlexander Usyskin } 530044c8be0SAlexander Usyskin 531044c8be0SAlexander Usyskin from = addr - nvm->regions[idx].offset; 532044c8be0SAlexander Usyskin region = nvm->regions[idx].id; 533044c8be0SAlexander Usyskin len = total_len; 534044c8be0SAlexander Usyskin if (len > nvm->regions[idx].size - from) 535044c8be0SAlexander Usyskin len = nvm->regions[idx].size - from; 536044c8be0SAlexander Usyskin 537044c8be0SAlexander Usyskin dev_dbg(&mtd->dev, "erasing region[%d] %s from %llx len %zx\n", 538044c8be0SAlexander Usyskin region, nvm->regions[idx].name, from, len); 539044c8be0SAlexander Usyskin 540044c8be0SAlexander Usyskin bytes = idg_erase(nvm, region, from, len, &info->fail_addr); 541044c8be0SAlexander Usyskin if (bytes < 0) { 542044c8be0SAlexander Usyskin dev_dbg(&mtd->dev, "erase failed with %zd\n", bytes); 543044c8be0SAlexander Usyskin info->fail_addr += nvm->regions[idx].offset; 544044c8be0SAlexander Usyskin return bytes; 545044c8be0SAlexander Usyskin } 546044c8be0SAlexander Usyskin 547044c8be0SAlexander Usyskin addr += len; 548044c8be0SAlexander Usyskin total_len -= len; 549044c8be0SAlexander Usyskin } 550044c8be0SAlexander Usyskin 551044c8be0SAlexander Usyskin return 0; 552044c8be0SAlexander Usyskin } 553044c8be0SAlexander Usyskin 554044c8be0SAlexander Usyskin static int intel_dg_mtd_read(struct mtd_info *mtd, loff_t from, size_t len, 555044c8be0SAlexander Usyskin size_t *retlen, u_char *buf) 556044c8be0SAlexander Usyskin { 557044c8be0SAlexander Usyskin struct intel_dg_nvm *nvm = mtd->priv; 558044c8be0SAlexander Usyskin unsigned int idx; 559044c8be0SAlexander Usyskin ssize_t ret; 560044c8be0SAlexander Usyskin u8 region; 561044c8be0SAlexander Usyskin 562044c8be0SAlexander Usyskin if (WARN_ON(!nvm)) 563044c8be0SAlexander Usyskin return -EINVAL; 564044c8be0SAlexander Usyskin 565044c8be0SAlexander Usyskin idx = idg_nvm_get_region(nvm, from); 566044c8be0SAlexander Usyskin 567044c8be0SAlexander Usyskin dev_dbg(&mtd->dev, "reading region[%d] %s from %lld len %zd\n", 568044c8be0SAlexander Usyskin nvm->regions[idx].id, nvm->regions[idx].name, from, len); 569044c8be0SAlexander Usyskin 570044c8be0SAlexander Usyskin if (idx >= nvm->nregions) { 571044c8be0SAlexander Usyskin dev_err(&mtd->dev, "out of range"); 572044c8be0SAlexander Usyskin return -ERANGE; 573044c8be0SAlexander Usyskin } 574044c8be0SAlexander Usyskin 575044c8be0SAlexander Usyskin from -= nvm->regions[idx].offset; 576044c8be0SAlexander Usyskin region = nvm->regions[idx].id; 577044c8be0SAlexander Usyskin if (len > nvm->regions[idx].size - from) 578044c8be0SAlexander Usyskin len = nvm->regions[idx].size - from; 579044c8be0SAlexander Usyskin 580044c8be0SAlexander Usyskin guard(mutex)(&nvm->lock); 581044c8be0SAlexander Usyskin 582044c8be0SAlexander Usyskin ret = idg_read(nvm, region, from, len, buf); 583044c8be0SAlexander Usyskin if (ret < 0) { 584044c8be0SAlexander Usyskin dev_dbg(&mtd->dev, "read failed with %zd\n", ret); 585044c8be0SAlexander Usyskin return ret; 586044c8be0SAlexander Usyskin } 587044c8be0SAlexander Usyskin 588044c8be0SAlexander Usyskin *retlen = ret; 589044c8be0SAlexander Usyskin 590044c8be0SAlexander Usyskin return 0; 591044c8be0SAlexander Usyskin } 592044c8be0SAlexander Usyskin 593044c8be0SAlexander Usyskin static int intel_dg_mtd_write(struct mtd_info *mtd, loff_t to, size_t len, 594044c8be0SAlexander Usyskin size_t *retlen, const u_char *buf) 595044c8be0SAlexander Usyskin { 596044c8be0SAlexander Usyskin struct intel_dg_nvm *nvm = mtd->priv; 597044c8be0SAlexander Usyskin unsigned int idx; 598044c8be0SAlexander Usyskin ssize_t ret; 599044c8be0SAlexander Usyskin u8 region; 600044c8be0SAlexander Usyskin 601044c8be0SAlexander Usyskin if (WARN_ON(!nvm)) 602044c8be0SAlexander Usyskin return -EINVAL; 603044c8be0SAlexander Usyskin 604044c8be0SAlexander Usyskin idx = idg_nvm_get_region(nvm, to); 605044c8be0SAlexander Usyskin 606044c8be0SAlexander Usyskin dev_dbg(&mtd->dev, "writing region[%d] %s to %lld len %zd\n", 607044c8be0SAlexander Usyskin nvm->regions[idx].id, nvm->regions[idx].name, to, len); 608044c8be0SAlexander Usyskin 609044c8be0SAlexander Usyskin if (idx >= nvm->nregions) { 610044c8be0SAlexander Usyskin dev_err(&mtd->dev, "out of range"); 611044c8be0SAlexander Usyskin return -ERANGE; 612044c8be0SAlexander Usyskin } 613044c8be0SAlexander Usyskin 614044c8be0SAlexander Usyskin to -= nvm->regions[idx].offset; 615044c8be0SAlexander Usyskin region = nvm->regions[idx].id; 616044c8be0SAlexander Usyskin if (len > nvm->regions[idx].size - to) 617044c8be0SAlexander Usyskin len = nvm->regions[idx].size - to; 618044c8be0SAlexander Usyskin 619044c8be0SAlexander Usyskin guard(mutex)(&nvm->lock); 620044c8be0SAlexander Usyskin 621044c8be0SAlexander Usyskin ret = idg_write(nvm, region, to, len, buf); 622044c8be0SAlexander Usyskin if (ret < 0) { 623044c8be0SAlexander Usyskin dev_dbg(&mtd->dev, "write failed with %zd\n", ret); 624044c8be0SAlexander Usyskin return ret; 625044c8be0SAlexander Usyskin } 626044c8be0SAlexander Usyskin 627044c8be0SAlexander Usyskin *retlen = ret; 628044c8be0SAlexander Usyskin 629044c8be0SAlexander Usyskin return 0; 630044c8be0SAlexander Usyskin } 631044c8be0SAlexander Usyskin 632ceb5ab3cSAlexander Usyskin static void intel_dg_nvm_release(struct kref *kref) 633ceb5ab3cSAlexander Usyskin { 634ceb5ab3cSAlexander Usyskin struct intel_dg_nvm *nvm = container_of(kref, struct intel_dg_nvm, refcnt); 635ceb5ab3cSAlexander Usyskin int i; 636ceb5ab3cSAlexander Usyskin 637ceb5ab3cSAlexander Usyskin pr_debug("freeing intel_dg nvm\n"); 638ceb5ab3cSAlexander Usyskin for (i = 0; i < nvm->nregions; i++) 639ceb5ab3cSAlexander Usyskin kfree(nvm->regions[i].name); 640044c8be0SAlexander Usyskin mutex_destroy(&nvm->lock); 641ceb5ab3cSAlexander Usyskin kfree(nvm); 642ceb5ab3cSAlexander Usyskin } 643ceb5ab3cSAlexander Usyskin 644044c8be0SAlexander Usyskin static int intel_dg_mtd_get_device(struct mtd_info *mtd) 645044c8be0SAlexander Usyskin { 646044c8be0SAlexander Usyskin struct mtd_info *master = mtd_get_master(mtd); 647044c8be0SAlexander Usyskin struct intel_dg_nvm *nvm = master->priv; 648044c8be0SAlexander Usyskin 649044c8be0SAlexander Usyskin if (WARN_ON(!nvm)) 650044c8be0SAlexander Usyskin return -EINVAL; 651044c8be0SAlexander Usyskin pr_debug("get mtd %s %d\n", mtd->name, kref_read(&nvm->refcnt)); 652044c8be0SAlexander Usyskin kref_get(&nvm->refcnt); 653044c8be0SAlexander Usyskin 654044c8be0SAlexander Usyskin return 0; 655044c8be0SAlexander Usyskin } 656044c8be0SAlexander Usyskin 657044c8be0SAlexander Usyskin static void intel_dg_mtd_put_device(struct mtd_info *mtd) 658044c8be0SAlexander Usyskin { 659044c8be0SAlexander Usyskin struct mtd_info *master = mtd_get_master(mtd); 660044c8be0SAlexander Usyskin struct intel_dg_nvm *nvm = master->priv; 661044c8be0SAlexander Usyskin 662044c8be0SAlexander Usyskin if (WARN_ON(!nvm)) 663044c8be0SAlexander Usyskin return; 664044c8be0SAlexander Usyskin pr_debug("put mtd %s %d\n", mtd->name, kref_read(&nvm->refcnt)); 665044c8be0SAlexander Usyskin kref_put(&nvm->refcnt, intel_dg_nvm_release); 666044c8be0SAlexander Usyskin } 667044c8be0SAlexander Usyskin 668044c8be0SAlexander Usyskin static int intel_dg_nvm_init_mtd(struct intel_dg_nvm *nvm, struct device *device, 669044c8be0SAlexander Usyskin unsigned int nparts, bool writable_override) 670044c8be0SAlexander Usyskin { 671044c8be0SAlexander Usyskin struct mtd_partition *parts = NULL; 672044c8be0SAlexander Usyskin unsigned int i, n; 673044c8be0SAlexander Usyskin int ret; 674044c8be0SAlexander Usyskin 675044c8be0SAlexander Usyskin dev_dbg(device, "registering with mtd\n"); 676044c8be0SAlexander Usyskin 677044c8be0SAlexander Usyskin nvm->mtd.owner = THIS_MODULE; 678044c8be0SAlexander Usyskin nvm->mtd.dev.parent = device; 679044c8be0SAlexander Usyskin nvm->mtd.flags = MTD_CAP_NORFLASH; 680044c8be0SAlexander Usyskin nvm->mtd.type = MTD_DATAFLASH; 681044c8be0SAlexander Usyskin nvm->mtd.priv = nvm; 682044c8be0SAlexander Usyskin nvm->mtd._write = intel_dg_mtd_write; 683044c8be0SAlexander Usyskin nvm->mtd._read = intel_dg_mtd_read; 684044c8be0SAlexander Usyskin nvm->mtd._erase = intel_dg_mtd_erase; 685044c8be0SAlexander Usyskin nvm->mtd._get_device = intel_dg_mtd_get_device; 686044c8be0SAlexander Usyskin nvm->mtd._put_device = intel_dg_mtd_put_device; 687044c8be0SAlexander Usyskin nvm->mtd.writesize = SZ_1; /* 1 byte granularity */ 688044c8be0SAlexander Usyskin nvm->mtd.erasesize = SZ_4K; /* 4K bytes granularity */ 689044c8be0SAlexander Usyskin nvm->mtd.size = nvm->size; 690044c8be0SAlexander Usyskin 691044c8be0SAlexander Usyskin parts = kcalloc(nvm->nregions, sizeof(*parts), GFP_KERNEL); 692044c8be0SAlexander Usyskin if (!parts) 693044c8be0SAlexander Usyskin return -ENOMEM; 694044c8be0SAlexander Usyskin 695044c8be0SAlexander Usyskin for (i = 0, n = 0; i < nvm->nregions && n < nparts; i++) { 696044c8be0SAlexander Usyskin if (!nvm->regions[i].is_readable) 697044c8be0SAlexander Usyskin continue; 698044c8be0SAlexander Usyskin parts[n].name = nvm->regions[i].name; 699044c8be0SAlexander Usyskin parts[n].offset = nvm->regions[i].offset; 700044c8be0SAlexander Usyskin parts[n].size = nvm->regions[i].size; 701044c8be0SAlexander Usyskin if (!nvm->regions[i].is_writable && !writable_override) 702044c8be0SAlexander Usyskin parts[n].mask_flags = MTD_WRITEABLE; 703044c8be0SAlexander Usyskin n++; 704044c8be0SAlexander Usyskin } 705044c8be0SAlexander Usyskin 706044c8be0SAlexander Usyskin ret = mtd_device_register(&nvm->mtd, parts, n); 707044c8be0SAlexander Usyskin 708044c8be0SAlexander Usyskin kfree(parts); 709044c8be0SAlexander Usyskin return ret; 710044c8be0SAlexander Usyskin } 711044c8be0SAlexander Usyskin 712ceb5ab3cSAlexander Usyskin static int intel_dg_mtd_probe(struct auxiliary_device *aux_dev, 713ceb5ab3cSAlexander Usyskin const struct auxiliary_device_id *aux_dev_id) 714ceb5ab3cSAlexander Usyskin { 715ceb5ab3cSAlexander Usyskin struct intel_dg_nvm_dev *invm = auxiliary_dev_to_intel_dg_nvm_dev(aux_dev); 716ceb5ab3cSAlexander Usyskin struct intel_dg_nvm *nvm; 717ceb5ab3cSAlexander Usyskin struct device *device; 718ceb5ab3cSAlexander Usyskin unsigned int nregions; 719ceb5ab3cSAlexander Usyskin unsigned int i, n; 720ceb5ab3cSAlexander Usyskin int ret; 721ceb5ab3cSAlexander Usyskin 722ceb5ab3cSAlexander Usyskin device = &aux_dev->dev; 723ceb5ab3cSAlexander Usyskin 724ceb5ab3cSAlexander Usyskin /* count available regions */ 725ceb5ab3cSAlexander Usyskin for (nregions = 0, i = 0; i < INTEL_DG_NVM_REGIONS; i++) { 726ceb5ab3cSAlexander Usyskin if (invm->regions[i].name) 727ceb5ab3cSAlexander Usyskin nregions++; 728ceb5ab3cSAlexander Usyskin } 729ceb5ab3cSAlexander Usyskin 730ceb5ab3cSAlexander Usyskin if (!nregions) { 731ceb5ab3cSAlexander Usyskin dev_err(device, "no regions defined\n"); 732ceb5ab3cSAlexander Usyskin return -ENODEV; 733ceb5ab3cSAlexander Usyskin } 734ceb5ab3cSAlexander Usyskin 735ceb5ab3cSAlexander Usyskin nvm = kzalloc(struct_size(nvm, regions, nregions), GFP_KERNEL); 736ceb5ab3cSAlexander Usyskin if (!nvm) 737ceb5ab3cSAlexander Usyskin return -ENOMEM; 738ceb5ab3cSAlexander Usyskin 739ceb5ab3cSAlexander Usyskin kref_init(&nvm->refcnt); 740044c8be0SAlexander Usyskin mutex_init(&nvm->lock); 741ceb5ab3cSAlexander Usyskin 742ceb5ab3cSAlexander Usyskin for (n = 0, i = 0; i < INTEL_DG_NVM_REGIONS; i++) { 743ceb5ab3cSAlexander Usyskin if (!invm->regions[i].name) 744ceb5ab3cSAlexander Usyskin continue; 745ceb5ab3cSAlexander Usyskin 746ceb5ab3cSAlexander Usyskin char *name = kasprintf(GFP_KERNEL, "%s.%s", 747ceb5ab3cSAlexander Usyskin dev_name(&aux_dev->dev), invm->regions[i].name); 748ceb5ab3cSAlexander Usyskin if (!name) 749ceb5ab3cSAlexander Usyskin continue; 750ceb5ab3cSAlexander Usyskin nvm->regions[n].name = name; 751ceb5ab3cSAlexander Usyskin nvm->regions[n].id = i; 752ceb5ab3cSAlexander Usyskin n++; 753ceb5ab3cSAlexander Usyskin } 754ceb5ab3cSAlexander Usyskin nvm->nregions = n; /* in case where kasprintf fail */ 755ceb5ab3cSAlexander Usyskin 756ceb5ab3cSAlexander Usyskin nvm->base = devm_ioremap_resource(device, &invm->bar); 757ceb5ab3cSAlexander Usyskin if (IS_ERR(nvm->base)) { 758ceb5ab3cSAlexander Usyskin ret = PTR_ERR(nvm->base); 759ceb5ab3cSAlexander Usyskin goto err; 760ceb5ab3cSAlexander Usyskin } 761ceb5ab3cSAlexander Usyskin 762*a1c940cbSReuven Abliyev if (invm->non_posted_erase) { 763*a1c940cbSReuven Abliyev nvm->base2 = devm_ioremap_resource(device, &invm->bar2); 764*a1c940cbSReuven Abliyev if (IS_ERR(nvm->base2)) { 765*a1c940cbSReuven Abliyev ret = PTR_ERR(nvm->base2); 766*a1c940cbSReuven Abliyev goto err; 767*a1c940cbSReuven Abliyev } 768*a1c940cbSReuven Abliyev } 769*a1c940cbSReuven Abliyev 770*a1c940cbSReuven Abliyev ret = intel_dg_nvm_init(nvm, device, invm->non_posted_erase); 7717234b321SAlexander Usyskin if (ret < 0) { 7727234b321SAlexander Usyskin dev_err(device, "cannot initialize nvm %d\n", ret); 7737234b321SAlexander Usyskin goto err; 7747234b321SAlexander Usyskin } 7757234b321SAlexander Usyskin 776044c8be0SAlexander Usyskin ret = intel_dg_nvm_init_mtd(nvm, device, ret, invm->writable_override); 777044c8be0SAlexander Usyskin if (ret) { 778044c8be0SAlexander Usyskin dev_err(device, "failed init mtd %d\n", ret); 779044c8be0SAlexander Usyskin goto err; 780044c8be0SAlexander Usyskin } 781044c8be0SAlexander Usyskin 782ceb5ab3cSAlexander Usyskin dev_set_drvdata(&aux_dev->dev, nvm); 783ceb5ab3cSAlexander Usyskin 784ceb5ab3cSAlexander Usyskin return 0; 785ceb5ab3cSAlexander Usyskin 786ceb5ab3cSAlexander Usyskin err: 787ceb5ab3cSAlexander Usyskin kref_put(&nvm->refcnt, intel_dg_nvm_release); 788ceb5ab3cSAlexander Usyskin return ret; 789ceb5ab3cSAlexander Usyskin } 790ceb5ab3cSAlexander Usyskin 791ceb5ab3cSAlexander Usyskin static void intel_dg_mtd_remove(struct auxiliary_device *aux_dev) 792ceb5ab3cSAlexander Usyskin { 793ceb5ab3cSAlexander Usyskin struct intel_dg_nvm *nvm = dev_get_drvdata(&aux_dev->dev); 794ceb5ab3cSAlexander Usyskin 795ceb5ab3cSAlexander Usyskin if (!nvm) 796ceb5ab3cSAlexander Usyskin return; 797ceb5ab3cSAlexander Usyskin 798044c8be0SAlexander Usyskin mtd_device_unregister(&nvm->mtd); 799044c8be0SAlexander Usyskin 800ceb5ab3cSAlexander Usyskin dev_set_drvdata(&aux_dev->dev, NULL); 801ceb5ab3cSAlexander Usyskin 802ceb5ab3cSAlexander Usyskin kref_put(&nvm->refcnt, intel_dg_nvm_release); 803ceb5ab3cSAlexander Usyskin } 804ceb5ab3cSAlexander Usyskin 805ceb5ab3cSAlexander Usyskin static const struct auxiliary_device_id intel_dg_mtd_id_table[] = { 806ceb5ab3cSAlexander Usyskin { 807ceb5ab3cSAlexander Usyskin .name = "i915.nvm", 808ceb5ab3cSAlexander Usyskin }, 809ceb5ab3cSAlexander Usyskin { 810ceb5ab3cSAlexander Usyskin .name = "xe.nvm", 811ceb5ab3cSAlexander Usyskin }, 812ceb5ab3cSAlexander Usyskin { 813ceb5ab3cSAlexander Usyskin /* sentinel */ 814ceb5ab3cSAlexander Usyskin } 815ceb5ab3cSAlexander Usyskin }; 816ceb5ab3cSAlexander Usyskin MODULE_DEVICE_TABLE(auxiliary, intel_dg_mtd_id_table); 817ceb5ab3cSAlexander Usyskin 818ceb5ab3cSAlexander Usyskin static struct auxiliary_driver intel_dg_mtd_driver = { 819ceb5ab3cSAlexander Usyskin .probe = intel_dg_mtd_probe, 820ceb5ab3cSAlexander Usyskin .remove = intel_dg_mtd_remove, 821ceb5ab3cSAlexander Usyskin .driver = { 822ceb5ab3cSAlexander Usyskin /* auxiliary_driver_register() sets .name to be the modname */ 823ceb5ab3cSAlexander Usyskin }, 824ceb5ab3cSAlexander Usyskin .id_table = intel_dg_mtd_id_table 825ceb5ab3cSAlexander Usyskin }; 826ceb5ab3cSAlexander Usyskin module_auxiliary_driver(intel_dg_mtd_driver); 827ceb5ab3cSAlexander Usyskin 828ceb5ab3cSAlexander Usyskin MODULE_LICENSE("GPL"); 829ceb5ab3cSAlexander Usyskin MODULE_AUTHOR("Intel Corporation"); 830ceb5ab3cSAlexander Usyskin MODULE_DESCRIPTION("Intel DGFX MTD driver"); 831