// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(c) 2019-2025, Intel Corporation. All rights reserved.
 */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/intel_dg_nvm_aux.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/types.h>

struct intel_dg_nvm {
	struct kref refcnt;
	void __iomem *base;
	size_t size;
	unsigned int nregions;
	struct {
		const char *name;
		u8 id;
		u64 offset;
		u64 size;
		unsigned int is_readable:1;
		unsigned int is_writable:1;
	} regions[] __counted_by(nregions);
};

#define NVM_TRIGGER_REG		0x00000000
#define NVM_VALSIG_REG		0x00000010
#define NVM_ADDRESS_REG		0x00000040
#define NVM_REGION_ID_REG	0x00000044
/*
 * [15:0]-Erase size = 0x0010 4K 0x0080 32K 0x0100 64K
 * [23:16]-Reserved
 * [31:24]-Erase MEM RegionID
 */
#define NVM_ERASE_REG		0x00000048
#define NVM_ACCESS_ERROR_REG	0x00000070
#define NVM_ADDRESS_ERROR_REG	0x00000074

/* Flash Valid Signature */
#define NVM_FLVALSIG		0x0FF0A55A

#define NVM_MAP_ADDR_MASK	GENMASK(7, 0)
#define NVM_MAP_ADDR_SHIFT	0x00000004

#define NVM_REGION_ID_DESCRIPTOR 0
/* Flash Region Base Address */
#define NVM_FRBA		0x40
/* Flash Region __n - Flash Descriptor Record */
#define NVM_FLREG(__n)		(NVM_FRBA + ((__n) * 4))
/* Flash Map 1 Register */
#define NVM_FLMAP1_REG		0x18
#define NVM_FLMSTR4_OFFSET	0x00C

#define NVM_ACCESS_ERROR_PCIE_MASK	0x7

#define NVM_FREG_BASE_MASK	GENMASK(15, 0)
#define NVM_FREG_ADDR_MASK	GENMASK(31, 16)
#define NVM_FREG_ADDR_SHIFT	12
#define NVM_FREG_MIN_REGION_SIZE	0xFFF

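/*
 * Illustrative sketch only, not used elsewhere in this file: following the
 * NVM_ERASE_REG field layout documented above, an erase request word could
 * be packed as below.  The mask names and the helper itself are assumptions
 * made for illustration, not part of a published programming interface.
 */
#define NVM_ERASE_SIZE_MASK	GENMASK(15, 0)
#define NVM_ERASE_REGION_MASK	GENMASK(31, 24)

static inline u32 idg_nvm_erase_word(u8 region, u16 erase_size)
{
	/* e.g. erase_size = 0x0010 for a 4K erase, per the layout comment */
	return FIELD_PREP(NVM_ERASE_REGION_MASK, region) |
	       FIELD_PREP(NVM_ERASE_SIZE_MASK, erase_size);
}

/*
 * Register access model, as implied by the helpers below: software selects a
 * flash region via NVM_REGION_ID_REG, latches a byte offset in
 * NVM_ADDRESS_REG, and a 32-bit read of NVM_TRIGGER_REG then returns the data
 * at that offset.  Access faults are latched in NVM_ACCESS_ERROR_REG and are
 * cleared by writing the set bits back.
 */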
static inline void idg_nvm_set_region_id(struct intel_dg_nvm *nvm, u8 region)
{
	iowrite32((u32)region, nvm->base + NVM_REGION_ID_REG);
}

static inline u32 idg_nvm_error(struct intel_dg_nvm *nvm)
{
	void __iomem *base = nvm->base;

	u32 reg = ioread32(base + NVM_ACCESS_ERROR_REG) & NVM_ACCESS_ERROR_PCIE_MASK;

	/* reset error bits */
	if (reg)
		iowrite32(reg, base + NVM_ACCESS_ERROR_REG);

	return reg;
}

static inline u32 idg_nvm_read32(struct intel_dg_nvm *nvm, u32 address)
{
	void __iomem *base = nvm->base;

	iowrite32(address, base + NVM_ADDRESS_REG);

	return ioread32(base + NVM_TRIGGER_REG);
}

static int idg_nvm_get_access_map(struct intel_dg_nvm *nvm, u32 *access_map)
{
	u32 fmstr4_addr;
	u32 fmstr4;
	u32 flmap1;
	u32 fmba;

	idg_nvm_set_region_id(nvm, NVM_REGION_ID_DESCRIPTOR);

	flmap1 = idg_nvm_read32(nvm, NVM_FLMAP1_REG);
	if (idg_nvm_error(nvm))
		return -EIO;
	/* Get Flash Master Base Address (FMBA) */
	fmba = (FIELD_GET(NVM_MAP_ADDR_MASK, flmap1) << NVM_MAP_ADDR_SHIFT);
	fmstr4_addr = fmba + NVM_FLMSTR4_OFFSET;

	fmstr4 = idg_nvm_read32(nvm, fmstr4_addr);
	if (idg_nvm_error(nvm))
		return -EIO;

	*access_map = fmstr4;
	return 0;
}

/*
 * Region read/write access encoded in the access map
 * in the following order from the lower bit:
 * [3:0] regions 12-15 read state
 * [7:4] regions 12-15 write state
 * [19:8] regions 0-11 read state
 * [31:20] regions 0-11 write state
 */
static bool idg_nvm_region_readable(u32 access_map, u8 region)
{
	if (region < 12)
		return access_map & BIT(region + 8); /* [19:8] */
	else
		return access_map & BIT(region - 12); /* [3:0] */
}

static bool idg_nvm_region_writable(u32 access_map, u8 region)
{
	if (region < 12)
		return access_map & BIT(region + 20); /* [31:20] */
	else
		return access_map & BIT(region - 8); /* [7:4] */
}

static int idg_nvm_is_valid(struct intel_dg_nvm *nvm)
{
	u32 is_valid;

	idg_nvm_set_region_id(nvm, NVM_REGION_ID_DESCRIPTOR);

	is_valid = idg_nvm_read32(nvm, NVM_VALSIG_REG);
	if (idg_nvm_error(nvm))
		return -EIO;

	if (is_valid != NVM_FLVALSIG)
		return -ENODEV;

	return 0;
}

static int intel_dg_nvm_init(struct intel_dg_nvm *nvm, struct device *device)
{
	u32 access_map = 0;
	unsigned int i, n;
	int ret;

	/* clean error register, previous errors are ignored */
	idg_nvm_error(nvm);

	ret = idg_nvm_is_valid(nvm);
	if (ret) {
		dev_err(device, "The MEM is not valid %d\n", ret);
		return ret;
	}

	if (idg_nvm_get_access_map(nvm, &access_map))
		return -EIO;

	for (i = 0, n = 0; i < nvm->nregions; i++) {
		u32 address, base, limit, region;
		u8 id = nvm->regions[i].id;

		address = NVM_FLREG(id);
		region = idg_nvm_read32(nvm, address);

		base = FIELD_GET(NVM_FREG_BASE_MASK, region) << NVM_FREG_ADDR_SHIFT;
		limit = (FIELD_GET(NVM_FREG_ADDR_MASK, region) << NVM_FREG_ADDR_SHIFT) |
			NVM_FREG_MIN_REGION_SIZE;

		dev_dbg(device, "[%d] %s: region: 0x%08X base: 0x%08x limit: 0x%08x\n",
			id, nvm->regions[i].name, region, base, limit);

		if (base >= limit || (i > 0 && limit == 0)) {
			dev_dbg(device, "[%d] %s: disabled\n",
				id, nvm->regions[i].name);
			nvm->regions[i].is_readable = 0;
			continue;
		}

		if (nvm->size < limit)
			nvm->size = limit;

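		/*
		 * base and limit are 4KiB-granular; limit is the address of
		 * the region's last byte, so the region spans [base, limit]
		 * and its byte size is limit - base + 1.
		 */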
		nvm->regions[i].offset = base;
		nvm->regions[i].size = limit - base + 1;
		/* No write access to descriptor; mask it out */
		nvm->regions[i].is_writable = idg_nvm_region_writable(access_map, id);

		nvm->regions[i].is_readable = idg_nvm_region_readable(access_map, id);
		dev_dbg(device, "Registered, %s id=%d offset=%lld size=%lld rd=%d wr=%d\n",
			nvm->regions[i].name,
			nvm->regions[i].id,
			nvm->regions[i].offset,
			nvm->regions[i].size,
			nvm->regions[i].is_readable,
			nvm->regions[i].is_writable);

		if (nvm->regions[i].is_readable)
			n++;
	}

	dev_dbg(device, "Registered %d regions\n", n);

	/*
	 * Need to add 1 to the amount of memory
	 * so it is reported as an even block
	 */
	nvm->size += 1;

	return n;
}

static void intel_dg_nvm_release(struct kref *kref)
{
	struct intel_dg_nvm *nvm = container_of(kref, struct intel_dg_nvm, refcnt);
	int i;

	pr_debug("freeing intel_dg nvm\n");
	for (i = 0; i < nvm->nregions; i++)
		kfree(nvm->regions[i].name);
	kfree(nvm);
}

static int intel_dg_mtd_probe(struct auxiliary_device *aux_dev,
			      const struct auxiliary_device_id *aux_dev_id)
{
	struct intel_dg_nvm_dev *invm = auxiliary_dev_to_intel_dg_nvm_dev(aux_dev);
	struct intel_dg_nvm *nvm;
	struct device *device;
	unsigned int nregions;
	unsigned int i, n;
	int ret;

	device = &aux_dev->dev;

	/* count available regions */
	for (nregions = 0, i = 0; i < INTEL_DG_NVM_REGIONS; i++) {
		if (invm->regions[i].name)
			nregions++;
	}

	if (!nregions) {
		dev_err(device, "no regions defined\n");
		return -ENODEV;
	}

	nvm = kzalloc(struct_size(nvm, regions, nregions), GFP_KERNEL);
	if (!nvm)
		return -ENOMEM;

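	/*
	 * From this point on nvm (and the region names allocated below) is
	 * released through kref_put() -> intel_dg_nvm_release(), both on the
	 * probe error path and from intel_dg_mtd_remove().
	 */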
	kref_init(&nvm->refcnt);

	for (n = 0, i = 0; i < INTEL_DG_NVM_REGIONS; i++) {
		if (!invm->regions[i].name)
			continue;

		char *name = kasprintf(GFP_KERNEL, "%s.%s",
				       dev_name(&aux_dev->dev), invm->regions[i].name);
		if (!name)
			continue;
		nvm->regions[n].name = name;
		nvm->regions[n].id = i;
		n++;
	}
	nvm->nregions = n; /* in case kasprintf fails */

	nvm->base = devm_ioremap_resource(device, &invm->bar);
	if (IS_ERR(nvm->base)) {
		ret = PTR_ERR(nvm->base);
		goto err;
	}

	ret = intel_dg_nvm_init(nvm, device);
	if (ret < 0) {
		dev_err(device, "cannot initialize nvm %d\n", ret);
		goto err;
	}

	dev_set_drvdata(&aux_dev->dev, nvm);

	return 0;

err:
	kref_put(&nvm->refcnt, intel_dg_nvm_release);
	return ret;
}

static void intel_dg_mtd_remove(struct auxiliary_device *aux_dev)
{
	struct intel_dg_nvm *nvm = dev_get_drvdata(&aux_dev->dev);

	if (!nvm)
		return;

	dev_set_drvdata(&aux_dev->dev, NULL);

	kref_put(&nvm->refcnt, intel_dg_nvm_release);
}

static const struct auxiliary_device_id intel_dg_mtd_id_table[] = {
	{
		.name = "i915.nvm",
	},
	{
		.name = "xe.nvm",
	},
	{
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(auxiliary, intel_dg_mtd_id_table);

static struct auxiliary_driver intel_dg_mtd_driver = {
	.probe = intel_dg_mtd_probe,
	.remove = intel_dg_mtd_remove,
	.driver = {
		/* auxiliary_driver_register() sets .name to be the modname */
	},
	.id_table = intel_dg_mtd_id_table
};
module_auxiliary_driver(intel_dg_mtd_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel DGFX MTD driver");