// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019-2025, Intel Corporation. All rights reserved. */

#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
/* struct members (fragments) */
	void __iomem *base;
	u8 id;
/*
 * [15:0]  - Erase size = 0x0010 4K, 0x0080 32K, 0x0100 64K
 * [23:16] - Reserved
 * [31:24] - Erase MEM RegionID
 */

/* Flash Region Base Address */

/* Flash Region __n - Flash Descriptor Record */
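The comment above describes the 32-bit erase command that idg_erase() later composes as region << 24 | block. Below is a minimal standalone sketch of composing and decoding that word; the mask names are hypothetical stand-ins used only for this illustration, not the driver's definitions.

#include <stdint.h>
#include <stdio.h>

#define ERASE_SIZE_MASK   0x0000ffffu	/* [15:0]  erase size code (illustrative) */
#define ERASE_REGION_MASK 0xff000000u	/* [31:24] MEM region id (illustrative) */

int main(void)
{
	uint32_t region = 2;
	uint32_t block = 0x0010;		/* 4K erase size code */
	uint32_t cmd = region << 24 | block;	/* value written to the erase register */

	printf("cmd=0x%08x region=%u size_code=0x%04x\n",
	       cmd, (cmd & ERASE_REGION_MASK) >> 24, cmd & ERASE_SIZE_MASK);
	return 0;
}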
/* in idg_nvm_set_region_id() */
	iowrite32((u32)region, nvm->base + NVM_REGION_ID_REG);

/* in idg_nvm_error() */
	void __iomem *base = nvm->base;
	u32 reg = ioread32(base + NVM_ACCESS_ERROR_REG) & NVM_ACCESS_ERROR_PCIE_MASK;
	iowrite32(reg, base + NVM_ACCESS_ERROR_REG);

/* in idg_nvm_read32() */
	void __iomem *base = nvm->base;
	iowrite32(address, base + NVM_ADDRESS_REG);
	return ioread32(base + NVM_TRIGGER_REG);

/* in idg_nvm_read64() */
	void __iomem *base = nvm->base;
	iowrite32(address, base + NVM_ADDRESS_REG);
	return readq(base + NVM_TRIGGER_REG);

/* in idg_nvm_write32() */
	void __iomem *base = nvm->base;
	iowrite32(address, base + NVM_ADDRESS_REG);
	iowrite32(data, base + NVM_TRIGGER_REG);

/* in idg_nvm_write64() */
	void __iomem *base = nvm->base;
	iowrite32(address, base + NVM_ADDRESS_REG);
	writeq(data, base + NVM_TRIGGER_REG);
/* in idg_nvm_get_access_map() */
	return -EIO;
	return -EIO;

/*
 * [3:0]   regions 12-15 read state
 * [7:4]   regions 12-15 write state
 * [19:8]  regions 0-11 read state
 * [31:20] regions 0-11 write state
 */

/* in idg_nvm_region_readable() */
	return access_map & BIT(region - 12); /* [3:0] */

/* in idg_nvm_region_writable() */
	return access_map & BIT(region - 8); /* [7:4] */
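The two checks above handle the high regions (12 to 15): the read bit lives in [3:0] at position region - 12, and the write bit in [7:4] at position region - 8. A small standalone illustration of that arithmetic follows; the branches for regions 0 to 11 are not part of the listing and are omitted here.

#include <stdint.h>
#include <stdio.h>

static int region_readable(uint32_t map, unsigned int region)
{
	return !!(map & (1u << (region - 12)));	/* regions 12-15: bits [3:0] */
}

static int region_writable(uint32_t map, unsigned int region)
{
	return !!(map & (1u << (region - 8)));	/* regions 12-15: bits [7:4] */
}

int main(void)
{
	uint32_t map = (1u << 1) | (1u << 5);	/* region 13: readable and writable */

	printf("region 13: rd=%d wr=%d\n",
	       region_readable(map, 13), region_writable(map, 13));
	return 0;
}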
/* in idg_nvm_is_valid() */
	return -EIO;
	return -ENODEV;

/* in idg_nvm_get_region() */
	for (i = 0; i < nvm->nregions; i++) {
		if ((nvm->regions[i].offset + nvm->regions[i].size - 1) >= from &&
		    nvm->regions[i].offset <= from &&
		    nvm->regions[i].size != 0)

/* in idg_nvm_rewrite_partial() */
	return -EIO;
	return -EIO;
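Only the error paths of idg_nvm_rewrite_partial() appear in the listing. Judging from its call site in idg_write() further down, idg_nvm_rewrite_partial(nvm, to4, to - to4, to_shift, (u32 *)&buf[0]), it performs a 32-bit read-modify-write for an unaligned head or tail. The sketch below shows only that plausible shape; the body, parameter types, and return convention are assumptions, not the driver's code.

/* Sketch only: read the containing word, patch the affected bytes, write back. */
static ssize_t idg_nvm_rewrite_partial(struct intel_dg_nvm *nvm, loff_t to,
				       size_t shift, size_t len, const u32 *newdata)
{
	u32 data = idg_nvm_read32(nvm, to);

	if (idg_nvm_error(nvm))
		return -EIO;

	memcpy((u8 *)&data + shift, newdata, len);	/* splice in the new bytes */

	idg_nvm_write32(nvm, to, data);
	if (idg_nvm_error(nvm))
		return -EIO;

	return 0;	/* success value assumed */
}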
/* in idg_write() */
	to_shift = min(sizeof(u32) - ((size_t)to - to4), len);
	if (to - to4) {
		ret = idg_nvm_rewrite_partial(nvm, to4, to - to4, to_shift, (u32 *)&buf[0]);
	len_s -= to_shift;
	/* Workaround reads/writes across 1k-aligned addresses */
	return -EIO;
	len_s -= sizeof(u32);
	return -EIO;
	len4 = len_s - len8;
	return -EIO;
	len4 -= sizeof(u32);
/* in idg_read() */
	from_shift = min(sizeof(u32) - ((size_t)from - from4), len);
	if (from - from4) {
		return -EIO;
		memcpy(&buf[0], (u8 *)&data + (from - from4), from_shift);
		len_s -= from_shift;
	/* Workaround reads/writes across 1k-aligned addresses */
	return -EIO;
	len_s -= sizeof(u32);
	return -EIO;
	len4 = len_s - len8;
	return -EIO;
	len4 -= sizeof(u32);
	return -EIO;
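Both idg_write() and idg_read() start by splitting off an unaligned head: from4/to4 hold a 32-bit aligned base (assumed here to be the offset rounded down to a word boundary, which the listing does not show), and the head covers the bytes up to the next boundary, or fewer if the request is shorter. A runnable illustration of the arithmetic:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t from = 0x1002, len = 11;
	size_t from4 = from & ~(size_t)3;			/* word-aligned base (assumed) */
	size_t head = sizeof(uint32_t) - (from - from4);	/* bytes up to the boundary */

	if (head > len)
		head = len;

	printf("from4=0x%zx head=%zu remaining=%zu\n", from4, head, len - head);
	return 0;
}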
/* in idg_erase() */
	void __iomem *base2 = nvm->base2;
	void __iomem *base = nvm->base;
	iowrite32(from + i, base + NVM_ADDRESS_REG);
	iowrite32(region << 24 | block, base + NVM_ERASE_REG);
	if (nvm->non_posted_erase) {
		return -ETIME;
static int intel_dg_nvm_init(struct intel_dg_nvm *nvm, struct device *device,

	dev_err(device, "The MEM is not valid %d\n", ret);
	return -EIO;

	for (i = 0, n = 0; i < nvm->nregions; i++) {
		u32 address, base, limit, region;
		u8 id = nvm->regions[i].id;

		address = NVM_FLREG(id);
		base = FIELD_GET(NVM_FREG_BASE_MASK, region) << NVM_FREG_ADDR_SHIFT;
		dev_dbg(device, "[%d] %s: region: 0x%08X base: 0x%08x limit: 0x%08x\n",
			id, nvm->regions[i].name, region, base, limit);

		if (base >= limit || (i > 0 && limit == 0)) {
			dev_dbg(device, "[%d] %s: disabled\n",
				id, nvm->regions[i].name);
			nvm->regions[i].is_readable = 0;

		if (nvm->size < limit)
			nvm->size = limit;

		nvm->regions[i].offset = base;
		nvm->regions[i].size = limit - base + 1;
		nvm->regions[i].is_writable = idg_nvm_region_writable(access_map, id);
		nvm->regions[i].is_readable = idg_nvm_region_readable(access_map, id);
		dev_dbg(device, "Registered, %s id=%d offset=%lld size=%lld rd=%d wr=%d\n",
			nvm->regions[i].name,
			nvm->regions[i].id,
			nvm->regions[i].offset,
			nvm->regions[i].size,
			nvm->regions[i].is_readable,
			nvm->regions[i].is_writable);

		if (nvm->regions[i].is_readable)

	nvm->non_posted_erase = non_posted_erase;

	dev_dbg(device, "Registered %d regions\n", n);
	dev_dbg(device, "Non posted erase %d\n", nvm->non_posted_erase);

	nvm->size += 1;
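intel_dg_nvm_init() derives each region's byte offset from the flash-region record and its size as limit - base + 1, then sizes the whole device from the highest limit plus one. A standalone illustration of that arithmetic; the mask and shift values are hypothetical placeholders, not the driver's definitions:

#include <stdint.h>
#include <stdio.h>

#define FREG_BASE_MASK  0x0000ffffu	/* hypothetical [15:0] base field */
#define FREG_ADDR_SHIFT 12		/* hypothetical 4K address granularity */

int main(void)
{
	uint32_t record = 0x0010;	/* fabricated flash-region record */
	uint64_t base = (uint64_t)(record & FREG_BASE_MASK) << FREG_ADDR_SHIFT;
	uint64_t limit = 0x04ffff;	/* fabricated limit (last valid byte) */
	uint64_t size = limit - base + 1;

	printf("base=0x%llx limit=0x%llx size=0x%llx device_size=0x%llx\n",
	       (unsigned long long)base, (unsigned long long)limit,
	       (unsigned long long)size, (unsigned long long)(limit + 1));
	return 0;
}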
/* in intel_dg_mtd_erase() */
	struct intel_dg_nvm *nvm = mtd->priv;
	return -EINVAL;

	if (!IS_ALIGNED(info->addr, SZ_4K) || !IS_ALIGNED(info->len, SZ_4K)) {
		dev_err(&mtd->dev, "unaligned erase %llx %llx\n",
			info->addr, info->len);
		info->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
		return -EINVAL;

	total_len = info->len;
	addr = info->addr;

	guard(mutex)(&nvm->lock);

	dev_err(&mtd->dev, "unaligned erase %llx %zx\n", addr, total_len);
	info->fail_addr = addr;
	return -ERANGE;

	if (idx >= nvm->nregions) {
		dev_err(&mtd->dev, "out of range");
		info->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
		return -ERANGE;

	from = addr - nvm->regions[idx].offset;
	region = nvm->regions[idx].id;

	if (len > nvm->regions[idx].size - from)
		len = nvm->regions[idx].size - from;

	dev_dbg(&mtd->dev, "erasing region[%d] %s from %llx len %zx\n",
		region, nvm->regions[idx].name, from, len);

	bytes = idg_erase(nvm, region, from, len, &info->fail_addr);
	dev_dbg(&mtd->dev, "erase failed with %zd\n", bytes);
	info->fail_addr += nvm->regions[idx].offset;

	total_len -= len;
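The fragments above show the core of the erase walk: find the region containing the current address, clamp this iteration's length to the end of that region, erase, then advance. A small standalone mock-up of that walk with a fabricated two-region table:

#include <stdio.h>
#include <stddef.h>

struct region { unsigned long long offset, size; };

int main(void)
{
	struct region regions[] = {
		{ 0x000000, 0x10000 },	/* fabricated region A: 64K */
		{ 0x010000, 0x20000 },	/* fabricated region B: 128K */
	};
	unsigned long long addr = 0x00c000;
	size_t total_len = 0x8000;	/* 32K erase crossing the A/B boundary */

	while (total_len > 0) {
		size_t i, len = total_len;

		for (i = 0; i < 2; i++)
			if (regions[i].offset <= addr &&
			    addr <= regions[i].offset + regions[i].size - 1)
				break;
		if (i == 2)
			return 1;	/* address outside every region */

		if (len > regions[i].size - (addr - regions[i].offset))
			len = regions[i].size - (addr - regions[i].offset);

		printf("erase region %zu at 0x%llx len 0x%zx\n",
		       i, addr - regions[i].offset, len);

		addr += len;
		total_len -= len;
	}
	return 0;
}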
/* in intel_dg_mtd_read() */
	struct intel_dg_nvm *nvm = mtd->priv;
	return -EINVAL;

	dev_dbg(&mtd->dev, "reading region[%d] %s from %lld len %zd\n",
		nvm->regions[idx].id, nvm->regions[idx].name, from, len);

	if (idx >= nvm->nregions) {
		dev_err(&mtd->dev, "out of range");
		return -ERANGE;

	from -= nvm->regions[idx].offset;
	region = nvm->regions[idx].id;
	if (len > nvm->regions[idx].size - from)
		len = nvm->regions[idx].size - from;

	guard(mutex)(&nvm->lock);

	dev_dbg(&mtd->dev, "read failed with %zd\n", ret);
/* in intel_dg_mtd_write() */
	struct intel_dg_nvm *nvm = mtd->priv;
	return -EINVAL;

	dev_dbg(&mtd->dev, "writing region[%d] %s to %lld len %zd\n",
		nvm->regions[idx].id, nvm->regions[idx].name, to, len);

	if (idx >= nvm->nregions) {
		dev_err(&mtd->dev, "out of range");
		return -ERANGE;

	to -= nvm->regions[idx].offset;
	region = nvm->regions[idx].id;
	if (len > nvm->regions[idx].size - to)
		len = nvm->regions[idx].size - to;

	guard(mutex)(&nvm->lock);

	dev_dbg(&mtd->dev, "write failed with %zd\n", ret);
/* in intel_dg_nvm_release() */
	for (i = 0; i < nvm->nregions; i++)
		kfree(nvm->regions[i].name);
	mutex_destroy(&nvm->lock);

/* in intel_dg_mtd_get_device() */
	struct intel_dg_nvm *nvm = master->priv;
	return -EINVAL;
	pr_debug("get mtd %s %d\n", mtd->name, kref_read(&nvm->refcnt));
	kref_get(&nvm->refcnt);

/* in intel_dg_mtd_put_device() */
	struct intel_dg_nvm *nvm = master->priv;
	pr_debug("put mtd %s %d\n", mtd->name, kref_read(&nvm->refcnt));
	kref_put(&nvm->refcnt, intel_dg_nvm_release);
static int intel_dg_nvm_init_mtd(struct intel_dg_nvm *nvm, struct device *device,

	dev_dbg(device, "registering with mtd\n");

	nvm->mtd.owner = THIS_MODULE;
	nvm->mtd.dev.parent = device;
	nvm->mtd.flags = MTD_CAP_NORFLASH;
	nvm->mtd.type = MTD_DATAFLASH;
	nvm->mtd.priv = nvm;
	nvm->mtd._write = intel_dg_mtd_write;
	nvm->mtd._read = intel_dg_mtd_read;
	nvm->mtd._erase = intel_dg_mtd_erase;
	nvm->mtd._get_device = intel_dg_mtd_get_device;
	nvm->mtd._put_device = intel_dg_mtd_put_device;
	nvm->mtd.writesize = SZ_1;	/* 1 byte granularity */
	nvm->mtd.erasesize = SZ_4K;	/* 4K bytes granularity */
	nvm->mtd.size = nvm->size;

	parts = kcalloc(nvm->nregions, sizeof(*parts), GFP_KERNEL);
	return -ENOMEM;

	for (i = 0, n = 0; i < nvm->nregions && n < nparts; i++) {
		if (!nvm->regions[i].is_readable)
		parts[n].name = nvm->regions[i].name;
		parts[n].offset = nvm->regions[i].offset;
		parts[n].size = nvm->regions[i].size;
		if (!nvm->regions[i].is_writable && !writable_override)

	ret = mtd_device_register(&nvm->mtd, parts, n);
/* in intel_dg_mtd_probe() */
	struct device *device;

	device = &aux_dev->dev;

	if (invm->regions[i].name)

	dev_err(device, "no regions defined\n");
	return -ENODEV;

	return -ENOMEM;

	kref_init(&nvm->refcnt);
	mutex_init(&nvm->lock);

	if (!invm->regions[i].name)
			 dev_name(&aux_dev->dev), invm->regions[i].name);
	nvm->regions[n].name = name;
	nvm->regions[n].id = i;
	nvm->nregions = n; /* in case kasprintf fails */

	nvm->base = devm_ioremap_resource(device, &invm->bar);
	if (IS_ERR(nvm->base)) {
		ret = PTR_ERR(nvm->base);

	if (invm->non_posted_erase) {
		nvm->base2 = devm_ioremap_resource(device, &invm->bar2);
		if (IS_ERR(nvm->base2)) {
			ret = PTR_ERR(nvm->base2);

	ret = intel_dg_nvm_init(nvm, device, invm->non_posted_erase);
	dev_err(device, "cannot initialize nvm %d\n", ret);

	ret = intel_dg_nvm_init_mtd(nvm, device, ret, invm->writable_override);
	dev_err(device, "failed init mtd %d\n", ret);

	dev_set_drvdata(&aux_dev->dev, nvm);

	kref_put(&nvm->refcnt, intel_dg_nvm_release);
/* in intel_dg_mtd_remove() */
	struct intel_dg_nvm *nvm = dev_get_drvdata(&aux_dev->dev);
	mtd_device_unregister(&nvm->mtd);
	dev_set_drvdata(&aux_dev->dev, NULL);
	kref_put(&nvm->refcnt, intel_dg_nvm_release);
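The listing ends with the probe and remove callbacks but not the registration glue. For context, a typical auxiliary-bus wiring for callbacks like these is sketched below; the id-table match string is a placeholder, not the driver's actual value.

/* Sketch only: typical auxiliary driver registration for the callbacks above. */
#include <linux/auxiliary_bus.h>
#include <linux/module.h>

static const struct auxiliary_device_id intel_dg_mtd_id_table[] = {
	{ .name = "parent_driver.nvm" },	/* placeholder match string */
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(auxiliary, intel_dg_mtd_id_table);

static struct auxiliary_driver intel_dg_mtd_driver = {
	.probe = intel_dg_mtd_probe,
	.remove = intel_dg_mtd_remove,
	.id_table = intel_dg_mtd_id_table,
};
module_auxiliary_driver(intel_dg_mtd_driver);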