// SPDX-License-Identifier: MIT
/*
 * Copyright(c) 2019-2025, Intel Corporation. All rights reserved.
 */

#include <linux/intel_dg_nvm_aux.h>
#include <linux/pci.h>

#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_mmio.h"
#include "xe_nvm.h"
#include "regs/xe_gsc_regs.h"
#include "xe_sriov.h"

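/*
 * The NVM access windows live inside PCI BAR0: the *_BASE values below
 * are offsets from the start of BAR0 and the *_SIZE values are window
 * sizes in bytes. The two register bits are consulted by the helpers
 * further down when describing the device to the auxiliary driver.
 */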
#define GEN12_GUNIT_NVM_BASE 0x00102040
#define GEN12_DEBUG_NVM_BASE 0x00101018

#define GEN12_CNTL_PROTECTED_NVM_REG 0x0010100C

#define GEN12_GUNIT_NVM_SIZE 0x80
#define GEN12_DEBUG_NVM_SIZE 0x4

#define NVM_NON_POSTED_ERASE_CHICKEN_BIT BIT(13)

#define HECI_FW_STATUS_2_NVM_ACCESS_MODE BIT(3)

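/*
 * Named flash regions handed to the intel_dg_nvm auxiliary driver; the
 * array index is the region number and unnamed slots stay unused.
 */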
static const struct intel_dg_nvm_region regions[INTEL_DG_NVM_REGIONS] = {
	[0] = { .name = "DESCRIPTOR", },
	[2] = { .name = "GSC", },
	[9] = { .name = "PADDING", },
	[11] = { .name = "OptionROM", },
	[12] = { .name = "DAM", },
};

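/* Device release callback: frees the intel_dg_nvm_dev embedding the aux device. */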
static void xe_nvm_release_dev(struct device *dev)
{
	struct auxiliary_device *aux = container_of(dev, struct auxiliary_device, dev);
	struct intel_dg_nvm_dev *nvm = container_of(aux, struct intel_dg_nvm_dev, aux_dev);

	kfree(nvm);
}

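/*
 * Non-posted erase is only used on Battlemage: report it as enabled
 * when the chicken bit in the protected NVM control register is clear.
 */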
static bool xe_nvm_non_posted_erase(struct xe_device *xe)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);

	if (xe->info.platform != XE_BATTLEMAGE)
		return false;
	return !(xe_mmio_read32(mmio, XE_REG(GEN12_CNTL_PROTECTED_NVM_REG)) &
		 NVM_NON_POSTED_ERASE_CHICKEN_BIT);
}

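/*
 * NVM access is treated as overridden (e.g. by a board jumper) when the
 * GSC HECI FW status register 2 does not report NVM access mode.
 */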
static bool xe_nvm_writable_override(struct xe_device *xe)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
	bool writable_override;
	resource_size_t base;

	switch (xe->info.platform) {
	case XE_BATTLEMAGE:
		base = DG2_GSC_HECI2_BASE;
		break;
	case XE_PVC:
		base = PVC_GSC_HECI2_BASE;
		break;
	case XE_DG2:
		base = DG2_GSC_HECI2_BASE;
		break;
	case XE_DG1:
		base = DG1_GSC_HECI2_BASE;
		break;
	default:
		drm_err(&xe->drm, "Unknown platform\n");
		return true;
	}

	writable_override =
		!(xe_mmio_read32(mmio, HECI_FWSTS2(base)) &
		  HECI_FW_STATUS_2_NVM_ACCESS_MODE);
	if (writable_override)
		drm_info(&xe->drm, "NVM access overridden by jumper\n");
	return writable_override;
}

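/*
 * Register the "nvm" auxiliary device that the intel_dg_nvm driver
 * binds to; its two resources are carved out of PCI BAR0 (the GUnit
 * NVM window and the debug NVM window).
 */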
int xe_nvm_init(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct auxiliary_device *aux_dev;
	struct intel_dg_nvm_dev *nvm;
	int ret;

	if (!xe->info.has_gsc_nvm)
		return 0;

	/* No access to internal NVM from VFs */
	if (IS_SRIOV_VF(xe))
		return 0;

	/* Nvm pointer should be NULL here */
	if (WARN_ON(xe->nvm))
		return -EFAULT;

	xe->nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
	if (!xe->nvm)
		return -ENOMEM;

	nvm = xe->nvm;

	nvm->writable_override = xe_nvm_writable_override(xe);
	nvm->non_posted_erase = xe_nvm_non_posted_erase(xe);
	nvm->bar.parent = &pdev->resource[0];
	nvm->bar.start = GEN12_GUNIT_NVM_BASE + pdev->resource[0].start;
	nvm->bar.end = nvm->bar.start + GEN12_GUNIT_NVM_SIZE - 1;
	nvm->bar.flags = IORESOURCE_MEM;
	nvm->bar.desc = IORES_DESC_NONE;
	nvm->regions = regions;

	nvm->bar2.parent = &pdev->resource[0];
	nvm->bar2.start = GEN12_DEBUG_NVM_BASE + pdev->resource[0].start;
	nvm->bar2.end = nvm->bar2.start + GEN12_DEBUG_NVM_SIZE - 1;
	nvm->bar2.flags = IORESOURCE_MEM;
	nvm->bar2.desc = IORES_DESC_NONE;

	aux_dev = &nvm->aux_dev;

	aux_dev->name = "nvm";
	aux_dev->id = (pci_domain_nr(pdev->bus) << 16) | pci_dev_id(pdev);
	aux_dev->dev.parent = &pdev->dev;
	aux_dev->dev.release = xe_nvm_release_dev;

	ret = auxiliary_device_init(aux_dev);
	if (ret) {
		drm_err(&xe->drm, "xe-nvm aux init failed %d\n", ret);
		goto err;
	}

	ret = auxiliary_device_add(aux_dev);
	if (ret) {
		drm_err(&xe->drm, "xe-nvm aux add failed %d\n", ret);
		/* put_device() in uninit invokes xe_nvm_release_dev(), freeing nvm */
		auxiliary_device_uninit(aux_dev);
		xe->nvm = NULL;
		return ret;
	}
	return 0;

err:
	kfree(nvm);
	xe->nvm = NULL;
	return ret;
}

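/* Tear down the auxiliary device created by xe_nvm_init(). */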
void xe_nvm_fini(struct xe_device *xe)
{
	struct intel_dg_nvm_dev *nvm = xe->nvm;

	if (!xe->info.has_gsc_nvm)
		return;

	/* No access to internal NVM from VFs */
	if (IS_SRIOV_VF(xe))
		return;

	/* Nvm pointer should not be NULL here */
	if (WARN_ON(!nvm))
		return;

	auxiliary_device_delete(&nvm->aux_dev);
	auxiliary_device_uninit(&nvm->aux_dev);
	xe->nvm = NULL;
}