xref: /linux/drivers/gpu/drm/xe/xe_nvm.c (revision bf4afc53b77aeaa48b5409da5c8da6bb4eff7f43)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright(c) 2019-2025, Intel Corporation. All rights reserved.
4  */
5 
6 #include <linux/intel_dg_nvm_aux.h>
7 #include <linux/pci.h>
8 
9 #include "xe_device_types.h"
10 #include "xe_mmio.h"
11 #include "xe_nvm.h"
12 #include "xe_pcode_api.h"
13 #include "regs/xe_gsc_regs.h"
14 #include "xe_sriov.h"
15 
/*
 * Offsets (relative to the start of PCI BAR0, see xe_nvm_init()) and sizes
 * of the two NVM access windows exposed to the auxiliary NVM driver.
 * NOTE(review): values presumably come from the GEN12 register layout —
 * confirm against hardware documentation.
 */
#define GEN12_GUNIT_NVM_BASE 0x00102040
#define GEN12_DEBUG_NVM_BASE 0x00101018

/* Control register holding the non-posted-erase chicken bit below. */
#define GEN12_CNTL_PROTECTED_NVM_REG 0x0010100C

#define GEN12_GUNIT_NVM_SIZE 0x80
#define GEN12_DEBUG_NVM_SIZE 0x4

/* When this bit is clear, non-posted erase is reported as required. */
#define NVM_NON_POSTED_ERASE_CHICKEN_BIT BIT(13)

/* GSC firmware status bit; clear means NVM write access is jumper-overridden. */
#define HECI_FW_STATUS_2_NVM_ACCESS_MODE BIT(3)
27 
/*
 * Sparse table of named NVM regions handed to the auxiliary NVM driver;
 * only the populated indices carry a name, the rest stay zeroed.
 */
static const struct intel_dg_nvm_region regions[INTEL_DG_NVM_REGIONS] = {
	[0] = { .name = "DESCRIPTOR", },
	[2] = { .name = "GSC", },
	[9] = { .name = "PADDING", },
	[11] = { .name = "OptionROM", },
	[12] = { .name = "DAM", },
};
35 
xe_nvm_release_dev(struct device * dev)36 static void xe_nvm_release_dev(struct device *dev)
37 {
38 	struct auxiliary_device *aux = container_of(dev, struct auxiliary_device, dev);
39 	struct intel_dg_nvm_dev *nvm = container_of(aux, struct intel_dg_nvm_dev, aux_dev);
40 
41 	kfree(nvm);
42 }
43 
xe_nvm_non_posted_erase(struct xe_device * xe)44 static bool xe_nvm_non_posted_erase(struct xe_device *xe)
45 {
46 	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
47 
48 	switch (xe->info.platform) {
49 	case XE_CRESCENTISLAND:
50 	case XE_BATTLEMAGE:
51 		return !(xe_mmio_read32(mmio, XE_REG(GEN12_CNTL_PROTECTED_NVM_REG)) &
52 			 NVM_NON_POSTED_ERASE_CHICKEN_BIT);
53 	default:
54 		return false;
55 	}
56 }
57 
xe_nvm_writable_override(struct xe_device * xe)58 static bool xe_nvm_writable_override(struct xe_device *xe)
59 {
60 	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
61 	bool writable_override;
62 	struct xe_reg reg;
63 	u32 test_bit;
64 
65 	switch (xe->info.platform) {
66 	case XE_CRESCENTISLAND:
67 		reg = PCODE_SCRATCH(0);
68 		test_bit = FDO_MODE;
69 		break;
70 	case XE_BATTLEMAGE:
71 		reg = HECI_FWSTS2(DG2_GSC_HECI2_BASE);
72 		test_bit = HECI_FW_STATUS_2_NVM_ACCESS_MODE;
73 		break;
74 	case XE_PVC:
75 		reg = HECI_FWSTS2(PVC_GSC_HECI2_BASE);
76 		test_bit = HECI_FW_STATUS_2_NVM_ACCESS_MODE;
77 		break;
78 	case XE_DG2:
79 		reg = HECI_FWSTS2(DG2_GSC_HECI2_BASE);
80 		test_bit = HECI_FW_STATUS_2_NVM_ACCESS_MODE;
81 		break;
82 	case XE_DG1:
83 		reg = HECI_FWSTS2(DG1_GSC_HECI2_BASE);
84 		test_bit = HECI_FW_STATUS_2_NVM_ACCESS_MODE;
85 		break;
86 	default:
87 		drm_err(&xe->drm, "Unknown platform\n");
88 		return true;
89 	}
90 
91 	writable_override = !(xe_mmio_read32(mmio, reg) & test_bit);
92 	if (writable_override)
93 		drm_info(&xe->drm, "NVM access overridden by jumper\n");
94 	return writable_override;
95 }
96 
xe_nvm_fini(void * arg)97 static void xe_nvm_fini(void *arg)
98 {
99 	struct xe_device *xe = arg;
100 	struct intel_dg_nvm_dev *nvm = xe->nvm;
101 
102 	if (!xe->info.has_gsc_nvm)
103 		return;
104 
105 	/* No access to internal NVM from VFs */
106 	if (IS_SRIOV_VF(xe))
107 		return;
108 
109 	/* Nvm pointer should not be NULL here */
110 	if (WARN_ON(!nvm))
111 		return;
112 
113 	auxiliary_device_delete(&nvm->aux_dev);
114 	auxiliary_device_uninit(&nvm->aux_dev);
115 	xe->nvm = NULL;
116 }
117 
/**
 * xe_nvm_init() - expose the on-board non-volatile memory as an aux device
 * @xe: xe device instance
 *
 * Allocates an intel_dg_nvm_dev, describes the two NVM access windows
 * (GUnit and debug) carved out of PCI BAR0, and registers an auxiliary
 * device named "nvm" for the NVM driver to bind to. Teardown is performed
 * by xe_nvm_fini() via a devm action.
 *
 * Return: 0 on success or when there is nothing to do (no GSC NVM, or
 * running as a VF), negative error code on failure.
 */
int xe_nvm_init(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct auxiliary_device *aux_dev;
	struct intel_dg_nvm_dev *nvm;
	int ret;

	if (!xe->info.has_gsc_nvm)
		return 0;

	/* No access to internal NVM from VFs */
	if (IS_SRIOV_VF(xe))
		return 0;

	/* Nvm pointer should be NULL here */
	if (WARN_ON(xe->nvm))
		return -EFAULT;

	xe->nvm = kzalloc_obj(*nvm);
	if (!xe->nvm)
		return -ENOMEM;

	nvm = xe->nvm;

	nvm->writable_override = xe_nvm_writable_override(xe);
	nvm->non_posted_erase = xe_nvm_non_posted_erase(xe);
	/* Primary NVM window: a small slice of BAR0 at the GUnit NVM offset. */
	nvm->bar.parent = &pdev->resource[0];
	nvm->bar.start = GEN12_GUNIT_NVM_BASE + pdev->resource[0].start;
	nvm->bar.end = nvm->bar.start + GEN12_GUNIT_NVM_SIZE - 1;
	nvm->bar.flags = IORESOURCE_MEM;
	nvm->bar.desc = IORES_DESC_NONE;
	nvm->regions = regions;

	/* Secondary (debug) NVM window, also inside BAR0. */
	nvm->bar2.parent = &pdev->resource[0];
	nvm->bar2.start = GEN12_DEBUG_NVM_BASE + pdev->resource[0].start;
	nvm->bar2.end = nvm->bar2.start + GEN12_DEBUG_NVM_SIZE - 1;
	nvm->bar2.flags = IORESOURCE_MEM;
	nvm->bar2.desc = IORES_DESC_NONE;

	aux_dev = &nvm->aux_dev;

	aux_dev->name = "nvm";
	/* Unique id per device: PCI domain in the high bits, bus/devfn below. */
	aux_dev->id = (pci_domain_nr(pdev->bus) << 16) | pci_dev_id(pdev);
	aux_dev->dev.parent = &pdev->dev;
	aux_dev->dev.release = xe_nvm_release_dev;

	ret = auxiliary_device_init(aux_dev);
	if (ret) {
		drm_err(&xe->drm, "xe-nvm aux init failed %d\n", ret);
		/* Release callback never armed: we still own the memory. */
		kfree(nvm);
		xe->nvm = NULL;
		return ret;
	}

	ret = auxiliary_device_add(aux_dev);
	if (ret) {
		drm_err(&xe->drm, "xe-nvm aux add failed %d\n", ret);
		/*
		 * After a successful init, uninit drops the reference and the
		 * device core invokes xe_nvm_release_dev(), which frees nvm —
		 * so no kfree() here (it would double free).
		 */
		auxiliary_device_uninit(aux_dev);
		xe->nvm = NULL;
		return ret;
	}
	return devm_add_action_or_reset(xe->drm.dev, xe_nvm_fini, xe);
}
181