xref: /linux/drivers/gpu/drm/xe/xe_nvm.c (revision 220994d61cebfc04f071d69049127657c7e8191b)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright(c) 2019-2025, Intel Corporation. All rights reserved.
4  */
5 
6 #include <linux/intel_dg_nvm_aux.h>
7 #include <linux/pci.h>
8 
9 #include "xe_device.h"
10 #include "xe_device_types.h"
11 #include "xe_mmio.h"
12 #include "xe_nvm.h"
13 #include "regs/xe_gsc_regs.h"
14 #include "xe_sriov.h"
15 
/* Offsets of the NVM access windows within PCI BAR0 (pdev->resource[0]) */
#define GEN12_GUNIT_NVM_BASE 0x00102040
#define GEN12_DEBUG_NVM_BASE 0x00101018

/* MMIO register holding the NVM protection controls (read in xe_nvm_non_posted_erase()) */
#define GEN12_CNTL_PROTECTED_NVM_REG 0x0010100C

/* Sizes of the GUnit and debug NVM windows, in bytes */
#define GEN12_GUNIT_NVM_SIZE 0x80
#define GEN12_DEBUG_NVM_SIZE 0x4

/* NOTE(review): bit CLEAR appears to mean non-posted erase is required — confirm with HW spec */
#define NVM_NON_POSTED_ERASE_CHICKEN_BIT BIT(13)

/* HECI FW status 2 bit; clear presumably means writes are jumper-overridden — verify */
#define HECI_FW_STATUS_2_NVM_ACCESS_MODE BIT(3)
27 
/*
 * Table of NVM regions exposed to the intel_dg_nvm auxiliary driver,
 * sparsely indexed by region number; unnamed slots are not exposed.
 */
static const struct intel_dg_nvm_region regions[INTEL_DG_NVM_REGIONS] = {
	[0] = { .name = "DESCRIPTOR", },
	[2] = { .name = "GSC", },
	[9] = { .name = "PADDING", },
	[11] = { .name = "OptionROM", },
	[12] = { .name = "DAM", },
};
35 
/*
 * Intentionally empty release callback for the NVM auxiliary device.
 * The backing allocation is freed explicitly in xe_nvm_fini(); this stub
 * presumably exists to satisfy the driver core's release-callback
 * requirement — confirm against the device model documentation.
 */
static void xe_nvm_release_dev(struct device *dev)
{
}
39 
xe_nvm_non_posted_erase(struct xe_device * xe)40 static bool xe_nvm_non_posted_erase(struct xe_device *xe)
41 {
42 	struct xe_gt *gt = xe_root_mmio_gt(xe);
43 
44 	if (xe->info.platform != XE_BATTLEMAGE)
45 		return false;
46 	return !(xe_mmio_read32(&gt->mmio, XE_REG(GEN12_CNTL_PROTECTED_NVM_REG)) &
47 		 NVM_NON_POSTED_ERASE_CHICKEN_BIT);
48 }
49 
xe_nvm_writable_override(struct xe_device * xe)50 static bool xe_nvm_writable_override(struct xe_device *xe)
51 {
52 	struct xe_gt *gt = xe_root_mmio_gt(xe);
53 	bool writable_override;
54 	resource_size_t base;
55 
56 	switch (xe->info.platform) {
57 	case XE_BATTLEMAGE:
58 		base = DG2_GSC_HECI2_BASE;
59 		break;
60 	case XE_PVC:
61 		base = PVC_GSC_HECI2_BASE;
62 		break;
63 	case XE_DG2:
64 		base = DG2_GSC_HECI2_BASE;
65 		break;
66 	case XE_DG1:
67 		base = DG1_GSC_HECI2_BASE;
68 		break;
69 	default:
70 		drm_err(&xe->drm, "Unknown platform\n");
71 		return true;
72 	}
73 
74 	writable_override =
75 		!(xe_mmio_read32(&gt->mmio, HECI_FWSTS2(base)) &
76 		  HECI_FW_STATUS_2_NVM_ACCESS_MODE);
77 	if (writable_override)
78 		drm_info(&xe->drm, "NVM access overridden by jumper\n");
79 	return writable_override;
80 }
81 
xe_nvm_init(struct xe_device * xe)82 int xe_nvm_init(struct xe_device *xe)
83 {
84 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
85 	struct auxiliary_device *aux_dev;
86 	struct intel_dg_nvm_dev *nvm;
87 	int ret;
88 
89 	if (!xe->info.has_gsc_nvm)
90 		return 0;
91 
92 	/* No access to internal NVM from VFs */
93 	if (IS_SRIOV_VF(xe))
94 		return 0;
95 
96 	/* Nvm pointer should be NULL here */
97 	if (WARN_ON(xe->nvm))
98 		return -EFAULT;
99 
100 	xe->nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
101 	if (!xe->nvm)
102 		return -ENOMEM;
103 
104 	nvm = xe->nvm;
105 
106 	nvm->writable_override = xe_nvm_writable_override(xe);
107 	nvm->non_posted_erase = xe_nvm_non_posted_erase(xe);
108 	nvm->bar.parent = &pdev->resource[0];
109 	nvm->bar.start = GEN12_GUNIT_NVM_BASE + pdev->resource[0].start;
110 	nvm->bar.end = nvm->bar.start + GEN12_GUNIT_NVM_SIZE - 1;
111 	nvm->bar.flags = IORESOURCE_MEM;
112 	nvm->bar.desc = IORES_DESC_NONE;
113 	nvm->regions = regions;
114 
115 	nvm->bar2.parent = &pdev->resource[0];
116 	nvm->bar2.start = GEN12_DEBUG_NVM_BASE + pdev->resource[0].start;
117 	nvm->bar2.end = nvm->bar2.start + GEN12_DEBUG_NVM_SIZE - 1;
118 	nvm->bar2.flags = IORESOURCE_MEM;
119 	nvm->bar2.desc = IORES_DESC_NONE;
120 
121 	aux_dev = &nvm->aux_dev;
122 
123 	aux_dev->name = "nvm";
124 	aux_dev->id = (pci_domain_nr(pdev->bus) << 16) | pci_dev_id(pdev);
125 	aux_dev->dev.parent = &pdev->dev;
126 	aux_dev->dev.release = xe_nvm_release_dev;
127 
128 	ret = auxiliary_device_init(aux_dev);
129 	if (ret) {
130 		drm_err(&xe->drm, "xe-nvm aux init failed %d\n", ret);
131 		goto err;
132 	}
133 
134 	ret = auxiliary_device_add(aux_dev);
135 	if (ret) {
136 		drm_err(&xe->drm, "xe-nvm aux add failed %d\n", ret);
137 		auxiliary_device_uninit(aux_dev);
138 		goto err;
139 	}
140 	return 0;
141 
142 err:
143 	kfree(nvm);
144 	xe->nvm = NULL;
145 	return ret;
146 }
147 
xe_nvm_fini(struct xe_device * xe)148 void xe_nvm_fini(struct xe_device *xe)
149 {
150 	struct intel_dg_nvm_dev *nvm = xe->nvm;
151 
152 	if (!xe->info.has_gsc_nvm)
153 		return;
154 
155 	/* No access to internal NVM from VFs */
156 	if (IS_SRIOV_VF(xe))
157 		return;
158 
159 	/* Nvm pointer should not be NULL here */
160 	if (WARN_ON(!nvm))
161 		return;
162 
163 	auxiliary_device_delete(&nvm->aux_dev);
164 	auxiliary_device_uninit(&nvm->aux_dev);
165 	kfree(nvm);
166 	xe->nvm = NULL;
167 }
168