xref: /linux/drivers/cxl/core/atl.c (revision e812928be2ee1c2744adf20ed04e0ce1e2fc5c13)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2025 Advanced Micro Devices, Inc.
 */

#include <linux/prmt.h>
#include <linux/pci.h>
#include <linux/acpi.h>

#include <cxlmem.h>
#include "core.h"

/*
 * PRM Address Translation - CXL DPA to System Physical Address
 *
 * Reference:
 *
 * AMD Family 1Ah Models 00h–0Fh and Models 10h–1Fh
 * ACPI v6.5 Porting Guide, Publication # 58088
 */

static const guid_t prm_cxl_dpa_spa_guid =
	GUID_INIT(0xee41b397, 0x25d4, 0x452c, 0xad, 0x54, 0x48, 0xc6, 0xe3,
		  0x48, 0x0b, 0x94);

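/*
 * Parameter buffer handed to the PRM handler identified by
 * prm_cxl_dpa_spa_guid. __packed keeps the layout free of compiler
 * padding, and the handler returns the translated address through the
 * @spa output pointer.
 */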
struct prm_cxl_dpa_spa_data {
	u64 dpa;
	u8 reserved;
	u8 devfn;
	u8 bus;
	u8 segment;
	u64 *spa;
} __packed;

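/*
 * Translate an endpoint's DPA to an SPA by calling the platform's PRM
 * handler.
 *
 * Example (illustrative): prm_cxl_dpa_spa(pci_dev, 0x1000) returns the
 * SPA that backs DPA 0x1000 of the given endpoint, or ULLONG_MAX if
 * the PRM call fails.
 */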
static u64 prm_cxl_dpa_spa(struct pci_dev *pci_dev, u64 dpa)
{
	struct prm_cxl_dpa_spa_data data;
	u64 spa;
	int rc;

	data = (struct prm_cxl_dpa_spa_data) {
		.dpa     = dpa,
		.devfn   = pci_dev->devfn,
		.bus     = pci_dev->bus->number,
		.segment = pci_domain_nr(pci_dev->bus),
		.spa     = &spa,
	};

	rc = acpi_call_prm_handler(prm_cxl_dpa_spa_guid, &data);
	if (rc) {
		pci_dbg(pci_dev, "failed to get SPA for %#llx: %d\n", dpa, rc);
		return ULLONG_MAX;
	}

	pci_dbg(pci_dev, "PRM address translation: DPA -> SPA: %#llx -> %#llx\n", dpa, spa);

	return spa;
}

static int cxl_prm_setup_root(struct cxl_root *cxl_root, void *data)
{
	struct cxl_region_context *ctx = data;
	struct cxl_endpoint_decoder *cxled = ctx->cxled;
	struct cxl_decoder *cxld = &cxled->cxld;
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct range hpa_range = ctx->hpa_range;
	struct pci_dev *pci_dev;
	u64 spa_len, len;
	u64 addr, base_spa, base;
	int ways, gran;

	/*
	 * When Normalized Addressing is enabled, the endpoint maintains a 1:1
	 * mapping between HPA and DPA. If Normalized Addressing is disabled,
	 * skip address translation and perform only a range check.
	 */
	if (hpa_range.start != cxled->dpa_res->start)
		return 0;
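
	/*
	 * Example (illustrative): an endpoint decoder programmed with HPA
	 * 0x0-0x3fffffff on top of DPA 0x0-0x3fffffff is identity-mapped
	 * and thus subject to translation below.
	 */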

	/*
	 * Endpoints are programmed passthrough (1-way) in Normalized
	 * Addressing mode, so any other interleave configuration is
	 * unexpected.
	 */
	if (ctx->interleave_ways != 1) {
		dev_dbg(&cxld->dev, "unexpected interleaving config: ways: %d granularity: %d\n",
			ctx->interleave_ways, ctx->interleave_granularity);
		return -ENXIO;
	}

	if (!cxlmd || !dev_is_pci(cxlmd->dev.parent)) {
		dev_dbg(&cxld->dev, "No endpoint found: %s, range %#llx-%#llx\n",
			dev_name(cxld->dev.parent), hpa_range.start,
			hpa_range.end);
		return -ENXIO;
	}

	pci_dev = to_pci_dev(cxlmd->dev.parent);

	/* Translate HPA range to SPA. */
	base = hpa_range.start;
	hpa_range.start = prm_cxl_dpa_spa(pci_dev, hpa_range.start);
	hpa_range.end = prm_cxl_dpa_spa(pci_dev, hpa_range.end);
	base_spa = hpa_range.start;

	if (hpa_range.start == ULLONG_MAX || hpa_range.end == ULLONG_MAX) {
		dev_dbg(cxld->dev.parent,
			"CXL address translation: Failed to translate HPA range: %#llx-%#llx:%#llx-%#llx(%s)\n",
			hpa_range.start, hpa_range.end, ctx->hpa_range.start,
			ctx->hpa_range.end, dev_name(&cxld->dev));
		return -ENXIO;
	}

	/*
	 * Since translated addresses include the interleaving offsets, widen
	 * the range outwards to 256 MB boundaries.
	 */
	hpa_range.start = ALIGN_DOWN(hpa_range.start, SZ_256M);
	hpa_range.end = ALIGN(hpa_range.end, SZ_256M) - 1;
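
	/*
	 * Example (illustrative): a translated range of
	 * 0x108001000-0x117ffffff is widened to the enclosing 256 MB
	 * aligned range 0x100000000-0x11fffffff.
	 */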

	len = range_len(&ctx->hpa_range);
	spa_len = range_len(&hpa_range);
	if (!len || !spa_len || spa_len % len) {
		dev_dbg(cxld->dev.parent,
			"CXL address translation: HPA range not contiguous: %#llx-%#llx:%#llx-%#llx(%s)\n",
			hpa_range.start, hpa_range.end, ctx->hpa_range.start,
			ctx->hpa_range.end, dev_name(&cxld->dev));
		return -ENXIO;
	}

	ways = spa_len / len;
	gran = SZ_256;
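
	/*
	 * Example (illustrative): a 2 GB endpoint contribution (len)
	 * spread across a 4 GB aligned SPA span (spa_len) implies 2-way
	 * interleaving.
	 */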

	/*
	 * Determine interleave granularity
	 *
	 * Note: The position of the chunk from one interleaving block to the
	 * next may vary and thus cannot be considered constant. Address offsets
	 * larger than the interleaving block size cannot be used to calculate
	 * the granularity.
	 */
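	/*
	 * Example (illustrative): with 2-way interleaving at 4K granularity,
	 * probes at base + 0x100 up to base + 0x800 still land in the same
	 * chunk and translate to base_spa plus the same offset. The probe
	 * at base + 0x1000 falls into the device's next chunk at
	 * base_spa + 0x2000, so the loop stops with gran = SZ_4K.
	 */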
	if (ways > 1) {
		while (gran <= SZ_16M) {
			addr = prm_cxl_dpa_spa(pci_dev, base + gran);
			if (addr != base_spa + gran)
				break;
			gran <<= 1;
		}
	}

	if (gran > SZ_16M) {
		dev_dbg(cxld->dev.parent,
			"CXL address translation: Cannot determine granularity: %#llx-%#llx:%#llx-%#llx(%s)\n",
			hpa_range.start, hpa_range.end, ctx->hpa_range.start,
			ctx->hpa_range.end, dev_name(&cxld->dev));
		return -ENXIO;
	}

	/*
	 * The current kernel implementation does not support endpoint
	 * setup with Normalized Addressing. It only translates an
	 * endpoint's DPA to the SPA range of the host bridge.
	 * Therefore, the endpoint address range cannot be determined,
	 * making a non-auto setup impossible. If a decoder requires
	 * address translation, reprogramming should be disabled and
	 * the decoder locked.
	 *
	 * The BIOS, however, provides all the necessary address
	 * translation data, which the kernel can use to reconfigure
	 * endpoint decoders with normalized addresses. Locking the
	 * decoders in the BIOS would prevent a capable kernel (or
	 * other operating systems) from shutting down auto-generated
	 * regions and managing resources dynamically.
	 *
	 * Indicate that Normalized Addressing is enabled.
	 */
	cxld->flags |= CXL_DECODER_F_LOCK;
	cxld->flags |= CXL_DECODER_F_NORMALIZED_ADDRESSING;

	ctx->hpa_range = hpa_range;
	ctx->interleave_ways = ways;
	ctx->interleave_granularity = gran;

	dev_dbg(&cxld->dev,
		"address mapping found for %s (hpa -> spa): %#llx+%#llx -> %#llx+%#llx ways:%d granularity:%d\n",
		dev_name(cxlmd->dev.parent), base, len, hpa_range.start,
		spa_len, ways, gran);

	return 0;
}

void cxl_setup_prm_address_translation(struct cxl_root *cxl_root)
{
	struct device *host = cxl_root->port.uport_dev;
	u64 spa;
	struct prm_cxl_dpa_spa_data data = { .spa = &spa };
	int rc;

	/*
	 * Applies only to PCIe Host Bridges that are children of the CXL Root
	 * Device (HID="ACPI0017"). Check this and drop cxl_test instances.
	 */
	if (!acpi_match_device(host->driver->acpi_match_table, host))
		return;

	/* Check kernel (-EOPNOTSUPP) and firmware support (-ENODEV) */
	rc = acpi_call_prm_handler(prm_cxl_dpa_spa_guid, &data);
	if (rc == -EOPNOTSUPP || rc == -ENODEV)
		return;

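	/*
	 * Any other result, including an error from the probe translation
	 * itself, implies both kernel and firmware support: install the
	 * translation hook.
	 */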
	cxl_root->ops.translation_setup_root = cxl_prm_setup_root;
}
EXPORT_SYMBOL_NS_GPL(cxl_setup_prm_address_translation, "CXL");