// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include "cxlpci.h"
#include "cxl.h"

/* Encoding defined in CXL 2.0 8.2.5.12.7 HDM Decoder Control Register */
#define CFMWS_INTERLEAVE_WAYS(x)	(1 << (x)->interleave_ways)
#define CFMWS_INTERLEAVE_GRANULARITY(x)	((x)->granularity + 8)

static unsigned long cfmws_to_decoder_flags(int restrictions)
{
	unsigned long flags = CXL_DECODER_F_ENABLE;

	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE2)
		flags |= CXL_DECODER_F_TYPE2;
	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE3)
		flags |= CXL_DECODER_F_TYPE3;
	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_VOLATILE)
		flags |= CXL_DECODER_F_RAM;
	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_PMEM)
		flags |= CXL_DECODER_F_PMEM;
	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_FIXED)
		flags |= CXL_DECODER_F_LOCK;

	return flags;
}

static int cxl_acpi_cfmws_verify(struct device *dev,
				 struct acpi_cedt_cfmws *cfmws)
{
	int expected_len;

	if (cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_MODULO) {
		dev_err(dev, "CFMWS Unsupported Interleave Arithmetic\n");
		return -EINVAL;
	}

	if (!IS_ALIGNED(cfmws->base_hpa, SZ_256M)) {
		dev_err(dev, "CFMWS Base HPA not 256MB aligned\n");
		return -EINVAL;
	}

	if (!IS_ALIGNED(cfmws->window_size, SZ_256M)) {
		dev_err(dev, "CFMWS Window Size not 256MB aligned\n");
		return -EINVAL;
	}

	if (CFMWS_INTERLEAVE_WAYS(cfmws) > CXL_DECODER_MAX_INTERLEAVE) {
		dev_err(dev, "CFMWS Interleave Ways (%d) too large\n",
			CFMWS_INTERLEAVE_WAYS(cfmws));
		return -EINVAL;
	}

	expected_len = struct_size((cfmws), interleave_targets,
				   CFMWS_INTERLEAVE_WAYS(cfmws));

	if (cfmws->header.length < expected_len) {
		dev_err(dev, "CFMWS length %d less than expected %d\n",
			cfmws->header.length, expected_len);
		return -EINVAL;
	}

	if (cfmws->header.length > expected_len)
		dev_dbg(dev, "CFMWS length %d greater than expected %d\n",
			cfmws->header.length, expected_len);

	return 0;
}

struct cxl_cfmws_context {
	struct device *dev;
	struct cxl_port *root_port;
};

static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
			   const unsigned long end)
{
	int target_map[CXL_DECODER_MAX_INTERLEAVE];
	struct cxl_cfmws_context *ctx = arg;
	struct cxl_port *root_port = ctx->root_port;
	struct device *dev = ctx->dev;
	struct acpi_cedt_cfmws *cfmws;
	struct cxl_decoder *cxld;
	int rc, i;

	cfmws = (struct acpi_cedt_cfmws *) header;

	rc = cxl_acpi_cfmws_verify(dev, cfmws);
	if (rc) {
		dev_err(dev, "CFMWS range %#llx-%#llx not registered\n",
			cfmws->base_hpa,
			cfmws->base_hpa + cfmws->window_size - 1);
		return 0;
	}

	for (i = 0; i < CFMWS_INTERLEAVE_WAYS(cfmws); i++)
		target_map[i] = cfmws->interleave_targets[i];

	cxld = cxl_root_decoder_alloc(root_port, CFMWS_INTERLEAVE_WAYS(cfmws));
	if (IS_ERR(cxld))
		return 0;

	cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
	cxld->target_type = CXL_DECODER_EXPANDER;
	cxld->platform_res = (struct resource)DEFINE_RES_MEM(cfmws->base_hpa,
							     cfmws->window_size);
	cxld->interleave_ways = CFMWS_INTERLEAVE_WAYS(cfmws);
	cxld->interleave_granularity = CFMWS_INTERLEAVE_GRANULARITY(cfmws);
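
	/*
	 * Worked example of the encodings above (a sketch based on the
	 * ACPI CEDT CFMWS field definitions; the values are hypothetical):
	 * an encoded interleave_ways of 2 expands to 1 << 2 = 4-way
	 * interleave, and an encoded granularity of 1 expands to an
	 * exponent of 1 + 8 = 9, i.e. 2^9 = 512 byte interleave
	 * granularity. Note the decoder stores the exponent form produced
	 * by CFMWS_INTERLEAVE_GRANULARITY(), not a byte count.
	 */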

	rc = cxl_decoder_add(cxld, target_map);
	if (rc)
		put_device(&cxld->dev);
	else
		rc = cxl_decoder_autoremove(dev, cxld);
	if (rc) {
		dev_err(dev, "Failed to add decoder for %pr\n",
			&cxld->platform_res);
		return 0;
	}
	dev_dbg(dev, "add: %s node: %d range %pr\n", dev_name(&cxld->dev),
		phys_to_target_node(cxld->platform_res.start),
		&cxld->platform_res);

	return 0;
}

__mock struct acpi_device *to_cxl_host_bridge(struct device *host,
					      struct device *dev)
{
	struct acpi_device *adev = to_acpi_device(dev);

	if (!acpi_pci_find_root(adev->handle))
		return NULL;

	if (strcmp(acpi_device_hid(adev), "ACPI0016") == 0)
		return adev;
	return NULL;
}

/*
 * A host bridge is a dport to a CFMWS decode and a uport to the
 * dports (PCIe Root Ports) within the host bridge.
 */
static int add_host_bridge_uport(struct device *match, void *arg)
{
	struct cxl_port *root_port = arg;
	struct device *host = root_port->dev.parent;
	struct acpi_device *bridge = to_cxl_host_bridge(host, match);
	struct acpi_pci_root *pci_root;
	struct cxl_dport *dport;
	struct cxl_port *port;
	int rc;

	if (!bridge)
		return 0;

	dport = cxl_find_dport_by_dev(root_port, match);
	if (!dport) {
		dev_dbg(host, "host bridge expected and not found\n");
		return 0;
	}

	/*
	 * Note that this lookup already succeeded in
	 * to_cxl_host_bridge(), so no need to check for failure here
	 */
	pci_root = acpi_pci_find_root(bridge->handle);
	rc = devm_cxl_register_pci_bus(host, match, pci_root->bus);
	if (rc)
		return rc;

	port = devm_cxl_add_port(host, match, dport->component_reg_phys,
				 root_port);
	if (IS_ERR(port))
		return PTR_ERR(port);
	dev_dbg(host, "%s: add: %s\n", dev_name(match), dev_name(&port->dev));

	return 0;
}

struct cxl_chbs_context {
	struct device *dev;
	unsigned long long uid;
	resource_size_t chbcr;
};

static int cxl_get_chbcr(union acpi_subtable_headers *header, void *arg,
			 const unsigned long end)
{
	struct cxl_chbs_context *ctx = arg;
	struct acpi_cedt_chbs *chbs;

	if (ctx->chbcr)
		return 0;

	chbs = (struct acpi_cedt_chbs *) header;

	if (ctx->uid != chbs->uid)
		return 0;
	ctx->chbcr = chbs->base;

	return 0;
}

static int add_host_bridge_dport(struct device *match, void *arg)
{
	acpi_status status;
	unsigned long long uid;
	struct cxl_dport *dport;
	struct cxl_chbs_context ctx;
	struct cxl_port *root_port = arg;
	struct device *host = root_port->dev.parent;
	struct acpi_device *bridge = to_cxl_host_bridge(host, match);

	if (!bridge)
		return 0;

	status = acpi_evaluate_integer(bridge->handle, METHOD_NAME__UID, NULL,
				       &uid);
	if (status != AE_OK) {
		dev_err(host, "unable to retrieve _UID of %s\n",
			dev_name(match));
		return -ENODEV;
	}

	ctx = (struct cxl_chbs_context) {
		.dev = host,
		.uid = uid,
	};
	acpi_table_parse_cedt(ACPI_CEDT_TYPE_CHBS, cxl_get_chbcr, &ctx);

	if (ctx.chbcr == 0) {
		dev_warn(host, "No CHBS found for Host Bridge: %s\n",
			 dev_name(match));
		return 0;
	}

	dport = devm_cxl_add_dport(root_port, match, uid, ctx.chbcr);
	if (IS_ERR(dport)) {
		dev_err(host, "failed to add downstream port: %s\n",
			dev_name(match));
		return PTR_ERR(dport);
	}
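	/*
	 * At this point the host bridge is registered as a dport of the
	 * CXL root port, keyed by its ACPI _UID and pointing at the
	 * component register block (CHBCR) advertised by the matching
	 * CEDT CHBS entry found above.
	 */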
dev_dbg(host, "add dport%llu: %s\n", uid, dev_name(match)); 251 return 0; 252 } 253 254 static int add_root_nvdimm_bridge(struct device *match, void *data) 255 { 256 struct cxl_decoder *cxld; 257 struct cxl_port *root_port = data; 258 struct cxl_nvdimm_bridge *cxl_nvb; 259 struct device *host = root_port->dev.parent; 260 261 if (!is_root_decoder(match)) 262 return 0; 263 264 cxld = to_cxl_decoder(match); 265 if (!(cxld->flags & CXL_DECODER_F_PMEM)) 266 return 0; 267 268 cxl_nvb = devm_cxl_add_nvdimm_bridge(host, root_port); 269 if (IS_ERR(cxl_nvb)) { 270 dev_dbg(host, "failed to register pmem\n"); 271 return PTR_ERR(cxl_nvb); 272 } 273 dev_dbg(host, "%s: add: %s\n", dev_name(&root_port->dev), 274 dev_name(&cxl_nvb->dev)); 275 return 1; 276 } 277 278 static int cxl_acpi_probe(struct platform_device *pdev) 279 { 280 int rc; 281 struct cxl_port *root_port; 282 struct device *host = &pdev->dev; 283 struct acpi_device *adev = ACPI_COMPANION(host); 284 struct cxl_cfmws_context ctx; 285 286 root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL); 287 if (IS_ERR(root_port)) 288 return PTR_ERR(root_port); 289 dev_dbg(host, "add: %s\n", dev_name(&root_port->dev)); 290 291 rc = bus_for_each_dev(adev->dev.bus, NULL, root_port, 292 add_host_bridge_dport); 293 if (rc < 0) 294 return rc; 295 296 ctx = (struct cxl_cfmws_context) { 297 .dev = host, 298 .root_port = root_port, 299 }; 300 acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, cxl_parse_cfmws, &ctx); 301 302 /* 303 * Root level scanned with host-bridge as dports, now scan host-bridges 304 * for their role as CXL uports to their CXL-capable PCIe Root Ports. 305 */ 306 rc = bus_for_each_dev(adev->dev.bus, NULL, root_port, 307 add_host_bridge_uport); 308 if (rc < 0) 309 return rc; 310 311 if (IS_ENABLED(CONFIG_CXL_PMEM)) 312 rc = device_for_each_child(&root_port->dev, root_port, 313 add_root_nvdimm_bridge); 314 if (rc < 0) 315 return rc; 316 317 /* In case PCI is scanned before ACPI re-trigger memdev attach */ 318 return cxl_bus_rescan(); 319 } 320 321 static const struct acpi_device_id cxl_acpi_ids[] = { 322 { "ACPI0017" }, 323 { }, 324 }; 325 MODULE_DEVICE_TABLE(acpi, cxl_acpi_ids); 326 327 static struct platform_driver cxl_acpi_driver = { 328 .probe = cxl_acpi_probe, 329 .driver = { 330 .name = KBUILD_MODNAME, 331 .acpi_match_table = cxl_acpi_ids, 332 }, 333 }; 334 335 module_platform_driver(cxl_acpi_driver); 336 MODULE_LICENSE("GPL v2"); 337 MODULE_IMPORT_NS(CXL); 338 MODULE_IMPORT_NS(ACPI); 339