// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/node.h>
#include <asm/div64.h>
#include "cxlpci.h"
#include "cxl.h"

static const guid_t acpi_cxl_qtg_id_guid =
	GUID_INIT(0xF365F9A6, 0xA7DE, 0x4071,
		  0xA6, 0x6A, 0xB4, 0x0C, 0x0B, 0x4F, 0x8E, 0x52);

#define HBIW_TO_NR_MAPS_SIZE (CXL_DECODER_MAX_INTERLEAVE + 1)
static const int hbiw_to_nr_maps[HBIW_TO_NR_MAPS_SIZE] = {
	[1] = 0, [2] = 1, [3] = 0, [4] = 2, [6] = 1, [8] = 3, [12] = 2, [16] = 4
};

static const int valid_hbiw[] = { 1, 2, 3, 4, 6, 8, 12, 16 };
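
/*
 * The tables above encode how many xormaps a CXL window needs: one per
 * power-of-2 factor of its host bridge interleave ways (hbiw). For
 * example, hbiw 16 = 2^4 needs 4 maps, hbiw 12 = 3 * 2^2 needs 2, and
 * hbiw 1 and 3 have no power-of-2 factor, so they need none.
 */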

u64 cxl_do_xormap_calc(struct cxl_cxims_data *cximsd, u64 addr, int hbiw)
{
	int nr_maps_to_apply = -1;
	u64 val;
	int pos;

	/*
	 * Strictly validate hbiw: this helper is also exercised by test
	 * code, so the parameters cannot be assumed to arrive
	 * pre-validated from the CXL region driver.
	 */
	for (int i = 0; i < ARRAY_SIZE(valid_hbiw); i++) {
		if (valid_hbiw[i] == hbiw) {
			nr_maps_to_apply = hbiw_to_nr_maps[hbiw];
			break;
		}
	}
	if (nr_maps_to_apply == -1 || nr_maps_to_apply > cximsd->nr_maps)
		return ULLONG_MAX;

	/*
	 * In regions using XOR interleave arithmetic the CXL HPA may not
	 * be the same as the SPA. This helper performs the SPA->CXL HPA
	 * or the CXL HPA->SPA translation. Since XOR is self-inverting,
	 * so is this function.
	 *
	 * For root decoders using xormaps (hbiw: 2,4,6,8,12,16) applying
	 * each xormap may toggle one position bit of the address.
	 *
	 * pos is the lowest set bit in an XORMAP
	 * val is the XORALLBITS(addr & XORMAP)
	 *
	 * XORALLBITS: The CXL spec (3.1 Table 9-22) defines XORALLBITS
	 * as an operation that outputs a single bit by XORing all the
	 * bits in the input (addr & xormap). Implement XORALLBITS using
	 * hweight64(). If the Hamming weight is even the XOR of those
	 * bits results in val==0, if odd the XOR result is val==1.
	 */

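	/*
	 * Worked example with a single map: for xormap 0x10100 (bits 8
	 * and 16 set), pos = 8 and val = bit8(addr) ^ bit16(addr), so
	 * bit 8 of addr is replaced by that parity. Re-applying the same
	 * map XORs bit 16 back in and restores the original bit 8, which
	 * is why one routine serves both translation directions.
	 */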
	for (int i = 0; i < cximsd->nr_maps; i++) {
		if (!cximsd->xormaps[i])
			continue;
		pos = __ffs(cximsd->xormaps[i]);
		val = (hweight64(addr & cximsd->xormaps[i]) & 1);
		addr = (addr & ~(1ULL << pos)) | (val << pos);
	}

	return addr;
}
EXPORT_SYMBOL_FOR_MODULES(cxl_do_xormap_calc, "cxl_translate");

static u64 cxl_apply_xor_maps(struct cxl_root_decoder *cxlrd, u64 addr)
{
	int hbiw = cxlrd->cxlsd.nr_targets;
	struct cxl_cxims_data *cximsd;

	/* No xormaps for host bridge interleave ways of 1 or 3 */
	if (hbiw == 1 || hbiw == 3)
		return addr;

	cximsd = cxlrd->platform_data;

	return cxl_do_xormap_calc(cximsd, addr, hbiw);
}

struct cxl_cxims_context {
	struct device *dev;
	struct cxl_root_decoder *cxlrd;
};

static int cxl_parse_cxims(union acpi_subtable_headers *header, void *arg,
			   const unsigned long end)
{
	struct acpi_cedt_cxims *cxims = (struct acpi_cedt_cxims *)header;
	struct cxl_cxims_context *ctx = arg;
	struct cxl_root_decoder *cxlrd = ctx->cxlrd;
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct device *dev = ctx->dev;
	struct cxl_cxims_data *cximsd;
	unsigned int hbig, nr_maps;
	int rc;

	rc = eig_to_granularity(cxims->hbig, &hbig);
	if (rc)
		return rc;

	/* Does this CXIMS entry apply to the given CXL Window? */
	if (hbig != cxld->interleave_granularity)
		return 0;

	/* Windows with interleave ways of 1 or 3 do not use xormaps and never reach this parser */
	if (is_power_of_2(cxld->interleave_ways))
		/* 2, 4, 8, 16 way */
		nr_maps = ilog2(cxld->interleave_ways);
	else
		/* 6, 12 way */
		nr_maps = ilog2(cxld->interleave_ways / 3);
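	/* e.g. 16-way needs ilog2(16) = 4 maps, 6-way needs ilog2(6 / 3) = 1 map */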

	if (cxims->nr_xormaps < nr_maps) {
		dev_dbg(dev, "CXIMS nr_xormaps[%d] expected[%d]\n",
			cxims->nr_xormaps, nr_maps);
		return -ENXIO;
	}

	cximsd = devm_kzalloc(dev, struct_size(cximsd, xormaps, nr_maps),
			      GFP_KERNEL);
	if (!cximsd)
		return -ENOMEM;
	cximsd->nr_maps = nr_maps;
	memcpy(cximsd->xormaps, cxims->xormap_list,
	       nr_maps * sizeof(*cximsd->xormaps));
	cxlrd->platform_data = cximsd;

	return 0;
}

static unsigned long cfmws_to_decoder_flags(int restrictions)
{
	unsigned long flags = CXL_DECODER_F_ENABLE;

	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_DEVMEM)
		flags |= CXL_DECODER_F_TYPE2;
	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM)
		flags |= CXL_DECODER_F_TYPE3;
	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_VOLATILE)
		flags |= CXL_DECODER_F_RAM;
	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_PMEM)
		flags |= CXL_DECODER_F_PMEM;
	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_FIXED)
		flags |= CXL_DECODER_F_LOCK;

	return flags;
}

static int cxl_acpi_cfmws_verify(struct device *dev,
				 struct acpi_cedt_cfmws *cfmws)
{
	int rc, expected_len;
	unsigned int ways;

	if (cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_MODULO &&
	    cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_XOR) {
		dev_err(dev, "CFMWS Unknown Interleave Arithmetic: %d\n",
			cfmws->interleave_arithmetic);
		return -EINVAL;
	}

	if (!IS_ALIGNED(cfmws->base_hpa, SZ_256M)) {
		dev_err(dev, "CFMWS Base HPA not 256MB aligned\n");
		return -EINVAL;
	}

	if (!IS_ALIGNED(cfmws->window_size, SZ_256M)) {
		dev_err(dev, "CFMWS Window Size not 256MB aligned\n");
		return -EINVAL;
	}

	rc = eiw_to_ways(cfmws->interleave_ways, &ways);
	if (rc) {
		dev_err(dev, "CFMWS Interleave Ways (%d) invalid\n",
			cfmws->interleave_ways);
		return -EINVAL;
	}

	expected_len = struct_size(cfmws, interleave_targets, ways);

	if (cfmws->header.length < expected_len) {
		dev_err(dev, "CFMWS length %d less than expected %d\n",
			cfmws->header.length, expected_len);
		return -EINVAL;
	}

	if (cfmws->header.length > expected_len)
		dev_dbg(dev, "CFMWS length %d greater than expected %d\n",
			cfmws->header.length, expected_len);

	return 0;
}

/*
 * Note, @dev must be the first member, see 'struct cxl_chbs_context'
 * and mock_acpi_table_parse_cedt()
 */
struct cxl_cfmws_context {
	struct device *dev;
	struct cxl_port *root_port;
	struct resource *cxl_res;
	int id;
};

/**
 * cxl_acpi_evaluate_qtg_dsm - Retrieve QTG ids via ACPI _DSM
 * @handle: ACPI handle
 * @coord: performance access coordinates
 * @entries: number of QTG IDs to return
 * @qos_class: int array provided by caller to return QTG IDs
 *
 * Return: number of QTG IDs returned, or -errno for errors
 *
 * Issue the QTG _DSM with the accompanying bandwidth and latency data in
 * order to get the QTG IDs that are suitable for the performance point,
 * ordered from most to least suitable. Write back the array of QTG IDs
 * and return the actual number of QTG IDs written back.
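 *
 * The _DSM output parsed below is a package whose first element is an
 * integer Max QTG ID and whose second element is a package of integer
 * QTG IDs.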
 */
static int
cxl_acpi_evaluate_qtg_dsm(acpi_handle handle, struct access_coordinate *coord,
			  int entries, int *qos_class)
{
	union acpi_object *out_obj, *out_buf, *obj;
	union acpi_object in_array[4] = {
		[0].integer = { ACPI_TYPE_INTEGER, coord->read_latency },
		[1].integer = { ACPI_TYPE_INTEGER, coord->write_latency },
		[2].integer = { ACPI_TYPE_INTEGER, coord->read_bandwidth },
		[3].integer = { ACPI_TYPE_INTEGER, coord->write_bandwidth },
	};
	union acpi_object in_obj = {
		.package = {
			.type = ACPI_TYPE_PACKAGE,
			.count = 4,
			.elements = in_array,
		},
	};
	int count, pkg_entries, i;
	u16 max_qtg;
	int rc;

	if (!entries)
		return -EINVAL;

	out_obj = acpi_evaluate_dsm(handle, &acpi_cxl_qtg_id_guid, 1, 1, &in_obj);
	if (!out_obj)
		return -ENXIO;

	if (out_obj->type != ACPI_TYPE_PACKAGE) {
		rc = -ENXIO;
		goto out;
	}

	/* Check Max QTG ID */
	obj = &out_obj->package.elements[0];
	if (obj->type != ACPI_TYPE_INTEGER) {
		rc = -ENXIO;
		goto out;
	}

	max_qtg = obj->integer.value;

	/* It's legal to have 0 QTG entries */
	pkg_entries = out_obj->package.count;
	if (pkg_entries <= 1) {
		rc = 0;
		goto out;
	}

	/* Retrieve QTG IDs package */
	obj = &out_obj->package.elements[1];
	if (obj->type != ACPI_TYPE_PACKAGE) {
		rc = -ENXIO;
		goto out;
	}

	pkg_entries = obj->package.count;
	count = min(entries, pkg_entries);
	for (i = 0; i < count; i++) {
		u16 qtg_id;

		out_buf = &obj->package.elements[i];
		if (out_buf->type != ACPI_TYPE_INTEGER) {
			rc = -ENXIO;
			goto out;
		}

		qtg_id = out_buf->integer.value;
		if (qtg_id > max_qtg)
			pr_warn("QTG ID %u greater than MAX %u\n",
				qtg_id, max_qtg);

		qos_class[i] = qtg_id;
	}
	rc = count;

out:
	ACPI_FREE(out_obj);
	return rc;
}

static int cxl_acpi_qos_class(struct cxl_root *cxl_root,
			      struct access_coordinate *coord, int entries,
			      int *qos_class)
{
	struct device *dev = cxl_root->port.uport_dev;
	acpi_handle handle;

	if (!dev_is_platform(dev))
		return -ENODEV;

	handle = ACPI_HANDLE(dev);
	if (!handle)
		return -ENODEV;

	return cxl_acpi_evaluate_qtg_dsm(handle, coord, entries, qos_class);
}

static const struct cxl_root_ops acpi_root_ops = {
	.qos_class = cxl_acpi_qos_class,
};

static void del_cxl_resource(struct resource *res)
{
	if (!res)
		return;
	kfree(res->name);
	kfree(res);
}

static struct resource *alloc_cxl_resource(resource_size_t base,
					   resource_size_t n, int id)
{
	struct resource *res __free(kfree) = kzalloc(sizeof(*res), GFP_KERNEL);

	if (!res)
		return NULL;

	res->start = base;
	res->end = base + n - 1;
	res->flags = IORESOURCE_MEM;
	res->name = kasprintf(GFP_KERNEL, "CXL Window %d", id);
	if (!res->name)
		return NULL;

	return no_free_ptr(res);
}

static int add_or_reset_cxl_resource(struct resource *parent, struct resource *res)
{
	int rc = insert_resource(parent, res);

	if (rc)
		del_cxl_resource(res);
	return rc;
}

static int cxl_acpi_set_cache_size(struct cxl_root_decoder *cxlrd)
{
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct range *hpa = &cxld->hpa_range;
	resource_size_t size = range_len(hpa);
	resource_size_t start = hpa->start;
	resource_size_t cache_size;
	struct resource res;
	int nid, rc;

	res = DEFINE_RES_MEM(start, size);
	nid = phys_to_target_node(start);

	rc = hmat_get_extended_linear_cache_size(&res, nid, &cache_size);
	if (rc)
		return 0;

	/*
	 * The cache range is expected to be within the CFMWS. Only the
	 * cache_size == cxl_size configuration is currently supported,
	 * in which case the CXL capacity is half of the total CFMWS
	 * window size.
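	 * For example, a 2GB CFMWS window implies 1GB of extended linear
	 * cache and 1GB of CXL capacity.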
	 */
	size = size >> 1;
	if (cache_size && size != cache_size) {
		dev_warn(&cxld->dev,
			 "Extended Linear Cache size %pa != CXL size %pa. No Support!",
			 &cache_size, &size);
		return -ENXIO;
	}

	cxlrd->cache_size = cache_size;

	return 0;
}

static void cxl_setup_extended_linear_cache(struct cxl_root_decoder *cxlrd)
{
	int rc;

	rc = cxl_acpi_set_cache_size(cxlrd);
	if (rc) {
		/*
		 * Failing to retrieve the extended linear cache size does
		 * not prevent the region from functioning; it only causes
		 * 'cxl list' to report an incorrect region size.
		 */
		dev_warn(cxlrd->cxlsd.cxld.dev.parent,
			 "Extended linear cache retrieval failed rc:%d\n", rc);

		/* Ignoring return code */
		cxlrd->cache_size = 0;
	}
}

DEFINE_FREE(put_cxlrd, struct cxl_root_decoder *,
	    if (!IS_ERR_OR_NULL(_T)) put_device(&_T->cxlsd.cxld.dev))
DEFINE_FREE(del_cxl_resource, struct resource *, if (_T) del_cxl_resource(_T))
static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
			     struct cxl_cfmws_context *ctx)
{
	struct cxl_port *root_port = ctx->root_port;
	struct cxl_cxims_context cxims_ctx;
	struct device *dev = ctx->dev;
	struct cxl_decoder *cxld;
	unsigned int ways, i, ig;
	int rc;

	rc = cxl_acpi_cfmws_verify(dev, cfmws);
	if (rc)
		return rc;

	rc = eiw_to_ways(cfmws->interleave_ways, &ways);
	if (rc)
		return rc;
	rc = eig_to_granularity(cfmws->granularity, &ig);
	if (rc)
		return rc;

	struct resource *res __free(del_cxl_resource) = alloc_cxl_resource(
		cfmws->base_hpa, cfmws->window_size, ctx->id++);
	if (!res)
		return -ENOMEM;

	/* add to the local resource tracking to establish a sort order */
	rc = add_or_reset_cxl_resource(ctx->cxl_res, no_free_ptr(res));
	if (rc)
		return rc;

	struct cxl_root_decoder *cxlrd __free(put_cxlrd) =
		cxl_root_decoder_alloc(root_port, ways);

	if (IS_ERR(cxlrd))
		return PTR_ERR(cxlrd);

	cxld = &cxlrd->cxlsd.cxld;
	cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
	cxld->hpa_range = (struct range) {
		.start = cfmws->base_hpa,
		.end = cfmws->base_hpa + cfmws->window_size - 1,
	};
	cxld->interleave_ways = ways;
	for (i = 0; i < ways; i++)
		cxld->target_map[i] = cfmws->interleave_targets[i];
	/*
	 * Minimize the x1 granularity to advertise support for any
	 * valid region granularity
	 */
	if (ways == 1)
		ig = CXL_DECODER_MIN_GRANULARITY;
	cxld->interleave_granularity = ig;

	if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_XOR) {
		if (ways != 1 && ways != 3) {
			cxims_ctx = (struct cxl_cxims_context) {
				.dev = dev,
				.cxlrd = cxlrd,
			};
			rc = acpi_table_parse_cedt(ACPI_CEDT_TYPE_CXIMS,
						   cxl_parse_cxims, &cxims_ctx);
			if (rc < 0)
				return rc;
			if (!cxlrd->platform_data) {
				dev_err(dev, "No CXIMS for HBIG %u\n", ig);
				return -EINVAL;
			}
		}
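		/* XOR translation is self-inverting; the same helper serves both directions */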
		cxlrd->ops.hpa_to_spa = cxl_apply_xor_maps;
		cxlrd->ops.spa_to_hpa = cxl_apply_xor_maps;
	}

	cxl_setup_extended_linear_cache(cxlrd);

	cxlrd->qos_class = cfmws->qtg_id;

	rc = cxl_decoder_add(cxld);
	if (rc)
		return rc;

	rc = cxl_root_decoder_autoremove(dev, no_free_ptr(cxlrd));
	if (rc)
		return rc;

	dev_dbg(root_port->dev.parent, "%s added to %s\n",
		dev_name(&cxld->dev), dev_name(&root_port->dev));

	return 0;
}

static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
			   const unsigned long end)
{
	struct acpi_cedt_cfmws *cfmws = (struct acpi_cedt_cfmws *)header;
	struct cxl_cfmws_context *ctx = arg;
	struct device *dev = ctx->dev;
	int rc;

	rc = __cxl_parse_cfmws(cfmws, ctx);
	if (rc)
		dev_err(dev,
			"Failed to add decode range: [%#llx - %#llx] (%d)\n",
			cfmws->base_hpa,
			cfmws->base_hpa + cfmws->window_size - 1, rc);
	else
		dev_dbg(dev, "decode range: node: %d range [%#llx - %#llx]\n",
			phys_to_target_node(cfmws->base_hpa), cfmws->base_hpa,
			cfmws->base_hpa + cfmws->window_size - 1);

	/* never fail cxl_acpi load for a single window failure */
	return 0;
}

__mock struct acpi_device *to_cxl_host_bridge(struct device *host,
					      struct device *dev)
{
	struct acpi_device *adev = to_acpi_device(dev);

	if (!acpi_pci_find_root(adev->handle))
		return NULL;

	if (strcmp(acpi_device_hid(adev), "ACPI0016") == 0)
		return adev;
	return NULL;
}

/* Note, @dev is used by mock_acpi_table_parse_cedt() */
struct cxl_chbs_context {
	struct device *dev;
	unsigned long long uid;
	resource_size_t base;
	u32 cxl_version;
	int nr_versions;
	u32 saved_version;
};

static int cxl_get_chbs_iter(union acpi_subtable_headers *header, void *arg,
			     const unsigned long end)
{
	struct cxl_chbs_context *ctx = arg;
	struct acpi_cedt_chbs *chbs;

	chbs = (struct acpi_cedt_chbs *) header;

	if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11 &&
	    chbs->length != ACPI_CEDT_CHBS_LENGTH_CXL11)
		return 0;

	if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL20 &&
	    chbs->length != ACPI_CEDT_CHBS_LENGTH_CXL20)
		return 0;

	if (!chbs->base)
		return 0;

	if (ctx->saved_version != chbs->cxl_version) {
		/*
		 * Count each distinct CHBS version via saved_version;
		 * ctx->cxl_version must not be overwritten until the base
		 * and uid checks below have passed.
		 */
		ctx->saved_version = chbs->cxl_version;
		ctx->nr_versions++;
	}

	if (ctx->base != CXL_RESOURCE_NONE)
		return 0;

	if (ctx->uid != chbs->uid)
		return 0;

	ctx->cxl_version = chbs->cxl_version;
	ctx->base = chbs->base;

	return 0;
}

static int cxl_get_chbs(struct device *dev, struct acpi_device *hb,
			struct cxl_chbs_context *ctx)
{
	unsigned long long uid;
	int rc;

	rc = acpi_evaluate_integer(hb->handle, METHOD_NAME__UID, NULL, &uid);
	if (rc != AE_OK) {
		dev_err(dev, "unable to retrieve _UID\n");
		return -ENOENT;
	}

	dev_dbg(dev, "UID found: %lld\n", uid);
	*ctx = (struct cxl_chbs_context) {
		.dev = dev,
		.uid = uid,
		.base = CXL_RESOURCE_NONE,
		.cxl_version = UINT_MAX,
		.saved_version = UINT_MAX,
	};

	acpi_table_parse_cedt(ACPI_CEDT_TYPE_CHBS, cxl_get_chbs_iter, ctx);

	if (ctx->nr_versions > 1) {
		/*
		 * Disclaim eRCD support given some component registers may
		 * only be found via the CHBCR
		 */
		dev_info(dev, "Unsupported platform config, mixed Virtual Host and Restricted CXL Host hierarchy.");
	}

	return 0;
}

static int get_genport_coordinates(struct device *dev, struct cxl_dport *dport)
{
	struct acpi_device *hb = to_cxl_host_bridge(NULL, dev);
	u32 uid;

	if (kstrtou32(acpi_device_uid(hb), 0, &uid))
		return -EINVAL;

	return acpi_get_genport_coordinates(uid, dport->coord);
}

static int add_host_bridge_dport(struct device *match, void *arg)
{
	int ret;
	acpi_status rc;
	struct device *bridge;
	struct cxl_dport *dport;
	struct cxl_chbs_context ctx;
	struct acpi_pci_root *pci_root;
	struct cxl_port *root_port = arg;
	struct device *host = root_port->dev.parent;
	struct acpi_device *hb = to_cxl_host_bridge(host, match);

	if (!hb)
		return 0;

	rc = cxl_get_chbs(match, hb, &ctx);
	if (rc)
		return rc;

	if (ctx.cxl_version == UINT_MAX) {
		dev_warn(match, "No CHBS found for Host Bridge (UID %lld)\n",
			 ctx.uid);
		return 0;
	}

	if (ctx.base == CXL_RESOURCE_NONE) {
		dev_warn(match, "CHBS invalid for Host Bridge (UID %lld)\n",
			 ctx.uid);
		return 0;
	}

	pci_root = acpi_pci_find_root(hb->handle);
	bridge = pci_root->bus->bridge;

	/*
	 * In RCH mode, bind the component regs base to the dport. In
	 * VH mode it will be bound to the CXL host bridge's port
	 * object later in add_host_bridge_uport().
	 */
	if (ctx.cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11) {
		dev_dbg(match, "RCRB found for UID %lld: %pa\n", ctx.uid,
			&ctx.base);
		dport = devm_cxl_add_rch_dport(root_port, bridge, ctx.uid,
					       ctx.base);
	} else {
		dport = devm_cxl_add_dport(root_port, bridge, ctx.uid,
					   CXL_RESOURCE_NONE);
	}

	if (IS_ERR(dport))
		return PTR_ERR(dport);

	ret = get_genport_coordinates(match, dport);
	if (ret)
		dev_dbg(match, "Failed to get generic port perf coordinates.\n");

	return 0;
}

/*
 * A host bridge is a dport to a CFMWS decode and it is a uport to the
 * dports (PCIe Root Ports) within the host bridge.
 */
static int add_host_bridge_uport(struct device *match, void *arg)
{
	struct cxl_port *root_port = arg;
	struct device *host = root_port->dev.parent;
	struct acpi_device *hb = to_cxl_host_bridge(host, match);
	struct acpi_pci_root *pci_root;
	struct cxl_dport *dport;
	struct cxl_port *port;
	struct device *bridge;
	struct cxl_chbs_context ctx;
	resource_size_t component_reg_phys;
	int rc;

	if (!hb)
		return 0;

	pci_root = acpi_pci_find_root(hb->handle);
	bridge = pci_root->bus->bridge;
	dport = cxl_find_dport_by_dev(root_port, bridge);
	if (!dport) {
		dev_dbg(host, "host bridge expected and not found\n");
		return 0;
	}

	if (dport->rch) {
		dev_info(bridge, "host supports CXL (restricted)\n");
		return 0;
	}

	rc = cxl_get_chbs(match, hb, &ctx);
	if (rc)
		return rc;

	if (ctx.cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11) {
		dev_warn(bridge,
			 "CXL CHBS version mismatch, skip port registration\n");
		return 0;
	}

	component_reg_phys = ctx.base;
	if (component_reg_phys != CXL_RESOURCE_NONE)
		dev_dbg(match, "CHBCR found for UID %lld: %pa\n",
			ctx.uid, &component_reg_phys);

	rc = devm_cxl_register_pci_bus(host, bridge, pci_root->bus);
	if (rc)
		return rc;

	port = devm_cxl_add_port(host, bridge, component_reg_phys, dport);
	if (IS_ERR(port))
		return PTR_ERR(port);

	dev_info(bridge, "host supports CXL\n");

	return 0;
}

static int add_root_nvdimm_bridge(struct device *match, void *data)
{
	struct cxl_decoder *cxld;
	struct cxl_port *root_port = data;
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct device *host = root_port->dev.parent;

	if (!is_root_decoder(match))
		return 0;

	cxld = to_cxl_decoder(match);
	if (!(cxld->flags & CXL_DECODER_F_PMEM))
		return 0;

	cxl_nvb = devm_cxl_add_nvdimm_bridge(host, root_port);
	if (IS_ERR(cxl_nvb)) {
		dev_dbg(host, "failed to register pmem\n");
		return PTR_ERR(cxl_nvb);
	}
	dev_dbg(host, "%s: add: %s\n", dev_name(&root_port->dev),
		dev_name(&cxl_nvb->dev));
	return 1;
}

static struct lock_class_key cxl_root_key;

static void cxl_acpi_lock_reset_class(void *dev)
{
	device_lock_reset_class(dev);
}

static void cxl_set_public_resource(struct resource *priv, struct resource *pub)
{
	priv->desc = (unsigned long) pub;
}

static struct resource *cxl_get_public_resource(struct resource *priv)
{
	return (struct resource *) priv->desc;
}

static void remove_cxl_resources(void *data)
{
	struct resource *res, *next, *cxl = data;

	for (res = cxl->child; res; res = next) {
		struct resource *victim = cxl_get_public_resource(res);

		next = res->sibling;
		remove_resource(res);

		if (victim) {
			remove_resource(victim);
			kfree(victim);
		}

		del_cxl_resource(res);
	}
}

/**
 * add_cxl_resources() - reflect CXL fixed memory windows in iomem_resource
 * @cxl_res: A standalone resource tree where each CXL window is a sibling
 *
 * Walk each CXL window in @cxl_res and add it to iomem_resource, potentially
 * expanding its boundaries to ensure that any conflicting resources become
 * children. If a window is expanded it may then conflict with another window
 * entry and require that window to be truncated or trimmed. Consider this
 * situation::
 *
 *	|-- "CXL Window 0" --||----- "CXL Window 1" -----|
 *	|--------------- "System RAM" -------------|
 *
 * ...where platform firmware has established a System RAM resource across 2
 * windows, but has left some portion of window 1 for dynamic CXL region
 * provisioning. In this case "CXL Window 0" will span the entirety of the
 * "System RAM" span, and "CXL Window 1" is truncated to the remaining tail
 * past the end of that "System RAM" resource.
 */
static int add_cxl_resources(struct resource *cxl_res)
{
	struct resource *res, *new, *next;

	for (res = cxl_res->child; res; res = next) {
		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (!new)
			return -ENOMEM;
		new->name = res->name;
		new->start = res->start;
		new->end = res->end;
		new->flags = IORESOURCE_MEM;
		new->desc = IORES_DESC_CXL;

		/*
		 * Record the public resource in the private cxl_res tree for
		 * later removal.
		 */
		cxl_set_public_resource(res, new);

		insert_resource_expand_to_fit(&iomem_resource, new);

		next = res->sibling;
		while (next && resource_overlaps(new, next)) {
			if (resource_contains(new, next)) {
				struct resource *_next = next->sibling;

				remove_resource(next);
				del_cxl_resource(next);
				next = _next;
			} else
				next->start = new->end + 1;
		}
	}
	return 0;
}

static int pair_cxl_resource(struct device *dev, void *data)
{
	struct resource *cxl_res = data;
	struct resource *p;

	if (!is_root_decoder(dev))
		return 0;

	for (p = cxl_res->child; p; p = p->sibling) {
		struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
		struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
		struct resource res = {
			.start = cxld->hpa_range.start,
			.end = cxld->hpa_range.end,
			.flags = IORESOURCE_MEM,
		};

		if (resource_contains(p, &res)) {
			cxlrd->res = cxl_get_public_resource(p);
			break;
		}
	}

	return 0;
}

static int cxl_acpi_probe(struct platform_device *pdev)
{
	int rc;
	struct resource *cxl_res;
	struct cxl_root *cxl_root;
	struct cxl_port *root_port;
	struct device *host = &pdev->dev;
	struct acpi_device *adev = ACPI_COMPANION(host);
	struct cxl_cfmws_context ctx;

	device_lock_set_class(&pdev->dev, &cxl_root_key);
	rc = devm_add_action_or_reset(&pdev->dev, cxl_acpi_lock_reset_class,
				      &pdev->dev);
	if (rc)
		return rc;

	cxl_res = devm_kzalloc(host, sizeof(*cxl_res), GFP_KERNEL);
	if (!cxl_res)
		return -ENOMEM;
	cxl_res->name = "CXL mem";
	cxl_res->start = 0;
	cxl_res->end = -1;
	cxl_res->flags = IORESOURCE_MEM;
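	/*
	 * cxl_res spans the entire physical address range; the CFMWS
	 * windows parsed below are inserted beneath it to establish a
	 * sort order before add_cxl_resources() reflects them into
	 * iomem_resource.
	 */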

	cxl_root = devm_cxl_add_root(host, &acpi_root_ops);
	if (IS_ERR(cxl_root))
		return PTR_ERR(cxl_root);
	root_port = &cxl_root->port;

	rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
			      add_host_bridge_dport);
	if (rc < 0)
		return rc;

	rc = devm_add_action_or_reset(host, remove_cxl_resources, cxl_res);
	if (rc)
		return rc;

	ctx = (struct cxl_cfmws_context) {
		.dev = host,
		.root_port = root_port,
		.cxl_res = cxl_res,
	};
	rc = acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, cxl_parse_cfmws, &ctx);
	if (rc < 0)
		return -ENXIO;

	rc = add_cxl_resources(cxl_res);
	if (rc)
		return rc;

	/*
	 * Populate the root decoders with their related iomem resource,
	 * if present
	 */
	device_for_each_child(&root_port->dev, cxl_res, pair_cxl_resource);

	/*
	 * Root level scanned with host-bridges as dports, now scan
	 * host-bridges for their role as CXL uports to their CXL-capable
	 * PCIe Root Ports.
	 */
	rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
			      add_host_bridge_uport);
	if (rc < 0)
		return rc;

	if (IS_ENABLED(CONFIG_CXL_PMEM))
		rc = device_for_each_child(&root_port->dev, root_port,
					   add_root_nvdimm_bridge);
	if (rc < 0)
		return rc;

	/* In case PCI is scanned before ACPI, re-trigger memdev attach */
	cxl_bus_rescan();
	return 0;
}

static const struct acpi_device_id cxl_acpi_ids[] = {
	{ "ACPI0017" },
	{ },
};
MODULE_DEVICE_TABLE(acpi, cxl_acpi_ids);

static const struct platform_device_id cxl_test_ids[] = {
	{ "cxl_acpi" },
	{ },
};
MODULE_DEVICE_TABLE(platform, cxl_test_ids);

static struct platform_driver cxl_acpi_driver = {
	.probe = cxl_acpi_probe,
	.driver = {
		.name = KBUILD_MODNAME,
		.acpi_match_table = cxl_acpi_ids,
	},
	.id_table = cxl_test_ids,
};

static int __init cxl_acpi_init(void)
{
	return platform_driver_register(&cxl_acpi_driver);
}

static void __exit cxl_acpi_exit(void)
{
	platform_driver_unregister(&cxl_acpi_driver);
	cxl_bus_drain();
}

/* load before dax_hmem sees 'Soft Reserved' CXL ranges */
subsys_initcall(cxl_acpi_init);

/*
 * Arrange for host-bridge ports to be active synchronously with
 * cxl_acpi_probe() exit.
 */
MODULE_SOFTDEP("pre: cxl_port");

module_exit(cxl_acpi_exit);
MODULE_DESCRIPTION("CXL ACPI: Platform Support");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS("CXL");
MODULE_IMPORT_NS("ACPI");