// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/node.h>
#include <asm/div64.h>
#include "cxlpci.h"
#include "cxl.h"

struct cxl_cxims_data {
	int nr_maps;
	u64 xormaps[] __counted_by(nr_maps);
};

static const guid_t acpi_cxl_qtg_id_guid =
	GUID_INIT(0xF365F9A6, 0xA7DE, 0x4071,
		  0xA6, 0x6A, 0xB4, 0x0C, 0x0B, 0x4F, 0x8E, 0x52);

static u64 cxl_apply_xor_maps(struct cxl_root_decoder *cxlrd, u64 addr)
{
	struct cxl_cxims_data *cximsd = cxlrd->platform_data;
	int hbiw = cxlrd->cxlsd.nr_targets;
	u64 val;
	int pos;

	/* No xormaps for host bridge interleave ways of 1 or 3 */
	if (hbiw == 1 || hbiw == 3)
		return addr;

	/*
	 * In regions using XOR interleave arithmetic the CXL HPA may not
	 * be the same as the SPA. This helper performs the SPA->CXL HPA
	 * or the CXL HPA->SPA translation. XOR is self-inverting, so the
	 * same helper serves both directions.
	 *
	 * For root decoders using xormaps (hbiw: 2,4,6,8,12,16) applying the
	 * xormaps will toggle a position bit.
	 *
	 * pos is the lowest set bit in an XORMAP
	 * val is the XORALLBITS(addr & XORMAP)
	 *
	 * XORALLBITS: The CXL spec (3.1 Table 9-22) defines XORALLBITS
	 * as an operation that outputs a single bit by XORing all the
	 * bits in the input (addr & xormap). Implement XORALLBITS using
	 * hweight64(). If the hamming weight is even the XOR of those
	 * bits results in val==0, if odd the XOR result is val==1.
	 */
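	/*
	 * Worked example with hypothetical values (not from the spec):
	 * xormap = 0x1100 selects bits 8 and 12, so pos = __ffs(0x1100)
	 * = 8. For addr = 0x1234, addr & xormap = 0x1000, and
	 * hweight64(0x1000) = 1 (odd), so val = 1 and bit 8 gets set:
	 * addr becomes 0x1334. Applying the same map again yields
	 * hweight64(0x1100) = 2 (even), val = 0, restoring 0x1234,
	 * demonstrating the self-inverting property.
	 */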

	for (int i = 0; i < cximsd->nr_maps; i++) {
		if (!cximsd->xormaps[i])
			continue;
		pos = __ffs(cximsd->xormaps[i]);
		val = (hweight64(addr & cximsd->xormaps[i]) & 1);
		addr = (addr & ~(1ULL << pos)) | (val << pos);
	}

	return addr;
}

struct cxl_cxims_context {
	struct device *dev;
	struct cxl_root_decoder *cxlrd;
};

static int cxl_parse_cxims(union acpi_subtable_headers *header, void *arg,
			   const unsigned long end)
{
	struct acpi_cedt_cxims *cxims = (struct acpi_cedt_cxims *)header;
	struct cxl_cxims_context *ctx = arg;
	struct cxl_root_decoder *cxlrd = ctx->cxlrd;
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct device *dev = ctx->dev;
	struct cxl_cxims_data *cximsd;
	unsigned int hbig, nr_maps;
	int rc;

	rc = eig_to_granularity(cxims->hbig, &hbig);
	if (rc)
		return rc;

	/* Does this CXIMS entry apply to the given CXL Window? */
	if (hbig != cxld->interleave_granularity)
		return 0;

	/* IW 1,3 do not use xormaps and skip this parsing entirely */
	if (is_power_of_2(cxld->interleave_ways))
		/* 2, 4, 8, 16 way */
		nr_maps = ilog2(cxld->interleave_ways);
	else
		/* 6, 12 way */
		nr_maps = ilog2(cxld->interleave_ways / 3);
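	/*
	 * One xormap per position bit: e.g. 8-way needs ilog2(8) = 3
	 * maps, while 12-way needs ilog2(12 / 3) = 2 maps, with the
	 * remaining 3-way factor handled by modulo arithmetic.
	 */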

	if (cxims->nr_xormaps < nr_maps) {
		dev_dbg(dev, "CXIMS nr_xormaps[%d] expected[%d]\n",
			cxims->nr_xormaps, nr_maps);
		return -ENXIO;
	}

	cximsd = devm_kzalloc(dev, struct_size(cximsd, xormaps, nr_maps),
			      GFP_KERNEL);
	if (!cximsd)
		return -ENOMEM;
	cximsd->nr_maps = nr_maps;
	memcpy(cximsd->xormaps, cxims->xormap_list,
	       nr_maps * sizeof(*cximsd->xormaps));
	cxlrd->platform_data = cximsd;

	return 0;
}

static unsigned long cfmws_to_decoder_flags(int restrictions)
{
	unsigned long flags = CXL_DECODER_F_ENABLE;

	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_DEVMEM)
		flags |= CXL_DECODER_F_TYPE2;
	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM)
		flags |= CXL_DECODER_F_TYPE3;
	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_VOLATILE)
		flags |= CXL_DECODER_F_RAM;
	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_PMEM)
		flags |= CXL_DECODER_F_PMEM;
	if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_FIXED)
		flags |= CXL_DECODER_F_LOCK;

	return flags;
}

static int cxl_acpi_cfmws_verify(struct device *dev,
				 struct acpi_cedt_cfmws *cfmws)
{
	int rc, expected_len;
	unsigned int ways;

	if (cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_MODULO &&
	    cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_XOR) {
		dev_err(dev, "CFMWS Unknown Interleave Arithmetic: %d\n",
			cfmws->interleave_arithmetic);
		return -EINVAL;
	}

	if (!IS_ALIGNED(cfmws->base_hpa, SZ_256M)) {
		dev_err(dev, "CFMWS Base HPA not 256MB aligned\n");
		return -EINVAL;
	}

	if (!IS_ALIGNED(cfmws->window_size, SZ_256M)) {
		dev_err(dev, "CFMWS Window Size not 256MB aligned\n");
		return -EINVAL;
	}

	rc = eiw_to_ways(cfmws->interleave_ways, &ways);
	if (rc) {
		dev_err(dev, "CFMWS Interleave Ways (%d) invalid\n",
			cfmws->interleave_ways);
		return -EINVAL;
	}

	expected_len = struct_size(cfmws, interleave_targets, ways);

	if (cfmws->header.length < expected_len) {
		dev_err(dev, "CFMWS length %d less than expected %d\n",
			cfmws->header.length, expected_len);
		return -EINVAL;
	}

	if (cfmws->header.length > expected_len)
		dev_dbg(dev, "CFMWS length %d greater than expected %d\n",
			cfmws->header.length, expected_len);

	return 0;
}

/*
 * Note, @dev must be the first member, see 'struct cxl_chbs_context'
 * and mock_acpi_table_parse_cedt()
 */
struct cxl_cfmws_context {
	struct device *dev;
	struct cxl_port *root_port;
	struct resource *cxl_res;
	int id;
};

/**
 * cxl_acpi_evaluate_qtg_dsm - Retrieve QTG ids via ACPI _DSM
 * @handle: ACPI handle
 * @coord: performance access coordinates
 * @entries: number of QTG IDs to return
 * @qos_class: int array provided by caller to return QTG IDs
 *
 * Return: number of QTG IDs returned, or -errno for errors
 *
 * Issue the QTG _DSM with the accompanying bandwidth and latency data to
 * retrieve the QTG IDs suitable for the performance point, ordered from
 * most to least suitable. Write back the array of QTG IDs and return the
 * actual number of QTG IDs written back.
 */
static int
cxl_acpi_evaluate_qtg_dsm(acpi_handle handle, struct access_coordinate *coord,
			  int entries, int *qos_class)
{
	union acpi_object *out_obj, *out_buf, *obj;
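	/* _DSM input: a package of four integers, latencies then bandwidths */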
	union acpi_object in_array[4] = {
		[0].integer = { ACPI_TYPE_INTEGER, coord->read_latency },
		[1].integer = { ACPI_TYPE_INTEGER, coord->write_latency },
		[2].integer = { ACPI_TYPE_INTEGER, coord->read_bandwidth },
		[3].integer = { ACPI_TYPE_INTEGER, coord->write_bandwidth },
	};
	union acpi_object in_obj = {
		.package = {
			.type = ACPI_TYPE_PACKAGE,
			.count = 4,
			.elements = in_array,
		},
	};
	int count, pkg_entries, i;
	u16 max_qtg;
	int rc;

	if (!entries)
		return -EINVAL;

	out_obj = acpi_evaluate_dsm(handle, &acpi_cxl_qtg_id_guid, 1, 1, &in_obj);
	if (!out_obj)
		return -ENXIO;

	if (out_obj->type != ACPI_TYPE_PACKAGE) {
		rc = -ENXIO;
		goto out;
	}

	/* Check Max QTG ID */
	obj = &out_obj->package.elements[0];
	if (obj->type != ACPI_TYPE_INTEGER) {
		rc = -ENXIO;
		goto out;
	}

	max_qtg = obj->integer.value;

	/* It's legal to have 0 QTG entries */
	pkg_entries = out_obj->package.count;
	if (pkg_entries <= 1) {
		rc = 0;
		goto out;
	}

	/* Retrieve QTG IDs package */
	obj = &out_obj->package.elements[1];
	if (obj->type != ACPI_TYPE_PACKAGE) {
		rc = -ENXIO;
		goto out;
	}

	pkg_entries = obj->package.count;
	count = min(entries, pkg_entries);
	for (i = 0; i < count; i++) {
		u16 qtg_id;

		out_buf = &obj->package.elements[i];
		if (out_buf->type != ACPI_TYPE_INTEGER) {
			rc = -ENXIO;
			goto out;
		}

		qtg_id = out_buf->integer.value;
		if (qtg_id > max_qtg)
			pr_warn("QTG ID %u greater than MAX %u\n",
				qtg_id, max_qtg);

		qos_class[i] = qtg_id;
	}
	rc = count;

out:
	ACPI_FREE(out_obj);
	return rc;
}

static int cxl_acpi_qos_class(struct cxl_root *cxl_root,
			      struct access_coordinate *coord, int entries,
			      int *qos_class)
{
	struct device *dev = cxl_root->port.uport_dev;
	acpi_handle handle;

	if (!dev_is_platform(dev))
		return -ENODEV;

	handle = ACPI_HANDLE(dev);
	if (!handle)
		return -ENODEV;

	return cxl_acpi_evaluate_qtg_dsm(handle, coord, entries, qos_class);
}

static const struct cxl_root_ops acpi_root_ops = {
	.qos_class = cxl_acpi_qos_class,
};

static void del_cxl_resource(struct resource *res)
{
	if (!res)
		return;
	kfree(res->name);
	kfree(res);
}

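/*
 * Scope-based cleanup: the __free(kfree) annotation below arranges for the
 * allocation to be freed automatically on any early return, and
 * no_free_ptr() disarms that cleanup once ownership passes to the caller.
 */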
static struct resource *alloc_cxl_resource(resource_size_t base,
					   resource_size_t n, int id)
{
	struct resource *res __free(kfree) = kzalloc(sizeof(*res), GFP_KERNEL);

	if (!res)
		return NULL;

	res->start = base;
	res->end = base + n - 1;
	res->flags = IORESOURCE_MEM;
	res->name = kasprintf(GFP_KERNEL, "CXL Window %d", id);
	if (!res->name)
		return NULL;

	return no_free_ptr(res);
}

static int add_or_reset_cxl_resource(struct resource *parent, struct resource *res)
{
	int rc = insert_resource(parent, res);

	if (rc)
		del_cxl_resource(res);
	return rc;
}

static int cxl_acpi_set_cache_size(struct cxl_root_decoder *cxlrd)
{
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct range *hpa = &cxld->hpa_range;
	resource_size_t size = range_len(hpa);
	resource_size_t start = hpa->start;
	resource_size_t cache_size;
	struct resource res;
	int nid, rc;

	res = DEFINE_RES_MEM(start, size);
	nid = phys_to_target_node(start);

	rc = hmat_get_extended_linear_cache_size(&res, nid, &cache_size);
	if (rc)
		return rc;

	/*
	 * The cache range is expected to be within the CFMWS. Currently
	 * only cache_size == cxl_size is supported, i.e. the CXL size is
	 * half of the total CFMWS window size.
	 */
	size = size >> 1;
	if (cache_size && size != cache_size) {
		dev_warn(&cxld->dev,
			 "Extended Linear Cache size %pa != CXL size %pa. No Support!",
			 &cache_size, &size);
		return -ENXIO;
	}

	cxlrd->cache_size = cache_size;

	return 0;
}

static void cxl_setup_extended_linear_cache(struct cxl_root_decoder *cxlrd)
{
	int rc;

	rc = cxl_acpi_set_cache_size(cxlrd);
	if (!rc)
		return;

	if (rc != -EOPNOTSUPP) {
		/*
		 * Failing to set up the extended linear cache does not
		 * prevent the region from functioning; it only causes
		 * 'cxl list' to report an incorrect region size.
		 */
		dev_warn(cxlrd->cxlsd.cxld.dev.parent,
			 "Extended linear cache calculation failed rc:%d\n", rc);
	}

	/* Not fatal, proceed without an extended linear cache */
	cxlrd->cache_size = 0;
}

DEFINE_FREE(put_cxlrd, struct cxl_root_decoder *,
	    if (!IS_ERR_OR_NULL(_T)) put_device(&_T->cxlsd.cxld.dev))
DEFINE_FREE(del_cxl_resource, struct resource *, if (_T) del_cxl_resource(_T))
static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
			     struct cxl_cfmws_context *ctx)
{
	struct cxl_port *root_port = ctx->root_port;
	struct cxl_cxims_context cxims_ctx;
	struct device *dev = ctx->dev;
	struct cxl_decoder *cxld;
	unsigned int ways, i, ig;
	int rc;

	rc = cxl_acpi_cfmws_verify(dev, cfmws);
	if (rc)
		return rc;

	rc = eiw_to_ways(cfmws->interleave_ways, &ways);
	if (rc)
		return rc;
	rc = eig_to_granularity(cfmws->granularity, &ig);
	if (rc)
		return rc;

	struct resource *res __free(del_cxl_resource) = alloc_cxl_resource(
		cfmws->base_hpa, cfmws->window_size, ctx->id++);
	if (!res)
		return -ENOMEM;

	/* add to the local resource tracking to establish a sort order */
	rc = add_or_reset_cxl_resource(ctx->cxl_res, no_free_ptr(res));
	if (rc)
		return rc;

	struct cxl_root_decoder *cxlrd __free(put_cxlrd) =
		cxl_root_decoder_alloc(root_port, ways);

	if (IS_ERR(cxlrd))
		return PTR_ERR(cxlrd);

	cxld = &cxlrd->cxlsd.cxld;
	cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
	cxld->hpa_range = (struct range) {
		.start = cfmws->base_hpa,
		.end = cfmws->base_hpa + cfmws->window_size - 1,
	};
	cxld->interleave_ways = ways;
	for (i = 0; i < ways; i++)
		cxld->target_map[i] = cfmws->interleave_targets[i];
	/*
	 * Minimize the x1 granularity to advertise support for any
	 * valid region granularity
	 */
	if (ways == 1)
		ig = CXL_DECODER_MIN_GRANULARITY;
	cxld->interleave_granularity = ig;

	cxl_setup_extended_linear_cache(cxlrd);

	if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_XOR) {
		if (ways != 1 && ways != 3) {
			cxims_ctx = (struct cxl_cxims_context) {
				.dev = dev,
				.cxlrd = cxlrd,
			};
			rc = acpi_table_parse_cedt(ACPI_CEDT_TYPE_CXIMS,
						   cxl_parse_cxims, &cxims_ctx);
			if (rc < 0)
				return rc;
			if (!cxlrd->platform_data) {
				dev_err(dev, "No CXIMS for HBIG %u\n", ig);
				return -EINVAL;
			}
		}
	}

	cxlrd->qos_class = cfmws->qtg_id;

	if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_XOR) {
		cxlrd->ops = kzalloc(sizeof(*cxlrd->ops), GFP_KERNEL);
		if (!cxlrd->ops)
			return -ENOMEM;

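		/* XOR translation is self-inverting, so one helper serves both directions */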
		cxlrd->ops->hpa_to_spa = cxl_apply_xor_maps;
		cxlrd->ops->spa_to_hpa = cxl_apply_xor_maps;
	}

	rc = cxl_decoder_add(cxld);
	if (rc)
		return rc;

	rc = cxl_root_decoder_autoremove(dev, no_free_ptr(cxlrd));
	if (rc)
		return rc;

	dev_dbg(root_port->dev.parent, "%s added to %s\n",
		dev_name(&cxld->dev), dev_name(&root_port->dev));

	return 0;
}

static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
			   const unsigned long end)
{
	struct acpi_cedt_cfmws *cfmws = (struct acpi_cedt_cfmws *)header;
	struct cxl_cfmws_context *ctx = arg;
	struct device *dev = ctx->dev;
	int rc;

	rc = __cxl_parse_cfmws(cfmws, ctx);
	if (rc)
		dev_err(dev,
			"Failed to add decode range: [%#llx - %#llx] (%d)\n",
			cfmws->base_hpa,
			cfmws->base_hpa + cfmws->window_size - 1, rc);
	else
		dev_dbg(dev, "decode range: node: %d range [%#llx - %#llx]\n",
			phys_to_target_node(cfmws->base_hpa), cfmws->base_hpa,
			cfmws->base_hpa + cfmws->window_size - 1);

	/* never fail cxl_acpi load for a single window failure */
	return 0;
}

__mock struct acpi_device *to_cxl_host_bridge(struct device *host,
					      struct device *dev)
{
	struct acpi_device *adev = to_acpi_device(dev);

	if (!acpi_pci_find_root(adev->handle))
		return NULL;

	if (strcmp(acpi_device_hid(adev), "ACPI0016") == 0)
		return adev;
	return NULL;
}

/* Note, @dev is used by mock_acpi_table_parse_cedt() */
struct cxl_chbs_context {
	struct device *dev;
	unsigned long long uid;
	resource_size_t base;
	u32 cxl_version;
	int nr_versions;
	u32 saved_version;
};

static int cxl_get_chbs_iter(union acpi_subtable_headers *header, void *arg,
			     const unsigned long end)
{
	struct cxl_chbs_context *ctx = arg;
	struct acpi_cedt_chbs *chbs;

	chbs = (struct acpi_cedt_chbs *) header;

	if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11 &&
	    chbs->length != ACPI_CEDT_CHBS_LENGTH_CXL11)
		return 0;

	if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL20 &&
	    chbs->length != ACPI_CEDT_CHBS_LENGTH_CXL20)
		return 0;

	if (!chbs->base)
		return 0;

	if (ctx->saved_version != chbs->cxl_version) {
		/*
		 * Track version changes in saved_version since
		 * cxl_version must not be overwritten before the two
		 * checks below.
		 */
		ctx->saved_version = chbs->cxl_version;
		ctx->nr_versions++;
	}

	if (ctx->base != CXL_RESOURCE_NONE)
		return 0;

	if (ctx->uid != chbs->uid)
		return 0;

	ctx->cxl_version = chbs->cxl_version;
	ctx->base = chbs->base;

	return 0;
}

static int cxl_get_chbs(struct device *dev, struct acpi_device *hb,
			struct cxl_chbs_context *ctx)
{
	unsigned long long uid;
	int rc;

	rc = acpi_evaluate_integer(hb->handle, METHOD_NAME__UID, NULL, &uid);
	if (rc != AE_OK) {
		dev_err(dev, "unable to retrieve _UID\n");
		return -ENOENT;
	}

	dev_dbg(dev, "UID found: %lld\n", uid);
	*ctx = (struct cxl_chbs_context) {
		.dev = dev,
		.uid = uid,
		.base = CXL_RESOURCE_NONE,
		.cxl_version = UINT_MAX,
		.saved_version = UINT_MAX,
	};

	acpi_table_parse_cedt(ACPI_CEDT_TYPE_CHBS, cxl_get_chbs_iter, ctx);

	if (ctx->nr_versions > 1) {
		/*
		 * Disclaim eRCD support given some component registers may
		 * only be found via CHBCR
		 */
		dev_info(dev, "Unsupported platform config, mixed Virtual Host and Restricted CXL Host hierarchy.");
	}

	return 0;
}

static int get_genport_coordinates(struct device *dev, struct cxl_dport *dport)
{
	struct acpi_device *hb = to_cxl_host_bridge(NULL, dev);
	u32 uid;

	if (kstrtou32(acpi_device_uid(hb), 0, &uid))
		return -EINVAL;

	return acpi_get_genport_coordinates(uid, dport->coord);
}

static int add_host_bridge_dport(struct device *match, void *arg)
{
	int ret;
	acpi_status rc;
	struct device *bridge;
	struct cxl_dport *dport;
	struct cxl_chbs_context ctx;
	struct acpi_pci_root *pci_root;
	struct cxl_port *root_port = arg;
	struct device *host = root_port->dev.parent;
	struct acpi_device *hb = to_cxl_host_bridge(host, match);

	if (!hb)
		return 0;

	rc = cxl_get_chbs(match, hb, &ctx);
	if (rc)
		return rc;

	if (ctx.cxl_version == UINT_MAX) {
		dev_warn(match, "No CHBS found for Host Bridge (UID %lld)\n",
			 ctx.uid);
		return 0;
	}

	if (ctx.base == CXL_RESOURCE_NONE) {
		dev_warn(match, "CHBS invalid for Host Bridge (UID %lld)\n",
			 ctx.uid);
		return 0;
	}

	pci_root = acpi_pci_find_root(hb->handle);
	bridge = pci_root->bus->bridge;

	/*
	 * In RCH mode, bind the component regs base to the dport. In
	 * VH mode it will be bound to the CXL host bridge's port
	 * object later in add_host_bridge_uport().
	 */
	if (ctx.cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11) {
		dev_dbg(match, "RCRB found for UID %lld: %pa\n", ctx.uid,
			&ctx.base);
		dport = devm_cxl_add_rch_dport(root_port, bridge, ctx.uid,
					       ctx.base);
	} else {
		dport = devm_cxl_add_dport(root_port, bridge, ctx.uid,
					   CXL_RESOURCE_NONE);
	}

	if (IS_ERR(dport))
		return PTR_ERR(dport);

	ret = get_genport_coordinates(match, dport);
	if (ret)
		dev_dbg(match, "Failed to get generic port perf coordinates.\n");

	return 0;
}

/*
 * A host bridge is a dport to a CFMWS decode and a uport to the
 * dports (PCIe Root Ports) within the host bridge.
 */
static int add_host_bridge_uport(struct device *match, void *arg)
{
	struct cxl_port *root_port = arg;
	struct device *host = root_port->dev.parent;
	struct acpi_device *hb = to_cxl_host_bridge(host, match);
	struct acpi_pci_root *pci_root;
	struct cxl_dport *dport;
	struct cxl_port *port;
	struct device *bridge;
	struct cxl_chbs_context ctx;
	resource_size_t component_reg_phys;
	int rc;

	if (!hb)
		return 0;

	pci_root = acpi_pci_find_root(hb->handle);
	bridge = pci_root->bus->bridge;
	dport = cxl_find_dport_by_dev(root_port, bridge);
	if (!dport) {
		dev_dbg(host, "host bridge expected and not found\n");
		return 0;
	}

	if (dport->rch) {
		dev_info(bridge, "host supports CXL (restricted)\n");
		return 0;
	}

	rc = cxl_get_chbs(match, hb, &ctx);
	if (rc)
		return rc;

	if (ctx.cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11) {
		dev_warn(bridge,
			 "CXL CHBS version mismatch, skip port registration\n");
		return 0;
	}

	component_reg_phys = ctx.base;
	if (component_reg_phys != CXL_RESOURCE_NONE)
		dev_dbg(match, "CHBCR found for UID %lld: %pa\n",
			ctx.uid, &component_reg_phys);

	rc = devm_cxl_register_pci_bus(host, bridge, pci_root->bus);
	if (rc)
		return rc;

	port = devm_cxl_add_port(host, bridge, component_reg_phys, dport);
	if (IS_ERR(port))
		return PTR_ERR(port);

	dev_info(bridge, "host supports CXL\n");

	return 0;
}

static int add_root_nvdimm_bridge(struct device *match, void *data)
{
	struct cxl_decoder *cxld;
	struct cxl_port *root_port = data;
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct device *host = root_port->dev.parent;

	if (!is_root_decoder(match))
		return 0;

	cxld = to_cxl_decoder(match);
	if (!(cxld->flags & CXL_DECODER_F_PMEM))
		return 0;

	cxl_nvb = devm_cxl_add_nvdimm_bridge(host, root_port);
	if (IS_ERR(cxl_nvb)) {
		dev_dbg(host, "failed to register pmem\n");
		return PTR_ERR(cxl_nvb);
	}
	dev_dbg(host, "%s: add: %s\n", dev_name(&root_port->dev),
		dev_name(&cxl_nvb->dev));
	return 1;
}

static struct lock_class_key cxl_root_key;

static void cxl_acpi_lock_reset_class(void *dev)
{
	device_lock_reset_class(dev);
}

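/*
 * The private cxl_res tree and the public iomem_resource entries are
 * paired by stashing the public resource pointer in the private
 * resource's otherwise unused ->desc field.
 */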
static void cxl_set_public_resource(struct resource *priv, struct resource *pub)
{
	priv->desc = (unsigned long) pub;
}

static struct resource *cxl_get_public_resource(struct resource *priv)
{
	return (struct resource *) priv->desc;
}

static void remove_cxl_resources(void *data)
{
	struct resource *res, *next, *cxl = data;

	for (res = cxl->child; res; res = next) {
		struct resource *victim = cxl_get_public_resource(res);

		next = res->sibling;
		remove_resource(res);

		if (victim) {
			remove_resource(victim);
			kfree(victim);
		}

		del_cxl_resource(res);
	}
}

/**
 * add_cxl_resources() - reflect CXL fixed memory windows in iomem_resource
 * @cxl_res: A standalone resource tree where each CXL window is a sibling
 *
 * Walk each CXL window in @cxl_res and add it to iomem_resource potentially
 * expanding its boundaries to ensure that any conflicting resources become
 * children. If a window is expanded it may then conflict with another window
 * entry and require the window to be truncated or trimmed. Consider this
 * situation::
 *
 *	|-- "CXL Window 0" --||----- "CXL Window 1" -----|
 *	|--------------- "System RAM" -------------|
 *
 * ...where platform firmware has established a "System RAM" resource across 2
 * windows, but has left some portion of window 1 for dynamic CXL region
 * provisioning. In this case "CXL Window 0" will span the entirety of the
 * "System RAM" span, and "CXL Window 1" is truncated to the remaining tail
 * past the end of that "System RAM" resource.
 */
static int add_cxl_resources(struct resource *cxl_res)
{
	struct resource *res, *new, *next;

	for (res = cxl_res->child; res; res = next) {
		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (!new)
			return -ENOMEM;
		new->name = res->name;
		new->start = res->start;
		new->end = res->end;
		new->flags = IORESOURCE_MEM;
		new->desc = IORES_DESC_CXL;

		/*
		 * Record the public resource in the private cxl_res tree for
		 * later removal.
		 */
		cxl_set_public_resource(res, new);

		insert_resource_expand_to_fit(&iomem_resource, new);

		next = res->sibling;
		while (next && resource_overlaps(new, next)) {
			if (resource_contains(new, next)) {
				struct resource *_next = next->sibling;

				remove_resource(next);
				del_cxl_resource(next);
				next = _next;
			} else
				next->start = new->end + 1;
		}
	}
	return 0;
}

static int pair_cxl_resource(struct device *dev, void *data)
{
	struct resource *cxl_res = data;
	struct resource *p;

	if (!is_root_decoder(dev))
		return 0;

	for (p = cxl_res->child; p; p = p->sibling) {
		struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
		struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
		struct resource res = {
			.start = cxld->hpa_range.start,
			.end = cxld->hpa_range.end,
			.flags = IORESOURCE_MEM,
		};

		if (resource_contains(p, &res)) {
			cxlrd->res = cxl_get_public_resource(p);
			break;
		}
	}

	return 0;
}

static int cxl_acpi_probe(struct platform_device *pdev)
{
	int rc;
	struct resource *cxl_res;
	struct cxl_root *cxl_root;
	struct cxl_port *root_port;
	struct device *host = &pdev->dev;
	struct acpi_device *adev = ACPI_COMPANION(host);
	struct cxl_cfmws_context ctx;

	device_lock_set_class(&pdev->dev, &cxl_root_key);
	rc = devm_add_action_or_reset(&pdev->dev, cxl_acpi_lock_reset_class,
				      &pdev->dev);
	if (rc)
		return rc;

	cxl_res = devm_kzalloc(host, sizeof(*cxl_res), GFP_KERNEL);
	if (!cxl_res)
		return -ENOMEM;
	cxl_res->name = "CXL mem";
	cxl_res->start = 0;
	cxl_res->end = -1;
	cxl_res->flags = IORESOURCE_MEM;

	cxl_root = devm_cxl_add_root(host, &acpi_root_ops);
	if (IS_ERR(cxl_root))
		return PTR_ERR(cxl_root);
	root_port = &cxl_root->port;

	rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
			      add_host_bridge_dport);
	if (rc < 0)
		return rc;

	rc = devm_add_action_or_reset(host, remove_cxl_resources, cxl_res);
	if (rc)
		return rc;

	ctx = (struct cxl_cfmws_context) {
		.dev = host,
		.root_port = root_port,
		.cxl_res = cxl_res,
	};
	rc = acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, cxl_parse_cfmws, &ctx);
	if (rc < 0)
		return -ENXIO;

	rc = add_cxl_resources(cxl_res);
	if (rc)
		return rc;

	/*
	 * Populate the root decoders with their related iomem resource,
	 * if present
	 */
	device_for_each_child(&root_port->dev, cxl_res, pair_cxl_resource);

	/*
	 * Root level scanned with host-bridge as dports, now scan host-bridges
	 * for their role as CXL uports to their CXL-capable PCIe Root Ports.
	 */
	rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
			      add_host_bridge_uport);
	if (rc < 0)
		return rc;

	if (IS_ENABLED(CONFIG_CXL_PMEM))
		rc = device_for_each_child(&root_port->dev, root_port,
					   add_root_nvdimm_bridge);
	if (rc < 0)
		return rc;

	/* In case PCI is scanned before ACPI re-trigger memdev attach */
	cxl_bus_rescan();
	return 0;
}

static const struct acpi_device_id cxl_acpi_ids[] = {
	{ "ACPI0017" },
	{ },
};
MODULE_DEVICE_TABLE(acpi, cxl_acpi_ids);

static const struct platform_device_id cxl_test_ids[] = {
	{ "cxl_acpi" },
	{ },
};
MODULE_DEVICE_TABLE(platform, cxl_test_ids);

static struct platform_driver cxl_acpi_driver = {
	.probe = cxl_acpi_probe,
	.driver = {
		.name = KBUILD_MODNAME,
		.acpi_match_table = cxl_acpi_ids,
	},
	.id_table = cxl_test_ids,
};

static int __init cxl_acpi_init(void)
{
	return platform_driver_register(&cxl_acpi_driver);
}

static void __exit cxl_acpi_exit(void)
{
	platform_driver_unregister(&cxl_acpi_driver);
	cxl_bus_drain();
}

/* load before dax_hmem sees 'Soft Reserved' CXL ranges */
subsys_initcall(cxl_acpi_init);

/*
 * Arrange for host-bridge ports to be active synchronous with
 * cxl_acpi_probe() exit.
 */
MODULE_SOFTDEP("pre: cxl_port");

module_exit(cxl_acpi_exit);
MODULE_DESCRIPTION("CXL ACPI: Platform Support");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS("CXL");
MODULE_IMPORT_NS("ACPI");