// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Intel Corporation. All rights reserved. */
#include <linux/acpi.h>
#include <linux/xarray.h>
#include <linux/fw_table.h>
#include <linux/node.h>
#include <linux/overflow.h>
#include "cxlpci.h"
#include "cxlmem.h"
#include "core.h"
#include "cxl.h"

struct dsmas_entry {
	struct range dpa_range;
	u8 handle;
	struct access_coordinate coord[ACCESS_COORDINATE_MAX];
	struct access_coordinate cdat_coord[ACCESS_COORDINATE_MAX];
	int entries;
	int qos_class;
};

static u32 cdat_normalize(u16 entry, u64 base, u8 type)
{
	u32 value;

	/*
	 * Check for invalid and overflow values
	 */
	if (entry == 0xffff || !entry)
		return 0;
	if (base > (UINT_MAX / (entry)))
		return 0;

	/*
	 * CDAT fields follow the format of HMAT fields. See table 5 Device
	 * Scoped Latency and Bandwidth Information Structure in Coherent Device
	 * Attribute Table (CDAT) Specification v1.01.
	 */
	value = entry * base;
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
	case ACPI_HMAT_READ_LATENCY:
	case ACPI_HMAT_WRITE_LATENCY:
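		/* Latency values follow HMAT convention: picoseconds to nanoseconds */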
		value = DIV_ROUND_UP(value, 1000);
		break;
	default:
		break;
	}
	return value;
}

static int cdat_dsmas_handler(union acpi_subtable_headers *header, void *arg,
			      const unsigned long end)
{
	struct acpi_cdat_header *hdr = &header->cdat;
	struct acpi_cdat_dsmas *dsmas;
	int size = sizeof(*hdr) + sizeof(*dsmas);
	struct xarray *dsmas_xa = arg;
	struct dsmas_entry *dent;
	u16 len;
	int rc;

	len = le16_to_cpu((__force __le16)hdr->length);
	if (len != size || (unsigned long)hdr + len > end) {
		pr_warn("Malformed DSMAS table length: (%u:%u)\n", size, len);
		return -EINVAL;
	}

	/* Skip common header */
	dsmas = (struct acpi_cdat_dsmas *)(hdr + 1);

	dent = kzalloc(sizeof(*dent), GFP_KERNEL);
	if (!dent)
		return -ENOMEM;

	dent->handle = dsmas->dsmad_handle;
	dent->dpa_range.start = le64_to_cpu((__force __le64)dsmas->dpa_base_address);
	dent->dpa_range.end = le64_to_cpu((__force __le64)dsmas->dpa_base_address) +
			      le64_to_cpu((__force __le64)dsmas->dpa_length) - 1;

	rc = xa_insert(dsmas_xa, dent->handle, dent, GFP_KERNEL);
	if (rc) {
		kfree(dent);
		return rc;
	}

	return 0;
}

static void __cxl_access_coordinate_set(struct access_coordinate *coord,
					int access, unsigned int val)
{
	switch (access) {
	case ACPI_HMAT_ACCESS_LATENCY:
		coord->read_latency = val;
		coord->write_latency = val;
		break;
	case ACPI_HMAT_READ_LATENCY:
		coord->read_latency = val;
		break;
	case ACPI_HMAT_WRITE_LATENCY:
		coord->write_latency = val;
		break;
	case ACPI_HMAT_ACCESS_BANDWIDTH:
		coord->read_bandwidth = val;
		coord->write_bandwidth = val;
		break;
	case ACPI_HMAT_READ_BANDWIDTH:
		coord->read_bandwidth = val;
		break;
	case ACPI_HMAT_WRITE_BANDWIDTH:
		coord->write_bandwidth = val;
		break;
	}
}

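/* Apply the same CDAT-derived value to every access coordinate class */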
static void cxl_access_coordinate_set(struct access_coordinate *coord,
				      int access, unsigned int val)
{
	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
		__cxl_access_coordinate_set(&coord[i], access, val);
}

static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
			       const unsigned long end)
{
	struct acpi_cdat_header *hdr = &header->cdat;
	struct acpi_cdat_dslbis *dslbis;
	int size = sizeof(*hdr) + sizeof(*dslbis);
	struct xarray *dsmas_xa = arg;
	struct dsmas_entry *dent;
	__le64 le_base;
	__le16 le_val;
	u64 val;
	u16 len;

	len = le16_to_cpu((__force __le16)hdr->length);
	if (len != size || (unsigned long)hdr + len > end) {
		pr_warn("Malformed DSLBIS table length: (%u:%u)\n", size, len);
		return -EINVAL;
	}

	/* Skip common header */
	dslbis = (struct acpi_cdat_dslbis *)(hdr + 1);

	/* Skip unrecognized data type */
	if (dslbis->data_type > ACPI_HMAT_WRITE_BANDWIDTH)
		return 0;

	/* Not a memory type, skip */
	if ((dslbis->flags & ACPI_HMAT_MEMORY_HIERARCHY) != ACPI_HMAT_MEMORY)
		return 0;

	dent = xa_load(dsmas_xa, dslbis->handle);
	if (!dent) {
		pr_warn("No matching DSMAS entry for DSLBIS entry.\n");
		return 0;
	}

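	/*
	 * Only entry[0] is consumed here; it carries the latency or bandwidth
	 * value selected by data_type, scaled by entry_base_unit.
	 */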
	le_base = (__force __le64)dslbis->entry_base_unit;
	le_val = (__force __le16)dslbis->entry[0];
	val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
			     dslbis->data_type);

	cxl_access_coordinate_set(dent->cdat_coord, dslbis->data_type, val);

	return 0;
}

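/*
 * cdat_table_parse() returns the number of matching subtables handled or a
 * negative errno. Collapse that into 0 on success, -ENOENT when no subtables
 * were found, or the original error.
 */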
static int cdat_table_parse_output(int rc)
{
	if (rc < 0)
		return rc;
	if (rc == 0)
		return -ENOENT;

	return 0;
}

static int cxl_cdat_endpoint_process(struct cxl_port *port,
				     struct xarray *dsmas_xa)
{
	int rc;

	rc = cdat_table_parse(ACPI_CDAT_TYPE_DSMAS, cdat_dsmas_handler,
			      dsmas_xa, port->cdat.table, port->cdat.length);
	rc = cdat_table_parse_output(rc);
	if (rc)
		return rc;

	rc = cdat_table_parse(ACPI_CDAT_TYPE_DSLBIS, cdat_dslbis_handler,
			      dsmas_xa, port->cdat.table, port->cdat.length);
	return cdat_table_parse_output(rc);
}

static int cxl_port_perf_data_calculate(struct cxl_port *port,
					struct xarray *dsmas_xa)
{
	struct access_coordinate ep_c[ACCESS_COORDINATE_MAX];
	struct dsmas_entry *dent;
	int valid_entries = 0;
	unsigned long index;
	int rc;

	rc = cxl_endpoint_get_perf_coordinates(port, ep_c);
	if (rc) {
		dev_dbg(&port->dev, "Failed to retrieve ep perf coordinates.\n");
		return rc;
	}

	struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);

	if (!cxl_root)
		return -ENODEV;

	if (!cxl_root->ops || !cxl_root->ops->qos_class)
		return -EOPNOTSUPP;

	xa_for_each(dsmas_xa, index, dent) {
		int qos_class;

		cxl_coordinates_combine(dent->coord, dent->cdat_coord, ep_c);
		dent->entries = 1;
		rc = cxl_root->ops->qos_class(cxl_root,
					      &dent->coord[ACCESS_COORDINATE_CPU],
					      1, &qos_class);
		if (rc != 1)
			continue;

		valid_entries++;
		dent->qos_class = qos_class;
	}

	if (!valid_entries)
		return -ENOENT;

	return 0;
}

static void update_perf_entry(struct device *dev, struct dsmas_entry *dent,
			      struct cxl_dpa_perf *dpa_perf)
{
	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
		dpa_perf->coord[i] = dent->coord[i];
		dpa_perf->cdat_coord[i] = dent->cdat_coord[i];
	}
	dpa_perf->dpa_range = dent->dpa_range;
	dpa_perf->qos_class = dent->qos_class;
	dev_dbg(dev,
		"DSMAS: dpa: %pra qos: %d read_bw: %d write_bw: %d read_lat: %d write_lat: %d\n",
		&dent->dpa_range, dpa_perf->qos_class,
		dent->coord[ACCESS_COORDINATE_CPU].read_bandwidth,
		dent->coord[ACCESS_COORDINATE_CPU].write_bandwidth,
		dent->coord[ACCESS_COORDINATE_CPU].read_latency,
		dent->coord[ACCESS_COORDINATE_CPU].write_latency);
}

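/*
 * Match each DSMAS DPA range against the device's partitions and record the
 * performance data in the partition that contains it.
 */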
static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
				     struct xarray *dsmas_xa)
{
	struct device *dev = cxlds->dev;
	struct dsmas_entry *dent;
	unsigned long index;

	xa_for_each(dsmas_xa, index, dent) {
		bool found = false;

		for (int i = 0; i < cxlds->nr_partitions; i++) {
			struct resource *res = &cxlds->part[i].res;
			struct range range = {
				.start = res->start,
				.end = res->end,
			};

			if (range_contains(&range, &dent->dpa_range)) {
				update_perf_entry(dev, dent,
						  &cxlds->part[i].perf);
				found = true;
				break;
			}
		}

		if (!found)
			dev_dbg(dev, "no partition for dsmas dpa: %pra\n",
				&dent->dpa_range);
	}
}

static int match_cxlrd_qos_class(struct device *dev, void *data)
{
	int dev_qos_class = *(int *)data;
	struct cxl_root_decoder *cxlrd;

	if (!is_root_decoder(dev))
		return 0;

	cxlrd = to_cxl_root_decoder(dev);
	if (cxlrd->qos_class == CXL_QOS_CLASS_INVALID)
		return 0;

	if (cxlrd->qos_class == dev_qos_class)
		return 1;

	return 0;
}

static void reset_dpa_perf(struct cxl_dpa_perf *dpa_perf)
{
	*dpa_perf = (struct cxl_dpa_perf) {
		.qos_class = CXL_QOS_CLASS_INVALID,
	};
}

static bool cxl_qos_match(struct cxl_port *root_port,
			  struct cxl_dpa_perf *dpa_perf)
{
	if (dpa_perf->qos_class == CXL_QOS_CLASS_INVALID)
		return false;

	if (!device_for_each_child(&root_port->dev, &dpa_perf->qos_class,
				   match_cxlrd_qos_class))
		return false;

	return true;
}

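/* Return 1 if any target of this root decoder routes through @host_bridge */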
static int match_cxlrd_hb(struct device *dev, void *data)
{
	struct device *host_bridge = data;
	struct cxl_switch_decoder *cxlsd;
	struct cxl_root_decoder *cxlrd;

	if (!is_root_decoder(dev))
		return 0;

	cxlrd = to_cxl_root_decoder(dev);
	cxlsd = &cxlrd->cxlsd;

	guard(rwsem_read)(&cxl_rwsem.region);
	for (int i = 0; i < cxlsd->nr_targets; i++) {
		if (cxlsd->target[i] && host_bridge == cxlsd->target[i]->dport_dev)
			return 1;
	}

	return 0;
}

static void cxl_qos_class_verify(struct cxl_memdev *cxlmd)
{
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_port *root_port;

	struct cxl_root *cxl_root __free(put_cxl_root) =
		find_cxl_root(cxlmd->endpoint);

	/*
	 * No need to reset_dpa_perf() here as find_cxl_root() is guaranteed to
	 * succeed when called in the cxl_endpoint_port_probe() path.
	 */
	if (!cxl_root)
		return;

	root_port = &cxl_root->port;

	/*
	 * Save userspace from needing to check if a qos class has any matches
	 * by hiding qos class info if the memdev is not mapped by a root
	 * decoder, or the partition class does not match any root decoder
	 * class.
	 */
	if (!device_for_each_child(&root_port->dev,
				   cxlmd->endpoint->host_bridge,
				   match_cxlrd_hb)) {
		for (int i = 0; i < cxlds->nr_partitions; i++) {
			struct cxl_dpa_perf *perf = &cxlds->part[i].perf;

			reset_dpa_perf(perf);
		}
		return;
	}

	for (int i = 0; i < cxlds->nr_partitions; i++) {
		struct cxl_dpa_perf *perf = &cxlds->part[i].perf;

		if (!cxl_qos_match(root_port, perf))
			reset_dpa_perf(perf);
	}
}

static void discard_dsmas(struct xarray *xa)
{
	unsigned long index;
	void *ent;

	xa_for_each(xa, index, ent) {
		xa_erase(xa, index);
		kfree(ent);
	}
	xa_destroy(xa);
}
DEFINE_FREE(dsmas, struct xarray *, if (_T) discard_dsmas(_T))

void cxl_endpoint_parse_cdat(struct cxl_port *port)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct xarray __dsmas_xa;
	struct xarray *dsmas_xa __free(dsmas) = &__dsmas_xa;
	int rc;

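	/*
	 * Initialize the xarray before any early return so the __free(dsmas)
	 * cleanup always operates on a valid xarray.
	 */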
	xa_init(&__dsmas_xa);
	if (!port->cdat.table)
		return;

	rc = cxl_cdat_endpoint_process(port, dsmas_xa);
	if (rc < 0) {
		dev_dbg(&port->dev, "Failed to parse CDAT: %d\n", rc);
		return;
	}

	rc = cxl_port_perf_data_calculate(port, dsmas_xa);
	if (rc) {
		dev_dbg(&port->dev, "Failed to do perf coord calculations.\n");
		return;
	}

	cxl_memdev_set_qos_class(cxlds, dsmas_xa);
	cxl_qos_class_verify(cxlmd);
	cxl_memdev_update_perf(cxlmd);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_parse_cdat, "CXL");

static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
			       const unsigned long end)
{
	struct acpi_cdat_sslbis_table {
		struct acpi_cdat_header header;
		struct acpi_cdat_sslbis sslbis_header;
		struct acpi_cdat_sslbe entries[];
	} *tbl = (struct acpi_cdat_sslbis_table *)header;
	int size = sizeof(header->cdat) + sizeof(tbl->sslbis_header);
	struct acpi_cdat_sslbis *sslbis;
	struct cxl_dport *dport = arg;
	struct device *dev = &dport->port->dev;
	int remain, entries, i;
	u16 len;

	len = le16_to_cpu((__force __le16)header->cdat.length);
	remain = len - size;
	if (!remain || remain % sizeof(tbl->entries[0]) ||
	    (unsigned long)header + len > end) {
		dev_warn(dev, "Malformed SSLBIS table length: (%u)\n", len);
		return -EINVAL;
	}

	sslbis = &tbl->sslbis_header;
	/* Unrecognized data type, we can skip */
	if (sslbis->data_type > ACPI_HMAT_WRITE_BANDWIDTH)
		return 0;

	entries = remain / sizeof(tbl->entries[0]);
	if (struct_size(tbl, entries, entries) != len)
		return -EINVAL;

	for (i = 0; i < entries; i++) {
		u16 x = le16_to_cpu((__force __le16)tbl->entries[i].portx_id);
		u16 y = le16_to_cpu((__force __le16)tbl->entries[i].porty_id);
		__le64 le_base;
		__le16 le_val;
		u16 dsp_id;
		u64 val;

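		/*
		 * Each SSLBE names two port ids. Identify which side is the
		 * downstream switch port (the other being the upstream port
		 * or a wildcard) so it can be matched against dport->port_id.
		 */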
		switch (x) {
		case ACPI_CDAT_SSLBIS_US_PORT:
			dsp_id = y;
			break;
		case ACPI_CDAT_SSLBIS_ANY_PORT:
			switch (y) {
			case ACPI_CDAT_SSLBIS_US_PORT:
				dsp_id = x;
				break;
			case ACPI_CDAT_SSLBIS_ANY_PORT:
				dsp_id = ACPI_CDAT_SSLBIS_ANY_PORT;
				break;
			default:
				dsp_id = y;
				break;
			}
			break;
		default:
			dsp_id = x;
			break;
		}

		le_base = (__force __le64)tbl->sslbis_header.entry_base_unit;
		le_val = (__force __le16)tbl->entries[i].latency_or_bandwidth;
		val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
				     sslbis->data_type);

		if (dsp_id == ACPI_CDAT_SSLBIS_ANY_PORT ||
		    dsp_id == dport->port_id) {
			cxl_access_coordinate_set(dport->coord,
						  sslbis->data_type, val);
			return 0;
		}
	}

	return 0;
}

void cxl_switch_parse_cdat(struct cxl_dport *dport)
{
	struct cxl_port *port = dport->port;
	int rc;

	if (!port->cdat.table)
		return;

	rc = cdat_table_parse(ACPI_CDAT_TYPE_SSLBIS, cdat_sslbis_handler,
			      dport, port->cdat.table, port->cdat.length);
	rc = cdat_table_parse_output(rc);
	if (rc)
		dev_dbg(&port->dev, "Failed to parse SSLBIS: %d\n", rc);
}
EXPORT_SYMBOL_NS_GPL(cxl_switch_parse_cdat, "CXL");

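/*
 * Latencies accumulate along the path while bandwidth takes the minimum of
 * the two inputs; a zero bandwidth means "unknown" and leaves the output
 * untouched.
 */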
static void __cxl_coordinates_combine(struct access_coordinate *out,
				      struct access_coordinate *c1,
				      struct access_coordinate *c2)
{
	if (c1->write_bandwidth && c2->write_bandwidth)
		out->write_bandwidth = min(c1->write_bandwidth,
					   c2->write_bandwidth);
	out->write_latency = c1->write_latency + c2->write_latency;

	if (c1->read_bandwidth && c2->read_bandwidth)
		out->read_bandwidth = min(c1->read_bandwidth,
					  c2->read_bandwidth);
	out->read_latency = c1->read_latency + c2->read_latency;
}

/**
 * cxl_coordinates_combine - Combine the two input coordinates
 *
 * @out: Output coordinate of c1 and c2 combined
 * @c1: input coordinates
 * @c2: input coordinates
 */
void cxl_coordinates_combine(struct access_coordinate *out,
			     struct access_coordinate *c1,
			     struct access_coordinate *c2)
{
	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
		__cxl_coordinates_combine(&out[i], &c1[i], &c2[i]);
}

MODULE_IMPORT_NS("CXL");

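/* Sum the bandwidths of sibling paths that share an upstream link */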
static void cxl_bandwidth_add(struct access_coordinate *coord,
			      struct access_coordinate *c1,
			      struct access_coordinate *c2)
{
	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
		coord[i].read_bandwidth = c1[i].read_bandwidth +
					  c2[i].read_bandwidth;
		coord[i].write_bandwidth = c1[i].write_bandwidth +
					   c2[i].write_bandwidth;
	}
}

static bool dpa_perf_contains(struct cxl_dpa_perf *perf,
			      struct resource *dpa_res)
{
	struct range dpa = {
		.start = dpa_res->start,
		.end = dpa_res->end,
	};

	return range_contains(&perf->dpa_range, &dpa);
}

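/*
 * Return the performance data of the partition backing @cxled, or an error
 * pointer if the decoder's DPA range is not covered by that partition.
 */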
static struct cxl_dpa_perf *cxled_get_dpa_perf(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_dpa_perf *perf;

	if (cxled->part < 0)
		return ERR_PTR(-EINVAL);
	perf = &cxlds->part[cxled->part].perf;

	if (!perf)
		return ERR_PTR(-EINVAL);

	if (!dpa_perf_contains(perf, cxled->dpa_res))
		return ERR_PTR(-EINVAL);

	return perf;
}

/*
 * Transient context holding the running bandwidth calculation while walking
 * the port hierarchy to deal with a shared upstream link.
 */
struct cxl_perf_ctx {
	struct access_coordinate coord[ACCESS_COORDINATE_MAX];
	struct cxl_port *port;
};

/**
 * cxl_endpoint_gather_bandwidth - collect all the endpoint bandwidth in an xarray
 * @cxlr: CXL region for the bandwidth calculation
 * @cxled: endpoint decoder to start on
 * @usp_xa: (output) the xarray that collects all the bandwidth coordinates
 *          indexed by the upstream device with data of 'struct cxl_perf_ctx'.
 * @gp_is_root: (output) bool of whether the grandparent is cxl root.
 *
 * Return: 0 for success or -errno
 *
 * Collects aggregated endpoint bandwidth and stores it in an xarray indexed
 * by the switch upstream device or the RP device. Each endpoint contributes
 * the minimum of the DSLBIS bandwidth from the endpoint CDAT, the endpoint
 * upstream link bandwidth, and the SSLBIS bandwidth from the switch CDAT for
 * the switch upstream port to the downstream port that's associated with the
 * endpoint. If the device is directly connected to an RP, then no SSLBIS is
 * involved.
 */
static int cxl_endpoint_gather_bandwidth(struct cxl_region *cxlr,
					 struct cxl_endpoint_decoder *cxled,
					 struct xarray *usp_xa,
					 bool *gp_is_root)
{
	struct cxl_port *endpoint = to_cxl_port(cxled->cxld.dev.parent);
	struct cxl_port *parent_port = to_cxl_port(endpoint->dev.parent);
	struct cxl_port *gp_port = to_cxl_port(parent_port->dev.parent);
	struct access_coordinate pci_coord[ACCESS_COORDINATE_MAX];
	struct access_coordinate sw_coord[ACCESS_COORDINATE_MAX];
	struct access_coordinate ep_coord[ACCESS_COORDINATE_MAX];
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	struct cxl_perf_ctx *perf_ctx;
	struct cxl_dpa_perf *perf;
	unsigned long index;
	void *ptr;
	int rc;

	if (!dev_is_pci(cxlds->dev))
		return -ENODEV;

	if (cxlds->rcd)
		return -ENODEV;

	perf = cxled_get_dpa_perf(cxled);
	if (IS_ERR(perf))
		return PTR_ERR(perf);

	*gp_is_root = is_cxl_root(gp_port);

	/*
	 * If the grandparent is cxl root, then index is the root port,
	 * otherwise it's the parent switch upstream device.
	 */
	if (*gp_is_root)
		index = (unsigned long)endpoint->parent_dport->dport_dev;
	else
		index = (unsigned long)parent_port->uport_dev;

	perf_ctx = xa_load(usp_xa, index);
	if (!perf_ctx) {
		struct cxl_perf_ctx *c __free(kfree) =
			kzalloc(sizeof(*perf_ctx), GFP_KERNEL);

		if (!c)
			return -ENOMEM;
		ptr = xa_store(usp_xa, index, c, GFP_KERNEL);
		if (xa_is_err(ptr))
			return xa_err(ptr);
		perf_ctx = no_free_ptr(c);
		perf_ctx->port = parent_port;
	}

	/* Direct upstream link from EP bandwidth */
	rc = cxl_pci_get_bandwidth(pdev, pci_coord);
	if (rc < 0)
		return rc;

	/*
	 * Min of upstream link bandwidth and Endpoint CDAT bandwidth from
	 * DSLBIS.
	 */
	cxl_coordinates_combine(ep_coord, pci_coord, perf->cdat_coord);

	/*
	 * If grandparent port is root, then there's no switch involved and
	 * the endpoint is connected to a root port.
	 */
	if (!*gp_is_root) {
		/*
		 * Retrieve the switch SSLBIS for switch downstream port
		 * associated with the endpoint bandwidth.
		 */
		rc = cxl_port_get_switch_dport_bandwidth(endpoint, sw_coord);
		if (rc)
			return rc;

		/*
		 * Min of the earlier coordinates with the switch SSLBIS
		 * bandwidth
		 */
		cxl_coordinates_combine(ep_coord, ep_coord, sw_coord);
	}

	/*
	 * Aggregate the computed bandwidth with the current aggregated bandwidth
	 * of the endpoints with the same switch upstream device or RP.
	 */
	cxl_bandwidth_add(perf_ctx->coord, perf_ctx->coord, ep_coord);

	return 0;
}

static void free_perf_xa(struct xarray *xa)
{
	struct cxl_perf_ctx *ctx;
	unsigned long index;

	if (!xa)
		return;

	xa_for_each(xa, index, ctx)
		kfree(ctx);
	xa_destroy(xa);
	kfree(xa);
}
DEFINE_FREE(free_perf_xa, struct xarray *, if (_T) free_perf_xa(_T))

/**
 * cxl_switch_gather_bandwidth - collect all the bandwidth at switch level in an xarray
 * @cxlr: The region being operated on
 * @input_xa: xarray indexed by upstream device of a switch with data of 'struct
 *            cxl_perf_ctx'
 * @gp_is_root: (output) bool of whether the grandparent is cxl root.
 *
 * Return: an xarray of resulting cxl_perf_ctx per parent switch or root port,
 *         or ERR_PTR(-errno)
 *
 * Iterate through the xarray. Take the minimum of the calculated downstream
 * bandwidth, the upstream link bandwidth, and the SSLBIS of the upstream
 * switch if one exists. Sum the resulting bandwidth under the switch upstream
 * device or an RP device. The function can be called repeatedly, once per
 * level, when multiple switch levels are present.
 */
static struct xarray *cxl_switch_gather_bandwidth(struct cxl_region *cxlr,
						  struct xarray *input_xa,
						  bool *gp_is_root)
{
	struct xarray *res_xa __free(free_perf_xa) =
		kzalloc(sizeof(*res_xa), GFP_KERNEL);
	struct access_coordinate coords[ACCESS_COORDINATE_MAX];
	struct cxl_perf_ctx *ctx, *us_ctx;
	unsigned long index, us_index;
	int dev_count = 0;
	int gp_count = 0;
	void *ptr;
	int rc;

	if (!res_xa)
		return ERR_PTR(-ENOMEM);
	xa_init(res_xa);

	xa_for_each(input_xa, index, ctx) {
		struct device *dev = (struct device *)index;
		struct cxl_port *port = ctx->port;
		struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
		struct cxl_port *gp_port = to_cxl_port(parent_port->dev.parent);
		struct cxl_dport *dport = port->parent_dport;
		bool is_root = false;

		dev_count++;
		if (is_cxl_root(gp_port)) {
			is_root = true;
			gp_count++;
		}

		/*
		 * If the grandparent is cxl root, then index is the root port,
		 * otherwise it's the parent switch upstream device.
		 */
		if (is_root)
			us_index = (unsigned long)port->parent_dport->dport_dev;
		else
			us_index = (unsigned long)parent_port->uport_dev;

		us_ctx = xa_load(res_xa, us_index);
		if (!us_ctx) {
			struct cxl_perf_ctx *n __free(kfree) =
				kzalloc(sizeof(*n), GFP_KERNEL);

			if (!n)
				return ERR_PTR(-ENOMEM);

			ptr = xa_store(res_xa, us_index, n, GFP_KERNEL);
			if (xa_is_err(ptr))
				return ERR_PTR(xa_err(ptr));
			us_ctx = no_free_ptr(n);
			us_ctx->port = parent_port;
		}

		/*
		 * If the device isn't an upstream PCIe port, there's something
		 * wrong with the topology.
		 */
		if (!dev_is_pci(dev))
			return ERR_PTR(-EINVAL);

		/* Retrieve the upstream link bandwidth */
		rc = cxl_pci_get_bandwidth(to_pci_dev(dev), coords);
		if (rc)
			return ERR_PTR(-ENXIO);

		/*
		 * Take the min of downstream bandwidth and the upstream link
		 * bandwidth.
		 */
		cxl_coordinates_combine(coords, coords, ctx->coord);

		/*
		 * Take the min of the calculated bandwidth and the upstream
		 * switch SSLBIS bandwidth if there's a parent switch
		 */
		if (!is_root)
			cxl_coordinates_combine(coords, coords, dport->coord);

		/*
		 * Aggregate the calculated bandwidth common to an upstream
		 * switch.
		 */
		cxl_bandwidth_add(us_ctx->coord, us_ctx->coord, coords);
	}

	/* Asymmetric topology detected. */
	if (gp_count) {
		if (gp_count != dev_count) {
			dev_dbg(&cxlr->dev,
				"Asymmetric hierarchy detected, bandwidth not updated\n");
			return ERR_PTR(-EOPNOTSUPP);
		}
		*gp_is_root = true;
	}

	return no_free_ptr(res_xa);
}

/**
 * cxl_rp_gather_bandwidth - handle the root port level bandwidth collection
 * @xa: the xarray that holds the cxl_perf_ctx that has the bandwidth calculated
 *      below each root port device.
 *
 * Return: xarray that holds cxl_perf_ctx per host bridge or ERR_PTR(-errno)
 */
static struct xarray *cxl_rp_gather_bandwidth(struct xarray *xa)
{
	struct xarray *hb_xa __free(free_perf_xa) =
		kzalloc(sizeof(*hb_xa), GFP_KERNEL);
	struct cxl_perf_ctx *ctx;
	unsigned long index;

	if (!hb_xa)
		return ERR_PTR(-ENOMEM);
	xa_init(hb_xa);

	xa_for_each(xa, index, ctx) {
		struct cxl_port *port = ctx->port;
		unsigned long hb_index = (unsigned long)port->uport_dev;
		struct cxl_perf_ctx *hb_ctx;
		void *ptr;

		hb_ctx = xa_load(hb_xa, hb_index);
		if (!hb_ctx) {
			struct cxl_perf_ctx *n __free(kfree) =
				kzalloc(sizeof(*n), GFP_KERNEL);

			if (!n)
				return ERR_PTR(-ENOMEM);
			ptr = xa_store(hb_xa, hb_index, n, GFP_KERNEL);
			if (xa_is_err(ptr))
				return ERR_PTR(xa_err(ptr));
			hb_ctx = no_free_ptr(n);
			hb_ctx->port = port;
		}

		cxl_bandwidth_add(hb_ctx->coord, hb_ctx->coord, ctx->coord);
	}

	return no_free_ptr(hb_xa);
}

/**
 * cxl_hb_gather_bandwidth - handle the host bridge level bandwidth collection
 * @xa: the xarray that holds the cxl_perf_ctx that has the bandwidth calculated
 *      below each host bridge.
 *
 * Return: xarray that holds cxl_perf_ctx per ACPI0017 device or ERR_PTR(-errno)
 */
static struct xarray *cxl_hb_gather_bandwidth(struct xarray *xa)
{
	struct xarray *mw_xa __free(free_perf_xa) =
		kzalloc(sizeof(*mw_xa), GFP_KERNEL);
	struct cxl_perf_ctx *ctx;
	unsigned long index;

	if (!mw_xa)
		return ERR_PTR(-ENOMEM);
	xa_init(mw_xa);

	xa_for_each(xa, index, ctx) {
		struct cxl_port *port = ctx->port;
		struct cxl_port *parent_port;
		struct cxl_perf_ctx *mw_ctx;
		struct cxl_dport *dport;
		unsigned long mw_index;
		void *ptr;

		parent_port = to_cxl_port(port->dev.parent);
		mw_index = (unsigned long)parent_port->uport_dev;

		mw_ctx = xa_load(mw_xa, mw_index);
		if (!mw_ctx) {
			struct cxl_perf_ctx *n __free(kfree) =
				kzalloc(sizeof(*n), GFP_KERNEL);

			if (!n)
				return ERR_PTR(-ENOMEM);
			ptr = xa_store(mw_xa, mw_index, n, GFP_KERNEL);
			if (xa_is_err(ptr))
				return ERR_PTR(xa_err(ptr));
			mw_ctx = no_free_ptr(n);
		}

		dport = port->parent_dport;
		cxl_coordinates_combine(ctx->coord, ctx->coord, dport->coord);
		cxl_bandwidth_add(mw_ctx->coord, mw_ctx->coord, ctx->coord);
	}

	return no_free_ptr(mw_xa);
}

/**
 * cxl_region_update_bandwidth - Update the bandwidth access coordinates of a region
 * @cxlr: The region being operated on
 * @input_xa: xarray holds cxl_perf_ctx with calculated bandwidth per ACPI0017 instance
 */
static void cxl_region_update_bandwidth(struct cxl_region *cxlr,
					struct xarray *input_xa)
{
	struct access_coordinate coord[ACCESS_COORDINATE_MAX];
	struct cxl_perf_ctx *ctx;
	unsigned long index;

	memset(coord, 0, sizeof(coord));
	xa_for_each(input_xa, index, ctx)
		cxl_bandwidth_add(coord, coord, ctx->coord);

	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
		cxlr->coord[i].read_bandwidth = coord[i].read_bandwidth;
		cxlr->coord[i].write_bandwidth = coord[i].write_bandwidth;
	}
}

/**
 * cxl_region_shared_upstream_bandwidth_update - Recalculate the bandwidth for
 *                                               the region
 * @cxlr: the cxl region to recalculate
 *
 * The function walks the topology from the bottom up and calculates the
 * bandwidth. It starts at the endpoints, processes the switch levels if any,
 * then the root port level, then the host bridge level, and finally
 * aggregates at the region.
 */
void cxl_region_shared_upstream_bandwidth_update(struct cxl_region *cxlr)
{
	struct xarray *working_xa;
	int root_count = 0;
	bool is_root;
	int rc;

	lockdep_assert_held(&cxl_rwsem.dpa);

	struct xarray *usp_xa __free(free_perf_xa) =
		kzalloc(sizeof(*usp_xa), GFP_KERNEL);

	if (!usp_xa)
		return;

	xa_init(usp_xa);

	/* Collect bandwidth data from all the endpoints. */
	for (int i = 0; i < cxlr->params.nr_targets; i++) {
		struct cxl_endpoint_decoder *cxled = cxlr->params.targets[i];

		is_root = false;
		rc = cxl_endpoint_gather_bandwidth(cxlr, cxled, usp_xa, &is_root);
		if (rc)
			return;
		root_count += is_root;
	}

	/* Detect asymmetric hierarchy with some direct attached endpoints. */
	if (root_count && root_count != cxlr->params.nr_targets) {
		dev_dbg(&cxlr->dev,
			"Asymmetric hierarchy detected, bandwidth not updated\n");
		return;
	}

	/*
	 * Walk up one or more switches to deal with the bandwidth of the
	 * switches if they exist. Endpoints directly attached to RPs skip
	 * over this part.
	 */
	if (!root_count) {
		do {
			working_xa = cxl_switch_gather_bandwidth(cxlr, usp_xa,
								 &is_root);
			if (IS_ERR(working_xa))
				return;
			free_perf_xa(usp_xa);
			usp_xa = working_xa;
		} while (!is_root);
	}

	/* Handle the bandwidth at the root port of the hierarchy */
	working_xa = cxl_rp_gather_bandwidth(usp_xa);
	if (IS_ERR(working_xa))
		return;
	free_perf_xa(usp_xa);
	usp_xa = working_xa;

	/* Handle the bandwidth at the host bridge of the hierarchy */
	working_xa = cxl_hb_gather_bandwidth(usp_xa);
	if (IS_ERR(working_xa))
		return;
	free_perf_xa(usp_xa);
	usp_xa = working_xa;

	/*
	 * Aggregate all the bandwidth collected per CFMWS (ACPI0017) and
	 * update the region bandwidth with the final calculated values.
	 */
	cxl_region_update_bandwidth(cxlr, usp_xa);
}

void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
				    struct cxl_endpoint_decoder *cxled)
{
	struct cxl_dpa_perf *perf;

	lockdep_assert_held(&cxl_rwsem.dpa);

	perf = cxled_get_dpa_perf(cxled);
	if (IS_ERR(perf))
		return;

	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
		/* Get total bandwidth and the worst latency for the cxl region */
		cxlr->coord[i].read_latency = max_t(unsigned int,
						    cxlr->coord[i].read_latency,
						    perf->coord[i].read_latency);
		cxlr->coord[i].write_latency = max_t(unsigned int,
						     cxlr->coord[i].write_latency,
						     perf->coord[i].write_latency);
		cxlr->coord[i].read_bandwidth += perf->coord[i].read_bandwidth;
		cxlr->coord[i].write_bandwidth += perf->coord[i].write_bandwidth;
	}
}