xref: /linux/drivers/cxl/core/cdat.c (revision b1966a1fd218e1f5d5376bf352f9a4c26aba50b5)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2023 Intel Corporation. All rights reserved. */
3 #include <linux/acpi.h>
4 #include <linux/xarray.h>
5 #include <linux/fw_table.h>
6 #include <linux/node.h>
7 #include <linux/overflow.h>
8 #include "cxlpci.h"
9 #include "cxlmem.h"
10 #include "core.h"
11 #include "cxl.h"
12 
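/*
 * Parsed DSMAS record: the device DPA range and handle, the raw CDAT
 * coordinates (cdat_coord), the coordinates combined with the CPU path
 * (coord), and the QoS class assigned by the platform root driver.
 */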
13 struct dsmas_entry {
14 	struct range dpa_range;
15 	u8 handle;
16 	struct access_coordinate coord[ACCESS_COORDINATE_MAX];
17 	struct access_coordinate cdat_coord[ACCESS_COORDINATE_MAX];
18 	int entries;
19 	int qos_class;
20 };
21 
22 static u32 cdat_normalize(u16 entry, u64 base, u8 type)
23 {
24 	u32 value;
25 
26 	/*
27 	 * Check for invalid and overflow values
28 	 */
29 	if (entry == 0xffff || !entry)
30 		return 0;
31 	else if (base > (UINT_MAX / (entry)))
32 		return 0;
33 
34 	/*
35 	 * CDAT fields follow the format of HMAT fields. See table 5 Device
36 	 * Scoped Latency and Bandwidth Information Structure in Coherent Device
37 	 * Attribute Table (CDAT) Specification v1.01.
38 	 */
39 	value = entry * base;
40 	switch (type) {
41 	case ACPI_HMAT_ACCESS_LATENCY:
42 	case ACPI_HMAT_READ_LATENCY:
43 	case ACPI_HMAT_WRITE_LATENCY:
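		/*
		 * Latency entries follow the HMAT convention of picoseconds;
		 * convert to nanoseconds for struct access_coordinate.
		 */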
44 		value = DIV_ROUND_UP(value, 1000);
45 		break;
46 	default:
47 		break;
48 	}
49 	return value;
50 }
51 
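/*
 * Callback for cdat_table_parse(ACPI_CDAT_TYPE_DSMAS): validate the record
 * length and stash the DPA range in the caller's xarray, indexed by the
 * DSMAS handle.
 */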
52 static int cdat_dsmas_handler(union acpi_subtable_headers *header, void *arg,
53 			      const unsigned long end)
54 {
55 	struct acpi_cdat_header *hdr = &header->cdat;
56 	struct acpi_cdat_dsmas *dsmas;
57 	int size = sizeof(*hdr) + sizeof(*dsmas);
58 	struct xarray *dsmas_xa = arg;
59 	struct dsmas_entry *dent;
60 	u16 len;
61 	int rc;
62 
63 	len = le16_to_cpu((__force __le16)hdr->length);
64 	if (len != size || (unsigned long)hdr + len > end) {
65 		pr_warn("Malformed DSMAS table length: (%u:%u)\n", size, len);
66 		return -EINVAL;
67 	}
68 
69 	/* Skip common header */
70 	dsmas = (struct acpi_cdat_dsmas *)(hdr + 1);
71 
72 	dent = kzalloc(sizeof(*dent), GFP_KERNEL);
73 	if (!dent)
74 		return -ENOMEM;
75 
76 	dent->handle = dsmas->dsmad_handle;
77 	dent->dpa_range.start = le64_to_cpu((__force __le64)dsmas->dpa_base_address);
78 	dent->dpa_range.end = le64_to_cpu((__force __le64)dsmas->dpa_base_address) +
79 			      le64_to_cpu((__force __le64)dsmas->dpa_length) - 1;
80 
81 	rc = xa_insert(dsmas_xa, dent->handle, dent, GFP_KERNEL);
82 	if (rc) {
83 		kfree(dent);
84 		return rc;
85 	}
86 
87 	return 0;
88 }
89 
90 static void __cxl_access_coordinate_set(struct access_coordinate *coord,
91 					int access, unsigned int val)
92 {
93 	switch (access) {
94 	case ACPI_HMAT_ACCESS_LATENCY:
95 		coord->read_latency = val;
96 		coord->write_latency = val;
97 		break;
98 	case ACPI_HMAT_READ_LATENCY:
99 		coord->read_latency = val;
100 		break;
101 	case ACPI_HMAT_WRITE_LATENCY:
102 		coord->write_latency = val;
103 		break;
104 	case ACPI_HMAT_ACCESS_BANDWIDTH:
105 		coord->read_bandwidth = val;
106 		coord->write_bandwidth = val;
107 		break;
108 	case ACPI_HMAT_READ_BANDWIDTH:
109 		coord->read_bandwidth = val;
110 		break;
111 	case ACPI_HMAT_WRITE_BANDWIDTH:
112 		coord->write_bandwidth = val;
113 		break;
114 	}
115 }
116 
117 static void cxl_access_coordinate_set(struct access_coordinate *coord,
118 				      int access, unsigned int val)
119 {
120 	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
121 		__cxl_access_coordinate_set(&coord[i], access, val);
122 }
123 
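/*
 * Callback for cdat_table_parse(ACPI_CDAT_TYPE_DSLBIS): normalize the
 * latency/bandwidth entry and record it in the DSMAS entry with the
 * matching handle, skipping non-memory targets and unknown data types.
 */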
124 static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
125 			       const unsigned long end)
126 {
127 	struct acpi_cdat_header *hdr = &header->cdat;
128 	struct acpi_cdat_dslbis *dslbis;
129 	int size = sizeof(*hdr) + sizeof(*dslbis);
130 	struct xarray *dsmas_xa = arg;
131 	struct dsmas_entry *dent;
132 	__le64 le_base;
133 	__le16 le_val;
134 	u64 val;
135 	u16 len;
136 
137 	len = le16_to_cpu((__force __le16)hdr->length);
138 	if (len != size || (unsigned long)hdr + len > end) {
139 		pr_warn("Malformed DSLBIS table length: (%u:%u)\n", size, len);
140 		return -EINVAL;
141 	}
142 
143 	/* Skip common header */
144 	dslbis = (struct acpi_cdat_dslbis *)(hdr + 1);
145 
146 	/* Skip unrecognized data type */
147 	if (dslbis->data_type > ACPI_HMAT_WRITE_BANDWIDTH)
148 		return 0;
149 
150 	/* Not a memory type, skip */
151 	if ((dslbis->flags & ACPI_HMAT_MEMORY_HIERARCHY) != ACPI_HMAT_MEMORY)
152 		return 0;
153 
154 	dent = xa_load(dsmas_xa, dslbis->handle);
155 	if (!dent) {
156 		pr_warn("No matching DSMAS entry for DSLBIS entry.\n");
157 		return 0;
158 	}
159 
160 	le_base = (__force __le64)dslbis->entry_base_unit;
161 	le_val = (__force __le16)dslbis->entry[0];
162 	val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
163 			     dslbis->data_type);
164 
165 	cxl_access_coordinate_set(dent->cdat_coord, dslbis->data_type, val);
166 
167 	return 0;
168 }
169 
170 static int cdat_table_parse_output(int rc)
171 {
172 	if (rc < 0)
173 		return rc;
174 	if (rc == 0)
175 		return -ENOENT;
176 
177 	return 0;
178 }
179 
180 static int cxl_cdat_endpoint_process(struct cxl_port *port,
181 				     struct xarray *dsmas_xa)
182 {
183 	int rc;
184 
185 	rc = cdat_table_parse(ACPI_CDAT_TYPE_DSMAS, cdat_dsmas_handler,
186 			      dsmas_xa, port->cdat.table, port->cdat.length);
187 	rc = cdat_table_parse_output(rc);
188 	if (rc)
189 		return rc;
190 
191 	rc = cdat_table_parse(ACPI_CDAT_TYPE_DSLBIS, cdat_dslbis_handler,
192 			      dsmas_xa, port->cdat.table, port->cdat.length);
193 	return cdat_table_parse_output(rc);
194 }
195 
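/*
 * Combine each DSMAS entry's CDAT coordinates with the endpoint's CXL path
 * coordinates and ask the root driver's ->qos_class() for a matching QoS
 * class. Returns -ENOENT if no entry received a valid QoS class.
 */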
196 static int cxl_port_perf_data_calculate(struct cxl_port *port,
197 					struct xarray *dsmas_xa)
198 {
199 	struct access_coordinate ep_c[ACCESS_COORDINATE_MAX];
200 	struct dsmas_entry *dent;
201 	int valid_entries = 0;
202 	unsigned long index;
203 	int rc;
204 
205 	rc = cxl_endpoint_get_perf_coordinates(port, ep_c);
206 	if (rc) {
207 		dev_dbg(&port->dev, "Failed to retrieve ep perf coordinates.\n");
208 		return rc;
209 	}
210 
211 	struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);
212 
213 	if (!cxl_root)
214 		return -ENODEV;
215 
216 	if (!cxl_root->ops || !cxl_root->ops->qos_class)
217 		return -EOPNOTSUPP;
218 
219 	xa_for_each(dsmas_xa, index, dent) {
220 		int qos_class;
221 
222 		cxl_coordinates_combine(dent->coord, dent->cdat_coord, ep_c);
223 		dent->entries = 1;
224 		rc = cxl_root->ops->qos_class(cxl_root,
225 					      &dent->coord[ACCESS_COORDINATE_CPU],
226 					      1, &qos_class);
227 		if (rc != 1)
228 			continue;
229 
230 		valid_entries++;
231 		dent->qos_class = qos_class;
232 	}
233 
234 	if (!valid_entries)
235 		return -ENOENT;
236 
237 	return 0;
238 }
239 
240 static void update_perf_entry(struct device *dev, struct dsmas_entry *dent,
241 			      struct cxl_dpa_perf *dpa_perf)
242 {
243 	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
244 		dpa_perf->coord[i] = dent->coord[i];
245 		dpa_perf->cdat_coord[i] = dent->cdat_coord[i];
246 	}
247 	dpa_perf->dpa_range = dent->dpa_range;
248 	dpa_perf->qos_class = dent->qos_class;
249 	dev_dbg(dev,
250 		"DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n",
251 		dent->dpa_range.start, dpa_perf->qos_class,
252 		dent->coord[ACCESS_COORDINATE_CPU].read_bandwidth,
253 		dent->coord[ACCESS_COORDINATE_CPU].write_bandwidth,
254 		dent->coord[ACCESS_COORDINATE_CPU].read_latency,
255 		dent->coord[ACCESS_COORDINATE_CPU].write_latency);
256 }
257 
258 static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
259 				     struct xarray *dsmas_xa)
260 {
261 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
262 	struct device *dev = cxlds->dev;
263 	struct range pmem_range = {
264 		.start = cxlds->pmem_res.start,
265 		.end = cxlds->pmem_res.end,
266 	};
267 	struct range ram_range = {
268 		.start = cxlds->ram_res.start,
269 		.end = cxlds->ram_res.end,
270 	};
271 	struct dsmas_entry *dent;
272 	unsigned long index;
273 
274 	xa_for_each(dsmas_xa, index, dent) {
275 		if (resource_size(&cxlds->ram_res) &&
276 		    range_contains(&ram_range, &dent->dpa_range))
277 			update_perf_entry(dev, dent, &mds->ram_perf);
278 		else if (resource_size(&cxlds->pmem_res) &&
279 			 range_contains(&pmem_range, &dent->dpa_range))
280 			update_perf_entry(dev, dent, &mds->pmem_perf);
281 		else
282 			dev_dbg(dev, "no partition for dsmas dpa: %#llx\n",
283 				dent->dpa_range.start);
284 	}
285 }
286 
287 static int match_cxlrd_qos_class(struct device *dev, void *data)
288 {
289 	int dev_qos_class = *(int *)data;
290 	struct cxl_root_decoder *cxlrd;
291 
292 	if (!is_root_decoder(dev))
293 		return 0;
294 
295 	cxlrd = to_cxl_root_decoder(dev);
296 	if (cxlrd->qos_class == CXL_QOS_CLASS_INVALID)
297 		return 0;
298 
299 	if (cxlrd->qos_class == dev_qos_class)
300 		return 1;
301 
302 	return 0;
303 }
304 
305 static void reset_dpa_perf(struct cxl_dpa_perf *dpa_perf)
306 {
307 	*dpa_perf = (struct cxl_dpa_perf) {
308 		.qos_class = CXL_QOS_CLASS_INVALID,
309 	};
310 }
311 
312 static bool cxl_qos_match(struct cxl_port *root_port,
313 			  struct cxl_dpa_perf *dpa_perf)
314 {
315 	if (dpa_perf->qos_class == CXL_QOS_CLASS_INVALID)
316 		return false;
317 
318 	if (!device_for_each_child(&root_port->dev, &dpa_perf->qos_class,
319 				   match_cxlrd_qos_class))
320 		return false;
321 
322 	return true;
323 }
324 
325 static int match_cxlrd_hb(struct device *dev, void *data)
326 {
327 	struct device *host_bridge = data;
328 	struct cxl_switch_decoder *cxlsd;
329 	struct cxl_root_decoder *cxlrd;
330 
331 	if (!is_root_decoder(dev))
332 		return 0;
333 
334 	cxlrd = to_cxl_root_decoder(dev);
335 	cxlsd = &cxlrd->cxlsd;
336 
337 	guard(rwsem_read)(&cxl_region_rwsem);
338 	for (int i = 0; i < cxlsd->nr_targets; i++) {
339 		if (host_bridge == cxlsd->target[i]->dport_dev)
340 			return 1;
341 	}
342 
343 	return 0;
344 }
345 
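/*
 * Verify the device's ram/pmem QoS classes against the root decoders:
 * reset any cxl_dpa_perf whose class has no matching root decoder, and
 * reset both if the endpoint's host bridge is not a target of any root
 * decoder.
 */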
346 static int cxl_qos_class_verify(struct cxl_memdev *cxlmd)
347 {
348 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
349 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
350 	struct cxl_port *root_port;
351 	int rc;
352 
353 	struct cxl_root *cxl_root __free(put_cxl_root) =
354 		find_cxl_root(cxlmd->endpoint);
355 
356 	if (!cxl_root)
357 		return -ENODEV;
358 
359 	root_port = &cxl_root->port;
360 
361 	/* Check that the QTG IDs are all sane between end device and root decoders */
362 	if (!cxl_qos_match(root_port, &mds->ram_perf))
363 		reset_dpa_perf(&mds->ram_perf);
364 	if (!cxl_qos_match(root_port, &mds->pmem_perf))
365 		reset_dpa_perf(&mds->pmem_perf);
366 
367 	/* Check to make sure that the device's host bridge is under a root decoder */
368 	rc = device_for_each_child(&root_port->dev,
369 				   cxlmd->endpoint->host_bridge, match_cxlrd_hb);
370 	if (!rc) {
371 		reset_dpa_perf(&mds->ram_perf);
372 		reset_dpa_perf(&mds->pmem_perf);
373 	}
374 
375 	return rc;
376 }
377 
378 static void discard_dsmas(struct xarray *xa)
379 {
380 	unsigned long index;
381 	void *ent;
382 
383 	xa_for_each(xa, index, ent) {
384 		xa_erase(xa, index);
385 		kfree(ent);
386 	}
387 	xa_destroy(xa);
388 }
389 DEFINE_FREE(dsmas, struct xarray *, if (_T) discard_dsmas(_T))
390 
391 void cxl_endpoint_parse_cdat(struct cxl_port *port)
392 {
393 	struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
394 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
395 	struct xarray __dsmas_xa;
396 	struct xarray *dsmas_xa __free(dsmas) = &__dsmas_xa;
397 	int rc;
398 
399 	xa_init(&__dsmas_xa);
400 	if (!port->cdat.table)
401 		return;
402 
403 	rc = cxl_cdat_endpoint_process(port, dsmas_xa);
404 	if (rc < 0) {
405 		dev_dbg(&port->dev, "Failed to parse CDAT: %d\n", rc);
406 		return;
407 	}
408 
409 	rc = cxl_port_perf_data_calculate(port, dsmas_xa);
410 	if (rc) {
411 		dev_dbg(&port->dev, "Failed to do perf coord calculations.\n");
412 		return;
413 	}
414 
415 	cxl_memdev_set_qos_class(cxlds, dsmas_xa);
416 	cxl_qos_class_verify(cxlmd);
417 	cxl_memdev_update_perf(cxlmd);
418 }
419 EXPORT_SYMBOL_NS_GPL(cxl_endpoint_parse_cdat, CXL);
420 
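/*
 * Callback for cdat_table_parse(ACPI_CDAT_TYPE_SSLBIS): for each port-pair
 * entry, work out which downstream port ID it describes (or ANY_PORT) and
 * update the matching dport's access coordinates with the normalized value.
 */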
421 static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
422 			       const unsigned long end)
423 {
424 	struct acpi_cdat_sslbis_table {
425 		struct acpi_cdat_header header;
426 		struct acpi_cdat_sslbis sslbis_header;
427 		struct acpi_cdat_sslbe entries[];
428 	} *tbl = (struct acpi_cdat_sslbis_table *)header;
429 	int size = sizeof(header->cdat) + sizeof(tbl->sslbis_header);
430 	struct acpi_cdat_sslbis *sslbis;
431 	struct cxl_port *port = arg;
432 	struct device *dev = &port->dev;
433 	int remain, entries, i;
434 	u16 len;
435 
436 	len = le16_to_cpu((__force __le16)header->cdat.length);
437 	remain = len - size;
438 	if (!remain || remain % sizeof(tbl->entries[0]) ||
439 	    (unsigned long)header + len > end) {
440 		dev_warn(dev, "Malformed SSLBIS table length: (%u)\n", len);
441 		return -EINVAL;
442 	}
443 
444 	sslbis = &tbl->sslbis_header;
445 	/* Unrecognized data type, we can skip */
446 	if (sslbis->data_type > ACPI_HMAT_WRITE_BANDWIDTH)
447 		return 0;
448 
449 	entries = remain / sizeof(tbl->entries[0]);
450 	if (struct_size(tbl, entries, entries) != len)
451 		return -EINVAL;
452 
453 	for (i = 0; i < entries; i++) {
454 		u16 x = le16_to_cpu((__force __le16)tbl->entries[i].portx_id);
455 		u16 y = le16_to_cpu((__force __le16)tbl->entries[i].porty_id);
456 		__le64 le_base;
457 		__le16 le_val;
458 		struct cxl_dport *dport;
459 		unsigned long index;
460 		u16 dsp_id;
461 		u64 val;
462 
463 		switch (x) {
464 		case ACPI_CDAT_SSLBIS_US_PORT:
465 			dsp_id = y;
466 			break;
467 		case ACPI_CDAT_SSLBIS_ANY_PORT:
468 			switch (y) {
469 			case ACPI_CDAT_SSLBIS_US_PORT:
470 				dsp_id = x;
471 				break;
472 			case ACPI_CDAT_SSLBIS_ANY_PORT:
473 				dsp_id = ACPI_CDAT_SSLBIS_ANY_PORT;
474 				break;
475 			default:
476 				dsp_id = y;
477 				break;
478 			}
479 			break;
480 		default:
481 			dsp_id = x;
482 			break;
483 		}
484 
485 		le_base = (__force __le64)tbl->sslbis_header.entry_base_unit;
486 		le_val = (__force __le16)tbl->entries[i].latency_or_bandwidth;
487 		val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
488 				     sslbis->data_type);
489 
490 		xa_for_each(&port->dports, index, dport) {
491 			if (dsp_id == ACPI_CDAT_SSLBIS_ANY_PORT ||
492 			    dsp_id == dport->port_id) {
493 				cxl_access_coordinate_set(dport->coord,
494 							  sslbis->data_type,
495 							  val);
496 			}
497 		}
498 	}
499 
500 	return 0;
501 }
502 
503 void cxl_switch_parse_cdat(struct cxl_port *port)
504 {
505 	int rc;
506 
507 	if (!port->cdat.table)
508 		return;
509 
510 	rc = cdat_table_parse(ACPI_CDAT_TYPE_SSLBIS, cdat_sslbis_handler,
511 			      port, port->cdat.table, port->cdat.length);
512 	rc = cdat_table_parse_output(rc);
513 	if (rc)
514 		dev_dbg(&port->dev, "Failed to parse SSLBIS: %d\n", rc);
515 }
516 EXPORT_SYMBOL_NS_GPL(cxl_switch_parse_cdat, CXL);
517 
518 static void __cxl_coordinates_combine(struct access_coordinate *out,
519 				      struct access_coordinate *c1,
520 				      struct access_coordinate *c2)
521 {
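		/* Bandwidth is limited by the slowest hop; latencies accumulate */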
522 		if (c1->write_bandwidth && c2->write_bandwidth)
523 			out->write_bandwidth = min(c1->write_bandwidth,
524 						   c2->write_bandwidth);
525 		out->write_latency = c1->write_latency + c2->write_latency;
526 
527 		if (c1->read_bandwidth && c2->read_bandwidth)
528 			out->read_bandwidth = min(c1->read_bandwidth,
529 						  c2->read_bandwidth);
530 		out->read_latency = c1->read_latency + c2->read_latency;
531 }
532 
533 /**
534  * cxl_coordinates_combine - Combine the two input coordinates
535  *
536  * @out: Output coordinate of c1 and c2 combined
537  * @c1: input coordinates
538  * @c2: input coordinates
539  */
540 void cxl_coordinates_combine(struct access_coordinate *out,
541 			     struct access_coordinate *c1,
542 			     struct access_coordinate *c2)
543 {
544 	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
545 		__cxl_coordinates_combine(&out[i], &c1[i], &c2[i]);
546 }
547 
548 MODULE_IMPORT_NS(CXL);
549 
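/* Element-wise sum of read/write bandwidth; latency fields are not touched */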
550 static void cxl_bandwidth_add(struct access_coordinate *coord,
551 			      struct access_coordinate *c1,
552 			      struct access_coordinate *c2)
553 {
554 	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
555 		coord[i].read_bandwidth = c1[i].read_bandwidth +
556 					  c2[i].read_bandwidth;
557 		coord[i].write_bandwidth = c1[i].write_bandwidth +
558 					   c2[i].write_bandwidth;
559 	}
560 }
561 
562 static bool dpa_perf_contains(struct cxl_dpa_perf *perf,
563 			      struct resource *dpa_res)
564 {
565 	struct range dpa = {
566 		.start = dpa_res->start,
567 		.end = dpa_res->end,
568 	};
569 
570 	return range_contains(&perf->dpa_range, &dpa);
571 }
572 
573 static struct cxl_dpa_perf *cxled_get_dpa_perf(struct cxl_endpoint_decoder *cxled,
574 					       enum cxl_decoder_mode mode)
575 {
576 	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
577 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
578 	struct cxl_dpa_perf *perf;
579 
580 	switch (mode) {
581 	case CXL_DECODER_RAM:
582 		perf = &mds->ram_perf;
583 		break;
584 	case CXL_DECODER_PMEM:
585 		perf = &mds->pmem_perf;
586 		break;
587 	default:
588 		return ERR_PTR(-EINVAL);
589 	}
590 
591 	if (!dpa_perf_contains(perf, cxled->dpa_res))
592 		return ERR_PTR(-EINVAL);
593 
594 	return perf;
595 }
596 
597 /*
598  * Transient context for the running bandwidth calculation while walking
599  * the port hierarchy to account for a shared upstream link.
600  */
601 struct cxl_perf_ctx {
602 	struct access_coordinate coord[ACCESS_COORDINATE_MAX];
603 	struct cxl_port *port;
604 };
605 
606 /**
607  * cxl_endpoint_gather_bandwidth - collect all the endpoint bandwidth in an xarray
608  * @cxlr: CXL region for the bandwidth calculation
609  * @cxled: endpoint decoder to start on
610  * @usp_xa: (output) the xarray that collects all the bandwidth coordinates
611  *          indexed by the upstream device with data of 'struct cxl_perf_ctx'.
612  * @gp_is_root: (output) bool of whether the grandparent is cxl root.
613  *
614  * Return: 0 for success or -errno
615  *
616  * Collects aggregated endpoint bandwidth and stores it in an xarray
617  * indexed by the upstream device of the switch or the RP device. Each
618  * endpoint contributes the minimum of the bandwidth from the DSLBIS of
619  * the endpoint CDAT, the endpoint upstream link bandwidth, and the
620  * bandwidth from the SSLBIS of the switch CDAT for the switch upstream port to
621  * the downstream port that's associated with the endpoint. If the
622  * device is directly connected to an RP, then no SSLBIS is involved.
623  */
624 static int cxl_endpoint_gather_bandwidth(struct cxl_region *cxlr,
625 					 struct cxl_endpoint_decoder *cxled,
626 					 struct xarray *usp_xa,
627 					 bool *gp_is_root)
628 {
629 	struct cxl_port *endpoint = to_cxl_port(cxled->cxld.dev.parent);
630 	struct cxl_port *parent_port = to_cxl_port(endpoint->dev.parent);
631 	struct cxl_port *gp_port = to_cxl_port(parent_port->dev.parent);
632 	struct access_coordinate pci_coord[ACCESS_COORDINATE_MAX];
633 	struct access_coordinate sw_coord[ACCESS_COORDINATE_MAX];
634 	struct access_coordinate ep_coord[ACCESS_COORDINATE_MAX];
635 	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
636 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
637 	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
638 	struct cxl_perf_ctx *perf_ctx;
639 	struct cxl_dpa_perf *perf;
640 	unsigned long index;
641 	void *ptr;
642 	int rc;
643 
644 	if (!dev_is_pci(cxlds->dev))
645 		return -ENODEV;
646 
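	/* Restricted CXL Device (RCD) topologies are not handled here */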
647 	if (cxlds->rcd)
648 		return -ENODEV;
649 
650 	perf = cxled_get_dpa_perf(cxled, cxlr->mode);
651 	if (IS_ERR(perf))
652 		return PTR_ERR(perf);
653 
654 	gp_port = to_cxl_port(parent_port->dev.parent);
655 	*gp_is_root = is_cxl_root(gp_port);
656 
657 	/*
658 	 * If the grandparent is cxl root, then index is the root port,
659 	 * otherwise it's the parent switch upstream device.
660 	 */
661 	if (*gp_is_root)
662 		index = (unsigned long)endpoint->parent_dport->dport_dev;
663 	else
664 		index = (unsigned long)parent_port->uport_dev;
665 
666 	perf_ctx = xa_load(usp_xa, index);
667 	if (!perf_ctx) {
668 		struct cxl_perf_ctx *c __free(kfree) =
669 			kzalloc(sizeof(*perf_ctx), GFP_KERNEL);
670 
671 		if (!c)
672 			return -ENOMEM;
673 		ptr = xa_store(usp_xa, index, c, GFP_KERNEL);
674 		if (xa_is_err(ptr))
675 			return xa_err(ptr);
676 		perf_ctx = no_free_ptr(c);
677 		perf_ctx->port = parent_port;
678 	}
679 
680 	/* Direct upstream link from EP bandwidth */
681 	rc = cxl_pci_get_bandwidth(pdev, pci_coord);
682 	if (rc < 0)
683 		return rc;
684 
685 	/*
686 	 * Min of upstream link bandwidth and Endpoint CDAT bandwidth from
687 	 * DSLBIS.
688 	 */
689 	cxl_coordinates_combine(ep_coord, pci_coord, perf->cdat_coord);
690 
691 	/*
692 	 * If grandparent port is root, then there's no switch involved and
693 	 * the endpoint is connected to a root port.
694 	 */
695 	if (!*gp_is_root) {
696 		/*
697 		 * Retrieve the switch SSLBIS for switch downstream port
698 		 * associated with the endpoint bandwidth.
699 		 */
700 		rc = cxl_port_get_switch_dport_bandwidth(endpoint, sw_coord);
701 		if (rc)
702 			return rc;
703 
704 		/*
705 		 * Min of the earlier coordinates with the switch SSLBIS
706 		 * bandwidth
707 		 */
708 		cxl_coordinates_combine(ep_coord, ep_coord, sw_coord);
709 	}
710 
711 	/*
712 	 * Aggregate the computed bandwidth with the current aggregated bandwidth
713 	 * of the endpoints with the same switch upstream device or RP.
714 	 */
715 	cxl_bandwidth_add(perf_ctx->coord, perf_ctx->coord, ep_coord);
716 
717 	return 0;
718 }
719 
720 static void free_perf_xa(struct xarray *xa)
721 {
722 	struct cxl_perf_ctx *ctx;
723 	unsigned long index;
724 
725 	if (!xa)
726 		return;
727 
728 	xa_for_each(xa, index, ctx)
729 		kfree(ctx);
730 	xa_destroy(xa);
731 	kfree(xa);
732 }
733 DEFINE_FREE(free_perf_xa, struct xarray *, if (_T) free_perf_xa(_T))
734 
735 /**
736  * cxl_switch_gather_bandwidth - collect all the bandwidth at switch level in an xarray
737  * @cxlr: The region being operated on
738  * @input_xa: xarray indexed by upstream device of a switch with data of 'struct
739  *	      cxl_perf_ctx'
740  * @gp_is_root: (output) bool of whether the grandparent is cxl root.
741  *
742  * Return: an xarray of resulting cxl_perf_ctx per parent switch or root port
743  *         or ERR_PTR(-errno)
744  *
745  * Iterate through the xarray. Take the minimum of the downstream calculated
746  * bandwidth, the upstream link bandwidth, and the SSLBIS of the upstream
747  * switch if it exists. Sum the resulting bandwidth under the switch upstream
748  * device or an RP device. The function can be iterated over multiple levels
749  * of switches if they are present.
750  */
751 static struct xarray *cxl_switch_gather_bandwidth(struct cxl_region *cxlr,
752 						  struct xarray *input_xa,
753 						  bool *gp_is_root)
754 {
755 	struct xarray *res_xa __free(free_perf_xa) =
756 		kzalloc(sizeof(*res_xa), GFP_KERNEL);
757 	struct access_coordinate coords[ACCESS_COORDINATE_MAX];
758 	struct cxl_perf_ctx *ctx, *us_ctx;
759 	unsigned long index, us_index;
760 	int dev_count = 0;
761 	int gp_count = 0;
762 	void *ptr;
763 	int rc;
764 
765 	if (!res_xa)
766 		return ERR_PTR(-ENOMEM);
767 	xa_init(res_xa);
768 
769 	xa_for_each(input_xa, index, ctx) {
770 		struct device *dev = (struct device *)index;
771 		struct cxl_port *port = ctx->port;
772 		struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
773 		struct cxl_port *gp_port = to_cxl_port(parent_port->dev.parent);
774 		struct cxl_dport *dport = port->parent_dport;
775 		bool is_root = false;
776 
777 		dev_count++;
778 		if (is_cxl_root(gp_port)) {
779 			is_root = true;
780 			gp_count++;
781 		}
782 
783 		/*
784 		 * If the grandparent is cxl root, then index is the root port,
785 		 * otherwise it's the parent switch upstream device.
786 		 */
787 		if (is_root)
788 			us_index = (unsigned long)port->parent_dport->dport_dev;
789 		else
790 			us_index = (unsigned long)parent_port->uport_dev;
791 
792 		us_ctx = xa_load(res_xa, us_index);
793 		if (!us_ctx) {
794 			struct cxl_perf_ctx *n __free(kfree) =
795 				kzalloc(sizeof(*n), GFP_KERNEL);
796 
797 			if (!n)
798 				return ERR_PTR(-ENOMEM);
799 
800 			ptr = xa_store(res_xa, us_index, n, GFP_KERNEL);
801 			if (xa_is_err(ptr))
802 				return ERR_PTR(xa_err(ptr));
803 			us_ctx = no_free_ptr(n);
804 			us_ctx->port = parent_port;
805 		}
806 
807 		/*
808 		 * If the device isn't an upstream PCIe port, there's something
809 		 * wrong with the topology.
810 		 */
811 		if (!dev_is_pci(dev))
812 			return ERR_PTR(-EINVAL);
813 
814 		/* Retrieve the upstream link bandwidth */
815 		rc = cxl_pci_get_bandwidth(to_pci_dev(dev), coords);
816 		if (rc)
817 			return ERR_PTR(-ENXIO);
818 
819 		/*
820 		 * Take the min of downstream bandwidth and the upstream link
821 		 * bandwidth.
822 		 */
823 		cxl_coordinates_combine(coords, coords, ctx->coord);
824 
825 		/*
826 		 * Take the min of the calculated bandwidth and the upstream
827 		 * switch SSLBIS bandwidth if there's a parent switch
828 		 */
829 		if (!is_root)
830 			cxl_coordinates_combine(coords, coords, dport->coord);
831 
832 		/*
833 		 * Aggregate the calculated bandwidth common to an upstream
834 		 * switch.
835 		 */
836 		cxl_bandwidth_add(us_ctx->coord, us_ctx->coord, coords);
837 	}
838 
839 	/* Asymmetric topology detected. */
840 	if (gp_count) {
841 		if (gp_count != dev_count) {
842 			dev_dbg(&cxlr->dev,
843 				"Asymmetric hierarchy detected, bandwidth not updated\n");
844 			return ERR_PTR(-EOPNOTSUPP);
845 		}
846 		*gp_is_root = true;
847 	}
848 
849 	return no_free_ptr(res_xa);
850 }
851 
852 /**
853  * cxl_rp_gather_bandwidth - handle the root port level bandwidth collection
854  * @xa: the xarray that holds the cxl_perf_ctx that has the bandwidth calculated
855  *      below each root port device.
856  *
857  * Return: xarray that holds cxl_perf_ctx per host bridge or ERR_PTR(-errno)
858  */
859 static struct xarray *cxl_rp_gather_bandwidth(struct xarray *xa)
860 {
861 	struct xarray *hb_xa __free(free_perf_xa) =
862 		kzalloc(sizeof(*hb_xa), GFP_KERNEL);
863 	struct cxl_perf_ctx *ctx;
864 	unsigned long index;
865 
866 	if (!hb_xa)
867 		return ERR_PTR(-ENOMEM);
868 	xa_init(hb_xa);
869 
870 	xa_for_each(xa, index, ctx) {
871 		struct cxl_port *port = ctx->port;
872 		unsigned long hb_index = (unsigned long)port->uport_dev;
873 		struct cxl_perf_ctx *hb_ctx;
874 		void *ptr;
875 
876 		hb_ctx = xa_load(hb_xa, hb_index);
877 		if (!hb_ctx) {
878 			struct cxl_perf_ctx *n __free(kfree) =
879 				kzalloc(sizeof(*n), GFP_KERNEL);
880 
881 			if (!n)
882 				return ERR_PTR(-ENOMEM);
883 			ptr = xa_store(hb_xa, hb_index, n, GFP_KERNEL);
884 			if (xa_is_err(ptr))
885 				return ERR_PTR(xa_err(ptr));
886 			hb_ctx = no_free_ptr(n);
887 			hb_ctx->port = port;
888 		}
889 
890 		cxl_bandwidth_add(hb_ctx->coord, hb_ctx->coord, ctx->coord);
891 	}
892 
893 	return no_free_ptr(hb_xa);
894 }
895 
896 /**
897  * cxl_hb_gather_bandwidth - handle the host bridge level bandwidth collection
898  * @xa: the xarray that holds the cxl_perf_ctx that has the bandwidth calculated
899  *      below each host bridge.
900  *
901  * Return: xarray that holds cxl_perf_ctx per ACPI0017 device or ERR_PTR(-errno)
902  */
903 static struct xarray *cxl_hb_gather_bandwidth(struct xarray *xa)
904 {
905 	struct xarray *mw_xa __free(free_perf_xa) =
906 		kzalloc(sizeof(*mw_xa), GFP_KERNEL);
907 	struct cxl_perf_ctx *ctx;
908 	unsigned long index;
909 
910 	if (!mw_xa)
911 		return ERR_PTR(-ENOMEM);
912 	xa_init(mw_xa);
913 
914 	xa_for_each(xa, index, ctx) {
915 		struct cxl_port *port = ctx->port;
916 		struct cxl_port *parent_port;
917 		struct cxl_perf_ctx *mw_ctx;
918 		struct cxl_dport *dport;
919 		unsigned long mw_index;
920 		void *ptr;
921 
922 		parent_port = to_cxl_port(port->dev.parent);
923 		mw_index = (unsigned long)parent_port->uport_dev;
924 
925 		mw_ctx = xa_load(mw_xa, mw_index);
926 		if (!mw_ctx) {
927 			struct cxl_perf_ctx *n __free(kfree) =
928 				kzalloc(sizeof(*n), GFP_KERNEL);
929 
930 			if (!n)
931 				return ERR_PTR(-ENOMEM);
932 			ptr = xa_store(mw_xa, mw_index, n, GFP_KERNEL);
933 			if (xa_is_err(ptr))
934 				return ERR_PTR(xa_err(ptr));
935 			mw_ctx = no_free_ptr(n);
936 		}
937 
938 		dport = port->parent_dport;
939 		cxl_coordinates_combine(ctx->coord, ctx->coord, dport->coord);
940 		cxl_bandwidth_add(mw_ctx->coord, mw_ctx->coord, ctx->coord);
941 	}
942 
943 	return no_free_ptr(mw_xa);
944 }
945 
946 /**
947  * cxl_region_update_bandwidth - Update the bandwidth access coordinates of a region
948  * @cxlr: The region being operated on
949  * @input_xa: xarray that holds cxl_perf_ctx with the calculated bandwidth per ACPI0017 instance
950  */
951 static void cxl_region_update_bandwidth(struct cxl_region *cxlr,
952 					struct xarray *input_xa)
953 {
954 	struct access_coordinate coord[ACCESS_COORDINATE_MAX];
955 	struct cxl_perf_ctx *ctx;
956 	unsigned long index;
957 
958 	memset(coord, 0, sizeof(coord));
959 	xa_for_each(input_xa, index, ctx)
960 		cxl_bandwidth_add(coord, coord, ctx->coord);
961 
962 	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
963 		cxlr->coord[i].read_bandwidth = coord[i].read_bandwidth;
964 		cxlr->coord[i].write_bandwidth = coord[i].write_bandwidth;
965 	}
966 }
967 
968 /**
969  * cxl_region_shared_upstream_bandwidth_update - Recalculate the bandwidth for
970  *						 the region
971  * @cxlr: the cxl region to recalculate
972  *
973  * The function walks the topology from the bottom up and calculates the bandwidth. It
974  * starts at the endpoints, processes the switches if any, then the root port
975  * level and the host bridge level, and finally aggregates at the region.
976  */
977 void cxl_region_shared_upstream_bandwidth_update(struct cxl_region *cxlr)
978 {
979 	struct xarray *working_xa;
980 	int root_count = 0;
981 	bool is_root;
982 	int rc;
983 
984 	lockdep_assert_held(&cxl_dpa_rwsem);
985 
986 	struct xarray *usp_xa __free(free_perf_xa) =
987 		kzalloc(sizeof(*usp_xa), GFP_KERNEL);
988 
989 	if (!usp_xa)
990 		return;
991 
992 	xa_init(usp_xa);
993 
994 	/* Collect bandwidth data from all the endpoints. */
995 	for (int i = 0; i < cxlr->params.nr_targets; i++) {
996 		struct cxl_endpoint_decoder *cxled = cxlr->params.targets[i];
997 
998 		is_root = false;
999 		rc = cxl_endpoint_gather_bandwidth(cxlr, cxled, usp_xa, &is_root);
1000 		if (rc)
1001 			return;
1002 		root_count += is_root;
1003 	}
1004 
1005 	/* Detect asymmetric hierarchy with some direct attached endpoints. */
1006 	if (root_count && root_count != cxlr->params.nr_targets) {
1007 		dev_dbg(&cxlr->dev,
1008 			"Asymmetric hierarchy detected, bandwidth not updated\n");
1009 		return;
1010 	}
1011 
1012 	/*
1013 	 * Walk up one or more switches to deal with the bandwidth of the
1014 	 * switches if they exist. Endpoints directly attached to RPs skip
1015 	 * over this part.
1016 	 */
1017 	if (!root_count) {
1018 		do {
1019 			working_xa = cxl_switch_gather_bandwidth(cxlr, usp_xa,
1020 								 &is_root);
1021 			if (IS_ERR(working_xa))
1022 				return;
1023 			free_perf_xa(usp_xa);
1024 			usp_xa = working_xa;
1025 		} while (!is_root);
1026 	}
1027 
1028 	/* Handle the bandwidth at the root port of the hierarchy */
1029 	working_xa = cxl_rp_gather_bandwidth(usp_xa);
1030 	if (IS_ERR(working_xa))
1031 		return;
1032 	free_perf_xa(usp_xa);
1033 	usp_xa = working_xa;
1034 
1035 	/* Handle the bandwidth at the host bridge of the hierarchy */
1036 	working_xa = cxl_hb_gather_bandwidth(usp_xa);
1037 	if (IS_ERR(working_xa))
1038 		return;
1039 	free_perf_xa(usp_xa);
1040 	usp_xa = working_xa;
1041 
1042 	/*
1043 	 * Aggregate all the bandwidth collected per CFMWS (ACPI0017) and
1044 	 * update the region bandwidth with the final calculated values.
1045 	 */
1046 	cxl_region_update_bandwidth(cxlr, usp_xa);
1047 }
1048 
1049 void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
1050 				    struct cxl_endpoint_decoder *cxled)
1051 {
1052 	struct cxl_dpa_perf *perf;
1053 
1054 	lockdep_assert_held(&cxl_dpa_rwsem);
1055 
1056 	perf = cxled_get_dpa_perf(cxled, cxlr->mode);
1057 	if (IS_ERR(perf))
1058 		return;
1059 
1060 	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
1061 		/* Get total bandwidth and the worst latency for the cxl region */
1062 		cxlr->coord[i].read_latency = max_t(unsigned int,
1063 						    cxlr->coord[i].read_latency,
1064 						    perf->coord[i].read_latency);
1065 		cxlr->coord[i].write_latency = max_t(unsigned int,
1066 						     cxlr->coord[i].write_latency,
1067 						     perf->coord[i].write_latency);
1068 		cxlr->coord[i].read_bandwidth += perf->coord[i].read_bandwidth;
1069 		cxlr->coord[i].write_bandwidth += perf->coord[i].write_bandwidth;
1070 	}
1071 }
1072 
1073 int cxl_update_hmat_access_coordinates(int nid, struct cxl_region *cxlr,
1074 				       enum access_coordinate_class access)
1075 {
1076 	return hmat_update_target_coordinates(nid, &cxlr->coord[access], access);
1077 }
1078 
1079 bool cxl_need_node_perf_attrs_update(int nid)
1080 {
1081 	return !acpi_node_backed_by_real_pxm(nid);
1082 }
1083