// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Intel Corporation. All rights reserved. */
#include <linux/acpi.h>
#include <linux/xarray.h>
#include <linux/fw_table.h>
#include <linux/node.h>
#include <linux/overflow.h>
#include "cxlpci.h"
#include "cxlmem.h"
#include "core.h"
#include "cxl.h"

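/*
 * Per-DSMAS bookkeeping, stored in an xarray indexed by DSMAS handle:
 * the DPA range the entry describes, the access coordinates
 * accumulated from matching DSLBIS entries, and the QoS class
 * eventually resolved for the entry.
 */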
struct dsmas_entry {
	struct range dpa_range;
	u8 handle;
	struct access_coordinate coord[ACCESS_COORDINATE_MAX];

	int entries;
	int qos_class;
};

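/*
 * Multiply a CDAT latency/bandwidth entry by its base unit, returning
 * 0 for the invalid (0xffff), zero, and u32-overflowing cases.
 * Latency values are additionally scaled down by 1000, following the
 * HMAT convention of converting picoseconds to nanoseconds.
 */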
static u32 cdat_normalize(u16 entry, u64 base, u8 type)
{
	u32 value;

	/*
	 * Check for invalid and overflow values
	 */
	if (entry == 0xffff || !entry)
		return 0;
	else if (base > (UINT_MAX / (entry)))
		return 0;

	/*
	 * CDAT fields follow the format of HMAT fields. See table 5 Device
	 * Scoped Latency and Bandwidth Information Structure in Coherent Device
	 * Attribute Table (CDAT) Specification v1.01.
	 */
	value = entry * base;
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
	case ACPI_HMAT_READ_LATENCY:
	case ACPI_HMAT_WRITE_LATENCY:
		value = DIV_ROUND_UP(value, 1000);
		break;
	default:
		break;
	}
	return value;
}

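/*
 * cdat_table_parse() callback for DSMAS subtables: validate the
 * subtable length, then record the described DPA range in @arg (the
 * dsmas xarray) indexed by DSMAS handle for later DSLBIS correlation.
 */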
static int cdat_dsmas_handler(union acpi_subtable_headers *header, void *arg,
			      const unsigned long end)
{
	struct acpi_cdat_header *hdr = &header->cdat;
	struct acpi_cdat_dsmas *dsmas;
	int size = sizeof(*hdr) + sizeof(*dsmas);
	struct xarray *dsmas_xa = arg;
	struct dsmas_entry *dent;
	u16 len;
	int rc;

	len = le16_to_cpu((__force __le16)hdr->length);
	if (len != size || (unsigned long)hdr + len > end) {
		pr_warn("Malformed DSMAS table length: (%u:%u)\n", size, len);
		return -EINVAL;
	}

	/* Skip common header */
	dsmas = (struct acpi_cdat_dsmas *)(hdr + 1);

	dent = kzalloc(sizeof(*dent), GFP_KERNEL);
	if (!dent)
		return -ENOMEM;

	dent->handle = dsmas->dsmad_handle;
	dent->dpa_range.start = le64_to_cpu((__force __le64)dsmas->dpa_base_address);
	dent->dpa_range.end = le64_to_cpu((__force __le64)dsmas->dpa_base_address) +
			      le64_to_cpu((__force __le64)dsmas->dpa_length) - 1;

	rc = xa_insert(dsmas_xa, dent->handle, dent, GFP_KERNEL);
	if (rc) {
		kfree(dent);
		return rc;
	}

	return 0;
}

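/*
 * Apply one HMAT-style data point to an access_coordinate. The
 * combined ACCESS_* types update both the read and write variants.
 */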
static void __cxl_access_coordinate_set(struct access_coordinate *coord,
					int access, unsigned int val)
{
	switch (access) {
	case ACPI_HMAT_ACCESS_LATENCY:
		coord->read_latency = val;
		coord->write_latency = val;
		break;
	case ACPI_HMAT_READ_LATENCY:
		coord->read_latency = val;
		break;
	case ACPI_HMAT_WRITE_LATENCY:
		coord->write_latency = val;
		break;
	case ACPI_HMAT_ACCESS_BANDWIDTH:
		coord->read_bandwidth = val;
		coord->write_bandwidth = val;
		break;
	case ACPI_HMAT_READ_BANDWIDTH:
		coord->read_bandwidth = val;
		break;
	case ACPI_HMAT_WRITE_BANDWIDTH:
		coord->write_bandwidth = val;
		break;
	}
}

static void cxl_access_coordinate_set(struct access_coordinate *coord,
				      int access, unsigned int val)
{
	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
		__cxl_access_coordinate_set(&coord[i], access, val);
}

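/*
 * cdat_table_parse() callback for DSLBIS subtables: normalize the
 * first latency/bandwidth entry and apply it to the dsmas_entry with
 * the matching handle. Subtables with an unrecognized data type, a
 * non-memory hierarchy, or no matching DSMAS entry are skipped.
 */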
static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
			       const unsigned long end)
{
	struct acpi_cdat_header *hdr = &header->cdat;
	struct acpi_cdat_dslbis *dslbis;
	int size = sizeof(*hdr) + sizeof(*dslbis);
	struct xarray *dsmas_xa = arg;
	struct dsmas_entry *dent;
	__le64 le_base;
	__le16 le_val;
	u64 val;
	u16 len;

	len = le16_to_cpu((__force __le16)hdr->length);
	if (len != size || (unsigned long)hdr + len > end) {
		pr_warn("Malformed DSLBIS table length: (%u:%u)\n", size, len);
		return -EINVAL;
	}

	/* Skip common header */
	dslbis = (struct acpi_cdat_dslbis *)(hdr + 1);

	/* Skip unrecognized data type */
	if (dslbis->data_type > ACPI_HMAT_WRITE_BANDWIDTH)
		return 0;

	/* Not a memory type, skip */
	if ((dslbis->flags & ACPI_HMAT_MEMORY_HIERARCHY) != ACPI_HMAT_MEMORY)
		return 0;

	dent = xa_load(dsmas_xa, dslbis->handle);
	if (!dent) {
		pr_warn("No matching DSMAS entry for DSLBIS entry.\n");
		return 0;
	}

	le_base = (__force __le64)dslbis->entry_base_unit;
	le_val = (__force __le16)dslbis->entry[0];
	val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
			     dslbis->data_type);

	cxl_access_coordinate_set(dent->coord, dslbis->data_type, val);

	return 0;
}

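/*
 * cdat_table_parse() returns the number of subtables matched;
 * collapse that to 0 on success and -ENOENT when nothing matched.
 */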
static int cdat_table_parse_output(int rc)
{
	if (rc < 0)
		return rc;
	if (rc == 0)
		return -ENOENT;

	return 0;
}

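/*
 * Walk the endpoint's cached CDAT twice: DSMAS first to populate the
 * xarray, then DSLBIS to annotate those entries with performance data.
 */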
static int cxl_cdat_endpoint_process(struct cxl_port *port,
				     struct xarray *dsmas_xa)
{
	int rc;

	rc = cdat_table_parse(ACPI_CDAT_TYPE_DSMAS, cdat_dsmas_handler,
			      dsmas_xa, port->cdat.table, port->cdat.length);
	rc = cdat_table_parse_output(rc);
	if (rc)
		return rc;

	rc = cdat_table_parse(ACPI_CDAT_TYPE_DSLBIS, cdat_dslbis_handler,
			      dsmas_xa, port->cdat.table, port->cdat.length);
	return cdat_table_parse_output(rc);
}

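/*
 * Combine each DSMAS entry's coordinates with the CPU-to-endpoint
 * path coordinates, then ask the platform via the root ->qos_class()
 * op to resolve a QoS class for the result. Returns -ENOENT if no
 * entry resolves to a valid class.
 */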
static int cxl_port_perf_data_calculate(struct cxl_port *port,
					struct xarray *dsmas_xa)
{
	struct access_coordinate ep_c[ACCESS_COORDINATE_MAX];
	struct dsmas_entry *dent;
	int valid_entries = 0;
	unsigned long index;
	int rc;

	rc = cxl_endpoint_get_perf_coordinates(port, ep_c);
	if (rc) {
		dev_dbg(&port->dev, "Failed to retrieve ep perf coordinates.\n");
		return rc;
	}

	struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);

	if (!cxl_root)
		return -ENODEV;

	if (!cxl_root->ops || !cxl_root->ops->qos_class)
		return -EOPNOTSUPP;

	xa_for_each(dsmas_xa, index, dent) {
		int qos_class;

		cxl_coordinates_combine(dent->coord, dent->coord, ep_c);
		dent->entries = 1;
		rc = cxl_root->ops->qos_class(cxl_root,
					      &dent->coord[ACCESS_COORDINATE_CPU],
					      1, &qos_class);
		if (rc != 1)
			continue;

		valid_entries++;
		dent->qos_class = qos_class;
	}

	if (!valid_entries)
		return -ENOENT;

	return 0;
}

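/* Publish a resolved DSMAS entry into the memdev's cached perf data */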
static void update_perf_entry(struct device *dev, struct dsmas_entry *dent,
			      struct cxl_dpa_perf *dpa_perf)
{
	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
		dpa_perf->coord[i] = dent->coord[i];
	dpa_perf->dpa_range = dent->dpa_range;
	dpa_perf->qos_class = dent->qos_class;
	dev_dbg(dev,
		"DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw: %d read_lat: %d write_lat: %d\n",
		dent->dpa_range.start, dpa_perf->qos_class,
		dent->coord[ACCESS_COORDINATE_CPU].read_bandwidth,
		dent->coord[ACCESS_COORDINATE_CPU].write_bandwidth,
		dent->coord[ACCESS_COORDINATE_CPU].read_latency,
		dent->coord[ACCESS_COORDINATE_CPU].write_latency);
}

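/*
 * Sort each DSMAS entry into the ram or pmem partition based on which
 * active resource range contains its DPA range.
 */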
static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
				     struct xarray *dsmas_xa)
{
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
	struct device *dev = cxlds->dev;
	struct range pmem_range = {
		.start = cxlds->pmem_res.start,
		.end = cxlds->pmem_res.end,
	};
	struct range ram_range = {
		.start = cxlds->ram_res.start,
		.end = cxlds->ram_res.end,
	};
	struct dsmas_entry *dent;
	unsigned long index;

	xa_for_each(dsmas_xa, index, dent) {
		if (resource_size(&cxlds->ram_res) &&
		    range_contains(&ram_range, &dent->dpa_range))
			update_perf_entry(dev, dent, &mds->ram_perf);
		else if (resource_size(&cxlds->pmem_res) &&
			 range_contains(&pmem_range, &dent->dpa_range))
			update_perf_entry(dev, dent, &mds->pmem_perf);
		else
			dev_dbg(dev, "no partition for dsmas dpa: %#llx\n",
				dent->dpa_range.start);
	}
}

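/*
 * device_for_each_child() callback: return 1 iff @dev is a root
 * decoder whose qos_class matches the one passed via @data.
 */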
static int match_cxlrd_qos_class(struct device *dev, void *data)
{
	int dev_qos_class = *(int *)data;
	struct cxl_root_decoder *cxlrd;

	if (!is_root_decoder(dev))
		return 0;

	cxlrd = to_cxl_root_decoder(dev);
	if (cxlrd->qos_class == CXL_QOS_CLASS_INVALID)
		return 0;

	if (cxlrd->qos_class == dev_qos_class)
		return 1;

	return 0;
}

static void reset_dpa_perf(struct cxl_dpa_perf *dpa_perf)
{
	*dpa_perf = (struct cxl_dpa_perf) {
		.qos_class = CXL_QOS_CLASS_INVALID,
	};
}

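/*
 * A dpa_perf entry only matches if it carries a valid QoS class and
 * some root decoder advertises that same class.
 */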
static bool cxl_qos_match(struct cxl_port *root_port,
			  struct cxl_dpa_perf *dpa_perf)
{
	if (dpa_perf->qos_class == CXL_QOS_CLASS_INVALID)
		return false;

	if (!device_for_each_child(&root_port->dev, &dpa_perf->qos_class,
				   match_cxlrd_qos_class))
		return false;

	return true;
}

static int match_cxlrd_hb(struct device *dev, void *data)
{
	struct device *host_bridge = data;
	struct cxl_switch_decoder *cxlsd;
	struct cxl_root_decoder *cxlrd;

	if (!is_root_decoder(dev))
		return 0;

	cxlrd = to_cxl_root_decoder(dev);
	cxlsd = &cxlrd->cxlsd;

	guard(rwsem_read)(&cxl_region_rwsem);
	for (int i = 0; i < cxlsd->nr_targets; i++) {
		if (host_bridge == cxlsd->target[i]->dport_dev)
			return 1;
	}

	return 0;
}

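/*
 * Cross-check the endpoint's ram/pmem QoS classes against the root
 * decoders, and confirm the device's host bridge sits under one of
 * them; any mismatch invalidates the affected perf data.
 */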
static int cxl_qos_class_verify(struct cxl_memdev *cxlmd)
{
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
	struct cxl_port *root_port;
	int rc;

	struct cxl_root *cxl_root __free(put_cxl_root) =
		find_cxl_root(cxlmd->endpoint);

	if (!cxl_root)
		return -ENODEV;

	root_port = &cxl_root->port;

	/* Check that the QTG IDs are all sane between end device and root decoders */
	if (!cxl_qos_match(root_port, &mds->ram_perf))
		reset_dpa_perf(&mds->ram_perf);
	if (!cxl_qos_match(root_port, &mds->pmem_perf))
		reset_dpa_perf(&mds->pmem_perf);

	/* Check to make sure that the device's host bridge is under a root decoder */
	rc = device_for_each_child(&root_port->dev,
				   cxlmd->endpoint->host_bridge, match_cxlrd_hb);
	if (!rc) {
		reset_dpa_perf(&mds->ram_perf);
		reset_dpa_perf(&mds->pmem_perf);
	}

	return rc;
}

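/*
 * Teardown helper for the DSMAS xarray; DEFINE_FREE() below hooks it
 * up for scope-based cleanup via __free(dsmas).
 */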
static void discard_dsmas(struct xarray *xa)
{
	unsigned long index;
	void *ent;

	xa_for_each(xa, index, ent) {
		xa_erase(xa, index);
		kfree(ent);
	}
	xa_destroy(xa);
}
DEFINE_FREE(dsmas, struct xarray *, if (_T) discard_dsmas(_T))

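/*
 * Endpoint CDAT entry point: parse DSMAS/DSLBIS, calculate path
 * performance and QoS classes, and publish the results to the memdev.
 * Failures are logged at debug level and otherwise ignored; the
 * endpoint then simply carries no QoS data.
 */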
void cxl_endpoint_parse_cdat(struct cxl_port *port)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct xarray __dsmas_xa;
	struct xarray *dsmas_xa __free(dsmas) = &__dsmas_xa;
	int rc;

	xa_init(&__dsmas_xa);
	if (!port->cdat.table)
		return;

	rc = cxl_cdat_endpoint_process(port, dsmas_xa);
	if (rc < 0) {
		dev_dbg(&port->dev, "Failed to parse CDAT: %d\n", rc);
		return;
	}

	rc = cxl_port_perf_data_calculate(port, dsmas_xa);
	if (rc) {
		dev_dbg(&port->dev, "Failed to do perf coord calculations.\n");
		return;
	}

	cxl_memdev_set_qos_class(cxlds, dsmas_xa);
	cxl_qos_class_verify(cxlmd);
	cxl_memdev_update_perf(cxlmd);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_parse_cdat, CXL);

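/*
 * cdat_table_parse() callback for switch SSLBIS subtables: decode each
 * port-pair entry, where one side identifies the upstream port (or is
 * the ANY_PORT wildcard) and the other the downstream port, and apply
 * the normalized value to every matching dport's access coordinates.
 */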
static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
			       const unsigned long end)
{
	struct acpi_cdat_sslbis_table {
		struct acpi_cdat_header header;
		struct acpi_cdat_sslbis sslbis_header;
		struct acpi_cdat_sslbe entries[];
	} *tbl = (struct acpi_cdat_sslbis_table *)header;
	int size = sizeof(header->cdat) + sizeof(tbl->sslbis_header);
	struct acpi_cdat_sslbis *sslbis;
	struct cxl_port *port = arg;
	struct device *dev = &port->dev;
	int remain, entries, i;
	u16 len;

	len = le16_to_cpu((__force __le16)header->cdat.length);
	remain = len - size;
	if (!remain || remain % sizeof(tbl->entries[0]) ||
	    (unsigned long)header + len > end) {
		dev_warn(dev, "Malformed SSLBIS table length: (%u)\n", len);
		return -EINVAL;
	}

	sslbis = &tbl->sslbis_header;
	/* Unrecognized data type, we can skip */
	if (sslbis->data_type > ACPI_HMAT_WRITE_BANDWIDTH)
		return 0;

	entries = remain / sizeof(tbl->entries[0]);
	if (struct_size(tbl, entries, entries) != len)
		return -EINVAL;

	for (i = 0; i < entries; i++) {
		u16 x = le16_to_cpu((__force __le16)tbl->entries[i].portx_id);
		u16 y = le16_to_cpu((__force __le16)tbl->entries[i].porty_id);
		__le64 le_base;
		__le16 le_val;
		struct cxl_dport *dport;
		unsigned long index;
		u16 dsp_id;
		u64 val;

		switch (x) {
		case ACPI_CDAT_SSLBIS_US_PORT:
			dsp_id = y;
			break;
		case ACPI_CDAT_SSLBIS_ANY_PORT:
			switch (y) {
			case ACPI_CDAT_SSLBIS_US_PORT:
				dsp_id = x;
				break;
			case ACPI_CDAT_SSLBIS_ANY_PORT:
				dsp_id = ACPI_CDAT_SSLBIS_ANY_PORT;
				break;
			default:
				dsp_id = y;
				break;
			}
			break;
		default:
			dsp_id = x;
			break;
		}

		le_base = (__force __le64)tbl->sslbis_header.entry_base_unit;
		le_val = (__force __le16)tbl->entries[i].latency_or_bandwidth;
		val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
				     sslbis->data_type);

		xa_for_each(&port->dports, index, dport) {
			if (dsp_id == ACPI_CDAT_SSLBIS_ANY_PORT ||
			    dsp_id == dport->port_id) {
				cxl_access_coordinate_set(dport->coord,
							  sslbis->data_type,
							  val);
			}
		}
	}

	return 0;
}

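/*
 * Switch CDAT entry point: only SSLBIS subtables are parsed at a
 * switch port; a parse failure is logged at debug level and ignored.
 */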
void cxl_switch_parse_cdat(struct cxl_port *port)
{
	int rc;

	if (!port->cdat.table)
		return;

	rc = cdat_table_parse(ACPI_CDAT_TYPE_SSLBIS, cdat_sslbis_handler,
			      port, port->cdat.table, port->cdat.length);
	rc = cdat_table_parse_output(rc);
	if (rc)
		dev_dbg(&port->dev, "Failed to parse SSLBIS: %d\n", rc);
}
EXPORT_SYMBOL_NS_GPL(cxl_switch_parse_cdat, CXL);

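/*
 * Combining rule for one hop of a path: latencies accumulate, while
 * bandwidth is limited by the slowest component, hence the min() of
 * the two non-zero bandwidths.
 */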
static void __cxl_coordinates_combine(struct access_coordinate *out,
				      struct access_coordinate *c1,
				      struct access_coordinate *c2)
{
	if (c1->write_bandwidth && c2->write_bandwidth)
		out->write_bandwidth = min(c1->write_bandwidth,
					   c2->write_bandwidth);
	out->write_latency = c1->write_latency + c2->write_latency;

	if (c1->read_bandwidth && c2->read_bandwidth)
		out->read_bandwidth = min(c1->read_bandwidth,
					  c2->read_bandwidth);
	out->read_latency = c1->read_latency + c2->read_latency;
}

/**
 * cxl_coordinates_combine - Combine the two input coordinates
 *
 * @out: Output coordinate of c1 and c2 combined
 * @c1: first set of input coordinates
 * @c2: second set of input coordinates
 */
void cxl_coordinates_combine(struct access_coordinate *out,
			     struct access_coordinate *c1,
			     struct access_coordinate *c2)
{
	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
		__cxl_coordinates_combine(&out[i], &c1[i], &c2[i]);
}

MODULE_IMPORT_NS(CXL);

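/*
 * Accumulate an endpoint decoder's perf data into the region-wide
 * coordinates: bandwidth sums across the interleaved endpoints, while
 * latency is governed by the worst (max) contributor.
 */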
void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
				    struct cxl_endpoint_decoder *cxled)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
	struct range dpa = {
		.start = cxled->dpa_res->start,
		.end = cxled->dpa_res->end,
	};
	struct cxl_dpa_perf *perf;

	switch (cxlr->mode) {
	case CXL_DECODER_RAM:
		perf = &mds->ram_perf;
		break;
	case CXL_DECODER_PMEM:
		perf = &mds->pmem_perf;
		break;
	default:
		return;
	}

	lockdep_assert_held(&cxl_dpa_rwsem);

	if (!range_contains(&perf->dpa_range, &dpa))
		return;

	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
		/* Get total bandwidth and the worst latency for the cxl region */
		cxlr->coord[i].read_latency = max_t(unsigned int,
						    cxlr->coord[i].read_latency,
						    perf->coord[i].read_latency);
		cxlr->coord[i].write_latency = max_t(unsigned int,
						     cxlr->coord[i].write_latency,
						     perf->coord[i].write_latency);
		cxlr->coord[i].read_bandwidth += perf->coord[i].read_bandwidth;
		cxlr->coord[i].write_bandwidth += perf->coord[i].write_bandwidth;
	}
}

int cxl_update_hmat_access_coordinates(int nid, struct cxl_region *cxlr,
				       enum access_coordinate_class access)
{
	return hmat_update_target_coordinates(nid, &cxlr->coord[access], access);
}

bool cxl_need_node_perf_attrs_update(int nid)
{
	return !acpi_node_backed_by_real_pxm(nid);
}