xref: /linux/drivers/cxl/core/cdat.c (revision 6c7353836a91b1479e6b81791cdc163fb04b4834)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2023 Intel Corporation. All rights reserved. */
3 #include <linux/acpi.h>
4 #include <linux/xarray.h>
5 #include <linux/fw_table.h>
6 #include <linux/node.h>
7 #include <linux/overflow.h>
8 #include "cxlpci.h"
9 #include "cxlmem.h"
10 #include "core.h"
11 #include "cxl.h"
12 
/*
 * Cached image of one CDAT DSMAS subtable, later decorated with the
 * latency/bandwidth data from the DSLBIS subtables that reference it.
 */
struct dsmas_entry {
	struct range dpa_range;		/* DPA span described by the DSMAS */
	u8 handle;			/* DSMAD handle; also the xarray key */
	struct access_coordinate coord;	/* perf data merged from DSLBIS */

	int entries;			/* coordinate count passed to ->qos_class() (currently 1) */
	int qos_class;			/* QoS class returned by the root ->qos_class() op */
};
21 
/*
 * Callback for cdat_table_parse(): validate one DSMAS (Device Scoped
 * Memory Affinity Structure) subtable and cache it as a dsmas_entry in
 * the xarray passed via @arg, keyed by the DSMAD handle.
 *
 * Returns 0 on success, -EINVAL for a malformed subtable, -ENOMEM on
 * allocation failure, or the xa_insert() error (e.g. -EBUSY for a
 * duplicate handle).
 */
static int cdat_dsmas_handler(union acpi_subtable_headers *header, void *arg,
			      const unsigned long end)
{
	struct acpi_cdat_header *hdr = &header->cdat;
	struct acpi_cdat_dsmas *dsmas;
	int size = sizeof(*hdr) + sizeof(*dsmas);
	struct xarray *dsmas_xa = arg;
	struct dsmas_entry *dent;
	u16 len;
	int rc;

	/* CDAT fields are little-endian; hdr->length is raw table data */
	len = le16_to_cpu((__force __le16)hdr->length);
	/* DSMAS is fixed-size; also reject a record running past the table end */
	if (len != size || (unsigned long)hdr + len > end) {
		pr_warn("Malformed DSMAS table length: (%u:%u)\n", size, len);
		return -EINVAL;
	}

	/* Skip common header */
	dsmas = (struct acpi_cdat_dsmas *)(hdr + 1);

	dent = kzalloc(sizeof(*dent), GFP_KERNEL);
	if (!dent)
		return -ENOMEM;

	dent->handle = dsmas->dsmad_handle;
	dent->dpa_range.start = le64_to_cpu((__force __le64)dsmas->dpa_base_address);
	dent->dpa_range.end = le64_to_cpu((__force __le64)dsmas->dpa_base_address) +
			      le64_to_cpu((__force __le64)dsmas->dpa_length) - 1;

	/* Keyed by handle so DSLBIS subtables can look up their DSMAS */
	rc = xa_insert(dsmas_xa, dent->handle, dent, GFP_KERNEL);
	if (rc) {
		kfree(dent);
		return rc;
	}

	return 0;
}
59 
60 static void cxl_access_coordinate_set(struct access_coordinate *coord,
61 				      int access, unsigned int val)
62 {
63 	switch (access) {
64 	case ACPI_HMAT_ACCESS_LATENCY:
65 		coord->read_latency = val;
66 		coord->write_latency = val;
67 		break;
68 	case ACPI_HMAT_READ_LATENCY:
69 		coord->read_latency = val;
70 		break;
71 	case ACPI_HMAT_WRITE_LATENCY:
72 		coord->write_latency = val;
73 		break;
74 	case ACPI_HMAT_ACCESS_BANDWIDTH:
75 		coord->read_bandwidth = val;
76 		coord->write_bandwidth = val;
77 		break;
78 	case ACPI_HMAT_READ_BANDWIDTH:
79 		coord->read_bandwidth = val;
80 		break;
81 	case ACPI_HMAT_WRITE_BANDWIDTH:
82 		coord->write_bandwidth = val;
83 		break;
84 	}
85 }
86 
/*
 * Callback for cdat_table_parse(): parse one DSLBIS (Device Scoped
 * Latency and Bandwidth Information Structure) subtable and fold its
 * value into the access_coordinate of the DSMAS entry (cached in the
 * xarray passed via @arg) that it references by handle.
 *
 * Subtables that cannot be used (unknown data type, non-memory flags,
 * no matching DSMAS) are skipped with a 0 return so the walk continues.
 */
static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
			       const unsigned long end)
{
	struct acpi_cdat_header *hdr = &header->cdat;
	struct acpi_cdat_dslbis *dslbis;
	int size = sizeof(*hdr) + sizeof(*dslbis);
	struct xarray *dsmas_xa = arg;
	struct dsmas_entry *dent;
	__le64 le_base;
	__le16 le_val;
	u64 val;
	u16 len;
	int rc;

	/* DSLBIS is fixed-size; also reject a record running past the table end */
	len = le16_to_cpu((__force __le16)hdr->length);
	if (len != size || (unsigned long)hdr + len > end) {
		pr_warn("Malformed DSLBIS table length: (%u:%u)\n", size, len);
		return -EINVAL;
	}

	/* Skip common header */
	dslbis = (struct acpi_cdat_dslbis *)(hdr + 1);

	/* Skip unrecognized data type */
	if (dslbis->data_type > ACPI_HMAT_WRITE_BANDWIDTH)
		return 0;

	/* Not a memory type, skip */
	if ((dslbis->flags & ACPI_HMAT_MEMORY_HIERARCHY) != ACPI_HMAT_MEMORY)
		return 0;

	/* The DSLBIS handle names the DSMAS entry this data decorates */
	dent = xa_load(dsmas_xa, dslbis->handle);
	if (!dent) {
		pr_warn("No matching DSMAS entry for DSLBIS entry.\n");
		return 0;
	}

	/*
	 * Value = entry_base_unit * entry[0] (only the first entry is
	 * consumed). check_mul_overflow() still stores the wrapped
	 * product, so @val is initialized even on the warned path.
	 */
	le_base = (__force __le64)dslbis->entry_base_unit;
	le_val = (__force __le16)dslbis->entry[0];
	rc = check_mul_overflow(le64_to_cpu(le_base),
				le16_to_cpu(le_val), &val);
	if (rc)
		pr_warn("DSLBIS value overflowed.\n");

	cxl_access_coordinate_set(&dent->coord, dslbis->data_type, val);

	return 0;
}
135 
/*
 * Normalize a cdat_table_parse() result: a negative errno passes
 * through, zero matched subtables becomes -ENOENT, and any positive
 * match count collapses to success.
 */
static int cdat_table_parse_output(int rc)
{
	if (rc < 0)
		return rc;

	return rc == 0 ? -ENOENT : 0;
}
145 
/*
 * Walk the endpoint's cached CDAT table: first cache every DSMAS
 * subtable in @dsmas_xa, then decorate the cached entries with the
 * DSLBIS latency/bandwidth data.
 *
 * Returns 0 on success, -ENOENT when a subtable type is absent, or a
 * negative errno from the table walk.
 */
static int cxl_cdat_endpoint_process(struct cxl_port *port,
				     struct xarray *dsmas_xa)
{
	int rc;

	rc = cdat_table_parse(ACPI_CDAT_TYPE_DSMAS, cdat_dsmas_handler,
			      dsmas_xa, port->cdat.table);
	rc = cdat_table_parse_output(rc);
	if (rc)
		return rc;

	/* DSLBIS handlers look up the DSMAS entries cached above */
	rc = cdat_table_parse(ACPI_CDAT_TYPE_DSLBIS, cdat_dslbis_handler,
			      dsmas_xa, port->cdat.table);
	return cdat_table_parse_output(rc);
}
161 
/*
 * Combine the endpoint's upstream-path performance with each cached
 * DSMAS entry and ask the platform root driver for a QoS class per
 * entry: latencies accumulate along the path, bandwidth is limited by
 * the weakest link.
 *
 * Returns 0 if at least one entry obtained a QoS class, -ENOENT when
 * none did, -ENODEV/-EOPNOTSUPP when no CXL root or qos_class op is
 * available, or the error from retrieving the path coordinates.
 */
static int cxl_port_perf_data_calculate(struct cxl_port *port,
					struct xarray *dsmas_xa)
{
	struct access_coordinate c;
	struct dsmas_entry *dent;
	int valid_entries = 0;
	unsigned long index;
	int rc;

	rc = cxl_endpoint_get_perf_coordinates(port, &c);
	if (rc) {
		dev_dbg(&port->dev, "Failed to retrieve perf coordinates.\n");
		return rc;
	}

	/* Reference dropped automatically at function exit via __free() */
	struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);

	if (!cxl_root)
		return -ENODEV;

	if (!cxl_root->ops || !cxl_root->ops->qos_class)
		return -EOPNOTSUPP;

	xa_for_each(dsmas_xa, index, dent) {
		int qos_class;

		/* Total latency = device latency + path latency */
		dent->coord.read_latency = dent->coord.read_latency +
					   c.read_latency;
		dent->coord.write_latency = dent->coord.write_latency +
					    c.write_latency;
		/*
		 * Effective bandwidth is the smaller of device and path.
		 * NOTE(review): compared as signed int — assumes the
		 * bandwidth values fit in INT_MAX; confirm.
		 */
		dent->coord.read_bandwidth = min_t(int, c.read_bandwidth,
						   dent->coord.read_bandwidth);
		dent->coord.write_bandwidth = min_t(int, c.write_bandwidth,
						    dent->coord.write_bandwidth);

		dent->entries = 1;
		/* ->qos_class() returns the number of entries resolved; expect 1 */
		rc = cxl_root->ops->qos_class(cxl_root, &dent->coord, 1,
					      &qos_class);
		if (rc != 1)
			continue;

		valid_entries++;
		dent->qos_class = qos_class;
	}

	if (!valid_entries)
		return -ENOENT;

	return 0;
}
212 
/*
 * Allocate a cxl_dpa_perf snapshot of @dent and append it to @list.
 * Allocation failure is tolerated silently (best effort) — the entry
 * is simply not published.
 */
static void add_perf_entry(struct device *dev, struct dsmas_entry *dent,
			   struct list_head *list)
{
	struct cxl_dpa_perf *dpa_perf;

	dpa_perf = kzalloc(sizeof(*dpa_perf), GFP_KERNEL);
	if (!dpa_perf)
		return;

	dpa_perf->dpa_range = dent->dpa_range;
	dpa_perf->coord = dent->coord;
	dpa_perf->qos_class = dent->qos_class;
	list_add_tail(&dpa_perf->list, list);
	dev_dbg(dev,
		"DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n",
		dent->dpa_range.start, dpa_perf->qos_class,
		dent->coord.read_bandwidth, dent->coord.write_bandwidth,
		dent->coord.read_latency, dent->coord.write_latency);
}
232 
233 static void free_perf_ents(void *data)
234 {
235 	struct cxl_memdev_state *mds = data;
236 	struct cxl_dpa_perf *dpa_perf, *n;
237 	LIST_HEAD(discard);
238 
239 	list_splice_tail_init(&mds->ram_perf_list, &discard);
240 	list_splice_tail_init(&mds->pmem_perf_list, &discard);
241 	list_for_each_entry_safe(dpa_perf, n, &discard, list) {
242 		list_del(&dpa_perf->list);
243 		kfree(dpa_perf);
244 	}
245 }
246 
/*
 * Sort the cached DSMAS entries into the memdev's ram or pmem perf
 * list, depending on which partition's DPA range fully contains the
 * entry. Entries matching neither partition are only logged. A devm
 * action is registered so the published entries are freed on memdev
 * teardown.
 */
static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
				     struct xarray *dsmas_xa)
{
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
	struct device *dev = cxlds->dev;
	struct range pmem_range = {
		.start = cxlds->pmem_res.start,
		.end = cxlds->pmem_res.end,
	};
	struct range ram_range = {
		.start = cxlds->ram_res.start,
		.end = cxlds->ram_res.end,
	};
	struct dsmas_entry *dent;
	unsigned long index;

	xa_for_each(dsmas_xa, index, dent) {
		if (resource_size(&cxlds->ram_res) &&
		    range_contains(&ram_range, &dent->dpa_range))
			add_perf_entry(dev, dent, &mds->ram_perf_list);
		else if (resource_size(&cxlds->pmem_res) &&
			 range_contains(&pmem_range, &dent->dpa_range))
			add_perf_entry(dev, dent, &mds->pmem_perf_list);
		else
			dev_dbg(dev, "no partition for dsmas dpa: %#llx\n",
				dent->dpa_range.start);
	}

	/*
	 * NOTE(review): return value ignored; on registration failure
	 * the "or_reset" variant runs free_perf_ents() immediately, so
	 * the lists are emptied rather than leaked — confirm callers
	 * are fine with silently losing the entries in that case.
	 */
	devm_add_action_or_reset(&cxlds->cxlmd->dev, free_perf_ents, mds);
}
277 
278 static int match_cxlrd_qos_class(struct device *dev, void *data)
279 {
280 	int dev_qos_class = *(int *)data;
281 	struct cxl_root_decoder *cxlrd;
282 
283 	if (!is_root_decoder(dev))
284 		return 0;
285 
286 	cxlrd = to_cxl_root_decoder(dev);
287 	if (cxlrd->qos_class == CXL_QOS_CLASS_INVALID)
288 		return 0;
289 
290 	if (cxlrd->qos_class == dev_qos_class)
291 		return 1;
292 
293 	return 0;
294 }
295 
/*
 * Move every dpa_perf on @work_list whose qos_class is not advertised
 * by any root decoder under @root_port onto @discard_list.
 *
 * NOTE(review): an entry carrying CXL_QOS_CLASS_INVALID aborts the
 * whole scan via "return" rather than "continue", leaving that entry
 * and all later ones unexamined on the work list — confirm the
 * early-out is intentional.
 */
static void cxl_qos_match(struct cxl_port *root_port,
			  struct list_head *work_list,
			  struct list_head *discard_list)
{
	struct cxl_dpa_perf *dpa_perf, *n;

	list_for_each_entry_safe(dpa_perf, n, work_list, list) {
		int rc;

		if (dpa_perf->qos_class == CXL_QOS_CLASS_INVALID)
			return;

		/* rc > 0 when some root decoder matched this qos_class */
		rc = device_for_each_child(&root_port->dev,
					   (void *)&dpa_perf->qos_class,
					   match_cxlrd_qos_class);
		if (!rc)
			list_move_tail(&dpa_perf->list, discard_list);
	}
}
315 
/*
 * device_for_each_child() callback: return 1 when @dev is a root
 * decoder that has the host bridge device passed via @data among its
 * dport targets, 0 otherwise. Holds cxl_region_rwsem for reading while
 * walking the target list.
 */
static int match_cxlrd_hb(struct device *dev, void *data)
{
	struct device *host_bridge = data;
	struct cxl_switch_decoder *cxlsd;
	struct cxl_root_decoder *cxlrd;

	if (!is_root_decoder(dev))
		return 0;

	cxlrd = to_cxl_root_decoder(dev);
	cxlsd = &cxlrd->cxlsd;

	guard(rwsem_read)(&cxl_region_rwsem);
	for (int i = 0; i < cxlsd->nr_targets; i++) {
		if (host_bridge == cxlsd->target[i]->dport_dev)
			return 1;
	}

	return 0;
}
336 
337 static void discard_dpa_perf(struct list_head *list)
338 {
339 	struct cxl_dpa_perf *dpa_perf, *n;
340 
341 	list_for_each_entry_safe(dpa_perf, n, list, list) {
342 		list_del(&dpa_perf->list);
343 		kfree(dpa_perf);
344 	}
345 }
346 DEFINE_FREE(dpa_perf, struct list_head *, if (!list_empty(_T)) discard_dpa_perf(_T))
347 
/*
 * Validate the memdev's ram/pmem perf lists against the platform root
 * decoders: entries whose QoS class no root decoder advertises are
 * discarded, and everything is discarded when the device's host bridge
 * is not a target of any root decoder.
 *
 * Returns the host-bridge match result (> 0 matched, 0 not matched) or
 * -ENODEV when no CXL root is found. Entries moved to the local
 * discard list are freed by the dpa_perf __free() hook on return.
 */
static int cxl_qos_class_verify(struct cxl_memdev *cxlmd)
{
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
	LIST_HEAD(__discard);
	struct list_head *discard __free(dpa_perf) = &__discard;
	struct cxl_port *root_port;
	int rc;

	struct cxl_root *cxl_root __free(put_cxl_root) =
		find_cxl_root(cxlmd->endpoint);

	if (!cxl_root)
		return -ENODEV;

	root_port = &cxl_root->port;

	/* Check that the QTG IDs are all sane between end device and root decoders */
	cxl_qos_match(root_port, &mds->ram_perf_list, discard);
	cxl_qos_match(root_port, &mds->pmem_perf_list, discard);

	/* Check to make sure that the device's host bridge is under a root decoder */
	rc = device_for_each_child(&root_port->dev,
				   (void *)cxlmd->endpoint->host_bridge,
				   match_cxlrd_hb);
	if (!rc) {
		/* No covering root decoder: drop all remaining entries */
		list_splice_tail_init(&mds->ram_perf_list, discard);
		list_splice_tail_init(&mds->pmem_perf_list, discard);
	}

	return rc;
}
380 
381 static void discard_dsmas(struct xarray *xa)
382 {
383 	unsigned long index;
384 	void *ent;
385 
386 	xa_for_each(xa, index, ent) {
387 		xa_erase(xa, index);
388 		kfree(ent);
389 	}
390 	xa_destroy(xa);
391 }
392 DEFINE_FREE(dsmas, struct xarray *, if (_T) discard_dsmas(_T))
393 
/**
 * cxl_endpoint_parse_cdat - parse an endpoint's CDAT and set QoS classes
 * @port: endpoint cxl_port with a previously cached CDAT table
 *
 * Walks DSMAS/DSLBIS, folds in the upstream path performance, publishes
 * the resulting perf entries on the memdev's ram/pmem lists, and
 * verifies them against the root decoders. All failures are soft
 * (dev_dbg only); the dsmas __free() hook tears down the xarray and
 * its cached entries on every exit path.
 */
void cxl_endpoint_parse_cdat(struct cxl_port *port)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct xarray __dsmas_xa;
	struct xarray *dsmas_xa __free(dsmas) = &__dsmas_xa;
	int rc;

	/* Initialize before any return so the cleanup hook is always safe */
	xa_init(&__dsmas_xa);
	if (!port->cdat.table)
		return;

	rc = cxl_cdat_endpoint_process(port, dsmas_xa);
	if (rc < 0) {
		dev_dbg(&port->dev, "Failed to parse CDAT: %d\n", rc);
		return;
	}

	rc = cxl_port_perf_data_calculate(port, dsmas_xa);
	if (rc) {
		dev_dbg(&port->dev, "Failed to do perf coord calculations.\n");
		return;
	}

	cxl_memdev_set_qos_class(cxlds, dsmas_xa);
	cxl_qos_class_verify(cxlmd);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_parse_cdat, CXL);
422 
423 static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
424 			       const unsigned long end)
425 {
426 	struct acpi_cdat_sslbis *sslbis;
427 	int size = sizeof(header->cdat) + sizeof(*sslbis);
428 	struct cxl_port *port = arg;
429 	struct device *dev = &port->dev;
430 	struct acpi_cdat_sslbe *entry;
431 	int remain, entries, i;
432 	u16 len;
433 
434 	len = le16_to_cpu((__force __le16)header->cdat.length);
435 	remain = len - size;
436 	if (!remain || remain % sizeof(*entry) ||
437 	    (unsigned long)header + len > end) {
438 		dev_warn(dev, "Malformed SSLBIS table length: (%u)\n", len);
439 		return -EINVAL;
440 	}
441 
442 	/* Skip common header */
443 	sslbis = (struct acpi_cdat_sslbis *)((unsigned long)header +
444 					     sizeof(header->cdat));
445 
446 	/* Unrecognized data type, we can skip */
447 	if (sslbis->data_type > ACPI_HMAT_WRITE_BANDWIDTH)
448 		return 0;
449 
450 	entries = remain / sizeof(*entry);
451 	entry = (struct acpi_cdat_sslbe *)((unsigned long)header + sizeof(*sslbis));
452 
453 	for (i = 0; i < entries; i++) {
454 		u16 x = le16_to_cpu((__force __le16)entry->portx_id);
455 		u16 y = le16_to_cpu((__force __le16)entry->porty_id);
456 		__le64 le_base;
457 		__le16 le_val;
458 		struct cxl_dport *dport;
459 		unsigned long index;
460 		u16 dsp_id;
461 		u64 val;
462 
463 		switch (x) {
464 		case ACPI_CDAT_SSLBIS_US_PORT:
465 			dsp_id = y;
466 			break;
467 		case ACPI_CDAT_SSLBIS_ANY_PORT:
468 			switch (y) {
469 			case ACPI_CDAT_SSLBIS_US_PORT:
470 				dsp_id = x;
471 				break;
472 			case ACPI_CDAT_SSLBIS_ANY_PORT:
473 				dsp_id = ACPI_CDAT_SSLBIS_ANY_PORT;
474 				break;
475 			default:
476 				dsp_id = y;
477 				break;
478 			}
479 			break;
480 		default:
481 			dsp_id = x;
482 			break;
483 		}
484 
485 		le_base = (__force __le64)sslbis->entry_base_unit;
486 		le_val = (__force __le16)entry->latency_or_bandwidth;
487 
488 		if (check_mul_overflow(le64_to_cpu(le_base),
489 				       le16_to_cpu(le_val), &val))
490 			dev_warn(dev, "SSLBIS value overflowed!\n");
491 
492 		xa_for_each(&port->dports, index, dport) {
493 			if (dsp_id == ACPI_CDAT_SSLBIS_ANY_PORT ||
494 			    dsp_id == dport->port_id)
495 				cxl_access_coordinate_set(&dport->sw_coord,
496 							  sslbis->data_type,
497 							  val);
498 		}
499 
500 		entry++;
501 	}
502 
503 	return 0;
504 }
505 
506 void cxl_switch_parse_cdat(struct cxl_port *port)
507 {
508 	int rc;
509 
510 	if (!port->cdat.table)
511 		return;
512 
513 	rc = cdat_table_parse(ACPI_CDAT_TYPE_SSLBIS, cdat_sslbis_handler,
514 			      port, port->cdat.table);
515 	rc = cdat_table_parse_output(rc);
516 	if (rc)
517 		dev_dbg(&port->dev, "Failed to parse SSLBIS: %d\n", rc);
518 }
519 EXPORT_SYMBOL_NS_GPL(cxl_switch_parse_cdat, CXL);
520 
521 MODULE_IMPORT_NS(CXL);
522