xref: /linux/drivers/cxl/core/hdm.c (revision 01ecadbe09b6c685de413ada8ba6688e9467c4b3)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2022 Intel Corporation. All rights reserved. */
3 #include <linux/seq_file.h>
4 #include <linux/device.h>
5 #include <linux/delay.h>
6 
7 #include "cxlmem.h"
8 #include "core.h"
9 
10 /**
11  * DOC: cxl core hdm
12  *
13  * Compute Express Link Host Managed Device Memory, starting with the
14  * CXL 2.0 specification, is managed by an array of HDM Decoder register
15  * instances per CXL port and per CXL endpoint. Define common helpers
16  * for enumerating these registers and capabilities.
17  */
18 
19 DECLARE_RWSEM(cxl_dpa_rwsem);
20 
21 static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
22 			   int *target_map)
23 {
24 	int rc;
25 
26 	rc = cxl_decoder_add_locked(cxld, target_map);
27 	if (rc) {
28 		put_device(&cxld->dev);
29 		dev_err(&port->dev, "Failed to add decoder\n");
30 		return rc;
31 	}
32 
33 	rc = cxl_decoder_autoremove(&port->dev, cxld);
34 	if (rc)
35 		return rc;
36 
37 	dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));
38 
39 	return 0;
40 }
41 
42 /*
43  * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure)
44  * single ported host-bridges need not publish a decoder capability when a
45  * passthrough decode can be assumed, i.e. all transactions that the uport sees
46  * are claimed and passed to the single dport. Disable the range until the first
47  * CXL region is enumerated / activated.
48  */
49 int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
50 {
51 	struct cxl_switch_decoder *cxlsd;
52 	struct cxl_dport *dport = NULL;
53 	int single_port_map[1];
54 	unsigned long index;
55 	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
56 
57 	/*
58 	 * Capability checks are moot for passthrough decoders, support
59 	 * any and all possibilities.
60 	 */
61 	cxlhdm->interleave_mask = ~0U;
62 	cxlhdm->iw_cap_mask = ~0UL;
63 
64 	cxlsd = cxl_switch_decoder_alloc(port, 1);
65 	if (IS_ERR(cxlsd))
66 		return PTR_ERR(cxlsd);
67 
68 	device_lock_assert(&port->dev);
69 
70 	xa_for_each(&port->dports, index, dport)
71 		break;
72 	single_port_map[0] = dport->port_id;
73 
74 	return add_hdm_decoder(port, &cxlsd->cxld, single_port_map);
75 }
76 EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, "CXL");
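/*
 * Editorial sketch (not part of this file): one plausible way a port
 * driver could pair devm_cxl_setup_hdm() with the passthrough helper
 * above. The function name and the port->nr_dports check are
 * illustrative assumptions; see drivers/cxl/port.c for the real caller.
 */
#if 0
static int example_switch_port_probe(struct cxl_port *port)
{
	struct cxl_hdm *cxlhdm;

	cxlhdm = devm_cxl_setup_hdm(port, NULL);
	if (!IS_ERR(cxlhdm))
		return devm_cxl_enumerate_decoders(cxlhdm, NULL);

	/* -ENODEV == no HDM capability; a single dport implies passthrough */
	if (PTR_ERR(cxlhdm) == -ENODEV && port->nr_dports == 1)
		return devm_cxl_add_passthrough_decoder(port);

	return PTR_ERR(cxlhdm);
}
#endif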
77 
78 static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
79 {
80 	u32 hdm_cap;
81 
82 	hdm_cap = readl(cxlhdm->regs.hdm_decoder + CXL_HDM_DECODER_CAP_OFFSET);
83 	cxlhdm->decoder_count = cxl_hdm_decoder_count(hdm_cap);
84 	cxlhdm->target_count =
85 		FIELD_GET(CXL_HDM_DECODER_TARGET_COUNT_MASK, hdm_cap);
86 	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_11_8, hdm_cap))
87 		cxlhdm->interleave_mask |= GENMASK(11, 8);
88 	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap))
89 		cxlhdm->interleave_mask |= GENMASK(14, 12);
90 	cxlhdm->iw_cap_mask = BIT(1) | BIT(2) | BIT(4) | BIT(8);
91 	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_3_6_12_WAY, hdm_cap))
92 		cxlhdm->iw_cap_mask |= BIT(3) | BIT(6) | BIT(12);
93 	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_16_WAY, hdm_cap))
94 		cxlhdm->iw_cap_mask |= BIT(16);
95 }
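/*
 * Editorial sketch (not part of this file): iw_cap_mask is keyed by
 * interleave ways, i.e. bit N set means N-way host interleave is
 * supported per the assignments above (1/2/4/8 always, 3/6/12 and 16
 * only when advertised). The helper name below is an illustrative
 * assumption.
 */
#if 0
static bool example_iw_supported(struct cxl_hdm *cxlhdm, unsigned int ways)
{
	return ways < BITS_PER_LONG && (cxlhdm->iw_cap_mask & BIT(ways));
}
#endif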
96 
97 static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
98 {
99 	struct cxl_hdm *cxlhdm;
100 	void __iomem *hdm;
101 	u32 ctrl;
102 	int i;
103 
104 	if (!info)
105 		return false;
106 
107 	cxlhdm = dev_get_drvdata(&info->port->dev);
108 	hdm = cxlhdm->regs.hdm_decoder;
109 
110 	if (!hdm)
111 		return true;
112 
113 	/*
114 	 * If HDM decoders are present and the driver is in control of
115 	 * Mem_Enable, skip DVSEC-based emulation.
116 	 */
117 	if (!info->mem_enabled)
118 		return false;
119 
120 	/*
121 	 * If any decoders are committed already, there should not be any
122 	 * emulated DVSEC decoders.
123 	 */
124 	for (i = 0; i < cxlhdm->decoder_count; i++) {
125 		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
126 		dev_dbg(&info->port->dev,
127 			"decoder%d.%d: committed: %ld base: %#x_%.8x size: %#x_%.8x\n",
128 			info->port->id, i,
129 			FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl),
130 			readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(i)),
131 			readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(i)),
132 			readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(i)),
133 			readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(i)));
134 		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
135 			return false;
136 	}
137 
138 	return true;
139 }
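/*
 * Editorial summary of the checks above: no @info -> nothing to emulate
 * (false); no mapped HDM decoder registers -> emulate (true); registers
 * present but Mem_Enable not set by platform firmware -> the driver owns
 * the decoders (false); any decoder already committed -> false;
 * otherwise emulate the DVSEC ranges (true).
 */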
140 
141 /**
142  * devm_cxl_setup_hdm - map HDM decoder component registers
143  * @port: cxl_port to map
144  * @info: cached DVSEC range register info
145  */
146 struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
147 				   struct cxl_endpoint_dvsec_info *info)
148 {
149 	struct cxl_register_map *reg_map = &port->reg_map;
150 	struct device *dev = &port->dev;
151 	struct cxl_hdm *cxlhdm;
152 	int rc;
153 
154 	cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
155 	if (!cxlhdm)
156 		return ERR_PTR(-ENOMEM);
157 	cxlhdm->port = port;
158 	dev_set_drvdata(dev, cxlhdm);
159 
160 	/* Memory devices can configure device HDM using DVSEC range regs. */
161 	if (reg_map->resource == CXL_RESOURCE_NONE) {
162 		if (!info || !info->mem_enabled) {
163 			dev_err(dev, "No component registers mapped\n");
164 			return ERR_PTR(-ENXIO);
165 		}
166 
167 		cxlhdm->decoder_count = info->ranges;
168 		return cxlhdm;
169 	}
170 
171 	if (!reg_map->component_map.hdm_decoder.valid) {
172 		dev_dbg(&port->dev, "HDM decoder registers not implemented\n");
173 		/* unique error code to indicate no HDM decoder capability */
174 		return ERR_PTR(-ENODEV);
175 	}
176 
177 	rc = cxl_map_component_regs(reg_map, &cxlhdm->regs,
178 				    BIT(CXL_CM_CAP_CAP_ID_HDM));
179 	if (rc) {
180 		dev_err(dev, "Failed to map HDM capability.\n");
181 		return ERR_PTR(rc);
182 	}
183 
184 	parse_hdm_decoder_caps(cxlhdm);
185 	if (cxlhdm->decoder_count == 0) {
186 		dev_err(dev, "Spec violation. Caps invalid\n");
187 		return ERR_PTR(-ENXIO);
188 	}
189 
190 	/*
191 	 * Now that the hdm capability is parsed, decide if range
192 	 * register emulation is needed and fixup cxlhdm accordingly.
193 	 */
194 	if (should_emulate_decoders(info)) {
195 		dev_dbg(dev, "Fallback map %d range register%s\n", info->ranges,
196 			info->ranges > 1 ? "s" : "");
197 		cxlhdm->decoder_count = info->ranges;
198 	}
199 
200 	return cxlhdm;
201 }
202 EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, "CXL");
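/*
 * Editorial summary of the results above: ERR_PTR(-ENODEV) specifically
 * means "no HDM decoder capability" so callers may fall back to a
 * passthrough decoder, ERR_PTR(-ENXIO) covers missing or invalid
 * registers, and on success cxlhdm->decoder_count may reflect DVSEC
 * range registers rather than HDM decoders when emulation is selected.
 */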
203 
204 static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth)
205 {
206 	unsigned long long start = r->start, end = r->end;
207 
208 	seq_printf(file, "%*s%08llx-%08llx : %s\n", depth * 2, "", start, end,
209 		   r->name);
210 }
211 
212 void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
213 {
214 	struct resource *p1, *p2;
215 
216 	guard(rwsem_read)(&cxl_dpa_rwsem);
217 	for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
218 		__cxl_dpa_debug(file, p1, 0);
219 		for (p2 = p1->child; p2; p2 = p2->sibling)
220 			__cxl_dpa_debug(file, p2, 1);
221 	}
222 }
223 EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, "CXL");
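/*
 * Editorial example: given the format string above, a hypothetical
 * device with a 1GB "ram" partition holding one endpoint decoder
 * allocation and an unallocated "pmem" partition would render roughly
 * as (names and sizes are made up):
 *
 * 00000000-3fffffff : ram
 *   00000000-0fffffff : decoder3.0
 * 40000000-7fffffff : pmem
 */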
224 
225 /* See request_skip() kernel-doc */
226 static resource_size_t __adjust_skip(struct cxl_dev_state *cxlds,
227 				     const resource_size_t skip_base,
228 				     const resource_size_t skip_len,
229 				     const char *requester)
230 {
231 	const resource_size_t skip_end = skip_base + skip_len - 1;
232 
233 	for (int i = 0; i < cxlds->nr_partitions; i++) {
234 		const struct resource *part_res = &cxlds->part[i].res;
235 		resource_size_t adjust_start, adjust_end, size;
236 
237 		adjust_start = max(skip_base, part_res->start);
238 		adjust_end = min(skip_end, part_res->end);
239 
240 		if (adjust_end < adjust_start)
241 			continue;
242 
243 		size = adjust_end - adjust_start + 1;
244 
245 		if (!requester)
246 			__release_region(&cxlds->dpa_res, adjust_start, size);
247 		else if (!__request_region(&cxlds->dpa_res, adjust_start, size,
248 					   requester, 0))
249 			return adjust_start - skip_base;
250 	}
251 
252 	return skip_len;
253 }
254 #define release_skip(c, b, l) __adjust_skip((c), (b), (l), NULL)
255 
256 /*
257  * Must be called in a context that synchronizes against this decoder's
258  * port ->remove() callback (like an endpoint decoder sysfs attribute)
259  */
260 static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
261 {
262 	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
263 	struct cxl_port *port = cxled_to_port(cxled);
264 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
265 	struct resource *res = cxled->dpa_res;
266 	resource_size_t skip_start;
267 
268 	lockdep_assert_held_write(&cxl_dpa_rwsem);
269 
270 	/* save @skip_start, before @res is released */
271 	skip_start = res->start - cxled->skip;
272 	__release_region(&cxlds->dpa_res, res->start, resource_size(res));
273 	if (cxled->skip)
274 		release_skip(cxlds, skip_start, cxled->skip);
275 	cxled->skip = 0;
276 	cxled->dpa_res = NULL;
277 	put_device(&cxled->cxld.dev);
278 	port->hdm_end--;
279 }
280 
281 static void cxl_dpa_release(void *cxled)
282 {
283 	guard(rwsem_write)(&cxl_dpa_rwsem);
284 	__cxl_dpa_release(cxled);
285 }
286 
287 /*
288  * Must be called from context that will not race port device
289  * unregistration, like decoder sysfs attribute methods
290  */
291 static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
292 {
293 	struct cxl_port *port = cxled_to_port(cxled);
294 
295 	lockdep_assert_held_write(&cxl_dpa_rwsem);
296 	devm_remove_action(&port->dev, cxl_dpa_release, cxled);
297 	__cxl_dpa_release(cxled);
298 }
299 
300 /**
301  * request_skip() - Track DPA 'skip' in @cxlds->dpa_res resource tree
302  * @cxlds: CXL.mem device context that parents @cxled
303  * @cxled: Endpoint decoder establishing new allocation that skips lower DPA
304  * @skip_base: DPA < start of new DPA allocation (DPAnew)
305  * @skip_len: @skip_base + @skip_len == DPAnew
306  *
307  * DPA 'skip' arises from out-of-sequence DPA allocation events relative
308  * to free capacity across multiple partitions. It is a wasteful event
309  * as usable DPA gets thrown away, but if a deployment has, for example,
310  * a dual RAM+PMEM device, wants to use PMEM, and has unallocated RAM
311  * DPA, the free RAM DPA must be sacrificed to start allocating PMEM.
312  * See third "Implementation Note" in CXL 3.1 8.2.4.19.13 "Decoder
313  * Protection" for more details.
314  *
315  * A 'skip' always covers the last allocated DPA in a previous partition
316  * to the start of the current partition to allocate.  Allocations never
317  * start in the middle of a partition, and allocations are always
318  * de-allocated in reverse order (see cxl_dpa_free(), or natural devm
319  * unwind order from forced in-order allocation).
320  *
321  * If @cxlds->nr_partitions was guaranteed to be <= 2 then the 'skip'
322  * would always be contained to a single partition. Given
323  * @cxlds->nr_partitions may be > 2 it results in cases where the 'skip'
324  * might span "tail capacity of partition[0], all of partition[1], ...,
325  * all of partition[N-1]" to support allocating from partition[N]. That
326  * in turn interacts with the partition 'struct resource' boundaries
327  * within @cxlds->dpa_res whereby 'skip' requests need to be divided by
328  * partition. I.e. this is a quirk of using a 'struct resource' tree to
329  * detect range conflicts while also tracking partition boundaries in
330  * @cxlds->dpa_res.
331  */
332 static int request_skip(struct cxl_dev_state *cxlds,
333 			struct cxl_endpoint_decoder *cxled,
334 			const resource_size_t skip_base,
335 			const resource_size_t skip_len)
336 {
337 	resource_size_t skipped = __adjust_skip(cxlds, skip_base, skip_len,
338 						dev_name(&cxled->cxld.dev));
339 
340 	if (skipped == skip_len)
341 		return 0;
342 
343 	dev_dbg(cxlds->dev,
344 		"%s: failed to reserve skipped space (%pa %pa %pa)\n",
345 		dev_name(&cxled->cxld.dev), &skip_base, &skip_len, &skipped);
346 
347 	release_skip(cxlds, skip_base, skipped);
348 
349 	return -EBUSY;
350 }
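/*
 * Editorial example for the request_skip() kernel-doc (all numbers are
 * made up): with partition[0] "ram" spanning DPA 0x0-0xfffffff and
 * empty, and partition[1] "pmem" starting at 0x10000000, the first pmem
 * allocation at 0x10000000 carries skip_base = 0x0 and skip_len =
 * 0x10000000. __adjust_skip() clips that request to each partition it
 * intersects, so a single skip reservation lands inside "ram"; a skip
 * crossing more partitions would be split into one reservation each.
 */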
351 
352 static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
353 			     resource_size_t base, resource_size_t len,
354 			     resource_size_t skipped)
355 {
356 	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
357 	struct cxl_port *port = cxled_to_port(cxled);
358 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
359 	struct device *dev = &port->dev;
360 	struct resource *res;
361 	int rc;
362 
363 	lockdep_assert_held_write(&cxl_dpa_rwsem);
364 
365 	if (!len) {
366 		dev_warn(dev, "decoder%d.%d: empty reservation attempted\n",
367 			 port->id, cxled->cxld.id);
368 		return -EINVAL;
369 	}
370 
371 	if (cxled->dpa_res) {
372 		dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n",
373 			port->id, cxled->cxld.id, cxled->dpa_res);
374 		return -EBUSY;
375 	}
376 
377 	if (port->hdm_end + 1 != cxled->cxld.id) {
378 		/*
379 		 * Assumes alloc and commit order is always in hardware instance
380 		 * order per expectations from 8.2.5.12.20 Committing Decoder
381 		 * Programming that enforce decoder[m] committed before
382 		 * decoder[m+1] commit start.
383 		 */
384 		dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id,
385 			cxled->cxld.id, port->id, port->hdm_end + 1);
386 		return -EBUSY;
387 	}
388 
389 	if (skipped) {
390 		rc = request_skip(cxlds, cxled, base - skipped, skipped);
391 		if (rc)
392 			return rc;
393 	}
394 	res = __request_region(&cxlds->dpa_res, base, len,
395 			       dev_name(&cxled->cxld.dev), 0);
396 	if (!res) {
397 		dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n",
398 			port->id, cxled->cxld.id);
399 		if (skipped)
400 			release_skip(cxlds, base - skipped, skipped);
401 		return -EBUSY;
402 	}
403 	cxled->dpa_res = res;
404 	cxled->skip = skipped;
405 
406 	/*
407 	 * When allocating new capacity, ->part is already set; when
408 	 * discovering decoder settings at initial enumeration, ->part
409 	 * is not set.
410 	 */
411 	if (cxled->part < 0)
412 		for (int i = 0; i < cxlds->nr_partitions; i++)
413 			if (resource_contains(&cxlds->part[i].res, res)) {
414 				cxled->part = i;
415 				break;
416 			}
417 
418 	if (cxled->part < 0)
419 		dev_warn(dev, "decoder%d.%d: %pr does not map any partition\n",
420 			 port->id, cxled->cxld.id, res);
421 
422 	port->hdm_end++;
423 	get_device(&cxled->cxld.dev);
424 	return 0;
425 }
426 
427 static int add_dpa_res(struct device *dev, struct resource *parent,
428 		       struct resource *res, resource_size_t start,
429 		       resource_size_t size, const char *type)
430 {
431 	int rc;
432 
433 	*res = (struct resource) {
434 		.name = type,
435 		.start = start,
436 		.end =  start + size - 1,
437 		.flags = IORESOURCE_MEM,
438 	};
439 	if (resource_size(res) == 0) {
440 		dev_dbg(dev, "DPA(%s): no capacity\n", res->name);
441 		return 0;
442 	}
443 	rc = request_resource(parent, res);
444 	if (rc) {
445 		dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name,
446 			res, rc);
447 		return rc;
448 	}
449 
450 	dev_dbg(dev, "DPA(%s): %pr\n", res->name, res);
451 
452 	return 0;
453 }
454 
455 static const char *cxl_mode_name(enum cxl_partition_mode mode)
456 {
457 	switch (mode) {
458 	case CXL_PARTMODE_RAM:
459 		return "ram";
460 	case CXL_PARTMODE_PMEM:
461 		return "pmem";
462 	default:
463 		return "";
464 	};
465 }
466 
467 /* if this fails the caller must destroy @cxlds, there is no recovery */
468 int cxl_dpa_setup(struct cxl_dev_state *cxlds, const struct cxl_dpa_info *info)
469 {
470 	struct device *dev = cxlds->dev;
471 
472 	guard(rwsem_write)(&cxl_dpa_rwsem);
473 
474 	if (cxlds->nr_partitions)
475 		return -EBUSY;
476 
477 	if (!info->size || !info->nr_partitions) {
478 		cxlds->dpa_res = DEFINE_RES_MEM(0, 0);
479 		cxlds->nr_partitions = 0;
480 		return 0;
481 	}
482 
483 	cxlds->dpa_res = DEFINE_RES_MEM(0, info->size);
484 
485 	for (int i = 0; i < info->nr_partitions; i++) {
486 		const struct cxl_dpa_part_info *part = &info->part[i];
487 		int rc;
488 
489 		cxlds->part[i].perf.qos_class = CXL_QOS_CLASS_INVALID;
490 		cxlds->part[i].mode = part->mode;
491 
492 		/* Require ordered + contiguous partitions */
493 		if (i) {
494 			const struct cxl_dpa_part_info *prev = &info->part[i - 1];
495 
496 			if (prev->range.end + 1 != part->range.start)
497 				return -EINVAL;
498 		}
499 		rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->part[i].res,
500 				 part->range.start, range_len(&part->range),
501 				 cxl_mode_name(part->mode));
502 		if (rc)
503 			return rc;
504 		cxlds->nr_partitions++;
505 	}
506 
507 	return 0;
508 }
509 EXPORT_SYMBOL_GPL(cxl_dpa_setup);
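/*
 * Editorial example (hypothetical sizes): a cxl_dpa_info describing 2GB
 * total with part[0] = { .range = { 0x0, 0x3fffffff }, .mode =
 * CXL_PARTMODE_RAM } and part[1] = { .range = { 0x40000000, 0x7fffffff },
 * .mode = CXL_PARTMODE_PMEM } yields dpa_res spanning 0x0-0x7fffffff
 * with child resources named "ram" and "pmem". Out-of-order or
 * non-contiguous partition ranges are rejected with -EINVAL above.
 */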
510 
511 int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
512 				resource_size_t base, resource_size_t len,
513 				resource_size_t skipped)
514 {
515 	struct cxl_port *port = cxled_to_port(cxled);
516 	int rc;
517 
518 	down_write(&cxl_dpa_rwsem);
519 	rc = __cxl_dpa_reserve(cxled, base, len, skipped);
520 	up_write(&cxl_dpa_rwsem);
521 
522 	if (rc)
523 		return rc;
524 
525 	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
526 }
527 EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, "CXL");
528 
529 resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
530 {
531 	guard(rwsem_read)(&cxl_dpa_rwsem);
532 	if (cxled->dpa_res)
533 		return resource_size(cxled->dpa_res);
534 
535 	return 0;
536 }
537 
538 resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
539 {
540 	resource_size_t base = -1;
541 
542 	lockdep_assert_held(&cxl_dpa_rwsem);
543 	if (cxled->dpa_res)
544 		base = cxled->dpa_res->start;
545 
546 	return base;
547 }
548 
549 int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
550 {
551 	struct cxl_port *port = cxled_to_port(cxled);
552 	struct device *dev = &cxled->cxld.dev;
553 
554 	guard(rwsem_write)(&cxl_dpa_rwsem);
555 	if (!cxled->dpa_res)
556 		return 0;
557 	if (cxled->cxld.region) {
558 		dev_dbg(dev, "decoder assigned to: %s\n",
559 			dev_name(&cxled->cxld.region->dev));
560 		return -EBUSY;
561 	}
562 	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
563 		dev_dbg(dev, "decoder enabled\n");
564 		return -EBUSY;
565 	}
566 	if (cxled->cxld.id != port->hdm_end) {
567 		dev_dbg(dev, "expected decoder%d.%d\n", port->id,
568 			port->hdm_end);
569 		return -EBUSY;
570 	}
571 
572 	devm_cxl_dpa_release(cxled);
573 	return 0;
574 }
575 
576 int cxl_dpa_set_part(struct cxl_endpoint_decoder *cxled,
577 		     enum cxl_partition_mode mode)
578 {
579 	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
580 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
581 	struct device *dev = &cxled->cxld.dev;
582 	int part;
583 
584 	guard(rwsem_write)(&cxl_dpa_rwsem);
585 	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE)
586 		return -EBUSY;
587 
588 	for (part = 0; part < cxlds->nr_partitions; part++)
589 		if (cxlds->part[part].mode == mode)
590 			break;
591 
592 	if (part >= cxlds->nr_partitions) {
593 		dev_dbg(dev, "unsupported mode: %d\n", mode);
594 		return -EINVAL;
595 	}
596 
597 	if (!resource_size(&cxlds->part[part].res)) {
598 		dev_dbg(dev, "no available capacity for mode: %d\n", mode);
599 		return -ENXIO;
600 	}
601 
602 	cxled->part = part;
603 	return 0;
604 }
605 
606 static int __cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
607 {
608 	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
609 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
610 	struct device *dev = &cxled->cxld.dev;
611 	struct resource *res, *prev = NULL;
612 	resource_size_t start, avail, skip, skip_start;
613 	struct resource *p, *last;
614 	int part;
615 
616 	guard(rwsem_write)(&cxl_dpa_rwsem);
617 	if (cxled->cxld.region) {
618 		dev_dbg(dev, "decoder attached to %s\n",
619 			dev_name(&cxled->cxld.region->dev));
620 		return -EBUSY;
621 	}
622 
623 	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
624 		dev_dbg(dev, "decoder enabled\n");
625 		return -EBUSY;
626 	}
627 
628 	part = cxled->part;
629 	if (part < 0) {
630 		dev_dbg(dev, "partition not set\n");
631 		return -EBUSY;
632 	}
633 
634 	res = &cxlds->part[part].res;
635 	for (p = res->child, last = NULL; p; p = p->sibling)
636 		last = p;
637 	if (last)
638 		start = last->end + 1;
639 	else
640 		start = res->start;
641 
642 	/*
643 	 * To allocate at partition N, a skip needs to be calculated for all
644 	 * unallocated space at lower partition indices.
645 	 *
646 	 * If a partition has any allocations, the search can end because a
647 	 * previous cxl_dpa_alloc() invocation is assumed to have accounted for
648 	 * all previous partitions.
649 	 */
650 	skip_start = CXL_RESOURCE_NONE;
651 	for (int i = part; i; i--) {
652 		prev = &cxlds->part[i - 1].res;
653 		for (p = prev->child, last = NULL; p; p = p->sibling)
654 			last = p;
655 		if (last) {
656 			skip_start = last->end + 1;
657 			break;
658 		}
659 		skip_start = prev->start;
660 	}
661 
662 	avail = res->end - start + 1;
663 	if (skip_start == CXL_RESOURCE_NONE)
664 		skip = 0;
665 	else
666 		skip = res->start - skip_start;
667 
668 	if (size > avail) {
669 		dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
670 			res->name, &avail);
671 		return -ENOSPC;
672 	}
673 
674 	return __cxl_dpa_reserve(cxled, start, size, skip);
675 }
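/*
 * Editorial example for the skip calculation above (hypothetical
 * layout): with cxled->part == 2 and no allocations anywhere, the loop
 * walks partitions 1 then 0 without finding children, leaving
 * skip_start == part[0].res.start, so the skip spans all of partitions
 * 0 and 1. If partition 1 already had an allocation ending at E, the
 * loop breaks with skip_start == E + 1 and only the tail of partition 1
 * is skipped.
 */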
676 
677 int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
678 {
679 	struct cxl_port *port = cxled_to_port(cxled);
680 	int rc;
681 
682 	rc = __cxl_dpa_alloc(cxled, size);
683 	if (rc)
684 		return rc;
685 
686 	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
687 }
688 
689 static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
690 {
691 	u16 eig;
692 	u8 eiw;
693 
694 	/*
695 	 * Input validation ensures these warns never fire, but otherwise
696 	 * suppress uninitialized variable usage warnings.
697 	 */
698 	if (WARN_ONCE(ways_to_eiw(cxld->interleave_ways, &eiw),
699 		      "invalid interleave_ways: %d\n", cxld->interleave_ways))
700 		return;
701 	if (WARN_ONCE(granularity_to_eig(cxld->interleave_granularity, &eig),
702 		      "invalid interleave_granularity: %d\n",
703 		      cxld->interleave_granularity))
704 		return;
705 
706 	u32p_replace_bits(ctrl, eig, CXL_HDM_DECODER0_CTRL_IG_MASK);
707 	u32p_replace_bits(ctrl, eiw, CXL_HDM_DECODER0_CTRL_IW_MASK);
708 	*ctrl |= CXL_HDM_DECODER0_CTRL_COMMIT;
709 }
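/*
 * Editorial note (encodings per this editor's reading of the CXL spec,
 * treat as an assumption): ways_to_eiw()/granularity_to_eig() translate
 * to the register encodings, e.g. interleave_ways = 4 -> eiw = 2 and
 * interleave_granularity = 256 -> eig = 0. Input validation upstream of
 * this helper is what keeps the WARN_ONCE() checks above from firing.
 */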
710 
711 static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
712 {
713 	u32p_replace_bits(ctrl,
714 			  !!(cxld->target_type == CXL_DECODER_HOSTONLYMEM),
715 			  CXL_HDM_DECODER0_CTRL_HOSTONLY);
716 }
717 
718 static void cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
719 {
720 	struct cxl_dport **t = &cxlsd->target[0];
721 	int ways = cxlsd->cxld.interleave_ways;
722 
723 	*tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
724 	if (ways > 1)
725 		*tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
726 	if (ways > 2)
727 		*tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id);
728 	if (ways > 3)
729 		*tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id);
730 	if (ways > 4)
731 		*tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id);
732 	if (ways > 5)
733 		*tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id);
734 	if (ways > 6)
735 		*tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
736 	if (ways > 7)
737 		*tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);
738 }
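/*
 * Editorial example: a 4-way switch decoder whose targets have dport
 * port_ids {2, 5, 9, 12} (made-up values) packs into the low 32 bits of
 * the target list register as 0x0c090502: 8 bits per target with
 * target[0] in bits 7:0.
 */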
739 
740 /*
741  * Per CXL 2.0 8.2.5.12.20 Committing Decoder Programming, hardware must set
742  * committed or error within 10ms, but just be generous with 20ms to account for
743  * clock skew and other marginal behavior
744  */
745 #define COMMIT_TIMEOUT_MS 20
746 static int cxld_await_commit(void __iomem *hdm, int id)
747 {
748 	u32 ctrl;
749 	int i;
750 
751 	for (i = 0; i < COMMIT_TIMEOUT_MS; i++) {
752 		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
753 		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMIT_ERROR, ctrl)) {
754 			ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
755 			writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
756 			return -EIO;
757 		}
758 		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
759 			return 0;
760 		fsleep(1000);
761 	}
762 
763 	return -ETIMEDOUT;
764 }
765 
766 static int cxl_decoder_commit(struct cxl_decoder *cxld)
767 {
768 	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
769 	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
770 	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
771 	int id = cxld->id, rc;
772 	u64 base, size;
773 	u32 ctrl;
774 
775 	if (cxld->flags & CXL_DECODER_F_ENABLE)
776 		return 0;
777 
778 	if (cxl_num_decoders_committed(port) != id) {
779 		dev_dbg(&port->dev,
780 			"%s: out of order commit, expected decoder%d.%d\n",
781 			dev_name(&cxld->dev), port->id,
782 			cxl_num_decoders_committed(port));
783 		return -EBUSY;
784 	}
785 
786 	/*
787 	 * For endpoint decoders hosted on CXL memory devices that
788 	 * support the sanitize operation, make sure sanitize is not in-flight.
789 	 */
790 	if (is_endpoint_decoder(&cxld->dev)) {
791 		struct cxl_endpoint_decoder *cxled =
792 			to_cxl_endpoint_decoder(&cxld->dev);
793 		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
794 		struct cxl_memdev_state *mds =
795 			to_cxl_memdev_state(cxlmd->cxlds);
796 
797 		if (mds && mds->security.sanitize_active) {
798 			dev_dbg(&cxlmd->dev,
799 				"attempted to commit %s during sanitize\n",
800 				dev_name(&cxld->dev));
801 			return -EBUSY;
802 		}
803 	}
804 
805 	down_read(&cxl_dpa_rwsem);
806 	/* common decoder settings */
807 	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
808 	cxld_set_interleave(cxld, &ctrl);
809 	cxld_set_type(cxld, &ctrl);
810 	base = cxld->hpa_range.start;
811 	size = range_len(&cxld->hpa_range);
812 
813 	writel(upper_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
814 	writel(lower_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
815 	writel(upper_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
816 	writel(lower_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
817 
818 	if (is_switch_decoder(&cxld->dev)) {
819 		struct cxl_switch_decoder *cxlsd =
820 			to_cxl_switch_decoder(&cxld->dev);
821 		void __iomem *tl_hi = hdm + CXL_HDM_DECODER0_TL_HIGH(id);
822 		void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
823 		u64 targets;
824 
825 		cxlsd_set_targets(cxlsd, &targets);
826 		writel(upper_32_bits(targets), tl_hi);
827 		writel(lower_32_bits(targets), tl_lo);
828 	} else {
829 		struct cxl_endpoint_decoder *cxled =
830 			to_cxl_endpoint_decoder(&cxld->dev);
831 		void __iomem *sk_hi = hdm + CXL_HDM_DECODER0_SKIP_HIGH(id);
832 		void __iomem *sk_lo = hdm + CXL_HDM_DECODER0_SKIP_LOW(id);
833 
834 		writel(upper_32_bits(cxled->skip), sk_hi);
835 		writel(lower_32_bits(cxled->skip), sk_lo);
836 	}
837 
838 	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
839 	up_read(&cxl_dpa_rwsem);
840 
841 	port->commit_end++;
842 	rc = cxld_await_commit(hdm, cxld->id);
843 	if (rc) {
844 		dev_dbg(&port->dev, "%s: error %d committing decoder\n",
845 			dev_name(&cxld->dev), rc);
846 		cxld->reset(cxld);
847 		return rc;
848 	}
849 	cxld->flags |= CXL_DECODER_F_ENABLE;
850 
851 	return 0;
852 }
853 
854 static int commit_reap(struct device *dev, void *data)
855 {
856 	struct cxl_port *port = to_cxl_port(dev->parent);
857 	struct cxl_decoder *cxld;
858 
859 	if (!is_switch_decoder(dev) && !is_endpoint_decoder(dev))
860 		return 0;
861 
862 	cxld = to_cxl_decoder(dev);
863 	if (port->commit_end == cxld->id &&
864 	    ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
865 		port->commit_end--;
866 		dev_dbg(&port->dev, "reap: %s commit_end: %d\n",
867 			dev_name(&cxld->dev), port->commit_end);
868 	}
869 
870 	return 0;
871 }
872 
873 void cxl_port_commit_reap(struct cxl_decoder *cxld)
874 {
875 	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
876 
877 	lockdep_assert_held_write(&cxl_region_rwsem);
878 
879 	/*
880 	 * Once the highest committed decoder is disabled, free any other
881 	 * decoders that remained pinned due to out-of-order release.
882 	 */
883 	port->commit_end--;
884 	dev_dbg(&port->dev, "reap: %s commit_end: %d\n", dev_name(&cxld->dev),
885 		port->commit_end);
886 	device_for_each_child_reverse_from(&port->dev, &cxld->dev, NULL,
887 					   commit_reap);
888 }
889 EXPORT_SYMBOL_NS_GPL(cxl_port_commit_reap, "CXL");
890 
891 static void cxl_decoder_reset(struct cxl_decoder *cxld)
892 {
893 	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
894 	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
895 	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
896 	int id = cxld->id;
897 	u32 ctrl;
898 
899 	if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
900 		return;
901 
902 	if (port->commit_end == id)
903 		cxl_port_commit_reap(cxld);
904 	else
905 		dev_dbg(&port->dev,
906 			"%s: out of order reset, expected decoder%d.%d\n",
907 			dev_name(&cxld->dev), port->id, port->commit_end);
908 
909 	down_read(&cxl_dpa_rwsem);
910 	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
911 	ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
912 	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
913 
914 	writel(0, hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
915 	writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
916 	writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
917 	writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
918 	up_read(&cxl_dpa_rwsem);
919 
920 	cxld->flags &= ~CXL_DECODER_F_ENABLE;
921 
922 	/* Userspace is now responsible for reconfiguring this decoder */
923 	if (is_endpoint_decoder(&cxld->dev)) {
924 		struct cxl_endpoint_decoder *cxled;
925 
926 		cxled = to_cxl_endpoint_decoder(&cxld->dev);
927 		cxled->state = CXL_DECODER_STATE_MANUAL;
928 	}
929 }
930 
931 static int cxl_setup_hdm_decoder_from_dvsec(
932 	struct cxl_port *port, struct cxl_decoder *cxld, u64 *dpa_base,
933 	int which, struct cxl_endpoint_dvsec_info *info)
934 {
935 	struct cxl_endpoint_decoder *cxled;
936 	u64 len;
937 	int rc;
938 
939 	if (!is_cxl_endpoint(port))
940 		return -EOPNOTSUPP;
941 
942 	cxled = to_cxl_endpoint_decoder(&cxld->dev);
943 	len = range_len(&info->dvsec_range[which]);
944 	if (!len)
945 		return -ENOENT;
946 
947 	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
948 	cxld->commit = NULL;
949 	cxld->reset = NULL;
950 	cxld->hpa_range = info->dvsec_range[which];
951 
952 	/*
953 	 * Set the emulated decoder as locked pending additional support to
954 	 * change the range registers at run time.
955 	 */
956 	cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK;
957 	port->commit_end = cxld->id;
958 
959 	rc = devm_cxl_dpa_reserve(cxled, *dpa_base, len, 0);
960 	if (rc) {
961 		dev_err(&port->dev,
962 			"decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
963 			port->id, cxld->id, *dpa_base, *dpa_base + len - 1, rc);
964 		return rc;
965 	}
966 	*dpa_base += len;
967 	cxled->state = CXL_DECODER_STATE_AUTO;
968 
969 	return 0;
970 }
971 
972 static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
973 			    int *target_map, void __iomem *hdm, int which,
974 			    u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
975 {
976 	struct cxl_endpoint_decoder *cxled = NULL;
977 	u64 size, base, skip, dpa_size, lo, hi;
978 	bool committed;
979 	u32 remainder;
980 	int i, rc;
981 	u32 ctrl;
982 	union {
983 		u64 value;
984 		unsigned char target_id[8];
985 	} target_list;
986 
987 	if (should_emulate_decoders(info))
988 		return cxl_setup_hdm_decoder_from_dvsec(port, cxld, dpa_base,
989 							which, info);
990 
991 	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
992 	lo = readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
993 	hi = readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(which));
994 	base = (hi << 32) + lo;
995 	lo = readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
996 	hi = readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(which));
997 	size = (hi << 32) + lo;
998 	committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED);
999 	cxld->commit = cxl_decoder_commit;
1000 	cxld->reset = cxl_decoder_reset;
1001 
1002 	if (!committed)
1003 		size = 0;
1004 	if (base == U64_MAX || size == U64_MAX) {
1005 		dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n",
1006 			 port->id, cxld->id);
1007 		return -ENXIO;
1008 	}
1009 
1010 	if (info)
1011 		cxled = to_cxl_endpoint_decoder(&cxld->dev);
1012 	cxld->hpa_range = (struct range) {
1013 		.start = base,
1014 		.end = base + size - 1,
1015 	};
1016 
1017 	/* decoders are enabled if committed */
1018 	if (committed) {
1019 		cxld->flags |= CXL_DECODER_F_ENABLE;
1020 		if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK)
1021 			cxld->flags |= CXL_DECODER_F_LOCK;
1022 		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_HOSTONLY, ctrl))
1023 			cxld->target_type = CXL_DECODER_HOSTONLYMEM;
1024 		else
1025 			cxld->target_type = CXL_DECODER_DEVMEM;
1026 
1027 		guard(rwsem_write)(&cxl_region_rwsem);
1028 		if (cxld->id != cxl_num_decoders_committed(port)) {
1029 			dev_warn(&port->dev,
1030 				 "decoder%d.%d: Committed out of order\n",
1031 				 port->id, cxld->id);
1032 			return -ENXIO;
1033 		}
1034 
1035 		if (size == 0) {
1036 			dev_warn(&port->dev,
1037 				 "decoder%d.%d: Committed with zero size\n",
1038 				 port->id, cxld->id);
1039 			return -ENXIO;
1040 		}
1041 		port->commit_end = cxld->id;
1042 	} else {
1043 		if (cxled) {
1044 			struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
1045 			struct cxl_dev_state *cxlds = cxlmd->cxlds;
1046 
1047 			/*
1048 			 * Default by devtype until a device arrives that needs
1049 			 * more precision.
1050 			 */
1051 			if (cxlds->type == CXL_DEVTYPE_CLASSMEM)
1052 				cxld->target_type = CXL_DECODER_HOSTONLYMEM;
1053 			else
1054 				cxld->target_type = CXL_DECODER_DEVMEM;
1055 		} else {
1056 			/* To be overridden by region type at commit time */
1057 			cxld->target_type = CXL_DECODER_HOSTONLYMEM;
1058 		}
1059 
1060 		if (!FIELD_GET(CXL_HDM_DECODER0_CTRL_HOSTONLY, ctrl) &&
1061 		    cxld->target_type == CXL_DECODER_HOSTONLYMEM) {
1062 			ctrl |= CXL_HDM_DECODER0_CTRL_HOSTONLY;
1063 			writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
1064 		}
1065 	}
1066 	rc = eiw_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl),
1067 			  &cxld->interleave_ways);
1068 	if (rc) {
1069 		dev_warn(&port->dev,
1070 			 "decoder%d.%d: Invalid interleave ways (ctrl: %#x)\n",
1071 			 port->id, cxld->id, ctrl);
1072 		return rc;
1073 	}
1074 	rc = eig_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl),
1075 				 &cxld->interleave_granularity);
1076 	if (rc) {
1077 		dev_warn(&port->dev,
1078 			 "decoder%d.%d: Invalid interleave granularity (ctrl: %#x)\n",
1079 			 port->id, cxld->id, ctrl);
1080 		return rc;
1081 	}
1082 
1083 	dev_dbg(&port->dev, "decoder%d.%d: range: %#llx-%#llx iw: %d ig: %d\n",
1084 		port->id, cxld->id, cxld->hpa_range.start, cxld->hpa_range.end,
1085 		cxld->interleave_ways, cxld->interleave_granularity);
1086 
1087 	if (!cxled) {
1088 		lo = readl(hdm + CXL_HDM_DECODER0_TL_LOW(which));
1089 		hi = readl(hdm + CXL_HDM_DECODER0_TL_HIGH(which));
1090 		target_list.value = (hi << 32) + lo;
1091 		for (i = 0; i < cxld->interleave_ways; i++)
1092 			target_map[i] = target_list.target_id[i];
1093 
1094 		return 0;
1095 	}
1096 
1097 	if (!committed)
1098 		return 0;
1099 
1100 	dpa_size = div_u64_rem(size, cxld->interleave_ways, &remainder);
1101 	if (remainder) {
1102 		dev_err(&port->dev,
1103 			"decoder%d.%d: invalid committed configuration size: %#llx ways: %d\n",
1104 			port->id, cxld->id, size, cxld->interleave_ways);
1105 		return -ENXIO;
1106 	}
1107 	lo = readl(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
1108 	hi = readl(hdm + CXL_HDM_DECODER0_SKIP_HIGH(which));
1109 	skip = (hi << 32) + lo;
1110 	rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
1111 	if (rc) {
1112 		dev_err(&port->dev,
1113 			"decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
1114 			port->id, cxld->id, *dpa_base,
1115 			*dpa_base + dpa_size + skip - 1, rc);
1116 		return rc;
1117 	}
1118 	*dpa_base += dpa_size + skip;
1119 
1120 	cxled->state = CXL_DECODER_STATE_AUTO;
1121 
1122 	return 0;
1123 }
1124 
1125 static void cxl_settle_decoders(struct cxl_hdm *cxlhdm)
1126 {
1127 	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
1128 	int committed, i;
1129 	u32 ctrl;
1130 
1131 	if (!hdm)
1132 		return;
1133 
1134 	/*
1135 	 * Since the register resource was recently claimed via request_region()
1136 	 * be careful about trusting the "not-committed" status until the commit
1137 	 * timeout has elapsed.  The commit timeout is 10ms (CXL 2.0
1138 	 * 8.2.5.12.20), but double it to be tolerant of any clock skew between
1139 	 * host and target.
1140 	 */
1141 	for (i = 0, committed = 0; i < cxlhdm->decoder_count; i++) {
1142 		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
1143 		if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED)
1144 			committed++;
1145 	}
1146 
1147 	/* ensure that future checks of committed can be trusted */
1148 	if (committed != cxlhdm->decoder_count)
1149 		msleep(20);
1150 }
1151 
1152 /**
1153  * devm_cxl_enumerate_decoders - add decoder objects per HDM register set
1154  * @cxlhdm: HDM decoder context populated by devm_cxl_setup_hdm()
1155  * @info: cached DVSEC range register info
1156  */
1157 int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
1158 				struct cxl_endpoint_dvsec_info *info)
1159 {
1160 	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
1161 	struct cxl_port *port = cxlhdm->port;
1162 	int i;
1163 	u64 dpa_base = 0;
1164 
1165 	cxl_settle_decoders(cxlhdm);
1166 
1167 	for (i = 0; i < cxlhdm->decoder_count; i++) {
1168 		int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
1169 		int rc, target_count = cxlhdm->target_count;
1170 		struct cxl_decoder *cxld;
1171 
1172 		if (is_cxl_endpoint(port)) {
1173 			struct cxl_endpoint_decoder *cxled;
1174 
1175 			cxled = cxl_endpoint_decoder_alloc(port);
1176 			if (IS_ERR(cxled)) {
1177 				dev_warn(&port->dev,
1178 					 "Failed to allocate decoder%d.%d\n",
1179 					 port->id, i);
1180 				return PTR_ERR(cxled);
1181 			}
1182 			cxld = &cxled->cxld;
1183 		} else {
1184 			struct cxl_switch_decoder *cxlsd;
1185 
1186 			cxlsd = cxl_switch_decoder_alloc(port, target_count);
1187 			if (IS_ERR(cxlsd)) {
1188 				dev_warn(&port->dev,
1189 					 "Failed to allocate decoder%d.%d\n",
1190 					 port->id, i);
1191 				return PTR_ERR(cxlsd);
1192 			}
1193 			cxld = &cxlsd->cxld;
1194 		}
1195 
1196 		rc = init_hdm_decoder(port, cxld, target_map, hdm, i,
1197 				      &dpa_base, info);
1198 		if (rc) {
1199 			dev_warn(&port->dev,
1200 				 "Failed to initialize decoder%d.%d\n",
1201 				 port->id, i);
1202 			put_device(&cxld->dev);
1203 			return rc;
1204 		}
1205 		rc = add_hdm_decoder(port, cxld, target_map);
1206 		if (rc) {
1207 			dev_warn(&port->dev,
1208 				 "Failed to add decoder%d.%d\n", port->id, i);
1209 			return rc;
1210 		}
1211 	}
1212 
1213 	return 0;
1214 }
1215 EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, "CXL");
1216