// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/delay.h>

#include "cxlmem.h"
#include "core.h"

/**
 * DOC: cxl core hdm
 *
 * Compute Express Link Host Managed Device Memory, starting with the
 * CXL 2.0 specification, is managed by an array of HDM Decoder register
 * instances per CXL port and per CXL endpoint. Define common helpers
 * for enumerating these registers and capabilities.
 */

struct cxl_rwsem cxl_rwsem = {
	.region = __RWSEM_INITIALIZER(cxl_rwsem.region),
	.dpa = __RWSEM_INITIALIZER(cxl_rwsem.dpa),
};

static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld)
{
	int rc;

	rc = cxl_decoder_add_locked(cxld);
	if (rc) {
		put_device(&cxld->dev);
		dev_err(&port->dev, "Failed to add decoder\n");
		return rc;
	}

	rc = cxl_decoder_autoremove(&port->dev, cxld);
	if (rc)
		return rc;

	dev_dbg(port->uport_dev, "%s added to %s\n",
		dev_name(&cxld->dev), dev_name(&port->dev));

	return 0;
}

/*
 * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure)
 * single ported host-bridges need not publish a decoder capability when a
 * passthrough decode can be assumed, i.e. all transactions that the uport sees
 * are claimed and passed to the single dport. Disable the range until the first
 * CXL region is enumerated / activated.
 */
static int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
	struct cxl_switch_decoder *cxlsd;
	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);

	/*
	 * Capability checks are moot for passthrough decoders, support
	 * any and all possibilities.
	 */
	cxlhdm->interleave_mask = ~0U;
	cxlhdm->iw_cap_mask = ~0UL;

	cxlsd = cxl_switch_decoder_alloc(port, 1);
	if (IS_ERR(cxlsd))
		return PTR_ERR(cxlsd);

	device_lock_assert(&port->dev);

	return add_hdm_decoder(port, &cxlsd->cxld);
}

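/*
 * Cache the decoder count, target count, and interleave capabilities
 * advertised by the port's HDM Decoder Capability register.
 */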
static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
{
	u32 hdm_cap;

	hdm_cap = readl(cxlhdm->regs.hdm_decoder + CXL_HDM_DECODER_CAP_OFFSET);
	cxlhdm->decoder_count = cxl_hdm_decoder_count(hdm_cap);
	cxlhdm->target_count =
		FIELD_GET(CXL_HDM_DECODER_TARGET_COUNT_MASK, hdm_cap);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_11_8, hdm_cap))
		cxlhdm->interleave_mask |= GENMASK(11, 8);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap))
		cxlhdm->interleave_mask |= GENMASK(14, 12);
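	/*
	 * Each set bit in @iw_cap_mask represents a supported interleave
	 * ways value: 1, 2, 4, and 8 way are assumed as the baseline,
	 * while 3/6/12 and 16 way are gated on the capability bits below.
	 */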
	cxlhdm->iw_cap_mask = BIT(1) | BIT(2) | BIT(4) | BIT(8);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_3_6_12_WAY, hdm_cap))
		cxlhdm->iw_cap_mask |= BIT(3) | BIT(6) | BIT(12);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_16_WAY, hdm_cap))
		cxlhdm->iw_cap_mask |= BIT(16);
}

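/*
 * Decide whether decoder enumeration for @info->port should fall back
 * to DVSEC range register emulation instead of trusting the (possibly
 * absent or uncommitted) HDM decoder registers.
 */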
static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_hdm *cxlhdm;
	void __iomem *hdm;
	u32 ctrl;

	if (!info)
		return false;

	cxlhdm = dev_get_drvdata(&info->port->dev);
	hdm = cxlhdm->regs.hdm_decoder;

	if (!hdm)
		return true;

	/*
	 * If HDM decoders are present and the driver is in control of
	 * Mem_Enable, skip DVSEC based emulation
	 */
	if (!info->mem_enabled)
		return false;

	/*
	 * If HDM decoders are globally enabled, do not fall back to DVSEC
	 * range emulation. Zeroed decoder registers after region teardown
	 * do not imply absence of HDM capability.
	 *
	 * Falling back to DVSEC here would treat the decoder as AUTO and
	 * may incorrectly latch default interleave settings.
	 */
	ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);
	if (ctrl & CXL_HDM_DECODER_ENABLE)
		return false;

	return true;
}

/**
 * devm_cxl_setup_hdm - map HDM decoder component registers
 * @port: cxl_port to map
 * @info: cached DVSEC range register info
 */
static struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
					  struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_register_map *reg_map = &port->reg_map;
	struct device *dev = &port->dev;
	struct cxl_hdm *cxlhdm;
	int rc;

	cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
	if (!cxlhdm)
		return ERR_PTR(-ENOMEM);
	cxlhdm->port = port;
	dev_set_drvdata(dev, cxlhdm);

	/* Memory devices can configure device HDM using DVSEC range regs. */
	if (reg_map->resource == CXL_RESOURCE_NONE) {
		if (!info || !info->mem_enabled) {
			dev_err(dev, "No component registers mapped\n");
			return ERR_PTR(-ENXIO);
		}

		cxlhdm->decoder_count = info->ranges;
		return cxlhdm;
	}

	if (!reg_map->component_map.hdm_decoder.valid) {
		dev_dbg(&port->dev, "HDM decoder registers not implemented\n");
		/* unique error code to indicate no HDM decoder capability */
		return ERR_PTR(-ENODEV);
	}

	rc = cxl_map_component_regs(reg_map, &cxlhdm->regs,
				    BIT(CXL_CM_CAP_CAP_ID_HDM));
	if (rc) {
		dev_err(dev, "Failed to map HDM capability.\n");
		return ERR_PTR(rc);
	}

	parse_hdm_decoder_caps(cxlhdm);
	if (cxlhdm->decoder_count == 0) {
		dev_err(dev, "Spec violation. Caps invalid\n");
		return ERR_PTR(-ENXIO);
	}

	/*
	 * Now that the hdm capability is parsed, decide if range
	 * register emulation is needed and fixup cxlhdm accordingly.
	 */
	if (should_emulate_decoders(info)) {
		dev_dbg(dev, "Fallback map %d range register%s\n", info->ranges,
			str_plural(info->ranges));
		cxlhdm->decoder_count = info->ranges;
	}

	return cxlhdm;
}

static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth)
{
	unsigned long long start = r->start, end = r->end;

	seq_printf(file, "%*s%08llx-%08llx : %s\n", depth * 2, "", start, end,
		   r->name);
}

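/*
 * Walk the two-level DPA resource tree (partition resources and the
 * per-decoder allocations beneath them) and emit each entry for debug
 * output.
 */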
void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
{
	struct resource *p1, *p2;

	guard(rwsem_read)(&cxl_rwsem.dpa);
	for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
		__cxl_dpa_debug(file, p1, 0);
		for (p2 = p1->child; p2; p2 = p2->sibling)
			__cxl_dpa_debug(file, p2, 1);
	}
}
EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, "CXL");

/* See request_skip() kernel-doc */
static resource_size_t __adjust_skip(struct cxl_dev_state *cxlds,
				     const resource_size_t skip_base,
				     const resource_size_t skip_len,
				     const char *requester)
{
	const resource_size_t skip_end = skip_base + skip_len - 1;

	for (int i = 0; i < cxlds->nr_partitions; i++) {
		const struct resource *part_res = &cxlds->part[i].res;
		resource_size_t adjust_start, adjust_end, size;

		adjust_start = max(skip_base, part_res->start);
		adjust_end = min(skip_end, part_res->end);

		if (adjust_end < adjust_start)
			continue;

		size = adjust_end - adjust_start + 1;

		if (!requester)
			__release_region(&cxlds->dpa_res, adjust_start, size);
		else if (!__request_region(&cxlds->dpa_res, adjust_start, size,
					   requester, 0))
			return adjust_start - skip_base;
	}

	return skip_len;
}
#define release_skip(c, b, l) __adjust_skip((c), (b), (l), NULL)

/*
 * Must be called in a context that synchronizes against this decoder's
 * port ->remove() callback (like an endpoint decoder sysfs attribute)
 */
static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct resource *res = cxled->dpa_res;
	resource_size_t skip_start;

	lockdep_assert_held_write(&cxl_rwsem.dpa);

	/* save @skip_start, before @res is released */
	skip_start = res->start - cxled->skip;
	__release_region(&cxlds->dpa_res, res->start, resource_size(res));
	if (cxled->skip)
		release_skip(cxlds, skip_start, cxled->skip);
	cxled->skip = 0;
	cxled->dpa_res = NULL;
	put_device(&cxled->cxld.dev);
	port->hdm_end--;
}

static void cxl_dpa_release(void *cxled)
{
	guard(rwsem_write)(&cxl_rwsem.dpa);
	__cxl_dpa_release(cxled);
}

/*
 * Must be called from context that will not race port device
 * unregistration, like decoder sysfs attribute methods
 */
static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *port = cxled_to_port(cxled);

	lockdep_assert_held_write(&cxl_rwsem.dpa);
	devm_remove_action(&port->dev, cxl_dpa_release, cxled);
	__cxl_dpa_release(cxled);
}

/**
 * request_skip() - Track DPA 'skip' in @cxlds->dpa_res resource tree
 * @cxlds: CXL.mem device context that parents @cxled
 * @cxled: Endpoint decoder establishing new allocation that skips lower DPA
 * @skip_base: DPA < start of new DPA allocation (DPAnew)
 * @skip_len: @skip_base + @skip_len == DPAnew
 *
 * DPA 'skip' arises from out-of-sequence DPA allocation events relative
 * to free capacity across multiple partitions. It is a wasteful event
 * as usable DPA gets thrown away, but if a deployment has, for example,
 * a dual RAM+PMEM device, wants to use PMEM, and has unallocated RAM
 * DPA, the free RAM DPA must be sacrificed to start allocating PMEM.
 * See third "Implementation Note" in CXL 3.1 8.2.4.19.13 "Decoder
 * Protection" for more details.
 *
 * A 'skip' always covers the last allocated DPA in a previous partition
 * to the start of the current partition to allocate. Allocations never
 * start in the middle of a partition, and allocations are always
 * de-allocated in reverse order (see cxl_dpa_free(), or natural devm
 * unwind order from forced in-order allocation).
 *
 * If @cxlds->nr_partitions was guaranteed to be <= 2 then the 'skip'
 * would always be contained to a single partition. Given
 * @cxlds->nr_partitions may be > 2 it results in cases where the 'skip'
 * might span "tail capacity of partition[0], all of partition[1], ...,
 * all of partition[N-1]" to support allocating from partition[N]. That
 * in turn interacts with the partition 'struct resource' boundaries
 * within @cxlds->dpa_res whereby 'skip' requests need to be divided by
 * partition. I.e. this is a quirk of using a 'struct resource' tree to
 * detect range conflicts while also tracking partition boundaries in
 * @cxlds->dpa_res.
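 *
 * As an illustration (hypothetical layout, not from a real device):
 * with partition[0] = ram [0, 4G) of which only [0, 1G) is allocated,
 * and partition[1] = pmem [4G, 8G), the first pmem allocation carries
 * @skip_base = 1G and @skip_len = 3G. __adjust_skip() then clamps that
 * request to the ram partition resource so the reservation lands under
 * the correct parent in @cxlds->dpa_res.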
 */
static int request_skip(struct cxl_dev_state *cxlds,
			struct cxl_endpoint_decoder *cxled,
			const resource_size_t skip_base,
			const resource_size_t skip_len)
{
	resource_size_t skipped = __adjust_skip(cxlds, skip_base, skip_len,
						dev_name(&cxled->cxld.dev));

	if (skipped == skip_len)
		return 0;

	dev_dbg(cxlds->dev,
		"%s: failed to reserve skipped space (%pa %pa %pa)\n",
		dev_name(&cxled->cxld.dev), &skip_base, &skip_len, &skipped);

	release_skip(cxlds, skip_base, skipped);

	return -EBUSY;
}

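/*
 * Reserve @len bytes of device physical address space at @base for
 * @cxled, after first accounting for @skipped bytes of unallocatable
 * space below @base. Holds a device reference and bumps @port->hdm_end
 * on success; __cxl_dpa_release() undoes both.
 */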
static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
			     resource_size_t base, resource_size_t len,
			     resource_size_t skipped)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &port->dev;
	struct resource *res;
	int rc;

	lockdep_assert_held_write(&cxl_rwsem.dpa);

	if (!len) {
		dev_warn(dev, "decoder%d.%d: empty reservation attempted\n",
			 port->id, cxled->cxld.id);
		return -EINVAL;
	}

	if (cxled->dpa_res) {
		dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n",
			port->id, cxled->cxld.id, cxled->dpa_res);
		return -EBUSY;
	}

	if (port->hdm_end + 1 != cxled->cxld.id) {
		/*
		 * Assumes alloc and commit order is always in hardware
		 * instance order per expectations from 8.2.5.12.20 Committing
		 * Decoder Programming, which requires decoder[m] to be
		 * committed before the decoder[m+1] commit can start.
		 */
		dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id,
			cxled->cxld.id, port->id, port->hdm_end + 1);
		return -EBUSY;
	}

	if (skipped) {
		rc = request_skip(cxlds, cxled, base - skipped, skipped);
		if (rc)
			return rc;
	}
	res = __request_region(&cxlds->dpa_res, base, len,
			       dev_name(&cxled->cxld.dev), 0);
	if (!res) {
		dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n",
			port->id, cxled->cxld.id);
		if (skipped)
			release_skip(cxlds, base - skipped, skipped);
		return -EBUSY;
	}
	cxled->dpa_res = res;
	cxled->skip = skipped;

	/*
	 * When allocating new capacity, ->part is already set, when
	 * discovering decoder settings at initial enumeration, ->part
	 * is not set.
	 */
	if (cxled->part < 0)
		for (int i = 0; i < cxlds->nr_partitions; i++)
			if (resource_contains(&cxlds->part[i].res, res)) {
				cxled->part = i;
				break;
			}

	if (cxled->part < 0)
		dev_warn(dev, "decoder%d.%d: %pr does not map any partition\n",
			 port->id, cxled->cxld.id, res);

	port->hdm_end++;
	get_device(&cxled->cxld.dev);
	return 0;
}

static int add_dpa_res(struct device *dev, struct resource *parent,
		       struct resource *res, resource_size_t start,
		       resource_size_t size, const char *type)
{
	int rc;

	*res = (struct resource) {
		.name = type,
		.start = start,
		.end = start + size - 1,
		.flags = IORESOURCE_MEM,
	};
	if (resource_size(res) == 0) {
		dev_dbg(dev, "DPA(%s): no capacity\n", res->name);
		return 0;
	}
	rc = request_resource(parent, res);
	if (rc) {
		dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name,
			res, rc);
		return rc;
	}

	dev_dbg(dev, "DPA(%s): %pr\n", res->name, res);

	return 0;
}

static const char *cxl_mode_name(enum cxl_partition_mode mode)
{
	switch (mode) {
	case CXL_PARTMODE_RAM:
		return "ram";
	case CXL_PARTMODE_PMEM:
		return "pmem";
	default:
		return "";
	}
}

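/*
 * Establish the device's DPA resource tree from @info. As a sketch of
 * the expected result (hypothetical sizes): a device with 16G of ram
 * at DPA 0 followed by 16G of pmem yields @cxlds->dpa_res spanning
 * [0, 32G) with two child resources, "ram" [0, 16G) and "pmem"
 * [16G, 32G), each tracked in @cxlds->part[].res.
 */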
/* if this fails the caller must destroy @cxlds, there is no recovery */
int cxl_dpa_setup(struct cxl_dev_state *cxlds, const struct cxl_dpa_info *info)
{
	struct device *dev = cxlds->dev;

	guard(rwsem_write)(&cxl_rwsem.dpa);

	if (cxlds->nr_partitions)
		return -EBUSY;

	if (!info->size || !info->nr_partitions) {
		cxlds->dpa_res = DEFINE_RES_MEM(0, 0);
		cxlds->nr_partitions = 0;
		return 0;
	}

	cxlds->dpa_res = DEFINE_RES_MEM(0, info->size);

	for (int i = 0; i < info->nr_partitions; i++) {
		const struct cxl_dpa_part_info *part = &info->part[i];
		int rc;

		cxlds->part[i].perf.qos_class = CXL_QOS_CLASS_INVALID;
		cxlds->part[i].mode = part->mode;

		/* Require ordered + contiguous partitions */
		if (i) {
			const struct cxl_dpa_part_info *prev = &info->part[i - 1];

			if (prev->range.end + 1 != part->range.start)
				return -EINVAL;
		}
		rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->part[i].res,
				 part->range.start, range_len(&part->range),
				 cxl_mode_name(part->mode));
		if (rc)
			return rc;
		cxlds->nr_partitions++;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cxl_dpa_setup);

int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
			 resource_size_t base, resource_size_t len,
			 resource_size_t skipped)
{
	struct cxl_port *port = cxled_to_port(cxled);
	int rc;

	scoped_guard(rwsem_write, &cxl_rwsem.dpa)
		rc = __cxl_dpa_reserve(cxled, base, len, skipped);

	if (rc)
		return rc;

	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, "CXL");

resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
{
	guard(rwsem_read)(&cxl_rwsem.dpa);
	if (cxled->dpa_res)
		return resource_size(cxled->dpa_res);

	return 0;
}

resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
{
	resource_size_t base = RESOURCE_SIZE_MAX;

	lockdep_assert_held(&cxl_rwsem.dpa);
	if (cxled->dpa_res)
		base = cxled->dpa_res->start;

	return base;
}

bool cxl_resource_contains_addr(const struct resource *res, const resource_size_t addr)
{
	struct resource _addr = DEFINE_RES_MEM(addr, 1);

	return resource_contains(res, &_addr);
}

int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *port = cxled_to_port(cxled);
	struct device *dev = &cxled->cxld.dev;

	guard(rwsem_write)(&cxl_rwsem.dpa);
	if (!cxled->dpa_res)
		return 0;
	if (cxled->cxld.region) {
		dev_dbg(dev, "decoder assigned to: %s\n",
			dev_name(&cxled->cxld.region->dev));
		return -EBUSY;
	}
	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		dev_dbg(dev, "decoder enabled\n");
		return -EBUSY;
	}
	if (cxled->cxld.id != port->hdm_end) {
		dev_dbg(dev, "expected decoder%d.%d\n", port->id,
			port->hdm_end);
		return -EBUSY;
	}

	devm_cxl_dpa_release(cxled);
	return 0;
}

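/*
 * Select which partition (by @mode) subsequent DPA allocations for
 * @cxled will be carved from. Fails if the decoder is already enabled,
 * the mode is not offered by the device, or the partition has no
 * capacity.
 */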
int cxl_dpa_set_part(struct cxl_endpoint_decoder *cxled,
		     enum cxl_partition_mode mode)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &cxled->cxld.dev;
	int part;

	guard(rwsem_write)(&cxl_rwsem.dpa);
	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE)
		return -EBUSY;

	for (part = 0; part < cxlds->nr_partitions; part++)
		if (cxlds->part[part].mode == mode)
			break;

	if (part >= cxlds->nr_partitions) {
		dev_dbg(dev, "unsupported mode: %d\n", mode);
		return -EINVAL;
	}

	if (!resource_size(&cxlds->part[part].res)) {
		dev_dbg(dev, "no available capacity for mode: %d\n", mode);
		return -ENXIO;
	}

	cxled->part = part;
	return 0;
}

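/*
 * Allocate @size bytes from the tail of the decoder's selected
 * partition and compute the 'skip' needed to cover any unallocated
 * space in lower partitions (see request_skip()).
 */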
static int __cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &cxled->cxld.dev;
	struct resource *res, *prev = NULL;
	resource_size_t start, avail, skip, skip_start;
	struct resource *p, *last;
	int part;

	guard(rwsem_write)(&cxl_rwsem.dpa);
	if (cxled->cxld.region) {
		dev_dbg(dev, "decoder attached to %s\n",
			dev_name(&cxled->cxld.region->dev));
		return -EBUSY;
	}

	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		dev_dbg(dev, "decoder enabled\n");
		return -EBUSY;
	}

	part = cxled->part;
	if (part < 0) {
		dev_dbg(dev, "partition not set\n");
		return -EBUSY;
	}

	res = &cxlds->part[part].res;
	for (p = res->child, last = NULL; p; p = p->sibling)
		last = p;
	if (last)
		start = last->end + 1;
	else
		start = res->start;

	/*
	 * To allocate at partition N, a skip needs to be calculated for all
	 * unallocated space at lower partition indices.
	 *
	 * If a partition has any allocations, the search can end because a
	 * previous cxl_dpa_alloc() invocation is assumed to have accounted for
	 * all previous partitions.
	 */
	skip_start = CXL_RESOURCE_NONE;
	for (int i = part; i; i--) {
		prev = &cxlds->part[i - 1].res;
		for (p = prev->child, last = NULL; p; p = p->sibling)
			last = p;
		if (last) {
			skip_start = last->end + 1;
			break;
		}
		skip_start = prev->start;
	}

	avail = res->end - start + 1;
	if (skip_start == CXL_RESOURCE_NONE)
		skip = 0;
	else
		skip = res->start - skip_start;

	if (size > avail) {
		dev_dbg(dev, "%llu exceeds available %s capacity: %llu\n", size,
			res->name, (u64)avail);
		return -ENOSPC;
	}

	return __cxl_dpa_reserve(cxled, start, size, skip);
}

int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size)
{
	struct cxl_port *port = cxled_to_port(cxled);
	int rc;

	rc = __cxl_dpa_alloc(cxled, size);
	if (rc)
		return rc;

	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}

static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
{
	u16 eig;
	u8 eiw;

	/*
	 * Input validation ensures these warns never fire, but otherwise
	 * suppress uninitialized variable usage warnings.
	 */
	if (WARN_ONCE(ways_to_eiw(cxld->interleave_ways, &eiw),
		      "invalid interleave_ways: %d\n", cxld->interleave_ways))
		return;
	if (WARN_ONCE(granularity_to_eig(cxld->interleave_granularity, &eig),
		      "invalid interleave_granularity: %d\n",
		      cxld->interleave_granularity))
		return;

	u32p_replace_bits(ctrl, eig, CXL_HDM_DECODER0_CTRL_IG_MASK);
	u32p_replace_bits(ctrl, eiw, CXL_HDM_DECODER0_CTRL_IW_MASK);
	*ctrl |= CXL_HDM_DECODER0_CTRL_COMMIT;
}

static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
{
	u32p_replace_bits(ctrl,
			  !!(cxld->target_type == CXL_DECODER_HOSTONLYMEM),
			  CXL_HDM_DECODER0_CTRL_HOSTONLY);
}

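/*
 * Pack up to 8 dport port_ids into the Target List register layout,
 * one byte per interleave target in interleave order.
 */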
static void cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
{
	struct cxl_dport **t = &cxlsd->target[0];
	int ways = cxlsd->cxld.interleave_ways;

	*tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
	if (ways > 1)
		*tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
	if (ways > 2)
		*tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id);
	if (ways > 3)
		*tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id);
	if (ways > 4)
		*tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id);
	if (ways > 5)
		*tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id);
	if (ways > 6)
		*tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
	if (ways > 7)
		*tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);
}

/*
 * Per CXL 2.0 8.2.5.12.20 Committing Decoder Programming, hardware must set
 * committed or error within 10ms, but just be generous with 20ms to account for
 * clock skew and other marginal behavior
 */
#define COMMIT_TIMEOUT_MS 20
static int cxld_await_commit(void __iomem *hdm, int id)
{
	u32 ctrl;
	int i;

	for (i = 0; i < COMMIT_TIMEOUT_MS; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMIT_ERROR, ctrl)) {
			ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
			writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
			return -EIO;
		}
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
			return 0;
		fsleep(1000);
	}

	return -ETIMEDOUT;
}

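/*
 * Program the decoder's base, size, interleave, and type registers,
 * plus the target list (switch decoders) or DPA skip (endpoint
 * decoders). The final ctrl write carries the COMMIT bit set by
 * cxld_set_interleave(); the caller then polls for completion with
 * cxld_await_commit().
 */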
static void setup_hw_decoder(struct cxl_decoder *cxld, void __iomem *hdm)
{
	int id = cxld->id;
	u64 base, size;
	u32 ctrl;

	/* common decoder settings */
	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
	cxld_set_interleave(cxld, &ctrl);
	cxld_set_type(cxld, &ctrl);
	base = cxld->hpa_range.start;
	size = range_len(&cxld->hpa_range);

	writel(upper_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
	writel(lower_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
	writel(upper_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
	writel(lower_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));

	if (is_switch_decoder(&cxld->dev)) {
		struct cxl_switch_decoder *cxlsd =
			to_cxl_switch_decoder(&cxld->dev);
		void __iomem *tl_hi = hdm + CXL_HDM_DECODER0_TL_HIGH(id);
		void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
		u64 targets;

		cxlsd_set_targets(cxlsd, &targets);
		writel(upper_32_bits(targets), tl_hi);
		writel(lower_32_bits(targets), tl_lo);
	} else {
		struct cxl_endpoint_decoder *cxled =
			to_cxl_endpoint_decoder(&cxld->dev);
		void __iomem *sk_hi = hdm + CXL_HDM_DECODER0_SKIP_HIGH(id);
		void __iomem *sk_lo = hdm + CXL_HDM_DECODER0_SKIP_LOW(id);

		writel(upper_32_bits(cxled->skip), sk_hi);
		writel(lower_32_bits(cxled->skip), sk_lo);
	}

	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
}

static int cxl_decoder_commit(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int id = cxld->id, rc;

	if (cxld->flags & CXL_DECODER_F_ENABLE)
		return 0;

	if (cxl_num_decoders_committed(port) != id) {
		dev_dbg(&port->dev,
			"%s: out of order commit, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id,
			cxl_num_decoders_committed(port));
		return -EBUSY;
	}

	/*
	 * For endpoint decoders hosted on CXL memory devices that
	 * support the sanitize operation, make sure sanitize is not in-flight.
	 */
	if (is_endpoint_decoder(&cxld->dev)) {
		struct cxl_endpoint_decoder *cxled =
			to_cxl_endpoint_decoder(&cxld->dev);
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
		struct cxl_memdev_state *mds =
			to_cxl_memdev_state(cxlmd->cxlds);

		if (mds && mds->security.sanitize_active) {
			dev_dbg(&cxlmd->dev,
				"attempted to commit %s during sanitize\n",
				dev_name(&cxld->dev));
			return -EBUSY;
		}
	}

	scoped_guard(rwsem_read, &cxl_rwsem.dpa)
		setup_hw_decoder(cxld, hdm);

	rc = cxld_await_commit(hdm, cxld->id);
	if (rc) {
		dev_dbg(&port->dev, "%s: error %d committing decoder\n",
			dev_name(&cxld->dev), rc);
		return rc;
	}
	port->commit_end++;
	cxld->flags |= CXL_DECODER_F_ENABLE;

	return 0;
}

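/*
 * device_for_each_child_reverse_from() callback: retire @port->commit_end
 * past any decoder that is no longer enabled so that commit ordering
 * checks stay consistent after an out-of-order reset.
 */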
static int commit_reap(struct device *dev, void *data)
{
	struct cxl_port *port = to_cxl_port(dev->parent);
	struct cxl_decoder *cxld;

	if (!is_switch_decoder(dev) && !is_endpoint_decoder(dev))
		return 0;

	cxld = to_cxl_decoder(dev);
	if (port->commit_end == cxld->id &&
	    ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
		port->commit_end--;
		dev_dbg(&port->dev, "reap: %s commit_end: %d\n",
			dev_name(&cxld->dev), port->commit_end);
	}

	return 0;
}

void cxl_port_commit_reap(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);

	lockdep_assert_held_write(&cxl_rwsem.region);

	/*
	 * Once the highest committed decoder is disabled, free any other
	 * decoders that were left pinned by an out-of-order release.
	 */
	port->commit_end--;
	dev_dbg(&port->dev, "reap: %s commit_end: %d\n", dev_name(&cxld->dev),
		port->commit_end);
	device_for_each_child_reverse_from(&port->dev, &cxld->dev, NULL,
					   commit_reap);
}
EXPORT_SYMBOL_NS_GPL(cxl_port_commit_reap, "CXL");

static void cxl_decoder_reset(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int id = cxld->id;
	u32 ctrl;

	if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
		return;

	if (cxld->flags & CXL_DECODER_F_LOCK)
		return;

	if (port->commit_end == id)
		cxl_port_commit_reap(cxld);
	else
		dev_dbg(&port->dev,
			"%s: out of order reset, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id, port->commit_end);

	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
	ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));

	writel(0, hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));

	cxld->flags &= ~CXL_DECODER_F_ENABLE;

	/* Userspace is now responsible for reconfiguring this decoder */
	if (is_endpoint_decoder(&cxld->dev)) {
		struct cxl_endpoint_decoder *cxled;

		cxled = to_cxl_endpoint_decoder(&cxld->dev);
		cxled->state = CXL_DECODER_STATE_MANUAL;
	}
}

static int cxl_setup_hdm_decoder_from_dvsec(
	struct cxl_port *port, struct cxl_decoder *cxld, u64 *dpa_base,
	int which, struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_endpoint_decoder *cxled;
	u64 len;
	int rc;

	if (!is_cxl_endpoint(port))
		return -EOPNOTSUPP;

	cxled = to_cxl_endpoint_decoder(&cxld->dev);
	len = range_len(&info->dvsec_range[which]);
	if (!len)
		return -ENOENT;

	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
	cxld->commit = NULL;
	cxld->reset = NULL;
	cxld->hpa_range = info->dvsec_range[which];

	/*
	 * Set the emulated decoder as locked pending additional support to
	 * change the range registers at run time.
	 */
	cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK;
	port->commit_end = cxld->id;

	rc = devm_cxl_dpa_reserve(cxled, *dpa_base, len, 0);
	if (rc) {
		dev_err(&port->dev,
			"decoder%d.%d: Failed to reserve DPA range %#llx - %#llx: %d\n",
			port->id, cxld->id, *dpa_base, *dpa_base + len - 1, rc);
		return rc;
	}
	*dpa_base += len;
	cxled->state = CXL_DECODER_STATE_AUTO;

	return 0;
}

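/*
 * Populate @cxld with the hardware state of decoder instance @which:
 * HPA range, interleave settings, and the target list (switch) or DPA
 * reservation (endpoint). Falls back to DVSEC range register emulation
 * when should_emulate_decoders() says so.
 */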
static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
			    void __iomem *hdm, int which,
			    u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_endpoint_decoder *cxled = NULL;
	u64 size, base, skip, dpa_size, lo, hi;
	bool committed;
	u32 remainder;
	int i, rc;
	u32 ctrl;
	union {
		u64 value;
		unsigned char target_id[8];
	} target_list;

	if (should_emulate_decoders(info))
		return cxl_setup_hdm_decoder_from_dvsec(port, cxld, dpa_base,
							which, info);

	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
	lo = readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
	hi = readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(which));
	base = (hi << 32) + lo;
	lo = readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
	hi = readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(which));
	size = (hi << 32) + lo;
	committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED);
	cxld->commit = cxl_decoder_commit;
	cxld->reset = cxl_decoder_reset;

	if (!committed)
		size = 0;
	if (base == U64_MAX || size == U64_MAX) {
		dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n",
			 port->id, cxld->id);
		return -ENXIO;
	}

	if (info)
		cxled = to_cxl_endpoint_decoder(&cxld->dev);
	cxld->hpa_range = (struct range) {
		.start = base,
		.end = base + size - 1,
	};

	/* decoders are enabled if committed */
	if (committed) {
		cxld->flags |= CXL_DECODER_F_ENABLE;
		if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK)
			cxld->flags |= CXL_DECODER_F_LOCK;
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_HOSTONLY, ctrl))
			cxld->target_type = CXL_DECODER_HOSTONLYMEM;
		else
			cxld->target_type = CXL_DECODER_DEVMEM;

		guard(rwsem_write)(&cxl_rwsem.region);
		if (cxld->id != cxl_num_decoders_committed(port)) {
			dev_warn(&port->dev,
				 "decoder%d.%d: Committed out of order\n",
				 port->id, cxld->id);
			return -ENXIO;
		}

		if (size == 0) {
			dev_warn(&port->dev,
				 "decoder%d.%d: Committed with zero size\n",
				 port->id, cxld->id);
			return -ENXIO;
		}
		port->commit_end = cxld->id;
	} else {
		if (cxled) {
			struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
			struct cxl_dev_state *cxlds = cxlmd->cxlds;

			/*
			 * Default by devtype until a device arrives that needs
			 * more precision.
			 */
			if (cxlds->type == CXL_DEVTYPE_CLASSMEM)
				cxld->target_type = CXL_DECODER_HOSTONLYMEM;
			else
				cxld->target_type = CXL_DECODER_DEVMEM;
		} else {
			/* To be overridden by region type at commit time */
			cxld->target_type = CXL_DECODER_HOSTONLYMEM;
		}

		if (!FIELD_GET(CXL_HDM_DECODER0_CTRL_HOSTONLY, ctrl) &&
		    cxld->target_type == CXL_DECODER_HOSTONLYMEM) {
			ctrl |= CXL_HDM_DECODER0_CTRL_HOSTONLY;
			writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
		}
	}
	rc = eiw_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl),
			 &cxld->interleave_ways);
	if (rc) {
		dev_warn(&port->dev,
			 "decoder%d.%d: Invalid interleave ways (ctrl: %#x)\n",
			 port->id, cxld->id, ctrl);
		return rc;
	}
	rc = eig_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl),
				&cxld->interleave_granularity);
	if (rc) {
		dev_warn(&port->dev,
			 "decoder%d.%d: Invalid interleave granularity (ctrl: %#x)\n",
			 port->id, cxld->id, ctrl);
		return rc;
	}

	dev_dbg(&port->dev, "decoder%d.%d: range: %#llx-%#llx iw: %d ig: %d\n",
		port->id, cxld->id, cxld->hpa_range.start, cxld->hpa_range.end,
		cxld->interleave_ways, cxld->interleave_granularity);

	if (!cxled) {
		lo = readl(hdm + CXL_HDM_DECODER0_TL_LOW(which));
		hi = readl(hdm + CXL_HDM_DECODER0_TL_HIGH(which));
		target_list.value = (hi << 32) + lo;
		for (i = 0; i < cxld->interleave_ways; i++)
			cxld->target_map[i] = target_list.target_id[i];

		return 0;
	}

	if (!committed)
		return 0;

	dpa_size = div_u64_rem(size, cxld->interleave_ways, &remainder);
	if (remainder) {
		dev_err(&port->dev,
			"decoder%d.%d: invalid committed configuration size: %#llx ways: %d\n",
			port->id, cxld->id, size, cxld->interleave_ways);
		return -ENXIO;
	}
	lo = readl(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
	hi = readl(hdm + CXL_HDM_DECODER0_SKIP_HIGH(which));
	skip = (hi << 32) + lo;
	rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
	if (rc) {
		dev_err(&port->dev,
			"decoder%d.%d: Failed to reserve DPA range %#llx - %#llx: %d\n",
			port->id, cxld->id, *dpa_base,
			*dpa_base + dpa_size + skip - 1, rc);
		return rc;
	}
	*dpa_base += dpa_size + skip;

	cxled->state = CXL_DECODER_STATE_AUTO;

	return 0;
}

static void cxl_settle_decoders(struct cxl_hdm *cxlhdm)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int committed, i;
	u32 ctrl;

	if (!hdm)
		return;

	/*
	 * Since the register resource was recently claimed via request_region(),
	 * be careful about trusting the "not-committed" status until the commit
	 * timeout has elapsed. The commit timeout is 10ms (CXL 2.0
	 * 8.2.5.12.20), but double it to be tolerant of any clock skew between
	 * host and target.
	 */
	for (i = 0, committed = 0; i < cxlhdm->decoder_count; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
		if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED)
			committed++;
	}

	/* ensure that future checks of committed can be trusted */
	if (committed != cxlhdm->decoder_count)
		msleep(20);
}

/**
 * devm_cxl_enumerate_decoders - add decoder objects per HDM register set
 * @cxlhdm: HDM capability context returned by devm_cxl_setup_hdm()
 * @info: cached DVSEC range register info
 */
static int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
				       struct cxl_endpoint_dvsec_info *info)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	struct cxl_port *port = cxlhdm->port;
	int i;
	u64 dpa_base = 0;

	cxl_settle_decoders(cxlhdm);

	for (i = 0; i < cxlhdm->decoder_count; i++) {
		int rc, target_count = cxlhdm->target_count;
		struct cxl_decoder *cxld;

		if (is_cxl_endpoint(port)) {
			struct cxl_endpoint_decoder *cxled;

			cxled = cxl_endpoint_decoder_alloc(port);
			if (IS_ERR(cxled)) {
				dev_warn(&port->dev,
					 "Failed to allocate decoder%d.%d\n",
					 port->id, i);
				return PTR_ERR(cxled);
			}
			cxld = &cxled->cxld;
		} else {
			struct cxl_switch_decoder *cxlsd;

			cxlsd = cxl_switch_decoder_alloc(port, target_count);
			if (IS_ERR(cxlsd)) {
				dev_warn(&port->dev,
					 "Failed to allocate decoder%d.%d\n",
					 port->id, i);
				return PTR_ERR(cxlsd);
			}
			cxld = &cxlsd->cxld;
		}

		rc = init_hdm_decoder(port, cxld, hdm, i, &dpa_base, info);
		if (rc) {
			dev_warn(&port->dev,
				 "Failed to initialize decoder%d.%d\n",
				 port->id, i);
			put_device(&cxld->dev);
			return rc;
		}
		rc = add_hdm_decoder(port, cxld);
		if (rc) {
			dev_warn(&port->dev,
				 "Failed to add decoder%d.%d\n", port->id, i);
			return rc;
		}
	}

	return 0;
}

/**
 * devm_cxl_switch_port_decoders_setup - allocate and setup switch decoders
 * @port: CXL port context
 *
 * Return: 0 or -errno on error
 */
int devm_cxl_switch_port_decoders_setup(struct cxl_port *port)
{
	struct cxl_hdm *cxlhdm;

	if (is_cxl_root(port) || is_cxl_endpoint(port))
		return -EOPNOTSUPP;

	cxlhdm = devm_cxl_setup_hdm(port, NULL);
	if (!IS_ERR(cxlhdm))
		return devm_cxl_enumerate_decoders(cxlhdm, NULL);

	if (PTR_ERR(cxlhdm) != -ENODEV) {
		dev_err(&port->dev, "Failed to map HDM decoder capability\n");
		return PTR_ERR(cxlhdm);
	}

	if (cxl_port_get_possible_dports(port) == 1) {
		dev_dbg(&port->dev, "Fallback to passthrough decoder\n");
		return devm_cxl_add_passthrough_decoder(port);
	}

	dev_err(&port->dev, "HDM decoder capability not found\n");
	return -ENXIO;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_switch_port_decoders_setup, "CXL");

/**
 * devm_cxl_endpoint_decoders_setup - allocate and setup endpoint decoders
 * @port: CXL port context
 *
 * Return: 0 or -errno on error
 */
int devm_cxl_endpoint_decoders_setup(struct cxl_port *port)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
	struct cxl_endpoint_dvsec_info info = { .port = port };
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_hdm *cxlhdm;
	int rc;

	if (!is_cxl_endpoint(port))
		return -EOPNOTSUPP;

	rc = cxl_dvsec_rr_decode(cxlds, &info);
	if (rc < 0)
		return rc;

	cxlhdm = devm_cxl_setup_hdm(port, &info);
	if (IS_ERR(cxlhdm)) {
		if (PTR_ERR(cxlhdm) == -ENODEV)
			dev_err(&port->dev, "HDM decoder registers not found\n");
		return PTR_ERR(cxlhdm);
	}

	rc = cxl_hdm_decode_init(cxlds, cxlhdm, &info);
	if (rc)
		return rc;

	return devm_cxl_enumerate_decoders(cxlhdm, &info);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_endpoint_decoders_setup, "CXL");
