// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/delay.h>

#include "cxlmem.h"
#include "core.h"

/**
 * DOC: cxl core hdm
 *
 * Compute Express Link Host Managed Device Memory, starting with the
 * CXL 2.0 specification, is managed by an array of HDM Decoder register
 * instances per CXL port and per CXL endpoint. Define common helpers
 * for enumerating these registers and capabilities.
 */

struct cxl_rwsem cxl_rwsem = {
	.region = __RWSEM_INITIALIZER(cxl_rwsem.region),
	.dpa = __RWSEM_INITIALIZER(cxl_rwsem.dpa),
};

static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld)
{
	int rc;

	rc = cxl_decoder_add_locked(cxld);
	if (rc) {
		put_device(&cxld->dev);
		dev_err(&port->dev, "Failed to add decoder\n");
		return rc;
	}

	rc = cxl_decoder_autoremove(&port->dev, cxld);
	if (rc)
		return rc;

	dev_dbg(port->uport_dev, "%s added to %s\n",
		dev_name(&cxld->dev), dev_name(&port->dev));

	return 0;
}

/*
 * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure)
 * single ported host-bridges need not publish a decoder capability when a
 * passthrough decode can be assumed, i.e. all transactions that the uport sees
 * are claimed and passed to the single dport. Disable the range until the first
 * CXL region is enumerated / activated.
 */
static int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
	struct cxl_switch_decoder *cxlsd;
	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);

	/*
	 * Capability checks are moot for passthrough decoders, support
	 * any and all possibilities.
	 */
	cxlhdm->interleave_mask = ~0U;
	cxlhdm->iw_cap_mask = ~0UL;

	cxlsd = cxl_switch_decoder_alloc(port, 1);
	if (IS_ERR(cxlsd))
		return PTR_ERR(cxlsd);

	device_lock_assert(&port->dev);

	return add_hdm_decoder(port, &cxlsd->cxld);
}

static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
{
	u32 hdm_cap;

	hdm_cap = readl(cxlhdm->regs.hdm_decoder + CXL_HDM_DECODER_CAP_OFFSET);
	cxlhdm->decoder_count = cxl_hdm_decoder_count(hdm_cap);
	cxlhdm->target_count =
		FIELD_GET(CXL_HDM_DECODER_TARGET_COUNT_MASK, hdm_cap);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_11_8, hdm_cap))
		cxlhdm->interleave_mask |= GENMASK(11, 8);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap))
		cxlhdm->interleave_mask |= GENMASK(14, 12);
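	/*
	 * Each bit position in @iw_cap_mask represents support for that
	 * interleave-ways value: 1, 2, 4, and 8 way are baseline, while
	 * 3/6/12 way and 16 way depend on the capability bits below.
	 */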
	cxlhdm->iw_cap_mask = BIT(1) | BIT(2) | BIT(4) | BIT(8);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_3_6_12_WAY, hdm_cap))
		cxlhdm->iw_cap_mask |= BIT(3) | BIT(6) | BIT(12);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_16_WAY, hdm_cap))
		cxlhdm->iw_cap_mask |= BIT(16);
}

static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_hdm *cxlhdm;
	void __iomem *hdm;
	u32 ctrl;
	int i;

	if (!info)
		return false;

	cxlhdm = dev_get_drvdata(&info->port->dev);
	hdm = cxlhdm->regs.hdm_decoder;

	if (!hdm)
		return true;

	/*
	 * If HDM decoders are present and the driver is in control of
	 * Mem_Enable, skip DVSEC based emulation.
	 */
	if (!info->mem_enabled)
		return false;

	/*
	 * If any decoders are committed already, there should not be any
	 * emulated DVSEC decoders.
	 */
	for (i = 0; i < cxlhdm->decoder_count; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
		dev_dbg(&info->port->dev,
			"decoder%d.%d: committed: %ld base: %#x_%.8x size: %#x_%.8x\n",
			info->port->id, i,
			FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl),
			readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(i)),
			readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(i)),
			readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(i)),
			readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(i)));
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
			return false;
	}

	return true;
}

/**
 * devm_cxl_setup_hdm - map HDM decoder component registers
 * @port: cxl_port to map
 * @info: cached DVSEC range register info
 */
static struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
					  struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_register_map *reg_map = &port->reg_map;
	struct device *dev = &port->dev;
	struct cxl_hdm *cxlhdm;
	int rc;

	cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
	if (!cxlhdm)
		return ERR_PTR(-ENOMEM);
	cxlhdm->port = port;
	dev_set_drvdata(dev, cxlhdm);

	/* Memory devices can configure device HDM using DVSEC range regs. */
	if (reg_map->resource == CXL_RESOURCE_NONE) {
		if (!info || !info->mem_enabled) {
			dev_err(dev, "No component registers mapped\n");
			return ERR_PTR(-ENXIO);
		}

		cxlhdm->decoder_count = info->ranges;
		return cxlhdm;
	}

	if (!reg_map->component_map.hdm_decoder.valid) {
		dev_dbg(&port->dev, "HDM decoder registers not implemented\n");
		/* unique error code to indicate no HDM decoder capability */
		return ERR_PTR(-ENODEV);
	}

	rc = cxl_map_component_regs(reg_map, &cxlhdm->regs,
				    BIT(CXL_CM_CAP_CAP_ID_HDM));
	if (rc) {
		dev_err(dev, "Failed to map HDM capability.\n");
		return ERR_PTR(rc);
	}

	parse_hdm_decoder_caps(cxlhdm);
	if (cxlhdm->decoder_count == 0) {
		dev_err(dev, "Spec violation. Caps invalid\n");
		return ERR_PTR(-ENXIO);
	}

	/*
	 * Now that the hdm capability is parsed, decide if range
	 * register emulation is needed and fixup cxlhdm accordingly.
	 */
	if (should_emulate_decoders(info)) {
		dev_dbg(dev, "Fallback map %d range register%s\n", info->ranges,
			str_plural(info->ranges));
		cxlhdm->decoder_count = info->ranges;
	}

	return cxlhdm;
}

static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth)
{
	unsigned long long start = r->start, end = r->end;

	seq_printf(file, "%*s%08llx-%08llx : %s\n", depth * 2, "", start, end,
		   r->name);
}

void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
{
	struct resource *p1, *p2;

	guard(rwsem_read)(&cxl_rwsem.dpa);
	for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
		__cxl_dpa_debug(file, p1, 0);
		for (p2 = p1->child; p2; p2 = p2->sibling)
			__cxl_dpa_debug(file, p2, 1);
	}
}
EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, "CXL");

/* See request_skip() kernel-doc */
static resource_size_t __adjust_skip(struct cxl_dev_state *cxlds,
				     const resource_size_t skip_base,
				     const resource_size_t skip_len,
				     const char *requester)
{
	const resource_size_t skip_end = skip_base + skip_len - 1;

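	/*
	 * Clip the skip against each partition that it overlaps. With a
	 * non-NULL @requester the overlap is reserved in @cxlds->dpa_res;
	 * with a NULL @requester (see release_skip()) it is released.
	 */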
	for (int i = 0; i < cxlds->nr_partitions; i++) {
		const struct resource *part_res = &cxlds->part[i].res;
		resource_size_t adjust_start, adjust_end, size;

		adjust_start = max(skip_base, part_res->start);
		adjust_end = min(skip_end, part_res->end);

		if (adjust_end < adjust_start)
			continue;

		size = adjust_end - adjust_start + 1;

		if (!requester)
			__release_region(&cxlds->dpa_res, adjust_start, size);
		else if (!__request_region(&cxlds->dpa_res, adjust_start, size,
					   requester, 0))
			return adjust_start - skip_base;
	}

	return skip_len;
}
#define release_skip(c, b, l) __adjust_skip((c), (b), (l), NULL)

/*
 * Must be called in a context that synchronizes against this decoder's
 * port ->remove() callback (like an endpoint decoder sysfs attribute)
 */
static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct resource *res = cxled->dpa_res;
	resource_size_t skip_start;

	lockdep_assert_held_write(&cxl_rwsem.dpa);

	/* save @skip_start, before @res is released */
	skip_start = res->start - cxled->skip;
	__release_region(&cxlds->dpa_res, res->start, resource_size(res));
	if (cxled->skip)
		release_skip(cxlds, skip_start, cxled->skip);
	cxled->skip = 0;
	cxled->dpa_res = NULL;
	put_device(&cxled->cxld.dev);
	port->hdm_end--;
}

static void cxl_dpa_release(void *cxled)
{
	guard(rwsem_write)(&cxl_rwsem.dpa);
	__cxl_dpa_release(cxled);
}

/*
 * Must be called from context that will not race port device
 * unregistration, like decoder sysfs attribute methods
 */
static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *port = cxled_to_port(cxled);

	lockdep_assert_held_write(&cxl_rwsem.dpa);
	devm_remove_action(&port->dev, cxl_dpa_release, cxled);
	__cxl_dpa_release(cxled);
}

/**
 * request_skip() - Track DPA 'skip' in @cxlds->dpa_res resource tree
 * @cxlds: CXL.mem device context that parents @cxled
 * @cxled: Endpoint decoder establishing new allocation that skips lower DPA
 * @skip_base: DPA < start of new DPA allocation (DPAnew)
 * @skip_len: @skip_base + @skip_len == DPAnew
 *
 * DPA 'skip' arises from out-of-sequence DPA allocation events relative
 * to free capacity across multiple partitions. It is a wasteful event
 * as usable DPA gets thrown away, but if a deployment has, for example,
 * a dual RAM+PMEM device, wants to use PMEM, and has unallocated RAM
 * DPA, the free RAM DPA must be sacrificed to start allocating PMEM.
 * See third "Implementation Note" in CXL 3.1 8.2.4.19.13 "Decoder
 * Protection" for more details.
 *
 * A 'skip' always covers the last allocated DPA in a previous partition
 * to the start of the current partition to allocate. Allocations never
 * start in the middle of a partition, and allocations are always
 * de-allocated in reverse order (see cxl_dpa_free(), or natural devm
 * unwind order from forced in-order allocation).
 *
 * If @cxlds->nr_partitions was guaranteed to be <= 2 then the 'skip'
 * would always be contained to a single partition. Given
 * @cxlds->nr_partitions may be > 2 it results in cases where the 'skip'
 * might span "tail capacity of partition[0], all of partition[1], ...,
 * all of partition[N-1]" to support allocating from partition[N]. That
 * in turn interacts with the partition 'struct resource' boundaries
 * within @cxlds->dpa_res whereby 'skip' requests need to be divided by
 * partition. I.e. this is a quirk of using a 'struct resource' tree to
 * detect range conflicts while also tracking partition boundaries in
 * @cxlds->dpa_res.
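 *
 * For example, with a 256MB RAM partition followed by a 256MB PMEM
 * partition and no RAM allocations, the first PMEM allocation records a
 * 256MB 'skip' that __adjust_skip() clips to the RAM partition's
 * 'struct resource' before reserving it on behalf of @cxled.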
 */
static int request_skip(struct cxl_dev_state *cxlds,
			struct cxl_endpoint_decoder *cxled,
			const resource_size_t skip_base,
			const resource_size_t skip_len)
{
	resource_size_t skipped = __adjust_skip(cxlds, skip_base, skip_len,
						dev_name(&cxled->cxld.dev));

	if (skipped == skip_len)
		return 0;

	dev_dbg(cxlds->dev,
		"%s: failed to reserve skipped space (%pa %pa %pa)\n",
		dev_name(&cxled->cxld.dev), &skip_base, &skip_len, &skipped);

	release_skip(cxlds, skip_base, skipped);

	return -EBUSY;
}

static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
			     resource_size_t base, resource_size_t len,
			     resource_size_t skipped)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &port->dev;
	struct resource *res;
	int rc;

	lockdep_assert_held_write(&cxl_rwsem.dpa);

	if (!len) {
		dev_warn(dev, "decoder%d.%d: empty reservation attempted\n",
			 port->id, cxled->cxld.id);
		return -EINVAL;
	}

	if (cxled->dpa_res) {
		dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n",
			port->id, cxled->cxld.id, cxled->dpa_res);
		return -EBUSY;
	}

	if (port->hdm_end + 1 != cxled->cxld.id) {
		/*
		 * Assumes alloc and commit order is always in hardware instance
		 * order per expectations from 8.2.5.12.20 Committing Decoder
		 * Programming that enforce decoder[m] committed before
		 * decoder[m+1] commit start.
		 */
		dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id,
			cxled->cxld.id, port->id, port->hdm_end + 1);
		return -EBUSY;
	}

	if (skipped) {
		rc = request_skip(cxlds, cxled, base - skipped, skipped);
		if (rc)
			return rc;
	}
	res = __request_region(&cxlds->dpa_res, base, len,
			       dev_name(&cxled->cxld.dev), 0);
	if (!res) {
		dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n",
			port->id, cxled->cxld.id);
		if (skipped)
			release_skip(cxlds, base - skipped, skipped);
		return -EBUSY;
	}
	cxled->dpa_res = res;
	cxled->skip = skipped;

	/*
	 * When allocating new capacity, ->part is already set, when
	 * discovering decoder settings at initial enumeration, ->part
	 * is not set.
	 */
	if (cxled->part < 0)
		for (int i = 0; i < cxlds->nr_partitions; i++)
			if (resource_contains(&cxlds->part[i].res, res)) {
				cxled->part = i;
				break;
			}

	if (cxled->part < 0)
		dev_warn(dev, "decoder%d.%d: %pr does not map any partition\n",
			 port->id, cxled->cxld.id, res);

	port->hdm_end++;
	get_device(&cxled->cxld.dev);
	return 0;
}

static int add_dpa_res(struct device *dev, struct resource *parent,
		       struct resource *res, resource_size_t start,
		       resource_size_t size, const char *type)
{
	int rc;

	*res = (struct resource) {
		.name = type,
		.start = start,
		.end = start + size - 1,
		.flags = IORESOURCE_MEM,
	};
	if (resource_size(res) == 0) {
		dev_dbg(dev, "DPA(%s): no capacity\n", res->name);
		return 0;
	}
	rc = request_resource(parent, res);
	if (rc) {
		dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name,
			res, rc);
		return rc;
	}

	dev_dbg(dev, "DPA(%s): %pr\n", res->name, res);

	return 0;
}

static const char *cxl_mode_name(enum cxl_partition_mode mode)
{
	switch (mode) {
	case CXL_PARTMODE_RAM:
		return "ram";
	case CXL_PARTMODE_PMEM:
		return "pmem";
	default:
		return "";
	};
}

/* if this fails the caller must destroy @cxlds, there is no recovery */
int cxl_dpa_setup(struct cxl_dev_state *cxlds, const struct cxl_dpa_info *info)
{
	struct device *dev = cxlds->dev;

	guard(rwsem_write)(&cxl_rwsem.dpa);

	if (cxlds->nr_partitions)
		return -EBUSY;

	if (!info->size || !info->nr_partitions) {
		cxlds->dpa_res = DEFINE_RES_MEM(0, 0);
		cxlds->nr_partitions = 0;
		return 0;
	}

	cxlds->dpa_res = DEFINE_RES_MEM(0, info->size);

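	/*
	 * Each partition becomes a child 'struct resource' of @dpa_res so
	 * that later decoder allocations can be checked for range conflicts
	 * and mapped back to a partition index.
	 */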
	for (int i = 0; i < info->nr_partitions; i++) {
		const struct cxl_dpa_part_info *part = &info->part[i];
		int rc;

		cxlds->part[i].perf.qos_class = CXL_QOS_CLASS_INVALID;
		cxlds->part[i].mode = part->mode;

		/* Require ordered + contiguous partitions */
		if (i) {
			const struct cxl_dpa_part_info *prev = &info->part[i - 1];

			if (prev->range.end + 1 != part->range.start)
				return -EINVAL;
		}
		rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->part[i].res,
				 part->range.start, range_len(&part->range),
				 cxl_mode_name(part->mode));
		if (rc)
			return rc;
		cxlds->nr_partitions++;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cxl_dpa_setup);

int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
			 resource_size_t base, resource_size_t len,
			 resource_size_t skipped)
{
	struct cxl_port *port = cxled_to_port(cxled);
	int rc;

	scoped_guard(rwsem_write, &cxl_rwsem.dpa)
		rc = __cxl_dpa_reserve(cxled, base, len, skipped);

	if (rc)
		return rc;

	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, "CXL");

resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
{
	guard(rwsem_read)(&cxl_rwsem.dpa);
	if (cxled->dpa_res)
		return resource_size(cxled->dpa_res);

	return 0;
}

resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
{
	resource_size_t base = -1;

	lockdep_assert_held(&cxl_rwsem.dpa);
	if (cxled->dpa_res)
		base = cxled->dpa_res->start;

	return base;
}

bool cxl_resource_contains_addr(const struct resource *res, const resource_size_t addr)
{
	struct resource _addr = DEFINE_RES_MEM(addr, 1);

	return resource_contains(res, &_addr);
}

int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *port = cxled_to_port(cxled);
	struct device *dev = &cxled->cxld.dev;

	guard(rwsem_write)(&cxl_rwsem.dpa);
	if (!cxled->dpa_res)
		return 0;
	if (cxled->cxld.region) {
		dev_dbg(dev, "decoder assigned to: %s\n",
			dev_name(&cxled->cxld.region->dev));
		return -EBUSY;
	}
	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		dev_dbg(dev, "decoder enabled\n");
		return -EBUSY;
	}
	if (cxled->cxld.id != port->hdm_end) {
		dev_dbg(dev, "expected decoder%d.%d\n", port->id,
			port->hdm_end);
		return -EBUSY;
	}

	devm_cxl_dpa_release(cxled);
	return 0;
}

int cxl_dpa_set_part(struct cxl_endpoint_decoder *cxled,
		     enum cxl_partition_mode mode)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &cxled->cxld.dev;
	int part;

	guard(rwsem_write)(&cxl_rwsem.dpa);
	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE)
		return -EBUSY;

	for (part = 0; part < cxlds->nr_partitions; part++)
		if (cxlds->part[part].mode == mode)
			break;

	if (part >= cxlds->nr_partitions) {
		dev_dbg(dev, "unsupported mode: %d\n", mode);
		return -EINVAL;
	}

	if (!resource_size(&cxlds->part[part].res)) {
		dev_dbg(dev, "no available capacity for mode: %d\n", mode);
		return -ENXIO;
	}

	cxled->part = part;
	return 0;
}

static int __cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &cxled->cxld.dev;
	struct resource *res, *prev = NULL;
	resource_size_t start, avail, skip, skip_start;
	struct resource *p, *last;
	int part;

	guard(rwsem_write)(&cxl_rwsem.dpa);
	if (cxled->cxld.region) {
		dev_dbg(dev, "decoder attached to %s\n",
			dev_name(&cxled->cxld.region->dev));
		return -EBUSY;
	}

	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		dev_dbg(dev, "decoder enabled\n");
		return -EBUSY;
	}

	part = cxled->part;
	if (part < 0) {
		dev_dbg(dev, "partition not set\n");
		return -EBUSY;
	}

	res = &cxlds->part[part].res;
	for (p = res->child, last = NULL; p; p = p->sibling)
		last = p;
	if (last)
		start = last->end + 1;
	else
		start = res->start;

	/*
	 * To allocate at partition N, a skip needs to be calculated for all
	 * unallocated space at lower partition indices.
	 *
	 * If a partition has any allocations, the search can end because a
	 * previous cxl_dpa_alloc() invocation is assumed to have accounted for
	 * all previous partitions.
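	 *
	 * For example, allocating from partition[2] while partition[1] is
	 * empty and partition[0] has an allocation yields a skip that runs
	 * from the end of partition[0]'s last allocation to the start of
	 * partition[2].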
	 */
	skip_start = CXL_RESOURCE_NONE;
	for (int i = part; i; i--) {
		prev = &cxlds->part[i - 1].res;
		for (p = prev->child, last = NULL; p; p = p->sibling)
			last = p;
		if (last) {
			skip_start = last->end + 1;
			break;
		}
		skip_start = prev->start;
	}

	avail = res->end - start + 1;
	if (skip_start == CXL_RESOURCE_NONE)
		skip = 0;
	else
		skip = res->start - skip_start;

	if (size > avail) {
		dev_dbg(dev, "%llu exceeds available %s capacity: %llu\n", size,
			res->name, (u64)avail);
		return -ENOSPC;
	}

	return __cxl_dpa_reserve(cxled, start, size, skip);
}

int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size)
{
	struct cxl_port *port = cxled_to_port(cxled);
	int rc;

	rc = __cxl_dpa_alloc(cxled, size);
	if (rc)
		return rc;

	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}

static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
{
	u16 eig;
	u8 eiw;

	/*
	 * Input validation ensures these warns never fire, but otherwise
	 * suppress uninitialized variable usage warnings.
	 */
	if (WARN_ONCE(ways_to_eiw(cxld->interleave_ways, &eiw),
		      "invalid interleave_ways: %d\n", cxld->interleave_ways))
		return;
	if (WARN_ONCE(granularity_to_eig(cxld->interleave_granularity, &eig),
		      "invalid interleave_granularity: %d\n",
		      cxld->interleave_granularity))
		return;

	u32p_replace_bits(ctrl, eig, CXL_HDM_DECODER0_CTRL_IG_MASK);
	u32p_replace_bits(ctrl, eiw, CXL_HDM_DECODER0_CTRL_IW_MASK);
	*ctrl |= CXL_HDM_DECODER0_CTRL_COMMIT;
}

static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
{
	u32p_replace_bits(ctrl,
			  !!(cxld->target_type == CXL_DECODER_HOSTONLYMEM),
			  CXL_HDM_DECODER0_CTRL_HOSTONLY);
}

static void cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
{
	struct cxl_dport **t = &cxlsd->target[0];
	int ways = cxlsd->cxld.interleave_ways;

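	/*
	 * Pack up to 8 downstream port ids into the 64-bit Target List
	 * image, one byte per interleave target, in interleave order.
	 */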
	*tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
	if (ways > 1)
		*tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
	if (ways > 2)
		*tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id);
	if (ways > 3)
		*tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id);
	if (ways > 4)
		*tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id);
	if (ways > 5)
		*tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id);
	if (ways > 6)
		*tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
	if (ways > 7)
		*tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);
}

/*
 * Per CXL 2.0 8.2.5.12.20 Committing Decoder Programming, hardware must set
 * committed or error within 10ms, but just be generous with 20ms to account for
 * clock skew and other marginal behavior
 */
#define COMMIT_TIMEOUT_MS 20
static int cxld_await_commit(void __iomem *hdm, int id)
{
	u32 ctrl;
	int i;

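	/*
	 * Poll the decoder control register at 1ms intervals up to the
	 * 20ms budget; a COMMIT_ERROR indication clears the pending
	 * COMMIT request before returning -EIO.
	 */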
	for (i = 0; i < COMMIT_TIMEOUT_MS; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMIT_ERROR, ctrl)) {
			ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
			writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
			return -EIO;
		}
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
			return 0;
		fsleep(1000);
	}

	return -ETIMEDOUT;
}

static void setup_hw_decoder(struct cxl_decoder *cxld, void __iomem *hdm)
{
	int id = cxld->id;
	u64 base, size;
	u32 ctrl;

	/* common decoder settings */
	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
	cxld_set_interleave(cxld, &ctrl);
	cxld_set_type(cxld, &ctrl);
	base = cxld->hpa_range.start;
	size = range_len(&cxld->hpa_range);

	writel(upper_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
	writel(lower_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
	writel(upper_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
	writel(lower_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));

	if (is_switch_decoder(&cxld->dev)) {
		struct cxl_switch_decoder *cxlsd =
			to_cxl_switch_decoder(&cxld->dev);
		void __iomem *tl_hi = hdm + CXL_HDM_DECODER0_TL_HIGH(id);
		void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
		u64 targets;

		cxlsd_set_targets(cxlsd, &targets);
		writel(upper_32_bits(targets), tl_hi);
		writel(lower_32_bits(targets), tl_lo);
	} else {
		struct cxl_endpoint_decoder *cxled =
			to_cxl_endpoint_decoder(&cxld->dev);
		void __iomem *sk_hi = hdm + CXL_HDM_DECODER0_SKIP_HIGH(id);
		void __iomem *sk_lo = hdm + CXL_HDM_DECODER0_SKIP_LOW(id);

		writel(upper_32_bits(cxled->skip), sk_hi);
		writel(lower_32_bits(cxled->skip), sk_lo);
	}

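	/* Write the control register last so COMMIT latches the settings above */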
	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
}

static int cxl_decoder_commit(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int id = cxld->id, rc;

	if (cxld->flags & CXL_DECODER_F_ENABLE)
		return 0;

	if (cxl_num_decoders_committed(port) != id) {
		dev_dbg(&port->dev,
			"%s: out of order commit, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id,
			cxl_num_decoders_committed(port));
		return -EBUSY;
	}

	/*
	 * For endpoint decoders hosted on CXL memory devices that
	 * support the sanitize operation, make sure sanitize is not in-flight.
	 */
	if (is_endpoint_decoder(&cxld->dev)) {
		struct cxl_endpoint_decoder *cxled =
			to_cxl_endpoint_decoder(&cxld->dev);
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
		struct cxl_memdev_state *mds =
			to_cxl_memdev_state(cxlmd->cxlds);

		if (mds && mds->security.sanitize_active) {
			dev_dbg(&cxlmd->dev,
				"attempted to commit %s during sanitize\n",
				dev_name(&cxld->dev));
			return -EBUSY;
		}
	}

	scoped_guard(rwsem_read, &cxl_rwsem.dpa)
		setup_hw_decoder(cxld, hdm);

	port->commit_end++;
	rc = cxld_await_commit(hdm, cxld->id);
	if (rc) {
		dev_dbg(&port->dev, "%s: error %d committing decoder\n",
			dev_name(&cxld->dev), rc);
		cxld->reset(cxld);
		return rc;
	}
	cxld->flags |= CXL_DECODER_F_ENABLE;

	return 0;
}

static int commit_reap(struct device *dev, void *data)
{
	struct cxl_port *port = to_cxl_port(dev->parent);
	struct cxl_decoder *cxld;

	if (!is_switch_decoder(dev) && !is_endpoint_decoder(dev))
		return 0;

	cxld = to_cxl_decoder(dev);
	if (port->commit_end == cxld->id &&
	    ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
		port->commit_end--;
		dev_dbg(&port->dev, "reap: %s commit_end: %d\n",
			dev_name(&cxld->dev), port->commit_end);
	}

	return 0;
}

void cxl_port_commit_reap(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);

	lockdep_assert_held_write(&cxl_rwsem.region);

	/*
	 * Once the highest committed decoder is disabled, free any other
	 * decoders that were pinned allocated by out-of-order release.
	 */
	port->commit_end--;
	dev_dbg(&port->dev, "reap: %s commit_end: %d\n", dev_name(&cxld->dev),
		port->commit_end);
	device_for_each_child_reverse_from(&port->dev, &cxld->dev, NULL,
					   commit_reap);
}
EXPORT_SYMBOL_NS_GPL(cxl_port_commit_reap, "CXL");

static void cxl_decoder_reset(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int id = cxld->id;
	u32 ctrl;

	if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
		return;

	if (cxld->flags & CXL_DECODER_F_LOCK)
		return;

	if (port->commit_end == id)
		cxl_port_commit_reap(cxld);
	else
		dev_dbg(&port->dev,
			"%s: out of order reset, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id, port->commit_end);

	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
	ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));

	writel(0, hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));

	cxld->flags &= ~CXL_DECODER_F_ENABLE;

	/* Userspace is now responsible for reconfiguring this decoder */
	if (is_endpoint_decoder(&cxld->dev)) {
		struct cxl_endpoint_decoder *cxled;

		cxled = to_cxl_endpoint_decoder(&cxld->dev);
		cxled->state = CXL_DECODER_STATE_MANUAL;
	}
}

static int cxl_setup_hdm_decoder_from_dvsec(
	struct cxl_port *port, struct cxl_decoder *cxld, u64 *dpa_base,
	int which, struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_endpoint_decoder *cxled;
	u64 len;
	int rc;

	if (!is_cxl_endpoint(port))
		return -EOPNOTSUPP;

	cxled = to_cxl_endpoint_decoder(&cxld->dev);
	len = range_len(&info->dvsec_range[which]);
	if (!len)
		return -ENOENT;

	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
	cxld->commit = NULL;
	cxld->reset = NULL;
	cxld->hpa_range = info->dvsec_range[which];

	/*
	 * Set the emulated decoder as locked pending additional support to
	 * change the range registers at run time.
	 */
	cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK;
	port->commit_end = cxld->id;

	rc = devm_cxl_dpa_reserve(cxled, *dpa_base, len, 0);
	if (rc) {
		dev_err(&port->dev,
			"decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
			port->id, cxld->id, *dpa_base, *dpa_base + len - 1, rc);
		return rc;
	}
	*dpa_base += len;
	cxled->state = CXL_DECODER_STATE_AUTO;

	return 0;
}

static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
			    void __iomem *hdm, int which,
			    u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_endpoint_decoder *cxled = NULL;
	u64 size, base, skip, dpa_size, lo, hi;
	bool committed;
	u32 remainder;
	int i, rc;
	u32 ctrl;
	union {
		u64 value;
		unsigned char target_id[8];
	} target_list;

	if (should_emulate_decoders(info))
		return cxl_setup_hdm_decoder_from_dvsec(port, cxld, dpa_base,
							which, info);

	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
	lo = readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
	hi = readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(which));
	base = (hi << 32) + lo;
	lo = readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
	hi = readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(which));
	size = (hi << 32) + lo;
	committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED);
	cxld->commit = cxl_decoder_commit;
	cxld->reset = cxl_decoder_reset;

	if (!committed)
		size = 0;
	if (base == U64_MAX || size == U64_MAX) {
		dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n",
			 port->id, cxld->id);
		return -ENXIO;
	}

	if (info)
		cxled = to_cxl_endpoint_decoder(&cxld->dev);
	cxld->hpa_range = (struct range) {
		.start = base,
		.end = base + size - 1,
	};

	/* decoders are enabled if committed */
	if (committed) {
		cxld->flags |= CXL_DECODER_F_ENABLE;
		if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK)
			cxld->flags |= CXL_DECODER_F_LOCK;
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_HOSTONLY, ctrl))
			cxld->target_type = CXL_DECODER_HOSTONLYMEM;
		else
			cxld->target_type = CXL_DECODER_DEVMEM;

		guard(rwsem_write)(&cxl_rwsem.region);
		if (cxld->id != cxl_num_decoders_committed(port)) {
			dev_warn(&port->dev,
				 "decoder%d.%d: Committed out of order\n",
				 port->id, cxld->id);
			return -ENXIO;
		}

		if (size == 0) {
			dev_warn(&port->dev,
				 "decoder%d.%d: Committed with zero size\n",
				 port->id, cxld->id);
			return -ENXIO;
		}
		port->commit_end = cxld->id;
	} else {
		if (cxled) {
			struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
			struct cxl_dev_state *cxlds = cxlmd->cxlds;

			/*
			 * Default by devtype until a device arrives that needs
			 * more precision.
			 */
			if (cxlds->type == CXL_DEVTYPE_CLASSMEM)
				cxld->target_type = CXL_DECODER_HOSTONLYMEM;
			else
				cxld->target_type = CXL_DECODER_DEVMEM;
		} else {
			/* To be overridden by region type at commit time */
			cxld->target_type = CXL_DECODER_HOSTONLYMEM;
		}

		if (!FIELD_GET(CXL_HDM_DECODER0_CTRL_HOSTONLY, ctrl) &&
		    cxld->target_type == CXL_DECODER_HOSTONLYMEM) {
			ctrl |= CXL_HDM_DECODER0_CTRL_HOSTONLY;
			writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
		}
	}
	rc = eiw_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl),
			 &cxld->interleave_ways);
	if (rc) {
		dev_warn(&port->dev,
			 "decoder%d.%d: Invalid interleave ways (ctrl: %#x)\n",
			 port->id, cxld->id, ctrl);
		return rc;
	}
	rc = eig_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl),
				&cxld->interleave_granularity);
	if (rc) {
		dev_warn(&port->dev,
			 "decoder%d.%d: Invalid interleave granularity (ctrl: %#x)\n",
			 port->id, cxld->id, ctrl);
		return rc;
	}

	dev_dbg(&port->dev, "decoder%d.%d: range: %#llx-%#llx iw: %d ig: %d\n",
		port->id, cxld->id, cxld->hpa_range.start, cxld->hpa_range.end,
		cxld->interleave_ways, cxld->interleave_granularity);

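	/*
	 * Switch decoders have no DPA to reserve; unpack the Target List
	 * registers (one target port id per byte) and return.
	 */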
	if (!cxled) {
		lo = readl(hdm + CXL_HDM_DECODER0_TL_LOW(which));
		hi = readl(hdm + CXL_HDM_DECODER0_TL_HIGH(which));
		target_list.value = (hi << 32) + lo;
		for (i = 0; i < cxld->interleave_ways; i++)
			cxld->target_map[i] = target_list.target_id[i];

		return 0;
	}

	if (!committed)
		return 0;

	dpa_size = div_u64_rem(size, cxld->interleave_ways, &remainder);
	if (remainder) {
		dev_err(&port->dev,
			"decoder%d.%d: invalid committed configuration size: %#llx ways: %d\n",
			port->id, cxld->id, size, cxld->interleave_ways);
		return -ENXIO;
	}
	lo = readl(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
	hi = readl(hdm + CXL_HDM_DECODER0_SKIP_HIGH(which));
	skip = (hi << 32) + lo;
	rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
	if (rc) {
		dev_err(&port->dev,
			"decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
			port->id, cxld->id, *dpa_base,
			*dpa_base + dpa_size + skip - 1, rc);
		return rc;
	}
	*dpa_base += dpa_size + skip;

	cxled->state = CXL_DECODER_STATE_AUTO;

	return 0;
}

static void cxl_settle_decoders(struct cxl_hdm *cxlhdm)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int committed, i;
	u32 ctrl;

	if (!hdm)
		return;

	/*
	 * Since the register resource was recently claimed via request_region()
	 * be careful about trusting the "not-committed" status until the commit
	 * timeout has elapsed. The commit timeout is 10ms (CXL 2.0
	 * 8.2.5.12.20), but double it to be tolerant of any clock skew between
	 * host and target.
	 */
	for (i = 0, committed = 0; i < cxlhdm->decoder_count; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
		if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED)
			committed++;
	}

	/* ensure that future checks of committed can be trusted */
	if (committed != cxlhdm->decoder_count)
		msleep(20);
}

/**
 * devm_cxl_enumerate_decoders - add decoder objects per HDM register set
 * @cxlhdm: Structure to populate with HDM capabilities
 * @info: cached DVSEC range register info
 */
static int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
				       struct cxl_endpoint_dvsec_info *info)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	struct cxl_port *port = cxlhdm->port;
	int i;
	u64 dpa_base = 0;

	cxl_settle_decoders(cxlhdm);

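	/*
	 * Endpoint DPA is mapped in decoder order; @dpa_base tracks the next
	 * unmapped device address as each committed decoder is parsed.
	 */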
	for (i = 0; i < cxlhdm->decoder_count; i++) {
		int rc, target_count = cxlhdm->target_count;
		struct cxl_decoder *cxld;

		if (is_cxl_endpoint(port)) {
			struct cxl_endpoint_decoder *cxled;

			cxled = cxl_endpoint_decoder_alloc(port);
			if (IS_ERR(cxled)) {
				dev_warn(&port->dev,
					 "Failed to allocate decoder%d.%d\n",
					 port->id, i);
				return PTR_ERR(cxled);
			}
			cxld = &cxled->cxld;
		} else {
			struct cxl_switch_decoder *cxlsd;

			cxlsd = cxl_switch_decoder_alloc(port, target_count);
			if (IS_ERR(cxlsd)) {
				dev_warn(&port->dev,
					 "Failed to allocate decoder%d.%d\n",
					 port->id, i);
				return PTR_ERR(cxlsd);
			}
			cxld = &cxlsd->cxld;
		}

		rc = init_hdm_decoder(port, cxld, hdm, i, &dpa_base, info);
		if (rc) {
			dev_warn(&port->dev,
				 "Failed to initialize decoder%d.%d\n",
				 port->id, i);
			put_device(&cxld->dev);
			return rc;
		}
		rc = add_hdm_decoder(port, cxld);
		if (rc) {
			dev_warn(&port->dev,
				 "Failed to add decoder%d.%d\n", port->id, i);
			return rc;
		}
	}

	return 0;
}

/**
 * __devm_cxl_switch_port_decoders_setup - allocate and setup switch decoders
 * @port: CXL port context
 *
 * Return 0 or -errno on error
 */
int __devm_cxl_switch_port_decoders_setup(struct cxl_port *port)
{
	struct cxl_hdm *cxlhdm;

	if (is_cxl_root(port) || is_cxl_endpoint(port))
		return -EOPNOTSUPP;

	cxlhdm = devm_cxl_setup_hdm(port, NULL);
	if (!IS_ERR(cxlhdm))
		return devm_cxl_enumerate_decoders(cxlhdm, NULL);

	if (PTR_ERR(cxlhdm) != -ENODEV) {
		dev_err(&port->dev, "Failed to map HDM decoder capability\n");
		return PTR_ERR(cxlhdm);
	}

	if (cxl_port_get_possible_dports(port) == 1) {
		dev_dbg(&port->dev, "Fallback to passthrough decoder\n");
		return devm_cxl_add_passthrough_decoder(port);
	}

	dev_err(&port->dev, "HDM decoder capability not found\n");
	return -ENXIO;
}
EXPORT_SYMBOL_NS_GPL(__devm_cxl_switch_port_decoders_setup, "CXL");

/**
 * devm_cxl_endpoint_decoders_setup - allocate and setup endpoint decoders
 * @port: CXL port context
 *
 * Return 0 or -errno on error
 */
int devm_cxl_endpoint_decoders_setup(struct cxl_port *port)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
	struct cxl_endpoint_dvsec_info info = { .port = port };
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_hdm *cxlhdm;
	int rc;

	if (!is_cxl_endpoint(port))
		return -EOPNOTSUPP;

	rc = cxl_dvsec_rr_decode(cxlds, &info);
	if (rc < 0)
		return rc;

	cxlhdm = devm_cxl_setup_hdm(port, &info);
	if (IS_ERR(cxlhdm)) {
		if (PTR_ERR(cxlhdm) == -ENODEV)
			dev_err(&port->dev, "HDM decoder registers not found\n");
		return PTR_ERR(cxlhdm);
	}

	rc = cxl_hdm_decode_init(cxlds, cxlhdm, &info);
	if (rc)
		return rc;

	return devm_cxl_enumerate_decoders(cxlhdm, &info);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_endpoint_decoders_setup, "CXL");