// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/memregion.h>
#include <linux/genalloc.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/memory.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/sort.h>
#include <linux/idr.h>
#include <linux/memory-tiers.h>
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"

/**
 * DOC: cxl core region
 *
 * CXL Regions represent mapped memory capacity in system physical address
 * space. Whereas the CXL Root Decoders identify the bounds of potential CXL
 * Memory ranges, Regions represent the capacity actively mapped by the HDM
 * Decoder Capability structures throughout the Host Bridges, Switches, and
 * Endpoints in the topology.
 *
 * Region configuration has ordering constraints. UUID may be set at any time
 * but is only visible for persistent regions. The remaining attributes must
 * be configured in this order:
 * 1. Interleave granularity
 * 2. Interleave size
 * 3. Decoder targets
 */
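
/*
 * An illustrative configuration flow via sysfs (a sketch; device and
 * decoder names are platform-specific):
 *
 *   region=$(cat /sys/bus/cxl/devices/decoder0.0/create_pmem_region)
 *   echo $region > /sys/bus/cxl/devices/decoder0.0/create_pmem_region
 *   echo 256 > /sys/bus/cxl/devices/$region/interleave_granularity
 *   echo 1 > /sys/bus/cxl/devices/$region/interleave_ways
 *   echo $((256 << 20)) > /sys/bus/cxl/devices/$region/size
 *   echo decoder2.0 > /sys/bus/cxl/devices/$region/target0
 *   echo 1 > /sys/bus/cxl/devices/$region/commit
 */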

static struct cxl_region *to_cxl_region(struct device *dev);

#define __ACCESS_ATTR_RO(_level, _name) {                               \
        .attr = { .name = __stringify(_name), .mode = 0444 },           \
        .show = _name##_access##_level##_show,                          \
}

#define ACCESS_DEVICE_ATTR_RO(level, name)      \
        struct device_attribute dev_attr_access##level##_##name = __ACCESS_ATTR_RO(level, name)

#define ACCESS_ATTR_RO(level, attrib)                                          \
static ssize_t attrib##_access##level##_show(struct device *dev,               \
                                             struct device_attribute *attr,    \
                                             char *buf)                        \
{                                                                              \
        struct cxl_region *cxlr = to_cxl_region(dev);                          \
                                                                               \
        if (cxlr->coord[level].attrib == 0)                                    \
                return -ENOENT;                                                \
                                                                               \
        return sysfs_emit(buf, "%u\n", cxlr->coord[level].attrib);             \
}                                                                              \
static ACCESS_DEVICE_ATTR_RO(level, attrib)
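
/*
 * For reference, ACCESS_ATTR_RO(0, read_bandwidth) expands to roughly:
 *
 *   static ssize_t read_bandwidth_access0_show(struct device *dev,
 *                                              struct device_attribute *attr,
 *                                              char *buf)
 *   { ... return sysfs_emit(buf, "%u\n", cxlr->coord[0].read_bandwidth); }
 *   static struct device_attribute dev_attr_access0_read_bandwidth = ...;
 */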

ACCESS_ATTR_RO(0, read_bandwidth);
ACCESS_ATTR_RO(0, read_latency);
ACCESS_ATTR_RO(0, write_bandwidth);
ACCESS_ATTR_RO(0, write_latency);

#define ACCESS_ATTR_DECLARE(level, attrib)      \
        (&dev_attr_access##level##_##attrib.attr)

static struct attribute *access0_coordinate_attrs[] = {
        ACCESS_ATTR_DECLARE(0, read_bandwidth),
        ACCESS_ATTR_DECLARE(0, write_bandwidth),
        ACCESS_ATTR_DECLARE(0, read_latency),
        ACCESS_ATTR_DECLARE(0, write_latency),
        NULL
};

ACCESS_ATTR_RO(1, read_bandwidth);
ACCESS_ATTR_RO(1, read_latency);
ACCESS_ATTR_RO(1, write_bandwidth);
ACCESS_ATTR_RO(1, write_latency);

static struct attribute *access1_coordinate_attrs[] = {
        ACCESS_ATTR_DECLARE(1, read_bandwidth),
        ACCESS_ATTR_DECLARE(1, write_bandwidth),
        ACCESS_ATTR_DECLARE(1, read_latency),
        ACCESS_ATTR_DECLARE(1, write_latency),
        NULL
};

#define ACCESS_VISIBLE(level)                                            \
static umode_t cxl_region_access##level##_coordinate_visible(            \
                struct kobject *kobj, struct attribute *a, int n)        \
{                                                                        \
        struct device *dev = kobj_to_dev(kobj);                          \
        struct cxl_region *cxlr = to_cxl_region(dev);                    \
                                                                         \
        if (a == &dev_attr_access##level##_read_latency.attr &&         \
            cxlr->coord[level].read_latency == 0)                        \
                return 0;                                                \
                                                                         \
        if (a == &dev_attr_access##level##_write_latency.attr &&        \
            cxlr->coord[level].write_latency == 0)                       \
                return 0;                                                \
                                                                         \
        if (a == &dev_attr_access##level##_read_bandwidth.attr &&       \
            cxlr->coord[level].read_bandwidth == 0)                      \
                return 0;                                                \
                                                                         \
        if (a == &dev_attr_access##level##_write_bandwidth.attr &&      \
            cxlr->coord[level].write_bandwidth == 0)                     \
                return 0;                                                \
                                                                         \
        return a->mode;                                                  \
}

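/* Returning 0 from an ->is_visible() callback hides the attribute in sysfs */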
ACCESS_VISIBLE(0);
ACCESS_VISIBLE(1);

static const struct attribute_group cxl_region_access0_coordinate_group = {
        .name = "access0",
        .attrs = access0_coordinate_attrs,
        .is_visible = cxl_region_access0_coordinate_visible,
};

static const struct attribute_group *get_cxl_region_access0_group(void)
{
        return &cxl_region_access0_coordinate_group;
}

static const struct attribute_group cxl_region_access1_coordinate_group = {
        .name = "access1",
        .attrs = access1_coordinate_attrs,
        .is_visible = cxl_region_access1_coordinate_visible,
};

static const struct attribute_group *get_cxl_region_access1_group(void)
{
        return &cxl_region_access1_coordinate_group;
}

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct cxl_region *cxlr = to_cxl_region(dev);
        struct cxl_region_params *p = &cxlr->params;
        ssize_t rc;

        rc = down_read_interruptible(&cxl_region_rwsem);
        if (rc)
                return rc;
        if (cxlr->mode != CXL_DECODER_PMEM)
                rc = sysfs_emit(buf, "\n");
        else
                rc = sysfs_emit(buf, "%pUb\n", &p->uuid);
        up_read(&cxl_region_rwsem);

        return rc;
}

static int is_dup(struct device *match, void *data)
{
        struct cxl_region_params *p;
        struct cxl_region *cxlr;
        uuid_t *uuid = data;

        if (!is_cxl_region(match))
                return 0;

        lockdep_assert_held(&cxl_region_rwsem);
        cxlr = to_cxl_region(match);
        p = &cxlr->params;

        if (uuid_equal(&p->uuid, uuid)) {
                dev_dbg(match, "already has uuid: %pUb\n", uuid);
                return -EBUSY;
        }

        return 0;
}

static ssize_t uuid_store(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t len)
{
        struct cxl_region *cxlr = to_cxl_region(dev);
        struct cxl_region_params *p = &cxlr->params;
        uuid_t temp;
        ssize_t rc;

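        /* expect a 36-character UUID plus the trailing newline from 'echo' */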
        if (len != UUID_STRING_LEN + 1)
                return -EINVAL;

        rc = uuid_parse(buf, &temp);
        if (rc)
                return rc;

        if (uuid_is_null(&temp))
                return -EINVAL;

        rc = down_write_killable(&cxl_region_rwsem);
        if (rc)
                return rc;

        if (uuid_equal(&p->uuid, &temp))
                goto out;

        rc = -EBUSY;
        if (p->state >= CXL_CONFIG_ACTIVE)
                goto out;

        rc = bus_for_each_dev(&cxl_bus_type, NULL, &temp, is_dup);
        if (rc < 0)
                goto out;

        uuid_copy(&p->uuid, &temp);
out:
        up_write(&cxl_region_rwsem);

        if (rc)
                return rc;
        return len;
}
static DEVICE_ATTR_RW(uuid);

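/*
 * Each port tracks its participation in a region via a 'struct
 * cxl_region_ref' stored in @port->regions, keyed by the region pointer.
 */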
static struct cxl_region_ref *cxl_rr_load(struct cxl_port *port,
                                          struct cxl_region *cxlr)
{
        return xa_load(&port->regions, (unsigned long)cxlr);
}

static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
{
        if (!cpu_cache_has_invalidate_memregion()) {
                if (IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST)) {
                        dev_info_once(
                                &cxlr->dev,
                                "Bypassing cpu_cache_invalidate_memregion() for testing!\n");
                        return 0;
                } else {
                        dev_err(&cxlr->dev,
                                "Failed to synchronize CPU cache state\n");
                        return -ENXIO;
                }
        }

        cpu_cache_invalidate_memregion(IORES_DESC_CXL);
        return 0;
}

static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
{
        struct cxl_region_params *p = &cxlr->params;
        int i, rc = 0;

        /*
         * Before region teardown, attempt to flush; if the flush fails,
         * cancel the teardown for data-consistency reasons.
         */
        rc = cxl_region_invalidate_memregion(cxlr);
        if (rc)
                return rc;

        for (i = count - 1; i >= 0; i--) {
                struct cxl_endpoint_decoder *cxled = p->targets[i];
                struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
                struct cxl_port *iter = cxled_to_port(cxled);
                struct cxl_dev_state *cxlds = cxlmd->cxlds;
                struct cxl_ep *ep;

                if (cxlds->rcd)
                        goto endpoint_reset;

                while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
                        iter = to_cxl_port(iter->dev.parent);

                for (ep = cxl_ep_load(iter, cxlmd); iter;
                     iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
                        struct cxl_region_ref *cxl_rr;
                        struct cxl_decoder *cxld;

                        cxl_rr = cxl_rr_load(iter, cxlr);
                        cxld = cxl_rr->decoder;
                        if (cxld->reset)
                                rc = cxld->reset(cxld);
                        if (rc)
                                return rc;
                        set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
                }

endpoint_reset:
                rc = cxled->cxld.reset(&cxled->cxld);
                if (rc)
                        return rc;
                set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
        }

        /* all decoders associated with this region have been torn down */
        clear_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);

        return 0;
}

static int commit_decoder(struct cxl_decoder *cxld)
{
        struct cxl_switch_decoder *cxlsd = NULL;

        if (cxld->commit)
                return cxld->commit(cxld);

        if (is_switch_decoder(&cxld->dev))
                cxlsd = to_cxl_switch_decoder(&cxld->dev);

        if (dev_WARN_ONCE(&cxld->dev, !cxlsd || cxlsd->nr_targets > 1,
                          "->commit() is required\n"))
                return -ENXIO;
        return 0;
}

static int cxl_region_decode_commit(struct cxl_region *cxlr)
{
        struct cxl_region_params *p = &cxlr->params;
        int i, rc = 0;

        for (i = 0; i < p->nr_targets; i++) {
                struct cxl_endpoint_decoder *cxled = p->targets[i];
                struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
                struct cxl_region_ref *cxl_rr;
                struct cxl_decoder *cxld;
                struct cxl_port *iter;
                struct cxl_ep *ep;

                /* commit bottom up */
                for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
                     iter = to_cxl_port(iter->dev.parent)) {
                        cxl_rr = cxl_rr_load(iter, cxlr);
                        cxld = cxl_rr->decoder;
                        rc = commit_decoder(cxld);
                        if (rc)
                                break;
                }

                if (rc) {
                        /* programming @iter failed, teardown */
                        for (ep = cxl_ep_load(iter, cxlmd); ep && iter;
                             iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
                                cxl_rr = cxl_rr_load(iter, cxlr);
                                cxld = cxl_rr->decoder;
                                if (cxld->reset)
                                        cxld->reset(cxld);
                        }

                        cxled->cxld.reset(&cxled->cxld);
                        goto err;
                }
        }

        return 0;

err:
        /* undo the targets that were successfully committed */
        cxl_region_decode_reset(cxlr, i);
        return rc;
}

static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t len)
{
        struct cxl_region *cxlr = to_cxl_region(dev);
        struct cxl_region_params *p = &cxlr->params;
        bool commit;
        ssize_t rc;

        rc = kstrtobool(buf, &commit);
        if (rc)
                return rc;

        rc = down_write_killable(&cxl_region_rwsem);
        if (rc)
                return rc;

        /* Already in the requested state? */
        if (commit && p->state >= CXL_CONFIG_COMMIT)
                goto out;
        if (!commit && p->state < CXL_CONFIG_COMMIT)
                goto out;

        /* Not ready to commit? */
        if (commit && p->state < CXL_CONFIG_ACTIVE) {
                rc = -ENXIO;
                goto out;
        }

        /*
         * Invalidate caches before region setup to drop any speculative
         * consumption of this address space
         */
        rc = cxl_region_invalidate_memregion(cxlr);
        if (rc)
                goto out;

        if (commit) {
                rc = cxl_region_decode_commit(cxlr);
                if (rc == 0)
                        p->state = CXL_CONFIG_COMMIT;
        } else {
                p->state = CXL_CONFIG_RESET_PENDING;
                up_write(&cxl_region_rwsem);
                device_release_driver(&cxlr->dev);
                down_write(&cxl_region_rwsem);

                /*
                 * The lock was dropped, so revalidate that the reset is
                 * still pending.
                 */
                if (p->state == CXL_CONFIG_RESET_PENDING) {
                        rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
                        /*
                         * Revert to committed since there may still be active
                         * decoders associated with this region, or move forward
                         * to active to mark the reset successful
                         */
                        if (rc)
                                p->state = CXL_CONFIG_COMMIT;
                        else
                                p->state = CXL_CONFIG_ACTIVE;
                }
        }

out:
        up_write(&cxl_region_rwsem);

        if (rc)
                return rc;
        return len;
}

static ssize_t commit_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct cxl_region *cxlr = to_cxl_region(dev);
        struct cxl_region_params *p = &cxlr->params;
        ssize_t rc;

        rc = down_read_interruptible(&cxl_region_rwsem);
        if (rc)
                return rc;
        rc = sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT);
        up_read(&cxl_region_rwsem);

        return rc;
}
static DEVICE_ATTR_RW(commit);

static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a,
                                  int n)
{
        struct device *dev = kobj_to_dev(kobj);
        struct cxl_region *cxlr = to_cxl_region(dev);

        /*
         * Support tooling that expects to find a 'uuid' attribute for all
         * regions regardless of mode.
         */
        if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_DECODER_PMEM)
                return 0444;
        return a->mode;
}

static ssize_t interleave_ways_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct cxl_region *cxlr = to_cxl_region(dev);
        struct cxl_region_params *p = &cxlr->params;
        ssize_t rc;

        rc = down_read_interruptible(&cxl_region_rwsem);
        if (rc)
                return rc;
        rc = sysfs_emit(buf, "%d\n", p->interleave_ways);
        up_read(&cxl_region_rwsem);

        return rc;
}

static const struct attribute_group *get_cxl_region_target_group(void);

static ssize_t interleave_ways_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t len)
{
        struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
        struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
        struct cxl_region *cxlr = to_cxl_region(dev);
        struct cxl_region_params *p = &cxlr->params;
        unsigned int val, save;
        int rc;
        u8 iw;

        rc = kstrtouint(buf, 0, &val);
        if (rc)
                return rc;

        rc = ways_to_eiw(val, &iw);
        if (rc)
                return rc;

        /*
         * Even for x3, x6, and x12 interleaves, the region interleave must be
         * a power-of-2 multiple of the host bridge interleave.
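         * For example, behind a x2 host bridge a x4 region is allowed
         * (4 / 2 = 2, a power of 2), while a x6 region is not (6 / 2 = 3).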
         */
        if (!is_power_of_2(val / cxld->interleave_ways) ||
            (val % cxld->interleave_ways)) {
                dev_dbg(&cxlr->dev, "invalid interleave: %d\n", val);
                return -EINVAL;
        }

        rc = down_write_killable(&cxl_region_rwsem);
        if (rc)
                return rc;
        if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
                rc = -EBUSY;
                goto out;
        }

        save = p->interleave_ways;
        p->interleave_ways = val;
        rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
        if (rc)
                p->interleave_ways = save;
out:
        up_write(&cxl_region_rwsem);
        if (rc)
                return rc;
        return len;
}
static DEVICE_ATTR_RW(interleave_ways);

static ssize_t interleave_granularity_show(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
{
        struct cxl_region *cxlr = to_cxl_region(dev);
        struct cxl_region_params *p = &cxlr->params;
        ssize_t rc;

        rc = down_read_interruptible(&cxl_region_rwsem);
        if (rc)
                return rc;
        rc = sysfs_emit(buf, "%d\n", p->interleave_granularity);
        up_read(&cxl_region_rwsem);

        return rc;
}

static ssize_t interleave_granularity_store(struct device *dev,
                                            struct device_attribute *attr,
                                            const char *buf, size_t len)
{
        struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
        struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
        struct cxl_region *cxlr = to_cxl_region(dev);
        struct cxl_region_params *p = &cxlr->params;
        int rc, val;
        u16 ig;

        rc = kstrtoint(buf, 0, &val);
        if (rc)
                return rc;

        rc = granularity_to_eig(val, &ig);
        if (rc)
                return rc;

        /*
         * When the host-bridge is interleaved, disallow region granularity !=
         * root granularity. Regions with a granularity less than the root
         * interleave result in needing multiple endpoints to support a single
         * slot in the interleave (possible to support in the future). Regions
         * with a granularity greater than the root interleave result in invalid
         * DPA translations (invalid to support).
         */
        if (cxld->interleave_ways > 1 && val != cxld->interleave_granularity)
                return -EINVAL;

        rc = down_write_killable(&cxl_region_rwsem);
        if (rc)
                return rc;
        if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
                rc = -EBUSY;
                goto out;
        }

        p->interleave_granularity = val;
out:
        up_write(&cxl_region_rwsem);
        if (rc)
                return rc;
        return len;
}
static DEVICE_ATTR_RW(interleave_granularity);

static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct cxl_region *cxlr = to_cxl_region(dev);
        struct cxl_region_params *p = &cxlr->params;
        u64 resource = -1ULL;
        ssize_t rc;

        rc = down_read_interruptible(&cxl_region_rwsem);
        if (rc)
                return rc;
        if (p->res)
                resource = p->res->start;
        rc = sysfs_emit(buf, "%#llx\n", resource);
        up_read(&cxl_region_rwsem);

        return rc;
}
static DEVICE_ATTR_RO(resource);

static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct cxl_region *cxlr = to_cxl_region(dev);

        return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxlr->mode));
}
static DEVICE_ATTR_RO(mode);

static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
{
        struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
        struct cxl_region_params *p = &cxlr->params;
        struct resource *res;
        u64 remainder = 0;

        lockdep_assert_held_write(&cxl_region_rwsem);

        /* Nothing to do... */
        if (p->res && resource_size(p->res) == size)
                return 0;

        /* To change size the old size must be freed first */
        if (p->res)
                return -EBUSY;

        if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE)
                return -EBUSY;

        /* ways, granularity and uuid (if PMEM) need to be set before HPA */
        if (!p->interleave_ways || !p->interleave_granularity ||
            (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid)))
                return -ENXIO;

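        /*
         * The region size must be a whole multiple of SZ_256M times the
         * interleave ways; e.g. a 2-way region can only be sized in 512M
         * increments.
         */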
        div64_u64_rem(size, (u64)SZ_256M * p->interleave_ways, &remainder);
        if (remainder)
                return -EINVAL;

        res = alloc_free_mem_region(cxlrd->res, size, SZ_256M,
                                    dev_name(&cxlr->dev));
        if (IS_ERR(res)) {
                dev_dbg(&cxlr->dev,
                        "HPA allocation error (%ld) for size:%pap in %s %pr\n",
                        PTR_ERR(res), &size, cxlrd->res->name, cxlrd->res);
                return PTR_ERR(res);
        }

        p->res = res;
        p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;

        return 0;
}

static void cxl_region_iomem_release(struct cxl_region *cxlr)
{
        struct cxl_region_params *p = &cxlr->params;

        if (device_is_registered(&cxlr->dev))
                lockdep_assert_held_write(&cxl_region_rwsem);
        if (p->res) {
                /*
                 * Autodiscovered regions may not have been able to insert
                 * their resource.
                 */
                if (p->res->parent)
                        remove_resource(p->res);
                kfree(p->res);
                p->res = NULL;
        }
}

static int free_hpa(struct cxl_region *cxlr)
{
        struct cxl_region_params *p = &cxlr->params;

        lockdep_assert_held_write(&cxl_region_rwsem);

        if (!p->res)
                return 0;

        if (p->state >= CXL_CONFIG_ACTIVE)
                return -EBUSY;

        cxl_region_iomem_release(cxlr);
        p->state = CXL_CONFIG_IDLE;
        return 0;
}

static ssize_t size_store(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t len)
{
        struct cxl_region *cxlr = to_cxl_region(dev);
        u64 val;
        int rc;

        rc = kstrtou64(buf, 0, &val);
        if (rc)
                return rc;

        rc = down_write_killable(&cxl_region_rwsem);
        if (rc)
                return rc;

        if (val)
                rc = alloc_hpa(cxlr, val);
        else
                rc = free_hpa(cxlr);
        up_write(&cxl_region_rwsem);

        if (rc)
                return rc;

        return len;
}

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct cxl_region *cxlr = to_cxl_region(dev);
        struct cxl_region_params *p = &cxlr->params;
        u64 size = 0;
        ssize_t rc;

        rc = down_read_interruptible(&cxl_region_rwsem);
        if (rc)
                return rc;
        if (p->res)
                size = resource_size(p->res);
        rc = sysfs_emit(buf, "%#llx\n", size);
        up_read(&cxl_region_rwsem);

        return rc;
}
static DEVICE_ATTR_RW(size);

static struct attribute *cxl_region_attrs[] = {
        &dev_attr_uuid.attr,
        &dev_attr_commit.attr,
        &dev_attr_interleave_ways.attr,
        &dev_attr_interleave_granularity.attr,
        &dev_attr_resource.attr,
        &dev_attr_size.attr,
        &dev_attr_mode.attr,
        NULL,
};

static const struct attribute_group cxl_region_group = {
        .attrs = cxl_region_attrs,
        .is_visible = cxl_region_visible,
};

static size_t show_targetN(struct cxl_region *cxlr, char *buf, int pos)
{
        struct cxl_region_params *p = &cxlr->params;
        struct cxl_endpoint_decoder *cxled;
        int rc;

        rc = down_read_interruptible(&cxl_region_rwsem);
        if (rc)
                return rc;

        if (pos >= p->interleave_ways) {
                dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
                        p->interleave_ways);
                rc = -ENXIO;
                goto out;
        }

        cxled = p->targets[pos];
        if (!cxled)
                rc = sysfs_emit(buf, "\n");
        else
                rc = sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev));
out:
        up_read(&cxl_region_rwsem);

        return rc;
}

static int match_free_decoder(struct device *dev, void *data)
{
        struct cxl_decoder *cxld;
        int *id = data;

        if (!is_switch_decoder(dev))
                return 0;

        cxld = to_cxl_decoder(dev);

        /* enforce ordered allocation */
        if (cxld->id != *id)
                return 0;

        if (!cxld->region)
                return 1;

        (*id)++;

        return 0;
}

static int match_auto_decoder(struct device *dev, void *data)
{
        struct cxl_region_params *p = data;
        struct cxl_decoder *cxld;
        struct range *r;

        if (!is_switch_decoder(dev))
                return 0;

        cxld = to_cxl_decoder(dev);
        r = &cxld->hpa_range;

        if (p->res && p->res->start == r->start && p->res->end == r->end)
                return 1;

        return 0;
}

static struct cxl_decoder *
cxl_region_find_decoder(struct cxl_port *port,
                        struct cxl_endpoint_decoder *cxled,
                        struct cxl_region *cxlr)
{
        struct device *dev;
        int id = 0;

        if (port == cxled_to_port(cxled))
                return &cxled->cxld;

        if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags))
                dev = device_find_child(&port->dev, &cxlr->params,
                                        match_auto_decoder);
        else
                dev = device_find_child(&port->dev, &id, match_free_decoder);
        if (!dev)
                return NULL;
        /*
         * This decoder is pinned (stays registered) as long as the endpoint
         * decoder is registered, and endpoint decoder unregistration holds
         * the cxl_region_rwsem over unregister events, so there is no need
         * to hold on to this extra reference.
         */
        put_device(dev);
        return to_cxl_decoder(dev);
}

static bool auto_order_ok(struct cxl_port *port, struct cxl_region *cxlr_iter,
                          struct cxl_decoder *cxld)
{
        struct cxl_region_ref *rr = cxl_rr_load(port, cxlr_iter);
        struct cxl_decoder *cxld_iter = rr->decoder;

        /*
         * Allow the out of order assembly of auto-discovered regions.
         * Per CXL Spec 3.1 8.2.4.20.12 software must commit decoders
         * in HPA order. Confirm that the decoder with the lesser HPA
         * starting address has the lesser id.
         */
        dev_dbg(&cxld->dev, "check for HPA violation %s:%d < %s:%d\n",
                dev_name(&cxld->dev), cxld->id,
                dev_name(&cxld_iter->dev), cxld_iter->id);

        if (cxld_iter->id > cxld->id)
                return true;

        return false;
}

static struct cxl_region_ref *
alloc_region_ref(struct cxl_port *port, struct cxl_region *cxlr,
                 struct cxl_endpoint_decoder *cxled)
{
        struct cxl_region_params *p = &cxlr->params;
        struct cxl_region_ref *cxl_rr, *iter;
        unsigned long index;
        int rc;

        xa_for_each(&port->regions, index, iter) {
                struct cxl_region_params *ip = &iter->region->params;

                if (!ip->res || ip->res->start < p->res->start)
                        continue;

                if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
                        struct cxl_decoder *cxld;

                        cxld = cxl_region_find_decoder(port, cxled, cxlr);
                        if (auto_order_ok(port, iter->region, cxld))
                                continue;
                }
                dev_dbg(&cxlr->dev, "%s: HPA order violation %s:%pr vs %pr\n",
                        dev_name(&port->dev),
                        dev_name(&iter->region->dev), ip->res, p->res);

                return ERR_PTR(-EBUSY);
        }

        cxl_rr = kzalloc(sizeof(*cxl_rr), GFP_KERNEL);
        if (!cxl_rr)
                return ERR_PTR(-ENOMEM);
        cxl_rr->port = port;
        cxl_rr->region = cxlr;
        cxl_rr->nr_targets = 1;
        xa_init(&cxl_rr->endpoints);

        rc = xa_insert(&port->regions, (unsigned long)cxlr, cxl_rr, GFP_KERNEL);
        if (rc) {
                dev_dbg(&cxlr->dev,
                        "%s: failed to track region reference: %d\n",
                        dev_name(&port->dev), rc);
                kfree(cxl_rr);
                return ERR_PTR(rc);
        }

        return cxl_rr;
}

static void cxl_rr_free_decoder(struct cxl_region_ref *cxl_rr)
{
        struct cxl_region *cxlr = cxl_rr->region;
        struct cxl_decoder *cxld = cxl_rr->decoder;

        if (!cxld)
                return;

        dev_WARN_ONCE(&cxlr->dev, cxld->region != cxlr, "region mismatch\n");
        if (cxld->region == cxlr) {
                cxld->region = NULL;
                put_device(&cxlr->dev);
        }
}

static void free_region_ref(struct cxl_region_ref *cxl_rr)
{
        struct cxl_port *port = cxl_rr->port;
        struct cxl_region *cxlr = cxl_rr->region;

        cxl_rr_free_decoder(cxl_rr);
        xa_erase(&port->regions, (unsigned long)cxlr);
        xa_destroy(&cxl_rr->endpoints);
        kfree(cxl_rr);
}

static int cxl_rr_ep_add(struct cxl_region_ref *cxl_rr,
                         struct cxl_endpoint_decoder *cxled)
{
        int rc;
        struct cxl_port *port = cxl_rr->port;
        struct cxl_region *cxlr = cxl_rr->region;
        struct cxl_decoder *cxld = cxl_rr->decoder;
        struct cxl_ep *ep = cxl_ep_load(port, cxled_to_memdev(cxled));

        if (ep) {
                rc = xa_insert(&cxl_rr->endpoints, (unsigned long)cxled, ep,
                               GFP_KERNEL);
                if (rc)
                        return rc;
        }
        cxl_rr->nr_eps++;

        if (!cxld->region) {
                cxld->region = cxlr;
                get_device(&cxlr->dev);
        }

        return 0;
}

static int cxl_rr_alloc_decoder(struct cxl_port *port, struct cxl_region *cxlr,
                                struct cxl_endpoint_decoder *cxled,
                                struct cxl_region_ref *cxl_rr)
{
        struct cxl_decoder *cxld;

        cxld = cxl_region_find_decoder(port, cxled, cxlr);
        if (!cxld) {
                dev_dbg(&cxlr->dev, "%s: no decoder available\n",
                        dev_name(&port->dev));
                return -EBUSY;
        }

        if (cxld->region) {
                dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n",
                        dev_name(&port->dev), dev_name(&cxld->dev),
                        dev_name(&cxld->region->dev));
                return -EBUSY;
        }

        /*
         * Endpoints should already match the region type, but backstop that
         * assumption with an assertion. Switch-decoders change mapping-type
         * based on what is mapped when they are assigned to a region.
         */
        dev_WARN_ONCE(&cxlr->dev,
                      port == cxled_to_port(cxled) &&
                              cxld->target_type != cxlr->type,
                      "%s:%s mismatch decoder type %d -> %d\n",
                      dev_name(&cxled_to_memdev(cxled)->dev),
                      dev_name(&cxld->dev), cxld->target_type, cxlr->type);
        cxld->target_type = cxlr->type;
        cxl_rr->decoder = cxld;
        return 0;
}

/**
 * cxl_port_attach_region() - track a region's interest in a port by endpoint
 * @port: port to add a new region reference 'struct cxl_region_ref'
 * @cxlr: region to attach to @port
 * @cxled: endpoint decoder used to create or further pin a region reference
 * @pos: interleave position of @cxled in @cxlr
 *
 * The attach event is an opportunity to validate CXL decode setup
 * constraints and record metadata needed for programming HDM decoders,
 * in particular decoder target lists.
 *
 * The steps are:
 *
 * - validate that there are no other regions with a higher HPA already
 *   associated with @port
 * - establish a region reference if one is not already present
 *
 *   - additionally allocate a decoder instance that will host @cxlr on
 *     @port
 *
 * - pin the region reference by the endpoint
 * - account for how many entries in @port's target list are needed to
 *   cover all of the added endpoints.
 */
static int cxl_port_attach_region(struct cxl_port *port,
                                  struct cxl_region *cxlr,
                                  struct cxl_endpoint_decoder *cxled, int pos)
{
        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
        struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
        struct cxl_region_ref *cxl_rr;
        bool nr_targets_inc = false;
        struct cxl_decoder *cxld;
        unsigned long index;
        int rc = -EBUSY;

        lockdep_assert_held_write(&cxl_region_rwsem);

        cxl_rr = cxl_rr_load(port, cxlr);
        if (cxl_rr) {
                struct cxl_ep *ep_iter;
                int found = 0;

                /*
                 * Walk the existing endpoints that have been attached to
                 * @cxlr at @port and see if they share the same 'next' port
                 * in the downstream direction. I.e. endpoints that share a
                 * common upstream switch.
                 */
                xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
                        if (ep_iter == ep)
                                continue;
                        if (ep_iter->next == ep->next) {
                                found++;
                                break;
                        }
                }

                /*
                 * New target port, or @port is an endpoint port that always
                 * accounts for its own local decode as a target.
                 */
                if (!found || !ep->next) {
                        cxl_rr->nr_targets++;
                        nr_targets_inc = true;
                }
        } else {
                cxl_rr = alloc_region_ref(port, cxlr, cxled);
                if (IS_ERR(cxl_rr)) {
                        dev_dbg(&cxlr->dev,
                                "%s: failed to allocate region reference\n",
                                dev_name(&port->dev));
                        return PTR_ERR(cxl_rr);
                }
                nr_targets_inc = true;

                rc = cxl_rr_alloc_decoder(port, cxlr, cxled, cxl_rr);
                if (rc)
                        goto out_erase;
        }
        cxld = cxl_rr->decoder;

        /*
         * The number of targets should not exceed the target count of the
         * decoder.
         */
        if (is_switch_decoder(&cxld->dev)) {
                struct cxl_switch_decoder *cxlsd;

                cxlsd = to_cxl_switch_decoder(&cxld->dev);
                if (cxl_rr->nr_targets > cxlsd->nr_targets) {
                        dev_dbg(&cxlr->dev,
                                "%s:%s %s add: %s:%s @ %d overflows targets: %d\n",
                                dev_name(port->uport_dev), dev_name(&port->dev),
                                dev_name(&cxld->dev), dev_name(&cxlmd->dev),
                                dev_name(&cxled->cxld.dev), pos,
                                cxlsd->nr_targets);
                        rc = -ENXIO;
                        goto out_erase;
                }
        }

        rc = cxl_rr_ep_add(cxl_rr, cxled);
        if (rc) {
                dev_dbg(&cxlr->dev,
                        "%s: failed to track endpoint %s:%s reference\n",
                        dev_name(&port->dev), dev_name(&cxlmd->dev),
                        dev_name(&cxld->dev));
                goto out_erase;
        }

        dev_dbg(&cxlr->dev,
                "%s:%s %s add: %s:%s @ %d next: %s nr_eps: %d nr_targets: %d\n",
                dev_name(port->uport_dev), dev_name(&port->dev),
                dev_name(&cxld->dev), dev_name(&cxlmd->dev),
                dev_name(&cxled->cxld.dev), pos,
                ep ? ep->next ? dev_name(ep->next->uport_dev) :
                                dev_name(&cxlmd->dev) :
                     "none",
                cxl_rr->nr_eps, cxl_rr->nr_targets);

        return 0;
out_erase:
        if (nr_targets_inc)
                cxl_rr->nr_targets--;
        if (cxl_rr->nr_eps == 0)
                free_region_ref(cxl_rr);
        return rc;
}

static void cxl_port_detach_region(struct cxl_port *port,
                                   struct cxl_region *cxlr,
                                   struct cxl_endpoint_decoder *cxled)
{
        struct cxl_region_ref *cxl_rr;
        struct cxl_ep *ep = NULL;

        lockdep_assert_held_write(&cxl_region_rwsem);

        cxl_rr = cxl_rr_load(port, cxlr);
        if (!cxl_rr)
                return;

        /*
         * Endpoint ports do not carry cxl_ep references, and they
         * never target more than one endpoint by definition
         */
        if (cxl_rr->decoder == &cxled->cxld)
                cxl_rr->nr_eps--;
        else
                ep = xa_erase(&cxl_rr->endpoints, (unsigned long)cxled);
        if (ep) {
                struct cxl_ep *ep_iter;
                unsigned long index;
                int found = 0;

                cxl_rr->nr_eps--;
                xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
                        if (ep_iter->next == ep->next) {
                                found++;
                                break;
                        }
                }
                if (!found)
                        cxl_rr->nr_targets--;
        }

        if (cxl_rr->nr_eps == 0)
                free_region_ref(cxl_rr);
}

static int check_last_peer(struct cxl_endpoint_decoder *cxled,
                           struct cxl_ep *ep, struct cxl_region_ref *cxl_rr,
                           int distance)
{
        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
        struct cxl_region *cxlr = cxl_rr->region;
        struct cxl_region_params *p = &cxlr->params;
        struct cxl_endpoint_decoder *cxled_peer;
        struct cxl_port *port = cxl_rr->port;
        struct cxl_memdev *cxlmd_peer;
        struct cxl_ep *ep_peer;
        int pos = cxled->pos;

        /*
         * If this position wants to share a dport with the last endpoint
         * mapped, then that endpoint, at index 'position - distance', must
         * also be mapped by this dport.
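         * For example, in a 4-way region routed through a 2-target switch,
         * distance = 4 / 2 = 2, so position 2 must share its dport with
         * position 0.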
         */
        if (pos < distance) {
                dev_dbg(&cxlr->dev, "%s:%s: cannot host %s:%s at %d\n",
                        dev_name(port->uport_dev), dev_name(&port->dev),
                        dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
                return -ENXIO;
        }
        cxled_peer = p->targets[pos - distance];
        cxlmd_peer = cxled_to_memdev(cxled_peer);
        ep_peer = cxl_ep_load(port, cxlmd_peer);
        if (ep->dport != ep_peer->dport) {
                dev_dbg(&cxlr->dev,
                        "%s:%s: %s:%s pos %d mismatched peer %s:%s\n",
                        dev_name(port->uport_dev), dev_name(&port->dev),
                        dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos,
                        dev_name(&cxlmd_peer->dev),
                        dev_name(&cxled_peer->cxld.dev));
                return -ENXIO;
        }

        return 0;
}

static int check_interleave_cap(struct cxl_decoder *cxld, int iw, int ig)
{
        struct cxl_port *port = to_cxl_port(cxld->dev.parent);
        struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
        unsigned int interleave_mask;
        u8 eiw;
        u16 eig;
        int high_pos, low_pos;

        if (!test_bit(iw, &cxlhdm->iw_cap_mask))
                return -ENXIO;
        /*
         * Per CXL specification r3.1 (8.2.4.20.13 Decoder Protection),
         * if eiw < 8:
         *   DPAOFFSET[51: eig + 8] = HPAOFFSET[51: eig + 8 + eiw]
         *   DPAOFFSET[eig + 7: 0]  = HPAOFFSET[eig + 7: 0]
         *
         *   when eiw is 0, all the bits of HPAOFFSET[51: 0] are used, and
         *   there are no interleave bits.
         *
         * if eiw >= 8:
         *   DPAOFFSET[51: eig + 8] = HPAOFFSET[51: eig + eiw] / 3
         *   DPAOFFSET[eig + 7: 0]  = HPAOFFSET[eig + 7: 0]
         *
         *   when eiw is 8, all the bits of HPAOFFSET[51: 0] are used, and
         *   there are no interleave bits.
         */
        ways_to_eiw(iw, &eiw);
        if (eiw == 0 || eiw == 8)
                return 0;

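        /*
         * Worked example (illustrative): iw = 4 (eiw = 2) with ig = 256
         * (eig = 0) means HPA bits [9:8] select the interleave target, so
         * interleave_mask = GENMASK(9, 8).
         */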
1265 granularity_to_eig(ig, &eig);
1266 if (eiw > 8)
1267 high_pos = eiw + eig - 1;
1268 else
1269 high_pos = eiw + eig + 7;
1270 low_pos = eig + 8;
1271 interleave_mask = GENMASK(high_pos, low_pos);
1272 if (interleave_mask & ~cxlhdm->interleave_mask)
1273 return -ENXIO;
1274
1275 return 0;
1276 }
1277
cxl_port_setup_targets(struct cxl_port * port,struct cxl_region * cxlr,struct cxl_endpoint_decoder * cxled)1278 static int cxl_port_setup_targets(struct cxl_port *port,
1279 struct cxl_region *cxlr,
1280 struct cxl_endpoint_decoder *cxled)
1281 {
1282 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
1283 int parent_iw, parent_ig, ig, iw, rc, inc = 0, pos = cxled->pos;
1284 struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
1285 struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
1286 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
1287 struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
1288 struct cxl_region_params *p = &cxlr->params;
1289 struct cxl_decoder *cxld = cxl_rr->decoder;
1290 struct cxl_switch_decoder *cxlsd;
1291 u16 eig, peig;
1292 u8 eiw, peiw;
1293
1294 /*
1295 * While root level decoders support x3, x6, x12, switch level
1296 * decoders only support powers of 2 up to x16.
1297 */
1298 if (!is_power_of_2(cxl_rr->nr_targets)) {
1299 dev_dbg(&cxlr->dev, "%s:%s: invalid target count %d\n",
1300 dev_name(port->uport_dev), dev_name(&port->dev),
1301 cxl_rr->nr_targets);
1302 return -EINVAL;
1303 }
1304
1305 cxlsd = to_cxl_switch_decoder(&cxld->dev);
1306 if (cxl_rr->nr_targets_set) {
1307 int i, distance;
1308
1309 /*
1310 * Passthrough decoders impose no distance requirements between
1311 * peers
1312 */
1313 if (cxl_rr->nr_targets == 1)
1314 distance = 0;
1315 else
1316 distance = p->nr_targets / cxl_rr->nr_targets;
1317 for (i = 0; i < cxl_rr->nr_targets_set; i++)
1318 if (ep->dport == cxlsd->target[i]) {
1319 rc = check_last_peer(cxled, ep, cxl_rr,
1320 distance);
1321 if (rc)
1322 return rc;
1323 goto out_target_set;
1324 }
1325 goto add_target;
1326 }
1327
1328 if (is_cxl_root(parent_port)) {
1329 /*
1330 * Root decoder IG is always set to value in CFMWS which
1331 * may be different than this region's IG. We can use the
1332 * region's IG here since interleave_granularity_store()
1333 * does not allow interleaved host-bridges with
1334 * root IG != region IG.
1335 */
1336 parent_ig = p->interleave_granularity;
1337 parent_iw = cxlrd->cxlsd.cxld.interleave_ways;
1338 /*
1339 * For purposes of address bit routing, use power-of-2 math for
1340 * switch ports.
1341 */
1342 if (!is_power_of_2(parent_iw))
1343 parent_iw /= 3;
1344 } else {
1345 struct cxl_region_ref *parent_rr;
1346 struct cxl_decoder *parent_cxld;
1347
1348 parent_rr = cxl_rr_load(parent_port, cxlr);
1349 parent_cxld = parent_rr->decoder;
1350 parent_ig = parent_cxld->interleave_granularity;
1351 parent_iw = parent_cxld->interleave_ways;
1352 }
1353
1354 rc = granularity_to_eig(parent_ig, &peig);
1355 if (rc) {
1356 dev_dbg(&cxlr->dev, "%s:%s: invalid parent granularity: %d\n",
1357 dev_name(parent_port->uport_dev),
1358 dev_name(&parent_port->dev), parent_ig);
1359 return rc;
1360 }
1361
1362 rc = ways_to_eiw(parent_iw, &peiw);
1363 if (rc) {
1364 dev_dbg(&cxlr->dev, "%s:%s: invalid parent interleave: %d\n",
1365 dev_name(parent_port->uport_dev),
1366 dev_name(&parent_port->dev), parent_iw);
1367 return rc;
1368 }
1369
1370 iw = cxl_rr->nr_targets;
1371 rc = ways_to_eiw(iw, &eiw);
1372 if (rc) {
1373 dev_dbg(&cxlr->dev, "%s:%s: invalid port interleave: %d\n",
1374 dev_name(port->uport_dev), dev_name(&port->dev), iw);
1375 return rc;
1376 }
1377
1378 /*
1379 * Interleave granularity is a multiple of @parent_port granularity.
1380 * Multiplier is the parent port interleave ways.
1381 */
1382 rc = granularity_to_eig(parent_ig * parent_iw, &eig);
1383 if (rc) {
1384 dev_dbg(&cxlr->dev,
1385 "%s: invalid granularity calculation (%d * %d)\n",
1386 dev_name(&parent_port->dev), parent_ig, parent_iw);
1387 return rc;
1388 }
1389
1390 rc = eig_to_granularity(eig, &ig);
1391 if (rc) {
1392 dev_dbg(&cxlr->dev, "%s:%s: invalid interleave: %d\n",
1393 dev_name(port->uport_dev), dev_name(&port->dev),
1394 256 << eig);
1395 return rc;
1396 }
1397
1398 if (iw > 8 || iw > cxlsd->nr_targets) {
1399 dev_dbg(&cxlr->dev,
1400 "%s:%s:%s: ways: %d overflows targets: %d\n",
1401 dev_name(port->uport_dev), dev_name(&port->dev),
1402 dev_name(&cxld->dev), iw, cxlsd->nr_targets);
1403 return -ENXIO;
1404 }
1405
1406 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
1407 if (cxld->interleave_ways != iw ||
1408 cxld->interleave_granularity != ig ||
1409 cxld->hpa_range.start != p->res->start ||
1410 cxld->hpa_range.end != p->res->end ||
1411 ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
1412 dev_err(&cxlr->dev,
1413 "%s:%s %s expected iw: %d ig: %d %pr\n",
1414 dev_name(port->uport_dev), dev_name(&port->dev),
1415 __func__, iw, ig, p->res);
1416 dev_err(&cxlr->dev,
1417 "%s:%s %s got iw: %d ig: %d state: %s %#llx:%#llx\n",
1418 dev_name(port->uport_dev), dev_name(&port->dev),
1419 __func__, cxld->interleave_ways,
1420 cxld->interleave_granularity,
1421 (cxld->flags & CXL_DECODER_F_ENABLE) ?
1422 "enabled" :
1423 "disabled",
1424 cxld->hpa_range.start, cxld->hpa_range.end);
1425 return -ENXIO;
1426 }
1427 } else {
1428 rc = check_interleave_cap(cxld, iw, ig);
1429 if (rc) {
1430 dev_dbg(&cxlr->dev,
1431 "%s:%s iw: %d ig: %d is not supported\n",
1432 dev_name(port->uport_dev),
1433 dev_name(&port->dev), iw, ig);
1434 return rc;
1435 }
1436
1437 cxld->interleave_ways = iw;
1438 cxld->interleave_granularity = ig;
1439 cxld->hpa_range = (struct range) {
1440 .start = p->res->start,
1441 .end = p->res->end,
1442 };
1443 }
1444 dev_dbg(&cxlr->dev, "%s:%s iw: %d ig: %d\n", dev_name(port->uport_dev),
1445 dev_name(&port->dev), iw, ig);
1446 add_target:
1447 if (cxl_rr->nr_targets_set == cxl_rr->nr_targets) {
1448 dev_dbg(&cxlr->dev,
1449 "%s:%s: targets full trying to add %s:%s at %d\n",
1450 dev_name(port->uport_dev), dev_name(&port->dev),
1451 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
1452 return -ENXIO;
1453 }
1454 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
1455 if (cxlsd->target[cxl_rr->nr_targets_set] != ep->dport) {
1456 dev_dbg(&cxlr->dev, "%s:%s: %s expected %s at %d\n",
1457 dev_name(port->uport_dev), dev_name(&port->dev),
1458 dev_name(&cxlsd->cxld.dev),
1459 dev_name(ep->dport->dport_dev),
1460 cxl_rr->nr_targets_set);
1461 return -ENXIO;
1462 }
1463 } else
1464 cxlsd->target[cxl_rr->nr_targets_set] = ep->dport;
1465 inc = 1;
1466 out_target_set:
1467 cxl_rr->nr_targets_set += inc;
1468 dev_dbg(&cxlr->dev, "%s:%s target[%d] = %s for %s:%s @ %d\n",
1469 dev_name(port->uport_dev), dev_name(&port->dev),
1470 cxl_rr->nr_targets_set - 1, dev_name(ep->dport->dport_dev),
1471 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
1472
1473 return 0;
1474 }
1475
cxl_port_reset_targets(struct cxl_port * port,struct cxl_region * cxlr)1476 static void cxl_port_reset_targets(struct cxl_port *port,
1477 struct cxl_region *cxlr)
1478 {
1479 struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
1480 struct cxl_decoder *cxld;
1481
1482 /*
1483 * After the last endpoint has been detached the entire cxl_rr may now
1484 * be gone.
1485 */
1486 if (!cxl_rr)
1487 return;
1488 cxl_rr->nr_targets_set = 0;
1489
1490 cxld = cxl_rr->decoder;
1491 cxld->hpa_range = (struct range) {
1492 .start = 0,
1493 .end = -1,
1494 };
1495 }
1496
cxl_region_teardown_targets(struct cxl_region * cxlr)1497 static void cxl_region_teardown_targets(struct cxl_region *cxlr)
1498 {
1499 struct cxl_region_params *p = &cxlr->params;
1500 struct cxl_endpoint_decoder *cxled;
1501 struct cxl_dev_state *cxlds;
1502 struct cxl_memdev *cxlmd;
1503 struct cxl_port *iter;
1504 struct cxl_ep *ep;
1505 int i;
1506
1507 /*
1508 * In the auto-discovery case skip automatic teardown since the
1509 * address space is already active
1510 */
1511 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags))
1512 return;
1513
1514 for (i = 0; i < p->nr_targets; i++) {
1515 cxled = p->targets[i];
1516 cxlmd = cxled_to_memdev(cxled);
1517 cxlds = cxlmd->cxlds;
1518
1519 if (cxlds->rcd)
1520 continue;
1521
1522 iter = cxled_to_port(cxled);
1523 while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
1524 iter = to_cxl_port(iter->dev.parent);
1525
1526 for (ep = cxl_ep_load(iter, cxlmd); iter;
1527 iter = ep->next, ep = cxl_ep_load(iter, cxlmd))
1528 cxl_port_reset_targets(iter, cxlr);
1529 }
1530 }
1531
cxl_region_setup_targets(struct cxl_region * cxlr)1532 static int cxl_region_setup_targets(struct cxl_region *cxlr)
1533 {
1534 struct cxl_region_params *p = &cxlr->params;
1535 struct cxl_endpoint_decoder *cxled;
1536 struct cxl_dev_state *cxlds;
1537 int i, rc, rch = 0, vh = 0;
1538 struct cxl_memdev *cxlmd;
1539 struct cxl_port *iter;
1540 struct cxl_ep *ep;
1541
1542 for (i = 0; i < p->nr_targets; i++) {
1543 cxled = p->targets[i];
1544 cxlmd = cxled_to_memdev(cxled);
1545 cxlds = cxlmd->cxlds;
1546
1547 /* validate that all targets agree on topology */
1548 if (!cxlds->rcd) {
1549 vh++;
1550 } else {
1551 rch++;
1552 continue;
1553 }
1554
1555 iter = cxled_to_port(cxled);
1556 while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
1557 iter = to_cxl_port(iter->dev.parent);
1558
1559 /*
1560 * Descend the topology tree programming / validating
1561 * targets while looking for conflicts.
1562 */
1563 for (ep = cxl_ep_load(iter, cxlmd); iter;
1564 iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
1565 rc = cxl_port_setup_targets(iter, cxlr, cxled);
1566 if (rc) {
1567 cxl_region_teardown_targets(cxlr);
1568 return rc;
1569 }
1570 }
1571 }
1572
1573 if (rch && vh) {
1574 dev_err(&cxlr->dev, "mismatched CXL topologies detected\n");
1575 cxl_region_teardown_targets(cxlr);
1576 return -ENXIO;
1577 }
1578
1579 return 0;
1580 }
1581
cxl_region_validate_position(struct cxl_region * cxlr,struct cxl_endpoint_decoder * cxled,int pos)1582 static int cxl_region_validate_position(struct cxl_region *cxlr,
1583 struct cxl_endpoint_decoder *cxled,
1584 int pos)
1585 {
1586 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
1587 struct cxl_region_params *p = &cxlr->params;
1588 int i;
1589
1590 if (pos < 0 || pos >= p->interleave_ways) {
1591 dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
1592 p->interleave_ways);
1593 return -ENXIO;
1594 }
1595
1596 if (p->targets[pos] == cxled)
1597 return 0;
1598
1599 if (p->targets[pos]) {
1600 struct cxl_endpoint_decoder *cxled_target = p->targets[pos];
1601 struct cxl_memdev *cxlmd_target = cxled_to_memdev(cxled_target);
1602
1603 dev_dbg(&cxlr->dev, "position %d already assigned to %s:%s\n",
1604 pos, dev_name(&cxlmd_target->dev),
1605 dev_name(&cxled_target->cxld.dev));
1606 return -EBUSY;
1607 }
1608
1609 for (i = 0; i < p->interleave_ways; i++) {
1610 struct cxl_endpoint_decoder *cxled_target;
1611 struct cxl_memdev *cxlmd_target;
1612
1613 cxled_target = p->targets[i];
1614 if (!cxled_target)
1615 continue;
1616
1617 cxlmd_target = cxled_to_memdev(cxled_target);
1618 if (cxlmd_target == cxlmd) {
1619 dev_dbg(&cxlr->dev,
1620 "%s already specified at position %d via: %s\n",
1621 dev_name(&cxlmd->dev), pos,
1622 dev_name(&cxled_target->cxld.dev));
1623 return -EBUSY;
1624 }
1625 }
1626
1627 return 0;
1628 }
1629
cxl_region_attach_position(struct cxl_region * cxlr,struct cxl_root_decoder * cxlrd,struct cxl_endpoint_decoder * cxled,const struct cxl_dport * dport,int pos)1630 static int cxl_region_attach_position(struct cxl_region *cxlr,
1631 struct cxl_root_decoder *cxlrd,
1632 struct cxl_endpoint_decoder *cxled,
1633 const struct cxl_dport *dport, int pos)
1634 {
1635 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
1636 struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd;
1637 struct cxl_decoder *cxld = &cxlsd->cxld;
1638 int iw = cxld->interleave_ways;
1639 struct cxl_port *iter;
1640 int rc;
1641
1642 if (dport != cxlrd->cxlsd.target[pos % iw]) {
1643 dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n",
1644 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
1645 dev_name(&cxlrd->cxlsd.cxld.dev));
1646 return -ENXIO;
1647 }
1648
1649 for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
1650 iter = to_cxl_port(iter->dev.parent)) {
1651 rc = cxl_port_attach_region(iter, cxlr, cxled, pos);
1652 if (rc)
1653 goto err;
1654 }
1655
1656 return 0;
1657
1658 err:
1659 for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
1660 iter = to_cxl_port(iter->dev.parent))
1661 cxl_port_detach_region(iter, cxlr, cxled);
1662 return rc;
1663 }
1664
cxl_region_attach_auto(struct cxl_region * cxlr,struct cxl_endpoint_decoder * cxled,int pos)1665 static int cxl_region_attach_auto(struct cxl_region *cxlr,
1666 struct cxl_endpoint_decoder *cxled, int pos)
1667 {
1668 struct cxl_region_params *p = &cxlr->params;
1669
1670 if (cxled->state != CXL_DECODER_STATE_AUTO) {
1671 dev_err(&cxlr->dev,
1672 "%s: unable to add decoder to autodetected region\n",
1673 dev_name(&cxled->cxld.dev));
1674 return -EINVAL;
1675 }
1676
1677 if (pos >= 0) {
1678 dev_dbg(&cxlr->dev, "%s: expected auto position, not %d\n",
1679 dev_name(&cxled->cxld.dev), pos);
1680 return -EINVAL;
1681 }
1682
1683 if (p->nr_targets >= p->interleave_ways) {
1684 dev_err(&cxlr->dev, "%s: no more target slots available\n",
1685 dev_name(&cxled->cxld.dev));
1686 return -ENXIO;
1687 }
1688
1689 /*
1690 * Temporarily record the endpoint decoder into the target array. Yes,
1691 * this means that userspace can view devices in the wrong position
1692 * before the region activates, and must be careful to understand when
1693 * it might be racing region autodiscovery.
1694 */
1695 pos = p->nr_targets;
1696 p->targets[pos] = cxled;
1697 cxled->pos = pos;
1698 p->nr_targets++;
1699
1700 return 0;
1701 }
1702
cmp_interleave_pos(const void * a,const void * b)1703 static int cmp_interleave_pos(const void *a, const void *b)
1704 {
1705 struct cxl_endpoint_decoder *cxled_a = *(typeof(cxled_a) *)a;
1706 struct cxl_endpoint_decoder *cxled_b = *(typeof(cxled_b) *)b;
1707
1708 return cxled_a->pos - cxled_b->pos;
1709 }
1710
next_port(struct cxl_port * port)1711 static struct cxl_port *next_port(struct cxl_port *port)
1712 {
1713 if (!port->parent_dport)
1714 return NULL;
1715 return port->parent_dport->port;
1716 }
1717
match_switch_decoder_by_range(struct device * dev,void * data)1718 static int match_switch_decoder_by_range(struct device *dev, void *data)
1719 {
1720 struct cxl_switch_decoder *cxlsd;
1721 struct range *r1, *r2 = data;
1722
1723 if (!is_switch_decoder(dev))
1724 return 0;
1725
1726 cxlsd = to_cxl_switch_decoder(dev);
1727 r1 = &cxlsd->cxld.hpa_range;
1728
1729 if (is_root_decoder(dev))
1730 return range_contains(r1, r2);
1731 return (r1->start == r2->start && r1->end == r2->end);
1732 }
1733
find_pos_and_ways(struct cxl_port * port,struct range * range,int * pos,int * ways)1734 static int find_pos_and_ways(struct cxl_port *port, struct range *range,
1735 int *pos, int *ways)
1736 {
1737 struct cxl_switch_decoder *cxlsd;
1738 struct cxl_port *parent;
1739 struct device *dev;
1740 int rc = -ENXIO;
1741
1742 parent = next_port(port);
1743 if (!parent)
1744 return rc;
1745
1746 dev = device_find_child(&parent->dev, range,
1747 match_switch_decoder_by_range);
1748 if (!dev) {
1749 dev_err(port->uport_dev,
1750 "failed to find decoder mapping %#llx-%#llx\n",
1751 range->start, range->end);
1752 return rc;
1753 }
1754 cxlsd = to_cxl_switch_decoder(dev);
1755 *ways = cxlsd->cxld.interleave_ways;
1756
1757 for (int i = 0; i < *ways; i++) {
1758 if (cxlsd->target[i] == port->parent_dport) {
1759 *pos = i;
1760 rc = 0;
1761 break;
1762 }
1763 }
1764 put_device(dev);
1765
1766 return rc;
1767 }
1768
1769 /**
1770 * cxl_calc_interleave_pos() - calculate an endpoint position in a region
1771 * @cxled: endpoint decoder member of given region
1772 *
1773 * The endpoint position is calculated by traversing the topology from
1774 * the endpoint to the root decoder and iteratively applying this
1775 * calculation:
1776 *
1777 * position = position * parent_ways + parent_pos;
1778 *
1779 * ...where @position is inferred from switch and root decoder target lists.
1780 *
1781 * Return: position >= 0 on success
1782 * -ENXIO on failure
1783 */
cxl_calc_interleave_pos(struct cxl_endpoint_decoder * cxled)1784 static int cxl_calc_interleave_pos(struct cxl_endpoint_decoder *cxled)
1785 {
1786 struct cxl_port *iter, *port = cxled_to_port(cxled);
1787 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
1788 struct range *range = &cxled->cxld.hpa_range;
1789 int parent_ways = 0, parent_pos = 0, pos = 0;
1790 int rc;
1791
1792 /*
1793 * Example: the expected interleave order of the 4-way region shown
1794 * below is: mem0, mem2, mem1, mem3
1795 *
1796 * root_port
1797 * / \
1798 * host_bridge_0 host_bridge_1
1799 * | | | |
1800 * mem0 mem1 mem2 mem3
1801 *
1802 * In the example the calculator will iterate twice. The first iteration
1803 * uses the mem position in the host-bridge and the ways of the host-
1804 * bridge to generate the first, or local, position. The second
1805 * iteration uses the host-bridge position in the root_port and the ways
1806 * of the root_port to refine the position.
1807 *
1808 * A trace of the calculation per endpoint looks like this:
1809 * mem0: pos = 0 * 2 + 0 mem2: pos = 0 * 2 + 0
1810 * pos = 0 * 2 + 0 pos = 0 * 2 + 1
1811 * pos: 0 pos: 1
1812 *
1813 * mem1: pos = 0 * 2 + 1 mem3: pos = 0 * 2 + 1
1814 * pos = 1 * 2 + 0 pos = 1 * 2 + 1
1815 * pos: 2 pos = 3
1816 *
1817 * Note that while this example is simple, the method applies to more
1818 * complex topologies, including those with switches.
1819 */
1820
1821 /* Iterate from endpoint to root_port refining the position */
1822 for (iter = port; iter; iter = next_port(iter)) {
1823 if (is_cxl_root(iter))
1824 break;
1825
1826 rc = find_pos_and_ways(iter, range, &parent_pos, &parent_ways);
1827 if (rc)
1828 return rc;
1829
1830 pos = pos * parent_ways + parent_pos;
1831 }
1832
1833 dev_dbg(&cxlmd->dev,
1834 "decoder:%s parent:%s port:%s range:%#llx-%#llx pos:%d\n",
1835 dev_name(&cxled->cxld.dev), dev_name(cxlmd->dev.parent),
1836 dev_name(&port->dev), range->start, range->end, pos);
1837
1838 return pos;
1839 }
1840
cxl_region_sort_targets(struct cxl_region * cxlr)1841 static int cxl_region_sort_targets(struct cxl_region *cxlr)
1842 {
1843 struct cxl_region_params *p = &cxlr->params;
1844 int i, rc = 0;
1845
1846 for (i = 0; i < p->nr_targets; i++) {
1847 struct cxl_endpoint_decoder *cxled = p->targets[i];
1848
1849 cxled->pos = cxl_calc_interleave_pos(cxled);
1850 /*
1851 * Record that sorting failed, but still continue to calc
1852 * cxled->pos so that follow-on code paths can reliably
1853 * do p->targets[cxled->pos] to self-reference their entry.
1854 */
1855 if (cxled->pos < 0)
1856 rc = -ENXIO;
1857 }
1858 /* Keep the cxlr target list in interleave position order */
1859 sort(p->targets, p->nr_targets, sizeof(p->targets[0]),
1860 cmp_interleave_pos, NULL);
1861
1862 dev_dbg(&cxlr->dev, "region sort %s\n", rc ? "failed" : "successful");
1863 return rc;
1864 }
1865
cxl_region_attach(struct cxl_region * cxlr,struct cxl_endpoint_decoder * cxled,int pos)1866 static int cxl_region_attach(struct cxl_region *cxlr,
1867 struct cxl_endpoint_decoder *cxled, int pos)
1868 {
1869 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
1870 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
1871 struct cxl_region_params *p = &cxlr->params;
1872 struct cxl_port *ep_port, *root_port;
1873 struct cxl_dport *dport;
1874 int rc = -ENXIO;
1875
1876 rc = check_interleave_cap(&cxled->cxld, p->interleave_ways,
1877 p->interleave_granularity);
1878 if (rc) {
1879 dev_dbg(&cxlr->dev, "%s iw: %d ig: %d is not supported\n",
1880 dev_name(&cxled->cxld.dev), p->interleave_ways,
1881 p->interleave_granularity);
1882 return rc;
1883 }
1884
1885 if (cxled->mode != cxlr->mode) {
1886 dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n",
1887 dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode);
1888 return -EINVAL;
1889 }
1890
1891 if (cxled->mode == CXL_DECODER_DEAD) {
1892 dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev));
1893 return -ENODEV;
1894 }
1895
1896 /* already full of members, or interleave config not established? */
1897 if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) {
1898 dev_dbg(&cxlr->dev, "region already active\n");
1899 return -EBUSY;
1900 } else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
1901 dev_dbg(&cxlr->dev, "interleave config missing\n");
1902 return -ENXIO;
1903 }
1904
1905 if (p->nr_targets >= p->interleave_ways) {
1906 dev_dbg(&cxlr->dev, "region already has %d endpoints\n",
1907 p->nr_targets);
1908 return -EINVAL;
1909 }
1910
1911 ep_port = cxled_to_port(cxled);
1912 root_port = cxlrd_to_port(cxlrd);
1913 dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge);
1914 if (!dport) {
1915 dev_dbg(&cxlr->dev, "%s:%s invalid target for %s\n",
1916 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
1917 dev_name(cxlr->dev.parent));
1918 return -ENXIO;
1919 }
1920
1921 if (cxled->cxld.target_type != cxlr->type) {
1922 dev_dbg(&cxlr->dev, "%s:%s type mismatch: %d vs %d\n",
1923 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
1924 cxled->cxld.target_type, cxlr->type);
1925 return -ENXIO;
1926 }
1927
1928 if (!cxled->dpa_res) {
1929 dev_dbg(&cxlr->dev, "%s:%s: missing DPA allocation.\n",
1930 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev));
1931 return -ENXIO;
1932 }
1933
1934 if (resource_size(cxled->dpa_res) * p->interleave_ways !=
1935 resource_size(p->res)) {
1936 dev_dbg(&cxlr->dev,
1937 "%s:%s: decoder-size-%#llx * ways-%d != region-size-%#llx\n",
1938 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
1939 (u64)resource_size(cxled->dpa_res), p->interleave_ways,
1940 (u64)resource_size(p->res));
1941 return -EINVAL;
1942 }
1943
1944 cxl_region_perf_data_calculate(cxlr, cxled);
1945
1946 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
1947 int i;
1948
1949 rc = cxl_region_attach_auto(cxlr, cxled, pos);
1950 if (rc)
1951 return rc;
1952
1953 /* await more targets to arrive... */
1954 if (p->nr_targets < p->interleave_ways)
1955 return 0;
1956
1957 /*
1958 * All targets are here, which implies all PCI enumeration that
1959 * affects this region has been completed. Walk the topology to
1960 * sort the devices into their relative region decode position.
1961 */
1962 rc = cxl_region_sort_targets(cxlr);
1963 if (rc)
1964 return rc;
1965
1966 for (i = 0; i < p->nr_targets; i++) {
1967 cxled = p->targets[i];
1968 ep_port = cxled_to_port(cxled);
1969 dport = cxl_find_dport_by_dev(root_port,
1970 ep_port->host_bridge);
1971 rc = cxl_region_attach_position(cxlr, cxlrd, cxled,
1972 dport, i);
1973 if (rc)
1974 return rc;
1975 }
1976
1977 rc = cxl_region_setup_targets(cxlr);
1978 if (rc)
1979 return rc;
1980
1981 /*
1982 * If target setup succeeds in the autodiscovery case
1983 * then the region is already committed.
1984 */
1985 p->state = CXL_CONFIG_COMMIT;
1986
1987 return 0;
1988 }
1989
1990 rc = cxl_region_validate_position(cxlr, cxled, pos);
1991 if (rc)
1992 return rc;
1993
1994 rc = cxl_region_attach_position(cxlr, cxlrd, cxled, dport, pos);
1995 if (rc)
1996 return rc;
1997
1998 p->targets[pos] = cxled;
1999 cxled->pos = pos;
2000 p->nr_targets++;
2001
2002 if (p->nr_targets == p->interleave_ways) {
2003 rc = cxl_region_setup_targets(cxlr);
2004 if (rc)
2005 return rc;
2006 p->state = CXL_CONFIG_ACTIVE;
2007 }
2008
2009 cxled->cxld.interleave_ways = p->interleave_ways;
2010 cxled->cxld.interleave_granularity = p->interleave_granularity;
2011 cxled->cxld.hpa_range = (struct range) {
2012 .start = p->res->start,
2013 .end = p->res->end,
2014 };
2015
2016 if (p->nr_targets != p->interleave_ways)
2017 return 0;
2018
2019 /*
2020 * Test the auto-discovery position calculator function
2021 * against this successfully created user-defined region.
2022 * A fail message here means that this interleave config
2023 * will fail when presented as CXL_REGION_F_AUTO.
2024 */
2025 for (int i = 0; i < p->nr_targets; i++) {
2026 struct cxl_endpoint_decoder *cxled = p->targets[i];
2027 int test_pos;
2028
2029 test_pos = cxl_calc_interleave_pos(cxled);
2030 dev_dbg(&cxled->cxld.dev,
2031 "Test cxl_calc_interleave_pos(): %s test_pos:%d cxled->pos:%d\n",
2032 (test_pos == cxled->pos) ? "success" : "fail",
2033 test_pos, cxled->pos);
2034 }
2035
2036 return 0;
2037 }
2038
2039 static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
2040 {
2041 struct cxl_port *iter, *ep_port = cxled_to_port(cxled);
2042 struct cxl_region *cxlr = cxled->cxld.region;
2043 struct cxl_region_params *p;
2044 int rc = 0;
2045
2046 lockdep_assert_held_write(&cxl_region_rwsem);
2047
2048 if (!cxlr)
2049 return 0;
2050
2051 p = &cxlr->params;
2052 get_device(&cxlr->dev);
2053
2054 if (p->state > CXL_CONFIG_ACTIVE) {
2055 /*
2056 * TODO: tear down all impacted regions if a device is
2057 * removed out of order
2058 */
2059 rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
2060 if (rc)
2061 goto out;
2062 p->state = CXL_CONFIG_ACTIVE;
2063 }
2064
2065 for (iter = ep_port; !is_cxl_root(iter);
2066 iter = to_cxl_port(iter->dev.parent))
2067 cxl_port_detach_region(iter, cxlr, cxled);
2068
2069 if (cxled->pos < 0 || cxled->pos >= p->interleave_ways ||
2070 p->targets[cxled->pos] != cxled) {
2071 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
2072
2073 dev_WARN_ONCE(&cxlr->dev, 1, "expected %s:%s at position %d\n",
2074 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
2075 cxled->pos);
2076 goto out;
2077 }
2078
2079 if (p->state == CXL_CONFIG_ACTIVE) {
2080 p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
2081 cxl_region_teardown_targets(cxlr);
2082 }
2083 p->targets[cxled->pos] = NULL;
2084 p->nr_targets--;
2085 cxled->cxld.hpa_range = (struct range) {
2086 .start = 0,
2087 .end = -1,
2088 };
2089
2090 /* notify the region driver that one of its targets has departed */
2091 up_write(&cxl_region_rwsem);
2092 device_release_driver(&cxlr->dev);
2093 down_write(&cxl_region_rwsem);
2094 out:
2095 put_device(&cxlr->dev);
2096 return rc;
2097 }
2098
2099 void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
2100 {
2101 down_write(&cxl_region_rwsem);
2102 cxled->mode = CXL_DECODER_DEAD;
2103 cxl_region_detach(cxled);
2104 up_write(&cxl_region_rwsem);
2105 }
2106
2107 static int attach_target(struct cxl_region *cxlr,
2108 struct cxl_endpoint_decoder *cxled, int pos,
2109 unsigned int state)
2110 {
2111 int rc = 0;
2112
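	/*
	 * Sysfs writers pass TASK_INTERRUPTIBLE so a fatal signal can abort
	 * the attach; in-kernel autodiscovery passes TASK_UNINTERRUPTIBLE
	 * and must not fail to take the lock.
	 */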
2113 if (state == TASK_INTERRUPTIBLE)
2114 rc = down_write_killable(&cxl_region_rwsem);
2115 else
2116 down_write(&cxl_region_rwsem);
2117 if (rc)
2118 return rc;
2119
2120 down_read(&cxl_dpa_rwsem);
2121 rc = cxl_region_attach(cxlr, cxled, pos);
2122 up_read(&cxl_dpa_rwsem);
2123 up_write(&cxl_region_rwsem);
2124 return rc;
2125 }
2126
2127 static int detach_target(struct cxl_region *cxlr, int pos)
2128 {
2129 struct cxl_region_params *p = &cxlr->params;
2130 int rc;
2131
2132 rc = down_write_killable(&cxl_region_rwsem);
2133 if (rc)
2134 return rc;
2135
2136 if (pos >= p->interleave_ways) {
2137 dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
2138 p->interleave_ways);
2139 rc = -ENXIO;
2140 goto out;
2141 }
2142
2143 if (!p->targets[pos]) {
2144 rc = 0;
2145 goto out;
2146 }
2147
2148 rc = cxl_region_detach(p->targets[pos]);
2149 out:
2150 up_write(&cxl_region_rwsem);
2151 return rc;
2152 }
2153
2154 static ssize_t store_targetN(struct cxl_region *cxlr, const char *buf, int pos,
2155 size_t len)
2156 {
2157 int rc;
2158
2159 if (sysfs_streq(buf, "\n"))
2160 rc = detach_target(cxlr, pos);
2161 else {
2162 struct device *dev;
2163
2164 dev = bus_find_device_by_name(&cxl_bus_type, NULL, buf);
2165 if (!dev)
2166 return -ENODEV;
2167
2168 if (!is_endpoint_decoder(dev)) {
2169 rc = -EINVAL;
2170 goto out;
2171 }
2172
2173 rc = attach_target(cxlr, to_cxl_endpoint_decoder(dev), pos,
2174 TASK_INTERRUPTIBLE);
2175 out:
2176 put_device(dev);
2177 }
2178
2179 if (rc < 0)
2180 return rc;
2181 return len;
2182 }
2183
2184 #define TARGET_ATTR_RW(n) \
2185 static ssize_t target##n##_show( \
2186 struct device *dev, struct device_attribute *attr, char *buf) \
2187 { \
2188 return show_targetN(to_cxl_region(dev), buf, (n)); \
2189 } \
2190 static ssize_t target##n##_store(struct device *dev, \
2191 struct device_attribute *attr, \
2192 const char *buf, size_t len) \
2193 { \
2194 return store_targetN(to_cxl_region(dev), buf, (n), len); \
2195 } \
2196 static DEVICE_ATTR_RW(target##n)
2197
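/* instantiate sysfs attributes target0..target15 from the template above */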
2198 TARGET_ATTR_RW(0);
2199 TARGET_ATTR_RW(1);
2200 TARGET_ATTR_RW(2);
2201 TARGET_ATTR_RW(3);
2202 TARGET_ATTR_RW(4);
2203 TARGET_ATTR_RW(5);
2204 TARGET_ATTR_RW(6);
2205 TARGET_ATTR_RW(7);
2206 TARGET_ATTR_RW(8);
2207 TARGET_ATTR_RW(9);
2208 TARGET_ATTR_RW(10);
2209 TARGET_ATTR_RW(11);
2210 TARGET_ATTR_RW(12);
2211 TARGET_ATTR_RW(13);
2212 TARGET_ATTR_RW(14);
2213 TARGET_ATTR_RW(15);
2214
2215 static struct attribute *target_attrs[] = {
2216 &dev_attr_target0.attr,
2217 &dev_attr_target1.attr,
2218 &dev_attr_target2.attr,
2219 &dev_attr_target3.attr,
2220 &dev_attr_target4.attr,
2221 &dev_attr_target5.attr,
2222 &dev_attr_target6.attr,
2223 &dev_attr_target7.attr,
2224 &dev_attr_target8.attr,
2225 &dev_attr_target9.attr,
2226 &dev_attr_target10.attr,
2227 &dev_attr_target11.attr,
2228 &dev_attr_target12.attr,
2229 &dev_attr_target13.attr,
2230 &dev_attr_target14.attr,
2231 &dev_attr_target15.attr,
2232 NULL,
2233 };
2234
2235 static umode_t cxl_region_target_visible(struct kobject *kobj,
2236 struct attribute *a, int n)
2237 {
2238 struct device *dev = kobj_to_dev(kobj);
2239 struct cxl_region *cxlr = to_cxl_region(dev);
2240 struct cxl_region_params *p = &cxlr->params;
2241
2242 if (n < p->interleave_ways)
2243 return a->mode;
2244 return 0;
2245 }
2246
2247 static const struct attribute_group cxl_region_target_group = {
2248 .attrs = target_attrs,
2249 .is_visible = cxl_region_target_visible,
2250 };
2251
2252 static const struct attribute_group *get_cxl_region_target_group(void)
2253 {
2254 return &cxl_region_target_group;
2255 }
2256
2257 static const struct attribute_group *region_groups[] = {
2258 &cxl_base_attribute_group,
2259 &cxl_region_group,
2260 &cxl_region_target_group,
2261 &cxl_region_access0_coordinate_group,
2262 &cxl_region_access1_coordinate_group,
2263 NULL,
2264 };
2265
2266 static void cxl_region_release(struct device *dev)
2267 {
2268 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
2269 struct cxl_region *cxlr = to_cxl_region(dev);
2270 int id = atomic_read(&cxlrd->region_id);
2271
2272 /*
2273 * Try to reuse the recently idled id rather than the cached
2274 * next id to prevent the region id space from increasing
2275 * unnecessarily.
2276 */
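	/*
	 * Illustrative example: if region5 is released while the cached
	 * next id is 6, the cmpxchg rolls region_id back to 5 and id 6 is
	 * handed back to memregion_free(), keeping future names dense.
	 */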
2277 if (cxlr->id < id)
2278 if (atomic_try_cmpxchg(&cxlrd->region_id, &id, cxlr->id)) {
2279 memregion_free(id);
2280 goto out;
2281 }
2282
2283 memregion_free(cxlr->id);
2284 out:
2285 put_device(dev->parent);
2286 kfree(cxlr);
2287 }
2288
2289 const struct device_type cxl_region_type = {
2290 .name = "cxl_region",
2291 .release = cxl_region_release,
2292 .groups = region_groups
2293 };
2294
2295 bool is_cxl_region(struct device *dev)
2296 {
2297 return dev->type == &cxl_region_type;
2298 }
2299 EXPORT_SYMBOL_NS_GPL(is_cxl_region, CXL);
2300
2301 static struct cxl_region *to_cxl_region(struct device *dev)
2302 {
2303 if (dev_WARN_ONCE(dev, dev->type != &cxl_region_type,
2304 "not a cxl_region device\n"))
2305 return NULL;
2306
2307 return container_of(dev, struct cxl_region, dev);
2308 }
2309
2310 static void unregister_region(void *_cxlr)
2311 {
2312 struct cxl_region *cxlr = _cxlr;
2313 struct cxl_region_params *p = &cxlr->params;
2314 int i;
2315
2316 unregister_memory_notifier(&cxlr->memory_notifier);
2317 unregister_mt_adistance_algorithm(&cxlr->adist_notifier);
2318 device_del(&cxlr->dev);
2319
2320 /*
2321 * Now that region sysfs is shut down, the parameter block is
2322 * read-only, so no need to hold the region rwsem to access the
2323 * region parameters.
2324 */
2325 for (i = 0; i < p->interleave_ways; i++)
2326 detach_target(cxlr, i);
2327
2328 cxl_region_iomem_release(cxlr);
2329 put_device(&cxlr->dev);
2330 }
2331
2332 static struct lock_class_key cxl_region_key;
2333
2334 static struct cxl_region *cxl_region_alloc(struct cxl_root_decoder *cxlrd, int id)
2335 {
2336 struct cxl_region *cxlr;
2337 struct device *dev;
2338
2339 cxlr = kzalloc(sizeof(*cxlr), GFP_KERNEL);
2340 if (!cxlr) {
2341 memregion_free(id);
2342 return ERR_PTR(-ENOMEM);
2343 }
2344
2345 dev = &cxlr->dev;
2346 device_initialize(dev);
2347 lockdep_set_class(&dev->mutex, &cxl_region_key);
2348 dev->parent = &cxlrd->cxlsd.cxld.dev;
2349 /*
2350 * Keep root decoder pinned through cxl_region_release to fixup
2351 * region id allocations
2352 */
2353 get_device(dev->parent);
2354 device_set_pm_not_required(dev);
2355 dev->bus = &cxl_bus_type;
2356 dev->type = &cxl_region_type;
2357 cxlr->id = id;
2358
2359 return cxlr;
2360 }
2361
2362 static bool cxl_region_update_coordinates(struct cxl_region *cxlr, int nid)
2363 {
2364 int cset = 0;
2365 int rc;
2366
2367 for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
2368 if (cxlr->coord[i].read_bandwidth) {
2369 rc = 0;
2370 if (cxl_need_node_perf_attrs_update(nid))
2371 node_set_perf_attrs(nid, &cxlr->coord[i], i);
2372 else
2373 rc = cxl_update_hmat_access_coordinates(nid, cxlr, i);
2374
2375 if (rc == 0)
2376 cset++;
2377 }
2378 }
2379
2380 if (!cset)
2381 return false;
2382
2383 rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_access0_group());
2384 if (rc)
2385 dev_dbg(&cxlr->dev, "Failed to update access0 group\n");
2386
2387 rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_access1_group());
2388 if (rc)
2389 dev_dbg(&cxlr->dev, "Failed to update access1 group\n");
2390
2391 return true;
2392 }
2393
2394 static int cxl_region_nid(struct cxl_region *cxlr)
2395 {
2396 struct cxl_region_params *p = &cxlr->params;
2397 struct resource *res;
2398
2399 guard(rwsem_read)(&cxl_region_rwsem);
2400 res = p->res;
2401 if (!res)
2402 return NUMA_NO_NODE;
2403 return phys_to_target_node(res->start);
2404 }
2405
2406 static int cxl_region_perf_attrs_callback(struct notifier_block *nb,
2407 unsigned long action, void *arg)
2408 {
2409 struct cxl_region *cxlr = container_of(nb, struct cxl_region,
2410 memory_notifier);
2411 struct memory_notify *mnb = arg;
2412 int nid = mnb->status_change_nid;
2413 int region_nid;
2414
2415 if (nid == NUMA_NO_NODE || action != MEM_ONLINE)
2416 return NOTIFY_DONE;
2417
2418 region_nid = cxl_region_nid(cxlr);
2419 if (nid != region_nid)
2420 return NOTIFY_DONE;
2421
2422 if (!cxl_region_update_coordinates(cxlr, nid))
2423 return NOTIFY_DONE;
2424
2425 return NOTIFY_OK;
2426 }
2427
2428 static int cxl_region_calculate_adistance(struct notifier_block *nb,
2429 unsigned long nid, void *data)
2430 {
2431 struct cxl_region *cxlr = container_of(nb, struct cxl_region,
2432 adist_notifier);
2433 struct access_coordinate *perf;
2434 int *adist = data;
2435 int region_nid;
2436
2437 region_nid = cxl_region_nid(cxlr);
2438 if (nid != region_nid)
2439 return NOTIFY_OK;
2440
2441 perf = &cxlr->coord[ACCESS_COORDINATE_CPU];
2442
2443 if (mt_perf_to_adistance(perf, adist))
2444 return NOTIFY_OK;
2445
2446 return NOTIFY_STOP;
2447 }
2448
2449 /**
2450 * devm_cxl_add_region - Adds a region to a decoder
2451 * @cxlrd: root decoder
2452 * @id: caller-allocated memregion id to assign; memregion_free()'d on failure
2453 * @mode: mode for the endpoint decoders of this region
2454 * @type: select whether this is an expander or accelerator (type-2 or type-3)
2455 *
2456 * This is the second step of region initialization. Regions exist within an
2457 * address space which is mapped by a @cxlrd.
2458 *
2459 * Return: 0 if the region was added to the @cxlrd, else returns negative error
2460 * code. The region will be named "regionZ" where Z is the unique region number.
2461 */
2462 static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
2463 int id,
2464 enum cxl_decoder_mode mode,
2465 enum cxl_decoder_type type)
2466 {
2467 struct cxl_port *port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent);
2468 struct cxl_region *cxlr;
2469 struct device *dev;
2470 int rc;
2471
2472 cxlr = cxl_region_alloc(cxlrd, id);
2473 if (IS_ERR(cxlr))
2474 return cxlr;
2475 cxlr->mode = mode;
2476 cxlr->type = type;
2477
2478 dev = &cxlr->dev;
2479 rc = dev_set_name(dev, "region%d", id);
2480 if (rc)
2481 goto err;
2482
2483 rc = device_add(dev);
2484 if (rc)
2485 goto err;
2486
2487 cxlr->memory_notifier.notifier_call = cxl_region_perf_attrs_callback;
2488 cxlr->memory_notifier.priority = CXL_CALLBACK_PRI;
2489 register_memory_notifier(&cxlr->memory_notifier);
2490
2491 cxlr->adist_notifier.notifier_call = cxl_region_calculate_adistance;
2492 cxlr->adist_notifier.priority = 100;
2493 register_mt_adistance_algorithm(&cxlr->adist_notifier);
2494
2495 rc = devm_add_action_or_reset(port->uport_dev, unregister_region, cxlr);
2496 if (rc)
2497 return ERR_PTR(rc);
2498
2499 dev_dbg(port->uport_dev, "%s: created %s\n",
2500 dev_name(&cxlrd->cxlsd.cxld.dev), dev_name(dev));
2501 return cxlr;
2502
2503 err:
2504 put_device(dev);
2505 return ERR_PTR(rc);
2506 }
2507
2508 static ssize_t __create_region_show(struct cxl_root_decoder *cxlrd, char *buf)
2509 {
2510 return sysfs_emit(buf, "region%u\n", atomic_read(&cxlrd->region_id));
2511 }
2512
2513 static ssize_t create_pmem_region_show(struct device *dev,
2514 struct device_attribute *attr, char *buf)
2515 {
2516 return __create_region_show(to_cxl_root_decoder(dev), buf);
2517 }
2518
2519 static ssize_t create_ram_region_show(struct device *dev,
2520 struct device_attribute *attr, char *buf)
2521 {
2522 return __create_region_show(to_cxl_root_decoder(dev), buf);
2523 }
2524
2525 static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
2526 enum cxl_decoder_mode mode, int id)
2527 {
2528 int rc;
2529
2530 switch (mode) {
2531 case CXL_DECODER_RAM:
2532 case CXL_DECODER_PMEM:
2533 break;
2534 default:
2535 dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode);
2536 return ERR_PTR(-EINVAL);
2537 }
2538
2539 rc = memregion_alloc(GFP_KERNEL);
2540 if (rc < 0)
2541 return ERR_PTR(rc);
2542
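	/*
	 * Only claim the new id if @id still matches the advertised
	 * region_id, i.e. userspace wrote back exactly the name it read
	 * from create_pmem_region / create_ram_region.
	 */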
2543 if (atomic_cmpxchg(&cxlrd->region_id, id, rc) != id) {
2544 memregion_free(rc);
2545 return ERR_PTR(-EBUSY);
2546 }
2547
2548 return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_HOSTONLYMEM);
2549 }
2550
2551 static ssize_t create_pmem_region_store(struct device *dev,
2552 struct device_attribute *attr,
2553 const char *buf, size_t len)
2554 {
2555 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
2556 struct cxl_region *cxlr;
2557 int rc, id;
2558
2559 rc = sscanf(buf, "region%d\n", &id);
2560 if (rc != 1)
2561 return -EINVAL;
2562
2563 cxlr = __create_region(cxlrd, CXL_DECODER_PMEM, id);
2564 if (IS_ERR(cxlr))
2565 return PTR_ERR(cxlr);
2566
2567 return len;
2568 }
2569 DEVICE_ATTR_RW(create_pmem_region);
2570
2571 static ssize_t create_ram_region_store(struct device *dev,
2572 struct device_attribute *attr,
2573 const char *buf, size_t len)
2574 {
2575 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
2576 struct cxl_region *cxlr;
2577 int rc, id;
2578
2579 rc = sscanf(buf, "region%d\n", &id);
2580 if (rc != 1)
2581 return -EINVAL;
2582
2583 cxlr = __create_region(cxlrd, CXL_DECODER_RAM, id);
2584 if (IS_ERR(cxlr))
2585 return PTR_ERR(cxlr);
2586
2587 return len;
2588 }
2589 DEVICE_ATTR_RW(create_ram_region);
2590
2591 static ssize_t region_show(struct device *dev, struct device_attribute *attr,
2592 char *buf)
2593 {
2594 struct cxl_decoder *cxld = to_cxl_decoder(dev);
2595 ssize_t rc;
2596
2597 rc = down_read_interruptible(&cxl_region_rwsem);
2598 if (rc)
2599 return rc;
2600
2601 if (cxld->region)
2602 rc = sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev));
2603 else
2604 rc = sysfs_emit(buf, "\n");
2605 up_read(&cxl_region_rwsem);
2606
2607 return rc;
2608 }
2609 DEVICE_ATTR_RO(region);
2610
2611 static struct cxl_region *
2612 cxl_find_region_by_name(struct cxl_root_decoder *cxlrd, const char *name)
2613 {
2614 struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
2615 struct device *region_dev;
2616
2617 region_dev = device_find_child_by_name(&cxld->dev, name);
2618 if (!region_dev)
2619 return ERR_PTR(-ENODEV);
2620
2621 return to_cxl_region(region_dev);
2622 }
2623
2624 static ssize_t delete_region_store(struct device *dev,
2625 struct device_attribute *attr,
2626 const char *buf, size_t len)
2627 {
2628 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
2629 struct cxl_port *port = to_cxl_port(dev->parent);
2630 struct cxl_region *cxlr;
2631
2632 cxlr = cxl_find_region_by_name(cxlrd, buf);
2633 if (IS_ERR(cxlr))
2634 return PTR_ERR(cxlr);
2635
2636 devm_release_action(port->uport_dev, unregister_region, cxlr);
2637 put_device(&cxlr->dev);
2638
2639 return len;
2640 }
2641 DEVICE_ATTR_WO(delete_region);
2642
2643 static void cxl_pmem_region_release(struct device *dev)
2644 {
2645 struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
2646 int i;
2647
2648 for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
2649 struct cxl_memdev *cxlmd = cxlr_pmem->mapping[i].cxlmd;
2650
2651 put_device(&cxlmd->dev);
2652 }
2653
2654 kfree(cxlr_pmem);
2655 }
2656
2657 static const struct attribute_group *cxl_pmem_region_attribute_groups[] = {
2658 &cxl_base_attribute_group,
2659 NULL,
2660 };
2661
2662 const struct device_type cxl_pmem_region_type = {
2663 .name = "cxl_pmem_region",
2664 .release = cxl_pmem_region_release,
2665 .groups = cxl_pmem_region_attribute_groups,
2666 };
2667
2668 bool is_cxl_pmem_region(struct device *dev)
2669 {
2670 return dev->type == &cxl_pmem_region_type;
2671 }
2672 EXPORT_SYMBOL_NS_GPL(is_cxl_pmem_region, CXL);
2673
2674 struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
2675 {
2676 if (dev_WARN_ONCE(dev, !is_cxl_pmem_region(dev),
2677 "not a cxl_pmem_region device\n"))
2678 return NULL;
2679 return container_of(dev, struct cxl_pmem_region, dev);
2680 }
2681 EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, CXL);
2682
2683 struct cxl_poison_context {
2684 struct cxl_port *port;
2685 enum cxl_decoder_mode mode;
2686 u64 offset;
2687 };
2688
2689 static int cxl_get_poison_unmapped(struct cxl_memdev *cxlmd,
2690 struct cxl_poison_context *ctx)
2691 {
2692 struct cxl_dev_state *cxlds = cxlmd->cxlds;
2693 u64 offset, length;
2694 int rc = 0;
2695
2696 /*
2697 * Collect poison for the remaining unmapped resources
2698 * after poison is collected by committed endpoints.
2699 *
2700 * Knowing that PMEM must always follow RAM, get poison
2701 * for unmapped resources based on the last decoder's mode:
2702 * ram: scan remains of ram range, then any pmem range
2703 * pmem: scan remains of pmem range
2704 */
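	/*
	 * Illustrative example: if the last committed decoder was ram and
	 * its mapping ended at DPA 0x800 of a 0x1000-sized ram range,
	 * poison is read for ram [0x800, 0x1000) and then for the whole
	 * pmem range.
	 */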
2705
2706 if (ctx->mode == CXL_DECODER_RAM) {
2707 offset = ctx->offset;
2708 length = resource_size(&cxlds->ram_res) - offset;
2709 rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
2710 if (rc == -EFAULT)
2711 rc = 0;
2712 if (rc)
2713 return rc;
2714 }
2715 if (ctx->mode == CXL_DECODER_PMEM) {
2716 offset = ctx->offset;
2717 length = resource_size(&cxlds->dpa_res) - offset;
2718 if (!length)
2719 return 0;
2720 } else if (resource_size(&cxlds->pmem_res)) {
2721 offset = cxlds->pmem_res.start;
2722 length = resource_size(&cxlds->pmem_res);
2723 } else {
2724 return 0;
2725 }
2726
2727 return cxl_mem_get_poison(cxlmd, offset, length, NULL);
2728 }
2729
2730 static int poison_by_decoder(struct device *dev, void *arg)
2731 {
2732 struct cxl_poison_context *ctx = arg;
2733 struct cxl_endpoint_decoder *cxled;
2734 struct cxl_memdev *cxlmd;
2735 u64 offset, length;
2736 int rc = 0;
2737
2738 if (!is_endpoint_decoder(dev))
2739 return rc;
2740
2741 cxled = to_cxl_endpoint_decoder(dev);
2742 if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
2743 return rc;
2744
2745 /*
2746 * Regions are only created with single mode decoders: pmem or ram.
2747 * Linux does not support mixed mode decoders. This means that
2748 * reading poison per endpoint decoder adheres to the requirement
2749 * that poison reads of pmem and ram must be separated.
2750 * CXL 3.0 Spec 8.2.9.8.4.1
2751 */
2752 if (cxled->mode == CXL_DECODER_MIXED) {
2753 dev_dbg(dev, "poison list read unsupported in mixed mode\n");
2754 return rc;
2755 }
2756
2757 cxlmd = cxled_to_memdev(cxled);
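	/* also read poison for any DPA skipped over when this decoder was allocated */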
2758 if (cxled->skip) {
2759 offset = cxled->dpa_res->start - cxled->skip;
2760 length = cxled->skip;
2761 rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
2762 if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM)
2763 rc = 0;
2764 if (rc)
2765 return rc;
2766 }
2767
2768 offset = cxled->dpa_res->start;
2769 length = cxled->dpa_res->end - offset + 1;
2770 rc = cxl_mem_get_poison(cxlmd, offset, length, cxled->cxld.region);
2771 if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM)
2772 rc = 0;
2773 if (rc)
2774 return rc;
2775
2776 /* Iterate until commit_end is reached */
2777 if (cxled->cxld.id == ctx->port->commit_end) {
2778 ctx->offset = cxled->dpa_res->end + 1;
2779 ctx->mode = cxled->mode;
2780 return 1;
2781 }
2782
2783 return 0;
2784 }
2785
2786 int cxl_get_poison_by_endpoint(struct cxl_port *port)
2787 {
2788 struct cxl_poison_context ctx;
2789 int rc = 0;
2790
2791 ctx = (struct cxl_poison_context) {
2792 .port = port
2793 };
2794
2795 rc = device_for_each_child(&port->dev, &ctx, poison_by_decoder);
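	/* a return of 1 means the walk stopped at the last committed decoder */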
2796 if (rc == 1)
2797 rc = cxl_get_poison_unmapped(to_cxl_memdev(port->uport_dev),
2798 &ctx);
2799
2800 return rc;
2801 }
2802
2803 struct cxl_dpa_to_region_context {
2804 struct cxl_region *cxlr;
2805 u64 dpa;
2806 };
2807
2808 static int __cxl_dpa_to_region(struct device *dev, void *arg)
2809 {
2810 struct cxl_dpa_to_region_context *ctx = arg;
2811 struct cxl_endpoint_decoder *cxled;
2812 struct cxl_region *cxlr;
2813 u64 dpa = ctx->dpa;
2814
2815 if (!is_endpoint_decoder(dev))
2816 return 0;
2817
2818 cxled = to_cxl_endpoint_decoder(dev);
2819 if (!cxled || !cxled->dpa_res || !resource_size(cxled->dpa_res))
2820 return 0;
2821
2822 if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start)
2823 return 0;
2824
2825 /*
2826 * Stop the region search (return 1) when an endpoint mapping is
2827 * found. The region may not be fully constructed, so the cxlr
2828 * returned in the context structure may still be NULL.
2829 */
2830 cxlr = cxled->cxld.region;
2831 if (cxlr)
2832 dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa,
2833 dev_name(&cxlr->dev));
2834 else
2835 dev_dbg(dev, "dpa:0x%llx mapped in endpoint:%s\n", dpa,
2836 dev_name(dev));
2837
2838 ctx->cxlr = cxlr;
2839
2840 return 1;
2841 }
2842
2843 struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa)
2844 {
2845 struct cxl_dpa_to_region_context ctx;
2846 struct cxl_port *port;
2847
2848 ctx = (struct cxl_dpa_to_region_context) {
2849 .dpa = dpa,
2850 };
2851 port = cxlmd->endpoint;
2852 if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port))
2853 device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);
2854
2855 return ctx.cxlr;
2856 }
2857
2858 static bool cxl_is_hpa_in_chunk(u64 hpa, struct cxl_region *cxlr, int pos)
2859 {
2860 struct cxl_region_params *p = &cxlr->params;
2861 int gran = p->interleave_granularity;
2862 int ways = p->interleave_ways;
2863 u64 offset;
2864
2865 /* Is the hpa in an expected chunk for its pos(-ition) */
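	/*
	 * e.g. (illustrative): gran = 256, ways = 2, pos = 1 => the offset
	 * within each 512-byte span must land in [256, 512)
	 */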
2866 offset = hpa - p->res->start;
2867 offset = do_div(offset, gran * ways);
2868 if ((offset >= pos * gran) && (offset < (pos + 1) * gran))
2869 return true;
2870
2871 dev_dbg(&cxlr->dev,
2872 "Addr trans fail: hpa 0x%llx not in expected chunk\n", hpa);
2873
2874 return false;
2875 }
2876
2877 u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
2878 u64 dpa)
2879 {
2880 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
2881 u64 dpa_offset, hpa_offset, bits_upper, mask_upper, hpa;
2882 struct cxl_region_params *p = &cxlr->params;
2883 struct cxl_endpoint_decoder *cxled = NULL;
2884 u16 eig = 0;
2885 u8 eiw = 0;
2886 int pos;
2887
2888 for (int i = 0; i < p->nr_targets; i++) {
2889 cxled = p->targets[i];
2890 if (cxlmd == cxled_to_memdev(cxled))
2891 break;
2892 }
2893 if (!cxled || cxlmd != cxled_to_memdev(cxled))
2894 return ULLONG_MAX;
2895
2896 pos = cxled->pos;
2897 ways_to_eiw(p->interleave_ways, &eiw);
2898 granularity_to_eig(p->interleave_granularity, &eig);
2899
2900 /*
2901 * The device position in the region interleave set was removed
2902 * from the offset at HPA->DPA translation. To reconstruct the
2903 * HPA, place the 'pos' in the offset.
2904 *
2905 * The placement of 'pos' in the HPA is determined by interleave
2906 * ways and granularity and is defined in the CXL Spec 3.0 Section
2907 * 8.2.4.19.13 Implementation Note: Device Decode Logic
2908 */
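	/*
	 * Worked example (illustrative values): 2 ways => eiw = 1, 256B
	 * granularity => eig = 0. DPA offset bits [7:0] pass through,
	 * 'pos' lands at bit 8, and the upper bits shift left by eiw:
	 * dpa_offset 0x100 at pos 1 yields
	 * hpa_offset (0x100 << 1) | (1 << 8) = 0x300.
	 */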
2909
2910 /* Remove the dpa base */
2911 dpa_offset = dpa - cxl_dpa_resource_start(cxled);
2912
2913 mask_upper = GENMASK_ULL(51, eig + 8);
2914
2915 if (eiw < 8) {
2916 hpa_offset = (dpa_offset & mask_upper) << eiw;
2917 hpa_offset |= pos << (eig + 8);
2918 } else {
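		/*
		 * eiw >= 8 encodes 3, 6, and 12-way interleave; invert that
		 * decode by multiplying the upper dpa_offset bits by 3
		 * before shifting 'pos' in.
		 */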
2919 bits_upper = (dpa_offset & mask_upper) >> (eig + 8);
2920 bits_upper = bits_upper * 3;
2921 hpa_offset = ((bits_upper << (eiw - 8)) + pos) << (eig + 8);
2922 }
2923
2924 /* The lower bits remain unchanged */
2925 hpa_offset |= dpa_offset & GENMASK_ULL(eig + 7, 0);
2926
2927 /* Apply the hpa_offset to the region base address */
2928 hpa = hpa_offset + p->res->start;
2929
2930 /* Root decoder translation overrides typical modulo decode */
2931 if (cxlrd->hpa_to_spa)
2932 hpa = cxlrd->hpa_to_spa(cxlrd, hpa);
2933
2934 if (hpa < p->res->start || hpa > p->res->end) {
2935 dev_dbg(&cxlr->dev,
2936 "Addr trans fail: hpa 0x%llx not in region\n", hpa);
2937 return ULLONG_MAX;
2938 }
2939
2940 /* Simple chunk check, by pos & gran, only applies to modulo decodes */
2941 if (!cxlrd->hpa_to_spa && (!cxl_is_hpa_in_chunk(hpa, cxlr, pos)))
2942 return ULLONG_MAX;
2943
2944 return hpa;
2945 }
2946
2947 static struct lock_class_key cxl_pmem_region_key;
2948
2949 static int cxl_pmem_region_alloc(struct cxl_region *cxlr)
2950 {
2951 struct cxl_region_params *p = &cxlr->params;
2952 struct cxl_nvdimm_bridge *cxl_nvb;
2953 struct device *dev;
2954 int i;
2955
2956 guard(rwsem_read)(&cxl_region_rwsem);
2957 if (p->state != CXL_CONFIG_COMMIT)
2958 return -ENXIO;
2959
2960 struct cxl_pmem_region *cxlr_pmem __free(kfree) =
2961 kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets), GFP_KERNEL);
2962 if (!cxlr_pmem)
2963 return -ENOMEM;
2964
2965 cxlr_pmem->hpa_range.start = p->res->start;
2966 cxlr_pmem->hpa_range.end = p->res->end;
2967
2968 /* Snapshot the region configuration underneath the cxl_region_rwsem */
2969 cxlr_pmem->nr_mappings = p->nr_targets;
2970 for (i = 0; i < p->nr_targets; i++) {
2971 struct cxl_endpoint_decoder *cxled = p->targets[i];
2972 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
2973 struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
2974
2975 /*
2976 * Regions never span CXL root devices, so by definition the
2977 * bridge for one device is the same for all.
2978 */
2979 if (i == 0) {
2980 cxl_nvb = cxl_find_nvdimm_bridge(cxlmd->endpoint);
2981 if (!cxl_nvb)
2982 return -ENODEV;
2983 cxlr->cxl_nvb = cxl_nvb;
2984 }
2985 m->cxlmd = cxlmd;
2986 get_device(&cxlmd->dev);
2987 m->start = cxled->dpa_res->start;
2988 m->size = resource_size(cxled->dpa_res);
2989 m->position = i;
2990 }
2991
2992 dev = &cxlr_pmem->dev;
2993 device_initialize(dev);
2994 lockdep_set_class(&dev->mutex, &cxl_pmem_region_key);
2995 device_set_pm_not_required(dev);
2996 dev->parent = &cxlr->dev;
2997 dev->bus = &cxl_bus_type;
2998 dev->type = &cxl_pmem_region_type;
2999 cxlr_pmem->cxlr = cxlr;
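	/* success: transfer ownership out of the __free(kfree) cleanup scope */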
3000 cxlr->cxlr_pmem = no_free_ptr(cxlr_pmem);
3001
3002 return 0;
3003 }
3004
3005 static void cxl_dax_region_release(struct device *dev)
3006 {
3007 struct cxl_dax_region *cxlr_dax = to_cxl_dax_region(dev);
3008
3009 kfree(cxlr_dax);
3010 }
3011
3012 static const struct attribute_group *cxl_dax_region_attribute_groups[] = {
3013 &cxl_base_attribute_group,
3014 NULL,
3015 };
3016
3017 const struct device_type cxl_dax_region_type = {
3018 .name = "cxl_dax_region",
3019 .release = cxl_dax_region_release,
3020 .groups = cxl_dax_region_attribute_groups,
3021 };
3022
3023 static bool is_cxl_dax_region(struct device *dev)
3024 {
3025 return dev->type == &cxl_dax_region_type;
3026 }
3027
3028 struct cxl_dax_region *to_cxl_dax_region(struct device *dev)
3029 {
3030 if (dev_WARN_ONCE(dev, !is_cxl_dax_region(dev),
3031 "not a cxl_dax_region device\n"))
3032 return NULL;
3033 return container_of(dev, struct cxl_dax_region, dev);
3034 }
3035 EXPORT_SYMBOL_NS_GPL(to_cxl_dax_region, CXL);
3036
3037 static struct lock_class_key cxl_dax_region_key;
3038
3039 static struct cxl_dax_region *cxl_dax_region_alloc(struct cxl_region *cxlr)
3040 {
3041 struct cxl_region_params *p = &cxlr->params;
3042 struct cxl_dax_region *cxlr_dax;
3043 struct device *dev;
3044
3045 down_read(&cxl_region_rwsem);
3046 if (p->state != CXL_CONFIG_COMMIT) {
3047 cxlr_dax = ERR_PTR(-ENXIO);
3048 goto out;
3049 }
3050
3051 cxlr_dax = kzalloc(sizeof(*cxlr_dax), GFP_KERNEL);
3052 if (!cxlr_dax) {
3053 cxlr_dax = ERR_PTR(-ENOMEM);
3054 goto out;
3055 }
3056
3057 cxlr_dax->hpa_range.start = p->res->start;
3058 cxlr_dax->hpa_range.end = p->res->end;
3059
3060 dev = &cxlr_dax->dev;
3061 cxlr_dax->cxlr = cxlr;
3062 device_initialize(dev);
3063 lockdep_set_class(&dev->mutex, &cxl_dax_region_key);
3064 device_set_pm_not_required(dev);
3065 dev->parent = &cxlr->dev;
3066 dev->bus = &cxl_bus_type;
3067 dev->type = &cxl_dax_region_type;
3068 out:
3069 up_read(&cxl_region_rwsem);
3070
3071 return cxlr_dax;
3072 }
3073
3074 static void cxlr_pmem_unregister(void *_cxlr_pmem)
3075 {
3076 struct cxl_pmem_region *cxlr_pmem = _cxlr_pmem;
3077 struct cxl_region *cxlr = cxlr_pmem->cxlr;
3078 struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;
3079
3080 /*
3081 * Either the bridge is in ->remove() context under the device_lock(),
3082 * or cxlr_release_nvdimm() is cancelling the bridge's release action
3083 * for @cxlr_pmem and doing it itself (while manually holding the bridge
3084 * lock).
3085 */
3086 device_lock_assert(&cxl_nvb->dev);
3087 cxlr->cxlr_pmem = NULL;
3088 cxlr_pmem->cxlr = NULL;
3089 device_unregister(&cxlr_pmem->dev);
3090 }
3091
3092 static void cxlr_release_nvdimm(void *_cxlr)
3093 {
3094 struct cxl_region *cxlr = _cxlr;
3095 struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;
3096
3097 device_lock(&cxl_nvb->dev);
3098 if (cxlr->cxlr_pmem)
3099 devm_release_action(&cxl_nvb->dev, cxlr_pmem_unregister,
3100 cxlr->cxlr_pmem);
3101 device_unlock(&cxl_nvb->dev);
3102 cxlr->cxl_nvb = NULL;
3103 put_device(&cxl_nvb->dev);
3104 }
3105
3106 /**
3107 * devm_cxl_add_pmem_region() - add a cxl_region-to-nd_region bridge
3108 * @cxlr: parent CXL region for this pmem region bridge device
3109 *
3110 * Return: 0 on success, negative error code on failure.
3111 */
3112 static int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
3113 {
3114 struct cxl_pmem_region *cxlr_pmem;
3115 struct cxl_nvdimm_bridge *cxl_nvb;
3116 struct device *dev;
3117 int rc;
3118
3119 rc = cxl_pmem_region_alloc(cxlr);
3120 if (rc)
3121 return rc;
3122 cxlr_pmem = cxlr->cxlr_pmem;
3123 cxl_nvb = cxlr->cxl_nvb;
3124
3125 dev = &cxlr_pmem->dev;
3126 rc = dev_set_name(dev, "pmem_region%d", cxlr->id);
3127 if (rc)
3128 goto err;
3129
3130 rc = device_add(dev);
3131 if (rc)
3132 goto err;
3133
3134 dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
3135 dev_name(dev));
3136
3137 device_lock(&cxl_nvb->dev);
3138 if (cxl_nvb->dev.driver)
3139 rc = devm_add_action_or_reset(&cxl_nvb->dev,
3140 cxlr_pmem_unregister, cxlr_pmem);
3141 else
3142 rc = -ENXIO;
3143 device_unlock(&cxl_nvb->dev);
3144
3145 if (rc)
3146 goto err_bridge;
3147
3148 /* @cxlr carries a reference on @cxl_nvb until cxlr_release_nvdimm */
3149 return devm_add_action_or_reset(&cxlr->dev, cxlr_release_nvdimm, cxlr);
3150
3151 err:
3152 put_device(dev);
3153 err_bridge:
3154 put_device(&cxl_nvb->dev);
3155 cxlr->cxl_nvb = NULL;
3156 return rc;
3157 }
3158
3159 static void cxlr_dax_unregister(void *_cxlr_dax)
3160 {
3161 struct cxl_dax_region *cxlr_dax = _cxlr_dax;
3162
3163 device_unregister(&cxlr_dax->dev);
3164 }
3165
3166 static int devm_cxl_add_dax_region(struct cxl_region *cxlr)
3167 {
3168 struct cxl_dax_region *cxlr_dax;
3169 struct device *dev;
3170 int rc;
3171
3172 cxlr_dax = cxl_dax_region_alloc(cxlr);
3173 if (IS_ERR(cxlr_dax))
3174 return PTR_ERR(cxlr_dax);
3175
3176 dev = &cxlr_dax->dev;
3177 rc = dev_set_name(dev, "dax_region%d", cxlr->id);
3178 if (rc)
3179 goto err;
3180
3181 rc = device_add(dev);
3182 if (rc)
3183 goto err;
3184
3185 dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
3186 dev_name(dev));
3187
3188 return devm_add_action_or_reset(&cxlr->dev, cxlr_dax_unregister,
3189 cxlr_dax);
3190 err:
3191 put_device(dev);
3192 return rc;
3193 }
3194
3195 static int match_root_decoder_by_range(struct device *dev, void *data)
3196 {
3197 struct range *r1, *r2 = data;
3198 struct cxl_root_decoder *cxlrd;
3199
3200 if (!is_root_decoder(dev))
3201 return 0;
3202
3203 cxlrd = to_cxl_root_decoder(dev);
3204 r1 = &cxlrd->cxlsd.cxld.hpa_range;
3205 return range_contains(r1, r2);
3206 }
3207
3208 static int match_region_by_range(struct device *dev, void *data)
3209 {
3210 struct cxl_region_params *p;
3211 struct cxl_region *cxlr;
3212 struct range *r = data;
3213 int rc = 0;
3214
3215 if (!is_cxl_region(dev))
3216 return 0;
3217
3218 cxlr = to_cxl_region(dev);
3219 p = &cxlr->params;
3220
3221 down_read(&cxl_region_rwsem);
3222 if (p->res && p->res->start == r->start && p->res->end == r->end)
3223 rc = 1;
3224 up_read(&cxl_region_rwsem);
3225
3226 return rc;
3227 }
3228
3229 /* Establish an empty region covering the given HPA range */
3230 static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
3231 struct cxl_endpoint_decoder *cxled)
3232 {
3233 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
3234 struct cxl_port *port = cxlrd_to_port(cxlrd);
3235 struct range *hpa = &cxled->cxld.hpa_range;
3236 struct cxl_region_params *p;
3237 struct cxl_region *cxlr;
3238 struct resource *res;
3239 int rc;
3240
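	/* retry if another thread claimed the advertised region id first */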
3241 do {
3242 cxlr = __create_region(cxlrd, cxled->mode,
3243 atomic_read(&cxlrd->region_id));
3244 } while (IS_ERR(cxlr) && PTR_ERR(cxlr) == -EBUSY);
3245
3246 if (IS_ERR(cxlr)) {
3247 dev_err(cxlmd->dev.parent,
3248 "%s:%s: %s failed assign region: %ld\n",
3249 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
3250 __func__, PTR_ERR(cxlr));
3251 return cxlr;
3252 }
3253
3254 down_write(&cxl_region_rwsem);
3255 p = &cxlr->params;
3256 if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
3257 dev_err(cxlmd->dev.parent,
3258 "%s:%s: %s autodiscovery interrupted\n",
3259 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
3260 __func__);
3261 rc = -EBUSY;
3262 goto err;
3263 }
3264
3265 set_bit(CXL_REGION_F_AUTO, &cxlr->flags);
3266
3267 res = kmalloc(sizeof(*res), GFP_KERNEL);
3268 if (!res) {
3269 rc = -ENOMEM;
3270 goto err;
3271 }
3272
3273 *res = DEFINE_RES_MEM_NAMED(hpa->start, range_len(hpa),
3274 dev_name(&cxlr->dev));
3275 rc = insert_resource(cxlrd->res, res);
3276 if (rc) {
3277 /*
3278 * Platform-firmware may not have split resources like "System
3279 * RAM" on CXL window boundaries see cxl_region_iomem_release()
3280 */
3281 dev_warn(cxlmd->dev.parent,
3282 "%s:%s: %s %s cannot insert resource\n",
3283 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
3284 __func__, dev_name(&cxlr->dev));
3285 }
3286
3287 p->res = res;
3288 p->interleave_ways = cxled->cxld.interleave_ways;
3289 p->interleave_granularity = cxled->cxld.interleave_granularity;
3290 p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
3291
3292 rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
3293 if (rc)
3294 goto err;
3295
3296 dev_dbg(cxlmd->dev.parent, "%s:%s: %s %s res: %pr iw: %d ig: %d\n",
3297 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), __func__,
3298 dev_name(&cxlr->dev), p->res, p->interleave_ways,
3299 p->interleave_granularity);
3300
3301 /* ...to match put_device() in cxl_add_to_region() */
3302 get_device(&cxlr->dev);
3303 up_write(&cxl_region_rwsem);
3304
3305 return cxlr;
3306
3307 err:
3308 up_write(&cxl_region_rwsem);
3309 devm_release_action(port->uport_dev, unregister_region, cxlr);
3310 return ERR_PTR(rc);
3311 }
3312
3313 int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
3314 {
3315 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
3316 struct range *hpa = &cxled->cxld.hpa_range;
3317 struct cxl_decoder *cxld = &cxled->cxld;
3318 struct device *cxlrd_dev, *region_dev;
3319 struct cxl_root_decoder *cxlrd;
3320 struct cxl_region_params *p;
3321 struct cxl_region *cxlr;
3322 bool attach = false;
3323 int rc;
3324
3325 cxlrd_dev = device_find_child(&root->dev, &cxld->hpa_range,
3326 match_root_decoder_by_range);
3327 if (!cxlrd_dev) {
3328 dev_err(cxlmd->dev.parent,
3329 "%s:%s no CXL window for range %#llx:%#llx\n",
3330 dev_name(&cxlmd->dev), dev_name(&cxld->dev),
3331 cxld->hpa_range.start, cxld->hpa_range.end);
3332 return -ENXIO;
3333 }
3334
3335 cxlrd = to_cxl_root_decoder(cxlrd_dev);
3336
3337 /*
3338 * Ensure that if multiple threads race to construct_region() for @hpa
3339 * one does the construction and the others add to that.
3340 */
3341 mutex_lock(&cxlrd->range_lock);
3342 region_dev = device_find_child(&cxlrd->cxlsd.cxld.dev, hpa,
3343 match_region_by_range);
3344 if (!region_dev) {
3345 cxlr = construct_region(cxlrd, cxled);
3346 region_dev = &cxlr->dev;
3347 } else
3348 cxlr = to_cxl_region(region_dev);
3349 mutex_unlock(&cxlrd->range_lock);
3350
3351 rc = PTR_ERR_OR_ZERO(cxlr);
3352 if (rc)
3353 goto out;
3354
3355 attach_target(cxlr, cxled, -1, TASK_UNINTERRUPTIBLE);
3356
3357 down_read(&cxl_region_rwsem);
3358 p = &cxlr->params;
3359 attach = p->state == CXL_CONFIG_COMMIT;
3360 up_read(&cxl_region_rwsem);
3361
3362 if (attach) {
3363 /*
3364 * If device_attach() fails, the range may still be active via
3365 * the platform-firmware memory map, otherwise the driver for
3366 * regions is local to this file, so driver matching can't fail.
3367 */
3368 if (device_attach(&cxlr->dev) < 0)
3369 dev_err(&cxlr->dev, "failed to enable, range: %pr\n",
3370 p->res);
3371 }
3372
3373 put_device(region_dev);
3374 out:
3375 put_device(cxlrd_dev);
3376 return rc;
3377 }
3378 EXPORT_SYMBOL_NS_GPL(cxl_add_to_region, CXL);
3379
3380 static int is_system_ram(struct resource *res, void *arg)
3381 {
3382 struct cxl_region *cxlr = arg;
3383 struct cxl_region_params *p = &cxlr->params;
3384
3385 dev_dbg(&cxlr->dev, "%pr has System RAM: %pr\n", p->res, res);
3386 return 1;
3387 }
3388
3389 static int cxl_region_probe(struct device *dev)
3390 {
3391 struct cxl_region *cxlr = to_cxl_region(dev);
3392 struct cxl_region_params *p = &cxlr->params;
3393 int rc;
3394
3395 rc = down_read_interruptible(&cxl_region_rwsem);
3396 if (rc) {
3397 dev_dbg(&cxlr->dev, "probe interrupted\n");
3398 return rc;
3399 }
3400
3401 if (p->state < CXL_CONFIG_COMMIT) {
3402 dev_dbg(&cxlr->dev, "config state: %d\n", p->state);
3403 rc = -ENXIO;
3404 goto out;
3405 }
3406
3407 if (test_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags)) {
3408 dev_err(&cxlr->dev,
3409 "failed to activate, re-commit region and retry\n");
3410 rc = -ENXIO;
3411 goto out;
3412 }
3413
3414 /*
3415 * From this point on any path that changes the region's state away from
3416 * CXL_CONFIG_COMMIT is also responsible for releasing the driver.
3417 */
3418 out:
3419 up_read(&cxl_region_rwsem);
3420
3421 if (rc)
3422 return rc;
3423
3424 switch (cxlr->mode) {
3425 case CXL_DECODER_PMEM:
3426 return devm_cxl_add_pmem_region(cxlr);
3427 case CXL_DECODER_RAM:
3428 /*
3429 * The region cannot be managed by CXL if any portion of
3430 * it is already online as 'System RAM'.
3431 */
3432 if (walk_iomem_res_desc(IORES_DESC_NONE,
3433 IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
3434 p->res->start, p->res->end, cxlr,
3435 is_system_ram) > 0)
3436 return 0;
3437 return devm_cxl_add_dax_region(cxlr);
3438 default:
3439 dev_dbg(&cxlr->dev, "unsupported region mode: %d\n",
3440 cxlr->mode);
3441 return -ENXIO;
3442 }
3443 }
3444
3445 static struct cxl_driver cxl_region_driver = {
3446 .name = "cxl_region",
3447 .probe = cxl_region_probe,
3448 .id = CXL_DEVICE_REGION,
3449 };
3450
3451 int cxl_region_init(void)
3452 {
3453 return cxl_driver_register(&cxl_region_driver);
3454 }
3455
3456 void cxl_region_exit(void)
3457 {
3458 cxl_driver_unregister(&cxl_region_driver);
3459 }
3460
3461 MODULE_IMPORT_NS(CXL);
3462 MODULE_IMPORT_NS(DEVMEM);
3463 MODULE_ALIAS_CXL(CXL_DEVICE_REGION);
3464