1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2022 Intel Corporation. All rights reserved. */
3 #include <linux/memregion.h>
4 #include <linux/genalloc.h>
5 #include <linux/device.h>
6 #include <linux/module.h>
7 #include <linux/memory.h>
8 #include <linux/slab.h>
9 #include <linux/uuid.h>
10 #include <linux/sort.h>
11 #include <linux/idr.h>
12 #include <linux/memory-tiers.h>
13 #include <cxlmem.h>
14 #include <cxl.h>
15 #include "core.h"
16
17 /**
18 * DOC: cxl core region
19 *
20 * CXL Regions represent mapped memory capacity in system physical address
21 * space. Whereas the CXL Root Decoders identify the bounds of potential CXL
22 * Memory ranges, Regions represent the active mapped capacity by the HDM
23 * Decoder Capability structures throughout the Host Bridges, Switches, and
24 * Endpoints in the topology.
25 *
26 * Region configuration has ordering constraints. UUID may be set at any time
27 * but is only visible for persistent regions.
28 * 1. Interleave granularity
29 * 2. Interleave size
30 * 3. Decoder targets
31 */
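/*
 * Illustrative sketch (not from the upstream documentation): a typical user
 * space sequence that satisfies the ordering above for a two-way persistent
 * region, assuming the standard /sys/bus/cxl region ABI and hypothetical
 * decoder names:
 *
 *   region=$(cat /sys/bus/cxl/devices/decoder0.0/create_pmem_region)
 *   echo $region > /sys/bus/cxl/devices/decoder0.0/create_pmem_region
 *   cd /sys/bus/cxl/devices/$region
 *   echo 256 > interleave_granularity
 *   echo 2 > interleave_ways
 *   uuidgen > uuid
 *   echo $((2 << 30)) > size
 *   echo decoder2.0 > target0
 *   echo decoder3.0 > target1
 *   echo 1 > commit
 */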
32
33 static struct cxl_region *to_cxl_region(struct device *dev);
34
35 #define __ACCESS_ATTR_RO(_level, _name) { \
36 .attr = { .name = __stringify(_name), .mode = 0444 }, \
37 .show = _name##_access##_level##_show, \
38 }
39
40 #define ACCESS_DEVICE_ATTR_RO(level, name) \
41 struct device_attribute dev_attr_access##level##_##name = __ACCESS_ATTR_RO(level, name)
42
43 #define ACCESS_ATTR_RO(level, attrib) \
44 static ssize_t attrib##_access##level##_show(struct device *dev, \
45 struct device_attribute *attr, \
46 char *buf) \
47 { \
48 struct cxl_region *cxlr = to_cxl_region(dev); \
49 \
50 if (cxlr->coord[level].attrib == 0) \
51 return -ENOENT; \
52 \
53 return sysfs_emit(buf, "%u\n", cxlr->coord[level].attrib); \
54 } \
55 static ACCESS_DEVICE_ATTR_RO(level, attrib)
56
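/*
 * Descriptive note: ACCESS_ATTR_RO(0, read_bandwidth) below expands to a
 * read_bandwidth_access0_show() helper backed by cxlr->coord[0].read_bandwidth
 * and a dev_attr_access0_read_bandwidth device attribute.
 */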
57 ACCESS_ATTR_RO(0, read_bandwidth);
58 ACCESS_ATTR_RO(0, read_latency);
59 ACCESS_ATTR_RO(0, write_bandwidth);
60 ACCESS_ATTR_RO(0, write_latency);
61
62 #define ACCESS_ATTR_DECLARE(level, attrib) \
63 (&dev_attr_access##level##_##attrib.attr)
64
65 static struct attribute *access0_coordinate_attrs[] = {
66 ACCESS_ATTR_DECLARE(0, read_bandwidth),
67 ACCESS_ATTR_DECLARE(0, write_bandwidth),
68 ACCESS_ATTR_DECLARE(0, read_latency),
69 ACCESS_ATTR_DECLARE(0, write_latency),
70 NULL
71 };
72
73 ACCESS_ATTR_RO(1, read_bandwidth);
74 ACCESS_ATTR_RO(1, read_latency);
75 ACCESS_ATTR_RO(1, write_bandwidth);
76 ACCESS_ATTR_RO(1, write_latency);
77
78 static struct attribute *access1_coordinate_attrs[] = {
79 ACCESS_ATTR_DECLARE(1, read_bandwidth),
80 ACCESS_ATTR_DECLARE(1, write_bandwidth),
81 ACCESS_ATTR_DECLARE(1, read_latency),
82 ACCESS_ATTR_DECLARE(1, write_latency),
83 NULL
84 };
85
86 #define ACCESS_VISIBLE(level) \
87 static umode_t cxl_region_access##level##_coordinate_visible( \
88 struct kobject *kobj, struct attribute *a, int n) \
89 { \
90 struct device *dev = kobj_to_dev(kobj); \
91 struct cxl_region *cxlr = to_cxl_region(dev); \
92 \
93 if (a == &dev_attr_access##level##_read_latency.attr && \
94 cxlr->coord[level].read_latency == 0) \
95 return 0; \
96 \
97 if (a == &dev_attr_access##level##_write_latency.attr && \
98 cxlr->coord[level].write_latency == 0) \
99 return 0; \
100 \
101 if (a == &dev_attr_access##level##_read_bandwidth.attr && \
102 cxlr->coord[level].read_bandwidth == 0) \
103 return 0; \
104 \
105 if (a == &dev_attr_access##level##_write_bandwidth.attr && \
106 cxlr->coord[level].write_bandwidth == 0) \
107 return 0; \
108 \
109 return a->mode; \
110 }
111
112 ACCESS_VISIBLE(0);
113 ACCESS_VISIBLE(1);
114
115 static const struct attribute_group cxl_region_access0_coordinate_group = {
116 .name = "access0",
117 .attrs = access0_coordinate_attrs,
118 .is_visible = cxl_region_access0_coordinate_visible,
119 };
120
121 static const struct attribute_group *get_cxl_region_access0_group(void)
122 {
123 return &cxl_region_access0_coordinate_group;
124 }
125
126 static const struct attribute_group cxl_region_access1_coordinate_group = {
127 .name = "access1",
128 .attrs = access1_coordinate_attrs,
129 .is_visible = cxl_region_access1_coordinate_visible,
130 };
131
132 static const struct attribute_group *get_cxl_region_access1_group(void)
133 {
134 return &cxl_region_access1_coordinate_group;
135 }
136
137 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
138 char *buf)
139 {
140 struct cxl_region *cxlr = to_cxl_region(dev);
141 struct cxl_region_params *p = &cxlr->params;
142 ssize_t rc;
143
144 rc = down_read_interruptible(&cxl_region_rwsem);
145 if (rc)
146 return rc;
147 if (cxlr->mode != CXL_DECODER_PMEM)
148 rc = sysfs_emit(buf, "\n");
149 else
150 rc = sysfs_emit(buf, "%pUb\n", &p->uuid);
151 up_read(&cxl_region_rwsem);
152
153 return rc;
154 }
155
156 static int is_dup(struct device *match, void *data)
157 {
158 struct cxl_region_params *p;
159 struct cxl_region *cxlr;
160 uuid_t *uuid = data;
161
162 if (!is_cxl_region(match))
163 return 0;
164
165 lockdep_assert_held(&cxl_region_rwsem);
166 cxlr = to_cxl_region(match);
167 p = &cxlr->params;
168
169 if (uuid_equal(&p->uuid, uuid)) {
170 dev_dbg(match, "already has uuid: %pUb\n", uuid);
171 return -EBUSY;
172 }
173
174 return 0;
175 }
176
177 static ssize_t uuid_store(struct device *dev, struct device_attribute *attr,
178 const char *buf, size_t len)
179 {
180 struct cxl_region *cxlr = to_cxl_region(dev);
181 struct cxl_region_params *p = &cxlr->params;
182 uuid_t temp;
183 ssize_t rc;
184
185 if (len != UUID_STRING_LEN + 1)
186 return -EINVAL;
187
188 rc = uuid_parse(buf, &temp);
189 if (rc)
190 return rc;
191
192 if (uuid_is_null(&temp))
193 return -EINVAL;
194
195 rc = down_write_killable(&cxl_region_rwsem);
196 if (rc)
197 return rc;
198
199 if (uuid_equal(&p->uuid, &temp))
200 goto out;
201
202 rc = -EBUSY;
203 if (p->state >= CXL_CONFIG_ACTIVE)
204 goto out;
205
206 rc = bus_for_each_dev(&cxl_bus_type, NULL, &temp, is_dup);
207 if (rc < 0)
208 goto out;
209
210 uuid_copy(&p->uuid, &temp);
211 out:
212 up_write(&cxl_region_rwsem);
213
214 if (rc)
215 return rc;
216 return len;
217 }
218 static DEVICE_ATTR_RW(uuid);
219
220 static struct cxl_region_ref *cxl_rr_load(struct cxl_port *port,
221 struct cxl_region *cxlr)
222 {
223 return xa_load(&port->regions, (unsigned long)cxlr);
224 }
225
226 static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
227 {
228 if (!cpu_cache_has_invalidate_memregion()) {
229 if (IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST)) {
230 dev_info_once(
231 &cxlr->dev,
232 "Bypassing cpu_cache_invalidate_memregion() for testing!\n");
233 return 0;
234 } else {
235 dev_WARN(&cxlr->dev,
236 "Failed to synchronize CPU cache state\n");
237 return -ENXIO;
238 }
239 }
240
241 cpu_cache_invalidate_memregion(IORES_DESC_CXL);
242 return 0;
243 }
244
245 static void cxl_region_decode_reset(struct cxl_region *cxlr, int count)
246 {
247 struct cxl_region_params *p = &cxlr->params;
248 int i;
249
250 /*
251 * Before region teardown attempt to flush, evict any data cached for
252 * this region, or scream loudly about missing arch / platform support
253 * for CXL teardown.
254 */
255 cxl_region_invalidate_memregion(cxlr);
256
257 for (i = count - 1; i >= 0; i--) {
258 struct cxl_endpoint_decoder *cxled = p->targets[i];
259 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
260 struct cxl_port *iter = cxled_to_port(cxled);
261 struct cxl_dev_state *cxlds = cxlmd->cxlds;
262 struct cxl_ep *ep;
263
264 if (cxlds->rcd)
265 goto endpoint_reset;
266
267 while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
268 iter = to_cxl_port(iter->dev.parent);
269
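/*
 * Descend from the port just below the CXL root toward the endpoint via
 * ep->next, resetting the committed decoder at each port; the endpoint
 * decoder itself is reset last, below.
 */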
270 for (ep = cxl_ep_load(iter, cxlmd); iter;
271 iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
272 struct cxl_region_ref *cxl_rr;
273 struct cxl_decoder *cxld;
274
275 cxl_rr = cxl_rr_load(iter, cxlr);
276 cxld = cxl_rr->decoder;
277 if (cxld->reset)
278 cxld->reset(cxld);
279 set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
280 }
281
282 endpoint_reset:
283 cxled->cxld.reset(&cxled->cxld);
284 set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
285 }
286
287 /* all decoders associated with this region have been torn down */
288 clear_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
289 }
290
291 static int commit_decoder(struct cxl_decoder *cxld)
292 {
293 struct cxl_switch_decoder *cxlsd = NULL;
294
295 if (cxld->commit)
296 return cxld->commit(cxld);
297
298 if (is_switch_decoder(&cxld->dev))
299 cxlsd = to_cxl_switch_decoder(&cxld->dev);
300
301 if (dev_WARN_ONCE(&cxld->dev, !cxlsd || cxlsd->nr_targets > 1,
302 "->commit() is required\n"))
303 return -ENXIO;
304 return 0;
305 }
306
307 static int cxl_region_decode_commit(struct cxl_region *cxlr)
308 {
309 struct cxl_region_params *p = &cxlr->params;
310 int i, rc = 0;
311
312 for (i = 0; i < p->nr_targets; i++) {
313 struct cxl_endpoint_decoder *cxled = p->targets[i];
314 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
315 struct cxl_region_ref *cxl_rr;
316 struct cxl_decoder *cxld;
317 struct cxl_port *iter;
318 struct cxl_ep *ep;
319
320 /* commit bottom up */
321 for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
322 iter = to_cxl_port(iter->dev.parent)) {
323 cxl_rr = cxl_rr_load(iter, cxlr);
324 cxld = cxl_rr->decoder;
325 rc = commit_decoder(cxld);
326 if (rc)
327 break;
328 }
329
330 if (rc) {
331 /* programming @iter failed, teardown */
332 for (ep = cxl_ep_load(iter, cxlmd); ep && iter;
333 iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
334 cxl_rr = cxl_rr_load(iter, cxlr);
335 cxld = cxl_rr->decoder;
336 if (cxld->reset)
337 cxld->reset(cxld);
338 }
339
340 cxled->cxld.reset(&cxled->cxld);
341 goto err;
342 }
343 }
344
345 return 0;
346
347 err:
348 /* undo the targets that were successfully committed */
349 cxl_region_decode_reset(cxlr, i);
350 return rc;
351 }
352
353 static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
354 const char *buf, size_t len)
355 {
356 struct cxl_region *cxlr = to_cxl_region(dev);
357 struct cxl_region_params *p = &cxlr->params;
358 bool commit;
359 ssize_t rc;
360
361 rc = kstrtobool(buf, &commit);
362 if (rc)
363 return rc;
364
365 rc = down_write_killable(&cxl_region_rwsem);
366 if (rc)
367 return rc;
368
369 /* Already in the requested state? */
370 if (commit && p->state >= CXL_CONFIG_COMMIT)
371 goto out;
372 if (!commit && p->state < CXL_CONFIG_COMMIT)
373 goto out;
374
375 /* Not ready to commit? */
376 if (commit && p->state < CXL_CONFIG_ACTIVE) {
377 rc = -ENXIO;
378 goto out;
379 }
380
381 /*
382 * Invalidate caches before region setup to drop any speculative
383 * consumption of this address space
384 */
385 rc = cxl_region_invalidate_memregion(cxlr);
386 if (rc)
387 goto out;
388
389 if (commit) {
390 rc = cxl_region_decode_commit(cxlr);
391 if (rc == 0)
392 p->state = CXL_CONFIG_COMMIT;
393 } else {
394 p->state = CXL_CONFIG_RESET_PENDING;
395 up_write(&cxl_region_rwsem);
396 device_release_driver(&cxlr->dev);
397 down_write(&cxl_region_rwsem);
398
399 /*
400 * The lock was dropped, so need to revalidate that the reset is
401 * still pending.
402 */
403 if (p->state == CXL_CONFIG_RESET_PENDING) {
404 cxl_region_decode_reset(cxlr, p->interleave_ways);
405 p->state = CXL_CONFIG_ACTIVE;
406 }
407 }
408
409 out:
410 up_write(&cxl_region_rwsem);
411
412 if (rc)
413 return rc;
414 return len;
415 }
416
417 static ssize_t commit_show(struct device *dev, struct device_attribute *attr,
418 char *buf)
419 {
420 struct cxl_region *cxlr = to_cxl_region(dev);
421 struct cxl_region_params *p = &cxlr->params;
422 ssize_t rc;
423
424 rc = down_read_interruptible(&cxl_region_rwsem);
425 if (rc)
426 return rc;
427 rc = sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT);
428 up_read(&cxl_region_rwsem);
429
430 return rc;
431 }
432 static DEVICE_ATTR_RW(commit);
433
434 static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a,
435 int n)
436 {
437 struct device *dev = kobj_to_dev(kobj);
438 struct cxl_region *cxlr = to_cxl_region(dev);
439
440 /*
441 * Support tooling that expects to find a 'uuid' attribute for all
442 * regions regardless of mode.
443 */
444 if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_DECODER_PMEM)
445 return 0444;
446 return a->mode;
447 }
448
449 static ssize_t interleave_ways_show(struct device *dev,
450 struct device_attribute *attr, char *buf)
451 {
452 struct cxl_region *cxlr = to_cxl_region(dev);
453 struct cxl_region_params *p = &cxlr->params;
454 ssize_t rc;
455
456 rc = down_read_interruptible(&cxl_region_rwsem);
457 if (rc)
458 return rc;
459 rc = sysfs_emit(buf, "%d\n", p->interleave_ways);
460 up_read(&cxl_region_rwsem);
461
462 return rc;
463 }
464
465 static const struct attribute_group *get_cxl_region_target_group(void);
466
467 static ssize_t interleave_ways_store(struct device *dev,
468 struct device_attribute *attr,
469 const char *buf, size_t len)
470 {
471 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
472 struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
473 struct cxl_region *cxlr = to_cxl_region(dev);
474 struct cxl_region_params *p = &cxlr->params;
475 unsigned int val, save;
476 int rc;
477 u8 iw;
478
479 rc = kstrtouint(buf, 0, &val);
480 if (rc)
481 return rc;
482
483 rc = ways_to_eiw(val, &iw);
484 if (rc)
485 return rc;
486
487 /*
488 * Even for x3, x6, and x12 interleaves the region interleave must be a
489 * power of 2 multiple of the host bridge interleave.
490 */
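/*
 * Example (illustrative): with a x3 host bridge decode, a region interleave
 * of 3, 6, or 12 ways passes this check (3/3, 6/3 and 12/3 are powers of 2),
 * while 9 ways is rejected (9/3 == 3 is not).
 */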
491 if (!is_power_of_2(val / cxld->interleave_ways) ||
492 (val % cxld->interleave_ways)) {
493 dev_dbg(&cxlr->dev, "invalid interleave: %d\n", val);
494 return -EINVAL;
495 }
496
497 rc = down_write_killable(&cxl_region_rwsem);
498 if (rc)
499 return rc;
500 if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
501 rc = -EBUSY;
502 goto out;
503 }
504
505 save = p->interleave_ways;
506 p->interleave_ways = val;
507 rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
508 if (rc)
509 p->interleave_ways = save;
510 out:
511 up_write(&cxl_region_rwsem);
512 if (rc)
513 return rc;
514 return len;
515 }
516 static DEVICE_ATTR_RW(interleave_ways);
517
518 static ssize_t interleave_granularity_show(struct device *dev,
519 struct device_attribute *attr,
520 char *buf)
521 {
522 struct cxl_region *cxlr = to_cxl_region(dev);
523 struct cxl_region_params *p = &cxlr->params;
524 ssize_t rc;
525
526 rc = down_read_interruptible(&cxl_region_rwsem);
527 if (rc)
528 return rc;
529 rc = sysfs_emit(buf, "%d\n", p->interleave_granularity);
530 up_read(&cxl_region_rwsem);
531
532 return rc;
533 }
534
535 static ssize_t interleave_granularity_store(struct device *dev,
536 struct device_attribute *attr,
537 const char *buf, size_t len)
538 {
539 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
540 struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
541 struct cxl_region *cxlr = to_cxl_region(dev);
542 struct cxl_region_params *p = &cxlr->params;
543 int rc, val;
544 u16 ig;
545
546 rc = kstrtoint(buf, 0, &val);
547 if (rc)
548 return rc;
549
550 rc = granularity_to_eig(val, &ig);
551 if (rc)
552 return rc;
553
554 /*
555 * When the host-bridge is interleaved, disallow region granularity !=
556 * root granularity. Regions with a granularity less than the root
557 * interleave result in needing multiple endpoints to support a single
558 * slot in the interleave (possible to support in the future). Regions
559 * with a granularity greater than the root interleave result in invalid
560 * DPA translations (invalid to support).
561 */
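/*
 * Example (illustrative): with a x2 host bridge interleave at 1KB
 * granularity, only a 1KB region granularity is accepted here; 256B or 4KB
 * would fail the check below.
 */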
562 if (cxld->interleave_ways > 1 && val != cxld->interleave_granularity)
563 return -EINVAL;
564
565 rc = down_write_killable(&cxl_region_rwsem);
566 if (rc)
567 return rc;
568 if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
569 rc = -EBUSY;
570 goto out;
571 }
572
573 p->interleave_granularity = val;
574 out:
575 up_write(&cxl_region_rwsem);
576 if (rc)
577 return rc;
578 return len;
579 }
580 static DEVICE_ATTR_RW(interleave_granularity);
581
582 static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
583 char *buf)
584 {
585 struct cxl_region *cxlr = to_cxl_region(dev);
586 struct cxl_region_params *p = &cxlr->params;
587 u64 resource = -1ULL;
588 ssize_t rc;
589
590 rc = down_read_interruptible(&cxl_region_rwsem);
591 if (rc)
592 return rc;
593 if (p->res)
594 resource = p->res->start;
595 rc = sysfs_emit(buf, "%#llx\n", resource);
596 up_read(&cxl_region_rwsem);
597
598 return rc;
599 }
600 static DEVICE_ATTR_RO(resource);
601
602 static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
603 char *buf)
604 {
605 struct cxl_region *cxlr = to_cxl_region(dev);
606
607 return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxlr->mode));
608 }
609 static DEVICE_ATTR_RO(mode);
610
611 static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
612 {
613 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
614 struct cxl_region_params *p = &cxlr->params;
615 struct resource *res;
616 u64 remainder = 0;
617
618 lockdep_assert_held_write(&cxl_region_rwsem);
619
620 /* Nothing to do... */
621 if (p->res && resource_size(p->res) == size)
622 return 0;
623
624 /* To change size the old size must be freed first */
625 if (p->res)
626 return -EBUSY;
627
628 if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE)
629 return -EBUSY;
630
631 /* ways, granularity and uuid (if PMEM) need to be set before HPA */
632 if (!p->interleave_ways || !p->interleave_granularity ||
633 (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid)))
634 return -ENXIO;
635
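/*
 * Example (illustrative): the size must be a multiple of 256M *
 * interleave_ways, so a x2 region can only be sized in 512M increments.
 */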
636 div64_u64_rem(size, (u64)SZ_256M * p->interleave_ways, &remainder);
637 if (remainder)
638 return -EINVAL;
639
640 res = alloc_free_mem_region(cxlrd->res, size, SZ_256M,
641 dev_name(&cxlr->dev));
642 if (IS_ERR(res)) {
643 dev_dbg(&cxlr->dev,
644 "HPA allocation error (%ld) for size:%pap in %s %pr\n",
645 PTR_ERR(res), &size, cxlrd->res->name, cxlrd->res);
646 return PTR_ERR(res);
647 }
648
649 p->res = res;
650 p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
651
652 return 0;
653 }
654
655 static void cxl_region_iomem_release(struct cxl_region *cxlr)
656 {
657 struct cxl_region_params *p = &cxlr->params;
658
659 if (device_is_registered(&cxlr->dev))
660 lockdep_assert_held_write(&cxl_region_rwsem);
661 if (p->res) {
662 /*
663 * Autodiscovered regions may not have been able to insert their
664 * resource.
665 */
666 if (p->res->parent)
667 remove_resource(p->res);
668 kfree(p->res);
669 p->res = NULL;
670 }
671 }
672
673 static int free_hpa(struct cxl_region *cxlr)
674 {
675 struct cxl_region_params *p = &cxlr->params;
676
677 lockdep_assert_held_write(&cxl_region_rwsem);
678
679 if (!p->res)
680 return 0;
681
682 if (p->state >= CXL_CONFIG_ACTIVE)
683 return -EBUSY;
684
685 cxl_region_iomem_release(cxlr);
686 p->state = CXL_CONFIG_IDLE;
687 return 0;
688 }
689
690 static ssize_t size_store(struct device *dev, struct device_attribute *attr,
691 const char *buf, size_t len)
692 {
693 struct cxl_region *cxlr = to_cxl_region(dev);
694 u64 val;
695 int rc;
696
697 rc = kstrtou64(buf, 0, &val);
698 if (rc)
699 return rc;
700
701 rc = down_write_killable(&cxl_region_rwsem);
702 if (rc)
703 return rc;
704
705 if (val)
706 rc = alloc_hpa(cxlr, val);
707 else
708 rc = free_hpa(cxlr);
709 up_write(&cxl_region_rwsem);
710
711 if (rc)
712 return rc;
713
714 return len;
715 }
716
717 static ssize_t size_show(struct device *dev, struct device_attribute *attr,
718 char *buf)
719 {
720 struct cxl_region *cxlr = to_cxl_region(dev);
721 struct cxl_region_params *p = &cxlr->params;
722 u64 size = 0;
723 ssize_t rc;
724
725 rc = down_read_interruptible(&cxl_region_rwsem);
726 if (rc)
727 return rc;
728 if (p->res)
729 size = resource_size(p->res);
730 rc = sysfs_emit(buf, "%#llx\n", size);
731 up_read(&cxl_region_rwsem);
732
733 return rc;
734 }
735 static DEVICE_ATTR_RW(size);
736
737 static struct attribute *cxl_region_attrs[] = {
738 &dev_attr_uuid.attr,
739 &dev_attr_commit.attr,
740 &dev_attr_interleave_ways.attr,
741 &dev_attr_interleave_granularity.attr,
742 &dev_attr_resource.attr,
743 &dev_attr_size.attr,
744 &dev_attr_mode.attr,
745 NULL,
746 };
747
748 static const struct attribute_group cxl_region_group = {
749 .attrs = cxl_region_attrs,
750 .is_visible = cxl_region_visible,
751 };
752
753 static size_t show_targetN(struct cxl_region *cxlr, char *buf, int pos)
754 {
755 struct cxl_region_params *p = &cxlr->params;
756 struct cxl_endpoint_decoder *cxled;
757 int rc;
758
759 rc = down_read_interruptible(&cxl_region_rwsem);
760 if (rc)
761 return rc;
762
763 if (pos >= p->interleave_ways) {
764 dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
765 p->interleave_ways);
766 rc = -ENXIO;
767 goto out;
768 }
769
770 cxled = p->targets[pos];
771 if (!cxled)
772 rc = sysfs_emit(buf, "\n");
773 else
774 rc = sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev));
775 out:
776 up_read(&cxl_region_rwsem);
777
778 return rc;
779 }
780
781 static int check_commit_order(struct device *dev, const void *data)
782 {
783 struct cxl_decoder *cxld = to_cxl_decoder(dev);
784
785 /*
786 * if port->commit_end is not the only free decoder, then out of
787 * order shutdown has occurred, block further allocations until
788 * that is resolved
789 */
790 if (((cxld->flags & CXL_DECODER_F_ENABLE) == 0))
791 return -EBUSY;
792 return 0;
793 }
794
795 static int match_free_decoder(struct device *dev, void *data)
796 {
797 struct cxl_port *port = to_cxl_port(dev->parent);
798 struct cxl_decoder *cxld;
799 int rc;
800
801 if (!is_switch_decoder(dev))
802 return 0;
803
804 cxld = to_cxl_decoder(dev);
805
806 if (cxld->id != port->commit_end + 1)
807 return 0;
808
809 if (cxld->region) {
810 dev_dbg(dev->parent,
811 "next decoder to commit (%s) is already reserved (%s)\n",
812 dev_name(dev), dev_name(&cxld->region->dev));
813 return 0;
814 }
815
816 rc = device_for_each_child_reverse_from(dev->parent, dev, NULL,
817 check_commit_order);
818 if (rc) {
819 dev_dbg(dev->parent,
820 "unable to allocate %s due to out of order shutdown\n",
821 dev_name(dev));
822 return 0;
823 }
824 return 1;
825 }
826
827 static int match_auto_decoder(struct device *dev, void *data)
828 {
829 struct cxl_region_params *p = data;
830 struct cxl_decoder *cxld;
831 struct range *r;
832
833 if (!is_switch_decoder(dev))
834 return 0;
835
836 cxld = to_cxl_decoder(dev);
837 r = &cxld->hpa_range;
838
839 if (p->res && p->res->start == r->start && p->res->end == r->end)
840 return 1;
841
842 return 0;
843 }
844
845 static struct cxl_decoder *
846 cxl_region_find_decoder(struct cxl_port *port,
847 struct cxl_endpoint_decoder *cxled,
848 struct cxl_region *cxlr)
849 {
850 struct device *dev;
851
852 if (port == cxled_to_port(cxled))
853 return &cxled->cxld;
854
855 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags))
856 dev = device_find_child(&port->dev, &cxlr->params,
857 match_auto_decoder);
858 else
859 dev = device_find_child(&port->dev, NULL, match_free_decoder);
860 if (!dev)
861 return NULL;
862 /*
863 * This decoder is pinned and stays registered as long as the endpoint decoder is
864 * registered, and endpoint decoder unregistration holds the
865 * cxl_region_rwsem over unregister events, so no need to hold on to
866 * this extra reference.
867 */
868 put_device(dev);
869 return to_cxl_decoder(dev);
870 }
871
872 static bool auto_order_ok(struct cxl_port *port, struct cxl_region *cxlr_iter,
873 struct cxl_decoder *cxld)
874 {
875 struct cxl_region_ref *rr = cxl_rr_load(port, cxlr_iter);
876 struct cxl_decoder *cxld_iter = rr->decoder;
877
878 /*
879 * Allow the out of order assembly of auto-discovered regions.
880 * Per CXL Spec 3.1 8.2.4.20.12 software must commit decoders
881 * in HPA order. Confirm that the decoder with the lesser HPA
882 * starting address has the lesser id.
883 */
884 dev_dbg(&cxld->dev, "check for HPA violation %s:%d < %s:%d\n",
885 dev_name(&cxld->dev), cxld->id,
886 dev_name(&cxld_iter->dev), cxld_iter->id);
887
888 if (cxld_iter->id > cxld->id)
889 return true;
890
891 return false;
892 }
893
894 static struct cxl_region_ref *
895 alloc_region_ref(struct cxl_port *port, struct cxl_region *cxlr,
896 struct cxl_endpoint_decoder *cxled)
897 {
898 struct cxl_region_params *p = &cxlr->params;
899 struct cxl_region_ref *cxl_rr, *iter;
900 unsigned long index;
901 int rc;
902
903 xa_for_each(&port->regions, index, iter) {
904 struct cxl_region_params *ip = &iter->region->params;
905
906 if (!ip->res || ip->res->start < p->res->start)
907 continue;
908
909 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
910 struct cxl_decoder *cxld;
911
912 cxld = cxl_region_find_decoder(port, cxled, cxlr);
913 if (auto_order_ok(port, iter->region, cxld))
914 continue;
915 }
916 dev_dbg(&cxlr->dev, "%s: HPA order violation %s:%pr vs %pr\n",
917 dev_name(&port->dev),
918 dev_name(&iter->region->dev), ip->res, p->res);
919
920 return ERR_PTR(-EBUSY);
921 }
922
923 cxl_rr = kzalloc(sizeof(*cxl_rr), GFP_KERNEL);
924 if (!cxl_rr)
925 return ERR_PTR(-ENOMEM);
926 cxl_rr->port = port;
927 cxl_rr->region = cxlr;
928 cxl_rr->nr_targets = 1;
929 xa_init(&cxl_rr->endpoints);
930
931 rc = xa_insert(&port->regions, (unsigned long)cxlr, cxl_rr, GFP_KERNEL);
932 if (rc) {
933 dev_dbg(&cxlr->dev,
934 "%s: failed to track region reference: %d\n",
935 dev_name(&port->dev), rc);
936 kfree(cxl_rr);
937 return ERR_PTR(rc);
938 }
939
940 return cxl_rr;
941 }
942
943 static void cxl_rr_free_decoder(struct cxl_region_ref *cxl_rr)
944 {
945 struct cxl_region *cxlr = cxl_rr->region;
946 struct cxl_decoder *cxld = cxl_rr->decoder;
947
948 if (!cxld)
949 return;
950
951 dev_WARN_ONCE(&cxlr->dev, cxld->region != cxlr, "region mismatch\n");
952 if (cxld->region == cxlr) {
953 cxld->region = NULL;
954 put_device(&cxlr->dev);
955 }
956 }
957
958 static void free_region_ref(struct cxl_region_ref *cxl_rr)
959 {
960 struct cxl_port *port = cxl_rr->port;
961 struct cxl_region *cxlr = cxl_rr->region;
962
963 cxl_rr_free_decoder(cxl_rr);
964 xa_erase(&port->regions, (unsigned long)cxlr);
965 xa_destroy(&cxl_rr->endpoints);
966 kfree(cxl_rr);
967 }
968
969 static int cxl_rr_ep_add(struct cxl_region_ref *cxl_rr,
970 struct cxl_endpoint_decoder *cxled)
971 {
972 int rc;
973 struct cxl_port *port = cxl_rr->port;
974 struct cxl_region *cxlr = cxl_rr->region;
975 struct cxl_decoder *cxld = cxl_rr->decoder;
976 struct cxl_ep *ep = cxl_ep_load(port, cxled_to_memdev(cxled));
977
978 if (ep) {
979 rc = xa_insert(&cxl_rr->endpoints, (unsigned long)cxled, ep,
980 GFP_KERNEL);
981 if (rc)
982 return rc;
983 }
984 cxl_rr->nr_eps++;
985
986 if (!cxld->region) {
987 cxld->region = cxlr;
988 get_device(&cxlr->dev);
989 }
990
991 return 0;
992 }
993
994 static int cxl_rr_alloc_decoder(struct cxl_port *port, struct cxl_region *cxlr,
995 struct cxl_endpoint_decoder *cxled,
996 struct cxl_region_ref *cxl_rr)
997 {
998 struct cxl_decoder *cxld;
999
1000 cxld = cxl_region_find_decoder(port, cxled, cxlr);
1001 if (!cxld) {
1002 dev_dbg(&cxlr->dev, "%s: no decoder available\n",
1003 dev_name(&port->dev));
1004 return -EBUSY;
1005 }
1006
1007 if (cxld->region) {
1008 dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n",
1009 dev_name(&port->dev), dev_name(&cxld->dev),
1010 dev_name(&cxld->region->dev));
1011 return -EBUSY;
1012 }
1013
1014 /*
1015 * Endpoints should already match the region type, but backstop that
1016 * assumption with an assertion. Switch-decoders change mapping-type
1017 * based on what is mapped when they are assigned to a region.
1018 */
1019 dev_WARN_ONCE(&cxlr->dev,
1020 port == cxled_to_port(cxled) &&
1021 cxld->target_type != cxlr->type,
1022 "%s:%s mismatch decoder type %d -> %d\n",
1023 dev_name(&cxled_to_memdev(cxled)->dev),
1024 dev_name(&cxld->dev), cxld->target_type, cxlr->type);
1025 cxld->target_type = cxlr->type;
1026 cxl_rr->decoder = cxld;
1027 return 0;
1028 }
1029
1030 /**
1031 * cxl_port_attach_region() - track a region's interest in a port by endpoint
1032 * @port: port to add a new region reference 'struct cxl_region_ref'
1033 * @cxlr: region to attach to @port
1034 * @cxled: endpoint decoder used to create or further pin a region reference
1035 * @pos: interleave position of @cxled in @cxlr
1036 *
1037 * The attach event is an opportunity to validate CXL decode setup
1038 * constraints and record metadata needed for programming HDM decoders,
1039 * in particular decoder target lists.
1040 *
1041 * The steps are:
1042 *
1043 * - validate that there are no other regions with a higher HPA already
1044 * associated with @port
1045 * - establish a region reference if one is not already present
1046 *
1047 * - additionally allocate a decoder instance that will host @cxlr on
1048 * @port
1049 *
1050 * - pin the region reference by the endpoint
1051 * - account for how many entries in @port's target list are needed to
1052 * cover all of the added endpoints.
1053 */
*/
1054 static int cxl_port_attach_region(struct cxl_port *port,
1055 struct cxl_region *cxlr,
1056 struct cxl_endpoint_decoder *cxled, int pos)
1057 {
1058 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
1059 struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
1060 struct cxl_region_ref *cxl_rr;
1061 bool nr_targets_inc = false;
1062 struct cxl_decoder *cxld;
1063 unsigned long index;
1064 int rc = -EBUSY;
1065
1066 lockdep_assert_held_write(&cxl_region_rwsem);
1067
1068 cxl_rr = cxl_rr_load(port, cxlr);
1069 if (cxl_rr) {
1070 struct cxl_ep *ep_iter;
1071 int found = 0;
1072
1073 /*
1074 * Walk the existing endpoints that have been attached to
1075 * @cxlr at @port and see if they share the same 'next' port
1076 * in the downstream direction, i.e. endpoints that share a common
1077 * upstream switch.
1078 */
1079 xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
1080 if (ep_iter == ep)
1081 continue;
1082 if (ep_iter->next == ep->next) {
1083 found++;
1084 break;
1085 }
1086 }
1087
1088 /*
1089 * New target port, or @port is an endpoint port that always
1090 * accounts its own local decode as a target.
1091 */
1092 if (!found || !ep->next) {
1093 cxl_rr->nr_targets++;
1094 nr_targets_inc = true;
1095 }
1096 } else {
1097 cxl_rr = alloc_region_ref(port, cxlr, cxled);
1098 if (IS_ERR(cxl_rr)) {
1099 dev_dbg(&cxlr->dev,
1100 "%s: failed to allocate region reference\n",
1101 dev_name(&port->dev));
1102 return PTR_ERR(cxl_rr);
1103 }
1104 nr_targets_inc = true;
1105
1106 rc = cxl_rr_alloc_decoder(port, cxlr, cxled, cxl_rr);
1107 if (rc)
1108 goto out_erase;
1109 }
1110 cxld = cxl_rr->decoder;
1111
1112 /*
1113 * the number of targets should not exceed the target_count
1114 * of the decoder
1115 */
1116 if (is_switch_decoder(&cxld->dev)) {
1117 struct cxl_switch_decoder *cxlsd;
1118
1119 cxlsd = to_cxl_switch_decoder(&cxld->dev);
1120 if (cxl_rr->nr_targets > cxlsd->nr_targets) {
1121 dev_dbg(&cxlr->dev,
1122 "%s:%s %s add: %s:%s @ %d overflows targets: %d\n",
1123 dev_name(port->uport_dev), dev_name(&port->dev),
1124 dev_name(&cxld->dev), dev_name(&cxlmd->dev),
1125 dev_name(&cxled->cxld.dev), pos,
1126 cxlsd->nr_targets);
1127 rc = -ENXIO;
1128 goto out_erase;
1129 }
1130 }
1131
1132 rc = cxl_rr_ep_add(cxl_rr, cxled);
1133 if (rc) {
1134 dev_dbg(&cxlr->dev,
1135 "%s: failed to track endpoint %s:%s reference\n",
1136 dev_name(&port->dev), dev_name(&cxlmd->dev),
1137 dev_name(&cxld->dev));
1138 goto out_erase;
1139 }
1140
1141 dev_dbg(&cxlr->dev,
1142 "%s:%s %s add: %s:%s @ %d next: %s nr_eps: %d nr_targets: %d\n",
1143 dev_name(port->uport_dev), dev_name(&port->dev),
1144 dev_name(&cxld->dev), dev_name(&cxlmd->dev),
1145 dev_name(&cxled->cxld.dev), pos,
1146 ep ? ep->next ? dev_name(ep->next->uport_dev) :
1147 dev_name(&cxlmd->dev) :
1148 "none",
1149 cxl_rr->nr_eps, cxl_rr->nr_targets);
1150
1151 return 0;
1152 out_erase:
1153 if (nr_targets_inc)
1154 cxl_rr->nr_targets--;
1155 if (cxl_rr->nr_eps == 0)
1156 free_region_ref(cxl_rr);
1157 return rc;
1158 }
1159
1160 static void cxl_port_detach_region(struct cxl_port *port,
1161 struct cxl_region *cxlr,
1162 struct cxl_endpoint_decoder *cxled)
1163 {
1164 struct cxl_region_ref *cxl_rr;
1165 struct cxl_ep *ep = NULL;
1166
1167 lockdep_assert_held_write(&cxl_region_rwsem);
1168
1169 cxl_rr = cxl_rr_load(port, cxlr);
1170 if (!cxl_rr)
1171 return;
1172
1173 /*
1174 * Endpoint ports do not carry cxl_ep references, and they
1175 * never target more than one endpoint by definition
1176 */
1177 if (cxl_rr->decoder == &cxled->cxld)
1178 cxl_rr->nr_eps--;
1179 else
1180 ep = xa_erase(&cxl_rr->endpoints, (unsigned long)cxled);
1181 if (ep) {
1182 struct cxl_ep *ep_iter;
1183 unsigned long index;
1184 int found = 0;
1185
1186 cxl_rr->nr_eps--;
1187 xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
1188 if (ep_iter->next == ep->next) {
1189 found++;
1190 break;
1191 }
1192 }
1193 if (!found)
1194 cxl_rr->nr_targets--;
1195 }
1196
1197 if (cxl_rr->nr_eps == 0)
1198 free_region_ref(cxl_rr);
1199 }
1200
1201 static int check_last_peer(struct cxl_endpoint_decoder *cxled,
1202 struct cxl_ep *ep, struct cxl_region_ref *cxl_rr,
1203 int distance)
1204 {
1205 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
1206 struct cxl_region *cxlr = cxl_rr->region;
1207 struct cxl_region_params *p = &cxlr->params;
1208 struct cxl_endpoint_decoder *cxled_peer;
1209 struct cxl_port *port = cxl_rr->port;
1210 struct cxl_memdev *cxlmd_peer;
1211 struct cxl_ep *ep_peer;
1212 int pos = cxled->pos;
1213
1214 /*
1215 * If this position wants to share a dport with the last endpoint mapped
1216 * then that endpoint, at index 'position - distance', must also be
1217 * mapped by this dport.
1218 */
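/*
 * Example (illustrative): in a x4 region where this port maps two
 * positions per dport (distance == 2), position 3 must share its dport
 * with the endpoint already attached at position 1.
 */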
1219 if (pos < distance) {
1220 dev_dbg(&cxlr->dev, "%s:%s: cannot host %s:%s at %d\n",
1221 dev_name(port->uport_dev), dev_name(&port->dev),
1222 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
1223 return -ENXIO;
1224 }
1225 cxled_peer = p->targets[pos - distance];
1226 cxlmd_peer = cxled_to_memdev(cxled_peer);
1227 ep_peer = cxl_ep_load(port, cxlmd_peer);
1228 if (ep->dport != ep_peer->dport) {
1229 dev_dbg(&cxlr->dev,
1230 "%s:%s: %s:%s pos %d mismatched peer %s:%s\n",
1231 dev_name(port->uport_dev), dev_name(&port->dev),
1232 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos,
1233 dev_name(&cxlmd_peer->dev),
1234 dev_name(&cxled_peer->cxld.dev));
1235 return -ENXIO;
1236 }
1237
1238 return 0;
1239 }
1240
1241 static int check_interleave_cap(struct cxl_decoder *cxld, int iw, int ig)
1242 {
1243 struct cxl_port *port = to_cxl_port(cxld->dev.parent);
1244 struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
1245 unsigned int interleave_mask;
1246 u8 eiw;
1247 u16 eig;
1248 int high_pos, low_pos;
1249
1250 if (!test_bit(iw, &cxlhdm->iw_cap_mask))
1251 return -ENXIO;
1252 /*
1253 * Per CXL specification r3.1(8.2.4.20.13 Decoder Protection),
1254 * if eiw < 8:
1255 * DPAOFFSET[51: eig + 8] = HPAOFFSET[51: eig + 8 + eiw]
1256 * DPAOFFSET[eig + 7: 0] = HPAOFFSET[eig + 7: 0]
1257 *
1258 * when the eiw is 0, all the bits of HPAOFFSET[51: 0] are used, the
1259 * interleave bits are none.
1260 *
1261 * if eiw >= 8:
1262 * DPAOFFSET[51: eig + 8] = HPAOFFSET[51: eig + eiw] / 3
1263 * DPAOFFSET[eig + 7: 0] = HPAOFFSET[eig + 7: 0]
1264 *
1265 * when the eiw is 8, all the bits of HPAOFFSET[51: 0] are used, the
1266 * interleave bits are none.
1267 */
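/*
 * Worked example (illustrative): iw = 4, ig = 1024 encodes to eiw = 2,
 * eig = 2, so the HPA bits consumed by interleave selection are
 * HPAOFFSET[eiw + eig + 7:eig + 8] == HPAOFFSET[11:10], and GENMASK(11, 10)
 * must be covered by the decoder's interleave_mask.
 */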
1268 ways_to_eiw(iw, &eiw);
1269 if (eiw == 0 || eiw == 8)
1270 return 0;
1271
1272 granularity_to_eig(ig, &eig);
1273 if (eiw > 8)
1274 high_pos = eiw + eig - 1;
1275 else
1276 high_pos = eiw + eig + 7;
1277 low_pos = eig + 8;
1278 interleave_mask = GENMASK(high_pos, low_pos);
1279 if (interleave_mask & ~cxlhdm->interleave_mask)
1280 return -ENXIO;
1281
1282 return 0;
1283 }
1284
1285 static int cxl_port_setup_targets(struct cxl_port *port,
1286 struct cxl_region *cxlr,
1287 struct cxl_endpoint_decoder *cxled)
1288 {
1289 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
1290 int parent_iw, parent_ig, ig, iw, rc, inc = 0, pos = cxled->pos;
1291 struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
1292 struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
1293 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
1294 struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
1295 struct cxl_region_params *p = &cxlr->params;
1296 struct cxl_decoder *cxld = cxl_rr->decoder;
1297 struct cxl_switch_decoder *cxlsd;
1298 u16 eig, peig;
1299 u8 eiw, peiw;
1300
1301 /*
1302 * While root level decoders support x3, x6, x12, switch level
1303 * decoders only support powers of 2 up to x16.
1304 */
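/*
 * Example (illustrative): a x6 region spread across three host bridges
 * yields two targets at each host bridge decoder (a power of 2), whereas
 * a two host bridge x6 topology would need three targets here and is
 * rejected.
 */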
1305 if (!is_power_of_2(cxl_rr->nr_targets)) {
1306 dev_dbg(&cxlr->dev, "%s:%s: invalid target count %d\n",
1307 dev_name(port->uport_dev), dev_name(&port->dev),
1308 cxl_rr->nr_targets);
1309 return -EINVAL;
1310 }
1311
1312 cxlsd = to_cxl_switch_decoder(&cxld->dev);
1313 if (cxl_rr->nr_targets_set) {
1314 int i, distance;
1315
1316 /*
1317 * Passthrough decoders impose no distance requirements between
1318 * peers
1319 */
1320 if (cxl_rr->nr_targets == 1)
1321 distance = 0;
1322 else
1323 distance = p->nr_targets / cxl_rr->nr_targets;
1324 for (i = 0; i < cxl_rr->nr_targets_set; i++)
1325 if (ep->dport == cxlsd->target[i]) {
1326 rc = check_last_peer(cxled, ep, cxl_rr,
1327 distance);
1328 if (rc)
1329 return rc;
1330 goto out_target_set;
1331 }
1332 goto add_target;
1333 }
1334
1335 if (is_cxl_root(parent_port)) {
1336 /*
1337 * Root decoder IG is always set to value in CFMWS which
1338 * may be different than this region's IG. We can use the
1339 * region's IG here since interleave_granularity_store()
1340 * does not allow interleaved host-bridges with
1341 * root IG != region IG.
1342 */
1343 parent_ig = p->interleave_granularity;
1344 parent_iw = cxlrd->cxlsd.cxld.interleave_ways;
1345 /*
1346 * For purposes of address bit routing, use power-of-2 math for
1347 * switch ports.
1348 */
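/*
 * E.g. (illustrative): a x3 or x6 root decode is treated as x1 or x2
 * respectively for the address-bit math below.
 */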
1349 if (!is_power_of_2(parent_iw))
1350 parent_iw /= 3;
1351 } else {
1352 struct cxl_region_ref *parent_rr;
1353 struct cxl_decoder *parent_cxld;
1354
1355 parent_rr = cxl_rr_load(parent_port, cxlr);
1356 parent_cxld = parent_rr->decoder;
1357 parent_ig = parent_cxld->interleave_granularity;
1358 parent_iw = parent_cxld->interleave_ways;
1359 }
1360
1361 rc = granularity_to_eig(parent_ig, &peig);
1362 if (rc) {
1363 dev_dbg(&cxlr->dev, "%s:%s: invalid parent granularity: %d\n",
1364 dev_name(parent_port->uport_dev),
1365 dev_name(&parent_port->dev), parent_ig);
1366 return rc;
1367 }
1368
1369 rc = ways_to_eiw(parent_iw, &peiw);
1370 if (rc) {
1371 dev_dbg(&cxlr->dev, "%s:%s: invalid parent interleave: %d\n",
1372 dev_name(parent_port->uport_dev),
1373 dev_name(&parent_port->dev), parent_iw);
1374 return rc;
1375 }
1376
1377 iw = cxl_rr->nr_targets;
1378 rc = ways_to_eiw(iw, &eiw);
1379 if (rc) {
1380 dev_dbg(&cxlr->dev, "%s:%s: invalid port interleave: %d\n",
1381 dev_name(port->uport_dev), dev_name(&port->dev), iw);
1382 return rc;
1383 }
1384
1385 /*
1386 * Interleave granularity is a multiple of @parent_port granularity.
1387 * The multiplier is the parent port's interleave ways.
1388 */
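/*
 * Example (illustrative): a parent port at 256B granularity interleaved 2
 * ways yields a 512B granularity for this port's decoder.
 */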
1389 rc = granularity_to_eig(parent_ig * parent_iw, &eig);
1390 if (rc) {
1391 dev_dbg(&cxlr->dev,
1392 "%s: invalid granularity calculation (%d * %d)\n",
1393 dev_name(&parent_port->dev), parent_ig, parent_iw);
1394 return rc;
1395 }
1396
1397 rc = eig_to_granularity(eig, &ig);
1398 if (rc) {
1399 dev_dbg(&cxlr->dev, "%s:%s: invalid interleave: %d\n",
1400 dev_name(port->uport_dev), dev_name(&port->dev),
1401 256 << eig);
1402 return rc;
1403 }
1404
1405 if (iw > 8 || iw > cxlsd->nr_targets) {
1406 dev_dbg(&cxlr->dev,
1407 "%s:%s:%s: ways: %d overflows targets: %d\n",
1408 dev_name(port->uport_dev), dev_name(&port->dev),
1409 dev_name(&cxld->dev), iw, cxlsd->nr_targets);
1410 return -ENXIO;
1411 }
1412
1413 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
1414 if (cxld->interleave_ways != iw ||
1415 cxld->interleave_granularity != ig ||
1416 cxld->hpa_range.start != p->res->start ||
1417 cxld->hpa_range.end != p->res->end ||
1418 ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
1419 dev_err(&cxlr->dev,
1420 "%s:%s %s expected iw: %d ig: %d %pr\n",
1421 dev_name(port->uport_dev), dev_name(&port->dev),
1422 __func__, iw, ig, p->res);
1423 dev_err(&cxlr->dev,
1424 "%s:%s %s got iw: %d ig: %d state: %s %#llx:%#llx\n",
1425 dev_name(port->uport_dev), dev_name(&port->dev),
1426 __func__, cxld->interleave_ways,
1427 cxld->interleave_granularity,
1428 (cxld->flags & CXL_DECODER_F_ENABLE) ?
1429 "enabled" :
1430 "disabled",
1431 cxld->hpa_range.start, cxld->hpa_range.end);
1432 return -ENXIO;
1433 }
1434 } else {
1435 rc = check_interleave_cap(cxld, iw, ig);
1436 if (rc) {
1437 dev_dbg(&cxlr->dev,
1438 "%s:%s iw: %d ig: %d is not supported\n",
1439 dev_name(port->uport_dev),
1440 dev_name(&port->dev), iw, ig);
1441 return rc;
1442 }
1443
1444 cxld->interleave_ways = iw;
1445 cxld->interleave_granularity = ig;
1446 cxld->hpa_range = (struct range) {
1447 .start = p->res->start,
1448 .end = p->res->end,
1449 };
1450 }
1451 dev_dbg(&cxlr->dev, "%s:%s iw: %d ig: %d\n", dev_name(port->uport_dev),
1452 dev_name(&port->dev), iw, ig);
1453 add_target:
1454 if (cxl_rr->nr_targets_set == cxl_rr->nr_targets) {
1455 dev_dbg(&cxlr->dev,
1456 "%s:%s: targets full trying to add %s:%s at %d\n",
1457 dev_name(port->uport_dev), dev_name(&port->dev),
1458 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
1459 return -ENXIO;
1460 }
1461 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
1462 if (cxlsd->target[cxl_rr->nr_targets_set] != ep->dport) {
1463 dev_dbg(&cxlr->dev, "%s:%s: %s expected %s at %d\n",
1464 dev_name(port->uport_dev), dev_name(&port->dev),
1465 dev_name(&cxlsd->cxld.dev),
1466 dev_name(ep->dport->dport_dev),
1467 cxl_rr->nr_targets_set);
1468 return -ENXIO;
1469 }
1470 } else
1471 cxlsd->target[cxl_rr->nr_targets_set] = ep->dport;
1472 inc = 1;
1473 out_target_set:
1474 cxl_rr->nr_targets_set += inc;
1475 dev_dbg(&cxlr->dev, "%s:%s target[%d] = %s for %s:%s @ %d\n",
1476 dev_name(port->uport_dev), dev_name(&port->dev),
1477 cxl_rr->nr_targets_set - 1, dev_name(ep->dport->dport_dev),
1478 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
1479
1480 return 0;
1481 }
1482
1483 static void cxl_port_reset_targets(struct cxl_port *port,
1484 struct cxl_region *cxlr)
1485 {
1486 struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
1487 struct cxl_decoder *cxld;
1488
1489 /*
1490 * After the last endpoint has been detached the entire cxl_rr may now
1491 * be gone.
1492 */
1493 if (!cxl_rr)
1494 return;
1495 cxl_rr->nr_targets_set = 0;
1496
1497 cxld = cxl_rr->decoder;
1498 cxld->hpa_range = (struct range) {
1499 .start = 0,
1500 .end = -1,
1501 };
1502 }
1503
1504 static void cxl_region_teardown_targets(struct cxl_region *cxlr)
1505 {
1506 struct cxl_region_params *p = &cxlr->params;
1507 struct cxl_endpoint_decoder *cxled;
1508 struct cxl_dev_state *cxlds;
1509 struct cxl_memdev *cxlmd;
1510 struct cxl_port *iter;
1511 struct cxl_ep *ep;
1512 int i;
1513
1514 /*
1515 * In the auto-discovery case skip automatic teardown since the
1516 * address space is already active
1517 */
1518 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags))
1519 return;
1520
1521 for (i = 0; i < p->nr_targets; i++) {
1522 cxled = p->targets[i];
1523 cxlmd = cxled_to_memdev(cxled);
1524 cxlds = cxlmd->cxlds;
1525
1526 if (cxlds->rcd)
1527 continue;
1528
1529 iter = cxled_to_port(cxled);
1530 while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
1531 iter = to_cxl_port(iter->dev.parent);
1532
1533 for (ep = cxl_ep_load(iter, cxlmd); iter;
1534 iter = ep->next, ep = cxl_ep_load(iter, cxlmd))
1535 cxl_port_reset_targets(iter, cxlr);
1536 }
1537 }
1538
1539 static int cxl_region_setup_targets(struct cxl_region *cxlr)
1540 {
1541 struct cxl_region_params *p = &cxlr->params;
1542 struct cxl_endpoint_decoder *cxled;
1543 struct cxl_dev_state *cxlds;
1544 int i, rc, rch = 0, vh = 0;
1545 struct cxl_memdev *cxlmd;
1546 struct cxl_port *iter;
1547 struct cxl_ep *ep;
1548
1549 for (i = 0; i < p->nr_targets; i++) {
1550 cxled = p->targets[i];
1551 cxlmd = cxled_to_memdev(cxled);
1552 cxlds = cxlmd->cxlds;
1553
1554 /* validate that all targets agree on topology */
1555 if (!cxlds->rcd) {
1556 vh++;
1557 } else {
1558 rch++;
1559 continue;
1560 }
1561
1562 iter = cxled_to_port(cxled);
1563 while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
1564 iter = to_cxl_port(iter->dev.parent);
1565
1566 /*
1567 * Descend the topology tree programming / validating
1568 * targets while looking for conflicts.
1569 */
1570 for (ep = cxl_ep_load(iter, cxlmd); iter;
1571 iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
1572 rc = cxl_port_setup_targets(iter, cxlr, cxled);
1573 if (rc) {
1574 cxl_region_teardown_targets(cxlr);
1575 return rc;
1576 }
1577 }
1578 }
1579
1580 if (rch && vh) {
1581 dev_err(&cxlr->dev, "mismatched CXL topologies detected\n");
1582 cxl_region_teardown_targets(cxlr);
1583 return -ENXIO;
1584 }
1585
1586 return 0;
1587 }
1588
1589 static int cxl_region_validate_position(struct cxl_region *cxlr,
1590 struct cxl_endpoint_decoder *cxled,
1591 int pos)
1592 {
1593 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
1594 struct cxl_region_params *p = &cxlr->params;
1595 int i;
1596
1597 if (pos < 0 || pos >= p->interleave_ways) {
1598 dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
1599 p->interleave_ways);
1600 return -ENXIO;
1601 }
1602
1603 if (p->targets[pos] == cxled)
1604 return 0;
1605
1606 if (p->targets[pos]) {
1607 struct cxl_endpoint_decoder *cxled_target = p->targets[pos];
1608 struct cxl_memdev *cxlmd_target = cxled_to_memdev(cxled_target);
1609
1610 dev_dbg(&cxlr->dev, "position %d already assigned to %s:%s\n",
1611 pos, dev_name(&cxlmd_target->dev),
1612 dev_name(&cxled_target->cxld.dev));
1613 return -EBUSY;
1614 }
1615
1616 for (i = 0; i < p->interleave_ways; i++) {
1617 struct cxl_endpoint_decoder *cxled_target;
1618 struct cxl_memdev *cxlmd_target;
1619
1620 cxled_target = p->targets[i];
1621 if (!cxled_target)
1622 continue;
1623
1624 cxlmd_target = cxled_to_memdev(cxled_target);
1625 if (cxlmd_target == cxlmd) {
1626 dev_dbg(&cxlr->dev,
1627 "%s already specified at position %d via: %s\n",
1628 dev_name(&cxlmd->dev), pos,
1629 dev_name(&cxled_target->cxld.dev));
1630 return -EBUSY;
1631 }
1632 }
1633
1634 return 0;
1635 }
1636
1637 static int cxl_region_attach_position(struct cxl_region *cxlr,
1638 struct cxl_root_decoder *cxlrd,
1639 struct cxl_endpoint_decoder *cxled,
1640 const struct cxl_dport *dport, int pos)
1641 {
1642 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
1643 struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd;
1644 struct cxl_decoder *cxld = &cxlsd->cxld;
1645 int iw = cxld->interleave_ways;
1646 struct cxl_port *iter;
1647 int rc;
1648
1649 if (dport != cxlrd->cxlsd.target[pos % iw]) {
1650 dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n",
1651 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
1652 dev_name(&cxlrd->cxlsd.cxld.dev));
1653 return -ENXIO;
1654 }
1655
1656 for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
1657 iter = to_cxl_port(iter->dev.parent)) {
1658 rc = cxl_port_attach_region(iter, cxlr, cxled, pos);
1659 if (rc)
1660 goto err;
1661 }
1662
1663 return 0;
1664
1665 err:
1666 for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
1667 iter = to_cxl_port(iter->dev.parent))
1668 cxl_port_detach_region(iter, cxlr, cxled);
1669 return rc;
1670 }
1671
1672 static int cxl_region_attach_auto(struct cxl_region *cxlr,
1673 struct cxl_endpoint_decoder *cxled, int pos)
1674 {
1675 struct cxl_region_params *p = &cxlr->params;
1676
1677 if (cxled->state != CXL_DECODER_STATE_AUTO) {
1678 dev_err(&cxlr->dev,
1679 "%s: unable to add decoder to autodetected region\n",
1680 dev_name(&cxled->cxld.dev));
1681 return -EINVAL;
1682 }
1683
1684 if (pos >= 0) {
1685 dev_dbg(&cxlr->dev, "%s: expected auto position, not %d\n",
1686 dev_name(&cxled->cxld.dev), pos);
1687 return -EINVAL;
1688 }
1689
1690 if (p->nr_targets >= p->interleave_ways) {
1691 dev_err(&cxlr->dev, "%s: no more target slots available\n",
1692 dev_name(&cxled->cxld.dev));
1693 return -ENXIO;
1694 }
1695
1696 /*
1697 * Temporarily record the endpoint decoder into the target array. Yes,
1698 * this means that userspace can view devices in the wrong position
1699 * before the region activates, and must be careful to understand when
1700 * it might be racing region autodiscovery.
1701 */
1702 pos = p->nr_targets;
1703 p->targets[pos] = cxled;
1704 cxled->pos = pos;
1705 p->nr_targets++;
1706
1707 return 0;
1708 }
1709
1710 static int cmp_interleave_pos(const void *a, const void *b)
1711 {
1712 struct cxl_endpoint_decoder *cxled_a = *(typeof(cxled_a) *)a;
1713 struct cxl_endpoint_decoder *cxled_b = *(typeof(cxled_b) *)b;
1714
1715 return cxled_a->pos - cxled_b->pos;
1716 }
1717
1718 static struct cxl_port *next_port(struct cxl_port *port)
1719 {
1720 if (!port->parent_dport)
1721 return NULL;
1722 return port->parent_dport->port;
1723 }
1724
1725 static int match_switch_decoder_by_range(struct device *dev, void *data)
1726 {
1727 struct cxl_switch_decoder *cxlsd;
1728 struct range *r1, *r2 = data;
1729
1730 if (!is_switch_decoder(dev))
1731 return 0;
1732
1733 cxlsd = to_cxl_switch_decoder(dev);
1734 r1 = &cxlsd->cxld.hpa_range;
1735
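/*
 * Descriptive note: a root decoder window may span many regions, so
 * containment suffices; switch decoders are programmed to the exact region
 * range and must match it exactly.
 */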
1736 if (is_root_decoder(dev))
1737 return range_contains(r1, r2);
1738 return (r1->start == r2->start && r1->end == r2->end);
1739 }
1740
1741 static int find_pos_and_ways(struct cxl_port *port, struct range *range,
1742 int *pos, int *ways)
1743 {
1744 struct cxl_switch_decoder *cxlsd;
1745 struct cxl_port *parent;
1746 struct device *dev;
1747 int rc = -ENXIO;
1748
1749 parent = next_port(port);
1750 if (!parent)
1751 return rc;
1752
1753 dev = device_find_child(&parent->dev, range,
1754 match_switch_decoder_by_range);
1755 if (!dev) {
1756 dev_err(port->uport_dev,
1757 "failed to find decoder mapping %#llx-%#llx\n",
1758 range->start, range->end);
1759 return rc;
1760 }
1761 cxlsd = to_cxl_switch_decoder(dev);
1762 *ways = cxlsd->cxld.interleave_ways;
1763
1764 for (int i = 0; i < *ways; i++) {
1765 if (cxlsd->target[i] == port->parent_dport) {
1766 *pos = i;
1767 rc = 0;
1768 break;
1769 }
1770 }
1771 put_device(dev);
1772
1773 return rc;
1774 }
1775
1776 /**
1777 * cxl_calc_interleave_pos() - calculate an endpoint position in a region
1778 * @cxled: endpoint decoder member of given region
1779 *
1780 * The endpoint position is calculated by traversing the topology from
1781 * the endpoint to the root decoder and iteratively applying this
1782 * calculation:
1783 *
1784 * position = position * parent_ways + parent_pos;
1785 *
1786 * ...where @position is inferred from switch and root decoder target lists.
1787 *
1788 * Return: position >= 0 on success
1789 * -ENXIO on failure
1790 */
1791 static int cxl_calc_interleave_pos(struct cxl_endpoint_decoder *cxled)
1792 {
1793 struct cxl_port *iter, *port = cxled_to_port(cxled);
1794 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
1795 struct range *range = &cxled->cxld.hpa_range;
1796 int parent_ways = 0, parent_pos = 0, pos = 0;
1797 int rc;
1798
1799 /*
1800 * Example: the expected interleave order of the 4-way region shown
1801 * below is: mem0, mem2, mem1, mem3
1802 *
1803 * root_port
1804 * / \
1805 * host_bridge_0 host_bridge_1
1806 * | | | |
1807 * mem0 mem1 mem2 mem3
1808 *
1809 * In the example the calculator will iterate twice. The first iteration
1810 * uses the mem position in the host-bridge and the ways of the host-
1811 * bridge to generate the first, or local, position. The second
1812 * iteration uses the host-bridge position in the root_port and the ways
1813 * of the root_port to refine the position.
1814 *
1815 * A trace of the calculation per endpoint looks like this:
1816 * mem0: pos = 0 * 2 + 0 mem2: pos = 0 * 2 + 0
1817 * pos = 0 * 2 + 0 pos = 0 * 2 + 1
1818 * pos: 0 pos: 1
1819 *
1820 * mem1: pos = 0 * 2 + 1 mem3: pos = 0 * 2 + 1
1821 * pos = 1 * 2 + 0 pos = 1 * 2 + 1
1822 * pos: 2 pos = 3
1823 *
1824 * Note that while this example is simple, the method applies to more
1825 * complex topologies, including those with switches.
1826 */
1827
1828 /* Iterate from endpoint to root_port refining the position */
1829 for (iter = port; iter; iter = next_port(iter)) {
1830 if (is_cxl_root(iter))
1831 break;
1832
1833 rc = find_pos_and_ways(iter, range, &parent_pos, &parent_ways);
1834 if (rc)
1835 return rc;
1836
1837 pos = pos * parent_ways + parent_pos;
1838 }
1839
1840 dev_dbg(&cxlmd->dev,
1841 "decoder:%s parent:%s port:%s range:%#llx-%#llx pos:%d\n",
1842 dev_name(&cxled->cxld.dev), dev_name(cxlmd->dev.parent),
1843 dev_name(&port->dev), range->start, range->end, pos);
1844
1845 return pos;
1846 }
1847
1848 static int cxl_region_sort_targets(struct cxl_region *cxlr)
1849 {
1850 struct cxl_region_params *p = &cxlr->params;
1851 int i, rc = 0;
1852
1853 for (i = 0; i < p->nr_targets; i++) {
1854 struct cxl_endpoint_decoder *cxled = p->targets[i];
1855
1856 cxled->pos = cxl_calc_interleave_pos(cxled);
1857 /*
1858 * Record that sorting failed, but still continue to calc
1859 * cxled->pos so that follow-on code paths can reliably
1860 * do p->targets[cxled->pos] to self-reference their entry.
1861 */
1862 if (cxled->pos < 0)
1863 rc = -ENXIO;
1864 }
1865 /* Keep the cxlr target list in interleave position order */
1866 sort(p->targets, p->nr_targets, sizeof(p->targets[0]),
1867 cmp_interleave_pos, NULL);
1868
1869 dev_dbg(&cxlr->dev, "region sort %s\n", rc ? "failed" : "successful");
1870 return rc;
1871 }
1872
1873 static int cxl_region_attach(struct cxl_region *cxlr,
1874 struct cxl_endpoint_decoder *cxled, int pos)
1875 {
1876 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
1877 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
1878 struct cxl_region_params *p = &cxlr->params;
1879 struct cxl_port *ep_port, *root_port;
1880 struct cxl_dport *dport;
1881 int rc = -ENXIO;
1882
1883 rc = check_interleave_cap(&cxled->cxld, p->interleave_ways,
1884 p->interleave_granularity);
1885 if (rc) {
1886 dev_dbg(&cxlr->dev, "%s iw: %d ig: %d is not supported\n",
1887 dev_name(&cxled->cxld.dev), p->interleave_ways,
1888 p->interleave_granularity);
1889 return rc;
1890 }
1891
1892 if (cxled->mode != cxlr->mode) {
1893 dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n",
1894 dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode);
1895 return -EINVAL;
1896 }
1897
1898 if (cxled->mode == CXL_DECODER_DEAD) {
1899 dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev));
1900 return -ENODEV;
1901 }
1902
1903 	/* already full of members, or interleave config not established? */
1904 if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) {
1905 dev_dbg(&cxlr->dev, "region already active\n");
1906 return -EBUSY;
1907 } else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
1908 dev_dbg(&cxlr->dev, "interleave config missing\n");
1909 return -ENXIO;
1910 }
1911
1912 if (p->nr_targets >= p->interleave_ways) {
1913 dev_dbg(&cxlr->dev, "region already has %d endpoints\n",
1914 p->nr_targets);
1915 return -EINVAL;
1916 }
1917
1918 ep_port = cxled_to_port(cxled);
1919 root_port = cxlrd_to_port(cxlrd);
1920 dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge);
1921 if (!dport) {
1922 dev_dbg(&cxlr->dev, "%s:%s invalid target for %s\n",
1923 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
1924 dev_name(cxlr->dev.parent));
1925 return -ENXIO;
1926 }
1927
1928 if (cxled->cxld.target_type != cxlr->type) {
1929 dev_dbg(&cxlr->dev, "%s:%s type mismatch: %d vs %d\n",
1930 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
1931 cxled->cxld.target_type, cxlr->type);
1932 return -ENXIO;
1933 }
1934
1935 if (!cxled->dpa_res) {
1936 dev_dbg(&cxlr->dev, "%s:%s: missing DPA allocation.\n",
1937 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev));
1938 return -ENXIO;
1939 }
1940
1941 if (resource_size(cxled->dpa_res) * p->interleave_ways !=
1942 resource_size(p->res)) {
1943 dev_dbg(&cxlr->dev,
1944 "%s:%s: decoder-size-%#llx * ways-%d != region-size-%#llx\n",
1945 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
1946 (u64)resource_size(cxled->dpa_res), p->interleave_ways,
1947 (u64)resource_size(p->res));
1948 return -EINVAL;
1949 }
1950
1951 cxl_region_perf_data_calculate(cxlr, cxled);
1952
1953 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
1954 int i;
1955
1956 rc = cxl_region_attach_auto(cxlr, cxled, pos);
1957 if (rc)
1958 return rc;
1959
1960 /* await more targets to arrive... */
1961 if (p->nr_targets < p->interleave_ways)
1962 return 0;
1963
1964 /*
1965 * All targets are here, which implies all PCI enumeration that
1966 * affects this region has been completed. Walk the topology to
1967 * sort the devices into their relative region decode position.
1968 */
1969 rc = cxl_region_sort_targets(cxlr);
1970 if (rc)
1971 return rc;
1972
1973 for (i = 0; i < p->nr_targets; i++) {
1974 cxled = p->targets[i];
1975 ep_port = cxled_to_port(cxled);
1976 dport = cxl_find_dport_by_dev(root_port,
1977 ep_port->host_bridge);
1978 rc = cxl_region_attach_position(cxlr, cxlrd, cxled,
1979 dport, i);
1980 if (rc)
1981 return rc;
1982 }
1983
1984 rc = cxl_region_setup_targets(cxlr);
1985 if (rc)
1986 return rc;
1987
1988 /*
1989 * If target setup succeeds in the autodiscovery case
1990 * then the region is already committed.
1991 */
1992 p->state = CXL_CONFIG_COMMIT;
1993 cxl_region_shared_upstream_bandwidth_update(cxlr);
1994
1995 return 0;
1996 }
1997
1998 rc = cxl_region_validate_position(cxlr, cxled, pos);
1999 if (rc)
2000 return rc;
2001
2002 rc = cxl_region_attach_position(cxlr, cxlrd, cxled, dport, pos);
2003 if (rc)
2004 return rc;
2005
2006 p->targets[pos] = cxled;
2007 cxled->pos = pos;
2008 p->nr_targets++;
2009
2010 if (p->nr_targets == p->interleave_ways) {
2011 rc = cxl_region_setup_targets(cxlr);
2012 if (rc)
2013 return rc;
2014 p->state = CXL_CONFIG_ACTIVE;
2015 cxl_region_shared_upstream_bandwidth_update(cxlr);
2016 }
2017
2018 cxled->cxld.interleave_ways = p->interleave_ways;
2019 cxled->cxld.interleave_granularity = p->interleave_granularity;
2020 cxled->cxld.hpa_range = (struct range) {
2021 .start = p->res->start,
2022 .end = p->res->end,
2023 };
2024
2025 if (p->nr_targets != p->interleave_ways)
2026 return 0;
2027
2028 /*
2029 * Test the auto-discovery position calculator function
2030 * against this successfully created user-defined region.
2031 * A fail message here means that this interleave config
2032 * will fail when presented as CXL_REGION_F_AUTO.
2033 */
2034 for (int i = 0; i < p->nr_targets; i++) {
2035 struct cxl_endpoint_decoder *cxled = p->targets[i];
2036 int test_pos;
2037
2038 test_pos = cxl_calc_interleave_pos(cxled);
2039 dev_dbg(&cxled->cxld.dev,
2040 "Test cxl_calc_interleave_pos(): %s test_pos:%d cxled->pos:%d\n",
2041 (test_pos == cxled->pos) ? "success" : "fail",
2042 test_pos, cxled->pos);
2043 }
2044
2045 return 0;
2046 }
2047
2048 static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
2049 {
2050 struct cxl_port *iter, *ep_port = cxled_to_port(cxled);
2051 struct cxl_region *cxlr = cxled->cxld.region;
2052 struct cxl_region_params *p;
2053 int rc = 0;
2054
2055 lockdep_assert_held_write(&cxl_region_rwsem);
2056
2057 if (!cxlr)
2058 return 0;
2059
2060 p = &cxlr->params;
2061 get_device(&cxlr->dev);
2062
2063 if (p->state > CXL_CONFIG_ACTIVE) {
2064 cxl_region_decode_reset(cxlr, p->interleave_ways);
2065 p->state = CXL_CONFIG_ACTIVE;
2066 }
2067
2068 for (iter = ep_port; !is_cxl_root(iter);
2069 iter = to_cxl_port(iter->dev.parent))
2070 cxl_port_detach_region(iter, cxlr, cxled);
2071
2072 if (cxled->pos < 0 || cxled->pos >= p->interleave_ways ||
2073 p->targets[cxled->pos] != cxled) {
2074 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
2075
2076 dev_WARN_ONCE(&cxlr->dev, 1, "expected %s:%s at position %d\n",
2077 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
2078 cxled->pos);
2079 goto out;
2080 }
2081
2082 if (p->state == CXL_CONFIG_ACTIVE) {
2083 p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
2084 cxl_region_teardown_targets(cxlr);
2085 }
2086 p->targets[cxled->pos] = NULL;
2087 p->nr_targets--;
2088 cxled->cxld.hpa_range = (struct range) {
2089 .start = 0,
2090 .end = -1,
2091 };
2092
2093 /* notify the region driver that one of its targets has departed */
2094 up_write(&cxl_region_rwsem);
2095 device_release_driver(&cxlr->dev);
2096 down_write(&cxl_region_rwsem);
2097 out:
2098 put_device(&cxlr->dev);
2099 return rc;
2100 }
2101
2102 void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
2103 {
2104 down_write(&cxl_region_rwsem);
2105 cxled->mode = CXL_DECODER_DEAD;
2106 cxl_region_detach(cxled);
2107 up_write(&cxl_region_rwsem);
2108 }
2109
2110 static int attach_target(struct cxl_region *cxlr,
2111 struct cxl_endpoint_decoder *cxled, int pos,
2112 unsigned int state)
2113 {
2114 int rc = 0;
2115
2116 if (state == TASK_INTERRUPTIBLE)
2117 rc = down_write_killable(&cxl_region_rwsem);
2118 else
2119 down_write(&cxl_region_rwsem);
2120 if (rc)
2121 return rc;
2122
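	/*
	 * Descriptive note: cxl_dpa_rwsem is taken for read so the endpoint
	 * decoder's DPA allocation (cxled->dpa_res) stays stable while
	 * cxl_region_attach() validates it against the region's size and
	 * interleave geometry.
	 */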
2123 down_read(&cxl_dpa_rwsem);
2124 rc = cxl_region_attach(cxlr, cxled, pos);
2125 up_read(&cxl_dpa_rwsem);
2126 up_write(&cxl_region_rwsem);
2127 return rc;
2128 }
2129
2130 static int detach_target(struct cxl_region *cxlr, int pos)
2131 {
2132 struct cxl_region_params *p = &cxlr->params;
2133 int rc;
2134
2135 rc = down_write_killable(&cxl_region_rwsem);
2136 if (rc)
2137 return rc;
2138
2139 if (pos >= p->interleave_ways) {
2140 dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
2141 p->interleave_ways);
2142 rc = -ENXIO;
2143 goto out;
2144 }
2145
2146 if (!p->targets[pos]) {
2147 rc = 0;
2148 goto out;
2149 }
2150
2151 rc = cxl_region_detach(p->targets[pos]);
2152 out:
2153 up_write(&cxl_region_rwsem);
2154 return rc;
2155 }
2156
2157 static ssize_t store_targetN(struct cxl_region *cxlr, const char *buf, int pos,
2158 size_t len)
2159 {
2160 int rc;
2161
2162 if (sysfs_streq(buf, "\n"))
2163 rc = detach_target(cxlr, pos);
2164 else {
2165 struct device *dev;
2166
2167 dev = bus_find_device_by_name(&cxl_bus_type, NULL, buf);
2168 if (!dev)
2169 return -ENODEV;
2170
2171 if (!is_endpoint_decoder(dev)) {
2172 rc = -EINVAL;
2173 goto out;
2174 }
2175
2176 rc = attach_target(cxlr, to_cxl_endpoint_decoder(dev), pos,
2177 TASK_INTERRUPTIBLE);
2178 out:
2179 put_device(dev);
2180 }
2181
2182 if (rc < 0)
2183 return rc;
2184 return len;
2185 }
2186
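/*
 * Illustrative (not normative) sysfs usage for the targetN attributes defined
 * below; the decoder name and region name are placeholders:
 *
 *   # attach endpoint decoder "decoder3.0" at interleave position 0
 *   echo decoder3.0 > /sys/bus/cxl/devices/region0/target0
 *
 *   # detach whatever occupies position 0
 *   echo "" > /sys/bus/cxl/devices/region0/target0
 *
 * store_targetN() resolves the written name on the CXL bus and only accepts
 * endpoint decoders; an empty write maps to detach_target().
 */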
2187 #define TARGET_ATTR_RW(n) \
2188 static ssize_t target##n##_show( \
2189 struct device *dev, struct device_attribute *attr, char *buf) \
2190 { \
2191 return show_targetN(to_cxl_region(dev), buf, (n)); \
2192 } \
2193 static ssize_t target##n##_store(struct device *dev, \
2194 struct device_attribute *attr, \
2195 const char *buf, size_t len) \
2196 { \
2197 return store_targetN(to_cxl_region(dev), buf, (n), len); \
2198 } \
2199 static DEVICE_ATTR_RW(target##n)
2200
2201 TARGET_ATTR_RW(0);
2202 TARGET_ATTR_RW(1);
2203 TARGET_ATTR_RW(2);
2204 TARGET_ATTR_RW(3);
2205 TARGET_ATTR_RW(4);
2206 TARGET_ATTR_RW(5);
2207 TARGET_ATTR_RW(6);
2208 TARGET_ATTR_RW(7);
2209 TARGET_ATTR_RW(8);
2210 TARGET_ATTR_RW(9);
2211 TARGET_ATTR_RW(10);
2212 TARGET_ATTR_RW(11);
2213 TARGET_ATTR_RW(12);
2214 TARGET_ATTR_RW(13);
2215 TARGET_ATTR_RW(14);
2216 TARGET_ATTR_RW(15);
2217
2218 static struct attribute *target_attrs[] = {
2219 &dev_attr_target0.attr,
2220 &dev_attr_target1.attr,
2221 &dev_attr_target2.attr,
2222 &dev_attr_target3.attr,
2223 &dev_attr_target4.attr,
2224 &dev_attr_target5.attr,
2225 &dev_attr_target6.attr,
2226 &dev_attr_target7.attr,
2227 &dev_attr_target8.attr,
2228 &dev_attr_target9.attr,
2229 &dev_attr_target10.attr,
2230 &dev_attr_target11.attr,
2231 &dev_attr_target12.attr,
2232 &dev_attr_target13.attr,
2233 &dev_attr_target14.attr,
2234 &dev_attr_target15.attr,
2235 NULL,
2236 };
2237
2238 static umode_t cxl_region_target_visible(struct kobject *kobj,
2239 struct attribute *a, int n)
2240 {
2241 struct device *dev = kobj_to_dev(kobj);
2242 struct cxl_region *cxlr = to_cxl_region(dev);
2243 struct cxl_region_params *p = &cxlr->params;
2244
2245 if (n < p->interleave_ways)
2246 return a->mode;
2247 return 0;
2248 }
2249
2250 static const struct attribute_group cxl_region_target_group = {
2251 .attrs = target_attrs,
2252 .is_visible = cxl_region_target_visible,
2253 };
2254
2255 static const struct attribute_group *get_cxl_region_target_group(void)
2256 {
2257 return &cxl_region_target_group;
2258 }
2259
2260 static const struct attribute_group *region_groups[] = {
2261 &cxl_base_attribute_group,
2262 &cxl_region_group,
2263 &cxl_region_target_group,
2264 &cxl_region_access0_coordinate_group,
2265 &cxl_region_access1_coordinate_group,
2266 NULL,
2267 };
2268
2269 static void cxl_region_release(struct device *dev)
2270 {
2271 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
2272 struct cxl_region *cxlr = to_cxl_region(dev);
2273 int id = atomic_read(&cxlrd->region_id);
2274
2275 /*
2276 * Try to reuse the recently idled id rather than the cached
2277 * next id to prevent the region id space from increasing
2278 * unnecessarily.
2279 */
2280 if (cxlr->id < id)
2281 if (atomic_try_cmpxchg(&cxlrd->region_id, &id, cxlr->id)) {
2282 memregion_free(id);
2283 goto out;
2284 }
2285
2286 memregion_free(cxlr->id);
2287 out:
2288 put_device(dev->parent);
2289 kfree(cxlr);
2290 }
2291
2292 const struct device_type cxl_region_type = {
2293 .name = "cxl_region",
2294 .release = cxl_region_release,
2295 .groups = region_groups
2296 };
2297
2298 bool is_cxl_region(struct device *dev)
2299 {
2300 return dev->type == &cxl_region_type;
2301 }
2302 EXPORT_SYMBOL_NS_GPL(is_cxl_region, CXL);
2303
2304 static struct cxl_region *to_cxl_region(struct device *dev)
2305 {
2306 if (dev_WARN_ONCE(dev, dev->type != &cxl_region_type,
2307 "not a cxl_region device\n"))
2308 return NULL;
2309
2310 return container_of(dev, struct cxl_region, dev);
2311 }
2312
2313 static void unregister_region(void *_cxlr)
2314 {
2315 struct cxl_region *cxlr = _cxlr;
2316 struct cxl_region_params *p = &cxlr->params;
2317 int i;
2318
2319 device_del(&cxlr->dev);
2320
2321 /*
2322 	 * Now that region sysfs is shut down, the parameter block is
2323 	 * read-only, so there is no need to hold the region rwsem to
2324 	 * access the region parameters.
2325 */
2326 for (i = 0; i < p->interleave_ways; i++)
2327 detach_target(cxlr, i);
2328
2329 cxl_region_iomem_release(cxlr);
2330 put_device(&cxlr->dev);
2331 }
2332
2333 static struct lock_class_key cxl_region_key;
2334
2335 static struct cxl_region *cxl_region_alloc(struct cxl_root_decoder *cxlrd, int id)
2336 {
2337 struct cxl_region *cxlr;
2338 struct device *dev;
2339
2340 cxlr = kzalloc(sizeof(*cxlr), GFP_KERNEL);
2341 if (!cxlr) {
2342 memregion_free(id);
2343 return ERR_PTR(-ENOMEM);
2344 }
2345
2346 dev = &cxlr->dev;
2347 device_initialize(dev);
2348 lockdep_set_class(&dev->mutex, &cxl_region_key);
2349 dev->parent = &cxlrd->cxlsd.cxld.dev;
2350 /*
2351 * Keep root decoder pinned through cxl_region_release to fixup
2352 * region id allocations
2353 */
2354 get_device(dev->parent);
2355 device_set_pm_not_required(dev);
2356 dev->bus = &cxl_bus_type;
2357 dev->type = &cxl_region_type;
2358 cxlr->id = id;
2359
2360 return cxlr;
2361 }
2362
2363 static bool cxl_region_update_coordinates(struct cxl_region *cxlr, int nid)
2364 {
2365 int cset = 0;
2366 int rc;
2367
2368 for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
2369 if (cxlr->coord[i].read_bandwidth) {
2370 rc = 0;
2371 if (cxl_need_node_perf_attrs_update(nid))
2372 node_set_perf_attrs(nid, &cxlr->coord[i], i);
2373 else
2374 rc = cxl_update_hmat_access_coordinates(nid, cxlr, i);
2375
2376 if (rc == 0)
2377 cset++;
2378 }
2379 }
2380
2381 if (!cset)
2382 return false;
2383
2384 rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_access0_group());
2385 if (rc)
2386 dev_dbg(&cxlr->dev, "Failed to update access0 group\n");
2387
2388 rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_access1_group());
2389 if (rc)
2390 dev_dbg(&cxlr->dev, "Failed to update access1 group\n");
2391
2392 return true;
2393 }
2394
2395 static int cxl_region_perf_attrs_callback(struct notifier_block *nb,
2396 unsigned long action, void *arg)
2397 {
2398 struct cxl_region *cxlr = container_of(nb, struct cxl_region,
2399 memory_notifier);
2400 struct memory_notify *mnb = arg;
2401 int nid = mnb->status_change_nid;
2402 int region_nid;
2403
2404 if (nid == NUMA_NO_NODE || action != MEM_ONLINE)
2405 return NOTIFY_DONE;
2406
2407 /*
2408 * No need to hold cxl_region_rwsem; region parameters are stable
2409 * within the cxl_region driver.
2410 */
2411 region_nid = phys_to_target_node(cxlr->params.res->start);
2412 if (nid != region_nid)
2413 return NOTIFY_DONE;
2414
2415 if (!cxl_region_update_coordinates(cxlr, nid))
2416 return NOTIFY_DONE;
2417
2418 return NOTIFY_OK;
2419 }
2420
2421 static int cxl_region_calculate_adistance(struct notifier_block *nb,
2422 unsigned long nid, void *data)
2423 {
2424 struct cxl_region *cxlr = container_of(nb, struct cxl_region,
2425 adist_notifier);
2426 struct access_coordinate *perf;
2427 int *adist = data;
2428 int region_nid;
2429
2430 /*
2431 * No need to hold cxl_region_rwsem; region parameters are stable
2432 * within the cxl_region driver.
2433 */
2434 region_nid = phys_to_target_node(cxlr->params.res->start);
2435 if (nid != region_nid)
2436 return NOTIFY_OK;
2437
2438 perf = &cxlr->coord[ACCESS_COORDINATE_CPU];
2439
2440 if (mt_perf_to_adistance(perf, adist))
2441 return NOTIFY_OK;
2442
2443 return NOTIFY_STOP;
2444 }
2445
2446 /**
2447 * devm_cxl_add_region - Adds a region to a decoder
2448 * @cxlrd: root decoder
2449 * @id: memregion id to create, or memregion_free() on failure
2450 * @mode: mode for the endpoint decoders of this region
2451 * @type: select whether this is an expander or accelerator (type-2 or type-3)
2452 *
2453 * This is the second step of region initialization. Regions exist within an
2454 * address space which is mapped by a @cxlrd.
2455 *
2456 * Return: 0 if the region was added to the @cxlrd, else returns negative error
2457 * code. The region will be named "regionZ" where Z is the unique region number.
2458 */
2459 static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
2460 int id,
2461 enum cxl_decoder_mode mode,
2462 enum cxl_decoder_type type)
2463 {
2464 struct cxl_port *port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent);
2465 struct cxl_region *cxlr;
2466 struct device *dev;
2467 int rc;
2468
2469 cxlr = cxl_region_alloc(cxlrd, id);
2470 if (IS_ERR(cxlr))
2471 return cxlr;
2472 cxlr->mode = mode;
2473 cxlr->type = type;
2474
2475 dev = &cxlr->dev;
2476 rc = dev_set_name(dev, "region%d", id);
2477 if (rc)
2478 goto err;
2479
2480 rc = device_add(dev);
2481 if (rc)
2482 goto err;
2483
2484 rc = devm_add_action_or_reset(port->uport_dev, unregister_region, cxlr);
2485 if (rc)
2486 return ERR_PTR(rc);
2487
2488 dev_dbg(port->uport_dev, "%s: created %s\n",
2489 dev_name(&cxlrd->cxlsd.cxld.dev), dev_name(dev));
2490 return cxlr;
2491
2492 err:
2493 put_device(dev);
2494 return ERR_PTR(rc);
2495 }
2496
2497 static ssize_t __create_region_show(struct cxl_root_decoder *cxlrd, char *buf)
2498 {
2499 return sysfs_emit(buf, "region%u\n", atomic_read(&cxlrd->region_id));
2500 }
2501
2502 static ssize_t create_pmem_region_show(struct device *dev,
2503 struct device_attribute *attr, char *buf)
2504 {
2505 return __create_region_show(to_cxl_root_decoder(dev), buf);
2506 }
2507
2508 static ssize_t create_ram_region_show(struct device *dev,
2509 struct device_attribute *attr, char *buf)
2510 {
2511 return __create_region_show(to_cxl_root_decoder(dev), buf);
2512 }
2513
2514 static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
2515 enum cxl_decoder_mode mode, int id)
2516 {
2517 int rc;
2518
2519 switch (mode) {
2520 case CXL_DECODER_RAM:
2521 case CXL_DECODER_PMEM:
2522 break;
2523 default:
2524 dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode);
2525 return ERR_PTR(-EINVAL);
2526 }
2527
2528 rc = memregion_alloc(GFP_KERNEL);
2529 if (rc < 0)
2530 return ERR_PTR(rc);
2531
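	/*
	 * Descriptive note: @id is the value the caller read back from
	 * create_{pmem,ram}_region, so the cmpxchg below only installs the
	 * newly allocated id if no other writer consumed @id first; losing
	 * that race releases the fresh allocation and returns -EBUSY.
	 */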
2532 if (atomic_cmpxchg(&cxlrd->region_id, id, rc) != id) {
2533 memregion_free(rc);
2534 return ERR_PTR(-EBUSY);
2535 }
2536
2537 return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_HOSTONLYMEM);
2538 }
2539
2540 static ssize_t create_pmem_region_store(struct device *dev,
2541 struct device_attribute *attr,
2542 const char *buf, size_t len)
2543 {
2544 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
2545 struct cxl_region *cxlr;
2546 int rc, id;
2547
2548 rc = sscanf(buf, "region%d\n", &id);
2549 if (rc != 1)
2550 return -EINVAL;
2551
2552 cxlr = __create_region(cxlrd, CXL_DECODER_PMEM, id);
2553 if (IS_ERR(cxlr))
2554 return PTR_ERR(cxlr);
2555
2556 return len;
2557 }
2558 DEVICE_ATTR_RW(create_pmem_region);
2559
2560 static ssize_t create_ram_region_store(struct device *dev,
2561 struct device_attribute *attr,
2562 const char *buf, size_t len)
2563 {
2564 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
2565 struct cxl_region *cxlr;
2566 int rc, id;
2567
2568 rc = sscanf(buf, "region%d\n", &id);
2569 if (rc != 1)
2570 return -EINVAL;
2571
2572 cxlr = __create_region(cxlrd, CXL_DECODER_RAM, id);
2573 if (IS_ERR(cxlr))
2574 return PTR_ERR(cxlr);
2575
2576 return len;
2577 }
2578 DEVICE_ATTR_RW(create_ram_region);
2579
2580 static ssize_t region_show(struct device *dev, struct device_attribute *attr,
2581 char *buf)
2582 {
2583 struct cxl_decoder *cxld = to_cxl_decoder(dev);
2584 ssize_t rc;
2585
2586 rc = down_read_interruptible(&cxl_region_rwsem);
2587 if (rc)
2588 return rc;
2589
2590 if (cxld->region)
2591 rc = sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev));
2592 else
2593 rc = sysfs_emit(buf, "\n");
2594 up_read(&cxl_region_rwsem);
2595
2596 return rc;
2597 }
2598 DEVICE_ATTR_RO(region);
2599
2600 static struct cxl_region *
2601 cxl_find_region_by_name(struct cxl_root_decoder *cxlrd, const char *name)
2602 {
2603 struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
2604 struct device *region_dev;
2605
2606 region_dev = device_find_child_by_name(&cxld->dev, name);
2607 if (!region_dev)
2608 return ERR_PTR(-ENODEV);
2609
2610 return to_cxl_region(region_dev);
2611 }
2612
2613 static ssize_t delete_region_store(struct device *dev,
2614 struct device_attribute *attr,
2615 const char *buf, size_t len)
2616 {
2617 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
2618 struct cxl_port *port = to_cxl_port(dev->parent);
2619 struct cxl_region *cxlr;
2620
2621 cxlr = cxl_find_region_by_name(cxlrd, buf);
2622 if (IS_ERR(cxlr))
2623 return PTR_ERR(cxlr);
2624
2625 devm_release_action(port->uport_dev, unregister_region, cxlr);
2626 put_device(&cxlr->dev);
2627
2628 return len;
2629 }
2630 DEVICE_ATTR_WO(delete_region);
2631
2632 static void cxl_pmem_region_release(struct device *dev)
2633 {
2634 struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
2635 int i;
2636
2637 for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
2638 struct cxl_memdev *cxlmd = cxlr_pmem->mapping[i].cxlmd;
2639
2640 put_device(&cxlmd->dev);
2641 }
2642
2643 kfree(cxlr_pmem);
2644 }
2645
2646 static const struct attribute_group *cxl_pmem_region_attribute_groups[] = {
2647 &cxl_base_attribute_group,
2648 NULL,
2649 };
2650
2651 const struct device_type cxl_pmem_region_type = {
2652 .name = "cxl_pmem_region",
2653 .release = cxl_pmem_region_release,
2654 .groups = cxl_pmem_region_attribute_groups,
2655 };
2656
2657 bool is_cxl_pmem_region(struct device *dev)
2658 {
2659 return dev->type == &cxl_pmem_region_type;
2660 }
2661 EXPORT_SYMBOL_NS_GPL(is_cxl_pmem_region, CXL);
2662
2663 struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
2664 {
2665 if (dev_WARN_ONCE(dev, !is_cxl_pmem_region(dev),
2666 "not a cxl_pmem_region device\n"))
2667 return NULL;
2668 return container_of(dev, struct cxl_pmem_region, dev);
2669 }
2670 EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, CXL);
2671
2672 struct cxl_poison_context {
2673 struct cxl_port *port;
2674 enum cxl_decoder_mode mode;
2675 u64 offset;
2676 };
2677
2678 static int cxl_get_poison_unmapped(struct cxl_memdev *cxlmd,
2679 struct cxl_poison_context *ctx)
2680 {
2681 struct cxl_dev_state *cxlds = cxlmd->cxlds;
2682 u64 offset, length;
2683 int rc = 0;
2684
2685 /*
2686 * Collect poison for the remaining unmapped resources
2687 * after poison is collected by committed endpoints.
2688 *
2689 * Knowing that PMEM must always follow RAM, get poison
2690 * for unmapped resources based on the last decoder's mode:
2691 * ram: scan remains of ram range, then any pmem range
2692 * pmem: scan remains of pmem range
2693 */
2694
2695 if (ctx->mode == CXL_DECODER_RAM) {
2696 offset = ctx->offset;
2697 length = resource_size(&cxlds->ram_res) - offset;
2698 rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
2699 if (rc == -EFAULT)
2700 rc = 0;
2701 if (rc)
2702 return rc;
2703 }
2704 if (ctx->mode == CXL_DECODER_PMEM) {
2705 offset = ctx->offset;
2706 length = resource_size(&cxlds->dpa_res) - offset;
2707 if (!length)
2708 return 0;
2709 } else if (resource_size(&cxlds->pmem_res)) {
2710 offset = cxlds->pmem_res.start;
2711 length = resource_size(&cxlds->pmem_res);
2712 } else {
2713 return 0;
2714 }
2715
2716 return cxl_mem_get_poison(cxlmd, offset, length, NULL);
2717 }
2718
2719 static int poison_by_decoder(struct device *dev, void *arg)
2720 {
2721 struct cxl_poison_context *ctx = arg;
2722 struct cxl_endpoint_decoder *cxled;
2723 struct cxl_memdev *cxlmd;
2724 u64 offset, length;
2725 int rc = 0;
2726
2727 if (!is_endpoint_decoder(dev))
2728 return rc;
2729
2730 cxled = to_cxl_endpoint_decoder(dev);
2731 if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
2732 return rc;
2733
2734 /*
2735 * Regions are only created with single mode decoders: pmem or ram.
2736 * Linux does not support mixed mode decoders. This means that
2737 * reading poison per endpoint decoder adheres to the requirement
2738 * that poison reads of pmem and ram must be separated.
2739 * CXL 3.0 Spec 8.2.9.8.4.1
2740 */
2741 if (cxled->mode == CXL_DECODER_MIXED) {
2742 dev_dbg(dev, "poison list read unsupported in mixed mode\n");
2743 return rc;
2744 }
2745
2746 cxlmd = cxled_to_memdev(cxled);
2747 if (cxled->skip) {
2748 offset = cxled->dpa_res->start - cxled->skip;
2749 length = cxled->skip;
2750 rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
2751 if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM)
2752 rc = 0;
2753 if (rc)
2754 return rc;
2755 }
2756
2757 offset = cxled->dpa_res->start;
2758 length = cxled->dpa_res->end - offset + 1;
2759 rc = cxl_mem_get_poison(cxlmd, offset, length, cxled->cxld.region);
2760 if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM)
2761 rc = 0;
2762 if (rc)
2763 return rc;
2764
2765 /* Iterate until commit_end is reached */
2766 if (cxled->cxld.id == ctx->port->commit_end) {
2767 ctx->offset = cxled->dpa_res->end + 1;
2768 ctx->mode = cxled->mode;
2769 return 1;
2770 }
2771
2772 return 0;
2773 }
2774
2775 int cxl_get_poison_by_endpoint(struct cxl_port *port)
2776 {
2777 struct cxl_poison_context ctx;
2778 int rc = 0;
2779
2780 ctx = (struct cxl_poison_context) {
2781 .port = port
2782 };
2783
2784 rc = device_for_each_child(&port->dev, &ctx, poison_by_decoder);
2785 if (rc == 1)
2786 rc = cxl_get_poison_unmapped(to_cxl_memdev(port->uport_dev),
2787 &ctx);
2788
2789 return rc;
2790 }
2791
2792 struct cxl_dpa_to_region_context {
2793 struct cxl_region *cxlr;
2794 u64 dpa;
2795 };
2796
2797 static int __cxl_dpa_to_region(struct device *dev, void *arg)
2798 {
2799 struct cxl_dpa_to_region_context *ctx = arg;
2800 struct cxl_endpoint_decoder *cxled;
2801 struct cxl_region *cxlr;
2802 u64 dpa = ctx->dpa;
2803
2804 if (!is_endpoint_decoder(dev))
2805 return 0;
2806
2807 cxled = to_cxl_endpoint_decoder(dev);
2808 if (!cxled || !cxled->dpa_res || !resource_size(cxled->dpa_res))
2809 return 0;
2810
2811 if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start)
2812 return 0;
2813
2814 /*
2815 	 * Stop the region search (return 1) when an endpoint mapping is
2816 	 * found. The region may not be fully constructed yet, so a
2817 	 * non-NULL cxlr in the context structure is not guaranteed.
2818 */
2819 cxlr = cxled->cxld.region;
2820 if (cxlr)
2821 dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa,
2822 dev_name(&cxlr->dev));
2823 else
2824 dev_dbg(dev, "dpa:0x%llx mapped in endpoint:%s\n", dpa,
2825 dev_name(dev));
2826
2827 ctx->cxlr = cxlr;
2828
2829 return 1;
2830 }
2831
2832 struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa)
2833 {
2834 struct cxl_dpa_to_region_context ctx;
2835 struct cxl_port *port;
2836
2837 ctx = (struct cxl_dpa_to_region_context) {
2838 .dpa = dpa,
2839 };
2840 port = cxlmd->endpoint;
2841 if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port))
2842 device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);
2843
2844 return ctx.cxlr;
2845 }
2846
2847 static bool cxl_is_hpa_in_chunk(u64 hpa, struct cxl_region *cxlr, int pos)
2848 {
2849 struct cxl_region_params *p = &cxlr->params;
2850 int gran = p->interleave_granularity;
2851 int ways = p->interleave_ways;
2852 u64 offset;
2853
2854 /* Is the hpa in an expected chunk for its pos(-ition) */
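	/*
	 * For example (illustrative numbers): gran = 256 and ways = 2 give a
	 * 512-byte interleave cycle, so pos 1 expects offsets [256, 512)
	 * within each cycle.
	 */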
2855 offset = hpa - p->res->start;
2856 offset = do_div(offset, gran * ways);
2857 if ((offset >= pos * gran) && (offset < (pos + 1) * gran))
2858 return true;
2859
2860 dev_dbg(&cxlr->dev,
2861 "Addr trans fail: hpa 0x%llx not in expected chunk\n", hpa);
2862
2863 return false;
2864 }
2865
2866 u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
2867 u64 dpa)
2868 {
2869 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
2870 u64 dpa_offset, hpa_offset, bits_upper, mask_upper, hpa;
2871 struct cxl_region_params *p = &cxlr->params;
2872 struct cxl_endpoint_decoder *cxled = NULL;
2873 u16 eig = 0;
2874 u8 eiw = 0;
2875 int pos;
2876
2877 for (int i = 0; i < p->nr_targets; i++) {
2878 cxled = p->targets[i];
2879 if (cxlmd == cxled_to_memdev(cxled))
2880 break;
2881 }
2882 if (!cxled || cxlmd != cxled_to_memdev(cxled))
2883 return ULLONG_MAX;
2884
2885 pos = cxled->pos;
2886 ways_to_eiw(p->interleave_ways, &eiw);
2887 granularity_to_eig(p->interleave_granularity, &eig);
2888
2889 /*
2890 * The device position in the region interleave set was removed
2891 * from the offset at HPA->DPA translation. To reconstruct the
2892 * HPA, place the 'pos' in the offset.
2893 *
2894 * The placement of 'pos' in the HPA is determined by interleave
2895 * ways and granularity and is defined in the CXL Spec 3.0 Section
2896 * 8.2.4.19.13 Implementation Note: Device Decode Logic
2897 */
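	/*
	 * Worked example (illustrative, power-of-2 case only): with a 2-way
	 * interleave (eiw = 1), 256B granularity (eig = 0), pos = 1 and
	 * dpa_offset = 0x100, the math below yields
	 * hpa_offset = (0x100 << 1) | (1 << 8) = 0x300, i.e. the device's
	 * second 256B chunk maps to the fourth 256B chunk of the region.
	 */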
2898
2899 /* Remove the dpa base */
2900 dpa_offset = dpa - cxl_dpa_resource_start(cxled);
2901
2902 mask_upper = GENMASK_ULL(51, eig + 8);
2903
2904 if (eiw < 8) {
2905 hpa_offset = (dpa_offset & mask_upper) << eiw;
2906 hpa_offset |= pos << (eig + 8);
2907 } else {
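		/*
		 * Descriptive note: eiw encodings >= 8 correspond to 3-, 6-
		 * and 12-way interleave (ways = 3 << (eiw - 8)). Multiplying
		 * the upper DPA bits by 3 and then shifting by (eiw - 8)
		 * rebuilds chunk_index * ways before the position is added
		 * back in.
		 */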
2908 bits_upper = (dpa_offset & mask_upper) >> (eig + 8);
2909 bits_upper = bits_upper * 3;
2910 hpa_offset = ((bits_upper << (eiw - 8)) + pos) << (eig + 8);
2911 }
2912
2913 /* The lower bits remain unchanged */
2914 hpa_offset |= dpa_offset & GENMASK_ULL(eig + 7, 0);
2915
2916 /* Apply the hpa_offset to the region base address */
2917 hpa = hpa_offset + p->res->start;
2918
2919 /* Root decoder translation overrides typical modulo decode */
2920 if (cxlrd->hpa_to_spa)
2921 hpa = cxlrd->hpa_to_spa(cxlrd, hpa);
2922
2923 if (hpa < p->res->start || hpa > p->res->end) {
2924 dev_dbg(&cxlr->dev,
2925 "Addr trans fail: hpa 0x%llx not in region\n", hpa);
2926 return ULLONG_MAX;
2927 }
2928
2929 /* Simple chunk check, by pos & gran, only applies to modulo decodes */
2930 if (!cxlrd->hpa_to_spa && (!cxl_is_hpa_in_chunk(hpa, cxlr, pos)))
2931 return ULLONG_MAX;
2932
2933 return hpa;
2934 }
2935
2936 static struct lock_class_key cxl_pmem_region_key;
2937
2938 static int cxl_pmem_region_alloc(struct cxl_region *cxlr)
2939 {
2940 struct cxl_region_params *p = &cxlr->params;
2941 struct cxl_nvdimm_bridge *cxl_nvb;
2942 struct device *dev;
2943 int i;
2944
2945 guard(rwsem_read)(&cxl_region_rwsem);
2946 if (p->state != CXL_CONFIG_COMMIT)
2947 return -ENXIO;
2948
2949 struct cxl_pmem_region *cxlr_pmem __free(kfree) =
2950 kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets), GFP_KERNEL);
2951 if (!cxlr_pmem)
2952 return -ENOMEM;
2953
2954 cxlr_pmem->hpa_range.start = p->res->start;
2955 cxlr_pmem->hpa_range.end = p->res->end;
2956
2957 /* Snapshot the region configuration underneath the cxl_region_rwsem */
2958 cxlr_pmem->nr_mappings = p->nr_targets;
2959 for (i = 0; i < p->nr_targets; i++) {
2960 struct cxl_endpoint_decoder *cxled = p->targets[i];
2961 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
2962 struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
2963
2964 /*
2965 * Regions never span CXL root devices, so by definition the
2966 * bridge for one device is the same for all.
2967 */
2968 if (i == 0) {
2969 cxl_nvb = cxl_find_nvdimm_bridge(cxlmd->endpoint);
2970 if (!cxl_nvb)
2971 return -ENODEV;
2972 cxlr->cxl_nvb = cxl_nvb;
2973 }
2974 m->cxlmd = cxlmd;
2975 get_device(&cxlmd->dev);
2976 m->start = cxled->dpa_res->start;
2977 m->size = resource_size(cxled->dpa_res);
2978 m->position = i;
2979 }
2980
2981 dev = &cxlr_pmem->dev;
2982 device_initialize(dev);
2983 lockdep_set_class(&dev->mutex, &cxl_pmem_region_key);
2984 device_set_pm_not_required(dev);
2985 dev->parent = &cxlr->dev;
2986 dev->bus = &cxl_bus_type;
2987 dev->type = &cxl_pmem_region_type;
2988 cxlr_pmem->cxlr = cxlr;
2989 cxlr->cxlr_pmem = no_free_ptr(cxlr_pmem);
2990
2991 return 0;
2992 }
2993
2994 static void cxl_dax_region_release(struct device *dev)
2995 {
2996 struct cxl_dax_region *cxlr_dax = to_cxl_dax_region(dev);
2997
2998 kfree(cxlr_dax);
2999 }
3000
3001 static const struct attribute_group *cxl_dax_region_attribute_groups[] = {
3002 &cxl_base_attribute_group,
3003 NULL,
3004 };
3005
3006 const struct device_type cxl_dax_region_type = {
3007 .name = "cxl_dax_region",
3008 .release = cxl_dax_region_release,
3009 .groups = cxl_dax_region_attribute_groups,
3010 };
3011
3012 static bool is_cxl_dax_region(struct device *dev)
3013 {
3014 return dev->type == &cxl_dax_region_type;
3015 }
3016
3017 struct cxl_dax_region *to_cxl_dax_region(struct device *dev)
3018 {
3019 if (dev_WARN_ONCE(dev, !is_cxl_dax_region(dev),
3020 "not a cxl_dax_region device\n"))
3021 return NULL;
3022 return container_of(dev, struct cxl_dax_region, dev);
3023 }
3024 EXPORT_SYMBOL_NS_GPL(to_cxl_dax_region, CXL);
3025
3026 static struct lock_class_key cxl_dax_region_key;
3027
3028 static struct cxl_dax_region *cxl_dax_region_alloc(struct cxl_region *cxlr)
3029 {
3030 struct cxl_region_params *p = &cxlr->params;
3031 struct cxl_dax_region *cxlr_dax;
3032 struct device *dev;
3033
3034 down_read(&cxl_region_rwsem);
3035 if (p->state != CXL_CONFIG_COMMIT) {
3036 cxlr_dax = ERR_PTR(-ENXIO);
3037 goto out;
3038 }
3039
3040 cxlr_dax = kzalloc(sizeof(*cxlr_dax), GFP_KERNEL);
3041 if (!cxlr_dax) {
3042 cxlr_dax = ERR_PTR(-ENOMEM);
3043 goto out;
3044 }
3045
3046 cxlr_dax->hpa_range.start = p->res->start;
3047 cxlr_dax->hpa_range.end = p->res->end;
3048
3049 dev = &cxlr_dax->dev;
3050 cxlr_dax->cxlr = cxlr;
3051 device_initialize(dev);
3052 lockdep_set_class(&dev->mutex, &cxl_dax_region_key);
3053 device_set_pm_not_required(dev);
3054 dev->parent = &cxlr->dev;
3055 dev->bus = &cxl_bus_type;
3056 dev->type = &cxl_dax_region_type;
3057 out:
3058 up_read(&cxl_region_rwsem);
3059
3060 return cxlr_dax;
3061 }
3062
3063 static void cxlr_pmem_unregister(void *_cxlr_pmem)
3064 {
3065 struct cxl_pmem_region *cxlr_pmem = _cxlr_pmem;
3066 struct cxl_region *cxlr = cxlr_pmem->cxlr;
3067 struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;
3068
3069 /*
3070 * Either the bridge is in ->remove() context under the device_lock(),
3071 * or cxlr_release_nvdimm() is cancelling the bridge's release action
3072 * for @cxlr_pmem and doing it itself (while manually holding the bridge
3073 * lock).
3074 */
3075 device_lock_assert(&cxl_nvb->dev);
3076 cxlr->cxlr_pmem = NULL;
3077 cxlr_pmem->cxlr = NULL;
3078 device_unregister(&cxlr_pmem->dev);
3079 }
3080
3081 static void cxlr_release_nvdimm(void *_cxlr)
3082 {
3083 struct cxl_region *cxlr = _cxlr;
3084 struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;
3085
3086 scoped_guard(device, &cxl_nvb->dev) {
3087 if (cxlr->cxlr_pmem)
3088 devm_release_action(&cxl_nvb->dev, cxlr_pmem_unregister,
3089 cxlr->cxlr_pmem);
3090 }
3091 cxlr->cxl_nvb = NULL;
3092 put_device(&cxl_nvb->dev);
3093 }
3094
3095 /**
3096 * devm_cxl_add_pmem_region() - add a cxl_region-to-nd_region bridge
3097 * @cxlr: parent CXL region for this pmem region bridge device
3098 *
3099  * Return: 0 on success, negative error code on failure.
3100 */
3101 static int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
3102 {
3103 struct cxl_pmem_region *cxlr_pmem;
3104 struct cxl_nvdimm_bridge *cxl_nvb;
3105 struct device *dev;
3106 int rc;
3107
3108 rc = cxl_pmem_region_alloc(cxlr);
3109 if (rc)
3110 return rc;
3111 cxlr_pmem = cxlr->cxlr_pmem;
3112 cxl_nvb = cxlr->cxl_nvb;
3113
3114 dev = &cxlr_pmem->dev;
3115 rc = dev_set_name(dev, "pmem_region%d", cxlr->id);
3116 if (rc)
3117 goto err;
3118
3119 rc = device_add(dev);
3120 if (rc)
3121 goto err;
3122
3123 dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
3124 dev_name(dev));
3125
3126 scoped_guard(device, &cxl_nvb->dev) {
3127 if (cxl_nvb->dev.driver)
3128 rc = devm_add_action_or_reset(&cxl_nvb->dev,
3129 cxlr_pmem_unregister,
3130 cxlr_pmem);
3131 else
3132 rc = -ENXIO;
3133 }
3134
3135 if (rc)
3136 goto err_bridge;
3137
3138 /* @cxlr carries a reference on @cxl_nvb until cxlr_release_nvdimm */
3139 return devm_add_action_or_reset(&cxlr->dev, cxlr_release_nvdimm, cxlr);
3140
3141 err:
3142 put_device(dev);
3143 err_bridge:
3144 put_device(&cxl_nvb->dev);
3145 cxlr->cxl_nvb = NULL;
3146 return rc;
3147 }
3148
3149 static void cxlr_dax_unregister(void *_cxlr_dax)
3150 {
3151 struct cxl_dax_region *cxlr_dax = _cxlr_dax;
3152
3153 device_unregister(&cxlr_dax->dev);
3154 }
3155
3156 static int devm_cxl_add_dax_region(struct cxl_region *cxlr)
3157 {
3158 struct cxl_dax_region *cxlr_dax;
3159 struct device *dev;
3160 int rc;
3161
3162 cxlr_dax = cxl_dax_region_alloc(cxlr);
3163 if (IS_ERR(cxlr_dax))
3164 return PTR_ERR(cxlr_dax);
3165
3166 dev = &cxlr_dax->dev;
3167 rc = dev_set_name(dev, "dax_region%d", cxlr->id);
3168 if (rc)
3169 goto err;
3170
3171 rc = device_add(dev);
3172 if (rc)
3173 goto err;
3174
3175 dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
3176 dev_name(dev));
3177
3178 return devm_add_action_or_reset(&cxlr->dev, cxlr_dax_unregister,
3179 cxlr_dax);
3180 err:
3181 put_device(dev);
3182 return rc;
3183 }
3184
3185 static int match_root_decoder_by_range(struct device *dev, void *data)
3186 {
3187 struct range *r1, *r2 = data;
3188 struct cxl_root_decoder *cxlrd;
3189
3190 if (!is_root_decoder(dev))
3191 return 0;
3192
3193 cxlrd = to_cxl_root_decoder(dev);
3194 r1 = &cxlrd->cxlsd.cxld.hpa_range;
3195 return range_contains(r1, r2);
3196 }
3197
3198 static int match_region_by_range(struct device *dev, void *data)
3199 {
3200 struct cxl_region_params *p;
3201 struct cxl_region *cxlr;
3202 struct range *r = data;
3203 int rc = 0;
3204
3205 if (!is_cxl_region(dev))
3206 return 0;
3207
3208 cxlr = to_cxl_region(dev);
3209 p = &cxlr->params;
3210
3211 down_read(&cxl_region_rwsem);
3212 if (p->res && p->res->start == r->start && p->res->end == r->end)
3213 rc = 1;
3214 up_read(&cxl_region_rwsem);
3215
3216 return rc;
3217 }
3218
3219 /* Establish an empty region covering the given HPA range */
3220 static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
3221 struct cxl_endpoint_decoder *cxled)
3222 {
3223 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
3224 struct cxl_port *port = cxlrd_to_port(cxlrd);
3225 struct range *hpa = &cxled->cxld.hpa_range;
3226 struct cxl_region_params *p;
3227 struct cxl_region *cxlr;
3228 struct resource *res;
3229 int rc;
3230
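	/*
	 * Descriptive note: __create_region() returns -EBUSY when another
	 * thread wins the race for the currently advertised region_id, so
	 * retry with the refreshed id until creation succeeds or fails for a
	 * different reason.
	 */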
3231 do {
3232 cxlr = __create_region(cxlrd, cxled->mode,
3233 atomic_read(&cxlrd->region_id));
3234 } while (IS_ERR(cxlr) && PTR_ERR(cxlr) == -EBUSY);
3235
3236 if (IS_ERR(cxlr)) {
3237 dev_err(cxlmd->dev.parent,
3238 "%s:%s: %s failed assign region: %ld\n",
3239 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
3240 __func__, PTR_ERR(cxlr));
3241 return cxlr;
3242 }
3243
3244 down_write(&cxl_region_rwsem);
3245 p = &cxlr->params;
3246 if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
3247 dev_err(cxlmd->dev.parent,
3248 "%s:%s: %s autodiscovery interrupted\n",
3249 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
3250 __func__);
3251 rc = -EBUSY;
3252 goto err;
3253 }
3254
3255 set_bit(CXL_REGION_F_AUTO, &cxlr->flags);
3256
3257 res = kmalloc(sizeof(*res), GFP_KERNEL);
3258 if (!res) {
3259 rc = -ENOMEM;
3260 goto err;
3261 }
3262
3263 *res = DEFINE_RES_MEM_NAMED(hpa->start, range_len(hpa),
3264 dev_name(&cxlr->dev));
3265 rc = insert_resource(cxlrd->res, res);
3266 if (rc) {
3267 /*
3268 	 * Platform firmware may not have split resources like "System
3269 	 * RAM" on CXL window boundaries; see cxl_region_iomem_release()
3270 */
3271 dev_warn(cxlmd->dev.parent,
3272 "%s:%s: %s %s cannot insert resource\n",
3273 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
3274 __func__, dev_name(&cxlr->dev));
3275 }
3276
3277 p->res = res;
3278 p->interleave_ways = cxled->cxld.interleave_ways;
3279 p->interleave_granularity = cxled->cxld.interleave_granularity;
3280 p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
3281
3282 rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
3283 if (rc)
3284 goto err;
3285
3286 dev_dbg(cxlmd->dev.parent, "%s:%s: %s %s res: %pr iw: %d ig: %d\n",
3287 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), __func__,
3288 dev_name(&cxlr->dev), p->res, p->interleave_ways,
3289 p->interleave_granularity);
3290
3291 /* ...to match put_device() in cxl_add_to_region() */
3292 get_device(&cxlr->dev);
3293 up_write(&cxl_region_rwsem);
3294
3295 return cxlr;
3296
3297 err:
3298 up_write(&cxl_region_rwsem);
3299 devm_release_action(port->uport_dev, unregister_region, cxlr);
3300 return ERR_PTR(rc);
3301 }
3302
3303 int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
3304 {
3305 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
3306 struct range *hpa = &cxled->cxld.hpa_range;
3307 struct cxl_decoder *cxld = &cxled->cxld;
3308 struct device *cxlrd_dev, *region_dev;
3309 struct cxl_root_decoder *cxlrd;
3310 struct cxl_region_params *p;
3311 struct cxl_region *cxlr;
3312 bool attach = false;
3313 int rc;
3314
3315 cxlrd_dev = device_find_child(&root->dev, &cxld->hpa_range,
3316 match_root_decoder_by_range);
3317 if (!cxlrd_dev) {
3318 dev_err(cxlmd->dev.parent,
3319 "%s:%s no CXL window for range %#llx:%#llx\n",
3320 dev_name(&cxlmd->dev), dev_name(&cxld->dev),
3321 cxld->hpa_range.start, cxld->hpa_range.end);
3322 return -ENXIO;
3323 }
3324
3325 cxlrd = to_cxl_root_decoder(cxlrd_dev);
3326
3327 /*
3328 * Ensure that if multiple threads race to construct_region() for @hpa
3329 * one does the construction and the others add to that.
3330 */
3331 mutex_lock(&cxlrd->range_lock);
3332 region_dev = device_find_child(&cxlrd->cxlsd.cxld.dev, hpa,
3333 match_region_by_range);
3334 if (!region_dev) {
3335 cxlr = construct_region(cxlrd, cxled);
3336 region_dev = &cxlr->dev;
3337 } else
3338 cxlr = to_cxl_region(region_dev);
3339 mutex_unlock(&cxlrd->range_lock);
3340
3341 rc = PTR_ERR_OR_ZERO(cxlr);
3342 if (rc)
3343 goto out;
3344
3345 attach_target(cxlr, cxled, -1, TASK_UNINTERRUPTIBLE);
3346
3347 down_read(&cxl_region_rwsem);
3348 p = &cxlr->params;
3349 attach = p->state == CXL_CONFIG_COMMIT;
3350 up_read(&cxl_region_rwsem);
3351
3352 if (attach) {
3353 /*
3354 	 * If device_attach() fails, the range may still be active via
3355 	 * the platform-firmware memory map; otherwise the driver for
3356 	 * regions is local to this file, so driver matching can't fail.
3357 */
3358 if (device_attach(&cxlr->dev) < 0)
3359 dev_err(&cxlr->dev, "failed to enable, range: %pr\n",
3360 p->res);
3361 }
3362
3363 put_device(region_dev);
3364 out:
3365 put_device(cxlrd_dev);
3366 return rc;
3367 }
3368 EXPORT_SYMBOL_NS_GPL(cxl_add_to_region, CXL);
3369
3370 static int is_system_ram(struct resource *res, void *arg)
3371 {
3372 struct cxl_region *cxlr = arg;
3373 struct cxl_region_params *p = &cxlr->params;
3374
3375 dev_dbg(&cxlr->dev, "%pr has System RAM: %pr\n", p->res, res);
3376 return 1;
3377 }
3378
3379 static void shutdown_notifiers(void *_cxlr)
3380 {
3381 struct cxl_region *cxlr = _cxlr;
3382
3383 unregister_memory_notifier(&cxlr->memory_notifier);
3384 unregister_mt_adistance_algorithm(&cxlr->adist_notifier);
3385 }
3386
3387 static int cxl_region_probe(struct device *dev)
3388 {
3389 struct cxl_region *cxlr = to_cxl_region(dev);
3390 struct cxl_region_params *p = &cxlr->params;
3391 int rc;
3392
3393 rc = down_read_interruptible(&cxl_region_rwsem);
3394 if (rc) {
3395 dev_dbg(&cxlr->dev, "probe interrupted\n");
3396 return rc;
3397 }
3398
3399 if (p->state < CXL_CONFIG_COMMIT) {
3400 dev_dbg(&cxlr->dev, "config state: %d\n", p->state);
3401 rc = -ENXIO;
3402 goto out;
3403 }
3404
3405 if (test_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags)) {
3406 dev_err(&cxlr->dev,
3407 "failed to activate, re-commit region and retry\n");
3408 rc = -ENXIO;
3409 goto out;
3410 }
3411
3412 /*
3413 * From this point on any path that changes the region's state away from
3414 * CXL_CONFIG_COMMIT is also responsible for releasing the driver.
3415 */
3416 out:
3417 up_read(&cxl_region_rwsem);
3418
3419 if (rc)
3420 return rc;
3421
3422 cxlr->memory_notifier.notifier_call = cxl_region_perf_attrs_callback;
3423 cxlr->memory_notifier.priority = CXL_CALLBACK_PRI;
3424 register_memory_notifier(&cxlr->memory_notifier);
3425
3426 cxlr->adist_notifier.notifier_call = cxl_region_calculate_adistance;
3427 cxlr->adist_notifier.priority = 100;
3428 register_mt_adistance_algorithm(&cxlr->adist_notifier);
3429
3430 rc = devm_add_action_or_reset(&cxlr->dev, shutdown_notifiers, cxlr);
3431 if (rc)
3432 return rc;
3433
3434 switch (cxlr->mode) {
3435 case CXL_DECODER_PMEM:
3436 return devm_cxl_add_pmem_region(cxlr);
3437 case CXL_DECODER_RAM:
3438 /*
3439 	 * The region cannot be managed by CXL if any portion of
3440 	 * it is already online as 'System RAM'.
3441 */
3442 if (walk_iomem_res_desc(IORES_DESC_NONE,
3443 IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
3444 p->res->start, p->res->end, cxlr,
3445 is_system_ram) > 0)
3446 return 0;
3447 return devm_cxl_add_dax_region(cxlr);
3448 default:
3449 dev_dbg(&cxlr->dev, "unsupported region mode: %d\n",
3450 cxlr->mode);
3451 return -ENXIO;
3452 }
3453 }
3454
3455 static struct cxl_driver cxl_region_driver = {
3456 .name = "cxl_region",
3457 .probe = cxl_region_probe,
3458 .id = CXL_DEVICE_REGION,
3459 };
3460
3461 int cxl_region_init(void)
3462 {
3463 return cxl_driver_register(&cxl_region_driver);
3464 }
3465
3466 void cxl_region_exit(void)
3467 {
3468 cxl_driver_unregister(&cxl_region_driver);
3469 }
3470
3471 MODULE_IMPORT_NS(CXL);
3472 MODULE_IMPORT_NS(DEVMEM);
3473 MODULE_ALIAS_CXL(CXL_DEVICE_REGION);
3474