// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/platform_device.h>
#include <linux/memregion.h>
#include <linux/workqueue.h>
#include <linux/einj-cxl.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/node.h>
#include <cxlmem.h>
#include <cxlpci.h>
#include <cxl.h>
#include "core.h"

/**
 * DOC: cxl core
 *
 * The CXL core provides a set of interfaces that can be consumed by CXL aware
 * drivers. The interfaces allow for creation, modification, and destruction of
 * regions, memory devices, ports, and decoders. CXL aware drivers must register
 * with the CXL core via these interfaces in order to be able to participate in
 * cross-device interleave coordination. The CXL core also establishes and
 * maintains the bridge to the nvdimm subsystem.
 *
 * The CXL core introduces a sysfs hierarchy to control the devices that are
 * instantiated by the core.
 */

/*
 * All changes to the interleave configuration occur with this lock held
 * for write.
 */
DECLARE_RWSEM(cxl_region_rwsem);

static DEFINE_IDA(cxl_port_ida);
static DEFINE_XARRAY(cxl_root_buses);

int cxl_num_decoders_committed(struct cxl_port *port)
{
	lockdep_assert_held(&cxl_region_rwsem);

	return port->commit_end + 1;
}
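
/*
 * Illustrative caller pattern (a sketch; decoders_committed_show()
 * below is the in-tree example): the committed decoder count is only
 * stable while cxl_region_rwsem is held, so callers bracket the read:
 *
 *	down_read(&cxl_region_rwsem);
 *	committed = cxl_num_decoders_committed(port);
 *	up_read(&cxl_region_rwsem);
 */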

static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%s\n", dev->type->name);
}
static DEVICE_ATTR_RO(devtype);

static int cxl_device_id(const struct device *dev)
{
	if (dev->type == &cxl_nvdimm_bridge_type)
		return CXL_DEVICE_NVDIMM_BRIDGE;
	if (dev->type == &cxl_nvdimm_type)
		return CXL_DEVICE_NVDIMM;
	if (dev->type == CXL_PMEM_REGION_TYPE())
		return CXL_DEVICE_PMEM_REGION;
	if (dev->type == CXL_DAX_REGION_TYPE())
		return CXL_DEVICE_DAX_REGION;
	if (is_cxl_port(dev)) {
		if (is_cxl_root(to_cxl_port(dev)))
			return CXL_DEVICE_ROOT;
		return CXL_DEVICE_PORT;
	}
	if (is_cxl_memdev(dev))
		return CXL_DEVICE_MEMORY_EXPANDER;
	if (dev->type == CXL_REGION_TYPE())
		return CXL_DEVICE_REGION;
	if (dev->type == &cxl_pmu_type)
		return CXL_DEVICE_PMU;
	return 0;
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sysfs_emit(buf, CXL_MODALIAS_FMT "\n", cxl_device_id(dev));
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *cxl_base_attributes[] = {
	&dev_attr_devtype.attr,
	&dev_attr_modalias.attr,
	NULL,
};

struct attribute_group cxl_base_attribute_group = {
	.attrs = cxl_base_attributes,
};

static ssize_t start_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%#llx\n", cxld->hpa_range.start);
}
static DEVICE_ATTR_ADMIN_RO(start);

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%#llx\n", range_len(&cxld->hpa_range));
}
static DEVICE_ATTR_RO(size);

#define CXL_DECODER_FLAG_ATTR(name, flag)                            \
static ssize_t name##_show(struct device *dev,                       \
			   struct device_attribute *attr, char *buf) \
{                                                                    \
	struct cxl_decoder *cxld = to_cxl_decoder(dev);              \
                                                                     \
	return sysfs_emit(buf, "%s\n",                               \
			  (cxld->flags & (flag)) ? "1" : "0");       \
}                                                                    \
static DEVICE_ATTR_RO(name)

CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM);
CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM);
CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2);
CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3);
CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK);
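
/*
 * For reference, a sketch of what one instantiation generates;
 * CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM) expands to
 * roughly:
 *
 *	static ssize_t cap_pmem_show(struct device *dev,
 *				     struct device_attribute *attr, char *buf)
 *	{
 *		struct cxl_decoder *cxld = to_cxl_decoder(dev);
 *
 *		return sysfs_emit(buf, "%s\n",
 *				  (cxld->flags & CXL_DECODER_F_PMEM) ? "1" : "0");
 *	}
 *	static DEVICE_ATTR_RO(cap_pmem);
 */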

static ssize_t target_type_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	switch (cxld->target_type) {
	case CXL_DECODER_DEVMEM:
		return sysfs_emit(buf, "accelerator\n");
	case CXL_DECODER_HOSTONLYMEM:
		return sysfs_emit(buf, "expander\n");
	}
	return -ENXIO;
}
static DEVICE_ATTR_RO(target_type);

static ssize_t emit_target_list(struct cxl_switch_decoder *cxlsd, char *buf)
{
	struct cxl_decoder *cxld = &cxlsd->cxld;
	ssize_t offset = 0;
	int i, rc = 0;

	for (i = 0; i < cxld->interleave_ways; i++) {
		struct cxl_dport *dport = cxlsd->target[i];
		struct cxl_dport *next = NULL;

		if (!dport)
			break;

		if (i + 1 < cxld->interleave_ways)
			next = cxlsd->target[i + 1];
		rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id,
				   next ? "," : "");
		if (rc < 0)
			return rc;
		offset += rc;
	}

	return offset;
}
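
/*
 * Illustrative output (a sketch; the port ids are assumed examples): a
 * 4-way interleaved switch decoder whose targets are dports 0-3 emits
 * "0,1,2,3". The walk stops at the first NULL target entry, so a
 * partially initialized target list emits a shorter string.
 */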

static ssize_t target_list_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
	ssize_t offset;
	int rc;

	guard(rwsem_read)(&cxl_region_rwsem);
	rc = emit_target_list(cxlsd, buf);
	if (rc < 0)
		return rc;
	offset = rc;

	rc = sysfs_emit_at(buf, offset, "\n");
	if (rc < 0)
		return rc;

	return offset + rc;
}
static DEVICE_ATTR_RO(target_list);

static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);

	return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxled->mode));
}

static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	enum cxl_decoder_mode mode;
	ssize_t rc;

	if (sysfs_streq(buf, "pmem"))
		mode = CXL_DECODER_PMEM;
	else if (sysfs_streq(buf, "ram"))
		mode = CXL_DECODER_RAM;
	else
		return -EINVAL;

	rc = cxl_dpa_set_mode(cxled, mode);
	if (rc)
		return rc;

	return len;
}
static DEVICE_ATTR_RW(mode);
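
/*
 * Illustrative shell usage (a sketch; the decoder name is an assumed
 * example):
 *
 *	echo ram > /sys/bus/cxl/devices/decoder2.0/mode
 *
 * Only "pmem" and "ram" are accepted; any other string returns -EINVAL.
 */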

static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *attr,
				 char *buf)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);

	guard(rwsem_read)(&cxl_dpa_rwsem);
	return sysfs_emit(buf, "%#llx\n", (u64)cxl_dpa_resource_start(cxled));
}
static DEVICE_ATTR_RO(dpa_resource);

static ssize_t dpa_size_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	resource_size_t size = cxl_dpa_size(cxled);

	return sysfs_emit(buf, "%pa\n", &size);
}

static ssize_t dpa_size_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	unsigned long long size;
	ssize_t rc;

	rc = kstrtoull(buf, 0, &size);
	if (rc)
		return rc;

	if (!IS_ALIGNED(size, SZ_256M))
		return -EINVAL;

	rc = cxl_dpa_free(cxled);
	if (rc)
		return rc;

	if (size == 0)
		return len;

	rc = cxl_dpa_alloc(cxled, size);
	if (rc)
		return rc;

	return len;
}
static DEVICE_ATTR_RW(dpa_size);
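
/*
 * Illustrative shell usage (a sketch; the decoder name is an assumed
 * example): sizes must be 256MB aligned, and writing 0 just frees the
 * current allocation:
 *
 *	echo 0x10000000 > /sys/bus/cxl/devices/decoder2.0/dpa_size
 */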

static ssize_t interleave_granularity_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%d\n", cxld->interleave_granularity);
}

static DEVICE_ATTR_RO(interleave_granularity);

static ssize_t interleave_ways_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%d\n", cxld->interleave_ways);
}

static DEVICE_ATTR_RO(interleave_ways);

static ssize_t qos_class_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);

	return sysfs_emit(buf, "%d\n", cxlrd->qos_class);
}
static DEVICE_ATTR_RO(qos_class);

static struct attribute *cxl_decoder_base_attrs[] = {
	&dev_attr_start.attr,
	&dev_attr_size.attr,
	&dev_attr_locked.attr,
	&dev_attr_interleave_granularity.attr,
	&dev_attr_interleave_ways.attr,
	NULL,
};

static struct attribute_group cxl_decoder_base_attribute_group = {
	.attrs = cxl_decoder_base_attrs,
};

static struct attribute *cxl_decoder_root_attrs[] = {
	&dev_attr_cap_pmem.attr,
	&dev_attr_cap_ram.attr,
	&dev_attr_cap_type2.attr,
	&dev_attr_cap_type3.attr,
	&dev_attr_target_list.attr,
	&dev_attr_qos_class.attr,
	SET_CXL_REGION_ATTR(create_pmem_region)
	SET_CXL_REGION_ATTR(create_ram_region)
	SET_CXL_REGION_ATTR(delete_region)
	NULL,
};

static bool can_create_pmem(struct cxl_root_decoder *cxlrd)
{
	unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_PMEM;

	return (cxlrd->cxlsd.cxld.flags & flags) == flags;
}

static bool can_create_ram(struct cxl_root_decoder *cxlrd)
{
	unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_RAM;

	return (cxlrd->cxlsd.cxld.flags & flags) == flags;
}

static umode_t cxl_root_decoder_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);

	if (a == CXL_REGION_ATTR(create_pmem_region) && !can_create_pmem(cxlrd))
		return 0;

	if (a == CXL_REGION_ATTR(create_ram_region) && !can_create_ram(cxlrd))
		return 0;

	if (a == CXL_REGION_ATTR(delete_region) &&
	    !(can_create_pmem(cxlrd) || can_create_ram(cxlrd)))
		return 0;

	return a->mode;
}

static struct attribute_group cxl_decoder_root_attribute_group = {
	.attrs = cxl_decoder_root_attrs,
	.is_visible = cxl_root_decoder_visible,
};

static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
	&cxl_decoder_root_attribute_group,
	&cxl_decoder_base_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};

static struct attribute *cxl_decoder_switch_attrs[] = {
	&dev_attr_target_type.attr,
	&dev_attr_target_list.attr,
	SET_CXL_REGION_ATTR(region)
	NULL,
};

static struct attribute_group cxl_decoder_switch_attribute_group = {
	.attrs = cxl_decoder_switch_attrs,
};

static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = {
	&cxl_decoder_switch_attribute_group,
	&cxl_decoder_base_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};

static struct attribute *cxl_decoder_endpoint_attrs[] = {
	&dev_attr_target_type.attr,
	&dev_attr_mode.attr,
	&dev_attr_dpa_size.attr,
	&dev_attr_dpa_resource.attr,
	SET_CXL_REGION_ATTR(region)
	NULL,
};

static struct attribute_group cxl_decoder_endpoint_attribute_group = {
	.attrs = cxl_decoder_endpoint_attrs,
};

static const struct attribute_group *cxl_decoder_endpoint_attribute_groups[] = {
	&cxl_decoder_base_attribute_group,
	&cxl_decoder_endpoint_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};

static void __cxl_decoder_release(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);

	ida_free(&port->decoder_ida, cxld->id);
	put_device(&port->dev);
}

static void cxl_endpoint_decoder_release(struct device *dev)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);

	__cxl_decoder_release(&cxled->cxld);
	kfree(cxled);
}

static void cxl_switch_decoder_release(struct device *dev)
{
	struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);

	__cxl_decoder_release(&cxlsd->cxld);
	kfree(cxlsd);
}

struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_root_decoder(dev),
			  "not a cxl_root_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_root_decoder, cxlsd.cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_root_decoder, CXL);

static void cxl_root_decoder_release(struct device *dev)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);

	if (atomic_read(&cxlrd->region_id) >= 0)
		memregion_free(atomic_read(&cxlrd->region_id));
	__cxl_decoder_release(&cxlrd->cxlsd.cxld);
	kfree(cxlrd);
}

static const struct device_type cxl_decoder_endpoint_type = {
	.name = "cxl_decoder_endpoint",
	.release = cxl_endpoint_decoder_release,
	.groups = cxl_decoder_endpoint_attribute_groups,
};

static const struct device_type cxl_decoder_switch_type = {
	.name = "cxl_decoder_switch",
	.release = cxl_switch_decoder_release,
	.groups = cxl_decoder_switch_attribute_groups,
};

static const struct device_type cxl_decoder_root_type = {
	.name = "cxl_decoder_root",
	.release = cxl_root_decoder_release,
	.groups = cxl_decoder_root_attribute_groups,
};

bool is_endpoint_decoder(struct device *dev)
{
	return dev->type == &cxl_decoder_endpoint_type;
}
EXPORT_SYMBOL_NS_GPL(is_endpoint_decoder, CXL);

bool is_root_decoder(struct device *dev)
{
	return dev->type == &cxl_decoder_root_type;
}
EXPORT_SYMBOL_NS_GPL(is_root_decoder, CXL);

bool is_switch_decoder(struct device *dev)
{
	return is_root_decoder(dev) || dev->type == &cxl_decoder_switch_type;
}
EXPORT_SYMBOL_NS_GPL(is_switch_decoder, CXL);

struct cxl_decoder *to_cxl_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev,
			  !is_switch_decoder(dev) && !is_endpoint_decoder(dev),
			  "not a cxl_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_decoder, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, CXL);

struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_endpoint_decoder(dev),
			  "not a cxl_endpoint_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_endpoint_decoder, cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_endpoint_decoder, CXL);

struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_switch_decoder(dev),
			  "not a cxl_switch_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_switch_decoder, cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_switch_decoder, CXL);

static void cxl_ep_release(struct cxl_ep *ep)
{
	put_device(ep->ep);
	kfree(ep);
}

static void cxl_ep_remove(struct cxl_port *port, struct cxl_ep *ep)
{
	if (!ep)
		return;
	xa_erase(&port->endpoints, (unsigned long) ep->ep);
	cxl_ep_release(ep);
}

static void cxl_port_release(struct device *dev)
{
	struct cxl_port *port = to_cxl_port(dev);
	unsigned long index;
	struct cxl_ep *ep;

	xa_for_each(&port->endpoints, index, ep)
		cxl_ep_remove(port, ep);
	xa_destroy(&port->endpoints);
	xa_destroy(&port->dports);
	xa_destroy(&port->regions);
	ida_free(&cxl_port_ida, port->id);
	if (is_cxl_root(port))
		kfree(to_cxl_root(port));
	else
		kfree(port);
}

static ssize_t decoders_committed_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct cxl_port *port = to_cxl_port(dev);
	int rc;

	down_read(&cxl_region_rwsem);
	rc = sysfs_emit(buf, "%d\n", cxl_num_decoders_committed(port));
	up_read(&cxl_region_rwsem);

	return rc;
}

static DEVICE_ATTR_RO(decoders_committed);

static struct attribute *cxl_port_attrs[] = {
	&dev_attr_decoders_committed.attr,
	NULL,
};

static struct attribute_group cxl_port_attribute_group = {
	.attrs = cxl_port_attrs,
};

static const struct attribute_group *cxl_port_attribute_groups[] = {
	&cxl_base_attribute_group,
	&cxl_port_attribute_group,
	NULL,
};

static const struct device_type cxl_port_type = {
	.name = "cxl_port",
	.release = cxl_port_release,
	.groups = cxl_port_attribute_groups,
};

bool is_cxl_port(const struct device *dev)
{
	return dev->type == &cxl_port_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_port, CXL);

struct cxl_port *to_cxl_port(const struct device *dev)
{
	if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type,
			  "not a cxl_port device\n"))
		return NULL;
	return container_of(dev, struct cxl_port, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_port, CXL);

static void unregister_port(void *_port)
{
	struct cxl_port *port = _port;
	struct cxl_port *parent;
	struct device *lock_dev;

	if (is_cxl_root(port))
		parent = NULL;
	else
		parent = to_cxl_port(port->dev.parent);

	/*
	 * CXL root ports and the first level of ports are unregistered
	 * under the platform firmware device lock; all other ports are
	 * unregistered while holding their parent port lock.
	 */
	if (!parent)
		lock_dev = port->uport_dev;
	else if (is_cxl_root(parent))
		lock_dev = parent->uport_dev;
	else
		lock_dev = &parent->dev;

	device_lock_assert(lock_dev);
	port->dead = true;
	device_unregister(&port->dev);
}

static void cxl_unlink_uport(void *_port)
{
	struct cxl_port *port = _port;

	sysfs_remove_link(&port->dev.kobj, "uport");
}

static int devm_cxl_link_uport(struct device *host, struct cxl_port *port)
{
	int rc;

	rc = sysfs_create_link(&port->dev.kobj, &port->uport_dev->kobj,
			       "uport");
	if (rc)
		return rc;
	return devm_add_action_or_reset(host, cxl_unlink_uport, port);
}

static void cxl_unlink_parent_dport(void *_port)
{
	struct cxl_port *port = _port;

	sysfs_remove_link(&port->dev.kobj, "parent_dport");
}

static int devm_cxl_link_parent_dport(struct device *host,
				      struct cxl_port *port,
				      struct cxl_dport *parent_dport)
{
	int rc;

	if (!parent_dport)
		return 0;

	rc = sysfs_create_link(&port->dev.kobj, &parent_dport->dport_dev->kobj,
			       "parent_dport");
	if (rc)
		return rc;
	return devm_add_action_or_reset(host, cxl_unlink_parent_dport, port);
}

static struct lock_class_key cxl_port_key;

static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
				       struct cxl_dport *parent_dport)
{
	struct cxl_root *cxl_root __free(kfree) = NULL;
	struct cxl_port *port, *_port __free(kfree) = NULL;
	struct device *dev;
	int rc;

	/* No parent_dport, root cxl_port */
	if (!parent_dport) {
		cxl_root = kzalloc(sizeof(*cxl_root), GFP_KERNEL);
		if (!cxl_root)
			return ERR_PTR(-ENOMEM);
	} else {
		_port = kzalloc(sizeof(*port), GFP_KERNEL);
		if (!_port)
			return ERR_PTR(-ENOMEM);
	}

	rc = ida_alloc(&cxl_port_ida, GFP_KERNEL);
	if (rc < 0)
		return ERR_PTR(rc);

	if (cxl_root)
		port = &no_free_ptr(cxl_root)->port;
	else
		port = no_free_ptr(_port);

	port->id = rc;
	port->uport_dev = uport_dev;

	/*
	 * The top-level cxl_port "cxl_root" does not have a cxl_port as
	 * its parent and it does not have any corresponding component
	 * registers as its decode is described by a fixed platform
	 * description.
	 */
	dev = &port->dev;
	if (parent_dport) {
		struct cxl_port *parent_port = parent_dport->port;
		struct cxl_port *iter;

		dev->parent = &parent_port->dev;
		port->depth = parent_port->depth + 1;
		port->parent_dport = parent_dport;

		/*
		 * walk to the host bridge, or the first ancestor that knows
		 * the host bridge
		 */
		iter = port;
		while (!iter->host_bridge &&
		       !is_cxl_root(to_cxl_port(iter->dev.parent)))
			iter = to_cxl_port(iter->dev.parent);
		if (iter->host_bridge)
			port->host_bridge = iter->host_bridge;
		else if (parent_dport->rch)
			port->host_bridge = parent_dport->dport_dev;
		else
			port->host_bridge = iter->uport_dev;
		dev_dbg(uport_dev, "host-bridge: %s\n",
			dev_name(port->host_bridge));
	} else
		dev->parent = uport_dev;

	ida_init(&port->decoder_ida);
	port->hdm_end = -1;
	port->commit_end = -1;
	xa_init(&port->dports);
	xa_init(&port->endpoints);
	xa_init(&port->regions);

	device_initialize(dev);
	lockdep_set_class_and_subclass(&dev->mutex, &cxl_port_key, port->depth);
	device_set_pm_not_required(dev);
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_port_type;

	return port;
}

static int cxl_setup_comp_regs(struct device *host, struct cxl_register_map *map,
			       resource_size_t component_reg_phys)
{
	*map = (struct cxl_register_map) {
		.host = host,
		.reg_type = CXL_REGLOC_RBI_EMPTY,
		.resource = component_reg_phys,
	};

	if (component_reg_phys == CXL_RESOURCE_NONE)
		return 0;

	map->reg_type = CXL_REGLOC_RBI_COMPONENT;
	map->max_size = CXL_COMPONENT_REG_BLOCK_SIZE;

	return cxl_setup_regs(map);
}

static int cxl_port_setup_regs(struct cxl_port *port,
			       resource_size_t component_reg_phys)
{
	if (dev_is_platform(port->uport_dev))
		return 0;
	return cxl_setup_comp_regs(&port->dev, &port->reg_map,
				   component_reg_phys);
}

static int cxl_dport_setup_regs(struct device *host, struct cxl_dport *dport,
				resource_size_t component_reg_phys)
{
	int rc;

	if (dev_is_platform(dport->dport_dev))
		return 0;

	/*
	 * use @dport->dport_dev for the context for error messages during
	 * register probing, and fixup @host after the fact, since @host may be
	 * NULL.
	 */
	rc = cxl_setup_comp_regs(dport->dport_dev, &dport->reg_map,
				 component_reg_phys);
	dport->reg_map.host = host;
	return rc;
}

DEFINE_SHOW_ATTRIBUTE(einj_cxl_available_error_type);

static int cxl_einj_inject(void *data, u64 type)
{
	struct cxl_dport *dport = data;

	if (dport->rch)
		return einj_cxl_inject_rch_error(dport->rcrb.base, type);

	return einj_cxl_inject_error(to_pci_dev(dport->dport_dev), type);
}
DEFINE_DEBUGFS_ATTRIBUTE(cxl_einj_inject_fops, NULL, cxl_einj_inject,
			 "0x%llx\n");

static void cxl_debugfs_create_dport_dir(struct cxl_dport *dport)
{
	struct dentry *dir;

	if (!einj_cxl_is_initialized())
		return;

	/*
	 * dport_dev needs to be a PCIe port for CXL 2.0+ ports because
	 * EINJ expects a dport SBDF to be specified for 2.0 error injection.
	 */
	if (!dport->rch && !dev_is_pci(dport->dport_dev))
		return;

	dir = cxl_debugfs_create_dir(dev_name(dport->dport_dev));

	debugfs_create_file("einj_inject", 0200, dir, dport,
			    &cxl_einj_inject_fops);
}

static struct cxl_port *__devm_cxl_add_port(struct device *host,
					    struct device *uport_dev,
					    resource_size_t component_reg_phys,
					    struct cxl_dport *parent_dport)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	port = cxl_port_alloc(uport_dev, parent_dport);
	if (IS_ERR(port))
		return port;

	dev = &port->dev;
	if (is_cxl_memdev(uport_dev)) {
		struct cxl_memdev *cxlmd = to_cxl_memdev(uport_dev);
		struct cxl_dev_state *cxlds = cxlmd->cxlds;

		rc = dev_set_name(dev, "endpoint%d", port->id);
		if (rc)
			goto err;

		/*
		 * The endpoint driver already enumerated the component and RAS
		 * registers. Reuse that enumeration while prepping them to be
		 * mapped by the cxl_port driver.
		 */
		port->reg_map = cxlds->reg_map;
		port->reg_map.host = &port->dev;
		cxlmd->endpoint = port;
	} else if (parent_dport) {
		rc = dev_set_name(dev, "port%d", port->id);
		if (rc)
			goto err;

		rc = cxl_port_setup_regs(port, component_reg_phys);
		if (rc)
			goto err;
	} else
		rc = dev_set_name(dev, "root%d", port->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(host, unregister_port, port);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_cxl_link_uport(host, port);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_cxl_link_parent_dport(host, port, parent_dport);
	if (rc)
		return ERR_PTR(rc);

	if (parent_dport && dev_is_pci(uport_dev))
		port->pci_latency = cxl_pci_get_latency(to_pci_dev(uport_dev));

	return port;

err:
	put_device(dev);
	return ERR_PTR(rc);
}

/**
 * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
 * @host: host device for devm operations
 * @uport_dev: "physical" device implementing this upstream port
 * @component_reg_phys: (optional) for configurable cxl_port instances
 * @parent_dport: next hop up in the CXL memory decode hierarchy
 */
struct cxl_port *devm_cxl_add_port(struct device *host,
				   struct device *uport_dev,
				   resource_size_t component_reg_phys,
				   struct cxl_dport *parent_dport)
{
	struct cxl_port *port, *parent_port;

	port = __devm_cxl_add_port(host, uport_dev, component_reg_phys,
				   parent_dport);

	parent_port = parent_dport ? parent_dport->port : NULL;
	if (IS_ERR(port)) {
		dev_dbg(uport_dev, "Failed to add%s%s%s: %ld\n",
			parent_port ? " port to " : "",
			parent_port ? dev_name(&parent_port->dev) : "",
			parent_port ? "" : " root port",
			PTR_ERR(port));
	} else {
		dev_dbg(uport_dev, "%s added%s%s%s\n",
			dev_name(&port->dev),
			parent_port ? " to " : "",
			parent_port ? dev_name(&parent_port->dev) : "",
			parent_port ? "" : " (root port)");
	}

	return port;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL);

struct cxl_root *devm_cxl_add_root(struct device *host,
				   const struct cxl_root_ops *ops)
{
	struct cxl_root *cxl_root;
	struct cxl_port *port;

	port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
	if (IS_ERR(port))
		return (struct cxl_root *)port;

	cxl_root = to_cxl_root(port);
	cxl_root->ops = ops;
	return cxl_root;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_root, CXL);

struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port)
{
	/* There is no pci_bus associated with a CXL platform-root port */
	if (is_cxl_root(port))
		return NULL;

	if (dev_is_pci(port->uport_dev)) {
		struct pci_dev *pdev = to_pci_dev(port->uport_dev);

		return pdev->subordinate;
	}

	return xa_load(&cxl_root_buses, (unsigned long)port->uport_dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_port_to_pci_bus, CXL);

static void unregister_pci_bus(void *uport_dev)
{
	xa_erase(&cxl_root_buses, (unsigned long)uport_dev);
}

int devm_cxl_register_pci_bus(struct device *host, struct device *uport_dev,
			      struct pci_bus *bus)
{
	int rc;

	if (dev_is_pci(uport_dev))
		return -EINVAL;

	rc = xa_insert(&cxl_root_buses, (unsigned long)uport_dev, bus,
		       GFP_KERNEL);
	if (rc)
		return rc;
	return devm_add_action_or_reset(host, unregister_pci_bus, uport_dev);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_register_pci_bus, CXL);
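
/*
 * Illustrative caller sketch (hypothetical host-bridge driver; the
 * names host, bridge_dev, and root_bus are assumptions): a platform
 * driver that enumerates a CXL host bridge registers the bridge's
 * pci_bus so descendant ports can resolve cxl_port_to_pci_bus():
 *
 *	rc = devm_cxl_register_pci_bus(host, bridge_dev, root_bus);
 *	if (rc)
 *		return rc;
 */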

static bool dev_is_cxl_root_child(struct device *dev)
{
	struct cxl_port *port, *parent;

	if (!is_cxl_port(dev))
		return false;

	port = to_cxl_port(dev);
	if (is_cxl_root(port))
		return false;

	parent = to_cxl_port(port->dev.parent);
	if (is_cxl_root(parent))
		return true;

	return false;
}

struct cxl_root *find_cxl_root(struct cxl_port *port)
{
	struct cxl_port *iter = port;

	while (iter && !is_cxl_root(iter))
		iter = to_cxl_port(iter->dev.parent);

	if (!iter)
		return NULL;
	get_device(&iter->dev);
	return to_cxl_root(iter);
}
EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL);

void put_cxl_root(struct cxl_root *cxl_root)
{
	if (!cxl_root)
		return;

	put_device(&cxl_root->port.dev);
}
EXPORT_SYMBOL_NS_GPL(put_cxl_root, CXL);
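
/*
 * Illustrative find/put pairing (a sketch): find_cxl_root() takes a
 * reference on the root port's device that the caller must drop:
 *
 *	struct cxl_root *cxl_root = find_cxl_root(port);
 *
 *	if (!cxl_root)
 *		return -ENXIO;
 *	...use cxl_root->ops...
 *	put_cxl_root(cxl_root);
 */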

static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{
	struct cxl_dport *dport;
	unsigned long index;

	device_lock_assert(&port->dev);
	xa_for_each(&port->dports, index, dport)
		if (dport->port_id == id)
			return dport;
	return NULL;
}

static int add_dport(struct cxl_port *port, struct cxl_dport *dport)
{
	struct cxl_dport *dup;
	int rc;

	device_lock_assert(&port->dev);
	dup = find_dport(port, dport->port_id);
	if (dup) {
		dev_err(&port->dev,
			"unable to add dport%d-%s non-unique port id (%s)\n",
			dport->port_id, dev_name(dport->dport_dev),
			dev_name(dup->dport_dev));
		return -EBUSY;
	}

	rc = xa_insert(&port->dports, (unsigned long)dport->dport_dev, dport,
		       GFP_KERNEL);
	if (rc)
		return rc;

	port->nr_dports++;
	return 0;
}

/*
 * Since root-level CXL dports cannot be enumerated by PCI they are not
 * enumerated by the common port driver that acquires the port lock over
 * dport add/remove. Instead, root dports are manually added by a
 * platform driver and cond_cxl_root_lock() is used to take the missing
 * port lock in that case.
 */
static void cond_cxl_root_lock(struct cxl_port *port)
{
	if (is_cxl_root(port))
		device_lock(&port->dev);
}

static void cond_cxl_root_unlock(struct cxl_port *port)
{
	if (is_cxl_root(port))
		device_unlock(&port->dev);
}

static void cxl_dport_remove(void *data)
{
	struct cxl_dport *dport = data;
	struct cxl_port *port = dport->port;

	xa_erase(&port->dports, (unsigned long) dport->dport_dev);
	put_device(dport->dport_dev);
}

static void cxl_dport_unlink(void *data)
{
	struct cxl_dport *dport = data;
	struct cxl_port *port = dport->port;
	char link_name[CXL_TARGET_STRLEN];

	sprintf(link_name, "dport%d", dport->port_id);
	sysfs_remove_link(&port->dev.kobj, link_name);
}

static struct cxl_dport *
__devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
		     int port_id, resource_size_t component_reg_phys,
		     resource_size_t rcrb)
{
	char link_name[CXL_TARGET_STRLEN];
	struct cxl_dport *dport;
	struct device *host;
	int rc;

	if (is_cxl_root(port))
		host = port->uport_dev;
	else
		host = &port->dev;

	if (!host->driver) {
		dev_WARN_ONCE(&port->dev, 1, "dport:%s bad devm context\n",
			      dev_name(dport_dev));
		return ERR_PTR(-ENXIO);
	}

	if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >=
	    CXL_TARGET_STRLEN)
		return ERR_PTR(-EINVAL);

	dport = devm_kzalloc(host, sizeof(*dport), GFP_KERNEL);
	if (!dport)
		return ERR_PTR(-ENOMEM);

	dport->dport_dev = dport_dev;
	dport->port_id = port_id;
	dport->port = port;

	if (rcrb == CXL_RESOURCE_NONE) {
		rc = cxl_dport_setup_regs(&port->dev, dport,
					  component_reg_phys);
		if (rc)
			return ERR_PTR(rc);
	} else {
		dport->rcrb.base = rcrb;
		component_reg_phys = __rcrb_to_component(dport_dev, &dport->rcrb,
							 CXL_RCRB_DOWNSTREAM);
		if (component_reg_phys == CXL_RESOURCE_NONE) {
			dev_warn(dport_dev, "Invalid Component Registers in RCRB");
			return ERR_PTR(-ENXIO);
		}

		/*
		 * RCH @dport is not ready to map until associated with its
		 * memdev
		 */
		rc = cxl_dport_setup_regs(NULL, dport, component_reg_phys);
		if (rc)
			return ERR_PTR(rc);

		dport->rch = true;
	}

	if (component_reg_phys != CXL_RESOURCE_NONE)
		dev_dbg(dport_dev, "Component Registers found for dport: %pa\n",
			&component_reg_phys);

	cond_cxl_root_lock(port);
	rc = add_dport(port, dport);
	cond_cxl_root_unlock(port);
	if (rc)
		return ERR_PTR(rc);

	get_device(dport_dev);
	rc = devm_add_action_or_reset(host, cxl_dport_remove, dport);
	if (rc)
		return ERR_PTR(rc);

	rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_add_action_or_reset(host, cxl_dport_unlink, dport);
	if (rc)
		return ERR_PTR(rc);

	if (dev_is_pci(dport_dev))
		dport->link_latency = cxl_pci_get_latency(to_pci_dev(dport_dev));

	cxl_debugfs_create_dport_dir(dport);

	return dport;
}

/**
 * devm_cxl_add_dport - append VH downstream port data to a cxl_port
 * @port: the cxl_port that references this dport
 * @dport_dev: firmware or PCI device representing the dport
 * @port_id: identifier for this dport in a decoder's target list
 * @component_reg_phys: optional location of CXL component registers
 *
 * Note that dports are appended to the devm release actions of either
 * the port's host (for root ports) or the port itself (for switch
 * ports).
 */
struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
				     struct device *dport_dev, int port_id,
				     resource_size_t component_reg_phys)
{
	struct cxl_dport *dport;

	dport = __devm_cxl_add_dport(port, dport_dev, port_id,
				     component_reg_phys, CXL_RESOURCE_NONE);
	if (IS_ERR(dport)) {
		dev_dbg(dport_dev, "failed to add dport to %s: %ld\n",
			dev_name(&port->dev), PTR_ERR(dport));
	} else {
		dev_dbg(dport_dev, "dport added to %s\n",
			dev_name(&port->dev));
	}

	return dport;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, CXL);
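
/*
 * Illustrative caller sketch (hypothetical; a port driver walking its
 * downstream switch ports, where "creg" is an assumed component
 * register base discovered separately):
 *
 *	dport = devm_cxl_add_dport(port, &pdev->dev, port_id, creg);
 *	if (IS_ERR(dport))
 *		return PTR_ERR(dport);
 */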

/**
 * devm_cxl_add_rch_dport - append RCH downstream port data to a cxl_port
 * @port: the cxl_port that references this dport
 * @dport_dev: firmware or PCI device representing the dport
 * @port_id: identifier for this dport in a decoder's target list
 * @rcrb: mandatory location of a Root Complex Register Block
 *
 * See CXL 3.0 9.11.8 CXL Devices Attached to an RCH
 */
struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port,
					 struct device *dport_dev, int port_id,
					 resource_size_t rcrb)
{
	struct cxl_dport *dport;

	if (rcrb == CXL_RESOURCE_NONE) {
		dev_dbg(&port->dev, "failed to add RCH dport, missing RCRB\n");
		return ERR_PTR(-EINVAL);
	}

	dport = __devm_cxl_add_dport(port, dport_dev, port_id,
				     CXL_RESOURCE_NONE, rcrb);
	if (IS_ERR(dport)) {
		dev_dbg(dport_dev, "failed to add RCH dport to %s: %ld\n",
			dev_name(&port->dev), PTR_ERR(dport));
	} else {
		dev_dbg(dport_dev, "RCH dport added to %s\n",
			dev_name(&port->dev));
	}

	return dport;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_rch_dport, CXL);

static int add_ep(struct cxl_ep *new)
{
	struct cxl_port *port = new->dport->port;
	int rc;

	device_lock(&port->dev);
	if (port->dead) {
		device_unlock(&port->dev);
		return -ENXIO;
	}
	rc = xa_insert(&port->endpoints, (unsigned long)new->ep, new,
		       GFP_KERNEL);
	device_unlock(&port->dev);

	return rc;
}

/**
 * cxl_add_ep - register an endpoint's interest in a port
 * @dport: the dport that routes to @ep_dev
 * @ep_dev: device representing the endpoint
 *
 * Intermediate CXL ports are scanned based on the arrival of endpoints.
 * When those endpoints depart the port can be destroyed once all
 * endpoints that care about that port have been removed.
 */
static int cxl_add_ep(struct cxl_dport *dport, struct device *ep_dev)
{
	struct cxl_ep *ep;
	int rc;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	ep->ep = get_device(ep_dev);
	ep->dport = dport;

	rc = add_ep(ep);
	if (rc)
		cxl_ep_release(ep);
	return rc;
}

struct cxl_find_port_ctx {
	const struct device *dport_dev;
	const struct cxl_port *parent_port;
	struct cxl_dport **dport;
};

static int match_port_by_dport(struct device *dev, const void *data)
{
	const struct cxl_find_port_ctx *ctx = data;
	struct cxl_dport *dport;
	struct cxl_port *port;

	if (!is_cxl_port(dev))
		return 0;
	if (ctx->parent_port && dev->parent != &ctx->parent_port->dev)
		return 0;

	port = to_cxl_port(dev);
	dport = cxl_find_dport_by_dev(port, ctx->dport_dev);
	if (ctx->dport)
		*ctx->dport = dport;
	return dport != NULL;
}

static struct cxl_port *__find_cxl_port(struct cxl_find_port_ctx *ctx)
{
	struct device *dev;

	if (!ctx->dport_dev)
		return NULL;

	dev = bus_find_device(&cxl_bus_type, NULL, ctx, match_port_by_dport);
	if (dev)
		return to_cxl_port(dev);
	return NULL;
}

static struct cxl_port *find_cxl_port(struct device *dport_dev,
				      struct cxl_dport **dport)
{
	struct cxl_find_port_ctx ctx = {
		.dport_dev = dport_dev,
		.dport = dport,
	};
	struct cxl_port *port;

	port = __find_cxl_port(&ctx);
	return port;
}

static struct cxl_port *find_cxl_port_at(struct cxl_port *parent_port,
					 struct device *dport_dev,
					 struct cxl_dport **dport)
{
	struct cxl_find_port_ctx ctx = {
		.dport_dev = dport_dev,
		.parent_port = parent_port,
		.dport = dport,
	};
	struct cxl_port *port;

	port = __find_cxl_port(&ctx);
	return port;
}

/*
 * All users of grandparent() are using it to walk PCIe-like switch port
 * hierarchy. A PCIe switch is composed of a bridge device representing the
 * upstream switch port and N bridges representing downstream switch ports. When
 * bridges stack, the grandparent of a downstream switch port is another
 * downstream switch port in the immediate ancestor switch.
 */
static struct device *grandparent(struct device *dev)
{
	if (dev && dev->parent)
		return dev->parent->parent;
	return NULL;
}
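
/*
 * Illustrative walk (a sketch): for a cxl_memdev, the parent of
 * &cxlmd->dev is the PCIe endpoint device, so grandparent(&cxlmd->dev)
 * is the Root Port / downstream port bridge above that endpoint, which
 * is how cxl_mem_find_port() below locates the registered dport.
 */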

static struct device *endpoint_host(struct cxl_port *endpoint)
{
	struct cxl_port *port = to_cxl_port(endpoint->dev.parent);

	if (is_cxl_root(port))
		return port->uport_dev;
	return &port->dev;
}

static void delete_endpoint(void *data)
{
	struct cxl_memdev *cxlmd = data;
	struct cxl_port *endpoint = cxlmd->endpoint;
	struct device *host = endpoint_host(endpoint);

	device_lock(host);
	if (host->driver && !endpoint->dead) {
		devm_release_action(host, cxl_unlink_parent_dport, endpoint);
		devm_release_action(host, cxl_unlink_uport, endpoint);
		devm_release_action(host, unregister_port, endpoint);
	}
	cxlmd->endpoint = NULL;
	device_unlock(host);
	put_device(&endpoint->dev);
	put_device(host);
}

int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
{
	struct device *host = endpoint_host(endpoint);
	struct device *dev = &cxlmd->dev;

	get_device(host);
	get_device(&endpoint->dev);
	cxlmd->depth = endpoint->depth;
	return devm_add_action_or_reset(dev, delete_endpoint, cxlmd);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, CXL);

/*
 * The natural end of life of a non-root 'cxl_port' is when its parent port goes
 * through a ->remove() event ("top-down" unregistration). The unnatural trigger
 * for a port to be unregistered is when all memdevs beneath that port have gone
 * through ->remove(). This "bottom-up" removal selectively removes individual
 * child ports manually. This depends on devm_cxl_add_port() to not change its
 * devm action registration order, and for dports to have already been
 * destroyed by reap_dports().
 */
static void delete_switch_port(struct cxl_port *port)
{
	devm_release_action(port->dev.parent, cxl_unlink_parent_dport, port);
	devm_release_action(port->dev.parent, cxl_unlink_uport, port);
	devm_release_action(port->dev.parent, unregister_port, port);
}

static void reap_dports(struct cxl_port *port)
{
	struct cxl_dport *dport;
	unsigned long index;

	device_lock_assert(&port->dev);

	xa_for_each(&port->dports, index, dport) {
		devm_release_action(&port->dev, cxl_dport_unlink, dport);
		devm_release_action(&port->dev, cxl_dport_remove, dport);
		devm_kfree(&port->dev, dport);
	}
}

struct detach_ctx {
	struct cxl_memdev *cxlmd;
	int depth;
};

static int port_has_memdev(struct device *dev, const void *data)
{
	const struct detach_ctx *ctx = data;
	struct cxl_port *port;

	if (!is_cxl_port(dev))
		return 0;

	port = to_cxl_port(dev);
	if (port->depth != ctx->depth)
		return 0;

	return !!cxl_ep_load(port, ctx->cxlmd);
}

static void cxl_detach_ep(void *data)
{
	struct cxl_memdev *cxlmd = data;

	for (int i = cxlmd->depth - 1; i >= 1; i--) {
		struct cxl_port *port, *parent_port;
		struct detach_ctx ctx = {
			.cxlmd = cxlmd,
			.depth = i,
		};
		struct device *dev;
		struct cxl_ep *ep;
		bool died = false;

		dev = bus_find_device(&cxl_bus_type, NULL, &ctx,
				      port_has_memdev);
		if (!dev)
			continue;
		port = to_cxl_port(dev);

		parent_port = to_cxl_port(port->dev.parent);
		device_lock(&parent_port->dev);
		device_lock(&port->dev);
		ep = cxl_ep_load(port, cxlmd);
		dev_dbg(&cxlmd->dev, "disconnect %s from %s\n",
			ep ? dev_name(ep->ep) : "", dev_name(&port->dev));
		cxl_ep_remove(port, ep);
		if (ep && !port->dead && xa_empty(&port->endpoints) &&
		    !is_cxl_root(parent_port) && parent_port->dev.driver) {
			/*
			 * This was the last ep attached to a dynamically
			 * enumerated port. Block new cxl_add_ep() and garbage
			 * collect the port.
			 */
			died = true;
			port->dead = true;
			reap_dports(port);
		}
		device_unlock(&port->dev);

		if (died) {
			dev_dbg(&cxlmd->dev, "delete %s\n",
				dev_name(&port->dev));
			delete_switch_port(port);
		}
		put_device(&port->dev);
		device_unlock(&parent_port->dev);
	}
}

static resource_size_t find_component_registers(struct device *dev)
{
	struct cxl_register_map map;
	struct pci_dev *pdev;

	/*
	 * Theoretically, CXL component registers can be hosted on a
	 * non-PCI device; in practice, only cxl_test hits this case.
	 */
	if (!dev_is_pci(dev))
		return CXL_RESOURCE_NONE;

	pdev = to_pci_dev(dev);

	cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
	return map.resource;
}

static int add_port_attach_ep(struct cxl_memdev *cxlmd,
			      struct device *uport_dev,
			      struct device *dport_dev)
{
	struct device *dparent = grandparent(dport_dev);
	struct cxl_port *port, *parent_port = NULL;
	struct cxl_dport *dport, *parent_dport;
	resource_size_t component_reg_phys;
	int rc;

	if (!dparent) {
		/*
		 * The iteration reached the topology root without finding the
		 * CXL-root 'cxl_port' on a previous iteration; fail for now to
		 * be re-probed after the platform driver attaches.
		 */
		dev_dbg(&cxlmd->dev, "%s is a root dport\n",
			dev_name(dport_dev));
		return -ENXIO;
	}

	parent_port = find_cxl_port(dparent, &parent_dport);
	if (!parent_port) {
		/* iterate to create this parent_port */
		return -EAGAIN;
	}

	device_lock(&parent_port->dev);
	if (!parent_port->dev.driver) {
		dev_warn(&cxlmd->dev,
			 "port %s:%s disabled, failed to enumerate CXL.mem\n",
			 dev_name(&parent_port->dev), dev_name(uport_dev));
		port = ERR_PTR(-ENXIO);
		goto out;
	}

	port = find_cxl_port_at(parent_port, dport_dev, &dport);
	if (!port) {
		component_reg_phys = find_component_registers(uport_dev);
		port = devm_cxl_add_port(&parent_port->dev, uport_dev,
					 component_reg_phys, parent_dport);
		/* retry find to pick up the new dport information */
		if (!IS_ERR(port))
			port = find_cxl_port_at(parent_port, dport_dev, &dport);
	}
out:
	device_unlock(&parent_port->dev);

	if (IS_ERR(port))
		rc = PTR_ERR(port);
	else {
		dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
			dev_name(&port->dev), dev_name(port->uport_dev));
		rc = cxl_add_ep(dport, &cxlmd->dev);
		if (rc == -EBUSY) {
			/*
			 * "can't" happen, but this error code means
			 * something to the caller, so translate it.
			 */
			rc = -ENXIO;
		}
		put_device(&port->dev);
	}

	put_device(&parent_port->dev);
	return rc;
}

int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd)
{
	struct device *dev = &cxlmd->dev;
	struct device *iter;
	int rc;

	/*
	 * Skip intermediate port enumeration in the RCH case, there
	 * are no ports in between a host bridge and an endpoint.
	 */
	if (cxlmd->cxlds->rcd)
		return 0;

	rc = devm_add_action_or_reset(&cxlmd->dev, cxl_detach_ep, cxlmd);
	if (rc)
		return rc;

	/*
	 * Scan for and add all cxl_ports in this device's ancestry.
	 * Repeat until no more ports are added. Abort if a port add
	 * attempt fails.
	 */
retry:
	for (iter = dev; iter; iter = grandparent(iter)) {
		struct device *dport_dev = grandparent(iter);
		struct device *uport_dev;
		struct cxl_dport *dport;
		struct cxl_port *port;

		/*
		 * The terminal "grandparent" in PCI is NULL and @platform_bus
		 * for platform devices
		 */
		if (!dport_dev || dport_dev == &platform_bus)
			return 0;

		uport_dev = dport_dev->parent;
		if (!uport_dev) {
			dev_warn(dev, "at %s no parent for dport: %s\n",
				 dev_name(iter), dev_name(dport_dev));
			return -ENXIO;
		}

		dev_dbg(dev, "scan: iter: %s dport_dev: %s parent: %s\n",
			dev_name(iter), dev_name(dport_dev),
			dev_name(uport_dev));
		port = find_cxl_port(dport_dev, &dport);
		if (port) {
			dev_dbg(&cxlmd->dev,
				"found already registered port %s:%s\n",
				dev_name(&port->dev),
				dev_name(port->uport_dev));
			rc = cxl_add_ep(dport, &cxlmd->dev);

			/*
			 * If the endpoint already exists in the port's list,
			 * that's ok, it was added on a previous pass.
			 * Otherwise, retry in add_port_attach_ep() after taking
			 * the parent_port lock as the current port may be being
			 * reaped.
			 */
			if (rc && rc != -EBUSY) {
				put_device(&port->dev);
				return rc;
			}

			/* Any more ports to add between this one and the root? */
			if (!dev_is_cxl_root_child(&port->dev)) {
				put_device(&port->dev);
				continue;
			}

			put_device(&port->dev);
			return 0;
		}

		rc = add_port_attach_ep(cxlmd, uport_dev, dport_dev);
		/* port missing, try to add parent */
		if (rc == -EAGAIN)
			continue;
		/* failed to add ep or port */
		if (rc)
			return rc;
		/* port added, new descendants possible, start over */
		goto retry;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_ports, CXL);

struct cxl_port *cxl_pci_find_port(struct pci_dev *pdev,
				   struct cxl_dport **dport)
{
	return find_cxl_port(pdev->dev.parent, dport);
}
EXPORT_SYMBOL_NS_GPL(cxl_pci_find_port, CXL);

struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
				   struct cxl_dport **dport)
{
	return find_cxl_port(grandparent(&cxlmd->dev), dport);
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, CXL);

static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
				    struct cxl_port *port, int *target_map)
{
	int i;

	if (!target_map)
		return 0;

	device_lock_assert(&port->dev);

	if (xa_empty(&port->dports))
		return -EINVAL;

	guard(rwsem_write)(&cxl_region_rwsem);
	for (i = 0; i < cxlsd->cxld.interleave_ways; i++) {
		struct cxl_dport *dport = find_dport(port, target_map[i]);

		if (!dport)
			return -ENXIO;
		cxlsd->target[i] = dport;
	}

	return 0;
}

static struct lock_class_key cxl_decoder_key;

/**
 * cxl_decoder_init - Common decoder setup / initialization
 * @port: owning port of this decoder
 * @cxld: common decoder properties to initialize
 *
 * A port may contain one or more decoders. Each of those decoders
 * enables some address space for CXL.mem utilization. A decoder is
 * expected to be configured by the caller before registering via
 * cxl_decoder_add().
 */
cxl_decoder_init(struct cxl_port * port,struct cxl_decoder * cxld)1748 static int cxl_decoder_init(struct cxl_port *port, struct cxl_decoder *cxld)
1749 {
1750 struct device *dev;
1751 int rc;
1752
1753 rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
1754 if (rc < 0)
1755 return rc;
1756
1757 /* need parent to stick around to release the id */
1758 get_device(&port->dev);
1759 cxld->id = rc;
1760
1761 dev = &cxld->dev;
1762 device_initialize(dev);
1763 lockdep_set_class(&dev->mutex, &cxl_decoder_key);
1764 device_set_pm_not_required(dev);
1765 dev->parent = &port->dev;
1766 dev->bus = &cxl_bus_type;
1767
1768 /* Pre initialize an "empty" decoder */
1769 cxld->interleave_ways = 1;
1770 cxld->interleave_granularity = PAGE_SIZE;
1771 cxld->target_type = CXL_DECODER_HOSTONLYMEM;
1772 cxld->hpa_range = (struct range) {
1773 .start = 0,
1774 .end = -1,
1775 };
1776
1777 return 0;
1778 }
1779
cxl_switch_decoder_init(struct cxl_port * port,struct cxl_switch_decoder * cxlsd,int nr_targets)1780 static int cxl_switch_decoder_init(struct cxl_port *port,
1781 struct cxl_switch_decoder *cxlsd,
1782 int nr_targets)
1783 {
1784 if (nr_targets > CXL_DECODER_MAX_INTERLEAVE)
1785 return -EINVAL;
1786
1787 cxlsd->nr_targets = nr_targets;
1788 return cxl_decoder_init(port, &cxlsd->cxld);
1789 }
1790
1791 /**
1792 * cxl_root_decoder_alloc - Allocate a root level decoder
1793 * @port: owning CXL root of this decoder
1794 * @nr_targets: static number of downstream targets
1795 *
1796 * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
1797 * 'CXL root' decoder is one that decodes from a top-level / static platform
1798 * firmware description of CXL resources into a CXL standard decode
1799 * topology.
1800 */
struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
						unsigned int nr_targets)
{
	struct cxl_root_decoder *cxlrd;
	struct cxl_switch_decoder *cxlsd;
	struct cxl_decoder *cxld;
	int rc;

	if (!is_cxl_root(port))
		return ERR_PTR(-EINVAL);

	cxlrd = kzalloc(struct_size(cxlrd, cxlsd.target, nr_targets),
			GFP_KERNEL);
	if (!cxlrd)
		return ERR_PTR(-ENOMEM);

	cxlsd = &cxlrd->cxlsd;
	rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
	if (rc) {
		kfree(cxlrd);
		return ERR_PTR(rc);
	}

	mutex_init(&cxlrd->range_lock);

	cxld = &cxlsd->cxld;
	cxld->dev.type = &cxl_decoder_root_type;
	/*
	 * cxl_root_decoder_release() special cases negative ids to
	 * detect memregion_alloc() failures.
	 */
	atomic_set(&cxlrd->region_id, -1);
	rc = memregion_alloc(GFP_KERNEL);
	if (rc < 0) {
		put_device(&cxld->dev);
		return ERR_PTR(rc);
	}

	atomic_set(&cxlrd->region_id, rc);
	cxlrd->qos_class = CXL_QOS_CLASS_INVALID;
	return cxlrd;
}
EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL);
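
/*
 * Example: a minimal sketch of how a platform driver might publish a
 * firmware-described CXL window as a root decoder. The root_port,
 * cfmws_base, cfmws_size, and target_map values are hypothetical
 * stand-ins for data a real caller (e.g. a cxl_acpi-like driver) would
 * derive from firmware tables.
 *
 *	struct cxl_root_decoder *cxlrd;
 *	struct cxl_decoder *cxld;
 *	int target_map[1] = { 0 };	// dport id of the host bridge
 *	int rc;
 *
 *	cxlrd = cxl_root_decoder_alloc(root_port, 1);
 *	if (IS_ERR(cxlrd))
 *		return PTR_ERR(cxlrd);
 *
 *	cxld = &cxlrd->cxlsd.cxld;
 *	cxld->hpa_range = (struct range) {
 *		.start = cfmws_base,
 *		.end = cfmws_base + cfmws_size - 1,
 *	};
 *	cxld->interleave_ways = 1;
 *	cxld->interleave_granularity = 256;
 *
 *	rc = cxl_decoder_add(cxld, target_map);
 *	if (rc)
 *		put_device(&cxld->dev);
 */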

/**
 * cxl_switch_decoder_alloc - Allocate a switch level decoder
 * @port: owning CXL switch port of this decoder
 * @nr_targets: max number of dynamically addressable downstream targets
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
 * 'switch' decoder is any decoder that can be enumerated by PCIe
 * topology and the HDM Decoder Capability. This includes the decoders
 * that sit between Switch Upstream Ports / Switch Downstream Ports and
 * Host Bridges / Root Ports.
 */
struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
						    unsigned int nr_targets)
{
	struct cxl_switch_decoder *cxlsd;
	struct cxl_decoder *cxld;
	int rc;

	if (is_cxl_root(port) || is_cxl_endpoint(port))
		return ERR_PTR(-EINVAL);

	cxlsd = kzalloc(struct_size(cxlsd, target, nr_targets), GFP_KERNEL);
	if (!cxlsd)
		return ERR_PTR(-ENOMEM);

	rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
	if (rc) {
		kfree(cxlsd);
		return ERR_PTR(rc);
	}

	cxld = &cxlsd->cxld;
	cxld->dev.type = &cxl_decoder_switch_type;
	return cxlsd;
}
EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL);
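
/*
 * Example: a sketch of sizing @nr_targets at allocation time. A real
 * enumeration path would typically take the target count from the HDM
 * Decoder Capability; approximating it with the number of dports
 * already registered on the port is an assumption made only for
 * illustration.
 *
 *	struct cxl_switch_decoder *cxlsd;
 *
 *	cxlsd = cxl_switch_decoder_alloc(port, port->nr_dports);
 *	if (IS_ERR(cxlsd))
 *		return PTR_ERR(cxlsd);
 */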

/**
 * cxl_endpoint_decoder_alloc - Allocate an endpoint decoder
 * @port: owning port of this decoder
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add()
 */
struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port)
{
	struct cxl_endpoint_decoder *cxled;
	struct cxl_decoder *cxld;
	int rc;

	if (!is_cxl_endpoint(port))
		return ERR_PTR(-EINVAL);

	cxled = kzalloc(sizeof(*cxled), GFP_KERNEL);
	if (!cxled)
		return ERR_PTR(-ENOMEM);

	cxled->pos = -1;
	cxld = &cxled->cxld;
	rc = cxl_decoder_init(port, cxld);
	if (rc) {
		kfree(cxled);
		return ERR_PTR(rc);
	}

	cxld->dev.type = &cxl_decoder_endpoint_type;
	return cxled;
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, CXL);
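
/*
 * Example: a sketch of instantiating an endpoint decoder. Endpoint
 * decoders carry no targets, so a NULL @target_map is passed to
 * cxl_decoder_add(). The endpoint_port name is hypothetical; the
 * put_device() on failure reflects that the decoder device was already
 * initialized by the allocation above.
 *
 *	struct cxl_endpoint_decoder *cxled;
 *	int rc;
 *
 *	cxled = cxl_endpoint_decoder_alloc(endpoint_port);
 *	if (IS_ERR(cxled))
 *		return PTR_ERR(cxled);
 *
 *	rc = cxl_decoder_add(&cxled->cxld, NULL);
 *	if (rc) {
 *		put_device(&cxled->cxld.dev);
 *		return rc;
 *	}
 */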

/**
 * cxl_decoder_add_locked - Add a decoder with targets
 * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
 * @target_map: A list of downstream ports that this decoder can direct memory
 *              traffic to. These numbers should correspond to the Port Number
 *              field in the PCIe Link Capabilities structure.
 *
 * Certain types of decoders may not have any targets. The main example of this
 * is an endpoint device. A more awkward example is a host bridge whose root
 * ports get hot added (technically possible, though unlikely).
 *
 * This is the locked variant of cxl_decoder_add().
 *
 * Context: Process context. Expects the device lock of the port that owns the
 * @cxld to be held.
 *
 * Return: Negative error code if the decoder wasn't properly configured; else
 * returns 0.
 */
int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	if (WARN_ON_ONCE(!cxld))
		return -EINVAL;

	if (WARN_ON_ONCE(IS_ERR(cxld)))
		return PTR_ERR(cxld);

	if (cxld->interleave_ways < 1)
		return -EINVAL;

	dev = &cxld->dev;

	port = to_cxl_port(cxld->dev.parent);
	if (!is_endpoint_decoder(dev)) {
		struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);

		/*
		 * A target-map lookup failure is only fatal for decoders
		 * that hardware has already enabled.
		 */
		rc = decoder_populate_targets(cxlsd, port, target_map);
		if (rc && (cxld->flags & CXL_DECODER_F_ENABLE)) {
			dev_err(&port->dev,
				"Failed to populate active decoder targets\n");
			return rc;
		}
	}

	rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
	if (rc)
		return rc;

	return device_add(dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL);
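
/*
 * Example: a sketch of building @target_map for a 2-way switch decoder,
 * with the owning port's device lock already held. The literal Port
 * Numbers are placeholders; real values come from the PCIe Link
 * Capabilities of the downstream ports being interleaved across.
 *
 *	int target_map[2] = { 0, 1 };
 *	int rc;
 *
 *	cxlsd->cxld.interleave_ways = 2;
 *	rc = cxl_decoder_add_locked(&cxlsd->cxld, target_map);
 */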

/**
 * cxl_decoder_add - Add a decoder with targets
 * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
 * @target_map: A list of downstream ports that this decoder can direct memory
 *              traffic to. These numbers should correspond to the Port Number
 *              field in the PCIe Link Capabilities structure.
 *
 * This is the unlocked variant of cxl_decoder_add_locked().
 * See cxl_decoder_add_locked().
 *
 * Context: Process context. Takes and releases the device lock of the port
 * that owns the @cxld.
 */
int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
{
	struct cxl_port *port;
	int rc;

	if (WARN_ON_ONCE(!cxld))
		return -EINVAL;

	if (WARN_ON_ONCE(IS_ERR(cxld)))
		return PTR_ERR(cxld);

	port = to_cxl_port(cxld->dev.parent);

	device_lock(&port->dev);
	rc = cxl_decoder_add_locked(cxld, target_map);
	device_unlock(&port->dev);

	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL);

static void cxld_unregister(void *dev)
{
	struct cxl_endpoint_decoder *cxled;

	if (is_endpoint_decoder(dev)) {
		cxled = to_cxl_endpoint_decoder(dev);
		cxl_decoder_kill_region(cxled);
	}

	device_unregister(dev);
}

int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld)
{
	return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_autoremove, CXL);
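
/*
 * Example: a sketch tying the decoder APIs together for a typical
 * lifecycle: allocate, register, then arrange automatic teardown when
 * @host (a hypothetical devm-managed device) is unbound. Note the
 * asymmetric error handling: a failed add is unwound with put_device()
 * because the decoder device was already initialized.
 *
 *	struct cxl_switch_decoder *cxlsd;
 *	int target_map[1] = { 0 };	// placeholder Port Number
 *	int rc;
 *
 *	cxlsd = cxl_switch_decoder_alloc(port, 1);
 *	if (IS_ERR(cxlsd))
 *		return PTR_ERR(cxlsd);
 *
 *	rc = cxl_decoder_add(&cxlsd->cxld, target_map);
 *	if (rc) {
 *		put_device(&cxlsd->cxld.dev);
 *		return rc;
 *	}
 *
 *	return cxl_decoder_autoremove(host, &cxlsd->cxld);
 */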

/**
 * __cxl_driver_register - register a driver for the cxl bus
 * @cxl_drv: cxl driver structure to attach
 * @owner: owning module/driver
 * @modname: KBUILD_MODNAME for parent driver
 */
int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
			  const char *modname)
{
	if (!cxl_drv->probe) {
		pr_debug("%s ->probe() must be specified\n", modname);
		return -EINVAL;
	}

	if (!cxl_drv->name) {
		pr_debug("%s ->name must be specified\n", modname);
		return -EINVAL;
	}

	if (!cxl_drv->id) {
		pr_debug("%s ->id must be specified\n", modname);
		return -EINVAL;
	}

	cxl_drv->drv.bus = &cxl_bus_type;
	cxl_drv->drv.owner = owner;
	cxl_drv->drv.mod_name = modname;
	cxl_drv->drv.name = cxl_drv->name;

	return driver_register(&cxl_drv->drv);
}
EXPORT_SYMBOL_NS_GPL(__cxl_driver_register, CXL);
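
/*
 * Example: a sketch of a minimal cxl bus driver. The my_port_probe()
 * body and driver name are illustrative only; drivers are normally
 * registered through the cxl_driver_register() / module_cxl_driver()
 * wrappers declared in cxl.h rather than by calling
 * __cxl_driver_register() directly.
 *
 *	static int my_port_probe(struct device *dev)
 *	{
 *		return 0;
 *	}
 *
 *	static struct cxl_driver my_port_driver = {
 *		.name = "my_cxl_port",
 *		.probe = my_port_probe,
 *		.id = CXL_DEVICE_PORT,
 *	};
 *	module_cxl_driver(my_port_driver);
 */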

void cxl_driver_unregister(struct cxl_driver *cxl_drv)
{
	driver_unregister(&cxl_drv->drv);
}
EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, CXL);

static int cxl_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT,
			      cxl_device_id(dev));
}

static int cxl_bus_match(struct device *dev, const struct device_driver *drv)
{
	return cxl_device_id(dev) == to_cxl_drv(drv)->id;
}

static int cxl_bus_probe(struct device *dev)
{
	int rc;

	rc = to_cxl_drv(dev->driver)->probe(dev);
	dev_dbg(dev, "probe: %d\n", rc);
	return rc;
}

static void cxl_bus_remove(struct device *dev)
{
	struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver);

	if (cxl_drv->remove)
		cxl_drv->remove(dev);
}

static struct workqueue_struct *cxl_bus_wq;

static void cxl_bus_rescan_queue(struct work_struct *w)
{
	int rc = bus_rescan_devices(&cxl_bus_type);

	pr_debug("CXL bus rescan result: %d\n", rc);
}

void cxl_bus_rescan(void)
{
	static DECLARE_WORK(rescan_work, cxl_bus_rescan_queue);

	queue_work(cxl_bus_wq, &rescan_work);
}
EXPORT_SYMBOL_NS_GPL(cxl_bus_rescan, CXL);

void cxl_bus_drain(void)
{
	drain_workqueue(cxl_bus_wq);
}
EXPORT_SYMBOL_NS_GPL(cxl_bus_drain, CXL);

bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
{
	return queue_work(cxl_bus_wq, &cxlmd->detach_work);
}
EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);

/* Add @latency to both the read and write latency of each coordinate */
static void add_latency(struct access_coordinate *c, long latency)
{
	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
		c[i].write_latency += latency;
		c[i].read_latency += latency;
	}
}

/* All four performance attributes must be non-zero to be usable */
static bool coordinates_valid(struct access_coordinate *c)
{
	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
		if (c[i].read_bandwidth && c[i].write_bandwidth &&
		    c[i].read_latency && c[i].write_latency)
			continue;
		return false;
	}

	return true;
}

/* Clamp each coordinate's bandwidth to the path minimum @bw */
static void set_min_bandwidth(struct access_coordinate *c, unsigned int bw)
{
	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
		c[i].write_bandwidth = min(c[i].write_bandwidth, bw);
		c[i].read_bandwidth = min(c[i].read_bandwidth, bw);
	}
}

static void set_access_coordinates(struct access_coordinate *out,
				   struct access_coordinate *in)
{
	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
		out[i] = in[i];
}

static bool parent_port_is_cxl_root(struct cxl_port *port)
{
	return is_cxl_root(to_cxl_port(port->dev.parent));
}

/**
 * cxl_endpoint_get_perf_coordinates - Retrieve performance numbers stored in
 *				       the dports of the CXL path
 * @port: endpoint cxl_port
 * @coord: output performance data
 *
 * Return: errno on failure, 0 on success.
 */
int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
				      struct access_coordinate *coord)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
	struct access_coordinate c[] = {
		{
			.read_bandwidth = UINT_MAX,
			.write_bandwidth = UINT_MAX,
		},
		{
			.read_bandwidth = UINT_MAX,
			.write_bandwidth = UINT_MAX,
		},
	};
	struct cxl_port *iter = port;
	struct cxl_dport *dport;
	struct pci_dev *pdev;
	struct device *dev;
	unsigned int bw;
	bool is_cxl_root;

	if (!is_cxl_endpoint(port))
		return -EINVAL;

	/*
	 * Skip the calculation for RCDs. The expectation is that the HMAT
	 * already covers the RCD case since RCHs do not support hotplug.
	 */
	if (cxlmd->cxlds->rcd)
		return 0;

	/*
	 * Exit the loop when the parent port of the current iter port is the
	 * cxl root. The iterative loop starts at the endpoint and gathers the
	 * latency of the CXL link from the current device/port to the
	 * connected downstream port each iteration.
	 */
	do {
		dport = iter->parent_dport;
		iter = to_cxl_port(iter->dev.parent);
		is_cxl_root = parent_port_is_cxl_root(iter);

		/*
		 * Root Ports do not carry CDAT, so they have no valid
		 * access_coordinate and need to be skipped.
		 */
		if (!is_cxl_root) {
			if (!coordinates_valid(dport->coord))
				return -EINVAL;
			cxl_coordinates_combine(c, c, dport->coord);
		}
		add_latency(c, dport->link_latency);
	} while (!is_cxl_root);

	dport = iter->parent_dport;
	/* Retrieve the Host Bridge coordinates */
	if (!coordinates_valid(dport->coord))
		return -EINVAL;
	cxl_coordinates_combine(c, c, dport->coord);

	dev = port->uport_dev->parent;
	if (!dev_is_pci(dev))
		return -ENODEV;

	/*
	 * Factor in the bandwidth available on the PCIe path, converting
	 * from Mb/s as reported by pcie_bandwidth_available() to MB/s.
	 */
	pdev = to_pci_dev(dev);
	bw = pcie_bandwidth_available(pdev, NULL, NULL, NULL);
	if (bw == 0)
		return -ENXIO;
	bw /= BITS_PER_BYTE;

	set_min_bandwidth(c, bw);
	set_access_coordinates(coord, c);

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_get_perf_coordinates, CXL);
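
/*
 * Example: a sketch of a hypothetical consumer in an endpoint port
 * driver, filling a local access_coordinate array for later QoS or
 * performance bookkeeping. ACCESS_COORDINATE_MAX sizes the array since
 * the function reports both locality classes.
 *
 *	struct access_coordinate coord[ACCESS_COORDINATE_MAX];
 *	int rc;
 *
 *	rc = cxl_endpoint_get_perf_coordinates(port, coord);
 *	if (rc)
 *		dev_dbg(&port->dev, "no perf coordinates: %d\n", rc);
 */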

/* for user tooling to ensure port disable work has completed */
static ssize_t flush_store(const struct bus_type *bus, const char *buf, size_t count)
{
	if (sysfs_streq(buf, "1")) {
		flush_workqueue(cxl_bus_wq);
		return count;
	}

	return -EINVAL;
}

static BUS_ATTR_WO(flush);
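
/*
 * Example: how user tooling synchronizes with pending port teardown
 * work via the attribute above (sysfs path follows from the "cxl" bus
 * name):
 *
 *	echo 1 > /sys/bus/cxl/flush
 */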

static struct attribute *cxl_bus_attributes[] = {
	&bus_attr_flush.attr,
	NULL,
};

static struct attribute_group cxl_bus_attribute_group = {
	.attrs = cxl_bus_attributes,
};

static const struct attribute_group *cxl_bus_attribute_groups[] = {
	&cxl_bus_attribute_group,
	NULL,
};

struct bus_type cxl_bus_type = {
	.name = "cxl",
	.uevent = cxl_bus_uevent,
	.match = cxl_bus_match,
	.probe = cxl_bus_probe,
	.remove = cxl_bus_remove,
	.bus_groups = cxl_bus_attribute_groups,
};
EXPORT_SYMBOL_NS_GPL(cxl_bus_type, CXL);

static struct dentry *cxl_debugfs;

struct dentry *cxl_debugfs_create_dir(const char *dir)
{
	return debugfs_create_dir(dir, cxl_debugfs);
}
EXPORT_SYMBOL_NS_GPL(cxl_debugfs_create_dir, CXL);
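
/*
 * Example: a sketch of a consumer creating a per-device directory under
 * the top-level "cxl" debugfs directory. Using dev_name(dev) for the
 * directory name and the my_fops file operations are assumptions made
 * for illustration.
 *
 *	struct dentry *dentry;
 *
 *	dentry = cxl_debugfs_create_dir(dev_name(dev));
 *	debugfs_create_file("state", 0400, dentry, dev, &my_fops);
 */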

static __init int cxl_core_init(void)
{
	int rc;

	cxl_debugfs = debugfs_create_dir("cxl", NULL);

	if (einj_cxl_is_initialized())
		debugfs_create_file("einj_types", 0400, cxl_debugfs, NULL,
				    &einj_cxl_available_error_type_fops);

	cxl_mbox_init();

	rc = cxl_memdev_init();
	if (rc)
		return rc;

	cxl_bus_wq = alloc_ordered_workqueue("cxl_port", 0);
	if (!cxl_bus_wq) {
		rc = -ENOMEM;
		goto err_wq;
	}

	rc = bus_register(&cxl_bus_type);
	if (rc)
		goto err_bus;

	rc = cxl_region_init();
	if (rc)
		goto err_region;

	return 0;

err_region:
	bus_unregister(&cxl_bus_type);
err_bus:
	destroy_workqueue(cxl_bus_wq);
err_wq:
	cxl_memdev_exit();
	return rc;
}

static void cxl_core_exit(void)
{
	cxl_region_exit();
	bus_unregister(&cxl_bus_type);
	destroy_workqueue(cxl_bus_wq);
	cxl_memdev_exit();
	debugfs_remove_recursive(cxl_debugfs);
}

subsys_initcall(cxl_core_init);
module_exit(cxl_core_exit);
MODULE_DESCRIPTION("CXL: Core Compute Express Link support");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);