xref: /linux/drivers/cxl/core/port.c (revision 22511e665eadc2c49021886d07aaef39db90ca82)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2020 Intel Corporation. All rights reserved. */
3 #include <linux/io-64-nonatomic-lo-hi.h>
4 #include <linux/workqueue.h>
5 #include <linux/device.h>
6 #include <linux/module.h>
7 #include <linux/pci.h>
8 #include <linux/slab.h>
9 #include <linux/idr.h>
10 #include <cxlmem.h>
11 #include <cxlpci.h>
12 #include <cxl.h>
13 #include "core.h"
14 
15 /**
16  * DOC: cxl core
17  *
18  * The CXL core provides a set of interfaces that can be consumed by CXL aware
19  * drivers. The interfaces allow for creation, modification, and destruction of
20  * regions, memory devices, ports, and decoders. CXL aware drivers must register
21  * with the CXL core via these interfaces in order to be able to participate in
22  * cross-device interleave coordination. The CXL core also establishes and
23  * maintains the bridge to the nvdimm subsystem.
24  *
25  * CXL core introduces sysfs hierarchy to control the devices that are
26  * instantiated by the core.
27  */
28 
29 static DEFINE_IDA(cxl_port_ida);
30 static DEFINE_XARRAY(cxl_root_buses);
31 
32 static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
33 			    char *buf)
34 {
35 	return sysfs_emit(buf, "%s\n", dev->type->name);
36 }
37 static DEVICE_ATTR_RO(devtype);
38 
/*
 * Map a CXL bus device to its device-id for modalias generation and
 * driver matching. Returns 0 for device types with no cxl_driver id.
 */
static int cxl_device_id(struct device *dev)
{
	if (dev->type == &cxl_nvdimm_bridge_type)
		return CXL_DEVICE_NVDIMM_BRIDGE;
	if (dev->type == &cxl_nvdimm_type)
		return CXL_DEVICE_NVDIMM;
	if (is_cxl_port(dev)) {
		/* root ports match a platform driver, all others cxl_port */
		if (is_cxl_root(to_cxl_port(dev)))
			return CXL_DEVICE_ROOT;
		return CXL_DEVICE_PORT;
	}
	if (is_cxl_memdev(dev))
		return CXL_DEVICE_MEMORY_EXPANDER;
	return 0;
}
54 
55 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
56 			     char *buf)
57 {
58 	return sysfs_emit(buf, CXL_MODALIAS_FMT "\n", cxl_device_id(dev));
59 }
60 static DEVICE_ATTR_RO(modalias);
61 
/* Attributes common to every device registered on the CXL bus */
static struct attribute *cxl_base_attributes[] = {
	&dev_attr_devtype.attr,
	&dev_attr_modalias.attr,
	NULL,
};

struct attribute_group cxl_base_attribute_group = {
	.attrs = cxl_base_attributes,
};
71 
72 static ssize_t start_show(struct device *dev, struct device_attribute *attr,
73 			  char *buf)
74 {
75 	struct cxl_decoder *cxld = to_cxl_decoder(dev);
76 	u64 start;
77 
78 	if (is_root_decoder(dev))
79 		start = cxld->platform_res.start;
80 	else
81 		start = cxld->decoder_range.start;
82 
83 	return sysfs_emit(buf, "%#llx\n", start);
84 }
85 static DEVICE_ATTR_ADMIN_RO(start);
86 
87 static ssize_t size_show(struct device *dev, struct device_attribute *attr,
88 			char *buf)
89 {
90 	struct cxl_decoder *cxld = to_cxl_decoder(dev);
91 	u64 size;
92 
93 	if (is_root_decoder(dev))
94 		size = resource_size(&cxld->platform_res);
95 	else
96 		size = range_len(&cxld->decoder_range);
97 
98 	return sysfs_emit(buf, "%#llx\n", size);
99 }
100 static DEVICE_ATTR_RO(size);
101 
/*
 * Generate a read-only sysfs attribute that emits "1" when @flag is set
 * in the decoder's flags, "0" otherwise.
 */
#define CXL_DECODER_FLAG_ATTR(name, flag)                            \
static ssize_t name##_show(struct device *dev,                       \
			   struct device_attribute *attr, char *buf) \
{                                                                    \
	struct cxl_decoder *cxld = to_cxl_decoder(dev);              \
                                                                     \
	return sysfs_emit(buf, "%s\n",                               \
			  (cxld->flags & (flag)) ? "1" : "0");       \
}                                                                    \
static DEVICE_ATTR_RO(name)

CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM);
CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM);
CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2);
CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3);
CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK);
118 
/*
 * sysfs: decoder target type, "accelerator" (type-2) or "expander"
 * (type-3). -ENXIO for an unrecognized target_type value.
 */
static ssize_t target_type_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	switch (cxld->target_type) {
	case CXL_DECODER_ACCELERATOR:
		return sysfs_emit(buf, "accelerator\n");
	case CXL_DECODER_EXPANDER:
		return sysfs_emit(buf, "expander\n");
	}
	return -ENXIO;
}
static DEVICE_ATTR_RO(target_type);
133 
/*
 * Emit the comma-separated list of dport ids in target order. Stops at
 * the first unset target slot. Returns the number of bytes written, or
 * a negative sysfs_emit_at() error. Caller synchronizes against target
 * updates (see target_list_show()'s seqlock retry loop).
 */
static ssize_t emit_target_list(struct cxl_decoder *cxld, char *buf)
{
	ssize_t offset = 0;
	int i, rc = 0;

	for (i = 0; i < cxld->interleave_ways; i++) {
		struct cxl_dport *dport = cxld->target[i];
		struct cxl_dport *next = NULL;

		if (!dport)
			break;

		/* only emit a separator when another target follows */
		if (i + 1 < cxld->interleave_ways)
			next = cxld->target[i + 1];
		rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id,
				   next ? "," : "");
		if (rc < 0)
			return rc;
		offset += rc;
	}

	return offset;
}
157 
/*
 * sysfs: the decoder's target list. The list is sampled under a seqlock
 * read-side loop so a concurrent decoder_populate_targets() forces a
 * re-read rather than emitting a torn list.
 */
static ssize_t target_list_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);
	ssize_t offset;
	unsigned int seq;
	int rc;

	do {
		seq = read_seqbegin(&cxld->target_lock);
		rc = emit_target_list(cxld, buf);
	} while (read_seqretry(&cxld->target_lock, seq));

	if (rc < 0)
		return rc;
	offset = rc;

	/* terminate the list with a newline */
	rc = sysfs_emit_at(buf, offset, "\n");
	if (rc < 0)
		return rc;

	return offset + rc;
}
static DEVICE_ATTR_RO(target_list);
182 
/* Attributes common to every decoder type */
static struct attribute *cxl_decoder_base_attrs[] = {
	&dev_attr_start.attr,
	&dev_attr_size.attr,
	&dev_attr_locked.attr,
	NULL,
};

static struct attribute_group cxl_decoder_base_attribute_group = {
	.attrs = cxl_decoder_base_attrs,
};

/* Root decoders additionally publish platform capabilities + targets */
static struct attribute *cxl_decoder_root_attrs[] = {
	&dev_attr_cap_pmem.attr,
	&dev_attr_cap_ram.attr,
	&dev_attr_cap_type2.attr,
	&dev_attr_cap_type3.attr,
	&dev_attr_target_list.attr,
	NULL,
};

static struct attribute_group cxl_decoder_root_attribute_group = {
	.attrs = cxl_decoder_root_attrs,
};

static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
	&cxl_decoder_root_attribute_group,
	&cxl_decoder_base_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};

/* Switch decoders publish a target type and a downstream target list */
static struct attribute *cxl_decoder_switch_attrs[] = {
	&dev_attr_target_type.attr,
	&dev_attr_target_list.attr,
	NULL,
};

static struct attribute_group cxl_decoder_switch_attribute_group = {
	.attrs = cxl_decoder_switch_attrs,
};

static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = {
	&cxl_decoder_switch_attribute_group,
	&cxl_decoder_base_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};

/* Endpoint decoders have no target list, only a target type */
static struct attribute *cxl_decoder_endpoint_attrs[] = {
	&dev_attr_target_type.attr,
	NULL,
};

static struct attribute_group cxl_decoder_endpoint_attribute_group = {
	.attrs = cxl_decoder_endpoint_attrs,
};

static const struct attribute_group *cxl_decoder_endpoint_attribute_groups[] = {
	&cxl_decoder_base_attribute_group,
	&cxl_decoder_endpoint_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};
246 
/*
 * Final put of a decoder device: return its id to the parent port's
 * ida, free the allocation, and drop the parent-port reference taken
 * at cxl_decoder_alloc() time.
 */
static void cxl_decoder_release(struct device *dev)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);
	struct cxl_port *port = to_cxl_port(dev->parent);

	ida_free(&port->decoder_ida, cxld->id);
	kfree(cxld);
	put_device(&port->dev);
}
256 
/*
 * The three decoder flavors share a release routine (which is also the
 * is_cxl_decoder() fingerprint) and differ only in visible attributes.
 */
static const struct device_type cxl_decoder_endpoint_type = {
	.name = "cxl_decoder_endpoint",
	.release = cxl_decoder_release,
	.groups = cxl_decoder_endpoint_attribute_groups,
};

static const struct device_type cxl_decoder_switch_type = {
	.name = "cxl_decoder_switch",
	.release = cxl_decoder_release,
	.groups = cxl_decoder_switch_attribute_groups,
};

static const struct device_type cxl_decoder_root_type = {
	.name = "cxl_decoder_root",
	.release = cxl_decoder_release,
	.groups = cxl_decoder_root_attribute_groups,
};
274 
/* True if @dev is an endpoint (memdev-hosted) decoder */
static bool is_endpoint_decoder(struct device *dev)
{
	return dev->type == &cxl_decoder_endpoint_type;
}
279 
/* True if @dev is a platform (CXL-root hosted) decoder */
bool is_root_decoder(struct device *dev)
{
	return dev->type == &cxl_decoder_root_type;
}
EXPORT_SYMBOL_NS_GPL(is_root_decoder, CXL);
285 
/* Any decoder flavor is identified by the shared release routine */
bool is_cxl_decoder(struct device *dev)
{
	return dev->type && dev->type->release == cxl_decoder_release;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_decoder, CXL);
291 
/* Cast @dev to its containing cxl_decoder, or NULL (+WARN) on misuse */
struct cxl_decoder *to_cxl_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, dev->type->release != cxl_decoder_release,
			  "not a cxl_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_decoder, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, CXL);
300 
/*
 * Unlink @ep from its port's endpoint list, drop the endpoint device
 * reference taken in cxl_add_ep(), and free it. NULL-tolerant so
 * callers can pass a failed find_ep() result directly.
 */
static void cxl_ep_release(struct cxl_ep *ep)
{
	if (!ep)
		return;
	list_del(&ep->list);
	put_device(ep->ep);
	kfree(ep);
}
309 
/*
 * Final put of a port device: reap any endpoints still registered
 * against it, return its id, and free it. The device lock is taken to
 * satisfy lock assertions in the endpoint list manipulation helpers.
 */
static void cxl_port_release(struct device *dev)
{
	struct cxl_port *port = to_cxl_port(dev);
	struct cxl_ep *ep, *_e;

	cxl_device_lock(dev);
	list_for_each_entry_safe(ep, _e, &port->endpoints, list)
		cxl_ep_release(ep);
	cxl_device_unlock(dev);
	ida_free(&cxl_port_ida, port->id);
	kfree(port);
}
322 
/* Ports expose only the base (devtype/modalias) attributes */
static const struct attribute_group *cxl_port_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

static const struct device_type cxl_port_type = {
	.name = "cxl_port",
	.release = cxl_port_release,
	.groups = cxl_port_attribute_groups,
};
333 
/* True if @dev was created by cxl_port_alloc() */
bool is_cxl_port(struct device *dev)
{
	return dev->type == &cxl_port_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_port, CXL);
339 
/* Cast @dev to its containing cxl_port, or NULL (+WARN) on misuse */
struct cxl_port *to_cxl_port(struct device *dev)
{
	if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type,
			  "not a cxl_port device\n"))
		return NULL;
	return container_of(dev, struct cxl_port, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_port, CXL);
348 
/*
 * devm release action for a port registered via devm_cxl_add_port().
 * Asserts the lock context the unregistration must run under, clears
 * ->uport (the marker delete_endpoint() uses to detect an already
 * torn-down port), and unregisters the device.
 */
static void unregister_port(void *_port)
{
	struct cxl_port *port = _port;
	struct cxl_port *parent;
	struct device *lock_dev;

	if (is_cxl_root(port))
		parent = NULL;
	else
		parent = to_cxl_port(port->dev.parent);

	/*
	 * CXL root port's and the first level of ports are unregistered
	 * under the platform firmware device lock, all other ports are
	 * unregistered while holding their parent port lock.
	 */
	if (!parent)
		lock_dev = port->uport;
	else if (is_cxl_root(parent))
		lock_dev = parent->uport;
	else
		lock_dev = &parent->dev;

	device_lock_assert(lock_dev);
	port->uport = NULL;
	device_unregister(&port->dev);
}
376 
/* devm release action: drop the port's "uport" symlink */
static void cxl_unlink_uport(void *_port)
{
	struct cxl_port *port = _port;

	sysfs_remove_link(&port->dev.kobj, "uport");
}
383 
384 static int devm_cxl_link_uport(struct device *host, struct cxl_port *port)
385 {
386 	int rc;
387 
388 	rc = sysfs_create_link(&port->dev.kobj, &port->uport->kobj, "uport");
389 	if (rc)
390 		return rc;
391 	return devm_add_action_or_reset(host, cxl_unlink_uport, port);
392 }
393 
/*
 * Allocate and initialize (but do not add) a cxl_port device. On
 * success the port is device_initialize()d; callers unwind with
 * put_device() so cxl_port_release() performs the teardown.
 */
static struct cxl_port *cxl_port_alloc(struct device *uport,
				       resource_size_t component_reg_phys,
				       struct cxl_port *parent_port)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	rc = ida_alloc(&cxl_port_ida, GFP_KERNEL);
	if (rc < 0)
		goto err;
	port->id = rc;

	/*
	 * The top-level cxl_port "cxl_root" does not have a cxl_port as
	 * its parent and it does not have any corresponding component
	 * registers as its decode is described by a fixed platform
	 * description.
	 */
	dev = &port->dev;
	if (parent_port)
		dev->parent = &parent_port->dev;
	else
		dev->parent = uport;

	port->uport = uport;
	port->component_reg_phys = component_reg_phys;
	ida_init(&port->decoder_ida);
	INIT_LIST_HEAD(&port->dports);
	INIT_LIST_HEAD(&port->endpoints);

	device_initialize(dev);
	device_set_pm_not_required(dev);
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_port_type;

	return port;

err:
	kfree(port);
	return ERR_PTR(rc);
}
440 
/**
 * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
 * @host: host device for devm operations
 * @uport: "physical" device implementing this upstream port
 * @component_reg_phys: (optional) for configurable cxl_port instances
 * @parent_port: next hop up in the CXL memory decode hierarchy
 *
 * Returns the added port or an ERR_PTR(). Note that the registration
 * order of the devm actions (unregister_port then cxl_unlink_uport) is
 * relied upon by delete_switch_port() for bottom-up teardown.
 */
struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
				   resource_size_t component_reg_phys,
				   struct cxl_port *parent_port)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	port = cxl_port_alloc(uport, component_reg_phys, parent_port);
	if (IS_ERR(port))
		return port;

	if (parent_port)
		port->depth = parent_port->depth + 1;
	dev = &port->dev;
	/* name reflects the port's position in the topology */
	if (is_cxl_memdev(uport))
		rc = dev_set_name(dev, "endpoint%d", port->id);
	else if (parent_port)
		rc = dev_set_name(dev, "port%d", port->id);
	else
		rc = dev_set_name(dev, "root%d", port->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(host, unregister_port, port);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_cxl_link_uport(host, port);
	if (rc)
		return ERR_PTR(rc);

	return port;

err:
	/* the device is initialized, release via put_device() */
	put_device(dev);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL);
491 
/*
 * Resolve the pci_bus downstream of @port's uport: the subordinate bus
 * for a PCI uport, or a bus registered via devm_cxl_register_pci_bus()
 * for non-PCI (e.g. platform firmware / cxl_test) uports.
 */
struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port)
{
	/* There is no pci_bus associated with a CXL platform-root port */
	if (is_cxl_root(port))
		return NULL;

	if (dev_is_pci(port->uport)) {
		struct pci_dev *pdev = to_pci_dev(port->uport);

		return pdev->subordinate;
	}

	return xa_load(&cxl_root_buses, (unsigned long)port->uport);
}
EXPORT_SYMBOL_NS_GPL(cxl_port_to_pci_bus, CXL);
507 
/* devm release action: drop the uport -> pci_bus registration */
static void unregister_pci_bus(void *uport)
{
	xa_erase(&cxl_root_buses, (unsigned long)uport);
}
512 
513 int devm_cxl_register_pci_bus(struct device *host, struct device *uport,
514 			      struct pci_bus *bus)
515 {
516 	int rc;
517 
518 	if (dev_is_pci(uport))
519 		return -EINVAL;
520 
521 	rc = xa_insert(&cxl_root_buses, (unsigned long)uport, bus, GFP_KERNEL);
522 	if (rc)
523 		return rc;
524 	return devm_add_action_or_reset(host, unregister_pci_bus, uport);
525 }
526 EXPORT_SYMBOL_NS_GPL(devm_cxl_register_pci_bus, CXL);
527 
528 static bool dev_is_cxl_root_child(struct device *dev)
529 {
530 	struct cxl_port *port, *parent;
531 
532 	if (!is_cxl_port(dev))
533 		return false;
534 
535 	port = to_cxl_port(dev);
536 	if (is_cxl_root(port))
537 		return false;
538 
539 	parent = to_cxl_port(port->dev.parent);
540 	if (is_cxl_root(parent))
541 		return true;
542 
543 	return false;
544 }
545 
/* Find a 2nd level CXL port that has a dport that is an ancestor of @match */
static int match_root_child(struct device *dev, const void *match)
{
	const struct device *iter = NULL;
	struct cxl_dport *dport;
	struct cxl_port *port;

	if (!dev_is_cxl_root_child(dev))
		return 0;

	port = to_cxl_port(dev);
	/* hold the port lock to stabilize the dport list during the walk */
	cxl_device_lock(dev);
	list_for_each_entry(dport, &port->dports, list) {
		/* walk @match's ancestry looking for this dport */
		iter = match;
		while (iter) {
			if (iter == dport->dport)
				goto out;
			iter = iter->parent;
		}
	}
out:
	cxl_device_unlock(dev);

	/* iter is non-NULL only when an ancestor matched a dport */
	return !!iter;
}
571 
/*
 * Find the CXL root port above @dev by locating a root-child port with
 * a dport in @dev's ancestry. Returns a referenced root port (caller
 * must put_device(&root->dev)) or NULL.
 */
struct cxl_port *find_cxl_root(struct device *dev)
{
	struct device *port_dev;
	struct cxl_port *root;

	port_dev = bus_find_device(&cxl_bus_type, NULL, dev, match_root_child);
	if (!port_dev)
		return NULL;

	/* trade the root-child reference for a root reference */
	root = to_cxl_port(port_dev->parent);
	get_device(&root->dev);
	put_device(port_dev);
	return root;
}
EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL);
587 
/* Lookup a dport by port id; caller must hold the port's device lock */
static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{
	struct cxl_dport *dport;

	device_lock_assert(&port->dev);
	list_for_each_entry (dport, &port->dports, list)
		if (dport->port_id == id)
			return dport;
	return NULL;
}
598 
/*
 * Append @new to @port's dport list, rejecting duplicate port ids with
 * -EEXIST. Caller must hold the port's device lock.
 */
static int add_dport(struct cxl_port *port, struct cxl_dport *new)
{
	struct cxl_dport *dup;

	device_lock_assert(&port->dev);
	dup = find_dport(port, new->port_id);
	if (dup)
		dev_err(&port->dev,
			"unable to add dport%d-%s non-unique port id (%s)\n",
			new->port_id, dev_name(new->dport),
			dev_name(dup->dport));
	else
		list_add_tail(&new->list, &port->dports);

	return dup ? -EEXIST : 0;
}
615 
/*
 * Since root-level CXL dports cannot be enumerated by PCI they are not
 * enumerated by the common port driver that acquires the port lock over
 * dport add/remove. Instead, root dports are manually added by a
 * platform driver and cond_cxl_root_lock() is used to take the missing
 * port lock in that case.
 */
static void cond_cxl_root_lock(struct cxl_port *port)
{
	if (is_cxl_root(port))
		cxl_device_lock(&port->dev);
}

static void cond_cxl_root_unlock(struct cxl_port *port)
{
	if (is_cxl_root(port))
		cxl_device_unlock(&port->dev);
}
634 
/*
 * devm release action: drop the dport-device reference taken in
 * devm_cxl_add_dport() and unlink the dport from its port. Root ports
 * need the lock taken here, see cond_cxl_root_lock().
 */
static void cxl_dport_remove(void *data)
{
	struct cxl_dport *dport = data;
	struct cxl_port *port = dport->port;

	put_device(dport->dport);
	cond_cxl_root_lock(port);
	list_del(&dport->list);
	cond_cxl_root_unlock(port);
}
645 
/* devm release action: remove the port's "dport%d" symlink */
static void cxl_dport_unlink(void *data)
{
	struct cxl_dport *dport = data;
	struct cxl_port *port = dport->port;
	char link_name[CXL_TARGET_STRLEN];

	sprintf(link_name, "dport%d", dport->port_id);
	sysfs_remove_link(&port->dev.kobj, link_name);
}
655 
/**
 * devm_cxl_add_dport - append downstream port data to a cxl_port
 * @port: the cxl_port that references this dport
 * @dport_dev: firmware or PCI device representing the dport
 * @port_id: identifier for this dport in a decoder's target list
 * @component_reg_phys: optional location of CXL component registers
 *
 * Note that dports are appended to the devm release action's of the
 * either the port's host (for root ports), or the port itself (for
 * switch ports)
 *
 * Returns the new dport (devm-lifetime, do not free) or an ERR_PTR().
 */
struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
				     struct device *dport_dev, int port_id,
				     resource_size_t component_reg_phys)
{
	char link_name[CXL_TARGET_STRLEN];
	struct cxl_dport *dport;
	struct device *host;
	int rc;

	/* pick the devm context per the note above */
	if (is_cxl_root(port))
		host = port->uport;
	else
		host = &port->dev;

	/* devm actions on an unbound host would never run */
	if (!host->driver) {
		dev_WARN_ONCE(&port->dev, 1, "dport:%s bad devm context\n",
			      dev_name(dport_dev));
		return ERR_PTR(-ENXIO);
	}

	if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >=
	    CXL_TARGET_STRLEN)
		return ERR_PTR(-EINVAL);

	dport = devm_kzalloc(host, sizeof(*dport), GFP_KERNEL);
	if (!dport)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&dport->list);
	dport->dport = dport_dev;
	dport->port_id = port_id;
	dport->component_reg_phys = component_reg_phys;
	dport->port = port;

	cond_cxl_root_lock(port);
	rc = add_dport(port, dport);
	cond_cxl_root_unlock(port);
	if (rc)
		return ERR_PTR(rc);

	/* pin @dport_dev until cxl_dport_remove() */
	get_device(dport_dev);
	rc = devm_add_action_or_reset(host, cxl_dport_remove, dport);
	if (rc)
		return ERR_PTR(rc);

	rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_add_action_or_reset(host, cxl_dport_unlink, dport);
	if (rc)
		return ERR_PTR(rc);

	return dport;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, CXL);
723 
/* Lookup the ep for @ep_dev; caller must hold the port's device lock */
static struct cxl_ep *find_ep(struct cxl_port *port, struct device *ep_dev)
{
	struct cxl_ep *ep;

	device_lock_assert(&port->dev);
	list_for_each_entry(ep, &port->endpoints, list)
		if (ep->ep == ep_dev)
			return ep;
	return NULL;
}
734 
/*
 * Append @new to @port's endpoint list under the port lock. Fails with
 * -ENXIO if the port has been marked dead (being garbage collected by
 * cxl_detach_ep()), or -EEXIST if the endpoint is already registered.
 */
static int add_ep(struct cxl_port *port, struct cxl_ep *new)
{
	struct cxl_ep *dup;

	cxl_device_lock(&port->dev);
	if (port->dead) {
		cxl_device_unlock(&port->dev);
		return -ENXIO;
	}
	dup = find_ep(port, new->ep);
	if (!dup)
		list_add_tail(&new->list, &port->endpoints);
	cxl_device_unlock(&port->dev);

	return dup ? -EEXIST : 0;
}
751 
/**
 * cxl_add_ep - register an endpoint's interest in a port
 * @port: a port in the endpoint's topology ancestry
 * @ep_dev: device representing the endpoint
 *
 * Intermediate CXL ports are scanned based on the arrival of endpoints.
 * When those endpoints depart the port can be destroyed once all
 * endpoints that care about that port have been removed.
 */
static int cxl_add_ep(struct cxl_port *port, struct device *ep_dev)
{
	struct cxl_ep *ep;
	int rc;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	INIT_LIST_HEAD(&ep->list);
	ep->ep = get_device(ep_dev);

	rc = add_ep(port, ep);
	if (rc)
		/* drops the ep_dev reference and frees @ep */
		cxl_ep_release(ep);
	return rc;
}
778 
/* Search context for __find_cxl_port(); parent_port is optional */
struct cxl_find_port_ctx {
	const struct device *dport_dev;
	const struct cxl_port *parent_port;
};

/*
 * bus_find_device() match callback: a port matches when it hosts a
 * dport for ctx->dport_dev (and, when specified, is a child of
 * ctx->parent_port).
 */
static int match_port_by_dport(struct device *dev, const void *data)
{
	const struct cxl_find_port_ctx *ctx = data;
	struct cxl_port *port;

	if (!is_cxl_port(dev))
		return 0;
	if (ctx->parent_port && dev->parent != &ctx->parent_port->dev)
		return 0;

	port = to_cxl_port(dev);
	return cxl_find_dport_by_dev(port, ctx->dport_dev) != NULL;
}
797 
/*
 * Find the port matching @ctx. Returns a referenced port (caller must
 * put_device(&port->dev)) or NULL.
 */
static struct cxl_port *__find_cxl_port(struct cxl_find_port_ctx *ctx)
{
	struct device *dev;

	if (!ctx->dport_dev)
		return NULL;

	dev = bus_find_device(&cxl_bus_type, NULL, ctx, match_port_by_dport);
	if (dev)
		return to_cxl_port(dev);
	return NULL;
}
810 
/* Find the (referenced) port hosting a dport for @dport_dev */
static struct cxl_port *find_cxl_port(struct device *dport_dev)
{
	struct cxl_find_port_ctx ctx = {
		.dport_dev = dport_dev,
	};

	return __find_cxl_port(&ctx);
}

/* As find_cxl_port(), but restricted to children of @parent_port */
static struct cxl_port *find_cxl_port_at(struct cxl_port *parent_port,
					 struct device *dport_dev)
{
	struct cxl_find_port_ctx ctx = {
		.dport_dev = dport_dev,
		.parent_port = parent_port,
	};

	return __find_cxl_port(&ctx);
}
830 
831 /*
832  * All users of grandparent() are using it to walk PCIe-like swich port
833  * hierarchy. A PCIe switch is comprised of a bridge device representing the
834  * upstream switch port and N bridges representing downstream switch ports. When
835  * bridges stack the grand-parent of a downstream switch port is another
836  * downstream switch port in the immediate ancestor switch.
837  */
838 static struct device *grandparent(struct device *dev)
839 {
840 	if (dev && dev->parent)
841 		return dev->parent->parent;
842 	return NULL;
843 }
844 
/*
 * devm release action registered by cxl_endpoint_autoremove(): when the
 * memdev goes away, unregister its endpoint port under the parent port
 * lock, unless the parent is already unbound (in which case the
 * top-down teardown has handled it) or the endpoint is already gone
 * (->uport cleared by unregister_port()).
 */
static void delete_endpoint(void *data)
{
	struct cxl_memdev *cxlmd = data;
	struct cxl_port *endpoint = dev_get_drvdata(&cxlmd->dev);
	struct cxl_port *parent_port;
	struct device *parent;

	parent_port = cxl_mem_find_port(cxlmd);
	if (!parent_port)
		goto out;
	parent = &parent_port->dev;

	cxl_device_lock(parent);
	if (parent->driver && endpoint->uport) {
		devm_release_action(parent, cxl_unlink_uport, endpoint);
		devm_release_action(parent, unregister_port, endpoint);
	}
	cxl_device_unlock(parent);
	/* reference from cxl_mem_find_port() */
	put_device(parent);
out:
	/* reference taken in cxl_endpoint_autoremove() */
	put_device(&endpoint->dev);
}
867 
/*
 * Arrange for @endpoint to be unregistered when @cxlmd departs; pins
 * @endpoint until delete_endpoint() runs.
 */
int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
{
	struct device *dev = &cxlmd->dev;

	get_device(&endpoint->dev);
	dev_set_drvdata(dev, endpoint);
	return devm_add_action_or_reset(dev, delete_endpoint, cxlmd);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, CXL);
877 
/*
 * The natural end of life of a non-root 'cxl_port' is when its parent port goes
 * through a ->remove() event ("top-down" unregistration). The unnatural trigger
 * for a port to be unregistered is when all memdevs beneath that port have gone
 * through ->remove(). This "bottom-up" removal selectively removes individual
 * child ports manually. This depends on devm_cxl_add_port() to not change its
 * devm action registration order.
 */
static void delete_switch_port(struct cxl_port *port, struct list_head *dports)
{
	struct cxl_dport *dport, *_d;

	/* reap dports first, then the port itself */
	list_for_each_entry_safe(dport, _d, dports, list) {
		devm_release_action(&port->dev, cxl_dport_unlink, dport);
		devm_release_action(&port->dev, cxl_dport_remove, dport);
		devm_kfree(&port->dev, dport);
	}
	devm_release_action(port->dev.parent, cxl_unlink_uport, port);
	devm_release_action(port->dev.parent, unregister_port, port);
}
898 
/*
 * devm release action for a departing memdev: walk up the topology and
 * drop this endpoint's registration from each ancestor port. A port
 * whose last endpoint departs (and whose parent is not the root) is
 * marked dead and garbage collected via delete_switch_port().
 */
static void cxl_detach_ep(void *data)
{
	struct cxl_memdev *cxlmd = data;
	struct device *iter;

	for (iter = &cxlmd->dev; iter; iter = grandparent(iter)) {
		struct device *dport_dev = grandparent(iter);
		struct cxl_port *port, *parent_port;
		LIST_HEAD(reap_dports);
		struct cxl_ep *ep;

		if (!dport_dev)
			break;

		port = find_cxl_port(dport_dev);
		if (!port)
			continue;

		/* root ports are not reaped bottom-up */
		if (is_cxl_root(port)) {
			put_device(&port->dev);
			continue;
		}

		parent_port = to_cxl_port(port->dev.parent);
		cxl_device_lock(&parent_port->dev);
		if (!parent_port->dev.driver) {
			/*
			 * The bottom-up race to delete the port lost to a
			 * top-down port disable, give up here, because the
			 * parent_port ->remove() will have cleaned up all
			 * descendants.
			 */
			cxl_device_unlock(&parent_port->dev);
			put_device(&port->dev);
			continue;
		}

		cxl_device_lock(&port->dev);
		ep = find_ep(port, &cxlmd->dev);
		dev_dbg(&cxlmd->dev, "disconnect %s from %s\n",
			ep ? dev_name(ep->ep) : "", dev_name(&port->dev));
		cxl_ep_release(ep);
		if (ep && !port->dead && list_empty(&port->endpoints) &&
		    !is_cxl_root(parent_port)) {
			/*
			 * This was the last ep attached to a dynamically
			 * enumerated port. Block new cxl_add_ep() and garbage
			 * collect the port.
			 */
			port->dead = true;
			list_splice_init(&port->dports, &reap_dports);
		}
		cxl_device_unlock(&port->dev);

		if (!list_empty(&reap_dports)) {
			dev_dbg(&cxlmd->dev, "delete %s\n",
				dev_name(&port->dev));
			delete_switch_port(port, &reap_dports);
		}
		put_device(&port->dev);
		cxl_device_unlock(&parent_port->dev);
	}
}
962 
/*
 * Locate the CXL component register block for @dev, or
 * CXL_RESOURCE_NONE for non-PCI devices.
 */
static resource_size_t find_component_registers(struct device *dev)
{
	struct cxl_register_map map;
	struct pci_dev *pdev;

	/*
	 * Theoretically, CXL component registers can be hosted on a
	 * non-PCI device, in practice, only cxl_test hits this case.
	 */
	if (!dev_is_pci(dev))
		return CXL_RESOURCE_NONE;

	pdev = to_pci_dev(dev);

	cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
	return cxl_regmap_to_base(pdev, &map);
}
980 
/*
 * Add (or find) the port at @dport_dev under its parent port and
 * register @cxlmd's endpoint interest in it. Returns -EAGAIN when the
 * parent port itself must be created first (caller iterates), -ENXIO
 * when enumeration cannot proceed.
 */
static int add_port_attach_ep(struct cxl_memdev *cxlmd,
			      struct device *uport_dev,
			      struct device *dport_dev)
{
	struct device *dparent = grandparent(dport_dev);
	struct cxl_port *port, *parent_port = NULL;
	resource_size_t component_reg_phys;
	int rc;

	if (!dparent) {
		/*
		 * The iteration reached the topology root without finding the
		 * CXL-root 'cxl_port' on a previous iteration, fail for now to
		 * be re-probed after platform driver attaches.
		 */
		dev_dbg(&cxlmd->dev, "%s is a root dport\n",
			dev_name(dport_dev));
		return -ENXIO;
	}

	parent_port = find_cxl_port(dparent);
	if (!parent_port) {
		/* iterate to create this parent_port */
		return -EAGAIN;
	}

	/* hold the parent port lock so it cannot disable concurrently */
	cxl_device_lock(&parent_port->dev);
	if (!parent_port->dev.driver) {
		dev_warn(&cxlmd->dev,
			 "port %s:%s disabled, failed to enumerate CXL.mem\n",
			 dev_name(&parent_port->dev), dev_name(uport_dev));
		port = ERR_PTR(-ENXIO);
		goto out;
	}

	port = find_cxl_port_at(parent_port, dport_dev);
	if (!port) {
		component_reg_phys = find_component_registers(uport_dev);
		port = devm_cxl_add_port(&parent_port->dev, uport_dev,
					 component_reg_phys, parent_port);
		/* match the reference find_cxl_port_at() would have taken */
		if (!IS_ERR(port))
			get_device(&port->dev);
	}
out:
	cxl_device_unlock(&parent_port->dev);

	if (IS_ERR(port))
		rc = PTR_ERR(port);
	else {
		dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
			dev_name(&port->dev), dev_name(port->uport));
		rc = cxl_add_ep(port, &cxlmd->dev);
		if (rc == -EEXIST) {
			/*
			 * "can't" happen, but this error code means
			 * something to the caller, so translate it.
			 */
			rc = -ENXIO;
		}
		put_device(&port->dev);
	}

	put_device(&parent_port->dev);
	return rc;
}
1046 
/*
 * Enumerate every cxl_port between @cxlmd and the CXL root, registering
 * the memdev's endpoint interest in each. Teardown (cxl_detach_ep) is
 * devm-registered up front so partially enumerated state is unwound
 * when the memdev departs.
 */
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd)
{
	struct device *dev = &cxlmd->dev;
	struct device *iter;
	int rc;

	rc = devm_add_action_or_reset(&cxlmd->dev, cxl_detach_ep, cxlmd);
	if (rc)
		return rc;

	/*
	 * Scan for and add all cxl_ports in this device's ancestry.
	 * Repeat until no more ports are added. Abort if a port add
	 * attempt fails.
	 */
retry:
	for (iter = dev; iter; iter = grandparent(iter)) {
		struct device *dport_dev = grandparent(iter);
		struct device *uport_dev;
		struct cxl_port *port;

		/* reached the top of the topology, enumeration complete */
		if (!dport_dev)
			return 0;

		uport_dev = dport_dev->parent;
		if (!uport_dev) {
			dev_warn(dev, "at %s no parent for dport: %s\n",
				 dev_name(iter), dev_name(dport_dev));
			return -ENXIO;
		}

		dev_dbg(dev, "scan: iter: %s dport_dev: %s parent: %s\n",
			dev_name(iter), dev_name(dport_dev),
			dev_name(uport_dev));
		port = find_cxl_port(dport_dev);
		if (port) {
			dev_dbg(&cxlmd->dev,
				"found already registered port %s:%s\n",
				dev_name(&port->dev), dev_name(port->uport));
			rc = cxl_add_ep(port, &cxlmd->dev);

			/*
			 * If the endpoint already exists in the port's list,
			 * that's ok, it was added on a previous pass.
			 * Otherwise, retry in add_port_attach_ep() after taking
			 * the parent_port lock as the current port may be being
			 * reaped.
			 */
			if (rc && rc != -EEXIST) {
				put_device(&port->dev);
				return rc;
			}

			/* Any more ports to add between this one and the root? */
			if (!dev_is_cxl_root_child(&port->dev)) {
				put_device(&port->dev);
				continue;
			}

			put_device(&port->dev);
			return 0;
		}

		rc = add_port_attach_ep(cxlmd, uport_dev, dport_dev);
		/* port missing, try to add parent */
		if (rc == -EAGAIN)
			continue;
		/* failed to add ep or port */
		if (rc)
			return rc;
		/* port added, new descendants possible, start over */
		goto retry;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_ports, CXL);
1124 
1125 struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd)
1126 {
1127 	return find_cxl_port(grandparent(&cxlmd->dev));
1128 }
1129 EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, CXL);
1130 
1131 struct cxl_dport *cxl_find_dport_by_dev(struct cxl_port *port,
1132 					const struct device *dev)
1133 {
1134 	struct cxl_dport *dport;
1135 
1136 	cxl_device_lock(&port->dev);
1137 	list_for_each_entry(dport, &port->dports, list)
1138 		if (dport->dport == dev) {
1139 			cxl_device_unlock(&port->dev);
1140 			return dport;
1141 		}
1142 
1143 	cxl_device_unlock(&port->dev);
1144 	return NULL;
1145 }
1146 EXPORT_SYMBOL_NS_GPL(cxl_find_dport_by_dev, CXL);
1147 
/*
 * Resolve the port-relative target ids in @target_map into cxl_dport
 * pointers cached in @cxld->target[]. Updates are published under
 * @cxld->target_lock (seqlock writer side); the port's dport list is
 * stabilized by the port device lock, asserted below. A NULL
 * @target_map is treated as "nothing to program".
 */
static int decoder_populate_targets(struct cxl_decoder *cxld,
				    struct cxl_port *port, int *target_map)
{
	int i, rc = 0;

	if (!target_map)
		return 0;

	device_lock_assert(&port->dev);

	/* a decoder with targets requires at least one registered dport */
	if (list_empty(&port->dports))
		return -EINVAL;

	write_seqlock(&cxld->target_lock);
	for (i = 0; i < cxld->nr_targets; i++) {
		struct cxl_dport *dport = find_dport(port, target_map[i]);

		/*
		 * NOTE: on a missing id the loop stops with earlier
		 * entries already written; callers treat this as fatal.
		 */
		if (!dport) {
			rc = -ENXIO;
			break;
		}
		cxld->target[i] = dport;
	}
	write_sequnlock(&cxld->target_lock);

	return rc;
}
1175 
1176 /**
1177  * cxl_decoder_alloc - Allocate a new CXL decoder
1178  * @port: owning port of this decoder
1179  * @nr_targets: downstream targets accessible by this decoder. All upstream
1180  *		ports and root ports must have at least 1 target. Endpoint
1181  *		devices will have 0 targets. Callers wishing to register an
1182  *		endpoint device should specify 0.
1183  *
1184  * A port should contain one or more decoders. Each of those decoders enable
1185  * some address space for CXL.mem utilization. A decoder is expected to be
1186  * configured by the caller before registering.
1187  *
1188  * Return: A new cxl decoder to be registered by cxl_decoder_add(). The decoder
1189  *	   is initialized to be a "passthrough" decoder.
1190  */
1191 static struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port,
1192 					     unsigned int nr_targets)
1193 {
1194 	struct cxl_decoder *cxld;
1195 	struct device *dev;
1196 	int rc = 0;
1197 
1198 	if (nr_targets > CXL_DECODER_MAX_INTERLEAVE)
1199 		return ERR_PTR(-EINVAL);
1200 
1201 	cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL);
1202 	if (!cxld)
1203 		return ERR_PTR(-ENOMEM);
1204 
1205 	rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
1206 	if (rc < 0)
1207 		goto err;
1208 
1209 	/* need parent to stick around to release the id */
1210 	get_device(&port->dev);
1211 	cxld->id = rc;
1212 
1213 	cxld->nr_targets = nr_targets;
1214 	seqlock_init(&cxld->target_lock);
1215 	dev = &cxld->dev;
1216 	device_initialize(dev);
1217 	device_set_pm_not_required(dev);
1218 	dev->parent = &port->dev;
1219 	dev->bus = &cxl_bus_type;
1220 	if (is_cxl_root(port))
1221 		cxld->dev.type = &cxl_decoder_root_type;
1222 	else if (is_cxl_endpoint(port))
1223 		cxld->dev.type = &cxl_decoder_endpoint_type;
1224 	else
1225 		cxld->dev.type = &cxl_decoder_switch_type;
1226 
1227 	/* Pre initialize an "empty" decoder */
1228 	cxld->interleave_ways = 1;
1229 	cxld->interleave_granularity = PAGE_SIZE;
1230 	cxld->target_type = CXL_DECODER_EXPANDER;
1231 	cxld->platform_res = (struct resource)DEFINE_RES_MEM(0, 0);
1232 
1233 	return cxld;
1234 err:
1235 	kfree(cxld);
1236 	return ERR_PTR(rc);
1237 }
1238 
1239 /**
1240  * cxl_root_decoder_alloc - Allocate a root level decoder
1241  * @port: owning CXL root of this decoder
1242  * @nr_targets: static number of downstream targets
1243  *
1244  * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
1245  * 'CXL root' decoder is one that decodes from a top-level / static platform
1246  * firmware description of CXL resources into a CXL standard decode
1247  * topology.
1248  */
1249 struct cxl_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
1250 					   unsigned int nr_targets)
1251 {
1252 	if (!is_cxl_root(port))
1253 		return ERR_PTR(-EINVAL);
1254 
1255 	return cxl_decoder_alloc(port, nr_targets);
1256 }
1257 EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL);
1258 
1259 /**
1260  * cxl_switch_decoder_alloc - Allocate a switch level decoder
1261  * @port: owning CXL switch port of this decoder
1262  * @nr_targets: max number of dynamically addressable downstream targets
1263  *
1264  * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
1265  * 'switch' decoder is any decoder that can be enumerated by PCIe
1266  * topology and the HDM Decoder Capability. This includes the decoders
1267  * that sit between Switch Upstream Ports / Switch Downstream Ports and
1268  * Host Bridges / Root Ports.
1269  */
1270 struct cxl_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
1271 					     unsigned int nr_targets)
1272 {
1273 	if (is_cxl_root(port) || is_cxl_endpoint(port))
1274 		return ERR_PTR(-EINVAL);
1275 
1276 	return cxl_decoder_alloc(port, nr_targets);
1277 }
1278 EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL);
1279 
1280 /**
1281  * cxl_endpoint_decoder_alloc - Allocate an endpoint decoder
1282  * @port: owning port of this decoder
1283  *
1284  * Return: A new cxl decoder to be registered by cxl_decoder_add()
1285  */
1286 struct cxl_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port)
1287 {
1288 	if (!is_cxl_endpoint(port))
1289 		return ERR_PTR(-EINVAL);
1290 
1291 	return cxl_decoder_alloc(port, 0);
1292 }
1293 EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, CXL);
1294 
1295 /**
1296  * cxl_decoder_add_locked - Add a decoder with targets
1297  * @cxld: The cxl decoder allocated by cxl_decoder_alloc()
1298  * @target_map: A list of downstream ports that this decoder can direct memory
1299  *              traffic to. These numbers should correspond with the port number
1300  *              in the PCIe Link Capabilities structure.
1301  *
1302  * Certain types of decoders may not have any targets. The main example of this
1303  * is an endpoint device. A more awkward example is a hostbridge whose root
1304  * ports get hot added (technically possible, though unlikely).
1305  *
1306  * This is the locked variant of cxl_decoder_add().
1307  *
1308  * Context: Process context. Expects the device lock of the port that owns the
1309  *	    @cxld to be held.
1310  *
1311  * Return: Negative error code if the decoder wasn't properly configured; else
1312  *	   returns 0.
1313  */
1314 int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
1315 {
1316 	struct cxl_port *port;
1317 	struct device *dev;
1318 	int rc;
1319 
1320 	if (WARN_ON_ONCE(!cxld))
1321 		return -EINVAL;
1322 
1323 	if (WARN_ON_ONCE(IS_ERR(cxld)))
1324 		return PTR_ERR(cxld);
1325 
1326 	if (cxld->interleave_ways < 1)
1327 		return -EINVAL;
1328 
1329 	dev = &cxld->dev;
1330 
1331 	port = to_cxl_port(cxld->dev.parent);
1332 	if (!is_endpoint_decoder(dev)) {
1333 		rc = decoder_populate_targets(cxld, port, target_map);
1334 		if (rc && (cxld->flags & CXL_DECODER_F_ENABLE)) {
1335 			dev_err(&port->dev,
1336 				"Failed to populate active decoder targets\n");
1337 			return rc;
1338 		}
1339 	}
1340 
1341 	rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
1342 	if (rc)
1343 		return rc;
1344 
1345 	/*
1346 	 * Platform decoder resources should show up with a reasonable name. All
1347 	 * other resources are just sub ranges within the main decoder resource.
1348 	 */
1349 	if (is_root_decoder(dev))
1350 		cxld->platform_res.name = dev_name(dev);
1351 
1352 	return device_add(dev);
1353 }
1354 EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL);
1355 
1356 /**
1357  * cxl_decoder_add - Add a decoder with targets
1358  * @cxld: The cxl decoder allocated by cxl_decoder_alloc()
1359  * @target_map: A list of downstream ports that this decoder can direct memory
1360  *              traffic to. These numbers should correspond with the port number
1361  *              in the PCIe Link Capabilities structure.
1362  *
1363  * This is the unlocked variant of cxl_decoder_add_locked().
1364  * See cxl_decoder_add_locked().
1365  *
1366  * Context: Process context. Takes and releases the device lock of the port that
1367  *	    owns the @cxld.
1368  */
1369 int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
1370 {
1371 	struct cxl_port *port;
1372 	int rc;
1373 
1374 	if (WARN_ON_ONCE(!cxld))
1375 		return -EINVAL;
1376 
1377 	if (WARN_ON_ONCE(IS_ERR(cxld)))
1378 		return PTR_ERR(cxld);
1379 
1380 	port = to_cxl_port(cxld->dev.parent);
1381 
1382 	cxl_device_lock(&port->dev);
1383 	rc = cxl_decoder_add_locked(cxld, target_map);
1384 	cxl_device_unlock(&port->dev);
1385 
1386 	return rc;
1387 }
1388 EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL);
1389 
/* devm action callback: unregister a decoder device on host unwind */
static void cxld_unregister(void *dev)
{
	struct device *cxld_dev = dev;

	device_unregister(cxld_dev);
}
1394 
1395 int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld)
1396 {
1397 	return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev);
1398 }
1399 EXPORT_SYMBOL_NS_GPL(cxl_decoder_autoremove, CXL);
1400 
1401 /**
1402  * __cxl_driver_register - register a driver for the cxl bus
1403  * @cxl_drv: cxl driver structure to attach
1404  * @owner: owning module/driver
1405  * @modname: KBUILD_MODNAME for parent driver
1406  */
1407 int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
1408 			  const char *modname)
1409 {
1410 	if (!cxl_drv->probe) {
1411 		pr_debug("%s ->probe() must be specified\n", modname);
1412 		return -EINVAL;
1413 	}
1414 
1415 	if (!cxl_drv->name) {
1416 		pr_debug("%s ->name must be specified\n", modname);
1417 		return -EINVAL;
1418 	}
1419 
1420 	if (!cxl_drv->id) {
1421 		pr_debug("%s ->id must be specified\n", modname);
1422 		return -EINVAL;
1423 	}
1424 
1425 	cxl_drv->drv.bus = &cxl_bus_type;
1426 	cxl_drv->drv.owner = owner;
1427 	cxl_drv->drv.mod_name = modname;
1428 	cxl_drv->drv.name = cxl_drv->name;
1429 
1430 	return driver_register(&cxl_drv->drv);
1431 }
1432 EXPORT_SYMBOL_NS_GPL(__cxl_driver_register, CXL);
1433 
1434 void cxl_driver_unregister(struct cxl_driver *cxl_drv)
1435 {
1436 	driver_unregister(&cxl_drv->drv);
1437 }
1438 EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, CXL);
1439 
1440 static int cxl_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
1441 {
1442 	return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT,
1443 			      cxl_device_id(dev));
1444 }
1445 
1446 static int cxl_bus_match(struct device *dev, struct device_driver *drv)
1447 {
1448 	return cxl_device_id(dev) == to_cxl_drv(drv)->id;
1449 }
1450 
1451 static int cxl_bus_probe(struct device *dev)
1452 {
1453 	int rc;
1454 
1455 	/*
1456 	 * Take the CXL nested lock since the driver core only holds
1457 	 * @dev->mutex and not @dev->lockdep_mutex.
1458 	 */
1459 	cxl_nested_lock(dev);
1460 	rc = to_cxl_drv(dev->driver)->probe(dev);
1461 	cxl_nested_unlock(dev);
1462 
1463 	dev_dbg(dev, "probe: %d\n", rc);
1464 	return rc;
1465 }
1466 
1467 static void cxl_bus_remove(struct device *dev)
1468 {
1469 	struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver);
1470 
1471 	cxl_nested_lock(dev);
1472 	if (cxl_drv->remove)
1473 		cxl_drv->remove(dev);
1474 	cxl_nested_unlock(dev);
1475 }
1476 
/*
 * Ordered workqueue for memdev detach work (see
 * schedule_cxl_memdev_detach()); user tooling can flush it via the bus
 * "flush" attribute.
 */
static struct workqueue_struct *cxl_bus_wq;
1478 
1479 int cxl_bus_rescan(void)
1480 {
1481 	return bus_rescan_devices(&cxl_bus_type);
1482 }
1483 EXPORT_SYMBOL_NS_GPL(cxl_bus_rescan, CXL);
1484 
1485 bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
1486 {
1487 	return queue_work(cxl_bus_wq, &cxlmd->detach_work);
1488 }
1489 EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);
1490 
1491 /* for user tooling to ensure port disable work has completed */
1492 static ssize_t flush_store(struct bus_type *bus, const char *buf, size_t count)
1493 {
1494 	if (sysfs_streq(buf, "1")) {
1495 		flush_workqueue(cxl_bus_wq);
1496 		return count;
1497 	}
1498 
1499 	return -EINVAL;
1500 }
1501 
1502 static BUS_ATTR_WO(flush);
1503 
/* Bus-level sysfs attributes (currently just "flush") */
static struct attribute *cxl_bus_attributes[] = {
	&bus_attr_flush.attr,
	NULL,
};

static struct attribute_group cxl_bus_attribute_group = {
	.attrs = cxl_bus_attributes,
};

/* NULL-terminated group list wired into cxl_bus_type.bus_groups */
static const struct attribute_group *cxl_bus_attribute_groups[] = {
	&cxl_bus_attribute_group,
	NULL,
};
1517 
/*
 * The "cxl" bus: hosts ports, decoders, memdevs, and nvdimm-bridge
 * devices (see cxl_device_id()) and binds them to cxl_driver instances.
 */
struct bus_type cxl_bus_type = {
	.name = "cxl",
	.uevent = cxl_bus_uevent,
	.match = cxl_bus_match,
	.probe = cxl_bus_probe,
	.remove = cxl_bus_remove,
	.bus_groups = cxl_bus_attribute_groups,
};
EXPORT_SYMBOL_NS_GPL(cxl_bus_type, CXL);
1527 
/*
 * Module init: bring up the mbox and memdev infrastructure, allocate
 * the ordered port workqueue, then register the bus. The error labels
 * unwind in strict reverse order of setup.
 */
static __init int cxl_core_init(void)
{
	int rc;

	cxl_mbox_init();

	rc = cxl_memdev_init();
	if (rc)
		return rc;

	/* ordered: queued port/memdev work items run one at a time */
	cxl_bus_wq = alloc_ordered_workqueue("cxl_port", 0);
	if (!cxl_bus_wq) {
		rc = -ENOMEM;
		goto err_wq;
	}

	rc = bus_register(&cxl_bus_type);
	if (rc)
		goto err_bus;

	return 0;

err_bus:
	destroy_workqueue(cxl_bus_wq);
err_wq:
	cxl_memdev_exit();
	cxl_mbox_exit();
	return rc;
}
1557 
/* Module teardown: exact reverse order of cxl_core_init() */
static void cxl_core_exit(void)
{
	bus_unregister(&cxl_bus_type);
	destroy_workqueue(cxl_bus_wq);
	cxl_memdev_exit();
	cxl_mbox_exit();
}
1565 
1566 module_init(cxl_core_init);
1567 module_exit(cxl_core_exit);
1568 MODULE_LICENSE("GPL v2");
1569