xref: /linux/drivers/cxl/core/region_pmem.c (revision 12bffaef28820e0b94c644c75708195c61af78f7)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2022 Intel Corporation. All rights reserved. */
3 #include <linux/device.h>
4 #include <linux/slab.h>
5 #include <cxlmem.h>
6 #include <cxl.h>
7 #include "core.h"
8 
9 static void cxl_pmem_region_release(struct device *dev)
10 {
11 	struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
12 	int i;
13 
14 	for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
15 		struct cxl_memdev *cxlmd = cxlr_pmem->mapping[i].cxlmd;
16 
17 		put_device(&cxlmd->dev);
18 	}
19 
20 	kfree(cxlr_pmem);
21 }
22 
/*
 * Sysfs attribute groups for pmem region devices; NULL-terminated.
 * cxl_base_attribute_group is the common base shared by CXL bus devices.
 */
static const struct attribute_group *cxl_pmem_region_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL
};
27 
/*
 * Device type for pmem region bridge devices. Identifies instances for
 * is_cxl_pmem_region() and frees them via cxl_pmem_region_release() when
 * the last reference is dropped.
 */
const struct device_type cxl_pmem_region_type = {
	.name = "cxl_pmem_region",
	.release = cxl_pmem_region_release,
	.groups = cxl_pmem_region_attribute_groups,
};
33 
34 bool is_cxl_pmem_region(struct device *dev)
35 {
36 	return dev->type == &cxl_pmem_region_type;
37 }
38 EXPORT_SYMBOL_NS_GPL(is_cxl_pmem_region, "CXL");
39 
40 struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
41 {
42 	if (dev_WARN_ONCE(dev, !is_cxl_pmem_region(dev),
43 			  "not a cxl_pmem_region device\n"))
44 		return NULL;
45 	return container_of(dev, struct cxl_pmem_region, dev);
46 }
47 EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, "CXL");
48 
/* Separate lockdep class for pmem region device locks (see lockdep_set_class() below) */
static struct lock_class_key cxl_pmem_region_key;
50 
/*
 * Allocate and initialize (but do not register) the cxl_pmem_region
 * bridge device for @cxlr, snapshotting the committed region
 * configuration. On success, ownership of the allocation rests with
 * cxlr->cxlr_pmem; the final put_device() frees it via
 * cxl_pmem_region_release().
 *
 * Returns 0 on success, -ENXIO if the region is not committed, -ENOMEM
 * on allocation failure, -ENODEV if no nvdimm bridge is found.
 */
static int cxl_pmem_region_alloc(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct device *dev;
	int i;

	/* Hold the region configuration stable for the snapshot below */
	guard(rwsem_read)(&cxl_rwsem.region);
	if (p->state != CXL_CONFIG_COMMIT)
		return -ENXIO;

	/* One trailing mapping slot per endpoint target; auto-freed on error return */
	struct cxl_pmem_region *cxlr_pmem __free(kfree) =
		kzalloc_flex(*cxlr_pmem, mapping, p->nr_targets);
	if (!cxlr_pmem)
		return -ENOMEM;

	cxlr_pmem->hpa_range.start = p->res->start;
	cxlr_pmem->hpa_range.end = p->res->end;

	/* Snapshot the region configuration underneath the cxl_rwsem.region */
	cxlr_pmem->nr_mappings = p->nr_targets;
	for (i = 0; i < p->nr_targets; i++) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
		struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];

		/*
		 * Regions never span CXL root devices, so by definition the
		 * bridge for one device is the same for all.
		 */
		if (i == 0) {
			/*
			 * -ENODEV here is safe: it can only happen before
			 * any get_device() below, and __free(kfree)
			 * reclaims cxlr_pmem on return.
			 */
			cxl_nvb = cxl_find_nvdimm_bridge(cxlmd->endpoint);
			if (!cxl_nvb)
				return -ENODEV;
			/*
			 * @cxlr now carries the bridge reference; dropped in
			 * devm_cxl_add_pmem_region() error paths or
			 * cxlr_release_nvdimm().
			 */
			cxlr->cxl_nvb = cxl_nvb;
		}
		m->cxlmd = cxlmd;
		/* Paired with put_device() in cxl_pmem_region_release() */
		get_device(&cxlmd->dev);
		m->start = cxled->dpa_res->start;
		m->size = resource_size(cxled->dpa_res);
		m->position = i;
	}

	dev = &cxlr_pmem->dev;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_pmem_region_key);
	device_set_pm_not_required(dev);
	dev->parent = &cxlr->dev;
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_pmem_region_type;
	cxlr_pmem->cxlr = cxlr;
	/* Ownership transfers to @cxlr; no_free_ptr() disarms __free(kfree) */
	cxlr->cxlr_pmem = no_free_ptr(cxlr_pmem);

	return 0;
}
106 
/*
 * Tear down the pmem region bridge device. Runs either as a devm action
 * registered on the nvdimm bridge, or synchronously from
 * cxlr_release_nvdimm() after cancelling that action.
 */
static void cxlr_pmem_unregister(void *_cxlr_pmem)
{
	struct cxl_pmem_region *cxlr_pmem = _cxlr_pmem;
	struct cxl_region *cxlr = cxlr_pmem->cxlr;
	struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;

	/*
	 * Either the bridge is in ->remove() context under the device_lock(),
	 * or cxlr_release_nvdimm() is cancelling the bridge's release action
	 * for @cxlr_pmem and doing it itself (while manually holding the bridge
	 * lock).
	 */
	device_lock_assert(&cxl_nvb->dev);
	/* Sever the cross-links before device_unregister() drops the last ref */
	cxlr->cxlr_pmem = NULL;
	cxlr_pmem->cxlr = NULL;
	device_unregister(&cxlr_pmem->dev);
}
124 
/*
 * devm action on @cxlr: if the bridge has not already torn down the pmem
 * region device, cancel its pending action and run the unregister here,
 * then drop @cxlr's reference on the bridge.
 */
static void cxlr_release_nvdimm(void *_cxlr)
{
	struct cxl_region *cxlr = _cxlr;
	struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;

	/* Hold the bridge device_lock() to satisfy device_lock_assert() in
	 * cxlr_pmem_unregister(); devm_release_action() runs it synchronously.
	 */
	scoped_guard(device, &cxl_nvb->dev) {
		if (cxlr->cxlr_pmem)
			devm_release_action(&cxl_nvb->dev, cxlr_pmem_unregister,
					    cxlr->cxlr_pmem);
	}
	cxlr->cxl_nvb = NULL;
	/* Drop the bridge reference taken in cxl_pmem_region_alloc() */
	put_device(&cxl_nvb->dev);
}
138 
/**
 * devm_cxl_add_pmem_region() - add a cxl_region-to-nd_region bridge
 * @cxlr: parent CXL region for this pmem region bridge device
 *
 * Allocates and registers a cxl_pmem_region device parented to @cxlr,
 * and arranges teardown either via the nvdimm bridge's devm action
 * (cxlr_pmem_unregister) or, if @cxlr goes away first, via
 * cxlr_release_nvdimm().
 *
 * Return: 0 on success negative error code on failure.
 */
int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
{
	struct cxl_pmem_region *cxlr_pmem;
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct device *dev;
	int rc;

	rc = cxl_pmem_region_alloc(cxlr);
	if (rc)
		return rc;
	/* Set by cxl_pmem_region_alloc() on success */
	cxlr_pmem = cxlr->cxlr_pmem;
	cxl_nvb = cxlr->cxl_nvb;

	dev = &cxlr_pmem->dev;
	rc = dev_set_name(dev, "pmem_region%d", cxlr->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
		dev_name(dev));

	/*
	 * Only tie teardown to the bridge while its driver is still bound.
	 * On devm_add_action_or_reset() failure the action runs immediately
	 * and unregisters @cxlr_pmem.
	 */
	scoped_guard(device, &cxl_nvb->dev) {
		if (cxl_nvb->dev.driver)
			rc = devm_add_action_or_reset(&cxl_nvb->dev,
						      cxlr_pmem_unregister,
						      cxlr_pmem);
		else
			rc = -ENXIO;
	}

	if (rc)
		goto err_bridge;

	/* @cxlr carries a reference on @cxl_nvb until cxlr_release_nvdimm */
	return devm_add_action_or_reset(&cxlr->dev, cxlr_release_nvdimm, cxlr);

err:
	/* Drop the device_initialize() reference; frees via release() */
	put_device(dev);
err_bridge:
	/*
	 * NOTE(review): the -ENXIO branch above reaches here after a
	 * successful device_add() without device_del()/put_device() on
	 * @dev, and cxlr->cxlr_pmem is left set on the err: path --
	 * confirm callers treat the region as dead on failure.
	 */
	put_device(&cxl_nvb->dev);
	cxlr->cxl_nvb = NULL;
	return rc;
}
192