xref: /linux/drivers/cxl/port.c (revision c17ee635fd3a482b2ad2bf5e269755c2eae5f25e)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2022 Intel Corporation. All rights reserved. */
3 #include <linux/aer.h>
4 #include <linux/device.h>
5 #include <linux/module.h>
6 #include <linux/slab.h>
7 
8 #include "cxlmem.h"
9 #include "cxlpci.h"
10 
11 /**
12  * DOC: cxl port
13  *
14  * The port driver enumerates dport via PCI and scans for HDM
15  * (Host-managed-Device-Memory) decoder resources via the
16  * @component_reg_phys value passed in by the agent that registered the
17  * port. All descendant ports of a CXL root port (described by platform
18  * firmware) are managed in this drivers context. Each driver instance
19  * is responsible for tearing down the driver context of immediate
20  * descendant ports. The locking for this is validated by
21  * CONFIG_PROVE_CXL_LOCKING.
22  *
23  * The primary service this driver provides is presenting APIs to other
24  * drivers to utilize the decoders, and indicating to userspace (via bind
25  * status) the connectivity of the CXL.mem protocol throughout the
26  * PCIe topology.
27  */
28 
29 static void schedule_detach(void *cxlmd)
30 {
31 	schedule_cxl_memdev_detach(cxlmd);
32 }
33 
34 static int discover_region(struct device *dev, void *unused)
35 {
36 	struct cxl_endpoint_decoder *cxled;
37 	int rc;
38 
39 	if (!is_endpoint_decoder(dev))
40 		return 0;
41 
42 	cxled = to_cxl_endpoint_decoder(dev);
43 	if ((cxled->cxld.flags & CXL_DECODER_F_ENABLE) == 0)
44 		return 0;
45 
46 	if (cxled->state != CXL_DECODER_STATE_AUTO)
47 		return 0;
48 
49 	/*
50 	 * Region enumeration is opportunistic, if this add-event fails,
51 	 * continue to the next endpoint decoder.
52 	 */
53 	rc = cxl_add_to_region(cxled);
54 	if (rc)
55 		dev_dbg(dev, "failed to add to region: %#llx-%#llx\n",
56 			cxled->cxld.hpa_range.start, cxled->cxld.hpa_range.end);
57 
58 	return 0;
59 }
60 
61 static int cxl_switch_port_probe(struct cxl_port *port)
62 {
63 	/* Reset nr_dports for rebind of driver */
64 	port->nr_dports = 0;
65 
66 	/* Cache the data early to ensure is_visible() works */
67 	read_cdat_data(port);
68 
69 	return 0;
70 }
71 
72 static int cxl_ras_unmask(struct cxl_port *port)
73 {
74 	struct pci_dev *pdev;
75 	void __iomem *addr;
76 	u32 orig_val, val, mask;
77 	u16 cap;
78 	int rc;
79 
80 	if (!dev_is_pci(port->uport_dev))
81 		return 0;
82 	pdev = to_pci_dev(port->uport_dev);
83 
84 	if (!port->regs.ras) {
85 		pci_dbg(pdev, "No RAS registers.\n");
86 		return 0;
87 	}
88 
89 	/* BIOS has PCIe AER error control */
90 	if (!pcie_aer_is_native(pdev))
91 		return 0;
92 
93 	rc = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap);
94 	if (rc)
95 		return rc;
96 
97 	if (cap & PCI_EXP_DEVCTL_URRE) {
98 		addr = port->regs.ras + CXL_RAS_UNCORRECTABLE_MASK_OFFSET;
99 		orig_val = readl(addr);
100 
101 		mask = CXL_RAS_UNCORRECTABLE_MASK_MASK |
102 		       CXL_RAS_UNCORRECTABLE_MASK_F256B_MASK;
103 		val = orig_val & ~mask;
104 		writel(val, addr);
105 		pci_dbg(pdev, "Uncorrectable RAS Errors Mask: %#x -> %#x\n",
106 			orig_val, val);
107 	}
108 
109 	if (cap & PCI_EXP_DEVCTL_CERE) {
110 		addr = port->regs.ras + CXL_RAS_CORRECTABLE_MASK_OFFSET;
111 		orig_val = readl(addr);
112 		val = orig_val & ~CXL_RAS_CORRECTABLE_MASK_MASK;
113 		writel(val, addr);
114 		pci_dbg(pdev, "Correctable RAS Errors Mask: %#x -> %#x\n",
115 			orig_val, val);
116 	}
117 
118 	return 0;
119 }
120 
121 static int cxl_endpoint_port_probe(struct cxl_port *port)
122 {
123 	struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
124 	struct cxl_dport *dport = port->parent_dport;
125 	int rc;
126 
127 	/* Cache the data early to ensure is_visible() works */
128 	read_cdat_data(port);
129 	cxl_endpoint_parse_cdat(port);
130 
131 	get_device(&cxlmd->dev);
132 	rc = devm_add_action_or_reset(&port->dev, schedule_detach, cxlmd);
133 	if (rc)
134 		return rc;
135 
136 	rc = devm_cxl_endpoint_decoders_setup(port);
137 	if (rc)
138 		return rc;
139 
140 	/*
141 	 * With VH (CXL Virtual Host) topology the cxl_port::add_dport() method
142 	 * handles RAS setup for downstream ports. With RCH (CXL Restricted CXL
143 	 * Host) topologies the downstream port is enumerated early by platform
144 	 * firmware, but the RCRB (root complex register block) is not mapped
145 	 * until after the cxl_pci driver attaches to the RCIeP (root complex
146 	 * integrated endpoint).
147 	 */
148 	if (dport->rch)
149 		devm_cxl_dport_rch_ras_setup(dport);
150 
151 	devm_cxl_port_ras_setup(port);
152 	if (cxl_ras_unmask(port))
153 		dev_dbg(&port->dev, "failed to unmask RAS interrupts\n");
154 
155 	/*
156 	 * Now that all endpoint decoders are successfully enumerated, try to
157 	 * assemble regions from committed decoders
158 	 */
159 	device_for_each_child(&port->dev, NULL, discover_region);
160 
161 	return 0;
162 }
163 
164 static int cxl_port_probe(struct device *dev)
165 {
166 	struct cxl_port *port = to_cxl_port(dev);
167 
168 	if (is_cxl_endpoint(port))
169 		return cxl_endpoint_port_probe(port);
170 	return cxl_switch_port_probe(port);
171 }
172 
173 static ssize_t CDAT_read(struct file *filp, struct kobject *kobj,
174 			 const struct bin_attribute *bin_attr, char *buf,
175 			 loff_t offset, size_t count)
176 {
177 	struct device *dev = kobj_to_dev(kobj);
178 	struct cxl_port *port = to_cxl_port(dev);
179 
180 	if (!port->cdat_available)
181 		return -ENXIO;
182 
183 	if (!port->cdat.table)
184 		return 0;
185 
186 	return memory_read_from_buffer(buf, count, &offset,
187 				       port->cdat.table,
188 				       port->cdat.length);
189 }
190 
191 static const BIN_ATTR_ADMIN_RO(CDAT, 0);
192 
193 static umode_t cxl_port_bin_attr_is_visible(struct kobject *kobj,
194 					    const struct bin_attribute *attr, int i)
195 {
196 	struct device *dev = kobj_to_dev(kobj);
197 	struct cxl_port *port = to_cxl_port(dev);
198 
199 	if ((attr == &bin_attr_CDAT) && port->cdat_available)
200 		return attr->attr.mode;
201 
202 	return 0;
203 }
204 
205 static const struct bin_attribute *const cxl_cdat_bin_attributes[] = {
206 	&bin_attr_CDAT,
207 	NULL,
208 };
209 
210 static const struct attribute_group cxl_cdat_attribute_group = {
211 	.bin_attrs = cxl_cdat_bin_attributes,
212 	.is_bin_visible = cxl_port_bin_attr_is_visible,
213 };
214 
215 static const struct attribute_group *cxl_port_attribute_groups[] = {
216 	&cxl_cdat_attribute_group,
217 	NULL,
218 };
219 
220 /* note this implicitly casts the group back to its @port */
221 DEFINE_FREE(cxl_port_release_dr_group, struct cxl_port *,
222 	    if (_T) devres_release_group(&_T->dev, _T))
223 
224 static struct cxl_dport *cxl_port_add_dport(struct cxl_port *port,
225 					    struct device *dport_dev)
226 {
227 	struct cxl_dport *dport;
228 	int rc;
229 
230 	/* Temp group for all "first dport" and "per dport" setup actions */
231 	void *port_dr_group __free(cxl_port_release_dr_group) =
232 		devres_open_group(&port->dev, port, GFP_KERNEL);
233 	if (!port_dr_group)
234 		return ERR_PTR(-ENOMEM);
235 
236 	if (port->nr_dports == 0) {
237 		/*
238 		 * Some host bridges are known to not have component regsisters
239 		 * available until a root port has trained CXL. Perform that
240 		 * setup now.
241 		 */
242 		rc = cxl_port_setup_regs(port, port->component_reg_phys);
243 		if (rc)
244 			return ERR_PTR(rc);
245 
246 		rc = devm_cxl_switch_port_decoders_setup(port);
247 		if (rc)
248 			return ERR_PTR(rc);
249 
250 		/*
251 		 * RAS setup is optional, either driver operation can continue
252 		 * on failure, or the device does not implement RAS registers.
253 		 */
254 		devm_cxl_port_ras_setup(port);
255 	}
256 
257 	dport = devm_cxl_add_dport_by_dev(port, dport_dev);
258 	if (IS_ERR(dport))
259 		return dport;
260 
261 	/* This group was only needed for early exit above */
262 	devres_remove_group(&port->dev, no_free_ptr(port_dr_group));
263 
264 	cxl_switch_parse_cdat(dport);
265 
266 	/* New dport added, update the decoder targets */
267 	cxl_port_update_decoder_targets(port, dport);
268 
269 	dev_dbg(&port->dev, "dport%d:%s added\n", dport->port_id,
270 		dev_name(dport_dev));
271 
272 	return dport;
273 }
274 
275 static struct cxl_driver cxl_port_driver = {
276 	.name = "cxl_port",
277 	.probe = cxl_port_probe,
278 	.add_dport = cxl_port_add_dport,
279 	.id = CXL_DEVICE_PORT,
280 	.drv = {
281 		.probe_type = PROBE_FORCE_SYNCHRONOUS,
282 		.dev_groups = cxl_port_attribute_groups,
283 	},
284 };
285 
286 int devm_cxl_add_endpoint(struct device *host, struct cxl_memdev *cxlmd,
287 			  struct cxl_dport *parent_dport)
288 {
289 	struct cxl_port *parent_port = parent_dport->port;
290 	struct cxl_port *endpoint, *iter, *down;
291 	int rc;
292 
293 	/*
294 	 * Now that the path to the root is established record all the
295 	 * intervening ports in the chain.
296 	 */
297 	for (iter = parent_port, down = NULL; !is_cxl_root(iter);
298 	     down = iter, iter = to_cxl_port(iter->dev.parent)) {
299 		struct cxl_ep *ep;
300 
301 		ep = cxl_ep_load(iter, cxlmd);
302 		ep->next = down;
303 	}
304 
305 	/* Note: endpoint port component registers are derived from @cxlds */
306 	endpoint = devm_cxl_add_port(host, &cxlmd->dev, CXL_RESOURCE_NONE,
307 				     parent_dport);
308 	if (IS_ERR(endpoint))
309 		return PTR_ERR(endpoint);
310 
311 	rc = cxl_endpoint_autoremove(cxlmd, endpoint);
312 	if (rc)
313 		return rc;
314 
315 	if (!endpoint->dev.driver) {
316 		dev_err(&cxlmd->dev, "%s failed probe\n",
317 			dev_name(&endpoint->dev));
318 		return -ENXIO;
319 	}
320 
321 	return 0;
322 }
323 EXPORT_SYMBOL_FOR_MODULES(devm_cxl_add_endpoint, "cxl_mem");
324 
325 static int __init cxl_port_init(void)
326 {
327 	return cxl_driver_register(&cxl_port_driver);
328 }
329 /*
330  * Be ready to immediately enable ports emitted by the platform CXL root
331  * (e.g. cxl_acpi) when CONFIG_CXL_PORT=y.
332  */
333 subsys_initcall(cxl_port_init);
334 
335 static void __exit cxl_port_exit(void)
336 {
337 	cxl_driver_unregister(&cxl_port_driver);
338 }
339 module_exit(cxl_port_exit);
340 
341 MODULE_DESCRIPTION("CXL: Port enumeration and services");
342 MODULE_LICENSE("GPL v2");
343 MODULE_IMPORT_NS("CXL");
344 MODULE_ALIAS_CXL(CXL_DEVICE_PORT);
345