xref: /linux/drivers/cxl/core/hdm.c (revision 3f0a50f345f78183f6e9b39c2f45ca5dcaa511ca)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/device.h>
#include <linux/delay.h>

#include "cxlmem.h"
#include "core.h"

/**
 * DOC: cxl core hdm
 *
 * Compute Express Link Host Managed Device Memory (HDM) is, starting
 * with the CXL 2.0 specification, managed by an array of HDM Decoder
 * register instances per CXL port and per CXL endpoint. Define common
 * helpers for enumerating these registers and their capabilities.
 */
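
/*
 * Illustrative indexing sketch (a summary of the pattern used below; see
 * the CXL_HDM_DECODER0_*_OFFSET(i) macros in cxl.h for the exact layout):
 * each decoder instance 'i' is a fixed-stride register set, e.g.:
 *
 *	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
 */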

static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
			   int *target_map)
{
	int rc;

	rc = cxl_decoder_add_locked(cxld, target_map);
	if (rc) {
		put_device(&cxld->dev);
		dev_err(&port->dev, "Failed to add decoder\n");
		return rc;
	}

	/* unregister the decoder when the port itself is unregistered */
	rc = cxl_decoder_autoremove(&port->dev, cxld);
	if (rc)
		return rc;

	dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));

	return 0;
}

/*
 * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure)
 * single-ported host bridges need not publish a decoder capability when a
 * passthrough decode can be assumed, i.e., all transactions that the uport
 * sees are claimed and passed to the single dport. Disable the range until
 * the first CXL region is enumerated / activated.
 */
int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
	struct cxl_decoder *cxld;
	struct cxl_dport *dport;
	int single_port_map[1];

	cxld = cxl_switch_decoder_alloc(port, 1);
	if (IS_ERR(cxld))
		return PTR_ERR(cxld);

	device_lock_assert(&port->dev);

	dport = list_first_entry(&port->dports, typeof(*dport), list);
	single_port_map[0] = dport->port_id;

	return add_hdm_decoder(port, cxld, single_port_map);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, CXL);
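
/*
 * Illustrative caller sketch (the condition names are hypothetical, not
 * lifted from the cxl_port driver): when a port publishes no HDM decoder
 * capability and has a single dport, passthrough can be assumed:
 *
 *	if (!has_hdm_capability && single_dport)
 *		return devm_cxl_add_passthrough_decoder(port);
 */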

static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
{
	u32 hdm_cap;

	hdm_cap = readl(cxlhdm->regs.hdm_decoder + CXL_HDM_DECODER_CAP_OFFSET);
	cxlhdm->decoder_count = cxl_hdm_decoder_count(hdm_cap);
	cxlhdm->target_count =
		FIELD_GET(CXL_HDM_DECODER_TARGET_COUNT_MASK, hdm_cap);

	/* record which optional interleave address bits are supported */
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_11_8, hdm_cap))
		cxlhdm->interleave_mask |= GENMASK(11, 8);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap))
		cxlhdm->interleave_mask |= GENMASK(14, 12);
}
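
/*
 * Note on the capability header parsed above: the decoder-count field is
 * encoded rather than literal. As of this revision, cxl_hdm_decoder_count()
 * in cxl.h maps a raw value of 0 to 1 decoder and a non-zero value n to
 * 2 * n decoders.
 */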

static void __iomem *map_hdm_decoder_regs(struct cxl_port *port,
					  void __iomem *crb)
{
	struct cxl_component_reg_map map;

	cxl_probe_component_regs(&port->dev, crb, &map);
	if (!map.hdm_decoder.valid) {
		dev_err(&port->dev, "HDM decoder registers invalid\n");
		return IOMEM_ERR_PTR(-ENXIO);
	}

	return crb + map.hdm_decoder.offset;
}

/**
 * devm_cxl_setup_hdm - map HDM decoder component registers
 * @port: cxl_port to map the HDM decoder registers for
 *
 * Returns the allocated &struct cxl_hdm with its decoder registers
 * mapped, or an ERR_PTR() on failure.
 */
struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port)
{
	struct device *dev = &port->dev;
	void __iomem *crb, *hdm;
	struct cxl_hdm *cxlhdm;

	cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
	if (!cxlhdm)
		return ERR_PTR(-ENOMEM);

	cxlhdm->port = port;
	crb = devm_cxl_iomap_block(dev, port->component_reg_phys,
				   CXL_COMPONENT_REG_BLOCK_SIZE);
	if (!crb) {
		dev_err(dev, "No component registers mapped\n");
		return ERR_PTR(-ENXIO);
	}

	hdm = map_hdm_decoder_regs(port, crb);
	if (IS_ERR(hdm))
		return ERR_CAST(hdm);
	cxlhdm->regs.hdm_decoder = hdm;

	parse_hdm_decoder_caps(cxlhdm);
	if (cxlhdm->decoder_count == 0) {
		dev_err(dev, "Spec violation. Caps invalid\n");
		return ERR_PTR(-ENXIO);
	}

	return cxlhdm;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, CXL);
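
/*
 * A minimal usage sketch pairing the two exported entry points (error
 * handling abbreviated; the surrounding probe context is assumed and not
 * part of this file):
 *
 *	struct cxl_hdm *cxlhdm;
 *
 *	cxlhdm = devm_cxl_setup_hdm(port);
 *	if (IS_ERR(cxlhdm))
 *		return PTR_ERR(cxlhdm);
 *	return devm_cxl_enumerate_decoders(cxlhdm);
 */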

/*
 * The IG field encodes the interleave granularity as a power-of-2
 * multiple of 256 bytes, i.e. val == 0 => 256B, val == 1 => 512B, ...
 */
static int to_interleave_granularity(u32 ctrl)
{
	int val = FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl);

	return 256 << val;
}

/*
 * The IW field encodes power-of-2 interleave ways (0..4 => 1, 2, 4, 8,
 * 16 ways) and 3-way multiples (8..10 => 3, 6, 12 ways); any other
 * encoding is reserved and treated as invalid (0 ways).
 */
static int to_interleave_ways(u32 ctrl)
{
	int val = FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl);

	switch (val) {
	case 0 ... 4:
		return 1 << val;
	case 8 ... 10:
		return 3 << (val - 8);
	default:
		return 0;
	}
}
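
/*
 * Worked example of the two decodes above (numbers are illustrative, not
 * read from hardware): IW val 2 selects 4-way interleave and IG val 1
 * selects 512B granularity, so consecutive 512B chunks of the decoded
 * range rotate across four targets and a 1MiB decode maps 256KiB to each.
 */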

static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
			    int *target_map, void __iomem *hdm, int which)
{
	u64 size, base;
	u32 ctrl;
	int i;
	union {
		u64 value;
		unsigned char target_id[8];
	} target_list;

	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
	base = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
	size = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));

	if (!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED))
		size = 0;

	/* an all-ones read indicates the register range is not backed */
	if (base == U64_MAX || size == U64_MAX) {
		dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n",
			 port->id, cxld->id);
		return -ENXIO;
	}

	cxld->decoder_range = (struct range) {
		.start = base,
		.end = base + size - 1,
	};

	/* switch decoders are always enabled if committed */
	if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED) {
		cxld->flags |= CXL_DECODER_F_ENABLE;
		if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK)
			cxld->flags |= CXL_DECODER_F_LOCK;
	}
	cxld->interleave_ways = to_interleave_ways(ctrl);
	if (!cxld->interleave_ways) {
		dev_warn(&port->dev,
			 "decoder%d.%d: Invalid interleave ways (ctrl: %#x)\n",
			 port->id, cxld->id, ctrl);
		return -ENXIO;
	}
	cxld->interleave_granularity = to_interleave_granularity(ctrl);

	if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl))
		cxld->target_type = CXL_DECODER_EXPANDER;
	else
		cxld->target_type = CXL_DECODER_ACCELERATOR;

	/* endpoint decoders have no downstream target list */
	if (is_cxl_endpoint(to_cxl_port(cxld->dev.parent)))
		return 0;

	/* the target list register packs one 8-bit port id per way */
	target_list.value =
		ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which));
	for (i = 0; i < cxld->interleave_ways; i++)
		target_map[i] = target_list.target_id[i];

	return 0;
}

/**
 * devm_cxl_enumerate_decoders - add decoder objects per HDM register set
 * @cxlhdm: Structure containing the HDM decoder registers and capabilities
 *	    previously mapped by devm_cxl_setup_hdm()
 */
int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	struct cxl_port *port = cxlhdm->port;
	int i, committed, failed;
	u32 ctrl;

	/*
	 * Since the register resource was recently claimed via
	 * request_region(), be careful about trusting the "not-committed"
	 * status until the commit timeout has elapsed.  The commit timeout
	 * is 10ms (CXL 2.0 8.2.5.12.20), but double it to be tolerant of
	 * any clock skew between host and target.
	 */
	for (i = 0, committed = 0; i < cxlhdm->decoder_count; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
		if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED)
			committed++;
	}

	/* ensure that future checks of committed can be trusted */
	if (committed != cxlhdm->decoder_count)
		msleep(20);

	for (i = 0, failed = 0; i < cxlhdm->decoder_count; i++) {
		int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
		int rc, target_count = cxlhdm->target_count;
		struct cxl_decoder *cxld;

		if (is_cxl_endpoint(port))
			cxld = cxl_endpoint_decoder_alloc(port);
		else
			cxld = cxl_switch_decoder_alloc(port, target_count);
		if (IS_ERR(cxld)) {
			dev_warn(&port->dev,
				 "Failed to allocate decoder\n");
			return PTR_ERR(cxld);
		}

		rc = init_hdm_decoder(port, cxld, target_map, hdm, i);
		if (rc) {
			put_device(&cxld->dev);
			failed++;
			continue;
		}
		rc = add_hdm_decoder(port, cxld, target_map);
		if (rc) {
			dev_warn(&port->dev,
				 "Failed to add decoder to port\n");
			return rc;
		}
	}

	if (failed == cxlhdm->decoder_count) {
		dev_err(&port->dev, "No valid decoders found\n");
		return -ENXIO;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, CXL);