xref: /linux/drivers/cxl/core/features.c (revision 4d1c09cef2c244bd19467c016a3e56ba28ecc59d)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2024-2025 Intel Corporation. All rights reserved. */
3 #include <linux/fwctl.h>
4 #include <linux/device.h>
5 #include <cxl/mailbox.h>
6 #include <cxl/features.h>
7 #include <uapi/fwctl/cxl.h>
8 #include "cxl.h"
9 #include "core.h"
10 #include "cxlmem.h"
11 
/*
 * All the features below are exclusive to the kernel: entries matching
 * these UUIDs are excluded from the user-visible feature count and are
 * reported to userspace as non-changeable (see is_cxl_feature_exclusive()
 * callers in this file).
 */
static const uuid_t cxl_exclusive_feats[] = {
	CXL_FEAT_PATROL_SCRUB_UUID,
	CXL_FEAT_ECS_UUID,
	CXL_FEAT_SPPR_UUID,
	CXL_FEAT_HPPR_UUID,
	CXL_FEAT_CACHELINE_SPARING_UUID,
	CXL_FEAT_ROW_SPARING_UUID,
	CXL_FEAT_BANK_SPARING_UUID,
	CXL_FEAT_RANK_SPARING_UUID,
};
23 
24 static bool is_cxl_feature_exclusive(struct cxl_feat_entry *entry)
25 {
26 	for (int i = 0; i < ARRAY_SIZE(cxl_exclusive_feats); i++) {
27 		if (uuid_equal(&entry->uuid, &cxl_exclusive_feats[i]))
28 			return true;
29 	}
30 
31 	return false;
32 }
33 
/* Accessor for the features context attached to the CXL device state */
inline struct cxl_features_state *to_cxlfs(struct cxl_dev_state *cxlds)
{
	/* May be NULL if devm_cxl_setup_features() was not run or failed */
	return cxlds->cxlfs;
}
EXPORT_SYMBOL_NS_GPL(to_cxlfs, "CXL");
39 
40 static int cxl_get_supported_features_count(struct cxl_mailbox *cxl_mbox)
41 {
42 	struct cxl_mbox_get_sup_feats_out mbox_out;
43 	struct cxl_mbox_get_sup_feats_in mbox_in;
44 	struct cxl_mbox_cmd mbox_cmd;
45 	int rc;
46 
47 	memset(&mbox_in, 0, sizeof(mbox_in));
48 	mbox_in.count = cpu_to_le32(sizeof(mbox_out));
49 	memset(&mbox_out, 0, sizeof(mbox_out));
50 	mbox_cmd = (struct cxl_mbox_cmd) {
51 		.opcode = CXL_MBOX_OP_GET_SUPPORTED_FEATURES,
52 		.size_in = sizeof(mbox_in),
53 		.payload_in = &mbox_in,
54 		.size_out = sizeof(mbox_out),
55 		.payload_out = &mbox_out,
56 		.min_out = sizeof(mbox_out),
57 	};
58 	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
59 	if (rc < 0)
60 		return rc;
61 
62 	return le16_to_cpu(mbox_out.supported_feats);
63 }
64 
/*
 * Retrieve the full table of supported feature entries from the device.
 *
 * The total count is queried first, then entries are fetched in batches
 * sized to the mailbox payload limit. Returns a kvmalloc()'ed
 * cxl_feat_entries on success (ownership transfers to the caller, freed
 * with kvfree()), or NULL on any failure.
 */
static struct cxl_feat_entries *
get_supported_features(struct cxl_features_state *cxlfs)
{
	int remain_feats, max_size, max_feats, start, rc, hdr_size;
	struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;
	int feat_size = sizeof(struct cxl_feat_entry);
	struct cxl_mbox_get_sup_feats_in mbox_in;
	struct cxl_feat_entry *entry;
	struct cxl_mbox_cmd mbox_cmd;
	int user_feats = 0;
	int count;

	count = cxl_get_supported_features_count(cxl_mbox);
	if (count <= 0)
		return NULL;

	/* Scope-managed allocations: freed automatically on early return */
	struct cxl_feat_entries *entries __free(kvfree) =
		kvmalloc(struct_size(entries, ent, count), GFP_KERNEL);
	if (!entries)
		return NULL;

	struct cxl_mbox_get_sup_feats_out *mbox_out __free(kvfree) =
		kvmalloc(cxl_mbox->payload_size, GFP_KERNEL);
	if (!mbox_out)
		return NULL;

	hdr_size = struct_size(mbox_out, ents, 0);
	max_size = cxl_mbox->payload_size - hdr_size;
	/* max feat entries that can fit in mailbox max payload size */
	max_feats = max_size / feat_size;
	entry = entries->ent;

	start = 0;
	remain_feats = count;
	do {
		int retrieved, alloc_size, copy_feats;
		int num_entries;

		/* Size this batch to at most max_feats entries */
		if (remain_feats > max_feats) {
			alloc_size = struct_size(mbox_out, ents, max_feats);
			remain_feats = remain_feats - max_feats;
			copy_feats = max_feats;
		} else {
			alloc_size = struct_size(mbox_out, ents, remain_feats);
			copy_feats = remain_feats;
			remain_feats = 0;
		}

		memset(&mbox_in, 0, sizeof(mbox_in));
		mbox_in.count = cpu_to_le32(alloc_size);
		mbox_in.start_idx = cpu_to_le16(start);
		memset(mbox_out, 0, alloc_size);
		mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_GET_SUPPORTED_FEATURES,
			.size_in = sizeof(mbox_in),
			.payload_in = &mbox_in,
			.size_out = alloc_size,
			.payload_out = mbox_out,
			.min_out = hdr_size,
		};
		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
		if (rc < 0)
			return NULL;

		/* A header-only reply carries no entries; treat as failure */
		if (mbox_cmd.size_out <= hdr_size)
			return NULL;

		/*
		 * Make sure retrieved out buffer is multiple of feature
		 * entries.
		 */
		retrieved = mbox_cmd.size_out - hdr_size;
		if (retrieved % feat_size)
			return NULL;

		num_entries = le16_to_cpu(mbox_out->num_entries);
		/*
		 * If the reported output entries * defined entry size !=
		 * retrieved output bytes, then the output package is incorrect.
		 */
		if (num_entries * feat_size != retrieved)
			return NULL;

		memcpy(entry, mbox_out->ents, retrieved);
		/* Count only the features userspace is allowed to use */
		for (int i = 0; i < num_entries; i++) {
			if (!is_cxl_feature_exclusive(entry + i))
				user_feats++;
		}
		entry += num_entries;
		/*
		 * If the number of output entries is less than expected, add the
		 * remaining entries to the next batch.
		 */
		remain_feats += copy_feats - num_entries;
		start += num_entries;
	} while (remain_feats);

	entries->num_features = count;
	entries->num_user_features = user_feats;

	/* Disarm the cleanup handler and hand ownership to the caller */
	return no_free_ptr(entries);
}
167 
168 static void free_cxlfs(void *_cxlfs)
169 {
170 	struct cxl_features_state *cxlfs = _cxlfs;
171 	struct cxl_dev_state *cxlds = cxlfs->cxlds;
172 
173 	cxlds->cxlfs = NULL;
174 	kvfree(cxlfs->entries);
175 	kfree(cxlfs);
176 }
177 
/**
 * devm_cxl_setup_features() - Allocate and initialize features context
 * @cxlds: CXL device context
 *
 * Return 0 on success or -errno on failure.
 */
int devm_cxl_setup_features(struct cxl_dev_state *cxlds)
{
	struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;

	/* Requires at least read-only feature mailbox capability */
	if (cxl_mbox->feat_cap < CXL_FEATURES_RO)
		return -ENODEV;

	/* Scope-managed: freed automatically on any early return below */
	struct cxl_features_state *cxlfs __free(kfree) =
		kzalloc(sizeof(*cxlfs), GFP_KERNEL);
	if (!cxlfs)
		return -ENOMEM;

	cxlfs->cxlds = cxlds;

	cxlfs->entries = get_supported_features(cxlfs);
	if (!cxlfs->entries)
		return -ENOMEM;

	cxlds->cxlfs = cxlfs;

	/* no_free_ptr() transfers ownership to the devm action */
	return devm_add_action_or_reset(cxlds->dev, free_cxlfs, no_free_ptr(cxlfs));
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_features, "CXL");
207 
208 size_t cxl_get_feature(struct cxl_mailbox *cxl_mbox, const uuid_t *feat_uuid,
209 		       enum cxl_get_feat_selection selection,
210 		       void *feat_out, size_t feat_out_size, u16 offset,
211 		       u16 *return_code)
212 {
213 	size_t data_to_rd_size, size_out;
214 	struct cxl_mbox_get_feat_in pi;
215 	struct cxl_mbox_cmd mbox_cmd;
216 	size_t data_rcvd_size = 0;
217 	int rc;
218 
219 	if (return_code)
220 		*return_code = CXL_MBOX_CMD_RC_INPUT;
221 
222 	if (!feat_out || !feat_out_size)
223 		return 0;
224 
225 	size_out = min(feat_out_size, cxl_mbox->payload_size);
226 	uuid_copy(&pi.uuid, feat_uuid);
227 	pi.selection = selection;
228 	do {
229 		data_to_rd_size = min(feat_out_size - data_rcvd_size,
230 				      cxl_mbox->payload_size);
231 		pi.offset = cpu_to_le16(offset + data_rcvd_size);
232 		pi.count = cpu_to_le16(data_to_rd_size);
233 
234 		mbox_cmd = (struct cxl_mbox_cmd) {
235 			.opcode = CXL_MBOX_OP_GET_FEATURE,
236 			.size_in = sizeof(pi),
237 			.payload_in = &pi,
238 			.size_out = size_out,
239 			.payload_out = feat_out + data_rcvd_size,
240 			.min_out = data_to_rd_size,
241 		};
242 		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
243 		if (rc < 0 || !mbox_cmd.size_out) {
244 			if (return_code)
245 				*return_code = mbox_cmd.return_code;
246 			return 0;
247 		}
248 		data_rcvd_size += mbox_cmd.size_out;
249 	} while (data_rcvd_size < feat_out_size);
250 
251 	if (return_code)
252 		*return_code = CXL_MBOX_CMD_RC_SUCCESS;
253 
254 	return data_rcvd_size;
255 }
256 
/*
 * FEAT_DATA_MIN_PAYLOAD_SIZE - min extra number of bytes should be
 * available in the mailbox for storing the actual feature data so that
 * the feature data transfer would work as expected.
 */
#define FEAT_DATA_MIN_PAYLOAD_SIZE 10
/*
 * Write a feature's data to the device, splitting @feat_data into one
 * FULL transfer when it fits the mailbox payload, or an INITIATE /
 * CONTINUE / FINISH sequence otherwise.
 *
 * Returns 0 on success or a negative errno; @return_code (when
 * non-NULL) reports the mailbox return code.
 */
int cxl_set_feature(struct cxl_mailbox *cxl_mbox,
		    const uuid_t *feat_uuid, u8 feat_version,
		    const void *feat_data, size_t feat_data_size,
		    u32 feat_flag, u16 offset, u16 *return_code)
{
	size_t data_in_size, data_sent_size = 0;
	struct cxl_mbox_cmd mbox_cmd;
	size_t hdr_size;

	if (return_code)
		*return_code = CXL_MBOX_CMD_RC_INPUT;

	/* Scope-managed input payload buffer, freed on any return */
	struct cxl_mbox_set_feat_in *pi __free(kfree) =
			kzalloc(cxl_mbox->payload_size, GFP_KERNEL);
	if (!pi)
		return -ENOMEM;

	uuid_copy(&pi->uuid, feat_uuid);
	pi->version = feat_version;
	/* Caller-supplied transfer bits are replaced by the ones below */
	feat_flag &= ~CXL_SET_FEAT_FLAG_DATA_TRANSFER_MASK;
	feat_flag |= CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET;
	hdr_size = sizeof(pi->hdr);
	/*
	 * Check minimum mbox payload size is available for
	 * the feature data transfer.
	 */
	if (hdr_size + FEAT_DATA_MIN_PAYLOAD_SIZE > cxl_mbox->payload_size)
		return -ENOMEM;

	if (hdr_size + feat_data_size <= cxl_mbox->payload_size) {
		/* Everything fits in one command: FULL transfer */
		pi->flags = cpu_to_le32(feat_flag |
					CXL_SET_FEAT_FLAG_FULL_DATA_TRANSFER);
		data_in_size = feat_data_size;
	} else {
		/* First chunk of a multi-command sequence: INITIATE */
		pi->flags = cpu_to_le32(feat_flag |
					CXL_SET_FEAT_FLAG_INITIATE_DATA_TRANSFER);
		data_in_size = cxl_mbox->payload_size - hdr_size;
	}

	do {
		int rc;

		pi->offset = cpu_to_le16(offset + data_sent_size);
		memcpy(pi->feat_data, feat_data + data_sent_size, data_in_size);
		mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_SET_FEATURE,
			.size_in = hdr_size + data_in_size,
			.payload_in = pi,
		};
		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
		if (rc < 0) {
			if (return_code)
				*return_code = mbox_cmd.return_code;
			return rc;
		}

		data_sent_size += data_in_size;
		if (data_sent_size >= feat_data_size) {
			if (return_code)
				*return_code = CXL_MBOX_CMD_RC_SUCCESS;
			return 0;
		}

		/* Choose FINISH for the last chunk, CONTINUE otherwise */
		if ((feat_data_size - data_sent_size) <= (cxl_mbox->payload_size - hdr_size)) {
			data_in_size = feat_data_size - data_sent_size;
			pi->flags = cpu_to_le32(feat_flag |
						CXL_SET_FEAT_FLAG_FINISH_DATA_TRANSFER);
		} else {
			pi->flags = cpu_to_le32(feat_flag |
						CXL_SET_FEAT_FLAG_CONTINUE_DATA_TRANSFER);
		}
	} while (true);
}
336 
/* FWCTL support */

/* The fwctl device's parent is the CXL memdev it was registered under */
static inline struct cxl_memdev *fwctl_to_memdev(struct fwctl_device *fwctl_dev)
{
	return to_cxl_memdev(fwctl_dev->dev.parent);
}
343 
/* No per-context state is needed for the CXL fwctl user context */
static int cxlctl_open_uctx(struct fwctl_uctx *uctx)
{
	return 0;
}
348 
/* Nothing to release; see cxlctl_open_uctx() */
static void cxlctl_close_uctx(struct fwctl_uctx *uctx)
{
}
352 
/*
 * Serve a Get Supported Features RPC from the cached feature table,
 * without touching hardware. Exclusive (kernel-only) features are
 * sanitized so userspace sees them as non-changeable.
 *
 * Returns a kvzalloc()'ed fwctl_rpc_cxl_out (ownership transfers to the
 * caller) with *out_len set, or an ERR_PTR() on failure.
 */
static void *cxlctl_get_supported_features(struct cxl_features_state *cxlfs,
					   const struct fwctl_rpc_cxl *rpc_in,
					   size_t *out_len)
{
	const struct cxl_mbox_get_sup_feats_in *feat_in;
	struct cxl_mbox_get_sup_feats_out *feat_out;
	struct cxl_feat_entry *pos;
	size_t out_size;
	int requested;
	u32 count;
	u16 start;
	int i;

	if (rpc_in->op_size != sizeof(*feat_in))
		return ERR_PTR(-EINVAL);

	feat_in = &rpc_in->get_sup_feats_in;
	count = le32_to_cpu(feat_in->count);
	start = le16_to_cpu(feat_in->start_idx);
	/* ->count is in bytes; convert to a number of entries */
	requested = count / sizeof(*pos);

	/*
	 * Make sure that the total requested number of entries is not greater
	 * than the total number of supported features allowed for userspace.
	 */
	if (start >= cxlfs->entries->num_features)
		return ERR_PTR(-EINVAL);

	requested = min_t(int, requested, cxlfs->entries->num_features - start);

	out_size = sizeof(struct fwctl_rpc_cxl_out) +
		struct_size(feat_out, ents, requested);

	/* Scope-managed; disarm with no_free_ptr() on the success paths */
	struct fwctl_rpc_cxl_out *rpc_out __free(kvfree) =
		kvzalloc(out_size, GFP_KERNEL);
	if (!rpc_out)
		return ERR_PTR(-ENOMEM);

	rpc_out->size = struct_size(feat_out, ents, requested);
	feat_out = &rpc_out->get_sup_feats_out;
	/* Zero entries requested: report only the total supported count */
	if (requested == 0) {
		feat_out->num_entries = cpu_to_le16(requested);
		feat_out->supported_feats =
			cpu_to_le16(cxlfs->entries->num_features);
		rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;
		*out_len = out_size;
		return no_free_ptr(rpc_out);
	}

	for (i = start, pos = &feat_out->ents[0];
	     i < cxlfs->entries->num_features; i++, pos++) {
		if (i - start == requested)
			break;

		memcpy(pos, &cxlfs->entries->ent[i], sizeof(*pos));
		/*
		 * If the feature is exclusive, set the set_feat_size to 0 to
		 * indicate that the feature is not changeable.
		 */
		if (is_cxl_feature_exclusive(pos)) {
			u32 flags;

			pos->set_feat_size = 0;
			flags = le32_to_cpu(pos->flags);
			flags &= ~CXL_FEATURE_F_CHANGEABLE;
			pos->flags = cpu_to_le32(flags);
		}
	}

	feat_out->num_entries = cpu_to_le16(requested);
	feat_out->supported_feats = cpu_to_le16(cxlfs->entries->num_features);
	rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;
	*out_len = out_size;

	return no_free_ptr(rpc_out);
}
429 
430 static bool cxlctl_validate_hw_command(struct cxl_features_state *cxlfs,
431 				       const struct fwctl_rpc_cxl *rpc_in,
432 				       enum fwctl_rpc_scope scope,
433 				       u16 opcode)
434 {
435 	struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;
436 
437 	switch (opcode) {
438 	case CXL_MBOX_OP_GET_SUPPORTED_FEATURES:
439 		if (cxl_mbox->feat_cap < CXL_FEATURES_RO)
440 			return false;
441 		if (scope >= FWCTL_RPC_CONFIGURATION)
442 			return true;
443 		return false;
444 	default:
445 		return false;
446 	}
447 }
448 
449 static void *cxlctl_handle_commands(struct cxl_features_state *cxlfs,
450 				    const struct fwctl_rpc_cxl *rpc_in,
451 				    size_t *out_len, u16 opcode)
452 {
453 	switch (opcode) {
454 	case CXL_MBOX_OP_GET_SUPPORTED_FEATURES:
455 		return cxlctl_get_supported_features(cxlfs, rpc_in, out_len);
456 	default:
457 		return ERR_PTR(-EOPNOTSUPP);
458 	}
459 }
460 
461 static void *cxlctl_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
462 			   void *in, size_t in_len, size_t *out_len)
463 {
464 	struct fwctl_device *fwctl_dev = uctx->fwctl;
465 	struct cxl_memdev *cxlmd = fwctl_to_memdev(fwctl_dev);
466 	struct cxl_features_state *cxlfs = to_cxlfs(cxlmd->cxlds);
467 	const struct fwctl_rpc_cxl *rpc_in = in;
468 	u16 opcode = rpc_in->opcode;
469 
470 	if (!cxlctl_validate_hw_command(cxlfs, rpc_in, scope, opcode))
471 		return ERR_PTR(-EINVAL);
472 
473 	return cxlctl_handle_commands(cxlfs, rpc_in, out_len, opcode);
474 }
475 
/* fwctl driver ops for the CXL memdev-backed fwctl device */
static const struct fwctl_ops cxlctl_ops = {
	.device_type = FWCTL_DEVICE_TYPE_CXL,
	.uctx_size = sizeof(struct fwctl_uctx),
	.open_uctx = cxlctl_open_uctx,
	.close_uctx = cxlctl_close_uctx,
	.fw_rpc = cxlctl_fw_rpc,
};
483 
/* Scope-based cleanup: drop the fwctl ref if setup bails out early */
DEFINE_FREE(free_fwctl_dev, struct fwctl_device *, if (_T) fwctl_put(_T))

/* devm action: unregister the fwctl device, then drop its reference */
static void free_memdev_fwctl(void *_fwctl_dev)
{
	struct fwctl_device *fwctl_dev = _fwctl_dev;

	fwctl_unregister(fwctl_dev);
	fwctl_put(fwctl_dev);
}
493 
/*
 * Register an fwctl char device for @cxlmd so userspace can issue
 * feature RPCs. Requires a features context with at least one
 * user-visible feature. Returns 0 on success or -errno.
 */
int devm_cxl_setup_fwctl(struct cxl_memdev *cxlmd)
{
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_features_state *cxlfs;
	int rc;

	cxlfs = to_cxlfs(cxlds);
	if (!cxlfs)
		return -ENODEV;

	/* No need to setup FWCTL if there are no user allowed features found */
	if (!cxlfs->entries->num_user_features)
		return -ENODEV;

	/* Scope-managed ref: fwctl_put() runs on any early return below */
	struct fwctl_device *fwctl_dev __free(free_fwctl_dev) =
		_fwctl_alloc_device(&cxlmd->dev, &cxlctl_ops, sizeof(*fwctl_dev));
	if (!fwctl_dev)
		return -ENOMEM;

	rc = fwctl_register(fwctl_dev);
	if (rc)
		return rc;

	/* no_free_ptr() hands the reference over to the devm action */
	return devm_add_action_or_reset(&cxlmd->dev, free_memdev_fwctl,
					no_free_ptr(fwctl_dev));
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_fwctl, "CXL");
521 
522 MODULE_IMPORT_NS("FWCTL");
523