xref: /linux/drivers/cxl/core/features.c (revision 588ca944c27729c7f950d1f44c6d6700a919969a)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2024-2025 Intel Corporation. All rights reserved. */
3 #include <linux/fwctl.h>
4 #include <linux/device.h>
5 #include <cxl/mailbox.h>
6 #include <cxl/features.h>
7 #include <uapi/fwctl/cxl.h>
8 #include "cxl.h"
9 #include "core.h"
10 #include "cxlmem.h"
11 
/*
 * Features managed exclusively by the kernel. These are the RAS
 * scrub/repair controls; they are masked as non-changeable when reported
 * to userspace and attempts to set them via fwctl are rejected.
 */
static const uuid_t cxl_exclusive_feats[] = {
	CXL_FEAT_PATROL_SCRUB_UUID,
	CXL_FEAT_ECS_UUID,
	CXL_FEAT_SPPR_UUID,
	CXL_FEAT_HPPR_UUID,
	CXL_FEAT_CACHELINE_SPARING_UUID,
	CXL_FEAT_ROW_SPARING_UUID,
	CXL_FEAT_BANK_SPARING_UUID,
	CXL_FEAT_RANK_SPARING_UUID,
};
23 
24 static bool is_cxl_feature_exclusive_by_uuid(const uuid_t *uuid)
25 {
26 	for (int i = 0; i < ARRAY_SIZE(cxl_exclusive_feats); i++) {
27 		if (uuid_equal(uuid, &cxl_exclusive_feats[i]))
28 			return true;
29 	}
30 
31 	return false;
32 }
33 
/* Check whether a cached feature table entry is kernel-exclusive */
static bool is_cxl_feature_exclusive(struct cxl_feat_entry *entry)
{
	return is_cxl_feature_exclusive_by_uuid(&entry->uuid);
}
38 
/**
 * to_cxlfs() - Retrieve the features context from a CXL device state
 * @cxlds: CXL device context
 *
 * Return: the cached features state, or NULL if features setup has not
 * run (or has already been torn down by free_cxlfs()).
 */
struct cxl_features_state *to_cxlfs(struct cxl_dev_state *cxlds)
{
	return cxlds->cxlfs;
}
EXPORT_SYMBOL_NS_GPL(to_cxlfs, "CXL");
44 
45 static int cxl_get_supported_features_count(struct cxl_mailbox *cxl_mbox)
46 {
47 	struct cxl_mbox_get_sup_feats_out mbox_out;
48 	struct cxl_mbox_get_sup_feats_in mbox_in;
49 	struct cxl_mbox_cmd mbox_cmd;
50 	int rc;
51 
52 	memset(&mbox_in, 0, sizeof(mbox_in));
53 	mbox_in.count = cpu_to_le32(sizeof(mbox_out));
54 	memset(&mbox_out, 0, sizeof(mbox_out));
55 	mbox_cmd = (struct cxl_mbox_cmd) {
56 		.opcode = CXL_MBOX_OP_GET_SUPPORTED_FEATURES,
57 		.size_in = sizeof(mbox_in),
58 		.payload_in = &mbox_in,
59 		.size_out = sizeof(mbox_out),
60 		.payload_out = &mbox_out,
61 		.min_out = sizeof(mbox_out),
62 	};
63 	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
64 	if (rc < 0)
65 		return rc;
66 
67 	return le16_to_cpu(mbox_out.supported_feats);
68 }
69 
/*
 * get_supported_features() - Cache the device's full feature table
 * @cxlfs: features context providing the device mailbox
 *
 * Queries the total feature count, then iterates the Get Supported
 * Features mailbox command in payload-sized batches, validating each
 * response, until all entries are copied. Also tallies how many entries
 * are visible to userspace (not kernel-exclusive).
 *
 * Return: kvmalloc()'ed entry table on success (caller frees with
 * kvfree()), NULL on any failure.
 */
static struct cxl_feat_entries *
get_supported_features(struct cxl_features_state *cxlfs)
{
	int remain_feats, max_size, max_feats, start, rc, hdr_size;
	struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;
	int feat_size = sizeof(struct cxl_feat_entry);
	struct cxl_mbox_get_sup_feats_in mbox_in;
	struct cxl_feat_entry *entry;
	struct cxl_mbox_cmd mbox_cmd;
	int user_feats = 0;
	int count;

	count = cxl_get_supported_features_count(cxl_mbox);
	if (count <= 0)
		return NULL;

	/*
	 * Scope-based cleanup: both buffers are freed automatically on any
	 * early return; ownership of @entries is transferred out via
	 * no_free_ptr() only on success.
	 */
	struct cxl_feat_entries *entries __free(kvfree) =
		kvmalloc(struct_size(entries, ent, count), GFP_KERNEL);
	if (!entries)
		return NULL;

	struct cxl_mbox_get_sup_feats_out *mbox_out __free(kvfree) =
		kvmalloc(cxl_mbox->payload_size, GFP_KERNEL);
	if (!mbox_out)
		return NULL;

	hdr_size = struct_size(mbox_out, ents, 0);
	max_size = cxl_mbox->payload_size - hdr_size;
	/* max feat entries that can fit in mailbox max payload size */
	max_feats = max_size / feat_size;
	entry = entries->ent;

	start = 0;
	remain_feats = count;
	do {
		int retrieved, alloc_size, copy_feats;
		int num_entries;

		/* Clamp this batch to what fits in one mailbox payload */
		if (remain_feats > max_feats) {
			alloc_size = struct_size(mbox_out, ents, max_feats);
			remain_feats = remain_feats - max_feats;
			copy_feats = max_feats;
		} else {
			alloc_size = struct_size(mbox_out, ents, remain_feats);
			copy_feats = remain_feats;
			remain_feats = 0;
		}

		memset(&mbox_in, 0, sizeof(mbox_in));
		mbox_in.count = cpu_to_le32(alloc_size);
		mbox_in.start_idx = cpu_to_le16(start);
		memset(mbox_out, 0, alloc_size);
		mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_GET_SUPPORTED_FEATURES,
			.size_in = sizeof(mbox_in),
			.payload_in = &mbox_in,
			.size_out = alloc_size,
			.payload_out = mbox_out,
			.min_out = hdr_size,
		};
		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
		if (rc < 0)
			return NULL;

		/* Header-only response means the device returned no entries */
		if (mbox_cmd.size_out <= hdr_size)
			return NULL;

		/*
		 * Make sure retrieved out buffer is multiple of feature
		 * entries.
		 */
		retrieved = mbox_cmd.size_out - hdr_size;
		if (retrieved % feat_size)
			return NULL;

		num_entries = le16_to_cpu(mbox_out->num_entries);
		/*
		 * If the reported output entries * defined entry size !=
		 * retrieved output bytes, then the output package is incorrect.
		 */
		if (num_entries * feat_size != retrieved)
			return NULL;

		memcpy(entry, mbox_out->ents, retrieved);
		/* Tally entries that userspace is allowed to operate on */
		for (int i = 0; i < num_entries; i++) {
			if (!is_cxl_feature_exclusive(entry + i))
				user_feats++;
		}
		entry += num_entries;
		/*
		 * If the number of output entries is less than expected, add the
		 * remaining entries to the next batch.
		 */
		remain_feats += copy_feats - num_entries;
		start += num_entries;
	} while (remain_feats);

	entries->num_features = count;
	entries->num_user_features = user_feats;

	return no_free_ptr(entries);
}
172 
173 static void free_cxlfs(void *_cxlfs)
174 {
175 	struct cxl_features_state *cxlfs = _cxlfs;
176 	struct cxl_dev_state *cxlds = cxlfs->cxlds;
177 
178 	cxlds->cxlfs = NULL;
179 	kvfree(cxlfs->entries);
180 	kfree(cxlfs);
181 }
182 
/**
 * devm_cxl_setup_features() - Allocate and initialize features context
 * @cxlds: CXL device context
 *
 * Retrieves and caches the device's supported feature table and hangs the
 * resulting context off @cxlds. Teardown is managed via a devm action.
 *
 * Return 0 on success or -errno on failure.
 */
int devm_cxl_setup_features(struct cxl_dev_state *cxlds)
{
	struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;

	/* Nothing to do if the mailbox lacks even read-only feature support */
	if (cxl_mbox->feat_cap < CXL_FEATURES_RO)
		return -ENODEV;

	/* Freed automatically on error paths; ownership moves to devm below */
	struct cxl_features_state *cxlfs __free(kfree) =
		kzalloc(sizeof(*cxlfs), GFP_KERNEL);
	if (!cxlfs)
		return -ENOMEM;

	cxlfs->cxlds = cxlds;

	cxlfs->entries = get_supported_features(cxlfs);
	if (!cxlfs->entries)
		return -ENOMEM;

	cxlds->cxlfs = cxlfs;

	/* free_cxlfs() now owns the allocation (and undoes cxlds->cxlfs) */
	return devm_add_action_or_reset(cxlds->dev, free_cxlfs, no_free_ptr(cxlfs));
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_features, "CXL");
212 
/*
 * cxl_get_feature() - Read feature data via the Get Feature mailbox command
 * @cxl_mbox: mailbox to issue commands on
 * @feat_uuid: UUID of the feature to read
 * @selection: which data set to read (current/default/saved)
 * @feat_out: destination buffer
 * @feat_out_size: total bytes to read into @feat_out
 * @offset: starting byte offset within the feature data
 * @return_code: optional out-param for the mailbox return code
 *
 * Reads in payload-sized chunks until @feat_out_size bytes are received.
 *
 * Return: total bytes received, or 0 on failure (with @return_code set
 * when provided).
 */
size_t cxl_get_feature(struct cxl_mailbox *cxl_mbox, const uuid_t *feat_uuid,
		       enum cxl_get_feat_selection selection,
		       void *feat_out, size_t feat_out_size, u16 offset,
		       u16 *return_code)
{
	size_t data_to_rd_size, size_out;
	struct cxl_mbox_get_feat_in pi;
	struct cxl_mbox_cmd mbox_cmd;
	size_t data_rcvd_size = 0;
	int rc;

	if (return_code)
		*return_code = CXL_MBOX_CMD_RC_INPUT;

	if (!feat_out || !feat_out_size)
		return 0;

	size_out = min(feat_out_size, cxl_mbox->payload_size);
	uuid_copy(&pi.uuid, feat_uuid);
	pi.selection = selection;
	do {
		/* Request no more than one mailbox payload per iteration */
		data_to_rd_size = min(feat_out_size - data_rcvd_size,
				      cxl_mbox->payload_size);
		pi.offset = cpu_to_le16(offset + data_rcvd_size);
		pi.count = cpu_to_le16(data_to_rd_size);

		mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_GET_FEATURE,
			.size_in = sizeof(pi),
			.payload_in = &pi,
			.size_out = size_out,
			.payload_out = feat_out + data_rcvd_size,
			.min_out = data_to_rd_size,
		};
		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
		if (rc < 0 || !mbox_cmd.size_out) {
			if (return_code)
				*return_code = mbox_cmd.return_code;
			return 0;
		}
		/* Advance by what the device actually returned */
		data_rcvd_size += mbox_cmd.size_out;
	} while (data_rcvd_size < feat_out_size);

	if (return_code)
		*return_code = CXL_MBOX_CMD_RC_SUCCESS;

	return data_rcvd_size;
}
261 
/*
 * FEAT_DATA_MIN_PAYLOAD_SIZE - min extra number of bytes should be
 * available in the mailbox for storing the actual feature data so that
 * the feature data transfer would work as expected.
 */
#define FEAT_DATA_MIN_PAYLOAD_SIZE 10
/*
 * cxl_set_feature() - Write feature data via the Set Feature mailbox command
 * @cxl_mbox: mailbox to issue commands on
 * @feat_uuid: UUID of the feature to write
 * @feat_version: feature version to place in the header
 * @feat_data: source buffer
 * @feat_data_size: total bytes to transfer from @feat_data
 * @feat_flag: caller flags; the data-transfer bits are overwritten here
 * @offset: starting byte offset within the feature data
 * @return_code: optional out-param for the mailbox return code
 *
 * Sends the data in one FULL transfer if it fits in a single payload,
 * otherwise as an INITIATE / CONTINUE... / FINISH sequence.
 *
 * Return: 0 on success or -errno on failure (with @return_code set when
 * provided).
 */
int cxl_set_feature(struct cxl_mailbox *cxl_mbox,
		    const uuid_t *feat_uuid, u8 feat_version,
		    const void *feat_data, size_t feat_data_size,
		    u32 feat_flag, u16 offset, u16 *return_code)
{
	size_t data_in_size, data_sent_size = 0;
	struct cxl_mbox_cmd mbox_cmd;
	size_t hdr_size;

	if (return_code)
		*return_code = CXL_MBOX_CMD_RC_INPUT;

	struct cxl_mbox_set_feat_in *pi __free(kfree) =
			kzalloc(cxl_mbox->payload_size, GFP_KERNEL);
	if (!pi)
		return -ENOMEM;

	uuid_copy(&pi->uuid, feat_uuid);
	pi->version = feat_version;
	/* The transfer-phase bits are owned by this function, not the caller */
	feat_flag &= ~CXL_SET_FEAT_FLAG_DATA_TRANSFER_MASK;
	feat_flag |= CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET;
	hdr_size = sizeof(pi->hdr);
	/*
	 * Check minimum mbox payload size is available for
	 * the feature data transfer.
	 */
	if (hdr_size + FEAT_DATA_MIN_PAYLOAD_SIZE > cxl_mbox->payload_size)
		return -ENOMEM;

	/* Choose single-shot vs. multi-part transfer based on payload room */
	if (hdr_size + feat_data_size <= cxl_mbox->payload_size) {
		pi->flags = cpu_to_le32(feat_flag |
					CXL_SET_FEAT_FLAG_FULL_DATA_TRANSFER);
		data_in_size = feat_data_size;
	} else {
		pi->flags = cpu_to_le32(feat_flag |
					CXL_SET_FEAT_FLAG_INITIATE_DATA_TRANSFER);
		data_in_size = cxl_mbox->payload_size - hdr_size;
	}

	do {
		int rc;

		pi->offset = cpu_to_le16(offset + data_sent_size);
		memcpy(pi->feat_data, feat_data + data_sent_size, data_in_size);
		mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_SET_FEATURE,
			.size_in = hdr_size + data_in_size,
			.payload_in = pi,
		};
		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
		if (rc < 0) {
			if (return_code)
				*return_code = mbox_cmd.return_code;
			return rc;
		}

		data_sent_size += data_in_size;
		if (data_sent_size >= feat_data_size) {
			if (return_code)
				*return_code = CXL_MBOX_CMD_RC_SUCCESS;
			return 0;
		}

		/* Last chunk gets FINISH; intermediate chunks get CONTINUE */
		if ((feat_data_size - data_sent_size) <= (cxl_mbox->payload_size - hdr_size)) {
			data_in_size = feat_data_size - data_sent_size;
			pi->flags = cpu_to_le32(feat_flag |
						CXL_SET_FEAT_FLAG_FINISH_DATA_TRANSFER);
		} else {
			pi->flags = cpu_to_le32(feat_flag |
						CXL_SET_FEAT_FLAG_CONTINUE_DATA_TRANSFER);
		}
	} while (true);
}
341 
342 /* FWCTL support */
343 
/* The fwctl device is parented to the memdev; walk back up to it */
static inline struct cxl_memdev *fwctl_to_memdev(struct fwctl_device *fwctl_dev)
{
	return to_cxl_memdev(fwctl_dev->dev.parent);
}
348 
/* No per-user-context state needed; always succeed */
static int cxlctl_open_uctx(struct fwctl_uctx *uctx)
{
	return 0;
}
353 
/* Nothing to release; open_uctx allocated no state */
static void cxlctl_close_uctx(struct fwctl_uctx *uctx)
{
}
357 
358 struct cxl_feat_entry *
359 cxl_feature_info(struct cxl_features_state *cxlfs,
360 		 const uuid_t *uuid)
361 {
362 	struct cxl_feat_entry *feat;
363 
364 	for (int i = 0; i < cxlfs->entries->num_features; i++) {
365 		feat = &cxlfs->entries->ent[i];
366 		if (uuid_equal(uuid, &feat->uuid))
367 			return feat;
368 	}
369 
370 	return ERR_PTR(-EINVAL);
371 }
372 
/*
 * cxlctl_get_supported_features() - Service a userspace Get Supported
 * Features request from the cached table.
 * @cxlfs: features context
 * @rpc_in: validated userspace request
 * @out_len: out-param set to the number of bytes in the returned buffer
 *
 * Entries for kernel-exclusive features are still reported, but masked as
 * non-changeable so userspace cannot meaningfully act on them.
 *
 * Return: kvzalloc()'ed response (ownership passes to the caller) or an
 * ERR_PTR().
 */
static void *cxlctl_get_supported_features(struct cxl_features_state *cxlfs,
					   const struct fwctl_rpc_cxl *rpc_in,
					   size_t *out_len)
{
	const struct cxl_mbox_get_sup_feats_in *feat_in;
	struct cxl_mbox_get_sup_feats_out *feat_out;
	struct cxl_feat_entry *pos;
	size_t out_size;
	int requested;
	u32 count;
	u16 start;
	int i;

	if (rpc_in->op_size != sizeof(*feat_in))
		return ERR_PTR(-EINVAL);

	feat_in = &rpc_in->get_sup_feats_in;
	count = le32_to_cpu(feat_in->count);
	start = le16_to_cpu(feat_in->start_idx);
	/* count is in bytes; convert to a number of whole entries */
	requested = count / sizeof(*pos);

	/*
	 * Make sure that the total requested number of entries is not greater
	 * than the total number of supported features allowed for userspace.
	 */
	if (start >= cxlfs->entries->num_features)
		return ERR_PTR(-EINVAL);

	requested = min_t(int, requested, cxlfs->entries->num_features - start);

	out_size = sizeof(struct fwctl_rpc_cxl_out) +
		struct_size(feat_out, ents, requested);

	struct fwctl_rpc_cxl_out *rpc_out __free(kvfree) =
		kvzalloc(out_size, GFP_KERNEL);
	if (!rpc_out)
		return ERR_PTR(-ENOMEM);

	rpc_out->size = struct_size(feat_out, ents, requested);
	feat_out = &rpc_out->get_sup_feats_out;
	/* Zero-entry request: report totals only, no entry payload */
	if (requested == 0) {
		feat_out->num_entries = cpu_to_le16(requested);
		feat_out->supported_feats =
			cpu_to_le16(cxlfs->entries->num_features);
		rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;
		*out_len = out_size;
		return no_free_ptr(rpc_out);
	}

	for (i = start, pos = &feat_out->ents[0];
	     i < cxlfs->entries->num_features; i++, pos++) {
		if (i - start == requested)
			break;

		memcpy(pos, &cxlfs->entries->ent[i], sizeof(*pos));
		/*
		 * If the feature is exclusive, set the set_feat_size to 0 to
		 * indicate that the feature is not changeable.
		 */
		if (is_cxl_feature_exclusive(pos)) {
			u32 flags;

			pos->set_feat_size = 0;
			flags = le32_to_cpu(pos->flags);
			flags &= ~CXL_FEATURE_F_CHANGEABLE;
			pos->flags = cpu_to_le32(flags);
		}
	}

	feat_out->num_entries = cpu_to_le16(requested);
	feat_out->supported_feats = cpu_to_le16(cxlfs->entries->num_features);
	rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;
	*out_len = out_size;

	return no_free_ptr(rpc_out);
}
449 
/*
 * cxlctl_get_feature() - Service a userspace Get Feature request.
 * @cxlfs: features context
 * @rpc_in: validated userspace request
 * @out_len: in: caller-provided output buffer size; out: bytes produced
 *
 * NOTE(review): the response buffer is sized from the incoming *out_len
 * while up to @count bytes are read into rpc_out->payload — this assumes
 * the fwctl core has already validated out_len against count; confirm
 * against the fwctl ioctl path.
 *
 * Return: kvzalloc()'ed response (ownership passes to the caller) or an
 * ERR_PTR().
 */
static void *cxlctl_get_feature(struct cxl_features_state *cxlfs,
				const struct fwctl_rpc_cxl *rpc_in,
				size_t *out_len)
{
	struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;
	const struct cxl_mbox_get_feat_in *feat_in;
	u16 offset, count, return_code;
	size_t out_size = *out_len;

	if (rpc_in->op_size != sizeof(*feat_in))
		return ERR_PTR(-EINVAL);

	feat_in = &rpc_in->get_feat_in;
	offset = le16_to_cpu(feat_in->offset);
	count = le16_to_cpu(feat_in->count);

	if (!count)
		return ERR_PTR(-EINVAL);

	struct fwctl_rpc_cxl_out *rpc_out __free(kvfree) =
		kvzalloc(out_size, GFP_KERNEL);
	if (!rpc_out)
		return ERR_PTR(-ENOMEM);

	out_size = cxl_get_feature(cxl_mbox, &feat_in->uuid,
				   feat_in->selection, rpc_out->payload,
				   count, offset, &return_code);
	*out_len = sizeof(struct fwctl_rpc_cxl_out);
	/* Hardware failure: still return the header so userspace sees retval */
	if (!out_size) {
		rpc_out->size = 0;
		rpc_out->retval = return_code;
		return no_free_ptr(rpc_out);
	}

	rpc_out->size = out_size;
	rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;
	*out_len += out_size;

	return no_free_ptr(rpc_out);
}
490 
/*
 * cxlctl_set_feature() - Service a userspace Set Feature request.
 * @cxlfs: features context
 * @rpc_in: validated userspace request
 * @out_len: out-param set to the number of bytes in the returned buffer
 *
 * Kernel-exclusive features are rejected with -EPERM regardless of scope.
 *
 * Return: kvzalloc()'ed response (ownership passes to the caller) or an
 * ERR_PTR().
 */
static void *cxlctl_set_feature(struct cxl_features_state *cxlfs,
				const struct fwctl_rpc_cxl *rpc_in,
				size_t *out_len)
{
	struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;
	const struct cxl_mbox_set_feat_in *feat_in;
	size_t out_size, data_size;
	u16 offset, return_code;
	u32 flags;
	int rc;

	/* Must carry at least the header plus one byte of feature data */
	if (rpc_in->op_size <= sizeof(feat_in->hdr))
		return ERR_PTR(-EINVAL);

	feat_in = &rpc_in->set_feat_in;

	if (is_cxl_feature_exclusive_by_uuid(&feat_in->uuid))
		return ERR_PTR(-EPERM);

	offset = le16_to_cpu(feat_in->offset);
	flags = le32_to_cpu(feat_in->flags);
	out_size = *out_len;

	struct fwctl_rpc_cxl_out *rpc_out __free(kvfree) =
		kvzalloc(out_size, GFP_KERNEL);
	if (!rpc_out)
		return ERR_PTR(-ENOMEM);

	rpc_out->size = 0;

	data_size = rpc_in->op_size - sizeof(feat_in->hdr);
	rc = cxl_set_feature(cxl_mbox, &feat_in->uuid,
			     feat_in->version, feat_in->feat_data,
			     data_size, flags, offset, &return_code);
	*out_len = sizeof(*rpc_out);
	/* On failure, surface the hardware return code to userspace */
	if (rc) {
		rpc_out->retval = return_code;
		return no_free_ptr(rpc_out);
	}

	rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;

	return no_free_ptr(rpc_out);
}
535 
536 static bool cxlctl_validate_set_features(struct cxl_features_state *cxlfs,
537 					 const struct fwctl_rpc_cxl *rpc_in,
538 					 enum fwctl_rpc_scope scope)
539 {
540 	u16 effects, imm_mask, reset_mask;
541 	struct cxl_feat_entry *feat;
542 	u32 flags;
543 
544 	if (rpc_in->op_size < sizeof(uuid_t))
545 		return ERR_PTR(-EINVAL);
546 
547 	feat = cxl_feature_info(cxlfs, &rpc_in->set_feat_in.uuid);
548 	if (IS_ERR(feat))
549 		return false;
550 
551 	/* Ensure that the attribute is changeable */
552 	flags = le32_to_cpu(feat->flags);
553 	if (!(flags & CXL_FEATURE_F_CHANGEABLE))
554 		return false;
555 
556 	effects = le16_to_cpu(feat->effects);
557 
558 	/*
559 	 * Reserved bits are set, rejecting since the effects is not
560 	 * comprehended by the driver.
561 	 */
562 	if (effects & CXL_CMD_EFFECTS_RESERVED) {
563 		dev_warn_once(cxlfs->cxlds->dev,
564 			      "Reserved bits set in the Feature effects field!\n");
565 		return false;
566 	}
567 
568 	/* Currently no user background command support */
569 	if (effects & CXL_CMD_BACKGROUND)
570 		return false;
571 
572 	/* Effects cause immediate change, highest security scope is needed */
573 	imm_mask = CXL_CMD_CONFIG_CHANGE_IMMEDIATE |
574 		   CXL_CMD_DATA_CHANGE_IMMEDIATE |
575 		   CXL_CMD_POLICY_CHANGE_IMMEDIATE |
576 		   CXL_CMD_LOG_CHANGE_IMMEDIATE;
577 
578 	reset_mask = CXL_CMD_CONFIG_CHANGE_COLD_RESET |
579 		     CXL_CMD_CONFIG_CHANGE_CONV_RESET |
580 		     CXL_CMD_CONFIG_CHANGE_CXL_RESET;
581 
582 	/* If no immediate or reset effect set, The hardware has a bug */
583 	if (!(effects & imm_mask) && !(effects & reset_mask))
584 		return false;
585 
586 	/*
587 	 * If the Feature setting causes immediate configuration change
588 	 * then we need the full write permission policy.
589 	 */
590 	if (effects & imm_mask && scope >= FWCTL_RPC_DEBUG_WRITE_FULL)
591 		return true;
592 
593 	/*
594 	 * If the Feature setting only causes configuration change
595 	 * after a reset, then the lesser level of write permission
596 	 * policy is ok.
597 	 */
598 	if (!(effects & imm_mask) && scope >= FWCTL_RPC_DEBUG_WRITE)
599 		return true;
600 
601 	return false;
602 }
603 
604 static bool cxlctl_validate_hw_command(struct cxl_features_state *cxlfs,
605 				       const struct fwctl_rpc_cxl *rpc_in,
606 				       enum fwctl_rpc_scope scope,
607 				       u16 opcode)
608 {
609 	struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;
610 
611 	switch (opcode) {
612 	case CXL_MBOX_OP_GET_SUPPORTED_FEATURES:
613 	case CXL_MBOX_OP_GET_FEATURE:
614 		if (cxl_mbox->feat_cap < CXL_FEATURES_RO)
615 			return false;
616 		if (scope >= FWCTL_RPC_CONFIGURATION)
617 			return true;
618 		return false;
619 	case CXL_MBOX_OP_SET_FEATURE:
620 		if (cxl_mbox->feat_cap < CXL_FEATURES_RW)
621 			return false;
622 		return cxlctl_validate_set_features(cxlfs, rpc_in, scope);
623 	default:
624 		return false;
625 	}
626 }
627 
628 static void *cxlctl_handle_commands(struct cxl_features_state *cxlfs,
629 				    const struct fwctl_rpc_cxl *rpc_in,
630 				    size_t *out_len, u16 opcode)
631 {
632 	switch (opcode) {
633 	case CXL_MBOX_OP_GET_SUPPORTED_FEATURES:
634 		return cxlctl_get_supported_features(cxlfs, rpc_in, out_len);
635 	case CXL_MBOX_OP_GET_FEATURE:
636 		return cxlctl_get_feature(cxlfs, rpc_in, out_len);
637 	case CXL_MBOX_OP_SET_FEATURE:
638 		return cxlctl_set_feature(cxlfs, rpc_in, out_len);
639 	default:
640 		return ERR_PTR(-EOPNOTSUPP);
641 	}
642 }
643 
644 static void *cxlctl_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
645 			   void *in, size_t in_len, size_t *out_len)
646 {
647 	struct fwctl_device *fwctl_dev = uctx->fwctl;
648 	struct cxl_memdev *cxlmd = fwctl_to_memdev(fwctl_dev);
649 	struct cxl_features_state *cxlfs = to_cxlfs(cxlmd->cxlds);
650 	const struct fwctl_rpc_cxl *rpc_in = in;
651 	u16 opcode = rpc_in->opcode;
652 
653 	if (!cxlctl_validate_hw_command(cxlfs, rpc_in, scope, opcode))
654 		return ERR_PTR(-EINVAL);
655 
656 	return cxlctl_handle_commands(cxlfs, rpc_in, out_len, opcode);
657 }
658 
/* fwctl driver ops; no per-context state beyond the base fwctl_uctx */
static const struct fwctl_ops cxlctl_ops = {
	.device_type = FWCTL_DEVICE_TYPE_CXL,
	.uctx_size = sizeof(struct fwctl_uctx),
	.open_uctx = cxlctl_open_uctx,
	.close_uctx = cxlctl_close_uctx,
	.fw_rpc = cxlctl_fw_rpc,
};
666 
/* Scope-based cleanup: drop the fwctl device reference on early exit */
DEFINE_FREE(free_fwctl_dev, struct fwctl_device *, if (_T) fwctl_put(_T))

/* devm action: unregister the fwctl device and drop the final reference */
static void free_memdev_fwctl(void *_fwctl_dev)
{
	struct fwctl_device *fwctl_dev = _fwctl_dev;

	fwctl_unregister(fwctl_dev);
	fwctl_put(fwctl_dev);
}
676 
/**
 * devm_cxl_setup_fwctl() - Register a fwctl character device for a memdev
 * @host: device that owns the devm teardown action
 * @cxlmd: CXL memory device to expose via fwctl
 *
 * Requires that devm_cxl_setup_features() succeeded and found at least
 * one feature usable by userspace.
 *
 * Return: 0 on success or -errno on failure.
 */
int devm_cxl_setup_fwctl(struct device *host, struct cxl_memdev *cxlmd)
{
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_features_state *cxlfs;
	int rc;

	cxlfs = to_cxlfs(cxlds);
	if (!cxlfs)
		return -ENODEV;

	/* No need to setup FWCTL if there are no user allowed features found */
	if (!cxlfs->entries->num_user_features)
		return -ENODEV;

	/* Reference dropped automatically on any early return below */
	struct fwctl_device *fwctl_dev __free(free_fwctl_dev) =
		_fwctl_alloc_device(&cxlmd->dev, &cxlctl_ops, sizeof(*fwctl_dev));
	if (!fwctl_dev)
		return -ENOMEM;

	rc = fwctl_register(fwctl_dev);
	if (rc)
		return rc;

	/* devm action takes over the reference and the unregister */
	return devm_add_action_or_reset(host, free_memdev_fwctl,
					no_free_ptr(fwctl_dev));
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_fwctl, "CXL");
704 
705 MODULE_IMPORT_NS("FWCTL");
706