// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2024-2025 Intel Corporation. All rights reserved. */
#include <linux/fwctl.h>
#include <linux/device.h>
#include <cxl/mailbox.h>
#include <cxl/features.h>
#include <uapi/fwctl/cxl.h>
#include "cxl.h"
#include "core.h"
#include "cxlmem.h"

/**
 * DOC: cxl features
 *
 * A CXL device that includes a mailbox supports commands that allow
 * listing, getting, and setting of optionally defined features such
 * as memory sparing or post package repair. Vendors may also define
 * custom features for the device.
 */
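
/*
 * An in-kernel consumer typically discovers features once at probe time via
 * devm_cxl_setup_features() and then reads individual features with
 * cxl_get_feature(). A minimal sketch, assuming the caller owns @cxlds,
 * @uuid, and @buf (illustrative only):
 *
 *	u16 rc_code;
 *	size_t got;
 *
 *	got = cxl_get_feature(&cxlds->cxl_mbox, &uuid,
 *			      CXL_GET_FEAT_SEL_CURRENT_VALUE,
 *			      buf, sizeof(buf), 0, &rc_code);
 *	if (!got)
 *		return -ENXIO;
 */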

/* All the features below are exclusive to the kernel */
static const uuid_t cxl_exclusive_feats[] = {
	CXL_FEAT_PATROL_SCRUB_UUID,
	CXL_FEAT_ECS_UUID,
	CXL_FEAT_SPPR_UUID,
	CXL_FEAT_HPPR_UUID,
	CXL_FEAT_CACHELINE_SPARING_UUID,
	CXL_FEAT_ROW_SPARING_UUID,
	CXL_FEAT_BANK_SPARING_UUID,
	CXL_FEAT_RANK_SPARING_UUID,
};

static bool is_cxl_feature_exclusive_by_uuid(const uuid_t *uuid)
{
	for (int i = 0; i < ARRAY_SIZE(cxl_exclusive_feats); i++) {
		if (uuid_equal(uuid, &cxl_exclusive_feats[i]))
			return true;
	}

	return false;
}

static bool is_cxl_feature_exclusive(struct cxl_feat_entry *entry)
{
	return is_cxl_feature_exclusive_by_uuid(&entry->uuid);
}

inline struct cxl_features_state *to_cxlfs(struct cxl_dev_state *cxlds)
{
	return cxlds->cxlfs;
}
EXPORT_SYMBOL_NS_GPL(to_cxlfs, "CXL");

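/*
 * Query only the response header of Get Supported Features: an input count
 * of sizeof(mbox_out) asks the device for no more than the fixed header,
 * whose supported_feats field reports how many feature entries exist.
 */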
static int cxl_get_supported_features_count(struct cxl_mailbox *cxl_mbox)
{
	struct cxl_mbox_get_sup_feats_out mbox_out;
	struct cxl_mbox_get_sup_feats_in mbox_in;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	memset(&mbox_in, 0, sizeof(mbox_in));
	mbox_in.count = cpu_to_le32(sizeof(mbox_out));
	memset(&mbox_out, 0, sizeof(mbox_out));
	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_SUPPORTED_FEATURES,
		.size_in = sizeof(mbox_in),
		.payload_in = &mbox_in,
		.size_out = sizeof(mbox_out),
		.payload_out = &mbox_out,
		.min_out = sizeof(mbox_out),
	};
	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
	if (rc < 0)
		return rc;

	return le16_to_cpu(mbox_out.supported_feats);
}

static struct cxl_feat_entries *
get_supported_features(struct cxl_features_state *cxlfs)
{
	int remain_feats, max_size, max_feats, start, rc, hdr_size;
	struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;
	int feat_size = sizeof(struct cxl_feat_entry);
	struct cxl_mbox_get_sup_feats_in mbox_in;
	struct cxl_feat_entry *entry;
	struct cxl_mbox_cmd mbox_cmd;
	int user_feats = 0;
	int count;

	count = cxl_get_supported_features_count(cxl_mbox);
	if (count <= 0)
		return NULL;

	struct cxl_feat_entries *entries __free(kvfree) =
		kvmalloc(struct_size(entries, ent, count), GFP_KERNEL);
	if (!entries)
		return NULL;

	struct cxl_mbox_get_sup_feats_out *mbox_out __free(kvfree) =
		kvmalloc(cxl_mbox->payload_size, GFP_KERNEL);
	if (!mbox_out)
		return NULL;

	hdr_size = struct_size(mbox_out, ents, 0);
	max_size = cxl_mbox->payload_size - hdr_size;
	/* Maximum number of feature entries that fit in the mailbox payload */
	max_feats = max_size / feat_size;
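	/*
	 * For example, assuming the 48-byte feature entries and 8-byte
	 * response header that the CXL spec defines, a 256-byte mailbox
	 * payload would yield max_feats = (256 - 8) / 48 = 5 entries per
	 * batch in the loop below.
	 */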
	entry = entries->ent;

	start = 0;
	remain_feats = count;
	do {
		int retrieved, alloc_size, copy_feats;
		int num_entries;

		if (remain_feats > max_feats) {
			alloc_size = struct_size(mbox_out, ents, max_feats);
			remain_feats = remain_feats - max_feats;
			copy_feats = max_feats;
		} else {
			alloc_size = struct_size(mbox_out, ents, remain_feats);
			copy_feats = remain_feats;
			remain_feats = 0;
		}

		memset(&mbox_in, 0, sizeof(mbox_in));
		mbox_in.count = cpu_to_le32(alloc_size);
		mbox_in.start_idx = cpu_to_le16(start);
		memset(mbox_out, 0, alloc_size);
		mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_GET_SUPPORTED_FEATURES,
			.size_in = sizeof(mbox_in),
			.payload_in = &mbox_in,
			.size_out = alloc_size,
			.payload_out = mbox_out,
			.min_out = hdr_size,
		};
		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
		if (rc < 0)
			return NULL;

		if (mbox_cmd.size_out <= hdr_size)
			return NULL;
		/*
		 * Make sure the retrieved output buffer size is a multiple
		 * of the feature entry size.
		 */
		retrieved = mbox_cmd.size_out - hdr_size;
		if (retrieved % feat_size)
			return NULL;

		num_entries = le16_to_cpu(mbox_out->num_entries);
		/*
		 * If the reported number of entries times the defined entry
		 * size does not match the number of retrieved bytes, then
		 * the output payload is malformed.
		 */
		if (num_entries * feat_size != retrieved)
			return NULL;

		memcpy(entry, mbox_out->ents, retrieved);
		for (int i = 0; i < num_entries; i++) {
			if (!is_cxl_feature_exclusive(entry + i))
				user_feats++;
		}
		entry += num_entries;
		/*
		 * If the device returned fewer entries than requested, carry
		 * the shortfall over to the next batch.
		 */
		remain_feats += copy_feats - num_entries;
		start += num_entries;
	} while (remain_feats);

	entries->num_features = count;
	entries->num_user_features = user_feats;

	return no_free_ptr(entries);
}

static void free_cxlfs(void *_cxlfs)
{
	struct cxl_features_state *cxlfs = _cxlfs;
	struct cxl_dev_state *cxlds = cxlfs->cxlds;

	cxlds->cxlfs = NULL;
	kvfree(cxlfs->entries);
	kfree(cxlfs);
}

/**
 * devm_cxl_setup_features() - Allocate and initialize features context
 * @cxlds: CXL device context
 *
 * Return: 0 on success or -errno on failure.
 */
int devm_cxl_setup_features(struct cxl_dev_state *cxlds)
{
	struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;

	if (cxl_mbox->feat_cap < CXL_FEATURES_RO)
		return -ENODEV;

	struct cxl_features_state *cxlfs __free(kfree) =
		kzalloc(sizeof(*cxlfs), GFP_KERNEL);
	if (!cxlfs)
		return -ENOMEM;

	cxlfs->cxlds = cxlds;

	cxlfs->entries = get_supported_features(cxlfs);
	if (!cxlfs->entries)
		return -ENOMEM;

	cxlds->cxlfs = cxlfs;

	return devm_add_action_or_reset(cxlds->dev, free_cxlfs, no_free_ptr(cxlfs));
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_features, "CXL");
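
/*
 * A minimal usage sketch, assuming a hypothetical driver probe path that
 * already holds a @cxlds. Features support is optional, so a failure here
 * need not be fatal:
 *
 *	rc = devm_cxl_setup_features(cxlds);
 *	if (rc)
 *		dev_dbg(cxlds->dev, "No CXL Features discovered\n");
 */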
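/**
 * cxl_get_feature() - Retrieve feature data from the device
 * @cxl_mbox: CXL mailbox context
 * @feat_uuid: UUID of the feature to read
 * @selection: which variant of the data to read (current, default, or saved)
 * @feat_out: destination buffer for the feature data
 * @feat_out_size: size of @feat_out in bytes
 * @offset: starting byte offset within the feature data
 * @return_code: optional output for the mailbox return code
 *
 * The read is split into chunks no larger than the mailbox payload size.
 *
 * Return: number of bytes retrieved, or 0 on failure with @return_code set.
 */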
size_t cxl_get_feature(struct cxl_mailbox *cxl_mbox, const uuid_t *feat_uuid,
		       enum cxl_get_feat_selection selection,
		       void *feat_out, size_t feat_out_size, u16 offset,
		       u16 *return_code)
{
	size_t data_to_rd_size, size_out;
	struct cxl_mbox_get_feat_in pi;
	struct cxl_mbox_cmd mbox_cmd;
	size_t data_rcvd_size = 0;
	int rc;

	if (return_code)
		*return_code = CXL_MBOX_CMD_RC_INPUT;

	if (!feat_out || !feat_out_size)
		return 0;

	size_out = min(feat_out_size, cxl_mbox->payload_size);
	uuid_copy(&pi.uuid, feat_uuid);
	pi.selection = selection;
	do {
		data_to_rd_size = min(feat_out_size - data_rcvd_size,
				      cxl_mbox->payload_size);
		pi.offset = cpu_to_le16(offset + data_rcvd_size);
		pi.count = cpu_to_le16(data_to_rd_size);

		mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_GET_FEATURE,
			.size_in = sizeof(pi),
			.payload_in = &pi,
			.size_out = size_out,
			.payload_out = feat_out + data_rcvd_size,
			.min_out = data_to_rd_size,
		};
		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
		if (rc < 0 || !mbox_cmd.size_out) {
			if (return_code)
				*return_code = mbox_cmd.return_code;
			return 0;
		}
		data_rcvd_size += mbox_cmd.size_out;
	} while (data_rcvd_size < feat_out_size);

	if (return_code)
		*return_code = CXL_MBOX_CMD_RC_SUCCESS;

	return data_rcvd_size;
}

/*
 * FEAT_DATA_MIN_PAYLOAD_SIZE - minimum number of bytes, beyond the header,
 * that must be available in the mailbox payload for the actual feature data
 * so that the feature data transfer can make forward progress.
 */
#define FEAT_DATA_MIN_PAYLOAD_SIZE 10
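/**
 * cxl_set_feature() - Send feature data to the device
 * @cxl_mbox: CXL mailbox context
 * @feat_uuid: UUID of the feature to write
 * @feat_version: version of the feature data
 * @feat_data: feature data to transfer
 * @feat_data_size: size of @feat_data in bytes
 * @feat_flag: Set Feature flags; the transfer-type bits are managed
 *	       internally, and DATA_SAVED_ACROSS_RESET is always set
 * @offset: starting byte offset within the feature data
 * @return_code: optional output for the mailbox return code
 *
 * Data that does not fit in a single mailbox payload is sent as an
 * initiate/continue/finish sequence of partial transfers.
 *
 * Return: 0 on success or -errno on failure. When provided, @return_code
 * reflects the mailbox completion status.
 */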
int cxl_set_feature(struct cxl_mailbox *cxl_mbox,
		    const uuid_t *feat_uuid, u8 feat_version,
		    const void *feat_data, size_t feat_data_size,
		    u32 feat_flag, u16 offset, u16 *return_code)
{
	size_t data_in_size, data_sent_size = 0;
	struct cxl_mbox_cmd mbox_cmd;
	size_t hdr_size;

	if (return_code)
		*return_code = CXL_MBOX_CMD_RC_INPUT;

	struct cxl_mbox_set_feat_in *pi __free(kfree) =
			kzalloc(cxl_mbox->payload_size, GFP_KERNEL);
	if (!pi)
		return -ENOMEM;

	uuid_copy(&pi->uuid, feat_uuid);
	pi->version = feat_version;
	feat_flag &= ~CXL_SET_FEAT_FLAG_DATA_TRANSFER_MASK;
	feat_flag |= CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET;
	hdr_size = sizeof(pi->hdr);
	/*
	 * Check that the minimum mailbox payload size is available for
	 * the feature data transfer.
	 */
	if (hdr_size + FEAT_DATA_MIN_PAYLOAD_SIZE > cxl_mbox->payload_size)
		return -ENOMEM;

	if (hdr_size + feat_data_size <= cxl_mbox->payload_size) {
		pi->flags = cpu_to_le32(feat_flag |
					CXL_SET_FEAT_FLAG_FULL_DATA_TRANSFER);
		data_in_size = feat_data_size;
	} else {
		pi->flags = cpu_to_le32(feat_flag |
					CXL_SET_FEAT_FLAG_INITIATE_DATA_TRANSFER);
		data_in_size = cxl_mbox->payload_size - hdr_size;
	}

	do {
		int rc;

		pi->offset = cpu_to_le16(offset + data_sent_size);
		memcpy(pi->feat_data, feat_data + data_sent_size, data_in_size);
		mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_SET_FEATURE,
			.size_in = hdr_size + data_in_size,
			.payload_in = pi,
		};
		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
		if (rc < 0) {
			if (return_code)
				*return_code = mbox_cmd.return_code;
			return rc;
		}

		data_sent_size += data_in_size;
		if (data_sent_size >= feat_data_size) {
			if (return_code)
				*return_code = CXL_MBOX_CMD_RC_SUCCESS;
			return 0;
		}

		if ((feat_data_size - data_sent_size) <= (cxl_mbox->payload_size - hdr_size)) {
			data_in_size = feat_data_size - data_sent_size;
			pi->flags = cpu_to_le32(feat_flag |
						CXL_SET_FEAT_FLAG_FINISH_DATA_TRANSFER);
		} else {
			pi->flags = cpu_to_le32(feat_flag |
						CXL_SET_FEAT_FLAG_CONTINUE_DATA_TRANSFER);
		}
	} while (true);
}

/* FWCTL support */

static inline struct cxl_memdev *fwctl_to_memdev(struct fwctl_device *fwctl_dev)
{
	return to_cxl_memdev(fwctl_dev->dev.parent);
}

static int cxlctl_open_uctx(struct fwctl_uctx *uctx)
{
	return 0;
}

static void cxlctl_close_uctx(struct fwctl_uctx *uctx)
{
}

static struct cxl_feat_entry *
get_support_feature_info(struct cxl_features_state *cxlfs,
			 const struct fwctl_rpc_cxl *rpc_in)
{
	struct cxl_feat_entry *feat;
	const uuid_t *uuid;

	if (rpc_in->op_size < sizeof(*uuid))
		return ERR_PTR(-EINVAL);

	uuid = &rpc_in->set_feat_in.uuid;

	for (int i = 0; i < cxlfs->entries->num_features; i++) {
		feat = &cxlfs->entries->ent[i];
		if (uuid_equal(uuid, &feat->uuid))
			return feat;
	}

	return ERR_PTR(-EINVAL);
}

static void *cxlctl_get_supported_features(struct cxl_features_state *cxlfs,
					   const struct fwctl_rpc_cxl *rpc_in,
					   size_t *out_len)
{
	const struct cxl_mbox_get_sup_feats_in *feat_in;
	struct cxl_mbox_get_sup_feats_out *feat_out;
	struct cxl_feat_entry *pos;
	size_t out_size;
	int requested;
	u32 count;
	u16 start;
	int i;

	if (rpc_in->op_size != sizeof(*feat_in))
		return ERR_PTR(-EINVAL);

	feat_in = &rpc_in->get_sup_feats_in;
	count = le32_to_cpu(feat_in->count);
	start = le16_to_cpu(feat_in->start_idx);
	requested = count / sizeof(*pos);

	/*
	 * Reject a start index beyond the number of supported features, and
	 * clamp the requested number of entries to those remaining from the
	 * start index.
	 */
	if (start >= cxlfs->entries->num_features)
		return ERR_PTR(-EINVAL);

	requested = min_t(int, requested, cxlfs->entries->num_features - start);

	out_size = sizeof(struct fwctl_rpc_cxl_out) +
		struct_size(feat_out, ents, requested);

	struct fwctl_rpc_cxl_out *rpc_out __free(kvfree) =
		kvzalloc(out_size, GFP_KERNEL);
	if (!rpc_out)
		return ERR_PTR(-ENOMEM);

	rpc_out->size = struct_size(feat_out, ents, requested);
	feat_out = &rpc_out->get_sup_feats_out;
	if (requested == 0) {
		feat_out->num_entries = cpu_to_le16(requested);
		feat_out->supported_feats =
			cpu_to_le16(cxlfs->entries->num_features);
		rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;
		*out_len = out_size;
		return no_free_ptr(rpc_out);
	}

	for (i = start, pos = &feat_out->ents[0];
	     i < cxlfs->entries->num_features; i++, pos++) {
		if (i - start == requested)
			break;

		memcpy(pos, &cxlfs->entries->ent[i], sizeof(*pos));
		/*
		 * If the feature is kernel-exclusive, zero set_feat_size and
		 * clear the CHANGEABLE flag so that userspace sees it as
		 * read-only.
		 */
		if (is_cxl_feature_exclusive(pos)) {
			u32 flags;

			pos->set_feat_size = 0;
			flags = le32_to_cpu(pos->flags);
			flags &= ~CXL_FEATURE_F_CHANGEABLE;
			pos->flags = cpu_to_le32(flags);
		}
	}

	feat_out->num_entries = cpu_to_le16(requested);
	feat_out->supported_feats = cpu_to_le16(cxlfs->entries->num_features);
	rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;
	*out_len = out_size;

	return no_free_ptr(rpc_out);
}

static void *cxlctl_get_feature(struct cxl_features_state *cxlfs,
				const struct fwctl_rpc_cxl *rpc_in,
				size_t *out_len)
{
	struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;
	const struct cxl_mbox_get_feat_in *feat_in;
	u16 offset, count, return_code;
	size_t out_size = *out_len;

	if (rpc_in->op_size != sizeof(*feat_in))
		return ERR_PTR(-EINVAL);

	feat_in = &rpc_in->get_feat_in;
	offset = le16_to_cpu(feat_in->offset);
	count = le16_to_cpu(feat_in->count);

	if (!count)
		return ERR_PTR(-EINVAL);

	struct fwctl_rpc_cxl_out *rpc_out __free(kvfree) =
		kvzalloc(out_size, GFP_KERNEL);
	if (!rpc_out)
		return ERR_PTR(-ENOMEM);

	out_size = cxl_get_feature(cxl_mbox, &feat_in->uuid,
				   feat_in->selection, rpc_out->payload,
				   count, offset, &return_code);
	*out_len = sizeof(struct fwctl_rpc_cxl_out);
	if (!out_size) {
		rpc_out->size = 0;
		rpc_out->retval = return_code;
		return no_free_ptr(rpc_out);
	}

	rpc_out->size = out_size;
	rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;
	*out_len += out_size;

	return no_free_ptr(rpc_out);
}

static void *cxlctl_set_feature(struct cxl_features_state *cxlfs,
				const struct fwctl_rpc_cxl *rpc_in,
				size_t *out_len)
{
	struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;
	const struct cxl_mbox_set_feat_in *feat_in;
	size_t out_size, data_size;
	u16 offset, return_code;
	u32 flags;
	int rc;

	if (rpc_in->op_size <= sizeof(feat_in->hdr))
		return ERR_PTR(-EINVAL);

	feat_in = &rpc_in->set_feat_in;

	if (is_cxl_feature_exclusive_by_uuid(&feat_in->uuid))
		return ERR_PTR(-EPERM);

	offset = le16_to_cpu(feat_in->offset);
	flags = le32_to_cpu(feat_in->flags);
	out_size = *out_len;

	struct fwctl_rpc_cxl_out *rpc_out __free(kvfree) =
		kvzalloc(out_size, GFP_KERNEL);
	if (!rpc_out)
		return ERR_PTR(-ENOMEM);

	rpc_out->size = 0;

	data_size = rpc_in->op_size - sizeof(feat_in->hdr);
	rc = cxl_set_feature(cxl_mbox, &feat_in->uuid,
			     feat_in->version, feat_in->feat_data,
			     data_size, flags, offset, &return_code);
	*out_len = sizeof(*rpc_out);
	if (rc) {
		rpc_out->retval = return_code;
		return no_free_ptr(rpc_out);
	}

	rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;

	return no_free_ptr(rpc_out);
}

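/*
 * Scope policy for Set Feature, as enforced below:
 *
 *	effect of the write			minimum fwctl scope
 *	-------------------			-------------------
 *	immediate configuration change		FWCTL_RPC_DEBUG_WRITE_FULL
 *	change effective after reset		FWCTL_RPC_DEBUG_WRITE
 *	background action or reserved bits	rejected
 */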
static bool cxlctl_validate_set_features(struct cxl_features_state *cxlfs,
					 const struct fwctl_rpc_cxl *rpc_in,
					 enum fwctl_rpc_scope scope)
{
	u16 effects, imm_mask, reset_mask;
	struct cxl_feat_entry *feat;
	u32 flags;

	feat = get_support_feature_info(cxlfs, rpc_in);
	if (IS_ERR(feat))
		return false;

	/* Ensure that the attribute is changeable */
	flags = le32_to_cpu(feat->flags);
	if (!(flags & CXL_FEATURE_F_CHANGEABLE))
		return false;

	effects = le16_to_cpu(feat->effects);

	/*
	 * Reject if reserved bits are set, since the driver does not
	 * comprehend those effects.
	 */
	if (effects & CXL_CMD_EFFECTS_RESERVED) {
		dev_warn_once(cxlfs->cxlds->dev,
			      "Reserved bits set in the Feature effects field!\n");
		return false;
	}

	/* Currently no user background command support */
	if (effects & CXL_CMD_BACKGROUND)
		return false;

	/* Effects that cause an immediate change require the highest security scope */
	imm_mask = CXL_CMD_CONFIG_CHANGE_IMMEDIATE |
		   CXL_CMD_DATA_CHANGE_IMMEDIATE |
		   CXL_CMD_POLICY_CHANGE_IMMEDIATE |
		   CXL_CMD_LOG_CHANGE_IMMEDIATE;

	reset_mask = CXL_CMD_CONFIG_CHANGE_COLD_RESET |
		     CXL_CMD_CONFIG_CHANGE_CONV_RESET |
		     CXL_CMD_CONFIG_CHANGE_CXL_RESET;

	/* If neither an immediate nor a reset effect is set, the hardware has a bug */
	if (!(effects & imm_mask) && !(effects & reset_mask))
		return false;

	/*
	 * If the Feature setting causes an immediate configuration change,
	 * then the full write permission policy is required.
	 */
	if (effects & imm_mask && scope >= FWCTL_RPC_DEBUG_WRITE_FULL)
		return true;

	/*
	 * If the Feature setting only causes a configuration change after
	 * a reset, then the lesser write permission policy is sufficient.
	 */
	if (!(effects & imm_mask) && scope >= FWCTL_RPC_DEBUG_WRITE)
		return true;

	return false;
}

static bool cxlctl_validate_hw_command(struct cxl_features_state *cxlfs,
				       const struct fwctl_rpc_cxl *rpc_in,
				       enum fwctl_rpc_scope scope,
				       u16 opcode)
{
	struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;

	switch (opcode) {
	case CXL_MBOX_OP_GET_SUPPORTED_FEATURES:
	case CXL_MBOX_OP_GET_FEATURE:
		return cxl_mbox->feat_cap >= CXL_FEATURES_RO;
	case CXL_MBOX_OP_SET_FEATURE:
		if (cxl_mbox->feat_cap < CXL_FEATURES_RW)
			return false;
		return cxlctl_validate_set_features(cxlfs, rpc_in, scope);
	default:
		return false;
	}
}

static void *cxlctl_handle_commands(struct cxl_features_state *cxlfs,
				    const struct fwctl_rpc_cxl *rpc_in,
				    size_t *out_len, u16 opcode)
{
	switch (opcode) {
	case CXL_MBOX_OP_GET_SUPPORTED_FEATURES:
		return cxlctl_get_supported_features(cxlfs, rpc_in, out_len);
	case CXL_MBOX_OP_GET_FEATURE:
		return cxlctl_get_feature(cxlfs, rpc_in, out_len);
	case CXL_MBOX_OP_SET_FEATURE:
		return cxlctl_set_feature(cxlfs, rpc_in, out_len);
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}
}

static void *cxlctl_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
			   void *in, size_t in_len, size_t *out_len)
{
	struct fwctl_device *fwctl_dev = uctx->fwctl;
	struct cxl_memdev *cxlmd = fwctl_to_memdev(fwctl_dev);
	struct cxl_features_state *cxlfs = to_cxlfs(cxlmd->cxlds);
	const struct fwctl_rpc_cxl *rpc_in = in;
	u16 opcode = rpc_in->opcode;

	if (!cxlctl_validate_hw_command(cxlfs, rpc_in, scope, opcode))
		return ERR_PTR(-EINVAL);

	return cxlctl_handle_commands(cxlfs, rpc_in, out_len, opcode);
}

static const struct fwctl_ops cxlctl_ops = {
	.device_type = FWCTL_DEVICE_TYPE_CXL,
	.uctx_size = sizeof(struct fwctl_uctx),
	.open_uctx = cxlctl_open_uctx,
	.close_uctx = cxlctl_close_uctx,
	.fw_rpc = cxlctl_fw_rpc,
};

DEFINE_FREE(free_fwctl_dev, struct fwctl_device *, if (_T) fwctl_put(_T))

static void free_memdev_fwctl(void *_fwctl_dev)
{
	struct fwctl_device *fwctl_dev = _fwctl_dev;

	fwctl_unregister(fwctl_dev);
	fwctl_put(fwctl_dev);
}

int devm_cxl_setup_fwctl(struct device *host, struct cxl_memdev *cxlmd)
{
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_features_state *cxlfs;
	int rc;

	cxlfs = to_cxlfs(cxlds);
	if (!cxlfs)
		return -ENODEV;

	/* No need to set up FWCTL if no user-accessible features were found */
	if (!cxlfs->entries->num_user_features)
		return -ENODEV;

	struct fwctl_device *fwctl_dev __free(free_fwctl_dev) =
		_fwctl_alloc_device(&cxlmd->dev, &cxlctl_ops, sizeof(*fwctl_dev));
	if (!fwctl_dev)
		return -ENOMEM;

	rc = fwctl_register(fwctl_dev);
	if (rc)
		return rc;

	return devm_add_action_or_reset(host, free_memdev_fwctl,
					no_free_ptr(fwctl_dev));
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_fwctl, "CXL");
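
/*
 * A minimal usage sketch, assuming a hypothetical memdev driver probe that
 * holds the @host device and @cxlmd. FWCTL support is optional, so a
 * failure here need not be fatal:
 *
 *	rc = devm_cxl_setup_fwctl(&pdev->dev, cxlmd);
 *	if (rc)
 *		dev_dbg(&pdev->dev, "No CXL FWCTL setup\n");
 */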

MODULE_IMPORT_NS("FWCTL");