xref: /linux/drivers/cxl/core/features.c (revision f97bdc61c76f654effa7b78e10338e64794da9fd)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2024-2025 Intel Corporation. All rights reserved. */
#include <linux/fwctl.h>
#include <linux/device.h>
#include <cxl/mailbox.h>
#include <cxl/features.h>
#include <uapi/fwctl/cxl.h>
#include "cxl.h"
#include "core.h"
#include "cxlmem.h"

/**
 * DOC: cxl features
 *
 * CXL Features:
 * A CXL device that includes a mailbox supports commands that allow
 * listing, getting, and setting of optionally defined features, such
 * as memory sparing or post package repair. Vendors may define custom
 * features for the device.
 */

/*
 * All the features below are exclusive to the kernel: userspace may list
 * and read them, but only the kernel may change them.
 */
static const uuid_t cxl_exclusive_feats[] = {
	CXL_FEAT_PATROL_SCRUB_UUID,
	CXL_FEAT_ECS_UUID,
	CXL_FEAT_SPPR_UUID,
	CXL_FEAT_HPPR_UUID,
	CXL_FEAT_CACHELINE_SPARING_UUID,
	CXL_FEAT_ROW_SPARING_UUID,
	CXL_FEAT_BANK_SPARING_UUID,
	CXL_FEAT_RANK_SPARING_UUID,
};

static bool is_cxl_feature_exclusive_by_uuid(const uuid_t *uuid)
{
	for (int i = 0; i < ARRAY_SIZE(cxl_exclusive_feats); i++) {
		if (uuid_equal(uuid, &cxl_exclusive_feats[i]))
			return true;
	}

	return false;
}

static bool is_cxl_feature_exclusive(struct cxl_feat_entry *entry)
{
	return is_cxl_feature_exclusive_by_uuid(&entry->uuid);
}

inline struct cxl_features_state *to_cxlfs(struct cxl_dev_state *cxlds)
{
	return cxlds->cxlfs;
}
EXPORT_SYMBOL_NS_GPL(to_cxlfs, "CXL");

static int cxl_get_supported_features_count(struct cxl_mailbox *cxl_mbox)
{
	struct cxl_mbox_get_sup_feats_out mbox_out;
	struct cxl_mbox_get_sup_feats_in mbox_in;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

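	/*
	 * Request only the Supported Features output header by setting the
	 * input count to the header size; the device then reports the total
	 * number of supported features without returning any entries.
	 */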
	memset(&mbox_in, 0, sizeof(mbox_in));
	mbox_in.count = cpu_to_le32(sizeof(mbox_out));
	memset(&mbox_out, 0, sizeof(mbox_out));
	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_SUPPORTED_FEATURES,
		.size_in = sizeof(mbox_in),
		.payload_in = &mbox_in,
		.size_out = sizeof(mbox_out),
		.payload_out = &mbox_out,
		.min_out = sizeof(mbox_out),
	};
	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
	if (rc < 0)
		return rc;

	return le16_to_cpu(mbox_out.supported_feats);
}

static struct cxl_feat_entries *
get_supported_features(struct cxl_features_state *cxlfs)
{
	int remain_feats, max_size, max_feats, start, rc, hdr_size;
	struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;
	int feat_size = sizeof(struct cxl_feat_entry);
	struct cxl_mbox_get_sup_feats_in mbox_in;
	struct cxl_feat_entry *entry;
	struct cxl_mbox_cmd mbox_cmd;
	int user_feats = 0;
	int count;

	count = cxl_get_supported_features_count(cxl_mbox);
	if (count <= 0)
		return NULL;

	struct cxl_feat_entries *entries __free(kvfree) =
		kvmalloc(struct_size(entries, ent, count), GFP_KERNEL);
	if (!entries)
		return NULL;

	struct cxl_mbox_get_sup_feats_out *mbox_out __free(kvfree) =
		kvmalloc(cxl_mbox->payload_size, GFP_KERNEL);
	if (!mbox_out)
		return NULL;

	hdr_size = struct_size(mbox_out, ents, 0);
	max_size = cxl_mbox->payload_size - hdr_size;
	/* max feat entries that can fit in mailbox max payload size */
	max_feats = max_size / feat_size;
	entry = entries->ent;

	start = 0;
	remain_feats = count;
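	/*
	 * Retrieve the feature entries in batches, each sized to fit
	 * within the mailbox payload.
	 */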
	do {
		int retrieved, alloc_size, copy_feats;
		int num_entries;

		if (remain_feats > max_feats) {
			alloc_size = struct_size(mbox_out, ents, max_feats);
			remain_feats = remain_feats - max_feats;
			copy_feats = max_feats;
		} else {
			alloc_size = struct_size(mbox_out, ents, remain_feats);
			copy_feats = remain_feats;
			remain_feats = 0;
		}

		memset(&mbox_in, 0, sizeof(mbox_in));
		mbox_in.count = cpu_to_le32(alloc_size);
		mbox_in.start_idx = cpu_to_le16(start);
		memset(mbox_out, 0, alloc_size);
		mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_GET_SUPPORTED_FEATURES,
			.size_in = sizeof(mbox_in),
			.payload_in = &mbox_in,
			.size_out = alloc_size,
			.payload_out = mbox_out,
			.min_out = hdr_size,
		};
		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
		if (rc < 0)
			return NULL;

		if (mbox_cmd.size_out <= hdr_size)
			return NULL;

		/*
		 * Make sure the retrieved data is a whole multiple of the
		 * feature entry size.
		 */
		retrieved = mbox_cmd.size_out - hdr_size;
		if (retrieved % feat_size)
			return NULL;

		num_entries = le16_to_cpu(mbox_out->num_entries);
		/*
		 * If the reported number of entries times the defined entry
		 * size does not equal the retrieved output bytes, then the
		 * output package is incorrect.
		 */
		if (num_entries * feat_size != retrieved)
			return NULL;

		memcpy(entry, mbox_out->ents, retrieved);
		for (int i = 0; i < num_entries; i++) {
			if (!is_cxl_feature_exclusive(entry + i))
				user_feats++;
		}
		entry += num_entries;
		/*
		 * If the number of output entries is less than expected, add
		 * the remaining entries to the next batch.
		 */
		remain_feats += copy_feats - num_entries;
		start += num_entries;
	} while (remain_feats);

	entries->num_features = count;
	entries->num_user_features = user_feats;

	return no_free_ptr(entries);
}

static void free_cxlfs(void *_cxlfs)
{
	struct cxl_features_state *cxlfs = _cxlfs;
	struct cxl_dev_state *cxlds = cxlfs->cxlds;

	cxlds->cxlfs = NULL;
	kvfree(cxlfs->entries);
	kfree(cxlfs);
}

/**
 * devm_cxl_setup_features() - Allocate and initialize features context
 * @cxlds: CXL device context
 *
 * Return: 0 on success or -errno on failure.
 */
int devm_cxl_setup_features(struct cxl_dev_state *cxlds)
{
	struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;

	if (cxl_mbox->feat_cap < CXL_FEATURES_RO)
		return -ENODEV;

	struct cxl_features_state *cxlfs __free(kfree) =
		kzalloc(sizeof(*cxlfs), GFP_KERNEL);
	if (!cxlfs)
		return -ENOMEM;

	cxlfs->cxlds = cxlds;

	cxlfs->entries = get_supported_features(cxlfs);
	if (!cxlfs->entries)
		return -ENOMEM;

	cxlds->cxlfs = cxlfs;

	return devm_add_action_or_reset(cxlds->dev, free_cxlfs, no_free_ptr(cxlfs));
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_features, "CXL");

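/**
 * cxl_get_feature() - Retrieve feature data from the device
 * @cxl_mbox: CXL mailbox context
 * @feat_uuid: UUID of the feature to retrieve
 * @selection: whether the current, default, or saved data is requested
 * @feat_out: buffer that receives the feature data
 * @feat_out_size: size of @feat_out in bytes
 * @offset: starting byte offset within the feature data
 * @return_code: (optional) mailbox command return code of the last command
 *
 * Feature data larger than the mailbox payload is retrieved with a
 * sequence of Get Feature commands, each reading at most one payload.
 *
 * Return: number of bytes retrieved, or 0 on failure.
 */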
size_t cxl_get_feature(struct cxl_mailbox *cxl_mbox, const uuid_t *feat_uuid,
		       enum cxl_get_feat_selection selection,
		       void *feat_out, size_t feat_out_size, u16 offset,
		       u16 *return_code)
{
	size_t data_to_rd_size, size_out;
	struct cxl_mbox_get_feat_in pi;
	struct cxl_mbox_cmd mbox_cmd;
	size_t data_rcvd_size = 0;
	int rc;

	if (return_code)
		*return_code = CXL_MBOX_CMD_RC_INPUT;

	if (!feat_out || !feat_out_size)
		return 0;

	size_out = min(feat_out_size, cxl_mbox->payload_size);
	uuid_copy(&pi.uuid, feat_uuid);
	pi.selection = selection;
	do {
		data_to_rd_size = min(feat_out_size - data_rcvd_size,
				      cxl_mbox->payload_size);
		pi.offset = cpu_to_le16(offset + data_rcvd_size);
		pi.count = cpu_to_le16(data_to_rd_size);

		mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_GET_FEATURE,
			.size_in = sizeof(pi),
			.payload_in = &pi,
			.size_out = size_out,
			.payload_out = feat_out + data_rcvd_size,
			.min_out = data_to_rd_size,
		};
		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
		if (rc < 0 || !mbox_cmd.size_out) {
			if (return_code)
				*return_code = mbox_cmd.return_code;
			return 0;
		}
		data_rcvd_size += mbox_cmd.size_out;
	} while (data_rcvd_size < feat_out_size);

	if (return_code)
		*return_code = CXL_MBOX_CMD_RC_SUCCESS;

	return data_rcvd_size;
}

/*
 * FEAT_DATA_MIN_PAYLOAD_SIZE - minimum number of extra bytes that must
 * be available in the mailbox payload for the actual feature data so
 * that the feature data transfer works as expected.
 */
#define FEAT_DATA_MIN_PAYLOAD_SIZE 10
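/**
 * cxl_set_feature() - Send feature data to the device
 * @cxl_mbox: CXL mailbox context
 * @feat_uuid: UUID of the feature to set
 * @feat_version: version of the feature data
 * @feat_data: feature data to transfer
 * @feat_data_size: size of @feat_data in bytes
 * @feat_flag: Set Feature flags; the data transfer portion is managed here
 * @offset: starting byte offset within the feature data
 * @return_code: (optional) mailbox command return code of the last command
 *
 * Feature data that does not fit in a single mailbox payload is sent with
 * a sequence of initiate/continue/finish data transfer commands.
 *
 * Return: 0 on success or -errno on failure.
 */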
int cxl_set_feature(struct cxl_mailbox *cxl_mbox,
		    const uuid_t *feat_uuid, u8 feat_version,
		    const void *feat_data, size_t feat_data_size,
		    u32 feat_flag, u16 offset, u16 *return_code)
{
	size_t data_in_size, data_sent_size = 0;
	struct cxl_mbox_cmd mbox_cmd;
	size_t hdr_size;

	if (return_code)
		*return_code = CXL_MBOX_CMD_RC_INPUT;

	struct cxl_mbox_set_feat_in *pi __free(kfree) =
			kzalloc(cxl_mbox->payload_size, GFP_KERNEL);
	if (!pi)
		return -ENOMEM;

	uuid_copy(&pi->uuid, feat_uuid);
	pi->version = feat_version;
	feat_flag &= ~CXL_SET_FEAT_FLAG_DATA_TRANSFER_MASK;
	feat_flag |= CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET;
	hdr_size = sizeof(pi->hdr);
	/*
	 * Check that the minimum mailbox payload size is available for
	 * the feature data transfer.
	 */
	if (hdr_size + FEAT_DATA_MIN_PAYLOAD_SIZE > cxl_mbox->payload_size)
		return -ENOMEM;

	if (hdr_size + feat_data_size <= cxl_mbox->payload_size) {
		pi->flags = cpu_to_le32(feat_flag |
					CXL_SET_FEAT_FLAG_FULL_DATA_TRANSFER);
		data_in_size = feat_data_size;
	} else {
		pi->flags = cpu_to_le32(feat_flag |
					CXL_SET_FEAT_FLAG_INITIATE_DATA_TRANSFER);
		data_in_size = cxl_mbox->payload_size - hdr_size;
	}

	do {
		int rc;

		pi->offset = cpu_to_le16(offset + data_sent_size);
		memcpy(pi->feat_data, feat_data + data_sent_size, data_in_size);
		mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_SET_FEATURE,
			.size_in = hdr_size + data_in_size,
			.payload_in = pi,
		};
		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
		if (rc < 0) {
			if (return_code)
				*return_code = mbox_cmd.return_code;
			return rc;
		}

		data_sent_size += data_in_size;
		if (data_sent_size >= feat_data_size) {
			if (return_code)
				*return_code = CXL_MBOX_CMD_RC_SUCCESS;
			return 0;
		}

		if ((feat_data_size - data_sent_size) <= (cxl_mbox->payload_size - hdr_size)) {
			data_in_size = feat_data_size - data_sent_size;
			pi->flags = cpu_to_le32(feat_flag |
						CXL_SET_FEAT_FLAG_FINISH_DATA_TRANSFER);
		} else {
			pi->flags = cpu_to_le32(feat_flag |
						CXL_SET_FEAT_FLAG_CONTINUE_DATA_TRANSFER);
		}
	} while (true);
}
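
/*
 * Illustrative sketch, not taken from an in-tree caller: read a feature's
 * current data and write a modified copy back. "uuid", "version", and
 * "buf" are hypothetical, and the flags argument is left at 0 since the
 * data transfer flags are managed by cxl_set_feature() itself.
 *
 *	u16 ret_code;
 *	size_t got;
 *	int rc;
 *
 *	got = cxl_get_feature(cxl_mbox, &uuid,
 *			      CXL_GET_FEAT_SEL_CURRENT_VALUE,
 *			      buf, sizeof(buf), 0, &ret_code);
 *	if (got)
 *		rc = cxl_set_feature(cxl_mbox, &uuid, version, buf, got,
 *				     0, 0, &ret_code);
 */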

/* FWCTL support */
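/*
 * The fwctl core exposes a character device per registered device and
 * forwards FWCTL_RPC ioctls here. For CXL, the RPC payload is a struct
 * fwctl_rpc_cxl carrying a mailbox opcode that is validated against the
 * device's feature capability and the caller's scope before being
 * serviced.
 */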

static inline struct cxl_memdev *fwctl_to_memdev(struct fwctl_device *fwctl_dev)
{
	return to_cxl_memdev(fwctl_dev->dev.parent);
}

static int cxlctl_open_uctx(struct fwctl_uctx *uctx)
{
	return 0;
}

static void cxlctl_close_uctx(struct fwctl_uctx *uctx)
{
}

static struct cxl_feat_entry *
get_support_feature_info(struct cxl_features_state *cxlfs,
			 const struct fwctl_rpc_cxl *rpc_in)
{
	struct cxl_feat_entry *feat;
	const uuid_t *uuid;

	if (rpc_in->op_size < sizeof(*uuid))
		return ERR_PTR(-EINVAL);

	uuid = &rpc_in->set_feat_in.uuid;

	for (int i = 0; i < cxlfs->entries->num_features; i++) {
		feat = &cxlfs->entries->ent[i];
		if (uuid_equal(uuid, &feat->uuid))
			return feat;
	}

	return ERR_PTR(-EINVAL);
}

static void *cxlctl_get_supported_features(struct cxl_features_state *cxlfs,
					   const struct fwctl_rpc_cxl *rpc_in,
					   size_t *out_len)
{
	const struct cxl_mbox_get_sup_feats_in *feat_in;
	struct cxl_mbox_get_sup_feats_out *feat_out;
	struct cxl_feat_entry *pos;
	size_t out_size;
	int requested;
	u32 count;
	u16 start;
	int i;

	if (rpc_in->op_size != sizeof(*feat_in))
		return ERR_PTR(-EINVAL);

	feat_in = &rpc_in->get_sup_feats_in;
	count = le32_to_cpu(feat_in->count);
	start = le16_to_cpu(feat_in->start_idx);
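	/* The input count field is in bytes; convert it to a number of entries. */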
	requested = count / sizeof(*pos);

	/*
	 * Make sure the requested window falls within the features reported
	 * to userspace: reject a start index past the end and clamp the
	 * requested count to the entries that remain.
	 */
	if (start >= cxlfs->entries->num_features)
		return ERR_PTR(-EINVAL);

	requested = min_t(int, requested, cxlfs->entries->num_features - start);

	out_size = sizeof(struct fwctl_rpc_cxl_out) +
		struct_size(feat_out, ents, requested);

	struct fwctl_rpc_cxl_out *rpc_out __free(kvfree) =
		kvzalloc(out_size, GFP_KERNEL);
	if (!rpc_out)
		return ERR_PTR(-ENOMEM);

	rpc_out->size = struct_size(feat_out, ents, requested);
	feat_out = &rpc_out->get_sup_feats_out;
	if (requested == 0) {
		feat_out->num_entries = cpu_to_le16(requested);
		feat_out->supported_feats =
			cpu_to_le16(cxlfs->entries->num_features);
		rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;
		*out_len = out_size;
		return no_free_ptr(rpc_out);
	}

	for (i = start, pos = &feat_out->ents[0];
	     i < cxlfs->entries->num_features; i++, pos++) {
		if (i - start == requested)
			break;

		memcpy(pos, &cxlfs->entries->ent[i], sizeof(*pos));
		/*
		 * If the feature is exclusive, set the set_feat_size to 0 to
		 * indicate that the feature is not changeable.
		 */
		if (is_cxl_feature_exclusive(pos)) {
			u32 flags;

			pos->set_feat_size = 0;
			flags = le32_to_cpu(pos->flags);
			flags &= ~CXL_FEATURE_F_CHANGEABLE;
			pos->flags = cpu_to_le32(flags);
		}
	}

	feat_out->num_entries = cpu_to_le16(requested);
	feat_out->supported_feats = cpu_to_le16(cxlfs->entries->num_features);
	rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;
	*out_len = out_size;

	return no_free_ptr(rpc_out);
}

static void *cxlctl_get_feature(struct cxl_features_state *cxlfs,
				const struct fwctl_rpc_cxl *rpc_in,
				size_t *out_len)
{
	struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;
	const struct cxl_mbox_get_feat_in *feat_in;
	u16 offset, count, return_code;
	size_t out_size = *out_len;

	if (rpc_in->op_size != sizeof(*feat_in))
		return ERR_PTR(-EINVAL);

	feat_in = &rpc_in->get_feat_in;
	offset = le16_to_cpu(feat_in->offset);
	count = le16_to_cpu(feat_in->count);

	if (!count)
		return ERR_PTR(-EINVAL);

	struct fwctl_rpc_cxl_out *rpc_out __free(kvfree) =
		kvzalloc(out_size, GFP_KERNEL);
	if (!rpc_out)
		return ERR_PTR(-ENOMEM);

	out_size = cxl_get_feature(cxl_mbox, &feat_in->uuid,
				   feat_in->selection, rpc_out->payload,
				   count, offset, &return_code);
	*out_len = sizeof(struct fwctl_rpc_cxl_out);
	if (!out_size) {
		rpc_out->size = 0;
		rpc_out->retval = return_code;
		return no_free_ptr(rpc_out);
	}

	rpc_out->size = out_size;
	rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;
	*out_len += out_size;

	return no_free_ptr(rpc_out);
}

static void *cxlctl_set_feature(struct cxl_features_state *cxlfs,
				const struct fwctl_rpc_cxl *rpc_in,
				size_t *out_len)
{
	struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;
	const struct cxl_mbox_set_feat_in *feat_in;
	size_t out_size, data_size;
	u16 offset, return_code;
	u32 flags;
	int rc;

	if (rpc_in->op_size <= sizeof(feat_in->hdr))
		return ERR_PTR(-EINVAL);

	feat_in = &rpc_in->set_feat_in;

	if (is_cxl_feature_exclusive_by_uuid(&feat_in->uuid))
		return ERR_PTR(-EPERM);

	offset = le16_to_cpu(feat_in->offset);
	flags = le32_to_cpu(feat_in->flags);
	out_size = *out_len;

	struct fwctl_rpc_cxl_out *rpc_out __free(kvfree) =
		kvzalloc(out_size, GFP_KERNEL);
	if (!rpc_out)
		return ERR_PTR(-ENOMEM);

	rpc_out->size = 0;

	data_size = rpc_in->op_size - sizeof(feat_in->hdr);
	rc = cxl_set_feature(cxl_mbox, &feat_in->uuid,
			     feat_in->version, feat_in->feat_data,
			     data_size, flags, offset, &return_code);
	*out_len = sizeof(*rpc_out);
	if (rc) {
		rpc_out->retval = return_code;
		return no_free_ptr(rpc_out);
	}

	rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;

	return no_free_ptr(rpc_out);
}

static bool cxlctl_validate_set_features(struct cxl_features_state *cxlfs,
					 const struct fwctl_rpc_cxl *rpc_in,
					 enum fwctl_rpc_scope scope)
{
	u16 effects, imm_mask, reset_mask;
	struct cxl_feat_entry *feat;
	u32 flags;

	feat = get_support_feature_info(cxlfs, rpc_in);
	if (IS_ERR(feat))
		return false;

	/* Ensure that the attribute is changeable */
	flags = le32_to_cpu(feat->flags);
	if (!(flags & CXL_FEATURE_F_CHANGEABLE))
		return false;

	effects = le16_to_cpu(feat->effects);

	/*
	 * Reject if reserved bits are set, since the effects are not
	 * comprehended by the driver.
	 */
	if (effects & CXL_CMD_EFFECTS_RESERVED) {
		dev_warn_once(cxlfs->cxlds->dev,
			      "Reserved bits set in the Feature effects field!\n");
		return false;
	}

	/* Currently no user background command support */
	if (effects & CXL_CMD_BACKGROUND)
		return false;

	/* Effects that cause an immediate change need the highest security scope */
	imm_mask = CXL_CMD_CONFIG_CHANGE_IMMEDIATE |
		   CXL_CMD_DATA_CHANGE_IMMEDIATE |
		   CXL_CMD_POLICY_CHANGE_IMMEDIATE |
		   CXL_CMD_LOG_CHANGE_IMMEDIATE;

	reset_mask = CXL_CMD_CONFIG_CHANGE_COLD_RESET |
		     CXL_CMD_CONFIG_CHANGE_CONV_RESET |
		     CXL_CMD_CONFIG_CHANGE_CXL_RESET;

	/* If neither an immediate nor a reset effect is set, the hardware has a bug */
	if (!(effects & imm_mask) && !(effects & reset_mask))
		return false;

	/*
	 * If the Feature setting causes an immediate configuration change,
	 * then the full write permission policy is needed.
	 */
	if (effects & imm_mask && scope >= FWCTL_RPC_DEBUG_WRITE_FULL)
		return true;

	/*
	 * If the Feature setting only causes a configuration change
	 * after a reset, then the lesser level of write permission
	 * policy is ok.
	 */
	if (!(effects & imm_mask) && scope >= FWCTL_RPC_DEBUG_WRITE)
		return true;

	return false;
}

static bool cxlctl_validate_hw_command(struct cxl_features_state *cxlfs,
				       const struct fwctl_rpc_cxl *rpc_in,
				       enum fwctl_rpc_scope scope,
				       u16 opcode)
{
	struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;

	switch (opcode) {
	case CXL_MBOX_OP_GET_SUPPORTED_FEATURES:
	case CXL_MBOX_OP_GET_FEATURE:
		return cxl_mbox->feat_cap >= CXL_FEATURES_RO;
	case CXL_MBOX_OP_SET_FEATURE:
		if (cxl_mbox->feat_cap < CXL_FEATURES_RW)
			return false;
		return cxlctl_validate_set_features(cxlfs, rpc_in, scope);
	default:
		return false;
	}
}

static void *cxlctl_handle_commands(struct cxl_features_state *cxlfs,
				    const struct fwctl_rpc_cxl *rpc_in,
				    size_t *out_len, u16 opcode)
{
	switch (opcode) {
	case CXL_MBOX_OP_GET_SUPPORTED_FEATURES:
		return cxlctl_get_supported_features(cxlfs, rpc_in, out_len);
	case CXL_MBOX_OP_GET_FEATURE:
		return cxlctl_get_feature(cxlfs, rpc_in, out_len);
	case CXL_MBOX_OP_SET_FEATURE:
		return cxlctl_set_feature(cxlfs, rpc_in, out_len);
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}
}

static void *cxlctl_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
			   void *in, size_t in_len, size_t *out_len)
{
	struct fwctl_device *fwctl_dev = uctx->fwctl;
	struct cxl_memdev *cxlmd = fwctl_to_memdev(fwctl_dev);
	struct cxl_features_state *cxlfs = to_cxlfs(cxlmd->cxlds);
	const struct fwctl_rpc_cxl *rpc_in = in;
	u16 opcode = rpc_in->opcode;

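	/* Reject commands that the device capability, feature, or scope disallows */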
	if (!cxlctl_validate_hw_command(cxlfs, rpc_in, scope, opcode))
		return ERR_PTR(-EINVAL);

	return cxlctl_handle_commands(cxlfs, rpc_in, out_len, opcode);
}

static const struct fwctl_ops cxlctl_ops = {
	.device_type = FWCTL_DEVICE_TYPE_CXL,
	.uctx_size = sizeof(struct fwctl_uctx),
	.open_uctx = cxlctl_open_uctx,
	.close_uctx = cxlctl_close_uctx,
	.fw_rpc = cxlctl_fw_rpc,
};

DEFINE_FREE(free_fwctl_dev, struct fwctl_device *, if (_T) fwctl_put(_T))

static void free_memdev_fwctl(void *_fwctl_dev)
{
	struct fwctl_device *fwctl_dev = _fwctl_dev;

	fwctl_unregister(fwctl_dev);
	fwctl_put(fwctl_dev);
}

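/**
 * devm_cxl_setup_fwctl() - Setup fwctl ABI for the CXL memory device
 * @host: device that hosts the fwctl device's lifetime
 * @cxlmd: CXL memory device
 *
 * The features context must already have been set up by
 * devm_cxl_setup_features(), and at least one feature that userspace is
 * allowed to operate on must be present; otherwise -ENODEV is returned.
 *
 * Return: 0 on success or -errno on failure.
 */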
int devm_cxl_setup_fwctl(struct device *host, struct cxl_memdev *cxlmd)
{
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_features_state *cxlfs;
	int rc;

	cxlfs = to_cxlfs(cxlds);
	if (!cxlfs)
		return -ENODEV;

	/* No need to set up fwctl if no user-allowed features were found */
	if (!cxlfs->entries->num_user_features)
		return -ENODEV;

	struct fwctl_device *fwctl_dev __free(free_fwctl_dev) =
		_fwctl_alloc_device(&cxlmd->dev, &cxlctl_ops, sizeof(*fwctl_dev));
	if (!fwctl_dev)
		return -ENOMEM;

	rc = fwctl_register(fwctl_dev);
	if (rc)
		return rc;

	return devm_add_action_or_reset(host, free_memdev_fwctl,
					no_free_ptr(fwctl_dev));
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_fwctl, "CXL");
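
/*
 * Illustrative userspace sketch, under stated assumptions: the fwctl
 * character device appears as /dev/fwctl/fwctl0 (the index varies), the
 * uapi definitions come from the installed <fwctl/fwctl.h> and
 * <fwctl/cxl.h> headers, and 0x0500 is the CXL Get Supported Features
 * opcode. A zero-entry window returns just the output header, whose
 * supported_feats field carries the total feature count.
 *
 *	struct fwctl_rpc_cxl in = {
 *		.opcode = 0x0500,
 *		.op_size = sizeof(in.get_sup_feats_in),
 *	};
 *	char out_buf[256] = {};
 *	struct fwctl_rpc_cxl_out *out = (void *)out_buf;
 *	struct fwctl_rpc rpc = {
 *		.size = sizeof(rpc),
 *		.scope = FWCTL_RPC_CONFIGURATION,
 *		.in_len = sizeof(in),
 *		.out_len = sizeof(out_buf),
 *		.in = (uintptr_t)&in,
 *		.out = (uintptr_t)out_buf,
 *	};
 *	int fd = open("/dev/fwctl/fwctl0", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, FWCTL_RPC, &rpc) == 0)
 *		printf("%u features supported\n",
 *		       le16toh(out->get_sup_feats_out.supported_feats));
 */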

MODULE_IMPORT_NS("FWCTL");