xref: /linux/drivers/mailbox/riscv-sbi-mpxy-mbox.c (revision e3966940559d52aa1800a008dcfeec218dd31f88)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * RISC-V SBI Message Proxy (MPXY) mailbox controller driver
4  *
5  * Copyright (C) 2025 Ventana Micro Systems Inc.
6  */
7 
8 #include <linux/acpi.h>
9 #include <linux/cpu.h>
10 #include <linux/errno.h>
11 #include <linux/init.h>
12 #include <linux/irqchip/riscv-imsic.h>
13 #include <linux/mailbox_controller.h>
14 #include <linux/mailbox/riscv-rpmi-message.h>
15 #include <linux/minmax.h>
16 #include <linux/mm.h>
17 #include <linux/module.h>
18 #include <linux/msi.h>
19 #include <linux/of_irq.h>
20 #include <linux/percpu.h>
21 #include <linux/platform_device.h>
22 #include <linux/smp.h>
23 #include <linux/string.h>
24 #include <linux/types.h>
25 #include <asm/byteorder.h>
26 #include <asm/sbi.h>
27 
28 /* ====== SBI MPXY extension data structures ====== */
29 
/* SBI MPXY MSI related channel attributes */
struct sbi_mpxy_msi_info {
	/* Lower 32-bits of the MSI target address */
	u32 msi_addr_lo;
	/* Upper 32-bits of the MSI target address */
	u32 msi_addr_hi;
	/* MSI data value written to the MSI target address */
	u32 msi_data;
};
39 
/*
 * SBI MPXY standard channel attributes.
 *
 * NOTE: The sequence of attribute fields are as-per the
 * defined sequence in the attribute table in spec (or
 * as-per the enum sbi_mpxy_attribute_id) because the whole
 * struct is read/written as a contiguous u32 array via
 * mpxy_read_attrs()/mpxy_write_attrs().
 */
struct sbi_mpxy_channel_attrs {
	/* Message protocol ID */
	u32 msg_proto_id;
	/* Message protocol version */
	u32 msg_proto_version;
	/* Message protocol maximum message length */
	u32 msg_max_len;
	/* Message protocol message send timeout in microseconds */
	u32 msg_send_timeout;
	/* Message protocol message completion timeout in microseconds */
	u32 msg_completion_timeout;
	/* Bit array for channel capabilities */
	u32 capability;
	/* SSE event ID */
	u32 sse_event_id;
	/* MSI enable/disable control knob */
	u32 msi_control;
	/* Channel MSI info */
	struct sbi_mpxy_msi_info msi_info;
	/* Events state control */
	u32 events_state_ctrl;
};
69 
/*
 * RPMI specific SBI MPXY channel attributes.
 *
 * NOTE: The sequence of attribute fields are as-per the
 * defined sequence in the attribute table in spec (or
 * as-per the enum sbi_mpxy_rpmi_attribute_id) because the
 * whole struct is read as a contiguous u32 array via
 * mpxy_read_attrs().
 */
struct sbi_mpxy_rpmi_channel_attrs {
	/* RPMI service group ID */
	u32 servicegroup_id;
	/* RPMI service group version */
	u32 servicegroup_version;
	/* RPMI implementation ID */
	u32 impl_id;
	/* RPMI implementation version */
	u32 impl_version;
};
87 
/*
 * SBI MPXY channel IDs data layout in shared memory, as filled by
 * SBI_EXT_MPXY_GET_CHANNEL_IDS (all fields little-endian).
 */
struct sbi_mpxy_channel_ids_data {
	/* Remaining number of channel ids */
	__le32 remaining;
	/* Returned channel ids in current function call */
	__le32 returned;
	/* Returned channel id array */
	__le32 channel_array[];
};
97 
/*
 * SBI MPXY notification data layout in shared memory, as filled by
 * SBI_EXT_MPXY_GET_NOTIFICATION_EVENTS (all fields little-endian).
 */
struct sbi_mpxy_notification_data {
	/* Remaining number of notification events */
	__le32 remaining;
	/* Number of notification events returned */
	__le32 returned;
	/* Number of notification events lost */
	__le32 lost;
	/* Reserved for future use */
	__le32 reserved;
	/* Packed notification events data */
	u8 events_data[];
};
111 
112 /* ====== MPXY data structures & helper routines ====== */
113 
/* MPXY Per-CPU or local context */
struct mpxy_local {
	/* Shared memory base address (kernel virtual address) */
	void *shmem;
	/* Shared memory physical address */
	phys_addr_t shmem_phys_addr;
	/* Flag representing whether shared memory is active or not */
	bool shmem_active;
};
123 
/* Per-CPU MPXY shared memory context */
static DEFINE_PER_CPU(struct mpxy_local, mpxy_local);
/* Shared memory size (bytes) reported by SBI_EXT_MPXY_GET_SHMEM_SIZE */
static unsigned long mpxy_shmem_size;
/* Set once per-CPU shared memory is set up; also guards single probe */
static bool mpxy_shmem_init_done;
127 
128 static int mpxy_get_channel_count(u32 *channel_count)
129 {
130 	struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
131 	struct sbi_mpxy_channel_ids_data *sdata = mpxy->shmem;
132 	u32 remaining, returned;
133 	struct sbiret sret;
134 
135 	if (!mpxy->shmem_active)
136 		return -ENODEV;
137 	if (!channel_count)
138 		return -EINVAL;
139 
140 	get_cpu();
141 
142 	/* Get the remaining and returned fields to calculate total */
143 	sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_GET_CHANNEL_IDS,
144 			 0, 0, 0, 0, 0, 0);
145 	if (sret.error)
146 		goto err_put_cpu;
147 
148 	remaining = le32_to_cpu(sdata->remaining);
149 	returned = le32_to_cpu(sdata->returned);
150 	*channel_count = remaining + returned;
151 
152 err_put_cpu:
153 	put_cpu();
154 	return sbi_err_map_linux_errno(sret.error);
155 }
156 
157 static int mpxy_get_channel_ids(u32 channel_count, u32 *channel_ids)
158 {
159 	struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
160 	struct sbi_mpxy_channel_ids_data *sdata = mpxy->shmem;
161 	u32 remaining, returned, count, start_index = 0;
162 	struct sbiret sret;
163 
164 	if (!mpxy->shmem_active)
165 		return -ENODEV;
166 	if (!channel_count || !channel_ids)
167 		return -EINVAL;
168 
169 	get_cpu();
170 
171 	do {
172 		sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_GET_CHANNEL_IDS,
173 				 start_index, 0, 0, 0, 0, 0);
174 		if (sret.error)
175 			goto err_put_cpu;
176 
177 		remaining = le32_to_cpu(sdata->remaining);
178 		returned = le32_to_cpu(sdata->returned);
179 
180 		count = returned < (channel_count - start_index) ?
181 			returned : (channel_count - start_index);
182 		memcpy_from_le32(&channel_ids[start_index], sdata->channel_array, count);
183 		start_index += count;
184 	} while (remaining && start_index < channel_count);
185 
186 err_put_cpu:
187 	put_cpu();
188 	return sbi_err_map_linux_errno(sret.error);
189 }
190 
191 static int mpxy_read_attrs(u32 channel_id, u32 base_attrid, u32 attr_count,
192 			   u32 *attrs_buf)
193 {
194 	struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
195 	struct sbiret sret;
196 
197 	if (!mpxy->shmem_active)
198 		return -ENODEV;
199 	if (!attr_count || !attrs_buf)
200 		return -EINVAL;
201 
202 	get_cpu();
203 
204 	sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_READ_ATTRS,
205 			 channel_id, base_attrid, attr_count, 0, 0, 0);
206 	if (sret.error)
207 		goto err_put_cpu;
208 
209 	memcpy_from_le32(attrs_buf, (__le32 *)mpxy->shmem, attr_count);
210 
211 err_put_cpu:
212 	put_cpu();
213 	return sbi_err_map_linux_errno(sret.error);
214 }
215 
216 static int mpxy_write_attrs(u32 channel_id, u32 base_attrid, u32 attr_count,
217 			    u32 *attrs_buf)
218 {
219 	struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
220 	struct sbiret sret;
221 
222 	if (!mpxy->shmem_active)
223 		return -ENODEV;
224 	if (!attr_count || !attrs_buf)
225 		return -EINVAL;
226 
227 	get_cpu();
228 
229 	memcpy_to_le32((__le32 *)mpxy->shmem, attrs_buf, attr_count);
230 	sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_WRITE_ATTRS,
231 			 channel_id, base_attrid, attr_count, 0, 0, 0);
232 
233 	put_cpu();
234 	return sbi_err_map_linux_errno(sret.error);
235 }
236 
237 static int mpxy_send_message_with_resp(u32 channel_id, u32 msg_id,
238 				       void *tx, unsigned long tx_len,
239 				       void *rx, unsigned long max_rx_len,
240 				       unsigned long *rx_len)
241 {
242 	struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
243 	unsigned long rx_bytes;
244 	struct sbiret sret;
245 
246 	if (!mpxy->shmem_active)
247 		return -ENODEV;
248 	if (!tx && tx_len)
249 		return -EINVAL;
250 
251 	get_cpu();
252 
253 	/* Message protocols allowed to have no data in messages */
254 	if (tx_len)
255 		memcpy(mpxy->shmem, tx, tx_len);
256 
257 	sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_SEND_MSG_WITH_RESP,
258 			 channel_id, msg_id, tx_len, 0, 0, 0);
259 	if (rx && !sret.error) {
260 		rx_bytes = sret.value;
261 		if (rx_bytes > max_rx_len) {
262 			put_cpu();
263 			return -ENOSPC;
264 		}
265 
266 		memcpy(rx, mpxy->shmem, rx_bytes);
267 		if (rx_len)
268 			*rx_len = rx_bytes;
269 	}
270 
271 	put_cpu();
272 	return sbi_err_map_linux_errno(sret.error);
273 }
274 
275 static int mpxy_send_message_without_resp(u32 channel_id, u32 msg_id,
276 					  void *tx, unsigned long tx_len)
277 {
278 	struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
279 	struct sbiret sret;
280 
281 	if (!mpxy->shmem_active)
282 		return -ENODEV;
283 	if (!tx && tx_len)
284 		return -EINVAL;
285 
286 	get_cpu();
287 
288 	/* Message protocols allowed to have no data in messages */
289 	if (tx_len)
290 		memcpy(mpxy->shmem, tx, tx_len);
291 
292 	sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_SEND_MSG_WITHOUT_RESP,
293 			 channel_id, msg_id, tx_len, 0, 0, 0);
294 
295 	put_cpu();
296 	return sbi_err_map_linux_errno(sret.error);
297 }
298 
299 static int mpxy_get_notifications(u32 channel_id,
300 				  struct sbi_mpxy_notification_data *notif_data,
301 				  unsigned long *events_data_len)
302 {
303 	struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
304 	struct sbiret sret;
305 
306 	if (!mpxy->shmem_active)
307 		return -ENODEV;
308 	if (!notif_data || !events_data_len)
309 		return -EINVAL;
310 
311 	get_cpu();
312 
313 	sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_GET_NOTIFICATION_EVENTS,
314 			 channel_id, 0, 0, 0, 0, 0);
315 	if (sret.error)
316 		goto err_put_cpu;
317 
318 	memcpy(notif_data, mpxy->shmem, sret.value + 16);
319 	*events_data_len = sret.value;
320 
321 err_put_cpu:
322 	put_cpu();
323 	return sbi_err_map_linux_errno(sret.error);
324 }
325 
326 static int mpxy_get_shmem_size(unsigned long *shmem_size)
327 {
328 	struct sbiret sret;
329 
330 	sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_GET_SHMEM_SIZE,
331 			 0, 0, 0, 0, 0, 0);
332 	if (sret.error)
333 		return sbi_err_map_linux_errno(sret.error);
334 	if (shmem_size)
335 		*shmem_size = sret.value;
336 	return 0;
337 }
338 
339 static int mpxy_setup_shmem(unsigned int cpu)
340 {
341 	struct page *shmem_page;
342 	struct mpxy_local *mpxy;
343 	struct sbiret sret;
344 
345 	mpxy = per_cpu_ptr(&mpxy_local, cpu);
346 	if (mpxy->shmem_active)
347 		return 0;
348 
349 	shmem_page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(mpxy_shmem_size));
350 	if (!shmem_page)
351 		return -ENOMEM;
352 
353 	/*
354 	 * Linux setup of shmem is done in mpxy OVERWRITE mode.
355 	 * flags[1:0] = 00b
356 	 */
357 	sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_SET_SHMEM,
358 			 page_to_phys(shmem_page), 0, 0, 0, 0, 0);
359 	if (sret.error) {
360 		free_pages((unsigned long)page_to_virt(shmem_page),
361 			   get_order(mpxy_shmem_size));
362 		return sbi_err_map_linux_errno(sret.error);
363 	}
364 
365 	mpxy->shmem = page_to_virt(shmem_page);
366 	mpxy->shmem_phys_addr = page_to_phys(shmem_page);
367 	mpxy->shmem_active = true;
368 
369 	return 0;
370 }
371 
372 /* ====== MPXY mailbox data structures ====== */
373 
/* MPXY mailbox channel */
struct mpxy_mbox_channel {
	/* Parent mailbox */
	struct mpxy_mbox *mbox;
	/* MPXY channel ID used in SBI calls */
	u32 channel_id;
	/* Cached standard channel attributes */
	struct sbi_mpxy_channel_attrs attrs;
	/* Cached RPMI-specific attributes (valid for RPMI channels only) */
	struct sbi_mpxy_rpmi_channel_attrs rpmi_attrs;
	/* Per-channel buffer for reading notification events */
	struct sbi_mpxy_notification_data *notif;
	/* Maximum message data size (bytes) for this channel */
	u32 max_xfer_len;
	/* True if the channel supports notification events state control */
	bool have_events_state;
	/* MSI index for this channel, or U32_MAX if MSI is not used */
	u32 msi_index;
	/* Linux IRQ number backing the MSI, or U32_MAX if not mapped */
	u32 msi_irq;
	/* True once the channel has been started via mailbox startup */
	bool started;
};
387 
/* MPXY mailbox */
struct mpxy_mbox {
	/* Underlying platform device */
	struct device *dev;
	/* Number of discovered MPXY channels */
	u32 channel_count;
	/* Array of channel_count channel contexts */
	struct mpxy_mbox_channel *channels;
	/* Number of channels that need an MSI */
	u32 msi_count;
	/* MSI index -> channel lookup table (msi_count entries) */
	struct mpxy_mbox_channel **msi_index_to_channel;
	/* Mailbox controller registered with the mailbox framework */
	struct mbox_controller controller;
};
397 
398 /* ====== MPXY RPMI processing ====== */
399 
/*
 * Handle one RPMI mailbox request on an MPXY channel. The outcome is
 * reported via msg->error; attribute reads also fill msg->attr.value
 * and responses are copied into msg->data by the MPXY send helpers.
 */
static void mpxy_mbox_send_rpmi_data(struct mpxy_mbox_channel *mchan,
				     struct rpmi_mbox_message *msg)
{
	msg->error = 0;
	switch (msg->type) {
	case RPMI_MBOX_MSG_TYPE_GET_ATTRIBUTE:
		/* Attribute reads are served from cached channel attributes */
		switch (msg->attr.id) {
		case RPMI_MBOX_ATTR_SPEC_VERSION:
			msg->attr.value = mchan->attrs.msg_proto_version;
			break;
		case RPMI_MBOX_ATTR_MAX_MSG_DATA_SIZE:
			msg->attr.value = mchan->max_xfer_len;
			break;
		case RPMI_MBOX_ATTR_SERVICEGROUP_ID:
			msg->attr.value = mchan->rpmi_attrs.servicegroup_id;
			break;
		case RPMI_MBOX_ATTR_SERVICEGROUP_VERSION:
			msg->attr.value = mchan->rpmi_attrs.servicegroup_version;
			break;
		case RPMI_MBOX_ATTR_IMPL_ID:
			msg->attr.value = mchan->rpmi_attrs.impl_id;
			break;
		case RPMI_MBOX_ATTR_IMPL_VERSION:
			msg->attr.value = mchan->rpmi_attrs.impl_version;
			break;
		default:
			msg->error = -EOPNOTSUPP;
			break;
		}
		break;
	case RPMI_MBOX_MSG_TYPE_SET_ATTRIBUTE:
		/* None of the RPMI linux mailbox attributes are writeable */
		msg->error = -EOPNOTSUPP;
		break;
	case RPMI_MBOX_MSG_TYPE_SEND_WITH_RESPONSE:
		/* Validate request/response buffers against channel limits */
		if ((!msg->data.request && msg->data.request_len) ||
		    (msg->data.request && msg->data.request_len > mchan->max_xfer_len) ||
		    (!msg->data.response && msg->data.max_response_len)) {
			msg->error = -EINVAL;
			break;
		}
		/* The channel must advertise send-with-response capability */
		if (!(mchan->attrs.capability & SBI_MPXY_CHAN_CAP_SEND_WITH_RESP)) {
			msg->error = -EIO;
			break;
		}
		msg->error = mpxy_send_message_with_resp(mchan->channel_id,
							 msg->data.service_id,
							 msg->data.request,
							 msg->data.request_len,
							 msg->data.response,
							 msg->data.max_response_len,
							 &msg->data.out_response_len);
		break;
	case RPMI_MBOX_MSG_TYPE_SEND_WITHOUT_RESPONSE:
		/* Validate request buffer against channel limits */
		if ((!msg->data.request && msg->data.request_len) ||
		    (msg->data.request && msg->data.request_len > mchan->max_xfer_len)) {
			msg->error = -EINVAL;
			break;
		}
		/* The channel must advertise send-without-response capability */
		if (!(mchan->attrs.capability & SBI_MPXY_CHAN_CAP_SEND_WITHOUT_RESP)) {
			msg->error = -EIO;
			break;
		}
		msg->error = mpxy_send_message_without_resp(mchan->channel_id,
							    msg->data.service_id,
							    msg->data.request,
							    msg->data.request_len);
		break;
	default:
		msg->error = -EOPNOTSUPP;
		break;
	}
}
473 
474 static void mpxy_mbox_peek_rpmi_data(struct mbox_chan *chan,
475 				     struct mpxy_mbox_channel *mchan,
476 				     struct sbi_mpxy_notification_data *notif,
477 				     unsigned long events_data_len)
478 {
479 	struct rpmi_notification_event *event;
480 	struct rpmi_mbox_message msg;
481 	unsigned long pos = 0;
482 
483 	while (pos < events_data_len && (events_data_len - pos) <= sizeof(*event)) {
484 		event = (struct rpmi_notification_event *)(notif->events_data + pos);
485 
486 		msg.type = RPMI_MBOX_MSG_TYPE_NOTIFICATION_EVENT;
487 		msg.notif.event_datalen = le16_to_cpu(event->event_datalen);
488 		msg.notif.event_id = event->event_id;
489 		msg.notif.event_data = event->event_data;
490 		msg.error = 0;
491 
492 		mbox_chan_received_data(chan, &msg);
493 		pos += sizeof(*event) + msg.notif.event_datalen;
494 	}
495 }
496 
497 static int mpxy_mbox_read_rpmi_attrs(struct mpxy_mbox_channel *mchan)
498 {
499 	return mpxy_read_attrs(mchan->channel_id,
500 			       SBI_MPXY_ATTR_MSGPROTO_ATTR_START,
501 			       sizeof(mchan->rpmi_attrs) / sizeof(u32),
502 			       (u32 *)&mchan->rpmi_attrs);
503 }
504 
505 /* ====== MPXY mailbox callbacks ====== */
506 
507 static int mpxy_mbox_send_data(struct mbox_chan *chan, void *data)
508 {
509 	struct mpxy_mbox_channel *mchan = chan->con_priv;
510 
511 	if (mchan->attrs.msg_proto_id == SBI_MPXY_MSGPROTO_RPMI_ID) {
512 		mpxy_mbox_send_rpmi_data(mchan, data);
513 		return 0;
514 	}
515 
516 	return -EOPNOTSUPP;
517 }
518 
519 static bool mpxy_mbox_peek_data(struct mbox_chan *chan)
520 {
521 	struct mpxy_mbox_channel *mchan = chan->con_priv;
522 	struct sbi_mpxy_notification_data *notif = mchan->notif;
523 	bool have_notifications = false;
524 	unsigned long data_len;
525 	int rc;
526 
527 	if (!(mchan->attrs.capability & SBI_MPXY_CHAN_CAP_GET_NOTIFICATIONS))
528 		return false;
529 
530 	do {
531 		rc = mpxy_get_notifications(mchan->channel_id, notif, &data_len);
532 		if (rc || !data_len)
533 			break;
534 
535 		if (mchan->attrs.msg_proto_id == SBI_MPXY_MSGPROTO_RPMI_ID)
536 			mpxy_mbox_peek_rpmi_data(chan, mchan, notif, data_len);
537 
538 		have_notifications = true;
539 	} while (1);
540 
541 	return have_notifications;
542 }
543 
544 static irqreturn_t mpxy_mbox_irq_thread(int irq, void *dev_id)
545 {
546 	mpxy_mbox_peek_data(dev_id);
547 	return IRQ_HANDLED;
548 }
549 
550 static int mpxy_mbox_setup_msi(struct mbox_chan *chan,
551 			       struct mpxy_mbox_channel *mchan)
552 {
553 	struct device *dev = mchan->mbox->dev;
554 	int rc;
555 
556 	/* Do nothing if MSI not supported */
557 	if (mchan->msi_irq == U32_MAX)
558 		return 0;
559 
560 	/* Fail if MSI already enabled */
561 	if (mchan->attrs.msi_control)
562 		return -EALREADY;
563 
564 	/* Request channel MSI handler */
565 	rc = request_threaded_irq(mchan->msi_irq, NULL, mpxy_mbox_irq_thread,
566 				  0, dev_name(dev), chan);
567 	if (rc) {
568 		dev_err(dev, "failed to request MPXY channel 0x%x IRQ\n",
569 			mchan->channel_id);
570 		return rc;
571 	}
572 
573 	/* Enable channel MSI control */
574 	mchan->attrs.msi_control = 1;
575 	rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_MSI_CONTROL,
576 			      1, &mchan->attrs.msi_control);
577 	if (rc) {
578 		dev_err(dev, "enable MSI control failed for MPXY channel 0x%x\n",
579 			mchan->channel_id);
580 		mchan->attrs.msi_control = 0;
581 		free_irq(mchan->msi_irq, chan);
582 		return rc;
583 	}
584 
585 	return 0;
586 }
587 
588 static void mpxy_mbox_cleanup_msi(struct mbox_chan *chan,
589 				  struct mpxy_mbox_channel *mchan)
590 {
591 	struct device *dev = mchan->mbox->dev;
592 	int rc;
593 
594 	/* Do nothing if MSI not supported */
595 	if (mchan->msi_irq == U32_MAX)
596 		return;
597 
598 	/* Do nothing if MSI already disabled */
599 	if (!mchan->attrs.msi_control)
600 		return;
601 
602 	/* Disable channel MSI control */
603 	mchan->attrs.msi_control = 0;
604 	rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_MSI_CONTROL,
605 			      1, &mchan->attrs.msi_control);
606 	if (rc) {
607 		dev_err(dev, "disable MSI control failed for MPXY channel 0x%x\n",
608 			mchan->channel_id);
609 	}
610 
611 	/* Free channel MSI handler */
612 	free_irq(mchan->msi_irq, chan);
613 }
614 
615 static int mpxy_mbox_setup_events(struct mpxy_mbox_channel *mchan)
616 {
617 	struct device *dev = mchan->mbox->dev;
618 	int rc;
619 
620 	/* Do nothing if events state not supported */
621 	if (!mchan->have_events_state)
622 		return 0;
623 
624 	/* Fail if events state already enabled */
625 	if (mchan->attrs.events_state_ctrl)
626 		return -EALREADY;
627 
628 	/* Enable channel events state */
629 	mchan->attrs.events_state_ctrl = 1;
630 	rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_EVENTS_STATE_CONTROL,
631 			      1, &mchan->attrs.events_state_ctrl);
632 	if (rc) {
633 		dev_err(dev, "enable events state failed for MPXY channel 0x%x\n",
634 			mchan->channel_id);
635 		mchan->attrs.events_state_ctrl = 0;
636 		return rc;
637 	}
638 
639 	return 0;
640 }
641 
642 static void mpxy_mbox_cleanup_events(struct mpxy_mbox_channel *mchan)
643 {
644 	struct device *dev = mchan->mbox->dev;
645 	int rc;
646 
647 	/* Do nothing if events state not supported */
648 	if (!mchan->have_events_state)
649 		return;
650 
651 	/* Do nothing if events state already disabled */
652 	if (!mchan->attrs.events_state_ctrl)
653 		return;
654 
655 	/* Disable channel events state */
656 	mchan->attrs.events_state_ctrl = 0;
657 	rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_EVENTS_STATE_CONTROL,
658 			      1, &mchan->attrs.events_state_ctrl);
659 	if (rc)
660 		dev_err(dev, "disable events state failed for MPXY channel 0x%x\n",
661 			mchan->channel_id);
662 }
663 
664 static int mpxy_mbox_startup(struct mbox_chan *chan)
665 {
666 	struct mpxy_mbox_channel *mchan = chan->con_priv;
667 	int rc;
668 
669 	if (mchan->started)
670 		return -EALREADY;
671 
672 	/* Setup channel MSI */
673 	rc = mpxy_mbox_setup_msi(chan, mchan);
674 	if (rc)
675 		return rc;
676 
677 	/* Setup channel notification events */
678 	rc = mpxy_mbox_setup_events(mchan);
679 	if (rc) {
680 		mpxy_mbox_cleanup_msi(chan, mchan);
681 		return rc;
682 	}
683 
684 	/* Mark the channel as started */
685 	mchan->started = true;
686 
687 	return 0;
688 }
689 
690 static void mpxy_mbox_shutdown(struct mbox_chan *chan)
691 {
692 	struct mpxy_mbox_channel *mchan = chan->con_priv;
693 
694 	if (!mchan->started)
695 		return;
696 
697 	/* Mark the channel as stopped */
698 	mchan->started = false;
699 
700 	/* Cleanup channel notification events */
701 	mpxy_mbox_cleanup_events(mchan);
702 
703 	/* Cleanup channel MSI */
704 	mpxy_mbox_cleanup_msi(chan, mchan);
705 }
706 
/* Mailbox framework callbacks shared by all MPXY channels */
static const struct mbox_chan_ops mpxy_mbox_ops = {
	.send_data = mpxy_mbox_send_data,
	.peek_data = mpxy_mbox_peek_data,
	.startup = mpxy_mbox_startup,
	.shutdown = mpxy_mbox_shutdown,
};
713 
714 /* ====== MPXY platform driver ===== */
715 
716 static void mpxy_mbox_msi_write(struct msi_desc *desc, struct msi_msg *msg)
717 {
718 	struct device *dev = msi_desc_to_dev(desc);
719 	struct mpxy_mbox *mbox = dev_get_drvdata(dev);
720 	struct mpxy_mbox_channel *mchan;
721 	struct sbi_mpxy_msi_info *minfo;
722 	int rc;
723 
724 	mchan = mbox->msi_index_to_channel[desc->msi_index];
725 	if (!mchan) {
726 		dev_warn(dev, "MPXY channel not available for MSI index %d\n",
727 			 desc->msi_index);
728 		return;
729 	}
730 
731 	minfo = &mchan->attrs.msi_info;
732 	minfo->msi_addr_lo = msg->address_lo;
733 	minfo->msi_addr_hi = msg->address_hi;
734 	minfo->msi_data = msg->data;
735 
736 	rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_MSI_ADDR_LO,
737 			      sizeof(*minfo) / sizeof(u32), (u32 *)minfo);
738 	if (rc) {
739 		dev_warn(dev, "failed to write MSI info for MPXY channel 0x%x\n",
740 			 mchan->channel_id);
741 	}
742 }
743 
744 static struct mbox_chan *mpxy_mbox_fw_xlate(struct mbox_controller *ctlr,
745 					    const struct fwnode_reference_args *pa)
746 {
747 	struct mpxy_mbox *mbox = container_of(ctlr, struct mpxy_mbox, controller);
748 	struct mpxy_mbox_channel *mchan;
749 	u32 i;
750 
751 	if (pa->nargs != 2)
752 		return ERR_PTR(-EINVAL);
753 
754 	for (i = 0; i < mbox->channel_count; i++) {
755 		mchan = &mbox->channels[i];
756 		if (mchan->channel_id == pa->args[0] &&
757 		    mchan->attrs.msg_proto_id == pa->args[1])
758 			return &mbox->controller.chans[i];
759 	}
760 
761 	return ERR_PTR(-ENOENT);
762 }
763 
/*
 * Discover all MPXY channels and populate mbox->channels with their
 * cached attributes. Also counts how many channels need an MSI
 * (mbox->msi_count). Returns 0 on success or a negative errno.
 */
static int mpxy_mbox_populate_channels(struct mpxy_mbox *mbox)
{
	/* channel_ids is auto-freed via __free(kfree) on every return path */
	u32 i, *channel_ids __free(kfree) = NULL;
	struct mpxy_mbox_channel *mchan;
	int rc;

	/* Find out the number of channels */
	rc = mpxy_get_channel_count(&mbox->channel_count);
	if (rc)
		return dev_err_probe(mbox->dev, rc, "failed to get number of MPXY channels\n");
	if (!mbox->channel_count)
		return dev_err_probe(mbox->dev, -ENODEV, "no MPXY channels available\n");

	/* Allocate and fetch all channel IDs */
	channel_ids = kcalloc(mbox->channel_count, sizeof(*channel_ids), GFP_KERNEL);
	if (!channel_ids)
		return -ENOMEM;
	rc = mpxy_get_channel_ids(mbox->channel_count, channel_ids);
	if (rc)
		return dev_err_probe(mbox->dev, rc, "failed to get MPXY channel IDs\n");

	/* Populate all channels */
	mbox->channels = devm_kcalloc(mbox->dev, mbox->channel_count,
				      sizeof(*mbox->channels), GFP_KERNEL);
	if (!mbox->channels)
		return -ENOMEM;
	for (i = 0; i < mbox->channel_count; i++) {
		mchan = &mbox->channels[i];
		mchan->mbox = mbox;
		mchan->channel_id = channel_ids[i];

		/* Cache the standard channel attributes */
		rc = mpxy_read_attrs(mchan->channel_id, SBI_MPXY_ATTR_MSG_PROT_ID,
				     sizeof(mchan->attrs) / sizeof(u32),
				     (u32 *)&mchan->attrs);
		if (rc) {
			return dev_err_probe(mbox->dev, rc,
					     "MPXY channel 0x%x read attrs failed\n",
					     mchan->channel_id);
		}

		/* Additionally cache RPMI attributes for RPMI channels */
		if (mchan->attrs.msg_proto_id == SBI_MPXY_MSGPROTO_RPMI_ID) {
			rc = mpxy_mbox_read_rpmi_attrs(mchan);
			if (rc) {
				return dev_err_probe(mbox->dev, rc,
						     "MPXY channel 0x%x read RPMI attrs failed\n",
						     mchan->channel_id);
			}
		}

		/* Buffer sized like shared memory for notification reads */
		mchan->notif = devm_kzalloc(mbox->dev, mpxy_shmem_size, GFP_KERNEL);
		if (!mchan->notif)
			return -ENOMEM;

		mchan->max_xfer_len = min(mpxy_shmem_size, mchan->attrs.msg_max_len);

		if ((mchan->attrs.capability & SBI_MPXY_CHAN_CAP_GET_NOTIFICATIONS) &&
		    (mchan->attrs.capability & SBI_MPXY_CHAN_CAP_EVENTS_STATE))
			mchan->have_events_state = true;

		/* Assign an MSI index only to MSI-capable channels */
		if ((mchan->attrs.capability & SBI_MPXY_CHAN_CAP_GET_NOTIFICATIONS) &&
		    (mchan->attrs.capability & SBI_MPXY_CHAN_CAP_MSI))
			mchan->msi_index = mbox->msi_count++;
		else
			mchan->msi_index = U32_MAX;
		/* The actual Linux IRQ is resolved later during probe */
		mchan->msi_irq = U32_MAX;
	}

	return 0;
}
833 
/*
 * Probe the SBI MPXY mailbox: set up per-CPU shared memory, discover
 * channels, optionally wire up per-channel MSIs, and register a
 * mailbox controller. Only a single MPXY mailbox instance is allowed.
 */
static int mpxy_mbox_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mpxy_mbox_channel *mchan;
	struct mpxy_mbox *mbox;
	int msi_idx, rc;
	u32 i;

	/*
	 * Initialize MPXY shared memory only once. This also ensures
	 * that SBI MPXY mailbox is probed only once.
	 */
	if (mpxy_shmem_init_done) {
		dev_err(dev, "SBI MPXY mailbox already initialized\n");
		return -EALREADY;
	}

	/* Probe for SBI MPXY extension */
	if (sbi_spec_version < sbi_mk_version(1, 0) ||
	    sbi_probe_extension(SBI_EXT_MPXY) <= 0) {
		dev_info(dev, "SBI MPXY extension not available\n");
		return -ENODEV;
	}

	/* Find-out shared memory size */
	rc = mpxy_get_shmem_size(&mpxy_shmem_size);
	if (rc)
		return dev_err_probe(dev, rc, "failed to get MPXY shared memory size\n");

	/*
	 * Setup MPXY shared memory on each CPU
	 *
	 * Note: Don't cleanup MPXY shared memory upon CPU power-down
	 * because the RPMI System MSI irqchip driver needs it to be
	 * available when migrating IRQs in CPU power-down path.
	 *
	 * NOTE(review): the return value of cpuhp_setup_state() is not
	 * checked here — presumably partial per-CPU setup is tolerated,
	 * but confirm this is intentional.
	 */
	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "riscv/sbi-mpxy-shmem",
			  mpxy_setup_shmem, NULL);

	/* Mark as MPXY shared memory initialization done */
	mpxy_shmem_init_done = true;

	/* Allocate mailbox instance */
	mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	mbox->dev = dev;
	platform_set_drvdata(pdev, mbox);

	/* Populate mailbox channels */
	rc = mpxy_mbox_populate_channels(mbox);
	if (rc)
		return rc;

	/* Initialize mailbox controller */
	mbox->controller.txdone_irq = false;
	mbox->controller.txdone_poll = false;
	mbox->controller.ops = &mpxy_mbox_ops;
	mbox->controller.dev = dev;
	mbox->controller.num_chans = mbox->channel_count;
	mbox->controller.fw_xlate = mpxy_mbox_fw_xlate;
	mbox->controller.chans = devm_kcalloc(dev, mbox->channel_count,
					      sizeof(*mbox->controller.chans),
					      GFP_KERNEL);
	if (!mbox->controller.chans)
		return -ENOMEM;
	for (i = 0; i < mbox->channel_count; i++)
		mbox->controller.chans[i].con_priv = &mbox->channels[i];

	/* Setup MSIs for mailbox (if required) */
	if (mbox->msi_count) {
		/*
		 * The device MSI domain for platform devices on RISC-V architecture
		 * is only available after the MSI controller driver is probed so,
		 * explicitly configure here.
		 */
		if (!dev_get_msi_domain(dev)) {
			struct fwnode_handle *fwnode = dev_fwnode(dev);

			/*
			 * The device MSI domain for OF devices is only set at the
			 * time of populating/creating OF device. If the device MSI
			 * domain is discovered later after the OF device is created
			 * then we need to set it explicitly before using any platform
			 * MSI functions.
			 */
			if (is_of_node(fwnode)) {
				of_msi_configure(dev, dev_of_node(dev));
			} else if (is_acpi_device_node(fwnode)) {
				struct irq_domain *msi_domain;

				msi_domain = irq_find_matching_fwnode(imsic_acpi_get_fwnode(dev),
								      DOMAIN_BUS_PLATFORM_MSI);
				dev_set_msi_domain(dev, msi_domain);
			}

			/* Still no MSI domain: retry later via deferred probe */
			if (!dev_get_msi_domain(dev))
				return -EPROBE_DEFER;
		}

		/* Build the MSI index -> channel lookup table */
		mbox->msi_index_to_channel = devm_kcalloc(dev, mbox->msi_count,
							  sizeof(*mbox->msi_index_to_channel),
							  GFP_KERNEL);
		if (!mbox->msi_index_to_channel)
			return -ENOMEM;

		for (msi_idx = 0; msi_idx < mbox->msi_count; msi_idx++) {
			for (i = 0; i < mbox->channel_count; i++) {
				mchan = &mbox->channels[i];
				if (mchan->msi_index == msi_idx) {
					mbox->msi_index_to_channel[msi_idx] = mchan;
					break;
				}
			}
		}

		/* Allocate the MSIs; mpxy_mbox_msi_write() programs each one */
		rc = platform_device_msi_init_and_alloc_irqs(dev, mbox->msi_count,
							     mpxy_mbox_msi_write);
		if (rc) {
			return dev_err_probe(dev, rc, "Failed to allocate %d MSIs\n",
					     mbox->msi_count);
		}

		/* Resolve each channel's MSI index to its Linux IRQ number */
		for (i = 0; i < mbox->channel_count; i++) {
			mchan = &mbox->channels[i];
			if (mchan->msi_index == U32_MAX)
				continue;
			mchan->msi_irq = msi_get_virq(dev, mchan->msi_index);
		}
	}

	/* Register mailbox controller */
	rc = devm_mbox_controller_register(dev, &mbox->controller);
	if (rc) {
		dev_err_probe(dev, rc, "Registering SBI MPXY mailbox failed\n");
		if (mbox->msi_count)
			platform_device_msi_free_irqs_all(dev);
		return rc;
	}

#ifdef CONFIG_ACPI
	struct acpi_device *adev = ACPI_COMPANION(dev);

	/* Let dependent ACPI devices probe now that the mailbox is up */
	if (adev)
		acpi_dev_clear_dependencies(adev);
#endif

	dev_info(dev, "mailbox registered with %d channels\n",
		 mbox->channel_count);
	return 0;
}
985 
986 static void mpxy_mbox_remove(struct platform_device *pdev)
987 {
988 	struct mpxy_mbox *mbox = platform_get_drvdata(pdev);
989 
990 	if (mbox->msi_count)
991 		platform_device_msi_free_irqs_all(mbox->dev);
992 }
993 
/* Devicetree match table for the SBI MPXY mailbox node */
static const struct of_device_id mpxy_mbox_of_match[] = {
	{ .compatible = "riscv,sbi-mpxy-mbox" },
	{}
};
MODULE_DEVICE_TABLE(of, mpxy_mbox_of_match);

/* ACPI match table for the RISC-V SBI MPXY mailbox device */
static const struct acpi_device_id mpxy_mbox_acpi_match[] = {
	{ "RSCV0005" },
	{}
};
MODULE_DEVICE_TABLE(acpi, mpxy_mbox_acpi_match);
1005 
/* Platform driver glue for the SBI MPXY mailbox */
static struct platform_driver mpxy_mbox_driver = {
	.driver = {
		.name = "riscv-sbi-mpxy-mbox",
		.of_match_table = mpxy_mbox_of_match,
		.acpi_match_table = mpxy_mbox_acpi_match,
	},
	.probe = mpxy_mbox_probe,
	.remove = mpxy_mbox_remove,
};
module_platform_driver(mpxy_mbox_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Anup Patel <apatel@ventanamicro.com>");
MODULE_DESCRIPTION("RISC-V SBI MPXY mailbox controller driver");
1020