xref: /linux/drivers/accel/amdxdna/amdxdna_mailbox.c (revision 6dfafbd0299a60bfb5d5e277fdf100037c7ded07)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
 */

#include <drm/drm_device.h>
#include <drm/drm_managed.h>
#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/xarray.h>

#define CREATE_TRACE_POINTS
#include <trace/events/amdxdna.h>

#include "amdxdna_mailbox.h"

#define MB_ERR(chann, fmt, args...) \
({ \
        typeof(chann) _chann = chann; \
        dev_err((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
                (_chann)->msix_irq, ##args); \
})
#define MB_DBG(chann, fmt, args...) \
({ \
        typeof(chann) _chann = chann; \
        dev_dbg((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
                (_chann)->msix_irq, ##args); \
})
#define MB_WARN_ONCE(chann, fmt, args...) \
({ \
        typeof(chann) _chann = chann; \
        dev_warn_once((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
                      (_chann)->msix_irq, ##args); \
})

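/*
 * Message IDs handed to the firmware are built from an xarray index in the
 * range [0, MAX_MSG_ID_ENTRIES) with MAGIC_VAL OR'ed into the upper byte.
 * Incoming response IDs are rejected unless that magic byte matches, which
 * filters out corrupted or stale IDs before the xarray lookup.
 */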
#define MAGIC_VAL			0x1D000000U
#define MAGIC_VAL_MASK			0xFF000000
#define MAX_MSG_ID_ENTRIES		256
#define MSG_RX_TIMER			200 /* milliseconds */
#define MAILBOX_NAME			"xdna_mailbox"

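/*
 * Each channel owns a pair of ring buffers. The X2I ring carries messages
 * from the driver to the device (the driver advances its tail pointer in
 * mailbox_send_msg()); the I2X ring carries responses back (the driver
 * advances its head pointer in mailbox_get_msg()).
 */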
enum channel_res_type {
        CHAN_RES_X2I,
        CHAN_RES_I2X,
        CHAN_RES_NUM
};

struct mailbox {
        struct device		*dev;
        struct xdna_mailbox_res	res;
};

struct mailbox_channel {
        struct mailbox			*mb;
        struct xdna_mailbox_chann_res	res[CHAN_RES_NUM];
        int				msix_irq;
        u32				iohub_int_addr;
        struct xarray			chan_xa;
        u32				next_msgid;
        u32				x2i_tail;

        /* Received msg related fields */
        struct workqueue_struct		*work_q;
        struct work_struct		rx_work;
        u32				i2x_head;
        bool				bad_state;
};

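/*
 * The second header word packs the payload size into bits 10:0 and the
 * protocol version into bits 23:16; see the FIELD_PREP() usage in
 * xdna_mailbox_send_msg().
 */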
#define MSG_BODY_SZ		GENMASK(10, 0)
#define MSG_PROTO_VER		GENMASK(23, 16)
struct xdna_msg_header {
        __u32 total_size;
        __u32 sz_ver;
        __u32 id;
        __u32 opcode;
} __packed;

static_assert(sizeof(struct xdna_msg_header) == 16);

struct mailbox_pkg {
        struct xdna_msg_header	header;
        __u32			payload[];
};

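/*
 * When a message does not fit between the current tail and the end of the
 * X2I ring, the driver writes TOMBSTONE at the tail and restarts from
 * offset 0. The reader side does the mirror operation on the I2X ring: a
 * total_size equal to TOMBSTONE tells mailbox_get_msg() to wrap its head
 * pointer back to 0.
 */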
/* The protocol version. */
#define MSG_PROTOCOL_VERSION	0x1
/* The tombstone value. */
#define TOMBSTONE		0xDEADFACE

struct mailbox_msg {
        void			*handle;
        int			(*notify_cb)(void *handle, void __iomem *data, size_t size);
        size_t			pkg_size; /* package size in bytes */
        struct mailbox_pkg	pkg;
};

static void mailbox_reg_write(struct mailbox_channel *mb_chann, u32 mbox_reg, u32 data)
{
        struct xdna_mailbox_res *mb_res = &mb_chann->mb->res;
        void __iomem *ringbuf_addr = mb_res->mbox_base + mbox_reg;

        writel(data, ringbuf_addr);
}

static u32 mailbox_reg_read(struct mailbox_channel *mb_chann, u32 mbox_reg)
{
        struct xdna_mailbox_res *mb_res = &mb_chann->mb->res;
        void __iomem *ringbuf_addr = mb_res->mbox_base + mbox_reg;

        return readl(ringbuf_addr);
}

static int mailbox_reg_read_non_zero(struct mailbox_channel *mb_chann, u32 mbox_reg, u32 *val)
{
        struct xdna_mailbox_res *mb_res = &mb_chann->mb->res;
        void __iomem *ringbuf_addr = mb_res->mbox_base + mbox_reg;
        int ret, value;

        /* Poll until the value becomes non-zero */
        ret = readx_poll_timeout(readl, ringbuf_addr, value,
                                 value, 1 /* us */, 100);
        if (ret < 0)
                return ret;

        *val = value;
        return 0;
}

static inline void
mailbox_set_headptr(struct mailbox_channel *mb_chann, u32 headptr_val)
{
        mailbox_reg_write(mb_chann, mb_chann->res[CHAN_RES_I2X].mb_head_ptr_reg, headptr_val);
        mb_chann->i2x_head = headptr_val;
}

static inline void
mailbox_set_tailptr(struct mailbox_channel *mb_chann, u32 tailptr_val)
{
        mailbox_reg_write(mb_chann, mb_chann->res[CHAN_RES_X2I].mb_tail_ptr_reg, tailptr_val);
        mb_chann->x2i_tail = tailptr_val;
}

static inline u32
mailbox_get_headptr(struct mailbox_channel *mb_chann, enum channel_res_type type)
{
        return mailbox_reg_read(mb_chann, mb_chann->res[type].mb_head_ptr_reg);
}

static inline u32
mailbox_get_tailptr(struct mailbox_channel *mb_chann, enum channel_res_type type)
{
        return mailbox_reg_read(mb_chann, mb_chann->res[type].mb_tail_ptr_reg);
}

static inline u32
mailbox_get_ringbuf_size(struct mailbox_channel *mb_chann, enum channel_res_type type)
{
        return mb_chann->res[type].rb_size;
}

static inline int mailbox_validate_msgid(int msg_id)
{
        return (msg_id & MAGIC_VAL_MASK) == MAGIC_VAL;
}

static int mailbox_acquire_msgid(struct mailbox_channel *mb_chann, struct mailbox_msg *mb_msg)
{
        u32 msg_id;
        int ret;

        ret = xa_alloc_cyclic_irq(&mb_chann->chan_xa, &msg_id, mb_msg,
                                  XA_LIMIT(0, MAX_MSG_ID_ENTRIES - 1),
                                  &mb_chann->next_msgid, GFP_NOWAIT);
        if (ret < 0)
                return ret;

        /* Tag the ID with MAGIC_VAL in the upper bits so responses can be validated. */
        msg_id |= MAGIC_VAL;
        return msg_id;
}

static void mailbox_release_msgid(struct mailbox_channel *mb_chann, int msg_id)
{
        msg_id &= ~MAGIC_VAL_MASK;
        xa_erase_irq(&mb_chann->chan_xa, msg_id);
}

static void mailbox_release_msg(struct mailbox_channel *mb_chann,
                                struct mailbox_msg *mb_msg)
{
        MB_DBG(mb_chann, "msg_id 0x%x msg opcode 0x%x",
               mb_msg->pkg.header.id, mb_msg->pkg.header.opcode);
        if (mb_msg->notify_cb)
                mb_msg->notify_cb(mb_msg->handle, NULL, 0);
        kfree(mb_msg);
}

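/*
 * Copy one packaged message into the X2I ring buffer. "head" is the
 * consumer position read back from the hardware register; "tail" is the
 * cached producer position. The two no-space checks cover the wrapped
 * (tail < head) and unwrapped (tail >= head) cases, and the final word of
 * the ring is kept free so a TOMBSTONE marker can be written before
 * wrapping to offset 0.
 */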
static int
mailbox_send_msg(struct mailbox_channel *mb_chann, struct mailbox_msg *mb_msg)
{
        void __iomem *write_addr;
        u32 ringbuf_size;
        u32 head, tail;
        u32 start_addr;
        u32 tmp_tail;

        head = mailbox_get_headptr(mb_chann, CHAN_RES_X2I);
        tail = mb_chann->x2i_tail;
        ringbuf_size = mailbox_get_ringbuf_size(mb_chann, CHAN_RES_X2I);
        start_addr = mb_chann->res[CHAN_RES_X2I].rb_start_addr;
        tmp_tail = tail + mb_msg->pkg_size;

        if (tail < head && tmp_tail >= head)
                goto no_space;

        if (tail >= head && (tmp_tail > ringbuf_size - sizeof(u32) &&
                             mb_msg->pkg_size >= head))
                goto no_space;

        if (tail >= head && tmp_tail > ringbuf_size - sizeof(u32)) {
                write_addr = mb_chann->mb->res.ringbuf_base + start_addr + tail;
                writel(TOMBSTONE, write_addr);

                /* Tombstone is set. Write from the start of the ringbuf */
                tail = 0;
        }

        write_addr = mb_chann->mb->res.ringbuf_base + start_addr + tail;
        memcpy_toio(write_addr, &mb_msg->pkg, mb_msg->pkg_size);
        mailbox_set_tailptr(mb_chann, tail + mb_msg->pkg_size);

        trace_mbox_set_tail(MAILBOX_NAME, mb_chann->msix_irq,
                            mb_msg->pkg.header.opcode,
                            mb_msg->pkg.header.id);

        return 0;

no_space:
        return -ENOSPC;
}

static int
mailbox_get_resp(struct mailbox_channel *mb_chann, struct xdna_msg_header *header,
                 void __iomem *data)
{
        struct mailbox_msg *mb_msg;
        int msg_id;
        int ret = 0;

        msg_id = header->id;
        if (!mailbox_validate_msgid(msg_id)) {
                MB_ERR(mb_chann, "Bad message ID 0x%x", msg_id);
                return -EINVAL;
        }

        msg_id &= ~MAGIC_VAL_MASK;
        mb_msg = xa_erase_irq(&mb_chann->chan_xa, msg_id);
        if (!mb_msg) {
                MB_ERR(mb_chann, "Cannot find msg 0x%x", msg_id);
                return -EINVAL;
        }

        MB_DBG(mb_chann, "opcode 0x%x size %d id 0x%x",
               header->opcode, header->total_size, header->id);
        if (mb_msg->notify_cb) {
                ret = mb_msg->notify_cb(mb_msg->handle, data, header->total_size);
                if (unlikely(ret))
                        MB_ERR(mb_chann, "Message callback ret %d", ret);
        }

        kfree(mb_msg);
        return ret;
}

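/*
 * Consume one response from the I2X ring. The tail is read from the
 * hardware register (polled until non-zero) and the head is the driver's
 * cached read position. A TOMBSTONE in the size field wraps the head back
 * to 0; otherwise the header and payload are read, the matching request is
 * looked up by ID and completed, and the head pointer is advanced past the
 * message.
 */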
static int mailbox_get_msg(struct mailbox_channel *mb_chann)
{
        struct xdna_msg_header header;
        void __iomem *read_addr;
        u32 msg_size, rest;
        u32 ringbuf_size;
        u32 head, tail;
        u32 start_addr;
        int ret;

        if (mailbox_reg_read_non_zero(mb_chann, mb_chann->res[CHAN_RES_I2X].mb_tail_ptr_reg, &tail))
                return -EINVAL;
        head = mb_chann->i2x_head;
        ringbuf_size = mailbox_get_ringbuf_size(mb_chann, CHAN_RES_I2X);
        start_addr = mb_chann->res[CHAN_RES_I2X].rb_start_addr;

        if (unlikely(tail > ringbuf_size || !IS_ALIGNED(tail, 4))) {
                MB_WARN_ONCE(mb_chann, "Invalid tail 0x%x", tail);
                return -EINVAL;
        }

        /* ringbuf empty */
        if (head == tail)
                return -ENOENT;

        if (head == ringbuf_size)
                head = 0;

        /* Peek the size of the message, or TOMBSTONE */
        read_addr = mb_chann->mb->res.ringbuf_base + start_addr + head;
        header.total_size = readl(read_addr);
        /* The size is TOMBSTONE; the next read starts from offset 0 */
        if (header.total_size == TOMBSTONE) {
                if (head < tail) {
                        MB_WARN_ONCE(mb_chann, "Tombstone, head 0x%x tail 0x%x",
                                     head, tail);
                        return -EINVAL;
                }
                mailbox_set_headptr(mb_chann, 0);
                return 0;
        }

        if (unlikely(!header.total_size || !IS_ALIGNED(header.total_size, 4))) {
                MB_WARN_ONCE(mb_chann, "Invalid total size 0x%x", header.total_size);
                return -EINVAL;
        }
        msg_size = sizeof(header) + header.total_size;

        if (msg_size > ringbuf_size - head || msg_size > tail - head) {
                MB_WARN_ONCE(mb_chann, "Invalid message size %d, tail %d, head %d",
                             msg_size, tail, head);
                return -EINVAL;
        }

        rest = sizeof(header) - sizeof(u32);
        read_addr += sizeof(u32);
        memcpy_fromio((u32 *)&header + 1, read_addr, rest);
        read_addr += rest;

        ret = mailbox_get_resp(mb_chann, &header, read_addr);

        mailbox_set_headptr(mb_chann, head + msg_size);
        /* After the update, head may equal ringbuf_size. This is expected. */
        trace_mbox_set_head(MAILBOX_NAME, mb_chann->msix_irq,
                            header.opcode, header.id);

        return ret;
}

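/*
 * The hard IRQ handler only queues rx_work; all mailbox register access
 * and response callbacks run from the channel's single-threaded workqueue.
 */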
static irqreturn_t mailbox_irq_handler(int irq, void *p)
{
        struct mailbox_channel *mb_chann = p;

        trace_mbox_irq_handle(MAILBOX_NAME, irq);
        /* Schedule rx_work to call the callback functions */
        queue_work(mb_chann->work_q, &mb_chann->rx_work);

        return IRQ_HANDLED;
}

static void mailbox_rx_worker(struct work_struct *rx_work)
{
        struct mailbox_channel *mb_chann;
        int ret;

        mb_chann = container_of(rx_work, struct mailbox_channel, rx_work);

        if (READ_ONCE(mb_chann->bad_state)) {
                MB_ERR(mb_chann, "Channel in bad state, work aborted");
                return;
        }

again:
        mailbox_reg_write(mb_chann, mb_chann->iohub_int_addr, 0);

        while (1) {
                /*
                 * If the return value is 0, keep consuming messages until
                 * there are none left or an error occurs.
                 */
                ret = mailbox_get_msg(mb_chann);
                if (ret == -ENOENT)
                        break;

                /* Any other error means the device doesn't look good; mark the channel bad and stop. */
                if (unlikely(ret)) {
                        MB_ERR(mb_chann, "Unexpected ret %d, disable irq", ret);
                        WRITE_ONCE(mb_chann->bad_state, true);
                        return;
                }
        }

        /*
         * The hardware will not generate an interrupt if the firmware creates
         * a new response right after the driver clears the interrupt register.
         * Check the interrupt register again to make sure no new response
         * arrived before exiting.
         */
        if (mailbox_reg_read(mb_chann, mb_chann->iohub_int_addr))
                goto again;
}

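/*
 * Usage sketch (illustrative only, not taken from this file): a caller
 * fills a struct xdna_mailbox_msg and hands it to xdna_mailbox_send_msg().
 * The opcode, request struct, callback and timeout below are hypothetical;
 * only the field names match those referenced in this function.
 *
 *	static int my_resp_handler(void *handle, void __iomem *data, size_t size)
 *	{
 *		// data == NULL means the message was aborted at channel teardown
 *		return 0;
 *	}
 *
 *	struct xdna_mailbox_msg msg = {
 *		.opcode    = MY_OPCODE,		// hypothetical opcode
 *		.send_data = (void *)&req,	// payload; size must be 4-byte aligned
 *		.send_size = sizeof(req),
 *		.handle    = ctx,		// passed back to notify_cb
 *		.notify_cb = my_resp_handler,	// runs from the rx_work worker
 *	};
 *	ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
 */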
int xdna_mailbox_send_msg(struct mailbox_channel *mb_chann,
                          const struct xdna_mailbox_msg *msg, u64 tx_timeout)
{
        struct xdna_msg_header *header;
        struct mailbox_msg *mb_msg;
        size_t pkg_size;
        int ret;

        pkg_size = sizeof(*header) + msg->send_size;
        if (pkg_size > mailbox_get_ringbuf_size(mb_chann, CHAN_RES_X2I)) {
                MB_ERR(mb_chann, "Message size larger than ringbuf size");
                return -EINVAL;
        }

        if (unlikely(!IS_ALIGNED(msg->send_size, 4))) {
                MB_ERR(mb_chann, "Message size must be 4-byte aligned");
                return -EINVAL;
        }

        /* The first word of the payload must NOT be TOMBSTONE */
        if (unlikely(((u32 *)msg->send_data)[0] == TOMBSTONE)) {
                MB_ERR(mb_chann, "Tombstone in data");
                return -EINVAL;
        }

        if (READ_ONCE(mb_chann->bad_state)) {
                MB_ERR(mb_chann, "Channel in bad state");
                return -EPIPE;
        }

        mb_msg = kzalloc(sizeof(*mb_msg) + pkg_size, GFP_KERNEL);
        if (!mb_msg)
                return -ENOMEM;

        mb_msg->handle = msg->handle;
        mb_msg->notify_cb = msg->notify_cb;
        mb_msg->pkg_size = pkg_size;

        header = &mb_msg->pkg.header;
        /*
         * The hardware uses total_size and size to split huge messages.
         * We do not support that here, so both carry the same value.
         */
        header->total_size = msg->send_size;
        header->sz_ver = FIELD_PREP(MSG_BODY_SZ, msg->send_size) |
                        FIELD_PREP(MSG_PROTO_VER, MSG_PROTOCOL_VERSION);
        header->opcode = msg->opcode;
        memcpy(mb_msg->pkg.payload, msg->send_data, msg->send_size);

        ret = mailbox_acquire_msgid(mb_chann, mb_msg);
        if (unlikely(ret < 0)) {
                MB_ERR(mb_chann, "mailbox_acquire_msgid failed");
                goto msg_id_failed;
        }
        header->id = ret;

        MB_DBG(mb_chann, "opcode 0x%x size %d id 0x%x",
               header->opcode, header->total_size, header->id);

        ret = mailbox_send_msg(mb_chann, mb_msg);
        if (ret) {
                MB_DBG(mb_chann, "Error in mailbox send msg, ret %d", ret);
                goto release_id;
        }

        return 0;

release_id:
        mailbox_release_msgid(mb_chann, header->id);
msg_id_failed:
        kfree(mb_msg);
        return ret;
}

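/*
 * Channel bring-up order: cache the channel resources, initialize the
 * message xarray, read back the current head/tail pointers, create the
 * single-threaded RX workqueue, and only then request the MSI-X interrupt,
 * so the handler cannot run before the worker infrastructure exists.
 */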
struct mailbox_channel *
xdna_mailbox_create_channel(struct mailbox *mb,
                            const struct xdna_mailbox_chann_res *x2i,
                            const struct xdna_mailbox_chann_res *i2x,
                            u32 iohub_int_addr,
                            int mb_irq)
{
        struct mailbox_channel *mb_chann;
        int ret;

        if (!is_power_of_2(x2i->rb_size) || !is_power_of_2(i2x->rb_size)) {
                pr_err("Ring buf size must be power of 2");
                return NULL;
        }

        mb_chann = kzalloc(sizeof(*mb_chann), GFP_KERNEL);
        if (!mb_chann)
                return NULL;

        mb_chann->mb = mb;
        mb_chann->msix_irq = mb_irq;
        mb_chann->iohub_int_addr = iohub_int_addr;
        memcpy(&mb_chann->res[CHAN_RES_X2I], x2i, sizeof(*x2i));
        memcpy(&mb_chann->res[CHAN_RES_I2X], i2x, sizeof(*i2x));

        xa_init_flags(&mb_chann->chan_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
        mb_chann->x2i_tail = mailbox_get_tailptr(mb_chann, CHAN_RES_X2I);
        mb_chann->i2x_head = mailbox_get_headptr(mb_chann, CHAN_RES_I2X);

        INIT_WORK(&mb_chann->rx_work, mailbox_rx_worker);
        mb_chann->work_q = create_singlethread_workqueue(MAILBOX_NAME);
        if (!mb_chann->work_q) {
                MB_ERR(mb_chann, "Create workqueue failed");
                goto free_and_out;
        }

        /* Everything looks good. Time to enable the irq handler */
        ret = request_irq(mb_irq, mailbox_irq_handler, 0, MAILBOX_NAME, mb_chann);
        if (ret) {
                MB_ERR(mb_chann, "Failed to request irq %d ret %d", mb_irq, ret);
                goto destroy_wq;
        }

        mb_chann->bad_state = false;
        mailbox_reg_write(mb_chann, mb_chann->iohub_int_addr, 0);

        MB_DBG(mb_chann, "Mailbox channel created (irq: %d)", mb_chann->msix_irq);
        return mb_chann;

destroy_wq:
        destroy_workqueue(mb_chann->work_q);
free_and_out:
        kfree(mb_chann);
        return NULL;
}

int xdna_mailbox_destroy_channel(struct mailbox_channel *mb_chann)
{
        struct mailbox_msg *mb_msg;
        unsigned long msg_id;

        MB_DBG(mb_chann, "IRQ disabled and RX work cancelled");
        free_irq(mb_chann->msix_irq, mb_chann);
        destroy_workqueue(mb_chann->work_q);
        /* We can clean up and release resources */

        xa_for_each(&mb_chann->chan_xa, msg_id, mb_msg)
                mailbox_release_msg(mb_chann, mb_msg);

        xa_destroy(&mb_chann->chan_xa);

        MB_DBG(mb_chann, "Mailbox channel destroyed, irq: %d", mb_chann->msix_irq);
        kfree(mb_chann);
        return 0;
}

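/*
 * Stop and destroy are split: xdna_mailbox_stop_channel() quiesces the
 * channel (IRQ disabled, RX work flushed), while
 * xdna_mailbox_destroy_channel() releases the IRQ, the workqueue and every
 * outstanding message, invoking each pending notify_cb with NULL data so
 * callers can observe the abort.
 */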
void xdna_mailbox_stop_channel(struct mailbox_channel *mb_chann)
{
        /* Disable the irq and wait for any handler in flight. This might sleep. */
        disable_irq(mb_chann->msix_irq);

        /* Cancel RX work and wait for it to finish */
        cancel_work_sync(&mb_chann->rx_work);
        MB_DBG(mb_chann, "IRQ disabled and RX work cancelled");
}

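/*
 * The mailbox object is allocated with drmm_kzalloc(), so its lifetime is
 * tied to the DRM device and it is freed automatically when the device is
 * released; there is no matching destroy call in this file.
 */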
struct mailbox *xdnam_mailbox_create(struct drm_device *ddev,
                                     const struct xdna_mailbox_res *res)
{
        struct mailbox *mb;

        mb = drmm_kzalloc(ddev, sizeof(*mb), GFP_KERNEL);
        if (!mb)
                return NULL;
        mb->dev = ddev->dev;

        /* mailbox and ring buf base and size information */
        memcpy(&mb->res, res, sizeof(*res));

        return mb;
}