// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
 */

#include <drm/drm_device.h>
#include <drm/drm_managed.h>
#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/xarray.h>

#define CREATE_TRACE_POINTS
#include <trace/events/amdxdna.h>

#include "amdxdna_mailbox.h"

#define MB_ERR(chann, fmt, args...) \
({ \
	typeof(chann) _chann = chann; \
	dev_err((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
		(_chann)->msix_irq, ##args); \
})
#define MB_DBG(chann, fmt, args...) \
({ \
	typeof(chann) _chann = chann; \
	dev_dbg((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
		(_chann)->msix_irq, ##args); \
})
#define MB_WARN_ONCE(chann, fmt, args...) \
({ \
	typeof(chann) _chann = chann; \
	dev_warn_once((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
		      (_chann)->msix_irq, ##args); \
})
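
/*
 * Example (illustrative only): with a channel bound to MSI-X vector 4,
 *
 *	MB_ERR(chann, "ring full, size %d", 64);
 *
 * prints "xdna_mailbox.4: ring full, size 64" via dev_err(). The local
 * _chann copy ensures the macro argument is evaluated exactly once.
 */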

#define MAGIC_VAL			0x1D000000U
#define MAGIC_VAL_MASK			0xFF000000
#define MAX_MSG_ID_ENTRIES		256
#define MSG_RX_TIMER			200 /* milliseconds */
#define MAILBOX_NAME			"xdna_mailbox"

enum channel_res_type {
	CHAN_RES_X2I,
	CHAN_RES_I2X,
	CHAN_RES_NUM
};

struct mailbox {
	struct device		*dev;
	struct xdna_mailbox_res	res;
};

struct mailbox_channel {
	struct mailbox			*mb;
	struct xdna_mailbox_chann_res	res[CHAN_RES_NUM];
	int				msix_irq;
	u32				iohub_int_addr;
	struct xarray			chan_xa;
	u32				next_msgid;
	u32				x2i_tail;

	/* Received msg related fields */
	struct workqueue_struct		*work_q;
	struct work_struct		rx_work;
	u32				i2x_head;
	bool				bad_state;
};

#define MSG_BODY_SZ		GENMASK(10, 0)
#define MSG_PROTO_VER		GENMASK(23, 16)
struct xdna_msg_header {
	__u32 total_size;
	__u32 sz_ver;
	__u32 id;
	__u32 opcode;
} __packed;

static_assert(sizeof(struct xdna_msg_header) == 16);

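/*
 * Example encoding of sz_ver (values are illustrative): a 16-byte body
 * under protocol version 0x1,
 *
 *	FIELD_PREP(MSG_BODY_SZ, 16) | FIELD_PREP(MSG_PROTO_VER, 0x1)
 *
 * yields 0x00010010: the body size occupies bits 10:0 and the protocol
 * version bits 23:16.
 */
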
struct mailbox_pkg {
	struct xdna_msg_header	header;
	__u32			payload[];
};

/* The protocol version. */
#define MSG_PROTOCOL_VERSION	0x1
/* The tombstone value. */
#define TOMBSTONE		0xDEADFACE

struct mailbox_msg {
	void			*handle;
	int			(*notify_cb)(void *handle, const u32 *data, size_t size);
	size_t			pkg_size; /* package size in bytes */
	struct mailbox_pkg	pkg;
};

static void mailbox_reg_write(struct mailbox_channel *mb_chann, u32 mbox_reg, u32 data)
{
	struct xdna_mailbox_res *mb_res = &mb_chann->mb->res;
	void __iomem *ringbuf_addr = mb_res->mbox_base + mbox_reg;

	writel(data, ringbuf_addr);
}

static u32 mailbox_reg_read(struct mailbox_channel *mb_chann, u32 mbox_reg)
{
	struct xdna_mailbox_res *mb_res = &mb_chann->mb->res;
	void __iomem *ringbuf_addr = mb_res->mbox_base + mbox_reg;

	return readl(ringbuf_addr);
}

static int mailbox_reg_read_non_zero(struct mailbox_channel *mb_chann, u32 mbox_reg, u32 *val)
{
	struct xdna_mailbox_res *mb_res = &mb_chann->mb->res;
	void __iomem *ringbuf_addr = mb_res->mbox_base + mbox_reg;
	int ret, value;

	/* Poll until the value is non-zero, timing out after 100 us */
	ret = readx_poll_timeout(readl, ringbuf_addr, value,
				 value, 1 /* us */, 100);
	if (ret < 0)
		return ret;

	*val = value;
	return 0;
}

static inline void
mailbox_set_headptr(struct mailbox_channel *mb_chann, u32 headptr_val)
{
	mailbox_reg_write(mb_chann, mb_chann->res[CHAN_RES_I2X].mb_head_ptr_reg, headptr_val);
	mb_chann->i2x_head = headptr_val;
}

static inline void
mailbox_set_tailptr(struct mailbox_channel *mb_chann, u32 tailptr_val)
{
	mailbox_reg_write(mb_chann, mb_chann->res[CHAN_RES_X2I].mb_tail_ptr_reg, tailptr_val);
	mb_chann->x2i_tail = tailptr_val;
}

static inline u32
mailbox_get_headptr(struct mailbox_channel *mb_chann, enum channel_res_type type)
{
	return mailbox_reg_read(mb_chann, mb_chann->res[type].mb_head_ptr_reg);
}

static inline u32
mailbox_get_tailptr(struct mailbox_channel *mb_chann, enum channel_res_type type)
{
	return mailbox_reg_read(mb_chann, mb_chann->res[type].mb_tail_ptr_reg);
}

static inline u32
mailbox_get_ringbuf_size(struct mailbox_channel *mb_chann, enum channel_res_type type)
{
	return mb_chann->res[type].rb_size;
}

static inline int mailbox_validate_msgid(int msg_id)
{
	return (msg_id & MAGIC_VAL_MASK) == MAGIC_VAL;
}

static int mailbox_acquire_msgid(struct mailbox_channel *mb_chann, struct mailbox_msg *mb_msg)
{
	u32 msg_id;
	int ret;

	ret = xa_alloc_cyclic_irq(&mb_chann->chan_xa, &msg_id, mb_msg,
				  XA_LIMIT(0, MAX_MSG_ID_ENTRIES - 1),
				  &mb_chann->next_msgid, GFP_NOWAIT);
	if (ret < 0)
		return ret;

	/* Add MAGIC_VAL to the high bits so the ID can be validated later */
	msg_id |= MAGIC_VAL;
	return msg_id;
}
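
/*
 * Example (illustrative): if the xarray hands out slot 0x2a, the ID on
 * the wire becomes 0x1D00002A. mailbox_validate_msgid() checks the
 * MAGIC_VAL_MASK bits against MAGIC_VAL, and mailbox_release_msgid()
 * masks them off to recover the xarray index.
 */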

static void mailbox_release_msgid(struct mailbox_channel *mb_chann, int msg_id)
{
	msg_id &= ~MAGIC_VAL_MASK;
	xa_erase_irq(&mb_chann->chan_xa, msg_id);
}

static void mailbox_release_msg(struct mailbox_channel *mb_chann,
				struct mailbox_msg *mb_msg)
{
	MB_DBG(mb_chann, "msg_id 0x%x msg opcode 0x%x",
	       mb_msg->pkg.header.id, mb_msg->pkg.header.opcode);
	mb_msg->notify_cb(mb_msg->handle, NULL, 0);
	kfree(mb_msg);
}

static int
mailbox_send_msg(struct mailbox_channel *mb_chann, struct mailbox_msg *mb_msg)
{
	void __iomem *write_addr;
	u32 ringbuf_size;
	u32 head, tail;
	u32 start_addr;
	u32 tmp_tail;

	head = mailbox_get_headptr(mb_chann, CHAN_RES_X2I);
	tail = mb_chann->x2i_tail;
	ringbuf_size = mailbox_get_ringbuf_size(mb_chann, CHAN_RES_X2I);
	start_addr = mb_chann->res[CHAN_RES_X2I].rb_start_addr;
	tmp_tail = tail + mb_msg->pkg_size;

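	/*
	 * The writer has wrapped and trails the reader: the new message
	 * must not reach the head and overwrite unread data.
	 */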
	if (tail < head && tmp_tail >= head)
		goto no_space;

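	/*
	 * The message does not fit between the tail and the end of the
	 * ring (keeping one word free for a TOMBSTONE), and it does not
	 * fit below the head either.
	 */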
	if (tail >= head && (tmp_tail > ringbuf_size - sizeof(u32) &&
			     mb_msg->pkg_size >= head))
		goto no_space;

	if (tail >= head && tmp_tail > ringbuf_size - sizeof(u32)) {
		write_addr = mb_chann->mb->res.ringbuf_base + start_addr + tail;
		writel(TOMBSTONE, write_addr);

		/* tombstone is set. Write from the start of the ringbuf */
		tail = 0;
	}

	write_addr = mb_chann->mb->res.ringbuf_base + start_addr + tail;
	memcpy_toio(write_addr, &mb_msg->pkg, mb_msg->pkg_size);
	mailbox_set_tailptr(mb_chann, tail + mb_msg->pkg_size);

	trace_mbox_set_tail(MAILBOX_NAME, mb_chann->msix_irq,
			    mb_msg->pkg.header.opcode,
			    mb_msg->pkg.header.id);

	return 0;

no_space:
	return -ENOSPC;
}
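
/*
 * Worked example (numbers are illustrative): with a 1024-byte X2I ring,
 * head = 64 and tail = 1000, a 48-byte package does not fit before the
 * end (1048 > 1024 - 4), but it does fit below the head (48 < 64). A
 * TOMBSTONE word is written at offset 1000, the package is copied to
 * offset 0, and the tail pointer becomes 48.
 */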

static int
mailbox_get_resp(struct mailbox_channel *mb_chann, struct xdna_msg_header *header,
		 void *data)
{
	struct mailbox_msg *mb_msg;
	int msg_id;
	int ret;

	msg_id = header->id;
	if (!mailbox_validate_msgid(msg_id)) {
		MB_ERR(mb_chann, "Bad message ID 0x%x", msg_id);
		return -EINVAL;
	}

	msg_id &= ~MAGIC_VAL_MASK;
	mb_msg = xa_erase_irq(&mb_chann->chan_xa, msg_id);
	if (!mb_msg) {
		MB_ERR(mb_chann, "Cannot find msg 0x%x", msg_id);
		return -EINVAL;
	}

	MB_DBG(mb_chann, "opcode 0x%x size %d id 0x%x",
	       header->opcode, header->total_size, header->id);
	ret = mb_msg->notify_cb(mb_msg->handle, data, header->total_size);
	if (unlikely(ret))
		MB_ERR(mb_chann, "Message callback ret %d", ret);

	kfree(mb_msg);
	return ret;
}

static int mailbox_get_msg(struct mailbox_channel *mb_chann)
{
	struct xdna_msg_header header;
	void __iomem *read_addr;
	u32 msg_size, rest;
	u32 ringbuf_size;
	u32 head, tail;
	u32 start_addr;
	int ret;

	if (mailbox_reg_read_non_zero(mb_chann, mb_chann->res[CHAN_RES_I2X].mb_tail_ptr_reg, &tail))
		return -EINVAL;
	head = mb_chann->i2x_head;
	ringbuf_size = mailbox_get_ringbuf_size(mb_chann, CHAN_RES_I2X);
	start_addr = mb_chann->res[CHAN_RES_I2X].rb_start_addr;

	if (unlikely(tail > ringbuf_size || !IS_ALIGNED(tail, 4))) {
		MB_WARN_ONCE(mb_chann, "Invalid tail 0x%x", tail);
		return -EINVAL;
	}

	/* ringbuf empty */
	if (head == tail)
		return -ENOENT;

	if (head == ringbuf_size)
		head = 0;

	/* Peek size of the message or TOMBSTONE */
	read_addr = mb_chann->mb->res.ringbuf_base + start_addr + head;
	header.total_size = readl(read_addr);
	/* size is TOMBSTONE, set next read from 0 */
	if (header.total_size == TOMBSTONE) {
		if (head < tail) {
			MB_WARN_ONCE(mb_chann, "Tombstone, head 0x%x tail 0x%x",
				     head, tail);
			return -EINVAL;
		}
		mailbox_set_headptr(mb_chann, 0);
		return 0;
	}

	if (unlikely(!header.total_size || !IS_ALIGNED(header.total_size, 4))) {
		MB_WARN_ONCE(mb_chann, "Invalid total size 0x%x", header.total_size);
		return -EINVAL;
	}
	msg_size = sizeof(header) + header.total_size;

	if (msg_size > ringbuf_size - head || msg_size > tail - head) {
		MB_WARN_ONCE(mb_chann, "Invalid message size %d, tail %d, head %d",
			     msg_size, tail, head);
		return -EINVAL;
	}

	rest = sizeof(header) - sizeof(u32);
	read_addr += sizeof(u32);
	memcpy_fromio((u32 *)&header + 1, read_addr, rest);
	read_addr += rest;

	ret = mailbox_get_resp(mb_chann, &header, (u32 *)read_addr);

	mailbox_set_headptr(mb_chann, head + msg_size);
	/* After updating, the head can equal ringbuf_size. This is expected. */
	trace_mbox_set_head(MAILBOX_NAME, mb_chann->msix_irq,
			    header.opcode, header.id);

	return ret;
}
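
/*
 * A mirrored scenario on the I2X ring (illustrative): the firmware
 * wrote a TOMBSTONE at offset 1000 and wrapped, so tail = 48 while the
 * cached head is 1000. The reader peeks the TOMBSTONE, resets the head
 * pointer to 0, and the next mailbox_get_msg() call consumes the
 * message at offset 0.
 */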

static irqreturn_t mailbox_irq_handler(int irq, void *p)
{
	struct mailbox_channel *mb_chann = p;

	trace_mbox_irq_handle(MAILBOX_NAME, irq);
	/* Schedule rx_work to call the callback functions */
	queue_work(mb_chann->work_q, &mb_chann->rx_work);
	/* Clear IOHUB register */
	mailbox_reg_write(mb_chann, mb_chann->iohub_int_addr, 0);

	return IRQ_HANDLED;
}

static void mailbox_rx_worker(struct work_struct *rx_work)
{
	struct mailbox_channel *mb_chann;
	int ret;

	mb_chann = container_of(rx_work, struct mailbox_channel, rx_work);

	if (READ_ONCE(mb_chann->bad_state)) {
		MB_ERR(mb_chann, "Channel in bad state, work aborted");
		return;
	}

	while (1) {
		/*
		 * If the return is 0, keep consuming messages until
		 * there are none left or an error occurs.
		 */
		ret = mailbox_get_msg(mb_chann);
		if (ret == -ENOENT)
			break;

		/* Any other error means the device is unhealthy; disable the irq. */
		if (unlikely(ret)) {
			MB_ERR(mb_chann, "Unexpected ret %d, disable irq", ret);
			WRITE_ONCE(mb_chann->bad_state, true);
			disable_irq(mb_chann->msix_irq);
			break;
		}
	}
}

int xdna_mailbox_send_msg(struct mailbox_channel *mb_chann,
			  const struct xdna_mailbox_msg *msg, u64 tx_timeout)
{
	struct xdna_msg_header *header;
	struct mailbox_msg *mb_msg;
	size_t pkg_size;
	int ret;

	pkg_size = sizeof(*header) + msg->send_size;
	if (pkg_size > mailbox_get_ringbuf_size(mb_chann, CHAN_RES_X2I)) {
		MB_ERR(mb_chann, "Message size larger than ringbuf size");
		return -EINVAL;
	}

	if (unlikely(!IS_ALIGNED(msg->send_size, 4))) {
		MB_ERR(mb_chann, "Message must be 4-byte aligned");
		return -EINVAL;
	}

	/* The first word of the payload can NOT be TOMBSTONE */
	if (unlikely(((u32 *)msg->send_data)[0] == TOMBSTONE)) {
		MB_ERR(mb_chann, "Tombstone in data");
		return -EINVAL;
	}

	if (READ_ONCE(mb_chann->bad_state)) {
		MB_ERR(mb_chann, "Channel in bad state");
		return -EPIPE;
	}

	mb_msg = kzalloc(sizeof(*mb_msg) + pkg_size, GFP_KERNEL);
	if (!mb_msg)
		return -ENOMEM;

	mb_msg->handle = msg->handle;
	mb_msg->notify_cb = msg->notify_cb;
	mb_msg->pkg_size = pkg_size;

	header = &mb_msg->pkg.header;
	/*
	 * Hardware uses total_size and size to split huge messages.
	 * We do not support that here, so the two values are the same.
	 */
	header->total_size = msg->send_size;
	header->sz_ver = FIELD_PREP(MSG_BODY_SZ, msg->send_size) |
			FIELD_PREP(MSG_PROTO_VER, MSG_PROTOCOL_VERSION);
	header->opcode = msg->opcode;
	memcpy(mb_msg->pkg.payload, msg->send_data, msg->send_size);

	ret = mailbox_acquire_msgid(mb_chann, mb_msg);
	if (unlikely(ret < 0)) {
		MB_ERR(mb_chann, "mailbox_acquire_msgid failed");
		goto msg_id_failed;
	}
	header->id = ret;

	MB_DBG(mb_chann, "opcode 0x%x size %d id 0x%x",
	       header->opcode, header->total_size, header->id);

	ret = mailbox_send_msg(mb_chann, mb_msg);
	if (ret) {
		MB_DBG(mb_chann, "Error in mailbox send msg, ret %d", ret);
		goto release_id;
	}

	return 0;

release_id:
	mailbox_release_msgid(mb_chann, header->id);
msg_id_failed:
	kfree(mb_msg);
	return ret;
}
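
/*
 * Example caller (a sketch; my_ctx, my_resp_handler, my_decode_resp,
 * MY_OPCODE and TX_TIMEOUT are hypothetical, and struct
 * xdna_mailbox_msg is from amdxdna_mailbox.h). notify_cb runs from the
 * rx worker with the response payload, or with data == NULL when the
 * message is released without a response:
 *
 *	static int my_resp_handler(void *handle, const u32 *data, size_t size)
 *	{
 *		if (!data)
 *			return 0;
 *		return my_decode_resp(handle, data, size);
 *	}
 *
 *	u32 payload[4] = { 1, 2, 3, 4 };
 *	struct xdna_mailbox_msg msg = {
 *		.opcode = MY_OPCODE,
 *		.handle = my_ctx,
 *		.notify_cb = my_resp_handler,
 *		.send_data = (u8 *)payload,
 *		.send_size = sizeof(payload),
 *	};
 *	ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
 *
 * send_size must be a multiple of 4 and the first payload word must not
 * be TOMBSTONE.
 */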

struct mailbox_channel *
xdna_mailbox_create_channel(struct mailbox *mb,
			    const struct xdna_mailbox_chann_res *x2i,
			    const struct xdna_mailbox_chann_res *i2x,
			    u32 iohub_int_addr,
			    int mb_irq)
{
	struct mailbox_channel *mb_chann;
	int ret;

	if (!is_power_of_2(x2i->rb_size) || !is_power_of_2(i2x->rb_size)) {
		pr_err("Ring buf size must be a power of 2");
		return NULL;
	}

	mb_chann = kzalloc(sizeof(*mb_chann), GFP_KERNEL);
	if (!mb_chann)
		return NULL;

	mb_chann->mb = mb;
	mb_chann->msix_irq = mb_irq;
	mb_chann->iohub_int_addr = iohub_int_addr;
	memcpy(&mb_chann->res[CHAN_RES_X2I], x2i, sizeof(*x2i));
	memcpy(&mb_chann->res[CHAN_RES_I2X], i2x, sizeof(*i2x));

	xa_init_flags(&mb_chann->chan_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	mb_chann->x2i_tail = mailbox_get_tailptr(mb_chann, CHAN_RES_X2I);
	mb_chann->i2x_head = mailbox_get_headptr(mb_chann, CHAN_RES_I2X);

	INIT_WORK(&mb_chann->rx_work, mailbox_rx_worker);
	mb_chann->work_q = create_singlethread_workqueue(MAILBOX_NAME);
	if (!mb_chann->work_q) {
		MB_ERR(mb_chann, "Create workqueue failed");
		goto free_and_out;
	}

	/* Everything looks good. Time to enable the irq handler */
	ret = request_irq(mb_irq, mailbox_irq_handler, 0, MAILBOX_NAME, mb_chann);
	if (ret) {
		MB_ERR(mb_chann, "Failed to request irq %d ret %d", mb_irq, ret);
		goto destroy_wq;
	}

	mb_chann->bad_state = false;

	MB_DBG(mb_chann, "Mailbox channel created (irq: %d)", mb_chann->msix_irq);
	return mb_chann;

destroy_wq:
	destroy_workqueue(mb_chann->work_q);
free_and_out:
	kfree(mb_chann);
	return NULL;
}

int xdna_mailbox_destroy_channel(struct mailbox_channel *mb_chann)
{
	struct mailbox_msg *mb_msg;
	unsigned long msg_id;

	MB_DBG(mb_chann, "Freeing IRQ and destroying workqueue");
	free_irq(mb_chann->msix_irq, mb_chann);
	destroy_workqueue(mb_chann->work_q);
	/* We can clean up and release resources */

	xa_for_each(&mb_chann->chan_xa, msg_id, mb_msg)
		mailbox_release_msg(mb_chann, mb_msg);

	xa_destroy(&mb_chann->chan_xa);

	MB_DBG(mb_chann, "Mailbox channel destroyed, irq: %d", mb_chann->msix_irq);
	kfree(mb_chann);
	return 0;
}

void xdna_mailbox_stop_channel(struct mailbox_channel *mb_chann)
{
	/* Disable the irq and wait. This might sleep. */
	disable_irq(mb_chann->msix_irq);

	/* Cancel RX work and wait for it to finish */
	cancel_work_sync(&mb_chann->rx_work);
	MB_DBG(mb_chann, "IRQ disabled and RX work cancelled");
}
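
/*
 * Expected teardown order (a sketch based on the functions above):
 *
 *	xdna_mailbox_stop_channel(chann);
 *	xdna_mailbox_destroy_channel(chann);
 *
 * Stop disables the irq and flushes rx_work; destroy then frees the
 * irq and workqueue, and releases every pending message by invoking
 * its notify_cb with NULL data.
 */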

struct mailbox *xdnam_mailbox_create(struct drm_device *ddev,
				     const struct xdna_mailbox_res *res)
{
	struct mailbox *mb;

	mb = drmm_kzalloc(ddev, sizeof(*mb), GFP_KERNEL);
	if (!mb)
		return NULL;
	mb->dev = ddev->dev;

	/* mailbox and ring buf base and size information */
	memcpy(&mb->res, res, sizeof(*res));

	return mb;
}