// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/pm_runtime.h>
#include <linux/wait.h>

#include "ivpu_drv.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_pm.h"

#define IPC_MAX_RX_MSG	128

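/*
 * A single TX slot carved out of @mem_tx: the IPC header immediately
 * followed by the JSM message that its data_addr field points to.
 */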
struct ivpu_ipc_tx_buf {
	struct ivpu_ipc_hdr ipc;
	struct vpu_jsm_msg jsm;
};

static void ivpu_ipc_msg_dump(struct ivpu_device *vdev, char *c,
			      struct ivpu_ipc_hdr *ipc_hdr, u32 vpu_addr)
{
	ivpu_dbg(vdev, IPC,
		 "%s: vpu:0x%x (data_addr:0x%08x, data_size:0x%x, channel:0x%x, src_node:0x%x, dst_node:0x%x, status:0x%x)",
		 c, vpu_addr, ipc_hdr->data_addr, ipc_hdr->data_size, ipc_hdr->channel,
		 ipc_hdr->src_node, ipc_hdr->dst_node, ipc_hdr->status);
}

static void ivpu_jsm_msg_dump(struct ivpu_device *vdev, char *c,
			      struct vpu_jsm_msg *jsm_msg, u32 vpu_addr)
{
	u32 *payload = (u32 *)&jsm_msg->payload;

	ivpu_dbg(vdev, JSM,
		 "%s: vpu:0x%08x (type:%s, status:0x%x, id: 0x%x, result: 0x%x, payload:0x%x 0x%x 0x%x 0x%x 0x%x)\n",
		 c, vpu_addr, ivpu_jsm_msg_type_to_str(jsm_msg->type),
		 jsm_msg->status, jsm_msg->request_id, jsm_msg->result,
		 payload[0], payload[1], payload[2], payload[3], payload[4]);
}

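/* Mark both message statuses as free so the firmware can reuse the buffers. */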
static void
ivpu_ipc_rx_mark_free(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
		      struct vpu_jsm_msg *jsm_msg)
{
	ipc_hdr->status = IVPU_IPC_HDR_FREE;
	if (jsm_msg)
		jsm_msg->status = VPU_JSM_MSG_FREE;
	wmb(); /* Flush WC buffers for message statuses */
}

static void ivpu_ipc_mem_fini(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	ivpu_bo_free(ipc->mem_rx);
	ivpu_bo_free(ipc->mem_tx);
}

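/*
 * Reserve a TX slot from the gen_pool, fill in the IPC header and copy the
 * JSM request into it. The slot is returned to the pool by
 * ivpu_ipc_tx_release() when the consumer is removed.
 */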
static int
ivpu_ipc_tx_prepare(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
		    struct vpu_jsm_msg *req)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_tx_buf *tx_buf;
	u32 tx_buf_vpu_addr;
	u32 jsm_vpu_addr;

	tx_buf_vpu_addr = gen_pool_alloc(ipc->mm_tx, sizeof(*tx_buf));
	if (!tx_buf_vpu_addr) {
		ivpu_err_ratelimited(vdev, "Failed to reserve IPC buffer, size %zu\n",
				     sizeof(*tx_buf));
		return -ENOMEM;
	}

	tx_buf = ivpu_to_cpu_addr(ipc->mem_tx, tx_buf_vpu_addr);
	if (drm_WARN_ON(&vdev->drm, !tx_buf)) {
		gen_pool_free(ipc->mm_tx, tx_buf_vpu_addr, sizeof(*tx_buf));
		return -EIO;
	}

	jsm_vpu_addr = tx_buf_vpu_addr + offsetof(struct ivpu_ipc_tx_buf, jsm);

	if (tx_buf->ipc.status != IVPU_IPC_HDR_FREE)
		ivpu_warn_ratelimited(vdev, "IPC message vpu:0x%x not released by firmware\n",
				      tx_buf_vpu_addr);

	if (tx_buf->jsm.status != VPU_JSM_MSG_FREE)
		ivpu_warn_ratelimited(vdev, "JSM message vpu:0x%x not released by firmware\n",
				      jsm_vpu_addr);

	memset(tx_buf, 0, sizeof(*tx_buf));
	tx_buf->ipc.data_addr = jsm_vpu_addr;
	/* TODO: Set data_size to actual JSM message size, not union of all messages */
	tx_buf->ipc.data_size = sizeof(*req);
	tx_buf->ipc.channel = cons->channel;
	tx_buf->ipc.src_node = 0;
	tx_buf->ipc.dst_node = 1;
	tx_buf->ipc.status = IVPU_IPC_HDR_ALLOCATED;
	tx_buf->jsm.type = req->type;
	tx_buf->jsm.status = VPU_JSM_MSG_ALLOCATED;
	tx_buf->jsm.payload = req->payload;

	req->request_id = atomic_inc_return(&ipc->request_id);
	tx_buf->jsm.request_id = req->request_id;
	cons->request_id = req->request_id;
	wmb(); /* Flush WC buffers for IPC, JSM msgs */

	cons->tx_vpu_addr = tx_buf_vpu_addr;

	ivpu_jsm_msg_dump(vdev, "TX", &tx_buf->jsm, jsm_vpu_addr);
	ivpu_ipc_msg_dump(vdev, "TX", &tx_buf->ipc, tx_buf_vpu_addr);

	return 0;
}

static void ivpu_ipc_tx_release(struct ivpu_device *vdev, u32 vpu_addr)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	if (vpu_addr)
		gen_pool_free(ipc->mm_tx, vpu_addr, sizeof(struct ivpu_ipc_tx_buf));
}

static void ivpu_ipc_tx(struct ivpu_device *vdev, u32 vpu_addr)
{
	ivpu_hw_ipc_tx_set(vdev, vpu_addr);
}

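/*
 * Queue a received message for a consumer. Messages for callback consumers
 * go to the shared cb_msg_list and are handled by the IRQ thread; synchronous
 * consumers are woken up on their private rx_msg_list.
 */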
static void
ivpu_ipc_rx_msg_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
		    struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_rx_msg *rx_msg;

	lockdep_assert_held(&ipc->cons_lock);
	lockdep_assert_irqs_disabled();

	rx_msg = kzalloc(sizeof(*rx_msg), GFP_ATOMIC);
	if (!rx_msg) {
		ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
		return;
	}

	atomic_inc(&ipc->rx_msg_count);

	rx_msg->ipc_hdr = ipc_hdr;
	rx_msg->jsm_msg = jsm_msg;
	rx_msg->callback = cons->rx_callback;

	if (rx_msg->callback) {
		list_add_tail(&rx_msg->link, &ipc->cb_msg_list);
	} else {
		spin_lock(&cons->rx_lock);
		list_add_tail(&rx_msg->link, &cons->rx_msg_list);
		spin_unlock(&cons->rx_lock);
		wake_up(&cons->rx_msg_wq);
	}
}

static void
ivpu_ipc_rx_msg_del(struct ivpu_device *vdev, struct ivpu_ipc_rx_msg *rx_msg)
{
	list_del(&rx_msg->link);
	ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
	atomic_dec(&vdev->ipc->rx_msg_count);
	kfree(rx_msg);
}

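/*
 * Register a consumer for @channel. With a NULL @rx_callback the consumer
 * receives synchronously through ivpu_ipc_receive().
 */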
void ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
			   u32 channel, ivpu_ipc_rx_callback_t rx_callback)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	INIT_LIST_HEAD(&cons->link);
	cons->channel = channel;
	cons->tx_vpu_addr = 0;
	cons->request_id = 0;
	cons->aborted = false;
	cons->rx_callback = rx_callback;
	spin_lock_init(&cons->rx_lock);
	INIT_LIST_HEAD(&cons->rx_msg_list);
	init_waitqueue_head(&cons->rx_msg_wq);

	spin_lock_irq(&ipc->cons_lock);
	list_add_tail(&cons->link, &ipc->cons_list);
	spin_unlock_irq(&ipc->cons_lock);
}

void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_rx_msg *rx_msg, *r;

	spin_lock_irq(&ipc->cons_lock);
	list_del(&cons->link);
	spin_unlock_irq(&ipc->cons_lock);

	spin_lock_irq(&cons->rx_lock);
	list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link)
		ivpu_ipc_rx_msg_del(vdev, rx_msg);
	spin_unlock_irq(&cons->rx_lock);

	ivpu_ipc_tx_release(vdev, cons->tx_vpu_addr);
}

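/* Send a JSM request to the VPU. Fails with -EAGAIN while IPC is disabled. */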
int ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct vpu_jsm_msg *req)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	int ret;

	mutex_lock(&ipc->lock);

	if (!ipc->on) {
		ret = -EAGAIN;
		goto unlock;
	}

	ret = ivpu_ipc_tx_prepare(vdev, cons, req);
	if (ret)
		goto unlock;

	ivpu_ipc_tx(vdev, cons->tx_vpu_addr);

unlock:
	mutex_unlock(&ipc->lock);
	return ret;
}

static bool ivpu_ipc_rx_need_wakeup(struct ivpu_ipc_consumer *cons)
{
	bool ret;

	spin_lock_irq(&cons->rx_lock);
	ret = !list_empty(&cons->rx_msg_list) || cons->aborted;
	spin_unlock_irq(&cons->rx_lock);

	return ret;
}

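/*
 * Wait for a message addressed to @cons and copy it to @ipc_buf/@jsm_msg.
 * Returns -ETIMEDOUT if nothing arrives within @timeout_ms, -ECANCELED if
 * the consumer was aborted and -EBADMSG if the firmware reported an error
 * result in the response.
 */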
int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
		     struct ivpu_ipc_hdr *ipc_buf,
		     struct vpu_jsm_msg *jsm_msg, unsigned long timeout_ms)
{
	struct ivpu_ipc_rx_msg *rx_msg;
	int wait_ret, ret = 0;

	if (drm_WARN_ONCE(&vdev->drm, cons->rx_callback, "Consumer works only in async mode\n"))
		return -EINVAL;

	wait_ret = wait_event_timeout(cons->rx_msg_wq,
				      ivpu_ipc_rx_need_wakeup(cons),
				      msecs_to_jiffies(timeout_ms));

	if (wait_ret == 0)
		return -ETIMEDOUT;

	spin_lock_irq(&cons->rx_lock);
	if (cons->aborted) {
		spin_unlock_irq(&cons->rx_lock);
		return -ECANCELED;
	}
	rx_msg = list_first_entry_or_null(&cons->rx_msg_list, struct ivpu_ipc_rx_msg, link);
	if (!rx_msg) {
		spin_unlock_irq(&cons->rx_lock);
		return -EAGAIN;
	}

	if (ipc_buf)
		memcpy(ipc_buf, rx_msg->ipc_hdr, sizeof(*ipc_buf));
	if (rx_msg->jsm_msg) {
		u32 size = min_t(int, rx_msg->ipc_hdr->data_size, sizeof(*jsm_msg));

		if (rx_msg->jsm_msg->result != VPU_JSM_STATUS_SUCCESS) {
			ivpu_dbg(vdev, IPC, "IPC resp result error: %d\n", rx_msg->jsm_msg->result);
			ret = -EBADMSG;
		}

		if (jsm_msg)
			memcpy(jsm_msg, rx_msg->jsm_msg, size);
	}

	ivpu_ipc_rx_msg_del(vdev, rx_msg);
	spin_unlock_irq(&cons->rx_lock);
	return ret;
}

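/*
 * Send @req on a temporary consumer and wait for the matching response.
 * Returns -EBADE if the firmware replies with an unexpected message type.
 */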
static int
ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
			       enum vpu_ipc_msg_type expected_resp_type,
			       struct vpu_jsm_msg *resp, u32 channel,
			       unsigned long timeout_ms)
{
	struct ivpu_ipc_consumer cons;
	int ret;

	ivpu_ipc_consumer_add(vdev, &cons, channel, NULL);

	ret = ivpu_ipc_send(vdev, &cons, req);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "IPC send failed: %d\n", ret);
		goto consumer_del;
	}

	ret = ivpu_ipc_receive(vdev, &cons, NULL, resp, timeout_ms);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "IPC receive failed: type %s, ret %d\n",
				      ivpu_jsm_msg_type_to_str(req->type), ret);
		goto consumer_del;
	}

	if (resp->type != expected_resp_type) {
		ivpu_warn_ratelimited(vdev, "Invalid JSM response type: 0x%x\n", resp->type);
		ret = -EBADE;
	}

consumer_del:
	ivpu_ipc_consumer_del(vdev, &cons);
	return ret;
}

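/*
 * Send/receive variant for callers that already hold a runtime PM reference.
 * On timeout, probe the firmware with an engine heartbeat query and trigger
 * recovery if the heartbeat times out as well.
 */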
int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
				 enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
				 u32 channel, unsigned long timeout_ms)
{
	struct vpu_jsm_msg hb_req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
	struct vpu_jsm_msg hb_resp;
	int ret, hb_ret;

	drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev));

	ret = ivpu_ipc_send_receive_internal(vdev, req, expected_resp, resp, channel, timeout_ms);
	if (ret != -ETIMEDOUT)
		return ret;

	hb_ret = ivpu_ipc_send_receive_internal(vdev, &hb_req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE,
						&hb_resp, VPU_IPC_CHAN_ASYNC_CMD,
						vdev->timeout.jsm);
	if (hb_ret == -ETIMEDOUT)
		ivpu_pm_trigger_recovery(vdev, "IPC timeout");

	return ret;
}

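/*
 * Same as ivpu_ipc_send_receive_active() but takes a runtime PM reference
 * for the duration of the transaction. Typical usage (a sketch; see
 * ivpu_jsm_msg.c for the real callers):
 *
 *	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
 *	struct vpu_jsm_msg resp;
 *	int ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE,
 *					&resp, VPU_IPC_CHAN_ASYNC_CMD,
 *					vdev->timeout.jsm);
 */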
int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
			  enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
			  u32 channel, unsigned long timeout_ms)
{
	int ret;

	ret = ivpu_rpm_get(vdev);
	if (ret < 0)
		return ret;

	ret = ivpu_ipc_send_receive_active(vdev, req, expected_resp, resp, channel, timeout_ms);

	ivpu_rpm_put(vdev);
	return ret;
}

static bool
ivpu_ipc_match_consumer(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
			struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg)
{
	if (cons->channel != ipc_hdr->channel)
		return false;

	if (!jsm_msg || jsm_msg->request_id == cons->request_id)
		return true;

	return false;
}

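/* Drain the IPC RX FIFO and dispatch each message to the matching consumer. */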
void ivpu_ipc_irq_handler(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_consumer *cons;
	struct ivpu_ipc_hdr *ipc_hdr;
	struct vpu_jsm_msg *jsm_msg;
	unsigned long flags;
	bool dispatched;
	u32 vpu_addr;

	/*
	 * The driver needs to purge all messages from the IPC FIFO to clear
	 * the IPC interrupt. Unless the FIFO is drained to zero, no further
	 * IPC interrupts will be generated.
	 */
	while (ivpu_hw_ipc_rx_count_get(vdev)) {
		vpu_addr = ivpu_hw_ipc_rx_addr_get(vdev);
		if (vpu_addr == REG_IO_ERROR) {
			ivpu_err_ratelimited(vdev, "Failed to read IPC rx addr register\n");
			return;
		}

		ipc_hdr = ivpu_to_cpu_addr(ipc->mem_rx, vpu_addr);
		if (!ipc_hdr) {
			ivpu_warn_ratelimited(vdev, "IPC msg 0x%x out of range\n", vpu_addr);
			continue;
		}
		ivpu_ipc_msg_dump(vdev, "RX", ipc_hdr, vpu_addr);

		jsm_msg = NULL;
		if (ipc_hdr->channel != IVPU_IPC_CHAN_BOOT_MSG) {
			jsm_msg = ivpu_to_cpu_addr(ipc->mem_rx, ipc_hdr->data_addr);
			if (!jsm_msg) {
				ivpu_warn_ratelimited(vdev, "JSM msg 0x%x out of range\n",
						      ipc_hdr->data_addr);
				ivpu_ipc_rx_mark_free(vdev, ipc_hdr, NULL);
				continue;
			}
			ivpu_jsm_msg_dump(vdev, "RX", jsm_msg, ipc_hdr->data_addr);
		}

		if (atomic_read(&ipc->rx_msg_count) > IPC_MAX_RX_MSG) {
			ivpu_warn_ratelimited(vdev, "IPC RX msg dropped, msg count %d\n",
					      IPC_MAX_RX_MSG);
			ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
			continue;
		}

		dispatched = false;
		spin_lock_irqsave(&ipc->cons_lock, flags);
		list_for_each_entry(cons, &ipc->cons_list, link) {
			if (ivpu_ipc_match_consumer(vdev, cons, ipc_hdr, jsm_msg)) {
				ivpu_ipc_rx_msg_add(vdev, cons, ipc_hdr, jsm_msg);
				dispatched = true;
				break;
			}
		}
		spin_unlock_irqrestore(&ipc->cons_lock, flags);

		if (!dispatched) {
			ivpu_dbg(vdev, IPC, "IPC RX msg 0x%x dropped (no consumer)\n", vpu_addr);
			ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
		}
	}

	if (!list_empty(&ipc->cb_msg_list))
		if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_IPC))
			ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
}

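/*
 * Run consumer callbacks from thread context: splice the pending messages
 * off cb_msg_list under the lock, then invoke the callbacks unlocked.
 */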
void ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_rx_msg *rx_msg, *r;
	struct list_head cb_msg_list;

	INIT_LIST_HEAD(&cb_msg_list);

	spin_lock_irq(&ipc->cons_lock);
	list_splice_tail_init(&ipc->cb_msg_list, &cb_msg_list);
	spin_unlock_irq(&ipc->cons_lock);

	list_for_each_entry_safe(rx_msg, r, &cb_msg_list, link) {
		rx_msg->callback(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
		ivpu_ipc_rx_msg_del(vdev, rx_msg);
	}
}

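/*
 * Allocate the TX/RX buffers shared with the firmware and create the
 * gen_pool used to carve TX slots out of the TX buffer.
 */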
int ivpu_ipc_init(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	int ret;

	ipc->mem_tx = ivpu_bo_create_global(vdev, SZ_16K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
	if (!ipc->mem_tx) {
		ivpu_err(vdev, "Failed to allocate mem_tx\n");
		return -ENOMEM;
	}

	ipc->mem_rx = ivpu_bo_create_global(vdev, SZ_16K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
	if (!ipc->mem_rx) {
		ivpu_err(vdev, "Failed to allocate mem_rx\n");
		ret = -ENOMEM;
		goto err_free_tx;
	}

	ipc->mm_tx = devm_gen_pool_create(vdev->drm.dev, __ffs(IVPU_IPC_ALIGNMENT),
					  -1, "TX_IPC_JSM");
	if (IS_ERR(ipc->mm_tx)) {
		ret = PTR_ERR(ipc->mm_tx);
		ivpu_err(vdev, "Failed to create gen pool, %pe\n", ipc->mm_tx);
		goto err_free_rx;
	}

	ret = gen_pool_add(ipc->mm_tx, ipc->mem_tx->vpu_addr, ivpu_bo_size(ipc->mem_tx), -1);
	if (ret) {
		ivpu_err(vdev, "gen_pool_add failed, ret %d\n", ret);
		goto err_free_rx;
	}

	spin_lock_init(&ipc->cons_lock);
	INIT_LIST_HEAD(&ipc->cons_list);
	INIT_LIST_HEAD(&ipc->cb_msg_list);
	ret = drmm_mutex_init(&vdev->drm, &ipc->lock);
	if (ret) {
		ivpu_err(vdev, "Failed to initialize ipc->lock, ret %d\n", ret);
		goto err_free_rx;
	}
	ivpu_ipc_reset(vdev);
	return 0;

err_free_rx:
	ivpu_bo_free(ipc->mem_rx);
err_free_tx:
	ivpu_bo_free(ipc->mem_tx);
	return ret;
}

void ivpu_ipc_fini(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	drm_WARN_ON(&vdev->drm, ipc->on);
	drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cons_list));
	drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cb_msg_list));
	drm_WARN_ON(&vdev->drm, atomic_read(&ipc->rx_msg_count) > 0);

	ivpu_ipc_mem_fini(vdev);
}

void ivpu_ipc_enable(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	mutex_lock(&ipc->lock);
	ipc->on = true;
	mutex_unlock(&ipc->lock);
}

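/*
 * Stop accepting new transmissions, abort all synchronous consumers and
 * drop any messages still queued on them.
 */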
void ivpu_ipc_disable(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_consumer *cons, *c;
	struct ivpu_ipc_rx_msg *rx_msg, *r;

	drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cb_msg_list));

	mutex_lock(&ipc->lock);
	ipc->on = false;
	mutex_unlock(&ipc->lock);

	spin_lock_irq(&ipc->cons_lock);
	list_for_each_entry_safe(cons, c, &ipc->cons_list, link) {
		spin_lock(&cons->rx_lock);
		if (!cons->rx_callback)
			cons->aborted = true;
		list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link)
			ivpu_ipc_rx_msg_del(vdev, rx_msg);
		spin_unlock(&cons->rx_lock);
		wake_up(&cons->rx_msg_wq);
	}
	spin_unlock_irq(&ipc->cons_lock);

	drm_WARN_ON(&vdev->drm, atomic_read(&ipc->rx_msg_count) > 0);
}

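/* Zero the TX and RX rings. Must only be called while IPC is disabled. */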
void ivpu_ipc_reset(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	mutex_lock(&ipc->lock);
	drm_WARN_ON(&vdev->drm, ipc->on);

	memset(ivpu_bo_vaddr(ipc->mem_tx), 0, ivpu_bo_size(ipc->mem_tx));
	memset(ivpu_bo_vaddr(ipc->mem_rx), 0, ivpu_bo_size(ipc->mem_rx));
	wmb(); /* Flush WC buffers for TX and RX rings */

	mutex_unlock(&ipc->lock);
}