xref: /linux/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c (revision 6beeaf48db6c548fcfc2ad32739d33af2fef3a5b)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2016-2019 Intel Corporation
4  */
5 
6 #include <linux/circ_buf.h>
7 #include <linux/ktime.h>
8 #include <linux/time64.h>
9 #include <linux/timekeeping.h>
10 
11 #include "i915_drv.h"
12 #include "intel_guc_ct.h"
13 #include "gt/intel_gt.h"
14 
15 static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
16 {
17 	return container_of(ct, struct intel_guc, ct);
18 }
19 
20 static inline struct intel_gt *ct_to_gt(struct intel_guc_ct *ct)
21 {
22 	return guc_to_gt(ct_to_guc(ct));
23 }
24 
25 static inline struct drm_i915_private *ct_to_i915(struct intel_guc_ct *ct)
26 {
27 	return ct_to_gt(ct)->i915;
28 }
29 
30 static inline struct drm_device *ct_to_drm(struct intel_guc_ct *ct)
31 {
32 	return &ct_to_i915(ct)->drm;
33 }
34 
35 #define CT_ERROR(_ct, _fmt, ...) \
36 	drm_err(ct_to_drm(_ct), "CT: " _fmt, ##__VA_ARGS__)
37 #ifdef CONFIG_DRM_I915_DEBUG_GUC
38 #define CT_DEBUG(_ct, _fmt, ...) \
39 	drm_dbg(ct_to_drm(_ct), "CT: " _fmt, ##__VA_ARGS__)
40 #else
41 #define CT_DEBUG(...)	do { } while (0)
42 #endif
43 #define CT_PROBE_ERROR(_ct, _fmt, ...) \
44 	i915_probe_error(ct_to_i915(_ct), "CT: " _fmt, ##__VA_ARGS__)
45 
46 /**
47  * DOC: CTB Blob
48  *
49  * We allocate a single blob to hold both CTB descriptors and buffers:
50  *
51  *      +--------+-----------------------------------------------+------+
52  *      | offset | contents                                      | size |
53  *      +========+===============================================+======+
54  *      | 0x0000 | H2G `CTB Descriptor`_ (send)                  |      |
55  *      +--------+-----------------------------------------------+  4K  |
56  *      | 0x0800 | G2H `CTB Descriptor`_ (recv)                  |      |
57  *      +--------+-----------------------------------------------+------+
58  *      | 0x1000 | H2G `CT Buffer`_ (send)                       | n*4K |
59  *      |        |                                               |      |
60  *      +--------+-----------------------------------------------+------+
61  *      | 0x1000 | G2H `CT Buffer`_ (recv)                       | m*4K |
62  *      | + n*4K |                                               |      |
63  *      +--------+-----------------------------------------------+------+
64  *
65  * The size of each `CT Buffer`_ must be a multiple of 4K.
66  * We don't expect too many messages in flight at any time, unless we are
67  * using GuC submission. In that case each request requires a minimum of
68  * 2 dwords, which gives us a maximum of 256 queued requests. Hopefully this
69  * is enough space to avoid backpressure on the driver. We increase the size
70  * of the receive buffer (relative to the send buffer) to ensure a G2H
71  * response message has a landing spot.
72  */
73 #define CTB_DESC_SIZE		ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
74 #define CTB_H2G_BUFFER_SIZE	(SZ_4K)
75 #define CTB_G2H_BUFFER_SIZE	(4 * CTB_H2G_BUFFER_SIZE)
76 #define G2H_ROOM_BUFFER_SIZE	(CTB_G2H_BUFFER_SIZE / 4)
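/*
 * With the sizes above, and assuming struct guc_ct_buffer_desc fits in 2K
 * (as the 0x0800 offset in the table above implies), the blob allocated by
 * intel_guc_ct_init() works out to:
 *
 *   2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE + CTB_G2H_BUFFER_SIZE
 *     = 2 * 2K + 4K + 16K = 24K
 *
 * of which G2H_ROOM_BUFFER_SIZE (4K) of the G2H buffer is held back as
 * credits for unexpected G2H messages.
 */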
77 
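/*
 * A single in-flight H2G request awaiting a G2H response: fence identifies
 * the request on the wire, status receives the HXG header of the response
 * (polled by wait_for_ct_request_update()). response_len holds the capacity
 * of the optional response_buf in dwords on submission and is updated to the
 * actual payload length by ct_handle_response().
 */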
78 struct ct_request {
79 	struct list_head link;
80 	u32 fence;
81 	u32 status;
82 	u32 response_len;
83 	u32 *response_buf;
84 };
85 
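/*
 * A G2H message copied out of the RECV buffer by ct_read(); size is the
 * message length in dwords, including the CTB header in msg[0].
 */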
86 struct ct_incoming_msg {
87 	struct list_head link;
88 	u32 size;
89 	u32 msg[];
90 };
91 
92 enum { CTB_SEND = 0, CTB_RECV = 1 };
93 
94 enum { CTB_OWNER_HOST = 0 };
95 
96 static void ct_receive_tasklet_func(struct tasklet_struct *t);
97 static void ct_incoming_request_worker_func(struct work_struct *w);
98 
99 /**
100  * intel_guc_ct_init_early - Initialize CT state without requiring device access
101  * @ct: pointer to CT struct
102  */
103 void intel_guc_ct_init_early(struct intel_guc_ct *ct)
104 {
105 	spin_lock_init(&ct->ctbs.send.lock);
106 	spin_lock_init(&ct->ctbs.recv.lock);
107 	spin_lock_init(&ct->requests.lock);
108 	INIT_LIST_HEAD(&ct->requests.pending);
109 	INIT_LIST_HEAD(&ct->requests.incoming);
110 	INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
111 	tasklet_setup(&ct->receive_tasklet, ct_receive_tasklet_func);
112 	init_waitqueue_head(&ct->wq);
113 }
114 
115 static inline const char *guc_ct_buffer_type_to_str(u32 type)
116 {
117 	switch (type) {
118 	case GUC_CTB_TYPE_HOST2GUC:
119 		return "SEND";
120 	case GUC_CTB_TYPE_GUC2HOST:
121 		return "RECV";
122 	default:
123 		return "<invalid>";
124 	}
125 }
126 
127 static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc)
128 {
129 	memset(desc, 0, sizeof(*desc));
130 }
131 
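/*
 * Reset the software state of a CT buffer: head and tail return to 0 and the
 * cached free space (tracked in dwords) is recomputed with CIRC_SPACE(),
 * minus any reserved space held back for unexpected G2H messages.
 */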
132 static void guc_ct_buffer_reset(struct intel_guc_ct_buffer *ctb)
133 {
134 	u32 space;
135 
136 	ctb->broken = false;
137 	ctb->tail = 0;
138 	ctb->head = 0;
139 	space = CIRC_SPACE(ctb->tail, ctb->head, ctb->size) - ctb->resv_space;
140 	atomic_set(&ctb->space, space);
141 
142 	guc_ct_buffer_desc_init(ctb->desc);
143 }
144 
145 static void guc_ct_buffer_init(struct intel_guc_ct_buffer *ctb,
146 			       struct guc_ct_buffer_desc *desc,
147 			       u32 *cmds, u32 size_in_bytes, u32 resv_space)
148 {
149 	GEM_BUG_ON(size_in_bytes % 4);
150 
151 	ctb->desc = desc;
152 	ctb->cmds = cmds;
153 	ctb->size = size_in_bytes / 4;
154 	ctb->resv_space = resv_space / 4;
155 
156 	guc_ct_buffer_reset(ctb);
157 }
158 
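/*
 * Ask the GuC to register a CT buffer of the given type. The SIZE field
 * carries the buffer size in 4K units minus one (size / SZ_4K - 1), hence
 * the GEM_BUG_ON(size % SZ_4K) check below.
 */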
159 static int guc_action_register_ct_buffer(struct intel_guc *guc, u32 type,
160 					 u32 desc_addr, u32 buff_addr, u32 size)
161 {
162 	u32 request[HOST2GUC_REGISTER_CTB_REQUEST_MSG_LEN] = {
163 		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
164 		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
165 		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_REGISTER_CTB),
166 		FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_1_SIZE, size / SZ_4K - 1) |
167 		FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_1_TYPE, type),
168 		FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_2_DESC_ADDR, desc_addr),
169 		FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_3_BUFF_ADDR, buff_addr),
170 	};
171 
172 	GEM_BUG_ON(type != GUC_CTB_TYPE_HOST2GUC && type != GUC_CTB_TYPE_GUC2HOST);
173 	GEM_BUG_ON(size % SZ_4K);
174 
175 	/* CT registration must go over MMIO */
176 	return intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
177 }
178 
179 static int ct_register_buffer(struct intel_guc_ct *ct, u32 type,
180 			      u32 desc_addr, u32 buff_addr, u32 size)
181 {
182 	int err;
183 
184 	err = i915_inject_probe_error(guc_to_gt(ct_to_guc(ct))->i915, -ENXIO);
185 	if (unlikely(err))
186 		return err;
187 
188 	err = guc_action_register_ct_buffer(ct_to_guc(ct), type,
189 					    desc_addr, buff_addr, size);
190 	if (unlikely(err))
191 		CT_ERROR(ct, "Failed to register %s buffer (err=%d)\n",
192 			 guc_ct_buffer_type_to_str(type), err);
193 	return err;
194 }
195 
196 static int guc_action_deregister_ct_buffer(struct intel_guc *guc, u32 type)
197 {
198 	u32 request[HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_LEN] = {
199 		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
200 		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
201 		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_DEREGISTER_CTB),
202 		FIELD_PREP(HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_1_TYPE, type),
203 	};
204 
205 	GEM_BUG_ON(type != GUC_CTB_TYPE_HOST2GUC && type != GUC_CTB_TYPE_GUC2HOST);
206 
207 	/* CT deregistration must go over MMIO */
208 	return intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
209 }
210 
211 static int ct_deregister_buffer(struct intel_guc_ct *ct, u32 type)
212 {
213 	int err = guc_action_deregister_ct_buffer(ct_to_guc(ct), type);
214 
215 	if (unlikely(err))
216 		CT_ERROR(ct, "Failed to deregister %s buffer (err=%d)\n",
217 			 guc_ct_buffer_type_to_str(type), err);
218 	return err;
219 }
220 
221 /**
222  * intel_guc_ct_init - Init buffer-based communication
223  * @ct: pointer to CT struct
224  *
225  * Allocate memory required for buffer-based communication.
226  *
227  * Return: 0 on success, a negative errno code on failure.
228  */
229 int intel_guc_ct_init(struct intel_guc_ct *ct)
230 {
231 	struct intel_guc *guc = ct_to_guc(ct);
232 	struct guc_ct_buffer_desc *desc;
233 	u32 blob_size;
234 	u32 cmds_size;
235 	u32 resv_space;
236 	void *blob;
237 	u32 *cmds;
238 	int err;
239 
240 	err = i915_inject_probe_error(guc_to_gt(guc)->i915, -ENXIO);
241 	if (err)
242 		return err;
243 
244 	GEM_BUG_ON(ct->vma);
245 
246 	blob_size = 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE + CTB_G2H_BUFFER_SIZE;
247 	err = intel_guc_allocate_and_map_vma(guc, blob_size, &ct->vma, &blob);
248 	if (unlikely(err)) {
249 		CT_PROBE_ERROR(ct, "Failed to allocate %u for CTB data (%pe)\n",
250 			       blob_size, ERR_PTR(err));
251 		return err;
252 	}
253 
254 	CT_DEBUG(ct, "base=%#x size=%u\n", intel_guc_ggtt_offset(guc, ct->vma), blob_size);
255 
256 	/* store pointers to desc and cmds for send ctb */
257 	desc = blob;
258 	cmds = blob + 2 * CTB_DESC_SIZE;
259 	cmds_size = CTB_H2G_BUFFER_SIZE;
260 	resv_space = 0;
261 	CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u/%u\n", "send",
262 		 ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size,
263 		 resv_space);
264 
265 	guc_ct_buffer_init(&ct->ctbs.send, desc, cmds, cmds_size, resv_space);
266 
267 	/* store pointers to desc and cmds for recv ctb */
268 	desc = blob + CTB_DESC_SIZE;
269 	cmds = blob + 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE;
270 	cmds_size = CTB_G2H_BUFFER_SIZE;
271 	resv_space = G2H_ROOM_BUFFER_SIZE;
272 	CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u/%u\n", "recv",
273 		 ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size,
274 		 resv_space);
275 
276 	guc_ct_buffer_init(&ct->ctbs.recv, desc, cmds, cmds_size, resv_space);
277 
278 	return 0;
279 }
280 
281 /**
282  * intel_guc_ct_fini - Fini buffer-based communication
283  * @ct: pointer to CT struct
284  *
285  * Deallocate memory required for buffer-based communication.
286  */
287 void intel_guc_ct_fini(struct intel_guc_ct *ct)
288 {
289 	GEM_BUG_ON(ct->enabled);
290 
291 	tasklet_kill(&ct->receive_tasklet);
292 	i915_vma_unpin_and_release(&ct->vma, I915_VMA_RELEASE_MAP);
293 	memset(ct, 0, sizeof(*ct));
294 }
295 
296 /**
297  * intel_guc_ct_enable - Enable buffer based command transport.
298  * @ct: pointer to CT struct
299  *
300  * Return: 0 on success, a negative errno code on failure.
301  */
302 int intel_guc_ct_enable(struct intel_guc_ct *ct)
303 {
304 	struct intel_guc *guc = ct_to_guc(ct);
305 	u32 base, desc, cmds;
306 	void *blob;
307 	int err;
308 
309 	GEM_BUG_ON(ct->enabled);
310 
311 	/* vma should already be allocated and mapped */
312 	GEM_BUG_ON(!ct->vma);
313 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(ct->vma->obj));
314 	base = intel_guc_ggtt_offset(guc, ct->vma);
315 
316 	/* blob should start with send descriptor */
317 	blob = __px_vaddr(ct->vma->obj);
318 	GEM_BUG_ON(blob != ct->ctbs.send.desc);
319 
320 	/* (re)initialize descriptors */
321 	guc_ct_buffer_reset(&ct->ctbs.send);
322 	guc_ct_buffer_reset(&ct->ctbs.recv);
323 
324 	/*
325 	 * Register both CT buffers starting with RECV buffer.
326 	 * Descriptors are in first half of the blob.
327 	 */
328 	desc = base + ptrdiff(ct->ctbs.recv.desc, blob);
329 	cmds = base + ptrdiff(ct->ctbs.recv.cmds, blob);
330 	err = ct_register_buffer(ct, GUC_CTB_TYPE_GUC2HOST,
331 				 desc, cmds, ct->ctbs.recv.size * 4);
332 
333 	if (unlikely(err))
334 		goto err_out;
335 
336 	desc = base + ptrdiff(ct->ctbs.send.desc, blob);
337 	cmds = base + ptrdiff(ct->ctbs.send.cmds, blob);
338 	err = ct_register_buffer(ct, GUC_CTB_TYPE_HOST2GUC,
339 				 desc, cmds, ct->ctbs.send.size * 4);
340 
341 	if (unlikely(err))
342 		goto err_deregister;
343 
344 	ct->enabled = true;
345 	ct->stall_time = KTIME_MAX;
346 
347 	return 0;
348 
349 err_deregister:
350 	ct_deregister_buffer(ct, GUC_CTB_TYPE_GUC2HOST);
351 err_out:
352 	CT_PROBE_ERROR(ct, "Failed to enable CTB (%pe)\n", ERR_PTR(err));
353 	return err;
354 }
355 
356 /**
357  * intel_guc_ct_disable - Disable buffer based command transport.
358  * @ct: pointer to CT struct
359  */
360 void intel_guc_ct_disable(struct intel_guc_ct *ct)
361 {
362 	struct intel_guc *guc = ct_to_guc(ct);
363 
364 	GEM_BUG_ON(!ct->enabled);
365 
366 	ct->enabled = false;
367 
368 	if (intel_guc_is_fw_running(guc)) {
369 		ct_deregister_buffer(ct, GUC_CTB_TYPE_HOST2GUC);
370 		ct_deregister_buffer(ct, GUC_CTB_TYPE_GUC2HOST);
371 	}
372 }
373 
374 static u32 ct_get_next_fence(struct intel_guc_ct *ct)
375 {
376 	/* For now it's trivial */
377 	return ++ct->requests.last_fence;
378 }
379 
380 static void write_barrier(struct intel_guc_ct *ct)
381 {
382 	struct intel_guc *guc = ct_to_guc(ct);
383 	struct intel_gt *gt = guc_to_gt(guc);
384 
385 	if (i915_gem_object_is_lmem(guc->ct.vma->obj)) {
386 		GEM_BUG_ON(guc->send_regs.fw_domains);
387 		/*
388 		 * This register is used by the i915 and GuC for MMIO based
389 		 * communication. Once we are in this code, CTBs are the only
390 		 * method the i915 uses to communicate with the GuC, so it is
391 		 * safe to write to this register (a value of 0 is a NOP for MMIO
392 		 * communication). If we ever start mixing CTBs and MMIOs, a new
393 		 * register will have to be chosen.
394 		 */
395 		intel_uncore_write_fw(gt->uncore, GEN11_SOFT_SCRATCH(0), 0);
396 	} else {
397 		/* wmb() sufficient for a barrier if in smem */
398 		wmb();
399 	}
400 }
401 
402 static int ct_write(struct intel_guc_ct *ct,
403 		    const u32 *action,
404 		    u32 len /* in dwords */,
405 		    u32 fence, u32 flags)
406 {
407 	struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
408 	struct guc_ct_buffer_desc *desc = ctb->desc;
409 	u32 tail = ctb->tail;
410 	u32 size = ctb->size;
411 	u32 header;
412 	u32 hxg;
413 	u32 type;
414 	u32 *cmds = ctb->cmds;
415 	unsigned int i;
416 
417 	if (unlikely(desc->status))
418 		goto corrupted;
419 
420 	GEM_BUG_ON(tail > size);
421 
422 #ifdef CONFIG_DRM_I915_DEBUG_GUC
423 	if (unlikely(tail != READ_ONCE(desc->tail))) {
424 		CT_ERROR(ct, "Tail was modified %u != %u\n",
425 			 desc->tail, tail);
426 		desc->status |= GUC_CTB_STATUS_MISMATCH;
427 		goto corrupted;
428 	}
429 	if (unlikely(READ_ONCE(desc->head) >= size)) {
430 		CT_ERROR(ct, "Invalid head offset %u >= %u\n",
431 			 desc->head, size);
432 		desc->status |= GUC_CTB_STATUS_OVERFLOW;
433 		goto corrupted;
434 	}
435 #endif
436 
437 	/*
438 	 * dw0: CT header (including fence)
439 	 * dw1: HXG header (including action code)
440 	 * dw2+: action data
441 	 */
442 	header = FIELD_PREP(GUC_CTB_MSG_0_FORMAT, GUC_CTB_FORMAT_HXG) |
443 		 FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) |
444 		 FIELD_PREP(GUC_CTB_MSG_0_FENCE, fence);
445 
446 	type = (flags & INTEL_GUC_CT_SEND_NB) ? GUC_HXG_TYPE_EVENT :
447 		GUC_HXG_TYPE_REQUEST;
448 	hxg = FIELD_PREP(GUC_HXG_MSG_0_TYPE, type) |
449 		FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
450 			   GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
451 
452 	CT_DEBUG(ct, "writing (tail %u) %*ph %*ph %*ph\n",
453 		 tail, 4, &header, 4, &hxg, 4 * (len - 1), &action[1]);
454 
455 	cmds[tail] = header;
456 	tail = (tail + 1) % size;
457 
458 	cmds[tail] = hxg;
459 	tail = (tail + 1) % size;
460 
461 	for (i = 1; i < len; i++) {
462 		cmds[tail] = action[i];
463 		tail = (tail + 1) % size;
464 	}
465 	GEM_BUG_ON(tail > size);
466 
467 	/*
468 	 * make sure H2G buffer update and LRC tail update (if this is triggering a
469 	 * submission) are visible before updating the descriptor tail
470 	 */
471 	write_barrier(ct);
472 
473 	/* update local copies */
474 	ctb->tail = tail;
475 	GEM_BUG_ON(atomic_read(&ctb->space) < len + GUC_CTB_HDR_LEN);
476 	atomic_sub(len + GUC_CTB_HDR_LEN, &ctb->space);
477 
478 	/* now update descriptor */
479 	WRITE_ONCE(desc->tail, tail);
480 
481 	return 0;
482 
483 corrupted:
484 	CT_ERROR(ct, "Corrupted descriptor head=%u tail=%u status=%#x\n",
485 		 desc->head, desc->tail, desc->status);
486 	ctb->broken = true;
487 	return -EPIPE;
488 }
489 
490 /**
491  * wait_for_ct_request_update - Wait for CT request state update.
492  * @req:	pointer to pending request
493  * @status:	placeholder for status
494  *
495  * For each sent request, the GuC shall send back a CT response message.
496  * Our message handler will update the status of the tracked request once
497  * a response message with the given fence is received. Wait here and
498  * check for a valid response status value.
499  *
500  * Return:
501  * *	0 response received (status is valid)
502  * *	-ETIMEDOUT no response within hardcoded timeout
503  */
504 static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
505 {
506 	int err;
507 
508 	/*
509 	 * Fast commands should complete in less than 10us, so sample quickly
510 	 * up to that length of time, then switch to a slower sleep-wait loop.
511 	 * No GuC command should ever take longer than 10ms, but many GuC
512 	 * commands can be in flight at a time, so use a 1s timeout on the
513 	 * slower sleep-wait loop.
514 	 */
515 #define GUC_CTB_RESPONSE_TIMEOUT_SHORT_MS 10
516 #define GUC_CTB_RESPONSE_TIMEOUT_LONG_MS 1000
517 #define done \
518 	(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, READ_ONCE(req->status)) == \
519 	 GUC_HXG_ORIGIN_GUC)
520 	err = wait_for_us(done, GUC_CTB_RESPONSE_TIMEOUT_SHORT_MS);
521 	if (err)
522 		err = wait_for(done, GUC_CTB_RESPONSE_TIMEOUT_LONG_MS);
523 #undef done
524 
525 	if (unlikely(err))
526 		DRM_ERROR("CT: fence %u err %d\n", req->fence, err);
527 
528 	*status = req->status;
529 	return err;
530 }
531 
532 #define GUC_CTB_TIMEOUT_MS	1500
533 static inline bool ct_deadlocked(struct intel_guc_ct *ct)
534 {
535 	long timeout = GUC_CTB_TIMEOUT_MS;
536 	bool ret = ktime_ms_delta(ktime_get(), ct->stall_time) > timeout;
537 
538 	if (unlikely(ret)) {
539 		struct guc_ct_buffer_desc *send = ct->ctbs.send.desc;
540 		struct guc_ct_buffer_desc *recv = ct->ctbs.recv.desc;
541 
542 		CT_ERROR(ct, "Communication stalled for %lld ms, desc status=%#x,%#x\n",
543 			 ktime_ms_delta(ktime_get(), ct->stall_time),
544 			 send->status, recv->status);
545 		ct->ctbs.send.broken = true;
546 	}
547 
548 	return ret;
549 }
550 
551 static inline bool g2h_has_room(struct intel_guc_ct *ct, u32 g2h_len_dw)
552 {
553 	struct intel_guc_ct_buffer *ctb = &ct->ctbs.recv;
554 
555 	/*
556 	 * We leave a certain amount of space in the G2H CTB for unexpected
557 	 * G2H messages (e.g. logging, engine hang, etc...)
558 	 */
559 	return !g2h_len_dw || atomic_read(&ctb->space) >= g2h_len_dw;
560 }
561 
562 static inline void g2h_reserve_space(struct intel_guc_ct *ct, u32 g2h_len_dw)
563 {
564 	lockdep_assert_held(&ct->ctbs.send.lock);
565 
566 	GEM_BUG_ON(!g2h_has_room(ct, g2h_len_dw));
567 
568 	if (g2h_len_dw)
569 		atomic_sub(g2h_len_dw, &ct->ctbs.recv.space);
570 }
571 
572 static inline void g2h_release_space(struct intel_guc_ct *ct, u32 g2h_len_dw)
573 {
574 	atomic_add(g2h_len_dw, &ct->ctbs.recv.space);
575 }
576 
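/*
 * Check whether the send (H2G) buffer has at least len_dw dwords free.
 * The cached free-space count is refreshed from the descriptor head only
 * when the cached value is insufficient; a head beyond the buffer size
 * marks the CTB as broken.
 */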
577 static inline bool h2g_has_room(struct intel_guc_ct *ct, u32 len_dw)
578 {
579 	struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
580 	struct guc_ct_buffer_desc *desc = ctb->desc;
581 	u32 head;
582 	u32 space;
583 
584 	if (atomic_read(&ctb->space) >= len_dw)
585 		return true;
586 
587 	head = READ_ONCE(desc->head);
588 	if (unlikely(head > ctb->size)) {
589 		CT_ERROR(ct, "Invalid head offset %u >= %u\n",
590 			 head, ctb->size);
591 		desc->status |= GUC_CTB_STATUS_OVERFLOW;
592 		ctb->broken = true;
593 		return false;
594 	}
595 
596 	space = CIRC_SPACE(ctb->tail, head, ctb->size);
597 	atomic_set(&ctb->space, space);
598 
599 	return space >= len_dw;
600 }
601 
602 static int has_room_nb(struct intel_guc_ct *ct, u32 h2g_dw, u32 g2h_dw)
603 {
604 	lockdep_assert_held(&ct->ctbs.send.lock);
605 
606 	if (unlikely(!h2g_has_room(ct, h2g_dw) || !g2h_has_room(ct, g2h_dw))) {
607 		if (ct->stall_time == KTIME_MAX)
608 			ct->stall_time = ktime_get();
609 
610 		if (unlikely(ct_deadlocked(ct)))
611 			return -EPIPE;
612 		else
613 			return -EBUSY;
614 	}
615 
616 	ct->stall_time = KTIME_MAX;
617 	return 0;
618 }
619 
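/*
 * Non-blocking senders encode the expected G2H response length (in dwords)
 * in the send flags via INTEL_GUC_CT_SEND_G2H_DW_MASK; when non-zero,
 * GUC_CTB_HXG_MSG_MIN_LEN is added to cover the response header.
 */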
620 #define G2H_LEN_DW(f) ({ \
621 	typeof(f) f_ = (f); \
622 	FIELD_GET(INTEL_GUC_CT_SEND_G2H_DW_MASK, f_) ? \
623 	FIELD_GET(INTEL_GUC_CT_SEND_G2H_DW_MASK, f_) + \
624 	GUC_CTB_HXG_MSG_MIN_LEN : 0; \
625 })
626 static int ct_send_nb(struct intel_guc_ct *ct,
627 		      const u32 *action,
628 		      u32 len,
629 		      u32 flags)
630 {
631 	struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
632 	unsigned long spin_flags;
633 	u32 g2h_len_dw = G2H_LEN_DW(flags);
634 	u32 fence;
635 	int ret;
636 
637 	spin_lock_irqsave(&ctb->lock, spin_flags);
638 
639 	ret = has_room_nb(ct, len + GUC_CTB_HDR_LEN, g2h_len_dw);
640 	if (unlikely(ret))
641 		goto out;
642 
643 	fence = ct_get_next_fence(ct);
644 	ret = ct_write(ct, action, len, fence, flags);
645 	if (unlikely(ret))
646 		goto out;
647 
648 	g2h_reserve_space(ct, g2h_len_dw);
649 	intel_guc_notify(ct_to_guc(ct));
650 
651 out:
652 	spin_unlock_irqrestore(&ctb->lock, spin_flags);
653 
654 	return ret;
655 }
656 
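/*
 * Blocking send: wait (with exponentially growing sleeps) for H2G space and
 * for worst-case G2H credits, write the request with a fresh fence, notify
 * the GuC via intel_guc_notify(), then wait until ct_handle_response()
 * updates the status of the tracked request or the wait times out.
 */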
657 static int ct_send(struct intel_guc_ct *ct,
658 		   const u32 *action,
659 		   u32 len,
660 		   u32 *response_buf,
661 		   u32 response_buf_size,
662 		   u32 *status)
663 {
664 	struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
665 	struct ct_request request;
666 	unsigned long flags;
667 	unsigned int sleep_period_ms = 1;
668 	u32 fence;
669 	int err;
670 
671 	GEM_BUG_ON(!ct->enabled);
672 	GEM_BUG_ON(!len);
673 	GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
674 	GEM_BUG_ON(!response_buf && response_buf_size);
675 	might_sleep();
676 
677 	/*
678 	 * We use a lazy spin wait loop here as we believe that if the CT
679 	 * buffers are sized correctly the flow control condition should be
680 	 * rare. We reserve the maximum size in the G2H credits as we don't
681 	 * know how big the response is going to be.
682 	 */
683 retry:
684 	spin_lock_irqsave(&ctb->lock, flags);
685 	if (unlikely(!h2g_has_room(ct, len + GUC_CTB_HDR_LEN) ||
686 		     !g2h_has_room(ct, GUC_CTB_HXG_MSG_MAX_LEN))) {
687 		if (ct->stall_time == KTIME_MAX)
688 			ct->stall_time = ktime_get();
689 		spin_unlock_irqrestore(&ctb->lock, flags);
690 
691 		if (unlikely(ct_deadlocked(ct)))
692 			return -EPIPE;
693 
694 		if (msleep_interruptible(sleep_period_ms))
695 			return -EINTR;
696 		sleep_period_ms = sleep_period_ms << 1;
697 
698 		goto retry;
699 	}
700 
701 	ct->stall_time = KTIME_MAX;
702 
703 	fence = ct_get_next_fence(ct);
704 	request.fence = fence;
705 	request.status = 0;
706 	request.response_len = response_buf_size;
707 	request.response_buf = response_buf;
708 
709 	spin_lock(&ct->requests.lock);
710 	list_add_tail(&request.link, &ct->requests.pending);
711 	spin_unlock(&ct->requests.lock);
712 
713 	err = ct_write(ct, action, len, fence, 0);
714 	g2h_reserve_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
715 
716 	spin_unlock_irqrestore(&ctb->lock, flags);
717 
718 	if (unlikely(err))
719 		goto unlink;
720 
721 	intel_guc_notify(ct_to_guc(ct));
722 
723 	err = wait_for_ct_request_update(&request, status);
724 	g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
725 	if (unlikely(err))
726 		goto unlink;
727 
728 	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, *status) != GUC_HXG_TYPE_RESPONSE_SUCCESS) {
729 		err = -EIO;
730 		goto unlink;
731 	}
732 
733 	if (response_buf) {
734 		/* There shall be no data in the status */
735 		WARN_ON(FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, request.status));
736 		/* Return actual response len */
737 		err = request.response_len;
738 	} else {
739 		/* There shall be no response payload */
740 		WARN_ON(request.response_len);
741 		/* Return data decoded from the status dword */
742 		err = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, *status);
743 	}
744 
745 unlink:
746 	spin_lock_irqsave(&ct->requests.lock, flags);
747 	list_del(&request.link);
748 	spin_unlock_irqrestore(&ct->requests.lock, flags);
749 
750 	return err;
751 }
752 
753 /*
754  * Command Transport (CT) buffer based GuC send function.
755  */
756 int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
757 		      u32 *response_buf, u32 response_buf_size, u32 flags)
758 {
759 	u32 status = ~0; /* undefined */
760 	int ret;
761 
762 	if (unlikely(!ct->enabled)) {
763 		struct intel_guc *guc = ct_to_guc(ct);
764 		struct intel_uc *uc = container_of(guc, struct intel_uc, guc);
765 
766 		WARN(!uc->reset_in_progress, "Unexpected send: action=%#x\n", *action);
767 		return -ENODEV;
768 	}
769 
770 	if (unlikely(ct->ctbs.send.broken))
771 		return -EPIPE;
772 
773 	if (flags & INTEL_GUC_CT_SEND_NB)
774 		return ct_send_nb(ct, action, len, flags);
775 
776 	ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
777 	if (unlikely(ret < 0)) {
778 		CT_ERROR(ct, "Sending action %#x failed (err=%d status=%#X)\n",
779 			 action[0], ret, status);
780 	} else if (unlikely(ret)) {
781 		CT_DEBUG(ct, "send action %#x returned %d (%#x)\n",
782 			 action[0], ret, ret);
783 	}
784 
785 	return ret;
786 }
787 
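/*
 * Message containers are allocated with GFP_ATOMIC since ct_read() runs
 * under the recv spinlock from the receive tasklet / IRQ path.
 */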
788 static struct ct_incoming_msg *ct_alloc_msg(u32 num_dwords)
789 {
790 	struct ct_incoming_msg *msg;
791 
792 	msg = kmalloc(sizeof(*msg) + sizeof(u32) * num_dwords, GFP_ATOMIC);
793 	if (msg)
794 		msg->size = num_dwords;
795 	return msg;
796 }
797 
798 static void ct_free_msg(struct ct_incoming_msg *msg)
799 {
800 	kfree(msg);
801 }
802 
803 /*
804  * Return: number of remaining dwords available to read (0 if empty)
805  *         or a negative error code on failure
806  */
807 static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg)
808 {
809 	struct intel_guc_ct_buffer *ctb = &ct->ctbs.recv;
810 	struct guc_ct_buffer_desc *desc = ctb->desc;
811 	u32 head = ctb->head;
812 	u32 tail = READ_ONCE(desc->tail);
813 	u32 size = ctb->size;
814 	u32 *cmds = ctb->cmds;
815 	s32 available;
816 	unsigned int len;
817 	unsigned int i;
818 	u32 header;
819 
820 	if (unlikely(ctb->broken))
821 		return -EPIPE;
822 
823 	if (unlikely(desc->status))
824 		goto corrupted;
825 
826 	GEM_BUG_ON(head > size);
827 
828 #ifdef CONFIG_DRM_I915_DEBUG_GUC
829 	if (unlikely(head != READ_ONCE(desc->head))) {
830 		CT_ERROR(ct, "Head was modified %u != %u\n",
831 			 desc->head, head);
832 		desc->status |= GUC_CTB_STATUS_MISMATCH;
833 		goto corrupted;
834 	}
835 #endif
836 	if (unlikely(tail >= size)) {
837 		CT_ERROR(ct, "Invalid tail offset %u >= %u\n",
838 			 tail, size);
839 		desc->status |= GUC_CTB_STATUS_OVERFLOW;
840 		goto corrupted;
841 	}
842 
843 	/* tail == head condition indicates empty */
844 	available = tail - head;
845 	if (unlikely(available == 0)) {
846 		*msg = NULL;
847 		return 0;
848 	}
849 
850 	/* beware of buffer wrap case */
851 	if (unlikely(available < 0))
852 		available += size;
853 	CT_DEBUG(ct, "available %d (%u:%u:%u)\n", available, head, tail, size);
854 	GEM_BUG_ON(available < 0);
855 
856 	header = cmds[head];
857 	head = (head + 1) % size;
858 
859 	/* message len with header */
860 	len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, header) + GUC_CTB_MSG_MIN_LEN;
861 	if (unlikely(len > (u32)available)) {
862 		CT_ERROR(ct, "Incomplete message %*ph %*ph %*ph\n",
863 			 4, &header,
864 			 4 * (head + available - 1 > size ?
865 			      size - head : available - 1), &cmds[head],
866 			 4 * (head + available - 1 > size ?
867 			      available - 1 - size + head : 0), &cmds[0]);
868 		desc->status |= GUC_CTB_STATUS_UNDERFLOW;
869 		goto corrupted;
870 	}
871 
872 	*msg = ct_alloc_msg(len);
873 	if (!*msg) {
874 		CT_ERROR(ct, "No memory for message %*ph %*ph %*ph\n",
875 			 4, &header,
876 			 4 * (head + available - 1 > size ?
877 			      size - head : available - 1), &cmds[head],
878 			 4 * (head + available - 1 > size ?
879 			      available - 1 - size + head : 0), &cmds[0]);
880 		return available;
881 	}
882 
883 	(*msg)->msg[0] = header;
884 
885 	for (i = 1; i < len; i++) {
886 		(*msg)->msg[i] = cmds[head];
887 		head = (head + 1) % size;
888 	}
889 	CT_DEBUG(ct, "received %*ph\n", 4 * len, (*msg)->msg);
890 
891 	/* update local copies */
892 	ctb->head = head;
893 
894 	/* now update descriptor */
895 	WRITE_ONCE(desc->head, head);
896 
897 	return available - len;
898 
899 corrupted:
900 	CT_ERROR(ct, "Corrupted descriptor head=%u tail=%u status=%#x\n",
901 		 desc->head, desc->tail, desc->status);
902 	ctb->broken = true;
903 	return -EPIPE;
904 }
905 
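/*
 * Match a RESPONSE_SUCCESS/FAILURE message against the pending request with
 * the same fence: copy any payload into the caller's response buffer, then
 * publish the HXG header through req->status so that
 * wait_for_ct_request_update() can complete.
 */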
906 static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *response)
907 {
908 	u32 len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, response->msg[0]);
909 	u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, response->msg[0]);
910 	const u32 *hxg = &response->msg[GUC_CTB_MSG_MIN_LEN];
911 	const u32 *data = &hxg[GUC_HXG_MSG_MIN_LEN];
912 	u32 datalen = len - GUC_HXG_MSG_MIN_LEN;
913 	struct ct_request *req;
914 	unsigned long flags;
915 	bool found = false;
916 	int err = 0;
917 
918 	GEM_BUG_ON(len < GUC_HXG_MSG_MIN_LEN);
919 	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]) != GUC_HXG_ORIGIN_GUC);
920 	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_RESPONSE_SUCCESS &&
921 		   FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_RESPONSE_FAILURE);
922 
923 	CT_DEBUG(ct, "response fence %u status %#x\n", fence, hxg[0]);
924 
925 	spin_lock_irqsave(&ct->requests.lock, flags);
926 	list_for_each_entry(req, &ct->requests.pending, link) {
927 		if (unlikely(fence != req->fence)) {
928 			CT_DEBUG(ct, "request %u awaits response\n",
929 				 req->fence);
930 			continue;
931 		}
932 		if (unlikely(datalen > req->response_len)) {
933 			CT_ERROR(ct, "Response %u too long (datalen %u > %u)\n",
934 				 req->fence, datalen, req->response_len);
935 			datalen = min(datalen, req->response_len);
936 			err = -EMSGSIZE;
937 		}
938 		if (datalen)
939 			memcpy(req->response_buf, data, 4 * datalen);
940 		req->response_len = datalen;
941 		WRITE_ONCE(req->status, hxg[0]);
942 		found = true;
943 		break;
944 	}
945 	if (!found) {
946 		CT_ERROR(ct, "Unsolicited response (fence %u)\n", fence);
947 		CT_ERROR(ct, "Could not find fence=%u, last_fence=%u\n", fence,
948 			 ct->requests.last_fence);
949 		list_for_each_entry(req, &ct->requests.pending, link)
950 			CT_ERROR(ct, "request %u awaits response\n",
951 				 req->fence);
952 		err = -ENOKEY;
953 	}
954 	spin_unlock_irqrestore(&ct->requests.lock, flags);
955 
956 	if (unlikely(err))
957 		return err;
958 
959 	ct_free_msg(response);
960 	return 0;
961 }
962 
963 static int ct_process_request(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
964 {
965 	struct intel_guc *guc = ct_to_guc(ct);
966 	const u32 *hxg;
967 	const u32 *payload;
968 	u32 hxg_len, action, len;
969 	int ret;
970 
971 	hxg = &request->msg[GUC_CTB_MSG_MIN_LEN];
972 	hxg_len = request->size - GUC_CTB_MSG_MIN_LEN;
973 	payload = &hxg[GUC_HXG_MSG_MIN_LEN];
974 	action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
975 	len = hxg_len - GUC_HXG_MSG_MIN_LEN;
976 
977 	CT_DEBUG(ct, "request %x %*ph\n", action, 4 * len, payload);
978 
979 	switch (action) {
980 	case INTEL_GUC_ACTION_DEFAULT:
981 		ret = intel_guc_to_host_process_recv_msg(guc, payload, len);
982 		break;
983 	case INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
984 		ret = intel_guc_deregister_done_process_msg(guc, payload,
985 							    len);
986 		break;
987 	case INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
988 		ret = intel_guc_sched_done_process_msg(guc, payload, len);
989 		break;
990 	case INTEL_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
991 		ret = intel_guc_context_reset_process_msg(guc, payload, len);
992 		break;
993 	case INTEL_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
994 		ret = intel_guc_engine_failure_process_msg(guc, payload, len);
995 		break;
996 	default:
997 		ret = -EOPNOTSUPP;
998 		break;
999 	}
1000 
1001 	if (unlikely(ret)) {
1002 		CT_ERROR(ct, "Failed to process request %04x (%pe)\n",
1003 			 action, ERR_PTR(ret));
1004 		return ret;
1005 	}
1006 
1007 	ct_free_msg(request);
1008 	return 0;
1009 }
1010 
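/*
 * Dequeue and process a single request from the incoming list.
 * Return: true when there is nothing left to process, so the caller knows
 * whether the worker needs to be re-queued.
 */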
1011 static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
1012 {
1013 	unsigned long flags;
1014 	struct ct_incoming_msg *request;
1015 	bool done;
1016 	int err;
1017 
1018 	spin_lock_irqsave(&ct->requests.lock, flags);
1019 	request = list_first_entry_or_null(&ct->requests.incoming,
1020 					   struct ct_incoming_msg, link);
1021 	if (request)
1022 		list_del(&request->link);
1023 	done = !!list_empty(&ct->requests.incoming);
1024 	spin_unlock_irqrestore(&ct->requests.lock, flags);
1025 
1026 	if (!request)
1027 		return true;
1028 
1029 	err = ct_process_request(ct, request);
1030 	if (unlikely(err)) {
1031 		CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n",
1032 			 ERR_PTR(err), 4 * request->size, request->msg);
1033 		ct_free_msg(request);
1034 	}
1035 
1036 	return done;
1037 }
1038 
1039 static void ct_incoming_request_worker_func(struct work_struct *w)
1040 {
1041 	struct intel_guc_ct *ct =
1042 		container_of(w, struct intel_guc_ct, requests.worker);
1043 	bool done;
1044 
1045 	done = ct_process_incoming_requests(ct);
1046 	if (!done)
1047 		queue_work(system_unbound_wq, &ct->requests.worker);
1048 }
1049 
1050 static int ct_handle_event(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
1051 {
1052 	const u32 *hxg = &request->msg[GUC_CTB_MSG_MIN_LEN];
1053 	u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1054 	unsigned long flags;
1055 
1056 	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT);
1057 
1058 	/*
1059 	 * Adjusting the space must be done in IRQ context or a deadlock can
1060 	 * occur, as the CTB processing in the workqueue below can send CTBs,
1061 	 * which creates a circular dependency if the space were returned there.
1062 	 */
1063 	switch (action) {
1064 	case INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
1065 	case INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
1066 		g2h_release_space(ct, request->size);
1067 	}
1068 
1069 	spin_lock_irqsave(&ct->requests.lock, flags);
1070 	list_add_tail(&request->link, &ct->requests.incoming);
1071 	spin_unlock_irqrestore(&ct->requests.lock, flags);
1072 
1073 	queue_work(system_unbound_wq, &ct->requests.worker);
1074 	return 0;
1075 }
1076 
1077 static int ct_handle_hxg(struct intel_guc_ct *ct, struct ct_incoming_msg *msg)
1078 {
1079 	u32 origin, type;
1080 	u32 *hxg;
1081 	int err;
1082 
1083 	if (unlikely(msg->size < GUC_CTB_HXG_MSG_MIN_LEN))
1084 		return -EBADMSG;
1085 
1086 	hxg = &msg->msg[GUC_CTB_MSG_MIN_LEN];
1087 
1088 	origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]);
1089 	if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
1090 		err = -EPROTO;
1091 		goto failed;
1092 	}
1093 
1094 	type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
1095 	switch (type) {
1096 	case GUC_HXG_TYPE_EVENT:
1097 		err = ct_handle_event(ct, msg);
1098 		break;
1099 	case GUC_HXG_TYPE_RESPONSE_SUCCESS:
1100 	case GUC_HXG_TYPE_RESPONSE_FAILURE:
1101 		err = ct_handle_response(ct, msg);
1102 		break;
1103 	default:
1104 		err = -EOPNOTSUPP;
1105 	}
1106 
1107 	if (unlikely(err)) {
1108 failed:
1109 		CT_ERROR(ct, "Failed to handle HXG message (%pe) %*ph\n",
1110 			 ERR_PTR(err), 4 * GUC_HXG_MSG_MIN_LEN, hxg);
1111 	}
1112 	return err;
1113 }
1114 
1115 static void ct_handle_msg(struct intel_guc_ct *ct, struct ct_incoming_msg *msg)
1116 {
1117 	u32 format = FIELD_GET(GUC_CTB_MSG_0_FORMAT, msg->msg[0]);
1118 	int err;
1119 
1120 	if (format == GUC_CTB_FORMAT_HXG)
1121 		err = ct_handle_hxg(ct, msg);
1122 	else
1123 		err = -EOPNOTSUPP;
1124 
1125 	if (unlikely(err)) {
1126 		CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n",
1127 			 ERR_PTR(err), 4 * msg->size, msg->msg);
1128 		ct_free_msg(msg);
1129 	}
1130 }
1131 
1132 /*
1133  * Return: number of remaining dwords available to read (0 if empty)
1134  *         or a negative error code on failure
1135  */
1136 static int ct_receive(struct intel_guc_ct *ct)
1137 {
1138 	struct ct_incoming_msg *msg = NULL;
1139 	unsigned long flags;
1140 	int ret;
1141 
1142 	spin_lock_irqsave(&ct->ctbs.recv.lock, flags);
1143 	ret = ct_read(ct, &msg);
1144 	spin_unlock_irqrestore(&ct->ctbs.recv.lock, flags);
1145 	if (ret < 0)
1146 		return ret;
1147 
1148 	if (msg)
1149 		ct_handle_msg(ct, msg);
1150 
1151 	return ret;
1152 }
1153 
1154 static void ct_try_receive_message(struct intel_guc_ct *ct)
1155 {
1156 	int ret;
1157 
1158 	if (GEM_WARN_ON(!ct->enabled))
1159 		return;
1160 
1161 	ret = ct_receive(ct);
1162 	if (ret > 0)
1163 		tasklet_hi_schedule(&ct->receive_tasklet);
1164 }
1165 
1166 static void ct_receive_tasklet_func(struct tasklet_struct *t)
1167 {
1168 	struct intel_guc_ct *ct = from_tasklet(ct, t, receive_tasklet);
1169 
1170 	ct_try_receive_message(ct);
1171 }
1172 
1173 /*
1174  * When we're communicating with the GuC over CT, GuC uses events
1175  * to notify us about new messages being posted on the RECV buffer.
1176  */
1177 void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
1178 {
1179 	if (unlikely(!ct->enabled)) {
1180 		WARN(1, "Unexpected GuC event received while CT disabled!\n");
1181 		return;
1182 	}
1183 
1184 	ct_try_receive_message(ct);
1185 }
1186 
1187 void intel_guc_ct_print_info(struct intel_guc_ct *ct,
1188 			     struct drm_printer *p)
1189 {
1190 	drm_printf(p, "CT %s\n", enableddisabled(ct->enabled));
1191 
1192 	if (!ct->enabled)
1193 		return;
1194 
1195 	drm_printf(p, "H2G Space: %u\n",
1196 		   atomic_read(&ct->ctbs.send.space) * 4);
1197 	drm_printf(p, "Head: %u\n",
1198 		   ct->ctbs.send.desc->head);
1199 	drm_printf(p, "Tail: %u\n",
1200 		   ct->ctbs.send.desc->tail);
1201 	drm_printf(p, "G2H Space: %u\n",
1202 		   atomic_read(&ct->ctbs.recv.space) * 4);
1203 	drm_printf(p, "Head: %u\n",
1204 		   ct->ctbs.recv.desc->head);
1205 	drm_printf(p, "Tail: %u\n",
1206 		   ct->ctbs.recv.desc->tail);
1207 }
1208