xref: /linux/drivers/gpu/drm/xe/xe_pxp_submit.c (revision 55a42f78ffd386e01a5404419f8c5ded7db70a21)
// SPDX-License-Identifier: MIT
/*
 * Copyright(c) 2024 Intel Corporation.
 */

#include "xe_pxp_submit.h"

#include <linux/delay.h>
#include <uapi/drm/xe_drm.h>

#include "abi/gsc_command_header_abi.h"
#include "abi/gsc_pxp_commands_abi.h"
#include "instructions/xe_gsc_commands.h"
#include "instructions/xe_mfx_commands.h"
#include "instructions/xe_mi_commands.h"
#include "xe_bb.h"
#include "xe_bo.h"
#include "xe_device_types.h"
#include "xe_exec_queue.h"
#include "xe_gsc_submit.h"
#include "xe_gt.h"
#include "xe_lrc.h"
#include "xe_map.h"
#include "xe_pxp.h"
#include "xe_pxp_types.h"
#include "xe_sched_job.h"
#include "xe_vm.h"

/*
 * The VCS is used for kernel-owned GGTT submissions to issue key termination.
 * Terminations are serialized, so we only need a single queue and a single
 * batch.
 */
static int allocate_vcs_execution_resources(struct xe_pxp *pxp)
{
	struct xe_gt *gt = pxp->gt;
	struct xe_device *xe = pxp->xe;
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_hw_engine *hwe;
	struct xe_exec_queue *q;
	struct xe_bo *bo;
	int err;

	hwe = xe_gt_hw_engine(gt, XE_ENGINE_CLASS_VIDEO_DECODE, 0, true);
	if (!hwe)
		return -ENODEV;

	q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1, hwe,
				 EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_PERMANENT, 0);
	if (IS_ERR(q))
		return PTR_ERR(q);

	/*
	 * Each termination is 16 DWORDS, so 4K is enough to contain a
	 * termination for each session.
	 */
	bo = xe_bo_create_pin_map_novm(xe, tile, SZ_4K, ttm_bo_type_kernel,
				       XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED | XE_BO_FLAG_GGTT,
				       false);
	if (IS_ERR(bo)) {
		err = PTR_ERR(bo);
		goto out_queue;
	}

	pxp->vcs_exec.q = q;
	pxp->vcs_exec.bo = bo;

	return 0;

out_queue:
	xe_exec_queue_put(q);
	return err;
}

static void destroy_vcs_execution_resources(struct xe_pxp *pxp)
{
	if (pxp->vcs_exec.bo)
		xe_bo_unpin_map_no_vm(pxp->vcs_exec.bo);

	if (pxp->vcs_exec.q)
		xe_exec_queue_put(pxp->vcs_exec.q);
}

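/*
 * The GSC client resources consist of a GSC-specific VM, a permanent kernel
 * queue on the GSCCS and a single BO shared between the batch buffer and the
 * in/out message areas.
 */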
#define PXP_BB_SIZE		XE_PAGE_SIZE
static int allocate_gsc_client_resources(struct xe_gt *gt,
					 struct xe_pxp_gsc_client_resources *gsc_res,
					 size_t inout_size)
{
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_validation_ctx ctx;
	struct xe_hw_engine *hwe;
	struct drm_exec exec;
	struct xe_vm *vm;
	struct xe_bo *bo;
	struct xe_exec_queue *q;
	struct dma_fence *fence;
	long timeout;
	int err = 0;

	hwe = xe_gt_hw_engine(gt, XE_ENGINE_CLASS_OTHER, 0, true);

	/* we shouldn't reach here if the GSC engine is not available */
	xe_assert(xe, hwe);

	/* PXP instructions must be issued from PPGTT */
	vm = xe_vm_create(xe, XE_VM_FLAG_GSC, NULL);
	if (IS_ERR(vm))
		return PTR_ERR(vm);

	/* We allocate a single object for the batch and the in/out memory */
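	/*
	 * Layout of the BO, matching the iosys_map offsets set up below:
	 *   [0, PXP_BB_SIZE)                        batch
	 *   [PXP_BB_SIZE, PXP_BB_SIZE + inout_size) msg_in
	 *   [PXP_BB_SIZE + inout_size, PXP_BB_SIZE + 2 * inout_size) msg_out
	 */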

	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags){}, err) {
		err = xe_vm_drm_exec_lock(vm, &exec);
		drm_exec_retry_on_contention(&exec);
		if (err)
			break;

		bo = xe_bo_create_pin_map(xe, tile, vm, PXP_BB_SIZE + inout_size * 2,
					  ttm_bo_type_kernel,
					  XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED |
					  XE_BO_FLAG_NEEDS_UC, &exec);
		drm_exec_retry_on_contention(&exec);
		if (IS_ERR(bo)) {
			err = PTR_ERR(bo);
			xe_validation_retry_on_oom(&ctx, &err);
			break;
		}
	}
	if (err)
		goto vm_out;

	fence = xe_vm_bind_kernel_bo(vm, bo, NULL, 0, XE_CACHE_WB);
	if (IS_ERR(fence)) {
		err = PTR_ERR(fence);
		goto bo_out;
	}

	timeout = dma_fence_wait_timeout(fence, false, HZ);
	dma_fence_put(fence);
	if (timeout <= 0) {
		err = timeout ?: -ETIME;
		goto bo_out;
	}

	q = xe_exec_queue_create(xe, vm, BIT(hwe->logical_instance), 1, hwe,
				 EXEC_QUEUE_FLAG_KERNEL |
				 EXEC_QUEUE_FLAG_PERMANENT, 0);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto bo_out;
	}

	gsc_res->vm = vm;
	gsc_res->bo = bo;
	gsc_res->inout_size = inout_size;
	gsc_res->batch = IOSYS_MAP_INIT_OFFSET(&bo->vmap, 0);
	gsc_res->msg_in = IOSYS_MAP_INIT_OFFSET(&bo->vmap, PXP_BB_SIZE);
	gsc_res->msg_out = IOSYS_MAP_INIT_OFFSET(&bo->vmap, PXP_BB_SIZE + inout_size);
	gsc_res->q = q;

	/* initialize host-session-handle (for all Xe-to-gsc-firmware PXP cmds) */
	gsc_res->host_session_handle = xe_gsc_create_host_session_id();

	return 0;

bo_out:
	xe_bo_unpin_map_no_vm(bo);
vm_out:
	xe_vm_close_and_put(vm);

	return err;
}

static void destroy_gsc_client_resources(struct xe_pxp_gsc_client_resources *gsc_res)
{
	if (!gsc_res->q)
		return;

	xe_exec_queue_put(gsc_res->q);
	xe_bo_unpin_map_no_vm(gsc_res->bo);
	xe_vm_close_and_put(gsc_res->vm);
}

/**
 * xe_pxp_allocate_execution_resources - Allocate PXP submission objects
 * @pxp: the xe_pxp structure
 *
 * Allocates exec_queue objects for VCS and GSCCS submission. The GSCCS
 * submissions are done via PPGTT, so this function allocates a VM for it and
 * maps the object into it.
 *
 * Returns 0 if the allocation and mapping are successful, an errno value
 * otherwise.
 */
int xe_pxp_allocate_execution_resources(struct xe_pxp *pxp)
{
	int err;

	err = allocate_vcs_execution_resources(pxp);
	if (err)
		return err;

	/*
	 * PXP commands can require a lot of BO space (see PXP_MAX_PACKET_SIZE),
	 * but we currently only support a subset of commands that are small
	 * (< 20 dwords), so a single page is enough for now.
	 */
	err = allocate_gsc_client_resources(pxp->gt, &pxp->gsc_res, XE_PAGE_SIZE);
	if (err)
		goto destroy_vcs_context;

	return 0;

destroy_vcs_context:
	destroy_vcs_execution_resources(pxp);
	return err;
}

void xe_pxp_destroy_execution_resources(struct xe_pxp *pxp)
{
	destroy_gsc_client_resources(&pxp->gsc_res);
	destroy_vcs_execution_resources(pxp);
}

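/* offset_ is in dwords; xe_map_wr() takes the offset in bytes */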
#define emit_cmd(xe_, map_, offset_, val_) \
	xe_map_wr(xe_, map_, (offset_) * sizeof(u32), u32, val_)

/* stall until prior PXP and MFX/HCP/HUC objects are completed */
#define MFX_WAIT_PXP (MFX_WAIT | \
		      MFX_WAIT_DW0_PXP_SYNC_CONTROL_FLAG | \
		      MFX_WAIT_DW0_MFX_SYNC_CONTROL_FLAG)
static u32 pxp_emit_wait(struct xe_device *xe, struct iosys_map *batch, u32 offset)
{
	/* wait for cmds to go through */
	emit_cmd(xe, batch, offset++, MFX_WAIT_PXP);
	emit_cmd(xe, batch, offset++, 0);

	return offset;
}

static u32 pxp_emit_session_selection(struct xe_device *xe, struct iosys_map *batch,
				      u32 offset, u32 idx)
{
	offset = pxp_emit_wait(xe, batch, offset);

	/* pxp off */
	emit_cmd(xe, batch, offset++, MI_FLUSH_DW | MI_FLUSH_IMM_DW);
	emit_cmd(xe, batch, offset++, 0);
	emit_cmd(xe, batch, offset++, 0);
	emit_cmd(xe, batch, offset++, 0);

	/* select session */
	emit_cmd(xe, batch, offset++, MI_SET_APPID | MI_SET_APPID_SESSION_ID(idx));
	emit_cmd(xe, batch, offset++, 0);

	offset = pxp_emit_wait(xe, batch, offset);

	/* pxp on */
	emit_cmd(xe, batch, offset++, MI_FLUSH_DW |
				      MI_FLUSH_DW_PROTECTED_MEM_EN |
				      MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX |
				      MI_FLUSH_IMM_DW);
	emit_cmd(xe, batch, offset++, LRC_PPHWSP_PXP_INVAL_SCRATCH_ADDR |
				      MI_FLUSH_DW_USE_GTT);
	emit_cmd(xe, batch, offset++, 0);
	emit_cmd(xe, batch, offset++, 0);

	offset = pxp_emit_wait(xe, batch, offset);

	return offset;
}

static u32 pxp_emit_inline_termination(struct xe_device *xe,
				       struct iosys_map *batch, u32 offset)
{
	/* session inline termination */
	emit_cmd(xe, batch, offset++, CRYPTO_KEY_EXCHANGE);
	emit_cmd(xe, batch, offset++, 0);

	return offset;
}

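/*
 * A complete termination batch is therefore: wait, flush (PXP off),
 * MI_SET_APPID, wait, flush (PXP on), wait, CRYPTO_KEY_EXCHANGE, plus the
 * final wait and MI_BATCH_BUFFER_END added by the submission path.
 */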
static u32 pxp_emit_session_termination(struct xe_device *xe, struct iosys_map *batch,
					u32 offset, u32 idx)
{
	offset = pxp_emit_session_selection(xe, batch, offset, idx);
	offset = pxp_emit_inline_termination(xe, batch, offset);

	return offset;
}

/**
 * xe_pxp_submit_session_termination - submits a PXP inline termination
 * @pxp: the xe_pxp structure
 * @id: the session to terminate
 *
 * Emit an inline termination via the VCS engine to terminate a session.
 *
 * Returns 0 if the submission is successful, an errno value otherwise.
 */
int xe_pxp_submit_session_termination(struct xe_pxp *pxp, u32 id)
{
	struct xe_sched_job *job;
	struct dma_fence *fence;
	long timeout;
	u32 offset = 0;
	u64 addr = xe_bo_ggtt_addr(pxp->vcs_exec.bo);

	offset = pxp_emit_session_termination(pxp->xe, &pxp->vcs_exec.bo->vmap, offset, id);
	offset = pxp_emit_wait(pxp->xe, &pxp->vcs_exec.bo->vmap, offset);
	emit_cmd(pxp->xe, &pxp->vcs_exec.bo->vmap, offset, MI_BATCH_BUFFER_END);

	job = xe_sched_job_create(pxp->vcs_exec.q, &addr);
	if (IS_ERR(job))
		return PTR_ERR(job);

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	timeout = dma_fence_wait_timeout(fence, false, HZ);

	dma_fence_put(fence);

	if (!timeout)
		return -ETIMEDOUT;
	else if (timeout < 0)
		return timeout;

	return 0;
}

static bool
is_fw_err_platform_config(u32 type)
{
	switch (type) {
	case PXP_STATUS_ERROR_API_VERSION:
	case PXP_STATUS_PLATFCONFIG_KF1_NOVERIF:
	case PXP_STATUS_PLATFCONFIG_KF1_BAD:
	case PXP_STATUS_PLATFCONFIG_FIXED_KF1_NOT_SUPPORTED:
		return true;
	default:
		break;
	}
	return false;
}

static const char *
fw_err_to_string(u32 type)
{
	switch (type) {
	case PXP_STATUS_ERROR_API_VERSION:
		return "ERR_API_VERSION";
	case PXP_STATUS_NOT_READY:
		return "ERR_NOT_READY";
	case PXP_STATUS_PLATFCONFIG_KF1_NOVERIF:
	case PXP_STATUS_PLATFCONFIG_KF1_BAD:
	case PXP_STATUS_PLATFCONFIG_FIXED_KF1_NOT_SUPPORTED:
		return "ERR_PLATFORM_CONFIG";
	default:
		break;
	}
	return NULL;
}

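/* Submit a batch to the GSCCS queue and wait up to 1s (HZ jiffies) for completion */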
static int pxp_pkt_submit(struct xe_exec_queue *q, u64 batch_addr)
{
	struct xe_gt *gt = q->gt;
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_sched_job *job;
	struct dma_fence *fence;
	long timeout;

	xe_assert(xe, q->hwe->engine_id == XE_HW_ENGINE_GSCCS0);

	job = xe_sched_job_create(q, &batch_addr);
	if (IS_ERR(job))
		return PTR_ERR(job);

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	timeout = dma_fence_wait_timeout(fence, false, HZ);
	dma_fence_put(fence);
	if (timeout < 0)
		return timeout;
	else if (!timeout)
		return -ETIME;

	return 0;
}

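/*
 * GSC_HECI_CMD_PKT carries the 64b in/out message addresses (each split
 * across two dwords) and their sizes, plus a zeroed dword; the batch is
 * closed with MI_BATCH_BUFFER_END.
 */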
static void emit_pxp_heci_cmd(struct xe_device *xe, struct iosys_map *batch,
			      u64 addr_in, u32 size_in, u64 addr_out, u32 size_out)
{
	u32 len = 0;

	xe_map_wr(xe, batch, len++ * sizeof(u32), u32, GSC_HECI_CMD_PKT);
	xe_map_wr(xe, batch, len++ * sizeof(u32), u32, lower_32_bits(addr_in));
	xe_map_wr(xe, batch, len++ * sizeof(u32), u32, upper_32_bits(addr_in));
	xe_map_wr(xe, batch, len++ * sizeof(u32), u32, size_in);
	xe_map_wr(xe, batch, len++ * sizeof(u32), u32, lower_32_bits(addr_out));
	xe_map_wr(xe, batch, len++ * sizeof(u32), u32, upper_32_bits(addr_out));
	xe_map_wr(xe, batch, len++ * sizeof(u32), u32, size_out);
	xe_map_wr(xe, batch, len++ * sizeof(u32), u32, 0);
	xe_map_wr(xe, batch, len++ * sizeof(u32), u32, MI_BATCH_BUFFER_END);
}

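/*
 * Worst-case wait for a reply that the GSC FW keeps flagging as pending:
 * GSC_PENDING_RETRY_MAXCOUNT * GSC_PENDING_RETRY_PAUSE_MS = 40 * 50ms = 2s.
 */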
#define GSC_PENDING_RETRY_MAXCOUNT 40
#define GSC_PENDING_RETRY_PAUSE_MS 50
static int gsccs_send_message(struct xe_pxp_gsc_client_resources *gsc_res,
			      void *msg_in, size_t msg_in_size,
			      void *msg_out, size_t msg_out_size_max)
{
	struct xe_device *xe = gsc_res->vm->xe;
	const size_t max_msg_size = gsc_res->inout_size - sizeof(struct intel_gsc_mtl_header);
	u32 wr_offset;
	u32 rd_offset;
	u32 reply_size;
	u32 min_reply_size = 0;
	int ret;
	int retry = GSC_PENDING_RETRY_MAXCOUNT;

	if (msg_in_size > max_msg_size || msg_out_size_max > max_msg_size)
		return -ENOSPC;

	wr_offset = xe_gsc_emit_header(xe, &gsc_res->msg_in, 0,
				       HECI_MEADDRESS_PXP,
				       gsc_res->host_session_handle,
				       msg_in_size);

	/*
	 * NOTE: zero-size packets are used for session-cleanups; for those no
	 * payload is copied in, min_reply_size stays 0 and therefore no reply
	 * contents are copied back out below either.
	 */
	if (msg_in && msg_in_size) {
		xe_map_memcpy_to(xe, &gsc_res->msg_in, wr_offset,
				 msg_in, msg_in_size);
		min_reply_size = sizeof(struct pxp_cmd_header);
	}

	/* Make sure the reply header does not contain stale data */
	xe_gsc_poison_header(xe, &gsc_res->msg_out, 0);

	/*
	 * The BO is mapped at address 0 of the PPGTT, so no need to add its
	 * base offset when calculating the in/out addresses.
	 */
	emit_pxp_heci_cmd(xe, &gsc_res->batch, PXP_BB_SIZE,
			  wr_offset + msg_in_size, PXP_BB_SIZE + gsc_res->inout_size,
			  wr_offset + msg_out_size_max);

	xe_device_wmb(xe);

	/*
	 * If the GSC needs to communicate with CSME to complete our request,
	 * it'll set the "pending" flag in the return header. In this scenario
	 * we're expected to wait 50ms to give some time to the proxy code to
	 * handle the GSC<->CSME communication and then try again. Note that,
	 * although in most cases the 50ms window is enough, the proxy flow is
	 * not actually guaranteed to complete within that time period, so we
	 * might have to try multiple times, up to a worst case of 2 seconds,
	 * after which the request is considered aborted.
	 */
	do {
		ret = pxp_pkt_submit(gsc_res->q, 0);
		if (ret)
			break;

		if (xe_gsc_check_and_update_pending(xe, &gsc_res->msg_in, 0,
						    &gsc_res->msg_out, 0)) {
			ret = -EAGAIN;
			msleep(GSC_PENDING_RETRY_PAUSE_MS);
		}
	} while (--retry && ret == -EAGAIN);

	if (ret) {
		drm_err(&xe->drm, "failed to submit GSC PXP message (%pe)\n", ERR_PTR(ret));
		return ret;
	}

	ret = xe_gsc_read_out_header(xe, &gsc_res->msg_out, 0,
				     min_reply_size, &rd_offset);
	if (ret) {
		drm_err(&xe->drm, "invalid GSC reply for PXP (%pe)\n", ERR_PTR(ret));
		return ret;
	}

	if (msg_out && min_reply_size) {
		reply_size = xe_map_rd_field(xe, &gsc_res->msg_out, rd_offset,
					     struct pxp_cmd_header, buffer_len);
		reply_size += sizeof(struct pxp_cmd_header);

		if (reply_size > msg_out_size_max) {
			drm_warn(&xe->drm, "PXP reply size overflow: %u (%zu)\n",
				 reply_size, msg_out_size_max);
			reply_size = msg_out_size_max;
		}

		xe_map_memcpy_from(xe, msg_out, &gsc_res->msg_out,
				   rd_offset, reply_size);
	}

	xe_gsc_poison_header(xe, &gsc_res->msg_in, 0);

	return ret;
}

/**
 * xe_pxp_submit_session_init - submits a PXP GSC session initialization
 * @gsc_res: the pxp client resources
 * @id: the session to initialize
 *
 * Submit a message to the GSC FW to initialize (i.e. start) a PXP session.
 *
 * Returns 0 if the submission is successful, an errno value otherwise.
 */
int xe_pxp_submit_session_init(struct xe_pxp_gsc_client_resources *gsc_res, u32 id)
{
	struct xe_device *xe = gsc_res->vm->xe;
	struct pxp43_create_arb_in msg_in = {0};
	struct pxp43_create_arb_out msg_out = {0};
	int ret;

	msg_in.header.api_version = PXP_APIVER(4, 3);
	msg_in.header.command_id = PXP43_CMDID_INIT_SESSION;
	msg_in.header.stream_id = (FIELD_PREP(PXP43_INIT_SESSION_APPID, id) |
				   FIELD_PREP(PXP43_INIT_SESSION_VALID, 1) |
				   FIELD_PREP(PXP43_INIT_SESSION_APPTYPE, 0));
	msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header);

	if (id == DRM_XE_PXP_HWDRM_DEFAULT_SESSION)
		msg_in.protection_mode = PXP43_INIT_SESSION_PROTECTION_ARB;

	ret = gsccs_send_message(gsc_res, &msg_in, sizeof(msg_in),
				 &msg_out, sizeof(msg_out));
	if (ret) {
		drm_err(&xe->drm, "Failed to init PXP session %u (%pe)\n", id, ERR_PTR(ret));
	} else if (msg_out.header.status != 0) {
		ret = -EIO;

		if (is_fw_err_platform_config(msg_out.header.status))
			drm_info_once(&xe->drm,
				      "Failed to init PXP session %u due to BIOS/SOC, s=0x%x(%s)\n",
				      id, msg_out.header.status,
				      fw_err_to_string(msg_out.header.status));
		else
			drm_dbg(&xe->drm, "Failed to init PXP session %u, s=0x%x\n",
				id, msg_out.header.status);
	}

	return ret;
}

/**
 * xe_pxp_submit_session_invalidation - submits a PXP GSC invalidation
 * @gsc_res: the pxp client resources
 * @id: the session to invalidate
 *
 * Submit a message to the GSC FW to notify it that a session has been
 * terminated and is therefore invalid.
 *
 * Returns 0 if the submission is successful, an errno value otherwise.
 */
int xe_pxp_submit_session_invalidation(struct xe_pxp_gsc_client_resources *gsc_res, u32 id)
{
	struct xe_device *xe = gsc_res->vm->xe;
	struct pxp43_inv_stream_key_in msg_in = {0};
	struct pxp43_inv_stream_key_out msg_out = {0};
	int ret = 0;

	/*
	 * Stream key invalidation reuses the version 4.2 input/output command
	 * format, but the firmware requires 4.3 API interaction.
	 */
	msg_in.header.api_version = PXP_APIVER(4, 3);
	msg_in.header.command_id = PXP43_CMDID_INVALIDATE_STREAM_KEY;
	msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header);

	msg_in.header.stream_id = FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_VALID, 1);
	msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_APP_TYPE, 0);
	msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_ID, id);

	ret = gsccs_send_message(gsc_res, &msg_in, sizeof(msg_in),
				 &msg_out, sizeof(msg_out));
	if (ret) {
		drm_err(&xe->drm, "Failed to invalidate PXP stream-key %u (%pe)\n",
			id, ERR_PTR(ret));
	} else if (msg_out.header.status != 0) {
		ret = -EIO;

		if (is_fw_err_platform_config(msg_out.header.status))
			drm_info_once(&xe->drm,
				      "Failed to invalidate PXP stream-key %u: BIOS/SOC 0x%08x(%s)\n",
				      id, msg_out.header.status,
				      fw_err_to_string(msg_out.header.status));
		else
			drm_dbg(&xe->drm, "Failed to invalidate stream-key %u, s=0x%08x\n",
				id, msg_out.header.status);
	}

	return ret;
}
603