xref: /linux/drivers/gpu/drm/xe/xe_pxp.c (revision ca220141fa8ebae09765a242076b2b77338106b0)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright(c) 2024 Intel Corporation.
4  */
5 
6 #include "xe_pxp.h"
7 
8 #include <drm/drm_managed.h>
9 #include <uapi/drm/xe_drm.h>
10 
11 #include "xe_bo.h"
12 #include "xe_bo_types.h"
13 #include "xe_device_types.h"
14 #include "xe_exec_queue.h"
15 #include "xe_force_wake.h"
16 #include "xe_guc_submit.h"
17 #include "xe_gsc_proxy.h"
18 #include "xe_gt_types.h"
19 #include "xe_huc.h"
20 #include "xe_mmio.h"
21 #include "xe_pm.h"
22 #include "xe_pxp_submit.h"
23 #include "xe_pxp_types.h"
24 #include "xe_uc_fw.h"
25 #include "regs/xe_irq_regs.h"
26 #include "regs/xe_pxp_regs.h"
27 
28 /**
29  * DOC: PXP
30  *
31  * PXP (Protected Xe Path) allows execution and flip to display of protected
32  * (i.e. encrypted) objects. This feature is currently only supported in
33  * integrated parts.
34  */
35 
36 #define ARB_SESSION DRM_XE_PXP_HWDRM_DEFAULT_SESSION /* shorter define */
37 
38 /*
39  * A submission to GSC can take up to 250ms to complete, so use a 300ms
40  * timeout for activation where only one of those is involved. Termination
41  * additionally requires a submission to VCS and an interaction with KCR, so
42  * bump the timeout to 500ms for that.
43  */
44 #define PXP_ACTIVATION_TIMEOUT_MS 300
45 #define PXP_TERMINATION_TIMEOUT_MS 500
46 
47 bool xe_pxp_is_supported(const struct xe_device *xe)
48 {
49 	return xe->info.has_pxp && IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY);
50 }
51 
52 bool xe_pxp_is_enabled(const struct xe_pxp *pxp)
53 {
54 	return pxp;
55 }
56 
57 static bool pxp_prerequisites_done(const struct xe_pxp *pxp)
58 {
59 	struct xe_gt *gt = pxp->gt;
60 	bool ready;
61 
62 	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
63 
64 	/*
65 	 * If force_wake fails we could falsely report the prerequisites as not
66 	 * done even if they are; the consequence of this would be that the
67 	 * callers won't go ahead with using PXP, but if force_wake doesn't work
68 	 * the GT is very likely in a bad state so not really a problem to abort
69 	 * PXP. Therefore, we can just log the force_wake error and not escalate
70 	 * it.
71 	 */
72 	XE_WARN_ON(!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL));
73 
74 	/* PXP requires both HuC authentication via GSC and GSC proxy initialized */
75 	ready = xe_huc_is_authenticated(&gt->uc.huc, XE_HUC_AUTH_VIA_GSC) &&
76 		xe_gsc_proxy_init_done(&gt->uc.gsc);
77 
78 	return ready;
79 }
80 
81 /**
82  * xe_pxp_get_readiness_status - check whether PXP is ready for userspace use
83  * @pxp: the xe_pxp pointer (can be NULL if PXP is disabled)
84  *
85  * Returns: 0 if PXP is not ready yet, 1 if it is ready, a negative errno value
86  * if PXP is not supported/enabled or if something went wrong in the
87  * initialization of the prerequisites. Note that the return values of this
88  * function follow the uapi (see drm_xe_query_pxp_status), so they can be used
89  * directly in the query ioctl.
90  */
91 int xe_pxp_get_readiness_status(struct xe_pxp *pxp)
92 {
93 	int ret = 0;
94 
95 	if (!xe_pxp_is_enabled(pxp))
96 		return -ENODEV;
97 
98 	/* if the GSC or HuC FW are in an error state, PXP will never work */
99 	if (xe_uc_fw_status_to_error(pxp->gt->uc.huc.fw.status) ||
100 	    xe_uc_fw_status_to_error(pxp->gt->uc.gsc.fw.status))
101 		return -EIO;
102 
103 	guard(xe_pm_runtime)(pxp->xe);
104 
105 	/* PXP requires both HuC loaded and GSC proxy initialized */
106 	if (pxp_prerequisites_done(pxp))
107 		ret = 1;
108 
109 	return ret;
110 }
111 
112 static bool pxp_session_is_in_play(struct xe_pxp *pxp, u32 id)
113 {
114 	struct xe_gt *gt = pxp->gt;
115 
116 	return xe_mmio_read32(&gt->mmio, KCR_SIP) & BIT(id);
117 }
118 
119 static int pxp_wait_for_session_state(struct xe_pxp *pxp, u32 id, bool in_play)
120 {
121 	struct xe_gt *gt = pxp->gt;
122 	u32 mask = BIT(id);
123 
124 	return xe_mmio_wait32(&gt->mmio, KCR_SIP, mask, in_play ? mask : 0,
125 			      250, NULL, false);
126 }
127 
static void pxp_invalidate_queues(struct xe_pxp *pxp);

/*
 * Run the HW side of a session termination: submit the termination command,
 * wait for the session to drop out of play, trigger the KCR global cleanup
 * and finally ask the GSC FW to invalidate its own session state. The steps
 * must happen in this order.
 *
 * Returns 0 on success, -EIO if force_wake fails, or a negative errno from
 * the submission/wait steps.
 */
static int pxp_terminate_hw(struct xe_pxp *pxp)
{
	struct xe_gt *gt = pxp->gt;
	int ret = 0;

	drm_dbg(&pxp->xe->drm, "Terminating PXP\n");

	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
	if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FW_GT))
		return -EIO;

	/* terminate the hw session */
	ret = xe_pxp_submit_session_termination(pxp, ARB_SESSION);
	if (ret)
		return ret;

	/* the session must be out of play before we can clean up the HW */
	ret = pxp_wait_for_session_state(pxp, ARB_SESSION, false);
	if (ret)
		return ret;

	/* Trigger full HW cleanup */
	xe_mmio_write32(&gt->mmio, KCR_GLOBAL_TERMINATE, 1);

	/* now we can tell the GSC to clean up its own state */
	return xe_pxp_submit_session_invalidation(&pxp->gsc_res, ARB_SESSION);
}
156 
/*
 * Flag a termination as pending. The termination completion is re-armed so
 * that waiters block until pxp_terminate_complete() signals it again.
 * Caller must hold pxp->mutex.
 */
static void mark_termination_in_progress(struct xe_pxp *pxp)
{
	lockdep_assert_held(&pxp->mutex);

	reinit_completion(&pxp->termination);
	pxp->status = XE_PXP_TERMINATION_IN_PROGRESS;
}
164 
/*
 * Handle a termination request: invalidate the current key (if a session was
 * active), mark a termination as in progress and kick off the HW termination
 * sequence. If a termination is already being processed, only record that an
 * additional one is needed; if PXP is suspended, do nothing because resume
 * re-initializes everything anyway.
 */
static void pxp_terminate(struct xe_pxp *pxp)
{
	int ret = 0;
	struct xe_device *xe = pxp->xe;

	/* wait for any in-flight session start before terminating */
	if (!wait_for_completion_timeout(&pxp->activation,
					 msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS)))
		drm_err(&xe->drm, "failed to wait for PXP start before termination\n");

	mutex_lock(&pxp->mutex);

	/* an active session's key becomes invalid, so bump the key instance */
	if (pxp->status == XE_PXP_ACTIVE)
		pxp->key_instance++;

	/*
	 * we'll mark the status as needing termination on resume, so no need to
	 * emit a termination now.
	 */
	if (pxp->status == XE_PXP_SUSPENDED) {
		mutex_unlock(&pxp->mutex);
		return;
	}

	/*
	 * If we have a termination already in progress, we need to wait for
	 * it to complete before queueing another one. Once the first
	 * termination is completed we'll set the state back to
	 * NEEDS_TERMINATION and leave it to the pxp start code to issue it.
	 */
	if (pxp->status == XE_PXP_TERMINATION_IN_PROGRESS) {
		pxp->status = XE_PXP_NEEDS_ADDITIONAL_TERMINATION;
		mutex_unlock(&pxp->mutex);
		return;
	}

	mark_termination_in_progress(pxp);

	mutex_unlock(&pxp->mutex);

	/* kill all queues using the now-invalid key before touching the HW */
	pxp_invalidate_queues(pxp);

	ret = pxp_terminate_hw(pxp);
	if (ret) {
		/*
		 * On failure, go into the error state and signal the
		 * termination completion so waiters don't block forever.
		 */
		drm_err(&xe->drm, "PXP termination failed: %pe\n", ERR_PTR(ret));
		mutex_lock(&pxp->mutex);
		pxp->status = XE_PXP_ERROR;
		complete_all(&pxp->termination);
		mutex_unlock(&pxp->mutex);
	}
}
215 
/*
 * Handle the HW notification that a termination has completed: update the
 * status accordingly and signal the termination completion so that waiters
 * (pxp_start, suspend) can proceed.
 */
static void pxp_terminate_complete(struct xe_pxp *pxp)
{
	/*
	 * We expect PXP to be in one of 3 states when we get here:
	 * - XE_PXP_TERMINATION_IN_PROGRESS: a single termination event was
	 * requested and it is now completing, so we're ready to start.
	 * - XE_PXP_NEEDS_ADDITIONAL_TERMINATION: a second termination was
	 * requested while the first one was still being processed.
	 * - XE_PXP_SUSPENDED: PXP is now suspended, so we defer everything to
	 * when we come back on resume.
	 */
	mutex_lock(&pxp->mutex);

	switch (pxp->status) {
	case XE_PXP_TERMINATION_IN_PROGRESS:
		pxp->status = XE_PXP_READY_TO_START;
		break;
	case XE_PXP_NEEDS_ADDITIONAL_TERMINATION:
		/* the pxp start code will issue the extra termination */
		pxp->status = XE_PXP_NEEDS_TERMINATION;
		break;
	case XE_PXP_SUSPENDED:
		/* Nothing to do */
		break;
	default:
		drm_err(&pxp->xe->drm,
			"PXP termination complete while status was %u\n",
			pxp->status);
	}

	/* wake up anyone waiting for the termination to be processed */
	complete_all(&pxp->termination);

	mutex_unlock(&pxp->mutex);
}
249 
/*
 * Worker that processes the KCR events recorded by xe_pxp_irq_handler().
 * Termination requests are handled before termination completions, because a
 * new request makes any completion event received alongside it stale.
 */
static void pxp_irq_work(struct work_struct *work)
{
	struct xe_pxp *pxp = container_of(work, typeof(*pxp), irq.work);
	struct xe_device *xe = pxp->xe;
	u32 events = 0;

	/* atomically consume the pending events under the irq lock */
	spin_lock_irq(&xe->irq.lock);
	events = pxp->irq.events;
	pxp->irq.events = 0;
	spin_unlock_irq(&xe->irq.lock);

	if (!events)
		return;

	/*
	 * If we're processing a termination irq while suspending then don't
	 * bother, we're going to re-init everything on resume anyway.
	 */
	if ((events & PXP_TERMINATION_REQUEST) && !xe_pm_runtime_get_if_active(xe))
		return;

	if (events & PXP_TERMINATION_REQUEST) {
		/* a new request supersedes any completion received with it */
		events &= ~PXP_TERMINATION_COMPLETE;
		pxp_terminate(pxp);
	}

	if (events & PXP_TERMINATION_COMPLETE)
		pxp_terminate_complete(pxp);

	/* balances the xe_pm_runtime_get_if_active() above */
	if (events & PXP_TERMINATION_REQUEST)
		xe_pm_runtime_put(xe);
}
282 
/**
 * xe_pxp_irq_handler - Handles PXP interrupts.
 * @xe: the xe_device structure
 * @iir: interrupt vector
 *
 * Translates the KCR interrupt bits into PXP events and defers their
 * processing to the ordered PXP workqueue. Must be called with
 * xe->irq.lock held (asserted below), which also serializes access to
 * pxp->irq.events against pxp_irq_work().
 */
void xe_pxp_irq_handler(struct xe_device *xe, u16 iir)
{
	struct xe_pxp *pxp = xe->pxp;

	if (!xe_pxp_is_enabled(pxp)) {
		drm_err(&xe->drm, "PXP irq 0x%x received with PXP disabled!\n", iir);
		return;
	}

	lockdep_assert_held(&xe->irq.lock);

	if (unlikely(!iir))
		return;

	/* both bits indicate the HW wants the session terminated */
	if (iir & (KCR_PXP_STATE_TERMINATED_INTERRUPT |
		   KCR_APP_TERMINATED_PER_FW_REQ_INTERRUPT))
		pxp->irq.events |= PXP_TERMINATION_REQUEST;

	if (iir & KCR_PXP_STATE_RESET_COMPLETE_INTERRUPT)
		pxp->irq.events |= PXP_TERMINATION_COMPLETE;

	if (pxp->irq.events)
		queue_work(pxp->irq.wq, &pxp->irq.work);
}
312 
313 static int kcr_pxp_set_status(const struct xe_pxp *pxp, bool enable)
314 {
315 	u32 val = enable ? _MASKED_BIT_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES) :
316 		  _MASKED_BIT_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES);
317 
318 	CLASS(xe_force_wake, fw_ref)(gt_to_fw(pxp->gt), XE_FW_GT);
319 	if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FW_GT))
320 		return -EIO;
321 
322 	xe_mmio_write32(&pxp->gt->mmio, KCR_INIT, val);
323 
324 	return 0;
325 }
326 
/* Enable KCR. Returns 0 on success, -EIO if force_wake fails. */
static int kcr_pxp_enable(const struct xe_pxp *pxp)
{
	return kcr_pxp_set_status(pxp, true);
}
331 
/* Disable KCR. Returns 0 on success, -EIO if force_wake fails. */
static int kcr_pxp_disable(const struct xe_pxp *pxp)
{
	return kcr_pxp_set_status(pxp, false);
}
336 
/*
 * devm teardown action registered by xe_pxp_init(). The workqueue is
 * destroyed first so no worker can race with the resource teardown.
 */
static void pxp_fini(void *arg)
{
	struct xe_pxp *pxp = arg;

	destroy_workqueue(pxp->irq.wq);
	xe_pxp_destroy_execution_resources(pxp);

	/* no need to explicitly disable KCR since we're going to do an FLR */
}
346 
/**
 * xe_pxp_init - initialize PXP support
 * @xe: the xe_device structure
 *
 * Initialize the HW state and allocate the objects required for PXP support.
 * Note that some of the requirement for PXP support (GSC proxy init, HuC auth)
 * are performed asynchronously as part of the GSC init. PXP can only be used
 * after both this function and the async worker have completed.
 *
 * Returns 0 if PXP is not supported or if PXP initialization is successful,
 * other errno value if there is an error during the init.
 */
int xe_pxp_init(struct xe_device *xe)
{
	struct xe_gt *gt = xe->tiles[0].media_gt;
	struct xe_pxp *pxp;
	int err;

	if (!xe_pxp_is_supported(xe))
		return 0;

	/* we only support PXP on single tile devices with a media GT */
	if (xe->info.tile_count > 1 || !gt)
		return 0;

	/* The GSCCS is required for submissions to the GSC FW */
	if (!(gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0)))
		return 0;

	/* PXP requires both GSC and HuC firmwares to be available */
	if (!xe_uc_fw_is_loadable(&gt->uc.gsc.fw) ||
	    !xe_uc_fw_is_loadable(&gt->uc.huc.fw)) {
		drm_info(&xe->drm, "skipping PXP init due to missing FW dependencies");
		return 0;
	}

	/* drmm allocation: freed automatically on drm device release */
	pxp = drmm_kzalloc(&xe->drm, sizeof(struct xe_pxp), GFP_KERNEL);
	if (!pxp) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&pxp->queues.list);
	spin_lock_init(&pxp->queues.lock);
	INIT_WORK(&pxp->irq.work, pxp_irq_work);
	pxp->xe = xe;
	pxp->gt = gt;

	/* start at instance 1 so that 0 can mean "no key assigned" on BOs */
	pxp->key_instance = 1;
	pxp->last_suspend_key_instance = 1;

	/*
	 * we'll use the completions to check if there is an action pending,
	 * so we start them as completed and we reinit it when an action is
	 * triggered.
	 */
	init_completion(&pxp->activation);
	init_completion(&pxp->termination);
	complete_all(&pxp->termination);
	complete_all(&pxp->activation);

	mutex_init(&pxp->mutex);

	/* ordered wq: irq events must be processed in submission order */
	pxp->irq.wq = alloc_ordered_workqueue("pxp-wq", 0);
	if (!pxp->irq.wq) {
		err = -ENOMEM;
		goto out_free;
	}

	err = kcr_pxp_enable(pxp);
	if (err)
		goto out_wq;

	err = xe_pxp_allocate_execution_resources(pxp);
	if (err)
		goto out_kcr_disable;

	xe->pxp = pxp;

	/* pxp_fini runs on driver removal (or now, if registration fails) */
	return devm_add_action_or_reset(xe->drm.dev, pxp_fini, pxp);

out_kcr_disable:
	kcr_pxp_disable(pxp);
out_wq:
	destroy_workqueue(pxp->irq.wq);
out_free:
	drmm_kfree(&xe->drm, pxp);
out:
	drm_err(&xe->drm, "PXP initialization failed: %pe\n", ERR_PTR(err));
	return err;
}
438 
439 static int __pxp_start_arb_session(struct xe_pxp *pxp)
440 {
441 	int ret;
442 
443 	CLASS(xe_force_wake, fw_ref)(gt_to_fw(pxp->gt), XE_FW_GT);
444 	if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FW_GT))
445 		return -EIO;
446 
447 	if (pxp_session_is_in_play(pxp, ARB_SESSION))
448 		return -EEXIST;
449 
450 	ret = xe_pxp_submit_session_init(&pxp->gsc_res, ARB_SESSION);
451 	if (ret) {
452 		drm_err(&pxp->xe->drm, "Failed to init PXP arb session: %pe\n", ERR_PTR(ret));
453 		return ret;
454 	}
455 
456 	ret = pxp_wait_for_session_state(pxp, ARB_SESSION, true);
457 	if (ret) {
458 		drm_err(&pxp->xe->drm, "PXP ARB session failed to go in play%pe\n", ERR_PTR(ret));
459 		return ret;
460 	}
461 
462 	drm_dbg(&pxp->xe->drm, "PXP ARB session is active\n");
463 	return 0;
464 }
465 
/**
 * xe_pxp_exec_queue_set_type - Mark a queue as using PXP
 * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
 * @q: the queue to mark as using PXP
 * @type: the type of PXP session this queue will use
 *
 * Only records the type on the queue; the session itself is started when the
 * queue is added to the PXP list via xe_pxp_exec_queue_add().
 *
 * Returns 0 if the selected PXP type is supported, -ENODEV otherwise.
 */
int xe_pxp_exec_queue_set_type(struct xe_pxp *pxp, struct xe_exec_queue *q, u8 type)
{
	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	/* we only support HWDRM sessions right now */
	xe_assert(pxp->xe, type == DRM_XE_PXP_TYPE_HWDRM);

	q->pxp.type = type;

	return 0;
}
486 
487 static int __exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
488 {
489 	int ret = 0;
490 
491 	/*
492 	 * A queue can be added to the list only if the PXP is in active status,
493 	 * otherwise the termination might not handle it correctly.
494 	 */
495 	mutex_lock(&pxp->mutex);
496 
497 	if (pxp->status == XE_PXP_ACTIVE) {
498 		spin_lock_irq(&pxp->queues.lock);
499 		list_add_tail(&q->pxp.link, &pxp->queues.list);
500 		spin_unlock_irq(&pxp->queues.lock);
501 	} else if (pxp->status == XE_PXP_ERROR || pxp->status == XE_PXP_SUSPENDED) {
502 		ret = -EIO;
503 	} else {
504 		ret = -EBUSY; /* try again later */
505 	}
506 
507 	mutex_unlock(&pxp->mutex);
508 
509 	return ret;
510 }
511 
/*
 * Start the PXP session if it's not already running. This implements a state
 * machine on pxp->status, protected by pxp->mutex; the activation/termination
 * completions are used to wait for in-flight actions from outside the lock,
 * with the wait_for_idle label used to restart the sequence whenever another
 * action has to complete first.
 */
static int pxp_start(struct xe_pxp *pxp, u8 type)
{
	int ret = 0;
	bool restart = false;

	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	/* we only support HWDRM sessions right now */
	xe_assert(pxp->xe, type == DRM_XE_PXP_TYPE_HWDRM);

	/* get_readiness_status() returns 0 for in-progress and 1 for done */
	ret = xe_pxp_get_readiness_status(pxp);
	if (ret <= 0)
		return ret ?: -EBUSY;

	ret = 0;

wait_for_idle:
	/*
	 * if there is an action in progress, wait for it. We need to wait
	 * outside the lock because the completion is done from within the lock.
	 * Note that the two actions should never be pending at the same time.
	 */
	if (!wait_for_completion_timeout(&pxp->termination,
					 msecs_to_jiffies(PXP_TERMINATION_TIMEOUT_MS)))
		return -ETIMEDOUT;

	if (!wait_for_completion_timeout(&pxp->activation,
					 msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS)))
		return -ETIMEDOUT;

	mutex_lock(&pxp->mutex);

	/* If PXP is not already active, turn it on */
	switch (pxp->status) {
	case XE_PXP_ERROR:
		ret = -EIO;
		goto out_unlock;
	case XE_PXP_ACTIVE:
		/* session already up, nothing to do */
		goto out_unlock;
	case XE_PXP_READY_TO_START:
		/* we own the start: re-arm the completion to block others */
		pxp->status = XE_PXP_START_IN_PROGRESS;
		reinit_completion(&pxp->activation);
		break;
	case XE_PXP_START_IN_PROGRESS:
		/* If a start is in progress then the completion must not be done */
		XE_WARN_ON(completion_done(&pxp->activation));
		restart = true;
		goto out_unlock;
	case XE_PXP_NEEDS_TERMINATION:
		mark_termination_in_progress(pxp);
		break;
	case XE_PXP_TERMINATION_IN_PROGRESS:
	case XE_PXP_NEEDS_ADDITIONAL_TERMINATION:
		/* If a termination is in progress then the completion must not be done */
		XE_WARN_ON(completion_done(&pxp->termination));
		restart = true;
		goto out_unlock;
	case XE_PXP_SUSPENDED:
	default:
		drm_err(&pxp->xe->drm, "unexpected state during PXP start: %u\n", pxp->status);
		ret = -EIO;
		goto out_unlock;
	}

	mutex_unlock(&pxp->mutex);

	/*
	 * We get here with the termination completion not done only via the
	 * NEEDS_TERMINATION case above: emit the termination, then restart
	 * the sequence from the top.
	 */
	if (!completion_done(&pxp->termination)) {
		ret = pxp_terminate_hw(pxp);
		if (ret) {
			drm_err(&pxp->xe->drm, "PXP termination failed before start\n");
			mutex_lock(&pxp->mutex);
			pxp->status = XE_PXP_ERROR;

			goto out_unlock;
		}

		goto wait_for_idle;
	}

	/* All the cases except for start should have exited earlier */
	XE_WARN_ON(completion_done(&pxp->activation));
	ret = __pxp_start_arb_session(pxp);

	mutex_lock(&pxp->mutex);

	/* unblock anyone waiting on the activation */
	complete_all(&pxp->activation);

	/*
	 * Any other process should wait until the state goes away from
	 * XE_PXP_START_IN_PROGRESS, so if the state is not that something went
	 * wrong. Mark the status as needing termination and try again.
	 */
	if (pxp->status != XE_PXP_START_IN_PROGRESS) {
		drm_err(&pxp->xe->drm, "unexpected state after PXP start: %u\n", pxp->status);
		pxp->status = XE_PXP_NEEDS_TERMINATION;
		restart = true;
		goto out_unlock;
	}

	/* If everything went ok, update the status and add the queue to the list */
	if (!ret)
		pxp->status = XE_PXP_ACTIVE;
	else
		pxp->status = XE_PXP_ERROR;

out_unlock:
	mutex_unlock(&pxp->mutex);

	if (restart)
		goto wait_for_idle;

	return ret;
}
627 
/**
 * xe_pxp_exec_queue_add - add a queue to the PXP list
 * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
 * @q: the queue to add to the list
 *
 * If PXP is enabled and the prerequisites are done, start the PXP default
 * session (if not already running) and add the queue to the PXP list.
 *
 * Returns 0 if the PXP session is running and the queue is in the list,
 * -ENODEV if PXP is disabled, -EBUSY if the PXP prerequisites are not done,
 * other errno value if something goes wrong during the session start.
 */
int xe_pxp_exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
{
	int ret;

	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	/*
	 * Runtime suspend kills PXP, so we take a reference to prevent it from
	 * happening while we have active queues that use PXP
	 */
	xe_pm_runtime_get(pxp->xe);

start:
	ret = pxp_start(pxp, q->pxp.type);

	/*
	 * __exec_queue_add returns -EBUSY if PXP dropped out of the ACTIVE
	 * state between the start above and the add, in which case we retry
	 * the whole sequence.
	 */
	if (!ret) {
		ret = __exec_queue_add(pxp, q);
		if (ret == -EBUSY)
			goto start;
	}

	/*
	 * in the successful case the PM ref is released from
	 * xe_pxp_exec_queue_remove
	 */
	if (ret)
		xe_pm_runtime_put(pxp->xe);

	return ret;
}
ALLOW_ERROR_INJECTION(xe_pxp_exec_queue_add, ERRNO);
672 
/*
 * Remove @q from the PXP list and drop the PM reference it held (taken in
 * xe_pxp_exec_queue_add). @lock selects whether queues.lock is taken here or
 * is already held by the caller (see pxp_invalidate_queues). Safe to call
 * for a queue that is not (or no longer) on the list.
 */
static void __pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q, bool lock)
{
	bool need_pm_put = false;

	if (!xe_pxp_is_enabled(pxp))
		return;

	if (lock)
		spin_lock_irq(&pxp->queues.lock);

	/* only a queue still on the list holds a PM ref */
	if (!list_empty(&q->pxp.link)) {
		list_del_init(&q->pxp.link);
		need_pm_put = true;
	}

	q->pxp.type = DRM_XE_PXP_TYPE_NONE;

	if (lock)
		spin_unlock_irq(&pxp->queues.lock);

	/* PM put must happen outside the spinlock */
	if (need_pm_put)
		xe_pm_runtime_put(pxp->xe);
}
696 
/**
 * xe_pxp_exec_queue_remove - remove a queue from the PXP list
 * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
 * @q: the queue to remove from the list
 *
 * If PXP is enabled and the exec_queue is in the list, the queue will be
 * removed from the list and its PM reference will be released. It is safe to
 * call this function multiple times for the same queue.
 */
void xe_pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q)
{
	/* locked variant: takes queues.lock internally */
	__pxp_exec_queue_remove(pxp, q, true);
}
710 
/*
 * Kill all queues on the PXP list, since the key they were using is no
 * longer valid. The queues are first moved to a private list under the
 * spinlock (taking a reference to each), then killed and removed outside it.
 */
static void pxp_invalidate_queues(struct xe_pxp *pxp)
{
	struct xe_exec_queue *tmp, *q;
	LIST_HEAD(to_clean);

	spin_lock_irq(&pxp->queues.lock);

	list_for_each_entry_safe(q, tmp, &pxp->queues.list, pxp.link) {
		/* skip queues that are already going away */
		q = xe_exec_queue_get_unless_zero(q);
		if (!q)
			continue;

		list_move_tail(&q->pxp.link, &to_clean);
	}
	spin_unlock_irq(&pxp->queues.lock);

	list_for_each_entry_safe(q, tmp, &to_clean, pxp.link) {
		xe_exec_queue_kill(q);

		/*
		 * We hold a ref to the queue so there is no risk of racing with
		 * the calls to exec_queue_remove coming from exec_queue_destroy.
		 */
		__pxp_exec_queue_remove(pxp, q, false);

		xe_exec_queue_put(q);
	}
}
739 
/**
 * xe_pxp_key_assign - mark a BO as using the current PXP key iteration
 * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
 * @bo: the BO to mark
 *
 * Returns: -ENODEV if PXP is disabled, 0 otherwise.
 */
int xe_pxp_key_assign(struct xe_pxp *pxp, struct xe_bo *bo)
{
	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	/* a BO can only be assigned a key once */
	xe_assert(pxp->xe, !bo->pxp_key_instance);

	/*
	 * Note that the PXP key handling is inherently racey, because the key
	 * can theoretically change at any time (although it's unlikely to do
	 * so without triggers), even right after we copy it. Taking a lock
	 * wouldn't help because the value might still change as soon as we
	 * release the lock.
	 * Userspace needs to handle the fact that their BOs can go invalid at
	 * any point.
	 */
	bo->pxp_key_instance = pxp->key_instance;

	return 0;
}
767 
768 /**
769  * xe_pxp_bo_key_check - check if the key used by a xe_bo is valid
770  * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
771  * @bo: the BO we want to check
772  *
773  * Checks whether a BO was encrypted with the current key or an obsolete one.
774  *
775  * Returns: 0 if the key is valid, -ENODEV if PXP is disabled, -EINVAL if the
776  * BO is not using PXP,  -ENOEXEC if the key is not valid.
777  */
778 int xe_pxp_bo_key_check(struct xe_pxp *pxp, struct xe_bo *bo)
779 {
780 	if (!xe_pxp_is_enabled(pxp))
781 		return -ENODEV;
782 
783 	if (!xe_bo_is_protected(bo))
784 		return -EINVAL;
785 
786 	xe_assert(pxp->xe, bo->pxp_key_instance);
787 
788 	/*
789 	 * Note that the PXP key handling is inherently racey, because the key
790 	 * can theoretically change at any time (although it's unlikely to do
791 	 * so without triggers), even right after we check it. Taking a lock
792 	 * wouldn't help because the value might still change as soon as we
793 	 * release the lock.
794 	 * We mitigate the risk by checking the key at multiple points (on each
795 	 * submission involving the BO and right before flipping it on the
796 	 * display), but there is still a very small chance that we could
797 	 * operate on an invalid BO for a single submission or a single frame
798 	 * flip. This is a compromise made to protect the encrypted data (which
799 	 * is what the key termination is for).
800 	 */
801 	if (bo->pxp_key_instance != pxp->key_instance)
802 		return -ENOEXEC;
803 
804 	return 0;
805 }
806 
807 /**
808  * xe_pxp_obj_key_check - check if the key used by a drm_gem_obj is valid
809  * @obj: the drm_gem_obj we want to check
810  *
811  * Checks whether a drm_gem_obj was encrypted with the current key or an
812  * obsolete one.
813  *
814  * Returns: 0 if the key is valid, -ENODEV if PXP is disabled, -EINVAL if the
815  * obj is not using PXP,  -ENOEXEC if the key is not valid.
816  */
817 int xe_pxp_obj_key_check(struct drm_gem_object *obj)
818 {
819 	struct xe_bo *bo = gem_to_xe_bo(obj);
820 	struct xe_device *xe = xe_bo_device(bo);
821 	struct xe_pxp *pxp = xe->pxp;
822 
823 	return xe_pxp_bo_key_check(pxp, bo);
824 }
825 
/**
 * xe_pxp_pm_suspend - prepare PXP for HW suspend
 * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
 *
 * Makes sure all PXP actions have completed and invalidates all PXP queues
 * and objects before we go into a suspend state.
 *
 * Returns: 0 if successful, a negative errno value otherwise.
 */
int xe_pxp_pm_suspend(struct xe_pxp *pxp)
{
	bool needs_queue_inval = false;
	int ret = 0;

	if (!xe_pxp_is_enabled(pxp))
		return 0;

wait_for_activation:
	/* wait for any in-flight start before changing the state */
	if (!wait_for_completion_timeout(&pxp->activation,
					 msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS)))
		ret = -ETIMEDOUT;

	mutex_lock(&pxp->mutex);

	switch (pxp->status) {
	case XE_PXP_ERROR:
	case XE_PXP_READY_TO_START:
	case XE_PXP_SUSPENDED:
	case XE_PXP_TERMINATION_IN_PROGRESS:
	case XE_PXP_NEEDS_ADDITIONAL_TERMINATION:
		/*
		 * If PXP is not running there is nothing to cleanup. If there
		 * is a termination pending then no need to issue another one.
		 */
		break;
	case XE_PXP_START_IN_PROGRESS:
		/* a start sneaked in after the wait above; wait for it too */
		mutex_unlock(&pxp->mutex);
		goto wait_for_activation;
	case XE_PXP_NEEDS_TERMINATION:
		/* If PXP was never used we can skip the cleanup */
		if (pxp->key_instance == pxp->last_suspend_key_instance)
			break;
		fallthrough;
	case XE_PXP_ACTIVE:
		/* invalidate the key and the queues that used it */
		pxp->key_instance++;
		needs_queue_inval = true;
		break;
	default:
		drm_err(&pxp->xe->drm, "unexpected state during PXP suspend: %u",
			pxp->status);
		ret = -EIO;
		goto out;
	}

	/*
	 * We set this even if we were in error state, hoping the suspend clears
	 * the error. Worse case we fail again and go in error state again.
	 */
	pxp->status = XE_PXP_SUSPENDED;

	mutex_unlock(&pxp->mutex);

	if (needs_queue_inval)
		pxp_invalidate_queues(pxp);

	/*
	 * if there is a termination in progress, wait for it.
	 * We need to wait outside the lock because the completion is done from
	 * within the lock
	 */
	if (!wait_for_completion_timeout(&pxp->termination,
					 msecs_to_jiffies(PXP_TERMINATION_TIMEOUT_MS)))
		ret = -ETIMEDOUT;

	/* remember the key state so the next suspend can skip unused cleanup */
	pxp->last_suspend_key_instance = pxp->key_instance;

out:
	return ret;
}
905 
906 /**
907  * xe_pxp_pm_resume - re-init PXP after HW suspend
908  * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
909  */
910 void xe_pxp_pm_resume(struct xe_pxp *pxp)
911 {
912 	int err;
913 
914 	if (!xe_pxp_is_enabled(pxp))
915 		return;
916 
917 	err = kcr_pxp_enable(pxp);
918 
919 	mutex_lock(&pxp->mutex);
920 
921 	xe_assert(pxp->xe, pxp->status == XE_PXP_SUSPENDED);
922 
923 	if (err)
924 		pxp->status = XE_PXP_ERROR;
925 	else
926 		pxp->status = XE_PXP_NEEDS_TERMINATION;
927 
928 	mutex_unlock(&pxp->mutex);
929 }
930