// SPDX-License-Identifier: MIT
/*
 * Copyright(c) 2024 Intel Corporation.
 */

#include "xe_pxp.h"

#include <drm/drm_managed.h>
#include <uapi/drm/xe_drm.h>

#include "xe_bo.h"
#include "xe_bo_types.h"
#include "xe_device_types.h"
#include "xe_exec_queue.h"
#include "xe_force_wake.h"
#include "xe_guc_submit.h"
#include "xe_gsc_proxy.h"
#include "xe_gt.h"
#include "xe_gt_types.h"
#include "xe_huc.h"
#include "xe_mmio.h"
#include "xe_pm.h"
#include "xe_pxp_submit.h"
#include "xe_pxp_types.h"
#include "xe_uc_fw.h"
#include "regs/xe_irq_regs.h"
#include "regs/xe_pxp_regs.h"

/**
 * DOC: PXP
 *
 * PXP (Protected Xe Path) allows execution and flip to display of protected
 * (i.e. encrypted) objects. This feature is currently only supported in
 * integrated parts.
 */
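
/*
 * Only the HWDRM default (ARB) session is supported. Its lifetime is tracked
 * via a small state machine (pxp->status): a session moves from
 * XE_PXP_READY_TO_START through XE_PXP_START_IN_PROGRESS to XE_PXP_ACTIVE,
 * and a termination event (from the HW/FW or from suspend) moves it back
 * through XE_PXP_NEEDS_TERMINATION / XE_PXP_TERMINATION_IN_PROGRESS before it
 * can be started again. Terminating an active session also bumps
 * pxp->key_instance, which is how BOs encrypted with an older key are later
 * detected as invalid (see xe_pxp_bo_key_check()).
 */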

#define ARB_SESSION DRM_XE_PXP_HWDRM_DEFAULT_SESSION /* shorter define */

/*
 * A submission to the GSC can take up to 250ms to complete, so use a 300ms
 * timeout for activation, where only one such submission is involved.
 * Termination additionally requires a submission to the VCS and an
 * interaction with the KCR, so bump the timeout to 500ms for that.
 */
#define PXP_ACTIVATION_TIMEOUT_MS 300
#define PXP_TERMINATION_TIMEOUT_MS 500

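/**
 * xe_pxp_is_supported - check if PXP is supported on the device
 * @xe: the xe_device structure
 *
 * Returns: true if the platform has PXP support and the kernel was built with
 * the GSC proxy component (CONFIG_INTEL_MEI_GSC_PROXY) enabled, false
 * otherwise.
 */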
bool xe_pxp_is_supported(const struct xe_device *xe)
{
	return xe->info.has_pxp && IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY);
}

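/**
 * xe_pxp_is_enabled - check if PXP is enabled on the device
 * @pxp: the xe->pxp pointer (NULL if PXP is disabled or not supported)
 *
 * Returns: true if PXP is enabled, false otherwise.
 */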
bool xe_pxp_is_enabled(const struct xe_pxp *pxp)
{
	return pxp;
}

static bool pxp_prerequisites_done(const struct xe_pxp *pxp)
{
	struct xe_gt *gt = pxp->gt;
	unsigned int fw_ref;
	bool ready;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);

	/*
	 * If force_wake fails we could falsely report the prerequisites as not
	 * done even if they are; the consequence of this would be that the
	 * callers won't go ahead with using PXP, but if force_wake doesn't work
	 * the GT is very likely in a bad state, so aborting PXP is not really a
	 * problem. Therefore, we can just log the force_wake error and not
	 * escalate it.
	 */
	XE_WARN_ON(!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL));

	/* PXP requires both HuC authentication via GSC and GSC proxy initialized */
	ready = xe_huc_is_authenticated(&gt->uc.huc, XE_HUC_AUTH_VIA_GSC) &&
		xe_gsc_proxy_init_done(&gt->uc.gsc);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);

	return ready;
}

/**
 * xe_pxp_get_readiness_status - check whether PXP is ready for userspace use
 * @pxp: the xe_pxp pointer (can be NULL if PXP is disabled)
 *
 * Returns: 0 if PXP is not ready yet, 1 if it is ready, a negative errno value
 * if PXP is not supported/enabled or if something went wrong in the
 * initialization of the prerequisites. Note that the return values of this
 * function follow the uapi (see drm_xe_query_pxp_status), so they can be used
 * directly in the query ioctl.
 */
int xe_pxp_get_readiness_status(struct xe_pxp *pxp)
{
	int ret = 0;

	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	/* if the GSC or HuC FW are in an error state, PXP will never work */
	if (xe_uc_fw_status_to_error(pxp->gt->uc.huc.fw.status) ||
	    xe_uc_fw_status_to_error(pxp->gt->uc.gsc.fw.status))
		return -EIO;

	xe_pm_runtime_get(pxp->xe);

	/* PXP requires both HuC loaded and GSC proxy initialized */
	if (pxp_prerequisites_done(pxp))
		ret = 1;

	xe_pm_runtime_put(pxp->xe);
	return ret;
}
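
/*
 * A minimal userspace sketch of how the readiness status is meant to be
 * consumed, assuming the standard two-call pattern of the xe query ioctl
 * (first call to get the size, second to get the data, see xe_drm.h):
 *
 *	struct drm_xe_query_pxp_status *pxp_status;
 *	struct drm_xe_device_query query = {
 *		.query = DRM_XE_DEVICE_QUERY_PXP_STATUS,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *	pxp_status = malloc(query.size);
 *	query.data = (uintptr_t)pxp_status;
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *
 *	// pxp_status->status is 0 if PXP is not ready yet and 1 if it is,
 *	// matching the return values of xe_pxp_get_readiness_status(); a
 *	// failing ioctl maps to the negative errno cases above.
 */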

static bool pxp_session_is_in_play(struct xe_pxp *pxp, u32 id)
{
	struct xe_gt *gt = pxp->gt;

	return xe_mmio_read32(&gt->mmio, KCR_SIP) & BIT(id);
}

static int pxp_wait_for_session_state(struct xe_pxp *pxp, u32 id, bool in_play)
{
	struct xe_gt *gt = pxp->gt;
	u32 mask = BIT(id);

	return xe_mmio_wait32(&gt->mmio, KCR_SIP, mask, in_play ? mask : 0,
			      250, NULL, false);
}

static void pxp_invalidate_queues(struct xe_pxp *pxp);

static int pxp_terminate_hw(struct xe_pxp *pxp)
{
	struct xe_gt *gt = pxp->gt;
	unsigned int fw_ref;
	int ret = 0;

	drm_dbg(&pxp->xe->drm, "Terminating PXP\n");

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
		ret = -EIO;
		goto out;
	}

	/* terminate the hw session */
	ret = xe_pxp_submit_session_termination(pxp, ARB_SESSION);
	if (ret)
		goto out;

	ret = pxp_wait_for_session_state(pxp, ARB_SESSION, false);
	if (ret)
		goto out;

	/* Trigger full HW cleanup */
	xe_mmio_write32(&gt->mmio, KCR_GLOBAL_TERMINATE, 1);

	/* now we can tell the GSC to clean up its own state */
	ret = xe_pxp_submit_session_invalidation(&pxp->gsc_res, ARB_SESSION);

out:
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return ret;
}

static void mark_termination_in_progress(struct xe_pxp *pxp)
{
	lockdep_assert_held(&pxp->mutex);

	reinit_completion(&pxp->termination);
	pxp->status = XE_PXP_TERMINATION_IN_PROGRESS;
}

static void pxp_terminate(struct xe_pxp *pxp)
{
	int ret = 0;
	struct xe_device *xe = pxp->xe;

	if (!wait_for_completion_timeout(&pxp->activation,
					 msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS)))
		drm_err(&xe->drm, "failed to wait for PXP start before termination\n");

	mutex_lock(&pxp->mutex);

	if (pxp->status == XE_PXP_ACTIVE)
		pxp->key_instance++;

	/*
	 * we'll mark the status as needing termination on resume, so no need to
	 * emit a termination now.
	 */
	if (pxp->status == XE_PXP_SUSPENDED) {
		mutex_unlock(&pxp->mutex);
		return;
	}

	/*
	 * If we have a termination already in progress, we need to wait for
	 * it to complete before queueing another one. Once the first
	 * termination is completed we'll set the state back to
	 * NEEDS_TERMINATION and leave it to the pxp start code to issue it.
	 */
	if (pxp->status == XE_PXP_TERMINATION_IN_PROGRESS) {
		pxp->status = XE_PXP_NEEDS_ADDITIONAL_TERMINATION;
		mutex_unlock(&pxp->mutex);
		return;
	}

	mark_termination_in_progress(pxp);

	mutex_unlock(&pxp->mutex);

	pxp_invalidate_queues(pxp);

	ret = pxp_terminate_hw(pxp);
	if (ret) {
		drm_err(&xe->drm, "PXP termination failed: %pe\n", ERR_PTR(ret));
		mutex_lock(&pxp->mutex);
		pxp->status = XE_PXP_ERROR;
		complete_all(&pxp->termination);
		mutex_unlock(&pxp->mutex);
	}
}

static void pxp_terminate_complete(struct xe_pxp *pxp)
{
	/*
	 * We expect PXP to be in one of 3 states when we get here:
	 * - XE_PXP_TERMINATION_IN_PROGRESS: a single termination event was
	 * requested and it is now completing, so we're ready to start.
	 * - XE_PXP_NEEDS_ADDITIONAL_TERMINATION: a second termination was
	 * requested while the first one was still being processed.
	 * - XE_PXP_SUSPENDED: PXP is now suspended, so we defer everything to
	 * when we come back on resume.
	 */
	mutex_lock(&pxp->mutex);

	switch (pxp->status) {
	case XE_PXP_TERMINATION_IN_PROGRESS:
		pxp->status = XE_PXP_READY_TO_START;
		break;
	case XE_PXP_NEEDS_ADDITIONAL_TERMINATION:
		pxp->status = XE_PXP_NEEDS_TERMINATION;
		break;
	case XE_PXP_SUSPENDED:
		/* Nothing to do */
		break;
	default:
		drm_err(&pxp->xe->drm,
			"PXP termination complete while status was %u\n",
			pxp->status);
	}

	complete_all(&pxp->termination);

	mutex_unlock(&pxp->mutex);
}

static void pxp_irq_work(struct work_struct *work)
{
	struct xe_pxp *pxp = container_of(work, typeof(*pxp), irq.work);
	struct xe_device *xe = pxp->xe;
	u32 events = 0;

	spin_lock_irq(&xe->irq.lock);
	events = pxp->irq.events;
	pxp->irq.events = 0;
	spin_unlock_irq(&xe->irq.lock);

	if (!events)
		return;

	/*
	 * If we're processing a termination irq while suspending then don't
	 * bother, we're going to re-init everything on resume anyway.
	 */
	if ((events & PXP_TERMINATION_REQUEST) && !xe_pm_runtime_get_if_active(xe))
		return;

	if (events & PXP_TERMINATION_REQUEST) {
		events &= ~PXP_TERMINATION_COMPLETE;
		pxp_terminate(pxp);
	}

	if (events & PXP_TERMINATION_COMPLETE)
		pxp_terminate_complete(pxp);

	if (events & PXP_TERMINATION_REQUEST)
		xe_pm_runtime_put(xe);
}

/**
 * xe_pxp_irq_handler - Handles PXP interrupts.
 * @xe: the xe_device structure
 * @iir: interrupt vector
 */
void xe_pxp_irq_handler(struct xe_device *xe, u16 iir)
{
	struct xe_pxp *pxp = xe->pxp;

	if (!xe_pxp_is_enabled(pxp)) {
		drm_err(&xe->drm, "PXP irq 0x%x received with PXP disabled!\n", iir);
		return;
	}

	lockdep_assert_held(&xe->irq.lock);

	if (unlikely(!iir))
		return;

	if (iir & (KCR_PXP_STATE_TERMINATED_INTERRUPT |
		   KCR_APP_TERMINATED_PER_FW_REQ_INTERRUPT))
		pxp->irq.events |= PXP_TERMINATION_REQUEST;

	if (iir & KCR_PXP_STATE_RESET_COMPLETE_INTERRUPT)
		pxp->irq.events |= PXP_TERMINATION_COMPLETE;

	if (pxp->irq.events)
		queue_work(pxp->irq.wq, &pxp->irq.work);
}

static int kcr_pxp_set_status(const struct xe_pxp *pxp, bool enable)
{
	u32 val = enable ? _MASKED_BIT_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES) :
		  _MASKED_BIT_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES);
	unsigned int fw_ref;

	fw_ref = xe_force_wake_get(gt_to_fw(pxp->gt), XE_FW_GT);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT))
		return -EIO;

	xe_mmio_write32(&pxp->gt->mmio, KCR_INIT, val);
	xe_force_wake_put(gt_to_fw(pxp->gt), fw_ref);

	return 0;
}

static int kcr_pxp_enable(const struct xe_pxp *pxp)
{
	return kcr_pxp_set_status(pxp, true);
}

static int kcr_pxp_disable(const struct xe_pxp *pxp)
{
	return kcr_pxp_set_status(pxp, false);
}

static void pxp_fini(void *arg)
{
	struct xe_pxp *pxp = arg;

	destroy_workqueue(pxp->irq.wq);
	xe_pxp_destroy_execution_resources(pxp);

	/* no need to explicitly disable KCR since we're going to do an FLR */
}

/**
 * xe_pxp_init - initialize PXP support
 * @xe: the xe_device structure
 *
 * Initialize the HW state and allocate the objects required for PXP support.
 * Note that some of the requirements for PXP support (GSC proxy init, HuC
 * auth) are fulfilled asynchronously as part of the GSC init. PXP can only be
 * used after both this function and the async worker have completed.
 *
 * Returns 0 if PXP is not supported or if PXP initialization is successful,
 * a negative errno value if there is an error during the init.
 */
int xe_pxp_init(struct xe_device *xe)
{
	struct xe_gt *gt = xe->tiles[0].media_gt;
	struct xe_pxp *pxp;
	int err;

	if (!xe_pxp_is_supported(xe))
		return 0;

	/* we only support PXP on single-tile devices with a media GT */
	if (xe->info.tile_count > 1 || !gt)
		return 0;

	/* The GSCCS is required for submissions to the GSC FW */
	if (!(gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0)))
		return 0;

	/* PXP requires both GSC and HuC firmwares to be available */
	if (!xe_uc_fw_is_loadable(&gt->uc.gsc.fw) ||
	    !xe_uc_fw_is_loadable(&gt->uc.huc.fw)) {
		drm_info(&xe->drm, "skipping PXP init due to missing FW dependencies");
		return 0;
	}

	pxp = drmm_kzalloc(&xe->drm, sizeof(struct xe_pxp), GFP_KERNEL);
	if (!pxp) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&pxp->queues.list);
	spin_lock_init(&pxp->queues.lock);
	INIT_WORK(&pxp->irq.work, pxp_irq_work);
	pxp->xe = xe;
	pxp->gt = gt;

	pxp->key_instance = 1;
	pxp->last_suspend_key_instance = 1;

	/*
	 * we'll use the completions to check if there is an action pending,
	 * so we start them as completed and we reinit them when an action is
	 * triggered.
	 */
	init_completion(&pxp->activation);
	init_completion(&pxp->termination);
	complete_all(&pxp->termination);
	complete_all(&pxp->activation);

	mutex_init(&pxp->mutex);

	pxp->irq.wq = alloc_ordered_workqueue("pxp-wq", 0);
	if (!pxp->irq.wq) {
		err = -ENOMEM;
		goto out_free;
	}

	err = kcr_pxp_enable(pxp);
	if (err)
		goto out_wq;

	err = xe_pxp_allocate_execution_resources(pxp);
	if (err)
		goto out_kcr_disable;

	xe->pxp = pxp;

	return devm_add_action_or_reset(xe->drm.dev, pxp_fini, pxp);

out_kcr_disable:
	kcr_pxp_disable(pxp);
out_wq:
	destroy_workqueue(pxp->irq.wq);
out_free:
	drmm_kfree(&xe->drm, pxp);
out:
	drm_err(&xe->drm, "PXP initialization failed: %pe\n", ERR_PTR(err));
	return err;
}

static int __pxp_start_arb_session(struct xe_pxp *pxp)
{
	int ret;
	unsigned int fw_ref;

	fw_ref = xe_force_wake_get(gt_to_fw(pxp->gt), XE_FW_GT);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT))
		return -EIO;

	if (pxp_session_is_in_play(pxp, ARB_SESSION)) {
		ret = -EEXIST;
		goto out_force_wake;
	}

	ret = xe_pxp_submit_session_init(&pxp->gsc_res, ARB_SESSION);
	if (ret) {
		drm_err(&pxp->xe->drm, "Failed to init PXP arb session: %pe\n", ERR_PTR(ret));
		goto out_force_wake;
	}

	ret = pxp_wait_for_session_state(pxp, ARB_SESSION, true);
	if (ret) {
		drm_err(&pxp->xe->drm, "PXP ARB session failed to go in play: %pe\n", ERR_PTR(ret));
		goto out_force_wake;
	}

	drm_dbg(&pxp->xe->drm, "PXP ARB session is active\n");

out_force_wake:
	xe_force_wake_put(gt_to_fw(pxp->gt), fw_ref);
	return ret;
}

/**
 * xe_pxp_exec_queue_set_type - Mark a queue as using PXP
 * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
 * @q: the queue to mark as using PXP
 * @type: the type of PXP session this queue will use
 *
 * Returns 0 if the selected PXP type is supported, -ENODEV otherwise.
 */
int xe_pxp_exec_queue_set_type(struct xe_pxp *pxp, struct xe_exec_queue *q, u8 type)
{
	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	/* we only support HWDRM sessions right now */
	xe_assert(pxp->xe, type == DRM_XE_PXP_TYPE_HWDRM);

	q->pxp.type = type;

	return 0;
}
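
/*
 * A minimal sketch of the userspace side of the above, assuming the exec
 * queue creation uapi with a set-property extension carrying the PXP type
 * (see the DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE property in xe_drm.h):
 *
 *	struct drm_xe_ext_set_property ext = {
 *		.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
 *		.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE,
 *		.value = DRM_XE_PXP_TYPE_HWDRM,
 *	};
 *	struct drm_xe_exec_queue_create create = {
 *		.extensions = (uintptr_t)&ext,
 *		// engine/VM setup as for any other exec queue
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);
 */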

static int __exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
{
	int ret = 0;

	/*
	 * A queue can be added to the list only if the PXP is in active status,
	 * otherwise the termination might not handle it correctly.
	 */
	mutex_lock(&pxp->mutex);

	if (pxp->status == XE_PXP_ACTIVE) {
		spin_lock_irq(&pxp->queues.lock);
		list_add_tail(&q->pxp.link, &pxp->queues.list);
		spin_unlock_irq(&pxp->queues.lock);
	} else if (pxp->status == XE_PXP_ERROR || pxp->status == XE_PXP_SUSPENDED) {
		ret = -EIO;
	} else {
		ret = -EBUSY; /* try again later */
	}

	mutex_unlock(&pxp->mutex);

	return ret;
}

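/*
 * Start (or restart) the ARB session. This can be called concurrently for
 * multiple queues being added, so the state machine under pxp->mutex and the
 * activation/termination completions ensure that only one caller performs the
 * actual start while the others wait for the outcome (or for a pending
 * termination to be processed first).
 */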
static int pxp_start(struct xe_pxp *pxp, u8 type)
{
	int ret = 0;
	bool restart = false;

	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	/* we only support HWDRM sessions right now */
	xe_assert(pxp->xe, type == DRM_XE_PXP_TYPE_HWDRM);

	/* get_readiness_status() returns 0 for in-progress and 1 for done */
	ret = xe_pxp_get_readiness_status(pxp);
	if (ret <= 0)
		return ret ?: -EBUSY;

	ret = 0;

wait_for_idle:
	/*
	 * if there is an action in progress, wait for it. We need to wait
	 * outside the lock because the completion is done from within the lock.
	 * Note that the two actions should never be pending at the same time.
	 */
	if (!wait_for_completion_timeout(&pxp->termination,
					 msecs_to_jiffies(PXP_TERMINATION_TIMEOUT_MS)))
		return -ETIMEDOUT;

	if (!wait_for_completion_timeout(&pxp->activation,
					 msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS)))
		return -ETIMEDOUT;

	mutex_lock(&pxp->mutex);

	/* If PXP is not already active, turn it on */
	switch (pxp->status) {
	case XE_PXP_ERROR:
		ret = -EIO;
		goto out_unlock;
	case XE_PXP_ACTIVE:
		goto out_unlock;
	case XE_PXP_READY_TO_START:
		pxp->status = XE_PXP_START_IN_PROGRESS;
		reinit_completion(&pxp->activation);
		break;
	case XE_PXP_START_IN_PROGRESS:
		/* If a start is in progress then the completion must not be done */
		XE_WARN_ON(completion_done(&pxp->activation));
		restart = true;
		goto out_unlock;
	case XE_PXP_NEEDS_TERMINATION:
		mark_termination_in_progress(pxp);
		break;
	case XE_PXP_TERMINATION_IN_PROGRESS:
	case XE_PXP_NEEDS_ADDITIONAL_TERMINATION:
		/* If a termination is in progress then the completion must not be done */
		XE_WARN_ON(completion_done(&pxp->termination));
		restart = true;
		goto out_unlock;
	case XE_PXP_SUSPENDED:
	default:
		drm_err(&pxp->xe->drm, "unexpected state during PXP start: %u\n", pxp->status);
		ret = -EIO;
		goto out_unlock;
	}

	mutex_unlock(&pxp->mutex);

	if (!completion_done(&pxp->termination)) {
		ret = pxp_terminate_hw(pxp);
		if (ret) {
			drm_err(&pxp->xe->drm, "PXP termination failed before start\n");
			mutex_lock(&pxp->mutex);
			pxp->status = XE_PXP_ERROR;

			goto out_unlock;
		}

		goto wait_for_idle;
	}

	/* All the cases except for start should have exited earlier */
	XE_WARN_ON(completion_done(&pxp->activation));
	ret = __pxp_start_arb_session(pxp);

	mutex_lock(&pxp->mutex);

	complete_all(&pxp->activation);

	/*
	 * Any other process should wait until the state goes away from
	 * XE_PXP_START_IN_PROGRESS, so if the state is no longer that then
	 * something went wrong. Mark the status as needing termination and
	 * try again.
	 */
	if (pxp->status != XE_PXP_START_IN_PROGRESS) {
		drm_err(&pxp->xe->drm, "unexpected state after PXP start: %u\n", pxp->status);
		pxp->status = XE_PXP_NEEDS_TERMINATION;
		restart = true;
		goto out_unlock;
	}

	/* If everything went ok, update the status; the caller will add the queue to the list */
	if (!ret)
		pxp->status = XE_PXP_ACTIVE;
	else
		pxp->status = XE_PXP_ERROR;

out_unlock:
	mutex_unlock(&pxp->mutex);

	if (restart)
		goto wait_for_idle;

	return ret;
}

/**
 * xe_pxp_exec_queue_add - add a queue to the PXP list
 * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
 * @q: the queue to add to the list
 *
 * If PXP is enabled and the prerequisites are done, start the PXP default
 * session (if not already running) and add the queue to the PXP list.
 *
 * Returns 0 if the PXP session is running and the queue is in the list,
 * -ENODEV if PXP is disabled, -EBUSY if the PXP prerequisites are not done,
 * or another errno value if something goes wrong during the session start.
 */
int xe_pxp_exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
{
	int ret;

	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	/*
	 * Runtime suspend kills PXP, so we take a reference to prevent it from
	 * happening while we have active queues that use PXP
	 */
	xe_pm_runtime_get(pxp->xe);

start:
	ret = pxp_start(pxp, q->pxp.type);

	if (!ret) {
		ret = __exec_queue_add(pxp, q);
		if (ret == -EBUSY)
			goto start;
	}

	/*
	 * in the successful case the PM ref is released from
	 * xe_pxp_exec_queue_remove
	 */
	if (ret)
		xe_pm_runtime_put(pxp->xe);

	return ret;
}

static void __pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q, bool lock)
{
	bool need_pm_put = false;

	if (!xe_pxp_is_enabled(pxp))
		return;

	if (lock)
		spin_lock_irq(&pxp->queues.lock);

	if (!list_empty(&q->pxp.link)) {
		list_del_init(&q->pxp.link);
		need_pm_put = true;
	}

	q->pxp.type = DRM_XE_PXP_TYPE_NONE;

	if (lock)
		spin_unlock_irq(&pxp->queues.lock);

	if (need_pm_put)
		xe_pm_runtime_put(pxp->xe);
}

/**
 * xe_pxp_exec_queue_remove - remove a queue from the PXP list
 * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
 * @q: the queue to remove from the list
 *
 * If PXP is enabled and the exec_queue is in the list, the queue will be
 * removed from the list and its PM reference will be released. It is safe to
 * call this function multiple times for the same queue.
 */
void xe_pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q)
{
	__pxp_exec_queue_remove(pxp, q, true);
}

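/*
 * Kill all the queues on the PXP list in two passes: first, under the queues
 * lock, move every queue we can still take a reference to onto a private
 * list; then, outside the lock, kill each moved queue and drop it from PXP
 * tracking.
 */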
static void pxp_invalidate_queues(struct xe_pxp *pxp)
{
	struct xe_exec_queue *tmp, *q;
	LIST_HEAD(to_clean);

	spin_lock_irq(&pxp->queues.lock);

	list_for_each_entry_safe(q, tmp, &pxp->queues.list, pxp.link) {
		q = xe_exec_queue_get_unless_zero(q);
		if (!q)
			continue;

		list_move_tail(&q->pxp.link, &to_clean);
	}
	spin_unlock_irq(&pxp->queues.lock);

	list_for_each_entry_safe(q, tmp, &to_clean, pxp.link) {
		xe_exec_queue_kill(q);

		/*
		 * We hold a ref to the queue so there is no risk of racing with
		 * the calls to exec_queue_remove coming from exec_queue_destroy.
		 */
		__pxp_exec_queue_remove(pxp, q, false);

		xe_exec_queue_put(q);
	}
}

/**
 * xe_pxp_key_assign - mark a BO as using the current PXP key iteration
 * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
 * @bo: the BO to mark
 *
 * Returns: -ENODEV if PXP is disabled, 0 otherwise.
 */
int xe_pxp_key_assign(struct xe_pxp *pxp, struct xe_bo *bo)
{
	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	xe_assert(pxp->xe, !bo->pxp_key_instance);

	/*
	 * Note that the PXP key handling is inherently racy, because the key
	 * can theoretically change at any time (although it's unlikely to do
	 * so without triggers), even right after we copy it. Taking a lock
	 * wouldn't help because the value might still change as soon as we
	 * release the lock.
	 * Userspace needs to handle the fact that their BOs can go invalid at
	 * any point.
	 */
	bo->pxp_key_instance = pxp->key_instance;

	return 0;
}

/**
 * xe_pxp_bo_key_check - check if the key used by a xe_bo is valid
 * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
 * @bo: the BO we want to check
 *
 * Checks whether a BO was encrypted with the current key or an obsolete one.
 *
 * Returns: 0 if the key is valid, -ENODEV if PXP is disabled, -EINVAL if the
 * BO is not using PXP, -ENOEXEC if the key is not valid.
 */
int xe_pxp_bo_key_check(struct xe_pxp *pxp, struct xe_bo *bo)
{
	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	if (!xe_bo_is_protected(bo))
		return -EINVAL;

	xe_assert(pxp->xe, bo->pxp_key_instance);

	/*
	 * Note that the PXP key handling is inherently racy, because the key
	 * can theoretically change at any time (although it's unlikely to do
	 * so without triggers), even right after we check it. Taking a lock
	 * wouldn't help because the value might still change as soon as we
	 * release the lock.
	 * We mitigate the risk by checking the key at multiple points (on each
	 * submission involving the BO and right before flipping it on the
	 * display), but there is still a very small chance that we could
	 * operate on an invalid BO for a single submission or a single frame
	 * flip. This is a compromise made to protect the encrypted data (which
	 * is what the key termination is for).
	 */
	if (bo->pxp_key_instance != pxp->key_instance)
		return -ENOEXEC;

	return 0;
}

/**
 * xe_pxp_obj_key_check - check if the key used by a drm_gem_obj is valid
 * @obj: the drm_gem_obj we want to check
 *
 * Checks whether a drm_gem_obj was encrypted with the current key or an
 * obsolete one.
 *
 * Returns: 0 if the key is valid, -ENODEV if PXP is disabled, -EINVAL if the
 * obj is not using PXP, -ENOEXEC if the key is not valid.
 */
int xe_pxp_obj_key_check(struct drm_gem_object *obj)
{
	struct xe_bo *bo = gem_to_xe_bo(obj);
	struct xe_device *xe = xe_bo_device(bo);
	struct xe_pxp *pxp = xe->pxp;

	return xe_pxp_bo_key_check(pxp, bo);
}

846  * xe_pxp_pm_suspend - prepare PXP for HW suspend
847  * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
848  *
849  * Makes sure all PXP actions have completed and invalidates all PXP queues
850  * and objects before we go into a suspend state.
851  *
852  * Returns: 0 if successful, a negative errno value otherwise.
853  */
xe_pxp_pm_suspend(struct xe_pxp * pxp)854 int xe_pxp_pm_suspend(struct xe_pxp *pxp)
855 {
856 	bool needs_queue_inval = false;
857 	int ret = 0;
858 
859 	if (!xe_pxp_is_enabled(pxp))
860 		return 0;
861 
862 wait_for_activation:
863 	if (!wait_for_completion_timeout(&pxp->activation,
864 					 msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS)))
865 		ret = -ETIMEDOUT;
866 
867 	mutex_lock(&pxp->mutex);
868 
869 	switch (pxp->status) {
870 	case XE_PXP_ERROR:
871 	case XE_PXP_READY_TO_START:
872 	case XE_PXP_SUSPENDED:
873 	case XE_PXP_TERMINATION_IN_PROGRESS:
874 	case XE_PXP_NEEDS_ADDITIONAL_TERMINATION:
875 		/*
876 		 * If PXP is not running there is nothing to cleanup. If there
877 		 * is a termination pending then no need to issue another one.
878 		 */
879 		break;
880 	case XE_PXP_START_IN_PROGRESS:
881 		mutex_unlock(&pxp->mutex);
882 		goto wait_for_activation;
883 	case XE_PXP_NEEDS_TERMINATION:
884 		/* If PXP was never used we can skip the cleanup */
885 		if (pxp->key_instance == pxp->last_suspend_key_instance)
886 			break;
887 		fallthrough;
888 	case XE_PXP_ACTIVE:
889 		pxp->key_instance++;
890 		needs_queue_inval = true;
891 		break;
892 	default:
893 		drm_err(&pxp->xe->drm, "unexpected state during PXP suspend: %u",
894 			pxp->status);
895 		ret = -EIO;
896 		goto out;
897 	}
898 
899 	/*
900 	 * We set this even if we were in error state, hoping the suspend clears
901 	 * the error. Worse case we fail again and go in error state again.
902 	 */
903 	pxp->status = XE_PXP_SUSPENDED;
904 
905 	mutex_unlock(&pxp->mutex);
906 
907 	if (needs_queue_inval)
908 		pxp_invalidate_queues(pxp);
909 
910 	/*
911 	 * if there is a termination in progress, wait for it.
912 	 * We need to wait outside the lock because the completion is done from
913 	 * within the lock
914 	 */
915 	if (!wait_for_completion_timeout(&pxp->termination,
916 					 msecs_to_jiffies(PXP_TERMINATION_TIMEOUT_MS)))
917 		ret = -ETIMEDOUT;
918 
919 	pxp->last_suspend_key_instance = pxp->key_instance;
920 
921 out:
922 	return ret;
923 }
924 
/**
 * xe_pxp_pm_resume - re-init PXP after HW suspend
 * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
 */
void xe_pxp_pm_resume(struct xe_pxp *pxp)
{
	int err;

	if (!xe_pxp_is_enabled(pxp))
		return;

	err = kcr_pxp_enable(pxp);

	mutex_lock(&pxp->mutex);

	xe_assert(pxp->xe, pxp->status == XE_PXP_SUSPENDED);

	if (err)
		pxp->status = XE_PXP_ERROR;
	else
		pxp->status = XE_PXP_NEEDS_TERMINATION;

	mutex_unlock(&pxp->mutex);
}
949