// SPDX-License-Identifier: MIT
/*
 * Copyright(c) 2024 Intel Corporation.
 */

#include "xe_pxp.h"

#include <drm/drm_managed.h>
#include <uapi/drm/xe_drm.h>

#include "xe_bo.h"
#include "xe_bo_types.h"
#include "xe_device_types.h"
#include "xe_exec_queue.h"
#include "xe_force_wake.h"
#include "xe_guc_submit.h"
#include "xe_gsc_proxy.h"
#include "xe_gt.h"
#include "xe_gt_types.h"
#include "xe_huc.h"
#include "xe_mmio.h"
#include "xe_pm.h"
#include "xe_pxp_submit.h"
#include "xe_pxp_types.h"
#include "xe_uc_fw.h"
#include "regs/xe_irq_regs.h"
#include "regs/xe_pxp_regs.h"

/**
 * DOC: PXP
 *
 * PXP (Protected Xe Path) allows execution and flipping to the display of
 * protected (i.e. encrypted) objects. This feature is currently only
 * supported on integrated parts.
 */
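
/*
 * A high-level summary of the flow implemented below (derived from the code
 * in this file, not an additional contract):
 *
 * 1. xe_pxp_init() enables the KCR HW and allocates the GSC/VCS submission
 *    resources.
 * 2. Userspace polls readiness via the query uapi, which is backed by
 *    xe_pxp_get_readiness_status().
 * 3. Adding the first PXP-marked exec queue starts the HWDRM ARB session
 *    (xe_pxp_exec_queue_add()).
 * 4. A termination (requested by the HW/FW or triggered on suspend)
 *    invalidates all PXP queues and bumps the key instance; the session is
 *    then restarted on the next queue add.
 */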

#define ARB_SESSION DRM_XE_PXP_HWDRM_DEFAULT_SESSION /* shorter define */

/*
 * A submission to GSC can take up to 250ms to complete, so use a 300ms
 * timeout for activation where only one of those is involved. Termination
 * additionally requires a submission to VCS and an interaction with KCR, so
 * bump the timeout to 500ms for that.
 */
#define PXP_ACTIVATION_TIMEOUT_MS 300
#define PXP_TERMINATION_TIMEOUT_MS 500

bool xe_pxp_is_supported(const struct xe_device *xe)
{
	return xe->info.has_pxp && IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY);
}

bool xe_pxp_is_enabled(const struct xe_pxp *pxp)
{
	return pxp;
}

static bool pxp_prerequisites_done(const struct xe_pxp *pxp)
{
	struct xe_gt *gt = pxp->gt;
	unsigned int fw_ref;
	bool ready;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);

	/*
	 * If force_wake fails we could falsely report the prerequisites as not
	 * done even if they are; the consequence of this would be that the
	 * callers won't go ahead with using PXP, but if force_wake doesn't
	 * work the GT is very likely in a bad state, so aborting PXP is not
	 * really a problem. Therefore, we can just log the force_wake error
	 * and not escalate it.
	 */
	XE_WARN_ON(!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL));

	/* PXP requires both HuC authentication via GSC and GSC proxy initialized */
	ready = xe_huc_is_authenticated(&gt->uc.huc, XE_HUC_AUTH_VIA_GSC) &&
		xe_gsc_proxy_init_done(&gt->uc.gsc);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);

	return ready;
}

/**
 * xe_pxp_get_readiness_status - check whether PXP is ready for userspace use
 * @pxp: the xe_pxp pointer (can be NULL if PXP is disabled)
 *
 * Returns: 0 if PXP is not ready yet, 1 if it is ready, a negative errno value
 * if PXP is not supported/enabled or if something went wrong in the
 * initialization of the prerequisites. Note that the return values of this
 * function follow the uapi (see drm_xe_query_pxp_status), so they can be used
 * directly in the query ioctl.
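 *
 * A minimal sketch of how a query handler might consume this value
 * (illustrative only; the resp variable and its status member are assumed
 * here, following the uapi struct drm_xe_query_pxp_status):
 *
 *	ret = xe_pxp_get_readiness_status(xe->pxp);
 *	if (ret < 0)
 *		return ret;
 *	resp.status = ret;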
 */
int xe_pxp_get_readiness_status(struct xe_pxp *pxp)
{
	int ret = 0;

	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	/* if the GSC or HuC FW are in an error state, PXP will never work */
	if (xe_uc_fw_status_to_error(pxp->gt->uc.huc.fw.status) ||
	    xe_uc_fw_status_to_error(pxp->gt->uc.gsc.fw.status))
		return -EIO;

	xe_pm_runtime_get(pxp->xe);

	/* PXP requires both HuC loaded and GSC proxy initialized */
	if (pxp_prerequisites_done(pxp))
		ret = 1;

	xe_pm_runtime_put(pxp->xe);
	return ret;
}

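/*
 * KCR_SIP exposes one "session in play" bit per HWDRM session slot, so both
 * checking and waiting for a session state reduce to testing BIT(id) in that
 * register.
 */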
static bool pxp_session_is_in_play(struct xe_pxp *pxp, u32 id)
{
	struct xe_gt *gt = pxp->gt;

	return xe_mmio_read32(&gt->mmio, KCR_SIP) & BIT(id);
}

static int pxp_wait_for_session_state(struct xe_pxp *pxp, u32 id, bool in_play)
{
	struct xe_gt *gt = pxp->gt;
	u32 mask = BIT(id);

	return xe_mmio_wait32(&gt->mmio, KCR_SIP, mask, in_play ? mask : 0,
			      250, NULL, false);
}

static void pxp_invalidate_queues(struct xe_pxp *pxp);

static int pxp_terminate_hw(struct xe_pxp *pxp)
{
	struct xe_gt *gt = pxp->gt;
	unsigned int fw_ref;
	int ret = 0;

	drm_dbg(&pxp->xe->drm, "Terminating PXP\n");

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
		ret = -EIO;
		goto out;
	}

	/* terminate the hw session */
	ret = xe_pxp_submit_session_termination(pxp, ARB_SESSION);
	if (ret)
		goto out;

	ret = pxp_wait_for_session_state(pxp, ARB_SESSION, false);
	if (ret)
		goto out;

	/* Trigger full HW cleanup */
	xe_mmio_write32(&gt->mmio, KCR_GLOBAL_TERMINATE, 1);

	/* now we can tell the GSC to clean up its own state */
	ret = xe_pxp_submit_session_invalidation(&pxp->gsc_res, ARB_SESSION);

out:
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return ret;
}

static void mark_termination_in_progress(struct xe_pxp *pxp)
{
	lockdep_assert_held(&pxp->mutex);

	reinit_completion(&pxp->termination);
	pxp->status = XE_PXP_TERMINATION_IN_PROGRESS;
}

static void pxp_terminate(struct xe_pxp *pxp)
{
	int ret = 0;
	struct xe_device *xe = pxp->xe;

	if (!wait_for_completion_timeout(&pxp->activation,
					 msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS)))
		drm_err(&xe->drm, "failed to wait for PXP start before termination\n");

	mutex_lock(&pxp->mutex);

	if (pxp->status == XE_PXP_ACTIVE)
		pxp->key_instance++;

	/*
	 * we'll mark the status as needing termination on resume, so no need
	 * to emit a termination now.
	 */
	if (pxp->status == XE_PXP_SUSPENDED) {
		mutex_unlock(&pxp->mutex);
		return;
	}

	/*
	 * If we have a termination already in progress, we need to wait for
	 * it to complete before queueing another one. Once the first
	 * termination is completed we'll set the state back to
	 * NEEDS_TERMINATION and leave it to the pxp start code to issue it.
	 */
	if (pxp->status == XE_PXP_TERMINATION_IN_PROGRESS) {
		pxp->status = XE_PXP_NEEDS_ADDITIONAL_TERMINATION;
		mutex_unlock(&pxp->mutex);
		return;
	}

	mark_termination_in_progress(pxp);

	mutex_unlock(&pxp->mutex);

	pxp_invalidate_queues(pxp);

	ret = pxp_terminate_hw(pxp);
	if (ret) {
		drm_err(&xe->drm, "PXP termination failed: %pe\n", ERR_PTR(ret));
		mutex_lock(&pxp->mutex);
		pxp->status = XE_PXP_ERROR;
		complete_all(&pxp->termination);
		mutex_unlock(&pxp->mutex);
	}
}

static void pxp_terminate_complete(struct xe_pxp *pxp)
{
	/*
	 * We expect PXP to be in one of 3 states when we get here:
	 * - XE_PXP_TERMINATION_IN_PROGRESS: a single termination event was
	 *   requested and it is now completing, so we're ready to start.
	 * - XE_PXP_NEEDS_ADDITIONAL_TERMINATION: a second termination was
	 *   requested while the first one was still being processed.
	 * - XE_PXP_SUSPENDED: PXP is now suspended, so we defer everything to
	 *   when we come back on resume.
	 */
	mutex_lock(&pxp->mutex);

	switch (pxp->status) {
	case XE_PXP_TERMINATION_IN_PROGRESS:
		pxp->status = XE_PXP_READY_TO_START;
		break;
	case XE_PXP_NEEDS_ADDITIONAL_TERMINATION:
		pxp->status = XE_PXP_NEEDS_TERMINATION;
		break;
	case XE_PXP_SUSPENDED:
		/* Nothing to do */
		break;
	default:
		drm_err(&pxp->xe->drm,
			"PXP termination complete while status was %u\n",
			pxp->status);
	}

	complete_all(&pxp->termination);

	mutex_unlock(&pxp->mutex);
}

static void pxp_irq_work(struct work_struct *work)
{
	struct xe_pxp *pxp = container_of(work, typeof(*pxp), irq.work);
	struct xe_device *xe = pxp->xe;
	u32 events = 0;

	spin_lock_irq(&xe->irq.lock);
	events = pxp->irq.events;
	pxp->irq.events = 0;
	spin_unlock_irq(&xe->irq.lock);

	if (!events)
		return;

	/*
	 * If we're processing a termination irq while suspending then don't
	 * bother, we're going to re-init everything on resume anyway.
	 */
	if ((events & PXP_TERMINATION_REQUEST) && !xe_pm_runtime_get_if_active(xe))
		return;

	if (events & PXP_TERMINATION_REQUEST) {
		events &= ~PXP_TERMINATION_COMPLETE;
		pxp_terminate(pxp);
	}

	if (events & PXP_TERMINATION_COMPLETE)
		pxp_terminate_complete(pxp);

	if (events & PXP_TERMINATION_REQUEST)
		xe_pm_runtime_put(xe);
}

/**
 * xe_pxp_irq_handler - Handles PXP interrupts.
 * @xe: the xe_device structure
 * @iir: interrupt vector
 */
void xe_pxp_irq_handler(struct xe_device *xe, u16 iir)
{
	struct xe_pxp *pxp = xe->pxp;

	if (!xe_pxp_is_enabled(pxp)) {
		drm_err(&xe->drm, "PXP irq 0x%x received with PXP disabled!\n", iir);
		return;
	}

	lockdep_assert_held(&xe->irq.lock);

	if (unlikely(!iir))
		return;

	if (iir & (KCR_PXP_STATE_TERMINATED_INTERRUPT |
		   KCR_APP_TERMINATED_PER_FW_REQ_INTERRUPT))
		pxp->irq.events |= PXP_TERMINATION_REQUEST;

	if (iir & KCR_PXP_STATE_RESET_COMPLETE_INTERRUPT)
		pxp->irq.events |= PXP_TERMINATION_COMPLETE;

	if (pxp->irq.events)
		queue_work(pxp->irq.wq, &pxp->irq.work);
}

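/*
 * KCR_INIT is a masked register: the upper 16 bits select which of the lower
 * 16 bits get written, which is why the value below is built with
 * _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() instead of a read-modify-write.
 */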
static int kcr_pxp_set_status(const struct xe_pxp *pxp, bool enable)
{
	u32 val = enable ? _MASKED_BIT_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES) :
		  _MASKED_BIT_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES);
	unsigned int fw_ref;

	fw_ref = xe_force_wake_get(gt_to_fw(pxp->gt), XE_FW_GT);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT))
		return -EIO;

	xe_mmio_write32(&pxp->gt->mmio, KCR_INIT, val);
	xe_force_wake_put(gt_to_fw(pxp->gt), fw_ref);

	return 0;
}

static int kcr_pxp_enable(const struct xe_pxp *pxp)
{
	return kcr_pxp_set_status(pxp, true);
}

static int kcr_pxp_disable(const struct xe_pxp *pxp)
{
	return kcr_pxp_set_status(pxp, false);
}

static void pxp_fini(void *arg)
{
	struct xe_pxp *pxp = arg;

	destroy_workqueue(pxp->irq.wq);
	xe_pxp_destroy_execution_resources(pxp);

	/* no need to explicitly disable KCR since we're going to do an FLR */
}

/**
 * xe_pxp_init - initialize PXP support
 * @xe: the xe_device structure
 *
 * Initialize the HW state and allocate the objects required for PXP support.
 * Note that some of the requirements for PXP support (GSC proxy init, HuC
 * auth) are performed asynchronously as part of the GSC init. PXP can only
 * be used after both this function and the async worker have completed.
 *
 * Returns 0 if PXP is not supported or if PXP initialization is successful,
 * a negative errno value if there is an error during the init.
 */
int xe_pxp_init(struct xe_device *xe)
{
	struct xe_gt *gt = xe->tiles[0].media_gt;
	struct xe_pxp *pxp;
	int err;

	if (!xe_pxp_is_supported(xe))
		return 0;

	/* we only support PXP on single tile devices with a media GT */
	if (xe->info.tile_count > 1 || !gt)
		return 0;

	/* The GSCCS is required for submissions to the GSC FW */
	if (!(gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0)))
		return 0;

	/* PXP requires both GSC and HuC firmwares to be available */
	if (!xe_uc_fw_is_loadable(&gt->uc.gsc.fw) ||
	    !xe_uc_fw_is_loadable(&gt->uc.huc.fw)) {
		drm_info(&xe->drm, "skipping PXP init due to missing FW dependencies\n");
		return 0;
	}

	pxp = drmm_kzalloc(&xe->drm, sizeof(struct xe_pxp), GFP_KERNEL);
	if (!pxp) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&pxp->queues.list);
	spin_lock_init(&pxp->queues.lock);
	INIT_WORK(&pxp->irq.work, pxp_irq_work);
	pxp->xe = xe;
	pxp->gt = gt;

	pxp->key_instance = 1;
	pxp->last_suspend_key_instance = 1;

	/*
	 * we'll use the completions to check if there is an action pending,
	 * so we start them as completed and we reinit them when an action is
	 * triggered.
	 */
	init_completion(&pxp->activation);
	init_completion(&pxp->termination);
	complete_all(&pxp->termination);
	complete_all(&pxp->activation);

	mutex_init(&pxp->mutex);

	pxp->irq.wq = alloc_ordered_workqueue("pxp-wq", 0);
	if (!pxp->irq.wq) {
		err = -ENOMEM;
		goto out_free;
	}

	err = kcr_pxp_enable(pxp);
	if (err)
		goto out_wq;

	err = xe_pxp_allocate_execution_resources(pxp);
	if (err)
		goto out_kcr_disable;

	xe->pxp = pxp;

	return devm_add_action_or_reset(xe->drm.dev, pxp_fini, pxp);

out_kcr_disable:
	kcr_pxp_disable(pxp);
out_wq:
	destroy_workqueue(pxp->irq.wq);
out_free:
	drmm_kfree(&xe->drm, pxp);
out:
	drm_err(&xe->drm, "PXP initialization failed: %pe\n", ERR_PTR(err));
	return err;
}

static int __pxp_start_arb_session(struct xe_pxp *pxp)
{
	int ret;
	unsigned int fw_ref;

	fw_ref = xe_force_wake_get(gt_to_fw(pxp->gt), XE_FW_GT);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT))
		return -EIO;

	if (pxp_session_is_in_play(pxp, ARB_SESSION)) {
		ret = -EEXIST;
		goto out_force_wake;
	}

	ret = xe_pxp_submit_session_init(&pxp->gsc_res, ARB_SESSION);
	if (ret) {
		drm_err(&pxp->xe->drm, "Failed to init PXP arb session: %pe\n", ERR_PTR(ret));
		goto out_force_wake;
	}

	ret = pxp_wait_for_session_state(pxp, ARB_SESSION, true);
	if (ret) {
		drm_err(&pxp->xe->drm, "PXP ARB session failed to go in play: %pe\n", ERR_PTR(ret));
		goto out_force_wake;
	}

	drm_dbg(&pxp->xe->drm, "PXP ARB session is active\n");

out_force_wake:
	xe_force_wake_put(gt_to_fw(pxp->gt), fw_ref);
	return ret;
}

/**
 * xe_pxp_exec_queue_set_type - Mark a queue as using PXP
 * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
 * @q: the queue to mark as using PXP
 * @type: the type of PXP session this queue will use
 *
 * Returns 0 if the selected PXP type is supported, -ENODEV otherwise.
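 *
 * A minimal sketch of the expected call order when setting up a protected
 * queue (illustrative only; q is assumed to be a freshly created queue and
 * error handling is omitted):
 *
 *	err = xe_pxp_exec_queue_set_type(xe->pxp, q, DRM_XE_PXP_TYPE_HWDRM);
 *	if (!err)
 *		err = xe_pxp_exec_queue_add(xe->pxp, q);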
 */
int xe_pxp_exec_queue_set_type(struct xe_pxp *pxp, struct xe_exec_queue *q, u8 type)
{
	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	/* we only support HWDRM sessions right now */
	xe_assert(pxp->xe, type == DRM_XE_PXP_TYPE_HWDRM);

	q->pxp.type = type;

	return 0;
}

static void __exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
{
	spin_lock_irq(&pxp->queues.lock);
	list_add_tail(&q->pxp.link, &pxp->queues.list);
	spin_unlock_irq(&pxp->queues.lock);
}

/**
 * xe_pxp_exec_queue_add - add a queue to the PXP list
 * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
 * @q: the queue to add to the list
 *
 * If PXP is enabled and the prerequisites are done, start the PXP ARB
 * session (if not already running) and add the queue to the PXP list. Note
 * that the queue must have previously been marked as using PXP with
 * xe_pxp_exec_queue_set_type.
 *
 * Returns 0 if the PXP ARB session is running and the queue is in the list,
 * -ENODEV if PXP is disabled, -EBUSY if the PXP prerequisites are not done,
 * other errno value if something goes wrong during the session start.
 */
int xe_pxp_exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
{
	int ret = 0;

	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	/* we only support HWDRM sessions right now */
	xe_assert(pxp->xe, q->pxp.type == DRM_XE_PXP_TYPE_HWDRM);

	/*
	 * Runtime suspend kills PXP, so we take a reference to prevent it from
	 * happening while we have active queues that use PXP.
	 */
	xe_pm_runtime_get(pxp->xe);

	/* get_readiness_status() returns 0 for in-progress and 1 for done */
	ret = xe_pxp_get_readiness_status(pxp);
	if (ret <= 0) {
		if (!ret)
			ret = -EBUSY;
		goto out;
	}
	ret = 0;

wait_for_idle:
	/*
	 * if there is an action in progress, wait for it. We need to wait
	 * outside the lock because the completion is done from within the
	 * lock. Note that the two actions should never be pending at the
	 * same time.
	 */
	if (!wait_for_completion_timeout(&pxp->termination,
					 msecs_to_jiffies(PXP_TERMINATION_TIMEOUT_MS))) {
		ret = -ETIMEDOUT;
		goto out;
	}

	if (!wait_for_completion_timeout(&pxp->activation,
					 msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS))) {
		ret = -ETIMEDOUT;
		goto out;
	}

	mutex_lock(&pxp->mutex);

	/* If PXP is not already active, turn it on */
	switch (pxp->status) {
	case XE_PXP_ERROR:
		ret = -EIO;
		break;
	case XE_PXP_ACTIVE:
		__exec_queue_add(pxp, q);
		mutex_unlock(&pxp->mutex);
		goto out;
	case XE_PXP_READY_TO_START:
		pxp->status = XE_PXP_START_IN_PROGRESS;
		reinit_completion(&pxp->activation);
		break;
	case XE_PXP_START_IN_PROGRESS:
		/* If a start is in progress then the completion must not be done */
		XE_WARN_ON(completion_done(&pxp->activation));
		mutex_unlock(&pxp->mutex);
		goto wait_for_idle;
	case XE_PXP_NEEDS_TERMINATION:
		mark_termination_in_progress(pxp);
		break;
	case XE_PXP_TERMINATION_IN_PROGRESS:
	case XE_PXP_NEEDS_ADDITIONAL_TERMINATION:
		/* If a termination is in progress then the completion must not be done */
		XE_WARN_ON(completion_done(&pxp->termination));
		mutex_unlock(&pxp->mutex);
		goto wait_for_idle;
	case XE_PXP_SUSPENDED:
	default:
		drm_err(&pxp->xe->drm, "unexpected state during PXP start: %u\n", pxp->status);
		ret = -EIO;
		break;
	}

	mutex_unlock(&pxp->mutex);

	if (ret)
		goto out;

	if (!completion_done(&pxp->termination)) {
		ret = pxp_terminate_hw(pxp);
		if (ret) {
			drm_err(&pxp->xe->drm, "PXP termination failed before start\n");
			mutex_lock(&pxp->mutex);
			pxp->status = XE_PXP_ERROR;
			mutex_unlock(&pxp->mutex);

			goto out;
		}

		goto wait_for_idle;
	}

	/* All the cases except for start should have exited earlier */
	XE_WARN_ON(completion_done(&pxp->activation));
	ret = __pxp_start_arb_session(pxp);

	mutex_lock(&pxp->mutex);

	complete_all(&pxp->activation);

	/*
	 * Any other process should wait until the state moves away from
	 * XE_PXP_START_IN_PROGRESS, so if the state is anything else at this
	 * point something went wrong. Mark the status as needing termination
	 * and try again.
	 */
	if (pxp->status != XE_PXP_START_IN_PROGRESS) {
		drm_err(&pxp->xe->drm, "unexpected state after PXP start: %u\n", pxp->status);
		pxp->status = XE_PXP_NEEDS_TERMINATION;
		mutex_unlock(&pxp->mutex);
		goto wait_for_idle;
	}

	/* If everything went ok, update the status and add the queue to the list */
	if (!ret) {
		pxp->status = XE_PXP_ACTIVE;
		__exec_queue_add(pxp, q);
	} else {
		pxp->status = XE_PXP_ERROR;
	}

	mutex_unlock(&pxp->mutex);

out:
	/*
	 * In the successful case the PM ref is released from
	 * xe_pxp_exec_queue_remove.
	 */
	if (ret)
		xe_pm_runtime_put(pxp->xe);

	return ret;
}

static void __pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q, bool lock)
{
	bool need_pm_put = false;

	if (!xe_pxp_is_enabled(pxp))
		return;

	if (lock)
		spin_lock_irq(&pxp->queues.lock);

	if (!list_empty(&q->pxp.link)) {
		list_del_init(&q->pxp.link);
		need_pm_put = true;
	}

	q->pxp.type = DRM_XE_PXP_TYPE_NONE;

	if (lock)
		spin_unlock_irq(&pxp->queues.lock);

	if (need_pm_put)
		xe_pm_runtime_put(pxp->xe);
}

/**
 * xe_pxp_exec_queue_remove - remove a queue from the PXP list
 * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
 * @q: the queue to remove from the list
 *
 * If PXP is enabled and the exec_queue is in the list, the queue will be
 * removed from the list and its PM reference will be released. It is safe to
 * call this function multiple times for the same queue.
 */
void xe_pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q)
{
	__pxp_exec_queue_remove(pxp, q, true);
}

static void pxp_invalidate_queues(struct xe_pxp *pxp)
{
	struct xe_exec_queue *tmp, *q;
	LIST_HEAD(to_clean);

	spin_lock_irq(&pxp->queues.lock);

	list_for_each_entry_safe(q, tmp, &pxp->queues.list, pxp.link) {
		q = xe_exec_queue_get_unless_zero(q);
		if (!q)
			continue;

		list_move_tail(&q->pxp.link, &to_clean);
	}
	spin_unlock_irq(&pxp->queues.lock);

	list_for_each_entry_safe(q, tmp, &to_clean, pxp.link) {
		xe_exec_queue_kill(q);

		/*
		 * We hold a ref to the queue so there is no risk of racing with
		 * the calls to exec_queue_remove coming from exec_queue_destroy.
		 */
		__pxp_exec_queue_remove(pxp, q, false);

		xe_exec_queue_put(q);
	}
}

/**
 * xe_pxp_key_assign - mark a BO as using the current PXP key iteration
 * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
 * @bo: the BO to mark
 *
 * Returns: -ENODEV if PXP is disabled, 0 otherwise.
 */
int xe_pxp_key_assign(struct xe_pxp *pxp, struct xe_bo *bo)
{
	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	xe_assert(pxp->xe, !bo->pxp_key_instance);

	/*
	 * Note that the PXP key handling is inherently racy, because the key
	 * can theoretically change at any time (although it's unlikely to do
	 * so without triggers), even right after we copy it. Taking a lock
	 * wouldn't help because the value might still change as soon as we
	 * release the lock.
	 * Userspace needs to handle the fact that their BOs can go invalid at
	 * any point.
	 */
	bo->pxp_key_instance = pxp->key_instance;

	return 0;
}

/**
 * xe_pxp_bo_key_check - check if the key used by a xe_bo is valid
 * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
 * @bo: the BO we want to check
 *
 * Checks whether a BO was encrypted with the current key or an obsolete one.
 *
 * Returns: 0 if the key is valid, -ENODEV if PXP is disabled, -EINVAL if the
 * BO is not using PXP, -ENOEXEC if the key is not valid.
 */
int xe_pxp_bo_key_check(struct xe_pxp *pxp, struct xe_bo *bo)
{
	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	if (!xe_bo_is_protected(bo))
		return -EINVAL;

	xe_assert(pxp->xe, bo->pxp_key_instance);

	/*
	 * Note that the PXP key handling is inherently racy, because the key
	 * can theoretically change at any time (although it's unlikely to do
	 * so without triggers), even right after we check it. Taking a lock
	 * wouldn't help because the value might still change as soon as we
	 * release the lock.
	 * We mitigate the risk by checking the key at multiple points (on each
	 * submission involving the BO and right before flipping it on the
	 * display), but there is still a very small chance that we could
	 * operate on an invalid BO for a single submission or a single frame
	 * flip. This is a compromise made to protect the encrypted data (which
	 * is what the key termination is for).
	 */
	if (bo->pxp_key_instance != pxp->key_instance)
		return -ENOEXEC;

	return 0;
}

/**
 * xe_pxp_obj_key_check - check if the key used by a drm_gem_obj is valid
 * @obj: the drm_gem_obj we want to check
 *
 * Checks whether a drm_gem_obj was encrypted with the current key or an
 * obsolete one.
 *
 * Returns: 0 if the key is valid, -ENODEV if PXP is disabled, -EINVAL if the
 * obj is not using PXP, -ENOEXEC if the key is not valid.
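 *
 * This wrapper exists so that code which only has a &drm_gem_object (e.g.
 * the display side, right before flipping a framebuffer) can run the same
 * check as xe_pxp_bo_key_check() without needing the xe-specific types.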
 */
int xe_pxp_obj_key_check(struct drm_gem_object *obj)
{
	struct xe_bo *bo = gem_to_xe_bo(obj);
	struct xe_device *xe = xe_bo_device(bo);
	struct xe_pxp *pxp = xe->pxp;

	return xe_pxp_bo_key_check(pxp, bo);
}

/**
 * xe_pxp_pm_suspend - prepare PXP for HW suspend
 * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
 *
 * Makes sure all PXP actions have completed and invalidates all PXP queues
 * and objects before we go into a suspend state.
 *
 * Returns: 0 if successful, a negative errno value otherwise.
 */
int xe_pxp_pm_suspend(struct xe_pxp *pxp)
{
	bool needs_queue_inval = false;
	int ret = 0;

	if (!xe_pxp_is_enabled(pxp))
		return 0;

wait_for_activation:
	if (!wait_for_completion_timeout(&pxp->activation,
					 msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS)))
		ret = -ETIMEDOUT;

	mutex_lock(&pxp->mutex);

	switch (pxp->status) {
	case XE_PXP_ERROR:
	case XE_PXP_READY_TO_START:
	case XE_PXP_SUSPENDED:
	case XE_PXP_TERMINATION_IN_PROGRESS:
	case XE_PXP_NEEDS_ADDITIONAL_TERMINATION:
		/*
		 * If PXP is not running there is nothing to clean up. If there
		 * is a termination pending then no need to issue another one.
		 */
		break;
	case XE_PXP_START_IN_PROGRESS:
		mutex_unlock(&pxp->mutex);
		goto wait_for_activation;
	case XE_PXP_NEEDS_TERMINATION:
		/* If PXP was never used we can skip the cleanup */
		if (pxp->key_instance == pxp->last_suspend_key_instance)
			break;
		fallthrough;
	case XE_PXP_ACTIVE:
		pxp->key_instance++;
		needs_queue_inval = true;
		break;
	default:
		drm_err(&pxp->xe->drm, "unexpected state during PXP suspend: %u\n",
			pxp->status);
		ret = -EIO;
		mutex_unlock(&pxp->mutex);
		goto out;
	}

	/*
	 * We set this even if we were in the error state, hoping the suspend
	 * clears the error. Worst case, we fail again and end up in the error
	 * state again.
	 */
	pxp->status = XE_PXP_SUSPENDED;

	mutex_unlock(&pxp->mutex);

	if (needs_queue_inval)
		pxp_invalidate_queues(pxp);

	/*
	 * If there is a termination in progress, wait for it. We need to wait
	 * outside the lock because the completion is done from within the
	 * lock.
	 */
	if (!wait_for_completion_timeout(&pxp->termination,
					 msecs_to_jiffies(PXP_TERMINATION_TIMEOUT_MS)))
		ret = -ETIMEDOUT;

	pxp->last_suspend_key_instance = pxp->key_instance;

out:
	return ret;
}

/**
 * xe_pxp_pm_resume - re-init PXP after HW suspend
 * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
 */
void xe_pxp_pm_resume(struct xe_pxp *pxp)
{
	int err;

	if (!xe_pxp_is_enabled(pxp))
		return;

	err = kcr_pxp_enable(pxp);

	mutex_lock(&pxp->mutex);

	xe_assert(pxp->xe, pxp->status == XE_PXP_SUSPENDED);

	if (err)
		pxp->status = XE_PXP_ERROR;
	else
		pxp->status = XE_PXP_NEEDS_TERMINATION;

	mutex_unlock(&pxp->mutex);
}