// SPDX-License-Identifier: MIT
/*
 * Copyright(c) 2024 Intel Corporation.
 */

#include "xe_pxp.h"

#include <drm/drm_managed.h>
#include <uapi/drm/xe_drm.h>

#include "xe_bo.h"
#include "xe_bo_types.h"
#include "xe_device_types.h"
#include "xe_exec_queue.h"
#include "xe_force_wake.h"
#include "xe_guc_submit.h"
#include "xe_gsc_proxy.h"
#include "xe_gt.h"
#include "xe_gt_types.h"
#include "xe_huc.h"
#include "xe_mmio.h"
#include "xe_pm.h"
#include "xe_pxp_submit.h"
#include "xe_pxp_types.h"
#include "xe_uc_fw.h"
#include "regs/xe_irq_regs.h"
#include "regs/xe_pxp_regs.h"

/**
 * DOC: PXP
 *
 * PXP (Protected Xe Path) allows execution and flip to display of protected
 * (i.e. encrypted) objects. This feature is currently only supported in
 * integrated parts.
 */

#define ARB_SESSION DRM_XE_PXP_HWDRM_DEFAULT_SESSION /* shorter define */

/*
 * A submission to GSC can take up to 250ms to complete, so use a 300ms
 * timeout for activation where only one of those is involved. Termination
 * additionally requires a submission to VCS and an interaction with KCR, so
 * bump the timeout to 500ms for that.
 */
#define PXP_ACTIVATION_TIMEOUT_MS 300
#define PXP_TERMINATION_TIMEOUT_MS 500

bool xe_pxp_is_supported(const struct xe_device *xe)
{
	return xe->info.has_pxp && IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY);
}

bool xe_pxp_is_enabled(const struct xe_pxp *pxp)
{
	return pxp;
}

static bool pxp_prerequisites_done(const struct xe_pxp *pxp)
{
	struct xe_gt *gt = pxp->gt;
	unsigned int fw_ref;
	bool ready;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);

	/*
	 * If force_wake fails we could falsely report the prerequisites as not
	 * done even if they are; the consequence of this would be that the
	 * callers won't go ahead with using PXP, but if force_wake doesn't work
	 * the GT is very likely in a bad state so not really a problem to abort
	 * PXP. Therefore, we can just log the force_wake error and not escalate
	 * it.
	 */
	XE_WARN_ON(!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL));

	/* PXP requires both HuC authentication via GSC and GSC proxy initialized */
	ready = xe_huc_is_authenticated(&gt->uc.huc, XE_HUC_AUTH_VIA_GSC) &&
		xe_gsc_proxy_init_done(&gt->uc.gsc);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);

	return ready;
}

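/*
 * A hypothetical userspace sketch of how the readiness status below is meant
 * to be consumed, assuming the standard two-call xe query flow and the
 * DRM_XE_DEVICE_QUERY_PXP_STATUS / struct drm_xe_query_pxp_status uapi from
 * uapi/drm/xe_drm.h (error handling omitted):
 *
 *	struct drm_xe_device_query query = {
 *		.query = DRM_XE_DEVICE_QUERY_PXP_STATUS,
 *	};
 *	struct drm_xe_query_pxp_status *pxp_status;
 *
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);	// fills in query.size
 *	pxp_status = malloc(query.size);
 *	query.data = (uintptr_t)pxp_status;
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *
 *	// pxp_status->status mirrors the return value of
 *	// xe_pxp_get_readiness_status(): 0 = not ready yet, 1 = ready,
 *	// negative errno = PXP unusable.
 */
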
/**
 * xe_pxp_get_readiness_status - check whether PXP is ready for userspace use
 * @pxp: the xe_pxp pointer (can be NULL if PXP is disabled)
 *
 * Returns: 0 if PXP is not ready yet, 1 if it is ready, a negative errno value
 * if PXP is not supported/enabled or if something went wrong in the
 * initialization of the prerequisites. Note that the return values of this
 * function follow the uapi (see drm_xe_query_pxp_status), so they can be used
 * directly in the query ioctl.
 */
int xe_pxp_get_readiness_status(struct xe_pxp *pxp)
{
	int ret = 0;

	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	/* if the GSC or HuC FW are in an error state, PXP will never work */
	if (xe_uc_fw_status_to_error(pxp->gt->uc.huc.fw.status) ||
	    xe_uc_fw_status_to_error(pxp->gt->uc.gsc.fw.status))
		return -EIO;

	xe_pm_runtime_get(pxp->xe);

	/* PXP requires both HuC authenticated and GSC proxy initialized */
	if (pxp_prerequisites_done(pxp))
		ret = 1;

	xe_pm_runtime_put(pxp->xe);
	return ret;
}

static bool pxp_session_is_in_play(struct xe_pxp *pxp, u32 id)
{
	struct xe_gt *gt = pxp->gt;

	return xe_mmio_read32(&gt->mmio, KCR_SIP) & BIT(id);
}

static int pxp_wait_for_session_state(struct xe_pxp *pxp, u32 id, bool in_play)
{
	struct xe_gt *gt = pxp->gt;
	u32 mask = BIT(id);

	return xe_mmio_wait32(&gt->mmio, KCR_SIP, mask, in_play ? mask : 0,
			      250, NULL, false);
}

static void pxp_invalidate_queues(struct xe_pxp *pxp);

static int pxp_terminate_hw(struct xe_pxp *pxp)
{
	struct xe_gt *gt = pxp->gt;
	unsigned int fw_ref;
	int ret = 0;

	drm_dbg(&pxp->xe->drm, "Terminating PXP\n");

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
		ret = -EIO;
		goto out;
	}

	/* terminate the hw session */
	ret = xe_pxp_submit_session_termination(pxp, ARB_SESSION);
	if (ret)
		goto out;

	ret = pxp_wait_for_session_state(pxp, ARB_SESSION, false);
	if (ret)
		goto out;

	/* Trigger full HW cleanup */
	xe_mmio_write32(&gt->mmio, KCR_GLOBAL_TERMINATE, 1);

	/* now we can tell the GSC to clean up its own state */
	ret = xe_pxp_submit_session_invalidation(&pxp->gsc_res, ARB_SESSION);

out:
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return ret;
}

static void mark_termination_in_progress(struct xe_pxp *pxp)
{
	lockdep_assert_held(&pxp->mutex);

	reinit_completion(&pxp->termination);
	pxp->status = XE_PXP_TERMINATION_IN_PROGRESS;
}

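/*
 * A rough summary of the status flow implemented by the functions below,
 * derived from how the handlers update pxp->status (states are the XE_PXP_*
 * values used throughout this file):
 *
 *	READY_TO_START -> START_IN_PROGRESS -> ACTIVE            (queue add)
 *	ACTIVE -> TERMINATION_IN_PROGRESS                        (termination irq)
 *	TERMINATION_IN_PROGRESS -> NEEDS_ADDITIONAL_TERMINATION  (2nd request)
 *	TERMINATION_IN_PROGRESS -> READY_TO_START                (completion irq)
 *	NEEDS_ADDITIONAL_TERMINATION -> NEEDS_TERMINATION        (completion irq)
 *	NEEDS_TERMINATION -> TERMINATION_IN_PROGRESS             (queue add)
 *	<any> -> SUSPENDED -> NEEDS_TERMINATION or ERROR         (suspend/resume)
 *	<any step failing> -> ERROR
 */
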
static void pxp_terminate(struct xe_pxp *pxp)
{
	int ret = 0;
	struct xe_device *xe = pxp->xe;

	if (!wait_for_completion_timeout(&pxp->activation,
					 msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS)))
		drm_err(&xe->drm, "failed to wait for PXP start before termination\n");

	mutex_lock(&pxp->mutex);

	if (pxp->status == XE_PXP_ACTIVE)
		pxp->key_instance++;

	/*
	 * we'll mark the status as needing termination on resume, so no need to
	 * emit a termination now.
	 */
	if (pxp->status == XE_PXP_SUSPENDED) {
		mutex_unlock(&pxp->mutex);
		return;
	}

	/*
	 * If we have a termination already in progress, we need to wait for
	 * it to complete before queueing another one. Once the first
	 * termination is completed we'll set the state back to
	 * NEEDS_TERMINATION and leave it to the pxp start code to issue it.
	 */
	if (pxp->status == XE_PXP_TERMINATION_IN_PROGRESS) {
		pxp->status = XE_PXP_NEEDS_ADDITIONAL_TERMINATION;
		mutex_unlock(&pxp->mutex);
		return;
	}

	mark_termination_in_progress(pxp);

	mutex_unlock(&pxp->mutex);

	pxp_invalidate_queues(pxp);

	ret = pxp_terminate_hw(pxp);
	if (ret) {
		drm_err(&xe->drm, "PXP termination failed: %pe\n", ERR_PTR(ret));
		mutex_lock(&pxp->mutex);
		pxp->status = XE_PXP_ERROR;
		complete_all(&pxp->termination);
		mutex_unlock(&pxp->mutex);
	}
}

static void pxp_terminate_complete(struct xe_pxp *pxp)
{
	/*
	 * We expect PXP to be in one of 3 states when we get here:
	 * - XE_PXP_TERMINATION_IN_PROGRESS: a single termination event was
	 *   requested and it is now completing, so we're ready to start.
	 * - XE_PXP_NEEDS_ADDITIONAL_TERMINATION: a second termination was
	 *   requested while the first one was still being processed.
	 * - XE_PXP_SUSPENDED: PXP is now suspended, so we defer everything to
	 *   when we come back on resume.
	 */
	mutex_lock(&pxp->mutex);

	switch (pxp->status) {
	case XE_PXP_TERMINATION_IN_PROGRESS:
		pxp->status = XE_PXP_READY_TO_START;
		break;
	case XE_PXP_NEEDS_ADDITIONAL_TERMINATION:
		pxp->status = XE_PXP_NEEDS_TERMINATION;
		break;
	case XE_PXP_SUSPENDED:
		/* Nothing to do */
		break;
	default:
		drm_err(&pxp->xe->drm,
			"PXP termination complete while status was %u\n",
			pxp->status);
	}

	complete_all(&pxp->termination);

	mutex_unlock(&pxp->mutex);
}

static void pxp_irq_work(struct work_struct *work)
{
	struct xe_pxp *pxp = container_of(work, typeof(*pxp), irq.work);
	struct xe_device *xe = pxp->xe;
	u32 events = 0;

	spin_lock_irq(&xe->irq.lock);
	events = pxp->irq.events;
	pxp->irq.events = 0;
	spin_unlock_irq(&xe->irq.lock);

	if (!events)
		return;

	/*
	 * If we're processing a termination irq while suspending then don't
	 * bother, we're going to re-init everything on resume anyway.
	 */
	if ((events & PXP_TERMINATION_REQUEST) && !xe_pm_runtime_get_if_active(xe))
		return;

	if (events & PXP_TERMINATION_REQUEST) {
		events &= ~PXP_TERMINATION_COMPLETE;
		pxp_terminate(pxp);
	}

	if (events & PXP_TERMINATION_COMPLETE)
		pxp_terminate_complete(pxp);

	if (events & PXP_TERMINATION_REQUEST)
		xe_pm_runtime_put(xe);
}

/**
 * xe_pxp_irq_handler - Handles PXP interrupts.
 * @xe: the xe_device structure
 * @iir: interrupt vector
 */
void xe_pxp_irq_handler(struct xe_device *xe, u16 iir)
{
	struct xe_pxp *pxp = xe->pxp;

	if (!xe_pxp_is_enabled(pxp)) {
		drm_err(&xe->drm, "PXP irq 0x%x received with PXP disabled!\n", iir);
		return;
	}

	lockdep_assert_held(&xe->irq.lock);

	if (unlikely(!iir))
		return;

	if (iir & (KCR_PXP_STATE_TERMINATED_INTERRUPT |
		   KCR_APP_TERMINATED_PER_FW_REQ_INTERRUPT))
		pxp->irq.events |= PXP_TERMINATION_REQUEST;

	if (iir & KCR_PXP_STATE_RESET_COMPLETE_INTERRUPT)
		pxp->irq.events |= PXP_TERMINATION_COMPLETE;

	if (pxp->irq.events)
		queue_work(pxp->irq.wq, &pxp->irq.work);
}

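/*
 * KCR_INIT is a masked register: the upper 16 bits select which of the lower
 * bits the write actually affects, which is why the values below are built
 * with _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() instead of a plain
 * read-modify-write. E.g. _MASKED_BIT_ENABLE(bit) expands to
 * (bit << 16) | bit, i.e. "allow writing this bit, and set it".
 */
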
static int kcr_pxp_set_status(const struct xe_pxp *pxp, bool enable)
{
	u32 val = enable ? _MASKED_BIT_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES) :
		  _MASKED_BIT_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES);
	unsigned int fw_ref;

	fw_ref = xe_force_wake_get(gt_to_fw(pxp->gt), XE_FW_GT);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT))
		return -EIO;

	xe_mmio_write32(&pxp->gt->mmio, KCR_INIT, val);
	xe_force_wake_put(gt_to_fw(pxp->gt), fw_ref);

	return 0;
}

static int kcr_pxp_enable(const struct xe_pxp *pxp)
{
	return kcr_pxp_set_status(pxp, true);
}

static int kcr_pxp_disable(const struct xe_pxp *pxp)
{
	return kcr_pxp_set_status(pxp, false);
}

static void pxp_fini(void *arg)
{
	struct xe_pxp *pxp = arg;

	destroy_workqueue(pxp->irq.wq);
	xe_pxp_destroy_execution_resources(pxp);

	/* no need to explicitly disable KCR since we're going to do an FLR */
}

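/*
 * Note on the teardown ordering in pxp_fini() above: the workqueue is
 * destroyed first (which drains any pending pxp_irq_work) so that no
 * termination work can still be running by the time the execution resources
 * it relies on are released.
 */
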
/**
 * xe_pxp_init - initialize PXP support
 * @xe: the xe_device structure
 *
 * Initialize the HW state and allocate the objects required for PXP support.
 * Note that some of the requirements for PXP support (GSC proxy init, HuC
 * auth) are fulfilled asynchronously as part of the GSC init. PXP can only
 * be used after both this function and the async worker have completed.
 *
 * Returns 0 if PXP is not supported or if PXP initialization is successful,
 * a negative errno value if there is an error during the init.
 */
int xe_pxp_init(struct xe_device *xe)
{
	struct xe_gt *gt = xe->tiles[0].media_gt;
	struct xe_pxp *pxp;
	int err;

	if (!xe_pxp_is_supported(xe))
		return 0;

	/* we only support PXP on single tile devices with a media GT */
	if (xe->info.tile_count > 1 || !gt)
		return 0;

	/* The GSCCS is required for submissions to the GSC FW */
	if (!(gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0)))
		return 0;

	/* PXP requires both GSC and HuC firmwares to be available */
	if (!xe_uc_fw_is_loadable(&gt->uc.gsc.fw) ||
	    !xe_uc_fw_is_loadable(&gt->uc.huc.fw)) {
		drm_info(&xe->drm, "skipping PXP init due to missing FW dependencies");
		return 0;
	}

	pxp = drmm_kzalloc(&xe->drm, sizeof(struct xe_pxp), GFP_KERNEL);
	if (!pxp) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&pxp->queues.list);
	spin_lock_init(&pxp->queues.lock);
	INIT_WORK(&pxp->irq.work, pxp_irq_work);
	pxp->xe = xe;
	pxp->gt = gt;

	pxp->key_instance = 1;
	pxp->last_suspend_key_instance = 1;

	/*
	 * we'll use the completions to check if there is an action pending,
	 * so we start them as completed and we reinit them when an action is
	 * triggered.
	 */
	init_completion(&pxp->activation);
	init_completion(&pxp->termination);
	complete_all(&pxp->termination);
	complete_all(&pxp->activation);

	mutex_init(&pxp->mutex);

	pxp->irq.wq = alloc_ordered_workqueue("pxp-wq", 0);
	if (!pxp->irq.wq) {
		err = -ENOMEM;
		goto out_free;
	}

	err = kcr_pxp_enable(pxp);
	if (err)
		goto out_wq;

	err = xe_pxp_allocate_execution_resources(pxp);
	if (err)
		goto out_kcr_disable;

	xe->pxp = pxp;

	return devm_add_action_or_reset(xe->drm.dev, pxp_fini, pxp);

out_kcr_disable:
	kcr_pxp_disable(pxp);
out_wq:
	destroy_workqueue(pxp->irq.wq);
out_free:
	drmm_kfree(&xe->drm, pxp);
out:
	drm_err(&xe->drm, "PXP initialization failed: %pe\n", ERR_PTR(err));
	return err;
}

static int __pxp_start_arb_session(struct xe_pxp *pxp)
{
	int ret;
	unsigned int fw_ref;

	fw_ref = xe_force_wake_get(gt_to_fw(pxp->gt), XE_FW_GT);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT))
		return -EIO;

	if (pxp_session_is_in_play(pxp, ARB_SESSION)) {
		ret = -EEXIST;
		goto out_force_wake;
	}

	ret = xe_pxp_submit_session_init(&pxp->gsc_res, ARB_SESSION);
	if (ret) {
		drm_err(&pxp->xe->drm, "Failed to init PXP arb session: %pe\n", ERR_PTR(ret));
		goto out_force_wake;
	}

	ret = pxp_wait_for_session_state(pxp, ARB_SESSION, true);
	if (ret) {
		drm_err(&pxp->xe->drm, "PXP ARB session failed to go in play: %pe\n", ERR_PTR(ret));
		goto out_force_wake;
	}

	drm_dbg(&pxp->xe->drm, "PXP ARB session is active\n");

out_force_wake:
	xe_force_wake_put(gt_to_fw(pxp->gt), fw_ref);
	return ret;
}

/**
 * xe_pxp_exec_queue_set_type - Mark a queue as using PXP
 * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
 * @q: the queue to mark as using PXP
 * @type: the type of PXP session this queue will use
 *
 * Returns 0 if the selected PXP type is supported, -ENODEV otherwise.
 */
int xe_pxp_exec_queue_set_type(struct xe_pxp *pxp, struct xe_exec_queue *q, u8 type)
{
	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	/* we only support HWDRM sessions right now */
	xe_assert(pxp->xe, type == DRM_XE_PXP_TYPE_HWDRM);

	q->pxp.type = type;

	return 0;
}

static void __exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
{
	spin_lock_irq(&pxp->queues.lock);
	list_add_tail(&q->pxp.link, &pxp->queues.list);
	spin_unlock_irq(&pxp->queues.lock);
}

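/*
 * Queue lifecycle summary: a queue is marked as using PXP via
 * xe_pxp_exec_queue_set_type() above, activated and tracked via
 * xe_pxp_exec_queue_add() below (which also takes a PM reference per tracked
 * queue), and dropped from tracking either explicitly through
 * xe_pxp_exec_queue_remove() or via pxp_invalidate_queues() when the session
 * key is invalidated.
 */
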
/**
 * xe_pxp_exec_queue_add - add a queue to the PXP list
 * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
 * @q: the queue to add to the list
 *
 * If PXP is enabled and the prerequisites are done, start the PXP ARB
 * session (if not already running) and add the queue to the PXP list. Note
 * that the queue must have previously been marked as using PXP with
 * xe_pxp_exec_queue_set_type.
 *
 * Returns 0 if the PXP ARB session is running and the queue is in the list,
 * -ENODEV if PXP is disabled, -EBUSY if the PXP prerequisites are not done,
 * other errno value if something goes wrong during the session start.
 */
int xe_pxp_exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
{
	int ret = 0;

	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	/* we only support HWDRM sessions right now */
	xe_assert(pxp->xe, q->pxp.type == DRM_XE_PXP_TYPE_HWDRM);

	/*
	 * Runtime suspend kills PXP, so we take a reference to prevent it from
	 * happening while we have active queues that use PXP
	 */
	xe_pm_runtime_get(pxp->xe);

	if (!pxp_prerequisites_done(pxp)) {
		ret = -EBUSY;
		goto out;
	}

wait_for_idle:
	/*
	 * If there is an action in progress, wait for it. We need to wait
	 * outside the lock because the completion is done from within the lock.
	 * Note that the two actions should never be pending at the same time.
	 */
	if (!wait_for_completion_timeout(&pxp->termination,
					 msecs_to_jiffies(PXP_TERMINATION_TIMEOUT_MS))) {
		ret = -ETIMEDOUT;
		goto out;
	}

	if (!wait_for_completion_timeout(&pxp->activation,
					 msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS))) {
		ret = -ETIMEDOUT;
		goto out;
	}

	mutex_lock(&pxp->mutex);

	/* If PXP is not already active, turn it on */
	switch (pxp->status) {
	case XE_PXP_ERROR:
		ret = -EIO;
		break;
	case XE_PXP_ACTIVE:
		__exec_queue_add(pxp, q);
		mutex_unlock(&pxp->mutex);
		goto out;
	case XE_PXP_READY_TO_START:
		pxp->status = XE_PXP_START_IN_PROGRESS;
		reinit_completion(&pxp->activation);
		break;
	case XE_PXP_START_IN_PROGRESS:
		/* If a start is in progress then the completion must not be done */
		XE_WARN_ON(completion_done(&pxp->activation));
		mutex_unlock(&pxp->mutex);
		goto wait_for_idle;
	case XE_PXP_NEEDS_TERMINATION:
		mark_termination_in_progress(pxp);
		break;
	case XE_PXP_TERMINATION_IN_PROGRESS:
	case XE_PXP_NEEDS_ADDITIONAL_TERMINATION:
		/* If a termination is in progress then the completion must not be done */
		XE_WARN_ON(completion_done(&pxp->termination));
		mutex_unlock(&pxp->mutex);
		goto wait_for_idle;
	case XE_PXP_SUSPENDED:
	default:
		drm_err(&pxp->xe->drm, "unexpected state during PXP start: %u\n", pxp->status);
		ret = -EIO;
		break;
	}

	mutex_unlock(&pxp->mutex);

	if (ret)
		goto out;

	if (!completion_done(&pxp->termination)) {
		ret = pxp_terminate_hw(pxp);
		if (ret) {
			drm_err(&pxp->xe->drm, "PXP termination failed before start\n");
			mutex_lock(&pxp->mutex);
			pxp->status = XE_PXP_ERROR;
			mutex_unlock(&pxp->mutex);

			goto out;
		}

		goto wait_for_idle;
	}

	/* All the cases except for start should have exited earlier */
	XE_WARN_ON(completion_done(&pxp->activation));
	ret = __pxp_start_arb_session(pxp);

	mutex_lock(&pxp->mutex);

	complete_all(&pxp->activation);

	/*
	 * Any other process should wait until the state goes away from
	 * XE_PXP_START_IN_PROGRESS, so if the state is anything else at this
	 * point something went wrong. Mark the status as needing termination
	 * and try again.
	 */
	if (pxp->status != XE_PXP_START_IN_PROGRESS) {
		drm_err(&pxp->xe->drm, "unexpected state after PXP start: %u\n", pxp->status);
		pxp->status = XE_PXP_NEEDS_TERMINATION;
		mutex_unlock(&pxp->mutex);
		goto wait_for_idle;
	}

	/* If everything went ok, update the status and add the queue to the list */
	if (!ret) {
		pxp->status = XE_PXP_ACTIVE;
		__exec_queue_add(pxp, q);
	} else {
		pxp->status = XE_PXP_ERROR;
	}

	mutex_unlock(&pxp->mutex);

out:
	/*
	 * in the successful case the PM ref is released from
	 * xe_pxp_exec_queue_remove
	 */
	if (ret)
		xe_pm_runtime_put(pxp->xe);

	return ret;
}

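/*
 * A hypothetical userspace sketch of creating a PXP-enabled exec queue,
 * assuming the DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE property from
 * uapi/drm/xe_drm.h (error handling omitted):
 *
 *	struct drm_xe_ext_set_property ext = {
 *		.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
 *		.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE,
 *		.value = DRM_XE_PXP_TYPE_HWDRM,
 *	};
 *	struct drm_xe_engine_class_instance instance = {
 *		.engine_class = DRM_XE_ENGINE_CLASS_RENDER,
 *	};
 *	struct drm_xe_exec_queue_create create = {
 *		.extensions = (uintptr_t)&ext,
 *		.width = 1,
 *		.num_placements = 1,
 *		.vm_id = vm_id,
 *		.instances = (uintptr_t)&instance,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);
 */
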
static void __pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q, bool lock)
{
	bool need_pm_put = false;

	if (!xe_pxp_is_enabled(pxp))
		return;

	if (lock)
		spin_lock_irq(&pxp->queues.lock);

	if (!list_empty(&q->pxp.link)) {
		list_del_init(&q->pxp.link);
		need_pm_put = true;
	}

	q->pxp.type = DRM_XE_PXP_TYPE_NONE;

	if (lock)
		spin_unlock_irq(&pxp->queues.lock);

	if (need_pm_put)
		xe_pm_runtime_put(pxp->xe);
}

/**
 * xe_pxp_exec_queue_remove - remove a queue from the PXP list
 * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
 * @q: the queue to remove from the list
 *
 * If PXP is enabled and the exec_queue is in the list, the queue will be
 * removed from the list and its PM reference will be released. It is safe to
 * call this function multiple times for the same queue.
 */
void xe_pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q)
{
	__pxp_exec_queue_remove(pxp, q, true);
}

static void pxp_invalidate_queues(struct xe_pxp *pxp)
{
	struct xe_exec_queue *tmp, *q;
	LIST_HEAD(to_clean);

	spin_lock_irq(&pxp->queues.lock);

	list_for_each_entry_safe(q, tmp, &pxp->queues.list, pxp.link) {
		q = xe_exec_queue_get_unless_zero(q);
		if (!q)
			continue;

		list_move_tail(&q->pxp.link, &to_clean);
	}
	spin_unlock_irq(&pxp->queues.lock);

	list_for_each_entry_safe(q, tmp, &to_clean, pxp.link) {
		xe_exec_queue_kill(q);

		/*
		 * We hold a ref to the queue so there is no risk of racing with
		 * the calls to exec_queue_remove coming from exec_queue_destroy.
		 */
		__pxp_exec_queue_remove(pxp, q, false);

		xe_exec_queue_put(q);
	}
}

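/*
 * Key-instance tracking summary: pxp->key_instance is bumped every time the
 * session key is invalidated (termination while active, suspend). Each
 * protected BO snapshots the current value in xe_pxp_key_assign() below, and
 * xe_pxp_bo_key_check() flags the BO as stale once the two values no longer
 * match.
 */
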
/**
 * xe_pxp_key_assign - mark a BO as using the current PXP key iteration
 * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
 * @bo: the BO to mark
 *
 * Returns: -ENODEV if PXP is disabled, 0 otherwise.
 */
int xe_pxp_key_assign(struct xe_pxp *pxp, struct xe_bo *bo)
{
	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	xe_assert(pxp->xe, !bo->pxp_key_instance);

	/*
	 * Note that the PXP key handling is inherently racy, because the key
	 * can theoretically change at any time (although it's unlikely to do
	 * so without triggers), even right after we copy it. Taking a lock
	 * wouldn't help because the value might still change as soon as we
	 * release the lock.
	 * Userspace needs to handle the fact that their BOs can go invalid at
	 * any point.
	 */
	bo->pxp_key_instance = pxp->key_instance;

	return 0;
}

/**
 * xe_pxp_bo_key_check - check if the key used by a xe_bo is valid
 * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
 * @bo: the BO we want to check
 *
 * Checks whether a BO was encrypted with the current key or an obsolete one.
 *
 * Returns: 0 if the key is valid, -ENODEV if PXP is disabled, -EINVAL if the
 * BO is not using PXP, -ENOEXEC if the key is not valid.
 */
int xe_pxp_bo_key_check(struct xe_pxp *pxp, struct xe_bo *bo)
{
	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	if (!xe_bo_is_protected(bo))
		return -EINVAL;

	xe_assert(pxp->xe, bo->pxp_key_instance);

	/*
	 * Note that the PXP key handling is inherently racy, because the key
	 * can theoretically change at any time (although it's unlikely to do
	 * so without triggers), even right after we check it. Taking a lock
	 * wouldn't help because the value might still change as soon as we
	 * release the lock.
	 * We mitigate the risk by checking the key at multiple points (on each
	 * submission involving the BO and right before flipping it on the
	 * display), but there is still a very small chance that we could
	 * operate on an invalid BO for a single submission or a single frame
	 * flip. This is a compromise made to protect the encrypted data (which
	 * is what the key termination is for).
	 */
	if (bo->pxp_key_instance != pxp->key_instance)
		return -ENOEXEC;

	return 0;
}

/**
 * xe_pxp_obj_key_check - check if the key used by a drm_gem_obj is valid
 * @obj: the drm_gem_obj we want to check
 *
 * Checks whether a drm_gem_obj was encrypted with the current key or an
 * obsolete one.
 *
 * Returns: 0 if the key is valid, -ENODEV if PXP is disabled, -EINVAL if the
 * obj is not using PXP, -ENOEXEC if the key is not valid.
 */
int xe_pxp_obj_key_check(struct drm_gem_object *obj)
{
	struct xe_bo *bo = gem_to_xe_bo(obj);
	struct xe_device *xe = xe_bo_device(bo);
	struct xe_pxp *pxp = xe->pxp;

	return xe_pxp_bo_key_check(pxp, bo);
}

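/*
 * Suspend flow summary: xe_pxp_pm_suspend() below waits for any in-flight
 * start/termination to settle, bumps the key (invalidating all protected
 * queues and BOs) if PXP was in use, and parks the state machine in
 * XE_PXP_SUSPENDED so that termination events arriving during the transition
 * are ignored.
 */
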
/**
 * xe_pxp_pm_suspend - prepare PXP for HW suspend
 * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
 *
 * Makes sure all PXP actions have completed and invalidates all PXP queues
 * and objects before we go into a suspend state.
 *
 * Returns: 0 if successful, a negative errno value otherwise.
 */
int xe_pxp_pm_suspend(struct xe_pxp *pxp)
{
	bool needs_queue_inval = false;
	int ret = 0;

	if (!xe_pxp_is_enabled(pxp))
		return 0;

wait_for_activation:
	if (!wait_for_completion_timeout(&pxp->activation,
					 msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS)))
		ret = -ETIMEDOUT;

	mutex_lock(&pxp->mutex);

	switch (pxp->status) {
	case XE_PXP_ERROR:
	case XE_PXP_READY_TO_START:
	case XE_PXP_SUSPENDED:
	case XE_PXP_TERMINATION_IN_PROGRESS:
	case XE_PXP_NEEDS_ADDITIONAL_TERMINATION:
		/*
		 * If PXP is not running there is nothing to cleanup. If there
		 * is a termination pending then no need to issue another one.
		 */
		break;
	case XE_PXP_START_IN_PROGRESS:
		mutex_unlock(&pxp->mutex);
		goto wait_for_activation;
	case XE_PXP_NEEDS_TERMINATION:
		/* If PXP was never used we can skip the cleanup */
		if (pxp->key_instance == pxp->last_suspend_key_instance)
			break;
		fallthrough;
	case XE_PXP_ACTIVE:
		pxp->key_instance++;
		needs_queue_inval = true;
		break;
	default:
		drm_err(&pxp->xe->drm, "unexpected state during PXP suspend: %u",
			pxp->status);
		ret = -EIO;
		mutex_unlock(&pxp->mutex);
		goto out;
	}

	/*
	 * We set this even if we were in error state, hoping the suspend clears
	 * the error. Worst case we fail again and go in error state again.
	 */
	pxp->status = XE_PXP_SUSPENDED;

	mutex_unlock(&pxp->mutex);

	if (needs_queue_inval)
		pxp_invalidate_queues(pxp);

	/*
	 * if there is a termination in progress, wait for it.
	 * We need to wait outside the lock because the completion is done from
	 * within the lock
	 */
	if (!wait_for_completion_timeout(&pxp->termination,
					 msecs_to_jiffies(PXP_TERMINATION_TIMEOUT_MS)))
		ret = -ETIMEDOUT;

	pxp->last_suspend_key_instance = pxp->key_instance;

out:
	return ret;
}

/**
 * xe_pxp_pm_resume - re-init PXP after HW suspend
 * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
 */
void xe_pxp_pm_resume(struct xe_pxp *pxp)
{
	int err;

	if (!xe_pxp_is_enabled(pxp))
		return;

	err = kcr_pxp_enable(pxp);

	mutex_lock(&pxp->mutex);

	xe_assert(pxp->xe, pxp->status == XE_PXP_SUSPENDED);

	if (err)
		pxp->status = XE_PXP_ERROR;
	else
		pxp->status = XE_PXP_NEEDS_TERMINATION;

	mutex_unlock(&pxp->mutex);
}

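/*
 * Note that xe_pxp_pm_resume() only re-enables KCR and flags a termination
 * as needed; the ARB session is not restarted here. It is lazily re-created
 * by the next xe_pxp_exec_queue_add() call, so a device that doesn't use PXP
 * again after resume pays no extra cost.
 */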