1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2014 Intel Corporation 4 */ 5 6 #include <linux/circ_buf.h> 7 8 #include "gem/i915_gem_context.h" 9 #include "gem/i915_gem_lmem.h" 10 #include "gt/gen8_engine_cs.h" 11 #include "gt/intel_breadcrumbs.h" 12 #include "gt/intel_context.h" 13 #include "gt/intel_engine_heartbeat.h" 14 #include "gt/intel_engine_pm.h" 15 #include "gt/intel_engine_regs.h" 16 #include "gt/intel_gpu_commands.h" 17 #include "gt/intel_gt.h" 18 #include "gt/intel_gt_clock_utils.h" 19 #include "gt/intel_gt_irq.h" 20 #include "gt/intel_gt_pm.h" 21 #include "gt/intel_gt_regs.h" 22 #include "gt/intel_gt_requests.h" 23 #include "gt/intel_lrc.h" 24 #include "gt/intel_lrc_reg.h" 25 #include "gt/intel_mocs.h" 26 #include "gt/intel_ring.h" 27 28 #include "intel_guc_ads.h" 29 #include "intel_guc_capture.h" 30 #include "intel_guc_print.h" 31 #include "intel_guc_submission.h" 32 33 #include "i915_drv.h" 34 #include "i915_reg.h" 35 #include "i915_irq.h" 36 #include "i915_trace.h" 37 38 /** 39 * DOC: GuC-based command submission 40 * 41 * The Scratch registers: 42 * There are 16 MMIO-based registers start from 0xC180. The kernel driver writes 43 * a value to the action register (SOFT_SCRATCH_0) along with any data. It then 44 * triggers an interrupt on the GuC via another register write (0xC4C8). 45 * Firmware writes a success/fail code back to the action register after 46 * processes the request. The kernel driver polls waiting for this update and 47 * then proceeds. 48 * 49 * Command Transport buffers (CTBs): 50 * Covered in detail in other sections but CTBs (Host to GuC - H2G, GuC to Host 51 * - G2H) are a message interface between the i915 and GuC. 52 * 53 * Context registration: 54 * Before a context can be submitted it must be registered with the GuC via a 55 * H2G. A unique guc_id is associated with each context. The context is either 56 * registered at request creation time (normal operation) or at submission time 57 * (abnormal operation, e.g. after a reset). 58 * 59 * Context submission: 60 * The i915 updates the LRC tail value in memory. The i915 must enable the 61 * scheduling of the context within the GuC for the GuC to actually consider it. 62 * Therefore, the first time a disabled context is submitted we use a schedule 63 * enable H2G, while follow up submissions are done via the context submit H2G, 64 * which informs the GuC that a previously enabled context has new work 65 * available. 66 * 67 * Context unpin: 68 * To unpin a context a H2G is used to disable scheduling. When the 69 * corresponding G2H returns indicating the scheduling disable operation has 70 * completed it is safe to unpin the context. While a disable is in flight it 71 * isn't safe to resubmit the context so a fence is used to stall all future 72 * requests of that context until the G2H is returned. Because this interaction 73 * with the GuC takes a non-zero amount of time we delay the disabling of 74 * scheduling after the pin count goes to zero by a configurable period of time 75 * (see SCHED_DISABLE_DELAY_MS). The thought is this gives the user a window of 76 * time to resubmit something on the context before doing this costly operation. 77 * This delay is only done if the context isn't closed and the guc_id usage is 78 * less than a threshold (see NUM_SCHED_DISABLE_GUC_IDS_THRESHOLD). 79 * 80 * Context deregistration: 81 * Before a context can be destroyed or if we steal its guc_id we must 82 * deregister the context with the GuC via H2G. 
If stealing the guc_id it isn't 83 * safe to submit anything to this guc_id until the deregister completes so a 84 * fence is used to stall all requests associated with this guc_id until the 85 * corresponding G2H returns indicating the guc_id has been deregistered. 86 * 87 * submission_state.guc_ids: 88 * Unique number associated with private GuC context data passed in during 89 * context registration / submission / deregistration. 64k available. Simple ida 90 * is used for allocation. 91 * 92 * Stealing guc_ids: 93 * If no guc_ids are available they can be stolen from another context at 94 * request creation time if that context is unpinned. If a guc_id can't be found 95 * we punt this problem to the user as we believe this is near impossible to hit 96 * during normal use cases. 97 * 98 * Locking: 99 * In the GuC submission code we have 3 basic spin locks which protect 100 * everything. Details about each below. 101 * 102 * sched_engine->lock 103 * This is the submission lock for all contexts that share an i915 schedule 104 * engine (sched_engine), thus only one of the contexts which share a 105 * sched_engine can be submitting at a time. Currently only one sched_engine is 106 * used for all of GuC submission but that could change in the future. 107 * 108 * guc->submission_state.lock 109 * Global lock for GuC submission state. Protects guc_ids and destroyed contexts 110 * list. 111 * 112 * ce->guc_state.lock 113 * Protects everything under ce->guc_state. Ensures that a context is in the 114 * correct state before issuing a H2G. e.g. We don't issue a schedule disable 115 * on a disabled context (bad idea), we don't issue a schedule enable when a 116 * schedule disable is in flight, etc... Also protects list of inflight requests 117 * on the context and the priority management state. Lock is individual to each 118 * context. 119 * 120 * Lock ordering rules: 121 * sched_engine->lock -> ce->guc_state.lock 122 * guc->submission_state.lock -> ce->guc_state.lock 123 * 124 * Reset races: 125 * When a full GT reset is triggered it is assumed that some G2H responses to 126 * H2Gs can be lost as the GuC is also reset. Losing these G2H can prove to be 127 * fatal as we do certain operations upon receiving a G2H (e.g. destroy 128 * contexts, release guc_ids, etc...). When this occurs we can scrub the 129 * context state and cleanup appropriately, however this is quite racey. 130 * To avoid races, the reset code must disable submission before scrubbing for 131 * the missing G2H, while the submission code must check for submission being 132 * disabled and skip sending H2Gs and updating context states when it is. Both 133 * sides must also make sure to hold the relevant locks. 134 */ 135 136 /* GuC Virtual Engine */ 137 struct guc_virtual_engine { 138 struct intel_engine_cs base; 139 struct intel_context context; 140 }; 141 142 static struct intel_context * 143 guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count, 144 unsigned long flags); 145 146 static struct intel_context * 147 guc_create_parallel(struct intel_engine_cs **engines, 148 unsigned int num_siblings, 149 unsigned int width); 150 151 #define GUC_REQUEST_SIZE 64 /* bytes */ 152 153 /* 154 * We reserve 1/16 of the guc_ids for multi-lrc as these need to be contiguous 155 * per the GuC submission interface. A different allocation algorithm is used 156 * (bitmap vs. ida) between multi-lrc and single-lrc hence the reason to 157 * partition the guc_id space. 
We believe the number of multi-lrc contexts in 158 * use should be low and 1/16 should be sufficient. Minimum of 32 guc_ids for 159 * multi-lrc. 160 */ 161 #define NUMBER_MULTI_LRC_GUC_ID(guc) \ 162 ((guc)->submission_state.num_guc_ids / 16) 163 164 /* 165 * Below is a set of functions which control the GuC scheduling state which 166 * require a lock. 167 */ 168 #define SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER BIT(0) 169 #define SCHED_STATE_DESTROYED BIT(1) 170 #define SCHED_STATE_PENDING_DISABLE BIT(2) 171 #define SCHED_STATE_BANNED BIT(3) 172 #define SCHED_STATE_ENABLED BIT(4) 173 #define SCHED_STATE_PENDING_ENABLE BIT(5) 174 #define SCHED_STATE_REGISTERED BIT(6) 175 #define SCHED_STATE_POLICY_REQUIRED BIT(7) 176 #define SCHED_STATE_CLOSED BIT(8) 177 #define SCHED_STATE_BLOCKED_SHIFT 9 178 #define SCHED_STATE_BLOCKED BIT(SCHED_STATE_BLOCKED_SHIFT) 179 #define SCHED_STATE_BLOCKED_MASK (0xfff << SCHED_STATE_BLOCKED_SHIFT) 180 181 static inline void init_sched_state(struct intel_context *ce) 182 { 183 lockdep_assert_held(&ce->guc_state.lock); 184 ce->guc_state.sched_state &= SCHED_STATE_BLOCKED_MASK; 185 } 186 187 /* 188 * Kernel contexts can have SCHED_STATE_REGISTERED after suspend. 189 * A context close can race with the submission path, so SCHED_STATE_CLOSED 190 * can be set immediately before we try to register. 191 */ 192 #define SCHED_STATE_VALID_INIT \ 193 (SCHED_STATE_BLOCKED_MASK | \ 194 SCHED_STATE_CLOSED | \ 195 SCHED_STATE_REGISTERED) 196 197 __maybe_unused 198 static bool sched_state_is_init(struct intel_context *ce) 199 { 200 return !(ce->guc_state.sched_state & ~SCHED_STATE_VALID_INIT); 201 } 202 203 static inline bool 204 context_wait_for_deregister_to_register(struct intel_context *ce) 205 { 206 return ce->guc_state.sched_state & 207 SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER; 208 } 209 210 static inline void 211 set_context_wait_for_deregister_to_register(struct intel_context *ce) 212 { 213 lockdep_assert_held(&ce->guc_state.lock); 214 ce->guc_state.sched_state |= 215 SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER; 216 } 217 218 static inline void 219 clr_context_wait_for_deregister_to_register(struct intel_context *ce) 220 { 221 lockdep_assert_held(&ce->guc_state.lock); 222 ce->guc_state.sched_state &= 223 ~SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER; 224 } 225 226 static inline bool 227 context_destroyed(struct intel_context *ce) 228 { 229 return ce->guc_state.sched_state & SCHED_STATE_DESTROYED; 230 } 231 232 static inline void 233 set_context_destroyed(struct intel_context *ce) 234 { 235 lockdep_assert_held(&ce->guc_state.lock); 236 ce->guc_state.sched_state |= SCHED_STATE_DESTROYED; 237 } 238 239 static inline void 240 clr_context_destroyed(struct intel_context *ce) 241 { 242 lockdep_assert_held(&ce->guc_state.lock); 243 ce->guc_state.sched_state &= ~SCHED_STATE_DESTROYED; 244 } 245 246 static inline bool context_pending_disable(struct intel_context *ce) 247 { 248 return ce->guc_state.sched_state & SCHED_STATE_PENDING_DISABLE; 249 } 250 251 static inline void set_context_pending_disable(struct intel_context *ce) 252 { 253 lockdep_assert_held(&ce->guc_state.lock); 254 ce->guc_state.sched_state |= SCHED_STATE_PENDING_DISABLE; 255 } 256 257 static inline void clr_context_pending_disable(struct intel_context *ce) 258 { 259 lockdep_assert_held(&ce->guc_state.lock); 260 ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_DISABLE; 261 } 262 263 static inline bool context_banned(struct intel_context *ce) 264 { 265 return ce->guc_state.sched_state & SCHED_STATE_BANNED; 266 } 
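/*
 * Illustrative sketch (not taken from the driver; the function name is
 * made up for illustration): the sched_state helpers above and below are
 * only ever used with ce->guc_state.lock held, and a caller that also
 * needs the submission lock must follow the ordering documented at the
 * top of this file (sched_engine->lock -> ce->guc_state.lock). A
 * hypothetical caller flipping one of these flags would therefore look
 * roughly like:
 */
#if 0	/* example only, never built */
static void example_ban_context(struct intel_context *ce)
{
	unsigned long flags;

	spin_lock_irqsave(&ce->guc_state.lock, flags);
	if (!context_banned(ce))
		set_context_banned(ce);
	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
}
#endif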
267 268 static inline void set_context_banned(struct intel_context *ce) 269 { 270 lockdep_assert_held(&ce->guc_state.lock); 271 ce->guc_state.sched_state |= SCHED_STATE_BANNED; 272 } 273 274 static inline void clr_context_banned(struct intel_context *ce) 275 { 276 lockdep_assert_held(&ce->guc_state.lock); 277 ce->guc_state.sched_state &= ~SCHED_STATE_BANNED; 278 } 279 280 static inline bool context_enabled(struct intel_context *ce) 281 { 282 return ce->guc_state.sched_state & SCHED_STATE_ENABLED; 283 } 284 285 static inline void set_context_enabled(struct intel_context *ce) 286 { 287 lockdep_assert_held(&ce->guc_state.lock); 288 ce->guc_state.sched_state |= SCHED_STATE_ENABLED; 289 } 290 291 static inline void clr_context_enabled(struct intel_context *ce) 292 { 293 lockdep_assert_held(&ce->guc_state.lock); 294 ce->guc_state.sched_state &= ~SCHED_STATE_ENABLED; 295 } 296 297 static inline bool context_pending_enable(struct intel_context *ce) 298 { 299 return ce->guc_state.sched_state & SCHED_STATE_PENDING_ENABLE; 300 } 301 302 static inline void set_context_pending_enable(struct intel_context *ce) 303 { 304 lockdep_assert_held(&ce->guc_state.lock); 305 ce->guc_state.sched_state |= SCHED_STATE_PENDING_ENABLE; 306 } 307 308 static inline void clr_context_pending_enable(struct intel_context *ce) 309 { 310 lockdep_assert_held(&ce->guc_state.lock); 311 ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_ENABLE; 312 } 313 314 static inline bool context_registered(struct intel_context *ce) 315 { 316 return ce->guc_state.sched_state & SCHED_STATE_REGISTERED; 317 } 318 319 static inline void set_context_registered(struct intel_context *ce) 320 { 321 lockdep_assert_held(&ce->guc_state.lock); 322 ce->guc_state.sched_state |= SCHED_STATE_REGISTERED; 323 } 324 325 static inline void clr_context_registered(struct intel_context *ce) 326 { 327 lockdep_assert_held(&ce->guc_state.lock); 328 ce->guc_state.sched_state &= ~SCHED_STATE_REGISTERED; 329 } 330 331 static inline bool context_policy_required(struct intel_context *ce) 332 { 333 return ce->guc_state.sched_state & SCHED_STATE_POLICY_REQUIRED; 334 } 335 336 static inline void set_context_policy_required(struct intel_context *ce) 337 { 338 lockdep_assert_held(&ce->guc_state.lock); 339 ce->guc_state.sched_state |= SCHED_STATE_POLICY_REQUIRED; 340 } 341 342 static inline void clr_context_policy_required(struct intel_context *ce) 343 { 344 lockdep_assert_held(&ce->guc_state.lock); 345 ce->guc_state.sched_state &= ~SCHED_STATE_POLICY_REQUIRED; 346 } 347 348 static inline bool context_close_done(struct intel_context *ce) 349 { 350 return ce->guc_state.sched_state & SCHED_STATE_CLOSED; 351 } 352 353 static inline void set_context_close_done(struct intel_context *ce) 354 { 355 lockdep_assert_held(&ce->guc_state.lock); 356 ce->guc_state.sched_state |= SCHED_STATE_CLOSED; 357 } 358 359 static inline u32 context_blocked(struct intel_context *ce) 360 { 361 return (ce->guc_state.sched_state & SCHED_STATE_BLOCKED_MASK) >> 362 SCHED_STATE_BLOCKED_SHIFT; 363 } 364 365 static inline void incr_context_blocked(struct intel_context *ce) 366 { 367 lockdep_assert_held(&ce->guc_state.lock); 368 369 ce->guc_state.sched_state += SCHED_STATE_BLOCKED; 370 371 GEM_BUG_ON(!context_blocked(ce)); /* Overflow check */ 372 } 373 374 static inline void decr_context_blocked(struct intel_context *ce) 375 { 376 lockdep_assert_held(&ce->guc_state.lock); 377 378 GEM_BUG_ON(!context_blocked(ce)); /* Underflow check */ 379 380 ce->guc_state.sched_state -= SCHED_STATE_BLOCKED; 381 } 382 383 static 
struct intel_context * 384 request_to_scheduling_context(struct i915_request *rq) 385 { 386 return intel_context_to_parent(rq->context); 387 } 388 389 static inline bool context_guc_id_invalid(struct intel_context *ce) 390 { 391 return ce->guc_id.id == GUC_INVALID_CONTEXT_ID; 392 } 393 394 static inline void set_context_guc_id_invalid(struct intel_context *ce) 395 { 396 ce->guc_id.id = GUC_INVALID_CONTEXT_ID; 397 } 398 399 static inline struct intel_guc *ce_to_guc(struct intel_context *ce) 400 { 401 return gt_to_guc(ce->engine->gt); 402 } 403 404 static inline struct i915_priolist *to_priolist(struct rb_node *rb) 405 { 406 return rb_entry(rb, struct i915_priolist, node); 407 } 408 409 /* 410 * When using multi-lrc submission a scratch memory area is reserved in the 411 * parent's context state for the process descriptor, work queue, and handshake 412 * between the parent + children contexts to insert safe preemption points 413 * between each of the BBs. Currently the scratch area is sized to a page. 414 * 415 * The layout of this scratch area is below: 416 * 0 guc_process_desc 417 * + sizeof(struct guc_process_desc) child go 418 * + CACHELINE_BYTES child join[0] 419 * ... 420 * + CACHELINE_BYTES child join[n - 1] 421 * ... unused 422 * PARENT_SCRATCH_SIZE / 2 work queue start 423 * ... work queue 424 * PARENT_SCRATCH_SIZE - 1 work queue end 425 */ 426 #define WQ_SIZE (PARENT_SCRATCH_SIZE / 2) 427 #define WQ_OFFSET (PARENT_SCRATCH_SIZE - WQ_SIZE) 428 429 struct sync_semaphore { 430 u32 semaphore; 431 u8 unused[CACHELINE_BYTES - sizeof(u32)]; 432 }; 433 434 struct parent_scratch { 435 union guc_descs { 436 struct guc_sched_wq_desc wq_desc; 437 struct guc_process_desc_v69 pdesc; 438 } descs; 439 440 struct sync_semaphore go; 441 struct sync_semaphore join[MAX_ENGINE_INSTANCE + 1]; 442 443 u8 unused[WQ_OFFSET - sizeof(union guc_descs) - 444 sizeof(struct sync_semaphore) * (MAX_ENGINE_INSTANCE + 2)]; 445 446 u32 wq[WQ_SIZE / sizeof(u32)]; 447 }; 448 449 static u32 __get_parent_scratch_offset(struct intel_context *ce) 450 { 451 GEM_BUG_ON(!ce->parallel.guc.parent_page); 452 453 return ce->parallel.guc.parent_page * PAGE_SIZE; 454 } 455 456 static u32 __get_wq_offset(struct intel_context *ce) 457 { 458 BUILD_BUG_ON(offsetof(struct parent_scratch, wq) != WQ_OFFSET); 459 460 return __get_parent_scratch_offset(ce) + WQ_OFFSET; 461 } 462 463 static struct parent_scratch * 464 __get_parent_scratch(struct intel_context *ce) 465 { 466 BUILD_BUG_ON(sizeof(struct parent_scratch) != PARENT_SCRATCH_SIZE); 467 BUILD_BUG_ON(sizeof(struct sync_semaphore) != CACHELINE_BYTES); 468 469 /* 470 * Need to subtract LRC_STATE_OFFSET here as the 471 * parallel.guc.parent_page is the offset into ce->state while 472 * ce->lrc_reg_reg is ce->state + LRC_STATE_OFFSET. 473 */ 474 return (struct parent_scratch *) 475 (ce->lrc_reg_state + 476 ((__get_parent_scratch_offset(ce) - 477 LRC_STATE_OFFSET) / sizeof(u32))); 478 } 479 480 static struct guc_process_desc_v69 * 481 __get_process_desc_v69(struct intel_context *ce) 482 { 483 struct parent_scratch *ps = __get_parent_scratch(ce); 484 485 return &ps->descs.pdesc; 486 } 487 488 static struct guc_sched_wq_desc * 489 __get_wq_desc_v70(struct intel_context *ce) 490 { 491 struct parent_scratch *ps = __get_parent_scratch(ce); 492 493 return &ps->descs.wq_desc; 494 } 495 496 static u32 *get_wq_pointer(struct intel_context *ce, u32 wqi_size) 497 { 498 /* 499 * Check for space in work queue. 
Caching a value of head pointer in 500 * intel_context structure in order reduce the number accesses to shared 501 * GPU memory which may be across a PCIe bus. 502 */ 503 #define AVAILABLE_SPACE \ 504 CIRC_SPACE(ce->parallel.guc.wqi_tail, ce->parallel.guc.wqi_head, WQ_SIZE) 505 if (wqi_size > AVAILABLE_SPACE) { 506 ce->parallel.guc.wqi_head = READ_ONCE(*ce->parallel.guc.wq_head); 507 508 if (wqi_size > AVAILABLE_SPACE) 509 return NULL; 510 } 511 #undef AVAILABLE_SPACE 512 513 return &__get_parent_scratch(ce)->wq[ce->parallel.guc.wqi_tail / sizeof(u32)]; 514 } 515 516 static inline struct intel_context *__get_context(struct intel_guc *guc, u32 id) 517 { 518 struct intel_context *ce = xa_load(&guc->context_lookup, id); 519 520 GEM_BUG_ON(id >= GUC_MAX_CONTEXT_ID); 521 522 return ce; 523 } 524 525 static struct guc_lrc_desc_v69 *__get_lrc_desc_v69(struct intel_guc *guc, u32 index) 526 { 527 struct guc_lrc_desc_v69 *base = guc->lrc_desc_pool_vaddr_v69; 528 529 if (!base) 530 return NULL; 531 532 GEM_BUG_ON(index >= GUC_MAX_CONTEXT_ID); 533 534 return &base[index]; 535 } 536 537 static int guc_lrc_desc_pool_create_v69(struct intel_guc *guc) 538 { 539 u32 size; 540 int ret; 541 542 size = PAGE_ALIGN(sizeof(struct guc_lrc_desc_v69) * 543 GUC_MAX_CONTEXT_ID); 544 ret = intel_guc_allocate_and_map_vma(guc, size, &guc->lrc_desc_pool_v69, 545 (void **)&guc->lrc_desc_pool_vaddr_v69); 546 if (ret) 547 return ret; 548 549 return 0; 550 } 551 552 static void guc_lrc_desc_pool_destroy_v69(struct intel_guc *guc) 553 { 554 if (!guc->lrc_desc_pool_vaddr_v69) 555 return; 556 557 guc->lrc_desc_pool_vaddr_v69 = NULL; 558 i915_vma_unpin_and_release(&guc->lrc_desc_pool_v69, I915_VMA_RELEASE_MAP); 559 } 560 561 static inline bool guc_submission_initialized(struct intel_guc *guc) 562 { 563 return guc->submission_initialized; 564 } 565 566 static inline void _reset_lrc_desc_v69(struct intel_guc *guc, u32 id) 567 { 568 struct guc_lrc_desc_v69 *desc = __get_lrc_desc_v69(guc, id); 569 570 if (desc) 571 memset(desc, 0, sizeof(*desc)); 572 } 573 574 static inline bool ctx_id_mapped(struct intel_guc *guc, u32 id) 575 { 576 return __get_context(guc, id); 577 } 578 579 static inline void set_ctx_id_mapping(struct intel_guc *guc, u32 id, 580 struct intel_context *ce) 581 { 582 unsigned long flags; 583 584 /* 585 * xarray API doesn't have xa_save_irqsave wrapper, so calling the 586 * lower level functions directly. 587 */ 588 xa_lock_irqsave(&guc->context_lookup, flags); 589 __xa_store(&guc->context_lookup, id, ce, GFP_ATOMIC); 590 xa_unlock_irqrestore(&guc->context_lookup, flags); 591 } 592 593 static inline void clr_ctx_id_mapping(struct intel_guc *guc, u32 id) 594 { 595 unsigned long flags; 596 597 if (unlikely(!guc_submission_initialized(guc))) 598 return; 599 600 _reset_lrc_desc_v69(guc, id); 601 602 /* 603 * xarray API doesn't have xa_erase_irqsave wrapper, so calling 604 * the lower level functions directly. 605 */ 606 xa_lock_irqsave(&guc->context_lookup, flags); 607 __xa_erase(&guc->context_lookup, id); 608 xa_unlock_irqrestore(&guc->context_lookup, flags); 609 } 610 611 static void decr_outstanding_submission_g2h(struct intel_guc *guc) 612 { 613 if (atomic_dec_and_test(&guc->outstanding_submission_g2h)) 614 wake_up_all(&guc->ct.wq); 615 } 616 617 static int guc_submission_send_busy_loop(struct intel_guc *guc, 618 const u32 *action, 619 u32 len, 620 u32 g2h_len_dw, 621 bool loop) 622 { 623 int ret; 624 625 /* 626 * We always loop when a send requires a reply (i.e. 
g2h_len_dw > 0), 627 * so we don't handle the case where we don't get a reply because we 628 * aborted the send due to the channel being busy. 629 */ 630 GEM_BUG_ON(g2h_len_dw && !loop); 631 632 if (g2h_len_dw) 633 atomic_inc(&guc->outstanding_submission_g2h); 634 635 ret = intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop); 636 if (ret) 637 atomic_dec(&guc->outstanding_submission_g2h); 638 639 return ret; 640 } 641 642 int intel_guc_wait_for_pending_msg(struct intel_guc *guc, 643 atomic_t *wait_var, 644 bool interruptible, 645 long timeout) 646 { 647 const int state = interruptible ? 648 TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; 649 DEFINE_WAIT(wait); 650 651 might_sleep(); 652 GEM_BUG_ON(timeout < 0); 653 654 if (!atomic_read(wait_var)) 655 return 0; 656 657 if (!timeout) 658 return -ETIME; 659 660 for (;;) { 661 prepare_to_wait(&guc->ct.wq, &wait, state); 662 663 if (!atomic_read(wait_var)) 664 break; 665 666 if (signal_pending_state(state, current)) { 667 timeout = -EINTR; 668 break; 669 } 670 671 if (!timeout) { 672 timeout = -ETIME; 673 break; 674 } 675 676 timeout = io_schedule_timeout(timeout); 677 } 678 finish_wait(&guc->ct.wq, &wait); 679 680 return (timeout < 0) ? timeout : 0; 681 } 682 683 int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout) 684 { 685 if (!intel_uc_uses_guc_submission(&guc_to_gt(guc)->uc)) 686 return 0; 687 688 return intel_guc_wait_for_pending_msg(guc, 689 &guc->outstanding_submission_g2h, 690 true, timeout); 691 } 692 693 static int guc_context_policy_init_v70(struct intel_context *ce, bool loop); 694 static int try_context_registration(struct intel_context *ce, bool loop); 695 696 static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq) 697 { 698 int err = 0; 699 struct intel_context *ce = request_to_scheduling_context(rq); 700 u32 action[3]; 701 int len = 0; 702 u32 g2h_len_dw = 0; 703 bool enabled; 704 705 lockdep_assert_held(&rq->engine->sched_engine->lock); 706 707 /* 708 * Corner case where requests were sitting in the priority list or a 709 * request resubmitted after the context was banned. 710 */ 711 if (unlikely(!intel_context_is_schedulable(ce))) { 712 i915_request_put(i915_request_mark_eio(rq)); 713 intel_engine_signal_breadcrumbs(ce->engine); 714 return 0; 715 } 716 717 GEM_BUG_ON(!atomic_read(&ce->guc_id.ref)); 718 GEM_BUG_ON(context_guc_id_invalid(ce)); 719 720 if (context_policy_required(ce)) { 721 err = guc_context_policy_init_v70(ce, false); 722 if (err) 723 return err; 724 } 725 726 spin_lock(&ce->guc_state.lock); 727 728 /* 729 * The request / context will be run on the hardware when scheduling 730 * gets enabled in the unblock. For multi-lrc we still submit the 731 * context to move the LRC tails. 
732 */ 733 if (unlikely(context_blocked(ce) && !intel_context_is_parent(ce))) 734 goto out; 735 736 enabled = context_enabled(ce) || context_blocked(ce); 737 738 if (!enabled) { 739 action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET; 740 action[len++] = ce->guc_id.id; 741 action[len++] = GUC_CONTEXT_ENABLE; 742 set_context_pending_enable(ce); 743 intel_context_get(ce); 744 g2h_len_dw = G2H_LEN_DW_SCHED_CONTEXT_MODE_SET; 745 } else { 746 action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT; 747 action[len++] = ce->guc_id.id; 748 } 749 750 err = intel_guc_send_nb(guc, action, len, g2h_len_dw); 751 if (!enabled && !err) { 752 trace_intel_context_sched_enable(ce); 753 atomic_inc(&guc->outstanding_submission_g2h); 754 set_context_enabled(ce); 755 756 /* 757 * Without multi-lrc KMD does the submission step (moving the 758 * lrc tail) so enabling scheduling is sufficient to submit the 759 * context. This isn't the case in multi-lrc submission as the 760 * GuC needs to move the tails, hence the need for another H2G 761 * to submit a multi-lrc context after enabling scheduling. 762 */ 763 if (intel_context_is_parent(ce)) { 764 action[0] = INTEL_GUC_ACTION_SCHED_CONTEXT; 765 err = intel_guc_send_nb(guc, action, len - 1, 0); 766 } 767 } else if (!enabled) { 768 clr_context_pending_enable(ce); 769 intel_context_put(ce); 770 } 771 if (likely(!err)) 772 trace_i915_request_guc_submit(rq); 773 774 out: 775 spin_unlock(&ce->guc_state.lock); 776 return err; 777 } 778 779 static int guc_add_request(struct intel_guc *guc, struct i915_request *rq) 780 { 781 int ret = __guc_add_request(guc, rq); 782 783 if (unlikely(ret == -EBUSY)) { 784 guc->stalled_request = rq; 785 guc->submission_stall_reason = STALL_ADD_REQUEST; 786 } 787 788 return ret; 789 } 790 791 static inline void guc_set_lrc_tail(struct i915_request *rq) 792 { 793 rq->context->lrc_reg_state[CTX_RING_TAIL] = 794 intel_ring_set_tail(rq->ring, rq->tail); 795 } 796 797 static inline int rq_prio(const struct i915_request *rq) 798 { 799 return rq->sched.attr.priority; 800 } 801 802 static bool is_multi_lrc_rq(struct i915_request *rq) 803 { 804 return intel_context_is_parallel(rq->context); 805 } 806 807 static bool can_merge_rq(struct i915_request *rq, 808 struct i915_request *last) 809 { 810 return request_to_scheduling_context(rq) == 811 request_to_scheduling_context(last); 812 } 813 814 static u32 wq_space_until_wrap(struct intel_context *ce) 815 { 816 return (WQ_SIZE - ce->parallel.guc.wqi_tail); 817 } 818 819 static void write_wqi(struct intel_context *ce, u32 wqi_size) 820 { 821 BUILD_BUG_ON(!is_power_of_2(WQ_SIZE)); 822 823 /* 824 * Ensure WQI are visible before updating tail 825 */ 826 intel_guc_write_barrier(ce_to_guc(ce)); 827 828 ce->parallel.guc.wqi_tail = (ce->parallel.guc.wqi_tail + wqi_size) & 829 (WQ_SIZE - 1); 830 WRITE_ONCE(*ce->parallel.guc.wq_tail, ce->parallel.guc.wqi_tail); 831 } 832 833 static int guc_wq_noop_append(struct intel_context *ce) 834 { 835 u32 *wqi = get_wq_pointer(ce, wq_space_until_wrap(ce)); 836 u32 len_dw = wq_space_until_wrap(ce) / sizeof(u32) - 1; 837 838 if (!wqi) 839 return -EBUSY; 840 841 GEM_BUG_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw)); 842 843 *wqi = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) | 844 FIELD_PREP(WQ_LEN_MASK, len_dw); 845 ce->parallel.guc.wqi_tail = 0; 846 847 return 0; 848 } 849 850 static int __guc_wq_item_append(struct i915_request *rq) 851 { 852 struct intel_context *ce = request_to_scheduling_context(rq); 853 struct intel_context *child; 854 unsigned int wqi_size = (ce->parallel.number_children + 4) * 
855 sizeof(u32); 856 u32 *wqi; 857 u32 len_dw = (wqi_size / sizeof(u32)) - 1; 858 int ret; 859 860 /* Ensure context is in correct state updating work queue */ 861 GEM_BUG_ON(!atomic_read(&ce->guc_id.ref)); 862 GEM_BUG_ON(context_guc_id_invalid(ce)); 863 GEM_BUG_ON(context_wait_for_deregister_to_register(ce)); 864 GEM_BUG_ON(!ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id)); 865 866 /* Insert NOOP if this work queue item will wrap the tail pointer. */ 867 if (wqi_size > wq_space_until_wrap(ce)) { 868 ret = guc_wq_noop_append(ce); 869 if (ret) 870 return ret; 871 } 872 873 wqi = get_wq_pointer(ce, wqi_size); 874 if (!wqi) 875 return -EBUSY; 876 877 GEM_BUG_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw)); 878 879 *wqi++ = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) | 880 FIELD_PREP(WQ_LEN_MASK, len_dw); 881 *wqi++ = ce->lrc.lrca; 882 *wqi++ = FIELD_PREP(WQ_GUC_ID_MASK, ce->guc_id.id) | 883 FIELD_PREP(WQ_RING_TAIL_MASK, ce->ring->tail / sizeof(u64)); 884 *wqi++ = 0; /* fence_id */ 885 for_each_child(ce, child) 886 *wqi++ = child->ring->tail / sizeof(u64); 887 888 write_wqi(ce, wqi_size); 889 890 return 0; 891 } 892 893 static int guc_wq_item_append(struct intel_guc *guc, 894 struct i915_request *rq) 895 { 896 struct intel_context *ce = request_to_scheduling_context(rq); 897 int ret; 898 899 if (unlikely(!intel_context_is_schedulable(ce))) 900 return 0; 901 902 ret = __guc_wq_item_append(rq); 903 if (unlikely(ret == -EBUSY)) { 904 guc->stalled_request = rq; 905 guc->submission_stall_reason = STALL_MOVE_LRC_TAIL; 906 } 907 908 return ret; 909 } 910 911 static bool multi_lrc_submit(struct i915_request *rq) 912 { 913 struct intel_context *ce = request_to_scheduling_context(rq); 914 915 intel_ring_set_tail(rq->ring, rq->tail); 916 917 /* 918 * We expect the front end (execbuf IOCTL) to set this flag on the last 919 * request generated from a multi-BB submission. This indicates to the 920 * backend (GuC interface) that we should submit this context thus 921 * submitting all the requests generated in parallel. 922 */ 923 return test_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL, &rq->fence.flags) || 924 !intel_context_is_schedulable(ce); 925 } 926 927 static int guc_dequeue_one_context(struct intel_guc *guc) 928 { 929 struct i915_sched_engine * const sched_engine = guc->sched_engine; 930 struct i915_request *last = NULL; 931 bool submit = false; 932 struct rb_node *rb; 933 int ret; 934 935 lockdep_assert_held(&sched_engine->lock); 936 937 if (guc->stalled_request) { 938 submit = true; 939 last = guc->stalled_request; 940 941 switch (guc->submission_stall_reason) { 942 case STALL_REGISTER_CONTEXT: 943 goto register_context; 944 case STALL_MOVE_LRC_TAIL: 945 goto move_lrc_tail; 946 case STALL_ADD_REQUEST: 947 goto add_request; 948 default: 949 MISSING_CASE(guc->submission_stall_reason); 950 } 951 } 952 953 while ((rb = rb_first_cached(&sched_engine->queue))) { 954 struct i915_priolist *p = to_priolist(rb); 955 struct i915_request *rq, *rn; 956 957 priolist_for_each_request_consume(rq, rn, p) { 958 if (last && !can_merge_rq(rq, last)) 959 goto register_context; 960 961 list_del_init(&rq->sched.link); 962 963 __i915_request_submit(rq); 964 965 trace_i915_request_in(rq, 0); 966 last = rq; 967 968 if (is_multi_lrc_rq(rq)) { 969 /* 970 * We need to coalesce all multi-lrc requests in 971 * a relationship into a single H2G. We are 972 * guaranteed that all of these requests will be 973 * submitted sequentially. 
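 * For example, a parent context with two children produces three
 * back-to-back requests here, but only the last one (flagged with
 * I915_FENCE_FLAG_SUBMIT_PARALLEL by the execbuf IOCTL) makes
 * multi_lrc_submit() return true and so triggers submission of the
 * whole relationship in one go.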
974 */ 975 if (multi_lrc_submit(rq)) { 976 submit = true; 977 goto register_context; 978 } 979 } else { 980 submit = true; 981 } 982 } 983 984 rb_erase_cached(&p->node, &sched_engine->queue); 985 i915_priolist_free(p); 986 } 987 988 register_context: 989 if (submit) { 990 struct intel_context *ce = request_to_scheduling_context(last); 991 992 if (unlikely(!ctx_id_mapped(guc, ce->guc_id.id) && 993 intel_context_is_schedulable(ce))) { 994 ret = try_context_registration(ce, false); 995 if (unlikely(ret == -EPIPE)) { 996 goto deadlk; 997 } else if (ret == -EBUSY) { 998 guc->stalled_request = last; 999 guc->submission_stall_reason = 1000 STALL_REGISTER_CONTEXT; 1001 goto schedule_tasklet; 1002 } else if (ret != 0) { 1003 GEM_WARN_ON(ret); /* Unexpected */ 1004 goto deadlk; 1005 } 1006 } 1007 1008 move_lrc_tail: 1009 if (is_multi_lrc_rq(last)) { 1010 ret = guc_wq_item_append(guc, last); 1011 if (ret == -EBUSY) { 1012 goto schedule_tasklet; 1013 } else if (ret != 0) { 1014 GEM_WARN_ON(ret); /* Unexpected */ 1015 goto deadlk; 1016 } 1017 } else { 1018 guc_set_lrc_tail(last); 1019 } 1020 1021 add_request: 1022 ret = guc_add_request(guc, last); 1023 if (unlikely(ret == -EPIPE)) { 1024 goto deadlk; 1025 } else if (ret == -EBUSY) { 1026 goto schedule_tasklet; 1027 } else if (ret != 0) { 1028 GEM_WARN_ON(ret); /* Unexpected */ 1029 goto deadlk; 1030 } 1031 } 1032 1033 guc->stalled_request = NULL; 1034 guc->submission_stall_reason = STALL_NONE; 1035 return submit; 1036 1037 deadlk: 1038 sched_engine->tasklet.callback = NULL; 1039 tasklet_disable_nosync(&sched_engine->tasklet); 1040 return false; 1041 1042 schedule_tasklet: 1043 tasklet_schedule(&sched_engine->tasklet); 1044 return false; 1045 } 1046 1047 static void guc_submission_tasklet(struct tasklet_struct *t) 1048 { 1049 struct i915_sched_engine *sched_engine = 1050 from_tasklet(sched_engine, t, tasklet); 1051 unsigned long flags; 1052 bool loop; 1053 1054 spin_lock_irqsave(&sched_engine->lock, flags); 1055 1056 do { 1057 loop = guc_dequeue_one_context(sched_engine->private_data); 1058 } while (loop); 1059 1060 i915_sched_engine_reset_on_empty(sched_engine); 1061 1062 spin_unlock_irqrestore(&sched_engine->lock, flags); 1063 } 1064 1065 static void cs_irq_handler(struct intel_engine_cs *engine, u16 iir) 1066 { 1067 if (iir & GT_RENDER_USER_INTERRUPT) 1068 intel_engine_signal_breadcrumbs(engine); 1069 } 1070 1071 static void __guc_context_destroy(struct intel_context *ce); 1072 static void release_guc_id(struct intel_guc *guc, struct intel_context *ce); 1073 static void guc_signal_context_fence(struct intel_context *ce); 1074 static void guc_cancel_context_requests(struct intel_context *ce); 1075 static void guc_blocked_fence_complete(struct intel_context *ce); 1076 1077 static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc) 1078 { 1079 struct intel_context *ce; 1080 unsigned long index, flags; 1081 bool pending_disable, pending_enable, deregister, destroyed, banned; 1082 1083 xa_lock_irqsave(&guc->context_lookup, flags); 1084 xa_for_each(&guc->context_lookup, index, ce) { 1085 /* 1086 * Corner case where the ref count on the object is zero but and 1087 * deregister G2H was lost. In this case we don't touch the ref 1088 * count and finish the destroy of the context. 
1089 */ 1090 bool do_put = kref_get_unless_zero(&ce->ref); 1091 1092 xa_unlock(&guc->context_lookup); 1093 1094 if (test_bit(CONTEXT_GUC_INIT, &ce->flags) && 1095 (cancel_delayed_work(&ce->guc_state.sched_disable_delay_work))) { 1096 /* successful cancel so jump straight to close it */ 1097 intel_context_sched_disable_unpin(ce); 1098 } 1099 1100 spin_lock(&ce->guc_state.lock); 1101 1102 /* 1103 * Once we are at this point submission_disabled() is guaranteed 1104 * to be visible to all callers who set the below flags (see above 1105 * flush and flushes in reset_prepare). If submission_disabled() 1106 * is set, the caller shouldn't set these flags. 1107 */ 1108 1109 destroyed = context_destroyed(ce); 1110 pending_enable = context_pending_enable(ce); 1111 pending_disable = context_pending_disable(ce); 1112 deregister = context_wait_for_deregister_to_register(ce); 1113 banned = context_banned(ce); 1114 init_sched_state(ce); 1115 1116 spin_unlock(&ce->guc_state.lock); 1117 1118 if (pending_enable || destroyed || deregister) { 1119 decr_outstanding_submission_g2h(guc); 1120 if (deregister) 1121 guc_signal_context_fence(ce); 1122 if (destroyed) { 1123 intel_gt_pm_put_async_untracked(guc_to_gt(guc)); 1124 release_guc_id(guc, ce); 1125 __guc_context_destroy(ce); 1126 } 1127 if (pending_enable || deregister) 1128 intel_context_put(ce); 1129 } 1130 1131 /* Not mutualy exclusive with above if statement. */ 1132 if (pending_disable) { 1133 guc_signal_context_fence(ce); 1134 if (banned) { 1135 guc_cancel_context_requests(ce); 1136 intel_engine_signal_breadcrumbs(ce->engine); 1137 } 1138 intel_context_sched_disable_unpin(ce); 1139 decr_outstanding_submission_g2h(guc); 1140 1141 spin_lock(&ce->guc_state.lock); 1142 guc_blocked_fence_complete(ce); 1143 spin_unlock(&ce->guc_state.lock); 1144 1145 intel_context_put(ce); 1146 } 1147 1148 if (do_put) 1149 intel_context_put(ce); 1150 xa_lock(&guc->context_lookup); 1151 } 1152 xa_unlock_irqrestore(&guc->context_lookup, flags); 1153 } 1154 1155 /* 1156 * GuC stores busyness stats for each engine at context in/out boundaries. A 1157 * context 'in' logs execution start time, 'out' adds in -> out delta to total. 1158 * i915/kmd accesses 'start', 'total' and 'context id' from memory shared with 1159 * GuC. 1160 * 1161 * __i915_pmu_event_read samples engine busyness. When sampling, if context id 1162 * is valid (!= ~0) and start is non-zero, the engine is considered to be 1163 * active. For an active engine total busyness = total + (now - start), where 1164 * 'now' is the time at which the busyness is sampled. For inactive engine, 1165 * total busyness = total. 1166 * 1167 * All times are captured from GUCPMTIMESTAMP reg and are in gt clock domain. 1168 * 1169 * The start and total values provided by GuC are 32 bits and wrap around in a 1170 * few minutes. Since perf pmu provides busyness as 64 bit monotonically 1171 * increasing ns values, there is a need for this implementation to account for 1172 * overflows and extend the GuC provided values to 64 bits before returning 1173 * busyness to the user. In order to do that, a worker runs periodically at 1174 * frequency = 1/8th the time it takes for the timestamp to wrap (i.e. once in 1175 * 27 seconds for a gt clock frequency of 19.2 MHz). 
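 *
 * As a rough worked example of the numbers above: at a 19.2 MHz gt
 * clock the 32 bit timestamp wraps after 2^32 / 19.2e6 ~= 224 seconds,
 * so the worker period of 1/8th of the wrap time comes out at
 * ~224 / 8 ~= 28 seconds, consistent with the ~27 seconds quoted above.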
1176 */ 1177 1178 #define WRAP_TIME_CLKS U32_MAX 1179 #define POLL_TIME_CLKS (WRAP_TIME_CLKS >> 3) 1180 1181 static void 1182 __extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start) 1183 { 1184 u32 gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp); 1185 u32 gt_stamp_last = lower_32_bits(guc->timestamp.gt_stamp); 1186 1187 if (new_start == lower_32_bits(*prev_start)) 1188 return; 1189 1190 /* 1191 * When gt is unparked, we update the gt timestamp and start the ping 1192 * worker that updates the gt_stamp every POLL_TIME_CLKS. As long as gt 1193 * is unparked, all switched in contexts will have a start time that is 1194 * within +/- POLL_TIME_CLKS of the most recent gt_stamp. 1195 * 1196 * If neither gt_stamp nor new_start has rolled over, then the 1197 * gt_stamp_hi does not need to be adjusted, however if one of them has 1198 * rolled over, we need to adjust gt_stamp_hi accordingly. 1199 * 1200 * The below conditions address the cases of new_start rollover and 1201 * gt_stamp_last rollover respectively. 1202 */ 1203 if (new_start < gt_stamp_last && 1204 (new_start - gt_stamp_last) <= POLL_TIME_CLKS) 1205 gt_stamp_hi++; 1206 1207 if (new_start > gt_stamp_last && 1208 (gt_stamp_last - new_start) <= POLL_TIME_CLKS && gt_stamp_hi) 1209 gt_stamp_hi--; 1210 1211 *prev_start = ((u64)gt_stamp_hi << 32) | new_start; 1212 } 1213 1214 #define record_read(map_, field_) \ 1215 iosys_map_rd_field(map_, 0, struct guc_engine_usage_record, field_) 1216 1217 /* 1218 * GuC updates shared memory and KMD reads it. Since this is not synchronized, 1219 * we run into a race where the value read is inconsistent. Sometimes the 1220 * inconsistency is in reading the upper MSB bytes of the last_in value when 1221 * this race occurs. 2 types of cases are seen - upper 8 bits are zero and upper 1222 * 24 bits are zero. Since these are non-zero values, it is non-trivial to 1223 * determine validity of these values. Instead we read the values multiple times 1224 * until they are consistent. In test runs, 3 attempts results in consistent 1225 * values. The upper bound is set to 6 attempts and may need to be tuned as per 1226 * any new occurences. 
1227 */ 1228 static void __get_engine_usage_record(struct intel_engine_cs *engine, 1229 u32 *last_in, u32 *id, u32 *total) 1230 { 1231 struct iosys_map rec_map = intel_guc_engine_usage_record_map(engine); 1232 int i = 0; 1233 1234 do { 1235 *last_in = record_read(&rec_map, last_switch_in_stamp); 1236 *id = record_read(&rec_map, current_context_index); 1237 *total = record_read(&rec_map, total_runtime); 1238 1239 if (record_read(&rec_map, last_switch_in_stamp) == *last_in && 1240 record_read(&rec_map, current_context_index) == *id && 1241 record_read(&rec_map, total_runtime) == *total) 1242 break; 1243 } while (++i < 6); 1244 } 1245 1246 static void __set_engine_usage_record(struct intel_engine_cs *engine, 1247 u32 last_in, u32 id, u32 total) 1248 { 1249 struct iosys_map rec_map = intel_guc_engine_usage_record_map(engine); 1250 1251 #define record_write(map_, field_, val_) \ 1252 iosys_map_wr_field(map_, 0, struct guc_engine_usage_record, field_, val_) 1253 1254 record_write(&rec_map, last_switch_in_stamp, last_in); 1255 record_write(&rec_map, current_context_index, id); 1256 record_write(&rec_map, total_runtime, total); 1257 1258 #undef record_write 1259 } 1260 1261 static void guc_update_engine_gt_clks(struct intel_engine_cs *engine) 1262 { 1263 struct intel_engine_guc_stats *stats = &engine->stats.guc; 1264 struct intel_guc *guc = gt_to_guc(engine->gt); 1265 u32 last_switch, ctx_id, total; 1266 1267 lockdep_assert_held(&guc->timestamp.lock); 1268 1269 __get_engine_usage_record(engine, &last_switch, &ctx_id, &total); 1270 1271 stats->running = ctx_id != ~0U && last_switch; 1272 if (stats->running) 1273 __extend_last_switch(guc, &stats->start_gt_clk, last_switch); 1274 1275 /* 1276 * Instead of adjusting the total for overflow, just add the 1277 * difference from previous sample stats->total_gt_clks 1278 */ 1279 if (total && total != ~0U) { 1280 stats->total_gt_clks += (u32)(total - stats->prev_total); 1281 stats->prev_total = total; 1282 } 1283 } 1284 1285 static u32 gpm_timestamp_shift(struct intel_gt *gt) 1286 { 1287 intel_wakeref_t wakeref; 1288 u32 reg, shift; 1289 1290 with_intel_runtime_pm(gt->uncore->rpm, wakeref) 1291 reg = intel_uncore_read(gt->uncore, RPM_CONFIG0); 1292 1293 shift = (reg & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >> 1294 GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT; 1295 1296 return 3 - shift; 1297 } 1298 1299 static void guc_update_pm_timestamp(struct intel_guc *guc, ktime_t *now) 1300 { 1301 struct intel_gt *gt = guc_to_gt(guc); 1302 u32 gt_stamp_lo, gt_stamp_hi; 1303 u64 gpm_ts; 1304 1305 lockdep_assert_held(&guc->timestamp.lock); 1306 1307 gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp); 1308 gpm_ts = intel_uncore_read64_2x32(gt->uncore, MISC_STATUS0, 1309 MISC_STATUS1) >> guc->timestamp.shift; 1310 gt_stamp_lo = lower_32_bits(gpm_ts); 1311 *now = ktime_get(); 1312 1313 if (gt_stamp_lo < lower_32_bits(guc->timestamp.gt_stamp)) 1314 gt_stamp_hi++; 1315 1316 guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_lo; 1317 } 1318 1319 /* 1320 * Unlike the execlist mode of submission total and active times are in terms of 1321 * gt clocks. The *now parameter is retained to return the cpu time at which the 1322 * busyness was sampled. 
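 *
 * In other words, the value returned below is roughly:
 *
 *   busyness = to_ns(stats->total_gt_clks) +
 *              (running ? to_ns(gt_stamp - stats->start_gt_clk) : 0)
 *
 * where to_ns() is intel_gt_clock_interval_to_ns(), gt_stamp is the
 * extended GuC PM timestamp and the result is clamped so that the
 * reported total never goes backwards.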
1323 */ 1324 static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now) 1325 { 1326 struct intel_engine_guc_stats stats_saved, *stats = &engine->stats.guc; 1327 struct i915_gpu_error *gpu_error = &engine->i915->gpu_error; 1328 struct intel_gt *gt = engine->gt; 1329 struct intel_guc *guc = gt_to_guc(gt); 1330 u64 total, gt_stamp_saved; 1331 unsigned long flags; 1332 u32 reset_count; 1333 bool in_reset; 1334 intel_wakeref_t wakeref; 1335 1336 spin_lock_irqsave(&guc->timestamp.lock, flags); 1337 1338 /* 1339 * If a reset happened, we risk reading partially updated engine 1340 * busyness from GuC, so we just use the driver stored copy of busyness. 1341 * Synchronize with gt reset using reset_count and the 1342 * I915_RESET_BACKOFF flag. Note that reset flow updates the reset_count 1343 * after I915_RESET_BACKOFF flag, so ensure that the reset_count is 1344 * usable by checking the flag afterwards. 1345 */ 1346 reset_count = i915_reset_count(gpu_error); 1347 in_reset = test_bit(I915_RESET_BACKOFF, >->reset.flags); 1348 1349 *now = ktime_get(); 1350 1351 /* 1352 * The active busyness depends on start_gt_clk and gt_stamp. 1353 * gt_stamp is updated by i915 only when gt is awake and the 1354 * start_gt_clk is derived from GuC state. To get a consistent 1355 * view of activity, we query the GuC state only if gt is awake. 1356 */ 1357 wakeref = in_reset ? NULL : intel_gt_pm_get_if_awake(gt); 1358 if (wakeref) { 1359 stats_saved = *stats; 1360 gt_stamp_saved = guc->timestamp.gt_stamp; 1361 /* 1362 * Update gt_clks, then gt timestamp to simplify the 'gt_stamp - 1363 * start_gt_clk' calculation below for active engines. 1364 */ 1365 guc_update_engine_gt_clks(engine); 1366 guc_update_pm_timestamp(guc, now); 1367 intel_gt_pm_put_async(gt, wakeref); 1368 if (i915_reset_count(gpu_error) != reset_count) { 1369 *stats = stats_saved; 1370 guc->timestamp.gt_stamp = gt_stamp_saved; 1371 } 1372 } 1373 1374 total = intel_gt_clock_interval_to_ns(gt, stats->total_gt_clks); 1375 if (stats->running) { 1376 u64 clk = guc->timestamp.gt_stamp - stats->start_gt_clk; 1377 1378 total += intel_gt_clock_interval_to_ns(gt, clk); 1379 } 1380 1381 if (total > stats->total) 1382 stats->total = total; 1383 1384 spin_unlock_irqrestore(&guc->timestamp.lock, flags); 1385 1386 return ns_to_ktime(stats->total); 1387 } 1388 1389 static void guc_enable_busyness_worker(struct intel_guc *guc) 1390 { 1391 mod_delayed_work(system_highpri_wq, &guc->timestamp.work, guc->timestamp.ping_delay); 1392 } 1393 1394 static void guc_cancel_busyness_worker(struct intel_guc *guc) 1395 { 1396 /* 1397 * There are many different call stacks that can get here. Some of them 1398 * hold the reset mutex. The busyness worker also attempts to acquire the 1399 * reset mutex. Synchronously flushing a worker thread requires acquiring 1400 * the worker mutex. Lockdep sees this as a conflict. It thinks that the 1401 * flush can deadlock because it holds the worker mutex while waiting for 1402 * the reset mutex, but another thread is holding the reset mutex and might 1403 * attempt to use other worker functions. 1404 * 1405 * In practice, this scenario does not exist because the busyness worker 1406 * does not block waiting for the reset mutex. It does a try-lock on it and 1407 * immediately exits if the lock is already held. Unfortunately, the mutex 1408 * in question (I915_RESET_BACKOFF) is an i915 implementation which has lockdep 1409 * annotation but not to the extent of explaining the 'might lock' is also a 1410 * 'does not need to lock'. 
So one option would be to add more complex lockdep 1411 * annotations to ignore the issue (if at all possible). A simpler option is to 1412 * just not flush synchronously when a rest in progress. Given that the worker 1413 * will just early exit and re-schedule itself anyway, there is no advantage 1414 * to running it immediately. 1415 * 1416 * If a reset is not in progress, then the synchronous flush may be required. 1417 * As noted many call stacks lead here, some during suspend and driver unload 1418 * which do require a synchronous flush to make sure the worker is stopped 1419 * before memory is freed. 1420 * 1421 * Trying to pass a 'need_sync' or 'in_reset' flag all the way down through 1422 * every possible call stack is unfeasible. It would be too intrusive to many 1423 * areas that really don't care about the GuC backend. However, there is the 1424 * I915_RESET_BACKOFF flag and the gt->reset.mutex can be tested for is_locked. 1425 * So just use those. Note that testing both is required due to the hideously 1426 * complex nature of the i915 driver's reset code paths. 1427 * 1428 * And note that in the case of a reset occurring during driver unload 1429 * (wedged_on_fini), skipping the cancel in reset_prepare/reset_fini (when the 1430 * reset flag/mutex are set) is fine because there is another explicit cancel in 1431 * intel_guc_submission_fini (when the reset flag/mutex are not). 1432 */ 1433 if (mutex_is_locked(&guc_to_gt(guc)->reset.mutex) || 1434 test_bit(I915_RESET_BACKOFF, &guc_to_gt(guc)->reset.flags)) 1435 cancel_delayed_work(&guc->timestamp.work); 1436 else 1437 cancel_delayed_work_sync(&guc->timestamp.work); 1438 } 1439 1440 static void __reset_guc_busyness_stats(struct intel_guc *guc) 1441 { 1442 struct intel_gt *gt = guc_to_gt(guc); 1443 struct intel_engine_cs *engine; 1444 enum intel_engine_id id; 1445 unsigned long flags; 1446 ktime_t unused; 1447 1448 spin_lock_irqsave(&guc->timestamp.lock, flags); 1449 1450 guc_update_pm_timestamp(guc, &unused); 1451 for_each_engine(engine, gt, id) { 1452 struct intel_engine_guc_stats *stats = &engine->stats.guc; 1453 1454 guc_update_engine_gt_clks(engine); 1455 1456 /* 1457 * If resetting a running context, accumulate the active 1458 * time as well since there will be no context switch. 
1459 */ 1460 if (stats->running) { 1461 u64 clk = guc->timestamp.gt_stamp - stats->start_gt_clk; 1462 1463 stats->total_gt_clks += clk; 1464 } 1465 stats->prev_total = 0; 1466 stats->running = 0; 1467 } 1468 1469 spin_unlock_irqrestore(&guc->timestamp.lock, flags); 1470 } 1471 1472 static void __update_guc_busyness_stats(struct intel_guc *guc) 1473 { 1474 struct intel_gt *gt = guc_to_gt(guc); 1475 struct intel_engine_cs *engine; 1476 enum intel_engine_id id; 1477 unsigned long flags; 1478 ktime_t unused; 1479 1480 guc->timestamp.last_stat_jiffies = jiffies; 1481 1482 spin_lock_irqsave(&guc->timestamp.lock, flags); 1483 1484 guc_update_pm_timestamp(guc, &unused); 1485 for_each_engine(engine, gt, id) 1486 guc_update_engine_gt_clks(engine); 1487 1488 spin_unlock_irqrestore(&guc->timestamp.lock, flags); 1489 } 1490 1491 static void __guc_context_update_stats(struct intel_context *ce) 1492 { 1493 struct intel_guc *guc = ce_to_guc(ce); 1494 unsigned long flags; 1495 1496 spin_lock_irqsave(&guc->timestamp.lock, flags); 1497 lrc_update_runtime(ce); 1498 spin_unlock_irqrestore(&guc->timestamp.lock, flags); 1499 } 1500 1501 static void guc_context_update_stats(struct intel_context *ce) 1502 { 1503 if (!intel_context_pin_if_active(ce)) 1504 return; 1505 1506 __guc_context_update_stats(ce); 1507 intel_context_unpin(ce); 1508 } 1509 1510 static void guc_timestamp_ping(struct work_struct *wrk) 1511 { 1512 struct intel_guc *guc = container_of(wrk, typeof(*guc), 1513 timestamp.work.work); 1514 struct intel_uc *uc = container_of(guc, typeof(*uc), guc); 1515 struct intel_gt *gt = guc_to_gt(guc); 1516 struct intel_context *ce; 1517 intel_wakeref_t wakeref; 1518 unsigned long index; 1519 int srcu, ret; 1520 1521 /* 1522 * Ideally the busyness worker should take a gt pm wakeref because the 1523 * worker only needs to be active while gt is awake. However, the 1524 * gt_park path cancels the worker synchronously and this complicates 1525 * the flow if the worker is also running at the same time. The cancel 1526 * waits for the worker and when the worker releases the wakeref, that 1527 * would call gt_park and would lead to a deadlock. 1528 * 1529 * The resolution is to take the global pm wakeref if runtime pm is 1530 * already active. If not, we don't need to update the busyness stats as 1531 * the stats would already be updated when the gt was parked. 1532 * 1533 * Note: 1534 * - We do not requeue the worker if we cannot take a reference to runtime 1535 * pm since intel_guc_busyness_unpark would requeue the worker in the 1536 * resume path. 1537 * 1538 * - If the gt was parked longer than time taken for GT timestamp to roll 1539 * over, we ignore those rollovers since we don't care about tracking 1540 * the exact GT time. We only care about roll overs when the gt is 1541 * active and running workloads. 1542 * 1543 * - There is a window of time between gt_park and runtime suspend, 1544 * where the worker may run. This is acceptable since the worker will 1545 * not find any new data to update busyness. 1546 */ 1547 wakeref = intel_runtime_pm_get_if_active(>->i915->runtime_pm); 1548 if (!wakeref) 1549 return; 1550 1551 /* 1552 * Synchronize with gt reset to make sure the worker does not 1553 * corrupt the engine/guc stats. NB: can't actually block waiting 1554 * for a reset to complete as the reset requires flushing out 1555 * this worker thread if started. So waiting would deadlock. 
1556 */ 1557 ret = intel_gt_reset_trylock(gt, &srcu); 1558 if (ret) 1559 goto err_trylock; 1560 1561 __update_guc_busyness_stats(guc); 1562 1563 /* adjust context stats for overflow */ 1564 xa_for_each(&guc->context_lookup, index, ce) 1565 guc_context_update_stats(ce); 1566 1567 intel_gt_reset_unlock(gt, srcu); 1568 1569 guc_enable_busyness_worker(guc); 1570 1571 err_trylock: 1572 intel_runtime_pm_put(>->i915->runtime_pm, wakeref); 1573 } 1574 1575 static int guc_action_enable_usage_stats(struct intel_guc *guc) 1576 { 1577 struct intel_gt *gt = guc_to_gt(guc); 1578 struct intel_engine_cs *engine; 1579 enum intel_engine_id id; 1580 u32 offset = intel_guc_engine_usage_offset(guc); 1581 u32 action[] = { 1582 INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF, 1583 offset, 1584 0, 1585 }; 1586 1587 for_each_engine(engine, gt, id) 1588 __set_engine_usage_record(engine, 0, 0xffffffff, 0); 1589 1590 return intel_guc_send(guc, action, ARRAY_SIZE(action)); 1591 } 1592 1593 static int guc_init_engine_stats(struct intel_guc *guc) 1594 { 1595 struct intel_gt *gt = guc_to_gt(guc); 1596 intel_wakeref_t wakeref; 1597 int ret; 1598 1599 with_intel_runtime_pm(>->i915->runtime_pm, wakeref) 1600 ret = guc_action_enable_usage_stats(guc); 1601 1602 if (ret) 1603 guc_err(guc, "Failed to enable usage stats: %pe\n", ERR_PTR(ret)); 1604 else 1605 guc_enable_busyness_worker(guc); 1606 1607 return ret; 1608 } 1609 1610 static void guc_fini_engine_stats(struct intel_guc *guc) 1611 { 1612 guc_cancel_busyness_worker(guc); 1613 } 1614 1615 void intel_guc_busyness_park(struct intel_gt *gt) 1616 { 1617 struct intel_guc *guc = gt_to_guc(gt); 1618 1619 if (!guc_submission_initialized(guc)) 1620 return; 1621 1622 /* 1623 * There is a race with suspend flow where the worker runs after suspend 1624 * and causes an unclaimed register access warning. Cancel the worker 1625 * synchronously here. 1626 */ 1627 guc_cancel_busyness_worker(guc); 1628 1629 /* 1630 * Before parking, we should sample engine busyness stats if we need to. 1631 * We can skip it if we are less than half a ping from the last time we 1632 * sampled the busyness stats. 
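 * (With the ~27 second ping period described earlier for a 19.2 MHz gt
 * clock, that means the sample below is only taken when the previous
 * one is more than ~13.5 seconds old.)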
1633 */ 1634 if (guc->timestamp.last_stat_jiffies && 1635 !time_after(jiffies, guc->timestamp.last_stat_jiffies + 1636 (guc->timestamp.ping_delay / 2))) 1637 return; 1638 1639 __update_guc_busyness_stats(guc); 1640 } 1641 1642 void intel_guc_busyness_unpark(struct intel_gt *gt) 1643 { 1644 struct intel_guc *guc = gt_to_guc(gt); 1645 unsigned long flags; 1646 ktime_t unused; 1647 1648 if (!guc_submission_initialized(guc)) 1649 return; 1650 1651 spin_lock_irqsave(&guc->timestamp.lock, flags); 1652 guc_update_pm_timestamp(guc, &unused); 1653 spin_unlock_irqrestore(&guc->timestamp.lock, flags); 1654 guc_enable_busyness_worker(guc); 1655 } 1656 1657 static inline bool 1658 submission_disabled(struct intel_guc *guc) 1659 { 1660 struct i915_sched_engine * const sched_engine = guc->sched_engine; 1661 1662 return unlikely(!sched_engine || 1663 !__tasklet_is_enabled(&sched_engine->tasklet) || 1664 intel_gt_is_wedged(guc_to_gt(guc))); 1665 } 1666 1667 static void disable_submission(struct intel_guc *guc) 1668 { 1669 struct i915_sched_engine * const sched_engine = guc->sched_engine; 1670 1671 if (__tasklet_is_enabled(&sched_engine->tasklet)) { 1672 GEM_BUG_ON(!guc->ct.enabled); 1673 __tasklet_disable_sync_once(&sched_engine->tasklet); 1674 sched_engine->tasklet.callback = NULL; 1675 } 1676 } 1677 1678 static void enable_submission(struct intel_guc *guc) 1679 { 1680 struct i915_sched_engine * const sched_engine = guc->sched_engine; 1681 unsigned long flags; 1682 1683 spin_lock_irqsave(&guc->sched_engine->lock, flags); 1684 sched_engine->tasklet.callback = guc_submission_tasklet; 1685 wmb(); /* Make sure callback visible */ 1686 if (!__tasklet_is_enabled(&sched_engine->tasklet) && 1687 __tasklet_enable(&sched_engine->tasklet)) { 1688 GEM_BUG_ON(!guc->ct.enabled); 1689 1690 /* And kick in case we missed a new request submission. */ 1691 tasklet_hi_schedule(&sched_engine->tasklet); 1692 } 1693 spin_unlock_irqrestore(&guc->sched_engine->lock, flags); 1694 } 1695 1696 static void guc_flush_submissions(struct intel_guc *guc) 1697 { 1698 struct i915_sched_engine * const sched_engine = guc->sched_engine; 1699 unsigned long flags; 1700 1701 spin_lock_irqsave(&sched_engine->lock, flags); 1702 spin_unlock_irqrestore(&sched_engine->lock, flags); 1703 } 1704 1705 void intel_guc_submission_flush_work(struct intel_guc *guc) 1706 { 1707 flush_work(&guc->submission_state.destroyed_worker); 1708 } 1709 1710 static void guc_flush_destroyed_contexts(struct intel_guc *guc); 1711 1712 void intel_guc_submission_reset_prepare(struct intel_guc *guc) 1713 { 1714 if (unlikely(!guc_submission_initialized(guc))) { 1715 /* Reset called during driver load? GuC not yet initialised! 
*/ 1716 return; 1717 } 1718 1719 intel_gt_park_heartbeats(guc_to_gt(guc)); 1720 disable_submission(guc); 1721 guc->interrupts.disable(guc); 1722 __reset_guc_busyness_stats(guc); 1723 1724 /* Flush IRQ handler */ 1725 spin_lock_irq(guc_to_gt(guc)->irq_lock); 1726 spin_unlock_irq(guc_to_gt(guc)->irq_lock); 1727 1728 /* Flush tasklet */ 1729 tasklet_disable(&guc->ct.receive_tasklet); 1730 tasklet_enable(&guc->ct.receive_tasklet); 1731 1732 guc_flush_submissions(guc); 1733 guc_flush_destroyed_contexts(guc); 1734 flush_work(&guc->ct.requests.worker); 1735 1736 scrub_guc_desc_for_outstanding_g2h(guc); 1737 } 1738 1739 static struct intel_engine_cs * 1740 guc_virtual_get_sibling(struct intel_engine_cs *ve, unsigned int sibling) 1741 { 1742 struct intel_engine_cs *engine; 1743 intel_engine_mask_t tmp, mask = ve->mask; 1744 unsigned int num_siblings = 0; 1745 1746 for_each_engine_masked(engine, ve->gt, mask, tmp) 1747 if (num_siblings++ == sibling) 1748 return engine; 1749 1750 return NULL; 1751 } 1752 1753 static inline struct intel_engine_cs * 1754 __context_to_physical_engine(struct intel_context *ce) 1755 { 1756 struct intel_engine_cs *engine = ce->engine; 1757 1758 if (intel_engine_is_virtual(engine)) 1759 engine = guc_virtual_get_sibling(engine, 0); 1760 1761 return engine; 1762 } 1763 1764 static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub) 1765 { 1766 struct intel_engine_cs *engine = __context_to_physical_engine(ce); 1767 1768 if (!intel_context_is_schedulable(ce)) 1769 return; 1770 1771 GEM_BUG_ON(!intel_context_is_pinned(ce)); 1772 1773 /* 1774 * We want a simple context + ring to execute the breadcrumb update. 1775 * We cannot rely on the context being intact across the GPU hang, 1776 * so clear it and rebuild just what we need for the breadcrumb. 1777 * All pending requests for this context will be zapped, and any 1778 * future request will be after userspace has had the opportunity 1779 * to recreate its own state. 1780 */ 1781 if (scrub) 1782 lrc_init_regs(ce, engine, true); 1783 1784 /* Rerun the request; its payload has been neutered (if guilty). */ 1785 lrc_update_regs(ce, engine, head); 1786 } 1787 1788 static void guc_engine_reset_prepare(struct intel_engine_cs *engine) 1789 { 1790 /* 1791 * Wa_22011802037: In addition to stopping the cs, we need 1792 * to wait for any pending mi force wakeups 1793 */ 1794 if (intel_engine_reset_needs_wa_22011802037(engine->gt)) { 1795 intel_engine_stop_cs(engine); 1796 intel_engine_wait_for_pending_mi_fw(engine); 1797 } 1798 } 1799 1800 static void guc_reset_nop(struct intel_engine_cs *engine) 1801 { 1802 } 1803 1804 static void guc_rewind_nop(struct intel_engine_cs *engine, bool stalled) 1805 { 1806 } 1807 1808 static void 1809 __unwind_incomplete_requests(struct intel_context *ce) 1810 { 1811 struct i915_request *rq, *rn; 1812 struct list_head *pl; 1813 int prio = I915_PRIORITY_INVALID; 1814 struct i915_sched_engine * const sched_engine = 1815 ce->engine->sched_engine; 1816 unsigned long flags; 1817 1818 spin_lock_irqsave(&sched_engine->lock, flags); 1819 spin_lock(&ce->guc_state.lock); 1820 list_for_each_entry_safe_reverse(rq, rn, 1821 &ce->guc_state.requests, 1822 sched.link) { 1823 if (i915_request_completed(rq)) 1824 continue; 1825 1826 list_del_init(&rq->sched.link); 1827 __i915_request_unsubmit(rq); 1828 1829 /* Push the request back into the queue for later resubmission. 
*/ 1830 GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID); 1831 if (rq_prio(rq) != prio) { 1832 prio = rq_prio(rq); 1833 pl = i915_sched_lookup_priolist(sched_engine, prio); 1834 } 1835 GEM_BUG_ON(i915_sched_engine_is_empty(sched_engine)); 1836 1837 list_add(&rq->sched.link, pl); 1838 set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); 1839 } 1840 spin_unlock(&ce->guc_state.lock); 1841 spin_unlock_irqrestore(&sched_engine->lock, flags); 1842 } 1843 1844 static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t stalled) 1845 { 1846 bool guilty; 1847 struct i915_request *rq; 1848 unsigned long flags; 1849 u32 head; 1850 int i, number_children = ce->parallel.number_children; 1851 struct intel_context *parent = ce; 1852 1853 GEM_BUG_ON(intel_context_is_child(ce)); 1854 1855 intel_context_get(ce); 1856 1857 /* 1858 * GuC will implicitly mark the context as non-schedulable when it sends 1859 * the reset notification. Make sure our state reflects this change. The 1860 * context will be marked enabled on resubmission. 1861 */ 1862 spin_lock_irqsave(&ce->guc_state.lock, flags); 1863 clr_context_enabled(ce); 1864 spin_unlock_irqrestore(&ce->guc_state.lock, flags); 1865 1866 /* 1867 * For each context in the relationship find the hanging request 1868 * resetting each context / request as needed 1869 */ 1870 for (i = 0; i < number_children + 1; ++i) { 1871 if (!intel_context_is_pinned(ce)) 1872 goto next_context; 1873 1874 guilty = false; 1875 rq = intel_context_get_active_request(ce); 1876 if (!rq) { 1877 head = ce->ring->tail; 1878 goto out_replay; 1879 } 1880 1881 if (i915_request_started(rq)) 1882 guilty = stalled & ce->engine->mask; 1883 1884 GEM_BUG_ON(i915_active_is_idle(&ce->active)); 1885 head = intel_ring_wrap(ce->ring, rq->head); 1886 1887 __i915_request_reset(rq, guilty); 1888 i915_request_put(rq); 1889 out_replay: 1890 guc_reset_state(ce, head, guilty); 1891 next_context: 1892 if (i != number_children) 1893 ce = list_next_entry(ce, parallel.child_link); 1894 } 1895 1896 __unwind_incomplete_requests(parent); 1897 intel_context_put(parent); 1898 } 1899 1900 void wake_up_all_tlb_invalidate(struct intel_guc *guc) 1901 { 1902 struct intel_guc_tlb_wait *wait; 1903 unsigned long i; 1904 1905 if (!intel_guc_tlb_invalidation_is_available(guc)) 1906 return; 1907 1908 xa_lock_irq(&guc->tlb_lookup); 1909 xa_for_each(&guc->tlb_lookup, i, wait) 1910 wake_up(&wait->wq); 1911 xa_unlock_irq(&guc->tlb_lookup); 1912 } 1913 1914 void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled) 1915 { 1916 struct intel_context *ce; 1917 unsigned long index; 1918 unsigned long flags; 1919 1920 if (unlikely(!guc_submission_initialized(guc))) { 1921 /* Reset called during driver load? GuC not yet initialised! 
*/ 1922 return; 1923 } 1924 1925 xa_lock_irqsave(&guc->context_lookup, flags); 1926 xa_for_each(&guc->context_lookup, index, ce) { 1927 if (!kref_get_unless_zero(&ce->ref)) 1928 continue; 1929 1930 xa_unlock(&guc->context_lookup); 1931 1932 if (intel_context_is_pinned(ce) && 1933 !intel_context_is_child(ce)) 1934 __guc_reset_context(ce, stalled); 1935 1936 intel_context_put(ce); 1937 1938 xa_lock(&guc->context_lookup); 1939 } 1940 xa_unlock_irqrestore(&guc->context_lookup, flags); 1941 1942 /* GuC is blown away, drop all references to contexts */ 1943 xa_destroy(&guc->context_lookup); 1944 } 1945 1946 static void guc_cancel_context_requests(struct intel_context *ce) 1947 { 1948 struct i915_sched_engine *sched_engine = ce_to_guc(ce)->sched_engine; 1949 struct i915_request *rq; 1950 unsigned long flags; 1951 1952 /* Mark all executing requests as skipped. */ 1953 spin_lock_irqsave(&sched_engine->lock, flags); 1954 spin_lock(&ce->guc_state.lock); 1955 list_for_each_entry(rq, &ce->guc_state.requests, sched.link) 1956 i915_request_put(i915_request_mark_eio(rq)); 1957 spin_unlock(&ce->guc_state.lock); 1958 spin_unlock_irqrestore(&sched_engine->lock, flags); 1959 } 1960 1961 static void 1962 guc_cancel_sched_engine_requests(struct i915_sched_engine *sched_engine) 1963 { 1964 struct i915_request *rq, *rn; 1965 struct rb_node *rb; 1966 unsigned long flags; 1967 1968 /* Can be called during boot if GuC fails to load */ 1969 if (!sched_engine) 1970 return; 1971 1972 /* 1973 * Before we call engine->cancel_requests(), we should have exclusive 1974 * access to the submission state. This is arranged for us by the 1975 * caller disabling the interrupt generation, the tasklet and other 1976 * threads that may then access the same state, giving us a free hand 1977 * to reset state. However, we still need to let lockdep be aware that 1978 * we know this state may be accessed in hardirq context, so we 1979 * disable the irq around this manipulation and we want to keep 1980 * the spinlock focused on its duties and not accidentally conflate 1981 * coverage to the submission's irq state. (Similarly, although we 1982 * shouldn't need to disable irq around the manipulation of the 1983 * submission's irq state, we also wish to remind ourselves that 1984 * it is irq state.) 1985 */ 1986 spin_lock_irqsave(&sched_engine->lock, flags); 1987 1988 /* Flush the queued requests to the timeline list (for retiring). 
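* Each request is submitted directly and marked as failed (-EIO), so it
* completes in error and can be retired; nothing is actually handed to the
* GuC here.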
*/ 1989 while ((rb = rb_first_cached(&sched_engine->queue))) { 1990 struct i915_priolist *p = to_priolist(rb); 1991 1992 priolist_for_each_request_consume(rq, rn, p) { 1993 list_del_init(&rq->sched.link); 1994 1995 __i915_request_submit(rq); 1996 1997 i915_request_put(i915_request_mark_eio(rq)); 1998 } 1999 2000 rb_erase_cached(&p->node, &sched_engine->queue); 2001 i915_priolist_free(p); 2002 } 2003 2004 /* Remaining _unready_ requests will be nop'ed when submitted */ 2005 2006 sched_engine->queue_priority_hint = INT_MIN; 2007 sched_engine->queue = RB_ROOT_CACHED; 2008 2009 spin_unlock_irqrestore(&sched_engine->lock, flags); 2010 } 2011 2012 void intel_guc_submission_cancel_requests(struct intel_guc *guc) 2013 { 2014 struct intel_context *ce; 2015 unsigned long index; 2016 unsigned long flags; 2017 2018 xa_lock_irqsave(&guc->context_lookup, flags); 2019 xa_for_each(&guc->context_lookup, index, ce) { 2020 if (!kref_get_unless_zero(&ce->ref)) 2021 continue; 2022 2023 xa_unlock(&guc->context_lookup); 2024 2025 if (intel_context_is_pinned(ce) && 2026 !intel_context_is_child(ce)) 2027 guc_cancel_context_requests(ce); 2028 2029 intel_context_put(ce); 2030 2031 xa_lock(&guc->context_lookup); 2032 } 2033 xa_unlock_irqrestore(&guc->context_lookup, flags); 2034 2035 guc_cancel_sched_engine_requests(guc->sched_engine); 2036 2037 /* GuC is blown away, drop all references to contexts */ 2038 xa_destroy(&guc->context_lookup); 2039 2040 /* 2041 * Wedged GT won't respond to any TLB invalidation request. Simply 2042 * release all the blocked waiters. 2043 */ 2044 wake_up_all_tlb_invalidate(guc); 2045 } 2046 2047 void intel_guc_submission_reset_finish(struct intel_guc *guc) 2048 { 2049 int outstanding; 2050 2051 /* Reset called during driver load or during wedge? */ 2052 if (unlikely(!guc_submission_initialized(guc) || 2053 !intel_guc_is_fw_running(guc) || 2054 intel_gt_is_wedged(guc_to_gt(guc)))) { 2055 return; 2056 } 2057 2058 /* 2059 * Technically possible for either of these values to be non-zero here, 2060 * but very unlikely + harmless. Regardless let's add an error so we can 2061 * see in CI if this happens frequently / a precursor to taking down the 2062 * machine. 2063 */ 2064 outstanding = atomic_read(&guc->outstanding_submission_g2h); 2065 if (outstanding) 2066 guc_err(guc, "Unexpected outstanding GuC to Host response(s) in reset finish: %d\n", 2067 outstanding); 2068 atomic_set(&guc->outstanding_submission_g2h, 0); 2069 2070 intel_guc_global_policies_update(guc); 2071 enable_submission(guc); 2072 intel_gt_unpark_heartbeats(guc_to_gt(guc)); 2073 2074 /* 2075 * The full GT reset will have cleared the TLB caches and flushed the 2076 * G2H message queue; we can release all the blocked waiters. 
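* wake_up_all_tlb_invalidate() walks guc->tlb_lookup and wakes every waiter
* still parked on an outstanding invalidation.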
2077 */ 2078 wake_up_all_tlb_invalidate(guc); 2079 } 2080 2081 static void destroyed_worker_func(struct work_struct *w); 2082 static void reset_fail_worker_func(struct work_struct *w); 2083 2084 bool intel_guc_tlb_invalidation_is_available(struct intel_guc *guc) 2085 { 2086 return HAS_GUC_TLB_INVALIDATION(guc_to_gt(guc)->i915) && 2087 intel_guc_is_ready(guc); 2088 } 2089 2090 static int init_tlb_lookup(struct intel_guc *guc) 2091 { 2092 struct intel_guc_tlb_wait *wait; 2093 int err; 2094 2095 if (!HAS_GUC_TLB_INVALIDATION(guc_to_gt(guc)->i915)) 2096 return 0; 2097 2098 xa_init_flags(&guc->tlb_lookup, XA_FLAGS_ALLOC); 2099 2100 wait = kzalloc(sizeof(*wait), GFP_KERNEL); 2101 if (!wait) 2102 return -ENOMEM; 2103 2104 init_waitqueue_head(&wait->wq); 2105 2106 /* Preallocate a shared id for use under memory pressure. */ 2107 err = xa_alloc_cyclic_irq(&guc->tlb_lookup, &guc->serial_slot, wait, 2108 xa_limit_32b, &guc->next_seqno, GFP_KERNEL); 2109 if (err < 0) { 2110 kfree(wait); 2111 return err; 2112 } 2113 2114 return 0; 2115 } 2116 2117 static void fini_tlb_lookup(struct intel_guc *guc) 2118 { 2119 struct intel_guc_tlb_wait *wait; 2120 2121 if (!HAS_GUC_TLB_INVALIDATION(guc_to_gt(guc)->i915)) 2122 return; 2123 2124 wait = xa_load(&guc->tlb_lookup, guc->serial_slot); 2125 if (wait && wait->busy) 2126 guc_err(guc, "Unexpected busy item in tlb_lookup on fini\n"); 2127 kfree(wait); 2128 2129 xa_destroy(&guc->tlb_lookup); 2130 } 2131 2132 /* 2133 * Set up the memory resources to be shared with the GuC (via the GGTT) 2134 * at firmware loading time. 2135 */ 2136 int intel_guc_submission_init(struct intel_guc *guc) 2137 { 2138 struct intel_gt *gt = guc_to_gt(guc); 2139 int ret; 2140 2141 if (guc->submission_initialized) 2142 return 0; 2143 2144 if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER(1, 0, 0)) { 2145 ret = guc_lrc_desc_pool_create_v69(guc); 2146 if (ret) 2147 return ret; 2148 } 2149 2150 ret = init_tlb_lookup(guc); 2151 if (ret) 2152 goto destroy_pool; 2153 2154 guc->submission_state.guc_ids_bitmap = 2155 bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL); 2156 if (!guc->submission_state.guc_ids_bitmap) { 2157 ret = -ENOMEM; 2158 goto destroy_tlb; 2159 } 2160 2161 guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ; 2162 guc->timestamp.shift = gpm_timestamp_shift(gt); 2163 guc->submission_initialized = true; 2164 2165 return 0; 2166 2167 destroy_tlb: 2168 fini_tlb_lookup(guc); 2169 destroy_pool: 2170 guc_lrc_desc_pool_destroy_v69(guc); 2171 return ret; 2172 } 2173 2174 void intel_guc_submission_fini(struct intel_guc *guc) 2175 { 2176 if (!guc->submission_initialized) 2177 return; 2178 2179 guc_fini_engine_stats(guc); 2180 guc_flush_destroyed_contexts(guc); 2181 guc_lrc_desc_pool_destroy_v69(guc); 2182 i915_sched_engine_put(guc->sched_engine); 2183 bitmap_free(guc->submission_state.guc_ids_bitmap); 2184 fini_tlb_lookup(guc); 2185 guc->submission_initialized = false; 2186 } 2187 2188 static inline void queue_request(struct i915_sched_engine *sched_engine, 2189 struct i915_request *rq, 2190 int prio) 2191 { 2192 GEM_BUG_ON(!list_empty(&rq->sched.link)); 2193 list_add_tail(&rq->sched.link, 2194 i915_sched_lookup_priolist(sched_engine, prio)); 2195 set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); 2196 tasklet_hi_schedule(&sched_engine->tasklet); 2197 } 2198 2199 static int guc_bypass_tasklet_submit(struct intel_guc *guc, 2200 struct i915_request *rq) 2201 { 2202 int ret = 0; 2203 2204 __i915_request_submit(rq); 2205 2206 trace_i915_request_in(rq, 0); 2207 2208 if 
(is_multi_lrc_rq(rq)) { 2209 if (multi_lrc_submit(rq)) { 2210 ret = guc_wq_item_append(guc, rq); 2211 if (!ret) 2212 ret = guc_add_request(guc, rq); 2213 } 2214 } else { 2215 guc_set_lrc_tail(rq); 2216 ret = guc_add_request(guc, rq); 2217 } 2218 2219 if (unlikely(ret == -EPIPE)) 2220 disable_submission(guc); 2221 2222 return ret; 2223 } 2224 2225 static bool need_tasklet(struct intel_guc *guc, struct i915_request *rq) 2226 { 2227 struct i915_sched_engine *sched_engine = rq->engine->sched_engine; 2228 struct intel_context *ce = request_to_scheduling_context(rq); 2229 2230 return submission_disabled(guc) || guc->stalled_request || 2231 !i915_sched_engine_is_empty(sched_engine) || 2232 !ctx_id_mapped(guc, ce->guc_id.id); 2233 } 2234 2235 static void guc_submit_request(struct i915_request *rq) 2236 { 2237 struct i915_sched_engine *sched_engine = rq->engine->sched_engine; 2238 struct intel_guc *guc = gt_to_guc(rq->engine->gt); 2239 unsigned long flags; 2240 2241 /* Will be called from irq-context when using foreign fences. */ 2242 spin_lock_irqsave(&sched_engine->lock, flags); 2243 2244 if (need_tasklet(guc, rq)) 2245 queue_request(sched_engine, rq, rq_prio(rq)); 2246 else if (guc_bypass_tasklet_submit(guc, rq) == -EBUSY) 2247 tasklet_hi_schedule(&sched_engine->tasklet); 2248 2249 spin_unlock_irqrestore(&sched_engine->lock, flags); 2250 } 2251 2252 static int new_guc_id(struct intel_guc *guc, struct intel_context *ce) 2253 { 2254 int ret; 2255 2256 GEM_BUG_ON(intel_context_is_child(ce)); 2257 2258 if (intel_context_is_parent(ce)) 2259 ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap, 2260 NUMBER_MULTI_LRC_GUC_ID(guc), 2261 order_base_2(ce->parallel.number_children 2262 + 1)); 2263 else 2264 ret = ida_alloc_range(&guc->submission_state.guc_ids, 2265 NUMBER_MULTI_LRC_GUC_ID(guc), 2266 guc->submission_state.num_guc_ids - 1, 2267 GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN); 2268 if (unlikely(ret < 0)) 2269 return ret; 2270 2271 if (!intel_context_is_parent(ce)) 2272 ++guc->submission_state.guc_ids_in_use; 2273 2274 ce->guc_id.id = ret; 2275 return 0; 2276 } 2277 2278 static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce) 2279 { 2280 GEM_BUG_ON(intel_context_is_child(ce)); 2281 2282 if (!context_guc_id_invalid(ce)) { 2283 if (intel_context_is_parent(ce)) { 2284 bitmap_release_region(guc->submission_state.guc_ids_bitmap, 2285 ce->guc_id.id, 2286 order_base_2(ce->parallel.number_children 2287 + 1)); 2288 } else { 2289 --guc->submission_state.guc_ids_in_use; 2290 ida_free(&guc->submission_state.guc_ids, 2291 ce->guc_id.id); 2292 } 2293 clr_ctx_id_mapping(guc, ce->guc_id.id); 2294 set_context_guc_id_invalid(ce); 2295 } 2296 if (!list_empty(&ce->guc_id.link)) 2297 list_del_init(&ce->guc_id.link); 2298 } 2299 2300 static void release_guc_id(struct intel_guc *guc, struct intel_context *ce) 2301 { 2302 unsigned long flags; 2303 2304 spin_lock_irqsave(&guc->submission_state.lock, flags); 2305 __release_guc_id(guc, ce); 2306 spin_unlock_irqrestore(&guc->submission_state.lock, flags); 2307 } 2308 2309 static int steal_guc_id(struct intel_guc *guc, struct intel_context *ce) 2310 { 2311 struct intel_context *cn; 2312 2313 lockdep_assert_held(&guc->submission_state.lock); 2314 GEM_BUG_ON(intel_context_is_child(ce)); 2315 GEM_BUG_ON(intel_context_is_parent(ce)); 2316 2317 if (!list_empty(&guc->submission_state.guc_id_list)) { 2318 cn = list_first_entry(&guc->submission_state.guc_id_list, 2319 struct intel_context, 2320 guc_id.link); 2321 2322 
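/*
* The head of guc_id_list is the context that has been unpinned the
* longest. It must still own a valid guc_id, hold no guc_id references
* and be single-LRC, as the asserts below verify. Its guc_id is
* transferred to ce and the victim is marked unregistered/invalid so
* that it picks up a fresh guc_id and re-registers the next time it is
* pinned.
*/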
GEM_BUG_ON(atomic_read(&cn->guc_id.ref)); 2323 GEM_BUG_ON(context_guc_id_invalid(cn)); 2324 GEM_BUG_ON(intel_context_is_child(cn)); 2325 GEM_BUG_ON(intel_context_is_parent(cn)); 2326 2327 list_del_init(&cn->guc_id.link); 2328 ce->guc_id.id = cn->guc_id.id; 2329 2330 spin_lock(&cn->guc_state.lock); 2331 clr_context_registered(cn); 2332 spin_unlock(&cn->guc_state.lock); 2333 2334 set_context_guc_id_invalid(cn); 2335 2336 #ifdef CONFIG_DRM_I915_SELFTEST 2337 guc->number_guc_id_stolen++; 2338 #endif 2339 2340 return 0; 2341 } else { 2342 return -EAGAIN; 2343 } 2344 } 2345 2346 static int assign_guc_id(struct intel_guc *guc, struct intel_context *ce) 2347 { 2348 int ret; 2349 2350 lockdep_assert_held(&guc->submission_state.lock); 2351 GEM_BUG_ON(intel_context_is_child(ce)); 2352 2353 ret = new_guc_id(guc, ce); 2354 if (unlikely(ret < 0)) { 2355 if (intel_context_is_parent(ce)) 2356 return -ENOSPC; 2357 2358 ret = steal_guc_id(guc, ce); 2359 if (ret < 0) 2360 return ret; 2361 } 2362 2363 if (intel_context_is_parent(ce)) { 2364 struct intel_context *child; 2365 int i = 1; 2366 2367 for_each_child(ce, child) 2368 child->guc_id.id = ce->guc_id.id + i++; 2369 } 2370 2371 return 0; 2372 } 2373 2374 #define PIN_GUC_ID_TRIES 4 2375 static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce) 2376 { 2377 int ret = 0; 2378 unsigned long flags, tries = PIN_GUC_ID_TRIES; 2379 2380 GEM_BUG_ON(atomic_read(&ce->guc_id.ref)); 2381 2382 try_again: 2383 spin_lock_irqsave(&guc->submission_state.lock, flags); 2384 2385 might_lock(&ce->guc_state.lock); 2386 2387 if (context_guc_id_invalid(ce)) { 2388 ret = assign_guc_id(guc, ce); 2389 if (ret) 2390 goto out_unlock; 2391 ret = 1; /* Indidcates newly assigned guc_id */ 2392 } 2393 if (!list_empty(&ce->guc_id.link)) 2394 list_del_init(&ce->guc_id.link); 2395 atomic_inc(&ce->guc_id.ref); 2396 2397 out_unlock: 2398 spin_unlock_irqrestore(&guc->submission_state.lock, flags); 2399 2400 /* 2401 * -EAGAIN indicates no guc_id are available, let's retire any 2402 * outstanding requests to see if that frees up a guc_id. If the first 2403 * retire didn't help, insert a sleep with the timeslice duration before 2404 * attempting to retire more requests. Double the sleep period each 2405 * subsequent pass before finally giving up. The sleep period has max of 2406 * 100ms and minimum of 1ms. 
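* For example, with a 5 ms timeslice (purely illustrative) the three
* retries are preceded by sleeps of 0 ms, 5 ms and 10 ms respectively;
* if the final attempt still fails, -EAGAIN is returned to the caller.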
2407 */ 2408 if (ret == -EAGAIN && --tries) { 2409 if (PIN_GUC_ID_TRIES - tries > 1) { 2410 unsigned int timeslice_shifted = 2411 ce->engine->props.timeslice_duration_ms << 2412 (PIN_GUC_ID_TRIES - tries - 2); 2413 unsigned int max = min_t(unsigned int, 100, 2414 timeslice_shifted); 2415 2416 msleep(max_t(unsigned int, max, 1)); 2417 } 2418 intel_gt_retire_requests(guc_to_gt(guc)); 2419 goto try_again; 2420 } 2421 2422 return ret; 2423 } 2424 2425 static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce) 2426 { 2427 unsigned long flags; 2428 2429 GEM_BUG_ON(atomic_read(&ce->guc_id.ref) < 0); 2430 GEM_BUG_ON(intel_context_is_child(ce)); 2431 2432 if (unlikely(context_guc_id_invalid(ce) || 2433 intel_context_is_parent(ce))) 2434 return; 2435 2436 spin_lock_irqsave(&guc->submission_state.lock, flags); 2437 if (!context_guc_id_invalid(ce) && list_empty(&ce->guc_id.link) && 2438 !atomic_read(&ce->guc_id.ref)) 2439 list_add_tail(&ce->guc_id.link, 2440 &guc->submission_state.guc_id_list); 2441 spin_unlock_irqrestore(&guc->submission_state.lock, flags); 2442 } 2443 2444 static int __guc_action_register_multi_lrc_v69(struct intel_guc *guc, 2445 struct intel_context *ce, 2446 u32 guc_id, 2447 u32 offset, 2448 bool loop) 2449 { 2450 struct intel_context *child; 2451 u32 action[4 + MAX_ENGINE_INSTANCE]; 2452 int len = 0; 2453 2454 GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE); 2455 2456 action[len++] = INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC; 2457 action[len++] = guc_id; 2458 action[len++] = ce->parallel.number_children + 1; 2459 action[len++] = offset; 2460 for_each_child(ce, child) { 2461 offset += sizeof(struct guc_lrc_desc_v69); 2462 action[len++] = offset; 2463 } 2464 2465 return guc_submission_send_busy_loop(guc, action, len, 0, loop); 2466 } 2467 2468 static int __guc_action_register_multi_lrc_v70(struct intel_guc *guc, 2469 struct intel_context *ce, 2470 struct guc_ctxt_registration_info *info, 2471 bool loop) 2472 { 2473 struct intel_context *child; 2474 u32 action[13 + (MAX_ENGINE_INSTANCE * 2)]; 2475 int len = 0; 2476 u32 next_id; 2477 2478 GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE); 2479 2480 action[len++] = INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC; 2481 action[len++] = info->flags; 2482 action[len++] = info->context_idx; 2483 action[len++] = info->engine_class; 2484 action[len++] = info->engine_submit_mask; 2485 action[len++] = info->wq_desc_lo; 2486 action[len++] = info->wq_desc_hi; 2487 action[len++] = info->wq_base_lo; 2488 action[len++] = info->wq_base_hi; 2489 action[len++] = info->wq_size; 2490 action[len++] = ce->parallel.number_children + 1; 2491 action[len++] = info->hwlrca_lo; 2492 action[len++] = info->hwlrca_hi; 2493 2494 next_id = info->context_idx + 1; 2495 for_each_child(ce, child) { 2496 GEM_BUG_ON(next_id++ != child->guc_id.id); 2497 2498 /* 2499 * NB: GuC interface supports 64 bit LRCA even though i915/HW 2500 * only supports 32 bit currently. 
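* The upper dword is therefore always zero today; both halves are still
* written to match the interface layout.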
2501 */ 2502 action[len++] = lower_32_bits(child->lrc.lrca); 2503 action[len++] = upper_32_bits(child->lrc.lrca); 2504 } 2505 2506 GEM_BUG_ON(len > ARRAY_SIZE(action)); 2507 2508 return guc_submission_send_busy_loop(guc, action, len, 0, loop); 2509 } 2510 2511 static int __guc_action_register_context_v69(struct intel_guc *guc, 2512 u32 guc_id, 2513 u32 offset, 2514 bool loop) 2515 { 2516 u32 action[] = { 2517 INTEL_GUC_ACTION_REGISTER_CONTEXT, 2518 guc_id, 2519 offset, 2520 }; 2521 2522 return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 2523 0, loop); 2524 } 2525 2526 static int __guc_action_register_context_v70(struct intel_guc *guc, 2527 struct guc_ctxt_registration_info *info, 2528 bool loop) 2529 { 2530 u32 action[] = { 2531 INTEL_GUC_ACTION_REGISTER_CONTEXT, 2532 info->flags, 2533 info->context_idx, 2534 info->engine_class, 2535 info->engine_submit_mask, 2536 info->wq_desc_lo, 2537 info->wq_desc_hi, 2538 info->wq_base_lo, 2539 info->wq_base_hi, 2540 info->wq_size, 2541 info->hwlrca_lo, 2542 info->hwlrca_hi, 2543 }; 2544 2545 return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 2546 0, loop); 2547 } 2548 2549 static void prepare_context_registration_info_v69(struct intel_context *ce); 2550 static void prepare_context_registration_info_v70(struct intel_context *ce, 2551 struct guc_ctxt_registration_info *info); 2552 2553 static int 2554 register_context_v69(struct intel_guc *guc, struct intel_context *ce, bool loop) 2555 { 2556 u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool_v69) + 2557 ce->guc_id.id * sizeof(struct guc_lrc_desc_v69); 2558 2559 prepare_context_registration_info_v69(ce); 2560 2561 if (intel_context_is_parent(ce)) 2562 return __guc_action_register_multi_lrc_v69(guc, ce, ce->guc_id.id, 2563 offset, loop); 2564 else 2565 return __guc_action_register_context_v69(guc, ce->guc_id.id, 2566 offset, loop); 2567 } 2568 2569 static int 2570 register_context_v70(struct intel_guc *guc, struct intel_context *ce, bool loop) 2571 { 2572 struct guc_ctxt_registration_info info; 2573 2574 prepare_context_registration_info_v70(ce, &info); 2575 2576 if (intel_context_is_parent(ce)) 2577 return __guc_action_register_multi_lrc_v70(guc, ce, &info, loop); 2578 else 2579 return __guc_action_register_context_v70(guc, &info, loop); 2580 } 2581 2582 static int register_context(struct intel_context *ce, bool loop) 2583 { 2584 struct intel_guc *guc = ce_to_guc(ce); 2585 int ret; 2586 2587 GEM_BUG_ON(intel_context_is_child(ce)); 2588 trace_intel_context_register(ce); 2589 2590 if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 0, 0)) 2591 ret = register_context_v70(guc, ce, loop); 2592 else 2593 ret = register_context_v69(guc, ce, loop); 2594 2595 if (likely(!ret)) { 2596 unsigned long flags; 2597 2598 spin_lock_irqsave(&ce->guc_state.lock, flags); 2599 set_context_registered(ce); 2600 spin_unlock_irqrestore(&ce->guc_state.lock, flags); 2601 2602 if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 0, 0)) 2603 guc_context_policy_init_v70(ce, loop); 2604 } 2605 2606 return ret; 2607 } 2608 2609 static int __guc_action_deregister_context(struct intel_guc *guc, 2610 u32 guc_id) 2611 { 2612 u32 action[] = { 2613 INTEL_GUC_ACTION_DEREGISTER_CONTEXT, 2614 guc_id, 2615 }; 2616 2617 return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 2618 G2H_LEN_DW_DEREGISTER_CONTEXT, 2619 true); 2620 } 2621 2622 static int deregister_context(struct intel_context *ce, u32 guc_id) 2623 { 2624 struct intel_guc *guc = ce_to_guc(ce); 2625 2626 GEM_BUG_ON(intel_context_is_child(ce)); 
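/*
* Completion is only confirmed by the corresponding deregister-done G2H,
* hence the H2G below reserves G2H credits (G2H_LEN_DW_DEREGISTER_CONTEXT)
* and is sent with busy-loop semantics.
*/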
2627 trace_intel_context_deregister(ce); 2628 2629 return __guc_action_deregister_context(guc, guc_id); 2630 } 2631 2632 static inline void clear_children_join_go_memory(struct intel_context *ce) 2633 { 2634 struct parent_scratch *ps = __get_parent_scratch(ce); 2635 int i; 2636 2637 ps->go.semaphore = 0; 2638 for (i = 0; i < ce->parallel.number_children + 1; ++i) 2639 ps->join[i].semaphore = 0; 2640 } 2641 2642 static inline u32 get_children_go_value(struct intel_context *ce) 2643 { 2644 return __get_parent_scratch(ce)->go.semaphore; 2645 } 2646 2647 static inline u32 get_children_join_value(struct intel_context *ce, 2648 u8 child_index) 2649 { 2650 return __get_parent_scratch(ce)->join[child_index].semaphore; 2651 } 2652 2653 struct context_policy { 2654 u32 count; 2655 struct guc_update_context_policy h2g; 2656 }; 2657 2658 static u32 __guc_context_policy_action_size(struct context_policy *policy) 2659 { 2660 size_t bytes = sizeof(policy->h2g.header) + 2661 (sizeof(policy->h2g.klv[0]) * policy->count); 2662 2663 return bytes / sizeof(u32); 2664 } 2665 2666 static void __guc_context_policy_start_klv(struct context_policy *policy, u16 guc_id) 2667 { 2668 policy->h2g.header.action = INTEL_GUC_ACTION_HOST2GUC_UPDATE_CONTEXT_POLICIES; 2669 policy->h2g.header.ctx_id = guc_id; 2670 policy->count = 0; 2671 } 2672 2673 #define MAKE_CONTEXT_POLICY_ADD(func, id) \ 2674 static void __guc_context_policy_add_##func(struct context_policy *policy, u32 data) \ 2675 { \ 2676 GEM_BUG_ON(policy->count >= GUC_CONTEXT_POLICIES_KLV_NUM_IDS); \ 2677 policy->h2g.klv[policy->count].kl = \ 2678 FIELD_PREP(GUC_KLV_0_KEY, GUC_CONTEXT_POLICIES_KLV_ID_##id) | \ 2679 FIELD_PREP(GUC_KLV_0_LEN, 1); \ 2680 policy->h2g.klv[policy->count].value = data; \ 2681 policy->count++; \ 2682 } 2683 2684 MAKE_CONTEXT_POLICY_ADD(execution_quantum, EXECUTION_QUANTUM) 2685 MAKE_CONTEXT_POLICY_ADD(preemption_timeout, PREEMPTION_TIMEOUT) 2686 MAKE_CONTEXT_POLICY_ADD(priority, SCHEDULING_PRIORITY) 2687 MAKE_CONTEXT_POLICY_ADD(preempt_to_idle, PREEMPT_TO_IDLE_ON_QUANTUM_EXPIRY) 2688 MAKE_CONTEXT_POLICY_ADD(slpc_ctx_freq_req, SLPM_GT_FREQUENCY) 2689 2690 #undef MAKE_CONTEXT_POLICY_ADD 2691 2692 static int __guc_context_set_context_policies(struct intel_guc *guc, 2693 struct context_policy *policy, 2694 bool loop) 2695 { 2696 return guc_submission_send_busy_loop(guc, (u32 *)&policy->h2g, 2697 __guc_context_policy_action_size(policy), 2698 0, loop); 2699 } 2700 2701 static int guc_context_policy_init_v70(struct intel_context *ce, bool loop) 2702 { 2703 struct intel_engine_cs *engine = ce->engine; 2704 struct intel_guc *guc = gt_to_guc(engine->gt); 2705 struct context_policy policy; 2706 u32 execution_quantum; 2707 u32 preemption_timeout; 2708 u32 slpc_ctx_freq_req = 0; 2709 unsigned long flags; 2710 int ret; 2711 2712 /* NB: For both of these, zero means disabled. 
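* The engine properties are in milliseconds while the GuC expects
* microseconds, hence the * 1000 conversions (guarded by the overflow
* asserts) below.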
*/ 2713 GEM_BUG_ON(overflows_type(engine->props.timeslice_duration_ms * 1000, 2714 execution_quantum)); 2715 GEM_BUG_ON(overflows_type(engine->props.preempt_timeout_ms * 1000, 2716 preemption_timeout)); 2717 execution_quantum = engine->props.timeslice_duration_ms * 1000; 2718 preemption_timeout = engine->props.preempt_timeout_ms * 1000; 2719 2720 if (ce->flags & BIT(CONTEXT_LOW_LATENCY)) 2721 slpc_ctx_freq_req |= SLPC_CTX_FREQ_REQ_IS_COMPUTE; 2722 2723 __guc_context_policy_start_klv(&policy, ce->guc_id.id); 2724 2725 __guc_context_policy_add_priority(&policy, ce->guc_state.prio); 2726 __guc_context_policy_add_execution_quantum(&policy, execution_quantum); 2727 __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout); 2728 __guc_context_policy_add_slpc_ctx_freq_req(&policy, slpc_ctx_freq_req); 2729 2730 if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION) 2731 __guc_context_policy_add_preempt_to_idle(&policy, 1); 2732 2733 ret = __guc_context_set_context_policies(guc, &policy, loop); 2734 2735 spin_lock_irqsave(&ce->guc_state.lock, flags); 2736 if (ret != 0) 2737 set_context_policy_required(ce); 2738 else 2739 clr_context_policy_required(ce); 2740 spin_unlock_irqrestore(&ce->guc_state.lock, flags); 2741 2742 return ret; 2743 } 2744 2745 static void guc_context_policy_init_v69(struct intel_engine_cs *engine, 2746 struct guc_lrc_desc_v69 *desc) 2747 { 2748 desc->policy_flags = 0; 2749 2750 if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION) 2751 desc->policy_flags |= CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE_V69; 2752 2753 /* NB: For both of these, zero means disabled. */ 2754 GEM_BUG_ON(overflows_type(engine->props.timeslice_duration_ms * 1000, 2755 desc->execution_quantum)); 2756 GEM_BUG_ON(overflows_type(engine->props.preempt_timeout_ms * 1000, 2757 desc->preemption_timeout)); 2758 desc->execution_quantum = engine->props.timeslice_duration_ms * 1000; 2759 desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000; 2760 } 2761 2762 static u32 map_guc_prio_to_lrc_desc_prio(u8 prio) 2763 { 2764 /* 2765 * this matches the mapping we do in map_i915_prio_to_guc_prio() 2766 * (e.g. prio < I915_PRIORITY_NORMAL maps to GUC_CLIENT_PRIORITY_NORMAL) 2767 */ 2768 switch (prio) { 2769 default: 2770 MISSING_CASE(prio); 2771 fallthrough; 2772 case GUC_CLIENT_PRIORITY_KMD_NORMAL: 2773 return GEN12_CTX_PRIORITY_NORMAL; 2774 case GUC_CLIENT_PRIORITY_NORMAL: 2775 return GEN12_CTX_PRIORITY_LOW; 2776 case GUC_CLIENT_PRIORITY_HIGH: 2777 case GUC_CLIENT_PRIORITY_KMD_HIGH: 2778 return GEN12_CTX_PRIORITY_HIGH; 2779 } 2780 } 2781 2782 static void prepare_context_registration_info_v69(struct intel_context *ce) 2783 { 2784 struct intel_engine_cs *engine = ce->engine; 2785 struct intel_guc *guc = gt_to_guc(engine->gt); 2786 u32 ctx_id = ce->guc_id.id; 2787 struct guc_lrc_desc_v69 *desc; 2788 struct intel_context *child; 2789 2790 GEM_BUG_ON(!engine->mask); 2791 2792 /* 2793 * Ensure LRC + CT vmas are is same region as write barrier is done 2794 * based on CT vma region. 
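* i.e. if the CT buffer lives in LMEM the context state must too,
* otherwise the barrier flavour chosen for the CT region would not be
* guaranteed to order the LRC writes.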
2795 */ 2796 GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) != 2797 i915_gem_object_is_lmem(ce->ring->vma->obj)); 2798 2799 desc = __get_lrc_desc_v69(guc, ctx_id); 2800 GEM_BUG_ON(!desc); 2801 desc->engine_class = engine_class_to_guc_class(engine->class); 2802 desc->engine_submit_mask = engine->logical_mask; 2803 desc->hw_context_desc = ce->lrc.lrca; 2804 desc->priority = ce->guc_state.prio; 2805 desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD; 2806 guc_context_policy_init_v69(engine, desc); 2807 2808 /* 2809 * If context is a parent, we need to register a process descriptor 2810 * describing a work queue and register all child contexts. 2811 */ 2812 if (intel_context_is_parent(ce)) { 2813 struct guc_process_desc_v69 *pdesc; 2814 2815 ce->parallel.guc.wqi_tail = 0; 2816 ce->parallel.guc.wqi_head = 0; 2817 2818 desc->process_desc = i915_ggtt_offset(ce->state) + 2819 __get_parent_scratch_offset(ce); 2820 desc->wq_addr = i915_ggtt_offset(ce->state) + 2821 __get_wq_offset(ce); 2822 desc->wq_size = WQ_SIZE; 2823 2824 pdesc = __get_process_desc_v69(ce); 2825 memset(pdesc, 0, sizeof(*(pdesc))); 2826 pdesc->stage_id = ce->guc_id.id; 2827 pdesc->wq_base_addr = desc->wq_addr; 2828 pdesc->wq_size_bytes = desc->wq_size; 2829 pdesc->wq_status = WQ_STATUS_ACTIVE; 2830 2831 ce->parallel.guc.wq_head = &pdesc->head; 2832 ce->parallel.guc.wq_tail = &pdesc->tail; 2833 ce->parallel.guc.wq_status = &pdesc->wq_status; 2834 2835 for_each_child(ce, child) { 2836 desc = __get_lrc_desc_v69(guc, child->guc_id.id); 2837 2838 desc->engine_class = 2839 engine_class_to_guc_class(engine->class); 2840 desc->hw_context_desc = child->lrc.lrca; 2841 desc->priority = ce->guc_state.prio; 2842 desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD; 2843 guc_context_policy_init_v69(engine, desc); 2844 } 2845 2846 clear_children_join_go_memory(ce); 2847 } 2848 } 2849 2850 static void prepare_context_registration_info_v70(struct intel_context *ce, 2851 struct guc_ctxt_registration_info *info) 2852 { 2853 struct intel_engine_cs *engine = ce->engine; 2854 struct intel_guc *guc = gt_to_guc(engine->gt); 2855 u32 ctx_id = ce->guc_id.id; 2856 2857 GEM_BUG_ON(!engine->mask); 2858 2859 /* 2860 * Ensure LRC + CT vmas are is same region as write barrier is done 2861 * based on CT vma region. 2862 */ 2863 GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) != 2864 i915_gem_object_is_lmem(ce->ring->vma->obj)); 2865 2866 memset(info, 0, sizeof(*info)); 2867 info->context_idx = ctx_id; 2868 info->engine_class = engine_class_to_guc_class(engine->class); 2869 info->engine_submit_mask = engine->logical_mask; 2870 /* 2871 * NB: GuC interface supports 64 bit LRCA even though i915/HW 2872 * only supports 32 bit currently. 2873 */ 2874 info->hwlrca_lo = lower_32_bits(ce->lrc.lrca); 2875 info->hwlrca_hi = upper_32_bits(ce->lrc.lrca); 2876 if (engine->flags & I915_ENGINE_HAS_EU_PRIORITY) 2877 info->hwlrca_lo |= map_guc_prio_to_lrc_desc_prio(ce->guc_state.prio); 2878 info->flags = CONTEXT_REGISTRATION_FLAG_KMD; 2879 2880 /* 2881 * If context is a parent, we need to register a process descriptor 2882 * describing a work queue and register all child contexts. 
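* For v70+ the descriptor and work queue live in the parent's scratch
* page; only their GGTT offsets and size are passed in the registration
* info, while the child LRCAs are appended to the multi-LRC register H2G
* itself.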
2883 */ 2884 if (intel_context_is_parent(ce)) { 2885 struct guc_sched_wq_desc *wq_desc; 2886 u64 wq_desc_offset, wq_base_offset; 2887 2888 ce->parallel.guc.wqi_tail = 0; 2889 ce->parallel.guc.wqi_head = 0; 2890 2891 wq_desc_offset = (u64)i915_ggtt_offset(ce->state) + 2892 __get_parent_scratch_offset(ce); 2893 wq_base_offset = (u64)i915_ggtt_offset(ce->state) + 2894 __get_wq_offset(ce); 2895 info->wq_desc_lo = lower_32_bits(wq_desc_offset); 2896 info->wq_desc_hi = upper_32_bits(wq_desc_offset); 2897 info->wq_base_lo = lower_32_bits(wq_base_offset); 2898 info->wq_base_hi = upper_32_bits(wq_base_offset); 2899 info->wq_size = WQ_SIZE; 2900 2901 wq_desc = __get_wq_desc_v70(ce); 2902 memset(wq_desc, 0, sizeof(*wq_desc)); 2903 wq_desc->wq_status = WQ_STATUS_ACTIVE; 2904 2905 ce->parallel.guc.wq_head = &wq_desc->head; 2906 ce->parallel.guc.wq_tail = &wq_desc->tail; 2907 ce->parallel.guc.wq_status = &wq_desc->wq_status; 2908 2909 clear_children_join_go_memory(ce); 2910 } 2911 } 2912 2913 static int try_context_registration(struct intel_context *ce, bool loop) 2914 { 2915 struct intel_engine_cs *engine = ce->engine; 2916 struct intel_runtime_pm *runtime_pm = engine->uncore->rpm; 2917 struct intel_guc *guc = gt_to_guc(engine->gt); 2918 intel_wakeref_t wakeref; 2919 u32 ctx_id = ce->guc_id.id; 2920 bool context_registered; 2921 int ret = 0; 2922 2923 GEM_BUG_ON(!sched_state_is_init(ce)); 2924 2925 context_registered = ctx_id_mapped(guc, ctx_id); 2926 2927 clr_ctx_id_mapping(guc, ctx_id); 2928 set_ctx_id_mapping(guc, ctx_id, ce); 2929 2930 /* 2931 * The context_lookup xarray is used to determine if the hardware 2932 * context is currently registered. There are two cases in which it 2933 * could be registered either the guc_id has been stolen from another 2934 * context or the lrc descriptor address of this context has changed. In 2935 * either case the context needs to be deregistered with the GuC before 2936 * registering this context. 2937 */ 2938 if (context_registered) { 2939 bool disabled; 2940 unsigned long flags; 2941 2942 trace_intel_context_steal_guc_id(ce); 2943 GEM_BUG_ON(!loop); 2944 2945 /* Seal race with Reset */ 2946 spin_lock_irqsave(&ce->guc_state.lock, flags); 2947 disabled = submission_disabled(guc); 2948 if (likely(!disabled)) { 2949 set_context_wait_for_deregister_to_register(ce); 2950 intel_context_get(ce); 2951 } 2952 spin_unlock_irqrestore(&ce->guc_state.lock, flags); 2953 if (unlikely(disabled)) { 2954 clr_ctx_id_mapping(guc, ctx_id); 2955 return 0; /* Will get registered later */ 2956 } 2957 2958 /* 2959 * If stealing the guc_id, this ce has the same guc_id as the 2960 * context whose guc_id was stolen. 
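* The stale registration must be torn down first; once the
* deregister-done G2H arrives, the wait-for-deregister-to-register flag
* set above triggers registration of this context under the reused
* guc_id.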
2961 */ 2962 with_intel_runtime_pm(runtime_pm, wakeref) 2963 ret = deregister_context(ce, ce->guc_id.id); 2964 if (unlikely(ret == -ENODEV)) 2965 ret = 0; /* Will get registered later */ 2966 } else { 2967 with_intel_runtime_pm(runtime_pm, wakeref) 2968 ret = register_context(ce, loop); 2969 if (unlikely(ret == -EBUSY)) { 2970 clr_ctx_id_mapping(guc, ctx_id); 2971 } else if (unlikely(ret == -ENODEV)) { 2972 clr_ctx_id_mapping(guc, ctx_id); 2973 ret = 0; /* Will get registered later */ 2974 } 2975 } 2976 2977 return ret; 2978 } 2979 2980 static int __guc_context_pre_pin(struct intel_context *ce, 2981 struct intel_engine_cs *engine, 2982 struct i915_gem_ww_ctx *ww, 2983 void **vaddr) 2984 { 2985 return lrc_pre_pin(ce, engine, ww, vaddr); 2986 } 2987 2988 static int __guc_context_pin(struct intel_context *ce, 2989 struct intel_engine_cs *engine, 2990 void *vaddr) 2991 { 2992 if (i915_ggtt_offset(ce->state) != 2993 (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK)) 2994 set_bit(CONTEXT_LRCA_DIRTY, &ce->flags); 2995 2996 /* 2997 * GuC context gets pinned in guc_request_alloc. See that function for 2998 * explaination of why. 2999 */ 3000 3001 return lrc_pin(ce, engine, vaddr); 3002 } 3003 3004 static int guc_context_pre_pin(struct intel_context *ce, 3005 struct i915_gem_ww_ctx *ww, 3006 void **vaddr) 3007 { 3008 return __guc_context_pre_pin(ce, ce->engine, ww, vaddr); 3009 } 3010 3011 static int guc_context_pin(struct intel_context *ce, void *vaddr) 3012 { 3013 int ret = __guc_context_pin(ce, ce->engine, vaddr); 3014 3015 if (likely(!ret && !intel_context_is_barrier(ce))) 3016 intel_engine_pm_get(ce->engine); 3017 3018 return ret; 3019 } 3020 3021 static void guc_context_unpin(struct intel_context *ce) 3022 { 3023 struct intel_guc *guc = ce_to_guc(ce); 3024 3025 __guc_context_update_stats(ce); 3026 unpin_guc_id(guc, ce); 3027 lrc_unpin(ce); 3028 3029 if (likely(!intel_context_is_barrier(ce))) 3030 intel_engine_pm_put_async(ce->engine); 3031 } 3032 3033 static void guc_context_post_unpin(struct intel_context *ce) 3034 { 3035 lrc_post_unpin(ce); 3036 } 3037 3038 static void __guc_context_sched_enable(struct intel_guc *guc, 3039 struct intel_context *ce) 3040 { 3041 u32 action[] = { 3042 INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET, 3043 ce->guc_id.id, 3044 GUC_CONTEXT_ENABLE 3045 }; 3046 3047 trace_intel_context_sched_enable(ce); 3048 3049 guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 3050 G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true); 3051 } 3052 3053 static void __guc_context_sched_disable(struct intel_guc *guc, 3054 struct intel_context *ce, 3055 u16 guc_id) 3056 { 3057 u32 action[] = { 3058 INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET, 3059 guc_id, /* ce->guc_id.id not stable */ 3060 GUC_CONTEXT_DISABLE 3061 }; 3062 3063 GEM_BUG_ON(guc_id == GUC_INVALID_CONTEXT_ID); 3064 3065 GEM_BUG_ON(intel_context_is_child(ce)); 3066 trace_intel_context_sched_disable(ce); 3067 3068 guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 3069 G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true); 3070 } 3071 3072 static void guc_blocked_fence_complete(struct intel_context *ce) 3073 { 3074 lockdep_assert_held(&ce->guc_state.lock); 3075 3076 if (!i915_sw_fence_done(&ce->guc_state.blocked)) 3077 i915_sw_fence_complete(&ce->guc_state.blocked); 3078 } 3079 3080 static void guc_blocked_fence_reinit(struct intel_context *ce) 3081 { 3082 lockdep_assert_held(&ce->guc_state.lock); 3083 GEM_BUG_ON(!i915_sw_fence_done(&ce->guc_state.blocked)); 3084 3085 /* 3086 * This fence is always complete unless a pending schedule disable is 3087 
* outstanding. We arm the fence here and complete it when we receive 3088 * the pending schedule disable complete message. 3089 */ 3090 i915_sw_fence_fini(&ce->guc_state.blocked); 3091 i915_sw_fence_reinit(&ce->guc_state.blocked); 3092 i915_sw_fence_await(&ce->guc_state.blocked); 3093 i915_sw_fence_commit(&ce->guc_state.blocked); 3094 } 3095 3096 static u16 prep_context_pending_disable(struct intel_context *ce) 3097 { 3098 lockdep_assert_held(&ce->guc_state.lock); 3099 3100 set_context_pending_disable(ce); 3101 clr_context_enabled(ce); 3102 guc_blocked_fence_reinit(ce); 3103 intel_context_get(ce); 3104 3105 return ce->guc_id.id; 3106 } 3107 3108 static struct i915_sw_fence *guc_context_block(struct intel_context *ce) 3109 { 3110 struct intel_guc *guc = ce_to_guc(ce); 3111 unsigned long flags; 3112 struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm; 3113 intel_wakeref_t wakeref; 3114 u16 guc_id; 3115 bool enabled; 3116 3117 GEM_BUG_ON(intel_context_is_child(ce)); 3118 3119 spin_lock_irqsave(&ce->guc_state.lock, flags); 3120 3121 incr_context_blocked(ce); 3122 3123 enabled = context_enabled(ce); 3124 if (unlikely(!enabled || submission_disabled(guc))) { 3125 if (enabled) 3126 clr_context_enabled(ce); 3127 spin_unlock_irqrestore(&ce->guc_state.lock, flags); 3128 return &ce->guc_state.blocked; 3129 } 3130 3131 /* 3132 * We add +2 here as the schedule disable complete CTB handler calls 3133 * intel_context_sched_disable_unpin (-2 to pin_count). 3134 */ 3135 atomic_add(2, &ce->pin_count); 3136 3137 guc_id = prep_context_pending_disable(ce); 3138 3139 spin_unlock_irqrestore(&ce->guc_state.lock, flags); 3140 3141 with_intel_runtime_pm(runtime_pm, wakeref) 3142 __guc_context_sched_disable(guc, ce, guc_id); 3143 3144 return &ce->guc_state.blocked; 3145 } 3146 3147 #define SCHED_STATE_MULTI_BLOCKED_MASK \ 3148 (SCHED_STATE_BLOCKED_MASK & ~SCHED_STATE_BLOCKED) 3149 #define SCHED_STATE_NO_UNBLOCK \ 3150 (SCHED_STATE_MULTI_BLOCKED_MASK | \ 3151 SCHED_STATE_PENDING_DISABLE | \ 3152 SCHED_STATE_BANNED) 3153 3154 static bool context_cant_unblock(struct intel_context *ce) 3155 { 3156 lockdep_assert_held(&ce->guc_state.lock); 3157 3158 return (ce->guc_state.sched_state & SCHED_STATE_NO_UNBLOCK) || 3159 context_guc_id_invalid(ce) || 3160 !ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id) || 3161 !intel_context_is_pinned(ce); 3162 } 3163 3164 static void guc_context_unblock(struct intel_context *ce) 3165 { 3166 struct intel_guc *guc = ce_to_guc(ce); 3167 unsigned long flags; 3168 struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm; 3169 intel_wakeref_t wakeref; 3170 bool enable; 3171 3172 GEM_BUG_ON(context_enabled(ce)); 3173 GEM_BUG_ON(intel_context_is_child(ce)); 3174 3175 spin_lock_irqsave(&ce->guc_state.lock, flags); 3176 3177 if (unlikely(submission_disabled(guc) || 3178 context_cant_unblock(ce))) { 3179 enable = false; 3180 } else { 3181 enable = true; 3182 set_context_pending_enable(ce); 3183 set_context_enabled(ce); 3184 intel_context_get(ce); 3185 } 3186 3187 decr_context_blocked(ce); 3188 3189 spin_unlock_irqrestore(&ce->guc_state.lock, flags); 3190 3191 if (enable) { 3192 with_intel_runtime_pm(runtime_pm, wakeref) 3193 __guc_context_sched_enable(guc, ce); 3194 } 3195 } 3196 3197 static void guc_context_cancel_request(struct intel_context *ce, 3198 struct i915_request *rq) 3199 { 3200 struct intel_context *block_context = 3201 request_to_scheduling_context(rq); 3202 3203 if (i915_sw_fence_signaled(&rq->submit)) { 3204 struct i915_sw_fence *fence; 3205 3206 intel_context_get(ce); 3207 
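/*
* Block the context first so the GuC stops scheduling it; only then is
* it safe to skip the request and rewind the ring state, after which
* the context is unblocked again.
*/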
fence = guc_context_block(block_context); 3208 i915_sw_fence_wait(fence); 3209 if (!i915_request_completed(rq)) { 3210 __i915_request_skip(rq); 3211 guc_reset_state(ce, intel_ring_wrap(ce->ring, rq->head), 3212 true); 3213 } 3214 3215 guc_context_unblock(block_context); 3216 intel_context_put(ce); 3217 } 3218 } 3219 3220 static void __guc_context_set_preemption_timeout(struct intel_guc *guc, 3221 u16 guc_id, 3222 u32 preemption_timeout) 3223 { 3224 if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 0, 0)) { 3225 struct context_policy policy; 3226 3227 __guc_context_policy_start_klv(&policy, guc_id); 3228 __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout); 3229 __guc_context_set_context_policies(guc, &policy, true); 3230 } else { 3231 u32 action[] = { 3232 INTEL_GUC_ACTION_V69_SET_CONTEXT_PREEMPTION_TIMEOUT, 3233 guc_id, 3234 preemption_timeout 3235 }; 3236 3237 intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true); 3238 } 3239 } 3240 3241 static void 3242 guc_context_revoke(struct intel_context *ce, struct i915_request *rq, 3243 unsigned int preempt_timeout_ms) 3244 { 3245 struct intel_guc *guc = ce_to_guc(ce); 3246 struct intel_runtime_pm *runtime_pm = 3247 &ce->engine->gt->i915->runtime_pm; 3248 intel_wakeref_t wakeref; 3249 unsigned long flags; 3250 3251 GEM_BUG_ON(intel_context_is_child(ce)); 3252 3253 guc_flush_submissions(guc); 3254 3255 spin_lock_irqsave(&ce->guc_state.lock, flags); 3256 set_context_banned(ce); 3257 3258 if (submission_disabled(guc) || 3259 (!context_enabled(ce) && !context_pending_disable(ce))) { 3260 spin_unlock_irqrestore(&ce->guc_state.lock, flags); 3261 3262 guc_cancel_context_requests(ce); 3263 intel_engine_signal_breadcrumbs(ce->engine); 3264 } else if (!context_pending_disable(ce)) { 3265 u16 guc_id; 3266 3267 /* 3268 * We add +2 here as the schedule disable complete CTB handler 3269 * calls intel_context_sched_disable_unpin (-2 to pin_count). 3270 */ 3271 atomic_add(2, &ce->pin_count); 3272 3273 guc_id = prep_context_pending_disable(ce); 3274 spin_unlock_irqrestore(&ce->guc_state.lock, flags); 3275 3276 /* 3277 * In addition to disabling scheduling, set the preemption 3278 * timeout to the minimum value (1 us) so the banned context 3279 * gets kicked off the HW ASAP. 
3280 */ 3281 with_intel_runtime_pm(runtime_pm, wakeref) { 3282 __guc_context_set_preemption_timeout(guc, guc_id, 3283 preempt_timeout_ms); 3284 __guc_context_sched_disable(guc, ce, guc_id); 3285 } 3286 } else { 3287 if (!context_guc_id_invalid(ce)) 3288 with_intel_runtime_pm(runtime_pm, wakeref) 3289 __guc_context_set_preemption_timeout(guc, 3290 ce->guc_id.id, 3291 preempt_timeout_ms); 3292 spin_unlock_irqrestore(&ce->guc_state.lock, flags); 3293 } 3294 } 3295 3296 static void do_sched_disable(struct intel_guc *guc, struct intel_context *ce, 3297 unsigned long flags) 3298 __releases(ce->guc_state.lock) 3299 { 3300 struct intel_runtime_pm *runtime_pm = &ce->engine->gt->i915->runtime_pm; 3301 intel_wakeref_t wakeref; 3302 u16 guc_id; 3303 3304 lockdep_assert_held(&ce->guc_state.lock); 3305 guc_id = prep_context_pending_disable(ce); 3306 3307 spin_unlock_irqrestore(&ce->guc_state.lock, flags); 3308 3309 with_intel_runtime_pm(runtime_pm, wakeref) 3310 __guc_context_sched_disable(guc, ce, guc_id); 3311 } 3312 3313 static bool bypass_sched_disable(struct intel_guc *guc, 3314 struct intel_context *ce) 3315 { 3316 lockdep_assert_held(&ce->guc_state.lock); 3317 GEM_BUG_ON(intel_context_is_child(ce)); 3318 3319 if (submission_disabled(guc) || context_guc_id_invalid(ce) || 3320 !ctx_id_mapped(guc, ce->guc_id.id)) { 3321 clr_context_enabled(ce); 3322 return true; 3323 } 3324 3325 return !context_enabled(ce); 3326 } 3327 3328 static void __delay_sched_disable(struct work_struct *wrk) 3329 { 3330 struct intel_context *ce = 3331 container_of(wrk, typeof(*ce), guc_state.sched_disable_delay_work.work); 3332 struct intel_guc *guc = ce_to_guc(ce); 3333 unsigned long flags; 3334 3335 spin_lock_irqsave(&ce->guc_state.lock, flags); 3336 3337 if (bypass_sched_disable(guc, ce)) { 3338 spin_unlock_irqrestore(&ce->guc_state.lock, flags); 3339 intel_context_sched_disable_unpin(ce); 3340 } else { 3341 do_sched_disable(guc, ce, flags); 3342 } 3343 } 3344 3345 static bool guc_id_pressure(struct intel_guc *guc, struct intel_context *ce) 3346 { 3347 /* 3348 * parent contexts are perma-pinned, if we are unpinning do schedule 3349 * disable immediately. 3350 */ 3351 if (intel_context_is_parent(ce)) 3352 return true; 3353 3354 /* 3355 * If we are beyond the threshold for avail guc_ids, do schedule disable immediately. 
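* Note that guc_ids_in_use only counts single-LRC ids handed out by the
* ida; multi-LRC ids come from the separate bitmap and are not tracked
* here.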
3356 */ 3357 return guc->submission_state.guc_ids_in_use > 3358 guc->submission_state.sched_disable_gucid_threshold; 3359 } 3360 3361 static void guc_context_sched_disable(struct intel_context *ce) 3362 { 3363 struct intel_guc *guc = ce_to_guc(ce); 3364 u64 delay = guc->submission_state.sched_disable_delay_ms; 3365 unsigned long flags; 3366 3367 spin_lock_irqsave(&ce->guc_state.lock, flags); 3368 3369 if (bypass_sched_disable(guc, ce)) { 3370 spin_unlock_irqrestore(&ce->guc_state.lock, flags); 3371 intel_context_sched_disable_unpin(ce); 3372 } else if (!intel_context_is_closed(ce) && !guc_id_pressure(guc, ce) && 3373 delay) { 3374 spin_unlock_irqrestore(&ce->guc_state.lock, flags); 3375 mod_delayed_work(system_unbound_wq, 3376 &ce->guc_state.sched_disable_delay_work, 3377 msecs_to_jiffies(delay)); 3378 } else { 3379 do_sched_disable(guc, ce, flags); 3380 } 3381 } 3382 3383 static void guc_context_close(struct intel_context *ce) 3384 { 3385 unsigned long flags; 3386 3387 if (test_bit(CONTEXT_GUC_INIT, &ce->flags) && 3388 cancel_delayed_work(&ce->guc_state.sched_disable_delay_work)) 3389 __delay_sched_disable(&ce->guc_state.sched_disable_delay_work.work); 3390 3391 spin_lock_irqsave(&ce->guc_state.lock, flags); 3392 set_context_close_done(ce); 3393 spin_unlock_irqrestore(&ce->guc_state.lock, flags); 3394 } 3395 3396 static inline int guc_lrc_desc_unpin(struct intel_context *ce) 3397 { 3398 struct intel_guc *guc = ce_to_guc(ce); 3399 struct intel_gt *gt = guc_to_gt(guc); 3400 unsigned long flags; 3401 bool disabled; 3402 int ret; 3403 3404 GEM_BUG_ON(!intel_gt_pm_is_awake(gt)); 3405 GEM_BUG_ON(!ctx_id_mapped(guc, ce->guc_id.id)); 3406 GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id)); 3407 GEM_BUG_ON(context_enabled(ce)); 3408 3409 /* Seal race with Reset */ 3410 spin_lock_irqsave(&ce->guc_state.lock, flags); 3411 disabled = submission_disabled(guc); 3412 if (likely(!disabled)) { 3413 /* 3414 * Take a gt-pm ref and change context state to be destroyed. 3415 * NOTE: a G2H IRQ that comes after will put this gt-pm ref back 3416 */ 3417 __intel_gt_pm_get(gt); 3418 set_context_destroyed(ce); 3419 clr_context_registered(ce); 3420 } 3421 spin_unlock_irqrestore(&ce->guc_state.lock, flags); 3422 3423 if (unlikely(disabled)) { 3424 release_guc_id(guc, ce); 3425 __guc_context_destroy(ce); 3426 return 0; 3427 } 3428 3429 /* 3430 * GuC is active, lets destroy this context, but at this point we can still be racing 3431 * with suspend, so we undo everything if the H2G fails in deregister_context so 3432 * that GuC reset will find this context during clean up. 3433 */ 3434 ret = deregister_context(ce, ce->guc_id.id); 3435 if (ret) { 3436 spin_lock(&ce->guc_state.lock); 3437 set_context_registered(ce); 3438 clr_context_destroyed(ce); 3439 spin_unlock(&ce->guc_state.lock); 3440 /* 3441 * As gt-pm is awake at function entry, intel_wakeref_put_async merely decrements 3442 * the wakeref immediately but per function spec usage call this after unlock. 
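* This drops the reference taken via __intel_gt_pm_get() above when the
* context was marked destroyed, since the deregister H2G failed and no
* G2H will arrive to release it.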
*/ 3444 intel_wakeref_put_async(&gt->wakeref); 3445 } 3446 3447 return ret; 3448 } 3449 3450 static void __guc_context_destroy(struct intel_context *ce) 3451 { 3452 GEM_BUG_ON(ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] || 3453 ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_HIGH] || 3454 ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] || 3455 ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_NORMAL]); 3456 3457 lrc_fini(ce); 3458 intel_context_fini(ce); 3459 3460 if (intel_engine_is_virtual(ce->engine)) { 3461 struct guc_virtual_engine *ve = 3462 container_of(ce, typeof(*ve), context); 3463 3464 if (ve->base.breadcrumbs) 3465 intel_breadcrumbs_put(ve->base.breadcrumbs); 3466 3467 kfree(ve); 3468 } else { 3469 intel_context_free(ce); 3470 } 3471 } 3472 3473 static void guc_flush_destroyed_contexts(struct intel_guc *guc) 3474 { 3475 struct intel_context *ce; 3476 unsigned long flags; 3477 3478 GEM_BUG_ON(!submission_disabled(guc) && 3479 guc_submission_initialized(guc)); 3480 3481 while (!list_empty(&guc->submission_state.destroyed_contexts)) { 3482 spin_lock_irqsave(&guc->submission_state.lock, flags); 3483 ce = list_first_entry_or_null(&guc->submission_state.destroyed_contexts, 3484 struct intel_context, 3485 destroyed_link); 3486 if (ce) 3487 list_del_init(&ce->destroyed_link); 3488 spin_unlock_irqrestore(&guc->submission_state.lock, flags); 3489 3490 if (!ce) 3491 break; 3492 3493 release_guc_id(guc, ce); 3494 __guc_context_destroy(ce); 3495 } 3496 } 3497 3498 static void deregister_destroyed_contexts(struct intel_guc *guc) 3499 { 3500 struct intel_context *ce; 3501 unsigned long flags; 3502 3503 while (!list_empty(&guc->submission_state.destroyed_contexts)) { 3504 spin_lock_irqsave(&guc->submission_state.lock, flags); 3505 ce = list_first_entry_or_null(&guc->submission_state.destroyed_contexts, 3506 struct intel_context, 3507 destroyed_link); 3508 if (ce) 3509 list_del_init(&ce->destroyed_link); 3510 spin_unlock_irqrestore(&guc->submission_state.lock, flags); 3511 3512 if (!ce) 3513 break; 3514 3515 if (guc_lrc_desc_unpin(ce)) { 3516 /* 3517 * This means GuC's CT link severed mid-way which could happen 3518 * in suspend-resume corner cases. In this case, put the 3519 * context back into the destroyed_contexts list which will 3520 * get picked up on the next context deregistration event or 3521 * purged in a GuC sanitization event (reset/unload/wedged/...). 3522 */ 3523 spin_lock_irqsave(&guc->submission_state.lock, flags); 3524 list_add_tail(&ce->destroyed_link, 3525 &guc->submission_state.destroyed_contexts); 3526 spin_unlock_irqrestore(&guc->submission_state.lock, flags); 3527 /* Bail now since the list might never be emptied if h2gs fail */ 3528 break; 3529 } 3530 3531 } 3532 } 3533 3534 static void destroyed_worker_func(struct work_struct *w) 3535 { 3536 struct intel_guc *guc = container_of(w, struct intel_guc, 3537 submission_state.destroyed_worker); 3538 struct intel_gt *gt = guc_to_gt(guc); 3539 intel_wakeref_t wakeref; 3540 3541 /* 3542 * In rare cases we can get here via async context-free fence-signals that 3543 * come very late in suspend flow or very early in resume flows. In these 3544 * cases, GuC won't be ready but just skipping it here is fine as these 3545 * pending-destroy-contexts get destroyed totally at GuC reset time at the 3546 * end of suspend.. OR..
this worker can be picked up later on the next 3547 * context destruction trigger after resume-completes 3548 */ 3549 if (!intel_guc_is_ready(guc)) 3550 return; 3551 3552 with_intel_gt_pm(gt, wakeref) 3553 deregister_destroyed_contexts(guc); 3554 } 3555 3556 static void guc_context_destroy(struct kref *kref) 3557 { 3558 struct intel_context *ce = container_of(kref, typeof(*ce), ref); 3559 struct intel_guc *guc = ce_to_guc(ce); 3560 unsigned long flags; 3561 bool destroy; 3562 3563 /* 3564 * If the guc_id is invalid this context has been stolen and we can free 3565 * it immediately. Also can be freed immediately if the context is not 3566 * registered with the GuC or the GuC is in the middle of a reset. 3567 */ 3568 spin_lock_irqsave(&guc->submission_state.lock, flags); 3569 destroy = submission_disabled(guc) || context_guc_id_invalid(ce) || 3570 !ctx_id_mapped(guc, ce->guc_id.id); 3571 if (likely(!destroy)) { 3572 if (!list_empty(&ce->guc_id.link)) 3573 list_del_init(&ce->guc_id.link); 3574 list_add_tail(&ce->destroyed_link, 3575 &guc->submission_state.destroyed_contexts); 3576 } else { 3577 __release_guc_id(guc, ce); 3578 } 3579 spin_unlock_irqrestore(&guc->submission_state.lock, flags); 3580 if (unlikely(destroy)) { 3581 __guc_context_destroy(ce); 3582 return; 3583 } 3584 3585 /* 3586 * We use a worker to issue the H2G to deregister the context as we can 3587 * take the GT PM for the first time which isn't allowed from an atomic 3588 * context. 3589 */ 3590 queue_work(system_unbound_wq, &guc->submission_state.destroyed_worker); 3591 } 3592 3593 static int guc_context_alloc(struct intel_context *ce) 3594 { 3595 return lrc_alloc(ce, ce->engine); 3596 } 3597 3598 static void __guc_context_set_prio(struct intel_guc *guc, 3599 struct intel_context *ce) 3600 { 3601 if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 0, 0)) { 3602 struct context_policy policy; 3603 3604 __guc_context_policy_start_klv(&policy, ce->guc_id.id); 3605 __guc_context_policy_add_priority(&policy, ce->guc_state.prio); 3606 __guc_context_set_context_policies(guc, &policy, true); 3607 } else { 3608 u32 action[] = { 3609 INTEL_GUC_ACTION_V69_SET_CONTEXT_PRIORITY, 3610 ce->guc_id.id, 3611 ce->guc_state.prio, 3612 }; 3613 3614 guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true); 3615 } 3616 } 3617 3618 static void guc_context_set_prio(struct intel_guc *guc, 3619 struct intel_context *ce, 3620 u8 prio) 3621 { 3622 GEM_BUG_ON(prio < GUC_CLIENT_PRIORITY_KMD_HIGH || 3623 prio > GUC_CLIENT_PRIORITY_NORMAL); 3624 lockdep_assert_held(&ce->guc_state.lock); 3625 3626 if (ce->guc_state.prio == prio || submission_disabled(guc) || 3627 !context_registered(ce)) { 3628 ce->guc_state.prio = prio; 3629 return; 3630 } 3631 3632 ce->guc_state.prio = prio; 3633 __guc_context_set_prio(guc, ce); 3634 3635 trace_intel_context_set_prio(ce); 3636 } 3637 3638 static inline u8 map_i915_prio_to_guc_prio(int prio) 3639 { 3640 if (prio == I915_PRIORITY_NORMAL) 3641 return GUC_CLIENT_PRIORITY_KMD_NORMAL; 3642 else if (prio < I915_PRIORITY_NORMAL) 3643 return GUC_CLIENT_PRIORITY_NORMAL; 3644 else if (prio < I915_PRIORITY_DISPLAY) 3645 return GUC_CLIENT_PRIORITY_HIGH; 3646 else 3647 return GUC_CLIENT_PRIORITY_KMD_HIGH; 3648 } 3649 3650 static inline void add_context_inflight_prio(struct intel_context *ce, 3651 u8 guc_prio) 3652 { 3653 lockdep_assert_held(&ce->guc_state.lock); 3654 GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count)); 3655 3656 ++ce->guc_state.prio_count[guc_prio]; 3657 3658 /* Overflow protection */ 3659 
GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]); 3660 } 3661 3662 static inline void sub_context_inflight_prio(struct intel_context *ce, 3663 u8 guc_prio) 3664 { 3665 lockdep_assert_held(&ce->guc_state.lock); 3666 GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count)); 3667 3668 /* Underflow protection */ 3669 GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]); 3670 3671 --ce->guc_state.prio_count[guc_prio]; 3672 } 3673 3674 static inline void update_context_prio(struct intel_context *ce) 3675 { 3676 struct intel_guc *guc = &ce->engine->gt->uc.guc; 3677 int i; 3678 3679 BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH != 0); 3680 BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH > GUC_CLIENT_PRIORITY_NORMAL); 3681 3682 lockdep_assert_held(&ce->guc_state.lock); 3683 3684 for (i = 0; i < ARRAY_SIZE(ce->guc_state.prio_count); ++i) { 3685 if (ce->guc_state.prio_count[i]) { 3686 guc_context_set_prio(guc, ce, i); 3687 break; 3688 } 3689 } 3690 } 3691 3692 static inline bool new_guc_prio_higher(u8 old_guc_prio, u8 new_guc_prio) 3693 { 3694 /* Lower value is higher priority */ 3695 return new_guc_prio < old_guc_prio; 3696 } 3697 3698 static void add_to_context(struct i915_request *rq) 3699 { 3700 struct intel_context *ce = request_to_scheduling_context(rq); 3701 u8 new_guc_prio = map_i915_prio_to_guc_prio(rq_prio(rq)); 3702 3703 GEM_BUG_ON(intel_context_is_child(ce)); 3704 GEM_BUG_ON(rq->guc_prio == GUC_PRIO_FINI); 3705 3706 spin_lock(&ce->guc_state.lock); 3707 list_move_tail(&rq->sched.link, &ce->guc_state.requests); 3708 3709 if (rq->guc_prio == GUC_PRIO_INIT) { 3710 rq->guc_prio = new_guc_prio; 3711 add_context_inflight_prio(ce, rq->guc_prio); 3712 } else if (new_guc_prio_higher(rq->guc_prio, new_guc_prio)) { 3713 sub_context_inflight_prio(ce, rq->guc_prio); 3714 rq->guc_prio = new_guc_prio; 3715 add_context_inflight_prio(ce, rq->guc_prio); 3716 } 3717 update_context_prio(ce); 3718 3719 spin_unlock(&ce->guc_state.lock); 3720 } 3721 3722 static void guc_prio_fini(struct i915_request *rq, struct intel_context *ce) 3723 { 3724 lockdep_assert_held(&ce->guc_state.lock); 3725 3726 if (rq->guc_prio != GUC_PRIO_INIT && 3727 rq->guc_prio != GUC_PRIO_FINI) { 3728 sub_context_inflight_prio(ce, rq->guc_prio); 3729 update_context_prio(ce); 3730 } 3731 rq->guc_prio = GUC_PRIO_FINI; 3732 } 3733 3734 static void remove_from_context(struct i915_request *rq) 3735 { 3736 struct intel_context *ce = request_to_scheduling_context(rq); 3737 3738 GEM_BUG_ON(intel_context_is_child(ce)); 3739 3740 spin_lock_irq(&ce->guc_state.lock); 3741 3742 list_del_init(&rq->sched.link); 3743 clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); 3744 3745 /* Prevent further __await_execution() registering a cb, then flush */ 3746 set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags); 3747 3748 guc_prio_fini(rq, ce); 3749 3750 spin_unlock_irq(&ce->guc_state.lock); 3751 3752 atomic_dec(&ce->guc_id.ref); 3753 i915_request_notify_execute_cb_imm(rq); 3754 } 3755 3756 static const struct intel_context_ops guc_context_ops = { 3757 .flags = COPS_RUNTIME_CYCLES, 3758 .alloc = guc_context_alloc, 3759 3760 .close = guc_context_close, 3761 3762 .pre_pin = guc_context_pre_pin, 3763 .pin = guc_context_pin, 3764 .unpin = guc_context_unpin, 3765 .post_unpin = guc_context_post_unpin, 3766 3767 .revoke = guc_context_revoke, 3768 3769 .cancel_request = guc_context_cancel_request, 3770 3771 .enter = intel_context_enter_engine, 3772 .exit = intel_context_exit_engine, 3773 3774 .sched_disable = guc_context_sched_disable, 3775 3776 .update_stats = 
guc_context_update_stats, 3777 3778 .reset = lrc_reset, 3779 .destroy = guc_context_destroy, 3780 3781 .create_virtual = guc_create_virtual, 3782 .create_parallel = guc_create_parallel, 3783 }; 3784 3785 static void submit_work_cb(struct irq_work *wrk) 3786 { 3787 struct i915_request *rq = container_of(wrk, typeof(*rq), submit_work); 3788 3789 might_lock(&rq->engine->sched_engine->lock); 3790 i915_sw_fence_complete(&rq->submit); 3791 } 3792 3793 static void __guc_signal_context_fence(struct intel_context *ce) 3794 { 3795 struct i915_request *rq, *rn; 3796 3797 lockdep_assert_held(&ce->guc_state.lock); 3798 3799 if (!list_empty(&ce->guc_state.fences)) 3800 trace_intel_context_fence_release(ce); 3801 3802 /* 3803 * Use an IRQ to ensure locking order of sched_engine->lock -> 3804 * ce->guc_state.lock is preserved. 3805 */ 3806 list_for_each_entry_safe(rq, rn, &ce->guc_state.fences, 3807 guc_fence_link) { 3808 list_del(&rq->guc_fence_link); 3809 irq_work_queue(&rq->submit_work); 3810 } 3811 3812 INIT_LIST_HEAD(&ce->guc_state.fences); 3813 } 3814 3815 static void guc_signal_context_fence(struct intel_context *ce) 3816 { 3817 unsigned long flags; 3818 3819 GEM_BUG_ON(intel_context_is_child(ce)); 3820 3821 spin_lock_irqsave(&ce->guc_state.lock, flags); 3822 clr_context_wait_for_deregister_to_register(ce); 3823 __guc_signal_context_fence(ce); 3824 spin_unlock_irqrestore(&ce->guc_state.lock, flags); 3825 } 3826 3827 static bool context_needs_register(struct intel_context *ce, bool new_guc_id) 3828 { 3829 return (new_guc_id || test_bit(CONTEXT_LRCA_DIRTY, &ce->flags) || 3830 !ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id)) && 3831 !submission_disabled(ce_to_guc(ce)); 3832 } 3833 3834 static void guc_context_init(struct intel_context *ce) 3835 { 3836 const struct i915_gem_context *ctx; 3837 int prio = I915_CONTEXT_DEFAULT_PRIORITY; 3838 3839 rcu_read_lock(); 3840 ctx = rcu_dereference(ce->gem_context); 3841 if (ctx) 3842 prio = ctx->sched.priority; 3843 rcu_read_unlock(); 3844 3845 ce->guc_state.prio = map_i915_prio_to_guc_prio(prio); 3846 3847 INIT_DELAYED_WORK(&ce->guc_state.sched_disable_delay_work, 3848 __delay_sched_disable); 3849 3850 set_bit(CONTEXT_GUC_INIT, &ce->flags); 3851 } 3852 3853 static int guc_request_alloc(struct i915_request *rq) 3854 { 3855 struct intel_context *ce = request_to_scheduling_context(rq); 3856 struct intel_guc *guc = ce_to_guc(ce); 3857 unsigned long flags; 3858 int ret; 3859 3860 GEM_BUG_ON(!intel_context_is_pinned(rq->context)); 3861 3862 /* 3863 * Flush enough space to reduce the likelihood of waiting after 3864 * we start building the request - in which case we will just 3865 * have to repeat work. 3866 */ 3867 rq->reserved_space += GUC_REQUEST_SIZE; 3868 3869 /* 3870 * Note that after this point, we have committed to using 3871 * this request as it is being used to both track the 3872 * state of engine initialisation and liveness of the 3873 * golden renderstate above. Think twice before you try 3874 * to cancel/unwind this request now. 3875 */ 3876 3877 /* Unconditionally invalidate GPU caches and TLBs. */ 3878 ret = rq->engine->emit_flush(rq, EMIT_INVALIDATE); 3879 if (ret) 3880 return ret; 3881 3882 rq->reserved_space -= GUC_REQUEST_SIZE; 3883 3884 if (unlikely(!test_bit(CONTEXT_GUC_INIT, &ce->flags))) 3885 guc_context_init(ce); 3886 3887 /* 3888 * If the context gets closed while the execbuf is ongoing, the context 3889 * close code will race with the below code to cancel the delayed work. 
3890 * If the context close wins the race and cancels the work, it will 3891 * immediately call the sched disable (see guc_context_close), so there 3892 * is a chance we can get past this check while the sched_disable code 3893 * is being executed. To make sure that code completes before we check 3894 * the status further down, we wait for the close process to complete. 3895 * Otherwise, this code path could send a request down believing that the 3896 * context is still schedule-enabled while the GuC drops the request 3897 * entirely, because the schedule disable from the context_close path 3898 * had only just reached the GuC. In the event the CT is 3899 * full, we could potentially need to wait up to 1.5 seconds. 3900 */ 3901 if (cancel_delayed_work_sync(&ce->guc_state.sched_disable_delay_work)) 3902 intel_context_sched_disable_unpin(ce); 3903 else if (intel_context_is_closed(ce)) 3904 if (wait_for(context_close_done(ce), 1500)) 3905 guc_warn(guc, "timed out waiting on context sched close before realloc\n"); 3906 /* 3907 * Call pin_guc_id here rather than in the pinning step as with 3908 * dma_resv, contexts can be repeatedly pinned / unpinned thrashing the 3909 * guc_id and creating horrible race conditions. This is especially bad 3910 * when guc_ids are being stolen due to over subscription. By the time 3911 * this function is reached, it is guaranteed that the guc_id will be 3912 * persistent until the generated request is retired, thus sealing these 3913 * race conditions. It is still safe to fail here if guc_ids are 3914 * exhausted and return -EAGAIN to the user indicating that they can try 3915 * again in the future. 3916 * 3917 * There is no need for a lock here as the timeline mutex ensures at 3918 * most one context can be executing this code path at once. The 3919 * guc_id_ref is incremented once for every request in flight and 3920 * decremented on each retire. When it is zero, a lock around the 3921 * increment (in pin_guc_id) is needed to seal a race with unpin_guc_id. 3922 */ 3923 if (atomic_add_unless(&ce->guc_id.ref, 1, 0)) 3924 goto out; 3925 3926 ret = pin_guc_id(guc, ce); /* returns 1 if new guc_id assigned */ 3927 if (unlikely(ret < 0)) 3928 return ret; 3929 if (context_needs_register(ce, !!ret)) { 3930 ret = try_context_registration(ce, true); 3931 if (unlikely(ret)) { /* unwind */ 3932 if (ret == -EPIPE) { 3933 disable_submission(guc); 3934 goto out; /* GPU will be reset */ 3935 } 3936 atomic_dec(&ce->guc_id.ref); 3937 unpin_guc_id(guc, ce); 3938 return ret; 3939 } 3940 } 3941 3942 clear_bit(CONTEXT_LRCA_DIRTY, &ce->flags); 3943 3944 out: 3945 /* 3946 * We block all requests on this context if a G2H is pending for a 3947 * schedule disable or context deregistration as the GuC will fail a 3948 * schedule enable or context registration if either G2H is pending, 3949 * respectively. Once a G2H returns, the fence blocking these requests 3950 * is released (see guc_signal_context_fence).
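 *
 * The life cycle of a blocked request: this path calls
 * i915_sw_fence_await(&rq->submit) and parks the request on
 * ce->guc_state.fences; when the G2H arrives,
 * guc_signal_context_fence() walks that list and queues rq->submit_work,
 * whose callback (submit_work_cb) completes the submit fence so the
 * request can finally be submitted.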
3951 */ 3952 spin_lock_irqsave(&ce->guc_state.lock, flags); 3953 if (context_wait_for_deregister_to_register(ce) || 3954 context_pending_disable(ce)) { 3955 init_irq_work(&rq->submit_work, submit_work_cb); 3956 i915_sw_fence_await(&rq->submit); 3957 3958 list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences); 3959 } 3960 spin_unlock_irqrestore(&ce->guc_state.lock, flags); 3961 3962 return 0; 3963 } 3964 3965 static int guc_virtual_context_pre_pin(struct intel_context *ce, 3966 struct i915_gem_ww_ctx *ww, 3967 void **vaddr) 3968 { 3969 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0); 3970 3971 return __guc_context_pre_pin(ce, engine, ww, vaddr); 3972 } 3973 3974 static int guc_virtual_context_pin(struct intel_context *ce, void *vaddr) 3975 { 3976 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0); 3977 int ret = __guc_context_pin(ce, engine, vaddr); 3978 intel_engine_mask_t tmp, mask = ce->engine->mask; 3979 3980 if (likely(!ret)) 3981 for_each_engine_masked(engine, ce->engine->gt, mask, tmp) 3982 intel_engine_pm_get(engine); 3983 3984 return ret; 3985 } 3986 3987 static void guc_virtual_context_unpin(struct intel_context *ce) 3988 { 3989 intel_engine_mask_t tmp, mask = ce->engine->mask; 3990 struct intel_engine_cs *engine; 3991 struct intel_guc *guc = ce_to_guc(ce); 3992 3993 GEM_BUG_ON(context_enabled(ce)); 3994 GEM_BUG_ON(intel_context_is_barrier(ce)); 3995 3996 unpin_guc_id(guc, ce); 3997 lrc_unpin(ce); 3998 3999 for_each_engine_masked(engine, ce->engine->gt, mask, tmp) 4000 intel_engine_pm_put_async(engine); 4001 } 4002 4003 static void guc_virtual_context_enter(struct intel_context *ce) 4004 { 4005 intel_engine_mask_t tmp, mask = ce->engine->mask; 4006 struct intel_engine_cs *engine; 4007 4008 for_each_engine_masked(engine, ce->engine->gt, mask, tmp) 4009 intel_engine_pm_get(engine); 4010 4011 intel_timeline_enter(ce->timeline); 4012 } 4013 4014 static void guc_virtual_context_exit(struct intel_context *ce) 4015 { 4016 intel_engine_mask_t tmp, mask = ce->engine->mask; 4017 struct intel_engine_cs *engine; 4018 4019 for_each_engine_masked(engine, ce->engine->gt, mask, tmp) 4020 intel_engine_pm_put(engine); 4021 4022 intel_timeline_exit(ce->timeline); 4023 } 4024 4025 static int guc_virtual_context_alloc(struct intel_context *ce) 4026 { 4027 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0); 4028 4029 return lrc_alloc(ce, engine); 4030 } 4031 4032 static const struct intel_context_ops virtual_guc_context_ops = { 4033 .flags = COPS_RUNTIME_CYCLES, 4034 .alloc = guc_virtual_context_alloc, 4035 4036 .close = guc_context_close, 4037 4038 .pre_pin = guc_virtual_context_pre_pin, 4039 .pin = guc_virtual_context_pin, 4040 .unpin = guc_virtual_context_unpin, 4041 .post_unpin = guc_context_post_unpin, 4042 4043 .revoke = guc_context_revoke, 4044 4045 .cancel_request = guc_context_cancel_request, 4046 4047 .enter = guc_virtual_context_enter, 4048 .exit = guc_virtual_context_exit, 4049 4050 .sched_disable = guc_context_sched_disable, 4051 .update_stats = guc_context_update_stats, 4052 4053 .destroy = guc_context_destroy, 4054 4055 .get_sibling = guc_virtual_get_sibling, 4056 }; 4057 4058 static int guc_parent_context_pin(struct intel_context *ce, void *vaddr) 4059 { 4060 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0); 4061 struct intel_guc *guc = ce_to_guc(ce); 4062 int ret; 4063 4064 GEM_BUG_ON(!intel_context_is_parent(ce)); 4065 GEM_BUG_ON(!intel_engine_is_virtual(ce->engine)); 4066 4067 ret = 
pin_guc_id(guc, ce); 4068 if (unlikely(ret < 0)) 4069 return ret; 4070 4071 return __guc_context_pin(ce, engine, vaddr); 4072 } 4073 4074 static int guc_child_context_pin(struct intel_context *ce, void *vaddr) 4075 { 4076 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0); 4077 4078 GEM_BUG_ON(!intel_context_is_child(ce)); 4079 GEM_BUG_ON(!intel_engine_is_virtual(ce->engine)); 4080 4081 __intel_context_pin(ce->parallel.parent); 4082 return __guc_context_pin(ce, engine, vaddr); 4083 } 4084 4085 static void guc_parent_context_unpin(struct intel_context *ce) 4086 { 4087 struct intel_guc *guc = ce_to_guc(ce); 4088 4089 GEM_BUG_ON(context_enabled(ce)); 4090 GEM_BUG_ON(intel_context_is_barrier(ce)); 4091 GEM_BUG_ON(!intel_context_is_parent(ce)); 4092 GEM_BUG_ON(!intel_engine_is_virtual(ce->engine)); 4093 4094 unpin_guc_id(guc, ce); 4095 lrc_unpin(ce); 4096 } 4097 4098 static void guc_child_context_unpin(struct intel_context *ce) 4099 { 4100 GEM_BUG_ON(context_enabled(ce)); 4101 GEM_BUG_ON(intel_context_is_barrier(ce)); 4102 GEM_BUG_ON(!intel_context_is_child(ce)); 4103 GEM_BUG_ON(!intel_engine_is_virtual(ce->engine)); 4104 4105 lrc_unpin(ce); 4106 } 4107 4108 static void guc_child_context_post_unpin(struct intel_context *ce) 4109 { 4110 GEM_BUG_ON(!intel_context_is_child(ce)); 4111 GEM_BUG_ON(!intel_context_is_pinned(ce->parallel.parent)); 4112 GEM_BUG_ON(!intel_engine_is_virtual(ce->engine)); 4113 4114 lrc_post_unpin(ce); 4115 intel_context_unpin(ce->parallel.parent); 4116 } 4117 4118 static void guc_child_context_destroy(struct kref *kref) 4119 { 4120 struct intel_context *ce = container_of(kref, typeof(*ce), ref); 4121 4122 __guc_context_destroy(ce); 4123 } 4124 4125 static const struct intel_context_ops virtual_parent_context_ops = { 4126 .alloc = guc_virtual_context_alloc, 4127 4128 .close = guc_context_close, 4129 4130 .pre_pin = guc_context_pre_pin, 4131 .pin = guc_parent_context_pin, 4132 .unpin = guc_parent_context_unpin, 4133 .post_unpin = guc_context_post_unpin, 4134 4135 .revoke = guc_context_revoke, 4136 4137 .cancel_request = guc_context_cancel_request, 4138 4139 .enter = guc_virtual_context_enter, 4140 .exit = guc_virtual_context_exit, 4141 4142 .sched_disable = guc_context_sched_disable, 4143 4144 .destroy = guc_context_destroy, 4145 4146 .get_sibling = guc_virtual_get_sibling, 4147 }; 4148 4149 static const struct intel_context_ops virtual_child_context_ops = { 4150 .alloc = guc_virtual_context_alloc, 4151 4152 .pre_pin = guc_context_pre_pin, 4153 .pin = guc_child_context_pin, 4154 .unpin = guc_child_context_unpin, 4155 .post_unpin = guc_child_context_post_unpin, 4156 4157 .cancel_request = guc_context_cancel_request, 4158 4159 .enter = guc_virtual_context_enter, 4160 .exit = guc_virtual_context_exit, 4161 4162 .destroy = guc_child_context_destroy, 4163 4164 .get_sibling = guc_virtual_get_sibling, 4165 }; 4166 4167 /* 4168 * The below override of the breadcrumbs is enabled when the user configures a 4169 * context for parallel submission (multi-lrc, parent-child). 4170 * 4171 * The overridden breadcrumbs implements an algorithm which allows the GuC to 4172 * safely preempt all the hw contexts configured for parallel submission 4173 * between each BB. The contract between the i915 and GuC is if the parent 4174 * context can be preempted, all the children can be preempted, and the GuC will 4175 * always try to preempt the parent before the children. 
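 * Arbitration is disabled (MI_ARB_ON_OFF | MI_ARB_DISABLE) in each
 * member's BB start and re-enabled in the fini breadcrumbs (see the
 * emitters below), so the hardware can only switch away at points where
 * the members have synchronised via the go / join semaphores.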
A handshake between the 4176 * parent / children breadcrumbs ensures the i915 holds up its end of the deal 4177 * creating a window to preempt between each set of BBs. 4178 */ 4179 static int emit_bb_start_parent_no_preempt_mid_batch(struct i915_request *rq, 4180 u64 offset, u32 len, 4181 const unsigned int flags); 4182 static int emit_bb_start_child_no_preempt_mid_batch(struct i915_request *rq, 4183 u64 offset, u32 len, 4184 const unsigned int flags); 4185 static u32 * 4186 emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq, 4187 u32 *cs); 4188 static u32 * 4189 emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq, 4190 u32 *cs); 4191 4192 static struct intel_context * 4193 guc_create_parallel(struct intel_engine_cs **engines, 4194 unsigned int num_siblings, 4195 unsigned int width) 4196 { 4197 struct intel_engine_cs **siblings = NULL; 4198 struct intel_context *parent = NULL, *ce, *err; 4199 int i, j; 4200 4201 siblings = kmalloc_array(num_siblings, 4202 sizeof(*siblings), 4203 GFP_KERNEL); 4204 if (!siblings) 4205 return ERR_PTR(-ENOMEM); 4206 4207 for (i = 0; i < width; ++i) { 4208 for (j = 0; j < num_siblings; ++j) 4209 siblings[j] = engines[i * num_siblings + j]; 4210 4211 ce = intel_engine_create_virtual(siblings, num_siblings, 4212 FORCE_VIRTUAL); 4213 if (IS_ERR(ce)) { 4214 err = ERR_CAST(ce); 4215 goto unwind; 4216 } 4217 4218 if (i == 0) { 4219 parent = ce; 4220 parent->ops = &virtual_parent_context_ops; 4221 } else { 4222 ce->ops = &virtual_child_context_ops; 4223 intel_context_bind_parent_child(parent, ce); 4224 } 4225 } 4226 4227 parent->parallel.fence_context = dma_fence_context_alloc(1); 4228 4229 parent->engine->emit_bb_start = 4230 emit_bb_start_parent_no_preempt_mid_batch; 4231 parent->engine->emit_fini_breadcrumb = 4232 emit_fini_breadcrumb_parent_no_preempt_mid_batch; 4233 parent->engine->emit_fini_breadcrumb_dw = 4234 12 + 4 * parent->parallel.number_children; 4235 for_each_child(parent, ce) { 4236 ce->engine->emit_bb_start = 4237 emit_bb_start_child_no_preempt_mid_batch; 4238 ce->engine->emit_fini_breadcrumb = 4239 emit_fini_breadcrumb_child_no_preempt_mid_batch; 4240 ce->engine->emit_fini_breadcrumb_dw = 16; 4241 } 4242 4243 kfree(siblings); 4244 return parent; 4245 4246 unwind: 4247 if (parent) 4248 intel_context_put(parent); 4249 kfree(siblings); 4250 return err; 4251 } 4252 4253 static bool 4254 guc_irq_enable_breadcrumbs(struct intel_breadcrumbs *b) 4255 { 4256 struct intel_engine_cs *sibling; 4257 intel_engine_mask_t tmp, mask = b->engine_mask; 4258 bool result = false; 4259 4260 for_each_engine_masked(sibling, b->irq_engine->gt, mask, tmp) 4261 result |= intel_engine_irq_enable(sibling); 4262 4263 return result; 4264 } 4265 4266 static void 4267 guc_irq_disable_breadcrumbs(struct intel_breadcrumbs *b) 4268 { 4269 struct intel_engine_cs *sibling; 4270 intel_engine_mask_t tmp, mask = b->engine_mask; 4271 4272 for_each_engine_masked(sibling, b->irq_engine->gt, mask, tmp) 4273 intel_engine_irq_disable(sibling); 4274 } 4275 4276 static void guc_init_breadcrumbs(struct intel_engine_cs *engine) 4277 { 4278 int i; 4279 4280 /* 4281 * In GuC submission mode we do not know which physical engine a request 4282 * will be scheduled on, this creates a problem because the breadcrumb 4283 * interrupt is per physical engine. To work around this we attach 4284 * requests and direct all breadcrumb interrupts to the first instance 4285 * of an engine per class. 
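 * For example, every VCS engine on a GT ends up sharing (and holding a
 * reference on) the breadcrumbs object of the first VCS instance found
 * in gt->engine_class[].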
In addition all breadcrumb interrupts are 4286 * enabled / disabled across an engine class in unison. 4287 */ 4288 for (i = 0; i < MAX_ENGINE_INSTANCE; ++i) { 4289 struct intel_engine_cs *sibling = 4290 engine->gt->engine_class[engine->class][i]; 4291 4292 if (sibling) { 4293 if (engine->breadcrumbs != sibling->breadcrumbs) { 4294 intel_breadcrumbs_put(engine->breadcrumbs); 4295 engine->breadcrumbs = 4296 intel_breadcrumbs_get(sibling->breadcrumbs); 4297 } 4298 break; 4299 } 4300 } 4301 4302 if (engine->breadcrumbs) { 4303 engine->breadcrumbs->engine_mask |= engine->mask; 4304 engine->breadcrumbs->irq_enable = guc_irq_enable_breadcrumbs; 4305 engine->breadcrumbs->irq_disable = guc_irq_disable_breadcrumbs; 4306 } 4307 } 4308 4309 static void guc_bump_inflight_request_prio(struct i915_request *rq, 4310 int prio) 4311 { 4312 struct intel_context *ce = request_to_scheduling_context(rq); 4313 u8 new_guc_prio = map_i915_prio_to_guc_prio(prio); 4314 4315 /* Short circuit function */ 4316 if (prio < I915_PRIORITY_NORMAL) 4317 return; 4318 4319 spin_lock(&ce->guc_state.lock); 4320 4321 if (rq->guc_prio == GUC_PRIO_FINI) 4322 goto exit; 4323 4324 if (!new_guc_prio_higher(rq->guc_prio, new_guc_prio)) 4325 goto exit; 4326 4327 if (rq->guc_prio != GUC_PRIO_INIT) 4328 sub_context_inflight_prio(ce, rq->guc_prio); 4329 4330 rq->guc_prio = new_guc_prio; 4331 add_context_inflight_prio(ce, rq->guc_prio); 4332 update_context_prio(ce); 4333 4334 exit: 4335 spin_unlock(&ce->guc_state.lock); 4336 } 4337 4338 static void guc_retire_inflight_request_prio(struct i915_request *rq) 4339 { 4340 struct intel_context *ce = request_to_scheduling_context(rq); 4341 4342 spin_lock(&ce->guc_state.lock); 4343 guc_prio_fini(rq, ce); 4344 spin_unlock(&ce->guc_state.lock); 4345 } 4346 4347 static void sanitize_hwsp(struct intel_engine_cs *engine) 4348 { 4349 struct intel_timeline *tl; 4350 4351 list_for_each_entry(tl, &engine->status_page.timelines, engine_link) 4352 intel_timeline_reset_seqno(tl); 4353 } 4354 4355 static void guc_sanitize(struct intel_engine_cs *engine) 4356 { 4357 /* 4358 * Poison residual state on resume, in case the suspend didn't! 4359 * 4360 * We have to assume that across suspend/resume (or other loss 4361 * of control) that the contents of our pinned buffers has been 4362 * lost, replaced by garbage. Since this doesn't always happen, 4363 * let's poison such state so that we more quickly spot when 4364 * we falsely assume it has been preserved. 4365 */ 4366 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) 4367 memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE); 4368 4369 /* 4370 * The kernel_context HWSP is stored in the status_page. As above, 4371 * that may be lost on resume/initialisation, and so we need to 4372 * reset the value in the HWSP. 
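 * sanitize_hwsp() (called just below) walks every timeline attached to
 * this engine's status page and resets its seqno via
 * intel_timeline_reset_seqno(), rather than trusting whatever survived
 * in the HWSP.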
4373 */ 4374 sanitize_hwsp(engine); 4375 4376 /* And scrub the dirty cachelines for the HWSP */ 4377 drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE); 4378 4379 intel_engine_reset_pinned_contexts(engine); 4380 } 4381 4382 static void setup_hwsp(struct intel_engine_cs *engine) 4383 { 4384 intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */ 4385 4386 ENGINE_WRITE_FW(engine, 4387 RING_HWS_PGA, 4388 i915_ggtt_offset(engine->status_page.vma)); 4389 } 4390 4391 static void start_engine(struct intel_engine_cs *engine) 4392 { 4393 ENGINE_WRITE_FW(engine, 4394 RING_MODE_GEN7, 4395 _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE)); 4396 4397 ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); 4398 ENGINE_POSTING_READ(engine, RING_MI_MODE); 4399 } 4400 4401 static int guc_resume(struct intel_engine_cs *engine) 4402 { 4403 assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL); 4404 4405 intel_mocs_init_engine(engine); 4406 4407 intel_breadcrumbs_reset(engine->breadcrumbs); 4408 4409 setup_hwsp(engine); 4410 start_engine(engine); 4411 4412 if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE) 4413 xehp_enable_ccs_engines(engine); 4414 4415 return 0; 4416 } 4417 4418 static bool guc_sched_engine_disabled(struct i915_sched_engine *sched_engine) 4419 { 4420 return !sched_engine->tasklet.callback; 4421 } 4422 4423 static void guc_set_default_submission(struct intel_engine_cs *engine) 4424 { 4425 engine->submit_request = guc_submit_request; 4426 } 4427 4428 static inline int guc_kernel_context_pin(struct intel_guc *guc, 4429 struct intel_context *ce) 4430 { 4431 int ret; 4432 4433 /* 4434 * Note: we purposefully do not check the returns below because 4435 * the registration can only fail if a reset is just starting. 4436 * This is called at the end of reset so presumably another reset 4437 * isn't happening and even if it did this code would be run again. 4438 */ 4439 4440 if (context_guc_id_invalid(ce)) { 4441 ret = pin_guc_id(guc, ce); 4442 4443 if (ret < 0) 4444 return ret; 4445 } 4446 4447 if (!test_bit(CONTEXT_GUC_INIT, &ce->flags)) 4448 guc_context_init(ce); 4449 4450 ret = try_context_registration(ce, true); 4451 if (ret) 4452 unpin_guc_id(guc, ce); 4453 4454 return ret; 4455 } 4456 4457 static inline int guc_init_submission(struct intel_guc *guc) 4458 { 4459 struct intel_gt *gt = guc_to_gt(guc); 4460 struct intel_engine_cs *engine; 4461 enum intel_engine_id id; 4462 4463 /* make sure all descriptors are clean... */ 4464 xa_destroy(&guc->context_lookup); 4465 4466 /* 4467 * A reset might have occurred while we had a pending stalled request, 4468 * so make sure we clean that up. 4469 */ 4470 guc->stalled_request = NULL; 4471 guc->submission_stall_reason = STALL_NONE; 4472 4473 /* 4474 * Some contexts might have been pinned before we enabled GuC 4475 * submission, so we need to add them to the GuC bookkeeping. 4476 * Also, after a reset of the GuC we want to make sure that the 4477 * information shared with GuC is properly reset. The kernel LRCs are 4478 * not attached to the gem_context, so they need to be added separately.
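 * Each context on an engine's pinned_contexts_list is run through
 * guc_kernel_context_pin(), which (re-)pins a valid guc_id if needed and
 * then re-registers the context with the GuC.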
4479 */ 4480 for_each_engine(engine, gt, id) { 4481 struct intel_context *ce; 4482 4483 list_for_each_entry(ce, &engine->pinned_contexts_list, 4484 pinned_contexts_link) { 4485 int ret = guc_kernel_context_pin(guc, ce); 4486 4487 if (ret) { 4488 /* No point in trying to clean up as i915 will wedge on failure */ 4489 return ret; 4490 } 4491 } 4492 } 4493 4494 return 0; 4495 } 4496 4497 static void guc_release(struct intel_engine_cs *engine) 4498 { 4499 engine->sanitize = NULL; /* no longer in control, nothing to sanitize */ 4500 4501 intel_engine_cleanup_common(engine); 4502 lrc_fini_wa_ctx(engine); 4503 } 4504 4505 static void virtual_guc_bump_serial(struct intel_engine_cs *engine) 4506 { 4507 struct intel_engine_cs *e; 4508 intel_engine_mask_t tmp, mask = engine->mask; 4509 4510 for_each_engine_masked(e, engine->gt, mask, tmp) 4511 e->serial++; 4512 } 4513 4514 static void guc_default_vfuncs(struct intel_engine_cs *engine) 4515 { 4516 /* Default vfuncs which can be overridden by each engine. */ 4517 4518 engine->resume = guc_resume; 4519 4520 engine->cops = &guc_context_ops; 4521 engine->request_alloc = guc_request_alloc; 4522 engine->add_active_request = add_to_context; 4523 engine->remove_active_request = remove_from_context; 4524 4525 engine->sched_engine->schedule = i915_schedule; 4526 4527 engine->reset.prepare = guc_engine_reset_prepare; 4528 engine->reset.rewind = guc_rewind_nop; 4529 engine->reset.cancel = guc_reset_nop; 4530 engine->reset.finish = guc_reset_nop; 4531 4532 engine->emit_flush = gen8_emit_flush_xcs; 4533 engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb; 4534 engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs; 4535 if (GRAPHICS_VER(engine->i915) >= 12) { 4536 engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs; 4537 engine->emit_flush = gen12_emit_flush_xcs; 4538 } 4539 engine->set_default_submission = guc_set_default_submission; 4540 engine->busyness = guc_engine_busyness; 4541 4542 engine->flags |= I915_ENGINE_SUPPORTS_STATS; 4543 engine->flags |= I915_ENGINE_HAS_PREEMPTION; 4544 engine->flags |= I915_ENGINE_HAS_TIMESLICES; 4545 4546 /* Wa_14014475959:dg2 */ 4547 if (engine->class == COMPUTE_CLASS) 4548 if (IS_GFX_GT_IP_STEP(engine->gt, IP_VER(12, 70), STEP_A0, STEP_B0) || 4549 IS_DG2(engine->i915)) 4550 engine->flags |= I915_ENGINE_USES_WA_HOLD_SWITCHOUT; 4551 4552 /* Wa_16019325821 */ 4553 /* Wa_14019159160 */ 4554 if ((engine->class == COMPUTE_CLASS || engine->class == RENDER_CLASS) && 4555 IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74))) 4556 engine->flags |= I915_ENGINE_USES_WA_HOLD_SWITCHOUT; 4557 4558 /* 4559 * TODO: GuC supports timeslicing and semaphores as well, but they're 4560 * handled by the firmware so some minor tweaks are required before 4561 * enabling. 
4562 * 4563 * engine->flags |= I915_ENGINE_HAS_SEMAPHORES; 4564 */ 4565 4566 engine->emit_bb_start = gen8_emit_bb_start; 4567 if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55)) 4568 engine->emit_bb_start = xehp_emit_bb_start; 4569 } 4570 4571 static void rcs_submission_override(struct intel_engine_cs *engine) 4572 { 4573 switch (GRAPHICS_VER(engine->i915)) { 4574 case 12: 4575 engine->emit_flush = gen12_emit_flush_rcs; 4576 engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs; 4577 break; 4578 case 11: 4579 engine->emit_flush = gen11_emit_flush_rcs; 4580 engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs; 4581 break; 4582 default: 4583 engine->emit_flush = gen8_emit_flush_rcs; 4584 engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs; 4585 break; 4586 } 4587 } 4588 4589 static inline void guc_default_irqs(struct intel_engine_cs *engine) 4590 { 4591 engine->irq_keep_mask = GT_RENDER_USER_INTERRUPT; 4592 intel_engine_set_irq_handler(engine, cs_irq_handler); 4593 } 4594 4595 static void guc_sched_engine_destroy(struct kref *kref) 4596 { 4597 struct i915_sched_engine *sched_engine = 4598 container_of(kref, typeof(*sched_engine), ref); 4599 struct intel_guc *guc = sched_engine->private_data; 4600 4601 guc->sched_engine = NULL; 4602 tasklet_kill(&sched_engine->tasklet); /* flush the callback */ 4603 kfree(sched_engine); 4604 } 4605 4606 int intel_guc_submission_setup(struct intel_engine_cs *engine) 4607 { 4608 struct drm_i915_private *i915 = engine->i915; 4609 struct intel_guc *guc = gt_to_guc(engine->gt); 4610 4611 /* 4612 * The setup relies on several assumptions (e.g. irqs always enabled) 4613 * that are only valid on gen11+ 4614 */ 4615 GEM_BUG_ON(GRAPHICS_VER(i915) < 11); 4616 4617 if (!guc->sched_engine) { 4618 guc->sched_engine = i915_sched_engine_create(ENGINE_VIRTUAL); 4619 if (!guc->sched_engine) 4620 return -ENOMEM; 4621 4622 guc->sched_engine->schedule = i915_schedule; 4623 guc->sched_engine->disabled = guc_sched_engine_disabled; 4624 guc->sched_engine->private_data = guc; 4625 guc->sched_engine->destroy = guc_sched_engine_destroy; 4626 guc->sched_engine->bump_inflight_request_prio = 4627 guc_bump_inflight_request_prio; 4628 guc->sched_engine->retire_inflight_request_prio = 4629 guc_retire_inflight_request_prio; 4630 tasklet_setup(&guc->sched_engine->tasklet, 4631 guc_submission_tasklet); 4632 } 4633 i915_sched_engine_put(engine->sched_engine); 4634 engine->sched_engine = i915_sched_engine_get(guc->sched_engine); 4635 4636 guc_default_vfuncs(engine); 4637 guc_default_irqs(engine); 4638 guc_init_breadcrumbs(engine); 4639 4640 if (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE) 4641 rcs_submission_override(engine); 4642 4643 lrc_init_wa_ctx(engine); 4644 4645 /* Finally, take ownership and responsibility for cleanup! 
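 * Both vfuncs are installed only after every step that can fail has succeeded.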
*/ 4646 engine->sanitize = guc_sanitize; 4647 engine->release = guc_release; 4648 4649 return 0; 4650 } 4651 4652 struct scheduling_policy { 4653 /* internal data */ 4654 u32 max_words, num_words; 4655 u32 count; 4656 /* API data */ 4657 struct guc_update_scheduling_policy h2g; 4658 }; 4659 4660 static u32 __guc_scheduling_policy_action_size(struct scheduling_policy *policy) 4661 { 4662 u32 *start = (void *)&policy->h2g; 4663 u32 *end = policy->h2g.data + policy->num_words; 4664 size_t delta = end - start; 4665 4666 return delta; 4667 } 4668 4669 static struct scheduling_policy *__guc_scheduling_policy_start_klv(struct scheduling_policy *policy) 4670 { 4671 policy->h2g.header.action = INTEL_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV; 4672 policy->max_words = ARRAY_SIZE(policy->h2g.data); 4673 policy->num_words = 0; 4674 policy->count = 0; 4675 4676 return policy; 4677 } 4678 4679 static void __guc_scheduling_policy_add_klv(struct scheduling_policy *policy, 4680 u32 action, u32 *data, u32 len) 4681 { 4682 u32 *klv_ptr = policy->h2g.data + policy->num_words; 4683 4684 GEM_BUG_ON((policy->num_words + 1 + len) > policy->max_words); 4685 *(klv_ptr++) = FIELD_PREP(GUC_KLV_0_KEY, action) | 4686 FIELD_PREP(GUC_KLV_0_LEN, len); 4687 memcpy(klv_ptr, data, sizeof(u32) * len); 4688 policy->num_words += 1 + len; 4689 policy->count++; 4690 } 4691 4692 static int __guc_action_set_scheduling_policies(struct intel_guc *guc, 4693 struct scheduling_policy *policy) 4694 { 4695 int ret; 4696 4697 ret = intel_guc_send(guc, (u32 *)&policy->h2g, 4698 __guc_scheduling_policy_action_size(policy)); 4699 if (ret < 0) { 4700 guc_probe_error(guc, "Failed to configure global scheduling policies: %pe!\n", 4701 ERR_PTR(ret)); 4702 return ret; 4703 } 4704 4705 if (ret != policy->count) { 4706 guc_warn(guc, "global scheduler policy processed %d of %d KLVs!", 4707 ret, policy->count); 4708 if (ret > policy->count) 4709 return -EPROTO; 4710 } 4711 4712 return 0; 4713 } 4714 4715 static int guc_init_global_schedule_policy(struct intel_guc *guc) 4716 { 4717 struct scheduling_policy policy; 4718 struct intel_gt *gt = guc_to_gt(guc); 4719 intel_wakeref_t wakeref; 4720 int ret; 4721 4722 if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER(1, 1, 0)) 4723 return 0; 4724 4725 __guc_scheduling_policy_start_klv(&policy); 4726 4727 with_intel_runtime_pm(>->i915->runtime_pm, wakeref) { 4728 u32 yield[] = { 4729 GLOBAL_SCHEDULE_POLICY_RC_YIELD_DURATION, 4730 GLOBAL_SCHEDULE_POLICY_RC_YIELD_RATIO, 4731 }; 4732 4733 __guc_scheduling_policy_add_klv(&policy, 4734 GUC_SCHEDULING_POLICIES_KLV_ID_RENDER_COMPUTE_YIELD, 4735 yield, ARRAY_SIZE(yield)); 4736 4737 ret = __guc_action_set_scheduling_policies(guc, &policy); 4738 } 4739 4740 return ret; 4741 } 4742 4743 static void guc_route_semaphores(struct intel_guc *guc, bool to_guc) 4744 { 4745 struct intel_gt *gt = guc_to_gt(guc); 4746 u32 val; 4747 4748 if (GRAPHICS_VER(gt->i915) < 12) 4749 return; 4750 4751 if (to_guc) 4752 val = GUC_SEM_INTR_ROUTE_TO_GUC | GUC_SEM_INTR_ENABLE_ALL; 4753 else 4754 val = 0; 4755 4756 intel_uncore_write(gt->uncore, GEN12_GUC_SEM_INTR_ENABLES, val); 4757 } 4758 4759 int intel_guc_submission_enable(struct intel_guc *guc) 4760 { 4761 int ret; 4762 4763 /* Semaphore interrupt enable and route to GuC */ 4764 guc_route_semaphores(guc, true); 4765 4766 ret = guc_init_submission(guc); 4767 if (ret) 4768 goto fail_sem; 4769 4770 ret = guc_init_engine_stats(guc); 4771 if (ret) 4772 goto fail_sem; 4773 4774 ret = guc_init_global_schedule_policy(guc); 4775 if (ret) 4776 goto fail_stats; 4777 
4778 return 0; 4779 4780 fail_stats: 4781 guc_fini_engine_stats(guc); 4782 fail_sem: 4783 guc_route_semaphores(guc, false); 4784 return ret; 4785 } 4786 4787 /* Note: By the time we're here, GuC may have already been reset */ 4788 void intel_guc_submission_disable(struct intel_guc *guc) 4789 { 4790 guc_cancel_busyness_worker(guc); 4791 4792 /* Semaphore interrupt disable and route to host */ 4793 guc_route_semaphores(guc, false); 4794 } 4795 4796 static bool __guc_submission_supported(struct intel_guc *guc) 4797 { 4798 /* GuC submission is unavailable for pre-Gen11 */ 4799 return intel_guc_is_supported(guc) && 4800 GRAPHICS_VER(guc_to_i915(guc)) >= 11; 4801 } 4802 4803 static bool __guc_submission_selected(struct intel_guc *guc) 4804 { 4805 struct drm_i915_private *i915 = guc_to_i915(guc); 4806 4807 if (!intel_guc_submission_is_supported(guc)) 4808 return false; 4809 4810 return i915->params.enable_guc & ENABLE_GUC_SUBMISSION; 4811 } 4812 4813 int intel_guc_sched_disable_gucid_threshold_max(struct intel_guc *guc) 4814 { 4815 return guc->submission_state.num_guc_ids - NUMBER_MULTI_LRC_GUC_ID(guc); 4816 } 4817 4818 /* 4819 * This default value of 33 milisecs (+1 milisec round up) ensures 30fps or higher 4820 * workloads are able to enjoy the latency reduction when delaying the schedule-disable 4821 * operation. This matches the 30fps game-render + encode (real world) workload this 4822 * knob was tested against. 4823 */ 4824 #define SCHED_DISABLE_DELAY_MS 34 4825 4826 /* 4827 * A threshold of 75% is a reasonable starting point considering that real world apps 4828 * generally don't get anywhere near this. 4829 */ 4830 #define NUM_SCHED_DISABLE_GUCIDS_DEFAULT_THRESHOLD(__guc) \ 4831 (((intel_guc_sched_disable_gucid_threshold_max(guc)) * 3) / 4) 4832 4833 void intel_guc_submission_init_early(struct intel_guc *guc) 4834 { 4835 xa_init_flags(&guc->context_lookup, XA_FLAGS_LOCK_IRQ); 4836 4837 spin_lock_init(&guc->submission_state.lock); 4838 INIT_LIST_HEAD(&guc->submission_state.guc_id_list); 4839 ida_init(&guc->submission_state.guc_ids); 4840 INIT_LIST_HEAD(&guc->submission_state.destroyed_contexts); 4841 INIT_WORK(&guc->submission_state.destroyed_worker, 4842 destroyed_worker_func); 4843 INIT_WORK(&guc->submission_state.reset_fail_worker, 4844 reset_fail_worker_func); 4845 4846 spin_lock_init(&guc->timestamp.lock); 4847 INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping); 4848 4849 guc->submission_state.sched_disable_delay_ms = SCHED_DISABLE_DELAY_MS; 4850 guc->submission_state.num_guc_ids = GUC_MAX_CONTEXT_ID; 4851 guc->submission_state.sched_disable_gucid_threshold = 4852 NUM_SCHED_DISABLE_GUCIDS_DEFAULT_THRESHOLD(guc); 4853 guc->submission_supported = __guc_submission_supported(guc); 4854 guc->submission_selected = __guc_submission_selected(guc); 4855 } 4856 4857 static inline struct intel_context * 4858 g2h_context_lookup(struct intel_guc *guc, u32 ctx_id) 4859 { 4860 struct intel_context *ce; 4861 4862 if (unlikely(ctx_id >= GUC_MAX_CONTEXT_ID)) { 4863 guc_err(guc, "Invalid ctx_id %u\n", ctx_id); 4864 return NULL; 4865 } 4866 4867 ce = __get_context(guc, ctx_id); 4868 if (unlikely(!ce)) { 4869 guc_err(guc, "Context is NULL, ctx_id %u\n", ctx_id); 4870 return NULL; 4871 } 4872 4873 if (unlikely(intel_context_is_child(ce))) { 4874 guc_err(guc, "Context is child, ctx_id %u\n", ctx_id); 4875 return NULL; 4876 } 4877 4878 return ce; 4879 } 4880 4881 static void wait_wake_outstanding_tlb_g2h(struct intel_guc *guc, u32 seqno) 4882 { 4883 struct intel_guc_tlb_wait *wait; 4884 unsigned 
long flags; 4885 4886 xa_lock_irqsave(&guc->tlb_lookup, flags); 4887 wait = xa_load(&guc->tlb_lookup, seqno); 4888 4889 if (wait) 4890 wake_up(&wait->wq); 4891 else 4892 guc_dbg(guc, 4893 "Stale TLB invalidation response with seqno %d\n", seqno); 4894 4895 xa_unlock_irqrestore(&guc->tlb_lookup, flags); 4896 } 4897 4898 int intel_guc_tlb_invalidation_done(struct intel_guc *guc, 4899 const u32 *payload, u32 len) 4900 { 4901 if (len < 1) 4902 return -EPROTO; 4903 4904 wait_wake_outstanding_tlb_g2h(guc, payload[0]); 4905 return 0; 4906 } 4907 4908 static long must_wait_woken(struct wait_queue_entry *wq_entry, long timeout) 4909 { 4910 /* 4911 * This is equivalent to wait_woken() with the exception that 4912 * we do not wake up early if the kthread task has been completed. 4913 * As we are called from page reclaim in any task context, 4914 * we may be invoked from stopped kthreads, but we *must* 4915 * complete the wait from the HW. 4916 */ 4917 do { 4918 set_current_state(TASK_UNINTERRUPTIBLE); 4919 if (wq_entry->flags & WQ_FLAG_WOKEN) 4920 break; 4921 4922 timeout = schedule_timeout(timeout); 4923 } while (timeout); 4924 4925 /* See wait_woken() and woken_wake_function() */ 4926 __set_current_state(TASK_RUNNING); 4927 smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); 4928 4929 return timeout; 4930 } 4931 4932 static bool intel_gt_is_enabled(const struct intel_gt *gt) 4933 { 4934 /* Check if GT is wedged or suspended */ 4935 if (intel_gt_is_wedged(gt) || !intel_irqs_enabled(gt->i915)) 4936 return false; 4937 return true; 4938 } 4939 4940 static int guc_send_invalidate_tlb(struct intel_guc *guc, 4941 enum intel_guc_tlb_invalidation_type type) 4942 { 4943 struct intel_guc_tlb_wait _wq, *wq = &_wq; 4944 struct intel_gt *gt = guc_to_gt(guc); 4945 DEFINE_WAIT_FUNC(wait, woken_wake_function); 4946 int err; 4947 u32 seqno; 4948 u32 action[] = { 4949 INTEL_GUC_ACTION_TLB_INVALIDATION, 4950 0, 4951 REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_TYPE_MASK, type) | 4952 REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_MODE_MASK, 4953 INTEL_GUC_TLB_INVAL_MODE_HEAVY) | 4954 INTEL_GUC_TLB_INVAL_FLUSH_CACHE, 4955 }; 4956 u32 size = ARRAY_SIZE(action); 4957 4958 /* 4959 * Early guard against GT enablement. TLB invalidation should not be 4960 * attempted if the GT is disabled due to suspend/wedge. 4961 */ 4962 if (!intel_gt_is_enabled(gt)) 4963 return -EINVAL; 4964 4965 init_waitqueue_head(&_wq.wq); 4966 4967 if (xa_alloc_cyclic_irq(&guc->tlb_lookup, &seqno, wq, 4968 xa_limit_32b, &guc->next_seqno, 4969 GFP_ATOMIC | __GFP_NOWARN) < 0) { 4970 /* Under severe memory pressure? Serialise TLB allocations */ 4971 xa_lock_irq(&guc->tlb_lookup); 4972 wq = xa_load(&guc->tlb_lookup, guc->serial_slot); 4973 wait_event_lock_irq(wq->wq, 4974 !READ_ONCE(wq->busy), 4975 guc->tlb_lookup.xa_lock); 4976 /* 4977 * Update wq->busy under lock to ensure only one waiter can 4978 * issue the TLB invalidation command using the serial slot at a 4979 * time. The condition is set to true before releasing the lock 4980 * so that other caller continue to wait until woken up again. 4981 */ 4982 wq->busy = true; 4983 xa_unlock_irq(&guc->tlb_lookup); 4984 4985 seqno = guc->serial_slot; 4986 } 4987 4988 action[1] = seqno; 4989 4990 add_wait_queue(&wq->wq, &wait); 4991 4992 /* This is a critical reclaim path and thus we must loop here. */ 4993 err = intel_guc_send_busy_loop(guc, action, size, G2H_LEN_DW_INVALIDATE_TLB, true); 4994 if (err) 4995 goto out; 4996 4997 /* 4998 * Late guard against GT enablement. 
It is not an error for the TLB 4999 * invalidation to time out if the GT is disabled during the process 5000 * due to suspend/wedge. In fact, the TLB invalidation is cancelled 5001 * in this case. 5002 */ 5003 if (!must_wait_woken(&wait, intel_guc_ct_max_queue_time_jiffies()) && 5004 intel_gt_is_enabled(gt)) { 5005 guc_err(guc, 5006 "TLB invalidation response timed out for seqno %u\n", seqno); 5007 err = -ETIME; 5008 } 5009 out: 5010 remove_wait_queue(&wq->wq, &wait); 5011 if (seqno != guc->serial_slot) 5012 xa_erase_irq(&guc->tlb_lookup, seqno); 5013 5014 return err; 5015 } 5016 5017 /* Send a H2G command to invalidate the TLBs at engine level and beyond. */ 5018 int intel_guc_invalidate_tlb_engines(struct intel_guc *guc) 5019 { 5020 return guc_send_invalidate_tlb(guc, INTEL_GUC_TLB_INVAL_ENGINES); 5021 } 5022 5023 /* Send a H2G command to invalidate the GuC's internal TLB. */ 5024 int intel_guc_invalidate_tlb_guc(struct intel_guc *guc) 5025 { 5026 return guc_send_invalidate_tlb(guc, INTEL_GUC_TLB_INVAL_GUC); 5027 } 5028 5029 int intel_guc_deregister_done_process_msg(struct intel_guc *guc, 5030 const u32 *msg, 5031 u32 len) 5032 { 5033 struct intel_context *ce; 5034 u32 ctx_id; 5035 5036 if (unlikely(len < 1)) { 5037 guc_err(guc, "Invalid length %u\n", len); 5038 return -EPROTO; 5039 } 5040 ctx_id = msg[0]; 5041 5042 ce = g2h_context_lookup(guc, ctx_id); 5043 if (unlikely(!ce)) 5044 return -EPROTO; 5045 5046 trace_intel_context_deregister_done(ce); 5047 5048 #ifdef CONFIG_DRM_I915_SELFTEST 5049 if (unlikely(ce->drop_deregister)) { 5050 ce->drop_deregister = false; 5051 return 0; 5052 } 5053 #endif 5054 5055 if (context_wait_for_deregister_to_register(ce)) { 5056 struct intel_runtime_pm *runtime_pm = 5057 &ce->engine->gt->i915->runtime_pm; 5058 intel_wakeref_t wakeref; 5059 5060 /* 5061 * Previous owner of this guc_id has been deregistered, now it is 5062 * safe to register this context.
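 * Registration is issued under a runtime PM wakeref; once it has been
 * sent, guc_signal_context_fence() releases any requests that were
 * blocked waiting for the previous owner's deregistration to complete.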
5063 */ 5064 with_intel_runtime_pm(runtime_pm, wakeref) 5065 register_context(ce, true); 5066 guc_signal_context_fence(ce); 5067 intel_context_put(ce); 5068 } else if (context_destroyed(ce)) { 5069 /* Context has been destroyed */ 5070 intel_gt_pm_put_async_untracked(guc_to_gt(guc)); 5071 release_guc_id(guc, ce); 5072 __guc_context_destroy(ce); 5073 } 5074 5075 decr_outstanding_submission_g2h(guc); 5076 5077 return 0; 5078 } 5079 5080 int intel_guc_sched_done_process_msg(struct intel_guc *guc, 5081 const u32 *msg, 5082 u32 len) 5083 { 5084 struct intel_context *ce; 5085 unsigned long flags; 5086 u32 ctx_id; 5087 5088 if (unlikely(len < 2)) { 5089 guc_err(guc, "Invalid length %u\n", len); 5090 return -EPROTO; 5091 } 5092 ctx_id = msg[0]; 5093 5094 ce = g2h_context_lookup(guc, ctx_id); 5095 if (unlikely(!ce)) 5096 return -EPROTO; 5097 5098 if (unlikely(context_destroyed(ce) || 5099 (!context_pending_enable(ce) && 5100 !context_pending_disable(ce)))) { 5101 guc_err(guc, "Bad context sched_state 0x%x, ctx_id %u\n", 5102 ce->guc_state.sched_state, ctx_id); 5103 return -EPROTO; 5104 } 5105 5106 trace_intel_context_sched_done(ce); 5107 5108 if (context_pending_enable(ce)) { 5109 #ifdef CONFIG_DRM_I915_SELFTEST 5110 if (unlikely(ce->drop_schedule_enable)) { 5111 ce->drop_schedule_enable = false; 5112 return 0; 5113 } 5114 #endif 5115 5116 spin_lock_irqsave(&ce->guc_state.lock, flags); 5117 clr_context_pending_enable(ce); 5118 spin_unlock_irqrestore(&ce->guc_state.lock, flags); 5119 } else if (context_pending_disable(ce)) { 5120 bool banned; 5121 5122 #ifdef CONFIG_DRM_I915_SELFTEST 5123 if (unlikely(ce->drop_schedule_disable)) { 5124 ce->drop_schedule_disable = false; 5125 return 0; 5126 } 5127 #endif 5128 5129 /* 5130 * Unpin must be done before __guc_signal_context_fence, 5131 * otherwise a race exists between the requests getting 5132 * submitted + retired before this unpin completes resulting in 5133 * the pin_count going to zero and the context still being 5134 * enabled. 
5135 */ 5136 intel_context_sched_disable_unpin(ce); 5137 5138 spin_lock_irqsave(&ce->guc_state.lock, flags); 5139 banned = context_banned(ce); 5140 clr_context_banned(ce); 5141 clr_context_pending_disable(ce); 5142 __guc_signal_context_fence(ce); 5143 guc_blocked_fence_complete(ce); 5144 spin_unlock_irqrestore(&ce->guc_state.lock, flags); 5145 5146 if (banned) { 5147 guc_cancel_context_requests(ce); 5148 intel_engine_signal_breadcrumbs(ce->engine); 5149 } 5150 } 5151 5152 decr_outstanding_submission_g2h(guc); 5153 intel_context_put(ce); 5154 5155 return 0; 5156 } 5157 5158 static void capture_error_state(struct intel_guc *guc, 5159 struct intel_context *ce) 5160 { 5161 struct intel_gt *gt = guc_to_gt(guc); 5162 struct drm_i915_private *i915 = gt->i915; 5163 intel_wakeref_t wakeref; 5164 intel_engine_mask_t engine_mask; 5165 5166 if (intel_engine_is_virtual(ce->engine)) { 5167 struct intel_engine_cs *e; 5168 intel_engine_mask_t tmp, virtual_mask = ce->engine->mask; 5169 5170 engine_mask = 0; 5171 for_each_engine_masked(e, ce->engine->gt, virtual_mask, tmp) { 5172 bool match = intel_guc_capture_is_matching_engine(gt, ce, e); 5173 5174 if (match) { 5175 intel_engine_set_hung_context(e, ce); 5176 engine_mask |= e->mask; 5177 i915_increase_reset_engine_count(&i915->gpu_error, 5178 e); 5179 } 5180 } 5181 5182 if (!engine_mask) { 5183 guc_warn(guc, "No matching physical engine capture for virtual engine context 0x%04X / %s", 5184 ce->guc_id.id, ce->engine->name); 5185 engine_mask = ~0U; 5186 } 5187 } else { 5188 intel_engine_set_hung_context(ce->engine, ce); 5189 engine_mask = ce->engine->mask; 5190 i915_increase_reset_engine_count(&i915->gpu_error, ce->engine); 5191 } 5192 5193 with_intel_runtime_pm(&i915->runtime_pm, wakeref) 5194 i915_capture_error_state(gt, engine_mask, CORE_DUMP_FLAG_IS_GUC_CAPTURE); 5195 } 5196 5197 static void guc_context_replay(struct intel_context *ce) 5198 { 5199 struct i915_sched_engine *sched_engine = ce->engine->sched_engine; 5200 5201 __guc_reset_context(ce, ce->engine->mask); 5202 tasklet_hi_schedule(&sched_engine->tasklet); 5203 } 5204 5205 static void guc_handle_context_reset(struct intel_guc *guc, 5206 struct intel_context *ce) 5207 { 5208 bool capture = intel_context_is_schedulable(ce); 5209 5210 trace_intel_context_reset(ce); 5211 5212 guc_dbg(guc, "%s context reset notification: 0x%04X on %s, exiting = %s, banned = %s\n", 5213 capture ? "Got" : "Ignoring", 5214 ce->guc_id.id, ce->engine->name, 5215 str_yes_no(intel_context_is_exiting(ce)), 5216 str_yes_no(intel_context_is_banned(ce))); 5217 5218 if (capture) { 5219 capture_error_state(guc, ce); 5220 guc_context_replay(ce); 5221 } 5222 } 5223 5224 int intel_guc_context_reset_process_msg(struct intel_guc *guc, 5225 const u32 *msg, u32 len) 5226 { 5227 struct intel_context *ce; 5228 unsigned long flags; 5229 int ctx_id; 5230 5231 if (unlikely(len != 1)) { 5232 guc_err(guc, "Invalid length %u", len); 5233 return -EPROTO; 5234 } 5235 5236 ctx_id = msg[0]; 5237 5238 /* 5239 * The context lookup uses the xarray but lookups only require an RCU lock 5240 * not the full spinlock. So take the lock explicitly and keep it until the 5241 * context has been reference count locked to ensure it can't be destroyed 5242 * asynchronously until the reset is done. 
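 * Concretely: xa_lock_irqsave(), g2h_context_lookup(), intel_context_get()
 * on success, xa_unlock_irqrestore(); the reference is dropped with
 * intel_context_put() once guc_handle_context_reset() has run.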
5243 */ 5244 xa_lock_irqsave(&guc->context_lookup, flags); 5245 ce = g2h_context_lookup(guc, ctx_id); 5246 if (ce) 5247 intel_context_get(ce); 5248 xa_unlock_irqrestore(&guc->context_lookup, flags); 5249 5250 if (unlikely(!ce)) 5251 return -EPROTO; 5252 5253 guc_handle_context_reset(guc, ce); 5254 intel_context_put(ce); 5255 5256 return 0; 5257 } 5258 5259 int intel_guc_error_capture_process_msg(struct intel_guc *guc, 5260 const u32 *msg, u32 len) 5261 { 5262 u32 status; 5263 5264 if (unlikely(len != 1)) { 5265 guc_dbg(guc, "Invalid length %u", len); 5266 return -EPROTO; 5267 } 5268 5269 status = msg[0] & INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_MASK; 5270 if (status == INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE) 5271 guc_warn(guc, "No space for error capture"); 5272 5273 intel_guc_capture_process(guc); 5274 5275 return 0; 5276 } 5277 5278 struct intel_engine_cs * 5279 intel_guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance) 5280 { 5281 struct intel_gt *gt = guc_to_gt(guc); 5282 u8 engine_class = guc_class_to_engine_class(guc_class); 5283 5284 /* Class index is checked in class converter */ 5285 GEM_BUG_ON(instance > MAX_ENGINE_INSTANCE); 5286 5287 return gt->engine_class[engine_class][instance]; 5288 } 5289 5290 static void reset_fail_worker_func(struct work_struct *w) 5291 { 5292 struct intel_guc *guc = container_of(w, struct intel_guc, 5293 submission_state.reset_fail_worker); 5294 struct intel_gt *gt = guc_to_gt(guc); 5295 intel_engine_mask_t reset_fail_mask; 5296 unsigned long flags; 5297 5298 spin_lock_irqsave(&guc->submission_state.lock, flags); 5299 reset_fail_mask = guc->submission_state.reset_fail_mask; 5300 guc->submission_state.reset_fail_mask = 0; 5301 spin_unlock_irqrestore(&guc->submission_state.lock, flags); 5302 5303 if (likely(reset_fail_mask)) { 5304 struct intel_engine_cs *engine; 5305 enum intel_engine_id id; 5306 5307 /* 5308 * GuC is toast at this point - it dead loops after sending the failed 5309 * reset notification. So need to manually determine the guilty context. 5310 * Note that it should be reliable to do this here because the GuC is 5311 * toast and will not be scheduling behind the KMD's back. 5312 */ 5313 for_each_engine_masked(engine, gt, reset_fail_mask, id) 5314 intel_guc_find_hung_context(engine); 5315 5316 intel_gt_handle_error(gt, reset_fail_mask, 5317 I915_ERROR_CAPTURE, 5318 "GuC failed to reset engine mask=0x%x", 5319 reset_fail_mask); 5320 } 5321 } 5322 5323 int intel_guc_engine_failure_process_msg(struct intel_guc *guc, 5324 const u32 *msg, u32 len) 5325 { 5326 struct intel_engine_cs *engine; 5327 u8 guc_class, instance; 5328 u32 reason; 5329 unsigned long flags; 5330 5331 if (unlikely(len != 3)) { 5332 guc_err(guc, "Invalid length %u", len); 5333 return -EPROTO; 5334 } 5335 5336 guc_class = msg[0]; 5337 instance = msg[1]; 5338 reason = msg[2]; 5339 5340 engine = intel_guc_lookup_engine(guc, guc_class, instance); 5341 if (unlikely(!engine)) { 5342 guc_err(guc, "Invalid engine %d:%d", guc_class, instance); 5343 return -EPROTO; 5344 } 5345 5346 /* 5347 * This is an unexpected failure of a hardware feature. So, log a real 5348 * error message not just the informational that comes with the reset. 
5349 */ 5350 guc_err(guc, "Engine reset failed on %d:%d (%s) because 0x%08X", 5351 guc_class, instance, engine->name, reason); 5352 5353 spin_lock_irqsave(&guc->submission_state.lock, flags); 5354 guc->submission_state.reset_fail_mask |= engine->mask; 5355 spin_unlock_irqrestore(&guc->submission_state.lock, flags); 5356 5357 /* 5358 * A GT reset flushes this worker queue (G2H handler) so we must use 5359 * another worker to trigger a GT reset. 5360 */ 5361 queue_work(system_unbound_wq, &guc->submission_state.reset_fail_worker); 5362 5363 return 0; 5364 } 5365 5366 void intel_guc_find_hung_context(struct intel_engine_cs *engine) 5367 { 5368 struct intel_guc *guc = gt_to_guc(engine->gt); 5369 struct intel_context *ce; 5370 struct i915_request *rq; 5371 unsigned long index; 5372 unsigned long flags; 5373 5374 /* Reset called during driver load? GuC not yet initialised! */ 5375 if (unlikely(!guc_submission_initialized(guc))) 5376 return; 5377 5378 xa_lock_irqsave(&guc->context_lookup, flags); 5379 xa_for_each(&guc->context_lookup, index, ce) { 5380 bool found; 5381 5382 if (!kref_get_unless_zero(&ce->ref)) 5383 continue; 5384 5385 xa_unlock(&guc->context_lookup); 5386 5387 if (!intel_context_is_pinned(ce)) 5388 goto next; 5389 5390 if (intel_engine_is_virtual(ce->engine)) { 5391 if (!(ce->engine->mask & engine->mask)) 5392 goto next; 5393 } else { 5394 if (ce->engine != engine) 5395 goto next; 5396 } 5397 5398 found = false; 5399 spin_lock(&ce->guc_state.lock); 5400 list_for_each_entry(rq, &ce->guc_state.requests, sched.link) { 5401 if (i915_test_request_state(rq) != I915_REQUEST_ACTIVE) 5402 continue; 5403 5404 found = true; 5405 break; 5406 } 5407 spin_unlock(&ce->guc_state.lock); 5408 5409 if (found) { 5410 intel_engine_set_hung_context(engine, ce); 5411 5412 /* Can only cope with one hang at a time... */ 5413 intel_context_put(ce); 5414 xa_lock(&guc->context_lookup); 5415 goto done; 5416 } 5417 5418 next: 5419 intel_context_put(ce); 5420 xa_lock(&guc->context_lookup); 5421 } 5422 done: 5423 xa_unlock_irqrestore(&guc->context_lookup, flags); 5424 } 5425 5426 void intel_guc_dump_active_requests(struct intel_engine_cs *engine, 5427 struct i915_request *hung_rq, 5428 struct drm_printer *m) 5429 { 5430 struct intel_guc *guc = gt_to_guc(engine->gt); 5431 struct intel_context *ce; 5432 unsigned long index; 5433 unsigned long flags; 5434 5435 /* Reset called during driver load? GuC not yet initialised! 
*/ 5436 if (unlikely(!guc_submission_initialized(guc))) 5437 return; 5438 5439 xa_lock_irqsave(&guc->context_lookup, flags); 5440 xa_for_each(&guc->context_lookup, index, ce) { 5441 if (!kref_get_unless_zero(&ce->ref)) 5442 continue; 5443 5444 xa_unlock(&guc->context_lookup); 5445 5446 if (!intel_context_is_pinned(ce)) 5447 goto next; 5448 5449 if (intel_engine_is_virtual(ce->engine)) { 5450 if (!(ce->engine->mask & engine->mask)) 5451 goto next; 5452 } else { 5453 if (ce->engine != engine) 5454 goto next; 5455 } 5456 5457 spin_lock(&ce->guc_state.lock); 5458 intel_engine_dump_active_requests(&ce->guc_state.requests, 5459 hung_rq, m); 5460 spin_unlock(&ce->guc_state.lock); 5461 5462 next: 5463 intel_context_put(ce); 5464 xa_lock(&guc->context_lookup); 5465 } 5466 xa_unlock_irqrestore(&guc->context_lookup, flags); 5467 } 5468 5469 void intel_guc_submission_print_info(struct intel_guc *guc, 5470 struct drm_printer *p) 5471 { 5472 struct i915_sched_engine *sched_engine = guc->sched_engine; 5473 struct rb_node *rb; 5474 unsigned long flags; 5475 5476 if (!sched_engine) 5477 return; 5478 5479 drm_printf(p, "GuC Submission API Version: %d.%d.%d\n", 5480 guc->submission_version.major, guc->submission_version.minor, 5481 guc->submission_version.patch); 5482 drm_printf(p, "GuC Number Outstanding Submission G2H: %u\n", 5483 atomic_read(&guc->outstanding_submission_g2h)); 5484 drm_printf(p, "GuC tasklet count: %u\n", 5485 atomic_read(&sched_engine->tasklet.count)); 5486 5487 spin_lock_irqsave(&sched_engine->lock, flags); 5488 drm_printf(p, "Requests in GuC submit tasklet:\n"); 5489 for (rb = rb_first_cached(&sched_engine->queue); rb; rb = rb_next(rb)) { 5490 struct i915_priolist *pl = to_priolist(rb); 5491 struct i915_request *rq; 5492 5493 priolist_for_each_request(rq, pl) 5494 drm_printf(p, "guc_id=%u, seqno=%llu\n", 5495 rq->context->guc_id.id, 5496 rq->fence.seqno); 5497 } 5498 spin_unlock_irqrestore(&sched_engine->lock, flags); 5499 drm_printf(p, "\n"); 5500 } 5501 5502 static inline void guc_log_context_priority(struct drm_printer *p, 5503 struct intel_context *ce) 5504 { 5505 int i; 5506 5507 drm_printf(p, "\t\tPriority: %d\n", ce->guc_state.prio); 5508 drm_printf(p, "\t\tNumber Requests (lower index == higher priority)\n"); 5509 for (i = GUC_CLIENT_PRIORITY_KMD_HIGH; 5510 i < GUC_CLIENT_PRIORITY_NUM; ++i) { 5511 drm_printf(p, "\t\tNumber requests in priority band[%d]: %d\n", 5512 i, ce->guc_state.prio_count[i]); 5513 } 5514 drm_printf(p, "\n"); 5515 } 5516 5517 static inline void guc_log_context(struct drm_printer *p, 5518 struct intel_context *ce) 5519 { 5520 drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id.id); 5521 drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca); 5522 drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n", 5523 ce->ring->head, 5524 ce->lrc_reg_state[CTX_RING_HEAD]); 5525 drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n", 5526 ce->ring->tail, 5527 ce->lrc_reg_state[CTX_RING_TAIL]); 5528 drm_printf(p, "\t\tContext Pin Count: %u\n", 5529 atomic_read(&ce->pin_count)); 5530 drm_printf(p, "\t\tGuC ID Ref Count: %u\n", 5531 atomic_read(&ce->guc_id.ref)); 5532 drm_printf(p, "\t\tSchedule State: 0x%x\n", 5533 ce->guc_state.sched_state); 5534 } 5535 5536 void intel_guc_submission_print_context_info(struct intel_guc *guc, 5537 struct drm_printer *p) 5538 { 5539 struct intel_context *ce; 5540 unsigned long index; 5541 unsigned long flags; 5542 5543 xa_lock_irqsave(&guc->context_lookup, flags); 5544 xa_for_each(&guc->context_lookup, index, ce) { 5545 
		GEM_BUG_ON(intel_context_is_child(ce));

		guc_log_context(p, ce);
		guc_log_context_priority(p, ce);

		if (intel_context_is_parent(ce)) {
			struct intel_context *child;

			drm_printf(p, "\t\tNumber children: %u\n",
				   ce->parallel.number_children);

			if (ce->parallel.guc.wq_status) {
				drm_printf(p, "\t\tWQI Head: %u\n",
					   READ_ONCE(*ce->parallel.guc.wq_head));
				drm_printf(p, "\t\tWQI Tail: %u\n",
					   READ_ONCE(*ce->parallel.guc.wq_tail));
				drm_printf(p, "\t\tWQI Status: %u\n",
					   READ_ONCE(*ce->parallel.guc.wq_status));
			}

			if (ce->engine->emit_bb_start ==
			    emit_bb_start_parent_no_preempt_mid_batch) {
				u8 i;

				drm_printf(p, "\t\tChildren Go: %u\n",
					   get_children_go_value(ce));
				for (i = 0; i < ce->parallel.number_children; ++i)
					drm_printf(p, "\t\tChildren Join: %u\n",
						   get_children_join_value(ce, i));
			}

			for_each_child(ce, child)
				guc_log_context(p, child);
		}
	}
	xa_unlock_irqrestore(&guc->context_lookup, flags);
}

static inline u32 get_children_go_addr(struct intel_context *ce)
{
	GEM_BUG_ON(!intel_context_is_parent(ce));

	return i915_ggtt_offset(ce->state) +
		__get_parent_scratch_offset(ce) +
		offsetof(struct parent_scratch, go.semaphore);
}

static inline u32 get_children_join_addr(struct intel_context *ce,
					 u8 child_index)
{
	GEM_BUG_ON(!intel_context_is_parent(ce));

	return i915_ggtt_offset(ce->state) +
		__get_parent_scratch_offset(ce) +
		offsetof(struct parent_scratch, join[child_index].semaphore);
}

#define PARENT_GO_BB			1
#define PARENT_GO_FINI_BREADCRUMB	0
#define CHILD_GO_BB			1
#define CHILD_GO_FINI_BREADCRUMB	0
static int emit_bb_start_parent_no_preempt_mid_batch(struct i915_request *rq,
						     u64 offset, u32 len,
						     const unsigned int flags)
{
	struct intel_context *ce = rq->context;
	u32 *cs;
	u8 i;

	GEM_BUG_ON(!intel_context_is_parent(ce));

	cs = intel_ring_begin(rq, 10 + 4 * ce->parallel.number_children);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Wait on children */
	for (i = 0; i < ce->parallel.number_children; ++i) {
		*cs++ = (MI_SEMAPHORE_WAIT |
			 MI_SEMAPHORE_GLOBAL_GTT |
			 MI_SEMAPHORE_POLL |
			 MI_SEMAPHORE_SAD_EQ_SDD);
		*cs++ = PARENT_GO_BB;
		*cs++ = get_children_join_addr(ce, i);
		*cs++ = 0;
	}

	/* Turn off preemption */
	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
	*cs++ = MI_NOOP;

	/* Tell children go */
	cs = gen8_emit_ggtt_write(cs,
				  CHILD_GO_BB,
				  get_children_go_addr(ce),
				  0);

	/* Jump to batch */
	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	return 0;
}

static int emit_bb_start_child_no_preempt_mid_batch(struct i915_request *rq,
						    u64 offset, u32 len,
						    const unsigned int flags)
{
	struct intel_context *ce = rq->context;
	struct intel_context *parent = intel_context_to_parent(ce);
	u32 *cs;

	GEM_BUG_ON(!intel_context_is_child(ce));

	cs = intel_ring_begin(rq, 12);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Signal parent */
	cs = gen8_emit_ggtt_write(cs,
				  PARENT_GO_BB,
				  get_children_join_addr(parent,
							 ce->parallel.child_index),
				  0);

	/* Wait on parent for go */
	*cs++ = (MI_SEMAPHORE_WAIT |
		 MI_SEMAPHORE_GLOBAL_GTT |
		 MI_SEMAPHORE_POLL |
		 MI_SEMAPHORE_SAD_EQ_SDD);
	*cs++ = CHILD_GO_BB;
	*cs++ = get_children_go_addr(parent);
	*cs++ = 0;

	/* Turn off preemption */
	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;

	/* Jump to batch */
	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);

	intel_ring_advance(rq, cs);

	return 0;
}

static u32 *
__emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
						   u32 *cs)
{
	struct intel_context *ce = rq->context;
	u8 i;

	GEM_BUG_ON(!intel_context_is_parent(ce));

	/* Wait on children */
	for (i = 0; i < ce->parallel.number_children; ++i) {
		*cs++ = (MI_SEMAPHORE_WAIT |
			 MI_SEMAPHORE_GLOBAL_GTT |
			 MI_SEMAPHORE_POLL |
			 MI_SEMAPHORE_SAD_EQ_SDD);
		*cs++ = PARENT_GO_FINI_BREADCRUMB;
		*cs++ = get_children_join_addr(ce, i);
		*cs++ = 0;
	}

	/* Turn on preemption */
	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	*cs++ = MI_NOOP;

	/* Tell children go */
	cs = gen8_emit_ggtt_write(cs,
				  CHILD_GO_FINI_BREADCRUMB,
				  get_children_go_addr(ce),
				  0);

	return cs;
}

/*
 * If this is true, a submission of multi-lrc requests had an error and the
 * requests need to be skipped. The front end (execbuf IOCTL) should've called
 * i915_request_skip() which squashes the BB but we still need to emit the fini
 * breadcrumb seqno write. At this point we don't know how many of the requests
 * in the multi-lrc submission were generated so we can't do the handshake
 * between the parent and children (e.g. if 4 requests should be generated but
 * the 2nd hit an error only 1 would be seen by the GuC backend). Simply skip
 * the handshake, but still emit the breadcrumb seqno, if an error has occurred
 * on any of the requests in the submission / relationship.
 */
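/*
 * Used by the parent/child fini breadcrumb emitters below: when the flag is
 * set, the parent/child handshake dwords are NOP'd out (see NON_SKIP_LEN) and
 * only the seqno write plus user interrupt are emitted.
 */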
static inline bool skip_handshake(struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_SKIP_PARALLEL, &rq->fence.flags);
}

#define NON_SKIP_LEN	6
static u32 *
emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
						 u32 *cs)
{
	struct intel_context *ce = rq->context;
	__maybe_unused u32 *before_fini_breadcrumb_user_interrupt_cs;
	__maybe_unused u32 *start_fini_breadcrumb_cs = cs;

	GEM_BUG_ON(!intel_context_is_parent(ce));

	if (unlikely(skip_handshake(rq))) {
		/*
		 * NOP everything in __emit_fini_breadcrumb_parent_no_preempt_mid_batch,
		 * the NON_SKIP_LEN comes from the length of the emits below.
		 */
		memset(cs, 0, sizeof(u32) *
		       (ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN));
		cs += ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN;
	} else {
		cs = __emit_fini_breadcrumb_parent_no_preempt_mid_batch(rq, cs);
	}

	/* Emit fini breadcrumb */
	before_fini_breadcrumb_user_interrupt_cs = cs;
	cs = gen8_emit_ggtt_write(cs,
				  rq->fence.seqno,
				  i915_request_active_timeline(rq)->hwsp_offset,
				  0);

	/* User interrupt */
	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_NOOP;

	/* Ensure our math for skip + emit is correct */
	GEM_BUG_ON(before_fini_breadcrumb_user_interrupt_cs + NON_SKIP_LEN !=
		   cs);
	GEM_BUG_ON(start_fini_breadcrumb_cs +
		   ce->engine->emit_fini_breadcrumb_dw != cs);

	rq->tail = intel_ring_offset(rq, cs);

	return cs;
}

static u32 *
__emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
						  u32 *cs)
{
	struct intel_context *ce = rq->context;
	struct intel_context *parent = intel_context_to_parent(ce);

	GEM_BUG_ON(!intel_context_is_child(ce));

	/* Turn on preemption */
	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	*cs++ = MI_NOOP;

	/* Signal parent */
	cs = gen8_emit_ggtt_write(cs,
				  PARENT_GO_FINI_BREADCRUMB,
				  get_children_join_addr(parent,
							 ce->parallel.child_index),
				  0);

	/* Wait on parent for go */
	*cs++ = (MI_SEMAPHORE_WAIT |
		 MI_SEMAPHORE_GLOBAL_GTT |
		 MI_SEMAPHORE_POLL |
		 MI_SEMAPHORE_SAD_EQ_SDD);
	*cs++ = CHILD_GO_FINI_BREADCRUMB;
	*cs++ = get_children_go_addr(parent);
	*cs++ = 0;

	return cs;
}

static u32 *
emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
						u32 *cs)
{
	struct intel_context *ce = rq->context;
	__maybe_unused u32 *before_fini_breadcrumb_user_interrupt_cs;
	__maybe_unused u32 *start_fini_breadcrumb_cs = cs;

	GEM_BUG_ON(!intel_context_is_child(ce));

	if (unlikely(skip_handshake(rq))) {
		/*
		 * NOP everything in __emit_fini_breadcrumb_child_no_preempt_mid_batch,
		 * the NON_SKIP_LEN comes from the length of the emits below.
		 */
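		/* A dword of zero executes as MI_NOOP, so the memset NOPs the span */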
		memset(cs, 0, sizeof(u32) *
		       (ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN));
		cs += ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN;
	} else {
		cs = __emit_fini_breadcrumb_child_no_preempt_mid_batch(rq, cs);
	}

	/* Emit fini breadcrumb */
	before_fini_breadcrumb_user_interrupt_cs = cs;
	cs = gen8_emit_ggtt_write(cs,
				  rq->fence.seqno,
				  i915_request_active_timeline(rq)->hwsp_offset,
				  0);

	/* User interrupt */
	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_NOOP;

	/* Ensure our math for skip + emit is correct */
	GEM_BUG_ON(before_fini_breadcrumb_user_interrupt_cs + NON_SKIP_LEN !=
		   cs);
	GEM_BUG_ON(start_fini_breadcrumb_cs +
		   ce->engine->emit_fini_breadcrumb_dw != cs);

	rq->tail = intel_ring_offset(rq, cs);

	return cs;
}

#undef NON_SKIP_LEN

static struct intel_context *
guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
		   unsigned long flags)
{
	struct guc_virtual_engine *ve;
	struct intel_guc *guc;
	unsigned int n;
	int err;

	ve = kzalloc(sizeof(*ve), GFP_KERNEL);
	if (!ve)
		return ERR_PTR(-ENOMEM);

	guc = gt_to_guc(siblings[0]->gt);

	ve->base.i915 = siblings[0]->i915;
	ve->base.gt = siblings[0]->gt;
	ve->base.uncore = siblings[0]->uncore;
	ve->base.id = -1;

	ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
	ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
	ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
	ve->base.saturated = ALL_ENGINES;

	snprintf(ve->base.name, sizeof(ve->base.name), "virtual");

	ve->base.sched_engine = i915_sched_engine_get(guc->sched_engine);

	ve->base.cops = &virtual_guc_context_ops;
	ve->base.request_alloc = guc_request_alloc;
	ve->base.bump_serial = virtual_guc_bump_serial;

	ve->base.submit_request = guc_submit_request;

	ve->base.flags = I915_ENGINE_IS_VIRTUAL;

	BUILD_BUG_ON(ilog2(VIRTUAL_ENGINES) < I915_NUM_ENGINES);
	ve->base.mask = VIRTUAL_ENGINES;

	intel_context_init(&ve->context, &ve->base);

	for (n = 0; n < count; n++) {
		struct intel_engine_cs *sibling = siblings[n];

		GEM_BUG_ON(!is_power_of_2(sibling->mask));
		if (sibling->mask & ve->base.mask) {
			guc_dbg(guc, "duplicate %s entry in load balancer\n",
				sibling->name);
			err = -EINVAL;
			goto err_put;
		}

		ve->base.mask |= sibling->mask;
		ve->base.logical_mask |= sibling->logical_mask;

		if (n != 0 && ve->base.class != sibling->class) {
			guc_dbg(guc, "invalid mixing of engine class, sibling %d, already %d\n",
				sibling->class, ve->base.class);
			err = -EINVAL;
			goto err_put;
		} else if (n == 0) {
			ve->base.class = sibling->class;
			ve->base.uabi_class = sibling->uabi_class;
			snprintf(ve->base.name, sizeof(ve->base.name),
				 "v%dx%d", ve->base.class, count);
			ve->base.context_size = sibling->context_size;

			ve->base.add_active_request =
				sibling->add_active_request;
			ve->base.remove_active_request =
				sibling->remove_active_request;
			ve->base.emit_bb_start = sibling->emit_bb_start;
			ve->base.emit_flush = sibling->emit_flush;
			ve->base.emit_init_breadcrumb =
				sibling->emit_init_breadcrumb;
			ve->base.emit_fini_breadcrumb =
				sibling->emit_fini_breadcrumb;
			ve->base.emit_fini_breadcrumb_dw =
				sibling->emit_fini_breadcrumb_dw;
			ve->base.breadcrumbs =
				intel_breadcrumbs_get(sibling->breadcrumbs);

			ve->base.flags |= sibling->flags;

			ve->base.props.timeslice_duration_ms =
				sibling->props.timeslice_duration_ms;
			ve->base.props.preempt_timeout_ms =
				sibling->props.preempt_timeout_ms;
		}
	}

	return &ve->context;

err_put:
	intel_context_put(&ve->context);
	return ERR_PTR(err);
}

bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t tmp, mask = ve->mask;

	for_each_engine_masked(engine, ve->gt, mask, tmp)
		if (READ_ONCE(engine->props.heartbeat_interval_ms))
			return true;

	return false;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_guc.c"
#include "selftest_guc_multi_lrc.c"
#include "selftest_guc_hangcheck.c"
#endif