/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#ifndef _INTEL_GUC_H_
#define _INTEL_GUC_H_

#include <linux/delay.h>
#include <linux/iosys-map.h>
#include <linux/xarray.h>

#include "intel_guc_ct.h"
#include "intel_guc_fw.h"
#include "intel_guc_fwif.h"
#include "intel_guc_log.h"
#include "intel_guc_reg.h"
#include "intel_guc_slpc_types.h"
#include "intel_uc_fw.h"
#include "intel_uncore.h"
#include "i915_utils.h"
#include "i915_vma.h"

struct __guc_ads_blob;
struct intel_guc_state_capture;

/**
 * struct intel_guc - Top level structure of GuC.
 *
 * It handles firmware loading and manages the client pool. intel_guc owns an
 * i915_sched_engine for submission.
 */
struct intel_guc {
	/** @fw: the GuC firmware */
	struct intel_uc_fw fw;
	/** @log: sub-structure containing GuC log related data and objects */
	struct intel_guc_log log;
	/** @ct: the command transport communication channel */
	struct intel_guc_ct ct;
	/** @slpc: sub-structure containing SLPC related data and objects */
	struct intel_guc_slpc slpc;
	/** @capture: the error-state-capture module's data and objects */
	struct intel_guc_state_capture *capture;

	/** @dbgfs_node: debugfs node */
	struct dentry *dbgfs_node;

	/** @sched_engine: Global engine used to submit requests to GuC */
	struct i915_sched_engine *sched_engine;
	/**
	 * @stalled_request: if GuC can't process a request for any reason, we
	 * save it until GuC restarts processing. No other request can be
	 * submitted until the stalled request is processed.
	 */
	struct i915_request *stalled_request;
	/**
	 * @submission_stall_reason: reason why submission is stalled
	 */
	enum {
		STALL_NONE,
		STALL_REGISTER_CONTEXT,
		STALL_MOVE_LRC_TAIL,
		STALL_ADD_REQUEST,
	} submission_stall_reason;

	/* intel_guc_recv interrupt related state */
	/** @irq_lock: protects GuC irq state */
	spinlock_t irq_lock;
	/**
	 * @msg_enabled_mask: mask of events that are processed when receiving
	 * an INTEL_GUC_ACTION_DEFAULT G2H message.
	 */
	unsigned int msg_enabled_mask;

	/**
	 * @outstanding_submission_g2h: number of outstanding GuC to Host
	 * responses related to GuC submission, used to determine if the GT is
	 * idle
	 */
	atomic_t outstanding_submission_g2h;

	/** @tlb_lookup: xarray to store all pending TLB invalidation requests */
	struct xarray tlb_lookup;

	/**
	 * @serial_slot: id of the initial waiter created in tlb_lookup, used
	 * only when allocation of a new waiter fails.
	 */
	u32 serial_slot;

	/** @next_seqno: the next id (sequence number) to allocate. */
	u32 next_seqno;

	/** @interrupts: pointers to GuC interrupt-managing functions. */
	struct {
		bool enabled;
		void (*reset)(struct intel_guc *guc);
		void (*enable)(struct intel_guc *guc);
		void (*disable)(struct intel_guc *guc);
	} interrupts;

	/**
	 * @submission_state: sub-structure for submission state protected by
	 * a single lock
	 */
	struct {
		/**
		 * @lock: protects everything in submission_state,
		 * ce->guc_id.id, and ce->guc_id.ref when transitioning in and
		 * out of zero
		 */
		spinlock_t lock;
		/**
		 * @guc_ids: used to allocate new guc_ids, single-lrc
		 */
		struct ida guc_ids;
		/**
		 * @num_guc_ids: number of guc_ids; a selftest feature allows
		 * reducing this number while testing.
		 */
		int num_guc_ids;
		/**
		 * @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc
		 */
		unsigned long *guc_ids_bitmap;
		/**
		 * @guc_id_list: list of intel_context with valid guc_ids but no
		 * refs
		 */
		struct list_head guc_id_list;
		/**
		 * @guc_ids_in_use: number of single-lrc guc_ids in use
		 */
		unsigned int guc_ids_in_use;
		/**
		 * @destroyed_contexts: list of contexts waiting to be destroyed
		 * (deregistered with the GuC)
		 */
		struct list_head destroyed_contexts;
		/**
		 * @destroyed_worker: worker to deregister contexts; needed
		 * because deregistration must take a GT PM reference, which
		 * cannot be done from the destroy function as it may be
		 * called from an atomic context (no sleeping)
		 */
		struct work_struct destroyed_worker;
		/**
		 * @reset_fail_worker: worker to trigger a GT reset after an
		 * engine reset fails
		 */
		struct work_struct reset_fail_worker;
		/**
		 * @reset_fail_mask: mask of engines that failed to reset
		 */
		intel_engine_mask_t reset_fail_mask;
		/**
		 * @sched_disable_delay_ms: schedule disable delay, in ms, for
		 * contexts
		 */
		unsigned int sched_disable_delay_ms;
		/**
		 * @sched_disable_gucid_threshold: threshold of minimum
		 * remaining available guc_ids before we start bypassing the
		 * schedule disable delay
		 */
		unsigned int sched_disable_gucid_threshold;
	} submission_state;

	/**
	 * @submission_supported: tracks whether we support GuC submission on
	 * the current platform
	 */
	bool submission_supported;
	/** @submission_selected: tracks whether the user enabled GuC submission */
	bool submission_selected;
	/** @submission_initialized: tracks whether GuC submission has been initialised */
	bool submission_initialized;
	/** @submission_version: Submission API version of the currently loaded firmware */
	struct intel_uc_fw_ver submission_version;

	/**
	 * @rc_supported: tracks whether we support GuC rc on the current
	 * platform
	 */
	bool rc_supported;
	/** @rc_selected: tracks whether the user enabled GuC rc */
	bool rc_selected;

	/** @ads_vma: object allocated to hold the GuC ADS */
	struct i915_vma *ads_vma;
	/** @ads_map: contents of the GuC ADS */
	struct iosys_map ads_map;
	/** @ads_regset_size: size of the save/restore regsets in the ADS */
	u32 ads_regset_size;
	/**
	 * @ads_regset_count: number of save/restore registers in the ADS for
	 * each engine
	 */
	u32 ads_regset_count[I915_NUM_ENGINES];
	/** @ads_regset: save/restore regsets in the ADS */
	struct guc_mmio_reg *ads_regset;
	/** @ads_golden_ctxt_size: size of the golden contexts in the ADS */
	u32 ads_golden_ctxt_size;
	/** @ads_capture_size: size of register lists in the ADS used for error capture */
	u32 ads_capture_size;
	/** @ads_engine_usage_size: size of engine usage in the ADS */
	u32 ads_engine_usage_size;

	/** @lrc_desc_pool_v69: object allocated to hold the GuC LRC descriptor pool */
	struct i915_vma *lrc_desc_pool_v69;
	/** @lrc_desc_pool_vaddr_v69: contents of the GuC LRC descriptor pool */
	void *lrc_desc_pool_vaddr_v69;

	/**
	 * @context_lookup: used to resolve intel_context from guc_id; if a
	 * context is present in this structure it is registered with the GuC
	 */
	struct xarray context_lookup;

	/** @params: Control params for fw initialization */
	u32 params[GUC_CTL_MAX_DWORDS];

	/** @send_regs: GuC's FW specific registers used for sending MMIO H2G */
	struct {
		u32 base;
		unsigned int count;
		enum forcewake_domains fw_domains;
	} send_regs;

	/** @notify_reg: register used to send interrupts to the GuC FW */
	i915_reg_t notify_reg;

	/**
	 * @mmio_msg: notification bitmask that the GuC writes in one of its
	 * registers when the CT channel is disabled, to be processed when the
	 * channel is back up.
	 */
	u32 mmio_msg;

	/** @send_mutex: used to serialize the intel_guc_send actions */
	struct mutex send_mutex;

	/**
	 * @timestamp: GT timestamp object that stores a copy of the timestamp
	 * and adjusts it for overflow using a worker.
	 */
	struct {
		/**
		 * @lock: Lock protecting the below fields and the engine stats.
		 */
		spinlock_t lock;

		/**
		 * @gt_stamp: 64-bit extended value of the GT timestamp.
		 */
		u64 gt_stamp;

		/**
		 * @ping_delay: Period for polling the GT timestamp for
		 * overflow.
		 */
		unsigned long ping_delay;

		/**
		 * @work: Periodic work to adjust GT timestamp, engine and
		 * context usage for overflows.
		 */
		struct delayed_work work;

		/**
		 * @shift: Right shift value for the gpm timestamp
		 */
		u32 shift;

		/**
		 * @last_stat_jiffies: jiffies at last actual stats collection
		 * time. We use this timestamp to ensure we don't oversample
		 * the stats because runtime power management events can
		 * trigger stats collection at much higher rates than required.
		 */
		unsigned long last_stat_jiffies;
	} timestamp;

	/**
	 * @dead_guc_worker: Asynchronous worker thread for forcing a GuC reset.
	 * Specifically used when the G2H handler wants to issue a reset. Resets
	 * require flushing the G2H queue. So, the G2H processing itself must not
	 * trigger a reset directly. Instead, go via this worker.
	 */
	struct work_struct dead_guc_worker;
	/**
	 * @last_dead_guc_jiffies: timestamp of the previous 'dead guc'
	 * occurrence, used to prevent a fundamentally broken system from
	 * continuously reloading the GuC.
	 */
	unsigned long last_dead_guc_jiffies;

#ifdef CONFIG_DRM_I915_SELFTEST
	/**
	 * @number_guc_id_stolen: The number of guc_ids that have been stolen
	 */
	int number_guc_id_stolen;
	/**
	 * @fast_response_selftest: Backdoor to CT handler for fast response selftest
	 */
	u32 fast_response_selftest;
#endif
};
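/*
 * Locking sketch for @submission_state (illustrative only; ce->guc_id.link
 * is assumed here to be the list node used with guc_id_list, as in the
 * submission code): everything in the sub-structure is protected by the
 * single submission_state.lock, taken irqsave as it may be contended from
 * interrupt context:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&guc->submission_state.lock, flags);
 *	list_add_tail(&ce->guc_id.link, &guc->submission_state.guc_id_list);
 *	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
 */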

struct intel_guc_tlb_wait {
	struct wait_queue_head wq;
	bool busy;
};

/*
 * GuC version number components are only 8-bit, so converting to a 32-bit
 * 8.8.8 integer works.
 */
#define MAKE_GUC_VER(maj, min, pat) (((maj) << 16) | ((min) << 8) | (pat))
#define MAKE_GUC_VER_STRUCT(ver) MAKE_GUC_VER((ver).major, (ver).minor, (ver).patch)
#define GUC_SUBMIT_VER(guc) MAKE_GUC_VER_STRUCT((guc)->submission_version)
#define GUC_FIRMWARE_VER(guc) MAKE_GUC_VER_STRUCT((guc)->fw.file_selected.ver)
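/*
 * For example, firmware version 70.29.2 encodes as
 * MAKE_GUC_VER(70, 29, 2) == (70 << 16) | (29 << 8) | 2 == 0x461d02, so a
 * version check reduces to a single integer comparison. A caller might gate
 * a feature on the submission interface version along these lines (the
 * version numbers and helper are hypothetical, for illustration):
 *
 *	if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 1, 3))
 *		use_newer_submit_interface(guc);
 */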

static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
{
	return container_of(log, struct intel_guc, log);
}

static
inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
	return intel_guc_ct_send(&guc->ct, action, len, NULL, 0, 0);
}

static
inline int intel_guc_send_nb(struct intel_guc *guc, const u32 *action, u32 len,
			     u32 g2h_len_dw)
{
	return intel_guc_ct_send(&guc->ct, action, len, NULL, 0,
				 MAKE_SEND_FLAGS(g2h_len_dw));
}

static inline int
intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len,
			   u32 *response_buf, u32 response_buf_size)
{
	return intel_guc_ct_send(&guc->ct, action, len,
				 response_buf, response_buf_size, 0);
}

static inline int intel_guc_send_busy_loop(struct intel_guc *guc,
					   const u32 *action,
					   u32 len,
					   u32 g2h_len_dw,
					   bool loop)
{
	int err;
	unsigned int sleep_period_ms = 1;
	bool not_atomic = !in_atomic() && !irqs_disabled();

	/*
	 * FIXME: Have caller pass in if we are in an atomic context to avoid
	 * using in_atomic(). It is likely safe here as we check for irqs
	 * disabled which basically all the spin locks in the i915 do but
	 * regardless this should be cleaned up.
	 */

	/* No sleeping with spin locks, just busy loop */
	might_sleep_if(loop && not_atomic);

retry:
	err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
	if (unlikely(err == -EBUSY && loop)) {
		if (likely(not_atomic)) {
			if (msleep_interruptible(sleep_period_ms))
				return -EINTR;
			sleep_period_ms = sleep_period_ms << 1;
		} else {
			cpu_relax();
		}
		goto retry;
	}

	return err;
}
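/*
 * A minimal usage sketch (the action identifier and parameters below are
 * placeholders, not a real H2G message): the action id goes in the first
 * dword, and loop=true is only legal when the caller may sleep. With
 * g2h_len_dw == 0 no space is reserved for a G2H response; on -EBUSY the
 * helper retries, sleeping with an exponentially growing period when not
 * in atomic context:
 *
 *	u32 action[] = { SOME_H2G_ACTION, param0, param1 };
 *	int ret;
 *
 *	ret = intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action),
 *				       0, true);
 */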

/* Only call this from the interrupt handler code */
static inline void intel_guc_to_host_event_handler(struct intel_guc *guc)
{
	if (guc->interrupts.enabled)
		intel_guc_ct_event_handler(&guc->ct);
}

/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
#define GUC_GGTT_TOP 0xFEE00000

/**
 * intel_guc_ggtt_offset() - Get and validate the GGTT offset of @vma
 * @guc: intel_guc structure.
 * @vma: i915 graphics virtual memory area.
 *
 * GuC does not allow any gfx GGTT address that falls into range
 * [0, ggtt.pin_bias), which is reserved for Boot ROM, SRAM and WOPCM.
 * Currently, in order to exclude [0, ggtt.pin_bias) address space from
 * GGTT, all gfx objects used by GuC are allocated with intel_guc_allocate_vma()
 * and pinned with PIN_OFFSET_BIAS along with the value of ggtt.pin_bias.
 *
 * Return: GGTT offset of the @vma.
 */
static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
					struct i915_vma *vma)
{
	u32 offset = i915_ggtt_offset(vma);

	GEM_BUG_ON(offset < i915_ggtt_pin_bias(vma));
	GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));

	return offset;
}
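/*
 * A minimal sketch of the intended flow (error handling mostly elided):
 * because intel_guc_allocate_vma() pins with PIN_OFFSET_BIAS at
 * ggtt.pin_bias, the offset obtained below satisfies the GEM_BUG_ON
 * checks above:
 *
 *	struct i915_vma *vma = intel_guc_allocate_vma(guc, SZ_4K);
 *	u32 ggtt_addr;
 *
 *	if (!IS_ERR(vma))
 *		ggtt_addr = intel_guc_ggtt_offset(guc, vma);
 */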

void intel_guc_init_early(struct intel_guc *guc);
void intel_guc_init_late(struct intel_guc *guc);
void intel_guc_init_send_regs(struct intel_guc *guc);
void intel_guc_write_params(struct intel_guc *guc);
int intel_guc_init(struct intel_guc *guc);
void intel_guc_fini(struct intel_guc *guc);
void intel_guc_notify(struct intel_guc *guc);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
			u32 *response_buf, u32 response_buf_size);
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len);
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
int intel_guc_suspend(struct intel_guc *guc);
int intel_guc_resume(struct intel_guc *guc);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
				   struct i915_vma **out_vma, void **out_vaddr);
int intel_guc_self_cfg32(struct intel_guc *guc, u16 key, u32 value);
int intel_guc_self_cfg64(struct intel_guc *guc, u16 key, u64 value);

static inline bool intel_guc_is_supported(struct intel_guc *guc)
{
	return intel_uc_fw_is_supported(&guc->fw);
}

static inline bool intel_guc_is_wanted(struct intel_guc *guc)
{
	return intel_uc_fw_is_enabled(&guc->fw);
}

static inline bool intel_guc_is_used(struct intel_guc *guc)
{
	GEM_BUG_ON(__intel_uc_fw_status(&guc->fw) == INTEL_UC_FIRMWARE_SELECTED);
	return intel_uc_fw_is_available(&guc->fw);
}

static inline bool intel_guc_is_fw_running(struct intel_guc *guc)
{
	return intel_uc_fw_is_running(&guc->fw);
}

static inline bool intel_guc_is_ready(struct intel_guc *guc)
{
	return intel_guc_is_fw_running(guc) && intel_guc_ct_enabled(&guc->ct);
}

static inline void intel_guc_reset_interrupts(struct intel_guc *guc)
{
	guc->interrupts.reset(guc);
}

static inline void intel_guc_enable_interrupts(struct intel_guc *guc)
{
	guc->interrupts.enable(guc);
}

static inline void intel_guc_disable_interrupts(struct intel_guc *guc)
{
	guc->interrupts.disable(guc);
}

static inline int intel_guc_sanitize(struct intel_guc *guc)
{
	intel_uc_fw_sanitize(&guc->fw);
	intel_guc_disable_interrupts(guc);
	intel_guc_ct_sanitize(&guc->ct);
	guc->mmio_msg = 0;

	return 0;
}

static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask |= mask;
	spin_unlock_irq(&guc->irq_lock);
}

static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask &= ~mask;
	spin_unlock_irq(&guc->irq_lock);
}
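/*
 * Illustrative sketch: enabling notification messages around CT bring-up.
 * The specific mask bits shown are an assumption for this example; callers
 * would pass the INTEL_GUC_RECV_MSG_* bits matching the events they can
 * handle, and clear them again before the channel goes down:
 *
 *	intel_guc_enable_msg(guc, INTEL_GUC_RECV_MSG_EXCEPTION |
 *				  INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
 *	...
 *	intel_guc_disable_msg(guc, INTEL_GUC_RECV_MSG_EXCEPTION |
 *				   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
 */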

int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout);

int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
					  const u32 *msg, u32 len);
int intel_guc_sched_done_process_msg(struct intel_guc *guc,
				     const u32 *msg, u32 len);
int intel_guc_context_reset_process_msg(struct intel_guc *guc,
					const u32 *msg, u32 len);
int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
					 const u32 *msg, u32 len);
int intel_guc_error_capture_process_msg(struct intel_guc *guc,
					const u32 *msg, u32 len);
int intel_guc_crash_process_msg(struct intel_guc *guc, u32 action);

struct intel_engine_cs *
intel_guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance);

void intel_guc_find_hung_context(struct intel_engine_cs *engine);

int intel_guc_global_policies_update(struct intel_guc *guc);

void intel_guc_context_ban(struct intel_context *ce, struct i915_request *rq);

void intel_guc_submission_reset_prepare(struct intel_guc *guc);
void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled);
void intel_guc_submission_reset_finish(struct intel_guc *guc);
void intel_guc_submission_cancel_requests(struct intel_guc *guc);

void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p);

void intel_guc_write_barrier(struct intel_guc *guc);

void intel_guc_dump_time_info(struct intel_guc *guc, struct drm_printer *p);

int intel_guc_sched_disable_gucid_threshold_max(struct intel_guc *guc);

bool intel_guc_tlb_invalidation_is_available(struct intel_guc *guc);
int intel_guc_invalidate_tlb_engines(struct intel_guc *guc);
int intel_guc_invalidate_tlb_guc(struct intel_guc *guc);
int intel_guc_tlb_invalidation_done(struct intel_guc *guc,
				    const u32 *payload, u32 len);
void wake_up_all_tlb_invalidate(struct intel_guc *guc);
#endif