drivers/gpu/drm/xe/xe_sriov_vf.c: SR-IOV VF post-migration recovery

/* @xe: the &xe_device to initialize */
void xe_sriov_vf_init_early(struct xe_device *xe)
{
        INIT_WORK(&xe->sriov.vf.migration.worker, migration_worker_func);
}

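/*
 * Sketch of the per-device migration state assumed by the snippets below; in
 * the driver these fields live under xe->sriov.vf.migration, and the struct
 * name here is illustrative only.
 */
struct example_vf_migration_state {
        struct work_struct worker;      /* post-migration recovery worker */
        unsigned long gt_flags;         /* bitmask of GTs ready for recovery */
};
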
/* @xe: the &xe_device struct instance
 * Returns: 0 if the operation completed successfully, or a negative error code.
 */
static int vf_post_migration_requery_guc(struct xe_device *xe)
{
        struct xe_gt *gt;
        unsigned int id;
        int err, ret = 0;

        for_each_gt(gt, xe, id) {
                /* refresh the GuC-provided VF configuration on each GT */
                err = xe_gt_sriov_vf_query_config(gt);
                ret = ret ?: err;
        }

        return ret;
}

/* @xe: the &xe_device struct instance
 * Return: true if another migration recovery is already flagged or its worker is pending.
 */
static bool vf_post_migration_imminent(struct xe_device *xe)
{
        return xe->sriov.vf.migration.gt_flags != 0 ||
               work_pending(&xe->sriov.vf.migration.worker);
}

/* Notify the GuC on each GT that the post-migration resource fixups are done. */
static void vf_post_migration_notify_resfix_done(struct xe_device *xe)
{
        struct xe_gt *gt;
        unsigned int id;

        for_each_gt(gt, xe, id) {
                if (vf_post_migration_imminent(xe))
                        goto skip;
                xe_gt_sriov_vf_notify_resfix_done(gt);
        }
        return;
skip:
        drm_dbg(&xe->drm, "another recovery imminent, skipping notifications\n");
}

static void vf_post_migration_recovery(struct xe_device *xe)
{
        int err;

        drm_dbg(&xe->drm, "migration recovery in progress\n");
        xe_pm_runtime_get(xe);
        err = vf_post_migration_requery_guc(xe);
        if (vf_post_migration_imminent(xe))
                goto defer;
        if (unlikely(err))
                goto fail;

        vf_post_migration_notify_resfix_done(xe);
        xe_pm_runtime_put(xe);
        drm_notice(&xe->drm, "migration recovery ended\n");
        return;
defer:
        xe_pm_runtime_put(xe);
        drm_dbg(&xe->drm, "migration recovery deferred\n");
        return;
fail:
        xe_pm_runtime_put(xe);
        drm_err(&xe->drm, "migration recovery failed (%pe)\n", ERR_PTR(err));
        xe_device_declare_wedged(xe);
}

static void migration_worker_func(struct work_struct *w)
{
        struct xe_device *xe = container_of(w, struct xe_device,
                                            sriov.vf.migration.worker);

        vf_post_migration_recovery(xe);
}

static bool vf_ready_to_recovery_on_all_gts(struct xe_device *xe)
{
        struct xe_gt *gt;
        unsigned int id;

        for_each_gt(gt, xe, id) {
                if (!test_bit(id, &xe->sriov.vf.migration.gt_flags)) {
                        /* at least one GT has not flagged readiness yet */
                        return false;
                }
        }
        return true;
}

/* @xe: the &xe_device to start recovery on (to be called only on a VF) */
void xe_sriov_vf_start_migration_recovery(struct xe_device *xe)
{
        bool started;

        xe_assert(xe, IS_SRIOV_VF(xe));

        if (!vf_ready_to_recovery_on_all_gts(xe))
                return;

        WRITE_ONCE(xe->sriov.vf.migration.gt_flags, 0);
        /* make sure other threads see that no flags are set now */
        smp_mb();

        started = queue_work(xe->sriov.wq, &xe->sriov.vf.migration.worker);
        drm_info(&xe->drm, "VF migration recovery %s\n",
                 started ? "scheduled" : "already in progress");
}

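/*
 * Sketch, not part of the file above: how a per-GT "VF migrated" notification
 * handler could flag its GT and kick the device-level recovery. The handler
 * name is hypothetical; only the set_bit()/gt_flags handshake with
 * xe_sriov_vf_start_migration_recovery() is implied by the code above.
 */
static void vf_migrated_notification_sketch(struct xe_gt *gt)
{
        struct xe_device *xe = gt_to_xe(gt);

        /* mark this GT as ready; recovery is queued once all GTs have reported */
        set_bit(gt->info.id, &xe->sriov.vf.migration.gt_flags);

        xe_sriov_vf_start_migration_recovery(xe);
}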