// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <drm/drm_managed.h>

#include "xe_assert.h"
#include "xe_device.h"
#include "xe_gt_sriov_printk.h"
#include "xe_sriov.h"
#include "xe_sriov_printk.h"
#include "xe_sriov_vf.h"

static void migration_worker_func(struct work_struct *w);

/**
 * xe_sriov_vf_init_early - Initialize SR-IOV VF specific data.
 * @xe: the &xe_device to initialize
 */
void xe_sriov_vf_init_early(struct xe_device *xe)
{
	INIT_WORK(&xe->sriov.vf.migration.worker, migration_worker_func);
}

static void vf_post_migration_recovery(struct xe_device *xe)
{
	drm_dbg(&xe->drm, "migration recovery in progress\n");
	/* FIXME: add the recovery steps */
	drm_notice(&xe->drm, "migration recovery ended\n");
}

static void migration_worker_func(struct work_struct *w)
{
	struct xe_device *xe = container_of(w, struct xe_device,
					    sriov.vf.migration.worker);

	vf_post_migration_recovery(xe);
}

/*
 * Recovery must not start until every GT has flagged, via its bit in
 * gt_flags, that it is ready to be recovered after migration.
 */
static bool vf_ready_to_recovery_on_all_gts(struct xe_device *xe)
{
	struct xe_gt *gt;
	unsigned int id;

	for_each_gt(gt, xe, id) {
		if (!test_bit(id, &xe->sriov.vf.migration.gt_flags)) {
			xe_gt_sriov_dbg_verbose(gt, "still not ready to recover\n");
			return false;
		}
	}
	return true;
}

/**
 * xe_sriov_vf_start_migration_recovery - Start VF migration recovery.
 * @xe: the &xe_device to start recovery on
 *
 * This function shall be called only by VF.
 */
void xe_sriov_vf_start_migration_recovery(struct xe_device *xe)
{
	bool started;

	xe_assert(xe, IS_SRIOV_VF(xe));

	if (!vf_ready_to_recovery_on_all_gts(xe))
		return;

	/* All GTs are ready; consume their readiness flags in one go. */
	WRITE_ONCE(xe->sriov.vf.migration.gt_flags, 0);
	/* Ensure other threads see that no flags are set now. */
	smp_mb();

	started = queue_work(xe->sriov.wq, &xe->sriov.vf.migration.worker);
	drm_info(&xe->drm, "VF migration recovery %s\n",
		 started ? "scheduled" : "already in progress");
}
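
/*
 * Illustrative sketch (an assumption about the surrounding driver, not part
 * of this file): the gt_flags bits tested by vf_ready_to_recovery_on_all_gts()
 * are expected to be set by a per-GT handler that runs when the GT learns the
 * VF was migrated, and which then tries to kick off the recovery. The handler
 * name below is hypothetical; gt_to_xe() and gt->info.id are assumed helpers
 * from elsewhere in the driver:
 *
 *	void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt)
 *	{
 *		struct xe_device *xe = gt_to_xe(gt);
 *
 *		// Mark this GT as ready for post-migration recovery.
 *		set_bit(gt->info.id, &xe->sriov.vf.migration.gt_flags);
 *		// Pairs with the smp_mb() in
 *		// xe_sriov_vf_start_migration_recovery(), so that if all
 *		// bits are set, at least one caller sees them all and
 *		// schedules the worker.
 *		smp_mb__after_atomic();
 *
 *		xe_sriov_vf_start_migration_recovery(xe);
 *	}
 */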