// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <drm/drm_managed.h>

#include "xe_assert.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_sriov_printk.h"
#include "xe_gt_sriov_vf.h"
#include "xe_guc_ct.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_sriov_printk.h"
#include "xe_sriov_vf.h"
#include "xe_tile_sriov_vf.h"

/**
 * DOC: VF restore procedure in PF KMD and VF KMD
 *
 * Restoring previously saved state of a VF is one of the core features of
 * SR-IOV. All major VM Management applications allow saving and restoring
 * the VM state, and doing that to a VM which uses an SRIOV VF as one of
 * the accessible devices requires support from KMD on both the PF and the
 * VF side. The VMM initiates all required operations through the VFIO
 * module, which then translates them into PF KMD calls. This description
 * focuses on these calls, leaving out the module which initiates these
 * steps (VFIO).
 *
 * In order to start the restore procedure, GuC needs to keep the VF in the
 * proper state. The PF driver can ensure GuC sets it to the VF_READY state
 * by provisioning the VF, which in turn can be done after a Function Level
 * Reset of said VF (or after it was freshly created - in that case FLR
 * is not needed). The FLR procedure ends with GuC sending the
 * `GUC_PF_NOTIFY_VF_FLR_DONE` message, after which the provisioning data is
 * sent to GuC. Once the provisioning is completed, the VF needs to be
 * paused, and at that point the actual restore can begin.
 *
 * During VF Restore, the state of several resources is restored. These may
 * include local memory content (system memory is restored by the VMM
 * itself), values of MMIO registers, stateless compression metadata and
 * others. The final resource which also needs restoring is the state of VF
 * submission maintained within GuC. For that, the
 * `GUC_PF_OPCODE_VF_RESTORE` message is used, with a reference to the state
 * blob to be consumed by GuC.
 *
 * Next, when VFIO is asked to set the VM into running state, the PF driver
 * sends `GUC_PF_TRIGGER_VF_RESUME` to GuC. When sent after restore, this
 * changes the VF state within GuC to `VF_RESFIX_BLOCKED` rather than the
 * usual `VF_RUNNING`. At this point GuC triggers an interrupt to inform
 * the VF KMD within the VM that it was migrated.
 *
 * As soon as the Virtual GPU of the VM starts, the VF driver within it
 * receives the MIGRATED interrupt and schedules the post-migration recovery
 * worker. That worker queries GuC for the new provisioning (using MMIO
 * communication) and applies fixups to any non-virtualized resources used
 * by the VF.
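 *
 * In this file those fixups currently cover the GGTT: the recovery worker
 * shifts the VF's GGTT nodes into the newly provisioned GGTT range (see
 * vf_post_migration_fixup_ggtt_nodes()) and rewrites GGTT references in CTB
 * messages that were queued before the migration (see
 * vf_post_migration_fixup_ctb()).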
 *
 * When the VF driver is ready to continue operation on the newly connected
 * hardware, it sends `VF2GUC_NOTIFY_RESFIX_DONE`, which causes the VF to
 * enter the long-awaited `VF_RUNNING` state; from that point GuC handles
 * CTB messages and schedules workloads from the VF::
 *
 *     PF                              GuC                             VF
 *     [ ]                              |                               |
 *     [ ] PF2GUC_VF_CONTROL(pause)     |                               |
 *     [ ]---------------------------> [ ]                              |
 *     [ ]                             [ ]  GuC sets new VF state to    |
 *     [ ]                             [ ]------- VF_READY_PAUSED       |
 *     [ ]                             [ ]      |                       |
 *     [ ]                             [ ] <-----                       |
 *     [ ]          success            [ ]                              |
 *     [ ] <---------------------------[ ]                              |
 *     [ ]                              |                               |
 *     [ ] PF loads resources from the  |                               |
 *     [ ]------- saved image supplied  |                               |
 *     [ ]      |                       |                               |
 *     [ ] <-----                       |                               |
 *     [ ]                              |                               |
 *     [ ] GUC_PF_OPCODE_VF_RESTORE     |                               |
 *     [ ]---------------------------> [ ]                              |
 *     [ ]                             [ ]  GuC loads contexts and CTB  |
 *     [ ]                             [ ]------- state from image      |
 *     [ ]                             [ ]      |                       |
 *     [ ]                             [ ] <-----                       |
 *     [ ]                             [ ]                              |
 *     [ ]                             [ ]  GuC sets new VF state to    |
 *     [ ]                             [ ]------- VF_RESFIX_PAUSED      |
 *     [ ]                             [ ]      |                       |
 *     [ ]          success            [ ] <-----                       |
 *     [ ] <---------------------------[ ]                              |
 *     [ ]                              |                               |
 *     [ ] GUC_PF_TRIGGER_VF_RESUME     |                               |
 *     [ ]---------------------------> [ ]                              |
 *     [ ]                             [ ]  GuC sets new VF state to    |
 *     [ ]                             [ ]------- VF_RESFIX_BLOCKED     |
 *     [ ]                             [ ]      |                       |
 *     [ ]                             [ ] <-----                       |
 *     [ ]                             [ ]                              |
 *     [ ]                             [ ]  GUC_INTR_SW_INT_0           |
 *     [ ]          success            [ ]---------------------------> [ ]
 *     [ ] <---------------------------[ ]                             [ ]
 *      |                               |   VF2GUC_QUERY_SINGLE_KLV    [ ]
 *      |                              [ ] <---------------------------[ ]
 *      |                              [ ]                             [ ]
 *      |                              [ ]  new VF provisioning        [ ]
 *      |                              [ ]---------------------------> [ ]
 *      |                               |                              [ ]
 *      |                               |   VF driver applies post     [ ]
 *      |                               |      migration fixups -------[ ]
 *      |                               |                       |      [ ]
 *      |                               |                       -----> [ ]
 *      |                               |                              [ ]
 *      |                               |   VF2GUC_NOTIFY_RESFIX_DONE  [ ]
 *      |                              [ ] <---------------------------[ ]
 *      |                              [ ]                             [ ]
 *      |                              [ ]  GuC sets new VF state to   [ ]
 *      |                              [ ]------- VF_RUNNING           [ ]
 *      |                              [ ]      |                      [ ]
 *      |                              [ ] <-----                      [ ]
 *      |                              [ ]          success            [ ]
 *      |                              [ ]---------------------------> [ ]
 *      |                               |                               |
 *      |                               |                               |
 */

static bool vf_migration_supported(struct xe_device *xe)
{
	/*
	 * TODO: Add conditions to allow specific platforms, when they're
	 * supported at production quality.
	 */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG);
}

static void migration_worker_func(struct work_struct *w);

/**
 * xe_sriov_vf_init_early - Initialize SR-IOV VF specific data.
 * @xe: the &xe_device to initialize
 */
void xe_sriov_vf_init_early(struct xe_device *xe)
{
	INIT_WORK(&xe->sriov.vf.migration.worker, migration_worker_func);

	if (!vf_migration_supported(xe))
		xe_sriov_info(xe, "migration not supported by this module version\n");
}
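
/*
 * Illustrative sketch (not part of the driver): xe_sriov_vf_init_early() is
 * expected to be called once per device from the probe-time SR-IOV setup,
 * before any MIGRATED interrupt can arrive and queue the recovery worker.
 * The helper below is hypothetical and only shows the intended call-site
 * pattern; the actual call site in the driver may differ:
 *
 *	static void example_sriov_early_setup(struct xe_device *xe)
 *	{
 *		if (IS_SRIOV_VF(xe))
 *			xe_sriov_vf_init_early(xe);
 *	}
 */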

/**
 * vf_post_migration_requery_guc - Re-query GuC for current VF provisioning.
 * @xe: the &xe_device struct instance
 *
 * After migration, we need to re-query all VF configuration to make sure
 * it matches the previous provisioning. Note that most of the VF
 * provisioning is expected to be the same, except for the GGTT range,
 * since GGTT is not virtualized per-VF.
 *
 * Return: 0 if the operation completed successfully, or a negative error
 * code otherwise.
 */
static int vf_post_migration_requery_guc(struct xe_device *xe)
{
	struct xe_gt *gt;
	unsigned int id;
	int err, ret = 0;

	for_each_gt(gt, xe, id) {
		err = xe_gt_sriov_vf_query_config(gt);
		ret = ret ?: err;
	}

	return ret;
}

/* Fix up GGTT references in CTB messages that were queued before the migration. */
static void vf_post_migration_fixup_ctb(struct xe_device *xe)
{
	struct xe_gt *gt;
	unsigned int id;

	xe_assert(xe, IS_SRIOV_VF(xe));

	for_each_gt(gt, xe, id) {
		s32 shift = xe_gt_sriov_vf_ggtt_shift(gt);

		xe_guc_ct_fixup_messages_with_ggtt(&gt->uc.guc.ct, shift);
	}
}

/*
 * vf_post_migration_imminent - Check if post-restore recovery is coming.
 * @xe: the &xe_device struct instance
 *
 * Return: True if the migration recovery worker will soon be running. Any
 * worker currently executing does not affect the result.
 */
static bool vf_post_migration_imminent(struct xe_device *xe)
{
	return xe->sriov.vf.migration.gt_flags != 0 ||
	       work_pending(&xe->sriov.vf.migration.worker);
}

/*
 * Shift each tile's GGTT nodes by its post-migration GGTT offset.
 * Returns true if any fixups were applied.
 */
static bool vf_post_migration_fixup_ggtt_nodes(struct xe_device *xe)
{
	bool need_fixups = false;
	struct xe_tile *tile;
	unsigned int id;

	for_each_tile(tile, xe, id) {
		struct xe_gt *gt = tile->primary_gt;
		s64 shift;

		shift = xe_gt_sriov_vf_ggtt_shift(gt);
		if (shift) {
			need_fixups = true;
			xe_tile_sriov_vf_fixup_ggtt_nodes(tile, shift);
		}
	}
	return need_fixups;
}

/*
 * Notify all GuCs that the resource fixups have been applied.
 */
static void vf_post_migration_notify_resfix_done(struct xe_device *xe)
{
	struct xe_gt *gt;
	unsigned int id;

	for_each_gt(gt, xe, id) {
		if (vf_post_migration_imminent(xe))
			goto skip;
		xe_gt_sriov_vf_notify_resfix_done(gt);
	}
	return;

skip:
	drm_dbg(&xe->drm, "another recovery imminent, skipping notifications\n");
}

static void vf_post_migration_recovery(struct xe_device *xe)
{
	bool need_fixups;
	int err;

	drm_dbg(&xe->drm, "migration recovery in progress\n");
	xe_pm_runtime_get(xe);
	err = vf_post_migration_requery_guc(xe);
	if (vf_post_migration_imminent(xe))
		goto defer;
	if (unlikely(err))
		goto fail;
	if (!vf_migration_supported(xe)) {
		xe_sriov_err(xe, "migration not supported by this module version\n");
		err = -ENOTRECOVERABLE;
		goto fail;
	}

	need_fixups = vf_post_migration_fixup_ggtt_nodes(xe);
	/* FIXME: add the recovery steps */
	if (need_fixups)
		vf_post_migration_fixup_ctb(xe);

	vf_post_migration_notify_resfix_done(xe);
	xe_pm_runtime_put(xe);
	drm_notice(&xe->drm, "migration recovery ended\n");
	return;
defer:
	xe_pm_runtime_put(xe);
	drm_dbg(&xe->drm, "migration recovery deferred\n");
	return;
fail:
	xe_pm_runtime_put(xe);
	drm_err(&xe->drm, "migration recovery failed (%pe)\n", ERR_PTR(err));
	xe_device_declare_wedged(xe);
}

static void migration_worker_func(struct work_struct *w)
{
	struct xe_device *xe = container_of(w, struct xe_device,
					    sriov.vf.migration.worker);

	vf_post_migration_recovery(xe);
}

/* Check that every GT has flagged that it is ready for post-migration recovery. */
static bool vf_ready_to_recovery_on_all_gts(struct xe_device *xe)
{
	struct xe_gt *gt;
	unsigned int id;

	for_each_gt(gt, xe, id) {
		if (!test_bit(id, &xe->sriov.vf.migration.gt_flags)) {
			xe_gt_sriov_dbg_verbose(gt, "still not ready to recover\n");
			return false;
		}
	}
	return true;
}

/**
 * xe_sriov_vf_start_migration_recovery - Start VF migration recovery.
 * @xe: the &xe_device to start recovery on
 *
 * This function shall be called only by the VF driver.
 */
void xe_sriov_vf_start_migration_recovery(struct xe_device *xe)
{
	bool started;

	xe_assert(xe, IS_SRIOV_VF(xe));

	if (!vf_ready_to_recovery_on_all_gts(xe))
		return;

	WRITE_ONCE(xe->sriov.vf.migration.gt_flags, 0);
	/* Ensure other threads see that no flags are set now. */
	smp_mb();

	started = queue_work(xe->sriov.wq, &xe->sriov.vf.migration.worker);
	drm_info(&xe->drm, "VF migration recovery %s\n", started ?
		 "scheduled" : "already in progress");
}
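
/*
 * Illustrative sketch (not part of the driver): how a per-GT MIGRATED
 * interrupt handler is expected to use xe_sriov_vf_start_migration_recovery().
 * The handler name below is hypothetical; the real handler lives in the
 * GT-level SR-IOV VF code. The important pattern is setting this GT's bit in
 * gt_flags, with an ordering barrier, before attempting to start recovery, so
 * that vf_ready_to_recovery_on_all_gts() observes all flags once the last GT
 * reports in:
 *
 *	static void example_vf_migrated_irq_handler(struct xe_gt *gt)
 *	{
 *		struct xe_device *xe = gt_to_xe(gt);
 *
 *		xe_gt_assert(gt, IS_SRIOV_VF(xe));
 *
 *		set_bit(gt->info.id, &xe->sriov.vf.migration.gt_flags);
 *		smp_mb__after_atomic();
 *
 *		xe_sriov_vf_start_migration_recovery(xe);
 *	}
 */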