Lines matching refs: mvdev — each entry gives the source line number, the matching code, and the enclosing function; the trailing "local" and "argument" tags mark entries where mvdev is a local variable or a function parameter.

303 queue_work(migf->mvdev->cb_wq, in mlx5vf_mig_file_set_save_work()
336 struct mlx5vf_pci_core_device *mvdev = migf->mvdev; in mlx5vf_mig_file_save_work() local
339 mutex_lock(&mvdev->state_mutex); in mlx5vf_mig_file_save_work()
349 if (mlx5vf_cmd_save_vhca_state(mvdev, migf, buf, true, false)) in mlx5vf_mig_file_save_work()
359 mlx5vf_state_mutex_unlock(mvdev); in mlx5vf_mig_file_save_work()
408 static int mlx5vf_prep_stop_copy(struct mlx5vf_pci_core_device *mvdev, in mlx5vf_prep_stop_copy() argument
419 if (mvdev->chunk_mode) { in mlx5vf_prep_stop_copy()
438 num_chunks = mvdev->chunk_mode ? MAX_NUM_CHUNKS : 1; in mlx5vf_prep_stop_copy()
454 if (mvdev->chunk_mode) { in mlx5vf_prep_stop_copy()
487 struct mlx5vf_pci_core_device *mvdev = migf->mvdev; in mlx5vf_precopy_ioctl() local
507 mutex_lock(&mvdev->state_mutex); in mlx5vf_precopy_ioctl()
508 if (mvdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY && in mlx5vf_precopy_ioctl()
509 mvdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY_P2P) { in mlx5vf_precopy_ioctl()
519 if (mvdev->mig_state == VFIO_DEVICE_STATE_PRE_COPY) { in mlx5vf_precopy_ioctl()
525 ret = mlx5vf_cmd_query_vhca_migration_state(mvdev, &inc_length, in mlx5vf_precopy_ioctl()
563 ret = mlx5vf_cmd_save_vhca_state(mvdev, migf, buf, true, true); in mlx5vf_precopy_ioctl()
571 mlx5vf_state_mutex_unlock(mvdev); in mlx5vf_precopy_ioctl()
579 mlx5vf_state_mutex_unlock(mvdev); in mlx5vf_precopy_ioctl()
592 static int mlx5vf_pci_save_device_inc_data(struct mlx5vf_pci_core_device *mvdev) in mlx5vf_pci_save_device_inc_data() argument
594 struct mlx5_vf_migration_file *migf = mvdev->saving_migf; in mlx5vf_pci_save_device_inc_data()
602 ret = mlx5vf_cmd_query_vhca_migration_state(mvdev, &length, NULL, in mlx5vf_pci_save_device_inc_data()
613 ret = mlx5vf_cmd_save_vhca_state(mvdev, migf, buf, true, false); in mlx5vf_pci_save_device_inc_data()
627 mlx5vf_pci_save_device_data(struct mlx5vf_pci_core_device *mvdev, bool track) in mlx5vf_pci_save_device_data() argument
647 migf->mvdev = mvdev; in mlx5vf_pci_save_device_data()
658 mlx5_cmd_init_async_ctx(mvdev->mdev, &migf->async_ctx); in mlx5vf_pci_save_device_data()
668 ret = mlx5vf_cmd_query_vhca_migration_state(mvdev, &length, &full_size, 0); in mlx5vf_pci_save_device_data()
672 ret = mlx5vf_prep_stop_copy(mvdev, migf, length, full_size, track); in mlx5vf_pci_save_device_data()
689 ret = mlx5vf_cmd_save_vhca_state(mvdev, migf, buf, false, track); in mlx5vf_pci_save_device_data()
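Read in order, the mlx5vf_pci_save_device_data() references above sketch the save-side setup: the migration file takes a back-pointer to the device (line 647), an async command context is initialized (658), the device is queried for its state size (668), stop-copy buffers are prepared (672), and the initial save is issued (689). Below is a minimal user-space sketch of that ordering only; every type and helper is a hypothetical stand-in, not the driver's API:

#include <stdio.h>
#include <stdlib.h>

struct vf_dev { int id; };          /* stand-in for mlx5vf_pci_core_device */
struct mig_file {                   /* stand-in for mlx5_vf_migration_file */
    struct vf_dev *dev;
    size_t buf_len;
};

/* Stubs standing in for the mlx5vf_cmd_* helpers named above. */
static int query_state_size(struct vf_dev *d, size_t *len) { (void)d; *len = 4096; return 0; }
static int prep_stop_copy(struct mig_file *f, size_t len) { f->buf_len = len; return 0; }
static int save_state(struct vf_dev *d, struct mig_file *f) { (void)d; (void)f; return 0; }

static struct mig_file *save_device_data(struct vf_dev *dev)
{
    struct mig_file *migf = calloc(1, sizeof(*migf));
    size_t length;

    if (!migf)
        return NULL;
    migf->dev = dev;                     /* back-pointer, cf. line 647 */
    if (query_state_size(dev, &length))  /* cf. line 668 */
        goto err;
    if (prep_stop_copy(migf, length))    /* cf. line 672 */
        goto err;
    if (save_state(dev, migf))           /* first save, cf. line 689 */
        goto err;
    return migf;
err:
    free(migf);
    return NULL;
}

int main(void)
{
    struct vf_dev dev = { .id = 1 };
    struct mig_file *migf = save_device_data(&dev);

    printf("save setup %s\n", migf ? "ok" : "failed");
    free(migf);
    return 0;
}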
902 mutex_lock(&migf->mvdev->state_mutex); in mlx5vf_resume_write()
974 ret = mlx5vf_cmd_load_vhca_state(migf->mvdev, migf, vhca_buf); in mlx5vf_resume_write()
994 mlx5vf_state_mutex_unlock(migf->mvdev); in mlx5vf_resume_write()
1005 mlx5vf_pci_resume_device_data(struct mlx5vf_pci_core_device *mvdev) in mlx5vf_pci_resume_device_data() argument
1028 migf->mvdev = mvdev; in mlx5vf_pci_resume_device_data()
1060 void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev, in mlx5vf_disable_fds() argument
1063 if (mvdev->resuming_migf) { in mlx5vf_disable_fds()
1064 mlx5vf_disable_fd(mvdev->resuming_migf); in mlx5vf_disable_fds()
1065 mlx5fv_cmd_clean_migf_resources(mvdev->resuming_migf); in mlx5vf_disable_fds()
1066 fput(mvdev->resuming_migf->filp); in mlx5vf_disable_fds()
1067 mvdev->resuming_migf = NULL; in mlx5vf_disable_fds()
1069 if (mvdev->saving_migf) { in mlx5vf_disable_fds()
1070 mlx5_cmd_cleanup_async_ctx(&mvdev->saving_migf->async_ctx); in mlx5vf_disable_fds()
1071 cancel_work_sync(&mvdev->saving_migf->async_data.work); in mlx5vf_disable_fds()
1073 *last_save_state = mvdev->saving_migf->state; in mlx5vf_disable_fds()
1074 mlx5vf_disable_fd(mvdev->saving_migf); in mlx5vf_disable_fds()
1075 wake_up_interruptible(&mvdev->saving_migf->poll_wait); in mlx5vf_disable_fds()
1076 mlx5fv_cmd_clean_migf_resources(mvdev->saving_migf); in mlx5vf_disable_fds()
1077 fput(mvdev->saving_migf->filp); in mlx5vf_disable_fds()
1078 mvdev->saving_migf = NULL; in mlx5vf_disable_fds()
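The mlx5vf_disable_fds() references show a strict teardown order for the saving file: quiesce async command completions and the work item first (1070-1071), optionally record the final file state (1073), mark the file disabled (1074), wake pollers so blocked readers observe it (1075), free per-file resources (1076), and only then drop the file reference and clear the pointer (1077-1078). A hedged user-space model of that ordering, with stubs standing in for the kernel primitives:

#include <stdio.h>
#include <stdlib.h>

struct mig_file { int state; int disabled; };
struct vf_dev { struct mig_file *saving; };

static void cancel_async_work(struct mig_file *f) { (void)f; } /* cf. lines 1070-1071 */
static void disable_fd(struct mig_file *f) { f->disabled = 1; }
static void wake_pollers(struct mig_file *f) { (void)f; }      /* cf. line 1075 */
static void clean_resources(struct mig_file *f) { (void)f; }   /* cf. line 1076 */
static void put_file(struct mig_file *f) { free(f); }          /* cf. fput(), line 1077 */

static void disable_fds(struct vf_dev *dev, int *last_state)
{
    if (!dev->saving)
        return;
    cancel_async_work(dev->saving);       /* no async callbacks past this point */
    if (last_state)
        *last_state = dev->saving->state; /* cf. line 1073 */
    disable_fd(dev->saving);
    wake_pollers(dev->saving);            /* blocked readers see the disabled file */
    clean_resources(dev->saving);
    put_file(dev->saving);
    dev->saving = NULL;                   /* cf. line 1078 */
}

int main(void)
{
    struct vf_dev dev = { .saving = calloc(1, sizeof(struct mig_file)) };
    int last = -1;

    disable_fds(&dev, &last);
    printf("last state %d, saving=%p\n", last, (void *)dev.saving);
    return 0;
}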
1083 mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev, in mlx5vf_pci_step_device_state_locked() argument
1086 u32 cur = mvdev->mig_state; in mlx5vf_pci_step_device_state_locked()
1090 ret = mlx5vf_cmd_suspend_vhca(mvdev, in mlx5vf_pci_step_device_state_locked()
1098 ret = mlx5vf_cmd_resume_vhca(mvdev, in mlx5vf_pci_step_device_state_locked()
1107 ret = mlx5vf_cmd_suspend_vhca(mvdev, in mlx5vf_pci_step_device_state_locked()
1116 ret = mlx5vf_cmd_resume_vhca(mvdev, in mlx5vf_pci_step_device_state_locked()
1126 migf = mlx5vf_pci_save_device_data(mvdev, false); in mlx5vf_pci_step_device_state_locked()
1130 mvdev->saving_migf = migf; in mlx5vf_pci_step_device_state_locked()
1135 mlx5vf_disable_fds(mvdev, NULL); in mlx5vf_pci_step_device_state_locked()
1142 struct mlx5_vf_migration_file *migf = mvdev->saving_migf; in mlx5vf_pci_step_device_state_locked()
1147 ret = mlx5vf_cmd_query_vhca_migration_state(mvdev, &size, NULL, in mlx5vf_pci_step_device_state_locked()
1155 ret = mlx5vf_cmd_save_vhca_state(mvdev, migf, buf, false, false); in mlx5vf_pci_step_device_state_locked()
1160 mlx5vf_disable_fds(mvdev, &state); in mlx5vf_pci_step_device_state_locked()
1167 migf = mlx5vf_pci_resume_device_data(mvdev); in mlx5vf_pci_step_device_state_locked()
1171 mvdev->resuming_migf = migf; in mlx5vf_pci_step_device_state_locked()
1176 mlx5vf_disable_fds(mvdev, NULL); in mlx5vf_pci_step_device_state_locked()
1185 migf = mlx5vf_pci_save_device_data(mvdev, true); in mlx5vf_pci_step_device_state_locked()
1189 mvdev->saving_migf = migf; in mlx5vf_pci_step_device_state_locked()
1194 ret = mlx5vf_cmd_suspend_vhca(mvdev, in mlx5vf_pci_step_device_state_locked()
1198 ret = mlx5vf_pci_save_device_inc_data(mvdev); in mlx5vf_pci_step_device_state_locked()
1213 void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev) in mlx5vf_state_mutex_unlock() argument
1216 spin_lock(&mvdev->reset_lock); in mlx5vf_state_mutex_unlock()
1217 if (mvdev->deferred_reset) { in mlx5vf_state_mutex_unlock()
1218 mvdev->deferred_reset = false; in mlx5vf_state_mutex_unlock()
1219 spin_unlock(&mvdev->reset_lock); in mlx5vf_state_mutex_unlock()
1220 mvdev->mig_state = VFIO_DEVICE_STATE_RUNNING; in mlx5vf_state_mutex_unlock()
1221 mlx5vf_disable_fds(mvdev, NULL); in mlx5vf_state_mutex_unlock()
1224 mutex_unlock(&mvdev->state_mutex); in mlx5vf_state_mutex_unlock()
1225 spin_unlock(&mvdev->reset_lock); in mlx5vf_state_mutex_unlock()
1232 struct mlx5vf_pci_core_device *mvdev = container_of( in mlx5vf_pci_set_device_state() local
1238 mutex_lock(&mvdev->state_mutex); in mlx5vf_pci_set_device_state()
1239 while (new_state != mvdev->mig_state) { in mlx5vf_pci_set_device_state()
1240 ret = vfio_mig_get_next_state(vdev, mvdev->mig_state, in mlx5vf_pci_set_device_state()
1246 res = mlx5vf_pci_step_device_state_locked(mvdev, next_state); in mlx5vf_pci_set_device_state()
1249 mvdev->mig_state = next_state; in mlx5vf_pci_set_device_state()
1250 if (WARN_ON(res && new_state != mvdev->mig_state)) { in mlx5vf_pci_set_device_state()
1256 mlx5vf_state_mutex_unlock(mvdev); in mlx5vf_pci_set_device_state()
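mlx5vf_pci_set_device_state() never jumps directly to the requested state: it loops, asking vfio_mig_get_next_state() for the next intermediate state and applying one arc at a time until mvdev->mig_state reaches the target (lines 1239-1249). Below is a hedged, runnable model of that loop; the four states and the next_state() table are simplified stand-ins for the real VFIO migration FSM:

#include <stdio.h>

enum state { RUNNING, RUNNING_P2P, STOP, STOP_COPY };

/* Simplified next-hop table; the real arcs come from vfio_mig_get_next_state(). */
static int next_state(enum state cur, enum state target, enum state *next)
{
    if (cur == target)
        return -1;                       /* no arc needed */
    if (cur == RUNNING)
        *next = RUNNING_P2P;
    else if (cur == RUNNING_P2P)
        *next = (target == RUNNING) ? RUNNING : STOP;
    else if (cur == STOP)
        *next = (target == STOP_COPY) ? STOP_COPY : RUNNING_P2P;
    else /* STOP_COPY */
        *next = STOP;
    return 0;
}

static int step_locked(enum state next)
{
    printf("step -> %d\n", next);        /* cf. line 1246 */
    return 0;
}

static int set_state(enum state *cur, enum state target)
{
    while (*cur != target) {             /* cf. line 1239 */
        enum state next;

        if (next_state(*cur, target, &next))  /* cf. line 1240 */
            return -1;
        if (step_locked(next))
            return -1;                   /* on error, the current state is kept */
        *cur = next;                     /* commit one arc, cf. line 1249 */
    }
    return 0;
}

int main(void)
{
    enum state cur = RUNNING;

    return set_state(&cur, STOP_COPY);
}

Stepping one arc at a time keeps error handling simple: if any single transition fails, mig_state still names a valid state the device is actually in.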
1263 struct mlx5vf_pci_core_device *mvdev = container_of( in mlx5vf_pci_get_data_size() local
1269 mutex_lock(&mvdev->state_mutex); in mlx5vf_pci_get_data_size()
1270 ret = mlx5vf_cmd_query_vhca_migration_state(mvdev, &state_size, in mlx5vf_pci_get_data_size()
1274 mlx5vf_state_mutex_unlock(mvdev); in mlx5vf_pci_get_data_size()
1281 struct mlx5vf_pci_core_device *mvdev = container_of( in mlx5vf_pci_get_device_state() local
1284 mutex_lock(&mvdev->state_mutex); in mlx5vf_pci_get_device_state()
1285 *curr_state = mvdev->mig_state; in mlx5vf_pci_get_device_state()
1286 mlx5vf_state_mutex_unlock(mvdev); in mlx5vf_pci_get_device_state()
1292 struct mlx5vf_pci_core_device *mvdev = mlx5vf_drvdata(pdev); in mlx5vf_pci_aer_reset_done() local
1294 if (!mvdev->migrate_cap) in mlx5vf_pci_aer_reset_done()
1304 spin_lock(&mvdev->reset_lock); in mlx5vf_pci_aer_reset_done()
1305 mvdev->deferred_reset = true; in mlx5vf_pci_aer_reset_done()
1306 if (!mutex_trylock(&mvdev->state_mutex)) { in mlx5vf_pci_aer_reset_done()
1307 spin_unlock(&mvdev->reset_lock); in mlx5vf_pci_aer_reset_done()
1310 spin_unlock(&mvdev->reset_lock); in mlx5vf_pci_aer_reset_done()
1311 mlx5vf_state_mutex_unlock(mvdev); in mlx5vf_pci_aer_reset_done()
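Together, mlx5vf_state_mutex_unlock() (lines 1216-1225) and mlx5vf_pci_aer_reset_done() (lines 1304-1311) implement the driver's deferred-reset pattern: the reset handler may not sleep on state_mutex, so it records the reset under the reset_lock spinlock and trylocks the mutex; if the trylock fails, whichever thread currently holds the mutex replays the reset when it unlocks. A hedged user-space model using pthread locks in place of the kernel primitives (build with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t state_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_spinlock_t reset_lock;
static bool deferred_reset;
static int mig_state = 1;                 /* nonzero: some non-RUNNING state */

static void state_mutex_unlock(void)      /* cf. lines 1216-1225 */
{
again:
    pthread_spin_lock(&reset_lock);
    if (deferred_reset) {
        deferred_reset = false;
        pthread_spin_unlock(&reset_lock);
        /* The driver also tears down migration files here (cf. line 1221). */
        mig_state = 0;                    /* back to RUNNING, cf. line 1220 */
        goto again;                       /* re-check before really unlocking */
    }
    pthread_mutex_unlock(&state_mutex);
    pthread_spin_unlock(&reset_lock);
}

static void reset_done(void)              /* cf. lines 1304-1311 */
{
    pthread_spin_lock(&reset_lock);
    deferred_reset = true;                /* cf. line 1305 */
    if (pthread_mutex_trylock(&state_mutex)) {
        /* Mutex held elsewhere; its owner replays the reset on unlock. */
        pthread_spin_unlock(&reset_lock);
        return;
    }
    pthread_spin_unlock(&reset_lock);
    state_mutex_unlock();                 /* we own the mutex: replay now */
}

int main(void)
{
    pthread_spin_init(&reset_lock, PTHREAD_PROCESS_PRIVATE);
    reset_done();
    printf("mig_state=%d (0 == RUNNING)\n", mig_state);
    return 0;
}

The trylock is what breaks the would-be deadlock: the spin-then-mutex path never blocks, while the unlock path takes the locks in the opposite order.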
1316 struct mlx5vf_pci_core_device *mvdev = container_of( in mlx5vf_pci_open_device() local
1318 struct vfio_pci_core_device *vdev = &mvdev->core_device; in mlx5vf_pci_open_device()
1325 if (mvdev->migrate_cap) in mlx5vf_pci_open_device()
1326 mvdev->mig_state = VFIO_DEVICE_STATE_RUNNING; in mlx5vf_pci_open_device()
1333 struct mlx5vf_pci_core_device *mvdev = container_of( in mlx5vf_pci_close_device() local
1336 mlx5vf_cmd_close_migratable(mvdev); in mlx5vf_pci_close_device()
1354 struct mlx5vf_pci_core_device *mvdev = container_of(core_vdev, in mlx5vf_pci_init_dev() local
1362 mlx5vf_cmd_set_migratable(mvdev, &mlx5vf_pci_mig_ops, in mlx5vf_pci_init_dev()
1370 struct mlx5vf_pci_core_device *mvdev = container_of(core_vdev, in mlx5vf_pci_release_dev() local
1373 mlx5vf_cmd_remove_migratable(mvdev); in mlx5vf_pci_release_dev()
1399 struct mlx5vf_pci_core_device *mvdev; in mlx5vf_pci_probe() local
1402 mvdev = vfio_alloc_device(mlx5vf_pci_core_device, core_device.vdev, in mlx5vf_pci_probe()
1404 if (IS_ERR(mvdev)) in mlx5vf_pci_probe()
1405 return PTR_ERR(mvdev); in mlx5vf_pci_probe()
1407 dev_set_drvdata(&pdev->dev, &mvdev->core_device); in mlx5vf_pci_probe()
1408 ret = vfio_pci_core_register_device(&mvdev->core_device); in mlx5vf_pci_probe()
1414 vfio_put_device(&mvdev->core_device.vdev); in mlx5vf_pci_probe()
1420 struct mlx5vf_pci_core_device *mvdev = mlx5vf_drvdata(pdev); in mlx5vf_pci_remove() local
1422 vfio_pci_core_unregister_device(&mvdev->core_device); in mlx5vf_pci_remove()
1423 vfio_put_device(&mvdev->core_device.vdev); in mlx5vf_pci_remove()
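Finally, the probe/remove references show the usual register-or-put lifecycle: allocate the wrapping device (1402), stash it as drvdata (1407), register the core device (1408), and unwind with a put on failure (1414); remove unregisters and drops the final reference (1422-1423). A hedged sketch with illustrative stand-ins:

#include <stdio.h>
#include <stdlib.h>

struct core_dev { int registered; };
struct vf_dev { struct core_dev core; };

static struct vf_dev *g_drvdata;          /* stands in for dev_set_drvdata() */

static int register_core(struct core_dev *c) { c->registered = 1; return 0; }
static void unregister_core(struct core_dev *c) { c->registered = 0; }
static void put_dev(struct vf_dev *d) { free(d); } /* final reference drop */

static int probe_dev(void)
{
    struct vf_dev *dev = calloc(1, sizeof(*dev)); /* cf. vfio_alloc_device(), line 1402 */
    int ret;

    if (!dev)
        return -1;
    g_drvdata = dev;                      /* cf. line 1407 */
    ret = register_core(&dev->core);      /* cf. line 1408 */
    if (ret) {
        g_drvdata = NULL;
        put_dev(dev);                     /* unwind, cf. line 1414 */
        return ret;
    }
    return 0;
}

static void remove_dev(void)
{
    struct vf_dev *dev = g_drvdata;       /* cf. mlx5vf_drvdata(), line 1420 */

    unregister_core(&dev->core);          /* cf. line 1422 */
    put_dev(dev);                         /* cf. line 1423 */
    g_drvdata = NULL;
}

int main(void)
{
    if (probe_dev())
        return 1;
    remove_dev();
    puts("probe/remove ok");
    return 0;
}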