Lines matching refs: mvdev

Each entry below gives the file's own line number, the matched source line, and the enclosing function; "argument" and "local" mark the symbol's role where the indexer reported one.

36 _mlx5vf_free_page_tracker_resources(struct mlx5vf_pci_core_device *mvdev);
38 int mlx5vf_cmd_suspend_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod) in mlx5vf_cmd_suspend_vhca() argument
40 struct mlx5_vf_migration_file *migf = mvdev->saving_migf; in mlx5vf_cmd_suspend_vhca()
45 lockdep_assert_held(&mvdev->state_mutex); in mlx5vf_cmd_suspend_vhca()
46 if (mvdev->mdev_detach) in mlx5vf_cmd_suspend_vhca()
62 MLX5_SET(suspend_vhca_in, in, vhca_id, mvdev->vhca_id); in mlx5vf_cmd_suspend_vhca()
65 err = mlx5_cmd_exec_inout(mvdev->mdev, suspend_vhca, in, out); in mlx5vf_cmd_suspend_vhca()
72 int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod) in mlx5vf_cmd_resume_vhca() argument
77 lockdep_assert_held(&mvdev->state_mutex); in mlx5vf_cmd_resume_vhca()
78 if (mvdev->mdev_detach) in mlx5vf_cmd_resume_vhca()
82 MLX5_SET(resume_vhca_in, in, vhca_id, mvdev->vhca_id); in mlx5vf_cmd_resume_vhca()
85 return mlx5_cmd_exec_inout(mvdev->mdev, resume_vhca, in, out); in mlx5vf_cmd_resume_vhca()
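
Both VHCA state-machine commands above follow the driver's standard locked-command shape: assert state_mutex, fail fast if the parent mlx5 device has detached, build the inbox with MLX5_SET(), and execute against mvdev->mdev. A minimal sketch of the resume path, with the lines the listing elides filled in from standard mlx5 command boilerplate (the -ENOTCONN errno and the inbox/outbox sizing are assumptions, not shown in the matched lines):

	int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod)
	{
		u32 out[MLX5_ST_SZ_DW(resume_vhca_out)] = {};
		u32 in[MLX5_ST_SZ_DW(resume_vhca_in)] = {};

		/* Callers hold the migration state mutex (line 77). */
		lockdep_assert_held(&mvdev->state_mutex);
		if (mvdev->mdev_detach)
			return -ENOTCONN;	/* assumed errno for a detached mdev */

		MLX5_SET(resume_vhca_in, in, opcode, MLX5_CMD_OP_RESUME_VHCA);
		MLX5_SET(resume_vhca_in, in, vhca_id, mvdev->vhca_id);
		MLX5_SET(resume_vhca_in, in, op_mod, op_mod);

		return mlx5_cmd_exec_inout(mvdev->mdev, resume_vhca, in, out);
	}

The suspend variant (lines 40-65) differs in that line 40 also fetches saving_migf, presumably to serialize with an in-flight asynchronous save before issuing the command.
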
88 int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev, in mlx5vf_cmd_query_vhca_migration_state() argument
97 lockdep_assert_held(&mvdev->state_mutex); in mlx5vf_cmd_query_vhca_migration_state()
98 if (mvdev->mdev_detach) in mlx5vf_cmd_query_vhca_migration_state()
108 ret = wait_for_completion_interruptible(&mvdev->saving_migf->save_comp); in mlx5vf_cmd_query_vhca_migration_state()
112 if (mvdev->saving_migf->state == MLX5_MIGF_STATE_PRE_COPY_ERROR && in mlx5vf_cmd_query_vhca_migration_state()
120 complete(&mvdev->saving_migf->save_comp); in mlx5vf_cmd_query_vhca_migration_state()
126 if (mvdev->saving_migf->state == MLX5_MIGF_STATE_ERROR) { in mlx5vf_cmd_query_vhca_migration_state()
127 complete(&mvdev->saving_migf->save_comp); in mlx5vf_cmd_query_vhca_migration_state()
134 MLX5_SET(query_vhca_migration_state_in, in, vhca_id, mvdev->vhca_id); in mlx5vf_cmd_query_vhca_migration_state()
138 MLX5_SET(query_vhca_migration_state_in, in, chunk, mvdev->chunk_mode); in mlx5vf_cmd_query_vhca_migration_state()
140 ret = mlx5_cmd_exec_inout(mvdev->mdev, query_vhca_migration_state, in, in mlx5vf_cmd_query_vhca_migration_state()
143 complete(&mvdev->saving_migf->save_comp); in mlx5vf_cmd_query_vhca_migration_state()
151 *total_size = mvdev->chunk_mode ? in mlx5vf_cmd_query_vhca_migration_state()
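
The query path (lines 108-143) must coexist with an asynchronous SAVE command that may still be running, so it waits on saving_migf->save_comp and then calls complete() again on every exit path, error or success; the completion is used as a binary-semaphore-style token rather than a one-shot event. A hypothetical condensed helper illustrating that convention (the helper name, the -ENODEV errno, and the omitted MLX5_MIGF_STATE_PRE_COPY_ERROR branch are illustrative assumptions, not the function's actual structure):

	static int mlx5vf_query_locked(struct mlx5vf_pci_core_device *mvdev,
				       u32 *in, u32 *out)
	{
		struct mlx5_vf_migration_file *migf = mvdev->saving_migf;
		int ret;

		/* Consume the token; an in-flight async save holds it. */
		ret = wait_for_completion_interruptible(&migf->save_comp);
		if (ret)
			return ret;

		if (migf->state == MLX5_MIGF_STATE_ERROR) {
			complete(&migf->save_comp);	/* hand the token back */
			return -ENODEV;			/* assumed errno */
		}

		ret = mlx5_cmd_exec_inout(mvdev->mdev, query_vhca_migration_state,
					  in, out);
		complete(&migf->save_comp);	/* released on success and failure alike */
		return ret;
	}

Dropping any one of those complete() calls would deadlock the next SAVE or QUERY, which is why lines 120, 127 and 143 all re-complete before returning.
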
158 static void set_tracker_change_event(struct mlx5vf_pci_core_device *mvdev) in set_tracker_change_event() argument
160 mvdev->tracker.object_changed = true; in set_tracker_change_event()
161 complete(&mvdev->tracker_comp); in set_tracker_change_event()
164 static void set_tracker_error(struct mlx5vf_pci_core_device *mvdev) in set_tracker_error() argument
167 mvdev->tracker.is_err = true; in set_tracker_error()
168 complete(&mvdev->tracker_comp); in set_tracker_error()
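
These two helpers are the producer side of the page-tracker wakeup protocol: each records a reason flag in mvdev->tracker and completes tracker_comp, on which mlx5vf_tracker_read_and_clear() sleeps (line 1782). Reconstructed from the matched lines, modulo anything the listing elides (line 166 is not shown):

	static void set_tracker_change_event(struct mlx5vf_pci_core_device *mvdev)
	{
		mvdev->tracker.object_changed = true;
		complete(&mvdev->tracker_comp);
	}

	static void set_tracker_error(struct mlx5vf_pci_core_device *mvdev)
	{
		/* Line 166 is elided above; the real body may carry a
		 * comment or guard here. */
		mvdev->tracker.is_err = true;
		complete(&mvdev->tracker_comp);
	}

Setting the flag before complete() matters: the waiter re-checks is_err/object_changed after waking, so this order guarantees it never misses the reason for the wakeup.
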
174 struct mlx5vf_pci_core_device *mvdev = in mlx5fv_vf_event() local
179 mutex_lock(&mvdev->state_mutex); in mlx5fv_vf_event()
180 mvdev->mdev_detach = false; in mlx5fv_vf_event()
181 mlx5vf_state_mutex_unlock(mvdev); in mlx5fv_vf_event()
184 mlx5vf_cmd_close_migratable(mvdev); in mlx5fv_vf_event()
185 mutex_lock(&mvdev->state_mutex); in mlx5fv_vf_event()
186 mvdev->mdev_detach = true; in mlx5fv_vf_event()
187 mlx5vf_state_mutex_unlock(mvdev); in mlx5fv_vf_event()
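
mlx5fv_vf_event() is the SR-IOV blocking-notifier callback wired up in mlx5vf_cmd_set_migratable() (lines 258-260). Its job is to flip mdev_detach under state_mutex so that every command path's detach check (lines 46, 78, 98, ...) stays coherent, and to tear down migration state before the VF is disabled. A sketch assuming the standard container_of() recovery of mvdev and mlx5's MLX5_PF_NOTIFY_ENABLE_VF/MLX5_PF_NOTIFY_DISABLE_VF events (the event names are an assumption; the listing shows only the lock/flag/unlock bodies):

	static int mlx5fv_vf_event(struct notifier_block *nb,
				   unsigned long event, void *data)
	{
		struct mlx5vf_pci_core_device *mvdev =
			container_of(nb, struct mlx5vf_pci_core_device, nb);

		switch (event) {
		case MLX5_PF_NOTIFY_ENABLE_VF:	/* assumed event name */
			mutex_lock(&mvdev->state_mutex);
			mvdev->mdev_detach = false;	/* parent mdev usable again */
			mlx5vf_state_mutex_unlock(mvdev);
			break;
		case MLX5_PF_NOTIFY_DISABLE_VF:	/* assumed event name */
			mlx5vf_cmd_close_migratable(mvdev);
			mutex_lock(&mvdev->state_mutex);
			mvdev->mdev_detach = true;	/* commands now fail fast */
			mlx5vf_state_mutex_unlock(mvdev);
			break;
		default:
			break;
		}

		return 0;
	}

Note the ordering on the disable side: close_migratable runs before mdev_detach is set (lines 184 vs. 186), since freeing the tracker resources still needs working device commands.
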
196 void mlx5vf_cmd_close_migratable(struct mlx5vf_pci_core_device *mvdev) in mlx5vf_cmd_close_migratable() argument
198 if (!mvdev->migrate_cap) in mlx5vf_cmd_close_migratable()
202 set_tracker_error(mvdev); in mlx5vf_cmd_close_migratable()
203 mutex_lock(&mvdev->state_mutex); in mlx5vf_cmd_close_migratable()
204 mlx5vf_disable_fds(mvdev, NULL); in mlx5vf_cmd_close_migratable()
205 _mlx5vf_free_page_tracker_resources(mvdev); in mlx5vf_cmd_close_migratable()
206 mlx5vf_state_mutex_unlock(mvdev); in mlx5vf_cmd_close_migratable()
209 void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev) in mlx5vf_cmd_remove_migratable() argument
211 if (!mvdev->migrate_cap) in mlx5vf_cmd_remove_migratable()
214 mlx5_sriov_blocking_notifier_unregister(mvdev->mdev, mvdev->vf_id, in mlx5vf_cmd_remove_migratable()
215 &mvdev->nb); in mlx5vf_cmd_remove_migratable()
216 destroy_workqueue(mvdev->cb_wq); in mlx5vf_cmd_remove_migratable()
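
The teardown responsibilities split cleanly: close_migratable quiesces runtime state while the parent device is still usable, and remove_migratable unregisters what set_migratable installed. Firing set_tracker_error() before taking state_mutex is deliberate: it wakes a reader parked on tracker_comp so that it can drop the lock. Sketch, with the early-return guards taken from lines 198 and 211:

	void mlx5vf_cmd_close_migratable(struct mlx5vf_pci_core_device *mvdev)
	{
		if (!mvdev->migrate_cap)
			return;

		/* Wake any waiter stuck on tracker_comp before locking. */
		set_tracker_error(mvdev);
		mutex_lock(&mvdev->state_mutex);
		mlx5vf_disable_fds(mvdev, NULL);
		_mlx5vf_free_page_tracker_resources(mvdev);
		mlx5vf_state_mutex_unlock(mvdev);
	}

	void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev)
	{
		if (!mvdev->migrate_cap)
			return;

		mlx5_sriov_blocking_notifier_unregister(mvdev->mdev, mvdev->vf_id,
							&mvdev->nb);
		destroy_workqueue(mvdev->cb_wq);	/* drains pending callbacks */
	}
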
219 void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev, in mlx5vf_cmd_set_migratable() argument
223 struct pci_dev *pdev = mvdev->core_device.pdev; in mlx5vf_cmd_set_migratable()
229 mvdev->mdev = mlx5_vf_get_core_dev(pdev); in mlx5vf_cmd_set_migratable()
230 if (!mvdev->mdev) in mlx5vf_cmd_set_migratable()
233 if (!MLX5_CAP_GEN(mvdev->mdev, migration)) in mlx5vf_cmd_set_migratable()
236 if (!(MLX5_CAP_GEN_2(mvdev->mdev, migration_multi_load) && in mlx5vf_cmd_set_migratable()
237 MLX5_CAP_GEN_2(mvdev->mdev, migration_tracking_state))) in mlx5vf_cmd_set_migratable()
240 mvdev->vf_id = pci_iov_vf_id(pdev); in mlx5vf_cmd_set_migratable()
241 if (mvdev->vf_id < 0) in mlx5vf_cmd_set_migratable()
244 ret = mlx5vf_is_migratable(mvdev->mdev, mvdev->vf_id + 1); in mlx5vf_cmd_set_migratable()
248 if (mlx5vf_cmd_get_vhca_id(mvdev->mdev, mvdev->vf_id + 1, in mlx5vf_cmd_set_migratable()
249 &mvdev->vhca_id)) in mlx5vf_cmd_set_migratable()
252 mvdev->cb_wq = alloc_ordered_workqueue("mlx5vf_wq", 0); in mlx5vf_cmd_set_migratable()
253 if (!mvdev->cb_wq) in mlx5vf_cmd_set_migratable()
256 mutex_init(&mvdev->state_mutex); in mlx5vf_cmd_set_migratable()
257 spin_lock_init(&mvdev->reset_lock); in mlx5vf_cmd_set_migratable()
258 mvdev->nb.notifier_call = mlx5fv_vf_event; in mlx5vf_cmd_set_migratable()
259 ret = mlx5_sriov_blocking_notifier_register(mvdev->mdev, mvdev->vf_id, in mlx5vf_cmd_set_migratable()
260 &mvdev->nb); in mlx5vf_cmd_set_migratable()
262 destroy_workqueue(mvdev->cb_wq); in mlx5vf_cmd_set_migratable()
266 mvdev->migrate_cap = 1; in mlx5vf_cmd_set_migratable()
267 mvdev->core_device.vdev.migration_flags = in mlx5vf_cmd_set_migratable()
272 mvdev->core_device.vdev.mig_ops = mig_ops; in mlx5vf_cmd_set_migratable()
273 init_completion(&mvdev->tracker_comp); in mlx5vf_cmd_set_migratable()
274 if (MLX5_CAP_GEN(mvdev->mdev, adv_virtualization)) in mlx5vf_cmd_set_migratable()
275 mvdev->core_device.vdev.log_ops = log_ops; in mlx5vf_cmd_set_migratable()
277 if (MLX5_CAP_GEN_2(mvdev->mdev, migration_in_chunks)) in mlx5vf_cmd_set_migratable()
278 mvdev->chunk_mode = 1; in mlx5vf_cmd_set_migratable()
281 mlx5_vf_put_core_dev(mvdev->mdev); in mlx5vf_cmd_set_migratable()
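
mlx5vf_cmd_set_migratable() is a straight-line capability gate: take a reference on the parent core device, check the firmware migration caps, resolve vf_id and vhca_id (device-side function IDs are 1-based, hence the + 1 on lines 244 and 248), then wire up the workqueue, locks, and notifier before advertising migrate_cap. A condensed sketch of the flow the matched lines trace; the function's exact signature, the migration_flags value (lines 267-271), and any guards on elided lines are assumptions:

	void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev,
				       const struct vfio_migration_ops *mig_ops,
				       const struct vfio_log_ops *log_ops)
	{
		struct pci_dev *pdev = mvdev->core_device.pdev;
		int ret;

		mvdev->mdev = mlx5_vf_get_core_dev(pdev);
		if (!mvdev->mdev)
			return;

		/* Firmware must support migration, multi-load and tracking state. */
		if (!MLX5_CAP_GEN(mvdev->mdev, migration))
			goto end;
		if (!(MLX5_CAP_GEN_2(mvdev->mdev, migration_multi_load) &&
		      MLX5_CAP_GEN_2(mvdev->mdev, migration_tracking_state)))
			goto end;

		mvdev->vf_id = pci_iov_vf_id(pdev);
		if (mvdev->vf_id < 0)
			goto end;

		ret = mlx5vf_is_migratable(mvdev->mdev, mvdev->vf_id + 1);
		if (ret)
			goto end;

		if (mlx5vf_cmd_get_vhca_id(mvdev->mdev, mvdev->vf_id + 1,
					   &mvdev->vhca_id))
			goto end;

		mvdev->cb_wq = alloc_ordered_workqueue("mlx5vf_wq", 0);
		if (!mvdev->cb_wq)
			goto end;

		mutex_init(&mvdev->state_mutex);
		spin_lock_init(&mvdev->reset_lock);
		mvdev->nb.notifier_call = mlx5fv_vf_event;
		ret = mlx5_sriov_blocking_notifier_register(mvdev->mdev,
							    mvdev->vf_id,
							    &mvdev->nb);
		if (ret) {
			destroy_workqueue(mvdev->cb_wq);
			goto end;
		}

		mvdev->migrate_cap = 1;
		/* migration_flags assignment (lines 267-271) elided here. */
		mvdev->core_device.vdev.mig_ops = mig_ops;
		init_completion(&mvdev->tracker_comp);
		if (MLX5_CAP_GEN(mvdev->mdev, adv_virtualization))
			mvdev->core_device.vdev.log_ops = log_ops;
		if (MLX5_CAP_GEN_2(mvdev->mdev, migration_in_chunks))
			mvdev->chunk_mode = 1;

	end:
		mlx5_vf_put_core_dev(mvdev->mdev);	/* dropped on all paths */
	}

The put on the end label runs on success as well: the driver drops its core-device reference and relies on the notifier above to learn when mvdev->mdev comes and goes.
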
370 struct mlx5vf_pci_core_device *mvdev = buf->migf->mvdev; in mlx5vf_dma_data_buffer() local
371 struct mlx5_core_dev *mdev = mvdev->mdev; in mlx5vf_dma_data_buffer()
374 lockdep_assert_held(&mvdev->state_mutex); in mlx5vf_dma_data_buffer()
375 if (mvdev->mdev_detach) in mlx5vf_dma_data_buffer()
402 lockdep_assert_held(&migf->mvdev->state_mutex); in mlx5vf_free_data_buffer()
403 WARN_ON(migf->mvdev->mdev_detach); in mlx5vf_free_data_buffer()
406 mlx5_core_destroy_mkey(migf->mvdev->mdev, buf->mkey); in mlx5vf_free_data_buffer()
407 dma_unmap_sgtable(migf->mvdev->mdev->device, &buf->table.sgt, in mlx5vf_free_data_buffer()
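
The data-buffer helpers repeat the same locking contract (lockdep assert plus a detach check or WARN), and teardown mirrors the mapping order of mlx5vf_dma_data_buffer(): destroy the mkey before unmapping the sgtable. A sketch of the free path; the dmaed flag and the page-freeing tail are assumptions standing in for elided lines:

	void mlx5vf_free_data_buffer(struct mlx5_vhca_data_buffer *buf)
	{
		struct mlx5_vf_migration_file *migf = buf->migf;

		lockdep_assert_held(&migf->mvdev->state_mutex);
		WARN_ON(migf->mvdev->mdev_detach);

		if (buf->dmaed) {	/* assumed "was DMA-mapped" flag */
			mlx5_core_destroy_mkey(migf->mvdev->mdev, buf->mkey);
			dma_unmap_sgtable(migf->mvdev->mdev->device,
					  &buf->table.sgt, buf->dma_dir, 0);
		}
		/* Releasing the backing pages and freeing buf is elided. */
	}
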
514 lockdep_assert_held(&migf->mvdev->state_mutex); in mlx5vf_get_data_buffer()
515 if (migf->mvdev->mdev_detach) in mlx5vf_get_data_buffer()
675 mlx5_cmd_out_err(migf->mvdev->mdev, MLX5_CMD_OP_SAVE_VHCA_STATE, 0, in mlx5vf_save_callback()
680 queue_work(migf->mvdev->cb_wq, &async_data->work); in mlx5vf_save_callback()
683 int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev, in mlx5vf_cmd_save_vhca_state() argument
695 lockdep_assert_held(&mvdev->state_mutex); in mlx5vf_cmd_save_vhca_state()
696 if (mvdev->mdev_detach) in mlx5vf_cmd_save_vhca_state()
717 MLX5_SET(save_vhca_state_in, in, vhca_id, mvdev->vhca_id); in mlx5vf_cmd_save_vhca_state()
774 int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev, in mlx5vf_cmd_load_vhca_state() argument
782 lockdep_assert_held(&mvdev->state_mutex); in mlx5vf_cmd_load_vhca_state()
783 if (mvdev->mdev_detach) in mlx5vf_cmd_load_vhca_state()
795 MLX5_SET(load_vhca_state_in, in, vhca_id, mvdev->vhca_id); in mlx5vf_cmd_load_vhca_state()
798 return mlx5_cmd_exec_inout(mvdev->mdev, load_vhca_state, in, out); in mlx5vf_cmd_load_vhca_state()
805 lockdep_assert_held(&migf->mvdev->state_mutex); in mlx5vf_cmd_alloc_pd()
806 if (migf->mvdev->mdev_detach) in mlx5vf_cmd_alloc_pd()
809 err = mlx5_core_alloc_pd(migf->mvdev->mdev, &migf->pdn); in mlx5vf_cmd_alloc_pd()
815 lockdep_assert_held(&migf->mvdev->state_mutex); in mlx5vf_cmd_dealloc_pd()
816 if (migf->mvdev->mdev_detach) in mlx5vf_cmd_dealloc_pd()
819 mlx5_core_dealloc_pd(migf->mvdev->mdev, migf->pdn); in mlx5vf_cmd_dealloc_pd()
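
The PD helpers wrap mlx5_core_alloc_pd()/mlx5_core_dealloc_pd() in the same state-mutex/detach contract as every other command path; dealloc simply returns when the mdev has detached, since the PD dies with the device anyway. Sketch, assuming the early-return bodies the listing elides:

	static int mlx5vf_cmd_alloc_pd(struct mlx5_vf_migration_file *migf)
	{
		int err;

		lockdep_assert_held(&migf->mvdev->state_mutex);
		if (migf->mvdev->mdev_detach)
			return -ENOTCONN;	/* assumed errno */

		err = mlx5_core_alloc_pd(migf->mvdev->mdev, &migf->pdn);
		return err;
	}

	static void mlx5vf_cmd_dealloc_pd(struct mlx5_vf_migration_file *migf)
	{
		lockdep_assert_held(&migf->mvdev->state_mutex);
		if (migf->mvdev->mdev_detach)
			return;		/* nothing to undo on a dead device */

		mlx5_core_dealloc_pd(migf->mvdev->mdev, migf->pdn);
	}
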
827 lockdep_assert_held(&migf->mvdev->state_mutex); in mlx5fv_cmd_clean_migf_resources()
828 WARN_ON(migf->mvdev->mdev_detach); in mlx5fv_cmd_clean_migf_resources()
854 struct mlx5vf_pci_core_device *mvdev, in mlx5vf_create_tracker() argument
859 struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker; in mlx5vf_create_tracker()
892 MLX5_SET(page_track, obj_context, vhca_id, mvdev->vhca_id); in mlx5vf_create_tracker()
1050 struct mlx5vf_pci_core_device *mvdev = container_of( in mlx5vf_event_notifier() local
1070 set_tracker_error(mvdev); in mlx5vf_event_notifier()
1076 set_tracker_change_event(mvdev); in mlx5vf_event_notifier()
1088 struct mlx5vf_pci_core_device *mvdev = in mlx5vf_cq_complete() local
1092 complete(&mvdev->tracker_comp); in mlx5vf_cq_complete()
1470 _mlx5vf_free_page_tracker_resources(struct mlx5vf_pci_core_device *mvdev) in _mlx5vf_free_page_tracker_resources() argument
1472 struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker; in _mlx5vf_free_page_tracker_resources()
1473 struct mlx5_core_dev *mdev = mvdev->mdev; in _mlx5vf_free_page_tracker_resources()
1475 lockdep_assert_held(&mvdev->state_mutex); in _mlx5vf_free_page_tracker_resources()
1477 if (!mvdev->log_active) in _mlx5vf_free_page_tracker_resources()
1480 WARN_ON(mvdev->mdev_detach); in _mlx5vf_free_page_tracker_resources()
1490 mvdev->log_active = false; in _mlx5vf_free_page_tracker_resources()
1495 struct mlx5vf_pci_core_device *mvdev = container_of( in mlx5vf_stop_page_tracker() local
1498 mutex_lock(&mvdev->state_mutex); in mlx5vf_stop_page_tracker()
1499 if (!mvdev->log_active) in mlx5vf_stop_page_tracker()
1502 _mlx5vf_free_page_tracker_resources(mvdev); in mlx5vf_stop_page_tracker()
1503 mvdev->log_active = false; in mlx5vf_stop_page_tracker()
1505 mlx5vf_state_mutex_unlock(mvdev); in mlx5vf_stop_page_tracker()
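
mlx5vf_stop_page_tracker() is the vfio_log_ops hook that undoes start: it recovers mvdev from the embedded vfio_device and only touches the tracker when logging is actually active. As the matched lines suggest (the goto-end shape and the return value are assumptions):

	static int mlx5vf_stop_page_tracker(struct vfio_device *vdev)
	{
		struct mlx5vf_pci_core_device *mvdev = container_of(
			vdev, struct mlx5vf_pci_core_device, core_device.vdev);

		mutex_lock(&mvdev->state_mutex);
		if (!mvdev->log_active)
			goto end;

		_mlx5vf_free_page_tracker_resources(mvdev);
		mvdev->log_active = false;
	end:
		mlx5vf_state_mutex_unlock(mvdev);
		return 0;
	}

The freeing helper asserts state_mutex itself (line 1475), which is why mlx5vf_cmd_close_migratable() likewise calls it only with the mutex held.
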
1513 struct mlx5vf_pci_core_device *mvdev = container_of( in mlx5vf_start_page_tracker() local
1515 struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker; in mlx5vf_start_page_tracker()
1526 mutex_lock(&mvdev->state_mutex); in mlx5vf_start_page_tracker()
1527 if (mvdev->mdev_detach) { in mlx5vf_start_page_tracker()
1532 if (mvdev->log_active) { in mlx5vf_start_page_tracker()
1537 mdev = mvdev->mdev; in mlx5vf_start_page_tracker()
1599 err = mlx5vf_create_tracker(mdev, mvdev, ranges, nnodes); in mlx5vf_start_page_tracker()
1606 mvdev->log_active = true; in mlx5vf_start_page_tracker()
1607 mlx5vf_state_mutex_unlock(mvdev); in mlx5vf_start_page_tracker()
1623 mlx5vf_state_mutex_unlock(mvdev); in mlx5vf_start_page_tracker()
1742 struct mlx5vf_pci_core_device *mvdev = container_of( in mlx5vf_tracker_read_and_clear() local
1744 struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker; in mlx5vf_tracker_read_and_clear()
1749 mutex_lock(&mvdev->state_mutex); in mlx5vf_tracker_read_and_clear()
1750 if (!mvdev->log_active) { in mlx5vf_tracker_read_and_clear()
1755 if (mvdev->mdev_detach) { in mlx5vf_tracker_read_and_clear()
1765 mdev = mvdev->mdev; in mlx5vf_tracker_read_and_clear()
1782 wait_for_completion(&mvdev->tracker_comp); in mlx5vf_tracker_read_and_clear()
1805 mlx5vf_state_mutex_unlock(mvdev); in mlx5vf_tracker_read_and_clear()
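
read_and_clear ties the pieces together: it validates log_active and mdev_detach under state_mutex, then drains the tracker CQ, parking on tracker_comp whenever there is nothing to reap; mlx5vf_cq_complete() (line 1092), set_tracker_error() and set_tracker_change_event() are the three producers of that completion. A loose sketch of the control flow under those assumptions, with mlx5vf_report_dirty() as a hypothetical stand-in for the real CQ-poll-and-report logic:

	static int mlx5vf_tracker_read_and_clear_sketch(
			struct mlx5vf_pci_core_device *mvdev, unsigned long iova,
			size_t length, struct iova_bitmap *dirty)
	{
		struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker;
		int err;

		mutex_lock(&mvdev->state_mutex);
		if (!mvdev->log_active) {
			err = -EINVAL;		/* assumed errno */
			goto end;
		}
		if (mvdev->mdev_detach) {
			err = -ENOTCONN;	/* assumed errno */
			goto end;
		}

		/* Hypothetical helper: polls the CQ, reports dirty pages,
		 * returns -EAGAIN when the CQ is empty. */
		err = mlx5vf_report_dirty(tracker, iova, length, dirty);
		while (err == -EAGAIN && !tracker->is_err) {
			/* Sleep until a CQE, an error, or an object-change
			 * event completes tracker_comp (line 1782). */
			wait_for_completion(&mvdev->tracker_comp);
			err = mlx5vf_report_dirty(tracker, iova, length, dirty);
		}
		if (tracker->is_err)
			err = -EIO;		/* assumed errno */
	end:
		mlx5vf_state_mutex_unlock(mvdev);
		return err;
	}
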