/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#ifndef MLX5_VFIO_CMD_H
#define MLX5_VFIO_CMD_H

#include <linux/kernel.h>
#include <linux/vfio_pci_core.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>

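/* True when the device advertises VFIO_MIGRATION_PRE_COPY in its migration flags */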
#define MLX5VF_PRE_COPY_SUPP(mvdev) \
	((mvdev)->core_device.vdev.migration_flags & VFIO_MIGRATION_PRE_COPY)

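/* Progress/error states of a migration file (migf) */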
enum mlx5_vf_migf_state {
	MLX5_MIGF_STATE_ERROR = 1,
	MLX5_MIGF_STATE_PRE_COPY_ERROR,
	MLX5_MIGF_STATE_PRE_COPY,
	MLX5_MIGF_STATE_SAVE_LAST,
	MLX5_MIGF_STATE_COMPLETE,
};

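/* Steps for parsing the incoming migration stream on the resuming side */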
enum mlx5_vf_load_state {
	MLX5_VF_LOAD_STATE_READ_IMAGE_NO_HEADER,
	MLX5_VF_LOAD_STATE_READ_HEADER,
	MLX5_VF_LOAD_STATE_PREP_HEADER_DATA,
	MLX5_VF_LOAD_STATE_READ_HEADER_DATA,
	MLX5_VF_LOAD_STATE_PREP_IMAGE,
	MLX5_VF_LOAD_STATE_READ_IMAGE,
	MLX5_VF_LOAD_STATE_LOAD_IMAGE,
};

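/* Payload of a MLX5_MIGF_HEADER_TAG_STOP_COPY_SIZE record: the expected stop-copy size */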
struct mlx5_vf_migration_tag_stop_copy_data {
	__le64 stop_copy_size;
};

enum mlx5_vf_migf_header_flags {
	MLX5_MIGF_HEADER_FLAGS_TAG_MANDATORY = 0,
	MLX5_MIGF_HEADER_FLAGS_TAG_OPTIONAL = 1 << 0,
};

enum mlx5_vf_migf_header_tag {
	MLX5_MIGF_HEADER_TAG_FW_DATA = 0,
	MLX5_MIGF_HEADER_TAG_STOP_COPY_SIZE = 1 << 0,
};

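/*
 * Preamble of each record in the migration data stream (when headers are in
 * use); record_size bytes of data follow the header.
 */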
struct mlx5_vf_migration_header {
	__le64 record_size;
	/* For future use, in case the kernel protocol needs to change */
	__le32 flags; /* Use mlx5_vf_migf_header_flags */
	__le32 tag; /* Use mlx5_vf_migf_header_tag */
	__u8 data[]; /* Its size is given by record_size */
};

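/*
 * Scatter-gather buffer holding migration data; registered with the device
 * via an mkey so it can be the target of DMA.
 */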
struct mlx5_vhca_data_buffer {
	struct sg_append_table table;
	loff_t start_pos;
	u64 length;
	u64 allocated_length;
	u32 mkey;
	enum dma_data_direction dma_dir;
	u8 dmaed:1;
	struct list_head buf_elm;
	struct mlx5_vf_migration_file *migf;
	/* Optimize mlx5vf_get_migration_page() for sequential access */
	struct scatterlist *last_offset_sg;
	unsigned int sg_last_entry;
	unsigned long last_offset;
};

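/* Tracks an in-flight asynchronous save command and the buffers it fills */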
struct mlx5vf_async_data {
	struct mlx5_async_work cb_work;
	struct work_struct work;
	struct mlx5_vhca_data_buffer *buf;
	struct mlx5_vhca_data_buffer *header_buf;
	int status;
	u8 last_chunk:1;
	void *out;
};

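/* State behind one migration file descriptor (saving or resuming side) */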
struct mlx5_vf_migration_file {
	struct file *filp;
	struct mutex lock;
	enum mlx5_vf_migf_state state;

	enum mlx5_vf_load_state load_state;
	u32 pdn;
	loff_t max_pos;
	u64 record_size;
	u32 record_tag;
	u64 stop_copy_prep_size;
	u64 pre_copy_initial_bytes;
	struct mlx5_vhca_data_buffer *buf;
	struct mlx5_vhca_data_buffer *buf_header;
	spinlock_t list_lock;
	struct list_head buf_list;
	struct list_head avail_list;
	struct mlx5vf_pci_core_device *mvdev;
	wait_queue_head_t poll_wait;
	struct completion save_comp;
	struct mlx5_async_ctx async_ctx;
	struct mlx5vf_async_data async_data;
};

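/*
 * The completion queue, receive buffer and queue pair structures below are
 * used by the dirty page tracker.
 */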
struct mlx5_vhca_cq_buf {
	struct mlx5_frag_buf_ctrl fbc;
	struct mlx5_frag_buf frag_buf;
	int cqe_size;
	int nent;
};

struct mlx5_vhca_cq {
	struct mlx5_vhca_cq_buf buf;
	struct mlx5_db db;
	struct mlx5_core_cq mcq;
	size_t ncqe;
};

struct mlx5_vhca_recv_buf {
	u32 npages;
	struct page **page_list;
	dma_addr_t *dma_addrs;
	u32 next_rq_offset;
	u32 mkey;
};

struct mlx5_vhca_qp {
	struct mlx5_frag_buf buf;
	struct mlx5_db db;
	struct mlx5_vhca_recv_buf recv_buf;
	u32 tracked_page_size;
	u32 max_msg_size;
	u32 qpn;
	struct {
		unsigned int pc;
		unsigned int cc;
		unsigned int wqe_cnt;
		__be32 *db;
		struct mlx5_frag_buf_ctrl fbc;
	} rq;
};

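/* Dirty page tracking context: the firmware tracker object and its CQ/QPs */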
struct mlx5_vhca_page_tracker {
	u32 id;
	u32 pdn;
	u8 is_err:1;
	struct mlx5_uars_page *uar;
	struct mlx5_vhca_cq cq;
	struct mlx5_vhca_qp *host_qp;
	struct mlx5_vhca_qp *fw_qp;
	struct mlx5_nb nb;
	int status;
};

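/* Per-device state of the mlx5 vfio-pci variant driver */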
struct mlx5vf_pci_core_device {
	struct vfio_pci_core_device core_device;
	int vf_id;
	u16 vhca_id;
	u8 migrate_cap:1;
	u8 deferred_reset:1;
	u8 mdev_detach:1;
	u8 log_active:1;
	struct completion tracker_comp;
	/* protect migration state */
	struct mutex state_mutex;
	enum vfio_device_mig_state mig_state;
	/* protect the reset_done flow */
	spinlock_t reset_lock;
	struct mlx5_vf_migration_file *resuming_migf;
	struct mlx5_vf_migration_file *saving_migf;
	struct mlx5_vhca_page_tracker tracker;
	struct workqueue_struct *cb_wq;
	struct notifier_block nb;
	struct mlx5_core_dev *mdev;
};

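/* query_flags for mlx5vf_cmd_query_vhca_migration_state() */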
enum {
	MLX5VF_QUERY_INC = (1UL << 0),
	MLX5VF_QUERY_FINAL = (1UL << 1),
};

int mlx5vf_cmd_suspend_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod);
int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod);
int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev,
					  size_t *state_size, u8 query_flags);
void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev,
			       const struct vfio_migration_ops *mig_ops,
			       const struct vfio_log_ops *log_ops);
void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev);
void mlx5vf_cmd_close_migratable(struct mlx5vf_pci_core_device *mvdev);
int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
			       struct mlx5_vf_migration_file *migf,
			       struct mlx5_vhca_data_buffer *buf, bool inc,
			       bool track);
int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev,
			       struct mlx5_vf_migration_file *migf,
			       struct mlx5_vhca_data_buffer *buf);
int mlx5vf_cmd_alloc_pd(struct mlx5_vf_migration_file *migf);
void mlx5vf_cmd_dealloc_pd(struct mlx5_vf_migration_file *migf);
void mlx5fv_cmd_clean_migf_resources(struct mlx5_vf_migration_file *migf);
struct mlx5_vhca_data_buffer *
mlx5vf_alloc_data_buffer(struct mlx5_vf_migration_file *migf,
			 size_t length, enum dma_data_direction dma_dir);
void mlx5vf_free_data_buffer(struct mlx5_vhca_data_buffer *buf);
struct mlx5_vhca_data_buffer *
mlx5vf_get_data_buffer(struct mlx5_vf_migration_file *migf,
		       size_t length, enum dma_data_direction dma_dir);
void mlx5vf_put_data_buffer(struct mlx5_vhca_data_buffer *buf);
int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf,
			       unsigned int npages);
struct page *mlx5vf_get_migration_page(struct mlx5_vhca_data_buffer *buf,
				       unsigned long offset);
void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev);
void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev);
void mlx5vf_mig_file_cleanup_cb(struct work_struct *_work);
int mlx5vf_start_page_tracker(struct vfio_device *vdev,
		struct rb_root_cached *ranges, u32 nnodes, u64 *page_size);
int mlx5vf_stop_page_tracker(struct vfio_device *vdev);
int mlx5vf_tracker_read_and_clear(struct vfio_device *vdev, unsigned long iova,
			unsigned long length, struct iova_bitmap *dirty);
#endif /* MLX5_VFIO_CMD_H */