1bb208810SXin Zeng // SPDX-License-Identifier: GPL-2.0-only
2bb208810SXin Zeng /* Copyright(c) 2024 Intel Corporation */
3bb208810SXin Zeng
4bb208810SXin Zeng #include <linux/anon_inodes.h>
5bb208810SXin Zeng #include <linux/container_of.h>
6bb208810SXin Zeng #include <linux/device.h>
7bb208810SXin Zeng #include <linux/file.h>
8bb208810SXin Zeng #include <linux/init.h>
9bb208810SXin Zeng #include <linux/kernel.h>
10bb208810SXin Zeng #include <linux/module.h>
11bb208810SXin Zeng #include <linux/mutex.h>
12bb208810SXin Zeng #include <linux/pci.h>
13bb208810SXin Zeng #include <linux/sizes.h>
14bb208810SXin Zeng #include <linux/types.h>
15bb208810SXin Zeng #include <linux/uaccess.h>
16bb208810SXin Zeng #include <linux/vfio_pci_core.h>
17bb208810SXin Zeng #include <linux/qat/qat_mig_dev.h>
18bb208810SXin Zeng
/*
 * The migration data of each Intel QAT VF device is encapsulated into a
 * 4096 bytes block. The data consists of two parts.
 * The first is a pre-configured set of attributes of the VF being migrated,
 * which are only set when it is created. This can be migrated during the
 * pre-copy stage and used for a device compatibility check.
 * The second is the VF state. This includes the required MMIO regions and
 * the shadow states maintained by the QAT PF driver. This part can only be
 * saved when the VF is fully quiesced, and is migrated during the stop-copy
 * stage.
 * Both of these two parts of data are saved in hierarchical structures
 * including a preamble section and several raw state sections.
 * When the pre-configured part of the migration data is fully retrieved from
 * user space, the preamble section is used to validate the correctness of
 * the data blocks and check the version compatibility. The raw state sections
 * are then used to do a device compatibility check.
 * When the device transitions from the RESUMING state, the VF states are
 * extracted from the raw state sections of the VF state part of the migration
 * data and then loaded into the device.
 */
38bb208810SXin Zeng
/*
 * Per-fd context for one migration data stream (saving or resuming).
 * One instance backs each anon inode fd handed out to user space.
 */
struct qat_vf_migration_file {
	struct file *filp;			/* backing anon inode file */
	/* protects migration region context */
	struct mutex lock;
	bool disabled;				/* fd torn down; further I/O fails */
	struct qat_vf_core_device *qat_vdev;	/* VF this stream belongs to */
	ssize_t filled_size;			/* valid bytes in mdev->state */
};
47bb208810SXin Zeng
/* Driver-private per-VF state wrapped around the vfio-pci core device. */
struct qat_vf_core_device {
	struct vfio_pci_core_device core_device;
	struct qat_mig_dev *mdev;	/* migration handle from the QAT PF driver */
	/* protects migration state */
	struct mutex state_mutex;
	enum vfio_device_mig_state mig_state;		/* current VFIO migration state */
	struct qat_vf_migration_file *resuming_migf;	/* open RESUMING stream, if any */
	struct qat_vf_migration_file *saving_migf;	/* open saving stream, if any */
};
57bb208810SXin Zeng
qat_vf_pci_open_device(struct vfio_device * core_vdev)58bb208810SXin Zeng static int qat_vf_pci_open_device(struct vfio_device *core_vdev)
59bb208810SXin Zeng {
60bb208810SXin Zeng struct qat_vf_core_device *qat_vdev =
61bb208810SXin Zeng container_of(core_vdev, struct qat_vf_core_device,
62bb208810SXin Zeng core_device.vdev);
63bb208810SXin Zeng struct vfio_pci_core_device *vdev = &qat_vdev->core_device;
64bb208810SXin Zeng int ret;
65bb208810SXin Zeng
66bb208810SXin Zeng ret = vfio_pci_core_enable(vdev);
67bb208810SXin Zeng if (ret)
68bb208810SXin Zeng return ret;
69bb208810SXin Zeng
70bb208810SXin Zeng ret = qat_vfmig_open(qat_vdev->mdev);
71bb208810SXin Zeng if (ret) {
72bb208810SXin Zeng vfio_pci_core_disable(vdev);
73bb208810SXin Zeng return ret;
74bb208810SXin Zeng }
75bb208810SXin Zeng qat_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
76bb208810SXin Zeng
77bb208810SXin Zeng vfio_pci_core_finish_enable(vdev);
78bb208810SXin Zeng
79bb208810SXin Zeng return 0;
80bb208810SXin Zeng }
81bb208810SXin Zeng
/*
 * Mark a migration fd defunct: subsequent reads/writes/ioctls on it fail
 * with -ENODEV and its stream position and fill level are reset.
 */
static void qat_vf_disable_fd(struct qat_vf_migration_file *migf)
{
	mutex_lock(&migf->lock);
	migf->disabled = true;
	migf->filp->f_pos = 0;
	migf->filled_size = 0;
	mutex_unlock(&migf->lock);
}
90bb208810SXin Zeng
qat_vf_disable_fds(struct qat_vf_core_device * qat_vdev)91bb208810SXin Zeng static void qat_vf_disable_fds(struct qat_vf_core_device *qat_vdev)
92bb208810SXin Zeng {
93bb208810SXin Zeng if (qat_vdev->resuming_migf) {
94bb208810SXin Zeng qat_vf_disable_fd(qat_vdev->resuming_migf);
95bb208810SXin Zeng fput(qat_vdev->resuming_migf->filp);
96bb208810SXin Zeng qat_vdev->resuming_migf = NULL;
97bb208810SXin Zeng }
98bb208810SXin Zeng
99bb208810SXin Zeng if (qat_vdev->saving_migf) {
100bb208810SXin Zeng qat_vf_disable_fd(qat_vdev->saving_migf);
101bb208810SXin Zeng fput(qat_vdev->saving_migf->filp);
102bb208810SXin Zeng qat_vdev->saving_migf = NULL;
103bb208810SXin Zeng }
104bb208810SXin Zeng }
105bb208810SXin Zeng
/*
 * ->close_device(): close the migration handle, drop any migration fds
 * still referenced by the driver, then let the vfio-pci core disable the VF.
 */
static void qat_vf_pci_close_device(struct vfio_device *core_vdev)
{
	struct qat_vf_core_device *qat_vdev = container_of(core_vdev,
			struct qat_vf_core_device, core_device.vdev);

	qat_vfmig_close(qat_vdev->mdev);
	qat_vf_disable_fds(qat_vdev);
	vfio_pci_core_close_device(core_vdev);
}
115bb208810SXin Zeng
/*
 * VFIO_MIG_GET_PRECOPY_INFO handler for the saving fd: report how much of
 * the pre-configured (setup) part of the migration data remains to be read
 * while the device is in a pre-copy state.
 */
static long qat_vf_precopy_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
{
	struct qat_vf_migration_file *migf = filp->private_data;
	struct qat_vf_core_device *qat_vdev = migf->qat_vdev;
	struct qat_mig_dev *mig_dev = qat_vdev->mdev;
	struct vfio_precopy_info info;
	loff_t *pos = &filp->f_pos;
	unsigned long minsz;
	int ret = 0;

	if (cmd != VFIO_MIG_GET_PRECOPY_INFO)
		return -ENOTTY;

	minsz = offsetofend(struct vfio_precopy_info, dirty_bytes);

	if (copy_from_user(&info, (void __user *)arg, minsz))
		return -EFAULT;
	if (info.argsz < minsz)
		return -EINVAL;

	/* Only meaningful while the device is in one of the pre-copy states. */
	mutex_lock(&qat_vdev->state_mutex);
	if (qat_vdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY &&
	    qat_vdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY_P2P) {
		mutex_unlock(&qat_vdev->state_mutex);
		return -EINVAL;
	}

	/* migf->lock nests inside state_mutex here. */
	mutex_lock(&migf->lock);
	if (migf->disabled) {
		ret = -ENODEV;
		goto out;
	}

	if (*pos > mig_dev->setup_size) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Pre-copy only carries the fixed setup data (set at VF creation per
	 * the file header comment), so nothing ever becomes dirty.
	 */
	info.dirty_bytes = 0;
	info.initial_bytes = mig_dev->setup_size - *pos;

out:
	mutex_unlock(&migf->lock);
	mutex_unlock(&qat_vdev->state_mutex);
	if (ret)
		return ret;
	/* Copy out after dropping the locks; the user buffer may fault. */
	return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
}
165bb208810SXin Zeng
/*
 * ->read() for the saving migration fd. Streams out up to migf->filled_size
 * bytes of the saved data from the device state buffer; progress is tracked
 * through the implicit file position.
 */
static ssize_t qat_vf_save_read(struct file *filp, char __user *buf,
				size_t len, loff_t *pos)
{
	struct qat_vf_migration_file *migf = filp->private_data;
	struct qat_mig_dev *mig_dev = migf->qat_vdev->mdev;
	ssize_t done = 0;
	loff_t *offs;
	int ret;

	/* Stream fd: pread() with an explicit offset is not supported. */
	if (pos)
		return -ESPIPE;
	offs = &filp->f_pos;

	mutex_lock(&migf->lock);
	if (*offs > migf->filled_size || *offs < 0) {
		done = -EINVAL;
		goto out_unlock;
	}

	if (migf->disabled) {
		done = -ENODEV;
		goto out_unlock;
	}

	/* Clamp the request to the remaining valid data. */
	len = min_t(size_t, migf->filled_size - *offs, len);
	if (len) {
		ret = copy_to_user(buf, mig_dev->state + *offs, len);
		if (ret) {
			done = -EFAULT;
			goto out_unlock;
		}
		*offs += len;
		done = len;
	}

out_unlock:
	mutex_unlock(&migf->lock);
	return done;
}
205bb208810SXin Zeng
/*
 * ->release() shared by both migration fds: disable the stream and free the
 * per-fd context when the last file reference is dropped.
 */
static int qat_vf_release_file(struct inode *inode, struct file *filp)
{
	struct qat_vf_migration_file *migf = filp->private_data;

	qat_vf_disable_fd(migf);
	mutex_destroy(&migf->lock);
	kfree(migf);

	return 0;
}
216bb208810SXin Zeng
/* fops for the saving-side (pre-copy and stop-copy) migration fd. */
static const struct file_operations qat_vf_save_fops = {
	.owner = THIS_MODULE,
	.read = qat_vf_save_read,
	.unlocked_ioctl = qat_vf_precopy_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.release = qat_vf_release_file,
};
224bb208810SXin Zeng
qat_vf_save_state(struct qat_vf_core_device * qat_vdev,struct qat_vf_migration_file * migf)225bb208810SXin Zeng static int qat_vf_save_state(struct qat_vf_core_device *qat_vdev,
226bb208810SXin Zeng struct qat_vf_migration_file *migf)
227bb208810SXin Zeng {
228bb208810SXin Zeng int ret;
229bb208810SXin Zeng
230bb208810SXin Zeng ret = qat_vfmig_save_state(qat_vdev->mdev);
231bb208810SXin Zeng if (ret)
232bb208810SXin Zeng return ret;
233bb208810SXin Zeng migf->filled_size = qat_vdev->mdev->state_size;
234bb208810SXin Zeng
235bb208810SXin Zeng return 0;
236bb208810SXin Zeng }
237bb208810SXin Zeng
qat_vf_save_setup(struct qat_vf_core_device * qat_vdev,struct qat_vf_migration_file * migf)238bb208810SXin Zeng static int qat_vf_save_setup(struct qat_vf_core_device *qat_vdev,
239bb208810SXin Zeng struct qat_vf_migration_file *migf)
240bb208810SXin Zeng {
241bb208810SXin Zeng int ret;
242bb208810SXin Zeng
243bb208810SXin Zeng ret = qat_vfmig_save_setup(qat_vdev->mdev);
244bb208810SXin Zeng if (ret)
245bb208810SXin Zeng return ret;
246bb208810SXin Zeng migf->filled_size = qat_vdev->mdev->setup_size;
247bb208810SXin Zeng
248bb208810SXin Zeng return 0;
249bb208810SXin Zeng }
250bb208810SXin Zeng
251bb208810SXin Zeng /*
252bb208810SXin Zeng * Allocate a file handler for user space and then save the migration data for
253bb208810SXin Zeng * the device being migrated. If this is called in the pre-copy stage, save the
254bb208810SXin Zeng * pre-configured device data. Otherwise, if this is called in the stop-copy
255bb208810SXin Zeng * stage, save the device state. In both cases, update the data size which can
256bb208810SXin Zeng * then be read from user space.
257bb208810SXin Zeng */
static struct qat_vf_migration_file *
qat_vf_save_device_data(struct qat_vf_core_device *qat_vdev, bool pre_copy)
{
	struct qat_vf_migration_file *migf;
	int ret;

	migf = kzalloc(sizeof(*migf), GFP_KERNEL);
	if (!migf)
		return ERR_PTR(-ENOMEM);

	migf->filp = anon_inode_getfile("qat_vf_mig", &qat_vf_save_fops,
					migf, O_RDONLY);
	ret = PTR_ERR_OR_ZERO(migf->filp);
	if (ret) {
		kfree(migf);
		return ERR_PTR(ret);
	}

	/* Migration data is a stream; no llseek on this fd. */
	stream_open(migf->filp->f_inode, migf->filp);
	mutex_init(&migf->lock);

	/* Pre-copy saves the setup part; stop-copy saves the full VF state. */
	if (pre_copy)
		ret = qat_vf_save_setup(qat_vdev, migf);
	else
		ret = qat_vf_save_state(qat_vdev, migf);
	if (ret) {
		/* fput() frees migf through qat_vf_release_file(). */
		fput(migf->filp);
		return ERR_PTR(ret);
	}

	migf->qat_vdev = qat_vdev;

	return migf;
}
292bb208810SXin Zeng
/*
 * ->write() for the resuming migration fd. Accumulates incoming data in the
 * device state buffer; after each chunk the data received so far is handed
 * to qat_vfmig_load_setup() for a device compatibility check. -EAGAIN from
 * that call is not an error here (more data is still expected).
 */
static ssize_t qat_vf_resume_write(struct file *filp, const char __user *buf,
				   size_t len, loff_t *pos)
{
	struct qat_vf_migration_file *migf = filp->private_data;
	struct qat_mig_dev *mig_dev = migf->qat_vdev->mdev;
	loff_t end, *offs;
	ssize_t done = 0;
	int ret;

	/* Stream fd: pwrite() with an explicit offset is not supported. */
	if (pos)
		return -ESPIPE;
	offs = &filp->f_pos;

	/* Reject negative positions and len + *offs overflow. */
	if (*offs < 0 ||
	    check_add_overflow(len, *offs, &end))
		return -EOVERFLOW;

	/* Never write past the preallocated state buffer. */
	if (end > mig_dev->state_size)
		return -ENOMEM;

	mutex_lock(&migf->lock);
	if (migf->disabled) {
		done = -ENODEV;
		goto out_unlock;
	}

	ret = copy_from_user(mig_dev->state + *offs, buf, len);
	if (ret) {
		done = -EFAULT;
		goto out_unlock;
	}
	*offs += len;
	migf->filled_size += len;

	/*
	 * Load the pre-configured device data first to check if the target
	 * device is compatible with the source device.
	 */
	ret = qat_vfmig_load_setup(mig_dev, migf->filled_size);
	if (ret && ret != -EAGAIN) {
		done = ret;
		goto out_unlock;
	}
	done = len;

out_unlock:
	mutex_unlock(&migf->lock);
	return done;
}
342bb208810SXin Zeng
/* fops for the resuming-side migration fd. */
static const struct file_operations qat_vf_resume_fops = {
	.owner = THIS_MODULE,
	.write = qat_vf_resume_write,
	.release = qat_vf_release_file,
};
348bb208810SXin Zeng
349bb208810SXin Zeng static struct qat_vf_migration_file *
qat_vf_resume_device_data(struct qat_vf_core_device * qat_vdev)350bb208810SXin Zeng qat_vf_resume_device_data(struct qat_vf_core_device *qat_vdev)
351bb208810SXin Zeng {
352bb208810SXin Zeng struct qat_vf_migration_file *migf;
353bb208810SXin Zeng int ret;
354bb208810SXin Zeng
355bb208810SXin Zeng migf = kzalloc(sizeof(*migf), GFP_KERNEL);
356bb208810SXin Zeng if (!migf)
357bb208810SXin Zeng return ERR_PTR(-ENOMEM);
358bb208810SXin Zeng
359bb208810SXin Zeng migf->filp = anon_inode_getfile("qat_vf_mig", &qat_vf_resume_fops, migf, O_WRONLY);
360bb208810SXin Zeng ret = PTR_ERR_OR_ZERO(migf->filp);
361bb208810SXin Zeng if (ret) {
362bb208810SXin Zeng kfree(migf);
363bb208810SXin Zeng return ERR_PTR(ret);
364bb208810SXin Zeng }
365bb208810SXin Zeng
366bb208810SXin Zeng migf->qat_vdev = qat_vdev;
367bb208810SXin Zeng migf->filled_size = 0;
368bb208810SXin Zeng stream_open(migf->filp->f_inode, migf->filp);
369bb208810SXin Zeng mutex_init(&migf->lock);
370bb208810SXin Zeng
371bb208810SXin Zeng return migf;
372bb208810SXin Zeng }
373bb208810SXin Zeng
/* Load the fully received VF state from the state buffer into the device. */
static int qat_vf_load_device_data(struct qat_vf_core_device *qat_vdev)
{
	return qat_vfmig_load_state(qat_vdev->mdev);
}
378bb208810SXin Zeng
/*
 * Execute a single arc of the VFIO migration state machine from the current
 * state to @new. Returns the migration fd for arcs that create one, NULL for
 * arcs that don't, or an ERR_PTR on failure. Called with state_mutex held
 * (see qat_vf_pci_set_device_state()).
 */
static struct file *qat_vf_pci_step_device_state(struct qat_vf_core_device *qat_vdev, u32 new)
{
	u32 cur = qat_vdev->mig_state;
	int ret;

	/*
	 * As the device is not capable of just stopping P2P DMAs, suspend the
	 * device completely once any of the P2P states are reached.
	 * When it is suspended, all its MMIO registers can still be operated
	 * correctly, jobs submitted through ring are queued while no jobs are
	 * processed by the device. The MMIO states can be safely migrated to
	 * the target VF during stop-copy stage and restored correctly in the
	 * target VF. All queued jobs can be resumed then.
	 */
	if ((cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_RUNNING_P2P) ||
	    (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_PRE_COPY_P2P)) {
		ret = qat_vfmig_suspend(qat_vdev->mdev);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	/* Leaving a P2P state: let the device process queued jobs again. */
	if ((cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_RUNNING) ||
	    (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_PRE_COPY)) {
		qat_vfmig_resume(qat_vdev->mdev);
		return NULL;
	}

	/* STOP <-> RUNNING_P2P: no additional device action is needed. */
	if ((cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_STOP) ||
	    (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING_P2P))
		return NULL;

	/* STOP -> STOP_COPY: save the VF state and hand out a read fd. */
	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) {
		struct qat_vf_migration_file *migf;

		migf = qat_vf_save_device_data(qat_vdev, false);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		get_file(migf->filp);
		qat_vdev->saving_migf = migf;
		return migf->filp;
	}

	/* STOP -> RESUMING: hand out a write fd for the incoming data. */
	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) {
		struct qat_vf_migration_file *migf;

		migf = qat_vf_resume_device_data(qat_vdev);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		get_file(migf->filp);
		qat_vdev->resuming_migf = migf;
		return migf->filp;
	}

	/* Arcs that end a copy session: drop any open migration fds. */
	if ((cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP) ||
	    (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_RUNNING) ||
	    (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_RUNNING_P2P)) {
		qat_vf_disable_fds(qat_vdev);
		return NULL;
	}

	/* Entering pre-copy: save the setup part and hand out a read fd. */
	if ((cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_PRE_COPY) ||
	    (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_PRE_COPY_P2P)) {
		struct qat_vf_migration_file *migf;

		migf = qat_vf_save_device_data(qat_vdev, true);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		get_file(migf->filp);
		qat_vdev->saving_migf = migf;
		return migf->filp;
	}

	/*
	 * PRE_COPY_P2P -> STOP_COPY: save the VF state into the already open
	 * saving stream instead of creating a new fd.
	 */
	if (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_STOP_COPY) {
		struct qat_vf_migration_file *migf = qat_vdev->saving_migf;

		if (!migf)
			return ERR_PTR(-EINVAL);
		ret = qat_vf_save_state(qat_vdev, migf);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	/* RESUMING -> STOP: load the received state, then drop the fds. */
	if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
		ret = qat_vf_load_device_data(qat_vdev);
		if (ret)
			return ERR_PTR(ret);

		qat_vf_disable_fds(qat_vdev);
		return NULL;
	}

	/* vfio_mig_get_next_state() does not use arcs other than the above */
	WARN_ON(true);
	return ERR_PTR(-EINVAL);
}
476bb208810SXin Zeng
/*
 * Bring the migration state back to RUNNING after a device reset, reset the
 * PF-side migration handle and drop any in-flight migration fds. Called
 * with state_mutex held (see qat_vf_pci_aer_reset_done()).
 */
static void qat_vf_reset_done(struct qat_vf_core_device *qat_vdev)
{
	qat_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
	qat_vfmig_reset(qat_vdev->mdev);
	qat_vf_disable_fds(qat_vdev);
}
483bb208810SXin Zeng
/*
 * ->migration_set_state(): walk the state machine from the current state to
 * @new_state one arc at a time under state_mutex. Returns the migration fd
 * produced by the final arc (NULL if none), or an ERR_PTR on failure.
 */
static struct file *qat_vf_pci_set_device_state(struct vfio_device *vdev,
						enum vfio_device_mig_state new_state)
{
	struct qat_vf_core_device *qat_vdev = container_of(vdev,
			struct qat_vf_core_device, core_device.vdev);
	enum vfio_device_mig_state next_state;
	struct file *res = NULL;
	int ret;

	mutex_lock(&qat_vdev->state_mutex);
	while (new_state != qat_vdev->mig_state) {
		ret = vfio_mig_get_next_state(vdev, qat_vdev->mig_state,
					      new_state, &next_state);
		if (ret) {
			res = ERR_PTR(ret);
			break;
		}
		res = qat_vf_pci_step_device_state(qat_vdev, next_state);
		if (IS_ERR(res))
			break;
		qat_vdev->mig_state = next_state;
		/* Only the final arc of the walk may produce an fd. */
		if (WARN_ON(res && new_state != qat_vdev->mig_state)) {
			fput(res);
			res = ERR_PTR(-EINVAL);
			break;
		}
	}
	mutex_unlock(&qat_vdev->state_mutex);

	return res;
}
515bb208810SXin Zeng
/* ->migration_get_state(): report the current migration state. */
static int qat_vf_pci_get_device_state(struct vfio_device *vdev,
				       enum vfio_device_mig_state *curr_state)
{
	struct qat_vf_core_device *qat_vdev = container_of(vdev,
			struct qat_vf_core_device, core_device.vdev);

	mutex_lock(&qat_vdev->state_mutex);
	*curr_state = qat_vdev->mig_state;
	mutex_unlock(&qat_vdev->state_mutex);

	return 0;
}
528bb208810SXin Zeng
/*
 * ->migration_get_data_size(): the stop-copy data length is the size of the
 * device state maintained by the QAT PF driver.
 */
static int qat_vf_pci_get_data_size(struct vfio_device *vdev,
				    unsigned long *stop_copy_length)
{
	struct qat_vf_core_device *qat_vdev = container_of(vdev,
			struct qat_vf_core_device, core_device.vdev);

	mutex_lock(&qat_vdev->state_mutex);
	*stop_copy_length = qat_vdev->mdev->state_size;
	mutex_unlock(&qat_vdev->state_mutex);

	return 0;
}
541bb208810SXin Zeng
/* VFIO migration callbacks installed in qat_vf_pci_init_dev(). */
static const struct vfio_migration_ops qat_vf_pci_mig_ops = {
	.migration_set_state = qat_vf_pci_set_device_state,
	.migration_get_state = qat_vf_pci_get_device_state,
	.migration_get_data_size = qat_vf_pci_get_data_size,
};
547bb208810SXin Zeng
/*
 * ->release(): undo qat_vf_pci_init_dev() — tear down the migration handle
 * and hand the rest back to the vfio-pci core.
 */
static void qat_vf_pci_release_dev(struct vfio_device *core_vdev)
{
	struct qat_vf_core_device *qat_vdev = container_of(core_vdev,
			struct qat_vf_core_device, core_device.vdev);

	qat_vfmig_cleanup(qat_vdev->mdev);
	qat_vfmig_destroy(qat_vdev->mdev);
	mutex_destroy(&qat_vdev->state_mutex);
	vfio_pci_core_release_dev(core_vdev);
}
558bb208810SXin Zeng
/*
 * ->init(): advertise the supported migration features, initialize the
 * vfio-pci core device and create/initialize the migration handle with the
 * QAT PF driver. Fully unwinds on any failure.
 */
static int qat_vf_pci_init_dev(struct vfio_device *core_vdev)
{
	struct qat_vf_core_device *qat_vdev = container_of(core_vdev,
			struct qat_vf_core_device, core_device.vdev);
	struct qat_mig_dev *mdev;
	struct pci_dev *parent;
	int ret, vf_id;

	core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P |
				     VFIO_MIGRATION_PRE_COPY;
	core_vdev->mig_ops = &qat_vf_pci_mig_ops;

	ret = vfio_pci_core_init_dev(core_vdev);
	if (ret)
		return ret;

	mutex_init(&qat_vdev->state_mutex);

	/* The migration handle is created against the PF and the VF index. */
	parent = pci_physfn(qat_vdev->core_device.pdev);
	vf_id = pci_iov_vf_id(qat_vdev->core_device.pdev);
	if (vf_id < 0) {
		ret = -ENODEV;
		goto err_rel;
	}

	mdev = qat_vfmig_create(parent, vf_id);
	if (IS_ERR(mdev)) {
		ret = PTR_ERR(mdev);
		goto err_rel;
	}

	ret = qat_vfmig_init(mdev);
	if (ret)
		goto err_destroy;

	qat_vdev->mdev = mdev;

	return 0;

err_destroy:
	qat_vfmig_destroy(mdev);
err_rel:
	vfio_pci_core_release_dev(core_vdev);
	return ret;
}
604bb208810SXin Zeng
/*
 * vfio_device callbacks: migration-aware init/open/close plus the standard
 * vfio-pci core and iommufd helpers for everything else.
 */
static const struct vfio_device_ops qat_vf_pci_ops = {
	.name = "qat-vf-vfio-pci",
	.init = qat_vf_pci_init_dev,
	.release = qat_vf_pci_release_dev,
	.open_device = qat_vf_pci_open_device,
	.close_device = qat_vf_pci_close_device,
	.ioctl = vfio_pci_core_ioctl,
	.read = vfio_pci_core_read,
	.write = vfio_pci_core_write,
	.mmap = vfio_pci_core_mmap,
	.request = vfio_pci_core_request,
	.match = vfio_pci_core_match,
	.bind_iommufd = vfio_iommufd_physical_bind,
	.unbind_iommufd = vfio_iommufd_physical_unbind,
	.attach_ioas = vfio_iommufd_physical_attach_ioas,
	.detach_ioas = vfio_iommufd_physical_detach_ioas,
};
622bb208810SXin Zeng
qat_vf_drvdata(struct pci_dev * pdev)623bb208810SXin Zeng static struct qat_vf_core_device *qat_vf_drvdata(struct pci_dev *pdev)
624bb208810SXin Zeng {
625bb208810SXin Zeng struct vfio_pci_core_device *core_device = pci_get_drvdata(pdev);
626bb208810SXin Zeng
627bb208810SXin Zeng return container_of(core_device, struct qat_vf_core_device, core_device);
628bb208810SXin Zeng }
629bb208810SXin Zeng
/*
 * PCI error handler: after a reset, put migration back into the RUNNING
 * state. qat_vdev->mdev may still be NULL here — presumably when ->init()
 * has not completed yet (it is assigned last there); nothing to do then.
 */
static void qat_vf_pci_aer_reset_done(struct pci_dev *pdev)
{
	struct qat_vf_core_device *qat_vdev = qat_vf_drvdata(pdev);

	if (!qat_vdev->mdev)
		return;

	mutex_lock(&qat_vdev->state_mutex);
	qat_vf_reset_done(qat_vdev);
	mutex_unlock(&qat_vdev->state_mutex);
}
641bb208810SXin Zeng
/*
 * PCI probe: allocate the wrapper vfio device (running qat_vf_pci_init_dev()
 * through ->init) and register it with the vfio-pci core.
 */
static int
qat_vf_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct qat_vf_core_device *qat_vdev;
	int ret;

	qat_vdev = vfio_alloc_device(qat_vf_core_device, core_device.vdev, dev, &qat_vf_pci_ops);
	if (IS_ERR(qat_vdev))
		return PTR_ERR(qat_vdev);

	/* drvdata points at the embedded core device; see qat_vf_drvdata(). */
	pci_set_drvdata(pdev, &qat_vdev->core_device);
	ret = vfio_pci_core_register_device(&qat_vdev->core_device);
	if (ret)
		goto out_put_device;

	return 0;

out_put_device:
	vfio_put_device(&qat_vdev->core_device.vdev);
	return ret;
}
664bb208810SXin Zeng
/* PCI remove: unregister from the vfio-pci core and drop our reference. */
static void qat_vf_vfio_pci_remove(struct pci_dev *pdev)
{
	struct qat_vf_core_device *qat_vdev = qat_vf_drvdata(pdev);

	vfio_pci_core_unregister_device(&qat_vdev->core_device);
	vfio_put_device(&qat_vdev->core_device.vdev);
}
672bb208810SXin Zeng
/* Devices handled only when explicitly bound (driver-override semantics). */
static const struct pci_device_id qat_vf_vfio_pci_table[] = {
	/* Intel QAT GEN4 4xxx VF device */
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4941) },
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4943) },
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4945) },
	{}
};
MODULE_DEVICE_TABLE(pci, qat_vf_vfio_pci_table);
681bb208810SXin Zeng
/* AER handlers: core error detection plus our post-reset migration fixup. */
static const struct pci_error_handlers qat_vf_err_handlers = {
	.reset_done = qat_vf_pci_aer_reset_done,
	.error_detected = vfio_pci_core_aer_err_detected,
};
686bb208810SXin Zeng
static struct pci_driver qat_vf_vfio_pci_driver = {
	.name = "qat_vfio_pci",
	.id_table = qat_vf_vfio_pci_table,
	.probe = qat_vf_vfio_pci_probe,
	.remove = qat_vf_vfio_pci_remove,
	.err_handler = &qat_vf_err_handlers,
	/* The driver manages DMA itself through the vfio/iommufd path. */
	.driver_managed_dma = true,
};
module_pci_driver(qat_vf_vfio_pci_driver);
696bb208810SXin Zeng
697bb208810SXin Zeng MODULE_LICENSE("GPL");
698bb208810SXin Zeng MODULE_AUTHOR("Xin Zeng <xin.zeng@intel.com>");
699bb208810SXin Zeng MODULE_DESCRIPTION("QAT VFIO PCI - VFIO PCI driver with live migration support for Intel(R) QAT GEN4 device family");
700*cdd30ebbSPeter Zijlstra MODULE_IMPORT_NS("CRYPTO_QAT");
701