xref: /linux/drivers/vfio/pci/qat/main.c (revision da5b2ad1c2f18834cb1ce429e2e5a5cf5cbdf21b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2024 Intel Corporation */
3 
4 #include <linux/anon_inodes.h>
5 #include <linux/container_of.h>
6 #include <linux/device.h>
7 #include <linux/file.h>
8 #include <linux/init.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/mutex.h>
12 #include <linux/pci.h>
13 #include <linux/sizes.h>
14 #include <linux/types.h>
15 #include <linux/uaccess.h>
16 #include <linux/vfio_pci_core.h>
17 #include <linux/qat/qat_mig_dev.h>
18 
19 /*
20  * The migration data of each Intel QAT VF device is encapsulated into a
21  * 4096 bytes block. The data consists of two parts.
22  * The first is a pre-configured set of attributes of the VF being migrated,
23  * which are only set when it is created. This can be migrated during pre-copy
24  * stage and used for a device compatibility check.
25  * The second is the VF state. This includes the required MMIO regions and
26  * the shadow states maintained by the QAT PF driver. This part can only be
27  * saved when the VF is fully quiesced and be migrated during stop-copy stage.
 * Both of these two parts of data are saved in hierarchical structures including
29  * a preamble section and several raw state sections.
30  * When the pre-configured part of the migration data is fully retrieved from
 * user space, the preamble section is used to validate the correctness of
32  * the data blocks and check the version compatibility. The raw state sections
33  * are then used to do a device compatibility check.
 * When the device transitions from the RESUMING state, the VF states are extracted
35  * from the raw state sections of the VF state part of the migration data and
36  * then loaded into the device.
37  */
38 
/* Per-fd context for one migration data stream (saving or resuming). */
struct qat_vf_migration_file {
	struct file *filp;	/* anon-inode file handed out to user space */
	/* protects migration region context */
	struct mutex lock;
	bool disabled;		/* set on teardown; fops then fail with -ENODEV */
	struct qat_vf_core_device *qat_vdev;	/* owning VF device */
	ssize_t filled_size;	/* bytes of valid migration data in mdev->state */
};
47 
/* VFIO PCI device state for one QAT VF with live migration support. */
struct qat_vf_core_device {
	struct vfio_pci_core_device core_device;
	struct qat_mig_dev *mdev;	/* migration handle from the QAT PF driver */
	/* protects migration state */
	struct mutex state_mutex;
	enum vfio_device_mig_state mig_state;
	struct qat_vf_migration_file *resuming_migf;	/* fd open for RESUMING writes */
	struct qat_vf_migration_file *saving_migf;	/* fd open for PRE_COPY/STOP_COPY reads */
};
57 
58 static int qat_vf_pci_open_device(struct vfio_device *core_vdev)
59 {
60 	struct qat_vf_core_device *qat_vdev =
61 		container_of(core_vdev, struct qat_vf_core_device,
62 			     core_device.vdev);
63 	struct vfio_pci_core_device *vdev = &qat_vdev->core_device;
64 	int ret;
65 
66 	ret = vfio_pci_core_enable(vdev);
67 	if (ret)
68 		return ret;
69 
70 	ret = qat_vfmig_open(qat_vdev->mdev);
71 	if (ret) {
72 		vfio_pci_core_disable(vdev);
73 		return ret;
74 	}
75 	qat_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
76 
77 	vfio_pci_core_finish_enable(vdev);
78 
79 	return 0;
80 }
81 
/*
 * Mark a migration file as disabled and reset its progress counters.
 * Subsequent read/write/ioctl calls on the fd fail with -ENODEV.
 */
static void qat_vf_disable_fd(struct qat_vf_migration_file *migf)
{
	mutex_lock(&migf->lock);
	migf->disabled = true;
	migf->filp->f_pos = 0;
	migf->filled_size = 0;
	mutex_unlock(&migf->lock);
}
90 
91 static void qat_vf_disable_fds(struct qat_vf_core_device *qat_vdev)
92 {
93 	if (qat_vdev->resuming_migf) {
94 		qat_vf_disable_fd(qat_vdev->resuming_migf);
95 		fput(qat_vdev->resuming_migf->filp);
96 		qat_vdev->resuming_migf = NULL;
97 	}
98 
99 	if (qat_vdev->saving_migf) {
100 		qat_vf_disable_fd(qat_vdev->saving_migf);
101 		fput(qat_vdev->saving_migf->filp);
102 		qat_vdev->saving_migf = NULL;
103 	}
104 }
105 
/*
 * Counterpart of qat_vf_pci_open_device(): close the migration channel,
 * tear down any open migration fds, then disable the core device.
 */
static void qat_vf_pci_close_device(struct vfio_device *core_vdev)
{
	struct qat_vf_core_device *qat_vdev = container_of(core_vdev,
			struct qat_vf_core_device, core_device.vdev);

	qat_vfmig_close(qat_vdev->mdev);
	qat_vf_disable_fds(qat_vdev);
	vfio_pci_core_close_device(core_vdev);
}
115 
/*
 * VFIO_MIG_GET_PRECOPY_INFO handler for the saving fd. Reports how many
 * bytes of the pre-configured (setup) part remain to be read while the
 * device is in one of the PRE_COPY states.
 */
static long qat_vf_precopy_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
{
	struct qat_vf_migration_file *migf = filp->private_data;
	struct qat_vf_core_device *qat_vdev = migf->qat_vdev;
	struct qat_mig_dev *mig_dev = qat_vdev->mdev;
	struct vfio_precopy_info info;
	loff_t *pos = &filp->f_pos;
	unsigned long minsz;
	int ret = 0;

	if (cmd != VFIO_MIG_GET_PRECOPY_INFO)
		return -ENOTTY;

	minsz = offsetofend(struct vfio_precopy_info, dirty_bytes);

	if (copy_from_user(&info, (void __user *)arg, minsz))
		return -EFAULT;
	if (info.argsz < minsz)
		return -EINVAL;

	/* Lock order: state_mutex (outer), then migf->lock (inner). */
	mutex_lock(&qat_vdev->state_mutex);
	if (qat_vdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY &&
	    qat_vdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY_P2P) {
		mutex_unlock(&qat_vdev->state_mutex);
		return -EINVAL;
	}

	mutex_lock(&migf->lock);
	if (migf->disabled) {
		ret = -ENODEV;
		goto out;
	}

	/* f_pos past the setup data means the stream position is corrupt. */
	if (*pos > mig_dev->setup_size) {
		ret = -EINVAL;
		goto out;
	}

	/* Only the setup part is transferred in pre-copy; nothing is dirty. */
	info.dirty_bytes = 0;
	info.initial_bytes = mig_dev->setup_size - *pos;

out:
	mutex_unlock(&migf->lock);
	mutex_unlock(&qat_vdev->state_mutex);
	if (ret)
		return ret;
	return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
}
165 
/*
 * read() on the saving fd: copy migration data from the device state
 * buffer out to user space. The fd is a stream; progress is tracked via
 * the file's own f_pos.
 */
static ssize_t qat_vf_save_read(struct file *filp, char __user *buf,
				size_t len, loff_t *pos)
{
	struct qat_vf_migration_file *migf = filp->private_data;
	struct qat_mig_dev *mig_dev = migf->qat_vdev->mdev;
	ssize_t done = 0;
	loff_t *offs;
	int ret;

	/* stream_open() was used on this fd, so pread() is not supported */
	if (pos)
		return -ESPIPE;
	offs = &filp->f_pos;

	mutex_lock(&migf->lock);
	if (*offs > migf->filled_size || *offs < 0) {
		done = -EINVAL;
		goto out_unlock;
	}

	if (migf->disabled) {
		done = -ENODEV;
		goto out_unlock;
	}

	/* Clamp to the remaining valid data; a fully-consumed fd reads 0. */
	len = min_t(size_t, migf->filled_size - *offs, len);
	if (len) {
		ret = copy_to_user(buf, mig_dev->state + *offs, len);
		if (ret) {
			done = -EFAULT;
			goto out_unlock;
		}
		*offs += len;
		done = len;
	}

out_unlock:
	mutex_unlock(&migf->lock);
	return done;
}
205 
/*
 * ->release() for both the saving and resuming fds: runs when the last
 * file reference is dropped; disables the context and frees it.
 */
static int qat_vf_release_file(struct inode *inode, struct file *filp)
{
	struct qat_vf_migration_file *migf = filp->private_data;

	qat_vf_disable_fd(migf);
	mutex_destroy(&migf->lock);
	kfree(migf);

	return 0;
}
216 
/* fops for the read-only saving fd handed to user space. */
static const struct file_operations qat_vf_save_fops = {
	.owner = THIS_MODULE,
	.read = qat_vf_save_read,
	.unlocked_ioctl = qat_vf_precopy_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.release = qat_vf_release_file,
	.llseek = no_llseek,
};
225 
226 static int qat_vf_save_state(struct qat_vf_core_device *qat_vdev,
227 			     struct qat_vf_migration_file *migf)
228 {
229 	int ret;
230 
231 	ret = qat_vfmig_save_state(qat_vdev->mdev);
232 	if (ret)
233 		return ret;
234 	migf->filled_size = qat_vdev->mdev->state_size;
235 
236 	return 0;
237 }
238 
239 static int qat_vf_save_setup(struct qat_vf_core_device *qat_vdev,
240 			     struct qat_vf_migration_file *migf)
241 {
242 	int ret;
243 
244 	ret = qat_vfmig_save_setup(qat_vdev->mdev);
245 	if (ret)
246 		return ret;
247 	migf->filled_size = qat_vdev->mdev->setup_size;
248 
249 	return 0;
250 }
251 
/*
 * Allocate a file handler for user space and then save the migration data for
 * the device being migrated. If this is called in the pre-copy stage, save the
 * pre-configured device data. Otherwise, if this is called in the stop-copy
 * stage, save the device state. In both cases, update the data size which can
 * then be read from user space.
 */
static struct qat_vf_migration_file *
qat_vf_save_device_data(struct qat_vf_core_device *qat_vdev, bool pre_copy)
{
	struct qat_vf_migration_file *migf;
	int ret;

	migf = kzalloc(sizeof(*migf), GFP_KERNEL);
	if (!migf)
		return ERR_PTR(-ENOMEM);

	migf->filp = anon_inode_getfile("qat_vf_mig", &qat_vf_save_fops,
					migf, O_RDONLY);
	ret = PTR_ERR_OR_ZERO(migf->filp);
	if (ret) {
		kfree(migf);
		return ERR_PTR(ret);
	}

	/* Disallow seeking; user space must consume the data as a stream. */
	stream_open(migf->filp->f_inode, migf->filp);
	mutex_init(&migf->lock);

	if (pre_copy)
		ret = qat_vf_save_setup(qat_vdev, migf);
	else
		ret = qat_vf_save_state(qat_vdev, migf);
	if (ret) {
		/* fput() releases the file, which frees migf via ->release() */
		fput(migf->filp);
		return ERR_PTR(ret);
	}

	migf->qat_vdev = qat_vdev;

	return migf;
}
293 
/*
 * write() on the resuming fd: accumulate migration data from user space
 * into the device state buffer. Incoming data is fed incrementally to
 * qat_vfmig_load_setup() so device compatibility is checked as soon as
 * enough of the pre-configured part has arrived.
 */
static ssize_t qat_vf_resume_write(struct file *filp, const char __user *buf,
				   size_t len, loff_t *pos)
{
	struct qat_vf_migration_file *migf = filp->private_data;
	struct qat_mig_dev *mig_dev = migf->qat_vdev->mdev;
	loff_t end, *offs;
	ssize_t done = 0;
	int ret;

	/* stream fd: pwrite() with an explicit position is not supported */
	if (pos)
		return -ESPIPE;
	offs = &filp->f_pos;

	/* Reject negative offsets and offset+len overflow before copying. */
	if (*offs < 0 ||
	    check_add_overflow((loff_t)len, *offs, &end))
		return -EOVERFLOW;

	/* The destination buffer is fixed at state_size bytes. */
	if (end > mig_dev->state_size)
		return -ENOMEM;

	mutex_lock(&migf->lock);
	if (migf->disabled) {
		done = -ENODEV;
		goto out_unlock;
	}

	ret = copy_from_user(mig_dev->state + *offs, buf, len);
	if (ret) {
		done = -EFAULT;
		goto out_unlock;
	}
	*offs += len;
	migf->filled_size += len;

	/*
	 * Load the pre-configured device data first to check if the target
	 * device is compatible with the source device.
	 * -EAGAIN is not an error here: it means more data is still needed,
	 * so keep accepting writes.
	 */
	ret = qat_vfmig_load_setup(mig_dev, migf->filled_size);
	if (ret && ret != -EAGAIN) {
		done = ret;
		goto out_unlock;
	}
	done = len;

out_unlock:
	mutex_unlock(&migf->lock);
	return done;
}
343 
/* fops for the write-only resuming fd handed to user space. */
static const struct file_operations qat_vf_resume_fops = {
	.owner = THIS_MODULE,
	.write = qat_vf_resume_write,
	.release = qat_vf_release_file,
	.llseek = no_llseek,
};
350 
351 static struct qat_vf_migration_file *
352 qat_vf_resume_device_data(struct qat_vf_core_device *qat_vdev)
353 {
354 	struct qat_vf_migration_file *migf;
355 	int ret;
356 
357 	migf = kzalloc(sizeof(*migf), GFP_KERNEL);
358 	if (!migf)
359 		return ERR_PTR(-ENOMEM);
360 
361 	migf->filp = anon_inode_getfile("qat_vf_mig", &qat_vf_resume_fops, migf, O_WRONLY);
362 	ret = PTR_ERR_OR_ZERO(migf->filp);
363 	if (ret) {
364 		kfree(migf);
365 		return ERR_PTR(ret);
366 	}
367 
368 	migf->qat_vdev = qat_vdev;
369 	migf->filled_size = 0;
370 	stream_open(migf->filp->f_inode, migf->filp);
371 	mutex_init(&migf->lock);
372 
373 	return migf;
374 }
375 
376 static int qat_vf_load_device_data(struct qat_vf_core_device *qat_vdev)
377 {
378 	return qat_vfmig_load_state(qat_vdev->mdev);
379 }
380 
/*
 * Execute one arc of the VFIO migration state machine. Returns a saving
 * or resuming fd for the arcs that create one, NULL on success otherwise,
 * or an ERR_PTR. Caller holds state_mutex.
 */
static struct file *qat_vf_pci_step_device_state(struct qat_vf_core_device *qat_vdev, u32 new)
{
	u32 cur = qat_vdev->mig_state;
	int ret;

	/*
	 * As the device is not capable of just stopping P2P DMAs, suspend the
	 * device completely once any of the P2P states are reached.
	 * When it is suspended, all its MMIO registers can still be operated
	 * correctly, jobs submitted through ring are queued while no jobs are
	 * processed by the device. The MMIO states can be safely migrated to
	 * the target VF during stop-copy stage and restored correctly in the
	 * target VF. All queued jobs can be resumed then.
	 */
	if ((cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_RUNNING_P2P) ||
	    (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_PRE_COPY_P2P)) {
		ret = qat_vfmig_suspend(qat_vdev->mdev);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	/* Leaving a P2P state: let the device process queued jobs again. */
	if ((cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_RUNNING) ||
	    (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_PRE_COPY)) {
		qat_vfmig_resume(qat_vdev->mdev);
		return NULL;
	}

	/* RUNNING_P2P <-> STOP requires no additional device action. */
	if ((cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_STOP) ||
	    (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING_P2P))
		return NULL;

	/* STOP -> STOP_COPY: snapshot the VF state, hand out a saving fd. */
	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) {
		struct qat_vf_migration_file *migf;

		migf = qat_vf_save_device_data(qat_vdev, false);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		/* Extra reference is dropped by qat_vf_disable_fds(). */
		get_file(migf->filp);
		qat_vdev->saving_migf = migf;
		return migf->filp;
	}

	/* STOP -> RESUMING: hand out a resuming fd for user space to fill. */
	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) {
		struct qat_vf_migration_file *migf;

		migf = qat_vf_resume_device_data(qat_vdev);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		get_file(migf->filp);
		qat_vdev->resuming_migf = migf;
		return migf->filp;
	}

	/* Migration finished or aborted: tear down any open migration fds. */
	if ((cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP) ||
	    (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_RUNNING) ||
	    (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_RUNNING_P2P)) {
		qat_vf_disable_fds(qat_vdev);
		return NULL;
	}

	/* Entering pre-copy: save the pre-configured (setup) data. */
	if ((cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_PRE_COPY) ||
	    (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_PRE_COPY_P2P)) {
		struct qat_vf_migration_file *migf;

		migf = qat_vf_save_device_data(qat_vdev, true);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		get_file(migf->filp);
		qat_vdev->saving_migf = migf;
		return migf->filp;
	}

	/*
	 * PRE_COPY_P2P -> STOP_COPY: reuse the existing saving fd and save
	 * the full VF state into the same buffer.
	 */
	if (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_STOP_COPY) {
		struct qat_vf_migration_file *migf = qat_vdev->saving_migf;

		if (!migf)
			return ERR_PTR(-EINVAL);
		ret = qat_vf_save_state(qat_vdev, migf);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	/* RESUMING -> STOP: all data received; load it into the device. */
	if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
		ret = qat_vf_load_device_data(qat_vdev);
		if (ret)
			return ERR_PTR(ret);

		qat_vf_disable_fds(qat_vdev);
		return NULL;
	}

	/* vfio_mig_get_next_state() does not use arcs other than the above */
	WARN_ON(true);
	return ERR_PTR(-EINVAL);
}
478 
/*
 * Resync after a device reset: the VF is back in RUNNING, so reset the
 * migration handle and drop any in-flight migration fds.
 * Caller must hold state_mutex.
 */
static void qat_vf_reset_done(struct qat_vf_core_device *qat_vdev)
{
	qat_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
	qat_vfmig_reset(qat_vdev->mdev);
	qat_vf_disable_fds(qat_vdev);
}
485 
/*
 * Walk the device through intermediate migration states until new_state
 * is reached, one FSM arc at a time. Returns a migration fd for arcs
 * that create one, NULL otherwise, or an ERR_PTR on failure.
 */
static struct file *qat_vf_pci_set_device_state(struct vfio_device *vdev,
						enum vfio_device_mig_state new_state)
{
	struct qat_vf_core_device *qat_vdev = container_of(vdev,
			struct qat_vf_core_device, core_device.vdev);
	enum vfio_device_mig_state next_state;
	struct file *res = NULL;
	int ret;

	mutex_lock(&qat_vdev->state_mutex);
	while (new_state != qat_vdev->mig_state) {
		ret = vfio_mig_get_next_state(vdev, qat_vdev->mig_state,
					      new_state, &next_state);
		if (ret) {
			res = ERR_PTR(ret);
			break;
		}
		res = qat_vf_pci_step_device_state(qat_vdev, next_state);
		if (IS_ERR(res))
			break;
		qat_vdev->mig_state = next_state;
		/* A non-NULL fd may only come from the final arc of the walk. */
		if (WARN_ON(res && new_state != qat_vdev->mig_state)) {
			fput(res);
			res = ERR_PTR(-EINVAL);
			break;
		}
	}
	mutex_unlock(&qat_vdev->state_mutex);

	return res;
}
517 
518 static int qat_vf_pci_get_device_state(struct vfio_device *vdev,
519 				       enum vfio_device_mig_state *curr_state)
520 {
521 	struct qat_vf_core_device *qat_vdev = container_of(vdev,
522 			struct qat_vf_core_device, core_device.vdev);
523 
524 	mutex_lock(&qat_vdev->state_mutex);
525 	*curr_state = qat_vdev->mig_state;
526 	mutex_unlock(&qat_vdev->state_mutex);
527 
528 	return 0;
529 }
530 
531 static int qat_vf_pci_get_data_size(struct vfio_device *vdev,
532 				    unsigned long *stop_copy_length)
533 {
534 	struct qat_vf_core_device *qat_vdev = container_of(vdev,
535 			struct qat_vf_core_device, core_device.vdev);
536 
537 	mutex_lock(&qat_vdev->state_mutex);
538 	*stop_copy_length = qat_vdev->mdev->state_size;
539 	mutex_unlock(&qat_vdev->state_mutex);
540 
541 	return 0;
542 }
543 
/* Migration callbacks plugged into core_vdev->mig_ops in ->init(). */
static const struct vfio_migration_ops qat_vf_pci_mig_ops = {
	.migration_set_state = qat_vf_pci_set_device_state,
	.migration_get_state = qat_vf_pci_get_device_state,
	.migration_get_data_size = qat_vf_pci_get_data_size,
};
549 
/*
 * ->release() counterpart of qat_vf_pci_init_dev(): tear down the QAT
 * migration handle before releasing the core device.
 */
static void qat_vf_pci_release_dev(struct vfio_device *core_vdev)
{
	struct qat_vf_core_device *qat_vdev = container_of(core_vdev,
			struct qat_vf_core_device, core_device.vdev);

	qat_vfmig_cleanup(qat_vdev->mdev);
	qat_vfmig_destroy(qat_vdev->mdev);
	mutex_destroy(&qat_vdev->state_mutex);
	vfio_pci_core_release_dev(core_vdev);
}
560 
/*
 * One-time VFIO device init: advertise the supported migration features,
 * initialize the core device, then create and initialize the QAT
 * migration handle for this VF via its parent PF.
 */
static int qat_vf_pci_init_dev(struct vfio_device *core_vdev)
{
	struct qat_vf_core_device *qat_vdev = container_of(core_vdev,
			struct qat_vf_core_device, core_device.vdev);
	struct qat_mig_dev *mdev;
	struct pci_dev *parent;
	int ret, vf_id;

	core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P |
				     VFIO_MIGRATION_PRE_COPY;
	core_vdev->mig_ops = &qat_vf_pci_mig_ops;

	ret = vfio_pci_core_init_dev(core_vdev);
	if (ret)
		return ret;

	mutex_init(&qat_vdev->state_mutex);

	/* The migration handle is created against the parent PF + VF index. */
	parent = pci_physfn(qat_vdev->core_device.pdev);
	vf_id = pci_iov_vf_id(qat_vdev->core_device.pdev);
	if (vf_id < 0) {
		ret = -ENODEV;
		goto err_rel;
	}

	mdev = qat_vfmig_create(parent, vf_id);
	if (IS_ERR(mdev)) {
		ret = PTR_ERR(mdev);
		goto err_rel;
	}

	ret = qat_vfmig_init(mdev);
	if (ret)
		goto err_destroy;

	qat_vdev->mdev = mdev;

	return 0;

err_destroy:
	qat_vfmig_destroy(mdev);
err_rel:
	vfio_pci_core_release_dev(core_vdev);
	return ret;
}
606 
/* VFIO device callbacks: core vfio-pci behavior plus QAT migration hooks. */
static const struct vfio_device_ops qat_vf_pci_ops = {
	.name = "qat-vf-vfio-pci",
	.init = qat_vf_pci_init_dev,
	.release = qat_vf_pci_release_dev,
	.open_device = qat_vf_pci_open_device,
	.close_device = qat_vf_pci_close_device,
	.ioctl = vfio_pci_core_ioctl,
	.read = vfio_pci_core_read,
	.write = vfio_pci_core_write,
	.mmap = vfio_pci_core_mmap,
	.request = vfio_pci_core_request,
	.match = vfio_pci_core_match,
	.bind_iommufd = vfio_iommufd_physical_bind,
	.unbind_iommufd = vfio_iommufd_physical_unbind,
	.attach_ioas = vfio_iommufd_physical_attach_ioas,
	.detach_ioas = vfio_iommufd_physical_detach_ioas,
};
624 
625 static struct qat_vf_core_device *qat_vf_drvdata(struct pci_dev *pdev)
626 {
627 	struct vfio_pci_core_device *core_device = pci_get_drvdata(pdev);
628 
629 	return container_of(core_device, struct qat_vf_core_device, core_device);
630 }
631 
/* PCI reset_done handler: resync migration state after the VF reset. */
static void qat_vf_pci_aer_reset_done(struct pci_dev *pdev)
{
	struct qat_vf_core_device *qat_vdev = qat_vf_drvdata(pdev);

	/* Nothing to do if the migration handle was never created. */
	if (!qat_vdev->mdev)
		return;

	mutex_lock(&qat_vdev->state_mutex);
	qat_vf_reset_done(qat_vdev);
	mutex_unlock(&qat_vdev->state_mutex);
}
643 
/*
 * PCI probe: allocate the VFIO wrapper for this VF and register it with
 * the VFIO core; drop the device reference if registration fails.
 */
static int
qat_vf_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct qat_vf_core_device *qat_vdev;
	int ret;

	qat_vdev = vfio_alloc_device(qat_vf_core_device, core_device.vdev, dev, &qat_vf_pci_ops);
	if (IS_ERR(qat_vdev))
		return PTR_ERR(qat_vdev);

	pci_set_drvdata(pdev, &qat_vdev->core_device);
	ret = vfio_pci_core_register_device(&qat_vdev->core_device);
	if (ret)
		goto out_put_device;

	return 0;

out_put_device:
	vfio_put_device(&qat_vdev->core_device.vdev);
	return ret;
}
666 
667 static void qat_vf_vfio_pci_remove(struct pci_dev *pdev)
668 {
669 	struct qat_vf_core_device *qat_vdev = qat_vf_drvdata(pdev);
670 
671 	vfio_pci_core_unregister_device(&qat_vdev->core_device);
672 	vfio_put_device(&qat_vdev->core_device.vdev);
673 }
674 
/* Matched via driver-override only (PCI_DRIVER_OVERRIDE_DEVICE_VFIO). */
static const struct pci_device_id qat_vf_vfio_pci_table[] = {
	/* Intel QAT GEN4 4xxx VF device */
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4941) },
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4943) },
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4945) },
	{}
};
MODULE_DEVICE_TABLE(pci, qat_vf_vfio_pci_table);
683 
/* Core AER handling, plus migration state resync on reset_done. */
static const struct pci_error_handlers qat_vf_err_handlers = {
	.reset_done = qat_vf_pci_aer_reset_done,
	.error_detected = vfio_pci_core_aer_err_detected,
};
688 
/* PCI driver glue for the QAT VF VFIO variant driver. */
static struct pci_driver qat_vf_vfio_pci_driver = {
	.name = "qat_vfio_pci",
	.id_table = qat_vf_vfio_pci_table,
	.probe = qat_vf_vfio_pci_probe,
	.remove = qat_vf_vfio_pci_remove,
	.err_handler = &qat_vf_err_handlers,
	.driver_managed_dma = true,
};
module_pci_driver(qat_vf_vfio_pci_driver);
698 
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xin Zeng <xin.zeng@intel.com>");
MODULE_DESCRIPTION("QAT VFIO PCI - VFIO PCI driver with live migration support for Intel(R) QAT GEN4 device family");
/* Import the symbol namespace that exports the qat_vfmig_* helpers. */
MODULE_IMPORT_NS(CRYPTO_QAT);
703