// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2024 Intel Corporation */

#include <linux/anon_inodes.h>
#include <linux/container_of.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/sizes.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio_pci_core.h>
#include <linux/qat/qat_mig_dev.h>

/*
 * The migration data of each Intel QAT VF device is encapsulated in a
 * 4096-byte block. The data consists of two parts.
 * The first is a pre-configured set of attributes of the VF being migrated,
 * which are only set when it is created. This part can be migrated during the
 * pre-copy stage and is used for a device compatibility check.
 * The second is the VF state. This includes the required MMIO regions and
 * the shadow states maintained by the QAT PF driver. This part can only be
 * saved when the VF is fully quiesced and is migrated during the stop-copy
 * stage.
 * Both parts of the data are saved in hierarchical structures consisting of
 * a preamble section and several raw state sections.
 * When the pre-configured part of the migration data has been fully retrieved
 * from user space, the preamble section is used to validate the correctness
 * of the data blocks and to check the version compatibility. The raw state
 * sections are then used to perform a device compatibility check.
 * When the device transitions out of the RESUMING state, the VF states are
 * extracted from the raw state sections of the VF state part of the migration
 * data and then loaded into the device.
 */

struct qat_vf_migration_file {
	struct file *filp;
	/* protects migration region context */
	struct mutex lock;
	bool disabled;
	struct qat_vf_core_device *qat_vdev;
	ssize_t filled_size;
};

struct qat_vf_core_device {
	struct vfio_pci_core_device core_device;
	struct qat_mig_dev *mdev;
	/* protects migration state */
	struct mutex state_mutex;
	enum vfio_device_mig_state mig_state;
	struct qat_vf_migration_file *resuming_migf;
	struct qat_vf_migration_file *saving_migf;
};

static int qat_vf_pci_open_device(struct vfio_device *core_vdev)
{
	struct qat_vf_core_device *qat_vdev =
		container_of(core_vdev, struct qat_vf_core_device, core_device.vdev);
	struct vfio_pci_core_device *vdev = &qat_vdev->core_device;
	int ret;

	ret = vfio_pci_core_enable(vdev);
	if (ret)
		return ret;

	ret = qat_vfmig_open(qat_vdev->mdev);
	if (ret) {
		vfio_pci_core_disable(vdev);
		return ret;
	}
	qat_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;

	vfio_pci_core_finish_enable(vdev);

	return 0;
}

static void qat_vf_disable_fd(struct qat_vf_migration_file *migf)
{
	mutex_lock(&migf->lock);
	migf->disabled = true;
	migf->filp->f_pos = 0;
	migf->filled_size = 0;
	mutex_unlock(&migf->lock);
}

static void qat_vf_disable_fds(struct qat_vf_core_device *qat_vdev)
{
	if (qat_vdev->resuming_migf) {
		qat_vf_disable_fd(qat_vdev->resuming_migf);
		fput(qat_vdev->resuming_migf->filp);
		qat_vdev->resuming_migf = NULL;
	}

	if (qat_vdev->saving_migf) {
		qat_vf_disable_fd(qat_vdev->saving_migf);
		fput(qat_vdev->saving_migf->filp);
		qat_vdev->saving_migf = NULL;
	}
}

static void qat_vf_pci_close_device(struct vfio_device *core_vdev)
{
	struct qat_vf_core_device *qat_vdev =
		container_of(core_vdev, struct qat_vf_core_device, core_device.vdev);

	qat_vfmig_close(qat_vdev->mdev);
	qat_vf_disable_fds(qat_vdev);
	vfio_pci_core_close_device(core_vdev);
}

static long qat_vf_precopy_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
{
	struct qat_vf_migration_file *migf = filp->private_data;
	struct qat_vf_core_device *qat_vdev = migf->qat_vdev;
	struct qat_mig_dev *mig_dev = qat_vdev->mdev;
	struct vfio_precopy_info info;
	loff_t *pos = &filp->f_pos;
	unsigned long minsz;
	int ret = 0;

	if (cmd != VFIO_MIG_GET_PRECOPY_INFO)
		return -ENOTTY;

	minsz = offsetofend(struct vfio_precopy_info, dirty_bytes);

	if (copy_from_user(&info, (void __user *)arg, minsz))
		return -EFAULT;
	if (info.argsz < minsz)
		return -EINVAL;

	mutex_lock(&qat_vdev->state_mutex);
	if (qat_vdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY &&
	    qat_vdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY_P2P) {
		mutex_unlock(&qat_vdev->state_mutex);
		return -EINVAL;
	}

	mutex_lock(&migf->lock);
	if (migf->disabled) {
		ret = -ENODEV;
		goto out;
	}

	if (*pos > mig_dev->setup_size) {
		ret = -EINVAL;
		goto out;
	}

	info.dirty_bytes = 0;
	info.initial_bytes = mig_dev->setup_size - *pos;

out:
	mutex_unlock(&migf->lock);
	mutex_unlock(&qat_vdev->state_mutex);
	if (ret)
		return ret;
	return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
}

static ssize_t qat_vf_save_read(struct file *filp, char __user *buf,
				size_t len, loff_t *pos)
{
	struct qat_vf_migration_file *migf = filp->private_data;
	struct qat_mig_dev *mig_dev = migf->qat_vdev->mdev;
	ssize_t done = 0;
	loff_t *offs;
	int ret;

	if (pos)
		return -ESPIPE;
	offs = &filp->f_pos;

	mutex_lock(&migf->lock);
	if (*offs > migf->filled_size || *offs < 0) {
		done = -EINVAL;
		goto out_unlock;
	}

	if (migf->disabled) {
		done = -ENODEV;
		goto out_unlock;
	}

	len = min_t(size_t, migf->filled_size - *offs, len);
	if (len) {
		ret = copy_to_user(buf, mig_dev->state + *offs, len);
		if (ret) {
			done = -EFAULT;
			goto out_unlock;
		}
		*offs += len;
		done = len;
	}

out_unlock:
	mutex_unlock(&migf->lock);
	return done;
}

static int qat_vf_release_file(struct inode *inode, struct file *filp)
{
	struct qat_vf_migration_file *migf = filp->private_data;

	qat_vf_disable_fd(migf);
	mutex_destroy(&migf->lock);
	kfree(migf);

	return 0;
}

static const struct file_operations qat_vf_save_fops = {
	.owner = THIS_MODULE,
	.read = qat_vf_save_read,
	.unlocked_ioctl = qat_vf_precopy_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.release = qat_vf_release_file,
};

static int qat_vf_save_state(struct qat_vf_core_device *qat_vdev,
			     struct qat_vf_migration_file *migf)
{
	int ret;

	ret = qat_vfmig_save_state(qat_vdev->mdev);
	if (ret)
		return ret;
	migf->filled_size = qat_vdev->mdev->state_size;

	return 0;
}

static int qat_vf_save_setup(struct qat_vf_core_device *qat_vdev,
			     struct qat_vf_migration_file *migf)
{
	int ret;

	ret = qat_vfmig_save_setup(qat_vdev->mdev);
	if (ret)
		return ret;
	migf->filled_size = qat_vdev->mdev->setup_size;

	return 0;
}

/*
 * Allocate a file handler for user space and then save the
 * migration data for the device being migrated. If this is called in the
 * pre-copy stage, save the pre-configured device data. Otherwise, if this is
 * called in the stop-copy stage, save the device state. In both cases, update
 * the data size which can then be read from user space.
 */
static struct qat_vf_migration_file *
qat_vf_save_device_data(struct qat_vf_core_device *qat_vdev, bool pre_copy)
{
	struct qat_vf_migration_file *migf;
	int ret;

	migf = kzalloc(sizeof(*migf), GFP_KERNEL);
	if (!migf)
		return ERR_PTR(-ENOMEM);

	migf->filp = anon_inode_getfile("qat_vf_mig", &qat_vf_save_fops,
					migf, O_RDONLY);
	ret = PTR_ERR_OR_ZERO(migf->filp);
	if (ret) {
		kfree(migf);
		return ERR_PTR(ret);
	}

	stream_open(migf->filp->f_inode, migf->filp);
	mutex_init(&migf->lock);

	if (pre_copy)
		ret = qat_vf_save_setup(qat_vdev, migf);
	else
		ret = qat_vf_save_state(qat_vdev, migf);
	if (ret) {
		fput(migf->filp);
		return ERR_PTR(ret);
	}

	migf->qat_vdev = qat_vdev;

	return migf;
}

static ssize_t qat_vf_resume_write(struct file *filp, const char __user *buf,
				   size_t len, loff_t *pos)
{
	struct qat_vf_migration_file *migf = filp->private_data;
	struct qat_mig_dev *mig_dev = migf->qat_vdev->mdev;
	loff_t end, *offs;
	ssize_t done = 0;
	int ret;

	if (pos)
		return -ESPIPE;
	offs = &filp->f_pos;

	if (*offs < 0 ||
	    check_add_overflow((loff_t)len, *offs, &end))
		return -EOVERFLOW;

	if (end > mig_dev->state_size)
		return -ENOMEM;

	mutex_lock(&migf->lock);
	if (migf->disabled) {
		done = -ENODEV;
		goto out_unlock;
	}

	ret = copy_from_user(mig_dev->state + *offs, buf, len);
	if (ret) {
		done = -EFAULT;
		goto out_unlock;
	}
	*offs += len;
	migf->filled_size += len;

	/*
	 * Load the pre-configured device data first to check if the target
	 * device is compatible with the source device.
	 */
	ret = qat_vfmig_load_setup(mig_dev, migf->filled_size);
	if (ret && ret != -EAGAIN) {
		done = ret;
		goto out_unlock;
	}
	done = len;

out_unlock:
	mutex_unlock(&migf->lock);
	return done;
}

static const struct file_operations qat_vf_resume_fops = {
	.owner = THIS_MODULE,
	.write = qat_vf_resume_write,
	.release = qat_vf_release_file,
};

static struct qat_vf_migration_file *
qat_vf_resume_device_data(struct qat_vf_core_device *qat_vdev)
{
	struct qat_vf_migration_file *migf;
	int ret;

	migf = kzalloc(sizeof(*migf), GFP_KERNEL);
	if (!migf)
		return ERR_PTR(-ENOMEM);

	migf->filp = anon_inode_getfile("qat_vf_mig", &qat_vf_resume_fops, migf, O_WRONLY);
	ret = PTR_ERR_OR_ZERO(migf->filp);
	if (ret) {
		kfree(migf);
		return ERR_PTR(ret);
	}

	migf->qat_vdev = qat_vdev;
	migf->filled_size = 0;
	stream_open(migf->filp->f_inode, migf->filp);
	mutex_init(&migf->lock);

	return migf;
}

static int qat_vf_load_device_data(struct qat_vf_core_device *qat_vdev)
{
	return qat_vfmig_load_state(qat_vdev->mdev);
}
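
/*
 * Execute a single migration state transition arc. Arcs that create a
 * migration stream (STOP -> STOP_COPY, STOP -> RESUMING, RUNNING -> PRE_COPY,
 * RUNNING_P2P -> PRE_COPY_P2P) return the corresponding file, arcs handled
 * entirely within the driver return NULL, and failures return an ERR_PTR().
 */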
static struct file *qat_vf_pci_step_device_state(struct qat_vf_core_device *qat_vdev, u32 new)
{
	u32 cur = qat_vdev->mig_state;
	int ret;

	/*
	 * As the device is not capable of just stopping P2P DMAs, suspend the
	 * device completely once any of the P2P states are reached.
	 * While it is suspended, all of its MMIO registers can still be
	 * accessed correctly, and jobs submitted through the rings are queued
	 * but not processed by the device. The MMIO state can be safely
	 * migrated to the target VF during the stop-copy stage and restored
	 * correctly on the target VF, after which all queued jobs are resumed.
	 */
	if ((cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_RUNNING_P2P) ||
	    (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_PRE_COPY_P2P)) {
		ret = qat_vfmig_suspend(qat_vdev->mdev);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	if ((cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_RUNNING) ||
	    (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_PRE_COPY)) {
		qat_vfmig_resume(qat_vdev->mdev);
		return NULL;
	}

	if ((cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_STOP) ||
	    (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING_P2P))
		return NULL;

	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) {
		struct qat_vf_migration_file *migf;

		migf = qat_vf_save_device_data(qat_vdev, false);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		get_file(migf->filp);
		qat_vdev->saving_migf = migf;
		return migf->filp;
	}

	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) {
		struct qat_vf_migration_file *migf;

		migf = qat_vf_resume_device_data(qat_vdev);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		get_file(migf->filp);
		qat_vdev->resuming_migf = migf;
		return migf->filp;
	}

	if ((cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP) ||
	    (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_RUNNING) ||
	    (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_RUNNING_P2P)) {
		qat_vf_disable_fds(qat_vdev);
		return NULL;
	}

	if ((cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_PRE_COPY) ||
	    (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_PRE_COPY_P2P)) {
		struct qat_vf_migration_file *migf;

		migf = qat_vf_save_device_data(qat_vdev, true);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		get_file(migf->filp);
		qat_vdev->saving_migf = migf;
		return migf->filp;
	}

	if (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_STOP_COPY) {
		struct qat_vf_migration_file *migf = qat_vdev->saving_migf;

		if (!migf)
			return ERR_PTR(-EINVAL);
		ret = qat_vf_save_state(qat_vdev, migf);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
		ret = qat_vf_load_device_data(qat_vdev);
		if (ret)
			return ERR_PTR(ret);

		qat_vf_disable_fds(qat_vdev);
		return NULL;
	}

	/* vfio_mig_get_next_state() does not use arcs other than the above */
	WARN_ON(true);
	return ERR_PTR(-EINVAL);
}

static void qat_vf_reset_done(struct qat_vf_core_device *qat_vdev)
{
	qat_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
	qat_vfmig_reset(qat_vdev->mdev);
	qat_vf_disable_fds(qat_vdev);
}
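
/*
 * Walk from the current migration state to @new_state one arc at a time,
 * using vfio_mig_get_next_state() to pick each intermediate state. Only the
 * final arc may return a migration file, which is handed back to the VFIO
 * core.
 */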
static struct file *qat_vf_pci_set_device_state(struct vfio_device *vdev,
						enum vfio_device_mig_state new_state)
{
	struct qat_vf_core_device *qat_vdev = container_of(vdev,
			struct qat_vf_core_device, core_device.vdev);
	enum vfio_device_mig_state next_state;
	struct file *res = NULL;
	int ret;

	mutex_lock(&qat_vdev->state_mutex);
	while (new_state != qat_vdev->mig_state) {
		ret = vfio_mig_get_next_state(vdev, qat_vdev->mig_state,
					      new_state, &next_state);
		if (ret) {
			res = ERR_PTR(ret);
			break;
		}
		res = qat_vf_pci_step_device_state(qat_vdev, next_state);
		if (IS_ERR(res))
			break;
		qat_vdev->mig_state = next_state;
		if (WARN_ON(res && new_state != qat_vdev->mig_state)) {
			fput(res);
			res = ERR_PTR(-EINVAL);
			break;
		}
	}
	mutex_unlock(&qat_vdev->state_mutex);

	return res;
}

static int qat_vf_pci_get_device_state(struct vfio_device *vdev,
				       enum vfio_device_mig_state *curr_state)
{
	struct qat_vf_core_device *qat_vdev = container_of(vdev,
			struct qat_vf_core_device, core_device.vdev);

	mutex_lock(&qat_vdev->state_mutex);
	*curr_state = qat_vdev->mig_state;
	mutex_unlock(&qat_vdev->state_mutex);

	return 0;
}

static int qat_vf_pci_get_data_size(struct vfio_device *vdev,
				    unsigned long *stop_copy_length)
{
	struct qat_vf_core_device *qat_vdev = container_of(vdev,
			struct qat_vf_core_device, core_device.vdev);

	mutex_lock(&qat_vdev->state_mutex);
	*stop_copy_length = qat_vdev->mdev->state_size;
	mutex_unlock(&qat_vdev->state_mutex);

	return 0;
}

static const struct vfio_migration_ops qat_vf_pci_mig_ops = {
	.migration_set_state = qat_vf_pci_set_device_state,
	.migration_get_state = qat_vf_pci_get_device_state,
	.migration_get_data_size = qat_vf_pci_get_data_size,
};

static void qat_vf_pci_release_dev(struct vfio_device *core_vdev)
{
	struct qat_vf_core_device *qat_vdev = container_of(core_vdev,
			struct qat_vf_core_device, core_device.vdev);

	qat_vfmig_cleanup(qat_vdev->mdev);
	qat_vfmig_destroy(qat_vdev->mdev);
	mutex_destroy(&qat_vdev->state_mutex);
	vfio_pci_core_release_dev(core_vdev);
}

static int qat_vf_pci_init_dev(struct vfio_device *core_vdev)
{
	struct qat_vf_core_device *qat_vdev = container_of(core_vdev,
			struct qat_vf_core_device, core_device.vdev);
	struct qat_mig_dev *mdev;
	struct pci_dev *parent;
	int ret, vf_id;

	core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P |
				     VFIO_MIGRATION_PRE_COPY;
	core_vdev->mig_ops = &qat_vf_pci_mig_ops;

	ret = vfio_pci_core_init_dev(core_vdev);
	if (ret)
		return ret;

	mutex_init(&qat_vdev->state_mutex);

	parent = pci_physfn(qat_vdev->core_device.pdev);
	vf_id = pci_iov_vf_id(qat_vdev->core_device.pdev);
	if (vf_id < 0) {
		ret = -ENODEV;
		goto err_rel;
	}

	mdev = qat_vfmig_create(parent, vf_id);
	if (IS_ERR(mdev)) {
		ret = PTR_ERR(mdev);
		goto err_rel;
	}

	ret = qat_vfmig_init(mdev);
	if (ret)
		goto err_destroy;

	qat_vdev->mdev = mdev;

	return 0;

err_destroy:
	qat_vfmig_destroy(mdev);
err_rel:
	vfio_pci_core_release_dev(core_vdev);
	return ret;
}

static const struct vfio_device_ops qat_vf_pci_ops = {
	.name = "qat-vf-vfio-pci",
	.init = qat_vf_pci_init_dev,
	.release = qat_vf_pci_release_dev,
	.open_device = qat_vf_pci_open_device,
	.close_device = qat_vf_pci_close_device,
	.ioctl = vfio_pci_core_ioctl,
	.read = vfio_pci_core_read,
	.write = vfio_pci_core_write,
	.mmap = vfio_pci_core_mmap,
	.request = vfio_pci_core_request,
	.match = vfio_pci_core_match,
	.bind_iommufd = vfio_iommufd_physical_bind,
	.unbind_iommufd = vfio_iommufd_physical_unbind,
	.attach_ioas = vfio_iommufd_physical_attach_ioas,
	.detach_ioas = vfio_iommufd_physical_detach_ioas,
};

static struct qat_vf_core_device *qat_vf_drvdata(struct pci_dev *pdev)
{
	struct vfio_pci_core_device *core_device = pci_get_drvdata(pdev);

	return container_of(core_device, struct qat_vf_core_device, core_device);
}

static void qat_vf_pci_aer_reset_done(struct pci_dev *pdev)
{
	struct qat_vf_core_device *qat_vdev = qat_vf_drvdata(pdev);

	if (!qat_vdev->mdev)
		return;

	mutex_lock(&qat_vdev->state_mutex);
	qat_vf_reset_done(qat_vdev);
	mutex_unlock(&qat_vdev->state_mutex);
}

static int
qat_vf_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct qat_vf_core_device *qat_vdev;
	int ret;

	qat_vdev = vfio_alloc_device(qat_vf_core_device, core_device.vdev, dev, &qat_vf_pci_ops);
	if (IS_ERR(qat_vdev))
		return PTR_ERR(qat_vdev);

	pci_set_drvdata(pdev, &qat_vdev->core_device);
	ret = vfio_pci_core_register_device(&qat_vdev->core_device);
	if (ret)
		goto out_put_device;

	return 0;

out_put_device:
	vfio_put_device(&qat_vdev->core_device.vdev);
	return ret;
}

static void qat_vf_vfio_pci_remove(struct pci_dev *pdev)
{
	struct qat_vf_core_device *qat_vdev = qat_vf_drvdata(pdev);

	vfio_pci_core_unregister_device(&qat_vdev->core_device);
	vfio_put_device(&qat_vdev->core_device.vdev);
}

static const struct pci_device_id qat_vf_vfio_pci_table[] = {
	/* Intel QAT GEN4 4xxx VF device */
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4941) },
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4943) },
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4945) },
	{}
};
MODULE_DEVICE_TABLE(pci, qat_vf_vfio_pci_table);

static const struct pci_error_handlers qat_vf_err_handlers = {
	.reset_done = qat_vf_pci_aer_reset_done,
	.error_detected = vfio_pci_core_aer_err_detected,
};

static struct pci_driver qat_vf_vfio_pci_driver = {
	.name = "qat_vfio_pci",
	.id_table = qat_vf_vfio_pci_table,
	.probe = qat_vf_vfio_pci_probe,
	.remove = qat_vf_vfio_pci_remove,
	.err_handler = &qat_vf_err_handlers,
	.driver_managed_dma = true,
};
module_pci_driver(qat_vf_vfio_pci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xin Zeng <xin.zeng@intel.com>");
MODULE_DESCRIPTION("QAT VFIO PCI - VFIO PCI driver with live migration support for Intel(R) QAT GEN4 device family");
MODULE_IMPORT_NS(CRYPTO_QAT);