// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDPA device simulator core.
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/dma-map-ops.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>
#include <linux/iova.h>
#include <uapi/linux/vdpa.h>

#include "vdpa_sim.h"

#define DRV_VERSION  "0.1"
#define DRV_AUTHOR   "Jason Wang <jasowang@redhat.com>"
#define DRV_DESC     "vDPA Device Simulator core"
#define DRV_LICENSE  "GPL v2"

static int batch_mapping = 1;
module_param(batch_mapping, int, 0444);
MODULE_PARM_DESC(batch_mapping, "Batched mapping 1 -Enable; 0 - Disable");

static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
                 "Maximum number of iotlb entries for each address space. 0 means unlimited. (default: 2048)");

#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
#define VDPASIM_VENDOR_ID 0

static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
        return container_of(vdpa, struct vdpasim, vdpa);
}

static struct vdpasim *dev_to_sim(struct device *dev)
{
        struct vdpa_device *vdpa = dev_to_vdpa(dev);

        return vdpa_to_sim(vdpa);
}

static void vdpasim_vq_notify(struct vringh *vring)
{
        struct vdpasim_virtqueue *vq =
                container_of(vring, struct vdpasim_virtqueue, vring);

        if (!vq->cb)
                return;

        vq->cb(vq->private);
}

static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
        uint16_t last_avail_idx = vq->vring.last_avail_idx;

        vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num, false,
                          (struct vring_desc *)(uintptr_t)vq->desc_addr,
                          (struct vring_avail *)
                          (uintptr_t)vq->driver_addr,
                          (struct vring_used *)
                          (uintptr_t)vq->device_addr);

        vq->vring.last_avail_idx = last_avail_idx;
        vq->vring.notify = vdpasim_vq_notify;
}

static void vdpasim_vq_reset(struct vdpasim *vdpasim,
                             struct vdpasim_virtqueue *vq)
{
        vq->ready = false;
        vq->desc_addr = 0;
        vq->driver_addr = 0;
        vq->device_addr = 0;
        vq->cb = NULL;
        vq->private = NULL;
        vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
                          VDPASIM_QUEUE_MAX, false, NULL, NULL, NULL);

        vq->vring.notify = NULL;
}

static void vdpasim_do_reset(struct vdpasim *vdpasim)
{
        int i;

        spin_lock(&vdpasim->iommu_lock);

        for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
                vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]);
                vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
                                 &vdpasim->iommu_lock);
        }

        for (i = 0; i < vdpasim->dev_attr.nas; i++)
                vhost_iotlb_reset(&vdpasim->iommu[i]);

        vdpasim->running = true;
        spin_unlock(&vdpasim->iommu_lock);

        vdpasim->features = 0;
        vdpasim->status = 0;
        ++vdpasim->generation;
}

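/*
 * Translate a DMA API transfer direction into the vhost IOTLB access
 * permission used by the simulator's software IOMMU: device-to-memory
 * transfers need write access, memory-to-device transfers need read
 * access, and bidirectional transfers need both.
 */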
static int dir_to_perm(enum dma_data_direction dir)
{
        int perm = -EFAULT;

        switch (dir) {
        case DMA_FROM_DEVICE:
                perm = VHOST_MAP_WO;
                break;
        case DMA_TO_DEVICE:
                perm = VHOST_MAP_RO;
                break;
        case DMA_BIDIRECTIONAL:
                perm = VHOST_MAP_RW;
                break;
        default:
                break;
        }

        return perm;
}

static dma_addr_t vdpasim_map_range(struct vdpasim *vdpasim, phys_addr_t paddr,
                                    size_t size, unsigned int perm)
{
        struct iova *iova;
        dma_addr_t dma_addr;
        int ret;

        /* We set the limit_pfn to the maximum (ULONG_MAX - 1) */
        iova = alloc_iova(&vdpasim->iova, size >> iova_shift(&vdpasim->iova),
                          ULONG_MAX - 1, true);
        if (!iova)
                return DMA_MAPPING_ERROR;

        dma_addr = iova_dma_addr(&vdpasim->iova, iova);

        spin_lock(&vdpasim->iommu_lock);
        ret = vhost_iotlb_add_range(&vdpasim->iommu[0], (u64)dma_addr,
                                    (u64)dma_addr + size - 1, (u64)paddr, perm);
        spin_unlock(&vdpasim->iommu_lock);

        if (ret) {
                __free_iova(&vdpasim->iova, iova);
                return DMA_MAPPING_ERROR;
        }

        return dma_addr;
}

static void vdpasim_unmap_range(struct vdpasim *vdpasim, dma_addr_t dma_addr,
                                size_t size)
{
        spin_lock(&vdpasim->iommu_lock);
        vhost_iotlb_del_range(&vdpasim->iommu[0], (u64)dma_addr,
                              (u64)dma_addr + size - 1);
        spin_unlock(&vdpasim->iommu_lock);

        free_iova(&vdpasim->iova, iova_pfn(&vdpasim->iova, dma_addr));
}

static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
                                   unsigned long attrs)
{
        struct vdpasim *vdpasim = dev_to_sim(dev);
        phys_addr_t paddr = page_to_phys(page) + offset;
        int perm = dir_to_perm(dir);

        if (perm < 0)
                return DMA_MAPPING_ERROR;

        return vdpasim_map_range(vdpasim, paddr, size, perm);
}

static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
                               size_t size, enum dma_data_direction dir,
                               unsigned long attrs)
{
        struct vdpasim *vdpasim = dev_to_sim(dev);

        vdpasim_unmap_range(vdpasim, dma_addr, size);
}

static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
                                    dma_addr_t *dma_addr, gfp_t flag,
                                    unsigned long attrs)
{
        struct vdpasim *vdpasim = dev_to_sim(dev);
        phys_addr_t paddr;
        void *addr;

        addr = kmalloc(size, flag);
        if (!addr) {
                *dma_addr = DMA_MAPPING_ERROR;
                return NULL;
        }

        paddr = virt_to_phys(addr);

        *dma_addr = vdpasim_map_range(vdpasim, paddr, size, VHOST_MAP_RW);
        if (*dma_addr == DMA_MAPPING_ERROR) {
                kfree(addr);
                return NULL;
        }

        return addr;
}

static void vdpasim_free_coherent(struct device *dev, size_t size,
                                  void *vaddr, dma_addr_t dma_addr,
                                  unsigned long attrs)
{
        struct vdpasim *vdpasim = dev_to_sim(dev);

        vdpasim_unmap_range(vdpasim, dma_addr, size);

        kfree(vaddr);
}

static const struct dma_map_ops vdpasim_dma_ops = {
        .map_page = vdpasim_map_page,
        .unmap_page = vdpasim_unmap_page,
        .alloc = vdpasim_alloc_coherent,
        .free = vdpasim_free_coherent,
};

static const struct vdpa_config_ops vdpasim_config_ops;
static const struct vdpa_config_ops vdpasim_batch_config_ops;

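/*
 * Allocate and initialize a simulator instance: validate any user-provided
 * device features against the supported set, pick the batched or per-range
 * mapping ops table, and set up the config space, virtqueues, per-address-space
 * IOTLBs, data buffer and the byte-granularity IOVA domain used by the DMA ops.
 */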
struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
                               const struct vdpa_dev_set_config *config)
{
        const struct vdpa_config_ops *ops;
        struct vdpasim *vdpasim;
        struct device *dev;
        int i, ret = -ENOMEM;

        if (config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
                if (config->device_features &
                    ~dev_attr->supported_features)
                        return ERR_PTR(-EINVAL);
                dev_attr->supported_features =
                        config->device_features;
        }

        if (batch_mapping)
                ops = &vdpasim_batch_config_ops;
        else
                ops = &vdpasim_config_ops;

        vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
                                    dev_attr->ngroups, dev_attr->nas,
                                    dev_attr->name, false);
        if (IS_ERR(vdpasim)) {
                ret = PTR_ERR(vdpasim);
                goto err_alloc;
        }

        vdpasim->dev_attr = *dev_attr;
        INIT_WORK(&vdpasim->work, dev_attr->work_fn);
        spin_lock_init(&vdpasim->lock);
        spin_lock_init(&vdpasim->iommu_lock);

        dev = &vdpasim->vdpa.dev;
        dev->dma_mask = &dev->coherent_dma_mask;
        if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
                goto err_iommu;
        set_dma_ops(dev, &vdpasim_dma_ops);
        vdpasim->vdpa.mdev = dev_attr->mgmt_dev;

        vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL);
        if (!vdpasim->config)
                goto err_iommu;

        vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue),
                               GFP_KERNEL);
        if (!vdpasim->vqs)
                goto err_iommu;

        vdpasim->iommu = kmalloc_array(vdpasim->dev_attr.nas,
                                       sizeof(*vdpasim->iommu), GFP_KERNEL);
        if (!vdpasim->iommu)
                goto err_iommu;

        for (i = 0; i < vdpasim->dev_attr.nas; i++)
                vhost_iotlb_init(&vdpasim->iommu[i], max_iotlb_entries, 0);

        vdpasim->buffer = kvmalloc(dev_attr->buffer_size, GFP_KERNEL);
        if (!vdpasim->buffer)
                goto err_iommu;

        for (i = 0; i < dev_attr->nvqs; i++)
                vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
                                 &vdpasim->iommu_lock);

        ret = iova_cache_get();
        if (ret)
                goto err_iommu;

        /* For simplicity we use an IOVA allocator with byte granularity */
        init_iova_domain(&vdpasim->iova, 1, 0);

        vdpasim->vdpa.dma_dev = dev;

        return vdpasim;

err_iommu:
        put_device(dev);
err_alloc:
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(vdpasim_create);

static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
                                  u64 desc_area, u64 driver_area,
                                  u64 device_area)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        vq->desc_addr = desc_area;
        vq->driver_addr = driver_area;
        vq->device_addr = device_area;

        return 0;
}

static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        vq->num = num;
}

static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        if (vq->ready)
                schedule_work(&vdpasim->work);
}

static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
                              struct vdpa_callback *cb)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        vq->cb = cb->callback;
        vq->private = cb->private;
}

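/*
 * Enabling a previously disabled virtqueue (re)initializes its vringh via
 * vdpasim_queue_ready() from the addresses programmed through
 * set_vq_address, preserving last_avail_idx so a restarted queue resumes
 * where it left off.
 */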
static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
        bool old_ready;

        spin_lock(&vdpasim->lock);
        old_ready = vq->ready;
        vq->ready = ready;
        if (vq->ready && !old_ready) {
                vdpasim_queue_ready(vdpasim, idx);
        }
        spin_unlock(&vdpasim->lock);
}

static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        return vq->ready;
}

static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
                                const struct vdpa_vq_state *state)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
        struct vringh *vrh = &vq->vring;

        spin_lock(&vdpasim->lock);
        vrh->last_avail_idx = state->split.avail_index;
        spin_unlock(&vdpasim->lock);

        return 0;
}

static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
                                struct vdpa_vq_state *state)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
        struct vringh *vrh = &vq->vring;

        state->split.avail_index = vrh->last_avail_idx;
        return 0;
}

static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
{
        return VDPASIM_QUEUE_ALIGN;
}

static u32 vdpasim_get_vq_group(struct vdpa_device *vdpa, u16 idx)
{
        /* RX and TX belong to group 0, CVQ belongs to group 1 */
        if (idx == 2)
                return 1;
        else
                return 0;
}

static u64 vdpasim_get_device_features(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        return vdpasim->dev_attr.supported_features;
}

static int vdpasim_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        /* DMA mapping must be done by driver */
        if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
                return -EINVAL;

        vdpasim->features = features & vdpasim->dev_attr.supported_features;

        return 0;
}

static u64 vdpasim_get_driver_features(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        return vdpasim->features;
}

static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
                                  struct vdpa_callback *cb)
{
        /* We don't support config interrupt */
}

static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
{
        return VDPASIM_QUEUE_MAX;
}

static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        return vdpasim->dev_attr.id;
}

static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
{
        return VDPASIM_VENDOR_ID;
}

static u8 vdpasim_get_status(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        u8 status;

        spin_lock(&vdpasim->lock);
        status = vdpasim->status;
        spin_unlock(&vdpasim->lock);

        return status;
}

static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        spin_lock(&vdpasim->lock);
        vdpasim->status = status;
        spin_unlock(&vdpasim->lock);
}

static int vdpasim_reset(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        spin_lock(&vdpasim->lock);
        vdpasim->status = 0;
        vdpasim_do_reset(vdpasim);
        spin_unlock(&vdpasim->lock);

        return 0;
}

static int vdpasim_suspend(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        spin_lock(&vdpasim->lock);
        vdpasim->running = false;
        spin_unlock(&vdpasim->lock);

        return 0;
}

static size_t vdpasim_get_config_size(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        return vdpasim->dev_attr.config_size;
}

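/*
 * Config space accesses are bounds-checked against the device's config
 * size; reads refresh the cached config through the optional device-specific
 * get_config() hook, and writes are propagated through the optional
 * set_config() hook.
 */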
static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
                               void *buf, unsigned int len)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        if (offset + len > vdpasim->dev_attr.config_size)
                return;

        if (vdpasim->dev_attr.get_config)
                vdpasim->dev_attr.get_config(vdpasim, vdpasim->config);

        memcpy(buf, vdpasim->config + offset, len);
}

static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
                               const void *buf, unsigned int len)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        if (offset + len > vdpasim->dev_attr.config_size)
                return;

        memcpy(vdpasim->config + offset, buf, len);

        if (vdpasim->dev_attr.set_config)
                vdpasim->dev_attr.set_config(vdpasim, vdpasim->config);
}

static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        return vdpasim->generation;
}

static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
{
        struct vdpa_iova_range range = {
                .first = 0ULL,
                .last = ULLONG_MAX,
        };

        return range;
}

static int vdpasim_set_group_asid(struct vdpa_device *vdpa, unsigned int group,
                                  unsigned int asid)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vhost_iotlb *iommu;
        int i;

        if (group > vdpasim->dev_attr.ngroups)
                return -EINVAL;

        if (asid >= vdpasim->dev_attr.nas)
                return -EINVAL;

        iommu = &vdpasim->iommu[asid];

        spin_lock(&vdpasim->lock);

        for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
                if (vdpasim_get_vq_group(vdpa, i) == group)
                        vringh_set_iotlb(&vdpasim->vqs[i].vring, iommu,
                                         &vdpasim->iommu_lock);

        spin_unlock(&vdpasim->lock);

        return 0;
}

static int vdpasim_set_map(struct vdpa_device *vdpa, unsigned int asid,
                           struct vhost_iotlb *iotlb)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vhost_iotlb_map *map;
        struct vhost_iotlb *iommu;
        u64 start = 0ULL, last = 0ULL - 1;
        int ret;

        if (asid >= vdpasim->dev_attr.nas)
                return -EINVAL;

        spin_lock(&vdpasim->iommu_lock);

        iommu = &vdpasim->iommu[asid];
        vhost_iotlb_reset(iommu);

        for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
             map = vhost_iotlb_itree_next(map, start, last)) {
                ret = vhost_iotlb_add_range(iommu, map->start,
                                            map->last, map->addr, map->perm);
                if (ret)
                        goto err;
        }
        spin_unlock(&vdpasim->iommu_lock);
        return 0;

err:
        vhost_iotlb_reset(iommu);
        spin_unlock(&vdpasim->iommu_lock);
        return ret;
}

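/*
 * Non-batched mapping path: .dma_map/.dma_unmap update a single range in the
 * IOTLB of the given address space, whereas vdpasim_batch_config_ops installs
 * .set_map above to replace the whole IOTLB in one call.
 */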
static int vdpasim_dma_map(struct vdpa_device *vdpa, unsigned int asid,
                           u64 iova, u64 size,
                           u64 pa, u32 perm, void *opaque)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        int ret;

        if (asid >= vdpasim->dev_attr.nas)
                return -EINVAL;

        spin_lock(&vdpasim->iommu_lock);
        ret = vhost_iotlb_add_range_ctx(&vdpasim->iommu[asid], iova,
                                        iova + size - 1, pa, perm, opaque);
        spin_unlock(&vdpasim->iommu_lock);

        return ret;
}

static int vdpasim_dma_unmap(struct vdpa_device *vdpa, unsigned int asid,
                             u64 iova, u64 size)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        if (asid >= vdpasim->dev_attr.nas)
                return -EINVAL;

        spin_lock(&vdpasim->iommu_lock);
        vhost_iotlb_del_range(&vdpasim->iommu[asid], iova, iova + size - 1);
        spin_unlock(&vdpasim->iommu_lock);

        return 0;
}

static void vdpasim_free(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        int i;

        cancel_work_sync(&vdpasim->work);

        for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
                vringh_kiov_cleanup(&vdpasim->vqs[i].out_iov);
                vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov);
        }

        if (vdpa_get_dma_dev(vdpa)) {
                put_iova_domain(&vdpasim->iova);
                iova_cache_put();
        }

        kvfree(vdpasim->buffer);
        for (i = 0; i < vdpasim->dev_attr.nas; i++)
                vhost_iotlb_reset(&vdpasim->iommu[i]);
        kfree(vdpasim->iommu);
        kfree(vdpasim->vqs);
        kfree(vdpasim->config);
}

static const struct vdpa_config_ops vdpasim_config_ops = {
        .set_vq_address = vdpasim_set_vq_address,
        .set_vq_num = vdpasim_set_vq_num,
        .kick_vq = vdpasim_kick_vq,
        .set_vq_cb = vdpasim_set_vq_cb,
        .set_vq_ready = vdpasim_set_vq_ready,
        .get_vq_ready = vdpasim_get_vq_ready,
        .set_vq_state = vdpasim_set_vq_state,
        .get_vq_state = vdpasim_get_vq_state,
        .get_vq_align = vdpasim_get_vq_align,
        .get_vq_group = vdpasim_get_vq_group,
        .get_device_features = vdpasim_get_device_features,
        .set_driver_features = vdpasim_set_driver_features,
        .get_driver_features = vdpasim_get_driver_features,
        .set_config_cb = vdpasim_set_config_cb,
        .get_vq_num_max = vdpasim_get_vq_num_max,
        .get_device_id = vdpasim_get_device_id,
        .get_vendor_id = vdpasim_get_vendor_id,
        .get_status = vdpasim_get_status,
        .set_status = vdpasim_set_status,
        .reset = vdpasim_reset,
        .suspend = vdpasim_suspend,
        .get_config_size = vdpasim_get_config_size,
        .get_config = vdpasim_get_config,
        .set_config = vdpasim_set_config,
        .get_generation = vdpasim_get_generation,
        .get_iova_range = vdpasim_get_iova_range,
        .set_group_asid = vdpasim_set_group_asid,
        .dma_map = vdpasim_dma_map,
        .dma_unmap = vdpasim_dma_unmap,
        .free = vdpasim_free,
};

static const struct vdpa_config_ops vdpasim_batch_config_ops = {
        .set_vq_address = vdpasim_set_vq_address,
        .set_vq_num = vdpasim_set_vq_num,
        .kick_vq = vdpasim_kick_vq,
        .set_vq_cb = vdpasim_set_vq_cb,
        .set_vq_ready = vdpasim_set_vq_ready,
        .get_vq_ready = vdpasim_get_vq_ready,
        .set_vq_state = vdpasim_set_vq_state,
        .get_vq_state = vdpasim_get_vq_state,
        .get_vq_align = vdpasim_get_vq_align,
        .get_vq_group = vdpasim_get_vq_group,
        .get_device_features = vdpasim_get_device_features,
        .set_driver_features = vdpasim_set_driver_features,
        .get_driver_features = vdpasim_get_driver_features,
        .set_config_cb = vdpasim_set_config_cb,
        .get_vq_num_max = vdpasim_get_vq_num_max,
        .get_device_id = vdpasim_get_device_id,
        .get_vendor_id = vdpasim_get_vendor_id,
        .get_status = vdpasim_get_status,
        .set_status = vdpasim_set_status,
        .reset = vdpasim_reset,
        .suspend = vdpasim_suspend,
        .get_config_size = vdpasim_get_config_size,
        .get_config = vdpasim_get_config,
        .set_config = vdpasim_set_config,
        .get_generation = vdpasim_get_generation,
        .get_iova_range = vdpasim_get_iova_range,
        .set_group_asid = vdpasim_set_group_asid,
        .set_map = vdpasim_set_map,
        .free = vdpasim_free,
};

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);