// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
 */

#include <linux/vfio.h>
#include <linux/cdx/cdx_bus.h>

#include "private.h"

/*
 * Open callback: build the per-device region table from the CDX device's
 * resources, reset the device to a clean state, and probe whether
 * bus-master-enable (BME) control is supported.
 *
 * Returns 0 on success, -ENOMEM if the region table cannot be allocated,
 * or the error from cdx_dev_reset().
 */
static int vfio_cdx_open_device(struct vfio_device *core_vdev)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	struct cdx_device *cdx_dev = to_cdx_device(core_vdev->dev);
	int count = cdx_dev->res_count;
	int i, ret;

	/* One vfio_cdx_region descriptor per CDX resource. */
	vdev->regions = kcalloc(count, sizeof(struct vfio_cdx_region),
				GFP_KERNEL_ACCOUNT);
	if (!vdev->regions)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		struct resource *res = &cdx_dev->res[i];

		vdev->regions[i].addr = res->start;
		vdev->regions[i].size = resource_size(res);
		vdev->regions[i].type = res->flags;
		/*
		 * Only regions addressed with PAGE granularity may be
		 * MMAP'ed securely.
		 */
		if (!(vdev->regions[i].addr & ~PAGE_MASK) &&
		    !(vdev->regions[i].size & ~PAGE_MASK))
			vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_MMAP;
		/* All regions are readable; writable unless marked read-only. */
		vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
		if (!(cdx_dev->res[i].flags & IORESOURCE_READONLY))
			vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_WRITE;
	}
	/* Start the user session from a known device state. */
	ret = cdx_dev_reset(core_vdev->dev);
	if (ret) {
		kfree(vdev->regions);
		vdev->regions = NULL;
		return ret;
	}
	/*
	 * Probe BME support by attempting to clear bus mastering; if the
	 * bus op fails, advertise the feature as unsupported (see
	 * vfio_cdx_bm_ctrl()) rather than failing the open.
	 */
	ret = cdx_clear_master(cdx_dev);
	if (ret)
		vdev->flags &= ~BME_SUPPORT;
	else
		vdev->flags |= BME_SUPPORT;

	return 0;
}

/*
 * Close callback: release the region table, reset the device so no
 * user-programmed state survives, and tear down any IRQ triggers.
 */
static void vfio_cdx_close_device(struct vfio_device *core_vdev)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);

	kfree(vdev->regions);
	cdx_dev_reset(core_vdev->dev);
	vfio_cdx_irqs_cleanup(vdev);
}

/*
 * Handle the VFIO_DEVICE_FEATURE_BUS_MASTER feature: set or clear bus
 * mastering on the CDX device per the user's op.
 *
 * Returns -ENOTTY if BME support was not detected at open time, a
 * vfio_check_feature() result for malformed requests, -EFAULT on bad
 * user pointer, -EINVAL for an unknown op, or the bus op's result.
 */
static int vfio_cdx_bm_ctrl(struct vfio_device *core_vdev, u32 flags,
			    void __user *arg, size_t argsz)
{
	size_t minsz =
		offsetofend(struct vfio_device_feature_bus_master, op);
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	struct cdx_device *cdx_dev = to_cdx_device(core_vdev->dev);
	struct vfio_device_feature_bus_master ops;
	int ret;

	if (!(vdev->flags & BME_SUPPORT))
		return -ENOTTY;

	/* Only SET is supported; 1 means "proceed with the operation". */
	ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET,
				 sizeof(ops));
	if (ret != 1)
		return ret;

	if (copy_from_user(&ops, arg, minsz))
		return -EFAULT;

	switch (ops.op) {
	case VFIO_DEVICE_FEATURE_CLEAR_MASTER:
		return cdx_clear_master(cdx_dev);
	case VFIO_DEVICE_FEATURE_SET_MASTER:
		return cdx_set_master(cdx_dev);
	default:
		return -EINVAL;
	}
}

/*
 * device_feature callback: dispatch supported VFIO device features.
 * Only BUS_MASTER is implemented; everything else is -ENOTTY.
 */
static int vfio_cdx_ioctl_feature(struct vfio_device *device, u32 flags,
				  void __user *arg, size_t argsz)
{
	switch (flags & VFIO_DEVICE_FEATURE_MASK) {
	case VFIO_DEVICE_FEATURE_BUS_MASTER:
		return vfio_cdx_bm_ctrl(device, flags, arg, argsz);
	default:
		return -ENOTTY;
	}
}

/*
 * VFIO_DEVICE_GET_INFO: report device flags, the number of regions
 * (one per CDX resource) and the number of IRQ indexes (a single index
 * when the device has MSIs, none otherwise).
 */
static int vfio_cdx_ioctl_get_info(struct vfio_cdx_device *vdev,
				   struct vfio_device_info __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_device_info, num_irqs);
	struct cdx_device *cdx_dev = to_cdx_device(vdev->vdev.dev);
	struct vfio_device_info info;

	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	info.flags = VFIO_DEVICE_FLAGS_CDX;
	info.flags |= VFIO_DEVICE_FLAGS_RESET;

	info.num_regions = cdx_dev->res_count;
	info.num_irqs = cdx_dev->num_msi ? 1 : 0;

	/* Copy back only the portion the kernel knows about (minsz). */
	return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
}

/*
 * VFIO_DEVICE_GET_REGION_INFO: describe one region from the table built
 * at open time. The returned offset encodes the region index (see
 * vfio_cdx_index_to_offset()) so mmap() can recover it from vm_pgoff.
 */
static int vfio_cdx_ioctl_get_region_info(struct vfio_device *core_vdev,
					  struct vfio_region_info __user *arg)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	unsigned long minsz = offsetofend(struct vfio_region_info, offset);
	struct cdx_device *cdx_dev = to_cdx_device(vdev->vdev.dev);
	struct vfio_region_info info;

	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	if (info.index >= cdx_dev->res_count)
		return -EINVAL;

	/* map offset to the physical address */
	info.offset = vfio_cdx_index_to_offset(info.index);
	info.size = vdev->regions[info.index].size;
	info.flags = vdev->regions[info.index].flags;

	return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
}

/*
 * VFIO_DEVICE_GET_IRQ_INFO: the device exposes a single IRQ index
 * (index 0) whose count is the number of MSIs; fails if the device
 * has no MSIs.
 */
static int vfio_cdx_ioctl_get_irq_info(struct vfio_cdx_device *vdev,
				       struct vfio_irq_info __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_irq_info, count);
	struct cdx_device *cdx_dev = to_cdx_device(vdev->vdev.dev);
	struct vfio_irq_info info;

	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	if (info.index >= 1)
		return -EINVAL;

	if (!cdx_dev->num_msi)
		return -EINVAL;

	info.flags = VFIO_IRQ_INFO_EVENTFD | VFIO_IRQ_INFO_NORESIZE;
	info.count = cdx_dev->num_msi;

	return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
}

/*
 * VFIO_DEVICE_SET_IRQS: validate the header against the device's MSI
 * count (one IRQ index), copy any trailing payload from userspace, and
 * hand off to vfio_cdx_set_irqs_ioctl().
 */
static int vfio_cdx_ioctl_set_irqs(struct vfio_cdx_device *vdev,
				   struct vfio_irq_set __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_irq_set, count);
	struct cdx_device *cdx_dev = to_cdx_device(vdev->vdev.dev);
	struct vfio_irq_set hdr;
	size_t data_size = 0;
	u8 *data = NULL;
	int ret = 0;

	if (copy_from_user(&hdr, arg, minsz))
		return -EFAULT;

	ret = vfio_set_irqs_validate_and_prepare(&hdr, cdx_dev->num_msi,
						 1, &data_size);
	if (ret)
		return ret;

	/* data_size == 0 means the request carries no payload (e.g. NONE). */
	if (data_size) {
		data = memdup_user(arg->data, data_size);
		if (IS_ERR(data))
			return PTR_ERR(data);
	}

	ret = vfio_cdx_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
				      hdr.start, hdr.count, data);
	kfree(data);

	return ret;
}

/*
 * Top-level ioctl dispatcher for the vfio-cdx device.
 * GET_REGION_INFO is handled via the dedicated .get_region_info op,
 * not here.
 */
static long vfio_cdx_ioctl(struct vfio_device *core_vdev,
			   unsigned int cmd, unsigned long arg)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	void __user *uarg = (void __user *)arg;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
		return vfio_cdx_ioctl_get_info(vdev, uarg);
	case VFIO_DEVICE_GET_IRQ_INFO:
		return vfio_cdx_ioctl_get_irq_info(vdev, uarg);
	case VFIO_DEVICE_SET_IRQS:
		return vfio_cdx_ioctl_set_irqs(vdev, uarg);
	case VFIO_DEVICE_RESET:
		return cdx_dev_reset(core_vdev->dev);
	default:
		return -ENOTTY;
	}
}

/*
 * Map one MMIO region into the caller's VMA. The low bits of vm_pgoff
 * (below VFIO_CDX_OFFSET_SHIFT) select the page offset within the
 * region; the requested span must fit inside the region.
 */
static int vfio_cdx_mmap_mmio(struct vfio_cdx_region region,
			      struct vm_area_struct *vma)
{
	u64 size = vma->vm_end - vma->vm_start;
	u64 pgoff, base;

	pgoff = vma->vm_pgoff &
		((1U << (VFIO_CDX_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	base = pgoff << PAGE_SHIFT;

	if (base + size > region.size)
		return -EINVAL;

	/* Rebase vm_pgoff onto the region's physical page frame. */
	vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;
	vma->vm_page_prot = pgprot_device(vma->vm_page_prot);

	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				  size, vma->vm_page_prot);
}

/*
 * mmap callback: recover the region index from the high bits of
 * vm_pgoff, check that the region is mmap-capable and that the VMA's
 * access mode does not exceed the region's READ/WRITE flags, then map.
 */
static int vfio_cdx_mmap(struct vfio_device *core_vdev,
			 struct vm_area_struct *vma)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	struct cdx_device *cdx_dev = to_cdx_device(core_vdev->dev);
	unsigned int index;

	index = vma->vm_pgoff >> (VFIO_CDX_OFFSET_SHIFT - PAGE_SHIFT);

	if (index >= cdx_dev->res_count)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ) &&
	    (vma->vm_flags & VM_READ))
		return -EPERM;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE) &&
	    (vma->vm_flags & VM_WRITE))
		return -EPERM;

	return vfio_cdx_mmap_mmio(vdev->regions[index], vma);
}

/* VFIO device ops; IOMMUFD support uses the generic physical helpers. */
static const struct vfio_device_ops vfio_cdx_ops = {
	.name		= "vfio-cdx",
	.open_device	= vfio_cdx_open_device,
	.close_device	= vfio_cdx_close_device,
	.ioctl		= vfio_cdx_ioctl,
	.get_region_info = vfio_cdx_ioctl_get_region_info,
	.device_feature = vfio_cdx_ioctl_feature,
	.mmap		= vfio_cdx_mmap,
	.bind_iommufd	= vfio_iommufd_physical_bind,
	.unbind_iommufd	= vfio_iommufd_physical_unbind,
	.attach_ioas	= vfio_iommufd_physical_attach_ioas,
};

/*
 * Bus probe: allocate the vfio_cdx_device (embedding the vfio_device),
 * register it with the VFIO core, and stash it in drvdata for remove().
 */
static int vfio_cdx_probe(struct cdx_device *cdx_dev)
{
	struct vfio_cdx_device *vdev;
	struct device *dev = &cdx_dev->dev;
	int ret;

	vdev = vfio_alloc_device(vfio_cdx_device, vdev, dev,
				 &vfio_cdx_ops);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);

	ret = vfio_register_group_dev(&vdev->vdev);
	if (ret)
		goto out_uninit;

	dev_set_drvdata(dev, vdev);
	return 0;

out_uninit:
	vfio_put_device(&vdev->vdev);
	return ret;
}

/* Bus remove: unregister from VFIO and drop the device reference. */
static int vfio_cdx_remove(struct cdx_device *cdx_dev)
{
	struct device *dev = &cdx_dev->dev;
	struct vfio_cdx_device *vdev = dev_get_drvdata(dev);

	vfio_unregister_group_dev(&vdev->vdev);
	vfio_put_device(&vdev->vdev);

	return 0;
}

static const struct cdx_device_id vfio_cdx_table[] = {
	{ CDX_DEVICE_DRIVER_OVERRIDE(CDX_ANY_ID, CDX_ANY_ID,
		      CDX_ID_F_VFIO_DRIVER_OVERRIDE) }, /* match all by default */
	{}
};

MODULE_DEVICE_TABLE(cdx, vfio_cdx_table);

static struct cdx_driver vfio_cdx_driver = {
	.probe		= vfio_cdx_probe,
	.remove		= vfio_cdx_remove,
	.match_id_table	= vfio_cdx_table,
	.driver	= {
		.name	= "vfio-cdx",
	},
	.driver_managed_dma = true,
};

module_driver(vfio_cdx_driver, cdx_driver_register, cdx_driver_unregister);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VFIO for CDX devices - User Level meta-driver");
MODULE_IMPORT_NS("CDX_BUS");