1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * vfio-ISM driver for s390 4 * 5 * Copyright IBM Corp. 6 */ 7 8 #include <linux/slab.h> 9 #include "../vfio_pci_priv.h" 10 11 #define ISM_VFIO_PCI_OFFSET_SHIFT 48 12 #define ISM_VFIO_PCI_OFFSET_TO_INDEX(off) ((off) >> ISM_VFIO_PCI_OFFSET_SHIFT) 13 #define ISM_VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << ISM_VFIO_PCI_OFFSET_SHIFT) 14 #define ISM_VFIO_PCI_OFFSET_MASK (((u64)(1) << ISM_VFIO_PCI_OFFSET_SHIFT) - 1) 15 16 /* 17 * Use __zpci_load() to bypass automatic use of 18 * PCI MIO instructions which are not supported on ISM devices 19 */ 20 #define ISM_READ(size) \ 21 static int ism_read##size(struct zpci_dev *zdev, int bar, \ 22 size_t *filled, char __user *buf, \ 23 loff_t off) \ 24 { \ 25 u64 req, tmp; \ 26 u##size val; \ 27 int ret; \ 28 \ 29 req = ZPCI_CREATE_REQ(READ_ONCE(zdev->fh), bar, sizeof(val)); \ 30 ret = __zpci_load(&tmp, req, off); \ 31 if (ret) \ 32 return ret; \ 33 val = (u##size)tmp; \ 34 if (copy_to_user(buf, &val, sizeof(val))) \ 35 return -EFAULT; \ 36 *filled = sizeof(val); \ 37 return 0; \ 38 } 39 40 ISM_READ(64); 41 ISM_READ(32); 42 ISM_READ(16); 43 ISM_READ(8); 44 45 struct ism_vfio_pci_core_device { 46 struct vfio_pci_core_device core_device; 47 struct kmem_cache *store_block_cache; 48 }; 49 50 static int ism_vfio_pci_open_device(struct vfio_device *core_vdev) 51 { 52 struct ism_vfio_pci_core_device *ivpcd; 53 struct vfio_pci_core_device *vdev; 54 int ret; 55 56 ivpcd = container_of(core_vdev, struct ism_vfio_pci_core_device, 57 core_device.vdev); 58 vdev = &ivpcd->core_device; 59 60 ret = vfio_pci_core_enable(vdev); 61 if (ret) 62 return ret; 63 64 vfio_pci_core_finish_enable(vdev); 65 return 0; 66 } 67 68 /* 69 * ism_vfio_pci_do_io_r() 70 * 71 * On s390, kernel primitives such as ioread() and iowrite() are switched over 72 * from function-handle-based PCI load/stores instructions to PCI memory-I/O (MIO) 73 * loads/stores when these are available and not explicitly disabled. 
Since these 74 * instructions cannot be used with ISM devices, ensure that classic 75 * function-handle-based PCI instructions are used instead. 76 */ 77 static ssize_t ism_vfio_pci_do_io_r(struct vfio_pci_core_device *vdev, 78 char __user *buf, loff_t off, size_t count, 79 int bar) 80 { 81 struct zpci_dev *zdev = to_zpci(vdev->pdev); 82 ssize_t done = 0; 83 int ret; 84 85 while (count) { 86 size_t filled; 87 88 if (count >= 8 && IS_ALIGNED(off, 8)) { 89 ret = ism_read64(zdev, bar, &filled, buf, off); 90 if (ret) 91 return ret; 92 } else if (count >= 4 && IS_ALIGNED(off, 4)) { 93 ret = ism_read32(zdev, bar, &filled, buf, off); 94 if (ret) 95 return ret; 96 } else if (count >= 2 && IS_ALIGNED(off, 2)) { 97 ret = ism_read16(zdev, bar, &filled, buf, off); 98 if (ret) 99 return ret; 100 } else { 101 ret = ism_read8(zdev, bar, &filled, buf, off); 102 if (ret) 103 return ret; 104 } 105 106 count -= filled; 107 done += filled; 108 off += filled; 109 buf += filled; 110 } 111 112 return done; 113 } 114 115 /* 116 * ism_vfio_pci_do_io_w() 117 * 118 * Ensure that the PCI store block (PCISTB) instruction is used as required by the 119 * ISM device. The ISM device also uses a 256 TiB BAR 0 for write operations, 120 * which requires a 48bit region address space (ISM_VFIO_PCI_OFFSET_SHIFT). 
121 */ 122 static ssize_t ism_vfio_pci_do_io_w(struct vfio_pci_core_device *vdev, 123 char __user *buf, loff_t off, size_t count, 124 int bar) 125 { 126 struct zpci_dev *zdev = to_zpci(vdev->pdev); 127 struct ism_vfio_pci_core_device *ivpcd; 128 ssize_t ret; 129 void *data; 130 u64 req; 131 132 if (count > zdev->maxstbl) 133 return -EINVAL; 134 if (((off % PAGE_SIZE) + count) > PAGE_SIZE) 135 return -EINVAL; 136 137 ivpcd = container_of(vdev, struct ism_vfio_pci_core_device, 138 core_device); 139 data = kmem_cache_alloc(ivpcd->store_block_cache, GFP_KERNEL); 140 if (!data) 141 return -ENOMEM; 142 143 if (copy_from_user(data, buf, count)) { 144 ret = -EFAULT; 145 goto out_free; 146 } 147 148 req = ZPCI_CREATE_REQ(READ_ONCE(zdev->fh), bar, count); 149 ret = __zpci_store_block(data, req, off); 150 if (ret) 151 goto out_free; 152 153 ret = count; 154 155 out_free: 156 kmem_cache_free(ivpcd->store_block_cache, data); 157 return ret; 158 } 159 160 static ssize_t ism_vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, 161 char __user *buf, size_t count, loff_t *ppos, 162 bool iswrite) 163 { 164 int bar = ISM_VFIO_PCI_OFFSET_TO_INDEX(*ppos); 165 loff_t pos = *ppos & ISM_VFIO_PCI_OFFSET_MASK; 166 resource_size_t end; 167 ssize_t done = 0; 168 169 if (pci_resource_start(vdev->pdev, bar)) 170 end = pci_resource_len(vdev->pdev, bar); 171 else 172 return -EINVAL; 173 174 if (pos >= end) 175 return -EINVAL; 176 177 count = min(count, (size_t)(end - pos)); 178 179 if (iswrite) 180 done = ism_vfio_pci_do_io_w(vdev, buf, pos, count, bar); 181 else 182 done = ism_vfio_pci_do_io_r(vdev, buf, pos, count, bar); 183 184 if (done >= 0) 185 *ppos += done; 186 187 return done; 188 } 189 190 static ssize_t ism_vfio_pci_config_rw(struct vfio_pci_core_device *vdev, 191 char __user *buf, size_t count, 192 loff_t *ppos, bool iswrite) 193 { 194 loff_t pos = *ppos; 195 size_t done = 0; 196 int ret = 0; 197 198 pos &= ISM_VFIO_PCI_OFFSET_MASK; 199 200 while (count) { 201 /* 202 * zPCI must not use 
MIO instructions for config space access, 203 * so we can use common code path here. 204 */ 205 ret = vfio_pci_config_rw_single(vdev, buf, count, &pos, iswrite); 206 if (ret < 0) 207 return ret; 208 209 count -= ret; 210 done += ret; 211 buf += ret; 212 pos += ret; 213 } 214 215 *ppos += done; 216 217 return done; 218 } 219 220 static ssize_t ism_vfio_pci_rw(struct vfio_device *core_vdev, char __user *buf, 221 size_t count, loff_t *ppos, bool iswrite) 222 { 223 unsigned int index = ISM_VFIO_PCI_OFFSET_TO_INDEX(*ppos); 224 struct vfio_pci_core_device *vdev; 225 int ret; 226 227 vdev = container_of(core_vdev, struct vfio_pci_core_device, vdev); 228 229 if (!count) 230 return 0; 231 232 switch (index) { 233 case VFIO_PCI_CONFIG_REGION_INDEX: 234 ret = ism_vfio_pci_config_rw(vdev, buf, count, ppos, iswrite); 235 break; 236 237 case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX: 238 ret = ism_vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite); 239 break; 240 241 default: 242 return -EINVAL; 243 } 244 245 return ret; 246 } 247 248 static ssize_t ism_vfio_pci_read(struct vfio_device *core_vdev, 249 char __user *buf, size_t count, loff_t *ppos) 250 { 251 return ism_vfio_pci_rw(core_vdev, buf, count, ppos, false); 252 } 253 254 static ssize_t ism_vfio_pci_write(struct vfio_device *core_vdev, 255 const char __user *buf, size_t count, 256 loff_t *ppos) 257 { 258 return ism_vfio_pci_rw(core_vdev, (char __user *)buf, count, ppos, 259 true); 260 } 261 262 static int ism_vfio_pci_ioctl_get_region_info(struct vfio_device *core_vdev, 263 struct vfio_region_info *info, 264 struct vfio_info_cap *caps) 265 { 266 struct vfio_pci_core_device *vdev = 267 container_of(core_vdev, struct vfio_pci_core_device, vdev); 268 struct pci_dev *pdev = vdev->pdev; 269 270 switch (info->index) { 271 case VFIO_PCI_CONFIG_REGION_INDEX: 272 info->offset = ISM_VFIO_PCI_INDEX_TO_OFFSET(info->index); 273 info->size = pdev->cfg_size; 274 info->flags = VFIO_REGION_INFO_FLAG_READ | 275 
VFIO_REGION_INFO_FLAG_WRITE; 276 break; 277 case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX: 278 info->offset = ISM_VFIO_PCI_INDEX_TO_OFFSET(info->index); 279 info->size = pci_resource_len(pdev, info->index); 280 if (!info->size) { 281 info->flags = 0; 282 break; 283 } 284 info->flags = VFIO_REGION_INFO_FLAG_READ | 285 VFIO_REGION_INFO_FLAG_WRITE; 286 break; 287 default: 288 info->offset = 0; 289 info->size = 0; 290 info->flags = 0; 291 return -EINVAL; 292 } 293 return 0; 294 } 295 296 static int ism_vfio_pci_init_dev(struct vfio_device *core_vdev) 297 { 298 struct zpci_dev *zdev = to_zpci(to_pci_dev(core_vdev->dev)); 299 struct ism_vfio_pci_core_device *ivpcd; 300 char cache_name[20]; 301 int ret; 302 303 ivpcd = container_of(core_vdev, struct ism_vfio_pci_core_device, 304 core_device.vdev); 305 306 snprintf(cache_name, sizeof(cache_name), "ism_sb_fid_%08x", zdev->fid); 307 308 ivpcd->store_block_cache = 309 kmem_cache_create(cache_name, zdev->maxstbl, 310 (&(struct kmem_cache_args){ 311 .align = PAGE_SIZE, 312 .useroffset = 0, 313 .usersize = zdev->maxstbl, 314 }), 315 (SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT)); 316 if (!ivpcd->store_block_cache) 317 return -ENOMEM; 318 319 ret = vfio_pci_core_init_dev(core_vdev); 320 if (ret) 321 kmem_cache_destroy(ivpcd->store_block_cache); 322 323 return ret; 324 } 325 326 static void ism_vfio_pci_release_dev(struct vfio_device *core_vdev) 327 { 328 struct ism_vfio_pci_core_device *ivpcd = container_of( 329 core_vdev, struct ism_vfio_pci_core_device, core_device.vdev); 330 331 kmem_cache_destroy(ivpcd->store_block_cache); 332 vfio_pci_core_release_dev(core_vdev); 333 } 334 335 static const struct vfio_device_ops ism_pci_ops = { 336 .name = "ism-vfio-pci", 337 .init = ism_vfio_pci_init_dev, 338 .release = ism_vfio_pci_release_dev, 339 .open_device = ism_vfio_pci_open_device, 340 .close_device = vfio_pci_core_close_device, 341 .ioctl = vfio_pci_core_ioctl, 342 .get_region_info_caps = 
ism_vfio_pci_ioctl_get_region_info, 343 .device_feature = vfio_pci_core_ioctl_feature, 344 .read = ism_vfio_pci_read, 345 .write = ism_vfio_pci_write, 346 .request = vfio_pci_core_request, 347 .match = vfio_pci_core_match, 348 .match_token_uuid = vfio_pci_core_match_token_uuid, 349 .bind_iommufd = vfio_iommufd_physical_bind, 350 .unbind_iommufd = vfio_iommufd_physical_unbind, 351 .attach_ioas = vfio_iommufd_physical_attach_ioas, 352 .detach_ioas = vfio_iommufd_physical_detach_ioas, 353 }; 354 355 static int ism_vfio_pci_probe(struct pci_dev *pdev, 356 const struct pci_device_id *id) 357 { 358 struct ism_vfio_pci_core_device *ivpcd; 359 int ret; 360 361 ivpcd = vfio_alloc_device(ism_vfio_pci_core_device, core_device.vdev, 362 &pdev->dev, &ism_pci_ops); 363 if (IS_ERR(ivpcd)) 364 return PTR_ERR(ivpcd); 365 366 dev_set_drvdata(&pdev->dev, &ivpcd->core_device); 367 368 ret = vfio_pci_core_register_device(&ivpcd->core_device); 369 if (ret) 370 vfio_put_device(&ivpcd->core_device.vdev); 371 372 return ret; 373 } 374 375 static void ism_vfio_pci_remove(struct pci_dev *pdev) 376 { 377 struct vfio_pci_core_device *core_device; 378 struct ism_vfio_pci_core_device *ivpcd; 379 380 core_device = dev_get_drvdata(&pdev->dev); 381 ivpcd = container_of(core_device, struct ism_vfio_pci_core_device, 382 core_device); 383 384 vfio_pci_core_unregister_device(&ivpcd->core_device); 385 vfio_put_device(&ivpcd->core_device.vdev); 386 } 387 388 static const struct pci_device_id ism_device_table[] = { 389 { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_IBM, 390 PCI_DEVICE_ID_IBM_ISM) }, 391 {} 392 }; 393 MODULE_DEVICE_TABLE(pci, ism_device_table); 394 395 static struct pci_driver ism_vfio_pci_driver = { 396 .name = KBUILD_MODNAME, 397 .id_table = ism_device_table, 398 .probe = ism_vfio_pci_probe, 399 .remove = ism_vfio_pci_remove, 400 .err_handler = &vfio_pci_core_err_handlers, 401 .driver_managed_dma = true, 402 }; 403 404 module_pci_driver(ism_vfio_pci_driver); 405 406 
/* Module metadata; the MODULE_* declarations are order-independent. */
MODULE_DESCRIPTION("vfio-pci variant driver for the IBM Internal Shared Memory (ISM) device");
MODULE_AUTHOR("IBM Corporation");
MODULE_LICENSE("GPL");