/*
 * IOMMU API for s390 PCI devices
 *
 * Copyright IBM Corp. 2015
 * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/iommu-helper.h>
#include <linux/sizes.h>
#include <asm/pci_dma.h>

/*
 * Physically contiguous memory regions can be mapped with 4 KiB alignment;
 * we allow all page sizes that are a power-of-two multiple of 4 KiB (no
 * special large-page support so far).
 */
#define S390_IOMMU_PGSIZES	(~0xFFFUL)

struct s390_domain {
	struct iommu_domain	domain;
	struct list_head	devices;
	unsigned long		*dma_table;
	spinlock_t		dma_table_lock;
	spinlock_t		list_lock;
};

struct s390_domain_device {
	struct list_head	list;
	struct zpci_dev		*zdev;
};

static struct s390_domain *to_s390_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct s390_domain, domain);
}

static bool s390_iommu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true;
	default:
		return false;
	}
}

static struct iommu_domain *s390_domain_alloc(unsigned domain_type)
{
	struct s390_domain *s390_domain;

	if (domain_type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	s390_domain = kzalloc(sizeof(*s390_domain), GFP_KERNEL);
	if (!s390_domain)
		return NULL;

	s390_domain->dma_table = dma_alloc_cpu_table();
	if (!s390_domain->dma_table) {
		kfree(s390_domain);
		return NULL;
	}

	spin_lock_init(&s390_domain->dma_table_lock);
	spin_lock_init(&s390_domain->list_lock);
	INIT_LIST_HEAD(&s390_domain->devices);

	return &s390_domain->domain;
}

static void s390_domain_free(struct iommu_domain *domain)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);

	dma_cleanup_tables(s390_domain->dma_table);
	kfree(s390_domain);
}

static int s390_iommu_attach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	struct zpci_dev *zdev = to_pci_dev(dev)->sysdata;
	struct s390_domain_device *domain_device;
	unsigned long flags;
	int rc;

	if (!zdev)
		return -ENODEV;

	domain_device = kzalloc(sizeof(*domain_device), GFP_KERNEL);
	if (!domain_device)
		return -ENOMEM;

	if (zdev->dma_table)
		zpci_dma_exit_device(zdev);

	zdev->dma_table = s390_domain->dma_table;
	rc = zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET,
				zdev->start_dma + zdev->iommu_size - 1,
				(u64) zdev->dma_table);
	if (rc)
		goto out_restore;

	spin_lock_irqsave(&s390_domain->list_lock, flags);
	/* First device defines the DMA range limits */
	if (list_empty(&s390_domain->devices)) {
		domain->geometry.aperture_start = zdev->start_dma;
		domain->geometry.aperture_end = zdev->end_dma;
		domain->geometry.force_aperture = true;
	/* Allow only devices with identical DMA range limits */
	} else if (domain->geometry.aperture_start != zdev->start_dma ||
		   domain->geometry.aperture_end != zdev->end_dma) {
		rc = -EINVAL;
		spin_unlock_irqrestore(&s390_domain->list_lock, flags);
		goto out_restore;
	}
	domain_device->zdev = zdev;
	zdev->s390_domain = s390_domain;
	list_add(&domain_device->list, &s390_domain->devices);
	spin_unlock_irqrestore(&s390_domain->list_lock, flags);

	return 0;

out_restore:
	zpci_dma_init_device(zdev);
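	/*
	 * The error path runs with the device already detached from its
	 * default DMA translation (torn down via zpci_dma_exit_device()
	 * above), so zpci_dma_init_device() re-establishes that default
	 * state before the unused list entry is freed and the error is
	 * reported.
	 */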
	kfree(domain_device);

	return rc;
}

static void s390_iommu_detach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	struct zpci_dev *zdev = to_pci_dev(dev)->sysdata;
	struct s390_domain_device *domain_device, *tmp;
	unsigned long flags;
	int found = 0;

	if (!zdev)
		return;

	spin_lock_irqsave(&s390_domain->list_lock, flags);
	list_for_each_entry_safe(domain_device, tmp, &s390_domain->devices,
				 list) {
		if (domain_device->zdev == zdev) {
			list_del(&domain_device->list);
			kfree(domain_device);
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&s390_domain->list_lock, flags);

	if (found) {
		zdev->s390_domain = NULL;
		zpci_unregister_ioat(zdev, 0);
		zpci_dma_init_device(zdev);
	}
}

static int s390_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;
	int rc;

	group = iommu_group_get(dev);
	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group))
			return PTR_ERR(group);
	}

	rc = iommu_group_add_device(group, dev);
	iommu_group_put(group);

	return rc;
}

static void s390_iommu_remove_device(struct device *dev)
{
	struct zpci_dev *zdev = to_pci_dev(dev)->sysdata;
	struct iommu_domain *domain;

	/*
	 * This is a workaround for a scenario where the IOMMU API common code
	 * "forgets" to call the detach_dev callback: After binding a device
	 * to vfio-pci and completing the VFIO_SET_IOMMU ioctl (which triggers
	 * the attach_dev), removing the device via
	 * "echo 1 > /sys/bus/pci/devices/.../remove" won't trigger detach_dev,
	 * only remove_device will be called via the BUS_NOTIFY_REMOVED_DEVICE
	 * notifier.
	 *
	 * So let's call detach_dev from here if it hasn't been called before.
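	 *
	 * zdev->s390_domain, set in s390_iommu_attach_device(), tracks
	 * whether such a detach is still outstanding, and
	 * iommu_get_domain_for_dev() yields the domain to detach from.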
	 */
	if (zdev && zdev->s390_domain) {
		domain = iommu_get_domain_for_dev(dev);
		if (domain)
			s390_iommu_detach_device(domain, dev);
	}

	iommu_group_remove_device(dev);
}

static int s390_iommu_update_trans(struct s390_domain *s390_domain,
				   unsigned long pa, dma_addr_t dma_addr,
				   size_t size, int flags)
{
	struct s390_domain_device *domain_device;
	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
	dma_addr_t start_dma_addr = dma_addr;
	unsigned long irq_flags, nr_pages, i;
	int rc = 0;

	if (dma_addr < s390_domain->domain.geometry.aperture_start ||
	    dma_addr + size > s390_domain->domain.geometry.aperture_end)
		return -EINVAL;

	nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	if (!nr_pages)
		return 0;

	spin_lock_irqsave(&s390_domain->dma_table_lock, irq_flags);
	for (i = 0; i < nr_pages; i++) {
		dma_update_cpu_trans(s390_domain->dma_table, page_addr,
				     dma_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

	spin_lock(&s390_domain->list_lock);
	/* Refresh the translations (RPCIT) for every attached device */
	list_for_each_entry(domain_device, &s390_domain->devices, list) {
		rc = zpci_refresh_trans((u64) domain_device->zdev->fh << 32,
					start_dma_addr, nr_pages * PAGE_SIZE);
		if (rc)
			break;
	}
	spin_unlock(&s390_domain->list_lock);
	spin_unlock_irqrestore(&s390_domain->dma_table_lock, irq_flags);

	return rc;
}

static int s390_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	int flags = ZPCI_PTE_VALID, rc = 0;

	if (!(prot & IOMMU_READ))
		return -EINVAL;

	if (!(prot & IOMMU_WRITE))
		flags |= ZPCI_TABLE_PROTECTED;

	rc = s390_iommu_update_trans(s390_domain, (unsigned long) paddr, iova,
				     size, flags);

	return rc;
}

static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	unsigned long *sto, *pto, *rto, flags;
	unsigned int rtx, sx, px;
	phys_addr_t phys = 0;

	if (iova < domain->geometry.aperture_start ||
	    iova > domain->geometry.aperture_end)
		return 0;

	/* Indexes into the region, segment and page tables for this iova */
	rtx = calc_rtx(iova);
	sx = calc_sx(iova);
	px = calc_px(iova);
	rto = s390_domain->dma_table;

	spin_lock_irqsave(&s390_domain->dma_table_lock, flags);
	if (rto && reg_entry_isvalid(rto[rtx])) {
		sto = get_rt_sto(rto[rtx]);
		if (sto && reg_entry_isvalid(sto[sx])) {
			pto = get_st_pto(sto[sx]);
			if (pto && pt_entry_isvalid(pto[px]))
				phys = pto[px] & ZPCI_PTE_ADDR_MASK;
		}
	}
	spin_unlock_irqrestore(&s390_domain->dma_table_lock, flags);

	return phys;
}

static size_t s390_iommu_unmap(struct iommu_domain *domain,
			       unsigned long iova, size_t size)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	int flags = ZPCI_PTE_INVALID;
	phys_addr_t paddr;
	int rc;

	paddr = s390_iommu_iova_to_phys(domain, iova);
	if (!paddr)
		return 0;

	rc = s390_iommu_update_trans(s390_domain, (unsigned long) paddr, iova,
				     size, flags);
	if (rc)
		return 0;

	return size;
}

static struct iommu_ops s390_iommu_ops = {
	.capable = s390_iommu_capable,
	.domain_alloc = s390_domain_alloc,
	.domain_free = s390_domain_free,
	.attach_dev = s390_iommu_attach_device,
	.detach_dev = s390_iommu_detach_device,
	.map = s390_iommu_map,
	.unmap = s390_iommu_unmap,
	.iova_to_phys = s390_iommu_iova_to_phys,
	.add_device = s390_iommu_add_device,
	.remove_device = s390_iommu_remove_device,
	.pgsize_bitmap = S390_IOMMU_PGSIZES,
};

static int __init s390_iommu_init(void)
{
	return bus_set_iommu(&pci_bus_type, &s390_iommu_ops);
}
subsys_initcall(s390_iommu_init);
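
/*
 * Illustration only, not part of the driver: a client such as VFIO would
 * exercise the callbacks above through the generic IOMMU API roughly as
 * sketched below ("pdev", "iova" and "paddr" are hypothetical).
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *
 *	if (!dom)
 *		return -ENOMEM;
 *	if (!iommu_attach_device(dom, &pdev->dev)) {
 *		iommu_map(dom, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *		iommu_unmap(dom, iova, SZ_4K);
 *		iommu_detach_device(dom, &pdev->dev);
 *	}
 *	iommu_domain_free(dom);
 */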