// SPDX-License-Identifier: GPL-2.0
/*
 * Access to PCI I/O memory from user space programs.
 *
 * Copyright IBM Corp. 2014
 * Author(s): Alexey Ishchuk <aishchuk@linux.vnet.ibm.com>
 */
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <asm/asm-extable.h>
#include <asm/pci_io.h>
#include <asm/pci_debug.h>
#include <asm/asm.h>

static inline void zpci_err_mmio(u8 cc, u8 status, u64 offset)
{
	struct {
		u64 offset;
		u8 cc;
		u8 status;
	} data = {offset, cc, status};

	zpci_err_hex(&data, sizeof(data));
}

static inline int __pcistb_mio_inuser(
		void __iomem *ioaddr, const void __user *src,
		u64 len, u8 *status)
{
	int cc, exception;

	exception = 1;
	asm volatile (
		"	sacf	256\n"
		"0:	.insn	rsy,0xeb00000000d4,%[len],%[ioaddr],%[src]\n"
		"1:	lhi	%[exc],0\n"
		"2:	sacf	768\n"
		CC_IPM(cc)
		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
		: CC_OUT(cc, cc), [len] "+d" (len), [exc] "+d" (exception)
		: [ioaddr] "a" (ioaddr), [src] "Q" (*((u8 __force *)src))
		: CC_CLOBBER_LIST("memory"));
	*status = len >> 24 & 0xff;
	return exception ? -ENXIO : CC_TRANSFORM(cc);
}

static inline int __pcistg_mio_inuser(
		void __iomem *ioaddr, const void __user *src,
		u64 ulen, u8 *status)
{
	union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
	int cc, exception;
	u64 val = 0;
	u64 cnt = ulen;
	u8 tmp;

	/*
	 * copy 0 < @len <= 8 bytes from @src into the right most bytes of
	 * a register, then store it to PCI at @ioaddr while in secondary
	 * address space. pcistg then uses the user mappings.
	 */
	exception = 1;
	asm volatile (
		"	sacf	256\n"
		"0:	llgc	%[tmp],0(%[src])\n"
		"4:	sllg	%[val],%[val],8\n"
		"	aghi	%[src],1\n"
		"	ogr	%[val],%[tmp]\n"
		"	brctg	%[cnt],0b\n"
		"1:	.insn	rre,0xb9d40000,%[val],%[ioaddr_len]\n"
		"2:	lhi	%[exc],0\n"
		"3:	sacf	768\n"
		CC_IPM(cc)
		EX_TABLE(0b, 3b) EX_TABLE(4b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b)
		: [src] "+a" (src), [cnt] "+d" (cnt),
		  [val] "+d" (val), [tmp] "=d" (tmp), [exc] "+d" (exception),
		  CC_OUT(cc, cc), [ioaddr_len] "+&d" (ioaddr_len.pair)
		:
		: CC_CLOBBER_LIST("memory"));
	*status = ioaddr_len.odd >> 24 & 0xff;

	cc = exception ? -ENXIO : CC_TRANSFORM(cc);
	/* did we read everything from user memory? */
	if (!cc && cnt != 0)
		cc = -EFAULT;

	return cc;
}
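
/*
 * Illustrative sketch only, kept out of the build: the llgc/sllg/ogr loop
 * in __pcistg_mio_inuser above gathers up to 8 user bytes into the least
 * significant end of a register, most significant byte first. Minus the
 * secondary-address-space switch and the exception table entries, it is
 * roughly equivalent to the plain C below; the helper name is hypothetical.
 */
#if 0
static u64 gather_be_bytes(const u8 *src, u64 len)
{
	u64 val = 0;

	while (len--) {
		val <<= 8;	/* make room, mirrors sllg */
		val |= *src++;	/* OR in the next byte, mirrors llgc + ogr */
	}
	return val;
}
#endif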

static inline int __memcpy_toio_inuser(void __iomem *dst,
				       const void __user *src, size_t n)
{
	int size, rc = 0;
	u8 status = 0;

	if (!src)
		return -EINVAL;

	while (n > 0) {
		size = zpci_get_max_io_size((u64 __force) dst,
					    (u64 __force) src, n,
					    ZPCI_MAX_WRITE_SIZE);
		if (size > 8) /* main path */
			rc = __pcistb_mio_inuser(dst, src, size, &status);
		else
			rc = __pcistg_mio_inuser(dst, src, size, &status);
		if (rc)
			break;
		src += size;
		dst += size;
		n -= size;
	}
	if (rc)
		zpci_err_mmio(rc, status, (__force u64) dst);
	return rc;
}

SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
		const void __user *, user_buffer, size_t, length)
{
	struct follow_pfnmap_args args = { };
	u8 local_buf[64];
	void __iomem *io_addr;
	void *buf;
	struct vm_area_struct *vma;
	long ret;

	if (!zpci_is_enabled())
		return -ENODEV;

	if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
		return -EINVAL;

	/*
	 * We only support write access to MIO capable devices if we are on
	 * a MIO enabled system. Otherwise we would have to check for every
	 * address if it is a special ZPCI_ADDR and would have to do
	 * a pfn lookup which we don't need for MIO capable devices. Currently
	 * ISM devices are the only devices without MIO support and there is no
	 * known need for accessing these from userspace.
	 */
	if (static_branch_likely(&have_mio)) {
		ret = __memcpy_toio_inuser((void __iomem *) mmio_addr,
					   user_buffer,
					   length);
		return ret;
	}

	if (length > 64) {
		buf = kmalloc(length, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	} else {
		buf = local_buf;
	}

	ret = -EFAULT;
	if (copy_from_user(buf, user_buffer, length))
		goto out_free;

	mmap_read_lock(current->mm);
	ret = -EINVAL;
	vma = vma_lookup(current->mm, mmio_addr);
	if (!vma)
		goto out_unlock_mmap;
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out_unlock_mmap;
	ret = -EACCES;
	if (!(vma->vm_flags & VM_WRITE))
		goto out_unlock_mmap;

	args.address = mmio_addr;
	args.vma = vma;
	ret = follow_pfnmap_start(&args);
	if (ret)
		goto out_unlock_mmap;

	io_addr = (void __iomem *)((args.pfn << PAGE_SHIFT) |
			(mmio_addr & ~PAGE_MASK));

	ret = -EFAULT;
	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
		goto out_unlock_pt;

	ret = zpci_memcpy_toio(io_addr, buf, length);
out_unlock_pt:
	follow_pfnmap_end(&args);
out_unlock_mmap:
	mmap_read_unlock(current->mm);
out_free:
	if (buf != local_buf)
		kfree(buf);
	return ret;
}
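
/*
 * Example invocation from user space (not part of this file): after
 * mmap()ing a zPCI BAR, a process would issue the write syscall roughly
 * as below. mapped_addr and buf are placeholders; the __NR_* constant
 * comes from the s390 uapi headers.
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	long ret = syscall(__NR_s390_pci_mmio_write,
 *			   (unsigned long)mapped_addr, buf, sizeof(buf));
 */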

static inline int __pcilg_mio_inuser(
		void __user *dst, const void __iomem *ioaddr,
		u64 ulen, u8 *status)
{
	union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
	u64 cnt = ulen;
	int shift = ulen * 8;
	int cc, exception;
	u64 val, tmp;

	/*
	 * read 0 < @len <= 8 bytes from the PCI memory mapped at @ioaddr (in
	 * user space) into a register using pcilg then store these bytes at
	 * user address @dst
	 */
	exception = 1;
	asm volatile (
		"	sacf	256\n"
		"0:	.insn	rre,0xb9d60000,%[val],%[ioaddr_len]\n"
		"1:	lhi	%[exc],0\n"
		"	jne	4f\n"
		"2:	ahi	%[shift],-8\n"
		"	srlg	%[tmp],%[val],0(%[shift])\n"
		"3:	stc	%[tmp],0(%[dst])\n"
		"5:	aghi	%[dst],1\n"
		"	brctg	%[cnt],2b\n"
		/*
		 * Use xr to clear exc and set condition code to zero
		 * to ensure flag output is correct for this branch.
		 */
		"	xr	%[exc],%[exc]\n"
		"4:	sacf	768\n"
		CC_IPM(cc)
		EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b) EX_TABLE(5b, 4b)
		: [ioaddr_len] "+&d" (ioaddr_len.pair), [exc] "+d" (exception),
		  CC_OUT(cc, cc), [val] "=d" (val),
		  [dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp),
		  [shift] "+d" (shift)
		:
		: CC_CLOBBER_LIST("memory"));

	cc = exception ? -ENXIO : CC_TRANSFORM(cc);
	/* did we write everything to the user space buffer? */
	if (!cc && cnt != 0)
		cc = -EFAULT;

	*status = ioaddr_len.odd >> 24 & 0xff;
	return cc;
}

static inline int __memcpy_fromio_inuser(void __user *dst,
					 const void __iomem *src,
					 unsigned long n)
{
	int size, rc = 0;
	u8 status;

	while (n > 0) {
		size = zpci_get_max_io_size((u64 __force) src,
					    (u64 __force) dst, n,
					    ZPCI_MAX_READ_SIZE);
		rc = __pcilg_mio_inuser(dst, src, size, &status);
		if (rc)
			break;
		src += size;
		dst += size;
		n -= size;
	}
	if (rc)
		zpci_err_mmio(rc, status, (__force u64) dst);
	return rc;
}

SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
		void __user *, user_buffer, size_t, length)
{
	struct follow_pfnmap_args args = { };
	u8 local_buf[64];
	void __iomem *io_addr;
	void *buf;
	struct vm_area_struct *vma;
	long ret;

	if (!zpci_is_enabled())
		return -ENODEV;

	if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
		return -EINVAL;

	/*
	 * We only support read access to MIO capable devices if we are on
	 * a MIO enabled system. Otherwise we would have to check for every
	 * address if it is a special ZPCI_ADDR and would have to do
	 * a pfn lookup which we don't need for MIO capable devices. Currently
	 * ISM devices are the only devices without MIO support and there is no
	 * known need for accessing these from userspace.
	 */
	if (static_branch_likely(&have_mio)) {
		ret = __memcpy_fromio_inuser(
				user_buffer, (const void __iomem *)mmio_addr,
				length);
		return ret;
	}

	if (length > 64) {
		buf = kmalloc(length, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	} else {
		buf = local_buf;
	}

	mmap_read_lock(current->mm);
	ret = -EINVAL;
	vma = vma_lookup(current->mm, mmio_addr);
	if (!vma)
		goto out_unlock_mmap;
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out_unlock_mmap;
	ret = -EACCES;
	if (!(vma->vm_flags & VM_WRITE))
		goto out_unlock_mmap;

	args.vma = vma;
	args.address = mmio_addr;
	ret = follow_pfnmap_start(&args);
	if (ret)
		goto out_unlock_mmap;

	io_addr = (void __iomem *)((args.pfn << PAGE_SHIFT) |
			(mmio_addr & ~PAGE_MASK));

	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
		ret = -EFAULT;
		goto out_unlock_pt;
	}
	ret = zpci_memcpy_fromio(buf, io_addr, length);

out_unlock_pt:
	follow_pfnmap_end(&args);
out_unlock_mmap:
	mmap_read_unlock(current->mm);

	if (!ret && copy_to_user(user_buffer, buf, length))
		ret = -EFAULT;

	if (buf != local_buf)
		kfree(buf);
	return ret;
}
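
/*
 * Illustrative sketch only, kept out of the build: the ahi/srlg/stc loop in
 * __pcilg_mio_inuser stores the value read by pcilg to the user buffer one
 * byte at a time, most significant requested byte first. Ignoring the
 * address-space switch and the fault handling, it is roughly the plain C
 * below; the helper name is hypothetical.
 */
#if 0
static void scatter_be_bytes(u8 *dst, u64 val, u64 len)
{
	int shift = len * 8;

	while (len--) {
		shift -= 8;			/* mirrors ahi %[shift],-8 */
		*dst++ = (val >> shift) & 0xff;	/* mirrors srlg + stc */
	}
}
#endif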