// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/io_uring.h>
#include <linux/io_uring_types.h>
#include <asm/shmparam.h>

#include "memmap.h"
#include "kbuf.h"
#include "rsrc.h"

static void *io_mem_alloc_compound(struct page **pages, int nr_pages,
				   size_t size, gfp_t gfp)
{
	struct page *page;
	int i, order;

	order = get_order(size);
	if (order > MAX_PAGE_ORDER)
		return ERR_PTR(-ENOMEM);
	else if (order)
		gfp |= __GFP_COMP;

	page = alloc_pages(gfp, order);
	if (!page)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_pages; i++)
		pages[i] = page + i;

	return page_address(page);
}

struct page **io_pin_pages(unsigned long uaddr, unsigned long len, int *npages)
{
	unsigned long start, end, nr_pages;
	struct page **pages;
	int ret;

	if (check_add_overflow(uaddr, len, &end))
		return ERR_PTR(-EOVERFLOW);
	if (check_add_overflow(end, PAGE_SIZE - 1, &end))
		return ERR_PTR(-EOVERFLOW);

	end = end >> PAGE_SHIFT;
	start = uaddr >> PAGE_SHIFT;
	nr_pages = end - start;
	if (WARN_ON_ONCE(!nr_pages))
		return ERR_PTR(-EINVAL);
	if (WARN_ON_ONCE(nr_pages > INT_MAX))
		return ERR_PTR(-EOVERFLOW);

	pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	ret = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
				  pages);
	/* success, mapped all pages */
	if (ret == nr_pages) {
		*npages = nr_pages;
		return pages;
	}

	/* partial map, or didn't map anything */
	if (ret >= 0) {
		/* if we did partial map, release any pages we did get */
		if (ret)
			unpin_user_pages(pages, ret);
		ret = -EFAULT;
	}
	kvfree(pages);
	return ERR_PTR(ret);
}

enum {
	/* memory was vmap'ed for the kernel, freeing the region vunmap's it */
	IO_REGION_F_VMAP		= 1,
	/* memory is provided by user and pinned by the kernel */
	IO_REGION_F_USER_PROVIDED	= 2,
	/* only the first page in the array is ref'ed */
	IO_REGION_F_SINGLE_REF		= 4,
};

void io_free_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr)
{
	if (mr->pages) {
		long nr_refs = mr->nr_pages;

		if (mr->flags & IO_REGION_F_SINGLE_REF)
			nr_refs = 1;

		if (mr->flags & IO_REGION_F_USER_PROVIDED)
			unpin_user_pages(mr->pages, nr_refs);
		else
			release_pages(mr->pages, nr_refs);

		kvfree(mr->pages);
	}
	if ((mr->flags & IO_REGION_F_VMAP) && mr->ptr)
		vunmap(mr->ptr);
	if (mr->nr_pages && ctx->user)
		__io_unaccount_mem(ctx->user, mr->nr_pages);

	memset(mr, 0, sizeof(*mr));
}

static int io_region_init_ptr(struct io_mapped_region *mr)
{
	struct io_imu_folio_data ifd;
	void *ptr;

	if (io_check_coalesce_buffer(mr->pages, mr->nr_pages, &ifd)) {
		if (ifd.nr_folios == 1) {
			mr->ptr = page_address(mr->pages[0]);
			return 0;
		}
	}
	ptr = vmap(mr->pages, mr->nr_pages, VM_MAP, PAGE_KERNEL);
	if (!ptr)
		return -ENOMEM;

	mr->ptr = ptr;
	mr->flags |= IO_REGION_F_VMAP;
	return 0;
}

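/*
 * Pin the user memory backing an IORING_MEM_REGION_TYPE_USER region. The
 * number of pages must already be set in @mr; on success the pinned pages
 * are stored in @mr and the region is marked as user provided, so freeing
 * it unpins the pages instead of releasing them.
 */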
static int io_region_pin_pages(struct io_ring_ctx *ctx,
			       struct io_mapped_region *mr,
			       struct io_uring_region_desc *reg)
{
	unsigned long size = mr->nr_pages << PAGE_SHIFT;
	struct page **pages;
	int nr_pages;

	pages = io_pin_pages(reg->user_addr, size, &nr_pages);
	if (IS_ERR(pages))
		return PTR_ERR(pages);
	if (WARN_ON_ONCE(nr_pages != mr->nr_pages))
		return -EFAULT;

	mr->pages = pages;
	mr->flags |= IO_REGION_F_USER_PROVIDED;
	return 0;
}

static int io_region_allocate_pages(struct io_ring_ctx *ctx,
				    struct io_mapped_region *mr,
				    struct io_uring_region_desc *reg,
				    unsigned long mmap_offset)
{
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN;
	unsigned long size = mr->nr_pages << PAGE_SHIFT;
	unsigned long nr_allocated;
	struct page **pages;
	void *p;

	pages = kvmalloc_array(mr->nr_pages, sizeof(*pages), gfp);
	if (!pages)
		return -ENOMEM;

	p = io_mem_alloc_compound(pages, mr->nr_pages, size, gfp);
	if (!IS_ERR(p)) {
		mr->flags |= IO_REGION_F_SINGLE_REF;
		goto done;
	}

	nr_allocated = alloc_pages_bulk_node(gfp, NUMA_NO_NODE,
					     mr->nr_pages, pages);
	if (nr_allocated != mr->nr_pages) {
		if (nr_allocated)
			release_pages(pages, nr_allocated);
		kvfree(pages);
		return -ENOMEM;
	}
done:
	reg->mmap_offset = mmap_offset;
	mr->pages = pages;
	return 0;
}

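/*
 * Set up a mapped region: validate the descriptor, account the memory
 * against the ring's user, back the region with either pinned user pages
 * or freshly allocated kernel pages, and make it addressable by the
 * kernel. On failure everything is unwound via io_free_region().
 */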
int io_create_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr,
		     struct io_uring_region_desc *reg,
		     unsigned long mmap_offset)
{
	int nr_pages, ret;
	u64 end;

	if (WARN_ON_ONCE(mr->pages || mr->ptr || mr->nr_pages))
		return -EFAULT;
	if (memchr_inv(&reg->__resv, 0, sizeof(reg->__resv)))
		return -EINVAL;
	if (reg->flags & ~IORING_MEM_REGION_TYPE_USER)
		return -EINVAL;
	/* user_addr should be set IFF it's a user memory backed region */
	if ((reg->flags & IORING_MEM_REGION_TYPE_USER) != !!reg->user_addr)
		return -EFAULT;
	if (!reg->size || reg->mmap_offset || reg->id)
		return -EINVAL;
	if ((reg->size >> PAGE_SHIFT) > INT_MAX)
		return -E2BIG;
	if ((reg->user_addr | reg->size) & ~PAGE_MASK)
		return -EINVAL;
	if (check_add_overflow(reg->user_addr, reg->size, &end))
		return -EOVERFLOW;

	nr_pages = reg->size >> PAGE_SHIFT;
	if (ctx->user) {
		ret = __io_account_mem(ctx->user, nr_pages);
		if (ret)
			return ret;
	}
	mr->nr_pages = nr_pages;

	if (reg->flags & IORING_MEM_REGION_TYPE_USER)
		ret = io_region_pin_pages(ctx, mr, reg);
	else
		ret = io_region_allocate_pages(ctx, mr, reg, mmap_offset);
	if (ret)
		goto out_free;

	ret = io_region_init_ptr(mr);
	if (ret)
		goto out_free;
	return 0;
out_free:
	io_free_region(ctx, mr);
	return ret;
}

int io_create_region_mmap_safe(struct io_ring_ctx *ctx, struct io_mapped_region *mr,
			       struct io_uring_region_desc *reg,
			       unsigned long mmap_offset)
{
	struct io_mapped_region tmp_mr;
	int ret;

	memcpy(&tmp_mr, mr, sizeof(tmp_mr));
	ret = io_create_region(ctx, &tmp_mr, reg, mmap_offset);
	if (ret)
		return ret;

	/*
	 * Once published, the region can be found by mmap while holding only
	 * ->mmap_lock and not ->uring_lock, so publish it under ->mmap_lock.
	 */
	guard(mutex)(&ctx->mmap_lock);
	memcpy(mr, &tmp_mr, sizeof(tmp_mr));
	return 0;
}

static struct io_mapped_region *io_mmap_get_region(struct io_ring_ctx *ctx,
						   loff_t pgoff)
{
	loff_t offset = pgoff << PAGE_SHIFT;
	unsigned int bgid;

	switch (offset & IORING_OFF_MMAP_MASK) {
	case IORING_OFF_SQ_RING:
	case IORING_OFF_CQ_RING:
		return &ctx->ring_region;
	case IORING_OFF_SQES:
		return &ctx->sq_region;
	case IORING_OFF_PBUF_RING:
		bgid = (offset & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_PBUF_SHIFT;
		return io_pbuf_get_region(ctx, bgid);
	case IORING_MAP_OFF_PARAM_REGION:
		return &ctx->param_region;
	case IORING_MAP_OFF_ZCRX_REGION:
		return &ctx->zcrx_region;
	}
	return NULL;
}

static void *io_region_validate_mmap(struct io_ring_ctx *ctx,
				     struct io_mapped_region *mr)
{
	lockdep_assert_held(&ctx->mmap_lock);

	if (!io_region_is_set(mr))
		return ERR_PTR(-EINVAL);
	if (mr->flags & IO_REGION_F_USER_PROVIDED)
		return ERR_PTR(-EINVAL);

	return io_region_get_ptr(mr);
}

static void *io_uring_validate_mmap_request(struct file *file, loff_t pgoff,
					    size_t sz)
{
	struct io_ring_ctx *ctx = file->private_data;
	struct io_mapped_region *region;

	region = io_mmap_get_region(ctx, pgoff);
	if (!region)
		return ERR_PTR(-EINVAL);
	return io_region_validate_mmap(ctx, region);
}

#ifdef CONFIG_MMU

static int io_region_mmap(struct io_ring_ctx *ctx,
			  struct io_mapped_region *mr,
			  struct vm_area_struct *vma,
			  unsigned max_pages)
{
	unsigned long nr_pages = min(mr->nr_pages, max_pages);

	vm_flags_set(vma, VM_DONTEXPAND);
	return vm_insert_pages(vma, vma->vm_start, mr->pages, &nr_pages);
}

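/*
 * mmap() handler for io_uring files: look up the region matching the mmap
 * offset and insert its pages into the vma. SQ/CQ ring mappings may be
 * smaller than the backing region, so they are clamped to the requested
 * size.
 */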
__cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct io_ring_ctx *ctx = file->private_data;
	size_t sz = vma->vm_end - vma->vm_start;
	long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned int page_limit = UINT_MAX;
	struct io_mapped_region *region;
	void *ptr;

	guard(mutex)(&ctx->mmap_lock);

	ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	switch (offset & IORING_OFF_MMAP_MASK) {
	case IORING_OFF_SQ_RING:
	case IORING_OFF_CQ_RING:
		page_limit = (sz + PAGE_SIZE - 1) >> PAGE_SHIFT;
		break;
	}

	region = io_mmap_get_region(ctx, vma->vm_pgoff);
	return io_region_mmap(ctx, region, vma, page_limit);
}

unsigned long io_uring_get_unmapped_area(struct file *filp, unsigned long addr,
					 unsigned long len, unsigned long pgoff,
					 unsigned long flags)
{
	struct io_ring_ctx *ctx = filp->private_data;
	void *ptr;

	/*
	 * Do not allow mapping to a user-provided address, to avoid breaking
	 * the aliasing rules. Userspace is not able to guess the offset
	 * address of kernel kmalloc()ed memory area.
	 */
	if (addr)
		return -EINVAL;

	guard(mutex)(&ctx->mmap_lock);

	ptr = io_uring_validate_mmap_request(filp, pgoff, len);
	if (IS_ERR(ptr))
		return -ENOMEM;

	/*
	 * Some architectures have strong cache aliasing requirements.
	 * For such architectures we need a coherent mapping which aliases
	 * kernel memory *and* userspace memory. To achieve that:
	 * - use a NULL file pointer to reference physical memory, and
	 * - use the kernel virtual address of the shared io_uring context
	 *   (instead of the userspace-provided address, which has to be 0UL
	 *   anyway).
	 * - use the same pgoff which the get_unmapped_area() uses to
	 *   calculate the page colouring.
	 * For architectures without such aliasing requirements, the
	 * architecture will return any suitable mapping because addr is 0.
	 */
	filp = NULL;
	flags |= MAP_SHARED;
	pgoff = 0;	/* has been translated to ptr above */
#ifdef SHM_COLOUR
	addr = (uintptr_t) ptr;
	pgoff = addr >> PAGE_SHIFT;
#else
	addr = 0UL;
#endif
	return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags);
}

#else /* !CONFIG_MMU */

int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
	return is_nommu_shared_mapping(vma->vm_flags) ? 0 : -EINVAL;
}

unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
}

unsigned long io_uring_get_unmapped_area(struct file *file, unsigned long addr,
					 unsigned long len, unsigned long pgoff,
					 unsigned long flags)
{
	struct io_ring_ctx *ctx = file->private_data;
	void *ptr;

	guard(mutex)(&ctx->mmap_lock);

	ptr = io_uring_validate_mmap_request(file, pgoff, len);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	return (unsigned long) ptr;
}

#endif /* !CONFIG_MMU */