Lines matching refs:mr — cross-reference hits for the struct io_mapped_region pointer "mr" in the Linux kernel's io_uring region code (memmap.c). Each hit keeps its line number in the source file; only lines that mention "mr" appear, so the fragments below are not complete functions. Hits are grouped by the function they fall in.

io_free_region():

  91  void io_free_region(struct user_struct *user, struct io_mapped_region *mr)
  93      if (mr->pages) {
  94          long nr_refs = mr->nr_pages;
  96          if (mr->flags & IO_REGION_F_SINGLE_REF)
  99          if (mr->flags & IO_REGION_F_USER_PROVIDED)
 100              unpin_user_pages(mr->pages, nr_refs);
 102              release_pages(mr->pages, nr_refs);
 104          kvfree(mr->pages);
 106      if ((mr->flags & IO_REGION_F_VMAP) && mr->ptr)
 107          vunmap(mr->ptr);
 108      if (mr->nr_pages && user)
 109          __io_unaccount_mem(user, mr->nr_pages);
 111      memset(mr, 0, sizeof(*mr));
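
Taken together, these hits cover the whole teardown path for a mapped region. Below is a minimal sketch of the complete function, with the non-matching lines (braces, the else branch, and the nr_refs = 1 single-reference case) filled in by inference from the line numbering rather than quoted from the source:

    void io_free_region(struct user_struct *user, struct io_mapped_region *mr)
    {
            if (mr->pages) {
                    long nr_refs = mr->nr_pages;

                    /* a compound allocation holds one reference for all pages */
                    if (mr->flags & IO_REGION_F_SINGLE_REF)
                            nr_refs = 1;

                    /* pinned user pages are unpinned; kernel pages are released */
                    if (mr->flags & IO_REGION_F_USER_PROVIDED)
                            unpin_user_pages(mr->pages, nr_refs);
                    else
                            release_pages(mr->pages, nr_refs);

                    kvfree(mr->pages);
            }
            if ((mr->flags & IO_REGION_F_VMAP) && mr->ptr)
                    vunmap(mr->ptr);
            if (mr->nr_pages && user)
                    __io_unaccount_mem(user, mr->nr_pages);

            /* leave the region in a reusable, all-zero state */
            memset(mr, 0, sizeof(*mr));
    }

The final memset matters: io_create_region() (line 191 below) WARNs if any field of mr is already set, so teardown must return the struct to its zeroed initial state.
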
io_region_init_ptr():

 114  static int io_region_init_ptr(struct io_mapped_region *mr)
 119      if (io_check_coalesce_buffer(mr->pages, mr->nr_pages, &ifd)) {
 120          if (ifd.nr_folios == 1 && !PageHighMem(mr->pages[0])) {
 121              mr->ptr = page_address(mr->pages[0]);
 125      ptr = vmap(mr->pages, mr->nr_pages, VM_MAP, PAGE_KERNEL);
 129      mr->ptr = ptr;
 130      mr->flags |= IO_REGION_F_VMAP;
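
The hits show two ways mr->ptr can be set: directly through page_address() when the pages coalesce into a single lowmem folio, or through vmap() otherwise. A sketch with the elided declarations and error paths filled in by inference (the exact local names and return values are assumptions):

    static int io_region_init_ptr(struct io_mapped_region *mr)
    {
            struct io_imu_folio_data ifd;
            void *ptr;

            /* one physically contiguous lowmem folio: use its linear address */
            if (io_check_coalesce_buffer(mr->pages, mr->nr_pages, &ifd)) {
                    if (ifd.nr_folios == 1 && !PageHighMem(mr->pages[0])) {
                            mr->ptr = page_address(mr->pages[0]);
                            return 0;
                    }
            }

            /* otherwise build a virtually contiguous kernel mapping */
            ptr = vmap(mr->pages, mr->nr_pages, VM_MAP, PAGE_KERNEL);
            if (!ptr)
                    return -ENOMEM;

            mr->ptr = ptr;
            mr->flags |= IO_REGION_F_VMAP;  /* tells io_free_region() to vunmap() */
            return 0;
    }
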
io_region_pin_pages():

 134  static int io_region_pin_pages(struct io_mapped_region *mr,
 137      size_t size = io_region_size(mr);
 144      if (WARN_ON_ONCE(nr_pages != mr->nr_pages))
 147      mr->pages = pages;
 148      mr->flags |= IO_REGION_F_USER_PROVIDED;
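
Only the bookkeeping around the pinning call matches here; the call itself sits on a non-matching line. In the sketch below it is written as io_uring's io_pin_pages() helper, which is an assumption from context, as are the local declarations and the second parameter's type (inferred from the io_create_region() call site at line 218):

    static int io_region_pin_pages(struct io_mapped_region *mr,
                                   struct io_uring_region_desc *reg)
    {
            size_t size = io_region_size(mr);
            struct page **pages;
            int nr_pages;

            /* pin the user buffer; io_pin_pages() is assumed, not shown in the hits */
            pages = io_pin_pages(reg->user_addr, size, &nr_pages);
            if (IS_ERR(pages))
                    return PTR_ERR(pages);
            if (WARN_ON_ONCE(nr_pages != mr->nr_pages))
                    return -EFAULT;

            mr->pages = pages;
            mr->flags |= IO_REGION_F_USER_PROVIDED;  /* unpin, not release, on free */
            return 0;
    }
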
io_region_allocate_pages():

 152  static int io_region_allocate_pages(struct io_mapped_region *mr,
 157      size_t size = io_region_size(mr);
 161      pages = kvmalloc_array(mr->nr_pages, sizeof(*pages), gfp);
 165      if (io_mem_alloc_compound(pages, mr->nr_pages, size, gfp)) {
 166          mr->flags |= IO_REGION_F_SINGLE_REF;
 171                               mr->nr_pages, pages);
 172      if (nr_allocated != mr->nr_pages) {
 180      mr->pages = pages;
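
The allocation path tries a single compound allocation first and falls back to a bulk page allocation; only the tail of the bulk call is visible at line 171, so the helper name used below (alloc_pages_bulk_node()), the cleanup loop, and the gfp mask are assumptions:

    static int io_region_allocate_pages(struct io_mapped_region *mr,
                                        struct io_uring_region_desc *reg,
                                        unsigned long mmap_offset)
    {
            gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN;  /* assumed */
            size_t size = io_region_size(mr);
            unsigned long nr_allocated;
            struct page **pages;

            pages = kvmalloc_array(mr->nr_pages, sizeof(*pages), gfp);
            if (!pages)
                    return -ENOMEM;

            /* try one physically contiguous compound allocation first */
            if (io_mem_alloc_compound(pages, mr->nr_pages, size, gfp)) {
                    mr->flags |= IO_REGION_F_SINGLE_REF;
                    goto done;
            }

            /* fall back to bulk order-0 pages; helper name is an assumption */
            nr_allocated = alloc_pages_bulk_node(gfp, NUMA_NO_NODE,
                                                 mr->nr_pages, pages);
            if (nr_allocated != mr->nr_pages) {
                    while (nr_allocated--)
                            put_page(pages[nr_allocated]);
                    kvfree(pages);
                    return -ENOMEM;
            }
    done:
            reg->mmap_offset = mmap_offset;  /* inferred: reg and mmap_offset
                                              * are otherwise unused in the hits */
            mr->pages = pages;
            return 0;
    }
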
io_create_region():

 184  int io_create_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr,
 191      if (WARN_ON_ONCE(mr->pages || mr->ptr || mr->nr_pages))
 215      mr->nr_pages = nr_pages;
 218      ret = io_region_pin_pages(mr, reg);
 220      ret = io_region_allocate_pages(mr, reg, mmap_offset);
 224      ret = io_region_init_ptr(mr);
 229      io_free_region(ctx->user, mr);
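
io_create_region() is the entry point tying the pieces together: guard against reuse, pin or allocate, then map. Lines 192-214 contain no mr reference and are summarized by a comment below; the reg->flags branch condition and the error label are inferred, not quoted:

    int io_create_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr,
                         struct io_uring_region_desc *reg,
                         unsigned long mmap_offset)
    {
            int nr_pages, ret;

            /* the region struct must be zeroed; io_free_region() guarantees that */
            if (WARN_ON_ONCE(mr->pages || mr->ptr || mr->nr_pages))
                    return -EFAULT;

            /*
             * Elided (lines 192-214, no "mr" reference): validation of the
             * region descriptor, computation of nr_pages from its size, and
             * memory accounting against ctx->user.
             */

            mr->nr_pages = nr_pages;

            /* user memory is pinned in place; otherwise the kernel allocates */
            if (reg->flags & IORING_MEM_REGION_TYPE_USER)  /* inferred flag */
                    ret = io_region_pin_pages(mr, reg);
            else
                    ret = io_region_allocate_pages(mr, reg, mmap_offset);
            if (ret)
                    goto out_free;

            ret = io_region_init_ptr(mr);
            if (ret)
                    goto out_free;
            return 0;
    out_free:
            io_free_region(ctx->user, mr);
            return ret;
    }

Note that the failure path reuses io_free_region(), which is why that function tolerates partially initialized regions: every branch there checks a flag or pointer before acting.
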
io_region_validate_mmap():

 259                               struct io_mapped_region *mr)
 263      if (!io_region_is_set(mr))
 265      if (mr->flags & IO_REGION_F_USER_PROVIDED)
 268      return io_region_get_ptr(mr);
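
This check runs before a region is handed to mmap: the region must exist and must not be backed by user-provided memory, which already lives in userspace and has nothing for the kernel to insert. The sketch assumes ERR_PTR-style returns and a ring-context first parameter on the elided line 258:

    static void *io_region_validate_mmap(struct io_ring_ctx *ctx,
                                         struct io_mapped_region *mr)
    {
            if (!io_region_is_set(mr))
                    return ERR_PTR(-EINVAL);
            /* user-provided regions cannot be mmap'ed back through the ring fd */
            if (mr->flags & IO_REGION_F_USER_PROVIDED)
                    return ERR_PTR(-EINVAL);

            return io_region_get_ptr(mr);
    }
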
io_region_mmap():

 285                               struct io_mapped_region *mr,
 289      unsigned long nr_pages = min(mr->nr_pages, max_pages);
 292      return vm_insert_pages(vma, vma->vm_start, mr->pages, &nr_pages);
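
The actual mapping is a single vm_insert_pages() call, clamped so a userspace mmap can never expose more pages than the region holds. Parameters other than mr are inferred from their uses at lines 289 and 292:

    static int io_region_mmap(struct io_ring_ctx *ctx,
                              struct io_mapped_region *mr,
                              struct vm_area_struct *vma,
                              unsigned int max_pages)
    {
            /* clamp: never map more pages than the region actually has */
            unsigned long nr_pages = min(mr->nr_pages, max_pages);

            return vm_insert_pages(vma, vma->vm_start, mr->pages, &nr_pages);
    }
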