#ifndef IO_URING_MEMMAP_H
#define IO_URING_MEMMAP_H

/*
 * Base mmap() file-offset values used to identify mappable io_uring
 * regions; presumably decoded in io_uring_mmap()/io_uring_get_unmapped_area()
 * to select the region being mapped — confirm against memmap.c.
 */
#define IORING_MAP_OFF_PARAM_REGION	0x20000000ULL
#define IORING_MAP_OFF_ZCRX_REGION	0x30000000ULL

/*
 * NOTE(review): looks like a per-instance index shift applied within the
 * ZCRX offset space (i.e. offset = base | (id << shift)) — verify against
 * the zcrx mmap code.
 */
#define IORING_OFF_ZCRX_SHIFT	16

/*
 * Pin @len bytes of user memory starting at @uaddr; on success returns the
 * page array and stores the page count through @npages.  Caller owns the
 * returned array and the page references.
 */
struct page **io_pin_pages(unsigned long uaddr, unsigned long len, int *npages);

#ifndef CONFIG_MMU
/* mmap capability flags for no-MMU builds. */
unsigned int io_uring_nommu_mmap_capabilities(struct file *file);
#endif
/* ->get_unmapped_area() handler for io_uring files. */
unsigned long io_uring_get_unmapped_area(struct file *file, unsigned long addr,
					 unsigned long len, unsigned long pgoff,
					 unsigned long flags);
/* ->mmap() handler for io_uring files. */
int io_uring_mmap(struct file *file, struct vm_area_struct *vma);

/* Tear down @mr; @user is charged/uncharged for the memory accounting. */
void io_free_region(struct user_struct *user, struct io_mapped_region *mr);
/*
 * Set up @mr from the user-supplied region descriptor @reg, to be mapped at
 * @mmap_offset.  Returns 0 on success or a negative errno.
 */
int io_create_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr,
		     struct io_uring_region_desc *reg,
		     unsigned long mmap_offset);

/* Kernel-visible address of the region's memory. */
static inline void *io_region_get_ptr(struct io_mapped_region *mr)
{
	return mr->ptr;
}

/* True once the region has been populated (nr_pages != 0). */
static inline bool io_region_is_set(struct io_mapped_region *mr)
{
	return !!mr->nr_pages;
}

/* Publish @src_region into @dst_region so mmap can find it. */
static inline void io_region_publish(struct io_ring_ctx *ctx,
				     struct io_mapped_region *src_region,
				     struct io_mapped_region *dst_region)
{
	/*
	 * Copy under ->mmap_lock so that, once published, mmap can find the
	 * region while holding only ->mmap_lock and not ->uring_lock.
	 */
	guard(mutex)(&ctx->mmap_lock);
	*dst_region = *src_region;
}

/* Size of the region in bytes. */
static inline size_t io_region_size(struct io_mapped_region *mr)
{
	return (size_t) mr->nr_pages << PAGE_SHIFT;
}

#endif /* IO_URING_MEMMAP_H */