/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * Copyright (c) 2015 François Tigeot
 * Copyright (c) 2015 Matthew Dillon <dillon@backplane.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef	_LINUXKPI_LINUX_MM_H_
#define	_LINUXKPI_LINUX_MM_H_

#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm_types.h>
#include <linux/pfn.h>
#include <linux/list.h>
#include <linux/mmap_lock.h>

#include <asm/pgtable.h>

#define	PAGE_ALIGN(x)	ALIGN(x, PAGE_SIZE)

/*
 * Make sure our LinuxKPI-defined virtual-memory flags don't conflict
 * with the ones defined by FreeBSD:
 */
CTASSERT((VM_PROT_ALL & -(1 << 8)) == 0);

#define	VM_READ			VM_PROT_READ
#define	VM_WRITE		VM_PROT_WRITE
#define	VM_EXEC			VM_PROT_EXECUTE

#define	VM_PFNINTERNAL		(1 << 8)	/* FreeBSD private flag to vm_insert_pfn() */
#define	VM_MIXEDMAP		(1 << 9)
#define	VM_NORESERVE		(1 << 10)
#define	VM_PFNMAP		(1 << 11)
#define	VM_IO			(1 << 12)
#define	VM_MAYWRITE		(1 << 13)
#define	VM_DONTCOPY		(1 << 14)
#define	VM_DONTEXPAND		(1 << 15)
#define	VM_DONTDUMP		(1 << 16)
#define	VM_SHARED		(1 << 17)

#define	VMA_MAX_PREFAULT_RECORD	1

#define	FOLL_WRITE		(1 << 0)
#define	FOLL_FORCE		(1 << 1)

#define	VM_FAULT_OOM		(1 << 0)
#define	VM_FAULT_SIGBUS		(1 << 1)
#define	VM_FAULT_MAJOR		(1 << 2)
#define	VM_FAULT_WRITE		(1 << 3)
#define	VM_FAULT_HWPOISON	(1 << 4)
#define	VM_FAULT_HWPOISON_LARGE	(1 << 5)
#define	VM_FAULT_SIGSEGV	(1 << 6)
#define	VM_FAULT_NOPAGE		(1 << 7)
#define	VM_FAULT_LOCKED		(1 << 8)
#define	VM_FAULT_RETRY		(1 << 9)
#define	VM_FAULT_FALLBACK	(1 << 10)

#define	VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
	VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | VM_FAULT_FALLBACK)

#define	FAULT_FLAG_WRITE	(1 << 0)
#define	FAULT_FLAG_MKWRITE	(1 << 1)
#define	FAULT_FLAG_ALLOW_RETRY	(1 << 2)
#define	FAULT_FLAG_RETRY_NOWAIT	(1 << 3)
#define	FAULT_FLAG_KILLABLE	(1 << 4)
#define	FAULT_FLAG_TRIED	(1 << 5)
#define	FAULT_FLAG_USER		(1 << 6)
#define	FAULT_FLAG_REMOTE	(1 << 7)
#define	FAULT_FLAG_INSTRUCTION	(1 << 8)

#define	fault_flag_allow_retry_first(flags) \
	(((flags) & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_TRIED)) == FAULT_FLAG_ALLOW_RETRY)

typedef int (*pte_fn_t)(linux_pte_t *, unsigned long addr, void *data);

struct vm_area_struct {
	vm_offset_t vm_start;
	vm_offset_t vm_end;
	vm_offset_t vm_pgoff;
	pgprot_t vm_page_prot;
	unsigned long vm_flags;
	struct mm_struct *vm_mm;
	void   *vm_private_data;
	const struct vm_operations_struct *vm_ops;
	struct linux_file *vm_file;

	/* internal operation */
	vm_paddr_t vm_pfn;		/* PFN for memory map */
	vm_size_t vm_len;		/* length for memory map */
	vm_pindex_t vm_pfn_first;
	int	vm_pfn_count;
	int    *vm_pfn_pcount;
	vm_object_t vm_obj;
	vm_map_t vm_cached_map;
	TAILQ_ENTRY(vm_area_struct) vm_entry;
};

struct vm_fault {
	unsigned int flags;
	pgoff_t	pgoff;
	union {
		/* user-space address */
		void *virtual_address;	/* < 4.11 */
		unsigned long address;	/* >= 4.11 */
	};
	struct page *page;
	struct vm_area_struct *vma;
};

struct vm_operations_struct {
	void    (*open) (struct vm_area_struct *);
	void    (*close) (struct vm_area_struct *);
	int     (*fault) (struct vm_fault *);
	int	(*access) (struct vm_area_struct *, unsigned long, void *, int, int);
};

struct sysinfo {
	uint64_t totalram;
	uint64_t totalhigh;
	uint32_t mem_unit;
};

/*
 * Compute the smallest allocation order, i.e. log2 of the power-of-two
 * page count, needed to hold "size" bytes.
 */
static inline int
get_order(unsigned long size)
{
	int order;

	size = (size - 1) >> PAGE_SHIFT;
	order = 0;
	while (size) {
		order++;
		size >>= 1;
	}
	return (order);
}

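/*
 * Illustrative results, assuming a 4 KiB PAGE_SIZE (the values follow
 * directly from the loop above):
 *
 *	get_order(1)             == 0	(one page)
 *	get_order(PAGE_SIZE)     == 0
 *	get_order(PAGE_SIZE + 1) == 1	(two pages)
 *	get_order(8 * PAGE_SIZE) == 3	(eight pages)
 */
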
static inline void *
lowmem_page_address(struct page *page)
{
	return (page_address(page));
}

/*
 * This only works via memory map operations.
 */
static inline int
io_remap_pfn_range(struct vm_area_struct *vma,
    unsigned long addr, unsigned long pfn, unsigned long size,
    vm_memattr_t prot)
{
	vma->vm_page_prot = prot;
	vma->vm_pfn = pfn;
	vma->vm_len = size;

	return (0);
}

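/*
 * Illustrative use from a driver's mmap() callback (a sketch only;
 * "bar_pfn" is a hypothetical PFN of the region to expose).  The function
 * merely records the PFN, length and protection in the vma; the mapping
 * itself is established later by the memory-map machinery:
 *
 *	return (io_remap_pfn_range(vma, vma->vm_start, bar_pfn,
 *	    vma->vm_end - vma->vm_start, VM_MEMATTR_UNCACHEABLE));
 */
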
vm_fault_t
lkpi_vmf_insert_pfn_prot_locked(struct vm_area_struct *vma, unsigned long addr,
    unsigned long pfn, pgprot_t prot);

static inline vm_fault_t
vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
    unsigned long pfn, pgprot_t prot)
{
	vm_fault_t ret;

	VM_OBJECT_WLOCK(vma->vm_obj);
	ret = lkpi_vmf_insert_pfn_prot_locked(vma, addr, pfn, prot);
	VM_OBJECT_WUNLOCK(vma->vm_obj);

	return (ret);
}
#define	vmf_insert_pfn_prot(...)	\
	_Static_assert(false,		\
"This function is always called in a loop. Consider using the locked version")

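/*
 * Illustrative fault-handler loop using the locked variant, as suggested by
 * the assertion above (a sketch only; "npages" and "first_pfn" are
 * hypothetical):
 *
 *	VM_OBJECT_WLOCK(vma->vm_obj);
 *	for (i = 0; i < npages; i++) {
 *		ret = lkpi_vmf_insert_pfn_prot_locked(vma,
 *		    addr + (i << PAGE_SHIFT), first_pfn + i,
 *		    vma->vm_page_prot);
 *		if ((ret & VM_FAULT_ERROR) != 0)
 *			break;
 *	}
 *	VM_OBJECT_WUNLOCK(vma->vm_obj);
 */
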
static inline int
apply_to_page_range(struct mm_struct *mm, unsigned long address,
    unsigned long size, pte_fn_t fn, void *data)
{
	return (-ENOTSUP);
}

int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
    unsigned long size);

static inline int
remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
    unsigned long pfn, unsigned long size, pgprot_t prot)
{
	return (-ENOTSUP);
}

static inline unsigned long
vma_pages(struct vm_area_struct *vma)
{
	return ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
}

#define	offset_in_page(off)	((unsigned long)(off) & (PAGE_SIZE - 1))

static inline void
set_page_dirty(struct vm_page *page)
{
	vm_page_dirty(page);
}

static inline void
mark_page_accessed(struct vm_page *page)
{
	vm_page_reference(page);
}

static inline void
get_page(struct vm_page *page)
{
	vm_page_wire(page);
}

extern long
get_user_pages(unsigned long start, unsigned long nr_pages,
    int gup_flags, struct page **,
    struct vm_area_struct **);

extern int
__get_user_pages_fast(unsigned long start, int nr_pages, int write,
    struct page **);

extern long
get_user_pages_remote(struct task_struct *, struct mm_struct *,
    unsigned long start, unsigned long nr_pages,
    int gup_flags, struct page **,
    struct vm_area_struct **);

static inline void
put_page(struct vm_page *page)
{
	vm_page_unwire(page, PQ_ACTIVE);
}

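/*
 * Illustrative pin/access/unpin cycle built from the helpers above (a
 * sketch only; "uaddr", "npages" and "pages" are hypothetical locals):
 *
 *	pinned = get_user_pages(uaddr, npages, FOLL_WRITE, pages, NULL);
 *	if (pinned > 0) {
 *		... access the pinned pages ...
 *		for (i = 0; i < pinned; i++)
 *			put_page(pages[i]);
 *	}
 */
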
#define	copy_highpage(to, from) pmap_copy_page(from, to)

static inline pgprot_t
vm_get_page_prot(unsigned long vm_flags)
{
	return (vm_flags & VM_PROT_ALL);
}

static inline vm_page_t
vmalloc_to_page(const void *addr)
{
	vm_paddr_t paddr;

	paddr = pmap_kextract((vm_offset_t)addr);
	return (PHYS_TO_VM_PAGE(paddr));
}

static inline int
trylock_page(struct page *page)
{
	return (vm_page_trylock(page));
}

static inline void
unlock_page(struct page *page)
{

	vm_page_unlock(page);
}

extern int is_vmalloc_addr(const void *addr);
void si_meminfo(struct sysinfo *si);

#define	unmap_mapping_range(...)	lkpi_unmap_mapping_range(__VA_ARGS__)
void lkpi_unmap_mapping_range(void *obj, loff_t const holebegin __unused,
    loff_t const holelen, int even_cows __unused);

#define	PAGE_ALIGNED(p)	__is_aligned(p, PAGE_SIZE)

#endif					/* _LINUXKPI_LINUX_MM_H_ */