/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef	_LINUXKPI_LINUX_GFP_H_
#define	_LINUXKPI_LINUX_GFP_H_

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <linux/page.h>

#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#define	__GFP_NOWARN	0
#define	__GFP_HIGHMEM	0
#define	__GFP_ZERO	M_ZERO
#define	__GFP_NOMEMALLOC 0
#define	__GFP_RECLAIM   0
#define	__GFP_RECLAIMABLE   0
#define	__GFP_RETRY_MAYFAIL 0
#define	__GFP_MOVABLE	0
#define	__GFP_COMP	0
#define	__GFP_KSWAPD_RECLAIM 0

#define	__GFP_IO	0
#define	__GFP_NO_KSWAPD	0
#define	__GFP_KSWAPD_RECLAIM	0
#define	__GFP_WAIT	M_WAITOK
#define	__GFP_DMA32	(1U << 24) /* LinuxKPI only */
#define	__GFP_NORETRY	(1U << 25) /* LinuxKPI only */
#define	__GFP_BITS_SHIFT 26
#define	__GFP_BITS_MASK	((1 << __GFP_BITS_SHIFT) - 1)
#define	__GFP_NOFAIL	M_WAITOK

#define	GFP_NOWAIT	M_NOWAIT
#define	GFP_ATOMIC	(M_NOWAIT | M_USE_RESERVE)
#define	GFP_KERNEL	M_WAITOK
#define	GFP_USER	M_WAITOK
#define	GFP_HIGHUSER	M_WAITOK
#define	GFP_HIGHUSER_MOVABLE	M_WAITOK
#define	GFP_IOFS	M_NOWAIT
#define	GFP_NOIO	M_NOWAIT
#define	GFP_NOFS	M_NOWAIT
#define	GFP_DMA32	__GFP_DMA32
#define	GFP_TEMPORARY	M_NOWAIT
#define	GFP_NATIVE_MASK	(M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_ZERO)
#define	GFP_TRANSHUGE	0
#define	GFP_TRANSHUGE_LIGHT	0

CTASSERT((__GFP_DMA32 & GFP_NATIVE_MASK) == 0);
CTASSERT((__GFP_BITS_MASK & GFP_NATIVE_MASK) == GFP_NATIVE_MASK);

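/*
 * Flag-mapping sketch (illustrative comment only, not compiled): under
 * this shim GFP_KERNEL maps to M_WAITOK and may sleep, GFP_ATOMIC maps
 * to M_NOWAIT | M_USE_RESERVE and never sleeps, and __GFP_ZERO maps to
 * M_ZERO.  LinuxKPI-only bits such as __GFP_DMA32 sit above
 * GFP_NATIVE_MASK, so a consumer passing flags straight to malloc(9)
 * would mask them off first; M_DEVBUF and "size" below are stand-ins:
 *
 *	gfp_t gfp = GFP_KERNEL | __GFP_ZERO;
 *	size_t size = 128;
 *	void *buf;
 *
 *	buf = malloc(size, M_DEVBUF, gfp & GFP_NATIVE_MASK);
 *	...
 *	free(buf, M_DEVBUF);
 */
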
struct page_frag_cache {
	void *va;
	int pagecnt_bias;
};

/*
 * Page management for unmapped pages:
 */
struct page *linux_alloc_pages(gfp_t flags, unsigned int order);
void linux_free_pages(struct page *page, unsigned int order);
void *linuxkpi_page_frag_alloc(struct page_frag_cache *, size_t, gfp_t);
void linuxkpi_page_frag_free(void *);
void linuxkpi__page_frag_cache_drain(struct page *, size_t);

static inline struct page *
alloc_page(gfp_t flags)
{

	return (linux_alloc_pages(flags, 0));
}

static inline struct page *
alloc_pages(gfp_t flags, unsigned int order)
{

	return (linux_alloc_pages(flags, order));
}

static inline struct page *
alloc_pages_node(int node_id, gfp_t flags, unsigned int order)
{

	return (linux_alloc_pages(flags, order));
}

static inline void
__free_pages(struct page *page, unsigned int order)
{

	linux_free_pages(page, order);
}

static inline void
__free_page(struct page *page)
{

	linux_free_pages(page, 0);
}

static inline struct page *
dev_alloc_pages(unsigned int order)
{
	return (linux_alloc_pages(GFP_ATOMIC, order));
}

struct folio *folio_alloc(gfp_t gfp, unsigned int order);

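/*
 * Usage sketch (illustrative comment only, not compiled): allocate one
 * unmapped, zeroed page and release it.  A non-sleeping request
 * (GFP_ATOMIC/GFP_NOWAIT) must be prepared for a NULL return.
 *
 *	struct page *p;
 *
 *	p = alloc_page(GFP_KERNEL | __GFP_ZERO);
 *	if (p == NULL)
 *		return (-ENOMEM);
 *	...
 *	__free_page(p);
 */
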
/*
 * Page management for mapped pages:
 */
vm_offset_t linux_alloc_kmem(gfp_t flags, unsigned int order);
void linux_free_kmem(vm_offset_t, unsigned int order);

static inline vm_offset_t
get_zeroed_page(gfp_t flags)
{

	return (linux_alloc_kmem(flags | __GFP_ZERO, 0));
}

static inline vm_offset_t
__get_free_page(gfp_t flags)
{

	return (linux_alloc_kmem(flags, 0));
}

static inline vm_offset_t
__get_free_pages(gfp_t flags, unsigned int order)
{

	return (linux_alloc_kmem(flags, order));
}

static inline void
free_pages(uintptr_t addr, unsigned int order)
{
	if (addr == 0)
		return;

	linux_free_kmem(addr, order);
}

static inline void
free_page(uintptr_t addr)
{
	if (addr == 0)
		return;

	linux_free_kmem(addr, 0);
}

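/*
 * Usage sketch (illustrative comment only, not compiled): the kmem
 * variants return a kernel virtual address as a vm_offset_t rather than
 * a struct page, so the memory is usable directly once cast.  Order 1
 * requests two contiguous pages.
 *
 *	vm_offset_t addr;
 *	char *buf;
 *
 *	addr = __get_free_pages(GFP_KERNEL, 1);
 *	if (addr == 0)
 *		return (-ENOMEM);
 *	buf = (char *)addr;
 *	buf[0] = 0;
 *	free_pages(addr, 1);
 */
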
static inline void *
page_frag_alloc(struct page_frag_cache *pfc, size_t fragsz, gfp_t gfp)
{

	return (linuxkpi_page_frag_alloc(pfc, fragsz, gfp));
}

static inline void
page_frag_free(void *addr)
{

	linuxkpi_page_frag_free(addr);
}

static inline void
__page_frag_cache_drain(struct page *page, size_t count)
{

	linuxkpi__page_frag_cache_drain(page, count);
}

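/*
 * Usage sketch (illustrative comment only, not compiled): carve small
 * fragments out of a zero-initialized page_frag_cache and release each
 * fragment individually with page_frag_free().
 *
 *	struct page_frag_cache cache;
 *	void *frag;
 *
 *	memset(&cache, 0, sizeof(cache));
 *	frag = page_frag_alloc(&cache, 256, GFP_ATOMIC);
 *	if (frag == NULL)
 *		return (-ENOMEM);
 *	...
 *	page_frag_free(frag);
 */
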
static inline bool
gfpflags_allow_blocking(const gfp_t gfp_flags)
{
	return ((gfp_flags & (M_WAITOK | M_NOWAIT)) == M_WAITOK);
}

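/*
 * Usage sketch (illustrative comment only, not compiled): code handed a
 * gfp_t by a Linux-style caller can use gfpflags_allow_blocking() to
 * pick between a sleeping and a non-sleeping path.  "sc" and
 * "slow_path_locked" are hypothetical names, not LinuxKPI API.
 *
 *	if (gfpflags_allow_blocking(gfp))
 *		error = slow_path_locked(sc);	// may sleep (M_WAITOK set)
 *	else
 *		error = ENOMEM;			// atomic caller: fail fast
 */
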
#define	SetPageReserved(page)	do { } while (0)	/* NOP */
#define	ClearPageReserved(page)	do { } while (0)	/* NOP */

#endif	/* _LINUXKPI_LINUX_GFP_H_ */