/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _LINUXKPI_LINUX_GFP_H_
#define _LINUXKPI_LINUX_GFP_H_

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <linux/page.h>
#include <linux/topology.h>

#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#define __GFP_NOWARN 0
#define __GFP_HIGHMEM 0
#define __GFP_ZERO M_ZERO
#define __GFP_NOMEMALLOC 0
#define __GFP_RECLAIM 0
#define __GFP_RECLAIMABLE 0
#define __GFP_RETRY_MAYFAIL 0
#define __GFP_MOVABLE 0
#define __GFP_COMP 0
#define __GFP_KSWAPD_RECLAIM 0

#define __GFP_IO 0
#define __GFP_NO_KSWAPD 0
#define __GFP_WAIT M_WAITOK
#define __GFP_DMA32 (1U << 24) /* LinuxKPI only */
#define __GFP_NORETRY (1U << 25) /* LinuxKPI only */
#define __GFP_BITS_SHIFT 26
#define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1)
#define __GFP_NOFAIL M_WAITOK

#define GFP_NOWAIT M_NOWAIT
#define GFP_ATOMIC (M_NOWAIT | M_USE_RESERVE)
#define GFP_KERNEL M_WAITOK
#define GFP_USER M_WAITOK
#define GFP_HIGHUSER M_WAITOK
#define GFP_HIGHUSER_MOVABLE M_WAITOK
#define GFP_IOFS M_NOWAIT
#define GFP_NOIO M_NOWAIT
#define GFP_NOFS M_NOWAIT
#define GFP_DMA32 __GFP_DMA32
#define GFP_TEMPORARY M_NOWAIT
#define GFP_NATIVE_MASK (M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_ZERO)
#define GFP_TRANSHUGE 0
#define GFP_TRANSHUGE_LIGHT 0

CTASSERT((__GFP_DMA32 & GFP_NATIVE_MASK) == 0);
CTASSERT((__GFP_BITS_MASK & GFP_NATIVE_MASK) == GFP_NATIVE_MASK);
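
/*
 * The blocking GFP classes above collapse directly onto malloc(9)
 * flags, so a common request such as GFP_KERNEL | __GFP_ZERO is
 * simply M_WAITOK | M_ZERO.  The check below holds by construction
 * and documents that mapping:
 */
CTASSERT((GFP_KERNEL | __GFP_ZERO) == (M_WAITOK | M_ZERO));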

/*
 * Allocator state handed to the page_frag_*() routines below.
 */
struct page_frag_cache {
	void *va;
	int pagecnt_bias;
};

/*
 * Page management for unmapped pages:
 */
struct page *linux_alloc_pages(gfp_t flags, unsigned int order);
void linux_free_pages(struct page *page, unsigned int order);
void *linuxkpi_page_frag_alloc(struct page_frag_cache *, size_t, gfp_t);
void linuxkpi_page_frag_free(void *);
void linuxkpi__page_frag_cache_drain(struct page *, size_t);

static inline struct page *
alloc_page(gfp_t flags)
{

	return (linux_alloc_pages(flags, 0));
}

static inline struct page *
alloc_pages(gfp_t flags, unsigned int order)
{

	return (linux_alloc_pages(flags, order));
}

static inline struct page *
alloc_pages_node(int node_id, gfp_t flags, unsigned int order)
{

	return (linux_alloc_pages(flags, order));
}

static inline void
__free_pages(struct page *page, unsigned int order)
{

	linux_free_pages(page, order);
}

static inline void
__free_page(struct page *page)
{

	linux_free_pages(page, 0);
}

static inline struct page *
dev_alloc_pages(unsigned int order)
{
	return (linux_alloc_pages(GFP_ATOMIC, order));
}

struct folio *folio_alloc(gfp_t gfp, unsigned int order);
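
/*
 * For example, a typical LinuxKPI consumer pairs these helpers roughly
 * like the sketch below (illustrative only; GFP_KERNEL maps to M_WAITOK
 * above, so the allocation may sleep):
 *
 *	struct page *p;
 *
 *	p = alloc_page(GFP_KERNEL | __GFP_ZERO);
 *	if (p == NULL)
 *		return (-ENOMEM);
 *	...
 *	__free_page(p);
 */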

/*
 * Page management for mapped pages:
 */
vm_offset_t linux_alloc_kmem(gfp_t flags, unsigned int order);
void linux_free_kmem(vm_offset_t, unsigned int order);

static inline vm_offset_t
get_zeroed_page(gfp_t flags)
{

	return (linux_alloc_kmem(flags | __GFP_ZERO, 0));
}

static inline vm_offset_t
__get_free_page(gfp_t flags)
{

	return (linux_alloc_kmem(flags, 0));
}

static inline vm_offset_t
__get_free_pages(gfp_t flags, unsigned int order)
{

	return (linux_alloc_kmem(flags, order));
}

static inline void
free_pages(uintptr_t addr, unsigned int order)
{
	if (addr == 0)
		return;

	linux_free_kmem(addr, order);
}

static inline void
free_page(uintptr_t addr)
{
	if (addr == 0)
		return;

	linux_free_kmem(addr, 0);
}
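
/*
 * For example, the mapped-page helpers hand back a kernel virtual
 * address rather than a struct page pointer (illustrative sketch):
 *
 *	vm_offset_t addr;
 *
 *	addr = get_zeroed_page(GFP_KERNEL);
 *	if (addr == 0)
 *		return (-ENOMEM);
 *	...
 *	free_page(addr);
 */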

static inline void *
page_frag_alloc(struct page_frag_cache *pfc, size_t fragsz, gfp_t gfp)
{

	return (linuxkpi_page_frag_alloc(pfc, fragsz, gfp));
}

static inline void
page_frag_free(void *addr)
{

	linuxkpi_page_frag_free(addr);
}

static inline void
__page_frag_cache_drain(struct page *page, size_t count)
{

	linuxkpi__page_frag_cache_drain(page, count);
}
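
/*
 * For example, a driver may keep a per-queue fragment cache; following
 * the Linux convention the cache starts out zeroed.  The sizes and
 * names below are purely illustrative:
 *
 *	struct page_frag_cache cache = {};
 *	void *frag;
 *
 *	frag = page_frag_alloc(&cache, 256, GFP_ATOMIC);
 *	if (frag == NULL)
 *		return (-ENOMEM);
 *	...
 *	page_frag_free(frag);
 */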

static inline bool
gfpflags_allow_blocking(const gfp_t gfp_flags)
{
	return ((gfp_flags & (M_WAITOK | M_NOWAIT)) == M_WAITOK);
}
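
/*
 * For example (illustrative only), a caller that receives its gfp
 * flags from elsewhere can use this predicate to decide whether a
 * sleeping path is permitted:
 *
 *	if (!gfpflags_allow_blocking(gfp))
 *		return (NULL);
 *	... code that may sleep ...
 */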

#define SetPageReserved(page) do { } while (0) /* NOP */
#define ClearPageReserved(page) do { } while (0) /* NOP */

#endif /* _LINUXKPI_LINUX_GFP_H_ */