xref: /freebsd/sys/compat/linuxkpi/common/include/linux/gfp.h (revision 63d1fd5970ec814904aa0f4580b10a0d302d08b2)
1 /*-
2  * Copyright (c) 2010 Isilon Systems, Inc.
3  * Copyright (c) 2010 iX Systems, Inc.
4  * Copyright (c) 2010 Panasas, Inc.
5  * Copyright (c) 2013 Mellanox Technologies, Ltd.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice unmodified, this list of conditions, and the following
13  *    disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  *
29  * $FreeBSD$
30  */
31 #ifndef	_LINUX_GFP_H_
32 #define	_LINUX_GFP_H_
33 
34 #include <sys/cdefs.h>
35 #include <sys/types.h>
36 #include <sys/systm.h>
37 #include <sys/malloc.h>
38 
39 #include <linux/page.h>
40 
41 #include <vm/vm_param.h>
42 #include <vm/vm_object.h>
43 #include <vm/vm_extern.h>
44 #include <vm/vm_kern.h>
45 
/*
 * Linux GFP_* / __GFP_* allocation flags expressed as FreeBSD
 * malloc(9) flags.  Flags with no FreeBSD equivalent (highmem,
 * DMA32 zones, reclaim/retry/kswapd hints, ...) are defined to 0 so
 * that Linux code may still OR them into a mask harmlessly.
 */
#define	__GFP_NOWARN	0
#define	__GFP_HIGHMEM	0
#define	__GFP_ZERO	M_ZERO
#define	__GFP_NORETRY	0
#define	__GFP_RECLAIM   0
#define	__GFP_RECLAIMABLE   0

#define	__GFP_IO	0
#define	__GFP_NO_KSWAPD	0
#define	__GFP_WAIT	M_WAITOK
#define	__GFP_DMA32     0

/*
 * Composite masks.  GFP_ATOMIC maps to a non-sleeping allocation that
 * may dip into the reserve pool (M_USE_RESERVE); all of the "user"
 * variants simply become sleeping kernel allocations.
 */
#define	GFP_NOWAIT	M_NOWAIT
#define	GFP_ATOMIC	(M_NOWAIT | M_USE_RESERVE)
#define	GFP_KERNEL	M_WAITOK
#define	GFP_USER	M_WAITOK
#define	GFP_HIGHUSER	M_WAITOK
#define	GFP_HIGHUSER_MOVABLE	M_WAITOK
#define	GFP_IOFS	M_NOWAIT
#define	GFP_NOIO	M_NOWAIT
#define	GFP_DMA32	0
#define	GFP_TEMPORARY	M_NOWAIT
68 
69 static inline void *
70 page_address(struct page *page)
71 {
72 
73 	if (page->object != kmem_object && page->object != kernel_object)
74 		return (NULL);
75 	return ((void *)(uintptr_t)(VM_MIN_KERNEL_ADDRESS +
76 	    IDX_TO_OFF(page->pindex)));
77 }
78 
79 static inline unsigned long
80 linux_get_page(gfp_t mask)
81 {
82 
83 	return kmem_malloc(kmem_arena, PAGE_SIZE, mask);
84 }
85 
/*
 * Single-page allocation wrappers around linux_get_page().
 *
 * NOTE(review): alloc_page() feeds the result straight into
 * virt_to_page() without checking for a failed (0) allocation --
 * confirm callers can tolerate that, or that virt_to_page(0) is benign.
 */
#define	get_zeroed_page(mask)	linux_get_page((mask) | M_ZERO)
#define	alloc_page(mask)	virt_to_page(linux_get_page((mask)))
#define	__get_free_page(mask)	linux_get_page((mask))
89 
90 static inline void
91 free_page(unsigned long page)
92 {
93 
94 	if (page == 0)
95 		return;
96 	kmem_free(kmem_arena, page, PAGE_SIZE);
97 }
98 
/*
 * Free a single page obtained via alloc_page()/alloc_pages(), panicking
 * if the page was not allocated through these wrappers.
 *
 * NOTE(review): only kmem_object pages are accepted here, while
 * page_address() above also recognizes kernel_object pages -- confirm
 * that wrapper-allocated pages always land in kmem_object.
 */
static inline void
__free_page(struct page *m)
{

	if (m->object != kmem_object)
		panic("__free_page:  Freed page %p not allocated via wrappers.",
		    m);
	kmem_free(kmem_arena, (vm_offset_t)page_address(m), PAGE_SIZE);
}
108 
109 static inline void
110 __free_pages(struct page *m, unsigned int order)
111 {
112 	size_t size;
113 
114 	if (m == NULL)
115 		return;
116 	size = PAGE_SIZE << order;
117 	kmem_free(kmem_arena, (vm_offset_t)page_address(m), size);
118 }
119 
120 static inline void free_pages(uintptr_t addr, unsigned int order)
121 {
122 	if (addr == 0)
123 		return;
124 	__free_pages(virt_to_page((void *)addr), order);
125 }
126 
127 /*
128  * Alloc pages allocates directly from the buddy allocator on linux so
129  * order specifies a power of two bucket of pages and the results
130  * are expected to be aligned on the size as well.
131  */
132 static inline struct page *
133 alloc_pages(gfp_t gfp_mask, unsigned int order)
134 {
135 	unsigned long page;
136 	size_t size;
137 
138 	size = PAGE_SIZE << order;
139 	page = kmem_alloc_contig(kmem_arena, size, gfp_mask,
140 	    0, ~(vm_paddr_t)0, size, 0, VM_MEMATTR_DEFAULT);
141 	if (page == 0)
142 		return (NULL);
143         return (virt_to_page(page));
144 }
145 
146 static inline uintptr_t __get_free_pages(gfp_t gfp_mask, unsigned int order)
147 {
148 	struct page *page;
149 
150 	page = alloc_pages(gfp_mask, order);
151 	if (page == NULL)
152 		return (0);
153 	return ((uintptr_t)page_address(page));
154 }
155 
/* NUMA-aware variants: the node hint is ignored on FreeBSD. */
#define alloc_pages_node(node, mask, order)     alloc_pages(mask, order)

#define kmalloc_node(chunk, mask, node)         kmalloc(chunk, mask)

/* Page "reserved" state is not tracked here, so these are no-ops. */
#define	SetPageReserved(page)	do { } while (0)	/* NOP */
#define	ClearPageReserved(page)	do { } while (0)	/* NOP */
162 
163 #endif	/* _LINUX_GFP_H_ */
164