// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _SPL_KMEM_H
#define	_SPL_KMEM_H

#include <sys/debug.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
extern int kmem_debugging(void);
__attribute__((format(printf, 1, 0)))
extern char *kmem_vasprintf(const char *fmt, va_list ap);
__attribute__((format(printf, 1, 2)))
extern char *kmem_asprintf(const char *fmt, ...);
extern char *kmem_strdup(const char *str);
extern void kmem_strfree(char *str);
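
/*
 * Illustrative usage (a sketch, not additional API): strings obtained from
 * kmem_asprintf(), kmem_vasprintf(), or kmem_strdup() are released with
 * kmem_strfree().  The "name", "base", and "id" variables below are
 * hypothetical and exist only for this example.
 *
 *	char *name = kmem_asprintf("%s-%d", base, id);
 *	...
 *	kmem_strfree(name);
 */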

#define	kmem_scnprintf	scnprintf

#define	POINTER_IS_VALID(p)	(!((uintptr_t)(p) & 0x3))
#define	POINTER_INVALIDATE(pp)	(*(pp) = (void *)((uintptr_t)(*(pp)) | 0x1))

/*
 * Memory allocation interfaces
 */
#define	KM_SLEEP	0x0000	/* can block for memory; success guaranteed */
#define	KM_NOSLEEP	0x0001	/* cannot block for memory; may fail */
#define	KM_PUSHPAGE	0x0004	/* can block for memory; may use reserve */
#define	KM_ZERO		0x1000	/* zero the allocation */
#define	KM_VMEM		0x2000	/* caller is vmem_* wrapper */

#define	KM_PUBLIC_MASK	(KM_SLEEP | KM_NOSLEEP | KM_PUSHPAGE)
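
/*
 * Illustrative flag selection (a sketch only, using the kmem_alloc() and
 * kmem_zalloc() macros declared later in this header): KM_SLEEP requests
 * may block but are guaranteed to succeed, while KM_NOSLEEP requests may
 * fail and must be checked by the caller.
 *
 *	void *buf = kmem_zalloc(size, KM_SLEEP);	(never NULL, may block)
 *
 *	void *nbuf = kmem_alloc(size, KM_NOSLEEP);	(may return NULL)
 *	if (nbuf == NULL)
 *		return (ENOMEM);
 */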

static int spl_fstrans_check(void);
void *spl_kvmalloc(size_t size, gfp_t flags);

/*
 * Convert a KM_* flags mask to its Linux GFP_* counterpart.  The conversion
 * function is context aware which means that KM_SLEEP allocations can be
 * safely used in syncing contexts which have set SPL_FSTRANS.
 */
static inline gfp_t
kmem_flags_convert(int flags)
{
	gfp_t lflags = __GFP_NOWARN | __GFP_COMP;

	if (flags & KM_NOSLEEP) {
		lflags |= GFP_ATOMIC | __GFP_NORETRY;
	} else {
		lflags |= GFP_KERNEL;
		if (spl_fstrans_check())
			lflags &= ~(__GFP_IO|__GFP_FS);
	}

	if (flags & KM_PUSHPAGE)
		lflags |= __GFP_HIGH;

	if (flags & KM_ZERO)
		lflags |= __GFP_ZERO;

	return (lflags);
}
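
/*
 * For reference, a sketch of the mapping implemented above.  Every
 * conversion includes __GFP_NOWARN | __GFP_COMP; in addition:
 *
 *	KM_SLEEP	-> GFP_KERNEL
 *	KM_NOSLEEP	-> GFP_ATOMIC | __GFP_NORETRY
 *	KM_ZERO		-> __GFP_ZERO (or'd in)
 *	KM_PUSHPAGE	-> __GFP_HIGH (or'd in)
 *
 * and inside an SPL_FSTRANS context the KM_SLEEP case also clears
 * __GFP_IO and __GFP_FS.
 */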

typedef struct {
	struct task_struct *fstrans_thread;
	unsigned int saved_flags;
} fstrans_cookie_t;

/*
 * SPL_FSTRANS is the set of flags that indicate that the task is in a
 * filesystem or IO codepath, and so any allocation must not call back into
 * those codepaths (e.g. to swap).
 */
#define	SPL_FSTRANS	(PF_MEMALLOC_NOIO)

static inline fstrans_cookie_t
spl_fstrans_mark(void)
{
	fstrans_cookie_t cookie;

	BUILD_BUG_ON(SPL_FSTRANS == 0);

	cookie.fstrans_thread = current;
	cookie.saved_flags = current->flags & SPL_FSTRANS;
	current->flags |= SPL_FSTRANS;

	return (cookie);
}

static inline void
spl_fstrans_unmark(fstrans_cookie_t cookie)
{
	ASSERT3P(cookie.fstrans_thread, ==, current);
	ASSERT((current->flags & SPL_FSTRANS) == SPL_FSTRANS);

	current->flags &= ~SPL_FSTRANS;
	current->flags |= cookie.saved_flags;
}

static inline int
spl_fstrans_check(void)
{
	return (current->flags & SPL_FSTRANS);
}
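
/*
 * Typical bracketing pattern (an illustrative sketch): code entering a
 * filesystem or IO path marks the current task so that nested KM_SLEEP
 * allocations cannot recurse back into those paths, then restores the
 * previous state on the way out.
 *
 *	fstrans_cookie_t cookie = spl_fstrans_mark();
 *	...	(KM_SLEEP allocations here avoid __GFP_IO/__GFP_FS)
 *	spl_fstrans_unmark(cookie);
 */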

extern atomic64_t kmem_alloc_used;
extern uint64_t kmem_alloc_max;

extern unsigned int spl_kmem_alloc_warn;
extern unsigned int spl_kmem_alloc_max;

#define	kmem_alloc(sz, fl)	spl_kmem_alloc((sz), (fl), __func__, __LINE__)
#define	kmem_zalloc(sz, fl)	spl_kmem_zalloc((sz), (fl), __func__, __LINE__)
#define	kmem_free(ptr, sz)	spl_kmem_free((ptr), (sz))
#define	kmem_cache_reap_active	spl_kmem_cache_reap_active

__attribute__((malloc, alloc_size(1)))
extern void *spl_kmem_alloc(size_t sz, int fl, const char *func, int line);
__attribute__((malloc, alloc_size(1)))
extern void *spl_kmem_zalloc(size_t sz, int fl, const char *func, int line);
extern void spl_kmem_free(const void *ptr, size_t sz);
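
/*
 * Illustrative pairing (a sketch only): the kmem_alloc()/kmem_zalloc()
 * macros above capture the calling __func__ and __LINE__ for allocation
 * tracking, and kmem_free() takes the original allocation size.  The
 * "dbuf" pointer is hypothetical and exists only for this example.
 *
 *	dbuf = kmem_zalloc(sizeof (*dbuf), KM_SLEEP);
 *	...
 *	kmem_free(dbuf, sizeof (*dbuf));
 */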

/*
 * 5.8 API change, pgprot_t argument removed.
 */
#ifdef HAVE_VMALLOC_PAGE_KERNEL
#define	spl_vmalloc(size, flags)	__vmalloc(size, flags, PAGE_KERNEL)
#else
#define	spl_vmalloc(size, flags)	__vmalloc(size, flags)
#endif
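
/*
 * Either way, callers invoke the wrapper identically; for example
 * (a sketch only, with "ptr" and "size" as hypothetical locals):
 *
 *	ptr = spl_vmalloc(size, kmem_flags_convert(KM_SLEEP));
 *	...
 *	vfree(ptr);
 */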

/*
 * The following functions are only available for internal use.
 */
extern void *spl_kmem_alloc_impl(size_t size, int flags, int node);
extern void *spl_kmem_alloc_debug(size_t size, int flags, int node);
extern void *spl_kmem_alloc_track(size_t size, int flags,
    const char *func, int line, int node);
extern void spl_kmem_free_impl(const void *buf, size_t size);
extern void spl_kmem_free_debug(const void *buf, size_t size);
extern void spl_kmem_free_track(const void *buf, size_t size);

extern int spl_kmem_init(void);
extern void spl_kmem_fini(void);
extern int spl_kmem_cache_reap_active(void);

#endif /* _SPL_KMEM_H */