/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/kasan-enabled.h>
#include <linux/kasan-tags.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

#endif

typedef unsigned int __bitwise kasan_vmalloc_flags_t;

#define KASAN_VMALLOC_NONE		((__force kasan_vmalloc_flags_t)0x00u)
#define KASAN_VMALLOC_INIT		((__force kasan_vmalloc_flags_t)0x01u)
#define KASAN_VMALLOC_VM_ALLOC		((__force kasan_vmalloc_flags_t)0x02u)
#define KASAN_VMALLOC_PROT_NORMAL	((__force kasan_vmalloc_flags_t)0x04u)
#define KASAN_VMALLOC_KEEP_TAG		((__force kasan_vmalloc_flags_t)0x08u)

#define KASAN_VMALLOC_PAGE_RANGE 0x1 /* Apply existing page range */
#define KASAN_VMALLOC_TLB_FLUSH  0x2 /* TLB flush */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

#ifndef kasan_mem_to_shadow
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
#endif
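
/*
 * Illustrative note, not from the original header: in the generic mode
 * KASAN_SHADOW_SCALE_SHIFT is 3, so each 8-byte granule of kernel address
 * space maps to one shadow byte and the shadow byte for an address lives at
 * (addr >> 3) + KASAN_SHADOW_OFFSET. The software tag-based mode uses
 * 16-byte granules (a shift of 4) instead.
 */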

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

#else /* CONFIG_KASAN_HW_TAGS */

#endif /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}

#ifdef CONFIG_KASAN
void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
						unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}

bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline bool kasan_unpoison_pages(struct page *page,
						 unsigned int order, bool init)
{
	if (kasan_enabled())
		return __kasan_unpoison_pages(page, order, init);
	return false;
}

void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
	if (kasan_enabled())
		__kasan_poison_slab(slab);
}

void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_unpoison_new_object - Temporarily unpoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * temporarily unpoisons an object from a newly allocated slab without doing
 * anything else. The object must later be repoisoned by
 * kasan_poison_new_object().
 */
static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_new_object(cache, object);
}

void __kasan_poison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_poison_new_object - Repoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * repoisons an object that was previously unpoisoned by
 * kasan_unpoison_new_object() without doing anything else.
 */
static __always_inline void kasan_poison_new_object(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_poison_new_object(cache, object);
}
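
/*
 * Illustrative sketch, not part of the original header: the slab allocator
 * pairs the two hooks above around writing its own metadata into an
 * otherwise poisoned object of a freshly allocated slab, roughly:
 *
 *	kasan_unpoison_new_object(cache, object);
 *	// ... write the free pointer / debug metadata into the object ...
 *	kasan_poison_new_object(cache, object);
 */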

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_pre_free(struct kmem_cache *s, void *object,
			   unsigned long ip);
/**
 * kasan_slab_pre_free - Check whether freeing a slab object is safe.
 * @object: Object to be freed.
 *
 * This function checks whether freeing the given object is safe. It may
 * check for double-free and invalid-free bugs and report them.
 *
 * This function is intended only for use by the slab allocator.
 *
 * Return: true if freeing the object is unsafe; false otherwise.
 */
static __always_inline bool kasan_slab_pre_free(struct kmem_cache *s,
						void *object)
{
	if (kasan_enabled())
		return __kasan_slab_pre_free(s, object, _RET_IP_);
	return false;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object, bool init,
		       bool still_accessible, bool no_quarantine);
/**
 * kasan_slab_free - Poison, initialize, and quarantine a slab object.
 * @object: Object to be freed.
 * @init: Whether to initialize the object.
 * @still_accessible: Whether the object contents are still accessible.
 *
 * This function informs that a slab object has been freed and is not
 * supposed to be accessed anymore, except when @still_accessible is set
 * (indicating that the object is in a SLAB_TYPESAFE_BY_RCU cache and an RCU
 * grace period might not have passed yet).
 *
 * For KASAN modes that have integrated memory initialization
 * (kasan_has_integrated_init() == true), this function also initializes
 * the object's memory. For other modes, the @init argument is ignored.
 *
 * This function might also take ownership of the object to quarantine it.
 * When this happens, KASAN will defer freeing the object to a later
 * stage and handle it internally until then. The return value indicates
 * whether KASAN took ownership of the object.
 *
 * This function is intended only for use by the slab allocator.
 *
 * Return: true if KASAN took ownership of the object; false otherwise.
 */
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
						void *object, bool init,
						bool still_accessible,
						bool no_quarantine)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, init, still_accessible,
					 no_quarantine);
	return false;
}
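
/*
 * Illustrative sketch, not part of the original header: how a slab free path
 * might combine kasan_slab_pre_free() and kasan_slab_free(). example_free()
 * and the freelist step are hypothetical stand-ins for the real callers in
 * the slab allocator:
 *
 *	static void example_free(struct kmem_cache *s, void *object)
 *	{
 *		if (kasan_slab_pre_free(s, object))
 *			return;	// double-free/invalid-free already reported
 *		if (kasan_slab_free(s, object, false, false, false))
 *			return;	// KASAN took ownership (e.g. to quarantine it)
 *		// ... hand the object back to the allocator's freelist ...
 *	}
 */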

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
				  unsigned long ip);
/**
 * kasan_mempool_poison_pages - Check and poison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function is similar to kasan_mempool_poison_object() but operates on
 * page allocations.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned via
 * kasan_mempool_unpoison_pages().
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_pages(struct page *page,
							unsigned int order)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_pages(page, order, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
				    unsigned long ip);
/**
 * kasan_mempool_unpoison_pages - Unpoison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function unpoisons a page allocation that was previously poisoned by
 * kasan_mempool_poison_pages() without zeroing the allocation's memory. For
 * the tag-based modes, this function assigns a new tag to the allocation.
 */
static __always_inline void kasan_mempool_unpoison_pages(struct page *page,
							  unsigned int order)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_pages(page, order, _RET_IP_);
}
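
/*
 * Illustrative sketch, not part of the original header: a subsystem that
 * stashes a spare page allocation instead of freeing it (pool->spare is a
 * hypothetical field), roughly:
 *
 *	// when caching the pages instead of freeing them:
 *	if (kasan_mempool_poison_pages(page, order))
 *		pool->spare = page;	// safe to cache for later reuse
 *	// on false, a bug was reported: do not cache or reuse the pages
 *
 *	// when taking the cached pages back out:
 *	kasan_mempool_unpoison_pages(pool->spare, order);
 */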

bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
/**
 * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function poisons a slab allocation and saves a free stack trace for it
 * without initializing the allocation's memory and without putting it into the
 * quarantine (for the Generic mode).
 *
 * This function also performs checks to detect double-free and invalid-free
 * bugs and reports them. The caller can use the return value of this function
 * to find out if the allocation is buggy.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned via
 * kasan_mempool_unpoison_object().
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_SIZE).
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_object(void *ptr)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_object(ptr, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip);
/**
 * kasan_mempool_unpoison_object - Unpoison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 * @size: Size to be unpoisoned.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function unpoisons a slab allocation that was previously poisoned via
 * kasan_mempool_poison_object() and saves an alloc stack trace for it without
 * initializing the allocation's memory. For the tag-based modes, this function
 * does not assign a new tag to the allocation and instead restores the
 * original tags based on the pointer value.
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_SIZE).
 */
static __always_inline void kasan_mempool_unpoison_object(void *ptr,
							   size_t size)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_object(ptr, size, _RET_IP_);
}
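
/*
 * Illustrative sketch, not part of the original header: the matching flow for
 * slab allocations (pool->element and pool->elem_size are hypothetical),
 * roughly:
 *
 *	// when caching the allocation instead of freeing it:
 *	if (kasan_mempool_poison_object(ptr))
 *		pool->element = ptr;	// safe to cache for later reuse
 *	// on false, a double-free/invalid-free was reported: do not reuse it
 *
 *	// when handing the cached element back out:
 *	kasan_mempool_unpoison_object(pool->element, pool->elem_size);
 */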

/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}
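
/*
 * Illustrative sketch, not part of the original header: a caller that wants a
 * use of an already-freed object to be reported and then handled gracefully
 * can probe the first byte before touching the object, roughly:
 *
 *	if (!kasan_check_byte(ptr))
 *		return 0;	// bad access was reported, bail out
 *	// ... safe to access the object ...
 */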

#else /* CONFIG_KASAN */

static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init)
{
	return false;
}
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_new_object(struct kmem_cache *cache,
					void *object) {}
static inline void kasan_poison_new_object(struct kmem_cache *cache,
					void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
				const void *object)
{
	return (void *)object;
}

static inline bool kasan_slab_pre_free(struct kmem_cache *s, void *object)
{
	return false;
}

static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
				   bool init, bool still_accessible,
				   bool no_quarantine)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				   gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				 gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order)
{
	return true;
}
static inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) {}
static inline bool kasan_mempool_poison_object(void *ptr)
{
	return true;
}
static inline void kasan_mempool_unpoison_object(void *ptr, size_t size) {}

static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
static inline void kasan_unpoison_task_stack_below(const void *watermark) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};

size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags);

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

/* Tag-based KASAN modes do not use per-object metadata. */
static inline size_t kasan_metadata_size(struct kmem_cache *cache,
						bool in_object)
{
	return 0;
}
/* And no cache-related metadata initialization is required. */
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(const void *addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_GENERIC
void __init kasan_init_generic(void);
#else
static inline void kasan_init_generic(void) { }
#endif

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int __kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask);
static inline int kasan_populate_vmalloc(unsigned long addr,
					 unsigned long size, gfp_t gfp_mask)
{
	if (kasan_enabled())
		return __kasan_populate_vmalloc(addr, size, gfp_mask);
	return 0;
}
void __kasan_release_vmalloc(unsigned long start, unsigned long end,
			     unsigned long free_region_start,
			     unsigned long free_region_end,
			     unsigned long flags);
static inline void kasan_release_vmalloc(unsigned long start, unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end,
					 unsigned long flags)
{
	if (kasan_enabled())
		return __kasan_release_vmalloc(start, end, free_region_start,
					       free_region_end, flags);
}

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline void kasan_populate_early_vm_area_shadow(void *start,
							unsigned long size)
{ }
static inline int kasan_populate_vmalloc(unsigned long start,
					 unsigned long size, gfp_t gfp_mask)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end,
					 unsigned long flags) { }

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags);
static __always_inline void *kasan_unpoison_vmalloc(const void *start,
						     unsigned long size,
						     kasan_vmalloc_flags_t flags)
{
	if (kasan_enabled())
		return __kasan_unpoison_vmalloc(start, size, flags);
	return (void *)start;
}

void __kasan_poison_vmalloc(const void *start, unsigned long size);
static __always_inline void kasan_poison_vmalloc(const void *start,
						  unsigned long size)
{
	if (kasan_enabled())
		__kasan_poison_vmalloc(start, size);
}

void __kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
				 kasan_vmalloc_flags_t flags);
static __always_inline void
kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
			  kasan_vmalloc_flags_t flags)
{
	if (kasan_enabled())
		__kasan_unpoison_vmap_areas(vms, nr_vms, flags);
}

#else /* CONFIG_KASAN_VMALLOC */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size) { }
static inline int kasan_populate_vmalloc(unsigned long start,
					 unsigned long size, gfp_t gfp_mask)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end,
					 unsigned long flags) { }

static inline void *kasan_unpoison_vmalloc(const void *start,
					    unsigned long size,
					    kasan_vmalloc_flags_t flags)
{
	return (void *)start;
}
static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }

static __always_inline void
kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
			  kasan_vmalloc_flags_t flags)
{ }

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions allocate and free shadow memory for kernel modules.
 * They are only required when KASAN_VMALLOC is not supported, as otherwise
 * shadow memory is allocated by the generic vmalloc handlers.
 */
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
void kasan_free_module_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#endif /* LINUX_KASAN_H */