/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/kasan-enabled.h>
#include <linux/kasan-tags.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

#endif

typedef unsigned int __bitwise kasan_vmalloc_flags_t;

#define KASAN_VMALLOC_NONE		((__force kasan_vmalloc_flags_t)0x00u)
#define KASAN_VMALLOC_INIT		((__force kasan_vmalloc_flags_t)0x01u)
#define KASAN_VMALLOC_VM_ALLOC		((__force kasan_vmalloc_flags_t)0x02u)
#define KASAN_VMALLOC_PROT_NORMAL	((__force kasan_vmalloc_flags_t)0x04u)

#define KASAN_VMALLOC_PAGE_RANGE 0x1 /* Apply existing page range */
#define KASAN_VMALLOC_TLB_FLUSH 0x2 /* TLB flush */
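/*
 * Illustrative sketch (not taken from this header): callers combine the
 * kasan_vmalloc_flags_t flags above when unpoisoning a vmalloc region via
 * kasan_unpoison_vmalloc(), declared further below, e.g.:
 *
 *	addr = kasan_unpoison_vmalloc(addr, size, KASAN_VMALLOC_VM_ALLOC |
 *						  KASAN_VMALLOC_PROT_NORMAL);
 */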

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

#ifndef kasan_mem_to_shadow
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
#endif
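/*
 * A worked example (values assumed for illustration; they are
 * architecture-dependent): with KASAN_SHADOW_SCALE_SHIFT == 3, one shadow
 * byte covers an 8-byte granule of real memory, so the shadow byte for an
 * address is found at (addr >> 3) + KASAN_SHADOW_OFFSET:
 *
 *	u8 *shadow = (u8 *)kasan_mem_to_shadow(ptr);
 *
 * In the Generic mode, a shadow value of 0 means the whole granule is
 * accessible and values 1..7 mean only the first N bytes are.
 */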

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

#else /* CONFIG_KASAN_HW_TAGS */

#endif /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}
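/*
 * Usage sketch (hypothetical allocator-side flow, not taken from this file):
 * when KASAN has integrated init (the HW_TAGS mode), object memory is zeroed
 * while the memory tags are set, so an allocator can skip its own memset():
 *
 *	object = kasan_slab_alloc(s, object, flags, init);
 *	if (object && init && !kasan_has_integrated_init())
 *		memset(object, 0, size);	// "size" is an assumed variable
 */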

#ifdef CONFIG_KASAN
void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
						unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}

bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline bool kasan_unpoison_pages(struct page *page,
						 unsigned int order, bool init)
{
	if (kasan_enabled())
		return __kasan_unpoison_pages(page, order, init);
	return false;
}

void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
	if (kasan_enabled())
		__kasan_poison_slab(slab);
}

void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_unpoison_new_object - Temporarily unpoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * temporarily unpoisons an object from a newly allocated slab without doing
 * anything else. The object must later be repoisoned by
 * kasan_poison_new_object().
 */
static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache,
						       void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_new_object(cache, object);
}

void __kasan_poison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_poison_new_object - Repoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * repoisons an object that was previously unpoisoned by
 * kasan_unpoison_new_object() without doing anything else.
 */
static __always_inline void kasan_poison_new_object(struct kmem_cache *cache,
						     void *object)
{
	if (kasan_enabled())
		__kasan_poison_new_object(cache, object);
}
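/*
 * Pairing sketch (hypothetical slab-internal use, for illustration only):
 * the two helpers above bracket an early access to an object of a freshly
 * allocated slab, for example to write allocator-private metadata:
 *
 *	kasan_unpoison_new_object(cache, object);
 *	write_free_pointer(cache, object, next);	// hypothetical helper
 *	kasan_poison_new_object(cache, object);
 */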

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_pre_free(struct kmem_cache *s, void *object,
			   unsigned long ip);
/**
 * kasan_slab_pre_free - Check whether freeing a slab object is safe.
 * @object: Object to be freed.
 *
 * This function checks whether freeing the given object is safe. It may
 * check for double-free and invalid-free bugs and report them.
 *
 * This function is intended only for use by the slab allocator.
 *
 * Return: true if freeing the object is unsafe; false otherwise.
 */
static __always_inline bool kasan_slab_pre_free(struct kmem_cache *s,
						void *object)
{
	if (kasan_enabled())
		return __kasan_slab_pre_free(s, object, _RET_IP_);
	return false;
}
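/*
 * Usage sketch (assumed allocator-side flow): a free path can bail out when
 * KASAN reports the free as unsafe, e.g. on a double-free, leaving the
 * object alone since the bug has already been reported:
 *
 *	if (kasan_slab_pre_free(s, object))
 *		return;
 */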

bool __kasan_slab_free(struct kmem_cache *s, void *object, bool init,
		       bool still_accessible, bool no_quarantine);
/**
 * kasan_slab_free - Poison, initialize, and quarantine a slab object.
 * @object: Object to be freed.
 * @init: Whether to initialize the object.
 * @still_accessible: Whether the object contents are still accessible.
 *
 * This function informs that a slab object has been freed and is not
 * supposed to be accessed anymore, except when @still_accessible is set
 * (indicating that the object is in a SLAB_TYPESAFE_BY_RCU cache and an RCU
 * grace period might not have passed yet).
 *
 * For KASAN modes that have integrated memory initialization
 * (kasan_has_integrated_init() == true), this function also initializes
 * the object's memory. For other modes, the @init argument is ignored.
 *
 * This function might also take ownership of the object to quarantine it.
 * When this happens, KASAN will defer freeing the object to a later
 * stage and handle it internally until then. The return value indicates
 * whether KASAN took ownership of the object.
 *
 * This function is intended only for use by the slab allocator.
 *
 * Return: true if KASAN took ownership of the object; false otherwise.
 */
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
						void *object, bool init,
						bool still_accessible,
						bool no_quarantine)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, init, still_accessible,
					 no_quarantine);
	return false;
}
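/*
 * Usage sketch (assumed allocator-side flow, for illustration): when KASAN
 * takes ownership of the object, e.g. to place it into the Generic-mode
 * quarantine, the allocator must not reuse or free the memory itself:
 *
 *	if (kasan_slab_free(s, object, init, still_accessible, false))
 *		return;		// KASAN keeps the object until quarantine drain
 *	// otherwise, return the object to the allocator's freelist
 */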

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						      size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
					 size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
				  unsigned long ip);
/**
 * kasan_mempool_poison_pages - Check and poison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function is similar to kasan_mempool_poison_object() but operates on
 * page allocations.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned via
 * kasan_mempool_unpoison_pages().
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_pages(struct page *page,
							unsigned int order)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_pages(page, order, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
				    unsigned long ip);
/**
 * kasan_mempool_unpoison_pages - Unpoison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function unpoisons a page allocation that was previously poisoned by
 * kasan_mempool_poison_pages() without zeroing the allocation's memory. For
 * the tag-based modes, this function assigns a new tag to the allocation.
 */
static __always_inline void kasan_mempool_unpoison_pages(struct page *page,
							  unsigned int order)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_pages(page, order, _RET_IP_);
}
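/*
 * Usage sketch for a subsystem that caches page allocations (the helper
 * names are assumptions, not part of this API):
 *
 *	// Stash a spare allocation instead of freeing it back to page_alloc:
 *	if (kasan_mempool_poison_pages(page, order))
 *		cache_add_pages(page);			// hypothetical
 *
 *	// Hand a cached allocation back out for reuse:
 *	page = cache_take_pages();			// hypothetical
 *	kasan_mempool_unpoison_pages(page, order);
 */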

bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
/**
 * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function poisons a slab allocation and saves a free stack trace for it
 * without initializing the allocation's memory and without putting it into the
 * quarantine (for the Generic mode).
 *
 * This function also performs checks to detect double-free and invalid-free
 * bugs and reports them. The caller can use the return value of this function
 * to find out if the allocation is buggy.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned via
 * kasan_mempool_unpoison_object().
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_SIZE).
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_object(void *ptr)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_object(ptr, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip);
/**
 * kasan_mempool_unpoison_object - Unpoison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 * @size: Size to be unpoisoned.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function unpoisons a slab allocation that was previously poisoned via
 * kasan_mempool_poison_object() and saves an alloc stack trace for it without
 * initializing the allocation's memory. For the tag-based modes, this function
 * does not assign a new tag to the allocation and instead restores the
 * original tags based on the pointer value.
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_SIZE).
 */
static __always_inline void kasan_mempool_unpoison_object(void *ptr,
							   size_t size)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_object(ptr, size, _RET_IP_);
}
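/*
 * Usage sketch mirroring the page variant above (helper names are
 * assumptions): a subsystem caching kmalloc()'ed buffers of "size" bytes
 * might do:
 *
 *	if (kasan_mempool_poison_object(ptr))
 *		cache_add_buffer(ptr);			// hypothetical
 *
 *	ptr = cache_take_buffer();			// hypothetical
 *	kasan_mempool_unpoison_object(ptr, size);
 */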

/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}
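/*
 * Example (a sketch; ksize() in mm/slab_common.c follows a similar pattern):
 * probe whether an allocation's first byte is accessible before using it,
 * bailing out once KASAN has reported the bad access:
 *
 *	if (!kasan_check_byte(ptr))
 *		return 0;
 */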

#else /* CONFIG_KASAN */

static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init)
{
	return false;
}
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_new_object(struct kmem_cache *cache,
					void *object) {}
static inline void kasan_poison_new_object(struct kmem_cache *cache,
					void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
				const void *object)
{
	return (void *)object;
}

static inline bool kasan_slab_pre_free(struct kmem_cache *s, void *object)
{
	return false;
}

static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
				   bool init, bool still_accessible,
				   bool no_quarantine)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				   gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				 gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order)
{
	return true;
}
static inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) {}
static inline bool kasan_mempool_poison_object(void *ptr)
{
	return true;
}
static inline void kasan_mempool_unpoison_object(void *ptr, size_t size) {}

static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
static inline void kasan_unpoison_task_stack_below(const void *watermark) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};

size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags);

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

/* Tag-based KASAN modes do not use per-object metadata. */
static inline size_t kasan_metadata_size(struct kmem_cache *cache,
					 bool in_object)
{
	return 0;
}
/* And no cache-related metadata initialization is required. */
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}
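/*
 * Example (illustration only): under the tag-based modes, two pointers to the
 * same object may carry different top-byte tags, so comparisons of the
 * underlying address should strip the tags first:
 *
 *	bool same_object = kasan_reset_tag(a) == kasan_reset_tag(b);
 */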

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(const void *addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_GENERIC
void __init kasan_init_generic(void);
#else
static inline void kasan_init_generic(void) { }
#endif

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end,
			   unsigned long flags);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline void kasan_populate_early_vm_area_shadow(void *start,
							unsigned long size)
{ }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size, gfp_t gfp_mask)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end,
					 unsigned long flags) { }

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags);
static __always_inline void *kasan_unpoison_vmalloc(const void *start,
						     unsigned long size,
						     kasan_vmalloc_flags_t flags)
{
	if (kasan_enabled())
		return __kasan_unpoison_vmalloc(start, size, flags);
	return (void *)start;
}

void __kasan_poison_vmalloc(const void *start, unsigned long size);
static __always_inline void kasan_poison_vmalloc(const void *start,
						  unsigned long size)
{
	if (kasan_enabled())
		__kasan_poison_vmalloc(start, size);
}

#else /* CONFIG_KASAN_VMALLOC */

static inline void kasan_populate_early_vm_area_shadow(void *start,
							unsigned long size) { }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size, gfp_t gfp_mask)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end,
					 unsigned long flags) { }

static inline void *kasan_unpoison_vmalloc(const void *start,
					    unsigned long size,
					    kasan_vmalloc_flags_t flags)
{
	return (void *)start;
}
static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions allocate and free shadow memory for kernel modules.
 * They are only required when KASAN_VMALLOC is not supported, as otherwise
 * shadow memory is allocated by the generic vmalloc handlers.
 */
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
void kasan_free_module_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#endif /* LINUX_KASAN_H */