Lines matching +full:memory +full:-mapped in mm/kmsan/hooks.c (KMSAN hooks for kernel subsystems)
1 // SPDX-License-Identifier: GPL-2.0
5 * These functions handle creation of KMSAN metadata for memory allocations.
7 * Copyright (C) 2018-2022 Google LLC
13 #include <linux/dma-direction.h>
55 * There's a ctor or this is an RCU cache - do nothing. The memory in kmsan_slab_alloc()
56 * status hasn't changed since last use. in kmsan_slab_alloc()
58 if (s->ctor || (s->flags & SLAB_TYPESAFE_BY_RCU)) in kmsan_slab_alloc()
63 kmsan_internal_unpoison_memory(object, s->object_size, in kmsan_slab_alloc()
66 kmsan_internal_poison_memory(object, s->object_size, flags, in kmsan_slab_alloc()
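Read together, the kmsan_slab_alloc() fragments above say: caches with a constructor or SLAB_TYPESAFE_BY_RCU keep whatever metadata the object already had, __GFP_ZERO allocations are unpoisoned (zeroed memory is initialized by definition), and anything else is poisoned as uninitialized. Below is a minimal sketch of that control flow, reconstructed from the matched lines; the runtime guards (kmsan_enabled, kmsan_in_runtime(), kmsan_enter_runtime()/kmsan_leave_runtime()) and the KMSAN_POISON_CHECK flag are my assumptions about the surrounding code, not a verbatim quote.

/* Sketch only: reconstructed from the matched lines, not verbatim kernel code. */
void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags)
{
	if (unlikely(object == NULL))
		return;
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	/* A ctor or an RCU cache: the object keeps its previous metadata. */
	if (s->ctor || (s->flags & SLAB_TYPESAFE_BY_RCU))
		return;

	kmsan_enter_runtime();
	if (flags & __GFP_ZERO)
		/* Zeroed allocations are initialized by definition. */
		kmsan_internal_unpoison_memory(object, s->object_size,
					       KMSAN_POISON_CHECK);
	else
		/* Fresh, non-zeroed object: mark it as uninitialized. */
		kmsan_internal_poison_memory(object, s->object_size, flags,
					     KMSAN_POISON_CHECK);
	kmsan_leave_runtime();
}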
77 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) in kmsan_slab_free()
80 * If there's a constructor, freed memory must remain in the same state in kmsan_slab_free()
81 * until the next allocation. We cannot save its state to detect in kmsan_slab_free()
82 * use-after-free bugs, instead we just keep it unpoisoned. in kmsan_slab_free()
84 if (s->ctor) in kmsan_slab_free()
87 kmsan_internal_poison_memory(object, s->object_size, GFP_KERNEL, in kmsan_slab_free()
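The kmsan_slab_free() fragments show the free path: objects in SLAB_TYPESAFE_BY_RCU caches may legitimately be read until the grace period ends, ctor-initialized objects must stay unpoisoned because their state carries over to the next allocation, and everything else is poisoned so that later reads get reported. A hedged sketch follows; the KMSAN_POISON_FREE flag and the runtime guards are my assumptions about the code not shown in the listing.

/* Sketch only: reconstructed from the matched lines, not verbatim kernel code. */
void kmsan_slab_free(struct kmem_cache *s, void *object)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	/* RCU-typesafe slabs may legally be read until the grace period ends. */
	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU))
		return;
	/* Ctor-initialized objects must stay in a valid, unpoisoned state. */
	if (s->ctor)
		return;

	kmsan_enter_runtime();
	/* Mark the freed object as uninitialized and remember it was freed. */
	kmsan_internal_poison_memory(object, s->object_size, GFP_KERNEL,
				     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
	kmsan_leave_runtime();
}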
145 * This function creates new shadow/origin pages for the physical pages mapped
146 * into the virtual memory. If those physical pages already had shadow/origin,
147 * those are ignored.
156 int nr, err = 0, clean = 0, mapped; in kmsan_ioremap_page_range() local
161 nr = (end - start) / PAGE_SIZE; in kmsan_ioremap_page_range()
167 err = -ENOMEM; in kmsan_ioremap_page_range()
170 mapped = __vmap_pages_range_noflush( in kmsan_ioremap_page_range()
174 if (mapped) { in kmsan_ioremap_page_range()
175 err = mapped; in kmsan_ioremap_page_range()
179 mapped = __vmap_pages_range_noflush( in kmsan_ioremap_page_range()
183 if (mapped) { in kmsan_ioremap_page_range()
187 err = mapped; in kmsan_ioremap_page_range()
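The kmsan_ioremap_page_range() fragments show the per-page pattern: for every PAGE_SIZE chunk of the ioremapped range, allocate zeroed shadow and origin pages and map them with __vmap_pages_range_noflush() into the metadata regions, propagating -ENOMEM or the mapping error upward. A simplified per-page sketch is below; vmalloc_shadow()/vmalloc_origin() are assumed to translate a vmalloc/ioremap address into the corresponding metadata address, and the unwinding of partially mapped ranges (the 'clean' counter in line 156) is omitted.

/* Sketch only: one page worth of metadata; error unwinding omitted. */
static int kmsan_map_metadata_for_page(unsigned long vaddr, pgprot_t prot)
{
	gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO;
	unsigned long shadow_start = (unsigned long)vmalloc_shadow(vaddr);
	unsigned long origin_start = (unsigned long)vmalloc_origin(vaddr);
	struct page *shadow, *origin;
	int mapped;

	shadow = alloc_page(gfp_mask);
	origin = alloc_page(gfp_mask);
	if (!shadow || !origin) {
		if (shadow)
			__free_page(shadow);
		if (origin)
			__free_page(origin);
		return -ENOMEM;
	}
	/* Map the zeroed shadow page over this chunk of the metadata region. */
	mapped = __vmap_pages_range_noflush(shadow_start,
					    shadow_start + PAGE_SIZE,
					    prot, &shadow, PAGE_SHIFT);
	if (mapped)
		return mapped;
	/* Same for the origin page. */
	return __vmap_pages_range_noflush(origin_start,
					  origin_start + PAGE_SIZE,
					  prot, &origin, PAGE_SHIFT);
}

The counterpart kmsan_iounmap_page_range() (line 228 below) walks the same range to unmap and free these metadata pages.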
228 nr = (end - start) / PAGE_SIZE; in kmsan_iounmap_page_range()
256 * At this point we've copied the memory already. It's hard to check it in kmsan_copy_to_user()
257 * before copying, as the size of actually copied buffer is unknown. in kmsan_copy_to_user()
270 /* This is a user memory access, check it. */ in kmsan_copy_to_user()
271 kmsan_internal_check_memory((void *)from, to_copy - left, to, in kmsan_copy_to_user()
274 /* Otherwise this is a kernel memory access. This happens when a in kmsan_copy_to_user()
275 * compat syscall passes an argument that can not be expected to in kmsan_copy_to_user()
276 * belong to user memory. */ in kmsan_copy_to_user()
281 to_copy - left); in kmsan_copy_to_user()
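The kmsan_copy_to_user() fragments distinguish two cases after the data itself has already been copied: if the destination is a user address, the kernel source buffer is checked and any uninitialized bytes are reported (the number of bytes that actually reached userspace is to_copy - left); if both sides are kernel addresses, as with compat syscall arguments staged in kernel memory, the shadow and origin metadata are copied along instead. The sketch below shows just that branch; the _sketch suffix, the TASK_SIZE comparison as the user/kernel test, and the omission of the runtime guards and user_access_save() bracketing are my simplifications.

/* Sketch only: the core decision, without the runtime guards around it. */
static void kmsan_copy_to_user_sketch(void __user *to, const void *from,
				      size_t to_copy, size_t left)
{
	size_t copied = to_copy - left;	/* bytes that actually reached 'to' */

	if ((u64)to < TASK_SIZE)
		/* Copying into userspace: report uninitialized source bytes. */
		kmsan_internal_check_memory((void *)from, copied, to,
					    REASON_COPY_TO_USER);
	else
		/* Kernel-to-kernel copy: propagate shadow/origin metadata. */
		kmsan_internal_memmove_metadata((void *)to, (void *)from,
						copied);
}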
304 kmsan_internal_check_memory(urb->transfer_buffer, in kmsan_handle_urb()
305 urb->transfer_buffer_length, in kmsan_handle_urb()
309 kmsan_internal_unpoison_memory(urb->transfer_buffer, in kmsan_handle_urb()
310 urb->transfer_buffer_length, in kmsan_handle_urb()
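kmsan_handle_urb() is direction-dependent: for an outbound URB the transfer buffer is about to be handed to the device, so it is checked for uninitialized data; for an inbound URB the device will overwrite the buffer, so it is unpoisoned. The sketch below assumes the second parameter is a bool is_out flag and that the unpoison call's last argument is the 'checked' flag.

/* Sketch only: reconstructed from the matched lines. */
void kmsan_handle_urb(const struct urb *urb, bool is_out)
{
	if (!urb)
		return;
	if (is_out)
		/* Data travels to the device: it must be fully initialized. */
		kmsan_internal_check_memory(urb->transfer_buffer,
					    urb->transfer_buffer_length,
					    /*user_addr*/ 0,
					    REASON_SUBMIT_URB);
	else
		/* Data comes from the device: treat the buffer as initialized. */
		kmsan_internal_unpoison_memory(urb->transfer_buffer,
					       urb->transfer_buffer_length,
					       /*checked*/ false);
}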
354 to_go = min(PAGE_SIZE - page_offset, (u64)size); in kmsan_handle_dma()
357 size -= to_go; in kmsan_handle_dma()
369 kmsan_handle_dma(sg_page(item), item->offset, item->length, in kmsan_handle_dma_sg()
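For DMA, the two fragments show the splitting logic: kmsan_handle_dma() walks the buffer page by page, capping each step at min(PAGE_SIZE - page_offset, size) so that adjacent pages belonging to different allocations are processed separately, and kmsan_handle_dma_sg() applies that to every scatterlist entry. The sketch below shows the loop shapes; kmsan_handle_dma_page() is assumed to be the per-chunk helper that checks the chunk for DMA_TO_DEVICE or unpoisons it for DMA_FROM_DEVICE.

/* Sketch only: loop structure around the matched lines. */
void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
		      enum dma_data_direction dir)
{
	u64 page_offset, to_go, addr;

	if (PageHighMem(page))
		return;
	addr = (u64)page_address(page) + offset;
	while (size > 0) {
		page_offset = offset_in_page(addr);
		to_go = min(PAGE_SIZE - page_offset, (u64)size);
		kmsan_handle_dma_page((void *)addr, to_go, dir); /* assumed helper */
		addr += to_go;
		size -= to_go;
	}
}

void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
			 enum dma_data_direction dir)
{
	struct scatterlist *item;
	int i;

	for_each_sg(sg, item, nents, i)
		kmsan_handle_dma(sg_page(item), item->offset, item->length,
				 dir);
}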
373 /* Functions from kmsan-checks.h follow. */
385 /* The users may want to poison/unpoison random memory. */ in kmsan_poison_memory()
404 /* The users may want to poison/unpoison random memory. */ in kmsan_unpoison_memory()
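The two comments at lines 385 and 404 belong to kmsan_poison_memory() and kmsan_unpoison_memory(), the public annotations declared in <linux/kmsan-checks.h>. Below is a hypothetical usage sketch (the example_* names are mine): a driver whose buffer is filled by hardware outside KMSAN's view unpoisons it explicitly, and re-poisons memory whose contents it wants treated as uninitialized again.

#include <linux/kmsan-checks.h>

/* Hypothetical driver code illustrating the kmsan-checks.h annotations. */
static void example_fill_from_device(void *buf, size_t len)
{
	/* ... the device writes len bytes into buf behind KMSAN's back ... */
	kmsan_unpoison_memory(buf, len);	/* now considered initialized */
}

static void example_discard_contents(void *buf, size_t len)
{
	/* Future reads of buf should be reported as uses of uninit memory. */
	kmsan_poison_memory(buf, len, GFP_KERNEL);
}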
430 KMSAN_WARN_ON(current->kmsan_ctx.depth == 0); in kmsan_enable_current()
431 current->kmsan_ctx.depth--; in kmsan_enable_current()
437 current->kmsan_ctx.depth++; in kmsan_disable_current()
438 KMSAN_WARN_ON(current->kmsan_ctx.depth == 0); in kmsan_disable_current()
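Finally, kmsan_enable_current()/kmsan_disable_current() maintain a per-task depth counter: disable increments it, enable decrements it, and the KMSAN_WARN_ON(depth == 0) checks catch an unbalanced enable (underflow) and a wrap-around after too many nested disables. Because it is a counter, the pair nests. A hypothetical usage sketch (the helper name is made up):

/* Hypothetical usage: suppress KMSAN reports around a known false positive. */
static void example_noisy_section(void)
{
	kmsan_disable_current();	/* depth++, reports suppressed */
	call_code_kmsan_misreports();	/* hypothetical noisy helper */
	kmsan_enable_current();		/* depth--, reporting restored */
}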