xref: /freebsd/sys/arm64/include/msan.h (revision 90010126b03b098edc70a5a0e63c1c0a65cd47fb)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 The FreeBSD Foundation
 * Copyright (c) 2023 Juniper Networks, Inc.
 *
 * This software was developed by Mark Johnston under sponsorship from the
 * FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _MACHINE_MSAN_H_
#define	_MACHINE_MSAN_H_

#ifdef KMSAN

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>
#include <machine/vmparam.h>

typedef uint32_t msan_orig_t;

/*
 * Our 32-bit origin cells encode a 2-bit type and a 30-bit pointer to a
 * kernel instruction.  The pointer is compressed by making it a positive
 * offset relative to KERNBASE; this works because the low 30 bits of
 * KERNBASE are zero, so masking an instruction address yields its offset,
 * and decoding recovers the address by ORing KERNBASE back in.
 */
#define	KMSAN_ORIG_TYPE_SHIFT	30u
#define	KMSAN_ORIG_PTR_MASK	((1ul << KMSAN_ORIG_TYPE_SHIFT) - 1)

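/*
 * Worked example, with illustrative values and assuming the arm64 KERNBASE
 * of 0xffff000000000000: a type-1 origin for the instruction at
 * 0xffff00000040abcd is encoded as (1 << 30) | 0x0040abcd == 0x4040abcd;
 * decoding yields type 1 and 0x0040abcd | KERNBASE, the original address.
 */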
static inline msan_orig_t
kmsan_md_orig_encode(int type, uintptr_t ptr)
{
	return ((type << KMSAN_ORIG_TYPE_SHIFT) |
	    ((ptr & KMSAN_ORIG_PTR_MASK)));
}

static inline void
kmsan_md_orig_decode(msan_orig_t orig, int *type, uintptr_t *ptr)
{
	*type = orig >> KMSAN_ORIG_TYPE_SHIFT;
	*ptr = (orig & KMSAN_ORIG_PTR_MASK) | KERNBASE;
}
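
/*
 * Round-trip sketch (hypothetical caller; the real callers and the origin
 * type constants live in the machine-independent KMSAN runtime):
 *
 *	msan_orig_t orig;
 *	uintptr_t pc;
 *	int type;
 *
 *	orig = kmsan_md_orig_encode(1, instr);
 *	kmsan_md_orig_decode(orig, &type, &pc);
 *
 * Afterwards type == 1 and pc == instr, provided that instr points into
 * the gigabyte of KVA starting at KERNBASE.
 */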

/*
 * The shadow and origin regions are linear mirrors of the kernel map:
 * each address in [VM_MIN_KERNEL_ADDRESS, kernel_vm_end) maps to the
 * corresponding offset within the KMSAN shadow and origin regions.
 */
static inline vm_offset_t
kmsan_md_addr_to_shad(vm_offset_t addr)
{
	return (addr - VM_MIN_KERNEL_ADDRESS + KMSAN_SHAD_MIN_ADDRESS);
}

static inline vm_offset_t
kmsan_md_addr_to_orig(vm_offset_t addr)
{
	return (addr - VM_MIN_KERNEL_ADDRESS + KMSAN_ORIG_MIN_ADDRESS);
}

static inline bool
kmsan_md_unsupported(vm_offset_t addr)
{
	/*
	 * It would be cheaper to use VM_MAX_KERNEL_ADDRESS as the upper bound,
	 * but we need to exclude device mappings above kernel_vm_end but within
	 * the kernel map.
	 */
	return (addr < VM_MIN_KERNEL_ADDRESS || addr >= kernel_vm_end);
}
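
/*
 * A note on usage (an assumption about the MI side, not from this file):
 * addresses rejected here, such as direct-map or device addresses, have no
 * backing shadow, so the machine-independent KMSAN code is expected to skip
 * shadow accesses for them rather than consult state that does not exist.
 */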

#endif /* KMSAN */

#endif /* !_MACHINE_MSAN_H_ */