/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2018 The FreeBSD Foundation. All rights reserved.
 * Copyright (C) 2018, 2019 Andrew Turner
 *
 * This software was developed by Mitchell Horne under sponsorship of
 * the FreeBSD Foundation.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifdef SAN_NEEDS_INTERCEPTORS
#define	SAN_RUNTIME
#endif

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/coverage.h>

#include <machine/atomic.h>

void __sanitizer_cov_trace_pc(void);
void __sanitizer_cov_trace_cmp1(uint8_t, uint8_t);
void __sanitizer_cov_trace_cmp2(uint16_t, uint16_t);
void __sanitizer_cov_trace_cmp4(uint32_t, uint32_t);
void __sanitizer_cov_trace_cmp8(uint64_t, uint64_t);
void __sanitizer_cov_trace_const_cmp1(uint8_t, uint8_t);
void __sanitizer_cov_trace_const_cmp2(uint16_t, uint16_t);
void __sanitizer_cov_trace_const_cmp4(uint32_t, uint32_t);
void __sanitizer_cov_trace_const_cmp8(uint64_t, uint64_t);
void __sanitizer_cov_trace_switch(uint64_t, uint64_t *);

static cov_trace_pc_t cov_trace_pc;
static cov_trace_cmp_t cov_trace_cmp;

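/*
 * Registration interface. A single coverage consumer (kcov(4) in the stock
 * kernel) installs its callbacks here. The trace hooks below load these
 * pointers with atomic_load_ptr(), so instrumented code always observes
 * either the old or the new callback while a consumer registers or
 * unregisters.
 */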
void
cov_register_pc(cov_trace_pc_t trace_pc)
{

	atomic_store_ptr(&cov_trace_pc, trace_pc);
}

void
cov_unregister_pc(void)
{

	atomic_store_ptr(&cov_trace_pc, NULL);
}

void
cov_register_cmp(cov_trace_cmp_t trace_cmp)
{

	atomic_store_ptr(&cov_trace_cmp, trace_cmp);
}

void
cov_unregister_cmp(void)
{

	atomic_store_ptr(&cov_trace_cmp, NULL);
}

/*
 * Main entry point. A call to this function will be inserted
 * at every edge, and if coverage is enabled for the thread
 * this function will add the PC to the buffer.
 */
void
__sanitizer_cov_trace_pc(void)
{
	cov_trace_pc_t trace_pc;

	trace_pc = atomic_load_ptr(&cov_trace_pc);
	if (trace_pc != NULL)
		trace_pc((uint64_t)__builtin_return_address(0));
}

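/*
 * Illustrative sketch (not part of the runtime, and foo() is a made-up
 * example function): with edge instrumentation enabled (e.g. clang's
 * -fsanitize-coverage=trace-pc), the compiler rewrites a function roughly as
 *
 *	int
 *	foo(int x)
 *	{
 *		__sanitizer_cov_trace_pc();		// entry block
 *		if (x > 0) {
 *			__sanitizer_cov_trace_pc();	// taken edge
 *			return (1);
 *		}
 *		__sanitizer_cov_trace_pc();		// fall-through edge
 *		return (0);
 *	}
 *
 * Each inserted call reports its own return address, so the registered
 * handler ends up with the PCs of the basic blocks that actually ran.
 */
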
/*
 * Comparison entry points. When the kernel performs a comparison
 * operation the compiler inserts a call to one of the following
 * functions to record the operation.
 */
void
__sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2)
{
	cov_trace_cmp_t trace_cmp;

	trace_cmp = atomic_load_ptr(&cov_trace_cmp);
	if (trace_cmp != NULL)
		trace_cmp(COV_CMP_SIZE(0), arg1, arg2,
		    (uint64_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_cmp2(uint16_t arg1, uint16_t arg2)
{
	cov_trace_cmp_t trace_cmp;

	trace_cmp = atomic_load_ptr(&cov_trace_cmp);
	if (trace_cmp != NULL)
		trace_cmp(COV_CMP_SIZE(1), arg1, arg2,
		    (uint64_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2)
{
	cov_trace_cmp_t trace_cmp;

	trace_cmp = atomic_load_ptr(&cov_trace_cmp);
	if (trace_cmp != NULL)
		trace_cmp(COV_CMP_SIZE(2), arg1, arg2,
		    (uint64_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2)
{
	cov_trace_cmp_t trace_cmp;

	trace_cmp = atomic_load_ptr(&cov_trace_cmp);
	if (trace_cmp != NULL)
		trace_cmp(COV_CMP_SIZE(3), arg1, arg2,
		    (uint64_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2)
{
	cov_trace_cmp_t trace_cmp;

	trace_cmp = atomic_load_ptr(&cov_trace_cmp);
	if (trace_cmp != NULL)
		trace_cmp(COV_CMP_SIZE(0) | COV_CMP_CONST, arg1, arg2,
		    (uint64_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_const_cmp2(uint16_t arg1, uint16_t arg2)
{
	cov_trace_cmp_t trace_cmp;

	trace_cmp = atomic_load_ptr(&cov_trace_cmp);
	if (trace_cmp != NULL)
		trace_cmp(COV_CMP_SIZE(1) | COV_CMP_CONST, arg1, arg2,
		    (uint64_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2)
{
	cov_trace_cmp_t trace_cmp;

	trace_cmp = atomic_load_ptr(&cov_trace_cmp);
	if (trace_cmp != NULL)
		trace_cmp(COV_CMP_SIZE(2) | COV_CMP_CONST, arg1, arg2,
		    (uint64_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2)
{
	cov_trace_cmp_t trace_cmp;

	trace_cmp = atomic_load_ptr(&cov_trace_cmp);
	if (trace_cmp != NULL)
		trace_cmp(COV_CMP_SIZE(3) | COV_CMP_CONST, arg1, arg2,
		    (uint64_t)__builtin_return_address(0));
}

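/*
 * Illustrative sketch (not part of the runtime; do_work() stands in for
 * arbitrary kernel code): with comparison tracing enabled (e.g. clang's
 * -fsanitize-coverage=trace-cmp), a test such as
 *
 *	uint32_t x;
 *	...
 *	if (x == 42)
 *		do_work();
 *
 * is compiled roughly as if the branch were preceded by
 *
 *	__sanitizer_cov_trace_const_cmp4(42, x);
 *
 * with the compile-time constant conventionally passed as the first
 * argument. The hook then hands COV_CMP_SIZE(2) | COV_CMP_CONST, both
 * operands and the caller's PC to the registered handler, which is how a
 * fuzzer learns which comparison values gate the code it has reached.
 */
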
/*
 * val is the switch operand
 * cases[0] is the number of case constants
 * cases[1] is the size of val in bits
 * cases[2..n] are the case constants
 */
void
__sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases)
{
	uint64_t i, count, ret, type;
	cov_trace_cmp_t trace_cmp;

	trace_cmp = atomic_load_ptr(&cov_trace_cmp);
	if (trace_cmp == NULL)
		return;

	count = cases[0];
	ret = (uint64_t)__builtin_return_address(0);

	switch (cases[1]) {
	case 8:
		type = COV_CMP_SIZE(0);
		break;
	case 16:
		type = COV_CMP_SIZE(1);
		break;
	case 32:
		type = COV_CMP_SIZE(2);
		break;
	case 64:
		type = COV_CMP_SIZE(3);
		break;
	default:
		return;
	}

	/* The case values are compile-time constants, so flag the type. */
	type |= COV_CMP_CONST;

	for (i = 0; i < count; i++)
		if (!trace_cmp(type, val, cases[i + 2], ret))
			return;
}
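/*
 * Worked example (hedged; the table layout is produced by the compiler):
 * for a switch on a 32-bit operand such as
 *
 *	switch (x) {
 *	case 1:		...
 *	case 7:		...
 *	case 42:	...
 *	}
 *
 * the instrumentation calls __sanitizer_cov_trace_switch(x, cases) with a
 * table like cases = { 3, 32, 1, 7, 42 }: three case constants, a 32-bit
 * operand, then the constants themselves. The loop above reports each
 * constant against x as a COV_CMP_SIZE(2) | COV_CMP_CONST comparison and
 * stops early if the handler returns zero (for example, because its buffer
 * is full).
 */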