1 /*-
2 * Copyright (c) 2015 Netflix, Inc.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 */
26
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/bus.h>
30 #include <sys/callout.h>
31 #include <sys/kernel.h>
32 #include <sys/ktr.h>
33 #include <sys/lock.h>
34 #include <sys/malloc.h>
35 #include <sys/module.h>
36 #include <sys/mutex.h>
37 #include <sys/sdt.h>
38 #include <sys/smp.h>
39 #include <sys/sysctl.h>
40 #include <sys/taskqueue.h>
41 #include <sys/queue.h>
42 #include <tests/kern_testfrwk.h>
43 #ifdef SMP
44 #include <machine/cpu.h>
45 #endif
46
/*
 * A registered test: linkage on the framework's list of known tests,
 * the test's lookup name, and the function invoked to run it.
 */
struct kern_test_list {
	TAILQ_ENTRY(kern_test_list) next;	/* entry on kfrwk_testlist */
	char name[TEST_NAME_LEN];		/* key used by the sysctl lookup */
	kerntfunc func;				/* test body to execute */
};

TAILQ_HEAD(ktestlist, kern_test_list);

/*
 * One queued execution request: a back-pointer to the registered test
 * plus a private copy of the user-supplied parameters.
 */
struct kern_test_entry {
	TAILQ_ENTRY(kern_test_entry) next;	/* entry on kfrwk_testq */
	struct kern_test_list *kt_e;		/* registered test to run */
	struct kern_test kt_data;		/* copied-in request data */
};

TAILQ_HEAD(ktestqueue, kern_test_entry);
62
MALLOC_DEFINE(M_KTFRWK, "kern_tfrwk", "Kernel Test Framework");
/*
 * Global framework state: the taskqueue whose threads run tests, the
 * single task that is re-enqueued once per participating worker, the
 * list of registered tests, the queue of pending execution requests,
 * a mutex guarding the lists, and a count of idle worker threads.
 */
struct kern_totfrwk {
	struct taskqueue *kfrwk_tq;		/* worker thread pool */
	struct task kfrwk_que;			/* re-enqueued per worker */
	struct ktestlist kfrwk_testlist;	/* registered tests */
	struct ktestqueue kfrwk_testq;		/* pending run requests */
	struct mtx kfrwk_mtx;			/* protects lists + waiting */
	int kfrwk_waiting;			/* idle worker threads */
};

struct kern_totfrwk kfrwk;		/* singleton framework instance */
static int ktest_frwk_inited = 0;	/* set once MOD_LOAD init is done */

/* Convenience wrappers for the framework mutex. */
#define KTFRWK_MUTEX_INIT() mtx_init(&kfrwk.kfrwk_mtx, "kern_test_frwk", "tfrwk", MTX_DEF)

#define KTFRWK_DESTROY() mtx_destroy(&kfrwk.kfrwk_mtx)

#define KTFRWK_LOCK() mtx_lock(&kfrwk.kfrwk_mtx)

#define KTFRWK_UNLOCK() mtx_unlock(&kfrwk.kfrwk_mtx)
83
/*
 * Taskqueue worker: take the test at the head of the run queue, claim
 * one of its requested thread slots, and execute the test function on
 * a private copy of its parameters (so the entry can be freed while
 * the test still runs).
 *
 * context is always &kfrwk (set in TASK_INIT); pending is unused.
 */
static void
kfrwk_task(void *context, int pending)
{
	struct kern_totfrwk *tf;
	struct kern_test_entry *wk;
	int free_mem = 0;
	struct kern_test kt_data;
	kerntfunc ktf;

	memset(&kt_data, 0, sizeof(kt_data));
	ktf = NULL;
	tf = (struct kern_totfrwk *)context;
	KTFRWK_LOCK();
	wk = TAILQ_FIRST(&tf->kfrwk_testq);
	if (wk) {
		/* Claim one of this test's thread slots for ourselves. */
		wk->kt_data.tot_threads_running--;
		tf->kfrwk_waiting--;
		memcpy(&kt_data, &wk->kt_data, sizeof(kt_data));
		if (wk->kt_data.tot_threads_running == 0) {
			/* Last claimant: unlink now, free after unlock. */
			TAILQ_REMOVE(&tf->kfrwk_testq, wk, next);
			free_mem = 1;
		} else {
			/* Wake one of my colleages up to help too */
			taskqueue_enqueue(tf->kfrwk_tq, &tf->kfrwk_que);
		}
		if (wk->kt_e) {
			ktf = wk->kt_e->func;
		}
	}
	KTFRWK_UNLOCK();
	if (wk && free_mem) {
		free(wk, M_KTFRWK);
	}
	/* Execute the test */
	if (ktf) {
		(*ktf) (&kt_data);
	}
	/* We are done */
	/*
	 * NOTE(review): kfrwk_waiting is decremented under the mutex above
	 * but incremented here with atomic_add_int, and the increment runs
	 * even when no work was dequeued (wk == NULL) -- confirm a spurious
	 * task run cannot inflate the count past the thread total.
	 */
	atomic_add_int(&tf->kfrwk_waiting, 1);
}
124
125 static int
kerntest_frwk_init(void)126 kerntest_frwk_init(void)
127 {
128 u_int ncpus = mp_ncpus ? mp_ncpus : MAXCPU;
129
130 KTFRWK_MUTEX_INIT();
131 TAILQ_INIT(&kfrwk.kfrwk_testq);
132 TAILQ_INIT(&kfrwk.kfrwk_testlist);
133 /* Now lets start up a number of tasks to do the work */
134 TASK_INIT(&kfrwk.kfrwk_que, 0, kfrwk_task, &kfrwk);
135 kfrwk.kfrwk_tq = taskqueue_create_fast("sbtls_task", M_NOWAIT,
136 taskqueue_thread_enqueue, &kfrwk.kfrwk_tq);
137 if (kfrwk.kfrwk_tq == NULL) {
138 printf("Can't start taskqueue for Kernel Test Framework\n");
139 panic("Taskqueue init fails for kfrwk");
140 }
141 taskqueue_start_threads(&kfrwk.kfrwk_tq, ncpus, PI_NET, "[kt_frwk task]");
142 kfrwk.kfrwk_waiting = ncpus;
143 ktest_frwk_inited = 1;
144 return (0);
145 }
146
147 static int
kerntest_frwk_fini(void)148 kerntest_frwk_fini(void)
149 {
150 KTFRWK_LOCK();
151 if (!TAILQ_EMPTY(&kfrwk.kfrwk_testlist)) {
152 /* Still modules registered */
153 KTFRWK_UNLOCK();
154 return (EBUSY);
155 }
156 ktest_frwk_inited = 0;
157 KTFRWK_UNLOCK();
158 taskqueue_free(kfrwk.kfrwk_tq);
159 /* Ok lets destroy the mutex on the way outs */
160 KTFRWK_DESTROY();
161 return (0);
162 }
163
164
static int kerntest_execute(SYSCTL_HANDLER_ARGS);

/*
 * kern.testfrwk sysctl tree; kern.testfrwk.runtest takes a
 * struct kern_test from userland and dispatches the named test.
 */
SYSCTL_NODE(_kern, OID_AUTO, testfrwk, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Kernel Test Framework");
SYSCTL_PROC(_kern_testfrwk, OID_AUTO, runtest,
    CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    0, 0, kerntest_execute, "IU",
    "Execute a kernel test");
173
174 int
kerntest_execute(SYSCTL_HANDLER_ARGS)175 kerntest_execute(SYSCTL_HANDLER_ARGS)
176 {
177 struct kern_test kt;
178 struct kern_test_list *li, *te = NULL;
179 struct kern_test_entry *kte = NULL;
180 int error = 0;
181
182 if (ktest_frwk_inited == 0) {
183 return (ENOENT);
184 }
185 /* Find the entry if possible */
186 error = SYSCTL_IN(req, &kt, sizeof(struct kern_test));
187 if (error) {
188 return (error);
189 }
190 if (kt.num_threads <= 0) {
191 return (EINVAL);
192 }
193 /* Grab some memory */
194 kte = malloc(sizeof(struct kern_test_entry), M_KTFRWK, M_WAITOK);
195 KTFRWK_LOCK();
196 TAILQ_FOREACH(li, &kfrwk.kfrwk_testlist, next) {
197 if (strcmp(li->name, kt.name) == 0) {
198 te = li;
199 break;
200 }
201 }
202 if (te == NULL) {
203 printf("Can't find the test %s\n", kt.name);
204 error = ENOENT;
205 free(kte, M_KTFRWK);
206 goto out;
207 }
208 /* Ok we have a test item to run, can we? */
209 if (!TAILQ_EMPTY(&kfrwk.kfrwk_testq)) {
210 /* We don't know if there is enough threads */
211 error = EAGAIN;
212 free(kte, M_KTFRWK);
213 goto out;
214 }
215 if (kfrwk.kfrwk_waiting < kt.num_threads) {
216 error = E2BIG;
217 free(kte, M_KTFRWK);
218 goto out;
219 }
220 kt.tot_threads_running = kt.num_threads;
221 /* Ok it looks like we can do it, lets get an entry */
222 kte->kt_e = li;
223 memcpy(&kte->kt_data, &kt, sizeof(kt));
224 TAILQ_INSERT_TAIL(&kfrwk.kfrwk_testq, kte, next);
225 taskqueue_enqueue(kfrwk.kfrwk_tq, &kfrwk.kfrwk_que);
226 out:
227 KTFRWK_UNLOCK();
228 return (error);
229 }
230
231 int
kern_testframework_register(const char * name,kerntfunc func)232 kern_testframework_register(const char *name, kerntfunc func)
233 {
234 int error = 0;
235 struct kern_test_list *li, *te = NULL;
236 int len;
237
238 len = strlen(name);
239 if (len >= TEST_NAME_LEN) {
240 return (E2BIG);
241 }
242 te = malloc(sizeof(struct kern_test_list), M_KTFRWK, M_WAITOK);
243 KTFRWK_LOCK();
244 /* First does it already exist? */
245 TAILQ_FOREACH(li, &kfrwk.kfrwk_testlist, next) {
246 if (strcmp(li->name, name) == 0) {
247 error = EALREADY;
248 free(te, M_KTFRWK);
249 goto out;
250 }
251 }
252 /* Ok we can do it, lets add it to the list */
253 te->func = func;
254 strcpy(te->name, name);
255 TAILQ_INSERT_TAIL(&kfrwk.kfrwk_testlist, te, next);
256 out:
257 KTFRWK_UNLOCK();
258 return (error);
259 }
260
261 int
kern_testframework_deregister(const char * name)262 kern_testframework_deregister(const char *name)
263 {
264 struct kern_test_list *li, *te = NULL;
265 u_int ncpus = mp_ncpus ? mp_ncpus : MAXCPU;
266 int error = 0;
267
268 KTFRWK_LOCK();
269 /* First does it already exist? */
270 TAILQ_FOREACH(li, &kfrwk.kfrwk_testlist, next) {
271 if (strcmp(li->name, name) == 0) {
272 te = li;
273 break;
274 }
275 }
276 if (te == NULL) {
277 /* It is not registered so no problem */
278 goto out;
279 }
280 if (ncpus != kfrwk.kfrwk_waiting) {
281 /* We are busy executing something -- can't unload */
282 error = EBUSY;
283 goto out;
284 }
285 if (!TAILQ_EMPTY(&kfrwk.kfrwk_testq)) {
286 /* Something still to execute */
287 error = EBUSY;
288 goto out;
289 }
290 /* Ok we can remove the dude safely */
291 TAILQ_REMOVE(&kfrwk.kfrwk_testlist, te, next);
292 memset(te, 0, sizeof(struct kern_test_list));
293 free(te, M_KTFRWK);
294 out:
295 KTFRWK_UNLOCK();
296 return (error);
297 }
298
299 static int
kerntest_mod_init(module_t mod,int type,void * data)300 kerntest_mod_init(module_t mod, int type, void *data)
301 {
302 int err;
303
304 switch (type) {
305 case MOD_LOAD:
306 err = kerntest_frwk_init();
307 break;
308 case MOD_QUIESCE:
309 KTFRWK_LOCK();
310 if (TAILQ_EMPTY(&kfrwk.kfrwk_testlist)) {
311 err = 0;
312 } else {
313 err = EBUSY;
314 }
315 KTFRWK_UNLOCK();
316 break;
317 case MOD_UNLOAD:
318 err = kerntest_frwk_fini();
319 break;
320 default:
321 return (EOPNOTSUPP);
322 }
323 return (err);
324 }
325
/* Module glue: one event handler covers load, quiesce, and unload. */
static moduledata_t kern_test_framework = {
	.name = "kernel_testfrwk",
	.evhand = kerntest_mod_init,
	.priv = 0
};

MODULE_VERSION(kern_testframework, 1);
DECLARE_MODULE(kern_testframework, kern_test_framework, SI_SUB_PSEUDO, SI_ORDER_ANY);
334