/*-
 * Copyright (c) 2015 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/queue.h>
#include <tests/kern_testfrwk.h>
#ifdef SMP
#include <machine/cpu.h>
#endif

struct kern_test_list {
	TAILQ_ENTRY(kern_test_list) next;
	char name[TEST_NAME_LEN];
	kerntfunc func;
};

TAILQ_HEAD(ktestlist, kern_test_list);

struct kern_test_entry {
	TAILQ_ENTRY(kern_test_entry) next;
	struct kern_test_list *kt_e;
	struct kern_test kt_data;
};

TAILQ_HEAD(ktestqueue, kern_test_entry);

MALLOC_DEFINE(M_KTFRWK, "kern_tfrwk", "Kernel Test Framework");

struct kern_totfrwk {
	struct taskqueue *kfrwk_tq;
	struct task kfrwk_que;
	struct ktestlist kfrwk_testlist;
	struct ktestqueue kfrwk_testq;
	struct mtx kfrwk_mtx;
	int kfrwk_waiting;
};

struct kern_totfrwk kfrwk;
static int ktest_frwk_inited = 0;

#define KTFRWK_MUTEX_INIT() mtx_init(&kfrwk.kfrwk_mtx, "kern_test_frwk", "tfrwk", MTX_DEF)
#define KTFRWK_DESTROY()    mtx_destroy(&kfrwk.kfrwk_mtx)
#define KTFRWK_LOCK()       mtx_lock(&kfrwk.kfrwk_mtx)
#define KTFRWK_UNLOCK()     mtx_unlock(&kfrwk.kfrwk_mtx)

/*
 * Taskqueue handler: take the next queued test, account for one worker,
 * and call the registered test function outside the framework lock.
 */
static void
kfrwk_task(void *context, int pending)
{
	struct kern_totfrwk *tf;
	struct kern_test_entry *wk;
	int free_mem = 0;
	struct kern_test kt_data;
	kerntfunc ktf;

	memset(&kt_data, 0, sizeof(kt_data));
	ktf = NULL;
	tf = (struct kern_totfrwk *)context;
	KTFRWK_LOCK();
	wk = TAILQ_FIRST(&tf->kfrwk_testq);
	if (wk) {
		wk->kt_data.tot_threads_running--;
		tf->kfrwk_waiting--;
		memcpy(&kt_data, &wk->kt_data, sizeof(kt_data));
		if (wk->kt_data.tot_threads_running == 0) {
			TAILQ_REMOVE(&tf->kfrwk_testq, wk, next);
			free_mem = 1;
		} else {
			/* Wake one of my colleagues up to help too */
			taskqueue_enqueue(tf->kfrwk_tq, &tf->kfrwk_que);
		}
		if (wk->kt_e) {
			ktf = wk->kt_e->func;
		}
	}
	KTFRWK_UNLOCK();
	if (wk && free_mem) {
		free(wk, M_KTFRWK);
	}
	/* Execute the test */
	if (ktf) {
		(*ktf)(&kt_data);
	}
	/* We are done */
	atomic_add_int(&tf->kfrwk_waiting, 1);
}

static int
kerntest_frwk_init(void)
{
	u_int ncpus = mp_ncpus ? mp_ncpus : MAXCPU;

	KTFRWK_MUTEX_INIT();
	TAILQ_INIT(&kfrwk.kfrwk_testq);
	TAILQ_INIT(&kfrwk.kfrwk_testlist);
	/* Now let's start up a number of tasks to do the work */
	TASK_INIT(&kfrwk.kfrwk_que, 0, kfrwk_task, &kfrwk);
	kfrwk.kfrwk_tq = taskqueue_create_fast("sbtls_task", M_NOWAIT,
	    taskqueue_thread_enqueue, &kfrwk.kfrwk_tq);
	if (kfrwk.kfrwk_tq == NULL) {
		printf("Can't start taskqueue for Kernel Test Framework\n");
		panic("Taskqueue init fails for kfrwk");
	}
	taskqueue_start_threads(&kfrwk.kfrwk_tq, ncpus, PI_NET, "[kt_frwk task]");
	kfrwk.kfrwk_waiting = ncpus;
	ktest_frwk_inited = 1;
	return (0);
}

static int
kerntest_frwk_fini(void)
{
	KTFRWK_LOCK();
	if (!TAILQ_EMPTY(&kfrwk.kfrwk_testlist)) {
		/* Modules are still registered */
		KTFRWK_UNLOCK();
		return (EBUSY);
	}
	ktest_frwk_inited = 0;
	KTFRWK_UNLOCK();
	taskqueue_free(kfrwk.kfrwk_tq);
	/* Ok, let's destroy the mutex on the way out */
	KTFRWK_DESTROY();
	return (0);
}

static int kerntest_execute(SYSCTL_HANDLER_ARGS);

SYSCTL_NODE(_kern, OID_AUTO, testfrwk, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Kernel Test Framework");
SYSCTL_PROC(_kern_testfrwk, OID_AUTO, runtest,
    CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    0, 0, kerntest_execute, "IU",
    "Execute a kernel test");

/*
 * sysctl handler for kern.testfrwk.runtest: copy in a struct kern_test,
 * look up the named test, and queue it on the framework taskqueue.
 */
int
kerntest_execute(SYSCTL_HANDLER_ARGS)
{
	struct kern_test kt;
	struct kern_test_list *li, *te = NULL;
	struct kern_test_entry *kte = NULL;
	int error = 0;

	if (ktest_frwk_inited == 0) {
		return (ENOENT);
	}
	/* Find the entry if possible */
	error = SYSCTL_IN(req, &kt, sizeof(struct kern_test));
	if (error) {
		return (error);
	}
	if (kt.num_threads <= 0) {
		return (EINVAL);
	}
	/* Grab some memory (M_WAITOK allocations should not fail, but be safe) */
	kte = malloc(sizeof(struct kern_test_entry), M_KTFRWK, M_WAITOK);
	if (kte == NULL) {
		return (ENOMEM);
	}
	KTFRWK_LOCK();
	TAILQ_FOREACH(li, &kfrwk.kfrwk_testlist, next) {
		if (strcmp(li->name, kt.name) == 0) {
			te = li;
			break;
		}
	}
	if (te == NULL) {
		printf("Can't find the test %s\n", kt.name);
		error = ENOENT;
		free(kte, M_KTFRWK);
		goto out;
	}
	/* Ok, we have a test item to run; can we run it now? */
	if (!TAILQ_EMPTY(&kfrwk.kfrwk_testq)) {
		/* We don't know if there are enough threads */
		error = EAGAIN;
		free(kte, M_KTFRWK);
		goto out;
	}
	if (kfrwk.kfrwk_waiting < kt.num_threads) {
		error = E2BIG;
		free(kte, M_KTFRWK);
		goto out;
	}
	kt.tot_threads_running = kt.num_threads;
	/* Ok, it looks like we can do it, let's queue an entry */
	kte->kt_e = te;
	memcpy(&kte->kt_data, &kt, sizeof(kt));
	TAILQ_INSERT_TAIL(&kfrwk.kfrwk_testq, kte, next);
	taskqueue_enqueue(kfrwk.kfrwk_tq, &kfrwk.kfrwk_que);
out:
	KTFRWK_UNLOCK();
	return (error);
}

/* Register a named test function with the framework. */
int
kern_testframework_register(const char *name, kerntfunc func)
{
	int error = 0;
	struct kern_test_list *li, *te = NULL;
	int len;

	len = strlen(name);
	if (len >= TEST_NAME_LEN) {
		return (E2BIG);
	}
	te = malloc(sizeof(struct kern_test_list), M_KTFRWK, M_WAITOK);
	if (te == NULL) {
		return (ENOMEM);
	}
	KTFRWK_LOCK();
	/* First, does it already exist? */
	TAILQ_FOREACH(li, &kfrwk.kfrwk_testlist, next) {
		if (strcmp(li->name, name) == 0) {
			error = EALREADY;
			free(te, M_KTFRWK);
			goto out;
		}
	}
	/* Ok, we can do it, let's add it to the list */
	te->func = func;
	strcpy(te->name, name);
	TAILQ_INSERT_TAIL(&kfrwk.kfrwk_testlist, te, next);
out:
	KTFRWK_UNLOCK();
	return (error);
}

/* Remove a registered test; fails with EBUSY while tests are still running. */
int
kern_testframework_deregister(const char *name)
{
	struct kern_test_list *li, *te = NULL;
	u_int ncpus = mp_ncpus ? mp_ncpus : MAXCPU;
	int error = 0;

	KTFRWK_LOCK();
	/* Does it exist at all? */
	TAILQ_FOREACH(li, &kfrwk.kfrwk_testlist, next) {
		if (strcmp(li->name, name) == 0) {
			te = li;
			break;
		}
	}
	if (te == NULL) {
		/* It is not registered, so no problem */
		goto out;
	}
	if (ncpus != kfrwk.kfrwk_waiting) {
		/* We are busy executing something -- can't unload */
		error = EBUSY;
		goto out;
	}
	if (!TAILQ_EMPTY(&kfrwk.kfrwk_testq)) {
		/* Something still to execute */
		error = EBUSY;
		goto out;
	}
	/* Ok, we can remove the entry safely */
	TAILQ_REMOVE(&kfrwk.kfrwk_testlist, te, next);
	memset(te, 0, sizeof(struct kern_test_list));
	free(te, M_KTFRWK);
out:
	KTFRWK_UNLOCK();
	return (error);
}

static int
kerntest_mod_init(module_t mod, int type, void *data)
{
	int err;

	switch (type) {
	case MOD_LOAD:
		err = kerntest_frwk_init();
		break;
	case MOD_QUIESCE:
		KTFRWK_LOCK();
		if (TAILQ_EMPTY(&kfrwk.kfrwk_testlist)) {
			err = 0;
		} else {
			err = EBUSY;
		}
		KTFRWK_UNLOCK();
		break;
	case MOD_UNLOAD:
		err = kerntest_frwk_fini();
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (err);
}

static moduledata_t kern_test_framework = {
	.name = "kernel_testfrwk",
	.evhand = kerntest_mod_init,
	.priv = 0
};

MODULE_VERSION(kern_testframework, 1);
DECLARE_MODULE(kern_testframework, kern_test_framework, SI_SUB_PSEUDO, SI_ORDER_ANY);
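
/*
 * Example consumer (an illustrative sketch only, not part of this file,
 * and compiled out below): a test module registers a callback with the
 * framework at load time and deregisters it on unload.  The "kt_dummy_*"
 * names are hypothetical.  Once such a module is loaded, the test is
 * started from userland by writing a struct kern_test with the matching
 * name and a thread count to the kern.testfrwk.runtest sysctl.
 */
#if 0
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/systm.h>
#include <tests/kern_testfrwk.h>

/* Runs on a framework taskqueue thread, once per thread requested. */
static void
kt_dummy_test(struct kern_test *test)
{
	printf("kt_dummy: running (%d threads requested)\n",
	    test->num_threads);
}

static int
kt_dummy_modevent(module_t mod, int type, void *data)
{
	switch (type) {
	case MOD_LOAD:
		return (kern_testframework_register("dummy", kt_dummy_test));
	case MOD_UNLOAD:
		return (kern_testframework_deregister("dummy"));
	default:
		return (EOPNOTSUPP);
	}
}

static moduledata_t kt_dummy_mod = {
	.name = "kt_dummy",
	.evhand = kt_dummy_modevent,
	.priv = 0
};

DECLARE_MODULE(kt_dummy, kt_dummy_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_DEPEND(kt_dummy, kern_testframework, 1, 1, 1);
#endif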