/*-
 * Copyright (c) 2010,2013 Lawrence Stewart <lstewart@freebsd.org>
 * Copyright (c) 2010 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Lawrence Stewart while studying at the Centre
 * for Advanced Internet Architectures, Swinburne University of Technology,
 * made possible in part by grants from the FreeBSD Foundation and Cisco
 * University Research Program Fund at Community Foundation Silicon Valley.
 *
 * Portions of this software were developed at the Centre for Advanced
 * Internet Architectures, Swinburne University of Technology, Melbourne,
 * Australia by Lawrence Stewart under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/hhook.h>
#include <sys/khelp.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/module_khelp.h>
#include <sys/osd.h>
#include <sys/queue.h>
#include <sys/refcount.h>
#include <sys/systm.h>

#include <net/vnet.h>

struct hhook {
	hhook_func_t		hhk_func;
	struct helper		*hhk_helper;
	void			*hhk_udata;
	STAILQ_ENTRY(hhook)	hhk_next;
};

static MALLOC_DEFINE(M_HHOOK, "hhook",
    "Helper hooks are linked off hhook_head lists");

LIST_HEAD(hhookheadhead, hhook_head);
struct hhookheadhead hhook_head_list;
VNET_DEFINE(struct hhookheadhead, hhook_vhead_list);
#define	V_hhook_vhead_list	VNET(hhook_vhead_list)

static struct mtx hhook_head_list_lock;
MTX_SYSINIT(hhookheadlistlock, &hhook_head_list_lock, "hhook_head list lock",
    MTX_DEF);

/* Protected by hhook_head_list_lock. */
static uint32_t n_hhookheads;

/* Private function prototypes. */
static void hhook_head_destroy(struct hhook_head *hhh);

#define	HHHLIST_LOCK()		mtx_lock(&hhook_head_list_lock)
#define	HHHLIST_UNLOCK()	mtx_unlock(&hhook_head_list_lock)
#define	HHHLIST_LOCK_ASSERT()	mtx_assert(&hhook_head_list_lock, MA_OWNED)

#define	HHH_LOCK_INIT(hhh)	rm_init(&(hhh)->hhh_lock, "hhook_head rm lock")
#define	HHH_LOCK_DESTROY(hhh)	rm_destroy(&(hhh)->hhh_lock)
#define	HHH_WLOCK(hhh)		rm_wlock(&(hhh)->hhh_lock)
#define	HHH_WUNLOCK(hhh)	rm_wunlock(&(hhh)->hhh_lock)
#define	HHH_RLOCK(hhh, rmpt)	rm_rlock(&(hhh)->hhh_lock, (rmpt))
#define	HHH_RUNLOCK(hhh, rmpt)	rm_runlock(&(hhh)->hhh_lock, (rmpt))

/*
 * Run all helper hook functions for a given hook point.
 */
void
hhook_run_hooks(struct hhook_head *hhh, void *ctx_data, struct osd *hosd)
{
	struct hhook *hhk;
	void *hdata;
	struct rm_priotracker rmpt;

	KASSERT(hhh->hhh_refcount > 0, ("hhook_head %p refcount is 0", hhh));

	HHH_RLOCK(hhh, &rmpt);
	STAILQ_FOREACH(hhk, &hhh->hhh_hooks, hhk_next) {
		if (hhk->hhk_helper->h_flags & HELPER_NEEDS_OSD) {
			hdata = osd_get(OSD_KHELP, hosd, hhk->hhk_helper->h_id);
			if (hdata == NULL)
				continue;
		} else
			hdata = NULL;

		/*
		 * XXXLAS: We currently ignore the int returned by the hook,
		 * but will likely want to handle it in future to allow hhook
		 * to be used like pfil and effect changes at the hhook calling
		 * site e.g. we could define a new hook type of HHOOK_TYPE_PFIL
		 * and standardise what particular return values mean and set
		 * the context data to pass exactly the same information as
		 * pfil hooks currently receive, thus replicating pfil with
		 * hhook.
		 */
		hhk->hhk_func(hhh->hhh_type, hhh->hhh_id, hhk->hhk_udata,
		    ctx_data, hdata, hosd);
	}
	HHH_RUNLOCK(hhh, &rmpt);
}

/*
 * Register a new helper hook function with a helper hook point.
 */
int
hhook_add_hook(struct hhook_head *hhh, struct hookinfo *hki, uint32_t flags)
{
	struct hhook *hhk, *tmp;
	int error;

	error = 0;

	if (hhh == NULL)
		return (ENOENT);

	hhk = malloc(sizeof(struct hhook), M_HHOOK,
	    M_ZERO | ((flags & HHOOK_WAITOK) ? M_WAITOK : M_NOWAIT));

	if (hhk == NULL)
		return (ENOMEM);

	hhk->hhk_helper = hki->hook_helper;
	hhk->hhk_func = hki->hook_func;
	hhk->hhk_udata = hki->hook_udata;

	HHH_WLOCK(hhh);
	STAILQ_FOREACH(tmp, &hhh->hhh_hooks, hhk_next) {
		if (tmp->hhk_func == hki->hook_func &&
		    tmp->hhk_udata == hki->hook_udata) {
			/* The helper hook function is already registered. */
			error = EEXIST;
			break;
		}
	}

	if (!error) {
		STAILQ_INSERT_TAIL(&hhh->hhh_hooks, hhk, hhk_next);
		hhh->hhh_nhooks++;
	} else
		free(hhk, M_HHOOK);

	HHH_WUNLOCK(hhh);

	return (error);
}

/*
 * Register a helper hook function with a helper hook point (including all
 * virtual instances of the hook point if it is virtualised).
 *
 * The logic is unfortunately far more complex than for
 * hhook_remove_hook_lookup() because hhook_add_hook() can call malloc() with
 * M_WAITOK and thus we cannot call hhook_add_hook() with the
 * hhook_head_list_lock held.
 *
 * The logic assembles an array of hhook_head structs that correspond to the
 * helper hook point being hooked and bumps the refcount on each (all done
 * with the hhook_head_list_lock held). The hhook_head_list_lock is then
 * dropped, and
 * hhook_add_hook() is called and the refcount dropped for each hhook_head
 * struct in the array.
 */
int
hhook_add_hook_lookup(struct hookinfo *hki, uint32_t flags)
{
	struct hhook_head **heads_to_hook, *hhh;
	int error, i, n_heads_to_hook;

tryagain:
	error = i = 0;
	/*
	 * Accessing n_hhookheads without hhook_head_list_lock held opens up a
	 * race with hhook_head_register() which we are unlikely to lose, but
	 * nonetheless have to cope with - hence the complex goto logic.
	 */
	n_heads_to_hook = n_hhookheads;
	heads_to_hook = malloc(n_heads_to_hook * sizeof(struct hhook_head *),
	    M_HHOOK, flags & HHOOK_WAITOK ? M_WAITOK : M_NOWAIT);
	if (heads_to_hook == NULL)
		return (ENOMEM);

	HHHLIST_LOCK();
	LIST_FOREACH(hhh, &hhook_head_list, hhh_next) {
		if (hhh->hhh_type == hki->hook_type &&
		    hhh->hhh_id == hki->hook_id) {
			if (i < n_heads_to_hook) {
				heads_to_hook[i] = hhh;
				refcount_acquire(&heads_to_hook[i]->hhh_refcount);
				i++;
			} else {
				/*
				 * We raced with hhook_head_register() which
				 * inserted a hhook_head that we need to hook
				 * but did not malloc space for. Abort this run
				 * and try again.
				 */
				for (i--; i >= 0; i--)
					refcount_release(&heads_to_hook[i]->hhh_refcount);
				free(heads_to_hook, M_HHOOK);
				HHHLIST_UNLOCK();
				goto tryagain;
			}
		}
	}
	HHHLIST_UNLOCK();

	for (i--; i >= 0; i--) {
		if (!error)
			error = hhook_add_hook(heads_to_hook[i], hki, flags);
		refcount_release(&heads_to_hook[i]->hhh_refcount);
	}

	free(heads_to_hook, M_HHOOK);

	return (error);
}

/*
 * Remove a helper hook function from a helper hook point.
 */
int
hhook_remove_hook(struct hhook_head *hhh, struct hookinfo *hki)
{
	struct hhook *tmp;

	if (hhh == NULL)
		return (ENOENT);

	HHH_WLOCK(hhh);
	STAILQ_FOREACH(tmp, &hhh->hhh_hooks, hhk_next) {
		if (tmp->hhk_func == hki->hook_func &&
		    tmp->hhk_udata == hki->hook_udata) {
			STAILQ_REMOVE(&hhh->hhh_hooks, tmp, hhook, hhk_next);
			free(tmp, M_HHOOK);
			hhh->hhh_nhooks--;
			break;
		}
	}
	HHH_WUNLOCK(hhh);

	return (0);
}

/*
 * Remove a helper hook function from a helper hook point (including all
 * virtual instances of the hook point if it is virtualised).
 */
int
hhook_remove_hook_lookup(struct hookinfo *hki)
{
	struct hhook_head *hhh;

	HHHLIST_LOCK();
	LIST_FOREACH(hhh, &hhook_head_list, hhh_next) {
		if (hhh->hhh_type == hki->hook_type &&
		    hhh->hhh_id == hki->hook_id)
			hhook_remove_hook(hhh, hki);
	}
	HHHLIST_UNLOCK();

	return (0);
}

/*
 * Register a new helper hook point.
 */
int
hhook_head_register(int32_t hhook_type, int32_t hhook_id, struct hhook_head **hhh,
    uint32_t flags)
{
	struct hhook_head *tmphhh;

	tmphhh = hhook_head_get(hhook_type, hhook_id);

	if (tmphhh != NULL) {
		/* Hook point previously registered. */
		hhook_head_release(tmphhh);
		return (EEXIST);
	}

	tmphhh = malloc(sizeof(struct hhook_head), M_HHOOK,
	    M_ZERO | ((flags & HHOOK_WAITOK) ? M_WAITOK : M_NOWAIT));

	if (tmphhh == NULL)
		return (ENOMEM);

	tmphhh->hhh_type = hhook_type;
	tmphhh->hhh_id = hhook_id;
	tmphhh->hhh_nhooks = 0;
	STAILQ_INIT(&tmphhh->hhh_hooks);
	HHH_LOCK_INIT(tmphhh);

	if (hhh != NULL) {
		refcount_init(&tmphhh->hhh_refcount, 1);
		*hhh = tmphhh;
	} else
		refcount_init(&tmphhh->hhh_refcount, 0);

	HHHLIST_LOCK();
	if (flags & HHOOK_HEADISINVNET) {
		tmphhh->hhh_flags |= HHH_ISINVNET;
#ifdef VIMAGE
		KASSERT(curvnet != NULL, ("curvnet is NULL"));
		tmphhh->hhh_vid = (uintptr_t)curvnet;
		LIST_INSERT_HEAD(&V_hhook_vhead_list, tmphhh, hhh_vnext);
#endif
	}
	LIST_INSERT_HEAD(&hhook_head_list, tmphhh, hhh_next);
	n_hhookheads++;
	HHHLIST_UNLOCK();

	return (0);
}

/*
 * Destroy a helper hook point: unlink it from the global (and, if
 * virtualised, per-vnet) list, free any hooks still attached to it and free
 * the hhook_head itself. The caller must hold the hhook_head_list_lock.
 */
static void
hhook_head_destroy(struct hhook_head *hhh)
{
	struct hhook *tmp, *tmp2;

	HHHLIST_LOCK_ASSERT();
	KASSERT(n_hhookheads > 0, ("n_hhookheads should be > 0"));

	LIST_REMOVE(hhh, hhh_next);
#ifdef VIMAGE
	if (hhook_head_is_virtualised(hhh) == HHOOK_HEADISINVNET)
		LIST_REMOVE(hhh, hhh_vnext);
#endif
	HHH_WLOCK(hhh);
	STAILQ_FOREACH_SAFE(tmp, &hhh->hhh_hooks, hhk_next, tmp2)
		free(tmp, M_HHOOK);
	HHH_WUNLOCK(hhh);
	HHH_LOCK_DESTROY(hhh);
	free(hhh, M_HHOOK);
	n_hhookheads--;
}

/*
 * Remove a helper hook point.
 */
int
hhook_head_deregister(struct hhook_head *hhh)
{
	int error;

	error = 0;

	HHHLIST_LOCK();
	if (hhh == NULL)
		error = ENOENT;
	else if (hhh->hhh_refcount > 1)
		error = EBUSY;
	else
		hhook_head_destroy(hhh);
	HHHLIST_UNLOCK();

	return (error);
}

/*
 * Remove a helper hook point via a hhook_head lookup.
 */
int
hhook_head_deregister_lookup(int32_t hhook_type, int32_t hhook_id)
{
	struct hhook_head *hhh;
	int error;

	hhh = hhook_head_get(hhook_type, hhook_id);
	error = hhook_head_deregister(hhh);

	if (error == EBUSY)
		hhook_head_release(hhh);

	return (error);
}

/*
 * Lookup and return the hhook_head struct associated with the specified type
 * and id, or NULL if not found. If found, the hhook_head's refcount is bumped.
 */
struct hhook_head *
hhook_head_get(int32_t hhook_type, int32_t hhook_id)
{
	struct hhook_head *hhh;

	HHHLIST_LOCK();
	LIST_FOREACH(hhh, &hhook_head_list, hhh_next) {
		if (hhh->hhh_type == hhook_type && hhh->hhh_id == hhook_id) {
#ifdef VIMAGE
			if (hhook_head_is_virtualised(hhh) ==
			    HHOOK_HEADISINVNET) {
				KASSERT(curvnet != NULL, ("curvnet is NULL"));
				if (hhh->hhh_vid != (uintptr_t)curvnet)
					continue;
			}
#endif
			refcount_acquire(&hhh->hhh_refcount);
			break;
		}
	}
	HHHLIST_UNLOCK();

	return (hhh);
}

/*
 * Release a reference on a hhook_head obtained via hhook_head_get().
 */
void
hhook_head_release(struct hhook_head *hhh)
{

	refcount_release(&hhh->hhh_refcount);
}

/*
 * Check the hhook_head private flags and return the appropriate public
 * representation of the flag to the caller. The function is implemented in a
 * way that allows us to cope with other subsystems becoming virtualised in
 * the future.
 */
uint32_t
hhook_head_is_virtualised(struct hhook_head *hhh)
{
	uint32_t ret;

	ret = 0;

	if (hhh != NULL) {
		if (hhh->hhh_flags & HHH_ISINVNET)
			ret = HHOOK_HEADISINVNET;
	}

	return (ret);
}

/*
 * As hhook_head_is_virtualised(), but looks the hhook_head up by type and id.
 * Returns 0 if no matching helper hook point is found.
 */
uint32_t
hhook_head_is_virtualised_lookup(int32_t hook_type, int32_t hook_id)
{
	struct hhook_head *hhh;
	uint32_t ret;

	hhh = hhook_head_get(hook_type, hook_id);

	if (hhh == NULL)
		return (0);

	ret = hhook_head_is_virtualised(hhh);
	hhook_head_release(hhh);

	return (ret);
}

/*
 * Vnet created and being initialised.
 */
static void
hhook_vnet_init(const void *unused __unused)
{

	LIST_INIT(&V_hhook_vhead_list);
}

/*
 * Vnet being torn down and destroyed.
 */
static void
hhook_vnet_uninit(const void *unused __unused)
{
	struct hhook_head *hhh, *tmphhh;

	/*
	 * If subsystems which export helper hook points use the hhook KPI
	 * correctly, the loop below should have no work to do because the
	 * subsystem should have already called hhook_head_deregister().
	 */
	HHHLIST_LOCK();
	LIST_FOREACH_SAFE(hhh, &V_hhook_vhead_list, hhh_vnext, tmphhh) {
		printf("%s: hhook_head type=%d, id=%d cleanup required\n",
		    __func__, hhh->hhh_type, hhh->hhh_id);
		hhook_head_destroy(hhh);
	}
	HHHLIST_UNLOCK();
}

/*
 * When a vnet is created and being initialised, init the V_hhook_vhead_list.
 */
VNET_SYSINIT(hhook_vnet_init, SI_SUB_PROTO_BEGIN, SI_ORDER_FIRST,
    hhook_vnet_init, NULL);

/*
 * The hhook KPI provides a mechanism for subsystems which export helper hook
 * points to clean up on vnet tear down, but in case the KPI is misused,
 * provide a function to clean up and free memory for a vnet being destroyed.
 */
VNET_SYSUNINIT(hhook_vnet_uninit, SI_SUB_PROTO_BEGIN, SI_ORDER_FIRST,
    hhook_vnet_uninit, NULL);