/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2010,2013 Lawrence Stewart <lstewart@freebsd.org>
 * Copyright (c) 2010 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Lawrence Stewart while studying at the Centre
 * for Advanced Internet Architectures, Swinburne University of Technology,
 * made possible in part by grants from the FreeBSD Foundation and Cisco
 * University Research Program Fund at Community Foundation Silicon Valley.
 *
 * Portions of this software were developed at the Centre for Advanced
 * Internet Architectures, Swinburne University of Technology, Melbourne,
 * Australia by Lawrence Stewart under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/hhook.h>
#include <sys/khelp.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/module_khelp.h>
#include <sys/osd.h>
#include <sys/queue.h>
#include <sys/refcount.h>
#include <sys/systm.h>

#include <net/vnet.h>

struct hhook {
	hhook_func_t		hhk_func;
	struct helper		*hhk_helper;
	void			*hhk_udata;
	STAILQ_ENTRY(hhook)	hhk_next;
};

static MALLOC_DEFINE(M_HHOOK, "hhook", "Helper hooks are linked off hhook_head lists");

LIST_HEAD(hhookheadhead, hhook_head);
struct hhookheadhead hhook_head_list;
VNET_DEFINE(struct hhookheadhead, hhook_vhead_list);
#define	V_hhook_vhead_list VNET(hhook_vhead_list)

static struct mtx hhook_head_list_lock;
MTX_SYSINIT(hhookheadlistlock, &hhook_head_list_lock, "hhook_head list lock",
    MTX_DEF);

/* Protected by hhook_head_list_lock. */
static uint32_t n_hhookheads;

/* Private function prototypes. */
static void hhook_head_destroy(struct hhook_head *hhh);
void khelp_new_hhook_registered(struct hhook_head *hhh, uint32_t flags);

#define	HHHLIST_LOCK()		mtx_lock(&hhook_head_list_lock)
#define	HHHLIST_UNLOCK()	mtx_unlock(&hhook_head_list_lock)
#define	HHHLIST_LOCK_ASSERT()	mtx_assert(&hhook_head_list_lock, MA_OWNED)

#define	HHH_LOCK_INIT(hhh)	rm_init(&(hhh)->hhh_lock, "hhook_head rm lock")
#define	HHH_LOCK_DESTROY(hhh)	rm_destroy(&(hhh)->hhh_lock)
#define	HHH_WLOCK(hhh)		rm_wlock(&(hhh)->hhh_lock)
#define	HHH_WUNLOCK(hhh)	rm_wunlock(&(hhh)->hhh_lock)
#define	HHH_RLOCK(hhh, rmpt)	rm_rlock(&(hhh)->hhh_lock, (rmpt))
#define	HHH_RUNLOCK(hhh, rmpt)	rm_runlock(&(hhh)->hhh_lock, (rmpt))

/*
 * Run all helper hook functions for a given hook point.
 */
void
hhook_run_hooks(struct hhook_head *hhh, void *ctx_data, struct osd *hosd)
{
	struct hhook *hhk;
	void *hdata;
	struct rm_priotracker rmpt;

	KASSERT(hhh->hhh_refcount > 0, ("hhook_head %p refcount is 0", hhh));

	HHH_RLOCK(hhh, &rmpt);
	STAILQ_FOREACH(hhk, &hhh->hhh_hooks, hhk_next) {
		if (hhk->hhk_helper != NULL &&
		    hhk->hhk_helper->h_flags & HELPER_NEEDS_OSD) {
			hdata = osd_get(OSD_KHELP, hosd, hhk->hhk_helper->h_id);
			if (hdata == NULL)
				continue;
		} else
			hdata = NULL;

		/*
		 * XXXLAS: We currently ignore the int returned by the hook,
		 * but will likely want to handle it in future to allow hhook
		 * to be used like pfil and effect changes at the hhook calling
		 * site e.g. we could define a new hook type of HHOOK_TYPE_PFIL
		 * and standardise what particular return values mean and set
		 * the context data to pass exactly the same information as
		 * pfil hooks currently receive, thus replicating pfil with
		 * hhook.
		 */
		hhk->hhk_func(hhh->hhh_type, hhh->hhh_id, hhk->hhk_udata,
		    ctx_data, hdata, hosd);
	}
	HHH_RUNLOCK(hhh, &rmpt);
}

/*
 * Register a new helper hook function with a helper hook point.
 */
int
hhook_add_hook(struct hhook_head *hhh, struct hookinfo *hki, uint32_t flags)
{
	struct hhook *hhk, *tmp;
	int error;

	error = 0;

	if (hhh == NULL)
		return (ENOENT);

	hhk = malloc(sizeof(struct hhook), M_HHOOK,
	    M_ZERO | ((flags & HHOOK_WAITOK) ? M_WAITOK : M_NOWAIT));

	if (hhk == NULL)
		return (ENOMEM);

	hhk->hhk_helper = hki->hook_helper;
	hhk->hhk_func = hki->hook_func;
	hhk->hhk_udata = hki->hook_udata;

	HHH_WLOCK(hhh);
	STAILQ_FOREACH(tmp, &hhh->hhh_hooks, hhk_next) {
		if (tmp->hhk_func == hki->hook_func &&
		    tmp->hhk_udata == hki->hook_udata) {
			/* The helper hook function is already registered. */
			error = EEXIST;
			break;
		}
	}

	if (!error) {
		STAILQ_INSERT_TAIL(&hhh->hhh_hooks, hhk, hhk_next);
		hhh->hhh_nhooks++;
	} else
		free(hhk, M_HHOOK);

	HHH_WUNLOCK(hhh);

	return (error);
}

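/*
 * Illustrative sketch (not part of this file): a client module supplies a
 * hhook_func_t callback and describes it with a struct hookinfo before
 * registering it at a hook point. The hook type/id constants are assumed
 * examples (HHOOK_TYPE_TCP and HHOOK_TCP_EST_IN come from sys/hhook.h and
 * netinet/tcp_var.h respectively); example_hook_func is hypothetical.
 *
 *	static int
 *	example_hook_func(int32_t hhook_type, int32_t hhook_id, void *udata,
 *	    void *ctx_data, void *hdata, struct osd *hosd)
 *	{
 *		// Inspect ctx_data supplied by the hook point's caller.
 *		// The return value is currently ignored (see XXXLAS above).
 *		return (0);
 *	}
 *
 *	struct hookinfo hki = {
 *		.hook_type = HHOOK_TYPE_TCP,
 *		.hook_id = HHOOK_TCP_EST_IN,
 *		.hook_udata = NULL,
 *		.hook_func = example_hook_func,
 *	};
 *
 *	// Hook every instance (vnet or otherwise) of the hook point.
 *	error = hhook_add_hook_lookup(&hki, HHOOK_WAITOK);
 */
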
/*
 * Register a helper hook function with a helper hook point (including all
 * virtual instances of the hook point if it is virtualised).
 *
 * The logic is unfortunately far more complex than for
 * hhook_remove_hook_lookup() because hhook_add_hook() can call malloc() with
 * M_WAITOK and thus we cannot call hhook_add_hook() with the
 * hhook_head_list_lock held.
 *
 * The logic assembles an array of hhook_head structs that correspond to the
 * helper hook point being hooked and bumps the refcount on each (all done
 * with the hhook_head_list_lock held). The hhook_head_list_lock is then
 * dropped, and hhook_add_hook() is called and the refcount dropped for each
 * hhook_head struct in the array.
 */
int
hhook_add_hook_lookup(struct hookinfo *hki, uint32_t flags)
{
	struct hhook_head **heads_to_hook, *hhh;
	int error, i, n_heads_to_hook;

tryagain:
	error = i = 0;
	/*
	 * Accessing n_hhookheads without hhook_head_list_lock held opens up a
	 * race with hhook_head_register() which we are unlikely to lose, but
	 * nonetheless have to cope with - hence the complex goto logic.
	 */
	n_heads_to_hook = n_hhookheads;
	heads_to_hook = malloc(n_heads_to_hook * sizeof(struct hhook_head *),
	    M_HHOOK, flags & HHOOK_WAITOK ? M_WAITOK : M_NOWAIT);
	if (heads_to_hook == NULL)
		return (ENOMEM);

	HHHLIST_LOCK();
	LIST_FOREACH(hhh, &hhook_head_list, hhh_next) {
		if (hhh->hhh_type == hki->hook_type &&
		    hhh->hhh_id == hki->hook_id) {
			if (i < n_heads_to_hook) {
				heads_to_hook[i] = hhh;
				refcount_acquire(&heads_to_hook[i]->hhh_refcount);
				i++;
			} else {
				/*
				 * We raced with hhook_head_register() which
				 * inserted a hhook_head that we need to hook
				 * but did not malloc space for. Abort this
				 * run and try again.
				 */
				for (i--; i >= 0; i--)
					refcount_release(&heads_to_hook[i]->hhh_refcount);
				free(heads_to_hook, M_HHOOK);
				HHHLIST_UNLOCK();
				goto tryagain;
			}
		}
	}
	HHHLIST_UNLOCK();

	for (i--; i >= 0; i--) {
		if (!error)
			error = hhook_add_hook(heads_to_hook[i], hki, flags);
		refcount_release(&heads_to_hook[i]->hhh_refcount);
	}

	free(heads_to_hook, M_HHOOK);

	return (error);
}

/*
 * Remove a helper hook function from a helper hook point.
 */
int
hhook_remove_hook(struct hhook_head *hhh, struct hookinfo *hki)
{
	struct hhook *tmp;

	if (hhh == NULL)
		return (ENOENT);

	HHH_WLOCK(hhh);
	STAILQ_FOREACH(tmp, &hhh->hhh_hooks, hhk_next) {
		if (tmp->hhk_func == hki->hook_func &&
		    tmp->hhk_udata == hki->hook_udata) {
			STAILQ_REMOVE(&hhh->hhh_hooks, tmp, hhook, hhk_next);
			free(tmp, M_HHOOK);
			hhh->hhh_nhooks--;
			break;
		}
	}
	HHH_WUNLOCK(hhh);

	return (0);
}

/*
 * Remove a helper hook function from a helper hook point (including all
 * virtual instances of the hook point if it is virtualised).
 */
int
hhook_remove_hook_lookup(struct hookinfo *hki)
{
	struct hhook_head *hhh;

	HHHLIST_LOCK();
	LIST_FOREACH(hhh, &hhook_head_list, hhh_next) {
		if (hhh->hhh_type == hki->hook_type &&
		    hhh->hhh_id == hki->hook_id)
			hhook_remove_hook(hhh, hki);
	}
	HHHLIST_UNLOCK();

	return (0);
}

/*
 * Register a new helper hook point.
 */
int
hhook_head_register(int32_t hhook_type, int32_t hhook_id, struct hhook_head **hhh,
    uint32_t flags)
{
	struct hhook_head *tmphhh;

	tmphhh = hhook_head_get(hhook_type, hhook_id);

	if (tmphhh != NULL) {
		/* Hook point previously registered. */
		hhook_head_release(tmphhh);
		return (EEXIST);
	}

	tmphhh = malloc(sizeof(struct hhook_head), M_HHOOK,
	    M_ZERO | ((flags & HHOOK_WAITOK) ? M_WAITOK : M_NOWAIT));

	if (tmphhh == NULL)
		return (ENOMEM);

	tmphhh->hhh_type = hhook_type;
	tmphhh->hhh_id = hhook_id;
	tmphhh->hhh_nhooks = 0;
	STAILQ_INIT(&tmphhh->hhh_hooks);
	HHH_LOCK_INIT(tmphhh);
	refcount_init(&tmphhh->hhh_refcount, 1);

	HHHLIST_LOCK();
	if (flags & HHOOK_HEADISINVNET) {
		tmphhh->hhh_flags |= HHH_ISINVNET;
#ifdef VIMAGE
		KASSERT(curvnet != NULL, ("curvnet is NULL"));
		tmphhh->hhh_vid = (uintptr_t)curvnet;
		LIST_INSERT_HEAD(&V_hhook_vhead_list, tmphhh, hhh_vnext);
#endif
	}
	LIST_INSERT_HEAD(&hhook_head_list, tmphhh, hhh_next);
	n_hhookheads++;
	HHHLIST_UNLOCK();

	khelp_new_hhook_registered(tmphhh, flags);

	if (hhh != NULL)
		*hhh = tmphhh;
	else
		refcount_release(&tmphhh->hhh_refcount);

	return (0);
}

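/*
 * Illustrative sketch (not part of this file): a subsystem that exports a
 * hook point registers it once, keeps the returned hhook_head pointer (which
 * holds a reference on its behalf), and runs any registered hooks from its
 * call site. The example_hhh variable and the ctx/hosd arguments are
 * hypothetical; the type/id constants are assumed examples.
 *
 *	static struct hhook_head *example_hhh;
 *
 *	// At subsystem initialisation:
 *	error = hhook_head_register(HHOOK_TYPE_TCP, HHOOK_TCP_EST_OUT,
 *	    &example_hhh, HHOOK_WAITOK);
 *
 *	// At the hook point call site, where ctx describes the event and
 *	// hosd is the object's khelp/OSD storage:
 *	if (example_hhh->hhh_nhooks > 0)
 *		hhook_run_hooks(example_hhh, &ctx, hosd);
 */
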
static void
hhook_head_destroy(struct hhook_head *hhh)
{
	struct hhook *tmp, *tmp2;

	HHHLIST_LOCK_ASSERT();
	KASSERT(n_hhookheads > 0, ("n_hhookheads should be > 0"));

	LIST_REMOVE(hhh, hhh_next);
#ifdef VIMAGE
	if (hhook_head_is_virtualised(hhh) == HHOOK_HEADISINVNET)
		LIST_REMOVE(hhh, hhh_vnext);
#endif
	HHH_WLOCK(hhh);
	STAILQ_FOREACH_SAFE(tmp, &hhh->hhh_hooks, hhk_next, tmp2)
		free(tmp, M_HHOOK);
	HHH_WUNLOCK(hhh);
	HHH_LOCK_DESTROY(hhh);
	free(hhh, M_HHOOK);
	n_hhookheads--;
}

/*
 * Remove a helper hook point.
 */
int
hhook_head_deregister(struct hhook_head *hhh)
{
	int error;

	error = 0;

	HHHLIST_LOCK();
	if (hhh == NULL)
		error = ENOENT;
	else if (hhh->hhh_refcount > 1)
		error = EBUSY;
	else
		hhook_head_destroy(hhh);
	HHHLIST_UNLOCK();

	return (error);
}

/*
 * Remove a helper hook point via a hhook_head lookup.
 */
int
hhook_head_deregister_lookup(int32_t hhook_type, int32_t hhook_id)
{
	struct hhook_head *hhh;
	int error;

	hhh = hhook_head_get(hhook_type, hhook_id);
	error = hhook_head_deregister(hhh);

	if (error == EBUSY)
		hhook_head_release(hhh);

	return (error);
}

/*
 * Lookup and return the hhook_head struct associated with the specified type
 * and id, or NULL if not found. If found, the hhook_head's refcount is bumped.
 */
struct hhook_head *
hhook_head_get(int32_t hhook_type, int32_t hhook_id)
{
	struct hhook_head *hhh;

	HHHLIST_LOCK();
	LIST_FOREACH(hhh, &hhook_head_list, hhh_next) {
		if (hhh->hhh_type == hhook_type && hhh->hhh_id == hhook_id) {
#ifdef VIMAGE
			if (hhook_head_is_virtualised(hhh) ==
			    HHOOK_HEADISINVNET) {
				KASSERT(curvnet != NULL, ("curvnet is NULL"));
				if (hhh->hhh_vid != (uintptr_t)curvnet)
					continue;
			}
#endif
			refcount_acquire(&hhh->hhh_refcount);
			break;
		}
	}
	HHHLIST_UNLOCK();

	return (hhh);
}

void
hhook_head_release(struct hhook_head *hhh)
{

	refcount_release(&hhh->hhh_refcount);
}

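/*
 * Illustrative sketch (not part of this file): hhook_head_get() returns a
 * referenced hhook_head, so every successful lookup must be paired with a
 * hhook_head_release() once the caller is done with the head. The type/id
 * constants are assumed examples.
 *
 *	struct hhook_head *hhh;
 *
 *	hhh = hhook_head_get(HHOOK_TYPE_TCP, HHOOK_TCP_EST_IN);
 *	if (hhh != NULL) {
 *		// Use hhh while the reference is held, e.g.
 *		// hhook_head_is_virtualised(hhh) or hhook_add_hook(hhh, ...).
 *		hhook_head_release(hhh);
 *	}
 */
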
/*
 * Check the hhook_head private flags and return the appropriate public
 * representation of the flag to the caller. The function is implemented in a
 * way that allows us to cope with other subsystems becoming virtualised in
 * the future.
 */
uint32_t
hhook_head_is_virtualised(struct hhook_head *hhh)
{
	uint32_t ret;

	ret = 0;

	if (hhh != NULL) {
		if (hhh->hhh_flags & HHH_ISINVNET)
			ret = HHOOK_HEADISINVNET;
	}

	return (ret);
}

uint32_t
hhook_head_is_virtualised_lookup(int32_t hook_type, int32_t hook_id)
{
	struct hhook_head *hhh;
	uint32_t ret;

	hhh = hhook_head_get(hook_type, hook_id);

	if (hhh == NULL)
		return (0);

	ret = hhook_head_is_virtualised(hhh);
	hhook_head_release(hhh);

	return (ret);
}

/*
 * Vnet created and being initialised.
 */
static void
hhook_vnet_init(const void *unused __unused)
{

	LIST_INIT(&V_hhook_vhead_list);
}

/*
 * Vnet being torn down and destroyed.
 */
static void
hhook_vnet_uninit(const void *unused __unused)
{
	struct hhook_head *hhh, *tmphhh;

	/*
	 * If subsystems which export helper hook points use the hhook KPI
	 * correctly, the loop below should have no work to do because the
	 * subsystem should have already called hhook_head_deregister().
	 */
	HHHLIST_LOCK();
	LIST_FOREACH_SAFE(hhh, &V_hhook_vhead_list, hhh_vnext, tmphhh) {
		printf("%s: hhook_head type=%d, id=%d cleanup required\n",
		    __func__, hhh->hhh_type, hhh->hhh_id);
		hhook_head_destroy(hhh);
	}
	HHHLIST_UNLOCK();
}

/*
 * When a vnet is created and being initialised, init the V_hhook_vhead_list.
 */
VNET_SYSINIT(hhook_vnet_init, SI_SUB_INIT_IF, SI_ORDER_FIRST,
    hhook_vnet_init, NULL);

/*
 * The hhook KPI provides a mechanism for subsystems which export helper hook
 * points to clean up on vnet tear down, but in case the KPI is misused,
 * provide a function to clean up and free memory for a vnet being destroyed.
 */
VNET_SYSUNINIT(hhook_vnet_uninit, SI_SUB_INIT_IF, SI_ORDER_FIRST,
    hhook_vnet_uninit, NULL);
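
/*
 * Illustrative sketch (not part of this file): rather than relying on the
 * vnet cleanup above, a subsystem should tear down its own hook point when
 * it is unloaded or its vnet is destroyed (example_hhh is the hypothetical
 * head registered earlier):
 *
 *	error = hhook_head_deregister(example_hhh);
 *	// EBUSY means helper hook functions are still registered on the
 *	// hook point; ENOENT means the head pointer was NULL.
 */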