/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020 Alexander V. Chernikov
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_inet.h"
#include "opt_route.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/rmlock.h>
#include <sys/rwlock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/refcount.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/route/route_ctl.h>
#include <net/route/route_var.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_fib.h>

#include <net/route/nhop_utils.h>
#include <net/route/nhop.h>
#include <net/route/nhop_var.h>
#include <net/route/nhgrp_var.h>

#define	DEBUG_MOD_NAME	nhgrp
#define	DEBUG_MAX_LEVEL	LOG_DEBUG
#include <net/route/route_debug.h>
_DECLARE_DEBUG(LOG_INFO);

/*
 * This file contains the data structure management logic for the nexthop
 * groups ("nhgrp") route subsystem.
 *
 * Nexthop groups are used to store multiple routes available for a specific
 * prefix. Nexthop groups are immutable and can be shared across multiple
 * prefixes.
 *
 * Each group consists of a control plane part and a dataplane part.
 * The control plane part is basically a collection of nexthop objects
 * with weights and a reference count.
 *
 * The dataplane part consists of an array of nexthop pointers, compiled
 * from the control plane data to support O(1) nexthop selection.
 *
 * For example, consider the following group:
 * [(nh1, weight=100), (nh2, weight=200)]
 * It will compile to the following array:
 * [nh1, nh2, nh2]
 */
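
/*
 * Illustrative sketch of the compilation step described above (simplified,
 * hypothetical names, rounding issues ignored; not the in-kernel
 * implementation).  Each nexthop receives a number of dataplane slots
 * proportional to its weight, so picking a slot by a uniformly distributed
 * flow hash preserves the configured weight ratios:
 *
 *	// total_weight = sum of wn[i].weight; num_slots = dataplane size
 *	for (i = 0, slot = 0; i < num_nhops; i++) {
 *		n = num_slots * wn[i].weight / total_weight;
 *		while (n-- > 0)
 *			slots[slot++] = wn[i].nh;
 *	}
 *
 * For the example group this yields [nh1, nh2, nh2].  Selection on the
 * forwarding path then reduces to indexing the array with a value derived
 * from the packet flow (e.g. flowid % num_slots).
 */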

static void consider_resize(struct nh_control *ctl, uint32_t new_gr_buckets,
    uint32_t new_idx_items);

static int cmp_nhgrp(const struct nhgrp_priv *a, const struct nhgrp_priv *b);
static unsigned int hash_nhgrp(const struct nhgrp_priv *obj);

static unsigned
djb_hash(const unsigned char *h, const int len)
{
	unsigned int result = 0;
	int i;

	for (i = 0; i < len; i++)
		result = 33 * result ^ h[i];

	return (result);
}

static int
cmp_nhgrp(const struct nhgrp_priv *a, const struct nhgrp_priv *b)
{

	/*
	 * In case of consistent hashing, there can be multiple nexthop groups
	 * with the same "control plane" list of nexthops with weights and a
	 * different set of "data plane" nexthops.
	 * For now, ignore the data plane and focus on the control plane list.
	 */
	if (a->nhg_nh_count != b->nhg_nh_count || a->nhg_uidx != b->nhg_uidx)
		return (0);
	return (!memcmp(a->nhg_nh_weights, b->nhg_nh_weights,
	    sizeof(struct weightened_nhop) * a->nhg_nh_count));
}

/*
 * Hash callback: calculate hash of an object
 */
static unsigned int
hash_nhgrp(const struct nhgrp_priv *obj)
{
	const unsigned char *key;

	key = (const unsigned char *)obj->nhg_nh_weights;

	return (djb_hash(key, sizeof(struct weightened_nhop) * obj->nhg_nh_count));
}

/*
 * Returns object referenced and unlocked
 */
struct nhgrp_priv *
find_nhgrp(struct nh_control *ctl, const struct nhgrp_priv *key)
{
	struct nhgrp_priv *priv_ret;

	NHOPS_RLOCK(ctl);
	CHT_SLIST_FIND_BYOBJ(&ctl->gr_head, mpath, key, priv_ret);
	if (priv_ret != NULL) {
		if (refcount_acquire_if_not_zero(&priv_ret->nhg_refcount) == 0) {
			/* refcount is 0 -> group is being deleted */
			priv_ret = NULL;
		}
	}
	NHOPS_RUNLOCK(ctl);

	return (priv_ret);
}

int
link_nhgrp(struct nh_control *ctl, struct nhgrp_priv *grp_priv)
{
	uint16_t idx;
	uint32_t new_num_buckets, new_num_items;

	NHOPS_WLOCK(ctl);
	/* Check if we need to resize hash and index */
	new_num_buckets = CHT_SLIST_GET_RESIZE_BUCKETS(&ctl->gr_head);
	new_num_items = bitmask_get_resize_items(&ctl->nh_idx_head);

	if (bitmask_alloc_idx(&ctl->nh_idx_head, &idx) != 0) {
		NHOPS_WUNLOCK(ctl);
		FIB_RH_LOG(LOG_DEBUG, ctl->ctl_rh, "Unable to allocate nhg index");
		consider_resize(ctl, new_num_buckets, new_num_items);
		return (0);
	}

	grp_priv->nhg_idx = idx;
	grp_priv->nh_control = ctl;
	CHT_SLIST_INSERT_HEAD(&ctl->gr_head, mpath, grp_priv);

	NHOPS_WUNLOCK(ctl);

	IF_DEBUG_LEVEL(LOG_DEBUG2) {
		char nhgrp_buf[NHOP_PRINT_BUFSIZE] __unused;
		FIB_RH_LOG(LOG_DEBUG2, ctl->ctl_rh, "linked %s",
		    nhgrp_print_buf(grp_priv->nhg, nhgrp_buf, sizeof(nhgrp_buf)));
	}
	consider_resize(ctl, new_num_buckets, new_num_items);

	return (1);
}

struct nhgrp_priv *
unlink_nhgrp(struct nh_control *ctl, struct nhgrp_priv *key)
{
	struct nhgrp_priv *nhg_priv_ret;
	int idx;

	NHOPS_WLOCK(ctl);

	CHT_SLIST_REMOVE(&ctl->gr_head, mpath, key, nhg_priv_ret);

	if (nhg_priv_ret == NULL) {
		FIB_RH_LOG(LOG_DEBUG, ctl->ctl_rh, "Unable to find nhg");
		NHOPS_WUNLOCK(ctl);
		return (NULL);
	}

	idx = nhg_priv_ret->nhg_idx;
	bitmask_free_idx(&ctl->nh_idx_head, idx);
	nhg_priv_ret->nhg_idx = 0;
	nhg_priv_ret->nh_control = NULL;

	NHOPS_WUNLOCK(ctl);

	IF_DEBUG_LEVEL(LOG_DEBUG2) {
		char nhgrp_buf[NHOP_PRINT_BUFSIZE];
		nhgrp_print_buf(nhg_priv_ret->nhg, nhgrp_buf, sizeof(nhgrp_buf));
		FIB_RH_LOG(LOG_DEBUG2, ctl->ctl_rh, "unlinked idx#%d %s", idx,
		    nhgrp_buf);
	}

	return (nhg_priv_ret);
}
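
/*
 * Rough sketch of how the functions above are meant to be used by a group
 * allocation path (hypothetical caller, error handling omitted):
 *
 *	// "key" is a candidate group built from the request
 *	nhg_priv = find_nhgrp(ctl, key);
 *	if (nhg_priv != NULL) {
 *		// An equal group already exists and is now referenced;
 *		// the caller can free the candidate and reuse the match.
 *		return (nhg_priv);
 *	}
 *	if (link_nhgrp(ctl, key) == 0) {
 *		// Index allocation failed; destroy the candidate.
 *		return (NULL);
 *	}
 *	return (key);
 *
 * Note that unlink_nhgrp() only detaches the group from the hash and
 * releases its index; freeing the object itself is up to the caller.
 */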

/*
 * Checks if the hash needs resizing and performs the resize if necessary.
 */
static void
consider_resize(struct nh_control *ctl, uint32_t new_gr_bucket, uint32_t new_idx_items)
{
	void *gr_ptr, *gr_idx_ptr;
	void *old_idx_ptr;
	size_t alloc_size;

	/* Allocate the new buffers without holding the lock. */
	gr_ptr = NULL;
	if (new_gr_bucket != 0) {
		alloc_size = CHT_SLIST_GET_RESIZE_SIZE(new_gr_bucket);
		gr_ptr = malloc(alloc_size, M_NHOP, M_NOWAIT | M_ZERO);
	}

	gr_idx_ptr = NULL;
	if (new_idx_items != 0) {
		alloc_size = bitmask_get_size(new_idx_items);
		gr_idx_ptr = malloc(alloc_size, M_NHOP, M_NOWAIT | M_ZERO);
	}

	if (gr_ptr == NULL && gr_idx_ptr == NULL) {
		/* Either the resize is not required or the allocations have failed. */
		return;
	}

	FIB_RH_LOG(LOG_DEBUG, ctl->ctl_rh,
	    "going to resize nhg hash: [ptr:%p sz:%u] idx:[ptr:%p sz:%u]",
	    gr_ptr, new_gr_bucket, gr_idx_ptr, new_idx_items);

	old_idx_ptr = NULL;

	NHOPS_WLOCK(ctl);
	if (gr_ptr != NULL) {
		CHT_SLIST_RESIZE(&ctl->gr_head, mpath, gr_ptr, new_gr_bucket);
	}
	if (gr_idx_ptr != NULL) {
		if (bitmask_copy(&ctl->nh_idx_head, gr_idx_ptr, new_idx_items) == 0)
			bitmask_swap(&ctl->nh_idx_head, gr_idx_ptr, new_idx_items, &old_idx_ptr);
	}
	NHOPS_WUNLOCK(ctl);

	/* The resize/swap above left the old buffers in gr_ptr/old_idx_ptr. */
	if (gr_ptr != NULL)
		free(gr_ptr, M_NHOP);
	if (old_idx_ptr != NULL)
		free(old_idx_ptr, M_NHOP);
}

/*
 * Function allocating the necessary group data structures.
 */
bool
nhgrp_ctl_alloc_default(struct nh_control *ctl, int malloc_flags)
{
	size_t alloc_size;
	uint32_t num_buckets;
	void *cht_ptr;

	malloc_flags = (malloc_flags & (M_NOWAIT | M_WAITOK)) | M_ZERO;

	num_buckets = 8;
	alloc_size = CHT_SLIST_GET_RESIZE_SIZE(num_buckets);
	cht_ptr = malloc(alloc_size, M_NHOP, malloc_flags);

	if (cht_ptr == NULL) {
		FIB_RH_LOG(LOG_WARNING, ctl->ctl_rh, "multipath init failed");
		return (false);
	}

	NHOPS_WLOCK(ctl);

	if (ctl->gr_head.hash_size == 0) {
		/* Init hash and bitmask */
		CHT_SLIST_INIT(&ctl->gr_head, cht_ptr, num_buckets);
		NHOPS_WUNLOCK(ctl);
	} else {
		/* Another thread has already initialized the hash/bitmask */
		NHOPS_WUNLOCK(ctl);
		free(cht_ptr, M_NHOP);
	}

	FIB_RH_LOG(LOG_DEBUG, ctl->ctl_rh, "multipath init done");

	return (true);
}
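
/*
 * Sketch of the expected lazy-initialization flow (hypothetical caller):
 * nhgrp_ctl_init() below leaves the hash unallocated, so a path that is
 * about to create the first nexthop group is expected to do roughly:
 *
 *	if (ctl->gr_head.hash_size == 0 &&
 *	    !nhgrp_ctl_alloc_default(ctl, malloc_flags))
 *		return (ENOMEM);
 *
 * The hash_size check performed under the lock inside
 * nhgrp_ctl_alloc_default() keeps the call safe if two threads race to
 * perform the initialization.
 */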

int
nhgrp_ctl_init(struct nh_control *ctl)
{

	/*
	 * By default, do not allocate the data structures, as multipath
	 * routes will not necessarily be used.
	 */
	CHT_SLIST_INIT(&ctl->gr_head, NULL, 0);
	return (0);
}

void
nhgrp_ctl_free(struct nh_control *ctl)
{
	if (ctl->gr_head.ptr != NULL)
		free(ctl->gr_head.ptr, M_NHOP);
}

void
nhgrp_ctl_unlink_all(struct nh_control *ctl)
{
	struct nhgrp_priv *nhg_priv;

	NHOPS_WLOCK_ASSERT(ctl);

	CHT_SLIST_FOREACH(&ctl->gr_head, mpath, nhg_priv) {
		IF_DEBUG_LEVEL(LOG_DEBUG2) {
			char nhgbuf[NHOP_PRINT_BUFSIZE] __unused;
			FIB_RH_LOG(LOG_DEBUG2, ctl->ctl_rh, "marking %s unlinked",
			    nhgrp_print_buf(nhg_priv->nhg, nhgbuf, sizeof(nhgbuf)));
		}
		refcount_release(&nhg_priv->nhg_linked);
	} CHT_SLIST_FOREACH_END;
}