/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_vlan_var.h>
#include <net/if_dl.h>
#include <net/if_llatbl.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>

#include "common/common.h"
#include "common/jhash.h"
#include "common/t4_msg.h"
#include "offload.h"
#include "t4_l2t.h"

/* identifies sync vs async L2T_WRITE_REQs */
#define S_SYNC_WR	12
#define V_SYNC_WR(x)	((x) << S_SYNC_WR)
#define F_SYNC_WR	V_SYNC_WR(1)

enum {
	L2T_STATE_VALID,	/* entry is up to date */
	L2T_STATE_STALE,	/* entry may be used but needs revalidation */
	L2T_STATE_RESOLVING,	/* entry needs address resolution */
	L2T_STATE_SYNC_WRITE,	/* synchronous write of entry underway */

	/* when state is one of the below the entry is not hashed */
	L2T_STATE_SWITCHING,	/* entry is being used by a switching filter */
	L2T_STATE_UNUSED	/* entry not in use */
};

struct l2t_data {
	struct rwlock lock;
	volatile int nfree;	/* number of free entries */
	struct l2t_entry *rover;/* starting point for next allocation */
	struct l2t_entry l2tab[L2T_SIZE];
};

/*
 * Module locking notes:  There is a RW lock protecting the L2 table as a
 * whole plus a spinlock per L2T entry.  Entry lookups and allocations happen
 * under the protection of the table lock, individual entry changes happen
 * while holding that entry's spinlock.  The table lock nests outside the
 * entry locks.  Allocations of new entries take the table lock as writers so
 * no other lookups can happen while allocating new entries.  Entry updates
 * take the table lock as readers so multiple entries can be updated in
 * parallel.  An L2T entry is dropped by decrementing its reference count, so
 * drops can happen in parallel with entry allocation, but no entry can change
 * state or increment its ref count during allocation as both of these perform
 * lookups.
 *
 * Note: We do not take references to ifnets in this module because both
 * the TOE and the sockets already hold references to the interfaces and the
 * lifetime of an L2T entry is fully contained in the lifetime of the TOE.
 */
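/*
 * Illustrative sketch (not part of the driver): the ordering above means an
 * update path takes the table lock as a reader first and only then the entry
 * lock.  "lookup" is a hypothetical helper that walks a hash chain.
 *
 *	rw_rlock(&d->lock);
 *	e = lookup(d, addr, ifindex);
 *	if (e != NULL) {
 *		mtx_lock(&e->lock);	// entry lock nests inside table lock
 *		// ... update the entry ...
 *		mtx_unlock(&e->lock);
 *	}
 *	rw_runlock(&d->lock);
 */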
/* The VLAN priority (PCP) is the top 3 bits of the 802.1Q TCI. */
static inline unsigned int
vlan_prio(const struct l2t_entry *e)
{
	return e->vlan >> 13;
}

/* Take a reference to an entry; a 0 -> 1 transition consumes a free slot. */
static inline void
l2t_hold(struct l2t_data *d, struct l2t_entry *e)
{
	if (atomic_fetchadd_int(&e->refcnt, 1) == 0)	/* 0 -> 1 transition */
		atomic_add_int(&d->nfree, -1);
}

/*
 * To avoid having to check address families we do not allow v4 and v6
 * neighbors to be on the same hash chain.  We keep v4 entries in the first
 * half of available hash buckets and v6 in the second.
 */
enum {
	L2T_SZ_HALF = L2T_SIZE / 2,
	L2T_HASH_MASK = L2T_SZ_HALF - 1
};

static inline unsigned int
arp_hash(const uint32_t *key, int ifindex)
{
	return jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK;
}

static inline unsigned int
ipv6_hash(const uint32_t *key, int ifindex)
{
	uint32_t xor = key[0] ^ key[1] ^ key[2] ^ key[3];

	return L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK);
}

static inline unsigned int
addr_hash(const uint32_t *addr, int addr_len, int ifindex)
{
	return addr_len == 4 ? arp_hash(addr, ifindex) :
			       ipv6_hash(addr, ifindex);
}

/*
 * Checks if an L2T entry is for the given IP/IPv6 address.  It does not check
 * whether the L2T entry and the address are of the same address family.
 * Callers ensure an address is only checked against L2T entries of the same
 * family, something made trivial by the separation of IP and IPv6 hash chains
 * mentioned above.  Returns 0 if there's a match.
 */
static inline int
addreq(const struct l2t_entry *e, const uint32_t *addr)
{
	if (e->v6)
		return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) |
		       (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]);
	return e->addr[0] ^ addr[0];
}

/*
 * Write an L2T entry.  Must be called with the entry locked (XXX: really?).
 * The write may be synchronous or asynchronous.
 */
static int
write_l2e(struct adapter *sc, struct l2t_entry *e, int sync)
{
	struct mbuf *m;
	struct cpl_l2t_write_req *req;

	if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
		return (ENOMEM);

	req = mtod(m, struct cpl_l2t_write_req *);
	m->m_pkthdr.len = m->m_len = sizeof(*req);

	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx |
	    V_SYNC_WR(sync) | V_TID_QID(sc->sge.fwq.abs_id)));
	req->params = htons(V_L2T_W_PORT(e->lport) | V_L2T_W_NOREPLY(!sync));
	req->l2t_idx = htons(e->idx);
	req->vlan = htons(e->vlan);
	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));

	t4_mgmt_tx(sc, m);

	if (sync && e->state != L2T_STATE_SWITCHING)
		e->state = L2T_STATE_SYNC_WRITE;

	return (0);
}
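/*
 * Note on sync vs async writes (illustrative, inferred from the code above):
 * write_l2e(sc, e, 1) asks the chip for a completion, steered to the firmware
 * event queue via V_TID_QID(sc->sge.fwq.abs_id), and leaves the entry in
 * L2T_STATE_SYNC_WRITE until that reply is processed.  write_l2e(sc, e, 0)
 * sets NOREPLY and is fire-and-forget; this is the variant used for the
 * switching entries below.
 */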
/*
 * Add a packet to an L2T entry's queue of packets awaiting resolution.
 * Must be called with the entry's lock held.
 */
static inline void
arpq_enqueue(struct l2t_entry *e, struct mbuf *m)
{
	mtx_assert(&e->lock, MA_OWNED);

	m->m_next = NULL;
	if (e->arpq_head)
		e->arpq_tail->m_next = m;
	else
		e->arpq_head = m;
	e->arpq_tail = m;
}

/*
 * Allocate a free L2T entry.  Must be called with l2t_data.lock held.
 */
static struct l2t_entry *
alloc_l2e(struct l2t_data *d)
{
	struct l2t_entry *end, *e, **p;

	rw_assert(&d->lock, RA_WLOCKED);

	if (!atomic_load_acq_int(&d->nfree))
		return (NULL);

	/* there's definitely a free entry */
	for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e)
		if (atomic_load_acq_int(&e->refcnt) == 0)
			goto found;

	/* wrap around; nfree > 0 guarantees this loop terminates */
	for (e = d->l2tab; atomic_load_acq_int(&e->refcnt); ++e)
		continue;
found:
	d->rover = e + 1;
	atomic_add_int(&d->nfree, -1);

	/*
	 * The entry we found may be an inactive entry that is
	 * presently in the hash table.  We need to remove it.
	 */
	if (e->state < L2T_STATE_SWITCHING) {
		for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next) {
			if (*p == e) {
				*p = e->next;
				e->next = NULL;
				break;
			}
		}
	}

	e->state = L2T_STATE_UNUSED;
	return e;
}

/*
 * Called when an L2T entry has no more users.  The entry is left in the hash
 * table since it is likely to be reused but we also bump nfree to indicate
 * that the entry can be reallocated for a different neighbor.  We also drop
 * the existing neighbor reference in case the neighbor is going away and is
 * waiting on our reference.
 *
 * Because entries can be reallocated to other neighbors once their ref count
 * drops to 0 we need to take the entry's lock to avoid races with a new
 * incarnation.
 */
static void
t4_l2e_free(struct l2t_entry *e)
{
	struct llentry *lle = NULL;
	struct l2t_data *d;

	mtx_lock(&e->lock);
	if (atomic_load_acq_int(&e->refcnt) == 0) {	/* hasn't been recycled */
		lle = e->lle;
		e->lle = NULL;
		/*
		 * Don't need to worry about the arpq, an L2T entry can't be
		 * released if any packets are waiting for resolution as we
		 * need to be able to communicate with the device to close a
		 * connection.
		 */
	}
	mtx_unlock(&e->lock);

	d = container_of(e, struct l2t_data, l2tab[e->idx]);
	atomic_add_int(&d->nfree, 1);

	if (lle)
		LLE_FREE(lle);
}

void
t4_l2t_release(struct l2t_entry *e)
{
	if (atomic_fetchadd_int(&e->refcnt, -1) == 1)
		t4_l2e_free(e);
}

/*
 * Allocate an L2T entry for use by a switching rule.  Such entries need to be
 * explicitly freed and while busy they are not on any hash chain, so normal
 * address resolution updates do not see them.
 */
struct l2t_entry *
t4_l2t_alloc_switching(struct l2t_data *d)
{
	struct l2t_entry *e;

	rw_wlock(&d->lock);	/* alloc_l2e requires the table lock as a writer */
	e = alloc_l2e(d);
	if (e) {
		mtx_lock(&e->lock);	/* avoid race with t4_l2e_free */
		e->state = L2T_STATE_SWITCHING;
		atomic_store_rel_int(&e->refcnt, 1);
		mtx_unlock(&e->lock);
	}
	rw_wunlock(&d->lock);
	return e;
}
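/*
 * Illustrative lifecycle of a switching entry (not part of the driver; error
 * handling elided, "vlan", "port" and "dmac" are placeholders):
 *
 *	e = t4_l2t_alloc_switching(sc->l2t);
 *	if (e != NULL)
 *		t4_l2t_set_switching(sc, e, vlan, port, dmac);
 *	...
 *	t4_l2t_release(e);	// drop the reference taken at allocation
 */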
/*
 * Sets/updates the contents of a switching L2T entry that has been allocated
 * with an earlier call to @t4_l2t_alloc_switching.
 */
int
t4_l2t_set_switching(struct adapter *sc, struct l2t_entry *e, uint16_t vlan,
    uint8_t port, uint8_t *eth_addr)
{
	e->vlan = vlan;
	e->lport = port;
	memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
	return write_l2e(sc, e, 0);
}

struct l2t_data *
t4_init_l2t(int flags)
{
	int i;
	struct l2t_data *d;

	d = malloc(sizeof(*d), M_CXGBE, M_ZERO | flags);
	if (!d)
		return (NULL);

	d->rover = d->l2tab;
	atomic_store_rel_int(&d->nfree, L2T_SIZE);
	rw_init(&d->lock, "L2T");

	for (i = 0; i < L2T_SIZE; i++) {
		d->l2tab[i].idx = i;
		d->l2tab[i].state = L2T_STATE_UNUSED;
		mtx_init(&d->l2tab[i].lock, "L2T_E", NULL, MTX_DEF);
		atomic_store_rel_int(&d->l2tab[i].refcnt, 0);
	}

	return (d);
}

int
t4_free_l2t(struct l2t_data *d)
{
	int i;

	for (i = 0; i < L2T_SIZE; i++)
		mtx_destroy(&d->l2tab[i].lock);
	rw_destroy(&d->lock);
	free(d, M_CXGBE);

	return (0);
}
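/*
 * Illustrative init/teardown pairing (hypothetical attach/detach path; the
 * "sc->l2t" field is assumed here):
 *
 *	sc->l2t = t4_init_l2t(M_NOWAIT);	// M_WAITOK also works here
 *	if (sc->l2t == NULL)
 *		return (ENOMEM);
 *	...
 *	t4_free_l2t(sc->l2t);
 */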