/*-
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sbuf.h>
#include <sys/syslog.h>		/* LOG_ERR, used by do_l2t_write_rpl() */
#include <netinet/in.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "t4_l2t.h"

/*
 * Module locking notes:  There is an RW lock protecting the L2 table as a
 * whole plus a spinlock per L2T entry.  Entry lookups and allocations happen
 * under the protection of the table lock; individual entry changes happen
 * while holding that entry's spinlock.  The table lock nests outside the
 * entry locks.  Allocations of new entries take the table lock as writers so
 * no other lookups can happen while allocating new entries.  Entry updates
 * take the table lock as readers so multiple entries can be updated in
 * parallel.  An L2T entry is dropped by decrementing its reference count,
 * which can happen in parallel with entry allocation, but no entry can
 * change state or increment its ref count during allocation as both of
 * these perform lookups.
 *
 * Note: We do not take references to ifnets in this module because both
 * the TOE and the sockets already hold references to the interfaces and the
 * lifetime of an L2T entry is fully contained in the lifetime of the TOE.
 */
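
/*
 * Illustrative sketch, not part of this file: how a lookup path could take
 * a new reference on an existing entry under the scheme described above.
 * The table lock is held as a reader (so t4_alloc_l2e(), which runs with
 * the write lock, cannot reuse the entry mid-bump), and a 0 -> 1 refcnt
 * transition makes the entry busy, so nfree is adjusted.  The helper name
 * below is hypothetical.
 */
#if 0
static void
l2t_hold_example(struct l2t_data *d, struct l2t_entry *e)
{
        rw_rlock(&d->lock);     /* excludes the allocator, not other readers */
        if (atomic_fetchadd_int(&e->refcnt, 1) == 0)    /* 0 -> 1: now busy */
                atomic_subtract_int(&d->nfree, 1);
        rw_runlock(&d->lock);
}
#endif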

/*
 * Allocate a free L2T entry.  Must be called with l2t_data.lock held.
 */
struct l2t_entry *
t4_alloc_l2e(struct l2t_data *d)
{
        struct l2t_entry *end, *e, **p;

        rw_assert(&d->lock, RA_WLOCKED);

        if (!atomic_load_acq_int(&d->nfree))
                return (NULL);

        /* there's definitely a free entry */
        for (e = d->rover, end = &d->l2tab[d->l2t_size]; e != end; ++e)
                if (atomic_load_acq_int(&e->refcnt) == 0)
                        goto found;

        for (e = d->l2tab; atomic_load_acq_int(&e->refcnt); ++e)
                continue;
found:
        d->rover = e + 1;
        atomic_subtract_int(&d->nfree, 1);

        /*
         * The entry we found may be an inactive entry that is
         * presently in the hash table.  We need to remove it.
         */
        if (e->state < L2T_STATE_SWITCHING) {
                for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next) {
                        if (*p == e) {
                                *p = e->next;
                                e->next = NULL;
                                break;
                        }
                }
        }

        e->state = L2T_STATE_UNUSED;
        return (e);
}

/*
 * Write an L2T entry.  Must be called with the entry locked.
 * The write may be synchronous or asynchronous.
 */
int
t4_write_l2e(struct adapter *sc, struct l2t_entry *e, int sync)
{
        struct wrq_cookie cookie;
        struct cpl_l2t_write_req *req;
        int idx = e->idx + sc->vres.l2t.start;

        mtx_assert(&e->lock, MA_OWNED);

        req = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*req), 16), &cookie);
        if (req == NULL)
                return (ENOMEM);

        INIT_TP_WR(req, 0);
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, idx |
            V_SYNC_WR(sync) | V_TID_QID(sc->sge.fwq.abs_id)));
        req->params = htons(V_L2T_W_PORT(e->lport) | V_L2T_W_NOREPLY(!sync));
        req->l2t_idx = htons(idx);
        req->vlan = htons(e->vlan);
        memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));

        commit_wrq_wr(&sc->sge.mgmtq, req, &cookie);

        if (sync && e->state != L2T_STATE_SWITCHING)
                e->state = L2T_STATE_SYNC_WRITE;

        return (0);
}

/*
 * Allocate an L2T entry for use by a switching rule.  Such entries need to
 * be explicitly freed, and while busy they are not on any hash chain, so
 * normal address resolution updates do not see them.
 */
struct l2t_entry *
t4_l2t_alloc_switching(struct l2t_data *d)
{
        struct l2t_entry *e;

        rw_wlock(&d->lock);
        e = t4_alloc_l2e(d);
        if (e) {
                mtx_lock(&e->lock);     /* avoid race with t4_l2t_free */
                e->state = L2T_STATE_SWITCHING;
                atomic_store_rel_int(&e->refcnt, 1);
                mtx_unlock(&e->lock);
        }
        rw_wunlock(&d->lock);
        return (e);
}
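
/*
 * Illustrative sketch, not part of the driver: how a consumer (e.g. a
 * filter that rewrites the L2 header) might obtain and program a switching
 * entry.  The MAC/VLAN/port values are made up, and the release via
 * t4_l2t_release() assumes the refcount helper declared in t4_l2t.h.
 */
#if 0
static int
switching_entry_example(struct adapter *sc)
{
        uint8_t dmac[ETHER_ADDR_LEN] = {0x00, 0x07, 0x43, 0x00, 0x00, 0x01};
        struct l2t_entry *e;
        int rc;

        e = t4_l2t_alloc_switching(sc->l2t);
        if (e == NULL)
                return (ENOMEM);

        /* VLAN 10 (priority bits 0), out of port 0. */
        rc = t4_l2t_set_switching(sc, e, 10, 0, dmac);
        if (rc != 0) {
                t4_l2t_release(e);      /* drop the ref taken at alloc */
                return (rc);
        }

        /* e->idx now identifies the entry to whatever rule uses it. */
        return (0);
}
#endif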

/*
 * Sets/updates the contents of a switching L2T entry that has been allocated
 * with an earlier call to t4_l2t_alloc_switching.
 */
int
t4_l2t_set_switching(struct adapter *sc, struct l2t_entry *e, uint16_t vlan,
    uint8_t port, uint8_t *eth_addr)
{
        int rc;

        e->vlan = vlan;
        e->lport = port;
        memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
        mtx_lock(&e->lock);
        rc = t4_write_l2e(sc, e, 0);
        mtx_unlock(&e->lock);
        return (rc);
}

int
t4_init_l2t(struct adapter *sc, int flags)
{
        int i, l2t_size;
        struct l2t_data *d;

        l2t_size = sc->vres.l2t.size;
        if (l2t_size < 2)       /* At least 1 bucket for IP and 1 for IPv6 */
                return (EINVAL);

        d = malloc(sizeof(*d) + l2t_size * sizeof(struct l2t_entry), M_CXGBE,
            M_ZERO | flags);
        if (d == NULL)
                return (ENOMEM);

        d->l2t_size = l2t_size;
        d->rover = d->l2tab;
        atomic_store_rel_int(&d->nfree, l2t_size);
        rw_init(&d->lock, "L2T");

        for (i = 0; i < l2t_size; i++) {
                struct l2t_entry *e = &d->l2tab[i];

                e->idx = i;
                e->state = L2T_STATE_UNUSED;
                mtx_init(&e->lock, "L2T_E", NULL, MTX_DEF);
                STAILQ_INIT(&e->wr_list);
                atomic_store_rel_int(&e->refcnt, 0);
        }

        sc->l2t = d;
        t4_register_cpl_handler(sc, CPL_L2T_WRITE_RPL, do_l2t_write_rpl);

        return (0);
}

int
t4_free_l2t(struct l2t_data *d)
{
        int i;

        for (i = 0; i < d->l2t_size; i++)
                mtx_destroy(&d->l2tab[i].lock);
        rw_destroy(&d->lock);
        free(d, M_CXGBE);

        return (0);
}

int
do_l2t_write_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
        const struct cpl_l2t_write_rpl *rpl = (const void *)(rss + 1);
        unsigned int tid = GET_TID(rpl);
        unsigned int idx = tid % L2T_SIZE;

        if (__predict_false(rpl->status != CPL_ERR_NONE)) {
                log(LOG_ERR,
                    "Unexpected L2T_WRITE_RPL (%u) for entry at hw_idx %u\n",
                    rpl->status, idx);
                return (EINVAL);
        }

        return (0);
}
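
/*
 * Illustrative sketch, not part of the driver: what a consumer of
 * synchronous writes might do once the reply above arrives, mapping the
 * hardware index back to its entry (the inverse of the idx computation in
 * t4_write_l2e).  The function is hypothetical and omits bounds checks.
 */
#if 0
static void
sync_write_done_example(struct adapter *sc, unsigned int hw_idx)
{
        struct l2t_entry *e = &sc->l2t->l2tab[hw_idx - sc->vres.l2t.start];

        mtx_lock(&e->lock);
        if (e->state == L2T_STATE_SYNC_WRITE)
                e->state = L2T_STATE_VALID;     /* hw now has the entry */
        mtx_unlock(&e->lock);
}
#endif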

#ifdef SBUF_DRAIN
static inline unsigned int
vlan_prio(const struct l2t_entry *e)
{
        return (e->vlan >> 13);
}

static char
l2e_state(const struct l2t_entry *e)
{
        switch (e->state) {
        case L2T_STATE_VALID: return 'V';  /* valid, fast-path entry */
        case L2T_STATE_STALE: return 'S';  /* needs revalidation, but usable */
        case L2T_STATE_SYNC_WRITE: return 'W';
        case L2T_STATE_RESOLVING:
                return STAILQ_EMPTY(&e->wr_list) ? 'R' : 'A';
        case L2T_STATE_SWITCHING: return 'X';
        default: return 'U';
        }
}

int
sysctl_l2t(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct l2t_data *l2t = sc->l2t;
        struct l2t_entry *e;
        struct sbuf *sb;
        int rc, i, header = 0;
        char ip[INET6_ADDRSTRLEN];

        if (l2t == NULL)
                return (ENXIO);

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
        if (sb == NULL)
                return (ENOMEM);

        e = &l2t->l2tab[0];
        for (i = 0; i < l2t->l2t_size; i++, e++) {
                mtx_lock(&e->lock);
                if (e->state == L2T_STATE_UNUSED)
                        goto skip;

                if (header == 0) {
                        sbuf_printf(sb, " Idx IP address      "
                            "Ethernet address  VLAN/P LP State Users Port");
                        header = 1;
                }
                if (e->state == L2T_STATE_SWITCHING)
                        ip[0] = 0;
                else {
                        inet_ntop(e->ipv6 ? AF_INET6 : AF_INET, &e->addr[0],
                            &ip[0], sizeof(ip));
                }

                /*
                 * XXX: e->ifp may not be around.
                 * XXX: IPv6 addresses may not align properly in the output.
                 */
                sbuf_printf(sb, "\n%4u %-15s %02x:%02x:%02x:%02x:%02x:%02x %4d"
                    " %u %2u %c %5u %s",
                    e->idx, ip, e->dmac[0], e->dmac[1], e->dmac[2],
                    e->dmac[3], e->dmac[4], e->dmac[5],
                    e->vlan & 0xfff, vlan_prio(e), e->lport,
                    l2e_state(e), atomic_load_acq_int(&e->refcnt),
                    e->ifp->if_xname);
skip:
                mtx_unlock(&e->lock);
        }

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
#endif
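
/*
 * Illustrative sketch, not part of the driver: the expected life cycle of
 * the table from an attach/detach path.  The flags argument of t4_init_l2t
 * goes straight to malloc(9), so M_WAITOK or M_NOWAIT both work; the
 * function name below is hypothetical.
 */
#if 0
static int
l2t_lifecycle_example(struct adapter *sc)
{
        int rc;

        rc = t4_init_l2t(sc, M_WAITOK);         /* during attach */
        if (rc != 0)
                return (rc);

        /* ... normal operation uses t4_alloc_l2e, t4_write_l2e, etc. ... */

        rc = t4_free_l2t(sc->l2t);              /* during detach */
        sc->l2t = NULL;
        return (rc);
}
#endif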