/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sbuf.h>
#include <netinet/in.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "t4_l2t.h"

/*
 * Module locking notes:  There is a RW lock protecting the L2 table as a
 * whole plus a spinlock per L2T entry.  Entry lookups and allocations happen
 * under the protection of the table lock; individual entry changes happen
 * while holding that entry's spinlock.  The table lock nests outside the
 * entry locks.  Allocations of new entries take the table lock as writers so
 * no other lookups can happen while allocating new entries.  Entry updates
 * take the table lock as readers so multiple entries can be updated in
 * parallel.  Dropping an L2T entry only requires decrementing its reference
 * count, so drops can happen in parallel with entry allocation, but no entry
 * can change state or increment its ref count during allocation as both of
 * these perform lookups.
 *
 * Note: We do not take references to ifnets in this module because both
 * the TOE and the sockets already hold references to the interfaces and the
 * lifetime of an L2T entry is fully contained in the lifetime of the TOE.
 */
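
/*
 * A minimal sketch of the resulting lock ordering (illustrative only, not
 * an actual caller in this file): the table lock is always taken before an
 * entry's lock, and entry state only changes under the entry's lock.
 *
 *	rw_rlock(&d->lock);		table lock first ...
 *	mtx_lock(&e->lock);		... then the entry lock
 *	e->state = L2T_STATE_VALID;	state changes under e->lock
 *	mtx_unlock(&e->lock);
 *	rw_runlock(&d->lock);
 */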

/*
 * Allocate a free L2T entry.  Must be called with l2t_data.lock held.
 */
struct l2t_entry *
t4_alloc_l2e(struct l2t_data *d)
{
	struct l2t_entry *end, *e, **p;

	rw_assert(&d->lock, RA_WLOCKED);
	if (__predict_false(d->l2t_stopped))
		return (NULL);
	if (!atomic_load_acq_int(&d->nfree))
		return (NULL);

	/* there's definitely a free entry */
	for (e = d->rover, end = &d->l2tab[d->l2t_size]; e != end; ++e)
		if (atomic_load_acq_int(&e->refcnt) == 0)
			goto found;

	for (e = d->l2tab; atomic_load_acq_int(&e->refcnt); ++e)
		continue;
found:
	d->rover = e + 1;
	atomic_subtract_int(&d->nfree, 1);

	/*
	 * The entry we found may be an inactive entry that is
	 * presently in the hash table.  We need to remove it.
	 */
	if (e->state < L2T_STATE_SWITCHING) {
		for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next) {
			if (*p == e) {
				*p = e->next;
				e->next = NULL;
				break;
			}
		}
	}

	e->state = L2T_STATE_UNUSED;
	return (e);
}

static struct l2t_entry *
find_or_alloc_l2e(struct l2t_data *d, uint16_t vlan, uint8_t port,
    uint8_t *dmac)
{
	struct l2t_entry *end, *e, **p;
	struct l2t_entry *first_free = NULL;

	for (e = &d->l2tab[0], end = &d->l2tab[d->l2t_size]; e != end; ++e) {
		if (atomic_load_acq_int(&e->refcnt) == 0) {
			if (!first_free)
				first_free = e;
		} else if (e->state == L2T_STATE_SWITCHING &&
		    memcmp(e->dmac, dmac, ETHER_ADDR_LEN) == 0 &&
		    e->vlan == vlan && e->lport == port)
			return (e);	/* Found existing entry that matches. */
	}

	if (first_free == NULL)
		return (NULL);	/* No match and no room for a new entry. */

	/*
	 * The entry we found may be an inactive entry that is
	 * presently in the hash table.  We need to remove it.
	 */
	e = first_free;
	if (e->state < L2T_STATE_SWITCHING) {
		for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next) {
			if (*p == e) {
				*p = e->next;
				e->next = NULL;
				break;
			}
		}
	}
	e->state = L2T_STATE_UNUSED;
	return (e);
}

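/*
 * Construct a CPL_L2T_WRITE_REQ for entry 'e' in the buffer at 'dst'.
 * 'sync' requests a synchronous write and 'reply' asks the firmware for a
 * CPL_L2T_WRITE_RPL, which is delivered to the queue named by e->iqid
 * (see do_l2t_write_rpl below).
 */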
static void
mk_write_l2e(struct adapter *sc, struct l2t_entry *e, int sync, int reply,
    void *dst)
{
	struct cpl_l2t_write_req *req;
	int idx;

	req = dst;
	idx = e->idx + sc->vres.l2t.start;
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, idx |
	    V_SYNC_WR(sync) | V_TID_QID(e->iqid)));
	req->params = htons(V_L2T_W_PORT(e->lport) | V_L2T_W_NOREPLY(!reply));
	req->l2t_idx = htons(idx);
	req->vlan = htons(e->vlan);
	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
}

/*
 * Write an L2T entry.  Must be called with the entry locked.
 * The write may be synchronous or asynchronous.
 */
int
t4_write_l2e(struct l2t_entry *e, int sync)
{
	struct sge_wrq *wrq;
	struct adapter *sc;
	struct wrq_cookie cookie;
	struct cpl_l2t_write_req *req;

	mtx_assert(&e->lock, MA_OWNED);
	MPASS(e->wrq != NULL);

	wrq = e->wrq;
	sc = wrq->adapter;

	req = start_wrq_wr(wrq, howmany(sizeof(*req), 16), &cookie);
	if (req == NULL)
		return (ENOMEM);

	mk_write_l2e(sc, e, sync, sync, req);

	commit_wrq_wr(wrq, req, &cookie);

	if (sync && e->state != L2T_STATE_SWITCHING)
		e->state = L2T_STATE_SYNC_WRITE;

	return (0);
}

/*
 * Allocate an L2T entry for use by a TLS connection.  These entries are
 * associated with a specific VLAN and destination MAC that never changes.
 * However, multiple TLS connections might share a single entry.
 *
 * If a new L2T entry is allocated, a work request to initialize it is
 * written to 'txq' and 'ndesc' will be set to 1.  Otherwise, 'ndesc'
 * will be set to 0.
 *
 * To avoid races, separate L2T entries are reserved for individual
 * queues since the L2T entry update is written to a txq just prior to
 * TLS work requests that will depend on it being written.
 */
struct l2t_entry *
t4_l2t_alloc_tls(struct adapter *sc, struct sge_txq *txq, void *dst,
    int *ndesc, uint16_t vlan, uint8_t port, uint8_t *eth_addr)
{
	struct l2t_data *d;
	struct l2t_entry *e;
	int i;

	TXQ_LOCK_ASSERT_OWNED(txq);

	d = sc->l2t;
	*ndesc = 0;

	rw_rlock(&d->lock);

	/* First, try to find an existing entry. */
	for (i = 0; i < d->l2t_size; i++) {
		e = &d->l2tab[i];
		if (e->state != L2T_STATE_TLS)
			continue;
		if (e->vlan == vlan && e->lport == port &&
		    e->wrq == (struct sge_wrq *)txq &&
		    memcmp(e->dmac, eth_addr, ETHER_ADDR_LEN) == 0) {
			if (atomic_fetchadd_int(&e->refcnt, 1) == 0) {
				/*
				 * This entry wasn't held but is still
				 * valid, so decrement nfree.
				 */
				atomic_subtract_int(&d->nfree, 1);
			}
			KASSERT(e->refcnt > 0,
			    ("%s: refcount overflow", __func__));
			rw_runlock(&d->lock);
			return (e);
		}
	}

	/*
	 * Don't bother rechecking if the upgrade fails since the txq is
	 * already locked.
	 */
	if (!rw_try_upgrade(&d->lock)) {
		rw_runlock(&d->lock);
		rw_wlock(&d->lock);
	}

	/* Match not found, allocate a new entry. */
	e = t4_alloc_l2e(d);
	if (e == NULL) {
		rw_wunlock(&d->lock);
		return (e);
	}

	/* Initialize the entry. */
	e->state = L2T_STATE_TLS;
	e->vlan = vlan;
	e->lport = port;
	e->iqid = sc->sge.fwq.abs_id;
	e->wrq = (struct sge_wrq *)txq;
	memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
	atomic_store_rel_int(&e->refcnt, 1);
	rw_wunlock(&d->lock);

	/* Write out the work request. */
	*ndesc = howmany(sizeof(struct cpl_l2t_write_req), EQ_ESIZE);
	MPASS(*ndesc == 1);
	mk_write_l2e(sc, e, 1, 0, dst);

	return (e);
}
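
/*
 * Hypothetical caller, for illustration only: the txq lock is held
 * throughout and, when *ndesc is non-zero, the CPL_L2T_WRITE_REQ written
 * to 'dst' must reach the wire before any work request that relies on
 * e->idx.
 *
 *	TXQ_LOCK(txq);
 *	e = t4_l2t_alloc_tls(sc, txq, dst, &ndesc, vlan, port, dmac);
 *	if (e != NULL && ndesc != 0) {
 *		... transmit the ndesc descriptors at 'dst' first ...
 *	}
 *	TXQ_UNLOCK(txq);
 */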

/*
 * Allocate an L2T entry for use by a switching rule.  Such entries need to
 * be explicitly freed, and while busy they are not on any hash chain, so
 * normal address resolution updates do not see them.
 */
struct l2t_entry *
t4_l2t_alloc_switching(struct adapter *sc, uint16_t vlan, uint8_t port,
    uint8_t *eth_addr)
{
	struct l2t_data *d = sc->l2t;
	struct l2t_entry *e;
	int rc;

	rw_wlock(&d->lock);
	if (__predict_false(d->l2t_stopped))
		e = NULL;
	else
		e = find_or_alloc_l2e(d, vlan, port, eth_addr);
	if (e) {
		if (atomic_load_acq_int(&e->refcnt) == 0) {
			mtx_lock(&e->lock);	/* avoid race with t4_l2t_free */
			e->wrq = &sc->sge.ctrlq[0];
			e->iqid = sc->sge.fwq.abs_id;
			e->state = L2T_STATE_SWITCHING;
			e->vlan = vlan;
			e->lport = port;
			memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
			atomic_store_rel_int(&e->refcnt, 1);
			atomic_subtract_int(&d->nfree, 1);
			rc = t4_write_l2e(e, 0);
			mtx_unlock(&e->lock);
			if (rc != 0)
				e = NULL;
		} else {
			MPASS(e->vlan == vlan);
			MPASS(e->lport == port);
			atomic_add_int(&e->refcnt, 1);
		}
	}
	rw_wunlock(&d->lock);
	return (e);
}
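
/*
 * Illustrative sketch only (not a caller in this file): switching entries
 * are typically used by filters and must be released explicitly, e.g. via
 * t4_l2t_release(), once the consumer is done with them.
 *
 *	e = t4_l2t_alloc_switching(sc, vlan, port, dmac);
 *	if (e != NULL) {
 *		... program a filter that uses e->idx ...
 *		t4_l2t_release(e);	when the filter is removed
 *	}
 */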
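/*
 * Allocate and initialize the L2 table.  'flags' is passed through to
 * malloc(9) and is expected to be M_NOWAIT or M_WAITOK.
 */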
int
t4_init_l2t(struct adapter *sc, int flags)
{
	int i, l2t_size;
	struct l2t_data *d;

	l2t_size = sc->vres.l2t.size;
	if (l2t_size < 2)	/* At least 1 bucket for IP and 1 for IPv6 */
		return (EINVAL);

	d = malloc(sizeof(*d) + l2t_size * sizeof(struct l2t_entry), M_CXGBE,
	    M_ZERO | flags);
	if (!d)
		return (ENOMEM);

	d->l2t_size = l2t_size;
	d->l2t_stopped = false;
	d->rover = d->l2tab;
	atomic_store_rel_int(&d->nfree, l2t_size);
	rw_init(&d->lock, "L2T");

	for (i = 0; i < l2t_size; i++) {
		struct l2t_entry *e = &d->l2tab[i];

		e->idx = i;
		e->state = L2T_STATE_UNUSED;
		mtx_init(&e->lock, "L2T_E", NULL, MTX_DEF);
		STAILQ_INIT(&e->wr_list);
		atomic_store_rel_int(&e->refcnt, 0);
	}

	sc->l2t = d;

	return (0);
}

int
t4_free_l2t(struct adapter *sc)
{
	struct l2t_data *d = sc->l2t;
	int i;

	for (i = 0; i < d->l2t_size; i++)
		mtx_destroy(&d->l2tab[i].lock);
	rw_destroy(&d->lock);
	free(d, M_CXGBE);

	return (0);
}

int
t4_stop_l2t(struct adapter *sc)
{
	struct l2t_data *d = sc->l2t;

	rw_wlock(&d->lock);
	d->l2t_stopped = true;
	rw_wunlock(&d->lock);

	return (0);
}

int
t4_restart_l2t(struct adapter *sc)
{
	struct l2t_data *d = sc->l2t;

	rw_wlock(&d->lock);
	d->l2t_stopped = false;
	rw_wunlock(&d->lock);

	return (0);
}

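/*
 * Process a CPL_L2T_WRITE_RPL from the firmware (sent in response to a
 * write request that asked for a reply).  Validates the hardware index
 * and the status reported in the reply.
 */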
int
do_l2t_write_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_l2t_write_rpl *rpl = (const void *)(rss + 1);
	const u_int hwidx = GET_TID(rpl) & ~(F_SYNC_WR | V_TID_QID(M_TID_QID));
	const bool sync = GET_TID(rpl) & F_SYNC_WR;

	MPASS(iq->abs_id == G_TID_QID(GET_TID(rpl)));

	if (__predict_false(hwidx < sc->vres.l2t.start) ||
	    __predict_false(hwidx >= sc->vres.l2t.start + sc->vres.l2t.size) ||
	    __predict_false(rpl->status != CPL_ERR_NONE)) {
		CH_ERR(sc, "%s: hwidx %u, rpl %u, sync %u; L2T st %u, sz %u\n",
		    __func__, hwidx, rpl->status, sync, sc->vres.l2t.start,
		    sc->vres.l2t.size);
		return (EINVAL);
	}

	return (0);
}

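/*
 * The 3-bit priority (PCP) occupies the top bits of the 16-bit 802.1Q
 * TCI stored in e->vlan; the low 12 bits are the VLAN ID.
 */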
static inline unsigned int
vlan_prio(const struct l2t_entry *e)
{
	return (e->vlan >> 13);
}

static char
l2e_state(const struct l2t_entry *e)
{
	switch (e->state) {
	case L2T_STATE_VALID: return ('V');  /* valid, fast-path entry */
	case L2T_STATE_STALE: return ('S');  /* needs revalidation, but usable */
	case L2T_STATE_SYNC_WRITE: return ('W');
	case L2T_STATE_RESOLVING:
		return (STAILQ_EMPTY(&e->wr_list) ? 'R' : 'A');
	case L2T_STATE_SWITCHING: return ('X');
	case L2T_STATE_TLS: return ('T');
	default: return ('U');
	}
}

int
sysctl_l2t(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct l2t_data *l2t = sc->l2t;
	struct l2t_entry *e;
	struct sbuf *sb;
	int rc, i, header = 0;
	char ip[INET6_ADDRSTRLEN];

	if (l2t == NULL)
		return (ENXIO);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	e = &l2t->l2tab[0];
	for (i = 0; i < l2t->l2t_size; i++, e++) {
		mtx_lock(&e->lock);
		if (e->state == L2T_STATE_UNUSED)
			goto skip;

		if (header == 0) {
			sbuf_printf(sb, " Idx IP address      "
			    "Ethernet address  VLAN/P LP State Users Port");
			header = 1;
		}
		if (e->state >= L2T_STATE_SWITCHING)
			ip[0] = 0;
		else {
			inet_ntop(e->ipv6 ? AF_INET6 : AF_INET, &e->addr[0],
			    &ip[0], sizeof(ip));
		}

		/*
		 * XXX: IPv6 addresses may not align properly in the output.
		 */
		sbuf_printf(sb, "\n%4u %-15s %02x:%02x:%02x:%02x:%02x:%02x %4d"
		    " %u %2u   %c   %5u %s",
		    e->idx, ip, e->dmac[0], e->dmac[1], e->dmac[2],
		    e->dmac[3], e->dmac[4], e->dmac[5],
		    e->vlan & 0xfff, vlan_prio(e), e->lport,
		    l2e_state(e), atomic_load_acq_int(&e->refcnt),
		    e->ifp ? if_name(e->ifp) : "-");
skip:
		mtx_unlock(&e->lock);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}