/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sbuf.h>
#include <netinet/in.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "t4_l2t.h"

/*
 * Module locking notes:  There is a RW lock protecting the L2 table as a
 * whole plus a spinlock per L2T entry.  Entry lookups and allocations happen
 * under the protection of the table lock; individual entry changes happen
 * while holding that entry's spinlock.  The table lock nests outside the
 * entry locks.  Allocations of new entries take the table lock as writers so
 * no other lookups can happen while allocating new entries.  Entry updates
 * take the table lock as readers so multiple entries can be updated in
 * parallel.  Dropping an L2T entry only decrements its reference count, so
 * it can happen in parallel with entry allocation, but no entry can change
 * state or increment its ref count during allocation as both of these
 * perform lookups.
 *
 * Note: We do not take references to ifnets in this module because both
 * the TOE and the sockets already hold references to the interfaces and the
 * lifetime of an L2T entry is fully contained in the lifetime of the TOE.
 */

/*
 * Allocate a free L2T entry.  Must be called with l2t_data.lock held.
 */
struct l2t_entry *
t4_alloc_l2e(struct l2t_data *d)
{
	struct l2t_entry *end, *e, **p;

	rw_assert(&d->lock, RA_WLOCKED);
	if (__predict_false(d->l2t_stopped))
		return (NULL);
	if (!atomic_load_acq_int(&d->nfree))
		return (NULL);

	/* there's definitely a free entry */
	for (e = d->rover, end = &d->l2tab[d->l2t_size]; e != end; ++e)
		if (atomic_load_acq_int(&e->refcnt) == 0)
			goto found;

	for (e = d->l2tab; atomic_load_acq_int(&e->refcnt); ++e)
		continue;
found:
	d->rover = e + 1;
	atomic_subtract_int(&d->nfree, 1);

	/*
	 * The entry we found may be an inactive entry that is
	 * presently in the hash table.  We need to remove it.
	 */
	if (e->state < L2T_STATE_SWITCHING) {
		for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next) {
			if (*p == e) {
				*p = e->next;
				e->next = NULL;
				break;
			}
		}
	}

	e->state = L2T_STATE_UNUSED;
	return (e);
}

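/*
 * Find an existing switching entry that matches the given (dmac, vlan, port)
 * tuple, or else claim an unreferenced entry for reuse.  A reclaimed entry is
 * unhooked from its hash chain and reset to L2T_STATE_UNUSED.  Returns NULL
 * if there is no match and no free entry.  Called with the table lock held
 * for writing.
 */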
static struct l2t_entry *
find_or_alloc_l2e(struct l2t_data *d, uint16_t vlan, uint8_t port, uint8_t *dmac)
{
	struct l2t_entry *end, *e, **p;
	struct l2t_entry *first_free = NULL;

	for (e = &d->l2tab[0], end = &d->l2tab[d->l2t_size]; e != end; ++e) {
		if (atomic_load_acq_int(&e->refcnt) == 0) {
			if (!first_free)
				first_free = e;
		} else if (e->state == L2T_STATE_SWITCHING &&
		    memcmp(e->dmac, dmac, ETHER_ADDR_LEN) == 0 &&
		    e->vlan == vlan && e->lport == port)
			return (e);	/* Found existing entry that matches. */
	}

	if (first_free == NULL)
		return (NULL);	/* No match and no room for a new entry. */

	/*
	 * The entry we found may be an inactive entry that is
	 * presently in the hash table.  We need to remove it.
	 */
	e = first_free;
	if (e->state < L2T_STATE_SWITCHING) {
		for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next) {
			if (*p == e) {
				*p = e->next;
				e->next = NULL;
				break;
			}
		}
	}
	e->state = L2T_STATE_UNUSED;
	return (e);
}

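/*
 * Construct a CPL_L2T_WRITE_REQ for entry 'e' in the buffer at 'dst'.  'sync'
 * marks the write as synchronous and 'reply' asks the firmware to send a
 * CPL_L2T_WRITE_RPL when the write has completed.
 */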
static void
mk_write_l2e(struct adapter *sc, struct l2t_entry *e, int sync, int reply,
    void *dst)
{
	struct cpl_l2t_write_req *req;
	int idx;

	req = dst;
	idx = e->idx + sc->vres.l2t.start;
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, idx |
	    V_SYNC_WR(sync) | V_TID_QID(e->iqid)));
	req->params = htons(V_L2T_W_PORT(e->lport) | V_L2T_W_NOREPLY(!reply));
	req->l2t_idx = htons(idx);
	req->vlan = htons(e->vlan);
	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
}

/*
 * Write an L2T entry.  Must be called with the entry locked.
 * The write may be synchronous or asynchronous.
 */
int
t4_write_l2e(struct l2t_entry *e, int sync)
{
	struct sge_wrq *wrq;
	struct adapter *sc;
	struct wrq_cookie cookie;
	struct cpl_l2t_write_req *req;

	mtx_assert(&e->lock, MA_OWNED);
	MPASS(e->wrq != NULL);

	wrq = e->wrq;
	sc = wrq->adapter;

	req = start_wrq_wr(wrq, howmany(sizeof(*req), 16), &cookie);
	if (req == NULL)
		return (ENOMEM);

	mk_write_l2e(sc, e, sync, sync, req);

	commit_wrq_wr(wrq, req, &cookie);

	if (sync && e->state != L2T_STATE_SWITCHING)
		e->state = L2T_STATE_SYNC_WRITE;

	return (0);
}

/*
 * Allocate an L2T entry for use by a TLS connection.  These entries are
 * associated with a specific VLAN and destination MAC that never changes.
 * However, multiple TLS connections might share a single entry.
 *
 * If a new L2T entry is allocated, a work request to initialize it is
 * written to 'txq' and 'ndesc' will be set to 1.  Otherwise, 'ndesc'
 * will be set to 0.
 *
 * To avoid races, separate L2T entries are reserved for individual
 * queues since the L2T entry update is written to a txq just prior to
 * TLS work requests that will depend on it being written.
 */
struct l2t_entry *
t4_l2t_alloc_tls(struct adapter *sc, struct sge_txq *txq, void *dst,
    int *ndesc, uint16_t vlan, uint8_t port, uint8_t *eth_addr)
{
	struct l2t_data *d;
	struct l2t_entry *e;
	int i;

	TXQ_LOCK_ASSERT_OWNED(txq);

	d = sc->l2t;
	*ndesc = 0;

	rw_rlock(&d->lock);

	/* First, try to find an existing entry. */
	for (i = 0; i < d->l2t_size; i++) {
		e = &d->l2tab[i];
		if (e->state != L2T_STATE_TLS)
			continue;
		if (e->vlan == vlan && e->lport == port &&
		    e->wrq == (struct sge_wrq *)txq &&
		    memcmp(e->dmac, eth_addr, ETHER_ADDR_LEN) == 0) {
			if (atomic_fetchadd_int(&e->refcnt, 1) == 0) {
				/*
				 * This entry wasn't held but is still
				 * valid, so decrement nfree.
				 */
				atomic_subtract_int(&d->nfree, 1);
			}
			KASSERT(e->refcnt > 0,
			    ("%s: refcount overflow", __func__));
			rw_runlock(&d->lock);
			return (e);
		}
	}

	/*
	 * Don't bother rechecking if the upgrade fails since the txq is
	 * already locked.
	 */
	if (!rw_try_upgrade(&d->lock)) {
		rw_runlock(&d->lock);
		rw_wlock(&d->lock);
	}

	/* Match not found, allocate a new entry. */
	e = t4_alloc_l2e(d);
	if (e == NULL) {
		rw_wunlock(&d->lock);
		return (e);
	}

	/* Initialize the entry. */
	e->state = L2T_STATE_TLS;
	e->vlan = vlan;
	e->lport = port;
	e->iqid = sc->sge.fwq.abs_id;
	e->wrq = (struct sge_wrq *)txq;
	memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
	atomic_store_rel_int(&e->refcnt, 1);
	rw_wunlock(&d->lock);

	/* Write out the work request. */
	*ndesc = howmany(sizeof(struct cpl_l2t_write_req), EQ_ESIZE);
	MPASS(*ndesc == 1);
	mk_write_l2e(sc, e, 1, 0, dst);

	return (e);
}

/*
 * Allocate an L2T entry for use by a switching rule.  Such entries need to be
 * explicitly freed, and while busy they are not on any hash chain, so normal
 * address resolution updates do not see them.
 */
struct l2t_entry *
t4_l2t_alloc_switching(struct adapter *sc, uint16_t vlan, uint8_t port,
    uint8_t *eth_addr)
{
	struct l2t_data *d = sc->l2t;
	struct l2t_entry *e;
	int rc;

	rw_wlock(&d->lock);
	if (__predict_false(d->l2t_stopped))
		e = NULL;
	else
		e = find_or_alloc_l2e(d, vlan, port, eth_addr);
	if (e) {
		if (atomic_load_acq_int(&e->refcnt) == 0) {
			mtx_lock(&e->lock);	/* avoid race with t4_l2t_free */
			e->wrq = &sc->sge.ctrlq[0];
			e->iqid = sc->sge.fwq.abs_id;
			e->state = L2T_STATE_SWITCHING;
			e->vlan = vlan;
			e->lport = port;
			memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
			atomic_store_rel_int(&e->refcnt, 1);
			atomic_subtract_int(&d->nfree, 1);
			rc = t4_write_l2e(e, 0);
			mtx_unlock(&e->lock);
			if (rc != 0)
				e = NULL;
		} else {
			MPASS(e->vlan == vlan);
			MPASS(e->lport == port);
			atomic_add_int(&e->refcnt, 1);
		}
	}
	rw_wunlock(&d->lock);
	return (e);
}

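/*
 * Allocate and initialize the L2 table for this adapter.  The number of
 * entries comes from the L2T virtualized resource range (sc->vres.l2t)
 * assigned by the firmware.
 */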
int
t4_init_l2t(struct adapter *sc, int flags)
{
	int i, l2t_size;
	struct l2t_data *d;

	l2t_size = sc->vres.l2t.size;
	if (l2t_size < 2)	/* At least 1 bucket for IP and 1 for IPv6 */
		return (EINVAL);

	d = malloc(sizeof(*d) + l2t_size * sizeof (struct l2t_entry), M_CXGBE,
	    M_ZERO | flags);
	if (!d)
		return (ENOMEM);

	d->l2t_size = l2t_size;
	d->l2t_stopped = false;
	d->rover = d->l2tab;
	atomic_store_rel_int(&d->nfree, l2t_size);
	rw_init(&d->lock, "L2T");

	for (i = 0; i < l2t_size; i++) {
		struct l2t_entry *e = &d->l2tab[i];

		e->idx = i;
		e->state = L2T_STATE_UNUSED;
		mtx_init(&e->lock, "L2T_E", NULL, MTX_DEF);
		STAILQ_INIT(&e->wr_list);
		atomic_store_rel_int(&e->refcnt, 0);
	}

	sc->l2t = d;

	return (0);
}

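/*
 * Free the L2 table and all of its per-entry locks.
 */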
int
t4_free_l2t(struct adapter *sc)
{
	struct l2t_data *d = sc->l2t;
	int i;

	for (i = 0; i < d->l2t_size; i++)
		mtx_destroy(&d->l2tab[i].lock);
	rw_destroy(&d->lock);
	free(d, M_CXGBE);

	return (0);
}

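/*
 * Mark the L2 table stopped.  While stopped, attempts to allocate new entries
 * fail.
 */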
int
t4_stop_l2t(struct adapter *sc)
{
	struct l2t_data *d = sc->l2t;

	rw_wlock(&d->lock);
	d->l2t_stopped = true;
	rw_wunlock(&d->lock);

	return (0);
}

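/*
 * Clear the stopped flag so that entries can be allocated again.
 */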
int
t4_restart_l2t(struct adapter *sc)
{
	struct l2t_data *d = sc->l2t;

	rw_wlock(&d->lock);
	d->l2t_stopped = false;
	rw_wunlock(&d->lock);

	return (0);
}

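/*
 * Handler for CPL_L2T_WRITE_RPL messages from the firmware.  The reply is
 * only checked for a bad hardware index or an error status; nothing else is
 * done with it.
 */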
int
do_l2t_write_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_l2t_write_rpl *rpl = (const void *)(rss + 1);
	const u_int hwidx = GET_TID(rpl) & ~(F_SYNC_WR | V_TID_QID(M_TID_QID));
	const bool sync = GET_TID(rpl) & F_SYNC_WR;

	MPASS(iq->abs_id == G_TID_QID(GET_TID(rpl)));

	if (__predict_false(hwidx < sc->vres.l2t.start) ||
	    __predict_false(hwidx >= sc->vres.l2t.start + sc->vres.l2t.size) ||
	    __predict_false(rpl->status != CPL_ERR_NONE)) {
		CH_ERR(sc, "%s: hwidx %u, rpl %u, sync %u; L2T st %u, sz %u\n",
		    __func__, hwidx, rpl->status, sync, sc->vres.l2t.start,
		    sc->vres.l2t.size);
		return (EINVAL);
	}

	return (0);
}

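/* The 802.1p priority is in the top 3 bits of the VLAN tag. */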
static inline unsigned int
vlan_prio(const struct l2t_entry *e)
{
	return e->vlan >> 13;
}

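/* One-character summary of an entry's state, for the sysctl output below. */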
static char
l2e_state(const struct l2t_entry *e)
{
	switch (e->state) {
	case L2T_STATE_VALID: return 'V';  /* valid, fast-path entry */
	case L2T_STATE_STALE: return 'S';  /* needs revalidation, but usable */
	case L2T_STATE_SYNC_WRITE: return 'W';
	case L2T_STATE_RESOLVING: return STAILQ_EMPTY(&e->wr_list) ? 'R' : 'A';
	case L2T_STATE_SWITCHING: return 'X';
	case L2T_STATE_TLS: return 'T';
	default: return 'U';
	}
}

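/*
 * Render the entire L2 table in human-readable form for the l2t sysctl.
 */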
int
sysctl_l2t(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct l2t_data *l2t = sc->l2t;
	struct l2t_entry *e;
	struct sbuf *sb;
	int rc, i, header = 0;
	char ip[INET6_ADDRSTRLEN];

	if (l2t == NULL)
		return (ENXIO);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	e = &l2t->l2tab[0];
	for (i = 0; i < l2t->l2t_size; i++, e++) {
		mtx_lock(&e->lock);
		if (e->state == L2T_STATE_UNUSED)
			goto skip;

		if (header == 0) {
			sbuf_printf(sb, " Idx IP address      "
			    "Ethernet address  VLAN/P LP State Users Port");
			header = 1;
		}
		if (e->state >= L2T_STATE_SWITCHING)
			ip[0] = 0;
		else {
			inet_ntop(e->ipv6 ? AF_INET6 : AF_INET, &e->addr[0],
			    &ip[0], sizeof(ip));
		}

		/*
		 * XXX: IPv6 addresses may not align properly in the output.
		 */
		sbuf_printf(sb, "\n%4u %-15s %02x:%02x:%02x:%02x:%02x:%02x %4d"
		    " %u %2u   %c   %5u %s",
		    e->idx, ip, e->dmac[0], e->dmac[1], e->dmac[2],
		    e->dmac[3], e->dmac[4], e->dmac[5],
		    e->vlan & 0xfff, vlan_prio(e), e->lport,
		    l2e_state(e), atomic_load_acq_int(&e->refcnt),
		    e->ifp ? if_name(e->ifp) : "-");
skip:
		mtx_unlock(&e->lock);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}