/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20*2dae2a74SNavdeep Parhar * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21*2dae2a74SNavdeep Parhar * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22*2dae2a74SNavdeep Parhar * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23*2dae2a74SNavdeep Parhar * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24*2dae2a74SNavdeep Parhar * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25*2dae2a74SNavdeep Parhar * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26*2dae2a74SNavdeep Parhar * SUCH DAMAGE. 27*2dae2a74SNavdeep Parhar */ 28*2dae2a74SNavdeep Parhar #include <sys/cdefs.h> 29*2dae2a74SNavdeep Parhar __FBSDID("$FreeBSD$"); 30*2dae2a74SNavdeep Parhar 31*2dae2a74SNavdeep Parhar #include "opt_inet.h" 32*2dae2a74SNavdeep Parhar #include "opt_inet6.h" 33*2dae2a74SNavdeep Parhar 34*2dae2a74SNavdeep Parhar #include <sys/param.h> 35*2dae2a74SNavdeep Parhar #include <sys/eventhandler.h> 36*2dae2a74SNavdeep Parhar #include <sys/systm.h> 37*2dae2a74SNavdeep Parhar #include <sys/kernel.h> 38*2dae2a74SNavdeep Parhar #include <sys/module.h> 39*2dae2a74SNavdeep Parhar #include <sys/bus.h> 40*2dae2a74SNavdeep Parhar #include <sys/lock.h> 41*2dae2a74SNavdeep Parhar #include <sys/mutex.h> 42*2dae2a74SNavdeep Parhar #include <sys/rwlock.h> 43*2dae2a74SNavdeep Parhar #include <sys/socket.h> 44*2dae2a74SNavdeep Parhar #include <sys/sbuf.h> 45*2dae2a74SNavdeep Parhar #include <netinet/in.h> 46*2dae2a74SNavdeep Parhar 47*2dae2a74SNavdeep Parhar #include "common/common.h" 48*2dae2a74SNavdeep Parhar #include "common/t4_msg.h" 49*2dae2a74SNavdeep Parhar #include "t4_smt.h" 50*2dae2a74SNavdeep Parhar 51*2dae2a74SNavdeep Parhar /* 52*2dae2a74SNavdeep Parhar * Module locking notes: There is a RW lock protecting the SMAC table as a 53*2dae2a74SNavdeep Parhar * whole plus a spinlock per SMT 
entry. Entry lookups and allocations happen
 * under the protection of the table lock, individual entry changes happen
 * while holding that entry's spinlock.  The table lock nests outside the
 * entry locks.  Allocations of new entries take the table lock as writers so
 * no other lookups can happen while allocating new entries.  Entry updates
 * take the table lock as readers so multiple entries can be updated in
 * parallel.  An SMT entry can be dropped by decrementing its reference count
 * and therefore can happen in parallel with entry allocation but no entry
 * can change state or increment its ref count during allocation as both of
 * these perform lookups.
 *
 * Note: We do not take references to ifnets in this module because both
 * the TOE and the sockets already hold references to the interfaces and the
 * lifetime of an SMT entry is fully contained in the lifetime of the TOE.
 */

/*
 * Allocate a free SMT entry.  Must be called with smt_data.lock held.
71*2dae2a74SNavdeep Parhar */ 72*2dae2a74SNavdeep Parhar struct smt_entry * 73*2dae2a74SNavdeep Parhar t4_find_or_alloc_sme(struct smt_data *s, uint8_t *smac) 74*2dae2a74SNavdeep Parhar { 75*2dae2a74SNavdeep Parhar struct smt_entry *end, *e; 76*2dae2a74SNavdeep Parhar struct smt_entry *first_free = NULL; 77*2dae2a74SNavdeep Parhar 78*2dae2a74SNavdeep Parhar rw_assert(&s->lock, RA_WLOCKED); 79*2dae2a74SNavdeep Parhar for (e = &s->smtab[0], end = &s->smtab[s->smt_size]; e != end; ++e) { 80*2dae2a74SNavdeep Parhar if (atomic_load_acq_int(&e->refcnt) == 0) { 81*2dae2a74SNavdeep Parhar if (!first_free) 82*2dae2a74SNavdeep Parhar first_free = e; 83*2dae2a74SNavdeep Parhar } else { 84*2dae2a74SNavdeep Parhar if (e->state == SMT_STATE_SWITCHING) { 85*2dae2a74SNavdeep Parhar /* 86*2dae2a74SNavdeep Parhar * This entry is actually in use. See if we can 87*2dae2a74SNavdeep Parhar * re-use it? 88*2dae2a74SNavdeep Parhar */ 89*2dae2a74SNavdeep Parhar if (memcmp(e->smac, smac, ETHER_ADDR_LEN) == 0) 90*2dae2a74SNavdeep Parhar goto found_reuse; 91*2dae2a74SNavdeep Parhar } 92*2dae2a74SNavdeep Parhar } 93*2dae2a74SNavdeep Parhar } 94*2dae2a74SNavdeep Parhar if (first_free) { 95*2dae2a74SNavdeep Parhar e = first_free; 96*2dae2a74SNavdeep Parhar goto found; 97*2dae2a74SNavdeep Parhar } 98*2dae2a74SNavdeep Parhar return NULL; 99*2dae2a74SNavdeep Parhar 100*2dae2a74SNavdeep Parhar found: 101*2dae2a74SNavdeep Parhar e->state = SMT_STATE_UNUSED; 102*2dae2a74SNavdeep Parhar found_reuse: 103*2dae2a74SNavdeep Parhar atomic_add_int(&e->refcnt, 1); 104*2dae2a74SNavdeep Parhar return e; 105*2dae2a74SNavdeep Parhar } 106*2dae2a74SNavdeep Parhar 107*2dae2a74SNavdeep Parhar /* 108*2dae2a74SNavdeep Parhar * Write an SMT entry. Must be called with the entry locked. 
109*2dae2a74SNavdeep Parhar */ 110*2dae2a74SNavdeep Parhar int 111*2dae2a74SNavdeep Parhar t4_write_sme(struct smt_entry *e) 112*2dae2a74SNavdeep Parhar { 113*2dae2a74SNavdeep Parhar struct smt_data *s; 114*2dae2a74SNavdeep Parhar struct sge_wrq *wrq; 115*2dae2a74SNavdeep Parhar struct adapter *sc; 116*2dae2a74SNavdeep Parhar struct wrq_cookie cookie; 117*2dae2a74SNavdeep Parhar struct cpl_smt_write_req *req; 118*2dae2a74SNavdeep Parhar struct cpl_t6_smt_write_req *t6req; 119*2dae2a74SNavdeep Parhar u8 row; 120*2dae2a74SNavdeep Parhar 121*2dae2a74SNavdeep Parhar mtx_assert(&e->lock, MA_OWNED); 122*2dae2a74SNavdeep Parhar 123*2dae2a74SNavdeep Parhar MPASS(e->wrq != NULL); 124*2dae2a74SNavdeep Parhar wrq = e->wrq; 125*2dae2a74SNavdeep Parhar sc = wrq->adapter; 126*2dae2a74SNavdeep Parhar MPASS(wrq->adapter != NULL); 127*2dae2a74SNavdeep Parhar s = sc->smt; 128*2dae2a74SNavdeep Parhar 129*2dae2a74SNavdeep Parhar 130*2dae2a74SNavdeep Parhar if (chip_id(sc) <= CHELSIO_T5) { 131*2dae2a74SNavdeep Parhar /* Source MAC Table (SMT) contains 256 SMAC entries 132*2dae2a74SNavdeep Parhar * organized in 128 rows of 2 entries each. 133*2dae2a74SNavdeep Parhar */ 134*2dae2a74SNavdeep Parhar req = start_wrq_wr(wrq, howmany(sizeof(*req), 16), &cookie); 135*2dae2a74SNavdeep Parhar if (req == NULL) 136*2dae2a74SNavdeep Parhar return (ENOMEM); 137*2dae2a74SNavdeep Parhar INIT_TP_WR(req, 0); 138*2dae2a74SNavdeep Parhar /* Each row contains an SMAC pair. 139*2dae2a74SNavdeep Parhar * LSB selects the SMAC entry within a row 140*2dae2a74SNavdeep Parhar */ 141*2dae2a74SNavdeep Parhar row = (e->idx >> 1); 142*2dae2a74SNavdeep Parhar if (e->idx & 1) { 143*2dae2a74SNavdeep Parhar req->pfvf1 = 0x0; 144*2dae2a74SNavdeep Parhar memcpy(req->src_mac1, e->smac, ETHER_ADDR_LEN); 145*2dae2a74SNavdeep Parhar /* fill pfvf0/src_mac0 with entry 146*2dae2a74SNavdeep Parhar * at prev index from smt-tab. 
147*2dae2a74SNavdeep Parhar */ 148*2dae2a74SNavdeep Parhar req->pfvf0 = 0x0; 149*2dae2a74SNavdeep Parhar memcpy(req->src_mac0, s->smtab[e->idx - 1].smac, 150*2dae2a74SNavdeep Parhar ETHER_ADDR_LEN); 151*2dae2a74SNavdeep Parhar } else { 152*2dae2a74SNavdeep Parhar req->pfvf0 = 0x0; 153*2dae2a74SNavdeep Parhar memcpy(req->src_mac0, e->smac, ETHER_ADDR_LEN); 154*2dae2a74SNavdeep Parhar /* fill pfvf1/src_mac1 with entry 155*2dae2a74SNavdeep Parhar * at next index from smt-tab 156*2dae2a74SNavdeep Parhar */ 157*2dae2a74SNavdeep Parhar req->pfvf1 = 0x0; 158*2dae2a74SNavdeep Parhar memcpy(req->src_mac1, s->smtab[e->idx + 1].smac, 159*2dae2a74SNavdeep Parhar ETHER_ADDR_LEN); 160*2dae2a74SNavdeep Parhar } 161*2dae2a74SNavdeep Parhar } else { 162*2dae2a74SNavdeep Parhar /* Source MAC Table (SMT) contains 256 SMAC entries */ 163*2dae2a74SNavdeep Parhar t6req = start_wrq_wr(wrq, howmany(sizeof(*t6req), 16), &cookie); 164*2dae2a74SNavdeep Parhar if (t6req == NULL) 165*2dae2a74SNavdeep Parhar return (ENOMEM); 166*2dae2a74SNavdeep Parhar INIT_TP_WR(t6req, 0); 167*2dae2a74SNavdeep Parhar req = (struct cpl_smt_write_req *)t6req; 168*2dae2a74SNavdeep Parhar 169*2dae2a74SNavdeep Parhar /* fill pfvf0/src_mac0 from smt-tab */ 170*2dae2a74SNavdeep Parhar req->pfvf0 = 0x0; 171*2dae2a74SNavdeep Parhar memcpy(req->src_mac0, s->smtab[e->idx].smac, ETHER_ADDR_LEN); 172*2dae2a74SNavdeep Parhar row = e->idx; 173*2dae2a74SNavdeep Parhar } 174*2dae2a74SNavdeep Parhar OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, e->idx | 175*2dae2a74SNavdeep Parhar V_TID_QID(e->iqid))); 176*2dae2a74SNavdeep Parhar req->params = htonl(V_SMTW_NORPL(0) | 177*2dae2a74SNavdeep Parhar V_SMTW_IDX(row) | 178*2dae2a74SNavdeep Parhar V_SMTW_OVLAN_IDX(0)); 179*2dae2a74SNavdeep Parhar 180*2dae2a74SNavdeep Parhar commit_wrq_wr(wrq, req, &cookie); 181*2dae2a74SNavdeep Parhar 182*2dae2a74SNavdeep Parhar return (0); 183*2dae2a74SNavdeep Parhar } 184*2dae2a74SNavdeep Parhar 185*2dae2a74SNavdeep Parhar /* 
186*2dae2a74SNavdeep Parhar * Allocate an SMT entry for use by a switching rule. 187*2dae2a74SNavdeep Parhar */ 188*2dae2a74SNavdeep Parhar struct smt_entry * 189*2dae2a74SNavdeep Parhar t4_smt_alloc_switching(struct smt_data *s, uint8_t *smac) 190*2dae2a74SNavdeep Parhar { 191*2dae2a74SNavdeep Parhar struct smt_entry *e; 192*2dae2a74SNavdeep Parhar 193*2dae2a74SNavdeep Parhar MPASS(s != NULL); 194*2dae2a74SNavdeep Parhar rw_wlock(&s->lock); 195*2dae2a74SNavdeep Parhar e = t4_find_or_alloc_sme(s, smac); 196*2dae2a74SNavdeep Parhar rw_wunlock(&s->lock); 197*2dae2a74SNavdeep Parhar return e; 198*2dae2a74SNavdeep Parhar } 199*2dae2a74SNavdeep Parhar 200*2dae2a74SNavdeep Parhar /* 201*2dae2a74SNavdeep Parhar * Sets/updates the contents of a switching SMT entry that has been allocated 202*2dae2a74SNavdeep Parhar * with an earlier call to @t4_smt_alloc_switching. 203*2dae2a74SNavdeep Parhar */ 204*2dae2a74SNavdeep Parhar int 205*2dae2a74SNavdeep Parhar t4_smt_set_switching(struct adapter *sc, struct smt_entry *e, uint16_t pfvf, 206*2dae2a74SNavdeep Parhar uint8_t *smac) 207*2dae2a74SNavdeep Parhar { 208*2dae2a74SNavdeep Parhar int rc = 0; 209*2dae2a74SNavdeep Parhar 210*2dae2a74SNavdeep Parhar if (atomic_load_acq_int(&e->refcnt) == 1) { 211*2dae2a74SNavdeep Parhar /* Setup the entry for the first time */ 212*2dae2a74SNavdeep Parhar mtx_lock(&e->lock); 213*2dae2a74SNavdeep Parhar e->wrq = &sc->sge.mgmtq; 214*2dae2a74SNavdeep Parhar e->iqid = sc->sge.fwq.abs_id; 215*2dae2a74SNavdeep Parhar e->pfvf = pfvf; 216*2dae2a74SNavdeep Parhar e->state = SMT_STATE_SWITCHING; 217*2dae2a74SNavdeep Parhar memcpy(e->smac, smac, ETHER_ADDR_LEN); 218*2dae2a74SNavdeep Parhar rc = t4_write_sme(e); 219*2dae2a74SNavdeep Parhar mtx_unlock(&e->lock); 220*2dae2a74SNavdeep Parhar } 221*2dae2a74SNavdeep Parhar 222*2dae2a74SNavdeep Parhar return (rc); 223*2dae2a74SNavdeep Parhar } 224*2dae2a74SNavdeep Parhar 225*2dae2a74SNavdeep Parhar int 226*2dae2a74SNavdeep Parhar t4_init_smt(struct adapter *sc, 
int flags) 227*2dae2a74SNavdeep Parhar { 228*2dae2a74SNavdeep Parhar int i, smt_size; 229*2dae2a74SNavdeep Parhar struct smt_data *s; 230*2dae2a74SNavdeep Parhar 231*2dae2a74SNavdeep Parhar smt_size = SMT_SIZE; 232*2dae2a74SNavdeep Parhar s = malloc(sizeof(*s) + smt_size * sizeof (struct smt_entry), M_CXGBE, 233*2dae2a74SNavdeep Parhar M_ZERO | flags); 234*2dae2a74SNavdeep Parhar if (!s) 235*2dae2a74SNavdeep Parhar return (ENOMEM); 236*2dae2a74SNavdeep Parhar 237*2dae2a74SNavdeep Parhar s->smt_size = smt_size; 238*2dae2a74SNavdeep Parhar rw_init(&s->lock, "SMT"); 239*2dae2a74SNavdeep Parhar 240*2dae2a74SNavdeep Parhar for (i = 0; i < smt_size; i++) { 241*2dae2a74SNavdeep Parhar struct smt_entry *e = &s->smtab[i]; 242*2dae2a74SNavdeep Parhar 243*2dae2a74SNavdeep Parhar e->idx = i; 244*2dae2a74SNavdeep Parhar e->state = SMT_STATE_UNUSED; 245*2dae2a74SNavdeep Parhar mtx_init(&e->lock, "SMT_E", NULL, MTX_DEF); 246*2dae2a74SNavdeep Parhar atomic_store_rel_int(&e->refcnt, 0); 247*2dae2a74SNavdeep Parhar } 248*2dae2a74SNavdeep Parhar 249*2dae2a74SNavdeep Parhar sc->smt = s; 250*2dae2a74SNavdeep Parhar 251*2dae2a74SNavdeep Parhar return (0); 252*2dae2a74SNavdeep Parhar } 253*2dae2a74SNavdeep Parhar 254*2dae2a74SNavdeep Parhar int 255*2dae2a74SNavdeep Parhar t4_free_smt(struct smt_data *s) 256*2dae2a74SNavdeep Parhar { 257*2dae2a74SNavdeep Parhar int i; 258*2dae2a74SNavdeep Parhar 259*2dae2a74SNavdeep Parhar for (i = 0; i < s->smt_size; i++) 260*2dae2a74SNavdeep Parhar mtx_destroy(&s->smtab[i].lock); 261*2dae2a74SNavdeep Parhar rw_destroy(&s->lock); 262*2dae2a74SNavdeep Parhar free(s, M_CXGBE); 263*2dae2a74SNavdeep Parhar 264*2dae2a74SNavdeep Parhar return (0); 265*2dae2a74SNavdeep Parhar } 266*2dae2a74SNavdeep Parhar 267*2dae2a74SNavdeep Parhar int 268*2dae2a74SNavdeep Parhar do_smt_write_rpl(struct sge_iq *iq, const struct rss_header *rss, 269*2dae2a74SNavdeep Parhar struct mbuf *m) 270*2dae2a74SNavdeep Parhar { 271*2dae2a74SNavdeep Parhar struct adapter *sc = 
iq->adapter; 272*2dae2a74SNavdeep Parhar const struct cpl_smt_write_rpl *rpl = (const void *)(rss + 1); 273*2dae2a74SNavdeep Parhar unsigned int tid = GET_TID(rpl); 274*2dae2a74SNavdeep Parhar unsigned int smtidx = G_TID_TID(tid); 275*2dae2a74SNavdeep Parhar 276*2dae2a74SNavdeep Parhar if (__predict_false(rpl->status != CPL_ERR_NONE)) { 277*2dae2a74SNavdeep Parhar struct smt_entry *e = &sc->smt->smtab[smtidx]; 278*2dae2a74SNavdeep Parhar log(LOG_ERR, 279*2dae2a74SNavdeep Parhar "Unexpected SMT_WRITE_RPL (%u) for entry at hw_idx %u\n", 280*2dae2a74SNavdeep Parhar rpl->status, smtidx); 281*2dae2a74SNavdeep Parhar mtx_lock(&e->lock); 282*2dae2a74SNavdeep Parhar e->state = SMT_STATE_ERROR; 283*2dae2a74SNavdeep Parhar mtx_unlock(&e->lock); 284*2dae2a74SNavdeep Parhar return (EINVAL); 285*2dae2a74SNavdeep Parhar } 286*2dae2a74SNavdeep Parhar 287*2dae2a74SNavdeep Parhar return (0); 288*2dae2a74SNavdeep Parhar } 289*2dae2a74SNavdeep Parhar 290*2dae2a74SNavdeep Parhar #ifdef SBUF_DRAIN 291*2dae2a74SNavdeep Parhar static char 292*2dae2a74SNavdeep Parhar smt_state(const struct smt_entry *e) 293*2dae2a74SNavdeep Parhar { 294*2dae2a74SNavdeep Parhar switch (e->state) { 295*2dae2a74SNavdeep Parhar case SMT_STATE_SWITCHING: return 'X'; 296*2dae2a74SNavdeep Parhar case SMT_STATE_ERROR: return 'E'; 297*2dae2a74SNavdeep Parhar default: return 'U'; 298*2dae2a74SNavdeep Parhar } 299*2dae2a74SNavdeep Parhar } 300*2dae2a74SNavdeep Parhar 301*2dae2a74SNavdeep Parhar int 302*2dae2a74SNavdeep Parhar sysctl_smt(SYSCTL_HANDLER_ARGS) 303*2dae2a74SNavdeep Parhar { 304*2dae2a74SNavdeep Parhar struct adapter *sc = arg1; 305*2dae2a74SNavdeep Parhar struct smt_data *smt = sc->smt; 306*2dae2a74SNavdeep Parhar struct smt_entry *e; 307*2dae2a74SNavdeep Parhar struct sbuf *sb; 308*2dae2a74SNavdeep Parhar int rc, i, header = 0; 309*2dae2a74SNavdeep Parhar 310*2dae2a74SNavdeep Parhar if (smt == NULL) 311*2dae2a74SNavdeep Parhar return (ENXIO); 312*2dae2a74SNavdeep Parhar 313*2dae2a74SNavdeep Parhar rc = 
sysctl_wire_old_buffer(req, 0); 314*2dae2a74SNavdeep Parhar if (rc != 0) 315*2dae2a74SNavdeep Parhar return (rc); 316*2dae2a74SNavdeep Parhar 317*2dae2a74SNavdeep Parhar sb = sbuf_new_for_sysctl(NULL, NULL, SMT_SIZE, req); 318*2dae2a74SNavdeep Parhar if (sb == NULL) 319*2dae2a74SNavdeep Parhar return (ENOMEM); 320*2dae2a74SNavdeep Parhar 321*2dae2a74SNavdeep Parhar e = &smt->smtab[0]; 322*2dae2a74SNavdeep Parhar for (i = 0; i < smt->smt_size; i++, e++) { 323*2dae2a74SNavdeep Parhar mtx_lock(&e->lock); 324*2dae2a74SNavdeep Parhar if (e->state == SMT_STATE_UNUSED) 325*2dae2a74SNavdeep Parhar goto skip; 326*2dae2a74SNavdeep Parhar 327*2dae2a74SNavdeep Parhar if (header == 0) { 328*2dae2a74SNavdeep Parhar sbuf_printf(sb, " Idx " 329*2dae2a74SNavdeep Parhar "Ethernet address State Users"); 330*2dae2a74SNavdeep Parhar header = 1; 331*2dae2a74SNavdeep Parhar } 332*2dae2a74SNavdeep Parhar sbuf_printf(sb, "\n%4u %02x:%02x:%02x:%02x:%02x:%02x " 333*2dae2a74SNavdeep Parhar "%c %5u", 334*2dae2a74SNavdeep Parhar e->idx, e->smac[0], e->smac[1], e->smac[2], 335*2dae2a74SNavdeep Parhar e->smac[3], e->smac[4], e->smac[5], 336*2dae2a74SNavdeep Parhar smt_state(e), atomic_load_acq_int(&e->refcnt)); 337*2dae2a74SNavdeep Parhar skip: 338*2dae2a74SNavdeep Parhar mtx_unlock(&e->lock); 339*2dae2a74SNavdeep Parhar } 340*2dae2a74SNavdeep Parhar 341*2dae2a74SNavdeep Parhar rc = sbuf_finish(sb); 342*2dae2a74SNavdeep Parhar sbuf_delete(sb); 343*2dae2a74SNavdeep Parhar 344*2dae2a74SNavdeep Parhar return (rc); 345*2dae2a74SNavdeep Parhar } 346*2dae2a74SNavdeep Parhar #endif 347