xref: /freebsd/sys/dev/cxgbe/tom/t4_tom_l2t.c (revision 0a0a697c73b3dca1e0e52884e4da993f351c0f9f)
/*-
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef TCP_OFFLOAD
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/fnv_hash.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sbuf.h>
#include <sys/syslog.h>		/* log() and LOG_ERR, used below */
#include <net/if.h>
#include <net/if_types.h>
#include <net/ethernet.h>
#include <net/if_vlan_var.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/toecore.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"

#define VLAN_NONE	0xfff

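/*
 * Take a reference on an L2T entry.  A 0 -> 1 transition means the entry is
 * no longer free, so decrement the table's free-entry count.
 */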
static inline void
l2t_hold(struct l2t_data *d, struct l2t_entry *e)
{

	if (atomic_fetchadd_int(&e->refcnt, 1) == 0)  /* 0 -> 1 transition */
		atomic_subtract_int(&d->nfree, 1);
}

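/*
 * Hash an address + ifindex to an index in the L2 table.  IPv4 addresses hash
 * into the lower half of the table and IPv6 addresses into the upper half.
 */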
static inline u_int
l2_hash(struct l2t_data *d, const struct sockaddr *sa, int ifindex)
{
	u_int hash, half = d->l2t_size / 2, start = 0;
	const void *key;
	size_t len;

	KASSERT(sa->sa_family == AF_INET || sa->sa_family == AF_INET6,
	    ("%s: sa %p has unexpected sa_family %d", __func__, sa,
	    sa->sa_family));

	if (sa->sa_family == AF_INET) {
		const struct sockaddr_in *sin = (const void *)sa;

		key = &sin->sin_addr;
		len = sizeof(sin->sin_addr);
	} else {
		const struct sockaddr_in6 *sin6 = (const void *)sa;

		key = &sin6->sin6_addr;
		len = sizeof(sin6->sin6_addr);
		start = half;
	}

	hash = fnv_32_buf(key, len, FNV1_32_INIT);
	hash = fnv_32_buf(&ifindex, sizeof(ifindex), hash);
	hash %= half;

	return (hash + start);
}

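/*
 * Returns 0 if the address in sa matches the address stored in the L2T entry,
 * non-zero otherwise (memcmp-style result).
 */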
static inline int
l2_cmp(const struct sockaddr *sa, struct l2t_entry *e)
{

	KASSERT(sa->sa_family == AF_INET || sa->sa_family == AF_INET6,
	    ("%s: sa %p has unexpected sa_family %d", __func__, sa,
	    sa->sa_family));

	if (sa->sa_family == AF_INET) {
		const struct sockaddr_in *sin = (const void *)sa;

		return (e->addr[0] != sin->sin_addr.s_addr);
	} else {
		const struct sockaddr_in6 *sin6 = (const void *)sa;

		return (memcmp(&e->addr[0], &sin6->sin6_addr, sizeof(e->addr)));
	}
}

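/*
 * Record the IPv4/IPv6 address in sa in the L2T entry and set the entry's
 * address-family flag to match.
 */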
static inline void
l2_store(const struct sockaddr *sa, struct l2t_entry *e)
{

	KASSERT(sa->sa_family == AF_INET || sa->sa_family == AF_INET6,
	    ("%s: sa %p has unexpected sa_family %d", __func__, sa,
	    sa->sa_family));

	if (sa->sa_family == AF_INET) {
		const struct sockaddr_in *sin = (const void *)sa;

		e->addr[0] = sin->sin_addr.s_addr;
		e->ipv6 = 0;
	} else {
		const struct sockaddr_in6 *sin6 = (const void *)sa;

		memcpy(&e->addr[0], &sin6->sin6_addr, sizeof(e->addr));
		e->ipv6 = 1;
	}
}

/*
 * Add a WR to an L2T entry's queue of work requests awaiting resolution.
 * Must be called with the entry's lock held.
 */
static inline void
arpq_enqueue(struct l2t_entry *e, struct wrqe *wr)
{
	mtx_assert(&e->lock, MA_OWNED);

	STAILQ_INSERT_TAIL(&e->wr_list, wr, link);
}

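/*
 * Transmit all work requests queued on the L2T entry, in order.  Must be
 * called with the entry's lock held.
 */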
static inline void
send_pending(struct adapter *sc, struct l2t_entry *e)
{
	struct wrqe *wr;

	mtx_assert(&e->lock, MA_OWNED);

	while ((wr = STAILQ_FIRST(&e->wr_list)) != NULL) {
		STAILQ_REMOVE_HEAD(&e->wr_list, link);
		t4_wrq_tx(sc, wr);
	}
}

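/*
 * Called for a work request whose L2 resolution failed.  The WR is not freed
 * here; it is reported as leaked.
 */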
static void
resolution_failed_for_wr(struct wrqe *wr)
{
	log(LOG_ERR, "%s: leaked work request %p, wr_len %d\n", __func__, wr,
	    wr->wr_len);

	/* free(wr, M_CXGBE); */
}

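/*
 * L2 resolution failed for the entry: drain its queue of pending work
 * requests.  Must be called with the entry's lock held.
 */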
static void
resolution_failed(struct l2t_entry *e)
{
	struct wrqe *wr;

	mtx_assert(&e->lock, MA_OWNED);

	while ((wr = STAILQ_FIRST(&e->wr_list)) != NULL) {
		STAILQ_REMOVE_HEAD(&e->wr_list, link);
		resolution_failed_for_wr(wr);
	}
}

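/*
 * Update an L2T entry with the outcome of an L2 resolution.  A NULL lladdr
 * marks the entry failed (if it was never resolved) or stale; otherwise the
 * dmac/vlan are recorded, pushed to the hardware L2 table if they changed,
 * and the entry becomes valid.  Must be called with the entry's lock held.
 */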
static void
update_entry(struct adapter *sc, struct l2t_entry *e, uint8_t *lladdr,
    uint16_t vtag)
{

	mtx_assert(&e->lock, MA_OWNED);

	/*
	 * The entry may be in active use (e->refcnt > 0) or not.  We update
	 * it even when it's not as this simplifies the case where we decide to
	 * reuse the entry later.
	 */

	if (lladdr == NULL &&
	    (e->state == L2T_STATE_RESOLVING || e->state == L2T_STATE_FAILED)) {
		/*
		 * Never got a valid L2 address for this one.  Just mark it as
		 * failed instead of removing it from the hash (for which we'd
		 * need to wlock the table).
		 */
		e->state = L2T_STATE_FAILED;
		resolution_failed(e);
		return;

	} else if (lladdr == NULL) {

		/* Valid or already-stale entry was deleted (or expired) */

		KASSERT(e->state == L2T_STATE_VALID ||
		    e->state == L2T_STATE_STALE,
		    ("%s: lladdr NULL, state %d", __func__, e->state));

		e->state = L2T_STATE_STALE;

	} else {

		if (e->state == L2T_STATE_RESOLVING ||
		    e->state == L2T_STATE_FAILED ||
		    memcmp(e->dmac, lladdr, ETHER_ADDR_LEN)) {

			/* unresolved -> resolved; or dmac changed */

			memcpy(e->dmac, lladdr, ETHER_ADDR_LEN);
			e->vlan = vtag;
			t4_write_l2e(sc, e, 1);
		}
		e->state = L2T_STATE_VALID;
	}
}

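/*
 * Build a sockaddr from the entry's address and ask the TOE layer to resolve
 * it.  Returns EWOULDBLOCK if resolution is still in progress, otherwise
 * updates the entry with the result.
 */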
static int
resolve_entry(struct adapter *sc, struct l2t_entry *e)
{
	struct tom_data *td = sc->tom_softc;
	struct toedev *tod = &td->tod;
	struct sockaddr_in sin = {0};
	struct sockaddr_in6 sin6 = {0};
	struct sockaddr *sa;
	uint8_t dmac[ETHER_ADDR_LEN];
	uint16_t vtag = VLAN_NONE;
	int rc;

	if (e->ipv6 == 0) {
		sin.sin_family = AF_INET;
		sin.sin_len = sizeof(struct sockaddr_in);
		sin.sin_addr.s_addr = e->addr[0];
		sa = (void *)&sin;
	} else {
		sin6.sin6_family = AF_INET6;
		sin6.sin6_len = sizeof(struct sockaddr_in6);
		memcpy(&sin6.sin6_addr, &e->addr[0], sizeof(e->addr));
		sa = (void *)&sin6;
	}

	rc = toe_l2_resolve(tod, e->ifp, sa, dmac, &vtag);
	if (rc == EWOULDBLOCK)
		return (rc);

	mtx_lock(&e->lock);
	update_entry(sc, e, rc == 0 ? dmac : NULL, vtag);
	mtx_unlock(&e->lock);

	return (rc);
}

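/*
 * Send a work request that needs L2T entry e.  The WR is transmitted right
 * away if the entry is valid (a stale entry is revalidated first), queued on
 * the entry if resolution is still in progress, or failed with EHOSTUNREACH
 * if the entry could not be resolved.
 */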
int
t4_l2t_send_slow(struct adapter *sc, struct wrqe *wr, struct l2t_entry *e)
{

again:
	switch (e->state) {
	case L2T_STATE_STALE:     /* entry is stale, kick off revalidation */

		if (resolve_entry(sc, e) != EWOULDBLOCK)
			goto again;	/* entry updated, re-examine state */

		/* Fall through */

	case L2T_STATE_VALID:     /* fast-path, send the packet on */

		t4_wrq_tx(sc, wr);
		return (0);

	case L2T_STATE_RESOLVING:
	case L2T_STATE_SYNC_WRITE:

		mtx_lock(&e->lock);
		if (e->state != L2T_STATE_SYNC_WRITE &&
		    e->state != L2T_STATE_RESOLVING) {
			/* state changed by the time we got here */
			mtx_unlock(&e->lock);
			goto again;
		}
		arpq_enqueue(e, wr);
		mtx_unlock(&e->lock);

		if (resolve_entry(sc, e) == EWOULDBLOCK)
			break;

		mtx_lock(&e->lock);
		if (e->state == L2T_STATE_VALID && !STAILQ_EMPTY(&e->wr_list))
			send_pending(sc, e);
		if (e->state == L2T_STATE_FAILED)
			resolution_failed(e);
		mtx_unlock(&e->lock);
		break;

	case L2T_STATE_FAILED:
		resolution_failed_for_wr(wr);
		return (EHOSTUNREACH);
	}

	return (0);
}

/*
 * Called when an L2T entry has no more users.  The entry is left in the hash
 * table since it is likely to be reused but we also bump nfree to indicate
 * that the entry can be reallocated for a different neighbor.  We also drop
 * the existing neighbor reference in case the neighbor is going away and is
 * waiting on our reference.
 *
 * Because entries can be reallocated to other neighbors once their ref count
 * drops to 0 we need to take the entry's lock to avoid races with a new
 * incarnation.
 */

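/*
 * CPL_L2T_WRITE_RPL handler installed while the TOM is active.  Runs the base
 * driver's handler and then, for a synchronous write, sends the work requests
 * that were waiting for the hardware L2 table update to complete.
 */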
static int
do_l2t_write_rpl2(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_l2t_write_rpl *rpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(rpl);
	unsigned int idx = tid % L2T_SIZE;
	int rc;

	rc = do_l2t_write_rpl(iq, rss, m);
	if (rc != 0)
		return (rc);

	if (tid & F_SYNC_WR) {
		struct l2t_entry *e = &sc->l2t->l2tab[idx - sc->vres.l2t.start];

		mtx_lock(&e->lock);
		if (e->state != L2T_STATE_SWITCHING) {
			send_pending(sc, e);
			e->state = L2T_STATE_VALID;
		}
		mtx_unlock(&e->lock);
	}

	return (0);
}

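/*
 * Install the TOM's CPL_L2T_WRITE_RPL handler in place of the base driver's.
 */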
void
t4_init_l2t_cpl_handlers(struct adapter *sc)
{

	t4_register_cpl_handler(sc, CPL_L2T_WRITE_RPL, do_l2t_write_rpl2);
}

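/*
 * Restore the base driver's CPL_L2T_WRITE_RPL handler.
 */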
void
t4_uninit_l2t_cpl_handlers(struct adapter *sc)
{

	t4_register_cpl_handler(sc, CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
}

/*
 * The TOE wants an L2 table entry that it can use to reach the next hop over
 * the specified port.  Produce such an entry - create one if needed.
 *
 * Note that the ifnet could be a pseudo-device like if_vlan, if_lagg, etc. on
 * top of the real cxgbe interface.
 */
struct l2t_entry *
t4_l2t_get(struct port_info *pi, struct ifnet *ifp, struct sockaddr *sa)
{
	struct l2t_entry *e;
	struct l2t_data *d = pi->adapter->l2t;
	u_int hash, smt_idx = pi->port_id;

	KASSERT(sa->sa_family == AF_INET || sa->sa_family == AF_INET6,
	    ("%s: sa %p has unexpected sa_family %d", __func__, sa,
	    sa->sa_family));

#ifndef VLAN_TAG
	if (ifp->if_type == IFT_L2VLAN)
		return (NULL);
#endif

	hash = l2_hash(d, sa, ifp->if_index);
	rw_wlock(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next) {
		if (l2_cmp(sa, e) == 0 && e->ifp == ifp &&
		    e->smt_idx == smt_idx) {
			l2t_hold(d, e);
			goto done;
		}
	}

	/* Need to allocate a new entry */
	e = t4_alloc_l2e(d);
	if (e) {
		mtx_lock(&e->lock);          /* avoid race with t4_l2t_free */
		e->next = d->l2tab[hash].first;
		d->l2tab[hash].first = e;

		e->state = L2T_STATE_RESOLVING;
		l2_store(sa, e);
		e->ifp = ifp;
		e->smt_idx = smt_idx;
		e->hash = hash;
		e->lport = pi->lport;
		atomic_store_rel_int(&e->refcnt, 1);
#ifdef VLAN_TAG
		if (ifp->if_type == IFT_L2VLAN)
			VLAN_TAG(ifp, &e->vlan);
		else
			e->vlan = VLAN_NONE;
#endif
		mtx_unlock(&e->lock);
	}
done:
	rw_wunlock(&d->lock);
	return (e);
}

/*
 * Called when the host's ARP layer makes a change to some entry that is loaded
 * into the HW L2 table.
 */
void
t4_l2_update(struct toedev *tod, struct ifnet *ifp, struct sockaddr *sa,
    uint8_t *lladdr, uint16_t vtag)
{
	struct adapter *sc = tod->tod_softc;
	struct l2t_entry *e;
	struct l2t_data *d = sc->l2t;
	u_int hash;

	KASSERT(d != NULL, ("%s: no L2 table", __func__));

	hash = l2_hash(d, sa, ifp->if_index);
	rw_rlock(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next) {
		if (l2_cmp(sa, e) == 0 && e->ifp == ifp) {
			mtx_lock(&e->lock);
			if (atomic_load_acq_int(&e->refcnt))
				goto found;
			e->state = L2T_STATE_STALE;
			mtx_unlock(&e->lock);
			break;
		}
	}
	rw_runlock(&d->lock);

	/*
	 * This is of no interest to us.  We've never had an offloaded
	 * connection to this destination, and we aren't attempting one right
	 * now.
	 */
	return;

found:
	rw_runlock(&d->lock);

	KASSERT(e->state != L2T_STATE_UNUSED,
	    ("%s: unused entry in the hash.", __func__));

	update_entry(sc, e, lladdr, vtag);
	mtx_unlock(&e->lock);
}
#endif