/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_vlan_var.h>
#include <net/if_dl.h>
#include <net/if_llatbl.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>

#include "common/common.h"
#include "common/jhash.h"
#include "common/t4_msg.h"
#include "offload.h"
#include "t4_l2t.h"

/* identifies sync vs async L2T_WRITE_REQs */
#define S_SYNC_WR    12
#define V_SYNC_WR(x) ((x) << S_SYNC_WR)
#define F_SYNC_WR    V_SYNC_WR(1)
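/*
 * These follow the usual Chelsio S_/V_/F_ field convention: S_SYNC_WR is the
 * bit offset within the TID, V_SYNC_WR(x) shifts a value into place, and
 * F_SYNC_WR is the single-bit flag form (1 << 12).
 */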

enum {
	L2T_STATE_VALID,	/* entry is up to date */
	L2T_STATE_STALE,	/* entry may be used but needs revalidation */
	L2T_STATE_RESOLVING,	/* entry needs address resolution */
	L2T_STATE_SYNC_WRITE,	/* synchronous write of entry underway */

	/* when state is one of the below the entry is not hashed */
	L2T_STATE_SWITCHING,	/* entry is being used by a switching filter */
	L2T_STATE_UNUSED	/* entry not in use */
};

struct l2t_data {
	struct rwlock lock;
	volatile int nfree;	/* number of free entries */
	struct l2t_entry *rover;/* starting point for next allocation */
	struct l2t_entry l2tab[L2T_SIZE];
};

/*
 * Module locking notes:  There is a RW lock protecting the L2 table as a
 * whole plus a spinlock per L2T entry.  Entry lookups and allocations happen
 * under the protection of the table lock, individual entry changes happen
 * while holding that entry's spinlock.  The table lock nests outside the
 * entry locks.  Allocations of new entries take the table lock as writers so
 * no other lookups can happen while allocating new entries.  Entry updates
 * take the table lock as readers so multiple entries can be updated in
 * parallel.  An L2T entry can be dropped by decrementing its reference count
 * and therefore can happen in parallel with entry allocation but no entry
 * can change state or increment its ref count during allocation as both of
 * these perform lookups.
 *
 * Note: We do not take references to ifnets in this module because both
 * the TOE and the sockets already hold references to the interfaces and the
 * lifetime of an L2T entry is fully contained in the lifetime of the TOE.
 */
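/* Extract the 3-bit 802.1p priority from the 16-bit VLAN TCI in e->vlan. */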
static inline unsigned int
vlan_prio(const struct l2t_entry *e)
{
	return (e->vlan >> 13);
}

static inline void
l2t_hold(struct l2t_data *d, struct l2t_entry *e)
{
	if (atomic_fetchadd_int(&e->refcnt, 1) == 0)  /* 0 -> 1 transition */
		atomic_add_int(&d->nfree, -1);
}

/*
 * To avoid having to check address families we do not allow v4 and v6
 * neighbors to be on the same hash chain.  We keep v4 entries in the first
 * half of available hash buckets and v6 in the second.
 */
enum {
	L2T_SZ_HALF = L2T_SIZE / 2,
	L2T_HASH_MASK = L2T_SZ_HALF - 1
};
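/* Note: masking with L2T_SZ_HALF - 1 assumes L2T_SZ_HALF is a power of 2. */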

static inline unsigned int
arp_hash(const uint32_t *key, int ifindex)
{
	return (jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK);
}

static inline unsigned int
ipv6_hash(const uint32_t *key, int ifindex)
{
	uint32_t xor = key[0] ^ key[1] ^ key[2] ^ key[3];

	return (L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK));
}

static inline unsigned int
addr_hash(const uint32_t *addr, int addr_len, int ifindex)
{
	return (addr_len == 4 ? arp_hash(addr, ifindex) :
	    ipv6_hash(addr, ifindex));
}
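
/*
 * Illustrative use (a sketch, not code from this driver): an IPv4 next hop
 * keyed by its 32-bit address and the outgoing interface's index would hash
 * into the first half of the table, e.g.
 *
 *	hash = addr_hash(&ip4, 4, ifp->if_index);
 *
 * while a 16-byte IPv6 address (addr_len == 16) lands in the second half.
 */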

/*
 * Checks if an L2T entry is for the given IP/IPv6 address.  It does not check
 * whether the L2T entry and the address are of the same address family.
 * Callers ensure an address is only checked against L2T entries of the same
 * family, something made trivial by the separation of IP and IPv6 hash chains
 * mentioned above.  Returns 0 if there's a match and a non-zero value
 * otherwise; OR-ing the XOR terms together keeps the comparison branch-free.
 */
static inline int
addreq(const struct l2t_entry *e, const uint32_t *addr)
{
	if (e->v6)
		return ((e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) |
		    (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]));
	return (e->addr[0] ^ addr[0]);
}

/*
 * Write an L2T entry.  Must be called with the entry locked (XXX: really?).
 * The write may be synchronous or asynchronous.
 */
static int
write_l2e(struct adapter *sc, struct l2t_entry *e, int sync)
{
	struct mbuf *m;
	struct cpl_l2t_write_req *req;

	if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
		return (ENOMEM);

	req = mtod(m, struct cpl_l2t_write_req *);
	m->m_pkthdr.len = m->m_len = sizeof(*req);

	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx |
	    V_SYNC_WR(sync) | V_TID_QID(sc->sge.fwq.abs_id)));
	req->params = htons(V_L2T_W_PORT(e->lport) | V_L2T_W_NOREPLY(!sync));
	req->l2t_idx = htons(e->idx);
	req->vlan = htons(e->vlan);
	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));

	t4_mgmt_tx(sc, m);

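	/*
	 * A sync write asks the firmware for a CPL_L2T_WRITE_RPL (note
	 * V_L2T_W_NOREPLY(!sync) above, and the reply is steered to the
	 * firmware queue via V_TID_QID).  The reply handler, elsewhere in
	 * the driver, moves the entry out of L2T_STATE_SYNC_WRITE.
	 */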
	if (sync && e->state != L2T_STATE_SWITCHING)
		e->state = L2T_STATE_SYNC_WRITE;

	return (0);
}

/*
 * Add a packet to an L2T entry's queue of packets awaiting resolution.
 * Must be called with the entry's lock held.
 */
static inline void
arpq_enqueue(struct l2t_entry *e, struct mbuf *m)
{
	mtx_assert(&e->lock, MA_OWNED);

	m->m_next = NULL;
	if (e->arpq_head)
		e->arpq_tail->m_next = m;
	else
		e->arpq_head = m;
	e->arpq_tail = m;
}
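
/*
 * The arpq is a singly-linked mbuf list chained through m_next.  The queued
 * packets are finally transmitted once the entry reaches L2T_STATE_VALID;
 * that draining happens in the resolution paths elsewhere in the driver.
 */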

/*
 * Allocate a free L2T entry.  Must be called with l2t_data.lock held as a
 * writer.
 */
static struct l2t_entry *
alloc_l2e(struct l2t_data *d)
{
	struct l2t_entry *end, *e, **p;

	rw_assert(&d->lock, RA_WLOCKED);

	if (!atomic_load_acq_int(&d->nfree))
		return (NULL);

	/* there's definitely a free entry */
	for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e)
		if (atomic_load_acq_int(&e->refcnt) == 0)
			goto found;

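	/* wrap around to the start of the table; nfree > 0 guarantees a hit */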
	for (e = d->l2tab; atomic_load_acq_int(&e->refcnt); ++e)
		continue;
found:
	d->rover = e + 1;
	atomic_add_int(&d->nfree, -1);

	/*
	 * The entry we found may be an inactive entry that is
	 * presently in the hash table.  We need to remove it.
	 */
	if (e->state < L2T_STATE_SWITCHING) {
		for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next) {
			if (*p == e) {
				*p = e->next;
				e->next = NULL;
				break;
			}
		}
	}

	e->state = L2T_STATE_UNUSED;
	return (e);
}

/*
 * Called when an L2T entry has no more users.  The entry is left in the hash
 * table since it is likely to be reused but we also bump nfree to indicate
 * that the entry can be reallocated for a different neighbor.  We also drop
 * the existing neighbor reference in case the neighbor is going away and is
 * waiting on our reference.
 *
 * Because entries can be reallocated to other neighbors once their ref count
 * drops to 0 we need to take the entry's lock to avoid races with a new
 * incarnation.
 */
static void
t4_l2e_free(struct l2t_entry *e)
{
	struct llentry *lle = NULL;
	struct l2t_data *d;

	mtx_lock(&e->lock);
	if (atomic_load_acq_int(&e->refcnt) == 0) {  /* hasn't been recycled */
		lle = e->lle;
		e->lle = NULL;
		/*
		 * Don't need to worry about the arpq, an L2T entry can't be
		 * released if any packets are waiting for resolution as we
		 * need to be able to communicate with the device to close a
		 * connection.
		 */
	}
	mtx_unlock(&e->lock);

	d = container_of(e, struct l2t_data, l2tab[e->idx]);
	atomic_add_int(&d->nfree, 1);

	if (lle)
		LLE_FREE(lle);
}

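/*
 * Drop one reference to an entry; on the final release (refcnt 1 -> 0) the
 * entry is freed for reuse via t4_l2e_free above.  Pairs with l2t_hold or
 * with the initial reference taken at allocation.
 */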
void
t4_l2t_release(struct l2t_entry *e)
{
	if (atomic_fetchadd_int(&e->refcnt, -1) == 1)
		t4_l2e_free(e);
}

/*
 * Allocate an L2T entry for use by a switching rule.  Such entries need to be
 * explicitly freed and while busy they are not on any hash chain, so normal
 * address resolution updates do not see them.
 */
struct l2t_entry *
t4_l2t_alloc_switching(struct l2t_data *d)
{
	struct l2t_entry *e;

	rw_wlock(&d->lock);	/* alloc_l2e requires the lock as a writer */
	e = alloc_l2e(d);
	if (e) {
		mtx_lock(&e->lock);          /* avoid race with t4_l2e_free */
		e->state = L2T_STATE_SWITCHING;
		atomic_store_rel_int(&e->refcnt, 1);
		mtx_unlock(&e->lock);
	}
	rw_wunlock(&d->lock);
	return (e);
}

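/*
 * Illustrative setup of a switching entry (a sketch, not code from this
 * driver; vlan, port, and mac stand for caller-supplied values):
 *
 *	e = t4_l2t_alloc_switching(sc->l2t);
 *	if (e == NULL)
 *		return (ENOMEM);
 *	rc = t4_l2t_set_switching(sc, e, vlan, port, mac);
 *	...
 *	t4_l2t_release(e);	// when the filter is done with the entry
 */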
/*
 * Sets/updates the contents of a switching L2T entry that has been allocated
 * with an earlier call to @t4_l2t_alloc_switching.
 */
int
t4_l2t_set_switching(struct adapter *sc, struct l2t_entry *e, uint16_t vlan,
    uint8_t port, uint8_t *eth_addr)
{
	e->vlan = vlan;
	e->lport = port;
	memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
	return (write_l2e(sc, e, 0));
}

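/*
 * flags is passed straight through to malloc(9), so it should be M_NOWAIT
 * or M_WAITOK.
 */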
struct l2t_data *
t4_init_l2t(int flags)
{
	int i;
	struct l2t_data *d;

	d = malloc(sizeof(*d), M_CXGBE, M_ZERO | flags);
	if (!d)
		return (NULL);

	d->rover = d->l2tab;
	atomic_store_rel_int(&d->nfree, L2T_SIZE);
	rw_init(&d->lock, "L2T");

	for (i = 0; i < L2T_SIZE; i++) {
		d->l2tab[i].idx = i;
		d->l2tab[i].state = L2T_STATE_UNUSED;
		mtx_init(&d->l2tab[i].lock, "L2T_E", NULL, MTX_DEF);
		atomic_store_rel_int(&d->l2tab[i].refcnt, 0);
	}

	return (d);
}

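/*
 * Tear down the table.  The caller is expected to have released all entries
 * first; nothing here checks for or drains entries still in use.
 */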
int
t4_free_l2t(struct l2t_data *d)
{
	int i;

	for (i = 0; i < L2T_SIZE; i++)
		mtx_destroy(&d->l2tab[i].lock);
	rw_destroy(&d->lock);
	free(d, M_CXGBE);

	return (0);
}