xref: /freebsd/sys/dev/cxgbe/tom/t4_tom_l2t.c (revision b3e7694832e81d7a904a10f525f8797b753bf0d3)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef TCP_OFFLOAD
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/fnv_hash.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sbuf.h>
#include <sys/taskqueue.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/ethernet.h>
#include <net/if_vlan_var.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"

#define VLAN_NONE	0xfff

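/*
 * Take a reference on an L2T entry.  The first reference on an entry also
 * removes it from the table's free count.
 */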
static inline void
l2t_hold(struct l2t_data *d, struct l2t_entry *e)
{

	if (atomic_fetchadd_int(&e->refcnt, 1) == 0)  /* 0 -> 1 transition */
		atomic_subtract_int(&d->nfree, 1);
}

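/*
 * Hash a destination address and interface index into an L2T bucket.  IPv4
 * entries hash into the lower half of the table, IPv6 entries into the upper
 * half.
 */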
static inline u_int
l2_hash(struct l2t_data *d, const struct sockaddr *sa, int ifindex)
{
	u_int hash, half = d->l2t_size / 2, start = 0;
	const void *key;
	size_t len;

	KASSERT(sa->sa_family == AF_INET || sa->sa_family == AF_INET6,
	    ("%s: sa %p has unexpected sa_family %d", __func__, sa,
	    sa->sa_family));

	if (sa->sa_family == AF_INET) {
		const struct sockaddr_in *sin = (const void *)sa;

		key = &sin->sin_addr;
		len = sizeof(sin->sin_addr);
	} else {
		const struct sockaddr_in6 *sin6 = (const void *)sa;

		key = &sin6->sin6_addr;
		len = sizeof(sin6->sin6_addr);
		start = half;
	}

	hash = fnv_32_buf(key, len, FNV1_32_INIT);
	hash = fnv_32_buf(&ifindex, sizeof(ifindex), hash);
	hash %= half;

	return (hash + start);
}

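/*
 * Compare a destination address with the one stored in an L2T entry.  Returns
 * 0 on a match, non-zero otherwise.
 */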
static inline int
l2_cmp(const struct sockaddr *sa, struct l2t_entry *e)
{

	KASSERT(sa->sa_family == AF_INET || sa->sa_family == AF_INET6,
	    ("%s: sa %p has unexpected sa_family %d", __func__, sa,
	    sa->sa_family));

	if (sa->sa_family == AF_INET) {
		const struct sockaddr_in *sin = (const void *)sa;

		return (e->addr[0] != sin->sin_addr.s_addr);
	} else {
		const struct sockaddr_in6 *sin6 = (const void *)sa;

		return (memcmp(&e->addr[0], &sin6->sin6_addr, sizeof(e->addr)));
	}
}

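/*
 * Record the destination address (IPv4 or IPv6) in an L2T entry.
 */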
static inline void
l2_store(const struct sockaddr *sa, struct l2t_entry *e)
{

	KASSERT(sa->sa_family == AF_INET || sa->sa_family == AF_INET6,
	    ("%s: sa %p has unexpected sa_family %d", __func__, sa,
	    sa->sa_family));

	if (sa->sa_family == AF_INET) {
		const struct sockaddr_in *sin = (const void *)sa;

		e->addr[0] = sin->sin_addr.s_addr;
		e->ipv6 = 0;
	} else {
		const struct sockaddr_in6 *sin6 = (const void *)sa;

		memcpy(&e->addr[0], &sin6->sin6_addr, sizeof(e->addr));
		e->ipv6 = 1;
	}
}

/*
 * Add a WR to an L2T entry's queue of work requests awaiting resolution.
 * Must be called with the entry's lock held.
 */
static inline void
arpq_enqueue(struct l2t_entry *e, struct wrqe *wr)
{
	mtx_assert(&e->lock, MA_OWNED);

	STAILQ_INSERT_TAIL(&e->wr_list, wr, link);
}

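/*
 * Transmit all work requests that were queued up while the entry was being
 * resolved.  Must be called with the entry's lock held.
 */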
static inline void
send_pending(struct adapter *sc, struct l2t_entry *e)
{
	struct wrqe *wr;

	mtx_assert(&e->lock, MA_OWNED);

	while ((wr = STAILQ_FIRST(&e->wr_list)) != NULL) {
		STAILQ_REMOVE_HEAD(&e->wr_list, link);
		t4_wrq_tx(sc, wr);
	}
}

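/*
 * L2 resolution failed for this entry.  Hand its queued work requests over to
 * the TOM reclaim task so that their resources can be released.
 */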
static void
resolution_failed(struct adapter *sc, struct l2t_entry *e)
{
	struct tom_data *td = sc->tom_softc;

	mtx_assert(&e->lock, MA_OWNED);

	mtx_lock(&td->unsent_wr_lock);
	STAILQ_CONCAT(&td->unsent_wr_list, &e->wr_list);
	mtx_unlock(&td->unsent_wr_lock);

	taskqueue_enqueue(taskqueue_thread, &td->reclaim_wr_resources);
}

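/*
 * Update an L2T entry with a new link-layer address (or the lack of one) and
 * VLAN tag, and program the change into the hardware when necessary.  Must be
 * called with the entry's lock held.
 */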
static void
update_entry(struct adapter *sc, struct l2t_entry *e, uint8_t *lladdr,
    uint16_t vtag)
{

	mtx_assert(&e->lock, MA_OWNED);

	/*
	 * The entry may be in active use (e->refcnt > 0) or not.  We update
	 * it even when it's not as this simplifies the case where we decide to
	 * reuse the entry later.
	 */

	if (lladdr == NULL &&
	    (e->state == L2T_STATE_RESOLVING || e->state == L2T_STATE_FAILED)) {
		/*
		 * Never got a valid L2 address for this one.  Just mark it as
		 * failed instead of removing it from the hash (for which we'd
		 * need to wlock the table).
		 */
		e->state = L2T_STATE_FAILED;
		resolution_failed(sc, e);
		return;

	} else if (lladdr == NULL) {

		/* Valid or already-stale entry was deleted (or expired) */

		KASSERT(e->state == L2T_STATE_VALID ||
		    e->state == L2T_STATE_STALE,
		    ("%s: lladdr NULL, state %d", __func__, e->state));

		e->state = L2T_STATE_STALE;

	} else {

		if (e->state == L2T_STATE_RESOLVING ||
		    e->state == L2T_STATE_FAILED ||
		    memcmp(e->dmac, lladdr, ETHER_ADDR_LEN)) {

			/* unresolved -> resolved; or dmac changed */

			memcpy(e->dmac, lladdr, ETHER_ADDR_LEN);
			e->vlan = vtag;
			t4_write_l2e(e, 1);
		}
		e->state = L2T_STATE_VALID;
	}
}

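/*
 * Ask the TOE layer to resolve the L2 address for this entry.  Returns
 * EWOULDBLOCK if resolution is still in progress; otherwise the entry is
 * updated with the outcome (success or failure) before returning.
 */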
static int
resolve_entry(struct adapter *sc, struct l2t_entry *e)
{
	struct tom_data *td = sc->tom_softc;
	struct toedev *tod = &td->tod;
	struct sockaddr_in sin = {0};
	struct sockaddr_in6 sin6 = {0};
	struct sockaddr *sa;
	uint8_t dmac[ETHER_HDR_LEN];
	uint16_t vtag;
	int rc;

	if (e->ipv6 == 0) {
		sin.sin_family = AF_INET;
		sin.sin_len = sizeof(struct sockaddr_in);
		sin.sin_addr.s_addr = e->addr[0];
		sa = (void *)&sin;
	} else {
		sin6.sin6_family = AF_INET6;
		sin6.sin6_len = sizeof(struct sockaddr_in6);
		memcpy(&sin6.sin6_addr, &e->addr[0], sizeof(e->addr));
		sa = (void *)&sin6;
	}

	vtag = EVL_MAKETAG(VLAN_NONE, 0, 0);
	rc = toe_l2_resolve(tod, e->ifp, sa, dmac, &vtag);
	if (rc == EWOULDBLOCK)
		return (rc);

	mtx_lock(&e->lock);
	update_entry(sc, e, rc == 0 ? dmac : NULL, vtag);
	mtx_unlock(&e->lock);

	return (rc);
}

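/*
 * Slow path for sending a work request via an L2T entry: handles entries that
 * are stale, still resolving, or waiting for a hardware write to complete.
 * WRs queued on an unresolved entry are sent (or reclaimed) once resolution
 * completes (or fails).
 */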
int
t4_l2t_send_slow(struct adapter *sc, struct wrqe *wr, struct l2t_entry *e)
{

again:
	switch (e->state) {
	case L2T_STATE_STALE:     /* entry is stale, kick off revalidation */

		resolve_entry(sc, e);

		/* Fall through */

	case L2T_STATE_VALID:     /* fast-path, send the packet on */

		t4_wrq_tx(sc, wr);
		return (0);

	case L2T_STATE_RESOLVING:
	case L2T_STATE_SYNC_WRITE:

		mtx_lock(&e->lock);
		if (e->state != L2T_STATE_SYNC_WRITE &&
		    e->state != L2T_STATE_RESOLVING) {
			/* state changed by the time we got here */
			mtx_unlock(&e->lock);
			goto again;
		}
		arpq_enqueue(e, wr);
		mtx_unlock(&e->lock);

		if (resolve_entry(sc, e) == EWOULDBLOCK)
			break;

		mtx_lock(&e->lock);
		if (e->state == L2T_STATE_VALID && !STAILQ_EMPTY(&e->wr_list))
			send_pending(sc, e);
		if (e->state == L2T_STATE_FAILED)
			resolution_failed(sc, e);
		mtx_unlock(&e->lock);
		break;

	case L2T_STATE_FAILED:
		return (EHOSTUNREACH);
	}

	return (0);
}

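/*
 * CPL handler for L2T_WRITE_RPL.  On a reply to a synchronous write, send any
 * work requests that were waiting for the hardware L2 entry to be programmed.
 */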
int
do_l2t_write_rpl2(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_l2t_write_rpl *rpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(rpl);
	unsigned int idx = tid % L2T_SIZE;

	if (__predict_false(rpl->status != CPL_ERR_NONE)) {
		log(LOG_ERR,
		    "Unexpected L2T_WRITE_RPL (%u) for entry at hw_idx %u\n",
		    rpl->status, idx);
		return (EINVAL);
	}

	if (tid & F_SYNC_WR) {
		struct l2t_entry *e = &sc->l2t->l2tab[idx - sc->vres.l2t.start];

		mtx_lock(&e->lock);
		if (e->state != L2T_STATE_SWITCHING) {
			send_pending(sc, e);
			e->state = L2T_STATE_VALID;
		}
		mtx_unlock(&e->lock);
	}

	return (0);
}

/*
 * The TOE wants an L2 table entry that it can use to reach the next hop over
 * the specified port.  Produce such an entry - create one if needed.
 *
 * Note that the ifnet could be a pseudo-device like if_vlan, if_lagg, etc. on
 * top of the real cxgbe interface.
 */
struct l2t_entry *
t4_l2t_get(struct port_info *pi, if_t ifp, struct sockaddr *sa)
{
	struct l2t_entry *e;
	struct adapter *sc = pi->adapter;
	struct l2t_data *d = sc->l2t;
	u_int hash, smt_idx = pi->port_id;
	uint16_t vid, pcp, vtag;

	KASSERT(sa->sa_family == AF_INET || sa->sa_family == AF_INET6,
	    ("%s: sa %p has unexpected sa_family %d", __func__, sa,
	    sa->sa_family));

	vid = VLAN_NONE;
	pcp = 0;
	if (if_gettype(ifp) == IFT_L2VLAN) {
		VLAN_TAG(ifp, &vid);
		VLAN_PCP(ifp, &pcp);
	} else if ((pcp = if_getpcp(ifp)) != IFNET_PCP_NONE)
		vid = 0;
	else
		pcp = 0;
	vtag = EVL_MAKETAG(vid, pcp, 0);

	hash = l2_hash(d, sa, if_getindex(ifp));
	rw_wlock(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next) {
		if (l2_cmp(sa, e) == 0 && e->ifp == ifp && e->vlan == vtag &&
		    e->smt_idx == smt_idx) {
			l2t_hold(d, e);
			goto done;
		}
	}

	/* Need to allocate a new entry */
	e = t4_alloc_l2e(d);
	if (e) {
		mtx_lock(&e->lock);          /* avoid race with t4_l2t_free */
		e->next = d->l2tab[hash].first;
		d->l2tab[hash].first = e;

		e->state = L2T_STATE_RESOLVING;
		l2_store(sa, e);
		e->ifp = ifp;
		e->smt_idx = smt_idx;
		e->hash = hash;
		e->lport = pi->lport;
		e->wrq = &sc->sge.ctrlq[pi->port_id];
		e->iqid = sc->sge.ofld_rxq[pi->vi[0].first_ofld_rxq].iq.abs_id;
		atomic_store_rel_int(&e->refcnt, 1);
		e->vlan = vtag;
		mtx_unlock(&e->lock);
	}
done:
	rw_wunlock(&d->lock);
	return (e);
}

/*
 * Called when the host's ARP layer makes a change to some entry that is loaded
 * into the HW L2 table.
 */
void
t4_l2_update(struct toedev *tod, if_t ifp, struct sockaddr *sa,
    uint8_t *lladdr, uint16_t vtag)
{
	struct adapter *sc = tod->tod_softc;
	struct l2t_entry *e;
	struct l2t_data *d = sc->l2t;
	u_int hash;

	KASSERT(d != NULL, ("%s: no L2 table", __func__));

	hash = l2_hash(d, sa, if_getindex(ifp));
	rw_rlock(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next) {
		if (l2_cmp(sa, e) == 0 && e->ifp == ifp) {
			mtx_lock(&e->lock);
			if (atomic_load_acq_int(&e->refcnt))
				goto found;
			e->state = L2T_STATE_STALE;
			mtx_unlock(&e->lock);
			break;
		}
	}
	rw_runlock(&d->lock);

	/*
	 * This is of no interest to us.  We've never had an offloaded
	 * connection to this destination, and we aren't attempting one right
	 * now.
	 */
	return;

found:
	rw_runlock(&d->lock);

	KASSERT(e->state != L2T_STATE_UNUSED,
	    ("%s: unused entry in the hash.", __func__));

	update_entry(sc, e, lladdr, vtag);
	mtx_unlock(&e->lock);
}
#endif	/* TCP_OFFLOAD */