xref: /illumos-gate/usr/src/uts/common/io/cxgbe/t4nex/t4_l2t.c (revision 8c69cc8fbe729fa7b091e901c4b50508ccc6bb33)
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * This file is part of the Chelsio T4 support code.
 *
 * Copyright (C) 2010-2013 Chelsio Communications.  All rights reserved.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
 * release for licensing terms and conditions.
 */

#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/atomic.h>
#include <sys/dlpi.h>
#include <sys/pattr.h>
#include <sys/strsubr.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/ethernet.h>
#include <inet/ip.h>
#include <inet/ipclassifier.h>
#include <inet/tcp.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "t4_l2t.h"

/* identifies sync vs async L2T_WRITE_REQs */
#define	S_SYNC_WR	12
#define	V_SYNC_WR(x)	((x) << S_SYNC_WR)
#define	F_SYNC_WR	V_SYNC_WR(1)
#define	VLAN_NONE	0xfff

/*
 * jhash.h: Jenkins hash support.
 *
 * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
 *
 * http://burtleburtle.net/bob/hash/
 *
 * These are the credits from Bob's sources:
 *
 * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
 * hash(), hash2(), hash3, and mix() are externally useful functions.
 * Routines to test the hash are included if SELF_TEST is defined.
 * You can use this free for any purpose.  It has no warranty.
 */

/* NOTE: Arguments are modified. */
#define	__jhash_mix(a, b, c) \
{ \
	a -= b; a -= c; a ^= (c>>13); \
	b -= c; b -= a; b ^= (a<<8); \
	c -= a; c -= b; c ^= (b>>13); \
	a -= b; a -= c; a ^= (c>>12); \
	b -= c; b -= a; b ^= (a<<16); \
	c -= a; c -= b; c ^= (b>>5); \
	a -= b; a -= c; a ^= (c>>3); \
	b -= c; b -= a; b ^= (a<<10); \
	c -= a; c -= b; c ^= (b>>15); \
}

/* The golden ratio: an arbitrary value */
#define	JHASH_GOLDEN_RATIO	0x9e3779b9

/*
 * Special ultra-optimized versions that know they are hashing exactly
 * 3, 2 or 1 word(s).
 *
 * NOTE: In particular the "c += length; __jhash_mix(a,b,c);" normally
 *	 done at the end is not done here.
 */
static inline u32
jhash_3words(u32 a, u32 b, u32 c, u32 initval)
{
	a += JHASH_GOLDEN_RATIO;
	b += JHASH_GOLDEN_RATIO;
	c += initval;

	__jhash_mix(a, b, c);

	return (c);
}

static inline u32
jhash_2words(u32 a, u32 b, u32 initval)
{
	return (jhash_3words(a, b, 0, initval));
}
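
/*
 * Illustrative sketch (not driver code): a key wider than two words can
 * be folded down to a single word before mixing in a qualifier such as
 * an interface index, which is exactly what ipv6_hash() does below under
 * TCP_OFFLOAD_ENABLE:
 *
 *	uint32_t fold = key[0] ^ key[1] ^ key[2] ^ key[3];
 *	uint32_t bucket = jhash_2words(fold, ifindex, 0) & L2T_HASH_MASK;
 */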

#ifndef container_of
#define	container_of(p, s, f) ((s *)(((uint8_t *)(p)) - offsetof(s, f)))
#endif

#if defined(__GNUC__)
#define	likely(x)	__builtin_expect((x), 1)
#define	unlikely(x)	__builtin_expect((x), 0)
#else
#define	likely(x)	(x)
#define	unlikely(x)	(x)
#endif /* defined(__GNUC__) */

enum {
	L2T_STATE_VALID,	/* entry is up to date */
	L2T_STATE_STALE,	/* entry may be used but needs revalidation */
	L2T_STATE_RESOLVING,	/* entry needs address resolution */
	L2T_STATE_SYNC_WRITE,	/* synchronous write of entry underway */

	/* when state is one of the below the entry is not hashed */
	L2T_STATE_SWITCHING,	/* entry is being used by a switching filter */
	L2T_STATE_UNUSED	/* entry not in use */
};

struct l2t_data {
	krwlock_t lock;
	u_int l2t_size;
	volatile uint_t nfree;	 /* number of free entries */
	struct l2t_entry *rover; /* starting point for next allocation */
	struct l2t_entry l2tab[];
};

#define	SA(x)		((struct sockaddr *)(x))
#define	SIN(x)		((struct sockaddr_in *)(x))
#define	SINADDR(x)	(SIN(x)->sin_addr.s_addr)
/* Atomically read a uint_t (add 0 and return the resulting value). */
#define	atomic_read(x)	atomic_add_int_nv(x, 0)

#ifdef TCP_OFFLOAD_ENABLE
/*
 * Allocate a free L2T entry.  Must be called with l2t_data.lock held
 * as a writer.
 */
static struct l2t_entry *
alloc_l2e(struct l2t_data *d)
{
	struct l2t_entry *end, *e, **p;

	ASSERT(rw_write_held(&d->lock));

	if (!atomic_read(&d->nfree))
		return (NULL);

	/* there's definitely a free entry */
	for (e = d->rover, end = &d->l2tab[d->l2t_size]; e != end; ++e)
		if (atomic_read(&e->refcnt) == 0)
			goto found;

	for (e = d->l2tab; atomic_read(&e->refcnt); ++e)
		/* */;
found:
	d->rover = e + 1;
	atomic_dec_uint(&d->nfree);

	/*
	 * The entry we found may be an inactive entry that is
	 * presently in the hash table.  We need to remove it.
	 */
	if (e->state < L2T_STATE_SWITCHING) {
		for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next) {
			if (*p == e) {
				*p = e->next;
				e->next = NULL;
				break;
			}
		}
	}

	e->state = L2T_STATE_UNUSED;
	return (e);
}

/*
 * Write an L2T entry.  Must be called with the entry locked.
 * The write may be synchronous or asynchronous.
 */
static int
write_l2e(adapter_t *sc, struct l2t_entry *e, int sync)
{
	mblk_t *m;
	struct cpl_l2t_write_req *req;
	int idx = e->idx + sc->vres.l2t.start;

	ASSERT(MUTEX_HELD(&e->lock));

	if ((m = allocb(sizeof (*req), BPRI_HI)) == NULL)
		return (ENOMEM);

	/* LINTED: E_BAD_PTR_CAST_ALIGN */
	req = (struct cpl_l2t_write_req *)m->b_wptr;

	/* LINTED: E_CONSTANT_CONDITION */
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, idx |
	    V_SYNC_WR(sync) | V_TID_QID(sc->sge.fwq.abs_id)));
	req->params = htons(V_L2T_W_PORT(e->lport) | V_L2T_W_NOREPLY(!sync));
	req->l2t_idx = htons(idx);
	req->vlan = htons(e->vlan);
	(void) memcpy(req->dst_mac, e->dmac, sizeof (req->dst_mac));

	m->b_wptr += sizeof (*req);

	(void) t4_mgmt_tx(sc, m);

	if (sync && e->state != L2T_STATE_SWITCHING)
		e->state = L2T_STATE_SYNC_WRITE;

	return (0);
}
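
/*
 * Usage sketch for the two write flavors: an asynchronous write
 * (sync == 0) sets V_L2T_W_NOREPLY(1) and completes without a reply,
 * while a synchronous write tags the request with F_SYNC_WR so the
 * firmware's CPL_L2T_WRITE_RPL is steered back to do_l2t_write_rpl(),
 * registered in t4_init_l2t() below:
 *
 *	(void) write_l2e(sc, e, 1);	sync; entry -> L2T_STATE_SYNC_WRITE
 *					(unless it is a switching entry)
 *	(void) write_l2e(sc, e, 0);	async; fire and forget
 */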
#endif

struct l2t_data *
t4_init_l2t(struct adapter *sc)
{
	int i, l2t_size;
	struct l2t_data *d;

	l2t_size = sc->vres.l2t.size;
	if (l2t_size < 1)
		return (NULL);

	d = kmem_zalloc(sizeof (*d) + l2t_size * sizeof (struct l2t_entry),
	    KM_SLEEP);
	if (!d)
		return (NULL);

	d->l2t_size = l2t_size;

	d->rover = d->l2tab;
	(void) atomic_swap_uint(&d->nfree, l2t_size);
	rw_init(&d->lock, NULL, RW_DRIVER, NULL);

	for (i = 0; i < l2t_size; i++) {
		/* LINTED: E_ASSIGN_NARROW_CONV */
		d->l2tab[i].idx = i;
		d->l2tab[i].state = L2T_STATE_UNUSED;
		mutex_init(&d->l2tab[i].lock, NULL, MUTEX_DRIVER, NULL);
		(void) atomic_swap_uint(&d->l2tab[i].refcnt, 0);
	}

#ifdef TCP_OFFLOAD_ENABLE
	(void) t4_register_cpl_handler(sc, CPL_L2T_WRITE_RPL,
	    do_l2t_write_rpl);
#endif

	return (d);
}

int
t4_free_l2t(struct l2t_data *d)
{
	int i;

	for (i = 0; i < d->l2t_size; i++)
		mutex_destroy(&d->l2tab[i].lock);
	rw_destroy(&d->lock);
	kmem_free(d, sizeof (*d) + d->l2t_size * sizeof (struct l2t_entry));

	return (0);
}
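
/*
 * Typical attach/detach pairing (a sketch; error handling elided):
 *
 *	sc->l2t = t4_init_l2t(sc);		at attach time
 *	...
 *	if (sc->l2t != NULL)
 *		(void) t4_free_l2t(sc->l2t);	at detach time
 */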

#ifdef TCP_OFFLOAD_ENABLE
static inline void
l2t_hold(struct l2t_data *d, struct l2t_entry *e)
{
	if (atomic_inc_uint_nv(&e->refcnt) == 1)  /* 0 -> 1 transition */
		atomic_dec_uint(&d->nfree);
}

/*
 * To avoid having to check address families we do not allow v4 and v6
 * neighbors to be on the same hash chain.  We keep v4 entries in the first
 * half of available hash buckets and v6 in the second.
 */
enum {
	L2T_SZ_HALF = L2T_SIZE / 2,
	L2T_HASH_MASK = L2T_SZ_HALF - 1
};

static inline unsigned int
arp_hash(const uint32_t *key, int ifindex)
{
	return (jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK);
}

static inline unsigned int
ipv6_hash(const uint32_t *key, int ifindex)
{
	uint32_t xor = key[0] ^ key[1] ^ key[2] ^ key[3];

	return (L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK));
}

static inline unsigned int
addr_hash(const uint32_t *addr, int addr_len, int ifindex)
{
	return (addr_len == 4 ? arp_hash(addr, ifindex) :
	    ipv6_hash(addr, ifindex));
}
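
/*
 * Sketch of the resulting split (assumes L2T_SIZE is a power of two so
 * L2T_HASH_MASK is a proper bit mask): an IPv4 key always lands in a
 * bucket in [0, L2T_SZ_HALF) and an IPv6 key in [L2T_SZ_HALF, L2T_SIZE),
 * so the two families can never share a chain:
 *
 *	uint32_t v4 = connp->conn_faddr_v4;
 *	int bucket = addr_hash(&v4, sizeof (v4), ifindex);
 */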

/*
 * Checks if an L2T entry is for the given IP/IPv6 address.  It does not check
 * whether the L2T entry and the address are of the same address family.
 * Callers ensure an address is only checked against L2T entries of the same
 * family, something made trivial by the separation of IP and IPv6 hash chains
 * mentioned above.  Returns 0 if there's a match, non-zero otherwise.
 */
static inline int
addreq(const struct l2t_entry *e, const uint32_t *addr)
{
	if (e->v6 != 0)
		return ((e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) |
		    (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]));
	return (e->addr[0] ^ addr[0]);
}

/*
 * Add a packet to an L2T entry's queue of packets awaiting resolution.
 * Must be called with the entry's lock held.
 */
static inline void
arpq_enqueue(struct l2t_entry *e, mblk_t *m)
{
	ASSERT(MUTEX_HELD(&e->lock));

	ASSERT(m->b_next == NULL);
	if (e->arpq_head != NULL)
		e->arpq_tail->b_next = m;
	else
		e->arpq_head = m;
	e->arpq_tail = m;
}
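
/*
 * The matching drain is not shown in this file; once an entry reaches
 * L2T_STATE_VALID the queued packets are pushed out elsewhere in the
 * driver.  The list discipline, for illustration only, would be:
 *
 *	mblk_t *m;
 *	while ((m = e->arpq_head) != NULL) {
 *		e->arpq_head = m->b_next;
 *		m->b_next = NULL;
 *		(void) t4_wrq_tx(sc, MBUF_EQ(m), m);
 *	}
 *	e->arpq_tail = NULL;
 */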

int
t4_l2t_send(struct adapter *sc, mblk_t *m, struct l2t_entry *e)
{
	sin_t *sin;
	ip2mac_t ip2m;

	ASSERT(e->v6 == 0);	/* only IPv4 entries are handled here */
again:
	switch (e->state) {
	case L2T_STATE_STALE:	/* entry is stale, kick off revalidation */
	/* Fall through */
	case L2T_STATE_VALID:	/* fast-path, send the packet on */
		(void) t4_wrq_tx(sc, MBUF_EQ(m), m);
		return (0);

	case L2T_STATE_RESOLVING:
	case L2T_STATE_SYNC_WRITE:
		mutex_enter(&e->lock);
		if (e->state != L2T_STATE_SYNC_WRITE &&
		    e->state != L2T_STATE_RESOLVING) {
			/* state changed by the time we got here */
			mutex_exit(&e->lock);
			goto again;
		}
		arpq_enqueue(e, m);
		mutex_exit(&e->lock);

		bzero(&ip2m, sizeof (ip2m));
		sin = (sin_t *)&ip2m.ip2mac_pa;
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = e->in_addr;
		ip2m.ip2mac_ifindex = e->ifindex;

		if (e->state == L2T_STATE_RESOLVING) {
			(void) ip2mac(IP2MAC_RESOLVE, &ip2m, t4_l2t_update, e,
			    0);
			if (ip2m.ip2mac_err == EINPROGRESS)
				ASSERT(0);
			else if (ip2m.ip2mac_err == 0)
				t4_l2t_update(&ip2m, e);
			else
				ASSERT(0);
		}
	}

	return (0);
}

/*
 * Called when an L2T entry has no more users.  The entry is left in the hash
 * table since it is likely to be reused but we also bump nfree to indicate
 * that the entry can be reallocated for a different neighbor.  We also drop
 * the existing neighbor reference in case the neighbor is going away and is
 * waiting on our reference.
 *
 * Because entries can be reallocated to other neighbors once their ref count
 * drops to 0 we need to take the entry's lock to avoid races with a new
 * incarnation.
 */
static void
t4_l2e_free(struct l2t_entry *e)
{
	struct l2t_data *d;

	mutex_enter(&e->lock);
	/* LINTED: E_NOP_IF_STMT */
	if (atomic_read(&e->refcnt) == 0) {  /* hasn't been recycled */
		/*
		 * Don't need to worry about the arpq, an L2T entry can't be
		 * released if any packets are waiting for resolution as we
		 * need to be able to communicate with the device to close a
		 * connection.
		 */
	}
	mutex_exit(&e->lock);

	d = container_of(e, struct l2t_data, l2tab[e->idx]);
	atomic_inc_uint(&d->nfree);
}

void
t4_l2t_release(struct l2t_entry *e)
{
	if (atomic_dec_uint_nv(&e->refcnt) == 0)
		t4_l2e_free(e);
}

/* ARGSUSED */
int
do_l2t_write_rpl(struct sge_iq *iq, const struct rss_header *rss, mblk_t *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_l2t_write_rpl *rpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(rpl);
	unsigned int idx = tid % L2T_SIZE;

	if (unlikely(rpl->status != CPL_ERR_NONE)) {
		cxgb_printf(sc->dip, CE_WARN,
		    "Unexpected L2T_WRITE_RPL status %u for entry %u",
		    rpl->status, idx);
		return (-EINVAL);
	}

	return (0);
}

/*
 * The TOE wants an L2 table entry that it can use to reach the next hop over
 * the specified port.  Produce such an entry - create one if needed.
 *
 * Note that the ifnet could be a pseudo-device like if_vlan, if_lagg, etc. on
 * top of the real cxgbe interface.
 */
struct l2t_entry *
t4_l2t_get(struct port_info *pi, conn_t *connp)
{
	struct l2t_entry *e;
	struct l2t_data *d = pi->adapter->l2t;
	int addr_len;
	uint32_t *addr;
	int hash;
	int index =
	    connp->conn_ixa->ixa_ire->ire_ill->ill_phyint->phyint_ifindex;
	unsigned int smt_idx = pi->port_id;

	addr = (uint32_t *)&connp->conn_faddr_v4;
	addr_len = sizeof (connp->conn_faddr_v4);

	hash = addr_hash(addr, addr_len, index);

	rw_enter(&d->lock, RW_WRITER);
	for (e = d->l2tab[hash].first; e; e = e->next) {
		if (!addreq(e, addr) && e->smt_idx == smt_idx) {
			l2t_hold(d, e);
			goto done;
		}
	}

	/* Need to allocate a new entry */
	e = alloc_l2e(d);
	if (e != NULL) {
		mutex_enter(&e->lock);	/* avoid race with t4_l2e_free */
		e->state = L2T_STATE_RESOLVING;
		(void) memcpy(e->addr, addr, addr_len);
		e->in_addr = connp->conn_faddr_v4;
		e->ifindex = index;
		/* LINTED: E_ASSIGN_NARROW_CONV */
		e->smt_idx = smt_idx;
		/* LINTED: E_ASSIGN_NARROW_CONV */
		e->hash = hash;
		e->lport = pi->lport;
		e->arpq_head = e->arpq_tail = NULL;
		e->v6 = (addr_len == 16);
		e->sc = pi->adapter;
		(void) atomic_swap_uint(&e->refcnt, 1);
		e->vlan = VLAN_NONE;
		e->next = d->l2tab[hash].first;
		d->l2tab[hash].first = e;
		mutex_exit(&e->lock);
	} else {
		ASSERT(0);
	}

done:
	rw_exit(&d->lock);
	return (e);
}
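
/*
 * Lifecycle sketch for an offloaded connection (names as used above;
 * error handling elided):
 *
 *	struct l2t_entry *e = t4_l2t_get(pi, connp);	takes a reference
 *	...		entry resolves, packets flow via t4_l2t_send()
 *	if (e != NULL)
 *		t4_l2t_release(e);	drops the reference; the entry
 *					stays hashed for cheap reuse
 */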

/*
 * Called when the host's neighbor layer makes a change to some entry that is
 * loaded into the HW L2 table.
 */
void
t4_l2t_update(ip2mac_t *ip2macp, void *arg)
{
	struct l2t_entry *e = (struct l2t_entry *)arg;
	struct adapter *sc = e->sc;
	uchar_t *cp;

	if (ip2macp->ip2mac_err != 0) {
		ASSERT(0); /* Don't know what to do. Needs to be investigated */
	}

	mutex_enter(&e->lock);
	if (atomic_read(&e->refcnt) != 0)
		goto found;
	e->state = L2T_STATE_STALE;
	mutex_exit(&e->lock);

	/* The TOE has no interest in this LLE */
	return;

found:
	if (atomic_read(&e->refcnt) != 0) {
		/* Entry is referenced by at least 1 offloaded connection. */
		cp = (uchar_t *)LLADDR(&ip2macp->ip2mac_ha);
		bcopy(cp, e->dmac, 6);
		(void) write_l2e(sc, e, 1);
		e->state = L2T_STATE_VALID;
	}
	mutex_exit(&e->lock);
}
#endif