/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/types.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sx.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_types.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "t4_ioctl.h"

/*
 * Locking notes
 * =============
 *
 * An interface cloner is registered during mod_load and it can be used to
 * create or destroy the tracing ifnet for an adapter at any time.  It is
 * possible for the cloned interface to outlive the adapter (the adapter
 * disappears in t4_detach but the tracing ifnet may live until mod_unload,
 * when removal of the cloner finally destroys any remaining cloned
 * interfaces).  When tracing filters are active, this ifnet is also receiving
 * data.  There are potential races between ifnet create, ifnet destroy, ifnet
 * rx, ifnet ioctl, cxgbe_detach/t4_detach, and mod_unload.
 *
 * a) The driver selects an iq for tracing (sc->traceq) inside a synch op.  The
 *    iq is destroyed inside a synch op too (and sc->traceq updated).
 * b) The cloner looks for an adapter that matches the name of the ifnet it's
 *    been asked to create, starts a synch op on that adapter, and proceeds only
 *    if the adapter has a tracing iq.
 * c) The cloned ifnet and the adapter are coupled to each other via
 *    ifp->if_softc and sc->ifp.  These can be modified only with both the
 *    global t4_trace_lock sx and the sc->ifp_lock mutex held.  Holding either
 *    of these will prevent any change.
 *
 * The order in which all the locks involved should be acquired is:
 * t4_list_lock
 * adapter lock
 * (begin synch op and let go of the above two)
 * t4_trace_lock
 * sc->ifp_lock
 */
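
/*
 * For example, t4_cloner_create below follows this order: match_name (run
 * via t4_iterate) begins a synch op on the matched adapter, and only then
 * does the cloner take t4_trace_lock followed by sc->ifp_lock to couple the
 * new ifnet to the adapter.
 */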

static struct sx t4_trace_lock;
static const char *t4_cloner_name = "tXnex";
static struct if_clone *t4_cloner;

/* tracer ifnet routines.  mostly no-ops. */
static void tracer_init(void *);
static int tracer_ioctl(if_t, unsigned long, caddr_t);
static int tracer_transmit(if_t, struct mbuf *);
static void tracer_qflush(if_t);
static int tracer_media_change(if_t);
static void tracer_media_status(if_t, struct ifmediareq *);

/* match name (request/response) */
struct match_rr {
	const char *name;
	int lock;	/* set to 1 to have the matched sc returned locked. */
	struct adapter *sc;
	int rc;
};

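/*
 * t4_iterate callback: find the adapter whose nexus device name matches
 * mrr->name and, if mrr->lock is set, begin a synchronized op on it before
 * handing it back in mrr->sc.
 */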
static void
match_name(struct adapter *sc, void *arg)
{
	struct match_rr *mrr = arg;

	if (strcmp(device_get_nameunit(sc->dev), mrr->name) != 0)
		return;

	KASSERT(mrr->sc == NULL, ("%s: multiple matches (%p, %p) for %s",
	    __func__, mrr->sc, sc, mrr->name));

	mrr->sc = sc;
	if (mrr->lock)
		mrr->rc = begin_synchronized_op(mrr->sc, NULL, 0, "t4clon");
	else
		mrr->rc = 0;
}

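/*
 * An ifnet name is acceptable to the cloner only if it is the name of a
 * nexus device: "t4nex", "t5nex", or "t6nex" followed by a unit digit.
 */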
static int
t4_cloner_match(struct if_clone *ifc, const char *name)
{

	if (strncmp(name, "t4nex", 5) != 0 &&
	    strncmp(name, "t5nex", 5) != 0 &&
	    strncmp(name, "t6nex", 5) != 0)
		return (0);
	if (name[5] < '0' || name[5] > '9')
		return (0);
	return (1);
}

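/*
 * Create the tracing ifnet (e.g. via "ifconfig t5nex0 create").  The adapter
 * with the matching nexus name must exist and must already have a tracing
 * queue (sc->traceq); the new ifnet is then coupled to it under t4_trace_lock
 * and sc->ifp_lock.
 */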
static int
t4_cloner_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
{
	struct match_rr mrr;
	struct adapter *sc;
	if_t ifp;
	int rc, unit;
	const uint8_t lla[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};

	mrr.name = name;
	mrr.lock = 1;
	mrr.sc = NULL;
	mrr.rc = ENOENT;
	t4_iterate(match_name, &mrr);

	if (mrr.rc != 0)
		return (mrr.rc);
	sc = mrr.sc;

	KASSERT(sc != NULL, ("%s: name (%s) matched but softc is NULL",
	    __func__, name));
	ASSERT_SYNCHRONIZED_OP(sc);

	sx_xlock(&t4_trace_lock);

	if (sc->ifp != NULL) {
		rc = EEXIST;
		goto done;
	}
	if (sc->traceq < 0) {
		rc = EAGAIN;
		goto done;
	}

	unit = -1;
	rc = ifc_alloc_unit(ifc, &unit);
	if (rc != 0)
		goto done;

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		ifc_free_unit(ifc, unit);
		rc = ENOMEM;
		goto done;
	}

	/* Note that if_xname is not <if_dname><if_dunit>. */
	if_initname(ifp, name, unit);
	if_setdname(ifp, t4_cloner_name);
	if_setinitfn(ifp, tracer_init);
	if_setflags(ifp, IFF_SIMPLEX | IFF_DRV_RUNNING);
	if_setioctlfn(ifp, tracer_ioctl);
	if_settransmitfn(ifp, tracer_transmit);
	if_setqflushfn(ifp, tracer_qflush);
	if_setcapabilities(ifp, IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU);
	ifmedia_init(&sc->media, IFM_IMASK, tracer_media_change,
	    tracer_media_status);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_FDX | IFM_NONE, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_FDX | IFM_NONE);
	ether_ifattach(ifp, lla);

	mtx_lock(&sc->ifp_lock);
	if_setsoftc(ifp, sc);
	sc->ifp = ifp;
	mtx_unlock(&sc->ifp_lock);
done:
	sx_xunlock(&t4_trace_lock);
	end_synchronized_op(sc, 0);
	return (rc);
}

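/*
 * Destroy a cloned tracing ifnet: decouple it from its adapter (if the
 * adapter is still around) and then detach and free the ifnet itself.
 */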
static int
t4_cloner_destroy(struct if_clone *ifc, if_t ifp)
{
	struct adapter *sc;
	int unit = if_getdunit(ifp);

	sx_xlock(&t4_trace_lock);
	sc = if_getsoftc(ifp);
	if (sc != NULL) {
		mtx_lock(&sc->ifp_lock);
		sc->ifp = NULL;
		if_setsoftc(ifp, NULL);
		mtx_unlock(&sc->ifp_lock);
		ifmedia_removeall(&sc->media);
	}
	ether_ifdetach(ifp);
	if_free(ifp);
	ifc_free_unit(ifc, unit);
	sx_xunlock(&t4_trace_lock);

	return (0);
}

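/* Called at mod_load time to register the interface cloner. */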
void
t4_tracer_modload(void)
{

	sx_init(&t4_trace_lock, "T4/T5 tracer lock");
	t4_cloner = if_clone_advanced(t4_cloner_name, 0, t4_cloner_match,
	    t4_cloner_create, t4_cloner_destroy);
}

void
t4_tracer_modunload(void)
{

	if (t4_cloner != NULL) {
		/*
		 * The module is being unloaded so the nexus drivers have
		 * detached.  The tracing interfaces cannot outlive the nexus
		 * (ifp->if_softc is the nexus) and must have been destroyed
		 * already.  XXX: but if_clone is opaque to us and we can't
		 * assert LIST_EMPTY(&t4_cloner->ifc_iflist) at this time.
		 */
		if_clone_detach(t4_cloner);
	}
	sx_destroy(&t4_trace_lock);
}

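/*
 * Break the coupling between the adapter and its tracing ifnet when the
 * adapter is going away; the cloned ifnet itself may outlive the adapter and
 * is destroyed later by the cloner.
 */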
void
t4_tracer_port_detach(struct adapter *sc)
{

	sx_xlock(&t4_trace_lock);
	if (sc->ifp != NULL) {
		mtx_lock(&sc->ifp_lock);
		if_setsoftc(sc->ifp, NULL);
		sc->ifp = NULL;
		mtx_unlock(&sc->ifp_lock);
	}
	ifmedia_removeall(&sc->media);
	sx_xunlock(&t4_trace_lock);
}

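/*
 * Report the first valid tracer at an index >= t->idx.  An idx of 0xff in
 * the response means no valid tracer was found at or after the requested
 * index.
 */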
int
t4_get_tracer(struct adapter *sc, struct t4_tracer *t)
{
	int rc, i, enabled;
	struct trace_params tp;

	if (t->idx >= NTRACE) {
		t->idx = 0xff;
		t->enabled = 0;
		t->valid = 0;
		return (0);
	}

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4gett");
	if (rc)
		return (rc);

	if (hw_off_limits(sc)) {
		rc = ENXIO;
		goto done;
	}

	for (i = t->idx; i < NTRACE; i++) {
		if (isset(&sc->tracer_valid, i)) {
			t4_get_trace_filter(sc, &tp, i, &enabled);
			t->idx = i;
			t->enabled = enabled;
			t->valid = 1;
			memcpy(&t->tp.data[0], &tp.data[0], sizeof(t->tp.data));
			memcpy(&t->tp.mask[0], &tp.mask[0], sizeof(t->tp.mask));
			t->tp.snap_len = tp.snap_len;
			t->tp.min_len = tp.min_len;
			t->tp.skip_ofst = tp.skip_ofst;
			t->tp.skip_len = tp.skip_len;
			t->tp.invert = tp.invert;

			/* convert channel to port iff 0 <= channel < 8. */
			if (tp.port < 4)
				t->tp.port = sc->chan_map[tp.port];
			else if (tp.port < 8)
				t->tp.port = sc->chan_map[tp.port - 4] + 4;
			else
				t->tp.port = tp.port;

			goto done;
		}
	}

	t->idx = 0xff;
	t->enabled = 0;
	t->valid = 0;
done:
	end_synchronized_op(sc, LOCK_HELD);

	return (rc);
}

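/*
 * Validate and program trace filter t->idx.  A request with t->valid == 0
 * only enables or disables a previously configured filter.  The MPS trace
 * engine (A_MPS_TRC_CFG) is switched on when the first filter is enabled and
 * off again when the last one is disabled.
 */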
int
t4_set_tracer(struct adapter *sc, struct t4_tracer *t)
{
	int rc;
	struct trace_params tp, *tpp;

	if (t->idx >= NTRACE)
		return (EINVAL);

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4sett");
	if (rc)
		return (rc);

	if (hw_off_limits(sc)) {
		rc = ENXIO;
		goto done;
	}

	/*
	 * If no tracing filter is specified this time then the filter at this
	 * index must already be valid (i.e. it was set up previously).  If so,
	 * this is a legitimate enable/disable operation.
	 */
	if (t->valid == 0) {
		if (isset(&sc->tracer_valid, t->idx))
			tpp = NULL;
		else
			rc = EINVAL;
		goto done;
	}

	if (t->tp.port > 19 || t->tp.snap_len > 9600 ||
	    t->tp.min_len > M_TFMINPKTSIZE || t->tp.skip_len > M_TFLENGTH ||
	    t->tp.skip_ofst > M_TFOFFSET) {
		rc = EINVAL;
		goto done;
	}

	memcpy(&tp.data[0], &t->tp.data[0], sizeof(tp.data));
	memcpy(&tp.mask[0], &t->tp.mask[0], sizeof(tp.mask));
	tp.snap_len = t->tp.snap_len;
	tp.min_len = t->tp.min_len;
	tp.skip_ofst = t->tp.skip_ofst;
	tp.skip_len = t->tp.skip_len;
	tp.invert = !!t->tp.invert;

	/* convert port to channel iff 0 <= port < 8. */
	if (t->tp.port < 4) {
		if (sc->port[t->tp.port] == NULL) {
			rc = EINVAL;
			goto done;
		}
		tp.port = sc->port[t->tp.port]->tx_chan;
	} else if (t->tp.port < 8) {
		if (sc->port[t->tp.port - 4] == NULL) {
			rc = EINVAL;
			goto done;
		}
		tp.port = sc->port[t->tp.port - 4]->tx_chan + 4;
	} else
		tp.port = t->tp.port;
	tpp = &tp;
done:
	if (rc == 0) {
		rc = -t4_set_trace_filter(sc, tpp, t->idx, t->enabled);
		if (rc == 0) {
			if (t->enabled) {
				setbit(&sc->tracer_valid, t->idx);
				if (sc->tracer_enabled == 0) {
					t4_set_reg_field(sc, A_MPS_TRC_CFG,
					    F_TRCEN, F_TRCEN);
				}
				setbit(&sc->tracer_enabled, t->idx);
			} else {
				clrbit(&sc->tracer_enabled, t->idx);
				if (sc->tracer_enabled == 0) {
					t4_set_reg_field(sc, A_MPS_TRC_CFG,
					    F_TRCEN, 0);
				}
			}
		}
	}
	end_synchronized_op(sc, LOCK_HELD);

	return (rc);
}

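/*
 * Rx handlers for CPL_TRACE_PKT messages (T4 and T5+ use different CPL
 * formats).  Strip the CPL header, pretend the frame arrived on the tracing
 * ifnet, and hand it to BPF; the mbuf is always freed here.
 */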
int
t4_trace_pkt(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	if_t ifp;

	KASSERT(m != NULL, ("%s: no payload with opcode %02x", __func__,
	    rss->opcode));

	mtx_lock(&sc->ifp_lock);
	ifp = sc->ifp;
	if (ifp != NULL) {
		m_adj(m, sizeof(struct cpl_trace_pkt));
		m->m_pkthdr.rcvif = ifp;
		ETHER_BPF_MTAP(ifp, m);
	}
	mtx_unlock(&sc->ifp_lock);
	m_freem(m);

	return (0);
}

int
t5_trace_pkt(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	if_t ifp;

	KASSERT(m != NULL, ("%s: no payload with opcode %02x", __func__,
	    rss->opcode));

	mtx_lock(&sc->ifp_lock);
	ifp = sc->ifp;
	if (ifp != NULL) {
		m_adj(m, sizeof(struct cpl_t5_trace_pkt));
		m->m_pkthdr.rcvif = ifp;
		ETHER_BPF_MTAP(ifp, m);
	}
	mtx_unlock(&sc->ifp_lock);
	m_freem(m);

	return (0);
}

static void
tracer_init(void *arg)
{

	return;
}

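/*
 * Most ioctls are no-ops; only the media ioctls need the adapter softc, and
 * they take t4_trace_lock to keep it from going away underneath them.
 */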
static int
tracer_ioctl(if_t ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0;
	struct adapter *sc;
	struct ifreq *ifr = (struct ifreq *)data;

	switch (cmd) {
	case SIOCSIFMTU:
	case SIOCSIFFLAGS:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFCAP:
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
	case SIOCGIFXMEDIA:
		sx_xlock(&t4_trace_lock);
		sc = if_getsoftc(ifp);
		if (sc == NULL)
			rc = EIO;
		else
			rc = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
		sx_xunlock(&t4_trace_lock);
		break;
	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}

static int
tracer_transmit(if_t ifp, struct mbuf *m)
{

	m_freem(m);
	return (0);
}

static void
tracer_qflush(if_t ifp)
{

	return;
}

static int
tracer_media_change(if_t ifp)
{

	return (EOPNOTSUPP);
}

static void
tracer_media_status(if_t ifp, struct ifmediareq *ifmr)
{

	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;

	return;
}
533