/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/types.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sx.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_types.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "t4_ioctl.h"

/*
 * Locking notes
 * =============
 *
 * An interface cloner is registered during mod_load and it can be used to
 * create or destroy the tracing ifnet for an adapter at any time.  It is
 * possible for the cloned interface to outlive the adapter (the adapter
 * disappears in t4_detach but the tracing ifnet may live until mod_unload,
 * when removal of the cloner finally destroys any remaining cloned
 * interfaces).  When tracing filters are active, this ifnet is also receiving
 * data.  There are potential races between ifnet create, ifnet destroy,
 * ifnet rx, ifnet ioctl, cxgbe_detach/t4_detach, and mod_unload.
 *
 * a) The driver selects an iq for tracing (sc->traceq) inside a synch op.  The
 *    iq is destroyed inside a synch op too (and sc->traceq updated).
 * b) The cloner looks for an adapter that matches the name of the ifnet it's
 *    been asked to create, starts a synch op on that adapter, and proceeds
 *    only if the adapter has a tracing iq.
 * c) The cloned ifnet and the adapter are coupled to each other via
 *    ifp->if_softc and sc->ifp.  These can be modified only with both the
 *    global t4_trace_lock sx and the sc->ifp_lock mutex held, so holding
 *    either one is enough to prevent any change.
 *
 * The order in which all the locks involved should be acquired is:
 * t4_list_lock
 * adapter lock
 * (begin synch op and let go of the above two)
 * t4_trace_lock
 * sc->ifp_lock
 */
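
/*
 * An illustrative (not compiled) sketch of that order as it plays out in the
 * cloner create path below; t4_iterate() and the synchronized op helpers are
 * assumed to be the ones provided by t4_main.c:
 *
 *	t4_iterate(match_name, &mrr);	// holds t4_list_lock while iterating;
 *					// match_name() begins a synch op
 *					// (adapter lock) on the matching sc
 *	sx_xlock(&t4_trace_lock);
 *	mtx_lock(&sc->ifp_lock);
 *	if_setsoftc(ifp, sc);		// couple the ifnet and the adapter
 *	sc->ifp = ifp;
 *	mtx_unlock(&sc->ifp_lock);
 *	sx_xunlock(&t4_trace_lock);
 *	end_synchronized_op(sc, 0);
 */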

static struct sx t4_trace_lock;
static const char *t4_cloner_name = "tXnex";
static struct if_clone *t4_cloner;

/* tracer ifnet routines.  mostly no-ops. */
static void tracer_init(void *);
static int tracer_ioctl(if_t, unsigned long, caddr_t);
static int tracer_transmit(if_t, struct mbuf *);
static void tracer_qflush(if_t);
static int tracer_media_change(if_t);
static void tracer_media_status(if_t, struct ifmediareq *);

/* match name (request/response) */
struct match_rr {
	const char *name;
	int lock;	/* set to 1 to have the matched sc returned locked. */
	struct adapter *sc;
	int rc;
};

static void
match_name(struct adapter *sc, void *arg)
{
	struct match_rr *mrr = arg;

	if (strcmp(device_get_nameunit(sc->dev), mrr->name) != 0)
		return;

	KASSERT(mrr->sc == NULL, ("%s: multiple matches (%p, %p) for %s",
	    __func__, mrr->sc, sc, mrr->name));

	mrr->sc = sc;
	if (mrr->lock)
		mrr->rc = begin_synchronized_op(mrr->sc, NULL, 0, "t4clon");
	else
		mrr->rc = 0;
}

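/*
 * The cloner matches ifnet names that are the nameunit of a T4/T5/T6 nexus:
 * "t4nex", "t5nex", or "t6nex" followed by a unit number (only the first
 * digit is checked here; match_name() above does the exact comparison).
 */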
static int
t4_cloner_match(struct if_clone *ifc, const char *name)
{

	if (strncmp(name, "t4nex", 5) != 0 &&
	    strncmp(name, "t5nex", 5) != 0 &&
	    strncmp(name, "t6nex", 5) != 0)
		return (0);
	if (name[5] < '0' || name[5] > '9')
		return (0);
	return (1);
}

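/*
 * Creates the tracing ifnet for the nexus named by 'name'.  This fails unless
 * the matching adapter already has a tracing queue (sc->traceq) and does not
 * already have a cloned ifnet.  The new ifnet and the adapter are coupled
 * under t4_trace_lock and sc->ifp_lock as described in the locking notes.
 */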
static int
t4_cloner_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
{
	struct match_rr mrr;
	struct adapter *sc;
	if_t ifp;
	int rc;
	const uint8_t lla[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};

	mrr.name = name;
	mrr.lock = 1;
	mrr.sc = NULL;
	mrr.rc = ENOENT;
	t4_iterate(match_name, &mrr);

	if (mrr.rc != 0)
		return (mrr.rc);
	sc = mrr.sc;

	KASSERT(sc != NULL, ("%s: name (%s) matched but softc is NULL",
	    __func__, name));
	ASSERT_SYNCHRONIZED_OP(sc);

	sx_xlock(&t4_trace_lock);

	if (sc->ifp != NULL) {
		rc = EEXIST;
		goto done;
	}
	if (sc->traceq < 0) {
		rc = EAGAIN;
		goto done;
	}

	ifp = if_alloc(IFT_ETHER);
	/* Note that if_xname is identical to the nexus nameunit */
	if_initname(ifp, name, -1);
	if_setdname(ifp, t4_cloner_name);
	if_setinitfn(ifp, tracer_init);
	if_setflags(ifp, IFF_SIMPLEX | IFF_DRV_RUNNING);
	if_setioctlfn(ifp, tracer_ioctl);
	if_settransmitfn(ifp, tracer_transmit);
	if_setqflushfn(ifp, tracer_qflush);
	if_setcapabilities(ifp, IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU);
	ifmedia_init(&sc->media, IFM_IMASK, tracer_media_change,
	    tracer_media_status);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_FDX | IFM_NONE, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_FDX | IFM_NONE);
	sx_xunlock(&t4_trace_lock);
	ether_ifattach(ifp, lla);
	sx_xlock(&t4_trace_lock);
	mtx_lock(&sc->ifp_lock);
	if_setsoftc(ifp, sc);
	sc->ifp = ifp;
	mtx_unlock(&sc->ifp_lock);
	rc = 0;
done:
	sx_xunlock(&t4_trace_lock);
	end_synchronized_op(sc, 0);
	return (rc);
}

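/*
 * Undoes t4_cloner_create: decouples the ifnet from its adapter (if the
 * adapter is still around) and then detaches and frees the ifnet itself.
 */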
static int
t4_cloner_destroy(struct if_clone *ifc, if_t ifp)
{
	struct adapter *sc;

	sx_xlock(&t4_trace_lock);
	sc = if_getsoftc(ifp);
	if (sc != NULL) {
		mtx_lock(&sc->ifp_lock);
		sc->ifp = NULL;
		if_setsoftc(ifp, NULL);
		mtx_unlock(&sc->ifp_lock);
		ifmedia_removeall(&sc->media);
	}
	sx_xunlock(&t4_trace_lock);
	ether_ifdetach(ifp);
	if_free(ifp);

	return (0);
}

void
t4_tracer_modload(void)
{

	sx_init(&t4_trace_lock, "T4/T5 tracer lock");
	t4_cloner = if_clone_advanced(t4_cloner_name, 0, t4_cloner_match,
	    t4_cloner_create, t4_cloner_destroy);
}

void
t4_tracer_modunload(void)
{

	if (t4_cloner != NULL) {
		/*
		 * The module is being unloaded so the nexus drivers have
		 * detached.  The tracing interfaces can not outlive the nexus
		 * (ifp->if_softc is the nexus) and must have been destroyed
		 * already.  XXX: but if_clone is opaque to us and we can't
		 * assert LIST_EMPTY(&t4_cloner->ifc_iflist) at this time.
		 */
		if_clone_detach(t4_cloner);
	}
	sx_destroy(&t4_trace_lock);
}

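/*
 * Decouples the adapter from its cloned tracing ifnet (if any).  An ifnet
 * that outlives the adapter is left with a NULL softc; the ifnet itself is
 * destroyed later, by the cloner.
 */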
void
t4_tracer_port_detach(struct adapter *sc)
{

	sx_xlock(&t4_trace_lock);
	if (sc->ifp != NULL) {
		mtx_lock(&sc->ifp_lock);
		if_setsoftc(sc->ifp, NULL);
		sc->ifp = NULL;
		mtx_unlock(&sc->ifp_lock);
	}
	ifmedia_removeall(&sc->media);
	sx_xunlock(&t4_trace_lock);
}

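/*
 * Reports the first valid tracing filter at an index >= t->idx.  If there is
 * none (or t->idx is out of range) then t->idx is set to 0xff and t->valid to
 * 0.  The channel stored in the hardware filter is translated back to a port
 * number before it is returned to the caller.
 */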
int
t4_get_tracer(struct adapter *sc, struct t4_tracer *t)
{
	int rc, i, enabled;
	struct trace_params tp;

	if (t->idx >= NTRACE) {
		t->idx = 0xff;
		t->enabled = 0;
		t->valid = 0;
		return (0);
	}

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4gett");
	if (rc)
		return (rc);

	if (hw_off_limits(sc)) {
		rc = ENXIO;
		goto done;
	}

	for (i = t->idx; i < NTRACE; i++) {
		if (isset(&sc->tracer_valid, i)) {
			t4_get_trace_filter(sc, &tp, i, &enabled);
			t->idx = i;
			t->enabled = enabled;
			t->valid = 1;
			memcpy(&t->tp.data[0], &tp.data[0], sizeof(t->tp.data));
			memcpy(&t->tp.mask[0], &tp.mask[0], sizeof(t->tp.mask));
			t->tp.snap_len = tp.snap_len;
			t->tp.min_len = tp.min_len;
			t->tp.skip_ofst = tp.skip_ofst;
			t->tp.skip_len = tp.skip_len;
			t->tp.invert = tp.invert;

			/* convert channel to port iff 0 <= port < 8. */
			if (tp.port < 4)
				t->tp.port = sc->chan_map[tp.port];
			else if (tp.port < 8)
				t->tp.port = sc->chan_map[tp.port - 4] + 4;
			else
				t->tp.port = tp.port;

			goto done;
		}
	}

	t->idx = 0xff;
	t->enabled = 0;
	t->valid = 0;
done:
	end_synchronized_op(sc, LOCK_HELD);

	return (rc);
}

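/*
 * Programs the tracing filter at t->idx, or just enables/disables it.  A
 * request with t->valid == 0 carries no filter contents and is treated as an
 * enable/disable of a previously programmed filter; it fails if no such
 * filter exists.  Ports 0-3 name a physical port and are converted to that
 * port's tx channel; 4-7 are converted the same way with 4 added back
 * (presumably the loopback channels); anything else is passed through as-is.
 */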
int
t4_set_tracer(struct adapter *sc, struct t4_tracer *t)
{
	int rc;
	struct trace_params tp, *tpp;

	if (t->idx >= NTRACE)
		return (EINVAL);

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4sett");
	if (rc)
		return (rc);

	if (hw_off_limits(sc)) {
		rc = ENXIO;
		goto done;
	}

	/*
	 * If no tracing filter is specified this time then check if the filter
	 * at the index is valid anyway because it was set previously.  If so
	 * then this is a legitimate enable/disable operation.
	 */
	if (t->valid == 0) {
		if (isset(&sc->tracer_valid, t->idx))
			tpp = NULL;
		else
			rc = EINVAL;
		goto done;
	}

	if (t->tp.port > 19 || t->tp.snap_len > 9600 ||
	    t->tp.min_len > M_TFMINPKTSIZE || t->tp.skip_len > M_TFLENGTH ||
	    t->tp.skip_ofst > M_TFOFFSET) {
		rc = EINVAL;
		goto done;
	}

	memcpy(&tp.data[0], &t->tp.data[0], sizeof(tp.data));
	memcpy(&tp.mask[0], &t->tp.mask[0], sizeof(tp.mask));
	tp.snap_len = t->tp.snap_len;
	tp.min_len = t->tp.min_len;
	tp.skip_ofst = t->tp.skip_ofst;
	tp.skip_len = t->tp.skip_len;
	tp.invert = !!t->tp.invert;

	/* convert port to channel iff 0 <= port < 8. */
	if (t->tp.port < 4) {
		if (sc->port[t->tp.port] == NULL) {
			rc = EINVAL;
			goto done;
		}
		tp.port = sc->port[t->tp.port]->tx_chan;
	} else if (t->tp.port < 8) {
		if (sc->port[t->tp.port - 4] == NULL) {
			rc = EINVAL;
			goto done;
		}
		tp.port = sc->port[t->tp.port - 4]->tx_chan + 4;
	} else
		tp.port = t->tp.port;
	tpp = &tp;
done:
	if (rc == 0) {
		rc = -t4_set_trace_filter(sc, tpp, t->idx, t->enabled);
		if (rc == 0) {
			if (t->enabled) {
				setbit(&sc->tracer_valid, t->idx);
				if (sc->tracer_enabled == 0) {
					t4_set_reg_field(sc, A_MPS_TRC_CFG,
					    F_TRCEN, F_TRCEN);
				}
				setbit(&sc->tracer_enabled, t->idx);
			} else {
				clrbit(&sc->tracer_enabled, t->idx);
				if (sc->tracer_enabled == 0) {
					t4_set_reg_field(sc, A_MPS_TRC_CFG,
					    F_TRCEN, 0);
				}
			}
		}
	}
	end_synchronized_op(sc, LOCK_HELD);

	return (rc);
}

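/*
 * CPL_TRACE_PKT handler for T4 (t5_trace_pkt below handles the T5+ variant,
 * which differs only in the size of the CPL header that is trimmed off).  The
 * traced frame is handed to BPF on the cloned ifnet, if one is attached, and
 * then freed; it is never injected into the regular network stack.
 */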
int
t4_trace_pkt(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	if_t ifp;

	KASSERT(m != NULL, ("%s: no payload with opcode %02x", __func__,
	    rss->opcode));

	mtx_lock(&sc->ifp_lock);
	ifp = sc->ifp;
	if (ifp != NULL) {
		m_adj(m, sizeof(struct cpl_trace_pkt));
		m->m_pkthdr.rcvif = ifp;
		ETHER_BPF_MTAP(ifp, m);
	}
	mtx_unlock(&sc->ifp_lock);
	m_freem(m);

	return (0);
}

int
t5_trace_pkt(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	if_t ifp;

	KASSERT(m != NULL, ("%s: no payload with opcode %02x", __func__,
	    rss->opcode));

	mtx_lock(&sc->ifp_lock);
	ifp = sc->ifp;
	if (ifp != NULL) {
		m_adj(m, sizeof(struct cpl_t5_trace_pkt));
		m->m_pkthdr.rcvif = ifp;
		ETHER_BPF_MTAP(ifp, m);
	}
	mtx_unlock(&sc->ifp_lock);
	m_freem(m);

	return (0);
}

static void
tracer_init(void *arg)
{

	return;
}

static int
tracer_ioctl(if_t ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0;
	struct adapter *sc;
	struct ifreq *ifr = (struct ifreq *)data;

	switch (cmd) {
	case SIOCSIFMTU:
	case SIOCSIFFLAGS:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFCAP:
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
	case SIOCGIFXMEDIA:
		sx_xlock(&t4_trace_lock);
		sc = if_getsoftc(ifp);
		if (sc == NULL)
			rc = EIO;
		else
			rc = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
		sx_xunlock(&t4_trace_lock);
		break;
	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}

static int
tracer_transmit(if_t ifp, struct mbuf *m)
{

	m_freem(m);
	return (0);
}

static void
tracer_qflush(if_t ifp)
{

	return;
}

static int
tracer_media_change(if_t ifp)
{

	return (EOPNOTSUPP);
}

static void
tracer_media_status(if_t ifp, struct ifmediareq *ifmr)
{

	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;

	return;
}