/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2014-2018 Giuseppe Lettieri
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* $FreeBSD$ */

#if defined(__FreeBSD__)
#include <sys/cdefs.h> /* prerequisite */

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/param.h>	/* defines used in kernel.h */
#include <sys/kernel.h>	/* types used in module initialization */
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <sys/socket.h> /* sockaddrs */
#include <net/if.h>
#include <net/if_var.h>
#include <machine/bus.h>	/* bus_dmamap_* */
#include <sys/refcount.h>


#elif defined(linux)

#include "bsd_glue.h"

#elif defined(__APPLE__)

#warning OSX support is only partial
#include "osx_glue.h"

#elif defined(_WIN32)
#include "win_glue.h"

#else

#error	Unsupported platform

#endif /* unsupported */

/*
 * common headers
 */

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>

#ifdef WITH_PIPES

#define NM_PIPE_MAXSLOTS	4096
#define NM_PIPE_MAXRINGS	256

static int netmap_default_pipes = 0; /* ignored, kept for compatibility */
SYSBEGIN(vars_pipes);
SYSCTL_DECL(_dev_netmap);
SYSCTL_INT(_dev_netmap, OID_AUTO, default_pipes, CTLFLAG_RW,
		&netmap_default_pipes, 0, "For compatibility only");
SYSEND;

/* allocate the pipe array in the parent adapter */
static int
nm_pipe_alloc(struct netmap_adapter *na, u_int npipes)
{
	size_t old_len, len;
	struct netmap_pipe_adapter **npa;

	if (npipes <= na->na_max_pipes)
		/* we already have at least as many entries as requested */
		return 0;

	if (npipes < na->na_next_pipe || npipes > NM_MAXPIPES)
		return EINVAL;

	old_len = sizeof(struct netmap_pipe_adapter *)*na->na_max_pipes;
	len = sizeof(struct netmap_pipe_adapter *) * npipes;
	npa = nm_os_realloc(na->na_pipes, len, old_len);
	if (npa == NULL)
		return ENOMEM;

	na->na_pipes = npa;
	na->na_max_pipes = npipes;

	return 0;
}

/* deallocate the pipe array in the parent adapter */
void
netmap_pipe_dealloc(struct netmap_adapter *na)
{
	if (na->na_pipes) {
		if (na->na_next_pipe > 0) {
			D("freeing non-empty pipe array for %s (%d dangling pipes)!", na->name,
					na->na_next_pipe);
		}
		nm_os_free(na->na_pipes);
		na->na_pipes = NULL;
		na->na_max_pipes = 0;
		na->na_next_pipe = 0;
	}
}

/* find a pipe endpoint with the given id among the parent's pipes */
static struct netmap_pipe_adapter *
netmap_pipe_find(struct netmap_adapter *parent, const char *pipe_id)
{
	int i;
	struct netmap_pipe_adapter *na;

	for (i = 0; i < parent->na_next_pipe; i++) {
		const char *na_pipe_id;
		na = parent->na_pipes[i];
		na_pipe_id = strrchr(na->up.name,
			na->role == NM_PIPE_ROLE_MASTER ? '{' : '}');
		KASSERT(na_pipe_id != NULL, ("Invalid pipe name"));
		++na_pipe_id;
		if (!strcmp(na_pipe_id, pipe_id)) {
			return na;
		}
	}
	return NULL;
}

/* add a new pipe endpoint to the parent array */
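/* (the backing array grows geometrically, doubling from an initial size of
 * 2; nm_pipe_alloc() above rejects growth beyond NM_MAXPIPES) */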
static int
netmap_pipe_add(struct netmap_adapter *parent, struct netmap_pipe_adapter *na)
{
	if (parent->na_next_pipe >= parent->na_max_pipes) {
		u_int npipes = parent->na_max_pipes ?  2*parent->na_max_pipes : 2;
		int error = nm_pipe_alloc(parent, npipes);
		if (error)
			return error;
	}

	parent->na_pipes[parent->na_next_pipe] = na;
	na->parent_slot = parent->na_next_pipe;
	parent->na_next_pipe++;
	return 0;
}

/* remove the given pipe endpoint from the parent array */
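/* (removal is O(1): the last entry is moved into the vacated slot and its
 * parent_slot index updated, so the array stays dense) */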
static void
netmap_pipe_remove(struct netmap_adapter *parent, struct netmap_pipe_adapter *na)
{
	u_int n;
	n = --parent->na_next_pipe;
	if (n != na->parent_slot) {
		struct netmap_pipe_adapter **p =
			&parent->na_pipes[na->parent_slot];
		*p = parent->na_pipes[n];
		(*p)->parent_slot = na->parent_slot;
	}
	parent->na_pipes[n] = NULL;
}

int
netmap_pipe_txsync(struct netmap_kring *txkring, int flags)
{
	struct netmap_kring *rxkring = txkring->pipe;
	u_int k, lim = txkring->nkr_num_slots - 1, nk;
	int m; /* slots to transfer */
	int complete; /* did we see a complete packet ? */
	struct netmap_ring *txring = txkring->ring, *rxring = rxkring->ring;

	ND("%p: %s %x -> %s", txkring, txkring->name, flags, rxkring->name);
	ND(20, "TX before: hwcur %d hwtail %d cur %d head %d tail %d",
		txkring->nr_hwcur, txkring->nr_hwtail,
		txkring->rcur, txkring->rhead, txkring->rtail);

	/* update the hwtail */
	txkring->nr_hwtail = txkring->pipe_tail;

	m = txkring->rhead - txkring->nr_hwcur; /* new slots */
	if (m < 0)
		m += txkring->nkr_num_slots;
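	/*
	 * Illustrative example: with nkr_num_slots == 8, nr_hwcur == 6 and
	 * rhead == 2, the head has wrapped around, so m = 2 - 6 = -4 is
	 * corrected to -4 + 8 = 4 newly available slots.
	 */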

	if (m == 0) {
		/* nothing to send */
		return 0;
	}

	for (k = txkring->nr_hwcur, nk = lim + 1, complete = 0; m;
			m--, k = nm_next(k, lim), nk = (complete ? k : nk)) {
		struct netmap_slot *rs = &rxring->slot[k];
		struct netmap_slot *ts = &txring->slot[k];

		*rs = *ts;
		if (ts->flags & NS_BUF_CHANGED) {
			ts->flags &= ~NS_BUF_CHANGED;
		}
		complete = !(ts->flags & NS_MOREFRAG);
	}
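	/*
	 * Note: nk, updated in the loop header above, always points one past
	 * the last slot that ended a packet (NS_MOREFRAG not set); it keeps
	 * its initial value lim + 1 if no packet completed, in which case
	 * nothing is published below.
	 */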

	txkring->nr_hwcur = k;

	ND(20, "TX after : hwcur %d hwtail %d cur %d head %d tail %d k %d",
		txkring->nr_hwcur, txkring->nr_hwtail,
		txkring->rcur, txkring->rhead, txkring->rtail, k);

	if (likely(nk <= lim)) {
		mb(); /* make sure the slots are updated before publishing them */
		rxkring->pipe_tail = nk; /* only publish complete packets */
		rxkring->nm_notify(rxkring, 0);
	}

	return 0;
}

int
netmap_pipe_rxsync(struct netmap_kring *rxkring, int flags)
{
	struct netmap_kring *txkring = rxkring->pipe;
	u_int k, lim = rxkring->nkr_num_slots - 1;
	int m; /* slots to release */
	struct netmap_ring *txring = txkring->ring, *rxring = rxkring->ring;

	ND("%p: %s %x -> %s", txkring, txkring->name, flags, rxkring->name);
	ND(20, "RX before: hwcur %d hwtail %d cur %d head %d tail %d",
		rxkring->nr_hwcur, rxkring->nr_hwtail,
		rxkring->rcur, rxkring->rhead, rxkring->rtail);

	/* update the hwtail */
	rxkring->nr_hwtail = rxkring->pipe_tail;

	m = rxkring->rhead - rxkring->nr_hwcur; /* released slots */
	if (m < 0)
		m += rxkring->nkr_num_slots;

	if (m == 0) {
		/* nothing to release */
		return 0;
	}

	for (k = rxkring->nr_hwcur; m; m--, k = nm_next(k, lim)) {
		struct netmap_slot *rs = &rxring->slot[k];
		struct netmap_slot *ts = &txring->slot[k];

		if (rs->flags & NS_BUF_CHANGED) {
			/* copy the slot and report the buffer change */
			*ts = *rs;
			rs->flags &= ~NS_BUF_CHANGED;
		}
	}

	mb(); /* make sure the slots are updated before publishing them */
	txkring->pipe_tail = nm_prev(k, lim);
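	/* (the tail published to the tx side stays one slot behind k,
	 * following the netmap convention that at most nkr_num_slots - 1
	 * slots are exposed at once, so a full ring remains distinguishable
	 * from an empty one)
	 */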
	rxkring->nr_hwcur = k;

	ND(20, "RX after : hwcur %d hwtail %d cur %d head %d tail %d k %d",
		rxkring->nr_hwcur, rxkring->nr_hwtail,
		rxkring->rcur, rxkring->rhead, rxkring->rtail, k);

	txkring->nm_notify(txkring, 0);

	return 0;
}

/* Pipe endpoints are created and destroyed together, so that endpoints do not
 * have to check for the existence of their peer at each ?xsync.
 *
 * To play well with the existing netmap infrastructure (refcounts etc.), we
 * adopt the following strategy:
 *
 * 1) The first endpoint that is created also creates the other endpoint and
 * grabs a reference to it.
 *
 *    state A)  user1 --> endpoint1 --> endpoint2
 *
 * 2) If, starting from state A, endpoint2 is then registered, endpoint1 gives
 * its reference to the user:
 *
 *    state B)  user1 --> endpoint1     endpoint2 <--- user2
 *
 * 3) Assume that, starting from state B, endpoint2 is closed. In the unregister
 * callback endpoint2 notes that endpoint1 is still active and adds a reference
 * from endpoint1 to itself. When user2 then releases her own reference,
 * endpoint2 is not destroyed and we are back to state A. A symmetrical state
 * would be reached if endpoint1 were released instead.
 *
 * 4) If, starting from state A, endpoint1 is closed, the destructor notes that
 * it owns a reference to endpoint2 and releases it.
 *
 * Something similar goes on for the creation and destruction of the krings.
 */
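/* (reading aid, not a new mechanism: in the diagrams above, "usrN --> eN"
 * is the reference held through the user's file descriptor, while
 * "e1 --> e2" is the adapter reference recorded in e1's peer_ref flag)
 */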


/* netmap_pipe_krings_create.
 *
 * There are two cases:
 *
 * 1) state is
 *
 *        usr1 --> e1 --> e2
 *
 *    and we are e1. We have to create both sets
 *    of krings.
 *
 * 2) state is
 *
 *        usr1 --> e1 --> e2
 *
 *    and we are e2. e1 is certainly registered and our
 *    krings already exist. Nothing to do.
 */
static int
netmap_pipe_krings_create(struct netmap_adapter *na)
{
	struct netmap_pipe_adapter *pna =
		(struct netmap_pipe_adapter *)na;
	struct netmap_adapter *ona = &pna->peer->up;
	int error = 0;
	enum txrx t;

	if (pna->peer_ref) {
		int i;

		/* case 1) above */
		ND("%p: case 1, create both ends", na);
		error = netmap_krings_create(na, 0);
		if (error)
			goto err;

		/* create the krings of the other end */
		error = netmap_krings_create(ona, 0);
		if (error)
			goto del_krings1;

		/* cross link the krings and initialize the pipe_tails */
		for_rx_tx(t) {
			enum txrx r = nm_txrx_swap(t); /* swap NR_TX <-> NR_RX */
			for (i = 0; i < nma_get_nrings(na, t); i++) {
				struct netmap_kring *k1 = NMR(na, t)[i],
					            *k2 = NMR(ona, r)[i];
				k1->pipe = k2;
				k2->pipe = k1;
				/* mark all peer-adapter rings as fake */
				k2->nr_kflags |= NKR_FAKERING;
				/* init tails */
				k1->pipe_tail = k1->nr_hwtail;
				k2->pipe_tail = k2->nr_hwtail;
			}
		}

	}
	return 0;

del_krings1:
	netmap_krings_delete(na);
err:
	return error;
}

/* netmap_pipe_reg.
 *
 * There are two cases on registration (onoff==1)
 *
 * 1.a) state is
 *
 *        usr1 --> e1 --> e2
 *
 *      and we are e1. Create the needed rings of the
 *      other end.
 *
 * 1.b) state is
 *
 *        usr1 --> e1 --> e2 <-- usr2
 *
 *      and we are e2. Drop the ref e1 is holding.
 *
 *  There are two additional cases on unregister (onoff==0)
 *
 *  2.a) state is
 *
 *         usr1 --> e1 --> e2
 *
 *       and we are e1. Nothing special to do, e2 will
 *       be cleaned up by the destructor of e1.
 *
 *  2.b) state is
 *
 *         usr1 --> e1     e2 <-- usr2
 *
 *       and we are either e1 or e2. Add a ref from the
 *       other end.
 */
static int
netmap_pipe_reg(struct netmap_adapter *na, int onoff)
{
	struct netmap_pipe_adapter *pna =
		(struct netmap_pipe_adapter *)na;
	struct netmap_adapter *ona = &pna->peer->up;
	int i, error = 0;
	enum txrx t;

	ND("%p: onoff %d", na, onoff);
	if (onoff) {
		for_rx_tx(t) {
			for (i = 0; i < nma_get_nrings(na, t); i++) {
				struct netmap_kring *kring = NMR(na, t)[i];

				if (nm_kring_pending_on(kring)) {
					/* mark the peer ring as needed */
					kring->pipe->nr_kflags |= NKR_NEEDRING;
				}
			}
		}

		/* create the rings that are still missing on the other end.
		 * Either our end or the other has been marked as
		 * fake, so the allocation will not be done twice.
		 */
		error = netmap_mem_rings_create(ona);
		if (error)
			return error;

		/* If there was no error, put our rings in netmap mode */
		for_rx_tx(t) {
			for (i = 0; i < nma_get_nrings(na, t) + 1; i++) {
				struct netmap_kring *kring = NMR(na, t)[i];
				if (nm_kring_pending_on(kring)) {
					struct netmap_kring *sring, *dring;

					kring->nr_mode = NKR_NETMAP_ON;
					if ((kring->nr_kflags & NKR_FAKERING) &&
					    (kring->pipe->nr_kflags & NKR_FAKERING)) {
						/* this is a re-open of a pipe
						 * end-point kept alive by the other end.
						 * We need to leave everything as it is
						 */
						continue;
					}

					/* copy the buffers from the non-fake ring */
					if (kring->nr_kflags & NKR_FAKERING) {
						sring = kring->pipe;
						dring = kring;
					} else {
						sring = kring;
						dring = kring->pipe;
					}
					memcpy(dring->ring->slot,
					       sring->ring->slot,
					       sizeof(struct netmap_slot) *
							sring->nkr_num_slots);
					/* mark both rings as fake and needed,
					 * so that buffers will not be
					 * deleted by the standard machinery
					 * (we will delete them by ourselves in
					 * netmap_pipe_krings_delete)
					 */
					sring->nr_kflags |=
						(NKR_FAKERING | NKR_NEEDRING);
					dring->nr_kflags |=
						(NKR_FAKERING | NKR_NEEDRING);
					kring->nr_mode = NKR_NETMAP_ON;
				}
			}
		}
		if (na->active_fds == 0)
			na->na_flags |= NAF_NETMAP_ON;
	} else {
		if (na->active_fds == 0)
			na->na_flags &= ~NAF_NETMAP_ON;
		for_rx_tx(t) {
			for (i = 0; i < nma_get_nrings(na, t) + 1; i++) {
				struct netmap_kring *kring = NMR(na, t)[i];

				if (nm_kring_pending_off(kring)) {
					kring->nr_mode = NKR_NETMAP_OFF;
				}
			}
		}
	}

	if (na->active_fds) {
		ND("active_fds %d", na->active_fds);
		return 0;
	}

	if (pna->peer_ref) {
		ND("%p: case 1.a or 2.a, nothing to do", na);
		return 0;
	}
	if (onoff) {
		ND("%p: case 1.b, drop peer", na);
		pna->peer->peer_ref = 0;
		netmap_adapter_put(na);
	} else {
		ND("%p: case 2.b, grab peer", na);
		netmap_adapter_get(na);
		pna->peer->peer_ref = 1;
	}
	return error;
}

/* netmap_pipe_krings_delete.
 *
 * There are two cases:
 *
 * 1) state is
 *
 *                usr1 --> e1 --> e2
 *
 *    and we are e1 (e2 is not registered, so krings_delete cannot be
 *    called on it);
 *
 * 2) state is
 *
 *                usr1 --> e1     e2 <-- usr2
 *
 *    and we are either e1 or e2.
 *
 * In the former case we have to also delete the krings of e2;
 * in the latter case we do nothing.
 */
static void
netmap_pipe_krings_delete(struct netmap_adapter *na)
{
	struct netmap_pipe_adapter *pna =
		(struct netmap_pipe_adapter *)na;
	struct netmap_adapter *sna, *ona; /* na of the other end */
	enum txrx t;
	int i;

	if (!pna->peer_ref) {
		ND("%p: case 2, kept alive by peer",  na);
		return;
	}
	ona = &pna->peer->up;
	/* case 1) above */
	ND("%p: case 1, deleting everything", na);
	/* To avoid double-frees we zero-out all the buffers in the kernel part
	 * of each ring. The reason is this: If the user is behaving correctly,
	 * all buffers are found in exactly one slot in the userspace part of
	 * some ring.  If the user is not behaving correctly, we cannot release
	 * buffers cleanly anyway. In the latter case, the allocator will
	 * return to a clean state only when all of its users have closed.
	 */
	sna = na;
cleanup:
	for_rx_tx(t) {
		for (i = 0; i < nma_get_nrings(sna, t) + 1; i++) {
			struct netmap_kring *kring = NMR(sna, t)[i];
			struct netmap_ring *ring = kring->ring;
			uint32_t j, lim = kring->nkr_num_slots - 1;

			ND("%s ring %p hwtail %u hwcur %u",
				kring->name, ring, kring->nr_hwtail, kring->nr_hwcur);

			if (ring == NULL)
				continue;

			if (kring->tx == NR_RX)
				ring->slot[kring->pipe_tail].buf_idx = 0;

			for (j = nm_next(kring->pipe_tail, lim);
			     j != kring->nr_hwcur;
			     j = nm_next(j, lim))
			{
				ND("%s[%d] %u", kring->name, j, ring->slot[j].buf_idx);
				ring->slot[j].buf_idx = 0;
			}
			kring->nr_kflags &= ~(NKR_FAKERING | NKR_NEEDRING);
		}

	}
	if (sna != ona && ona->tx_rings) {
		sna = ona;
		goto cleanup;
	}

	netmap_mem_rings_delete(na);
	netmap_krings_delete(na); /* also zeroes tx_rings etc. */

	if (ona->tx_rings == NULL) {
		/* already deleted, we must be on a
		 * cleanup-after-error path */
		return;
	}
	netmap_mem_rings_delete(ona);
	netmap_krings_delete(ona);
}


static void
netmap_pipe_dtor(struct netmap_adapter *na)
{
	struct netmap_pipe_adapter *pna =
		(struct netmap_pipe_adapter *)na;
	ND("%p %p", na, pna->parent_ifp);
	if (pna->peer_ref) {
		ND("%p: clean up peer", na);
		pna->peer_ref = 0;
		netmap_adapter_put(&pna->peer->up);
	}
	if (pna->role == NM_PIPE_ROLE_MASTER)
		netmap_pipe_remove(pna->parent, pna);
	if (pna->parent_ifp)
		if_rele(pna->parent_ifp);
	netmap_adapter_put(pna->parent);
	pna->parent = NULL;
}

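/*
 * Pipe names have the form "<parent>{<id>" for the master endpoint and
 * "<parent>}<id>" for the slave endpoint. As a purely illustrative
 * userspace sketch (port name and pipe id are placeholders), with
 * NETMAP_WITH_LIBS defined before including <net/netmap_user.h>:
 *
 *	struct nm_desc *m = nm_open("netmap:foo{1", NULL, 0, NULL);
 *	struct nm_desc *s = nm_open("netmap:foo}1", NULL, 0, NULL);
 *
 * Each such open reaches netmap_get_pipe_na() below, which looks up or
 * creates the requested endpoint.
 */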
int
netmap_get_pipe_na(struct nmreq_header *hdr, struct netmap_adapter **na,
		struct netmap_mem_d *nmd, int create)
{
	struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body;
	struct netmap_adapter *pna; /* parent adapter */
	struct netmap_pipe_adapter *mna, *sna, *reqna;
	struct ifnet *ifp = NULL;
	const char *pipe_id = NULL;
	int role = 0;
	int error, retries = 0;
	char *cbra;

	/* Try to parse the pipe syntax 'xx{yy' or 'xx}yy'. */
	cbra = strrchr(hdr->nr_name, '{');
	if (cbra != NULL) {
		role = NM_PIPE_ROLE_MASTER;
	} else {
		cbra = strrchr(hdr->nr_name, '}');
		if (cbra != NULL) {
			role = NM_PIPE_ROLE_SLAVE;
		} else {
			ND("not a pipe");
			return 0;
		}
	}
	pipe_id = cbra + 1;
	if (*pipe_id == '\0' || cbra == hdr->nr_name) {
		/* Bracket is the last character, so pipe name is missing;
		 * or bracket is the first character, so base port name
		 * is missing. */
		return EINVAL;
	}

	if (req->nr_mode != NR_REG_ALL_NIC && req->nr_mode != NR_REG_ONE_NIC) {
		/* We only accept modes involving hardware rings. */
		return EINVAL;
	}

	/* first, try to find the parent adapter */
	for (;;) {
		char nr_name_orig[NETMAP_REQ_IFNAMSIZ];
		int create_error;

		/* Temporarily remove the pipe suffix. */
		strncpy(nr_name_orig, hdr->nr_name, sizeof(nr_name_orig));
		*cbra = '\0';
		error = netmap_get_na(hdr, &pna, &ifp, nmd, create);
		/* Restore the pipe suffix. */
		strncpy(hdr->nr_name, nr_name_orig, sizeof(hdr->nr_name));
		if (!error)
			break;
		if (error != ENXIO || retries++) {
			ND("parent lookup failed: %d", error);
			return error;
		}
		ND("try to create a persistent vale port");
		/* create a persistent vale port and try again */
		*cbra = '\0';
		NMG_UNLOCK();
		create_error = netmap_vi_create(hdr, 1 /* autodelete */);
		NMG_LOCK();
		strncpy(hdr->nr_name, nr_name_orig, sizeof(hdr->nr_name));
		if (create_error && create_error != EEXIST) {
			if (create_error != EOPNOTSUPP) {
				D("failed to create a persistent vale port: %d", create_error);
			}
			return error;
		}
	}

	if (NETMAP_OWNED_BY_KERN(pna)) {
		ND("parent busy");
		error = EBUSY;
		goto put_out;
	}

	/* next, lookup the pipe id in the parent list */
	reqna = NULL;
	mna = netmap_pipe_find(pna, pipe_id);
	if (mna) {
		if (mna->role == role) {
			ND("found %s directly at %d", pipe_id, mna->parent_slot);
			reqna = mna;
		} else {
			ND("found %s indirectly at %d", pipe_id, mna->parent_slot);
			reqna = mna->peer;
		}
		/* the pipe we have found already holds a ref to the parent,
		 * so we need to drop the one we got from netmap_get_na()
		 */
		netmap_unget_na(pna, ifp);
		goto found;
	}
	ND("pipe %s not found, create %d", pipe_id, create);
	if (!create) {
		error = ENODEV;
		goto put_out;
	}
	/* we create both master and slave.
	 * The endpoint we were asked for holds a reference to
	 * the other one.
	 */
	mna = nm_os_malloc(sizeof(*mna));
	if (mna == NULL) {
		error = ENOMEM;
		goto put_out;
	}
	snprintf(mna->up.name, sizeof(mna->up.name), "%s{%s", pna->name, pipe_id);

	mna->role = NM_PIPE_ROLE_MASTER;
	mna->parent = pna;
	mna->parent_ifp = ifp;

	mna->up.nm_txsync = netmap_pipe_txsync;
	mna->up.nm_rxsync = netmap_pipe_rxsync;
	mna->up.nm_register = netmap_pipe_reg;
	mna->up.nm_dtor = netmap_pipe_dtor;
	mna->up.nm_krings_create = netmap_pipe_krings_create;
	mna->up.nm_krings_delete = netmap_pipe_krings_delete;
	mna->up.nm_mem = netmap_mem_get(pna->nm_mem);
	mna->up.na_flags |= NAF_MEM_OWNER;
	mna->up.na_lut = pna->na_lut;

	mna->up.num_tx_rings = req->nr_tx_rings;
	nm_bound_var(&mna->up.num_tx_rings, 1,
			1, NM_PIPE_MAXRINGS, NULL);
	mna->up.num_rx_rings = req->nr_rx_rings;
	nm_bound_var(&mna->up.num_rx_rings, 1,
			1, NM_PIPE_MAXRINGS, NULL);
	mna->up.num_tx_desc = req->nr_tx_slots;
	nm_bound_var(&mna->up.num_tx_desc, pna->num_tx_desc,
			1, NM_PIPE_MAXSLOTS, NULL);
	mna->up.num_rx_desc = req->nr_rx_slots;
	nm_bound_var(&mna->up.num_rx_desc, pna->num_rx_desc,
			1, NM_PIPE_MAXSLOTS, NULL);
	error = netmap_attach_common(&mna->up);
	if (error)
		goto free_mna;
	/* register the master with the parent */
	error = netmap_pipe_add(pna, mna);
	if (error)
		goto free_mna;

	/* create the slave */
	sna = nm_os_malloc(sizeof(*mna));
	if (sna == NULL) {
		error = ENOMEM;
		goto unregister_mna;
	}
	/* most fields are the same, copy from master and then fix */
	*sna = *mna;
	sna->up.nm_mem = netmap_mem_get(mna->up.nm_mem);
	/* swap the number of tx/rx rings */
	sna->up.num_tx_rings = mna->up.num_rx_rings;
	sna->up.num_rx_rings = mna->up.num_tx_rings;
	snprintf(sna->up.name, sizeof(sna->up.name), "%s}%s", pna->name, pipe_id);
	sna->role = NM_PIPE_ROLE_SLAVE;
	error = netmap_attach_common(&sna->up);
	if (error)
		goto free_sna;

	/* join the two endpoints */
	mna->peer = sna;
	sna->peer = mna;

	/* we already have a reference to the parent, but we
	 * need another one for the other endpoint we created
	 */
	netmap_adapter_get(pna);
	/* likewise for the ifp, if any */
	if (ifp)
		if_ref(ifp);

	if (role == NM_PIPE_ROLE_MASTER) {
		reqna = mna;
		mna->peer_ref = 1;
		netmap_adapter_get(&sna->up);
	} else {
		reqna = sna;
		sna->peer_ref = 1;
		netmap_adapter_get(&mna->up);
	}
	ND("created master %p and slave %p", mna, sna);
found:

	ND("pipe %s %s at %p", pipe_id,
		(reqna->role == NM_PIPE_ROLE_MASTER ? "master" : "slave"), reqna);
	*na = &reqna->up;
	netmap_adapter_get(*na);

	/* keep the reference to the parent.
	 * It will be released by the req destructor
	 */

	return 0;

free_sna:
	nm_os_free(sna);
unregister_mna:
	netmap_pipe_remove(pna, mna);
free_mna:
	nm_os_free(mna);
put_out:
	netmap_unget_na(pna, ifp);
	return error;
}


#endif /* WITH_PIPES */