/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2014-2018 Giuseppe Lettieri
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */


#if defined(__FreeBSD__)
#include <sys/cdefs.h> /* prerequisite */

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/param.h>	/* defines used in kernel.h */
#include <sys/kernel.h>	/* types used in module initialization */
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <sys/socket.h> /* sockaddrs */
#include <net/if.h>
#include <net/if_var.h>
#include <machine/bus.h>	/* bus_dmamap_* */
#include <sys/refcount.h>


#elif defined(linux)

#include "bsd_glue.h"

#elif defined(__APPLE__)

#warning OSX support is only partial
#include "osx_glue.h"

#elif defined(_WIN32)
#include "win_glue.h"

#else

#error	Unsupported platform

#endif /* unsupported */

/*
 * common headers
 */

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>

#ifdef WITH_PIPES

#define NM_PIPE_MAXSLOTS	4096
#define NM_PIPE_MAXRINGS	256

static int netmap_default_pipes = 0; /* ignored, kept for compatibility */
SYSBEGIN(vars_pipes);
SYSCTL_DECL(_dev_netmap);
SYSCTL_INT(_dev_netmap, OID_AUTO, default_pipes, CTLFLAG_RW,
		&netmap_default_pipes, 0, "For compatibility only");
SYSEND;

/* allocate the pipe array in the parent adapter */
static int
nm_pipe_alloc(struct netmap_adapter *na, u_int npipes)
{
	size_t old_len, len;
	struct netmap_pipe_adapter **npa;

	if (npipes <= na->na_max_pipes)
		/* we already have more entries than requested */
		return 0;

	if (npipes < na->na_next_pipe || npipes > NM_MAXPIPES)
		return EINVAL;

	old_len = sizeof(struct netmap_pipe_adapter *)*na->na_max_pipes;
	len = sizeof(struct netmap_pipe_adapter *) * npipes;
	npa = nm_os_realloc(na->na_pipes, len, old_len);
	if (npa == NULL)
		return ENOMEM;

	na->na_pipes = npa;
	na->na_max_pipes = npipes;

	return 0;
}

/* deallocate the pipe array in the parent adapter */
void
netmap_pipe_dealloc(struct netmap_adapter *na)
{
	if (na->na_pipes) {
		if (na->na_next_pipe > 0) {
			nm_prerr("freeing non-empty pipe array for %s (%d dangling pipes)!",
			    na->name, na->na_next_pipe);
		}
		nm_os_free(na->na_pipes);
		na->na_pipes = NULL;
		na->na_max_pipes = 0;
		na->na_next_pipe = 0;
	}
}

/* find a pipe endpoint with the given id among the parent's pipes */
static struct netmap_pipe_adapter *
netmap_pipe_find(struct netmap_adapter *parent, const char *pipe_id)
{
	int i;
	struct netmap_pipe_adapter *na;

	for (i = 0; i < parent->na_next_pipe; i++) {
		const char *na_pipe_id;
		na = parent->na_pipes[i];
		na_pipe_id = strrchr(na->up.name,
			na->role == NM_PIPE_ROLE_MASTER ? '{' : '}');
		KASSERT(na_pipe_id != NULL, ("Invalid pipe name"));
		++na_pipe_id;
		if (!strcmp(na_pipe_id, pipe_id)) {
			return na;
		}
	}
	return NULL;
}

/* add a new pipe endpoint to the parent array */
static int
netmap_pipe_add(struct netmap_adapter *parent, struct netmap_pipe_adapter *na)
{
	if (parent->na_next_pipe >= parent->na_max_pipes) {
		u_int npipes = parent->na_max_pipes ?  2*parent->na_max_pipes : 2;
		int error = nm_pipe_alloc(parent, npipes);
		if (error)
			return error;
	}

	parent->na_pipes[parent->na_next_pipe] = na;
	na->parent_slot = parent->na_next_pipe;
	parent->na_next_pipe++;
	return 0;
}

/* remove the given pipe endpoint from the parent array */
static void
netmap_pipe_remove(struct netmap_adapter *parent, struct netmap_pipe_adapter *na)
{
	u_int n;
	n = --parent->na_next_pipe;
	if (n != na->parent_slot) {
		struct netmap_pipe_adapter **p =
			&parent->na_pipes[na->parent_slot];
		*p = parent->na_pipes[n];
		(*p)->parent_slot = na->parent_slot;
	}
	parent->na_pipes[n] = NULL;
}
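
/*
 * Illustration only (not part of the driver): netmap_pipe_remove() above
 * uses the classic O(1) "swap with last" removal on the unordered
 * na_pipes[] array. A minimal standalone sketch of the same technique,
 * with hypothetical names:
 *
 *	static void
 *	swap_remove(int *arr, u_int *count, u_int slot)
 *	{
 *		u_int last = --(*count);
 *
 *		if (last != slot)
 *			arr[slot] = arr[last];	// move the last entry into the hole
 *		arr[last] = 0;			// clear the vacated tail slot
 *	}
 *
 * Whatever is moved must also update its back-pointer (parent_slot in the
 * code above), so that it can still be removed by index later on.
 */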

int
netmap_pipe_txsync(struct netmap_kring *txkring, int flags)
{
	struct netmap_kring *rxkring = txkring->pipe;
	u_int k, lim = txkring->nkr_num_slots - 1, nk;
	int m; /* slots to transfer */
	int complete; /* did we see a complete packet? */
	struct netmap_ring *txring = txkring->ring, *rxring = rxkring->ring;

	nm_prdis("%p: %s %x -> %s", txkring, txkring->name, flags, rxkring->name);
	nm_prdis(20, "TX before: hwcur %d hwtail %d cur %d head %d tail %d",
		txkring->nr_hwcur, txkring->nr_hwtail,
		txkring->rcur, txkring->rhead, txkring->rtail);

	/* update the hwtail */
	txkring->nr_hwtail = txkring->pipe_tail;

	m = txkring->rhead - txkring->nr_hwcur; /* new slots */
	if (m < 0)
		m += txkring->nkr_num_slots;

	if (m == 0) {
		/* nothing to send */
		return 0;
	}

	for (k = txkring->nr_hwcur, nk = lim + 1, complete = 0; m;
			m--, k = nm_next(k, lim), nk = (complete ? k : nk)) {
		struct netmap_slot *rs = &rxring->slot[k];
		struct netmap_slot *ts = &txring->slot[k];
		uint64_t off = nm_get_offset(rxkring, rs);

		*rs = *ts;
		if (nm_get_offset(rxkring, rs) < off) {
			nm_write_offset(rxkring, rs, off);
		}
		if (ts->flags & NS_BUF_CHANGED) {
			ts->flags &= ~NS_BUF_CHANGED;
		}
		complete = !(ts->flags & NS_MOREFRAG);
	}

	txkring->nr_hwcur = k;

	nm_prdis(20, "TX after : hwcur %d hwtail %d cur %d head %d tail %d k %d",
		txkring->nr_hwcur, txkring->nr_hwtail,
		txkring->rcur, txkring->rhead, txkring->rtail, k);

	if (likely(nk <= lim)) {
		mb(); /* make sure the slots are updated before publishing them */
		rxkring->pipe_tail = nk; /* only publish complete packets */
		rxkring->nm_notify(rxkring, 0);
	}

	return 0;
}
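
/*
 * Usage sketch (illustration only, not compiled here): the txsync above
 * is driven from userspace through the regular netmap data path. A
 * minimal producer for the master end of a hypothetical pipe "vale0:x{1",
 * using the legacy nm_open() helpers from <net/netmap_user.h>, could be:
 *
 *	struct nm_desc *d = nm_open("vale0:x{1", NULL, 0, NULL);
 *	struct netmap_ring *ring = NETMAP_TXRING(d->nifp, 0);
 *
 *	while (nm_ring_space(ring) > 0) {
 *		uint32_t i = ring->cur;
 *		struct netmap_slot *slot = &ring->slot[i];
 *		char *buf = NETMAP_BUF(ring, slot->buf_idx);
 *
 *		slot->len = build_packet(buf);	// hypothetical helper
 *		ring->head = ring->cur = nm_ring_next(ring, i);
 *	}
 *	ioctl(d->fd, NIOCTXSYNC, NULL);		// ends up in netmap_pipe_txsync()
 *
 * Note that only packets whose last fragment (no NS_MOREFRAG) has been
 * seen are published to the peer, as enforced by the 'complete'/'nk'
 * logic above.
 */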

int
netmap_pipe_rxsync(struct netmap_kring *rxkring, int flags)
{
	struct netmap_kring *txkring = rxkring->pipe;
	u_int k, lim = rxkring->nkr_num_slots - 1;
	int m; /* slots to release */
	struct netmap_ring *txring = txkring->ring, *rxring = rxkring->ring;

	nm_prdis("%p: %s %x -> %s", txkring, txkring->name, flags, rxkring->name);
	nm_prdis(20, "RX before: hwcur %d hwtail %d cur %d head %d tail %d",
		rxkring->nr_hwcur, rxkring->nr_hwtail,
		rxkring->rcur, rxkring->rhead, rxkring->rtail);

	/* update the hwtail */
	rxkring->nr_hwtail = rxkring->pipe_tail;

	m = rxkring->rhead - rxkring->nr_hwcur; /* released slots */
	if (m < 0)
		m += rxkring->nkr_num_slots;

	if (m == 0) {
		/* nothing to release */
		return 0;
	}

	for (k = rxkring->nr_hwcur; m; m--, k = nm_next(k, lim)) {
		struct netmap_slot *rs = &rxring->slot[k];
		struct netmap_slot *ts = &txring->slot[k];

		/* copy the slot. This also propagates any offset */
		*ts = *rs;
		if (rs->flags & NS_BUF_CHANGED) {
			rs->flags &= ~NS_BUF_CHANGED;
		}
	}

	mb(); /* make sure the slots are updated before publishing them */
	txkring->pipe_tail = nm_prev(k, lim);
	rxkring->nr_hwcur = k;

	nm_prdis(20, "RX after : hwcur %d hwtail %d cur %d head %d tail %d k %d",
		rxkring->nr_hwcur, rxkring->nr_hwtail,
		rxkring->rcur, rxkring->rhead, rxkring->rtail, k);

	txkring->nm_notify(txkring, 0);

	return 0;
}
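
/*
 * Usage sketch (illustration only): the matching consumer on the slave
 * end of the same hypothetical pipe blocks on the file descriptor and
 * then walks the RX ring; advancing 'head' past the consumed slots is
 * what rxsync turns into released slots for the producer:
 *
 *	struct nm_desc *d = nm_open("vale0:x}1", NULL, 0, NULL);
 *	struct pollfd pfd = { .fd = d->fd, .events = POLLIN };
 *
 *	for (;;) {
 *		struct netmap_ring *ring = NETMAP_RXRING(d->nifp, 0);
 *
 *		poll(&pfd, 1, -1);	// ends up in netmap_pipe_rxsync()
 *		while (!nm_ring_empty(ring)) {
 *			struct netmap_slot *slot = &ring->slot[ring->cur];
 *
 *			consume(NETMAP_BUF(ring, slot->buf_idx), slot->len);	// hypothetical
 *			ring->head = ring->cur = nm_ring_next(ring, ring->cur);
 *		}
 *	}
 */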

/* Pipe endpoints are created and destroyed together, so that endpoints do not
 * have to check for the existence of their peer at each ?xsync.
 *
 * To play well with the existing netmap infrastructure (refcounts etc.), we
 * adopt the following strategy:
 *
 * 1) The first endpoint that is created also creates the other endpoint and
 * grabs a reference to it.
 *
 *    state A)  user1 --> endpoint1 --> endpoint2
 *
 * 2) If, starting from state A, endpoint2 is then registered, endpoint1 gives
 * its reference to the user:
 *
 *    state B)  user1 --> endpoint1     endpoint2 <--- user2
 *
 * 3) Assume that, starting from state B, endpoint2 is closed. In the unregister
 * callback endpoint2 notes that endpoint1 is still active and adds a reference
 * from endpoint1 to itself. When user2 then releases her own reference,
 * endpoint2 is not destroyed and we are back to state A. A symmetrical state
 * would be reached if endpoint1 were released instead.
 *
 * 4) If, starting from state A, endpoint1 is closed, the destructor notes that
 * it owns a reference to endpoint2 and releases it.
 *
 * Something similar goes on for the creation and destruction of the krings.
 */
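
/*
 * Illustration only: the states above are driven purely by userspace
 * opens and closes of the two endpoint names. For a hypothetical pipe
 * with id "1" over the port "vale0:x", a possible sequence is:
 *
 *	struct nm_desc *d1 = nm_open("vale0:x{1", NULL, 0, NULL);
 *						// creates both endpoints: state A
 *	struct nm_desc *d2 = nm_open("vale0:x}1", NULL, 0, NULL);
 *						// second endpoint registered: state B
 *	nm_close(d2);				// back to state A, e1 keeps e2 alive
 *	nm_close(d1);				// case 4: both endpoints are destroyed
 *
 * nm_open()/nm_close() are the legacy helpers from <net/netmap_user.h>;
 * the same sequence can be written with the newer nmport_open() and
 * nmport_close() helpers from libnetmap.
 */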


int netmap_pipe_krings_create_both(struct netmap_adapter *na,
				  struct netmap_adapter *ona)
{
	enum txrx t;
	int error;
	int i;

	/* case 1) below */
	nm_prdis("%p: case 1, create both ends", na);
	error = netmap_krings_create(na, 0);
	if (error)
		return error;

	/* create the krings of the other end */
	error = netmap_krings_create(ona, 0);
	if (error)
		goto del_krings1;

	/* cross link the krings and initialize the pipe_tails */
	for_rx_tx(t) {
		enum txrx r = nm_txrx_swap(t); /* swap NR_TX <-> NR_RX */
		for (i = 0; i < nma_get_nrings(na, t); i++) {
			struct netmap_kring *k1 = NMR(na, t)[i],
					    *k2 = NMR(ona, r)[i];
			k1->pipe = k2;
			k2->pipe = k1;
			/* mark all peer-adapter rings as fake */
			k2->nr_kflags |= NKR_FAKERING;
			/* init tails */
			k1->pipe_tail = k1->nr_hwtail;
			k2->pipe_tail = k2->nr_hwtail;
		}
	}

	return 0;

del_krings1:
	netmap_krings_delete(na);
	return error;
}

/* netmap_pipe_krings_create.
 *
 * There are two cases:
 *
 * 1) state is
 *
 *        usr1 --> e1 --> e2
 *
 *    and we are e1. We have to create both sets
 *    of krings.
 *
 * 2) state is
 *
 *        usr1 --> e1 --> e2
 *
 *    and we are e2. e1 is certainly registered and our
 *    krings already exist. Nothing to do.
 */
static int
netmap_pipe_krings_create(struct netmap_adapter *na)
{
	struct netmap_pipe_adapter *pna =
		(struct netmap_pipe_adapter *)na;
	struct netmap_adapter *ona = &pna->peer->up;

	if (pna->peer_ref)
		return netmap_pipe_krings_create_both(na, ona);

	return 0;
}

int
netmap_pipe_reg_both(struct netmap_adapter *na, struct netmap_adapter *ona)
{
	int i, error = 0;
	enum txrx t;

	for_rx_tx(t) {
		for (i = 0; i < nma_get_nrings(na, t); i++) {
			struct netmap_kring *kring = NMR(na, t)[i];

			if (nm_kring_pending_on(kring)) {
				/* mark the peer ring as needed */
				kring->pipe->nr_kflags |= NKR_NEEDRING;
			}
		}
	}

	/* create all missing needed rings on the other end.
	 * Either our end, or the other, has been marked as
	 * fake, so the allocation will not be done twice.
	 */
	error = netmap_mem_rings_create(ona);
	if (error)
		return error;

	/* If there was no error, put our rings in netmap mode */
	for_rx_tx(t) {
		for (i = 0; i < nma_get_nrings(na, t); i++) {
			struct netmap_kring *kring = NMR(na, t)[i];
			if (nm_kring_pending_on(kring)) {

				kring->nr_mode = NKR_NETMAP_ON;
				if ((kring->nr_kflags & NKR_FAKERING) &&
				    (kring->pipe->nr_kflags & NKR_FAKERING)) {
					/* this is a re-open of a pipe
					 * end-point kept alive by the other end.
					 * We need to leave everything as it is
					 */
					continue;
				}

				/* copy the buffers from the non-fake ring
				 * (this also propagates any initial offset)
				 */
				memcpy(kring->pipe->ring->slot,
				       kring->ring->slot,
				       sizeof(struct netmap_slot) *
						kring->nkr_num_slots);
				/* copy the offset-related fields */
				*(uint64_t *)(uintptr_t)&kring->pipe->ring->offset_mask =
					kring->ring->offset_mask;
				*(uint64_t *)(uintptr_t)&kring->pipe->ring->buf_align =
					kring->ring->buf_align;
				/* mark both rings as fake and needed,
				 * so that buffers will not be
				 * deleted by the standard machinery
				 * (we will delete them by ourselves in
				 * netmap_pipe_krings_delete)
				 */
				kring->nr_kflags |=
					(NKR_FAKERING | NKR_NEEDRING);
				kring->nr_mode = NKR_NETMAP_ON;
			}
		}
	}

	return 0;
}

/* netmap_pipe_reg.
 *
 * There are two cases on registration (onoff==1)
 *
 * 1.a) state is
 *
 *        usr1 --> e1 --> e2
 *
 *      and we are e1. Create the needed rings of the
 *      other end.
 *
 * 1.b) state is
 *
 *        usr1 --> e1 --> e2 <-- usr2
 *
 *      and we are e2. Drop the ref e1 is holding.
 *
 *  There are two additional cases on unregister (onoff==0)
 *
 *  2.a) state is
 *
 *         usr1 --> e1 --> e2
 *
 *       and we are e1. Nothing special to do, e2 will
 *       be cleaned up by the destructor of e1.
 *
 *  2.b) state is
 *
 *         usr1 --> e1     e2 <-- usr2
 *
 *       and we are either e1 or e2. Add a ref from the
 *       other end.
 */
static int
netmap_pipe_reg(struct netmap_adapter *na, int onoff)
{
	struct netmap_pipe_adapter *pna =
		(struct netmap_pipe_adapter *)na;
	struct netmap_adapter *ona = &pna->peer->up;
	int error = 0;

	nm_prdis("%p: onoff %d", na, onoff);
	if (onoff) {
		error = netmap_pipe_reg_both(na, ona);
		if (error) {
			return error;
		}
		if (na->active_fds == 0)
			na->na_flags |= NAF_NETMAP_ON;
	} else {
		if (na->active_fds == 0)
			na->na_flags &= ~NAF_NETMAP_ON;
		netmap_krings_mode_commit(na, onoff);
	}

	if (na->active_fds) {
		nm_prdis("active_fds %d", na->active_fds);
		return 0;
	}

	if (pna->peer_ref) {
		nm_prdis("%p: case 1.a or 2.a, nothing to do", na);
		return 0;
	}
	if (onoff) {
		nm_prdis("%p: case 1.b, drop peer", na);
		pna->peer->peer_ref = 0;
		netmap_adapter_put(na);
	} else {
		nm_prdis("%p: case 2.b, grab peer", na);
		netmap_adapter_get(na);
		pna->peer->peer_ref = 1;
	}
	return error;
}

void
netmap_pipe_krings_delete_both(struct netmap_adapter *na,
			       struct netmap_adapter *ona)
{
	struct netmap_adapter *sna;
	enum txrx t;
	int i;

	/* case 1) below */
	nm_prdis("%p: case 1, deleting everything", na);
	/* To avoid double-frees we zero-out all the buffers in the kernel part
	 * of each ring. The reason is this: If the user is behaving correctly,
	 * all buffers are found in exactly one slot in the userspace part of
	 * some ring.  If the user is not behaving correctly, we cannot release
	 * buffers cleanly anyway. In the latter case, the allocator will
	 * return to a clean state only when all its users have closed.
	 */
	sna = na;
cleanup:
	for_rx_tx(t) {
		for (i = 0; i < nma_get_nrings(sna, t); i++) {
			struct netmap_kring *kring = NMR(sna, t)[i];
			struct netmap_ring *ring = kring->ring;
			uint32_t j, lim = kring->nkr_num_slots - 1;

			nm_prdis("%s ring %p hwtail %u hwcur %u",
				kring->name, ring, kring->nr_hwtail, kring->nr_hwcur);

			if (ring == NULL)
				continue;

			if (kring->tx == NR_RX)
				ring->slot[kring->pipe_tail].buf_idx = 0;

			for (j = nm_next(kring->pipe_tail, lim);
			     j != kring->nr_hwcur;
			     j = nm_next(j, lim))
			{
				nm_prdis("%s[%d] %u", kring->name, j, ring->slot[j].buf_idx);
				ring->slot[j].buf_idx = 0;
			}
			kring->nr_kflags &= ~(NKR_FAKERING | NKR_NEEDRING);
		}

	}
	if (sna != ona && ona->tx_rings) {
		sna = ona;
		goto cleanup;
	}

	netmap_mem_rings_delete(na);
	netmap_krings_delete(na); /* also zeroes tx_rings etc. */

	if (ona->tx_rings == NULL) {
		/* already deleted, we must be on a
		 * cleanup-after-error path */
		return;
	}
	netmap_mem_rings_delete(ona);
	netmap_krings_delete(ona);
}
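
/*
 * Worked example (illustration only): with nkr_num_slots == 8,
 * pipe_tail == 2 and nr_hwcur == 6 on a given kring, the cleanup loop
 * above visits j = 3, 4, 5 and clears those buf_idx entries; the
 * remaining slots are left alone because, per the comment above, their
 * buffers show up in the userspace part of some ring and are accounted
 * for there. For RX krings the slot at pipe_tail itself is cleared as
 * well (the kring->tx == NR_RX branch).
 */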

/* netmap_pipe_krings_delete.
 *
 * There are two cases:
 *
 * 1) state is
 *
 *                usr1 --> e1 --> e2
 *
 *    and we are e1 (e2 is not registered, so krings_delete cannot be
 *    called on it);
 *
 * 2) state is
 *
 *                usr1 --> e1     e2 <-- usr2
 *
 *    and we are either e1 or e2.
 *
 * In the former case we have to also delete the krings of e2;
 * in the latter case we do nothing.
 */
static void
netmap_pipe_krings_delete(struct netmap_adapter *na)
{
	struct netmap_pipe_adapter *pna =
		(struct netmap_pipe_adapter *)na;
	struct netmap_adapter *ona; /* na of the other end */

	if (!pna->peer_ref) {
		nm_prdis("%p: case 2, kept alive by peer",  na);
		return;
	}
	ona = &pna->peer->up;
	netmap_pipe_krings_delete_both(na, ona);
}


static void
netmap_pipe_dtor(struct netmap_adapter *na)
{
	struct netmap_pipe_adapter *pna =
		(struct netmap_pipe_adapter *)na;
	nm_prdis("%p %p", na, pna->parent_ifp);
	if (pna->peer_ref) {
		nm_prdis("%p: clean up peer", na);
		pna->peer_ref = 0;
		netmap_adapter_put(&pna->peer->up);
	}
	if (pna->role == NM_PIPE_ROLE_MASTER)
		netmap_pipe_remove(pna->parent, pna);
	if (pna->parent_ifp)
		if_rele(pna->parent_ifp);
	netmap_adapter_put(pna->parent);
	pna->parent = NULL;
}

int
netmap_get_pipe_na(struct nmreq_header *hdr, struct netmap_adapter **na,
		struct netmap_mem_d *nmd, int create)
{
	struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body;
	struct netmap_adapter *pna; /* parent adapter */
	struct netmap_pipe_adapter *mna, *sna, *reqna;
	if_t ifp = NULL;
	const char *pipe_id = NULL;
	int role = 0;
	int error, retries = 0;
	char *cbra, pipe_char;

	/* Try to parse the pipe syntax 'xx{yy' or 'xx}yy'. */
	cbra = strrchr(hdr->nr_name, '{');
	if (cbra != NULL) {
		role = NM_PIPE_ROLE_MASTER;
	} else {
		cbra = strrchr(hdr->nr_name, '}');
		if (cbra != NULL) {
			role = NM_PIPE_ROLE_SLAVE;
		} else {
			nm_prdis("not a pipe");
			return 0;
		}
	}
	pipe_char = *cbra;
	pipe_id = cbra + 1;
	if (*pipe_id == '\0' || cbra == hdr->nr_name) {
		/* Bracket is the last character, so pipe name is missing;
		 * or bracket is the first character, so base port name
		 * is missing. */
		return EINVAL;
	}

	if (req->nr_mode != NR_REG_ALL_NIC && req->nr_mode != NR_REG_ONE_NIC) {
		/* We only accept modes involving hardware rings. */
		return EINVAL;
	}

	/* first, try to find the parent adapter */
	for (;;) {
		int create_error;

		/* Temporarily remove the pipe suffix. */
		*cbra = '\0';
		error = netmap_get_na(hdr, &pna, &ifp, nmd, create);
		/* Restore the pipe suffix. */
		*cbra = pipe_char;
		if (!error)
			break;
		if (error != ENXIO || retries++) {
			nm_prdis("parent lookup failed: %d", error);
			return error;
		}
		nm_prdis("try to create a persistent vale port");
		/* create a persistent vale port and try again */
		*cbra = '\0';
		NMG_UNLOCK();
		create_error = netmap_vi_create(hdr, 1 /* autodelete */);
		NMG_LOCK();
		*cbra = pipe_char;
		if (create_error && create_error != EEXIST) {
			if (create_error != EOPNOTSUPP) {
				nm_prerr("failed to create a persistent vale port: %d",
				    create_error);
			}
			return error;
		}
	}

	if (NETMAP_OWNED_BY_KERN(pna)) {
		nm_prdis("parent busy");
		error = EBUSY;
		goto put_out;
	}

	/* next, lookup the pipe id in the parent list */
	reqna = NULL;
	mna = netmap_pipe_find(pna, pipe_id);
	if (mna) {
		if (mna->role == role) {
			nm_prdis("found %s directly at %d", pipe_id, mna->parent_slot);
			reqna = mna;
		} else {
			nm_prdis("found %s indirectly at %d", pipe_id, mna->parent_slot);
			reqna = mna->peer;
		}
		/* the pipe we have found already holds a ref to the parent,
		 * so we need to drop the one we got from netmap_get_na()
		 */
		netmap_unget_na(pna, ifp);
		goto found;
	}
	nm_prdis("pipe %s not found, create %d", pipe_id, create);
	if (!create) {
		error = ENODEV;
		goto put_out;
	}
	/* we create both master and slave.
	 * The endpoint we were asked for holds a reference to
	 * the other one.
	 */
	mna = nm_os_malloc(sizeof(*mna));
	if (mna == NULL) {
		error = ENOMEM;
		goto put_out;
	}
	snprintf(mna->up.name, sizeof(mna->up.name), "%s{%s", pna->name, pipe_id);

	mna->role = NM_PIPE_ROLE_MASTER;
	mna->parent = pna;
	mna->parent_ifp = ifp;

	mna->up.nm_txsync = netmap_pipe_txsync;
	mna->up.nm_rxsync = netmap_pipe_rxsync;
	mna->up.nm_register = netmap_pipe_reg;
	mna->up.nm_dtor = netmap_pipe_dtor;
	mna->up.nm_krings_create = netmap_pipe_krings_create;
	mna->up.nm_krings_delete = netmap_pipe_krings_delete;
	mna->up.nm_mem = netmap_mem_get(pna->nm_mem);
	mna->up.na_flags |= NAF_MEM_OWNER | NAF_OFFSETS;
	mna->up.na_lut = pna->na_lut;

	mna->up.num_tx_rings = req->nr_tx_rings;
	nm_bound_var(&mna->up.num_tx_rings, 1,
			1, NM_PIPE_MAXRINGS, NULL);
	mna->up.num_rx_rings = req->nr_rx_rings;
	nm_bound_var(&mna->up.num_rx_rings, 1,
			1, NM_PIPE_MAXRINGS, NULL);
	mna->up.num_tx_desc = req->nr_tx_slots;
	nm_bound_var(&mna->up.num_tx_desc, pna->num_tx_desc,
			1, NM_PIPE_MAXSLOTS, NULL);
	mna->up.num_rx_desc = req->nr_rx_slots;
	nm_bound_var(&mna->up.num_rx_desc, pna->num_rx_desc,
			1, NM_PIPE_MAXSLOTS, NULL);
	error = netmap_attach_common(&mna->up);
	if (error)
		goto free_mna;
	/* register the master with the parent */
	error = netmap_pipe_add(pna, mna);
	if (error)
		goto free_mna;

	/* create the slave */
	sna = nm_os_malloc(sizeof(*mna));
	if (sna == NULL) {
		error = ENOMEM;
		goto unregister_mna;
	}
	/* most fields are the same, copy from master and then fix */
	*sna = *mna;
	sna->up.nm_mem = netmap_mem_get(mna->up.nm_mem);
	/* swap the number of tx/rx rings and slots */
	sna->up.num_tx_rings = mna->up.num_rx_rings;
	sna->up.num_tx_desc  = mna->up.num_rx_desc;
	sna->up.num_rx_rings = mna->up.num_tx_rings;
	sna->up.num_rx_desc  = mna->up.num_tx_desc;
	snprintf(sna->up.name, sizeof(sna->up.name), "%s}%s", pna->name, pipe_id);
	sna->role = NM_PIPE_ROLE_SLAVE;
	error = netmap_attach_common(&sna->up);
	if (error)
		goto free_sna;

	/* join the two endpoints */
	mna->peer = sna;
	sna->peer = mna;

	/* we already have a reference to the parent, but we
	 * need another one for the other endpoint we created
	 */
	netmap_adapter_get(pna);
	/* likewise for the ifp, if any */
	if (ifp)
		if_ref(ifp);

	if (role == NM_PIPE_ROLE_MASTER) {
		reqna = mna;
		mna->peer_ref = 1;
		netmap_adapter_get(&sna->up);
	} else {
		reqna = sna;
		sna->peer_ref = 1;
		netmap_adapter_get(&mna->up);
	}
	nm_prdis("created master %p and slave %p", mna, sna);
found:

	nm_prdis("pipe %s %s at %p", pipe_id,
		(reqna->role == NM_PIPE_ROLE_MASTER ? "master" : "slave"), reqna);
	*na = &reqna->up;
	netmap_adapter_get(*na);

	/* keep the reference to the parent.
	 * It will be released by the req destructor
	 */

	return 0;

free_sna:
	nm_os_free(sna);
unregister_mna:
	netmap_pipe_remove(pna, mna);
free_mna:
	nm_os_free(mna);
put_out:
	netmap_unget_na(pna, ifp);
	return error;
}
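
/*
 * Name-parsing examples for netmap_get_pipe_na() (illustration only,
 * with hypothetical port names):
 *
 *	hdr->nr_name	parent		role	pipe_id
 *	"vale0:x{3"	"vale0:x"	master	"3"
 *	"vale0:x}3"	"vale0:x"	slave	"3"
 *	"em0{foo"	"em0"		master	"foo"
 *	"vale0:x"	-		-	not a pipe, return 0
 *	"vale0:x{"	-		-	EINVAL, empty pipe id
 *	"{3"		-		-	EINVAL, empty port name
 *
 * If the parent port does not exist and 'create' is set, the code above
 * retries once after trying to create a persistent VALE port with the
 * parent name.
 */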


#endif /* WITH_PIPES */