1 /* 2 * Copyright (C) 2014-2016 Giuseppe Lettieri 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 
 */

/* $FreeBSD$ */

#if defined(__FreeBSD__)
#include <sys/cdefs.h> /* prerequisite */

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/param.h>	/* defines used in kernel.h */
#include <sys/kernel.h>	/* types used in module initialization */
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <sys/socket.h> /* sockaddrs */
#include <net/if.h>
#include <net/if_var.h>
#include <machine/bus.h>	/* bus_dmamap_* */
#include <sys/refcount.h>


#elif defined(linux)

#include "bsd_glue.h"

#elif defined(__APPLE__)

#warning OSX support is only partial
#include "osx_glue.h"

#elif defined(_WIN32)
#include "win_glue.h"

#else

#error Unsupported platform

#endif /* unsupported */

/*
 * common headers
 */

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>

#ifdef WITH_PIPES

/* upper bound for the number of slots in a pipe ring */
#define NM_PIPE_MAXSLOTS 4096

/* sysctl kept only so that old configurations do not break;
 * the value is no longer read anywhere.
 */
static int netmap_default_pipes = 0; /* ignored, kept for compatibility */
SYSBEGIN(vars_pipes);
SYSCTL_DECL(_dev_netmap);
SYSCTL_INT(_dev_netmap, OID_AUTO, default_pipes, CTLFLAG_RW, &netmap_default_pipes, 0 , "");
SYSEND;

/* Allocate (or grow) the pipe array in the parent adapter.
 *
 * Returns 0 on success (including the no-op case where the array is
 * already large enough), EINVAL if the request would shrink below the
 * number of pipes in use or exceed NM_MAXPIPES, ENOMEM on allocation
 * failure. On failure the old array is left untouched.
 */
static int
nm_pipe_alloc(struct netmap_adapter *na, u_int npipes)
{
	size_t old_len, len;
	struct netmap_pipe_adapter **npa;

	if (npipes <= na->na_max_pipes)
		/* we already have more entries than requested */
		return 0;

	/* cannot shrink below the entries currently in use */
	if (npipes < na->na_next_pipe || npipes > NM_MAXPIPES)
		return EINVAL;

	old_len = sizeof(struct netmap_pipe_adapter *)*na->na_max_pipes;
	len = sizeof(struct netmap_pipe_adapter *) * npipes;
	npa = nm_os_realloc(na->na_pipes, len, old_len);
	if (npa == NULL)
		return ENOMEM;

	na->na_pipes = npa;
	na->na_max_pipes = npipes;

	return 0;
}

/* deallocate the parent array in the parent adapter */
void
netmap_pipe_dealloc(struct netmap_adapter *na)
{
	if (na->na_pipes) {
		if (na->na_next_pipe > 0) {
			/* should not happen: endpoints ought to have
			 * removed themselves before the parent goes away
			 */
			D("freeing not empty pipe array for %s (%d dangling pipes)!", na->name,
					na->na_next_pipe);
		}
		nm_os_free(na->na_pipes);
		na->na_pipes = NULL;
		na->na_max_pipes = 0;
		na->na_next_pipe = 0;
	}
}

/* Find a pipe endpoint with the given id among the parent's pipes.
 * Linear scan over the first na_next_pipe entries; returns NULL if
 * no endpoint with that id is registered.
 */
static struct netmap_pipe_adapter *
netmap_pipe_find(struct netmap_adapter *parent, u_int pipe_id)
{
	int i;
	struct netmap_pipe_adapter *na;

	for (i = 0; i < parent->na_next_pipe; i++) {
		na = parent->na_pipes[i];
		if (na->id == pipe_id) {
			return na;
		}
	}
	return NULL;
}

/* Add a new pipe endpoint to the parent array, growing the array
 * (doubling, starting from 2) when full. Records the endpoint's slot
 * index in na->parent_slot so it can be removed in O(1) later.
 * Returns 0 or the error from nm_pipe_alloc().
 */
static int
netmap_pipe_add(struct netmap_adapter *parent, struct netmap_pipe_adapter *na)
{
	if (parent->na_next_pipe >= parent->na_max_pipes) {
		u_int npipes = parent->na_max_pipes ?  2*parent->na_max_pipes : 2;
		int error = nm_pipe_alloc(parent, npipes);
		if (error)
			return error;
	}

	parent->na_pipes[parent->na_next_pipe] = na;
	na->parent_slot = parent->na_next_pipe;
	parent->na_next_pipe++;
	return 0;
}

/* Remove the given pipe endpoint from the parent array.
 * O(1): the last entry is moved into the freed slot (and its
 * parent_slot updated), so the array stays dense.
 */
static void
netmap_pipe_remove(struct netmap_adapter *parent, struct netmap_pipe_adapter *na)
{
	u_int n;
	n = --parent->na_next_pipe;
	if (n != na->parent_slot) {
		struct netmap_pipe_adapter **p =
			&parent->na_pipes[na->parent_slot];
		*p = parent->na_pipes[n];
		(*p)->parent_slot = na->parent_slot;
	}
	parent->na_pipes[n] = NULL;
}

/* Producer side of the pipe: move the newly available TX slots into the
 * peer RX ring by swapping the slot descriptors (zero-copy: only buffer
 * indices change hands, hence NS_BUF_CHANGED on both sides).
 *
 * The number of transferred slots is min(new TX slots, free RX space).
 * The two mb() barriers order (1) the slot writes before publishing the
 * new rxkring->nr_hwtail and (2) the hwtail update before the peer
 * notification; the first one pairs with the mb() in rxsync below.
 * Always returns 0.
 */
int
netmap_pipe_txsync(struct netmap_kring *txkring, int flags)
{
	struct netmap_kring *rxkring = txkring->pipe;
	u_int limit; /* slots to transfer */
	u_int j, k, lim_tx = txkring->nkr_num_slots - 1,
		lim_rx = rxkring->nkr_num_slots - 1;
	int m, busy;

	ND("%p: %s %x -> %s", txkring, txkring->name, flags, rxkring->name);
	ND(2, "before: hwcur %d hwtail %d cur %d head %d tail %d", txkring->nr_hwcur, txkring->nr_hwtail,
		txkring->rcur, txkring->rhead, txkring->rtail);

	j = rxkring->nr_hwtail; /* RX */
	k = txkring->nr_hwcur;  /* TX */
	m = txkring->rhead - txkring->nr_hwcur; /* new slots */
	if (m < 0)
		m += txkring->nkr_num_slots;
	limit = m;
	m = lim_rx; /* max avail space on destination */
	busy = j - rxkring->nr_hwcur; /* busy slots */
	if (busy < 0)
		busy += rxkring->nkr_num_slots;
	m -= busy; /* subtract busy slots */
	ND(2, "m %d limit %d", m, limit);
	if (m < limit)
		limit = m;

	if (limit == 0) {
		/* either the rxring is full, or nothing to send */
		return 0;
	}

	while (limit-- > 0) {
		struct netmap_slot *rs = &rxkring->ring->slot[j];
		struct netmap_slot *ts = &txkring->ring->slot[k];
		struct netmap_slot tmp;

		/* swap the slots */
		tmp = *rs;
		*rs = *ts;
		*ts = tmp;

		/* report the buffer change */
		ts->flags |= NS_BUF_CHANGED;
		rs->flags |= NS_BUF_CHANGED;

		j = nm_next(j, lim_rx);
		k = nm_next(k, lim_tx);
	}

	mb(); /* make sure the slots are updated before publishing them */
	rxkring->nr_hwtail = j;
	txkring->nr_hwcur = k;
	txkring->nr_hwtail = nm_prev(k, lim_tx);

	ND(2, "after: hwcur %d hwtail %d cur %d head %d tail %d j %d", txkring->nr_hwcur, txkring->nr_hwtail,
		txkring->rcur, txkring->rhead, txkring->rtail, j);

	mb(); /* make sure rxkring->nr_hwtail is updated before notifying */
	rxkring->nm_notify(rxkring, 0);

	return 0;
}

/* Consumer side of the pipe: advance nr_hwcur to rhead, returning the
 * slots the user has released to the producer, and wake the peer TX
 * ring if anything was actually released. The mb() after the update
 * pairs with the first mb() in netmap_pipe_txsync(); the second one
 * orders the nr_hwcur update before the notification.
 * Always returns 0.
 */
int
netmap_pipe_rxsync(struct netmap_kring *rxkring, int flags)
{
	struct netmap_kring *txkring = rxkring->pipe;
	uint32_t oldhwcur = rxkring->nr_hwcur;

	ND("%s %x <- %s", rxkring->name, flags, txkring->name);
	rxkring->nr_hwcur = rxkring->rhead; /* recover user-released slots */
	ND(5, "hwcur %d hwtail %d cur %d head %d tail %d", rxkring->nr_hwcur, rxkring->nr_hwtail,
		rxkring->rcur, rxkring->rhead, rxkring->rtail);
	mb(); /* paired with the first mb() in txsync */

	if (oldhwcur != rxkring->nr_hwcur) {
		/* we have released some slots, notify the other end */
		mb(); /* make sure nr_hwcur is updated before notifying */
		txkring->nm_notify(txkring, 0);
	}
	return 0;
}

/* Pipe endpoints are created and destroyed together, so that endpoints do not
 * have to check for the existence of their peer at each ?xsync.
 *
 * To play well with the existing netmap infrastructure (refcounts etc.), we
 * adopt the following strategy:
 *
 * 1) The first endpoint that is created also creates the other endpoint and
 * grabs a reference to it.
 *
 *    state A)  user1 --> endpoint1 --> endpoint2
 *
 * 2) If, starting from state A, endpoint2 is then registered, endpoint1 gives
 * its reference to the user:
 *
 *    state B)  user1 --> endpoint1     endpoint2 <--- user2
 *
 * 3) Assume that, starting from state B endpoint2 is closed. In the unregister
 * callback endpoint2 notes that endpoint1 is still active and adds a reference
 * from endpoint1 to itself. When user2 then releases her own reference,
 * endpoint2 is not destroyed and we are back to state A. A symmetrical state
 * would be reached if endpoint1 were released instead.
 *
 * 4) If, starting from state A, endpoint1 is closed, the destructor notes that
 * it owns a reference to endpoint2 and releases it.
 *
 * Something similar goes on for the creation and destruction of the krings.
 */


/* netmap_pipe_krings_create.
 *
 * There are two cases:
 *
 * 1) state is
 *
 *        usr1 --> e1 --> e2
 *
 *    and we are e1. We have to create both sets
 *    of krings.
 *
 * 2) state is
 *
 *        usr1 --> e1 --> e2
 *
 *    and we are e2. e1 is certainly registered and our
 *    krings already exist. Nothing to do.
 */
static int
netmap_pipe_krings_create(struct netmap_adapter *na)
{
	struct netmap_pipe_adapter *pna =
		(struct netmap_pipe_adapter *)na;
	struct netmap_adapter *ona = &pna->peer->up;	/* the other endpoint */
	int error = 0;
	enum txrx t;

	/* peer_ref set means we are the endpoint that owns the peer,
	 * i.e. case 1) above; otherwise the krings already exist.
	 */
	if (pna->peer_ref) {
		int i;

		/* case 1) above */
		ND("%p: case 1, create both ends", na);
		error = netmap_krings_create(na, 0);
		if (error)
			goto err;

		/* create the krings of the other end */
		error = netmap_krings_create(ona, 0);
		if (error)
			goto del_krings1;

		/* cross link the krings: our TX ring i is paired with the
		 * peer's RX ring i and vice versa, so each ?xsync can reach
		 * its counterpart through kring->pipe.
		 */
		for_rx_tx(t) {
			enum txrx r = nm_txrx_swap(t); /* swap NR_TX <-> NR_RX */
			for (i = 0; i < nma_get_nrings(na, t); i++) {
				NMR(na, t)[i].pipe = NMR(ona, r) + i;
				NMR(ona, r)[i].pipe = NMR(na, t) + i;
			}
		}

	}
	return 0;

del_krings1:
	netmap_krings_delete(na);
err:
	return error;
}

/* netmap_pipe_reg.
 *
 * There are two cases on registration (onoff==1)
 *
 * 1.a) state is
 *
 *        usr1 --> e1 --> e2
 *
 *      and we are e1. Create the needed rings of the
 *      other end.
 *
 * 1.b) state is
 *
 *        usr1 --> e1 --> e2 <-- usr2
 *
 *      and we are e2. Drop the ref e1 is holding.
 *
 * There are two additional cases on unregister (onoff==0)
 *
 * 2.a) state is
 *
 *        usr1 --> e1 --> e2
 *
 *      and we are e1. Nothing special to do, e2 will
 *      be cleaned up by the destructor of e1.
 *
 * 2.b) state is
 *
 *        usr1 --> e1     e2 <-- usr2
 *
 *      and we are either e1 or e2. Add a ref from the
 *      other end and hide our rings.
 */
static int
netmap_pipe_reg(struct netmap_adapter *na, int onoff)
{
	struct netmap_pipe_adapter *pna =
		(struct netmap_pipe_adapter *)na;
	struct netmap_adapter *ona = &pna->peer->up;	/* the other endpoint */
	int i, error = 0;
	enum txrx t;

	ND("%p: onoff %d", na, onoff);
	if (onoff) {
		for_rx_tx(t) {
			for (i = 0; i < nma_get_nrings(na, t); i++) {
				struct netmap_kring *kring = &NMR(na, t)[i];

				if (nm_kring_pending_on(kring)) {
					/* mark the peer ring as needed */
					kring->pipe->nr_kflags |= NKR_NEEDRING;
				}
			}
		}

		/* create all missing needed rings on the other end */
		error = netmap_mem_rings_create(ona);
		if (error)
			return error;

		/* In case of no error we put our rings in netmap mode */
		for_rx_tx(t) {
			/* the +1 also covers the host ring */
			for (i = 0; i < nma_get_nrings(na, t) + 1; i++) {
				struct netmap_kring *kring = &NMR(na, t)[i];

				if (nm_kring_pending_on(kring)) {
					kring->nr_mode = NKR_NETMAP_ON;
				}
			}
		}
		if (na->active_fds == 0)
			na->na_flags |= NAF_NETMAP_ON;
	} else {
		if (na->active_fds == 0)
			na->na_flags &= ~NAF_NETMAP_ON;
		for_rx_tx(t) {
			for (i = 0; i < nma_get_nrings(na, t) + 1; i++) {
				struct netmap_kring *kring = &NMR(na, t)[i];

				if (nm_kring_pending_off(kring)) {
					kring->nr_mode = NKR_NETMAP_OFF;
					/* mark the peer ring as no longer needed by us
					 * (it may still be kept if somebody else is using it)
					 */
					if (kring->pipe) {
						kring->pipe->nr_kflags &= ~NKR_NEEDRING;
					}
				}
			}
		}
		/* delete all the peer rings that are no longer needed */
		netmap_mem_rings_delete(ona);
	}

	if (na->active_fds) {
		/* other users of this endpoint still around: the
		 * refcount hand-off below only happens on the last one
		 */
		ND("active_fds %d", na->active_fds);
		return 0;
	}

	if (pna->peer_ref) {
		ND("%p: case 1.a or 2.a, nothing to do", na);
		return 0;
	}
	if (onoff) {
		/* case 1.b: hand the reference the peer was holding on
		 * us over to the registering user
		 */
		ND("%p: case 1.b, drop peer", na);
		pna->peer->peer_ref = 0;
		netmap_adapter_put(na);
	} else {
		/* case 2.b: keep ourselves alive through the peer */
		ND("%p: case 2.b, grab peer", na);
		netmap_adapter_get(na);
		pna->peer->peer_ref = 1;
	}
	return error;
}

/* netmap_pipe_krings_delete.
 *
 * There are two cases:
 *
 * 1) state is
 *
 *        usr1 --> e1 --> e2
 *
 *    and we are e1 (e2 is not registered, so krings_delete cannot be
 *    called on it);
 *
 * 2) state is
 *
 *        usr1 --> e1     e2 <-- usr2
 *
 *    and we are either e1 or e2.
 *
 * In the former case we have to also delete the krings of e2;
 * in the latter case we do nothing (note that our krings
 * have already been hidden in the unregister callback).
 */
static void
netmap_pipe_krings_delete(struct netmap_adapter *na)
{
	struct netmap_pipe_adapter *pna =
		(struct netmap_pipe_adapter *)na;
	struct netmap_adapter *ona; /* na of the other end */

	if (!pna->peer_ref) {
		ND("%p: case 2, kept alive by peer", na);
		return;
	}
	/* case 1) above */
	ND("%p: case 1, deleting everything", na);
	netmap_krings_delete(na); /* also zeroes tx_rings etc. */
	ona = &pna->peer->up;
	if (ona->tx_rings == NULL) {
		/* already deleted, we must be on a
		 * cleanup-after-error path */
		return;
	}
	netmap_krings_delete(ona);
}


/* Destructor of a pipe endpoint, called when the last reference goes
 * away. Releases the peer reference if we own one (state A above),
 * unlinks a master from the parent's pipe array, and drops the
 * references on the parent ifp and adapter taken at creation time.
 */
static void
netmap_pipe_dtor(struct netmap_adapter *na)
{
	struct netmap_pipe_adapter *pna =
		(struct netmap_pipe_adapter *)na;
	ND("%p %p", na, pna->parent_ifp);
	if (pna->peer_ref) {
		ND("%p: clean up peer", na);
		pna->peer_ref = 0;
		netmap_adapter_put(&pna->peer->up);
	}
	/* only the master endpoint is registered in the parent array */
	if (pna->role == NR_REG_PIPE_MASTER)
		netmap_pipe_remove(pna->parent, pna);
	if (pna->parent_ifp)
		if_rele(pna->parent_ifp);
	netmap_adapter_put(pna->parent);
	pna->parent = NULL;
}

/* Lookup or create the pipe endpoint described by nmr.
 *
 * Returns 0 with *na unset if the request is not for a pipe, 0 with a
 * referenced *na on success, or an error (EBUSY parent owned by the
 * kernel, ENODEV pipe not found and !create, ENOMEM, or the parent
 * lookup/creation error). Both endpoints are created together; the
 * requested one holds a reference to its peer (state A above).
 * NOTE(review): presumably called under NMG_LOCK, given the
 * NMG_UNLOCK/NMG_LOCK pair around netmap_vi_create() — confirm at
 * the call sites.
 */
int
netmap_get_pipe_na(struct nmreq *nmr, struct netmap_adapter **na,
		struct netmap_mem_d *nmd, int create)
{
	struct nmreq pnmr;
	struct netmap_adapter *pna; /* parent adapter */
	struct netmap_pipe_adapter *mna, *sna, *req;
	struct ifnet *ifp = NULL;
	u_int pipe_id;
	int role = nmr->nr_flags & NR_REG_MASK;
	int error, retries = 0;

	ND("flags %x", nmr->nr_flags);

	if (role != NR_REG_PIPE_MASTER && role != NR_REG_PIPE_SLAVE) {
		/* not an error: the caller will try other adapter types */
		ND("not a pipe");
		return 0;
	}
	/* NOTE(review): redundant — role already holds this value from
	 * the initializer above */
	role = nmr->nr_flags & NR_REG_MASK;

	/* first, try to find the parent adapter */
	bzero(&pnmr, sizeof(pnmr));
	memcpy(&pnmr.nr_name, nmr->nr_name, IFNAMSIZ);
	/* pass to parent the requested number of pipes */
	pnmr.nr_arg1 = nmr->nr_arg1;
	for (;;) {
		int create_error;

		error = netmap_get_na(&pnmr, &pna, &ifp, nmd, create);
		if (!error)
			break;
		/* only retry once, and only if the name did not resolve */
		if (error != ENXIO || retries++) {
			ND("parent lookup failed: %d", error);
			return error;
		}
		ND("try to create a persistent vale port");
		/* create a persistent vale port and try again */
		NMG_UNLOCK();
		create_error = netmap_vi_create(&pnmr, 1 /* autodelete */);
		NMG_LOCK();
		if (create_error && create_error != EEXIST) {
			if (create_error != EOPNOTSUPP) {
				D("failed to create a persistent vale port: %d", create_error);
			}
			/* report the original lookup error, not create_error */
			return error;
		}
	}

	if (NETMAP_OWNED_BY_KERN(pna)) {
		ND("parent busy");
		error = EBUSY;
		goto put_out;
	}

	/* next, lookup the pipe id in the parent list */
	req = NULL;
	pipe_id = nmr->nr_ringid & NETMAP_RING_MASK;
	mna = netmap_pipe_find(pna, pipe_id);
	if (mna) {
		/* the array stores the master; pick the peer if the
		 * caller asked for the slave
		 */
		if (mna->role == role) {
			ND("found %d directly at %d", pipe_id, mna->parent_slot);
			req = mna;
		} else {
			ND("found %d indirectly at %d", pipe_id, mna->parent_slot);
			req = mna->peer;
		}
		/* the pipe we have found already holds a ref to the parent,
		 * so we need to drop the one we got from netmap_get_na()
		 */
		netmap_unget_na(pna, ifp);
		goto found;
	}
	ND("pipe %d not found, create %d", pipe_id, create);
	if (!create) {
		error = ENODEV;
		goto put_out;
	}
	/* we create both master and slave.
	 * The endpoint we were asked for holds a reference to
	 * the other one.
	 */
	mna = nm_os_malloc(sizeof(*mna));
	if (mna == NULL) {
		error = ENOMEM;
		goto put_out;
	}
	/* '{' marks the master side of the pipe in the name */
	snprintf(mna->up.name, sizeof(mna->up.name), "%s{%d", pna->name, pipe_id);

	mna->id = pipe_id;
	mna->role = NR_REG_PIPE_MASTER;
	mna->parent = pna;
	mna->parent_ifp = ifp;

	mna->up.nm_txsync = netmap_pipe_txsync;
	mna->up.nm_rxsync = netmap_pipe_rxsync;
	mna->up.nm_register = netmap_pipe_reg;
	mna->up.nm_dtor = netmap_pipe_dtor;
	mna->up.nm_krings_create = netmap_pipe_krings_create;
	mna->up.nm_krings_delete = netmap_pipe_krings_delete;
	mna->up.nm_mem = netmap_mem_get(pna->nm_mem);
	mna->up.na_flags |= NAF_MEM_OWNER;
	mna->up.na_lut = pna->na_lut;

	mna->up.num_tx_rings = 1;
	mna->up.num_rx_rings = 1;
	/* clamp the user-requested ring sizes, defaulting to the
	 * parent's values
	 */
	mna->up.num_tx_desc = nmr->nr_tx_slots;
	nm_bound_var(&mna->up.num_tx_desc, pna->num_tx_desc,
			1, NM_PIPE_MAXSLOTS, NULL);
	mna->up.num_rx_desc = nmr->nr_rx_slots;
	nm_bound_var(&mna->up.num_rx_desc, pna->num_rx_desc,
			1, NM_PIPE_MAXSLOTS, NULL);
	error = netmap_attach_common(&mna->up);
	if (error)
		goto free_mna;
	/* register the master with the parent */
	error = netmap_pipe_add(pna, mna);
	if (error)
		goto free_mna;

	/* create the slave */
	/* NOTE(review): sizeof(*mna) — same type as *sna, so the size is
	 * right, but sizeof(*sna) would be the conventional spelling */
	sna = nm_os_malloc(sizeof(*mna));
	if (sna == NULL) {
		error = ENOMEM;
		goto unregister_mna;
	}
	/* most fields are the same, copy from master and then fix */
	*sna = *mna;
	sna->up.nm_mem = netmap_mem_get(mna->up.nm_mem);
	/* '}' marks the slave side of the pipe in the name */
	snprintf(sna->up.name, sizeof(sna->up.name), "%s}%d", pna->name, pipe_id);
	sna->role = NR_REG_PIPE_SLAVE;
	error = netmap_attach_common(&sna->up);
	if (error)
		goto free_sna;

	/* join the two endpoints */
	mna->peer = sna;
	sna->peer = mna;

	/* we already have a reference to the parent, but we
	 * need another one for the other endpoint we created
	 */
	netmap_adapter_get(pna);
	/* likewise for the ifp, if any */
	if (ifp)
		if_ref(ifp);

	/* the requested endpoint holds the reference to its peer
	 * (state A in the lifecycle comment above)
	 */
	if (role == NR_REG_PIPE_MASTER) {
		req = mna;
		mna->peer_ref = 1;
		netmap_adapter_get(&sna->up);
	} else {
		req = sna;
		sna->peer_ref = 1;
		netmap_adapter_get(&mna->up);
	}
	ND("created master %p and slave %p", mna, sna);
found:

	ND("pipe %d %s at %p", pipe_id,
		(req->role == NR_REG_PIPE_MASTER ? "master" : "slave"), req);
	*na = &req->up;
	netmap_adapter_get(*na);

	/* keep the reference to the parent.
	 * It will be released by the req destructor
	 */

	return 0;

free_sna:
	nm_os_free(sna);
unregister_mna:
	netmap_pipe_remove(pna, mna);
free_mna:
	nm_os_free(mna);
put_out:
	netmap_unget_na(pna, ifp);
	return error;
}


#endif /* WITH_PIPES */