/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2012-2014 Matteo Landi
 * Copyright (C) 2012-2016 Luigi Rizzo
 * Copyright (C) 2012-2016 Giuseppe Lettieri
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef linux
#include "bsd_glue.h"
#endif /* linux */

#ifdef __APPLE__
#include "osx_glue.h"
#endif /* __APPLE__ */

#ifdef __FreeBSD__
#include <sys/cdefs.h> /* prerequisite */
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>		/* MALLOC_DEFINE */
#include <sys/proc.h>
#include <vm/vm.h>		/* vtophys */
#include <vm/pmap.h>		/* vtophys */
#include <sys/socket.h>		/* sockaddrs */
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>
#include <machine/bus.h>	/* bus_dmamap_* */

/* M_NETMAP only used in here */
MALLOC_DECLARE(M_NETMAP);
MALLOC_DEFINE(M_NETMAP, "netmap", "Network memory map");

#endif /* __FreeBSD__ */

#ifdef _WIN32
#include <win_glue.h>
#endif

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <net/netmap_virt.h>
#include "netmap_mem2.h"

#ifdef _WIN32_USE_SMALL_GENERIC_DEVICES_MEMORY
#define NETMAP_BUF_MAX_NUM	8*4096	/* if too big takes too much time to allocate */
#else
#define NETMAP_BUF_MAX_NUM	20*4096*2	/* large machine */
#endif

#define NETMAP_POOL_MAX_NAMSZ	32


enum {
	NETMAP_IF_POOL = 0,
	NETMAP_RING_POOL,
	NETMAP_BUF_POOL,
	NETMAP_POOLS_NR
};


struct netmap_obj_params {
	u_int size;
	u_int num;

	u_int last_size;
	u_int last_num;
};
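
/*
 * Each allocator is made of the three pools named in the enum above:
 * one for the struct netmap_if descriptors, one for the netmap rings
 * and one for the packet buffers.  In the memory region exported to
 * userspace the pools are laid out one after the other, in the order
 * of the enum, so (a sketch of the offset arithmetic used below in
 * netmap_mem2_ofstophys() and netmap_ring_offset()):
 *
 *	if pool:   [0 .. if.memtotal)
 *	ring pool: [if.memtotal .. if.memtotal + ring.memtotal)
 *	buf pool:  [if.memtotal + ring.memtotal .. nm_totalsize)
 */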

struct netmap_obj_pool {
	char name[NETMAP_POOL_MAX_NAMSZ];	/* name of the allocator */

	/* ---------------------------------------------------*/
	/* these are only meaningful if the pool is finalized */
	/* (see 'finalized' field in netmap_mem_d)            */
	size_t memtotal;	/* actual total memory space */

	struct lut_entry *lut;	/* virt,phys addresses, objtotal entries */
	uint32_t *bitmap;	/* one bit per buffer, 1 means free */
	uint32_t *invalid_bitmap;/* one bit per buffer, 1 means invalid */
	uint32_t bitmap_slots;	/* number of uint32 entries in bitmap */

	u_int objtotal;		/* actual total number of objects. */
	u_int numclusters;	/* actual number of clusters */
	u_int objfree;		/* number of free objects. */

	int alloc_done;		/* we have allocated the memory */
	/* ---------------------------------------------------*/

	/* limits */
	u_int objminsize;	/* minimum object size */
	u_int objmaxsize;	/* maximum object size */
	u_int nummin;		/* minimum number of objects */
	u_int nummax;		/* maximum number of objects */

	/* these are changed only by config */
	u_int _objtotal;	/* total number of objects */
	u_int _objsize;		/* object size */
	u_int _clustsize;	/* cluster size */
	u_int _clustentries;	/* objects per cluster */
	u_int _numclusters;	/* number of clusters */

	/* requested values */
	u_int r_objtotal;
	u_int r_objsize;
};

#define NMA_LOCK_T		NM_MTX_T
#define NMA_LOCK_INIT(n)	NM_MTX_INIT((n)->nm_mtx)
#define NMA_LOCK_DESTROY(n)	NM_MTX_DESTROY((n)->nm_mtx)
#define NMA_LOCK(n)		NM_MTX_LOCK((n)->nm_mtx)
#define NMA_SPINLOCK(n)		NM_MTX_SPINLOCK((n)->nm_mtx)
#define NMA_UNLOCK(n)		NM_MTX_UNLOCK((n)->nm_mtx)

struct netmap_mem_ops {
	int (*nmd_get_lut)(struct netmap_mem_d *, struct netmap_lut*);
	int (*nmd_get_info)(struct netmap_mem_d *, uint64_t *size,
			u_int *memflags, uint16_t *id);

	vm_paddr_t (*nmd_ofstophys)(struct netmap_mem_d *, vm_ooffset_t);
	int (*nmd_config)(struct netmap_mem_d *);
	int (*nmd_finalize)(struct netmap_mem_d *);
	void (*nmd_deref)(struct netmap_mem_d *);
	ssize_t (*nmd_if_offset)(struct netmap_mem_d *, const void *vaddr);
	void (*nmd_delete)(struct netmap_mem_d *);

	struct netmap_if * (*nmd_if_new)(struct netmap_adapter *,
					 struct netmap_priv_d *);
	void (*nmd_if_delete)(struct netmap_adapter *, struct netmap_if *);
	int (*nmd_rings_create)(struct netmap_adapter *);
	void (*nmd_rings_delete)(struct netmap_adapter *);
};

struct netmap_mem_d {
	NMA_LOCK_T nm_mtx;	/* protect the allocator */
	size_t nm_totalsize;	/* shorthand */

	u_int flags;
#define NETMAP_MEM_FINALIZED	0x1	/* preallocation done */
#define NETMAP_MEM_HIDDEN	0x8	/* being prepared */
	int lasterr;		/* last error for curr config */
	int active;		/* active users */
	int refcount;
	/* the three allocators */
	struct netmap_obj_pool pools[NETMAP_POOLS_NR];

	nm_memid_t nm_id;	/* allocator identifier */
	int nm_grp;		/* iommu group id */

	/* list of all existing allocators, sorted by nm_id */
	struct netmap_mem_d *prev, *next;

	struct netmap_mem_ops *ops;

	struct netmap_obj_params params[NETMAP_POOLS_NR];

#define NM_MEM_NAMESZ	16
	char name[NM_MEM_NAMESZ];
};

int
netmap_mem_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
{
	int rv;

	NMA_LOCK(nmd);
	rv = nmd->ops->nmd_get_lut(nmd, lut);
	NMA_UNLOCK(nmd);

	return rv;
}

int
netmap_mem_get_info(struct netmap_mem_d *nmd, uint64_t *size,
		u_int *memflags, nm_memid_t *memid)
{
	int rv;

	NMA_LOCK(nmd);
	rv = nmd->ops->nmd_get_info(nmd, size, memflags, memid);
	NMA_UNLOCK(nmd);

	return rv;
}

vm_paddr_t
netmap_mem_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off)
{
	vm_paddr_t pa;

#if defined(__FreeBSD__)
	/* This function is called by netmap_dev_pager_fault(), which holds a
	 * non-sleepable lock since FreeBSD 12. Since we cannot sleep, we
	 * spin on the trylock. */
	NMA_SPINLOCK(nmd);
#else
	NMA_LOCK(nmd);
#endif
	pa = nmd->ops->nmd_ofstophys(nmd, off);
	NMA_UNLOCK(nmd);

	return pa;
}

static int
netmap_mem_config(struct netmap_mem_d *nmd)
{
	if (nmd->active) {
		/* already in use. Not fatal, but we
		 * cannot change the configuration
		 */
		return 0;
	}

	return nmd->ops->nmd_config(nmd);
}

ssize_t
netmap_mem_if_offset(struct netmap_mem_d *nmd, const void *off)
{
	ssize_t rv;

	NMA_LOCK(nmd);
	rv = nmd->ops->nmd_if_offset(nmd, off);
	NMA_UNLOCK(nmd);

	return rv;
}

static void
netmap_mem_delete(struct netmap_mem_d *nmd)
{
	nmd->ops->nmd_delete(nmd);
}

struct netmap_if *
netmap_mem_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv)
{
	struct netmap_if *nifp;
	struct netmap_mem_d *nmd = na->nm_mem;

	NMA_LOCK(nmd);
	nifp = nmd->ops->nmd_if_new(na, priv);
	NMA_UNLOCK(nmd);

	return nifp;
}

void
netmap_mem_if_delete(struct netmap_adapter *na, struct netmap_if *nif)
{
	struct netmap_mem_d *nmd = na->nm_mem;

	NMA_LOCK(nmd);
	nmd->ops->nmd_if_delete(na, nif);
	NMA_UNLOCK(nmd);
}

int
netmap_mem_rings_create(struct netmap_adapter *na)
{
	int rv;
	struct netmap_mem_d *nmd = na->nm_mem;

	NMA_LOCK(nmd);
	rv = nmd->ops->nmd_rings_create(na);
	NMA_UNLOCK(nmd);

	return rv;
}

void
netmap_mem_rings_delete(struct netmap_adapter *na)
{
	struct netmap_mem_d *nmd = na->nm_mem;

	NMA_LOCK(nmd);
	nmd->ops->nmd_rings_delete(na);
	NMA_UNLOCK(nmd);
}

static int netmap_mem_map(struct netmap_obj_pool *, struct netmap_adapter *);
static int netmap_mem_unmap(struct netmap_obj_pool *, struct netmap_adapter *);
static int nm_mem_assign_group(struct netmap_mem_d *, struct device *);
static void nm_mem_release_id(struct netmap_mem_d *);

nm_memid_t
netmap_mem_get_id(struct netmap_mem_d *nmd)
{
	return nmd->nm_id;
}

#ifdef NM_DEBUG_MEM_PUTGET
#define NM_DBG_REFC(nmd, func, line)	\
	nm_prinf("%d mem[%d] -> %d", line, (nmd)->nm_id, (nmd)->refcount);
#else
#define NM_DBG_REFC(nmd, func, line)
#endif

/* circular list of all existing allocators */
static struct netmap_mem_d *netmap_last_mem_d = &nm_mem;
NM_MTX_T nm_mem_list_lock;

struct netmap_mem_d *
__netmap_mem_get(struct netmap_mem_d *nmd, const char *func, int line)
{
	NM_MTX_LOCK(nm_mem_list_lock);
	nmd->refcount++;
	NM_DBG_REFC(nmd, func, line);
	NM_MTX_UNLOCK(nm_mem_list_lock);
	return nmd;
}

void
__netmap_mem_put(struct netmap_mem_d *nmd, const char *func, int line)
{
	int last;
	NM_MTX_LOCK(nm_mem_list_lock);
	last = (--nmd->refcount == 0);
	if (last)
		nm_mem_release_id(nmd);
	NM_DBG_REFC(nmd, func, line);
	NM_MTX_UNLOCK(nm_mem_list_lock);
	if (last)
		netmap_mem_delete(nmd);
}

int
netmap_mem_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	int lasterr = 0;
	if (nm_mem_assign_group(nmd, na->pdev) < 0) {
		return ENOMEM;
	}

	NMA_LOCK(nmd);

	if (netmap_mem_config(nmd))
		goto out;

	nmd->active++;

	nmd->lasterr = nmd->ops->nmd_finalize(nmd);

	if (!nmd->lasterr && na->pdev) {
		nmd->lasterr = netmap_mem_map(&nmd->pools[NETMAP_BUF_POOL], na);
	}

out:
	lasterr = nmd->lasterr;
	NMA_UNLOCK(nmd);

	if (lasterr)
		netmap_mem_deref(nmd, na);

	return lasterr;
}

static int
nm_isset(uint32_t *bitmap, u_int i)
{
	return bitmap[ (i>>5) ] & ( 1U << (i & 31U) );
}


static int
netmap_init_obj_allocator_bitmap(struct netmap_obj_pool *p)
{
	u_int n, j;

	if (p->bitmap == NULL) {
		/* Allocate the bitmap */
		n = (p->objtotal + 31) / 32;
		p->bitmap = nm_os_malloc(sizeof(p->bitmap[0]) * n);
		if (p->bitmap == NULL) {
			nm_prerr("Unable to create bitmap (%d entries) for allocator '%s'", (int)n,
			    p->name);
			return ENOMEM;
		}
		p->bitmap_slots = n;
	} else {
		memset(p->bitmap, 0, p->bitmap_slots * sizeof(p->bitmap[0]));
	}

	p->objfree = 0;
	/*
	 * Set all the bits in the bitmap that have
	 * corresponding buffers to 1 to indicate they are
	 * free.
	 */
	for (j = 0; j < p->objtotal; j++) {
		if (p->invalid_bitmap && nm_isset(p->invalid_bitmap, j)) {
			if (netmap_debug & NM_DEBUG_MEM)
				nm_prinf("skipping %s %d", p->name, j);
			continue;
		}
		p->bitmap[ (j>>5) ] |= ( 1U << (j & 31U) );
		p->objfree++;
	}

	if (netmap_verbose)
		nm_prinf("%s free %u", p->name, p->objfree);
	if (p->objfree == 0) {
		if (netmap_verbose)
			nm_prerr("%s: no objects available", p->name);
		return ENOMEM;
	}

	return 0;
}

static int
netmap_mem_init_bitmaps(struct netmap_mem_d *nmd)
{
	int i, error = 0;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		struct netmap_obj_pool *p = &nmd->pools[i];

		error = netmap_init_obj_allocator_bitmap(p);
		if (error)
			return error;
	}

	/*
	 * buffers 0 and 1 are reserved
	 */
	if (nmd->pools[NETMAP_BUF_POOL].objfree < 2) {
		nm_prerr("%s: not enough buffers", nmd->pools[NETMAP_BUF_POOL].name);
		return ENOMEM;
	}

	nmd->pools[NETMAP_BUF_POOL].objfree -= 2;
	if (nmd->pools[NETMAP_BUF_POOL].bitmap) {
		/* XXX This check is a workaround that prevents a
		 * NULL pointer crash which currently happens only
		 * with ptnetmap guests.
		 * Removed shared-info --> is the bug still there? */
		nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3U;
	}
	return 0;
}

int
netmap_mem_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na)
{
	int last_user = 0;
	NMA_LOCK(nmd);
	if (na->active_fds <= 0)
		netmap_mem_unmap(&nmd->pools[NETMAP_BUF_POOL], na);
	if (nmd->active == 1) {
		last_user = 1;
		/*
		 * Reset the allocator when it falls out of use so that any
		 * pool resources leaked by unclean application exits are
		 * reclaimed.
		 */
		netmap_mem_init_bitmaps(nmd);
	}
	nmd->ops->nmd_deref(nmd);

	nmd->active--;
	if (last_user) {
		nmd->nm_grp = -1;
		nmd->lasterr = 0;
	}

	NMA_UNLOCK(nmd);
	return last_user;
}


/* accessor functions */
static int
netmap_mem2_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
{
	lut->lut = nmd->pools[NETMAP_BUF_POOL].lut;
#ifdef __FreeBSD__
	lut->plut = lut->lut;
#endif
	lut->objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal;
	lut->objsize = nmd->pools[NETMAP_BUF_POOL]._objsize;

	return 0;
}

static struct netmap_obj_params netmap_min_priv_params[NETMAP_POOLS_NR] = {
	[NETMAP_IF_POOL] = {
		.size = 1024,
		.num  = 2,
	},
	[NETMAP_RING_POOL] = {
		.size = 5*PAGE_SIZE,
		.num  = 4,
	},
	[NETMAP_BUF_POOL] = {
		.size = 2048,
		.num  = 4098,
	},
};


/*
 * nm_mem is the memory allocator used for all physical interfaces
 * running in netmap mode.
 * Virtual (VALE) ports will each have their own allocator.
 */
extern struct netmap_mem_ops netmap_mem_global_ops; /* forward */
struct netmap_mem_d nm_mem = {	/* Our memory allocator. */
	.pools = {
		[NETMAP_IF_POOL] = {
			.name	= "netmap_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 10,	/* don't be stingy */
			.nummax	    = 10000,	/* XXX very large */
		},
		[NETMAP_RING_POOL] = {
			.name	= "netmap_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax	    = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "netmap_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax	    = 1000000, /* one million! */
		},
	},

	.params = {
		[NETMAP_IF_POOL] = {
			.size = 1024,
			.num  = 100,
		},
		[NETMAP_RING_POOL] = {
			.size = 9*PAGE_SIZE,
			.num  = 200,
		},
		[NETMAP_BUF_POOL] = {
			.size = 2048,
			.num  = NETMAP_BUF_MAX_NUM,
		},
	},

	.nm_id = 1,
	.nm_grp = -1,

	.prev = &nm_mem,
	.next = &nm_mem,

	.ops = &netmap_mem_global_ops,

	.name = "1"
};
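
/*
 * A rough idea of what the defaults above request (assuming 4 KiB
 * pages, before the rounding done in netmap_config_obj_allocator()):
 *
 *	buf pool:  NETMAP_BUF_MAX_NUM * 2048 B = 163840 * 2 KiB = 320 MiB
 *	ring pool: 200 * 9*PAGE_SIZE           = 200 * 36 KiB  ~   7 MiB
 *	if pool:   100 * 1024 B                                =  100 KiB
 *
 * The actual amounts can be tuned at runtime through the sysctls
 * declared below.
 */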

/* blueprint for the private memory allocators */
/* XXX clang is not happy about using name as a print format */
static const struct netmap_mem_d nm_blueprint = {
	.pools = {
		[NETMAP_IF_POOL] = {
			.name	= "%s_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 1,
			.nummax	    = 100,
		},
		[NETMAP_RING_POOL] = {
			.name	= "%s_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax	    = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "%s_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax	    = 1000000, /* one million! */
		},
	},

	.nm_grp = -1,

	.flags = NETMAP_MEM_PRIVATE,

	.ops = &netmap_mem_global_ops,
};

/* memory allocator related sysctls */

#define STRINGIFY(x) #x


#define DECLARE_SYSCTLS(id, name) \
	SYSBEGIN(mem2_ ## name); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \
	    CTLFLAG_RW, &nm_mem.params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \
	    CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \
	    CTLFLAG_RW, &nm_mem.params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \
	    CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_size, \
	    CTLFLAG_RW, &netmap_min_priv_params[id].size, 0, \
	    "Default size of private netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_num, \
	    CTLFLAG_RW, &netmap_min_priv_params[id].num, 0, \
	    "Default number of private netmap " STRINGIFY(name) "s"); \
	SYSEND

SYSCTL_DECL(_dev_netmap);
DECLARE_SYSCTLS(NETMAP_IF_POOL, if);
DECLARE_SYSCTLS(NETMAP_RING_POOL, ring);
DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf);
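
/*
 * Example (FreeBSD, values are illustrative only): the writable knobs
 * above can be used to resize the global pools before an interface is
 * switched to netmap mode, e.g.
 *
 *	sysctl dev.netmap.buf_num=400000
 *	sysctl dev.netmap.ring_size=73728
 *
 * The new values only take effect at the next reconfiguration, since
 * netmap_mem_config() leaves an allocator untouched while it has
 * active users.
 */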

/* call with nm_mem_list_lock held */
static int
nm_mem_assign_id_locked(struct netmap_mem_d *nmd)
{
	nm_memid_t id;
	struct netmap_mem_d *scan = netmap_last_mem_d;
	int error = ENOMEM;

	do {
		/* we rely on unsigned wrap around */
		id = scan->nm_id + 1;
		if (id == 0) /* reserve 0 as error value */
			id = 1;
		scan = scan->next;
		if (id != scan->nm_id) {
			nmd->nm_id = id;
			nmd->prev = scan->prev;
			nmd->next = scan;
			scan->prev->next = nmd;
			scan->prev = nmd;
			netmap_last_mem_d = nmd;
			nmd->refcount = 1;
			NM_DBG_REFC(nmd, __FUNCTION__, __LINE__);
			error = 0;
			break;
		}
	} while (scan != netmap_last_mem_d);

	return error;
}

/* call with nm_mem_list_lock *not* held */
static int
nm_mem_assign_id(struct netmap_mem_d *nmd)
{
	int ret;

	NM_MTX_LOCK(nm_mem_list_lock);
	ret = nm_mem_assign_id_locked(nmd);
	NM_MTX_UNLOCK(nm_mem_list_lock);

	return ret;
}

/* call with nm_mem_list_lock held */
static void
nm_mem_release_id(struct netmap_mem_d *nmd)
{
	nmd->prev->next = nmd->next;
	nmd->next->prev = nmd->prev;

	if (netmap_last_mem_d == nmd)
		netmap_last_mem_d = nmd->prev;

	nmd->prev = nmd->next = NULL;
}

struct netmap_mem_d *
netmap_mem_find(nm_memid_t id)
{
	struct netmap_mem_d *nmd;

	NM_MTX_LOCK(nm_mem_list_lock);
	nmd = netmap_last_mem_d;
	do {
		if (!(nmd->flags & NETMAP_MEM_HIDDEN) && nmd->nm_id == id) {
			nmd->refcount++;
			NM_DBG_REFC(nmd, __FUNCTION__, __LINE__);
			NM_MTX_UNLOCK(nm_mem_list_lock);
			return nmd;
		}
		nmd = nmd->next;
	} while (nmd != netmap_last_mem_d);
	NM_MTX_UNLOCK(nm_mem_list_lock);
	return NULL;
}

static int
nm_mem_assign_group(struct netmap_mem_d *nmd, struct device *dev)
{
	int err = 0, id;
	id = nm_iommu_group_id(dev);
	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("iommu_group %d", id);

	NMA_LOCK(nmd);

	if (nmd->nm_grp < 0)
		nmd->nm_grp = id;

	if (nmd->nm_grp != id) {
		if (netmap_verbose)
			nm_prerr("iommu group mismatch: %u vs %u",
					nmd->nm_grp, id);
		nmd->lasterr = err = ENOMEM;
	}

	NMA_UNLOCK(nmd);
	return err;
}

static struct lut_entry *
nm_alloc_lut(u_int nobj)
{
	size_t n = sizeof(struct lut_entry) * nobj;
	struct lut_entry *lut;
#ifdef linux
	lut = vmalloc(n);
#else
	lut = nm_os_malloc(n);
#endif
	return lut;
}

static void
nm_free_lut(struct lut_entry *lut, u_int objtotal)
{
	bzero(lut, sizeof(struct lut_entry) * objtotal);
#ifdef linux
	vfree(lut);
#else
	nm_os_free(lut);
#endif
}

#if defined(linux) || defined(_WIN32)
static struct plut_entry *
nm_alloc_plut(u_int nobj)
{
	size_t n = sizeof(struct plut_entry) * nobj;
	struct plut_entry *lut;
	lut = vmalloc(n);
	return lut;
}

static void
nm_free_plut(struct plut_entry * lut)
{
	vfree(lut);
}
#endif /* linux or _WIN32 */


/*
 * First, find the allocator that contains the requested offset,
 * then locate the cluster through a lookup table.
 */
static vm_paddr_t
netmap_mem2_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset)
{
	int i;
	vm_ooffset_t o = offset;
	vm_paddr_t pa;
	struct netmap_obj_pool *p;

	p = nmd->pools;

	for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) {
		if (offset >= p[i].memtotal)
			continue;
		// now lookup the cluster's address
#ifndef _WIN32
		pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr) +
			offset % p[i]._objsize;
#else
		pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr);
		pa.QuadPart += offset % p[i]._objsize;
#endif
		return pa;
	}
	/* this is only in case of errors */
	nm_prerr("invalid ofs 0x%x out of 0x%zx 0x%zx 0x%zx", (u_int)o,
		p[NETMAP_IF_POOL].memtotal,
		p[NETMAP_IF_POOL].memtotal
			+ p[NETMAP_RING_POOL].memtotal,
		p[NETMAP_IF_POOL].memtotal
			+ p[NETMAP_RING_POOL].memtotal
			+ p[NETMAP_BUF_POOL].memtotal);
#ifndef _WIN32
	return 0;	/* bad address */
#else
	vm_paddr_t res;
	res.QuadPart = 0;
	return res;
#endif
}

#ifdef _WIN32

/*
 * win32_build_virtual_memory_for_userspace
 *
 * This function gets all the objects belonging to the pools and maps
 * a contiguous virtual memory space for userspace.
 * It works this way:
 * 1 - allocate a Memory Descriptor List as wide as the sum
 *     of the memory needed for the pools
 * 2 - cycle through all the objects of every pool and for every object:
 *
 *	2a - get the list of the physical address descriptors
 *	2b - calculate the offset in the array of page descriptors of the
 *	     main MDL
 *	2c - copy the descriptors of the object into the main MDL
 *
 * 3 - return the resulting MDL that needs to be mapped in userland
 *
 * In this way we end up with a single MDL that describes all the memory
 * for the objects.
 */

PMDL
win32_build_user_vm_map(struct netmap_mem_d* nmd)
{
	u_int memflags, ofs = 0;
	PMDL mainMdl, tempMdl;
	uint64_t memsize;
	int i, j;

	if (netmap_mem_get_info(nmd, &memsize, &memflags, NULL)) {
		nm_prerr("memory not finalised yet");
		return NULL;
	}

	mainMdl = IoAllocateMdl(NULL, memsize, FALSE, FALSE, NULL);
	if (mainMdl == NULL) {
		nm_prerr("failed to allocate mdl");
		return NULL;
	}

	NMA_LOCK(nmd);
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		struct netmap_obj_pool *p = &nmd->pools[i];
		int clsz = p->_clustsize;
		int clobjs = p->_clustentries; /* objects per cluster */
		int mdl_len = sizeof(PFN_NUMBER) * BYTES_TO_PAGES(clsz);
		PPFN_NUMBER pSrc, pDst;

		/* each pool has a different cluster size so we need to reallocate */
		tempMdl = IoAllocateMdl(p->lut[0].vaddr, clsz, FALSE, FALSE, NULL);
		if (tempMdl == NULL) {
			NMA_UNLOCK(nmd);
			nm_prerr("fail to allocate tempMdl");
			IoFreeMdl(mainMdl);
			return NULL;
		}
		pSrc = MmGetMdlPfnArray(tempMdl);
		/* create one entry per cluster, the lut[] has one entry per object */
		for (j = 0; j < p->numclusters; j++, ofs += clsz) {
			pDst = &MmGetMdlPfnArray(mainMdl)[BYTES_TO_PAGES(ofs)];
			MmInitializeMdl(tempMdl, p->lut[j*clobjs].vaddr, clsz);
			MmBuildMdlForNonPagedPool(tempMdl); /* compute physical page addresses */
			RtlCopyMemory(pDst, pSrc, mdl_len); /* copy the page descriptors */
			mainMdl->MdlFlags = tempMdl->MdlFlags; /* XXX what is in here ? */
		}
		IoFreeMdl(tempMdl);
	}
	NMA_UNLOCK(nmd);
	return mainMdl;
}

#endif /* _WIN32 */

/*
 * helper function for OS-specific mmap routines (currently only windows).
 * Given an nmd and a pool index, returns the cluster size and number of clusters.
 * Returns 0 if memory is finalised and the pool is valid, otherwise 1.
 * It should be called under NMA_LOCK(nmd) otherwise the underlying info can change.
 */

int
netmap_mem2_get_pool_info(struct netmap_mem_d* nmd, u_int pool, u_int *clustsize, u_int *numclusters)
{
	if (!nmd || !clustsize || !numclusters || pool >= NETMAP_POOLS_NR)
		return 1; /* invalid arguments */
	// NMA_LOCK_ASSERT(nmd);
	if (!(nmd->flags & NETMAP_MEM_FINALIZED)) {
		*clustsize = *numclusters = 0;
		return 1; /* not ready yet */
	}
	*clustsize = nmd->pools[pool]._clustsize;
	*numclusters = nmd->pools[pool].numclusters;
	return 0; /* success */
}

static int
netmap_mem2_get_info(struct netmap_mem_d* nmd, uint64_t* size,
    u_int *memflags, nm_memid_t *id)
{
	int error = 0;
	error = netmap_mem_config(nmd);
	if (error)
		goto out;
	if (size) {
		if (nmd->flags & NETMAP_MEM_FINALIZED) {
			*size = nmd->nm_totalsize;
		} else {
			int i;
			*size = 0;
			for (i = 0; i < NETMAP_POOLS_NR; i++) {
				struct netmap_obj_pool *p = nmd->pools + i;
				*size += ((size_t)p->_numclusters * (size_t)p->_clustsize);
			}
		}
	}
	if (memflags)
		*memflags = nmd->flags;
	if (id)
		*id = nmd->nm_id;
out:
	return error;
}

/*
 * we store objects by kernel address, need to find the offset
 * within the pool to export the value to userspace.
 * Algorithm: scan until we find the cluster, then add the
 * actual offset in the cluster
 */
static ssize_t
netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
{
	int i, k = p->_clustentries, n = p->objtotal;
	ssize_t ofs = 0;

	for (i = 0; i < n; i += k, ofs += p->_clustsize) {
		const char *base = p->lut[i].vaddr;
		ssize_t relofs = (const char *) vaddr - base;

		if (relofs < 0 || relofs >= p->_clustsize)
			continue;

		ofs = ofs + relofs;
		nm_prdis("%s: return offset %d (cluster %d) for pointer %p",
		    p->name, ofs, i, vaddr);
		return ofs;
	}
	nm_prerr("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
	return 0; /* An error occurred */
}

/* Helper functions which convert virtual addresses to offsets */
#define netmap_if_offset(n, v)					\
	netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v))

#define netmap_ring_offset(n, v)				\
	((n)->pools[NETMAP_IF_POOL].memtotal +			\
	netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v)))

static ssize_t
netmap_mem2_if_offset(struct netmap_mem_d *nmd, const void *addr)
{
	return netmap_if_offset(nmd, addr);
}

/*
 * report the index, and use start position as a hint,
 * otherwise buffer allocation becomes terribly expensive.
 */
static void *
netmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index)
{
	uint32_t i = 0;		/* index in the bitmap */
	uint32_t mask, j = 0;	/* slot counter */
	void *vaddr = NULL;

	if (len > p->_objsize) {
		nm_prerr("%s request size %d too large", p->name, len);
		return NULL;
	}

	if (p->objfree == 0) {
		nm_prerr("no more %s objects", p->name);
		return NULL;
	}
	if (start)
		i = *start;

	/* termination is guaranteed by p->objfree, but better check bounds on i */
	while (vaddr == NULL && i < p->bitmap_slots)  {
		uint32_t cur = p->bitmap[i];
		if (cur == 0) { /* bitmask is fully used */
			i++;
			continue;
		}
		/* locate a slot */
		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
			;

		p->bitmap[i] &= ~mask;	/* mark object as in use */
		p->objfree--;

		vaddr = p->lut[i * 32 + j].vaddr;
		if (index)
			*index = i * 32 + j;
	}
	nm_prdis("%s allocator: allocated object @ [%d][%d]: vaddr %p", p->name, i, j, vaddr);

	if (start)
		*start = i;
	return vaddr;
}


/*
 * free by index, not by address.
 * XXX should we also cleanup the content ?
 */
static int
netmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
{
	uint32_t *ptr, mask;

	if (j >= p->objtotal) {
		nm_prerr("invalid index %u, max %u", j, p->objtotal);
		return 1;
	}
	ptr = &p->bitmap[j / 32];
	mask = (1 << (j % 32));
	if (*ptr & mask) {
		nm_prerr("ouch, double free on buffer %d", j);
		return 1;
	} else {
		*ptr |= mask;
		p->objfree++;
		return 0;
	}
}

/*
 * free by address. This is slow but is only used for a few
 * objects (rings, nifp)
 */
static void
netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
{
	u_int i, j, n = p->numclusters;

	for (i = 0, j = 0; i < n; i++, j += p->_clustentries) {
		void *base = p->lut[i * p->_clustentries].vaddr;
		ssize_t relofs = (ssize_t) vaddr - (ssize_t) base;

		/* the given address is outside the scope of the current cluster */
		if (base == NULL || vaddr < base || relofs >= p->_clustsize)
			continue;

		j = j + relofs / p->_objsize;
		/* KASSERT(j != 0, ("Cannot free object 0")); */
		netmap_obj_free(p, j);
		return;
	}
	nm_prerr("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
}

unsigned
netmap_mem_bufsize(struct netmap_mem_d *nmd)
{
	return nmd->pools[NETMAP_BUF_POOL]._objsize;
}

#define netmap_if_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL)
#define netmap_if_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v))
#define netmap_ring_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL)
#define netmap_ring_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v))
#define netmap_buf_malloc(n, _pos, _index)			\
	netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], netmap_mem_bufsize(n), _pos, _index)


#if 0 /* currently unused */
/* Return the index associated to the given packet buffer */
#define netmap_buf_index(n, v)						\
	(netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n))
#endif

/*
 * allocate extra buffers in a linked list.
 * returns the actual number.
 */
uint32_t
netmap_extra_alloc(struct netmap_adapter *na, uint32_t *head, uint32_t n)
{
	struct netmap_mem_d *nmd = na->nm_mem;
	uint32_t i, pos = 0; /* opaque, scan position in the bitmap */

	NMA_LOCK(nmd);

	*head = 0;	/* default, 'null' index ie empty list */
	for (i = 0 ; i < n; i++) {
		uint32_t cur = *head;	/* save current head */
		uint32_t *p = netmap_buf_malloc(nmd, &pos, head);
		if (p == NULL) {
			nm_prerr("no more buffers after %d of %d", i, n);
			*head = cur; /* restore */
			break;
		}
		nm_prdis(5, "allocate buffer %d -> %d", *head, cur);
		*p = cur; /* link to previous head */
	}

	NMA_UNLOCK(nmd);

	return i;
}

static void
netmap_extra_free(struct netmap_adapter *na, uint32_t head)
{
	struct lut_entry *lut = na->na_lut.lut;
	struct netmap_mem_d *nmd = na->nm_mem;
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	uint32_t i, cur, *buf;

	nm_prdis("freeing the extra list");
	for (i = 0; head >= 2 && head < p->objtotal; i++) {
		cur = head;
		buf = lut[head].vaddr;
		head = *buf;
		*buf = 0;
		if (netmap_obj_free(p, cur))
			break;
	}
	if (head != 0)
		nm_prerr("breaking with head %d", head);
	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("freed %d buffers", i);
}
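
/*
 * Note on the extra-buffer list built above: the chain is kept inside
 * the buffers themselves.  The first 4 bytes of each buffer hold the
 * index of the next buffer in the chain, and index 0 terminates the
 * list (buffer indexes 0 and 1 are reserved and never handed out).
 * netmap_extra_free() walks the same chain through the lut to return
 * the buffers to the pool.
 */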

/* Return nonzero on error */
static int
netmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	u_int i = 0;		/* slot counter */
	uint32_t pos = 0;	/* slot in p->bitmap */
	uint32_t index = 0;	/* buffer index */

	for (i = 0; i < n; i++) {
		void *vaddr = netmap_buf_malloc(nmd, &pos, &index);
		if (vaddr == NULL) {
			nm_prerr("no more buffers after %d of %d", i, n);
			goto cleanup;
		}
		slot[i].buf_idx = index;
		slot[i].len = p->_objsize;
		slot[i].flags = 0;
		slot[i].ptr = 0;
	}

	nm_prdis("%s: allocated %d buffers, %d available, first at %d", p->name, n, p->objfree, pos);
	return (0);

cleanup:
	while (i > 0) {
		i--;
		netmap_obj_free(p, slot[i].buf_idx);
	}
	bzero(slot, n * sizeof(slot[0]));
	return (ENOMEM);
}

static void
netmap_mem_set_ring(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n, uint32_t index)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	u_int i;

	for (i = 0; i < n; i++) {
		slot[i].buf_idx = index;
		slot[i].len = p->_objsize;
		slot[i].flags = 0;
	}
}


static void
netmap_free_buf(struct netmap_mem_d *nmd, uint32_t i)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];

	if (i < 2 || i >= p->objtotal) {
		nm_prerr("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
		return;
	}
	netmap_obj_free(p, i);
}


static void
netmap_free_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
{
	u_int i;

	for (i = 0; i < n; i++) {
		if (slot[i].buf_idx > 1)
			netmap_free_buf(nmd, slot[i].buf_idx);
	}
	nm_prdis("%s: released some buffers, available: %u",
			p->name, p->objfree);
}

static void
netmap_reset_obj_allocator(struct netmap_obj_pool *p)
{

	if (p == NULL)
		return;
	if (p->bitmap)
		nm_os_free(p->bitmap);
	p->bitmap = NULL;
	if (p->invalid_bitmap)
		nm_os_free(p->invalid_bitmap);
	p->invalid_bitmap = NULL;
	if (!p->alloc_done) {
		/* allocation was done by somebody else.
		 * Let them clean up after themselves.
		 */
		return;
	}
	if (p->lut) {
		u_int i;

		/*
		 * Free each cluster allocated in
		 * netmap_finalize_obj_allocator().  The cluster start
		 * addresses are stored at multiples of p->_clustentries
		 * in the lut.
		 */
		for (i = 0; i < p->objtotal; i += p->_clustentries) {
			contigfree(p->lut[i].vaddr, p->_clustsize, M_NETMAP);
		}
		nm_free_lut(p->lut, p->objtotal);
	}
	p->lut = NULL;
	p->objtotal = 0;
	p->memtotal = 0;
	p->numclusters = 0;
	p->objfree = 0;
	p->alloc_done = 0;
}

/*
 * Free all resources related to an allocator.
 */
static void
netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
{
	if (p == NULL)
		return;
	netmap_reset_obj_allocator(p);
}

/*
 * We receive a request for objtotal objects, of size objsize each.
 * Internally we may round up both numbers, as we allocate objects
 * in small clusters multiple of the page size.
 * We need to keep track of objtotal and clustentries,
 * as they are needed when freeing memory.
 *
 * XXX note -- userspace needs the buffers to be contiguous,
 * so we cannot afford gaps at the end of a cluster.
 */
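
/*
 * A worked example of the rounding done below (assuming 4 KiB pages):
 * a request for 163840 objects of 2048 bytes fits two objects per
 * 4 KiB cluster with no padding, so clustentries = 2 and
 * clustsize = 4096, and the requested objtotal is already a multiple
 * of clustentries, so nothing is rounded up.  A request for 100
 * objects of 1000 bytes is first aligned to 1024 bytes, packed 4 per
 * cluster, and objtotal stays at 100 (25 clusters).
 */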

/* call with NMA_LOCK held */
static int
netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize)
{
	int i;
	u_int clustsize;	/* the cluster size, multiple of page size */
	u_int clustentries;	/* how many objects per cluster */

	/* we store the current request, so we can
	 * detect configuration changes later */
	p->r_objtotal = objtotal;
	p->r_objsize = objsize;

#define MAX_CLUSTSIZE	(1<<22)		// 4 MB
#define LINE_ROUND	NM_CACHE_ALIGN	// 64
	if (objsize >= MAX_CLUSTSIZE) {
		/* we could do it but there is no point */
		nm_prerr("unsupported allocation for %d bytes", objsize);
		return EINVAL;
	}
	/* make sure objsize is a multiple of LINE_ROUND */
	i = (objsize & (LINE_ROUND - 1));
	if (i) {
		nm_prinf("aligning object by %d bytes", LINE_ROUND - i);
		objsize += LINE_ROUND - i;
	}
	if (objsize < p->objminsize || objsize > p->objmaxsize) {
		nm_prerr("requested objsize %d out of range [%d, %d]",
			objsize, p->objminsize, p->objmaxsize);
		return EINVAL;
	}
	if (objtotal < p->nummin || objtotal > p->nummax) {
		nm_prerr("requested objtotal %d out of range [%d, %d]",
			objtotal, p->nummin, p->nummax);
		return EINVAL;
	}
	/*
	 * Compute number of objects using a brute-force approach:
	 * given a max cluster size,
	 * we try to fill it with objects keeping track of the
	 * wasted space to the next page boundary.
	 */
	for (clustentries = 0, i = 1;; i++) {
		u_int delta, used = i * objsize;
		if (used > MAX_CLUSTSIZE)
			break;
		delta = used % PAGE_SIZE;
		if (delta == 0) { // exact solution
			clustentries = i;
			break;
		}
	}
	/* exact solution not found */
	if (clustentries == 0) {
		nm_prerr("unsupported allocation for %d bytes", objsize);
		return EINVAL;
	}
	/* compute clustsize */
	clustsize = clustentries * objsize;
	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("objsize %d clustsize %d objects %d",
			objsize, clustsize, clustentries);

	/*
	 * The number of clusters is n = ceil(objtotal/clustentries)
	 * objtotal' = n * clustentries
	 */
	p->_clustentries = clustentries;
	p->_clustsize = clustsize;
	p->_numclusters = (objtotal + clustentries - 1) / clustentries;

	/* actual values (may be larger than requested) */
	p->_objsize = objsize;
	p->_objtotal = p->_numclusters * clustentries;

	return 0;
}

/* call with NMA_LOCK held */
static int
netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
{
	int i; /* must be signed */
	size_t n;

	if (p->lut) {
		/* if the lut is already there we assume that also all the
		 * clusters have already been allocated, possibly by somebody
		 * else (e.g., extmem). In the latter case, the alloc_done flag
		 * will remain at zero, so that we will not attempt to
		 * deallocate the clusters by ourselves in
		 * netmap_reset_obj_allocator.
		 */
		return 0;
	}

	/* optimistically assume we have enough memory */
	p->numclusters = p->_numclusters;
	p->objtotal = p->_objtotal;
	p->alloc_done = 1;

	p->lut = nm_alloc_lut(p->objtotal);
	if (p->lut == NULL) {
		nm_prerr("Unable to create lookup table for '%s'", p->name);
		goto clean;
	}

	/*
	 * Allocate clusters, init pointers
	 */

	n = p->_clustsize;
	for (i = 0; i < (int)p->objtotal;) {
		int lim = i + p->_clustentries;
		char *clust;

		/*
		 * XXX Note, we only need contigmalloc() for buffers attached
		 * to native interfaces. In all other cases (nifp, netmap rings
		 * and even buffers for VALE ports or emulated interfaces) we
		 * can live with standard malloc, because the hardware will not
		 * access the pages directly.
		 */
		clust = contigmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO,
		    (size_t)0, -1UL, PAGE_SIZE, 0);
		if (clust == NULL) {
			/*
			 * If we get here, there is a severe memory shortage,
			 * so halve the allocated memory to reclaim some.
			 */
			nm_prerr("Unable to create cluster at %d for '%s' allocator",
			    i, p->name);
			if (i < 2) /* nothing to halve */
				goto out;
			lim = i / 2;
			for (i--; i >= lim; i--) {
				if (i % p->_clustentries == 0 && p->lut[i].vaddr)
					contigfree(p->lut[i].vaddr,
						n, M_NETMAP);
				p->lut[i].vaddr = NULL;
			}
		out:
			p->objtotal = i;
			/* we may have stopped in the middle of a cluster */
			p->numclusters = (i + p->_clustentries - 1) / p->_clustentries;
			break;
		}
		/*
		 * Set lut state for all buffers in the current cluster.
		 *
		 * [i, lim) is the set of buffer indexes that cover the
		 * current cluster.
		 *
		 * 'clust' is really the address of the current buffer in
		 * the current cluster as we index through it with a stride
		 * of p->_objsize.
		 */
		for (; i < lim; i++, clust += p->_objsize) {
			p->lut[i].vaddr = clust;
#if !defined(linux) && !defined(_WIN32)
			p->lut[i].paddr = vtophys(clust);
#endif
		}
	}
	p->memtotal = (size_t)p->numclusters * (size_t)p->_clustsize;
	if (netmap_verbose)
		nm_prinf("Pre-allocated %d clusters (%d/%zuKB) for '%s'",
		    p->numclusters, p->_clustsize >> 10,
		    p->memtotal >> 10, p->name);

	return 0;

clean:
	netmap_reset_obj_allocator(p);
	return ENOMEM;
}

/* call with lock held */
static int
netmap_mem_params_changed(struct netmap_obj_params* p)
{
	int i, rv = 0;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		if (p[i].last_size != p[i].size || p[i].last_num != p[i].num) {
			p[i].last_size = p[i].size;
			p[i].last_num = p[i].num;
			rv = 1;
		}
	}
	return rv;
}

static void
netmap_mem_reset_all(struct netmap_mem_d *nmd)
{
	int i;

	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("resetting %p", nmd);
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_reset_obj_allocator(&nmd->pools[i]);
	}
	nmd->flags &= ~NETMAP_MEM_FINALIZED;
}

static int
netmap_mem_unmap(struct netmap_obj_pool *p, struct netmap_adapter *na)
{
	int i, lim = p->objtotal;
	struct netmap_lut *lut;

	if (na == NULL || na->pdev == NULL)
		return 0;

	lut = &na->na_lut;
#if defined(__FreeBSD__)
	/* On FreeBSD mapping and unmapping is performed by the txsync
	 * and rxsync routine, packet by packet. */
	(void)i;
	(void)lim;
	(void)lut;
#elif defined(_WIN32)
	(void)i;
	(void)lim;
	(void)lut;
	nm_prerr("unsupported on Windows");
#else /* linux */
	nm_prdis("unmapping and freeing plut for %s", na->name);
	if (lut->plut == NULL)
		return 0;
	for (i = 0; i < lim; i += p->_clustentries) {
		if (lut->plut[i].paddr)
			netmap_unload_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr, p->_clustsize);
	}
	nm_free_plut(lut->plut);
	lut->plut = NULL;
#endif /* linux */

	return 0;
}

static int
netmap_mem_map(struct netmap_obj_pool *p, struct netmap_adapter *na)
{
	int error = 0;
	int i, lim = p->objtotal;
	struct netmap_lut *lut = &na->na_lut;

	if (na->pdev == NULL)
		return 0;

#if defined(__FreeBSD__)
	/* On FreeBSD mapping and unmapping is performed by the txsync
	 * and rxsync routine, packet by packet. */
	(void)i;
	(void)lim;
	(void)lut;
#elif defined(_WIN32)
	(void)i;
	(void)lim;
	(void)lut;
	nm_prerr("unsupported on Windows");
#else /* linux */

	if (lut->plut != NULL) {
		nm_prdis("plut already allocated for %s", na->name);
		return 0;
	}

	nm_prdis("allocating physical lut for %s", na->name);
	lut->plut = nm_alloc_plut(lim);
	if (lut->plut == NULL) {
		nm_prerr("Failed to allocate physical lut for %s", na->name);
		return ENOMEM;
	}

	for (i = 0; i < lim; i += p->_clustentries) {
		lut->plut[i].paddr = 0;
	}

	for (i = 0; i < lim; i += p->_clustentries) {
		int j;

		if (p->lut[i].vaddr == NULL)
			continue;

		error = netmap_load_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr,
		    p->lut[i].vaddr, p->_clustsize);
		if (error) {
			nm_prerr("Failed to map cluster #%d from the %s pool", i, p->name);
			break;
		}

		for (j = 1; j < p->_clustentries; j++) {
			lut->plut[i + j].paddr = lut->plut[i + j - 1].paddr + p->_objsize;
		}
	}

	if (error)
		netmap_mem_unmap(p, na);

#endif /* linux */

	return error;
}

static int
netmap_mem_finalize_all(struct netmap_mem_d *nmd)
{
	int i;
	if (nmd->flags & NETMAP_MEM_FINALIZED)
		return 0;
	nmd->lasterr = 0;
	nmd->nm_totalsize = 0;
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_finalize_obj_allocator(&nmd->pools[i]);
		if (nmd->lasterr)
			goto error;
		nmd->nm_totalsize += nmd->pools[i].memtotal;
	}
	nmd->lasterr = netmap_mem_init_bitmaps(nmd);
	if (nmd->lasterr)
		goto error;

	nmd->flags |= NETMAP_MEM_FINALIZED;

	if (netmap_verbose)
		nm_prinf("interfaces %zd KB, rings %zd KB, buffers %zd MB",
		    nmd->pools[NETMAP_IF_POOL].memtotal >> 10,
		    nmd->pools[NETMAP_RING_POOL].memtotal >> 10,
		    nmd->pools[NETMAP_BUF_POOL].memtotal >> 20);

	if (netmap_verbose)
		nm_prinf("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree);


	return 0;
error:
	netmap_mem_reset_all(nmd);
	return nmd->lasterr;
}

/*
 * allocator for private memory
 */
static void *
_netmap_mem_private_new(size_t size, struct netmap_obj_params *p,
		struct netmap_mem_ops *ops, int *perr)
{
	struct netmap_mem_d *d = NULL;
	int i, err = 0;

	d = nm_os_malloc(size);
	if (d == NULL) {
		err = ENOMEM;
		goto error;
	}

	*d = nm_blueprint;
	d->ops = ops;

	err = nm_mem_assign_id(d);
	if (err)
		goto error_free;
	snprintf(d->name, NM_MEM_NAMESZ, "%d", d->nm_id);

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ,
				nm_blueprint.pools[i].name,
				d->name);
		d->params[i].num = p[i].num;
		d->params[i].size = p[i].size;
	}

	NMA_LOCK_INIT(d);

	err = netmap_mem_config(d);
	if (err)
		goto error_rel_id;

	d->flags &= ~NETMAP_MEM_FINALIZED;

	return d;

error_rel_id:
	NMA_LOCK_DESTROY(d);
	nm_mem_release_id(d);
error_free:
	nm_os_free(d);
error:
	if (perr)
		*perr = err;
	return NULL;
}

struct netmap_mem_d *
netmap_mem_private_new(u_int txr, u_int txd, u_int rxr, u_int rxd,
		u_int extra_bufs, u_int npipes, int *perr)
{
	struct netmap_mem_d *d = NULL;
	struct netmap_obj_params p[NETMAP_POOLS_NR];
	int i;
	u_int v, maxd;
	/* account for the fake host rings */
	txr++;
	rxr++;

	/* copy the min values */
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		p[i] = netmap_min_priv_params[i];
	}

	/* possibly increase them to fit user request */
	v = sizeof(struct netmap_if) + sizeof(ssize_t) * (txr + rxr);
	if (p[NETMAP_IF_POOL].size < v)
		p[NETMAP_IF_POOL].size = v;
	v = 2 + 4 * npipes;
	if (p[NETMAP_IF_POOL].num < v)
		p[NETMAP_IF_POOL].num = v;
	maxd = (txd > rxd) ? txd : rxd;
	v = sizeof(struct netmap_ring) + sizeof(struct netmap_slot) * maxd;
	if (p[NETMAP_RING_POOL].size < v)
		p[NETMAP_RING_POOL].size = v;
	/* each pipe endpoint needs two tx rings (1 normal + 1 host, fake)
	 * and two rx rings (again, 1 normal and 1 fake host)
	 */
	v = txr + rxr + 8 * npipes;
	if (p[NETMAP_RING_POOL].num < v)
		p[NETMAP_RING_POOL].num = v;
	/* for each pipe we only need the buffers for the 4 "real" rings.
	 * On the other hand, the pipe ring dimension may be different from
	 * the parent port ring dimension. As a compromise, we allocate twice
	 * the space actually needed if the pipe rings were the same size as
	 * the parent rings.
	 */
	v = (4 * npipes + rxr) * rxd + (4 * npipes + txr) * txd + 2 + extra_bufs;
		/* the +2 is for the tx and rx fake buffers (indices 0 and 1) */
	if (p[NETMAP_BUF_POOL].num < v)
		p[NETMAP_BUF_POOL].num = v;

	if (netmap_verbose)
		nm_prinf("req if %d*%d ring %d*%d buf %d*%d",
			p[NETMAP_IF_POOL].num,
			p[NETMAP_IF_POOL].size,
			p[NETMAP_RING_POOL].num,
			p[NETMAP_RING_POOL].size,
			p[NETMAP_BUF_POOL].num,
			p[NETMAP_BUF_POOL].size);

	d = _netmap_mem_private_new(sizeof(*d), p, &netmap_mem_global_ops, perr);

	return d;
}
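
/*
 * For example (numbers only, not a guaranteed interface): a request
 * for one tx and one rx ring of 1024 slots each, with no pipes and no
 * extra buffers, becomes txr = rxr = 2 after adding the host rings,
 * so the buffer pool needs 2*1024 + 2*1024 + 2 = 4098 buffers, which
 * matches the default in netmap_min_priv_params above.
 */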


/* call with lock held */
static int
netmap_mem2_config(struct netmap_mem_d *nmd)
{
	int i;

	if (!netmap_mem_params_changed(nmd->params))
		goto out;

	nm_prdis("reconfiguring");

	if (nmd->flags & NETMAP_MEM_FINALIZED) {
		/* reset previous allocation */
		for (i = 0; i < NETMAP_POOLS_NR; i++) {
			netmap_reset_obj_allocator(&nmd->pools[i]);
		}
		nmd->flags &= ~NETMAP_MEM_FINALIZED;
	}

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i],
				nmd->params[i].num, nmd->params[i].size);
		if (nmd->lasterr)
			goto out;
	}

out:

	return nmd->lasterr;
}

static int
netmap_mem2_finalize(struct netmap_mem_d *nmd)
{
	if (nmd->flags & NETMAP_MEM_FINALIZED)
		goto out;

	if (netmap_mem_finalize_all(nmd))
		goto out;

	nmd->lasterr = 0;

out:
	return nmd->lasterr;
}

static void
netmap_mem2_delete(struct netmap_mem_d *nmd)
{
	int i;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_destroy_obj_allocator(&nmd->pools[i]);
	}

	NMA_LOCK_DESTROY(nmd);
	if (nmd != &nm_mem)
		nm_os_free(nmd);
}

#ifdef WITH_EXTMEM
/* doubly linked list of all existing external allocators */
static struct netmap_mem_ext *netmap_mem_ext_list = NULL;
NM_MTX_T nm_mem_ext_list_lock;
#endif /* WITH_EXTMEM */

int
netmap_mem_init(void)
{
	NM_MTX_INIT(nm_mem_list_lock);
	NMA_LOCK_INIT(&nm_mem);
	netmap_mem_get(&nm_mem);
#ifdef WITH_EXTMEM
	NM_MTX_INIT(nm_mem_ext_list_lock);
#endif /* WITH_EXTMEM */
	return (0);
}

void
netmap_mem_fini(void)
{
	netmap_mem_put(&nm_mem);
}

static void
netmap_free_rings(struct netmap_adapter *na)
{
	enum txrx t;

	for_rx_tx(t) {
		u_int i;
		for (i = 0; i < netmap_all_rings(na, t); i++) {
			struct netmap_kring *kring = NMR(na, t)[i];
			struct netmap_ring *ring = kring->ring;

			if (ring == NULL || kring->users > 0 || (kring->nr_kflags & NKR_NEEDRING)) {
				if (netmap_debug & NM_DEBUG_MEM)
					nm_prinf("NOT deleting ring %s (ring %p, users %d needring %d)",
					    kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING);
				continue;
			}
			if (netmap_debug & NM_DEBUG_MEM)
				nm_prinf("deleting ring %s", kring->name);
			if (!(kring->nr_kflags & NKR_FAKERING)) {
				nm_prdis("freeing bufs for %s", kring->name);
				netmap_free_bufs(na->nm_mem, ring->slot, kring->nkr_num_slots);
			} else {
				nm_prdis("NOT freeing bufs for %s", kring->name);
			}
			netmap_ring_free(na->nm_mem, ring);
			kring->ring = NULL;
		}
	}
}

/* call with NMA_LOCK held
 *
 * Allocate netmap rings and buffers for this card.
 * The rings are contiguous, but have variable size.
 * The kring array must follow the layout described
 * in netmap_krings_create().
 */
static int
netmap_mem2_rings_create(struct netmap_adapter *na)
{
	enum txrx t;

	for_rx_tx(t) {
		u_int i;

		for (i = 0; i < netmap_all_rings(na, t); i++) {
			struct netmap_kring *kring = NMR(na, t)[i];
			struct netmap_ring *ring = kring->ring;
			u_int len, ndesc;

			if (ring || (!kring->users && !(kring->nr_kflags & NKR_NEEDRING))) {
				/* unneeded, or already created by somebody else */
				if (netmap_debug & NM_DEBUG_MEM)
					nm_prinf("NOT creating ring %s (ring %p, users %d needring %d)",
					    kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING);
				continue;
			}
			if (netmap_debug & NM_DEBUG_MEM)
				nm_prinf("creating %s", kring->name);
			ndesc = kring->nkr_num_slots;
			len = sizeof(struct netmap_ring) +
				  ndesc * sizeof(struct netmap_slot);
			ring = netmap_ring_malloc(na->nm_mem, len);
			if (ring == NULL) {
				nm_prerr("Cannot allocate %s_ring", nm_txrx2str(t));
				goto cleanup;
			}
			nm_prdis("txring at %p", ring);
			kring->ring = ring;
			*(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
			*(int64_t *)(uintptr_t)&ring->buf_ofs =
			    (na->nm_mem->pools[NETMAP_IF_POOL].memtotal +
				na->nm_mem->pools[NETMAP_RING_POOL].memtotal) -
				netmap_ring_offset(na->nm_mem, ring);

			/* copy values from kring */
			ring->head = kring->rhead;
			ring->cur = kring->rcur;
			ring->tail = kring->rtail;
			*(uint32_t *)(uintptr_t)&ring->nr_buf_size =
				netmap_mem_bufsize(na->nm_mem);
			nm_prdis("%s h %d c %d t %d", kring->name,
				ring->head, ring->cur, ring->tail);
			nm_prdis("initializing slots for %s_ring", nm_txrx2str(t));
			if (!(kring->nr_kflags & NKR_FAKERING)) {
				/* this is a real ring */
				if (netmap_debug & NM_DEBUG_MEM)
					nm_prinf("allocating buffers for %s", kring->name);
				if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) {
					nm_prerr("Cannot allocate buffers for %s_ring", nm_txrx2str(t));
					goto cleanup;
				}
			} else {
				/* this is a fake ring, set all indices to 0 */
				if (netmap_debug & NM_DEBUG_MEM)
					nm_prinf("NOT allocating buffers for %s", kring->name);
				netmap_mem_set_ring(na->nm_mem, ring->slot, ndesc, 0);
			}
			/* ring info */
			*(uint16_t *)(uintptr_t)&ring->ringid = kring->ring_id;
			*(uint16_t *)(uintptr_t)&ring->dir = kring->tx;
		}
	}

	return 0;

cleanup:
	/* we cannot actually cleanup here, since we don't own kring->users
	 * and kring->nr_kflags & NKR_NEEDRING. The caller must decrement
	 * the first or zero-out the second, then call netmap_free_rings()
	 * to do the cleanup
	 */

	return ENOMEM;
}

static void
netmap_mem2_rings_delete(struct netmap_adapter *na)
{
	/* last instance, release bufs and rings */
	netmap_free_rings(na);
}
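
/*
 * Note on the layout produced below: struct netmap_if is followed
 * inline by the ring_ofs[] array, with the n[NR_TX] tx entries first
 * and the n[NR_RX] rx entries after them; each entry is the offset of
 * the corresponding netmap_ring relative to the nifp itself (or 0 for
 * rings that are not exposed to this file descriptor).
 */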

/* call with NMA_LOCK held */
/*
 * Allocate the per-fd structure netmap_if.
 *
 * We assume that the configuration stored in na
 * (number of tx/rx rings and descs) does not change while
 * the interface is in netmap mode.
 */
static struct netmap_if *
netmap_mem2_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv)
{
	struct netmap_if *nifp;
	ssize_t base; /* handy for relative offsets between rings and nifp */
	u_int i, len, n[NR_TXRX], ntot;
	enum txrx t;

	ntot = 0;
	for_rx_tx(t) {
		/* account for the (possibly fake) host rings */
		n[t] = netmap_all_rings(na, t);
		ntot += n[t];
	}
	/*
	 * the descriptor is followed inline by an array of offsets
	 * to the tx and rx rings in the shared memory region.
	 */

	len = sizeof(struct netmap_if) + (ntot * sizeof(ssize_t));
	nifp = netmap_if_malloc(na->nm_mem, len);
	if (nifp == NULL) {
		NMA_UNLOCK(na->nm_mem);
		return NULL;
	}

	/* initialize base fields -- override const */
	*(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
	*(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
	*(u_int *)(uintptr_t)&nifp->ni_host_tx_rings =
		(na->num_host_tx_rings ? na->num_host_tx_rings : 1);
	*(u_int *)(uintptr_t)&nifp->ni_host_rx_rings =
		(na->num_host_rx_rings ? na->num_host_rx_rings : 1);
	strlcpy(nifp->ni_name, na->name, sizeof(nifp->ni_name));

	/*
	 * fill the slots for the rx and tx rings. They contain the offset
	 * between the ring and nifp, so the information is usable in
	 * userspace to reach the ring from the nifp.
	 */
	base = netmap_if_offset(na->nm_mem, nifp);
	for (i = 0; i < n[NR_TX]; i++) {
		/* XXX instead of ofs == 0 maybe use the offset of an error
		 * ring, like we do for buffers? */
		ssize_t ofs = 0;

		if (na->tx_rings[i]->ring != NULL && i >= priv->np_qfirst[NR_TX]
				&& i < priv->np_qlast[NR_TX]) {
			ofs = netmap_ring_offset(na->nm_mem,
						 na->tx_rings[i]->ring) - base;
		}
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] = ofs;
	}
	for (i = 0; i < n[NR_RX]; i++) {
		/* XXX instead of ofs == 0 maybe use the offset of an error
		 * ring, like we do for buffers? */
static void
netmap_mem2_if_delete(struct netmap_adapter *na, struct netmap_if *nifp)
{
	if (nifp == NULL)
		/* nothing to do */
		return;
	if (nifp->ni_bufs_head)
		netmap_extra_free(na, nifp->ni_bufs_head);
	netmap_if_free(na->nm_mem, nifp);
}

static void
netmap_mem2_deref(struct netmap_mem_d *nmd)
{

	if (netmap_debug & NM_DEBUG_MEM)
		nm_prinf("active = %d", nmd->active);

}

struct netmap_mem_ops netmap_mem_global_ops = {
	.nmd_get_lut = netmap_mem2_get_lut,
	.nmd_get_info = netmap_mem2_get_info,
	.nmd_ofstophys = netmap_mem2_ofstophys,
	.nmd_config = netmap_mem2_config,
	.nmd_finalize = netmap_mem2_finalize,
	.nmd_deref = netmap_mem2_deref,
	.nmd_delete = netmap_mem2_delete,
	.nmd_if_offset = netmap_mem2_if_offset,
	.nmd_if_new = netmap_mem2_if_new,
	.nmd_if_delete = netmap_mem2_if_delete,
	.nmd_rings_create = netmap_mem2_rings_create,
	.nmd_rings_delete = netmap_mem2_rings_delete
};

int
netmap_mem_pools_info_get(struct nmreq_pools_info *req,
		struct netmap_mem_d *nmd)
{
	int ret;

	ret = netmap_mem_get_info(nmd, &req->nr_memsize, NULL,
					&req->nr_mem_id);
	if (ret) {
		return ret;
	}

	NMA_LOCK(nmd);
	req->nr_if_pool_offset = 0;
	req->nr_if_pool_objtotal = nmd->pools[NETMAP_IF_POOL].objtotal;
	req->nr_if_pool_objsize = nmd->pools[NETMAP_IF_POOL]._objsize;

	req->nr_ring_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal;
	req->nr_ring_pool_objtotal = nmd->pools[NETMAP_RING_POOL].objtotal;
	req->nr_ring_pool_objsize = nmd->pools[NETMAP_RING_POOL]._objsize;

	req->nr_buf_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal +
				  nmd->pools[NETMAP_RING_POOL].memtotal;
	req->nr_buf_pool_objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal;
	req->nr_buf_pool_objsize = nmd->pools[NETMAP_BUF_POOL]._objsize;
	NMA_UNLOCK(nmd);

	return 0;
}

#ifdef WITH_EXTMEM
struct netmap_mem_ext {
	struct netmap_mem_d up;

	struct nm_os_extmem *os;
	struct netmap_mem_ext *next, *prev;
};

/* call with nm_mem_list_lock held */
static void
netmap_mem_ext_register(struct netmap_mem_ext *e)
{
	NM_MTX_LOCK(nm_mem_ext_list_lock);
	if (netmap_mem_ext_list)
		netmap_mem_ext_list->prev = e;
	e->next = netmap_mem_ext_list;
	netmap_mem_ext_list = e;
	e->prev = NULL;
	NM_MTX_UNLOCK(nm_mem_ext_list_lock);
}

/* call with nm_mem_list_lock held */
static void
netmap_mem_ext_unregister(struct netmap_mem_ext *e)
{
	if (e->prev)
		e->prev->next = e->next;
	else
		netmap_mem_ext_list = e->next;
	if (e->next)
		e->next->prev = e->prev;
	e->prev = e->next = NULL;
}

static struct netmap_mem_ext *
netmap_mem_ext_search(struct nm_os_extmem *os)
{
	struct netmap_mem_ext *e;

	NM_MTX_LOCK(nm_mem_ext_list_lock);
	for (e = netmap_mem_ext_list; e; e = e->next) {
		if (nm_os_extmem_isequal(e->os, os)) {
			netmap_mem_get(&e->up);
			break;
		}
	}
	NM_MTX_UNLOCK(nm_mem_ext_list_lock);
	return e;
}

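/*
 * External-memory allocators are shared: if two requests name the same
 * user memory, netmap_mem_ext_search() above returns the existing
 * allocator with an extra reference (netmap_mem_get()) instead of
 * building a second one. The request typically comes from userspace as
 * an extmem option attached to the register request, carrying a pointer
 * to the user region plus the desired pool geometry; a rough sketch of
 * the kernel-side entry point (see netmap_mem_ext_create() below) is:
 *
 *	nmd = netmap_mem_ext_create(usrptr, &pools_info, &error);
 *
 * where pools_info is a struct nmreq_pools_info filled in (or left
 * zeroed to get the defaults) by the application.
 */
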

static void
netmap_mem_ext_delete(struct netmap_mem_d *d)
{
	int i;
	struct netmap_mem_ext *e =
		(struct netmap_mem_ext *)d;

	netmap_mem_ext_unregister(e);

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		struct netmap_obj_pool *p = &d->pools[i];

		if (p->lut) {
			nm_free_lut(p->lut, p->objtotal);
			p->lut = NULL;
		}
	}
	if (e->os)
		nm_os_extmem_delete(e->os);
	netmap_mem2_delete(d);
}

static int
netmap_mem_ext_config(struct netmap_mem_d *nmd)
{
	return 0;
}

struct netmap_mem_ops netmap_mem_ext_ops = {
	.nmd_get_lut = netmap_mem2_get_lut,
	.nmd_get_info = netmap_mem2_get_info,
	.nmd_ofstophys = netmap_mem2_ofstophys,
	.nmd_config = netmap_mem_ext_config,
	.nmd_finalize = netmap_mem2_finalize,
	.nmd_deref = netmap_mem2_deref,
	.nmd_delete = netmap_mem_ext_delete,
	.nmd_if_offset = netmap_mem2_if_offset,
	.nmd_if_new = netmap_mem2_if_new,
	.nmd_if_delete = netmap_mem2_if_delete,
	.nmd_rings_create = netmap_mem2_rings_create,
	.nmd_rings_delete = netmap_mem2_rings_delete
};

struct netmap_mem_d *
netmap_mem_ext_create(uint64_t usrptr, struct nmreq_pools_info *pi, int *perror)
{
	int error = 0;
	int i, j;
	struct netmap_mem_ext *nme;
	char *clust;
	size_t off;
	struct nm_os_extmem *os = NULL;
	int nr_pages;

	/* XXX sanity checks */
	if (pi->nr_if_pool_objtotal == 0)
		pi->nr_if_pool_objtotal = netmap_min_priv_params[NETMAP_IF_POOL].num;
	if (pi->nr_if_pool_objsize == 0)
		pi->nr_if_pool_objsize = netmap_min_priv_params[NETMAP_IF_POOL].size;
	if (pi->nr_ring_pool_objtotal == 0)
		pi->nr_ring_pool_objtotal = netmap_min_priv_params[NETMAP_RING_POOL].num;
	if (pi->nr_ring_pool_objsize == 0)
		pi->nr_ring_pool_objsize = netmap_min_priv_params[NETMAP_RING_POOL].size;
	if (pi->nr_buf_pool_objtotal == 0)
		pi->nr_buf_pool_objtotal = netmap_min_priv_params[NETMAP_BUF_POOL].num;
	if (pi->nr_buf_pool_objsize == 0)
		pi->nr_buf_pool_objsize = netmap_min_priv_params[NETMAP_BUF_POOL].size;
	if (netmap_verbose & NM_DEBUG_MEM)
		nm_prinf("if %d %d ring %d %d buf %d %d",
			pi->nr_if_pool_objtotal, pi->nr_if_pool_objsize,
			pi->nr_ring_pool_objtotal, pi->nr_ring_pool_objsize,
			pi->nr_buf_pool_objtotal, pi->nr_buf_pool_objsize);

	os = nm_os_extmem_create(usrptr, pi, &error);
	if (os == NULL) {
		nm_prerr("os extmem creation failed");
		goto out;
	}

	nme = netmap_mem_ext_search(os);
	if (nme) {
		nm_os_extmem_delete(os);
		return &nme->up;
	}
	if (netmap_verbose & NM_DEBUG_MEM)
		nm_prinf("not found, creating new");

	nme = _netmap_mem_private_new(sizeof(*nme),
			(struct netmap_obj_params[]){
				{ pi->nr_if_pool_objsize, pi->nr_if_pool_objtotal },
				{ pi->nr_ring_pool_objsize, pi->nr_ring_pool_objtotal },
				{ pi->nr_buf_pool_objsize, pi->nr_buf_pool_objtotal }},
			&netmap_mem_ext_ops,
			&error);
	if (nme == NULL)
		goto out_unmap;

	nr_pages = nm_os_extmem_nr_pages(os);

	/* from now on pages will be released by the nme destructor;
	 * we clear os to prevent the release in out_unmap below
	 */
	nme->os = os;
	os = NULL; /* pass ownership */

	clust = nm_os_extmem_nextpage(nme->os);
	off = 0;
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
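		/* Carve the user pages into the three pools. Objects never
		 * straddle pages that are not contiguous in the kernel
		 * mapping: whenever an object would cross into a
		 * non-adjacent page (or we run out of pages), it is marked
		 * in invalid_bitmap so that it is never handed out.
		 */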
		struct netmap_obj_pool *p = &nme->up.pools[i];
		struct netmap_obj_params *o = &nme->up.params[i];

		p->_objsize = o->size;
		p->_clustsize = o->size;
		p->_clustentries = 1;

		p->lut = nm_alloc_lut(o->num);
		if (p->lut == NULL) {
			error = ENOMEM;
			goto out_delete;
		}

		p->bitmap_slots = (o->num + sizeof(uint32_t) - 1) / sizeof(uint32_t);
		p->invalid_bitmap = nm_os_malloc(sizeof(uint32_t) * p->bitmap_slots);
		if (p->invalid_bitmap == NULL) {
			error = ENOMEM;
			goto out_delete;
		}

		if (nr_pages == 0) {
			p->objtotal = 0;
			p->memtotal = 0;
			p->objfree = 0;
			continue;
		}

		for (j = 0; j < o->num && nr_pages > 0; j++) {
			size_t noff;

			p->lut[j].vaddr = clust + off;
#if !defined(linux) && !defined(_WIN32)
			p->lut[j].paddr = vtophys(p->lut[j].vaddr);
#endif
			nm_prdis("%s %d at %p", p->name, j, p->lut[j].vaddr);
			noff = off + p->_objsize;
			if (noff < PAGE_SIZE) {
				off = noff;
				continue;
			}
			nm_prdis("too big, recomputing offset...");
			while (noff >= PAGE_SIZE) {
				char *old_clust = clust;
				noff -= PAGE_SIZE;
				clust = nm_os_extmem_nextpage(nme->os);
				nr_pages--;
				nm_prdis("noff %zu page %p nr_pages %d", noff,
						page_to_virt(*pages), nr_pages);
				if (noff > 0 && !nm_isset(p->invalid_bitmap, j) &&
					(nr_pages == 0 ||
					 old_clust + PAGE_SIZE != clust))
				{
					/* out of space or non-contiguous,
					 * drop this object
					 */
					p->invalid_bitmap[j >> 5] |= 1U << (j & 31U);
					nm_prdis("non contiguous at off %zu, drop", noff);
				}
				if (nr_pages == 0)
					break;
			}
			off = noff;
		}
		p->objtotal = j;
		p->numclusters = p->objtotal;
		p->memtotal = j * (size_t)p->_objsize;
		nm_prdis("%d memtotal %zu", j, p->memtotal);
	}

	netmap_mem_ext_register(nme);

	return &nme->up;

out_delete:
	netmap_mem_put(&nme->up);
out_unmap:
	if (os)
		nm_os_extmem_delete(os);
out:
	if (perror)
		*perror = error;
	return NULL;

}
#endif /* WITH_EXTMEM */


#ifdef WITH_PTNETMAP
struct mem_pt_if {
	struct mem_pt_if *next;
	struct ifnet *ifp;
	unsigned int nifp_offset;
};

/* Netmap allocator for ptnetmap guests. */
struct netmap_mem_ptg {
	struct netmap_mem_d up;

	vm_paddr_t nm_paddr;		/* physical address in the guest */
	void *nm_addr;			/* virtual address in the guest */
	struct netmap_lut buf_lut;	/* lookup table for BUF pool in the guest */
	nm_memid_t host_mem_id;		/* allocator identifier in the host */
	struct ptnetmap_memdev *ptn_dev;/* ptnetmap memdev */
	struct mem_pt_if *pt_ifs;	/* list of interfaces in passthrough */
};

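/*
 * In a ptnetmap guest nothing is allocated locally: the whole netmap
 * memory region lives in the host and is exposed to the guest through
 * the ptnetmap-memdev PCI BAR. The allocator below therefore mostly
 * translates offsets: virtual addresses are nm_addr + offset, physical
 * addresses are nm_paddr + offset, and the only per-object state kept
 * in the guest is the lookup table for the buffer pool.
 */
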
/* Link a passthrough interface to a passthrough netmap allocator. */
static int
netmap_mem_pt_guest_ifp_add(struct netmap_mem_d *nmd, struct ifnet *ifp,
			    unsigned int nifp_offset)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	struct mem_pt_if *ptif = nm_os_malloc(sizeof(*ptif));

	if (!ptif) {
		return ENOMEM;
	}

	NMA_LOCK(nmd);

	ptif->ifp = ifp;
	ptif->nifp_offset = nifp_offset;

	if (ptnmd->pt_ifs) {
		ptif->next = ptnmd->pt_ifs;
	}
	ptnmd->pt_ifs = ptif;

	NMA_UNLOCK(nmd);

	nm_prinf("ifp=%s,nifp_offset=%u",
		ptif->ifp->if_xname, ptif->nifp_offset);

	return 0;
}

/* Called with NMA_LOCK(nmd) held. */
static struct mem_pt_if *
netmap_mem_pt_guest_ifp_lookup(struct netmap_mem_d *nmd, struct ifnet *ifp)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	struct mem_pt_if *curr;

	for (curr = ptnmd->pt_ifs; curr; curr = curr->next) {
		if (curr->ifp == ifp) {
			return curr;
		}
	}

	return NULL;
}

/* Unlink a passthrough interface from a passthrough netmap allocator. */
int
netmap_mem_pt_guest_ifp_del(struct netmap_mem_d *nmd, struct ifnet *ifp)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	struct mem_pt_if *prev = NULL;
	struct mem_pt_if *curr;
	int ret = -1;

	NMA_LOCK(nmd);

	for (curr = ptnmd->pt_ifs; curr; curr = curr->next) {
		if (curr->ifp == ifp) {
			if (prev) {
				prev->next = curr->next;
			} else {
				ptnmd->pt_ifs = curr->next;
			}
			nm_prinf("removed (ifp=%s,nifp_offset=%u)",
				curr->ifp->if_xname, curr->nifp_offset);
			nm_os_free(curr);
			ret = 0;
			break;
		}
		prev = curr;
	}

	NMA_UNLOCK(nmd);

	return ret;
}

static int
netmap_mem_pt_guest_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;

	if (!(nmd->flags & NETMAP_MEM_FINALIZED)) {
		return EINVAL;
	}

	*lut = ptnmd->buf_lut;
	return 0;
}

static int
netmap_mem_pt_guest_get_info(struct netmap_mem_d *nmd, uint64_t *size,
			     u_int *memflags, uint16_t *id)
{
	int error = 0;

	error = nmd->ops->nmd_config(nmd);
	if (error)
		goto out;

	if (size)
		*size = nmd->nm_totalsize;
	if (memflags)
		*memflags = nmd->flags;
	if (id)
		*id = nmd->nm_id;

out:
	return error;
}

static vm_paddr_t
netmap_mem_pt_guest_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	vm_paddr_t paddr;

	/* if the offset is valid, just return nm_paddr + off */
	paddr = (vm_paddr_t)(ptnmd->nm_paddr + off);
	nm_prdis("off %lx paddr %lx", off, (unsigned long)paddr);
	return paddr;
}

static int
netmap_mem_pt_guest_config(struct netmap_mem_d *nmd)
{
	/* nothing to do, we are configured on creation
	 * and configuration never changes thereafter
	 */
	return 0;
}

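/*
 * Note: only the buffer pool gets a guest-side lookup table (built in
 * netmap_mem_pt_guest_finalize() below from the sizes advertised by the
 * memory device). Interface and ring objects are reached directly as
 * nm_addr + offset, so no per-object bookkeeping is needed for them.
 */
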
static int
netmap_mem_pt_guest_finalize(struct netmap_mem_d *nmd)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;
	uint64_t mem_size;
	uint32_t bufsize;
	uint32_t nbuffers;
	uint32_t poolofs;
	vm_paddr_t paddr;
	char *vaddr;
	int i;
	int error = 0;

	if (nmd->flags & NETMAP_MEM_FINALIZED)
		goto out;

	if (ptnmd->ptn_dev == NULL) {
		nm_prerr("ptnetmap memdev not attached");
		error = ENOMEM;
		goto out;
	}
	/* Map memory through ptnetmap-memdev BAR. */
	error = nm_os_pt_memdev_iomap(ptnmd->ptn_dev, &ptnmd->nm_paddr,
				      &ptnmd->nm_addr, &mem_size);
	if (error)
		goto out;

	/* Initialize the lut using the information contained in the
	 * ptnetmap memory device. */
	bufsize = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
					 PTNET_MDEV_IO_BUF_POOL_OBJSZ);
	nbuffers = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
					  PTNET_MDEV_IO_BUF_POOL_OBJNUM);

	/* allocate the lut */
	if (ptnmd->buf_lut.lut == NULL) {
		nm_prinf("allocating lut");
		ptnmd->buf_lut.lut = nm_alloc_lut(nbuffers);
		if (ptnmd->buf_lut.lut == NULL) {
			nm_prerr("lut allocation failed");
			return ENOMEM;
		}
	}

	/* we have physically contiguous memory mapped through PCI BAR */
	poolofs = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
					 PTNET_MDEV_IO_BUF_POOL_OFS);
	vaddr = (char *)(ptnmd->nm_addr) + poolofs;
	paddr = ptnmd->nm_paddr + poolofs;

	for (i = 0; i < nbuffers; i++) {
		ptnmd->buf_lut.lut[i].vaddr = vaddr;
		vaddr += bufsize;
		paddr += bufsize;
	}

	ptnmd->buf_lut.objtotal = nbuffers;
	ptnmd->buf_lut.objsize = bufsize;
	nmd->nm_totalsize = mem_size;

	/* Initialize these fields as they are needed by
	 * netmap_mem_bufsize().
	 * XXX please improve this: why do we need this
	 * replication? Maybe nmd->pools[] should not be
	 * there for the guest allocator? */
	nmd->pools[NETMAP_BUF_POOL]._objsize = bufsize;
	nmd->pools[NETMAP_BUF_POOL]._objtotal = nbuffers;

	nmd->flags |= NETMAP_MEM_FINALIZED;
out:
	return error;
}

static void
netmap_mem_pt_guest_deref(struct netmap_mem_d *nmd)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;

	if (nmd->active == 1 &&
	    (nmd->flags & NETMAP_MEM_FINALIZED)) {
		nmd->flags &= ~NETMAP_MEM_FINALIZED;
		/* unmap ptnetmap-memdev memory */
		if (ptnmd->ptn_dev) {
			nm_os_pt_memdev_iounmap(ptnmd->ptn_dev);
		}
		ptnmd->nm_addr = NULL;
		ptnmd->nm_paddr = 0;
	}
}

static ssize_t
netmap_mem_pt_guest_if_offset(struct netmap_mem_d *nmd, const void *vaddr)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;

	return (const char *)(vaddr) - (char *)(ptnmd->nm_addr);
}

static void
netmap_mem_pt_guest_delete(struct netmap_mem_d *nmd)
{
	if (nmd == NULL)
		return;
	if (netmap_verbose)
		nm_prinf("deleting %p", nmd);
	if (nmd->active > 0)
		nm_prerr("bug: deleting mem allocator with active=%d!", nmd->active);
	if (netmap_verbose)
		nm_prinf("done deleting %p", nmd);
	NMA_LOCK_DESTROY(nmd);
	nm_os_free(nmd);
}

static struct netmap_if *
netmap_mem_pt_guest_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)na->nm_mem;
	struct mem_pt_if *ptif;
	struct netmap_if *nifp = NULL;

	ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp);
	if (ptif == NULL) {
		nm_prerr("interface %s is not in passthrough", na->name);
		goto out;
	}

	nifp = (struct netmap_if *)((char *)(ptnmd->nm_addr) +
				    ptif->nifp_offset);
out:
	return nifp;
}

static void
netmap_mem_pt_guest_if_delete(struct netmap_adapter *na, struct netmap_if *nifp)
{
	struct mem_pt_if *ptif;

	ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp);
	if (ptif == NULL) {
		nm_prerr("interface %s is not in passthrough", na->name);
	}
}

static int
netmap_mem_pt_guest_rings_create(struct netmap_adapter *na)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)na->nm_mem;
	struct mem_pt_if *ptif;
	struct netmap_if *nifp;
	int i, error = -1;

	ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp);
	if (ptif == NULL) {
		nm_prerr("interface %s is not in passthrough", na->name);
		goto out;
	}

	/* point each kring to the corresponding backend ring */
	nifp = (struct netmap_if *)((char *)ptnmd->nm_addr + ptif->nifp_offset);
	for (i = 0; i < netmap_all_rings(na, NR_TX); i++) {
		struct netmap_kring *kring = na->tx_rings[i];
		if (kring->ring)
			continue;
		kring->ring = (struct netmap_ring *)
			((char *)nifp + nifp->ring_ofs[i]);
	}
	for (i = 0; i < netmap_all_rings(na, NR_RX); i++) {
		struct netmap_kring *kring = na->rx_rings[i];
		if (kring->ring)
			continue;
		kring->ring = (struct netmap_ring *)
			((char *)nifp +
			 nifp->ring_ofs[netmap_all_rings(na, NR_TX) + i]);
	}

	error = 0;
out:
	return error;
}

static void
netmap_mem_pt_guest_rings_delete(struct netmap_adapter *na)
{
#if 0
	enum txrx t;

	for_rx_tx(t) {
		u_int i;
		for (i = 0; i < nma_get_nrings(na, t) + 1; i++) {
			struct netmap_kring *kring = &NMR(na, t)[i];

			kring->ring = NULL;
		}
	}
#endif
}

static struct netmap_mem_ops netmap_mem_pt_guest_ops = {
	.nmd_get_lut = netmap_mem_pt_guest_get_lut,
	.nmd_get_info = netmap_mem_pt_guest_get_info,
	.nmd_ofstophys = netmap_mem_pt_guest_ofstophys,
	.nmd_config = netmap_mem_pt_guest_config,
	.nmd_finalize = netmap_mem_pt_guest_finalize,
	.nmd_deref = netmap_mem_pt_guest_deref,
	.nmd_if_offset = netmap_mem_pt_guest_if_offset,
	.nmd_delete = netmap_mem_pt_guest_delete,
	.nmd_if_new = netmap_mem_pt_guest_if_new,
	.nmd_if_delete = netmap_mem_pt_guest_if_delete,
	.nmd_rings_create = netmap_mem_pt_guest_rings_create,
	.nmd_rings_delete = netmap_mem_pt_guest_rings_delete
};

/* Called with nm_mem_list_lock held. */
static struct netmap_mem_d *
netmap_mem_pt_guest_find_memid(nm_memid_t mem_id)
{
	struct netmap_mem_d *mem = NULL;
	struct netmap_mem_d *scan = netmap_last_mem_d;

	do {
		/* find ptnetmap allocator through host ID */
		if (scan->ops->nmd_deref == netmap_mem_pt_guest_deref &&
		    ((struct netmap_mem_ptg *)(scan))->host_mem_id == mem_id) {
			mem = scan;
			mem->refcount++;
			NM_DBG_REFC(mem, __FUNCTION__, __LINE__);
			break;
		}
		scan = scan->next;
	} while (scan != netmap_last_mem_d);

	return mem;
}

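/*
 * Guest allocators are shared by host memory id: find_memid() above
 * returns an existing allocator with an extra reference, while
 * netmap_mem_pt_guest_get() below creates one on first use. Either the
 * ptnetmap memory device or a ptnet interface may trigger the creation;
 * see the comment before netmap_mem_pt_guest_attach().
 */
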
/* Called with nm_mem_list_lock held. */
static struct netmap_mem_d *
netmap_mem_pt_guest_create(nm_memid_t mem_id)
{
	struct netmap_mem_ptg *ptnmd;
	int err = 0;

	ptnmd = nm_os_malloc(sizeof(struct netmap_mem_ptg));
	if (ptnmd == NULL) {
		err = ENOMEM;
		goto error;
	}

	ptnmd->up.ops = &netmap_mem_pt_guest_ops;
	ptnmd->host_mem_id = mem_id;
	ptnmd->pt_ifs = NULL;

	/* Assign a new id in the guest (we have the lock) */
	err = nm_mem_assign_id_locked(&ptnmd->up);
	if (err)
		goto error;

	ptnmd->up.flags &= ~NETMAP_MEM_FINALIZED;
	ptnmd->up.flags |= NETMAP_MEM_IO;

	NMA_LOCK_INIT(&ptnmd->up);

	snprintf(ptnmd->up.name, NM_MEM_NAMESZ, "%d", ptnmd->up.nm_id);

	return &ptnmd->up;
error:
	netmap_mem_pt_guest_delete(&ptnmd->up);
	return NULL;
}

/*
 * find host id in guest allocators and create guest allocator
 * if it is not there
 */
static struct netmap_mem_d *
netmap_mem_pt_guest_get(nm_memid_t mem_id)
{
	struct netmap_mem_d *nmd;

	NM_MTX_LOCK(nm_mem_list_lock);
	nmd = netmap_mem_pt_guest_find_memid(mem_id);
	if (nmd == NULL) {
		nmd = netmap_mem_pt_guest_create(mem_id);
	}
	NM_MTX_UNLOCK(nm_mem_list_lock);

	return nmd;
}

/*
 * The guest allocator can be created by the ptnetmap_memdev (during the
 * device attach) or by the ptnet device (during netmap_attach).
 *
 * The order is not important (Linux and FreeBSD use different orders).
 * Whichever comes first creates the allocator; the second one simply
 * attaches to it.
 */

/* Called when ptnetmap_memdev is attaching, to attach a new allocator in
 * the guest */
struct netmap_mem_d *
netmap_mem_pt_guest_attach(struct ptnetmap_memdev *ptn_dev, nm_memid_t mem_id)
{
	struct netmap_mem_d *nmd;
	struct netmap_mem_ptg *ptnmd;

	nmd = netmap_mem_pt_guest_get(mem_id);

	/* assign this device to the guest allocator */
	if (nmd) {
		ptnmd = (struct netmap_mem_ptg *)nmd;
		ptnmd->ptn_dev = ptn_dev;
	}

	return nmd;
}

/* Called when the ptnet device is attaching */
struct netmap_mem_d *
netmap_mem_pt_guest_new(struct ifnet *ifp,
			unsigned int nifp_offset,
			unsigned int memid)
{
	struct netmap_mem_d *nmd;

	if (ifp == NULL) {
		return NULL;
	}

	nmd = netmap_mem_pt_guest_get((nm_memid_t)memid);

	if (nmd) {
		netmap_mem_pt_guest_ifp_add(nmd, ifp, nifp_offset);
	}

	return nmd;
}

#endif /* WITH_PTNETMAP */